//
// This file is auto-generated. Please don't modify it!
//
package org.opencv.imgproc;

import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.MatOfInt4;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.RotatedRect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.core.TermCriteria;
import org.opencv.utils.Converters;

public class Imgproc {

private static final int
    IPL_BORDER_CONSTANT = 0, IPL_BORDER_REPLICATE = 1, IPL_BORDER_REFLECT = 2,
    IPL_BORDER_WRAP = 3, IPL_BORDER_REFLECT_101 = 4, IPL_BORDER_TRANSPARENT = 5,
    CV_INTER_NN = 0, CV_INTER_LINEAR = 1, CV_INTER_CUBIC = 2, CV_INTER_AREA = 3, CV_INTER_LANCZOS4 = 4,
    CV_MOP_ERODE = 0, CV_MOP_DILATE = 1, CV_MOP_OPEN = 2, CV_MOP_CLOSE = 3,
    CV_MOP_GRADIENT = 4, CV_MOP_TOPHAT = 5, CV_MOP_BLACKHAT = 6,
    CV_RETR_EXTERNAL = 0, CV_RETR_LIST = 1, CV_RETR_CCOMP = 2, CV_RETR_TREE = 3, CV_RETR_FLOODFILL = 4,
    CV_CHAIN_APPROX_NONE = 1, CV_CHAIN_APPROX_SIMPLE = 2, CV_CHAIN_APPROX_TC89_L1 = 3, CV_CHAIN_APPROX_TC89_KCOS = 4,
    CV_THRESH_BINARY = 0, CV_THRESH_BINARY_INV = 1, CV_THRESH_TRUNC = 2, CV_THRESH_TOZERO = 3,
    CV_THRESH_TOZERO_INV = 4, CV_THRESH_MASK = 7, CV_THRESH_OTSU = 8;

public static final int
    CV_BLUR_NO_SCALE = 0, CV_BLUR = 1, CV_GAUSSIAN = 2, CV_MEDIAN = 3, CV_BILATERAL = 4,
    CV_GAUSSIAN_5x5 = 7, CV_SCHARR = -1, CV_MAX_SOBEL_KSIZE = 7,
    CV_RGBA2mRGBA = 125, CV_mRGBA2RGBA = 126,
    CV_WARP_FILL_OUTLIERS = 8, CV_WARP_INVERSE_MAP = 16,
    CV_SHAPE_RECT = 0, CV_SHAPE_CROSS = 1, CV_SHAPE_ELLIPSE = 2, CV_SHAPE_CUSTOM = 100,
    CV_CHAIN_CODE = 0, CV_LINK_RUNS = 5, CV_POLY_APPROX_DP = 0,
    CV_CONTOURS_MATCH_I1 = 1, CV_CONTOURS_MATCH_I2 = 2, CV_CONTOURS_MATCH_I3 = 3,
    CV_CLOCKWISE = 1, CV_COUNTER_CLOCKWISE = 2,
    CV_COMP_CORREL = 0, CV_COMP_CHISQR = 1, CV_COMP_INTERSECT = 2,
    CV_COMP_BHATTACHARYYA = 3, CV_COMP_HELLINGER = CV_COMP_BHATTACHARYYA,
    CV_DIST_MASK_3 = 3, CV_DIST_MASK_5 = 5, CV_DIST_MASK_PRECISE = 0,
    CV_DIST_LABEL_CCOMP = 0, CV_DIST_LABEL_PIXEL = 1,
    CV_DIST_USER = -1, CV_DIST_L1 = 1, CV_DIST_L2 = 2, CV_DIST_C = 3,
    CV_DIST_L12 = 4, CV_DIST_FAIR = 5, CV_DIST_WELSCH = 6, CV_DIST_HUBER = 7,
    CV_CANNY_L2_GRADIENT = (1 << 31),
    CV_HOUGH_STANDARD = 0, CV_HOUGH_PROBABILISTIC = 1, CV_HOUGH_MULTI_SCALE = 2, CV_HOUGH_GRADIENT = 3,
    BORDER_REPLICATE = IPL_BORDER_REPLICATE, BORDER_CONSTANT = IPL_BORDER_CONSTANT,
    BORDER_REFLECT = IPL_BORDER_REFLECT, BORDER_WRAP = IPL_BORDER_WRAP,
    BORDER_REFLECT_101 = IPL_BORDER_REFLECT_101, BORDER_REFLECT101 = BORDER_REFLECT_101,
    BORDER_TRANSPARENT = IPL_BORDER_TRANSPARENT, BORDER_DEFAULT = BORDER_REFLECT_101, BORDER_ISOLATED = 16,
    KERNEL_GENERAL = 0, KERNEL_SYMMETRICAL = 1, KERNEL_ASYMMETRICAL = 2, KERNEL_SMOOTH = 4, KERNEL_INTEGER = 8,
    MORPH_ERODE = CV_MOP_ERODE, MORPH_DILATE = CV_MOP_DILATE, MORPH_OPEN = CV_MOP_OPEN,
    MORPH_CLOSE = CV_MOP_CLOSE, MORPH_GRADIENT = CV_MOP_GRADIENT, MORPH_TOPHAT = CV_MOP_TOPHAT,
    MORPH_BLACKHAT = CV_MOP_BLACKHAT, MORPH_RECT = 0, MORPH_CROSS = 1, MORPH_ELLIPSE = 2,
    GHT_POSITION = 0, GHT_SCALE = 1, GHT_ROTATION = 2,
    INTER_NEAREST = CV_INTER_NN, INTER_LINEAR = CV_INTER_LINEAR, INTER_CUBIC = CV_INTER_CUBIC,
    INTER_AREA = CV_INTER_AREA, INTER_LANCZOS4 = CV_INTER_LANCZOS4, INTER_MAX = 7,
    WARP_INVERSE_MAP = CV_WARP_INVERSE_MAP,
    INTER_BITS = 5, INTER_BITS2 = INTER_BITS*2, INTER_TAB_SIZE = (1 << INTER_BITS),
/**
 * Finds edges in an image using the [Canny86] algorithm.
 *
 * The function finds edges in the input image image and marks them
 * in the output map edges using the Canny algorithm. The smallest
 * value between threshold1 and threshold2 is used for edge linking.
 * The largest value is used to find initial segments of strong edges.
 * See http://en.wikipedia.org/wiki/Canny_edge_detector
 *
 * Note:
 *   - An example on using the canny edge detector can be found at
 *     opencv_source_code/samples/cpp/edge.cpp
 *   - (Python) An example on using the canny edge detector can be found at
 *     opencv_source_code/samples/python/edge.py
 *
 * @param image single-channel 8-bit input image.
 * @param edges output edge map; it has the same size and type as image.
 * @param threshold1 first threshold for the hysteresis procedure.
 * @param threshold2 second threshold for the hysteresis procedure.
 * @param apertureSize aperture size for the Sobel operator.
 * @param L2gradient a flag, indicating whether a more accurate
 * L_2 norm =sqrt((dI/dx)^2 + (dI/dy)^2) should be used to calculate the
 * image gradient magnitude (L2gradient=true), or whether the default
 * L_1 norm =|dI/dx|+|dI/dy| is enough (L2gradient=false).
 *
 * @see org.opencv.imgproc.Imgproc.Canny
 */
public static void Canny(Mat image, Mat edges, double threshold1, double threshold2, int apertureSize, boolean L2gradient)
{
    Canny_0(image.nativeObj, edges.nativeObj, threshold1, threshold2, apertureSize, L2gradient);

    return;
}
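
// A minimal usage sketch for Canny from the Java bindings, assuming an 8-bit,
// single-channel input Mat. The 50/150 hysteresis thresholds below are
// illustrative values, not defaults of the generated API.
private static Mat exampleCanny(Mat gray)
{
    Mat edges = new Mat();
    // thresholds 50/150, Sobel aperture 3, default L1 gradient norm
    Canny(gray, edges, 50, 150, 3, false);
    return edges;
}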
/**
 * Finds edges in an image using the [Canny86] algorithm.
 *
 * The function finds edges in the input image image and marks them
 * in the output map edges using the Canny algorithm. The smallest
 * value between threshold1 and threshold2 is used for edge linking.
 * The largest value is used to find initial segments of strong edges.
 * See http://en.wikipedia.org/wiki/Canny_edge_detector
 *
 * Note:
 *   - An example on using the canny edge detector can be found at
 *     opencv_source_code/samples/cpp/edge.cpp
 *   - (Python) An example on using the canny edge detector can be found at
 *     opencv_source_code/samples/python/edge.py
 *
 * @param image single-channel 8-bit input image.
 * @param edges output edge map; it has the same size and type as image.
 * @param threshold1 first threshold for the hysteresis procedure.
 * @param threshold2 second threshold for the hysteresis procedure.
 *
 * @see org.opencv.imgproc.Imgproc.Canny
 */
public static void Canny(Mat image, Mat edges, double threshold1, double threshold2)
{
    Canny_1(image.nativeObj, edges.nativeObj, threshold1, threshold2);

    return;
}


//
// C++: void GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT)
//

/**
 * Blurs an image using a Gaussian filter.
 *
 * The function convolves the source image with the specified Gaussian kernel.
 * In-place filtering is supported.
 *
 * @param src input image; the image can have any number of channels, which are
 * processed independently, but the depth should be CV_8U, CV_16U, CV_16S,
 * CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ
 * but they both must be positive and odd. Or, they can be zeros and then they
 * are computed from sigma*.
 * @param sigmaX Gaussian kernel standard deviation in X direction.
 * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY
 * is zero, it is set to be equal to sigmaX; if both sigmas are zeros, they are
 * computed from ksize.width and ksize.height, respectively (see
 * "getGaussianKernel" for details); to fully control the result regardless of
 * possible future modifications of all this semantics, it is recommended to
 * specify all of ksize, sigmaX, and sigmaY.
 * @param borderType pixel extrapolation method (see "borderInterpolate" for
 * details).
 *
 * @see org.opencv.imgproc.Imgproc.GaussianBlur
 * @see org.opencv.imgproc.Imgproc#sepFilter2D
 * @see org.opencv.imgproc.Imgproc#medianBlur
 * @see org.opencv.imgproc.Imgproc#boxFilter
 * @see org.opencv.imgproc.Imgproc#blur
 * @see org.opencv.imgproc.Imgproc#filter2D
 * @see org.opencv.imgproc.Imgproc#bilateralFilter
 */
public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY, int borderType)
{
    GaussianBlur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY, borderType);

    return;
}
/**
 * Blurs an image using a Gaussian filter.
 *
 * The function convolves the source image with the specified Gaussian kernel.
 * In-place filtering is supported.
 *
 * @param src input image; the image can have any number of channels, which are
 * processed independently, but the depth should be CV_8U, CV_16U, CV_16S,
 * CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ
 * but they both must be positive and odd. Or, they can be zeros and then they
 * are computed from sigma*.
 * @param sigmaX Gaussian kernel standard deviation in X direction.
 * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY
 * is zero, it is set to be equal to sigmaX; if both sigmas are zeros, they are
 * computed from ksize.width and ksize.height, respectively (see
 * "getGaussianKernel" for details); to fully control the result regardless of
 * possible future modifications of all this semantics, it is recommended to
 * specify all of ksize, sigmaX, and sigmaY.
 *
 * @see org.opencv.imgproc.Imgproc.GaussianBlur
 * @see org.opencv.imgproc.Imgproc#sepFilter2D
 * @see org.opencv.imgproc.Imgproc#medianBlur
 * @see org.opencv.imgproc.Imgproc#boxFilter
 * @see org.opencv.imgproc.Imgproc#blur
 * @see org.opencv.imgproc.Imgproc#filter2D
 * @see org.opencv.imgproc.Imgproc#bilateralFilter
 */
public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY)
{
    GaussianBlur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY);

    return;
}
/**
 * Blurs an image using a Gaussian filter.
 *
 * The function convolves the source image with the specified Gaussian kernel.
 * In-place filtering is supported.
 *
 * @param src input image; the image can have any number of channels, which are
 * processed independently, but the depth should be CV_8U, CV_16U, CV_16S,
 * CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ
 * but they both must be positive and odd. Or, they can be zeros and then they
 * are computed from sigma*.
 * @param sigmaX Gaussian kernel standard deviation in X direction.
 *
 * @see org.opencv.imgproc.Imgproc.GaussianBlur
 * @see org.opencv.imgproc.Imgproc#sepFilter2D
 * @see org.opencv.imgproc.Imgproc#medianBlur
 * @see org.opencv.imgproc.Imgproc#boxFilter
 * @see org.opencv.imgproc.Imgproc#blur
 * @see org.opencv.imgproc.Imgproc#filter2D
 * @see org.opencv.imgproc.Imgproc#bilateralFilter
 */
public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX)
{
    GaussianBlur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX);

    return;
}
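
// A minimal usage sketch for GaussianBlur. The 9x9 kernel and sigma of 2 are
// illustrative values (the same ones used in the HoughCircles example below),
// not defaults of the generated API.
private static Mat exampleGaussianBlur(Mat src)
{
    Mat smoothed = new Mat();
    // kernel dimensions must be positive and odd; sigmaY defaults to sigmaX here
    GaussianBlur(src, smoothed, new Size(9, 9), 2);
    return smoothed;
}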
//
// C++: void HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0)
//

/**
 * Finds circles in a grayscale image using the Hough transform.
 *
 * The function finds circles in a grayscale image using a modification of the
 * Hough transform.
 *
 * Example:
 * // C++ code:
 *
 *     #include
 *     #include
 *     #include
 *
 *     using namespace cv;
 *
 *     int main(int argc, char** argv)
 *     {
 *         Mat img, gray;
 *         if(argc != 2 || !(img=imread(argv[1], 1)).data)
 *             return -1;
 *         cvtColor(img, gray, CV_BGR2GRAY);
 *         // smooth it, otherwise a lot of false circles may be detected
 *         GaussianBlur(gray, gray, Size(9, 9), 2, 2);
 *         vector<Vec3f> circles;
 *         HoughCircles(gray, circles, CV_HOUGH_GRADIENT,
 *                      2, gray.rows/4, 200, 100);
 *         for(size_t i = 0; i < circles.size(); i++)
 *         {
 *             Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
 *             int radius = cvRound(circles[i][2]);
 *             // draw the circle center
 *             circle(img, center, 3, Scalar(0,255,0), -1, 8, 0);
 *             // draw the circle outline
 *             circle(img, center, radius, Scalar(0,0,255), 3, 8, 0);
 *         }
 *         namedWindow("circles", 1);
 *         imshow("circles", img);
 *         return 0;
 *     }
 *
 * Note: Usually the function detects the centers of circles well. However, it
 * may fail to find correct radii. You can assist the function by specifying
 * the radius range (minRadius and maxRadius) if you know it. Or, you may
 * ignore the returned radius, use only the center, and find the correct radius
 * using an additional procedure.
 *
 * Note:
 *   - An example using the Hough circle detector can be found at
 *     opencv_source_code/samples/cpp/houghcircles.cpp
 *
 * @param image 8-bit, single-channel, grayscale input image.
 * @param circles Output vector of found circles. Each circle is encoded as a
 * 3-element floating-point vector (x, y, radius).
 * @param method Detection method to use. Currently, the only implemented
 * method is CV_HOUGH_GRADIENT, which is basically *21HT*, described in
 * [Yuen90].
 * @param dp Inverse ratio of the accumulator resolution to the image
 * resolution. For example, if dp=1, the accumulator has the same
 * resolution as the input image. If dp=2, the accumulator has half
 * as big width and height.
 * @param minDist Minimum distance between the centers of the detected circles.
 * If the parameter is too small, multiple neighbor circles may be falsely
 * detected in addition to a true one. If it is too large, some circles may be
 * missed.
 * @param param1 First method-specific parameter. In case of CV_HOUGH_GRADIENT,
 * it is the higher threshold of the two passed to the "Canny" edge detector
 * (the lower one is twice smaller).
 * @param param2 Second method-specific parameter. In case of CV_HOUGH_GRADIENT,
 * it is the accumulator threshold for the circle centers at the detection
 * stage. The smaller it is, the more false circles may be detected. Circles,
 * corresponding to the larger accumulator values, will be returned first.
 * @param minRadius Minimum circle radius.
 * @param maxRadius Maximum circle radius.
*
* @see org.opencv.imgproc.Imgproc.HoughCircles
* @see org.opencv.imgproc.Imgproc#minEnclosingCircle
* @see org.opencv.imgproc.Imgproc#fitEllipse
*/
public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius)
{
HoughCircles_0(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2, minRadius, maxRadius);
return;
}
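
// A minimal usage sketch mirroring the C++ example above from the Java
// bindings, assuming an 8-bit single-channel input. The blur size, dp and
// thresholds are the illustrative values from that example; each detected
// circle in the output Mat is encoded as (x, y, radius).
private static Mat exampleHoughCircles(Mat gray)
{
    Mat circles = new Mat();
    // smooth first, otherwise a lot of false circles may be detected
    GaussianBlur(gray, gray, new Size(9, 9), 2, 2);
    HoughCircles(gray, circles, CV_HOUGH_GRADIENT, 2, gray.rows() / 4, 200, 100, 0, 0);
    return circles;
}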
/**
* Finds circles in a grayscale image using the Hough transform.
 *
 * The function finds circles in a grayscale image using a modification of the
 * Hough transform.
 *
 * Example:
 * // C++ code:
 *
 *     #include
 *     #include
 *     #include
 *
 *     using namespace cv;
 *
 *     int main(int argc, char** argv)
 *     {
 *         Mat img, gray;
 *         if(argc != 2 || !(img=imread(argv[1], 1)).data)
 *             return -1;
 *         cvtColor(img, gray, CV_BGR2GRAY);
 *         // smooth it, otherwise a lot of false circles may be detected
 *         GaussianBlur(gray, gray, Size(9, 9), 2, 2);
 *         vector<Vec3f> circles;
 *         HoughCircles(gray, circles, CV_HOUGH_GRADIENT,
 *                      2, gray.rows/4, 200, 100);
 *         for(size_t i = 0; i < circles.size(); i++)
 *         {
 *             Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
 *             int radius = cvRound(circles[i][2]);
 *             // draw the circle center
 *             circle(img, center, 3, Scalar(0,255,0), -1, 8, 0);
 *             // draw the circle outline
 *             circle(img, center, radius, Scalar(0,0,255), 3, 8, 0);
 *         }
 *         namedWindow("circles", 1);
 *         imshow("circles", img);
 *         return 0;
 *     }
 *
 * Note: Usually the function detects the centers of circles well. However, it
 * may fail to find correct radii. You can assist the function by specifying
 * the radius range (minRadius and maxRadius) if you know it. Or, you may
 * ignore the returned radius, use only the center, and find the correct radius
 * using an additional procedure.
 *
 * Note:
 *   - An example using the Hough circle detector can be found at
 *     opencv_source_code/samples/cpp/houghcircles.cpp
 *
 * @param image 8-bit, single-channel, grayscale input image.
 * @param circles Output vector of found circles. Each circle is encoded as a
 * 3-element floating-point vector (x, y, radius).
 * @param method Detection method to use. Currently, the only implemented
 * method is CV_HOUGH_GRADIENT, which is basically *21HT*, described in
 * [Yuen90].
 * @param dp Inverse ratio of the accumulator resolution to the image
 * resolution. For example, if dp=1, the accumulator has the same
 * resolution as the input image. If dp=2, the accumulator has half
 * as big width and height.
 * @param minDist Minimum distance between the centers of the detected circles.
 * If the parameter is too small, multiple neighbor circles may be falsely
 * detected in addition to a true one. If it is too large, some circles may be
 * missed.
*
* @see org.opencv.imgproc.Imgproc.HoughCircles
* @see org.opencv.imgproc.Imgproc#minEnclosingCircle
* @see org.opencv.imgproc.Imgproc#fitEllipse
*/
public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist)
{
HoughCircles_1(image.nativeObj, circles.nativeObj, method, dp, minDist);
return;
}
//
// C++: void HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0)
//
/**
* Finds lines in a binary image using the standard Hough transform.
 *
 * The function implements the standard or standard multi-scale Hough transform
 * algorithm for line detection. See http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm
 * for a good explanation of Hough transform.
 * See also the example in "HoughLinesP" description.
 *
 * Note:
 *   - An example using the Hough line detector can be found at
 *     opencv_source_code/samples/cpp/houghlines.cpp
 *
 * @param image 8-bit, single-channel binary source image. The image may be
 * modified by the function.
 * @param lines Output vector of lines. Each line is represented by a
 * two-element vector (rho, theta).
 * @param rho Distance resolution of the accumulator in pixels.
 * @param theta Angle resolution of the accumulator in radians.
 * @param threshold Accumulator threshold parameter. Only those lines are
 * returned that get enough votes (>threshold).
 * @param srn For the multi-scale Hough transform, it is a divisor for the
 * distance resolution rho. The coarse accumulator distance resolution is rho
 * and the accurate accumulator resolution is rho/srn. If both srn=0 and stn=0,
 * the classical Hough transform is used. Otherwise, both these parameters
 * should be positive.
 * @param stn For the multi-scale Hough transform, it is a divisor for the
 * distance resolution theta.
 *
* @see org.opencv.imgproc.Imgproc.HoughLines
*/
public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn)
{
HoughLines_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn);
return;
}
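
// A minimal usage sketch for the standard and probabilistic Hough transforms
// from the Java bindings, assuming an 8-bit single-channel edge map (e.g. the
// output of Canny). The 1-pixel/1-degree resolutions and the vote thresholds
// are illustrative values, not defaults of the generated API.
private static void exampleHoughLines(Mat edges)
{
    Mat lines = new Mat();    // each standard line is encoded as (rho, theta)
    HoughLines(edges, lines, 1, Math.PI / 180, 100, 0, 0);

    Mat segments = new Mat(); // each segment is encoded as (x1, y1, x2, y2)
    HoughLinesP(edges, segments, 1, Math.PI / 180, 80, 30, 10);
}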
/**
* Finds lines in a binary image using the standard Hough transform.
 *
 * The function implements the standard or standard multi-scale Hough transform
 * algorithm for line detection. See http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm
 * for a good explanation of Hough transform.
 * See also the example in "HoughLinesP" description.
 *
 * Note:
 *   - An example using the Hough line detector can be found at
 *     opencv_source_code/samples/cpp/houghlines.cpp
 */

/**
 * Finds line segments in a binary image using the probabilistic Hough
 * transform.
 *
 * The function implements the probabilistic Hough transform algorithm for line
 * detection, described in [Matas00]. See the line detection example below:
*
// C++ code:
* */ * This is a standalone program. Pass an image name as the first parameter
* *of the program. Switch between standard and probabilistic Hough transform
* *by changing "#if 1" to "#if 0" and back * /
* *#include
#include
#include
using namespace cv;
 * int main(int argc, char** argv)
* * *Mat src, dst, color_dst;
* *if(argc != 2 || !(src=imread(argv[1], 0)).data)
* *return -1;
* *Canny(src, dst, 50, 200, 3);
* *cvtColor(dst, color_dst, CV_GRAY2BGR);
* *#if 0
 * vector<Vec2f> lines;
HoughLines(dst, lines, 1, CV_PI/180, 100);
* *for(size_t i = 0; i < lines.size(); i++)
* * *float rho = lines[i][0];
* *float theta = lines[i][1];
* *double a = cos(theta), b = sin(theta);
* *double x0 = a*rho, y0 = b*rho;
* *Point pt1(cvRound(x0 + 1000*(-b)),
* *cvRound(y0 + 1000*(a)));
* *Point pt2(cvRound(x0 - 1000*(-b)),
* *cvRound(y0 - 1000*(a)));
* *line(color_dst, pt1, pt2, Scalar(0,0,255), 3, 8);
* * *#else
 * vector<Vec4i> lines;
HoughLinesP(dst, lines, 1, CV_PI/180, 80, 30, 10);
* *for(size_t i = 0; i < lines.size(); i++)
* * *line(color_dst, Point(lines[i][0], lines[i][1]),
* *Point(lines[i][2], lines[i][3]), Scalar(0,0,255), 3, 8);
* * *#endif
* *namedWindow("Source", 1);
* *imshow("Source", src);
* *namedWindow("Detected Lines", 1);
* *imshow("Detected Lines", color_dst);
* *waitKey(0);
* *return 0;
* * *This is a sample picture the function parameters have been tuned for:
* *And this is the output of the above program in case of the probabilistic * Hough transform:
* * @param image 8-bit, single-channel binary source image. The image may be * modified by the function. * @param lines Output vector of lines. Each line is represented by a 4-element * vector (x_1, y_1, x_2, y_2), where (x_1,y_1) and (x_2, * y_2) are the ending points of each detected line segment. * @param rho Distance resolution of the accumulator in pixels. * @param theta Angle resolution of the accumulator in radians. * @param threshold Accumulator threshold parameter. Only those lines are * returned that get enough votes (>threshold). * @param minLineLength Minimum line length. Line segments shorter than that are * rejected. * @param maxLineGap Maximum allowed gap between points on the same line to link * them. * * @see org.opencv.imgproc.Imgproc.HoughLinesP */ public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold, double minLineLength, double maxLineGap) { HoughLinesP_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, minLineLength, maxLineGap); return; } /** *Finds line segments in a binary image using the probabilistic Hough * transform.
 * The function implements the probabilistic Hough transform algorithm for line
 * detection, described in [Matas00]. See the line detection example below:
*
// C++ code:
* */ * This is a standalone program. Pass an image name as the first parameter
* *of the program. Switch between standard and probabilistic Hough transform
* *by changing "#if 1" to "#if 0" and back * /
* *#include
#include
#include
using namespace cv;
 * int main(int argc, char** argv)
* * *Mat src, dst, color_dst;
* *if(argc != 2 || !(src=imread(argv[1], 0)).data)
* *return -1;
* *Canny(src, dst, 50, 200, 3);
* *cvtColor(dst, color_dst, CV_GRAY2BGR);
* *#if 0
 * vector<Vec2f> lines;
HoughLines(dst, lines, 1, CV_PI/180, 100);
* *for(size_t i = 0; i < lines.size(); i++)
* * *float rho = lines[i][0];
* *float theta = lines[i][1];
* *double a = cos(theta), b = sin(theta);
* *double x0 = a*rho, y0 = b*rho;
* *Point pt1(cvRound(x0 + 1000*(-b)),
* *cvRound(y0 + 1000*(a)));
* *Point pt2(cvRound(x0 - 1000*(-b)),
* *cvRound(y0 - 1000*(a)));
* *line(color_dst, pt1, pt2, Scalar(0,0,255), 3, 8);
* * *#else
 * vector<Vec4i> lines;
HoughLinesP(dst, lines, 1, CV_PI/180, 80, 30, 10);
* *for(size_t i = 0; i < lines.size(); i++)
* * *line(color_dst, Point(lines[i][0], lines[i][1]),
* *Point(lines[i][2], lines[i][3]), Scalar(0,0,255), 3, 8);
* * *#endif
* *namedWindow("Source", 1);
* *imshow("Source", src);
* *namedWindow("Detected Lines", 1);
* *imshow("Detected Lines", color_dst);
* *waitKey(0);
* *return 0;
* * *This is a sample picture the function parameters have been tuned for:
* *And this is the output of the above program in case of the probabilistic * Hough transform:
* * @param image 8-bit, single-channel binary source image. The image may be * modified by the function. * @param lines Output vector of lines. Each line is represented by a 4-element * vector (x_1, y_1, x_2, y_2), where (x_1,y_1) and (x_2, * y_2) are the ending points of each detected line segment. * @param rho Distance resolution of the accumulator in pixels. * @param theta Angle resolution of the accumulator in radians. * @param threshold Accumulator threshold parameter. Only those lines are * returned that get enough votes (>threshold). * * @see org.opencv.imgproc.Imgproc.HoughLinesP */ public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold) { HoughLinesP_1(image.nativeObj, lines.nativeObj, rho, theta, threshold); return; } // // C++: void HuMoments(Moments m, Mat& hu) // /** *Calculates seven Hu invariants.
* *The function calculates seven Hu invariants (introduced in [Hu62]; see also * http://en.wikipedia.org/wiki/Image_moment) defined as:
* *hu[0]= eta _20+ eta _02 * hu[1]=(eta _20- eta _02)^2+4 eta _11^2 * hu[2]=(eta _30-3 eta _12)^2+ (3 eta _21- eta _03)^2 * hu[3]=(eta _30+ eta _12)^2+ (eta _21+ eta _03)^2 * hu[4]=(eta _30-3 eta _12)(eta _30+ eta _12)[(eta _30+ eta _12)^2-3(eta _21+ * eta _03)^2]+(3 eta _21- eta _03)(eta _21+ eta _03)[3(eta _30+ eta _12)^2-(eta * _21+ eta _03)^2] * hu[5]=(eta _20- eta _02)[(eta _30+ eta _12)^2- (eta _21+ eta _03)^2]+4 eta * _11(eta _30+ eta _12)(eta _21+ eta _03) * hu[6]=(3 eta _21- eta _03)(eta _21+ eta _03)[3(eta _30+ eta _12)^2-(eta _21+ * eta _03)^2]-(eta _30-3 eta _12)(eta _21+ eta _03)[3(eta _30+ eta _12)^2-(eta * _21+ eta _03)^2] *
* *where eta_(ji) stands for Moments.nu_(ji).
* *These values are proved to be invariants to the image scale, rotation, and * reflection except the seventh one, whose sign is changed by reflection. This * invariance is proved with the assumption of infinite image resolution. In * case of raster images, the computed Hu invariants for the original and * transformed images are a bit different.
* * @param m a m * @param hu Output Hu invariants. * * @see org.opencv.imgproc.Imgproc.HuMoments * @see org.opencv.imgproc.Imgproc#matchShapes */ public static void HuMoments(Moments m, Mat hu) { HuMoments_0(m.nativeObj, hu.nativeObj); return; } // // C++: void Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) // /** *Calculates the Laplacian of an image.
* *The function calculates the Laplacian of the source image by adding up the * second x and y derivatives calculated using the Sobel operator:
* *dst = Delta src = (d^2 src)/(dx^2) + (d^2 src)/(dy^2)
* *This is done when ksize > 1
. When ksize == 1
, the
* Laplacian is computed by filtering the image with the following 3 x
* 3 aperture:
vecthreethree 0101(-4)1010
* *Note:
*-
*
- An example using the Laplace transformation for edge detection can be * found at opencv_source_code/samples/cpp/laplace.cpp *
src
.
* @param ddepth Desired depth of the destination image.
* @param ksize Aperture size used to compute the second-derivative filters. See
* "getDerivKernels" for details. The size must be positive and odd.
* @param scale Optional scale factor for the computed Laplacian values. By
* default, no scaling is applied. See "getDerivKernels" for details.
* @param delta Optional delta value that is added to the results prior to
* storing them in dst
.
* @param borderType Pixel extrapolation method. See "borderInterpolate" for
* details.
*
* @see org.opencv.imgproc.Imgproc.Laplacian
* @see org.opencv.imgproc.Imgproc#Scharr
* @see org.opencv.imgproc.Imgproc#Sobel
*/
public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta, int borderType)
{
Laplacian_0(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta, borderType);
return;
}
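
// A minimal usage sketch for Laplacian, assuming an 8-bit single-channel
// input. CV_16S output avoids truncating negative second-derivative values;
// the kernel size of 3 is an illustrative choice.
private static Mat exampleLaplacian(Mat gray)
{
    Mat laplace = new Mat();
    Laplacian(gray, laplace, org.opencv.core.CvType.CV_16S, 3, 1, 0, BORDER_DEFAULT);
    return laplace;
}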
/**
* Calculates the Laplacian of an image.
 *
 * The function calculates the Laplacian of the source image by adding up the
 * second x and y derivatives calculated using the Sobel operator:
 *
 *     dst = Delta src = (d^2 src)/(dx^2) + (d^2 src)/(dy^2)
 *
 * This is done when ksize > 1. When ksize == 1, the Laplacian is computed by
 * filtering the image with the following 3 x 3 aperture:
 *
 *     0  1  0
 *     1 -4  1
 *     0  1  0
 *
 * Note:
 *   - An example using the Laplace transformation for edge detection can be
 *     found at opencv_source_code/samples/cpp/laplace.cpp
 *
 * @param src Source image.
 * @param dst Destination image of the same size and the same number of
 * channels as src.
* @param ddepth Desired depth of the destination image.
* @param ksize Aperture size used to compute the second-derivative filters. See
* "getDerivKernels" for details. The size must be positive and odd.
* @param scale Optional scale factor for the computed Laplacian values. By
* default, no scaling is applied. See "getDerivKernels" for details.
* @param delta Optional delta value that is added to the results prior to
* storing them in dst
.
*
* @see org.opencv.imgproc.Imgproc.Laplacian
* @see org.opencv.imgproc.Imgproc#Scharr
* @see org.opencv.imgproc.Imgproc#Sobel
*/
public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta)
{
Laplacian_1(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta);
return;
}
/**
* Calculates the Laplacian of an image.
 *
 * The function calculates the Laplacian of the source image by adding up the
 * second x and y derivatives calculated using the Sobel operator:
 *
 *     dst = Delta src = (d^2 src)/(dx^2) + (d^2 src)/(dy^2)
 *
 * This is done when ksize > 1. When ksize == 1, the Laplacian is computed by
 * filtering the image with the following 3 x 3 aperture:
 *
 *     0  1  0
 *     1 -4  1
 *     0  1  0
 *
 * Note:
 *   - An example using the Laplace transformation for edge detection can be
 *     found at opencv_source_code/samples/cpp/laplace.cpp
 *
 * @param src Source image.
 * @param dst Destination image of the same size and the same number of
 * channels as src.
* @param ddepth Desired depth of the destination image.
*
* @see org.opencv.imgproc.Imgproc.Laplacian
* @see org.opencv.imgproc.Imgproc#Scharr
* @see org.opencv.imgproc.Imgproc#Sobel
*/
public static void Laplacian(Mat src, Mat dst, int ddepth)
{
Laplacian_2(src.nativeObj, dst.nativeObj, ddepth);
return;
}
//
// C++: double PSNR(Mat src1, Mat src2)
//
public static double PSNR(Mat src1, Mat src2)
{
double retVal = PSNR_0(src1.nativeObj, src2.nativeObj);
return retVal;
}
//
// C++: void Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
//
/**
* Calculates the first x- or y- image derivative using Scharr operator.
 *
 * The function computes the first x- or y- spatial image derivative using the
 * Scharr operator. The call
 *
 *     Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)
 *
 * is equivalent to
 *
 *     Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, borderType).
* * @param src input image. * @param dst output image of the same size and the same number of channels as *src
.
* @param ddepth output image depth (see "Sobel" for the list of supported
* combination of src.depth()
and ddepth
).
* @param dx order of the derivative x.
* @param dy order of the derivative y.
* @param scale optional scale factor for the computed derivative values; by
* default, no scaling is applied (see "getDerivKernels" for details).
* @param delta optional delta value that is added to the results prior to
* storing them in dst
.
* @param borderType pixel extrapolation method (see "borderInterpolate" for
* details).
*
* @see org.opencv.imgproc.Imgproc.Scharr
* @see org.opencv.core.Core#cartToPolar
*/
public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta, int borderType)
{
Scharr_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta, borderType);
return;
}
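
// A minimal usage sketch for Scharr, assuming an 8-bit single-channel input.
// dx=1, dy=0 selects the x-derivative; CV_32F keeps negative responses.
private static Mat exampleScharr(Mat gray)
{
    Mat dx = new Mat();
    Scharr(gray, dx, org.opencv.core.CvType.CV_32F, 1, 0);
    return dx;
}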
/**
* Calculates the first x- or y- image derivative using Scharr operator.
 *
 * The function computes the first x- or y- spatial image derivative using the
 * Scharr operator. The call
 *
 *     Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)
 *
 * is equivalent to
 *
 *     Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, borderType).
* * @param src input image. * @param dst output image of the same size and the same number of channels as *src
.
* @param ddepth output image depth (see "Sobel" for the list of supported
* combination of src.depth()
and ddepth
).
* @param dx order of the derivative x.
* @param dy order of the derivative y.
* @param scale optional scale factor for the computed derivative values; by
* default, no scaling is applied (see "getDerivKernels" for details).
* @param delta optional delta value that is added to the results prior to
* storing them in dst
.
*
* @see org.opencv.imgproc.Imgproc.Scharr
* @see org.opencv.core.Core#cartToPolar
*/
public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta)
{
Scharr_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta);
return;
}
/**
* Calculates the first x- or y- image derivative using Scharr operator.
 *
 * The function computes the first x- or y- spatial image derivative using the
 * Scharr operator. The call
 *
 *     Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)
 *
 * is equivalent to
 *
 *     Sobel(src, dst, ddepth, dx, dy, CV_SCHARR, scale, delta, borderType).
* * @param src input image. * @param dst output image of the same size and the same number of channels as *src
.
* @param ddepth output image depth (see "Sobel" for the list of supported
* combination of src.depth()
and ddepth
).
* @param dx order of the derivative x.
* @param dy order of the derivative y.
*
* @see org.opencv.imgproc.Imgproc.Scharr
* @see org.opencv.core.Core#cartToPolar
*/
public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy)
{
Scharr_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy);
return;
}
//
// C++: void Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
//
/**
* Calculates the first, second, third, or mixed image derivatives using an * extended Sobel operator.
 * In all cases except one, the ksize x ksize separable kernel
* is used to calculate the derivative. When ksize = 1, the 3 x
* 1 or 1 x 3 kernel is used (that is, no Gaussian smoothing is
* done). ksize = 1
can only be used for the first or the second x-
* or y- derivatives.
There is also the special value ksize = CV_SCHARR
(-1) that
* corresponds to the 3x3 Scharr filter that may give more accurate
* results than the 3x3 Sobel. The Scharr aperture is
* |-3 0 3| * |-10 0 10| * |-3 0 3| *
* *for the x-derivative, or transposed for the y-derivative.
* *The function calculates an image derivative by convolving the image with the * appropriate kernel:
* *dst = (d^(xorder+yorder) src)/(dx^(xorder) dy^(yorder))
* *The Sobel operators combine Gaussian smoothing and differentiation, so the
* result is more or less resistant to the noise. Most often, the function is
* called with (xorder
= 1, yorder
= 0,
* ksize
= 3) or (xorder
= 0, yorder
= 1,
* ksize
= 3) to calculate the first x- or y- image derivative. The
* first case corresponds to a kernel of:
* |-1 0 1| * |-2 0 2| * |-1 0 1| *
* *The second case corresponds to a kernel of:
* ** |-1 -2 -1| * |0 0 0| * |1 2 1| *
* * @param src input image. * @param dst output image of the same size and the same number of channels as *src
.
 * @param ddepth output image depth; the following combinations of
 * src.depth() and ddepth are supported:
 *   - src.depth() = CV_8U, ddepth = -1/CV_16S/CV_32F/CV_64F
 *   - src.depth() = CV_16U/CV_16S, ddepth = -1/CV_32F/CV_64F
 *   - src.depth() = CV_32F, ddepth = -1/CV_32F/CV_64F
 *   - src.depth() = CV_64F, ddepth = -1/CV_64F
 *
 * when ddepth=-1, the destination image will have the same depth as the
 * source; in the case of 8-bit input images it will result in truncated
 * derivatives.
 * @param dx order of the derivative x.
 * @param dy order of the derivative y.
 * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
 * @param scale optional scale factor for the computed derivative values; by
 * default, no scaling is applied (see "getDerivKernels" for details).
 * @param delta optional delta value that is added to the results prior to
 * storing them in dst.
* @param borderType pixel extrapolation method (see "borderInterpolate" for
* details).
*
* @see org.opencv.imgproc.Imgproc.Sobel
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.core.Core#cartToPolar
* @see org.opencv.imgproc.Imgproc#sepFilter2D
* @see org.opencv.imgproc.Imgproc#Laplacian
* @see org.opencv.imgproc.Imgproc#Scharr
* @see org.opencv.imgproc.Imgproc#filter2D
*/
public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType)
{
Sobel_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta, borderType);
return;
}
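
// A minimal usage sketch for Sobel, assuming an 8-bit single-channel input:
// x and y first derivatives with a 3x3 kernel and CV_32F depth, combined into
// a gradient magnitude via Core.magnitude. All values are illustrative.
private static Mat exampleSobel(Mat gray)
{
    Mat gx = new Mat(), gy = new Mat(), magnitude = new Mat();
    Sobel(gray, gx, org.opencv.core.CvType.CV_32F, 1, 0, 3, 1, 0, BORDER_DEFAULT);
    Sobel(gray, gy, org.opencv.core.CvType.CV_32F, 0, 1, 3, 1, 0, BORDER_DEFAULT);
    org.opencv.core.Core.magnitude(gx, gy, magnitude);
    return magnitude;
}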
/**
* Calculates the first, second, third, or mixed image derivatives using an * extended Sobel operator.
 * In all cases except one, the ksize x ksize separable kernel
* is used to calculate the derivative. When ksize = 1, the 3 x
* 1 or 1 x 3 kernel is used (that is, no Gaussian smoothing is
* done). ksize = 1
can only be used for the first or the second x-
* or y- derivatives.
There is also the special value ksize = CV_SCHARR
(-1) that
* corresponds to the 3x3 Scharr filter that may give more accurate
* results than the 3x3 Sobel. The Scharr aperture is
* |-3 0 3| * |-10 0 10| * |-3 0 3| *
* *for the x-derivative, or transposed for the y-derivative.
* *The function calculates an image derivative by convolving the image with the * appropriate kernel:
* *dst = (d^(xorder+yorder) src)/(dx^(xorder) dy^(yorder))
* *The Sobel operators combine Gaussian smoothing and differentiation, so the
* result is more or less resistant to the noise. Most often, the function is
* called with (xorder
= 1, yorder
= 0,
* ksize
= 3) or (xorder
= 0, yorder
= 1,
* ksize
= 3) to calculate the first x- or y- image derivative. The
* first case corresponds to a kernel of:
* |-1 0 1| * |-2 0 2| * |-1 0 1| *
* *The second case corresponds to a kernel of:
* ** |-1 -2 -1| * |0 0 0| * |1 2 1| *
* * @param src input image. * @param dst output image of the same size and the same number of channels as *src
.
* @param ddepth output image depth; the following combinations of
* src.depth()
and ddepth
are supported:
* -
*
-
src.depth()
=CV_8U
,ddepth
= * -1/CV_16S
/CV_32F
/CV_64F
* -
src.depth()
=CV_16U
/CV_16S
, *ddepth
= -1/CV_32F
/CV_64F
* -
src.depth()
=CV_32F
,ddepth
= * -1/CV_32F
/CV_64F
* -
src.depth()
=CV_64F
,ddepth
= * -1/CV_64F
*
when ddepth=-1
, the destination image will have the same depth
* as the source; in the case of 8-bit input images it will result in truncated
* derivatives.
dst
.
*
* @see org.opencv.imgproc.Imgproc.Sobel
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.core.Core#cartToPolar
* @see org.opencv.imgproc.Imgproc#sepFilter2D
* @see org.opencv.imgproc.Imgproc#Laplacian
* @see org.opencv.imgproc.Imgproc#Scharr
* @see org.opencv.imgproc.Imgproc#filter2D
*/
public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta)
{
Sobel_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta);
return;
}
/**
* Calculates the first, second, third, or mixed image derivatives using an * extended Sobel operator.
 * In all cases except one, the ksize x ksize separable kernel
* is used to calculate the derivative. When ksize = 1, the 3 x
* 1 or 1 x 3 kernel is used (that is, no Gaussian smoothing is
* done). ksize = 1
can only be used for the first or the second x-
* or y- derivatives.
There is also the special value ksize = CV_SCHARR
(-1) that
* corresponds to the 3x3 Scharr filter that may give more accurate
* results than the 3x3 Sobel. The Scharr aperture is
* |-3 0 3| * |-10 0 10| * |-3 0 3| *
* *for the x-derivative, or transposed for the y-derivative.
* *The function calculates an image derivative by convolving the image with the * appropriate kernel:
* *dst = (d^(xorder+yorder) src)/(dx^(xorder) dy^(yorder))
* *The Sobel operators combine Gaussian smoothing and differentiation, so the
* result is more or less resistant to the noise. Most often, the function is
* called with (xorder
= 1, yorder
= 0,
* ksize
= 3) or (xorder
= 0, yorder
= 1,
* ksize
= 3) to calculate the first x- or y- image derivative. The
* first case corresponds to a kernel of:
* |-1 0 1| * |-2 0 2| * |-1 0 1| *
* *The second case corresponds to a kernel of:
* ** |-1 -2 -1| * |0 0 0| * |1 2 1| *
* * @param src input image. * @param dst output image of the same size and the same number of channels as *src
.
* @param ddepth output image depth; the following combinations of
* src.depth()
and ddepth
are supported:
* -
*
-
src.depth()
=CV_8U
,ddepth
= * -1/CV_16S
/CV_32F
/CV_64F
* -
src.depth()
=CV_16U
/CV_16S
, *ddepth
= -1/CV_32F
/CV_64F
* -
src.depth()
=CV_32F
,ddepth
= * -1/CV_32F
/CV_64F
* -
src.depth()
=CV_64F
,ddepth
= * -1/CV_64F
*
when ddepth=-1
, the destination image will have the same depth
* as the source; in the case of 8-bit input images it will result in truncated
* derivatives.
 * Adds an image to the accumulator.
 *
 * The function adds src or some of its elements to dst:
 *
 *     dst(x,y) <- dst(x,y) + src(x,y) if mask(x,y) != 0
 *
 * The function supports multi-channel images. Each channel is processed
 * independently.
 *
 * The functions accumulate* can be used, for example, to collect statistics of
 * a scene background viewed by a still camera and for the further
 * foreground-background segmentation.
 * Adds an image to the accumulator.
 *
 * The function adds src or some of its elements to dst:
 *
 *     dst(x,y) <- dst(x,y) + src(x,y) if mask(x,y) != 0
 *
 * The function supports multi-channel images. Each channel is processed
 * independently.
 *
 * The functions accumulate* can be used, for example, to collect statistics of
 * a scene background viewed by a still camera and for the further
 * foreground-background segmentation.
 * Adds the per-element product of two input images to the accumulator.
 *
 * The function adds the product of two images or their selected regions to the
 * accumulator dst:
 *
 *     dst(x,y) <- dst(x,y) + src1(x,y) * src2(x,y) if mask(x,y) != 0
 *
 * The function supports multi-channel images. Each channel is processed
 * independently.
* * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating * point. * @param src2 Second input image of the same type and the same size as *src1
.
* @param dst Accumulator with the same number of channels as input images,
* 32-bit or 64-bit floating-point.
* @param mask Optional operation mask.
*
* @see org.opencv.imgproc.Imgproc.accumulateProduct
* @see org.opencv.imgproc.Imgproc#accumulate
* @see org.opencv.imgproc.Imgproc#accumulateWeighted
* @see org.opencv.imgproc.Imgproc#accumulateSquare
*/
public static void accumulateProduct(Mat src1, Mat src2, Mat dst, Mat mask)
{
accumulateProduct_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj);
return;
}
/**
* Adds the per-element product of two input images to the accumulator.
 *
 * The function adds the product of two images or their selected regions to the
 * accumulator dst:
 *
 *     dst(x,y) <- dst(x,y) + src1(x,y) * src2(x,y) if mask(x,y) != 0
 *
 * The function supports multi-channel images. Each channel is processed
 * independently.
* * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating * point. * @param src2 Second input image of the same type and the same size as *src1
.
* @param dst Accumulator with the same number of channels as input images,
* 32-bit or 64-bit floating-point.
*
* @see org.opencv.imgproc.Imgproc.accumulateProduct
* @see org.opencv.imgproc.Imgproc#accumulate
* @see org.opencv.imgproc.Imgproc#accumulateWeighted
* @see org.opencv.imgproc.Imgproc#accumulateSquare
*/
public static void accumulateProduct(Mat src1, Mat src2, Mat dst)
{
accumulateProduct_1(src1.nativeObj, src2.nativeObj, dst.nativeObj);
return;
}
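
// A minimal sketch of the accumulate* family described above: maintaining a
// running average with accumulateWeighted (documented further below). The
// CV_32FC1 accumulator type and alpha of 0.05 are illustrative assumptions;
// frames are assumed to be a non-empty list of 8-bit single-channel Mats.
private static Mat exampleRunningAverage(List<Mat> frames)
{
    Mat background = new Mat(frames.get(0).size(), org.opencv.core.CvType.CV_32FC1, new Scalar(0));
    for (Mat frame : frames) {
        // each frame nudges the average; larger alpha forgets old frames faster
        accumulateWeighted(frame, background, 0.05);
    }
    return background;
}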
//
// C++: void accumulateSquare(Mat src, Mat& dst, Mat mask = Mat())
//
/**
* Adds the square of a source image to the accumulator.
 *
 * The function adds the input image src or its selected region, raised to a
 * power of 2, to the accumulator dst:
 *
 *     dst(x,y) <- dst(x,y) + src(x,y)^2 if mask(x,y) != 0
 *
 * The function supports multi-channel images. Each channel is processed
 * independently.
* * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. * @param dst Accumulator image with the same number of channels as input image, * 32-bit or 64-bit floating-point. * @param mask Optional operation mask. * * @see org.opencv.imgproc.Imgproc.accumulateSquare * @see org.opencv.imgproc.Imgproc#accumulateWeighted * @see org.opencv.imgproc.Imgproc#accumulateProduct * @see org.opencv.imgproc.Imgproc#accumulateSquare */ public static void accumulateSquare(Mat src, Mat dst, Mat mask) { accumulateSquare_0(src.nativeObj, dst.nativeObj, mask.nativeObj); return; } /** *Adds the square of a source image to the accumulator.
 *
 * The function adds the input image src or its selected region, raised to a
 * power of 2, to the accumulator dst:
 *
 *     dst(x,y) <- dst(x,y) + src(x,y)^2 if mask(x,y) != 0
 *
 * The function supports multi-channel images. Each channel is processed
 * independently.
* * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. * @param dst Accumulator image with the same number of channels as input image, * 32-bit or 64-bit floating-point. * * @see org.opencv.imgproc.Imgproc.accumulateSquare * @see org.opencv.imgproc.Imgproc#accumulateWeighted * @see org.opencv.imgproc.Imgproc#accumulateProduct * @see org.opencv.imgproc.Imgproc#accumulateSquare */ public static void accumulateSquare(Mat src, Mat dst) { accumulateSquare_1(src.nativeObj, dst.nativeObj); return; } // // C++: void accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat()) // /** *Updates a running average.
 *
 * The function calculates the weighted sum of the input image src and the
 * accumulator dst so that dst becomes a running average of a frame sequence:
 *
 *     dst(x,y) <- (1 - alpha) * dst(x,y) + alpha * src(x,y) if mask(x,y) != 0
 *
 * That is, alpha regulates the update speed (how fast the accumulator
 * "forgets" about earlier images).
 * The function supports multi-channel images. Each channel is processed
 * independently.
Updates a running average.
 *
 * The function calculates the weighted sum of the input image src and the
 * accumulator dst so that dst becomes a running average of a frame sequence:
 *
 *     dst(x,y) <- (1 - alpha) * dst(x,y) + alpha * src(x,y) if mask(x,y) != 0
 *
 * That is, alpha regulates the update speed (how fast the accumulator
 * "forgets" about earlier images).
 * The function supports multi-channel images. Each channel is processed
 * independently.
Applies the adaptive bilateral filter to an image.
* *A main part of our strategy will be to load each raw pixel once, and reuse it * to calculate all pixels in the output (filtered) image that need this pixel * value. The math of the filter is that of the usual bilateral filter, except * that the sigma color is calculated in the neighborhood, and clamped by the * optional input value.
* * @param src The source image * @param dst The destination image; will have the same size and the same type * as src * @param ksize The kernel size. This is the neighborhood where the local * variance will be calculated, and where pixels will contribute (in a weighted * manner). * @param sigmaSpace Filter sigma in the coordinate space. Larger value of the * parameter means that farther pixels will influence each other (as long as * their colors are close enough; see sigmaColor). Then d>0, it specifies the * neighborhood size regardless of sigmaSpace, otherwise d is proportional to * sigmaSpace. * @param maxSigmaColor Maximum allowed sigma color (will clamp the value * calculated in the ksize neighborhood. Larger value of the parameter means * that more dissimilar pixels will influence each other (as long as their * colors are close enough; see sigmaColor). Then d>0, it specifies the * neighborhood size regardless of sigmaSpace, otherwise d is proportional to * sigmaSpace. * @param anchor a anchor * @param borderType Pixel extrapolation method. * * @see org.opencv.imgproc.Imgproc.adaptiveBilateralFilter */ public static void adaptiveBilateralFilter(Mat src, Mat dst, Size ksize, double sigmaSpace, double maxSigmaColor, Point anchor, int borderType) { adaptiveBilateralFilter_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaSpace, maxSigmaColor, anchor.x, anchor.y, borderType); return; } /** *Applies the adaptive bilateral filter to an image.
* *A main part of our strategy will be to load each raw pixel once, and reuse it * to calculate all pixels in the output (filtered) image that need this pixel * value. The math of the filter is that of the usual bilateral filter, except * that the sigma color is calculated in the neighborhood, and clamped by the * optional input value.
* * @param src The source image * @param dst The destination image; will have the same size and the same type * as src * @param ksize The kernel size. This is the neighborhood where the local * variance will be calculated, and where pixels will contribute (in a weighted * manner). * @param sigmaSpace Filter sigma in the coordinate space. Larger value of the * parameter means that farther pixels will influence each other (as long as * their colors are close enough; see sigmaColor). Then d>0, it specifies the * neighborhood size regardless of sigmaSpace, otherwise d is proportional to * sigmaSpace. * @param maxSigmaColor Maximum allowed sigma color (will clamp the value * calculated in the ksize neighborhood. Larger value of the parameter means * that more dissimilar pixels will influence each other (as long as their * colors are close enough; see sigmaColor). Then d>0, it specifies the * neighborhood size regardless of sigmaSpace, otherwise d is proportional to * sigmaSpace. * @param anchor a anchor * * @see org.opencv.imgproc.Imgproc.adaptiveBilateralFilter */ public static void adaptiveBilateralFilter(Mat src, Mat dst, Size ksize, double sigmaSpace, double maxSigmaColor, Point anchor) { adaptiveBilateralFilter_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaSpace, maxSigmaColor, anchor.x, anchor.y); return; } /** *Applies the adaptive bilateral filter to an image.
* *A main part of our strategy will be to load each raw pixel once, and reuse it * to calculate all pixels in the output (filtered) image that need this pixel * value. The math of the filter is that of the usual bilateral filter, except * that the sigma color is calculated in the neighborhood, and clamped by the * optional input value.
* * @param src The source image * @param dst The destination image; will have the same size and the same type * as src * @param ksize The kernel size. This is the neighborhood where the local * variance will be calculated, and where pixels will contribute (in a weighted * manner). * @param sigmaSpace Filter sigma in the coordinate space. Larger value of the * parameter means that farther pixels will influence each other (as long as * their colors are close enough; see sigmaColor). Then d>0, it specifies the * neighborhood size regardless of sigmaSpace, otherwise d is proportional to * sigmaSpace. * * @see org.opencv.imgproc.Imgproc.adaptiveBilateralFilter */ public static void adaptiveBilateralFilter(Mat src, Mat dst, Size ksize, double sigmaSpace) { adaptiveBilateralFilter_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaSpace); return; } // // C++: void adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) // /** *Applies an adaptive threshold to an array.
 *
 * The function transforms a grayscale image to a binary image according to the
 * formulae:
 *
 *   - THRESH_BINARY
 *
 *         dst(x,y) = maxValue if src(x,y) > T(x,y); 0 otherwise
 *
 *   - THRESH_BINARY_INV
 *
 *         dst(x,y) = 0 if src(x,y) > T(x,y); maxValue otherwise
 *
 * where T(x,y) is a threshold calculated individually for each pixel.
 *
 *   - For the method ADAPTIVE_THRESH_MEAN_C, the threshold value T(x,y) is a
 *     mean of the blockSize x blockSize neighborhood of (x, y) minus C.
 *   - For the method ADAPTIVE_THRESH_GAUSSIAN_C, the threshold value T(x, y)
 *     is a weighted sum (cross-correlation with a Gaussian window) of the
 *     blockSize x blockSize neighborhood of (x, y) minus C. The default sigma
 *     (standard deviation) is used for the specified blockSize. See
 *     "getGaussianKernel".
 *
 * The function can process the image in-place.
* * @param src Source 8-bit single-channel image. * @param dst Destination image of the same size and the same type as *src
.
* @param maxValue Non-zero value assigned to the pixels for which the condition
* is satisfied. See the details below.
* @param adaptiveMethod Adaptive thresholding algorithm to use,
* ADAPTIVE_THRESH_MEAN_C
or ADAPTIVE_THRESH_GAUSSIAN_C
.
* See the details below.
* @param thresholdType Thresholding type that must be either THRESH_BINARY
* or THRESH_BINARY_INV
.
* @param blockSize Size of a pixel neighborhood that is used to calculate a
* threshold value for the pixel: 3, 5, 7, and so on.
* @param C Constant subtracted from the mean or weighted mean (see the details
* below). Normally, it is positive but may be zero or negative as well.
*
* @see org.opencv.imgproc.Imgproc.adaptiveThreshold
* @see org.opencv.imgproc.Imgproc#threshold
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.imgproc.Imgproc#blur
*/
public static void adaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C)
{
adaptiveThreshold_0(src.nativeObj, dst.nativeObj, maxValue, adaptiveMethod, thresholdType, blockSize, C);
return;
}
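
// A minimal usage sketch for adaptiveThreshold on an 8-bit single-channel
// image. ADAPTIVE_THRESH_MEAN_C and THRESH_BINARY are assumed to be among the
// public constants of this class (they are in the OpenCV Java bindings); the
// 11-pixel block size and C=2 are illustrative values.
private static Mat exampleAdaptiveThreshold(Mat gray)
{
    Mat binary = new Mat();
    adaptiveThreshold(gray, binary, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 11, 2);
    return binary;
}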
//
// C++: void approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed)
//
/**
* Approximates a polygonal curve(s) with the specified precision.
 *
 * The function approxPolyDP approximates a curve or a polygon with
 * another curve/polygon with fewer vertices so that the distance between them
 * is less than or equal to the specified precision. It uses the Douglas-Peucker
* algorithm http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
See https://github.com/Itseez/opencv/tree/master/samples/cpp/contours2.cpp * for the function usage model.
 *
 * @param curve Input vector of a 2D point stored in:
 *   - std.vector or Mat (C++ interface)
 *   - Nx2 numpy array (Python interface)
 *   - CvSeq or CvMat (C interface)
 */

/**
 * Calculates a contour perimeter or a curve length.
 *
 * The function computes a curve length or a closed contour perimeter.
 *
 * @param curve Input vector of 2D points, stored in std.vector or Mat.
* @param closed Flag indicating whether the curve is closed or not.
*
* @see org.opencv.imgproc.Imgproc.arcLength
*/
public static double arcLength(MatOfPoint2f curve, boolean closed)
{
Mat curve_mat = curve;
double retVal = arcLength_0(curve_mat.nativeObj, closed);
return retVal;
}
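
// A minimal usage sketch combining arcLength and approxPolyDP (the latter is
// part of this class per the C++ signature comment above). Taking epsilon as
// 2% of the contour perimeter is an illustrative rule of thumb.
private static MatOfPoint2f exampleApproxPolyDP(MatOfPoint2f contour)
{
    double perimeter = arcLength(contour, true);
    MatOfPoint2f approx = new MatOfPoint2f();
    approxPolyDP(contour, approx, 0.02 * perimeter, true);
    return approx;
}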
//
// C++: void bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT)
//
/**
* Applies the bilateral filter to an image.
* *The function applies bilateral filtering to the input image, as described in
* http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
* bilateralFilter
can reduce unwanted noise very well while
* keeping edges fairly sharp. However, it is very slow compared to most
* filters.
-
*
- Sigma values*: For simplicity, you can set the 2 sigma values to be the * same. If they are small (< 10), the filter will not have much effect, whereas * if they are large (> 150), they will have a very strong effect, making the * image look "cartoonish". *
- Filter size*: Large filters (d > 5) are very slow, so it is recommended * to use d=5 for real-time applications, and perhaps d=9 for offline * applications that need heavy noise filtering. *
 * This filter does not work in place.
* * @param src Source 8-bit or floating-point, 1-channel or 3-channel image. * @param dst Destination image of the same size and type assrc
.
* @param d Diameter of each pixel neighborhood that is used during filtering.
* If it is non-positive, it is computed from sigmaSpace
.
* @param sigmaColor Filter sigma in the color space. A larger value of the
* parameter means that farther colors within the pixel neighborhood (see
* sigmaSpace
) will be mixed together, resulting in larger areas of
* semi-equal color.
* @param sigmaSpace Filter sigma in the coordinate space. A larger value of the
* parameter means that farther pixels will influence each other as long as
* their colors are close enough (see sigmaColor
). When
* d>0
, it specifies the neighborhood size regardless of
* sigmaSpace
. Otherwise, d
is proportional to
* sigmaSpace
.
* @param borderType a borderType
*
* @see org.opencv.imgproc.Imgproc.bilateralFilter
*/
public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace, int borderType)
{
bilateralFilter_0(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace, borderType);
return;
}
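
// A minimal usage sketch for bilateralFilter. d=9 with sigma values of 75 is a
// common illustrative setting for noticeable smoothing with edge preservation;
// the filter does not work in place, so dst is a separate Mat.
private static Mat exampleBilateralFilter(Mat src)
{
    Mat filtered = new Mat();
    bilateralFilter(src, filtered, 9, 75, 75);
    return filtered;
}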
/**
* Applies the bilateral filter to an image.
* *The function applies bilateral filtering to the input image, as described in
* http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
* bilateralFilter
can reduce unwanted noise very well while
* keeping edges fairly sharp. However, it is very slow compared to most
* filters.
-
*
- Sigma values*: For simplicity, you can set the 2 sigma values to be the * same. If they are small (< 10), the filter will not have much effect, whereas * if they are large (> 150), they will have a very strong effect, making the * image look "cartoonish". *
- Filter size*: Large filters (d > 5) are very slow, so it is recommended * to use d=5 for real-time applications, and perhaps d=9 for offline * applications that need heavy noise filtering. *
 * This filter does not work in place.
* * @param src Source 8-bit or floating-point, 1-channel or 3-channel image. * @param dst Destination image of the same size and type assrc
.
* @param d Diameter of each pixel neighborhood that is used during filtering.
* If it is non-positive, it is computed from sigmaSpace
.
* @param sigmaColor Filter sigma in the color space. A larger value of the
* parameter means that farther colors within the pixel neighborhood (see
* sigmaSpace
) will be mixed together, resulting in larger areas of
* semi-equal color.
* @param sigmaSpace Filter sigma in the coordinate space. A larger value of the
* parameter means that farther pixels will influence each other as long as
* their colors are close enough (see sigmaColor
). When
* d>0
, it specifies the neighborhood size regardless of
* sigmaSpace
. Otherwise, d
is proportional to
* sigmaSpace
.
*
* @see org.opencv.imgproc.Imgproc.bilateralFilter
*/
public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace)
{
bilateralFilter_1(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace);
return;
}
//
// C++: void blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT)
//
/**
* Blurs an image using the normalized box filter.
* *The function smoothes an image using the kernel:
 *
 * K = 1/(ksize.width*ksize.height) * | 1 1 ... 1 |
 *                                    | 1 1 ... 1 |
 *                                    | ......... |
 *                                    | 1 1 ... 1 |
 *
 * The call blur(src, dst, ksize, anchor, borderType) is equivalent to
 * boxFilter(src, dst, src.type(), anchor, true, borderType).
 *
 * @param src input image; it can have any number of channels, which are
 * processed independently, but the depth should be CV_8U, CV_16U, CV_16S,
 * CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means
 * that the anchor is at the kernel center.
 * @param borderType border mode used to extrapolate pixels outside of the
 * image.
*
* @see org.opencv.imgproc.Imgproc.blur
* @see org.opencv.imgproc.Imgproc#boxFilter
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.imgproc.Imgproc#bilateralFilter
* @see org.opencv.imgproc.Imgproc#medianBlur
*/
public static void blur(Mat src, Mat dst, Size ksize, Point anchor, int borderType)
{
blur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y, borderType);
return;
}
/**
* Blurs an image using the normalized box filter.
* *The function smoothes an image using the kernel:
 *
 * K = 1/(ksize.width*ksize.height) * | 1 1 ... 1 |
 *                                    | 1 1 ... 1 |
 *                                    | ......... |
 *                                    | 1 1 ... 1 |
 *
 * The call blur(src, dst, ksize, anchor, borderType) is equivalent to
 * boxFilter(src, dst, src.type(), anchor, true, borderType).
 *
 * @param src input image; it can have any number of channels, which are
 * processed independently, but the depth should be CV_8U, CV_16U, CV_16S,
 * CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means
 * that the anchor is at the kernel center.
*
* @see org.opencv.imgproc.Imgproc.blur
* @see org.opencv.imgproc.Imgproc#boxFilter
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.imgproc.Imgproc#bilateralFilter
* @see org.opencv.imgproc.Imgproc#medianBlur
*/
public static void blur(Mat src, Mat dst, Size ksize, Point anchor)
{
blur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y);
return;
}
/**
* Blurs an image using the normalized box filter.
* *The function smoothes an image using the kernel:
 *
 * K = 1/(ksize.width*ksize.height) * | 1 1 ... 1 |
 *                                    | 1 1 ... 1 |
 *                                    | ......... |
 *                                    | 1 1 ... 1 |
 *
 * The call blur(src, dst, ksize, anchor, borderType) is equivalent to
 * boxFilter(src, dst, src.type(), anchor, true, borderType).
 *
 * @param src input image; it can have any number of channels, which are
 * processed independently, but the depth should be CV_8U, CV_16U, CV_16S,
 * CV_32F or CV_64F.
 * @param dst output image of the same size and type as src.
 * @param ksize blurring kernel size.
*
* @see org.opencv.imgproc.Imgproc.blur
* @see org.opencv.imgproc.Imgproc#boxFilter
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.imgproc.Imgproc#bilateralFilter
* @see org.opencv.imgproc.Imgproc#medianBlur
*/
public static void blur(Mat src, Mat dst, Size ksize)
{
blur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height);
return;
}
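// Usage sketch (illustrative, not part of the generated bindings): a normalized box
// blur, together with the equivalent boxFilter call described in the Javadoc above.
// The 5x5 kernel size is an example value.
private static Mat exampleBoxBlur(Mat src)
{
    Mat dst = new Mat();
    blur(src, dst, new Size(5, 5));
    Mat dst2 = new Mat();                     // produces the same result as blur() above
    boxFilter(src, dst2, -1, new Size(5, 5), new Point(-1, -1), true, BORDER_DEFAULT);
    return dst;
}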
//
// C++: int borderInterpolate(int p, int len, int borderType)
//
/**
* Computes the source location of an extrapolated pixel.
* *The function computes and returns the coordinate of a donor pixel
* corresponding to the specified extrapolated pixel when using the specified
 * extrapolation border mode. For example, if you use BORDER_WRAP mode in the
 * horizontal direction, BORDER_REFLECT_101 in the vertical direction and want
 * to compute value of the "virtual" pixel Point(-5, 100) in a floating-point
 * image img, it looks like:
 *
 * // C++ code:
 *
 * float val = img.at<float>(borderInterpolate(100, img.rows, BORDER_REFLECT_101),
 *                           borderInterpolate(-5, img.cols, BORDER_WRAP));
 *
 * Normally, the function is not called directly. It is used inside
 * "FilterEngine" and "copyMakeBorder" to compute tables for quick
 * extrapolation.
 *
 * @param p 0-based coordinate of the extrapolated pixel along one of the axes,
 * likely <0 or >=len.
 * @param len Length of the array along the corresponding axis.
 * @param borderType Border type, one of the BORDER_*, except for
 * BORDER_TRANSPARENT and BORDER_ISOLATED. When
 * borderType==BORDER_CONSTANT, the function always returns -1,
 * regardless of p and len.
*
* @see org.opencv.imgproc.Imgproc.borderInterpolate
* @see org.opencv.imgproc.Imgproc#copyMakeBorder
*/
public static int borderInterpolate(int p, int len, int borderType)
{
int retVal = borderInterpolate_0(p, len, borderType);
return retVal;
}
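// Usage sketch (illustrative, not part of the generated bindings): find which source
// column the "virtual" pixel at x = -5 would be read from under BORDER_WRAP
// extrapolation. The helper name is hypothetical.
private static int exampleDonorColumn(Mat img)
{
    return borderInterpolate(-5, img.cols(), BORDER_WRAP);   // donor x-coordinate inside the image
}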
//
// C++: Rect boundingRect(vector_Point points)
//
/**
* Calculates the up-right bounding rectangle of a point set.
 * The function calculates and returns the minimal up-right bounding rectangle
 * for the specified point set.
 *
 * @param points Input 2D point set, stored in std.vector or Mat.
*
* @see org.opencv.imgproc.Imgproc.boundingRect
*/
public static Rect boundingRect(MatOfPoint points)
{
Mat points_mat = points;
Rect retVal = new Rect(boundingRect_0(points_mat.nativeObj));
return retVal;
}
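// Usage sketch (illustrative, not part of the generated bindings): bounding box of a
// small point set. The coordinates are example values only.
private static Rect exampleBoundingBox()
{
    MatOfPoint points = new MatOfPoint(
            new Point(2, 3), new Point(10, 4), new Point(5, 12));
    return boundingRect(points);              // up-right rectangle covering all points
}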
//
// C++: void boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT)
//
/**
* Blurs an image using the box filter.
* *The function smoothes an image using the kernel:
 *
 * K = alpha * | 1 1 ... 1 |
 *             | 1 1 ... 1 |
 *             | ......... |
 *             | 1 1 ... 1 |
 *
 * where
 *
 * alpha = 1/(ksize.width*ksize.height) when normalize=true; 1 otherwise
 *
 * Unnormalized box filter is useful for computing various integral
 * characteristics over each pixel neighborhood, such as covariance matrices of
 * image derivatives (used in dense optical flow algorithms, and so on). If you
 * need to compute pixel sums over variable-size windows, use "integral".
 *
 * @param src input image.
 * @param dst output image of the same size and type as src.
 * @param ddepth the output image depth (-1 to use src.depth()).
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means
 * that the anchor is at the kernel center.
 * @param normalize flag, specifying whether the kernel is normalized by its
 * area or not.
 * @param borderType border mode used to extrapolate pixels outside of the
 * image.
*
* @see org.opencv.imgproc.Imgproc.boxFilter
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.imgproc.Imgproc#medianBlur
* @see org.opencv.imgproc.Imgproc#integral
* @see org.opencv.imgproc.Imgproc#bilateralFilter
* @see org.opencv.imgproc.Imgproc#blur
*/
public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize, int borderType)
{
boxFilter_0(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize, borderType);
return;
}
/**
* Blurs an image using the box filter.
* *The function smoothes an image using the kernel:
 *
 * K = alpha * | 1 1 ... 1 |
 *             | 1 1 ... 1 |
 *             | ......... |
 *             | 1 1 ... 1 |
 *
 * where
 *
 * alpha = 1/(ksize.width*ksize.height) when normalize=true; 1 otherwise
 *
 * Unnormalized box filter is useful for computing various integral
 * characteristics over each pixel neighborhood, such as covariance matrices of
 * image derivatives (used in dense optical flow algorithms, and so on). If you
 * need to compute pixel sums over variable-size windows, use "integral".
 *
 * @param src input image.
 * @param dst output image of the same size and type as src.
 * @param ddepth the output image depth (-1 to use src.depth()).
 * @param ksize blurring kernel size.
 * @param anchor anchor point; default value Point(-1,-1) means
 * that the anchor is at the kernel center.
 * @param normalize flag, specifying whether the kernel is normalized by its
 * area or not.
*
* @see org.opencv.imgproc.Imgproc.boxFilter
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.imgproc.Imgproc#medianBlur
* @see org.opencv.imgproc.Imgproc#integral
* @see org.opencv.imgproc.Imgproc#bilateralFilter
* @see org.opencv.imgproc.Imgproc#blur
*/
public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize)
{
boxFilter_1(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize);
return;
}
/**
* Blurs an image using the box filter.
* *The function smoothes an image using the kernel:
 *
 * K = alpha * | 1 1 ... 1 |
 *             | 1 1 ... 1 |
 *             | ......... |
 *             | 1 1 ... 1 |
 *
 * where
 *
 * alpha = 1/(ksize.width*ksize.height) when normalize=true; 1 otherwise
 *
 * Unnormalized box filter is useful for computing various integral
 * characteristics over each pixel neighborhood, such as covariance matrices of
 * image derivatives (used in dense optical flow algorithms, and so on). If you
 * need to compute pixel sums over variable-size windows, use "integral".
 *
 * @param src input image.
 * @param dst output image of the same size and type as src.
 * @param ddepth the output image depth (-1 to use src.depth()).
 * @param ksize blurring kernel size.
*
* @see org.opencv.imgproc.Imgproc.boxFilter
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.imgproc.Imgproc#medianBlur
* @see org.opencv.imgproc.Imgproc#integral
* @see org.opencv.imgproc.Imgproc#bilateralFilter
* @see org.opencv.imgproc.Imgproc#blur
*/
public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize)
{
boxFilter_2(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height);
return;
}
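// Usage sketch (illustrative, not part of the generated bindings): an unnormalized box
// filter computes per-pixel sums over a ksize neighborhood, as noted above. CV_32F
// output avoids saturation of the sums; CvType is fully qualified because it is not
// imported in this file.
private static Mat exampleLocalSums(Mat src, int windowSize)
{
    Mat sums = new Mat();
    boxFilter(src, sums, org.opencv.core.CvType.CV_32F, new Size(windowSize, windowSize),
            new Point(-1, -1), false, BORDER_DEFAULT);        // normalize=false -> plain sums
    return sums;
}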
//
// C++: void calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale)
//
/**
* Calculates the back projection of a histogram.
* *The functions calcBackProject
calculate the back project of the
* histogram. That is, similarly to calcHist
, at each location
* (x, y)
the function collects the values from the selected
* channels in the input images and finds the corresponding histogram bin. But
* instead of incrementing it, the function reads the bin value, scales it by
* scale
, and stores in backProject(x,y)
. In terms of
* statistics, the function computes probability of each element value in
* respect with the empirical probability distribution represented by the
* histogram. See how, for example, you can find and track a bright-colored
* object in a scene:
-
*
- Before tracking, show the object to the camera so that it covers * almost the whole frame. Calculate a hue histogram. The histogram may have * strong maximums, corresponding to the dominant colors in the object. *
- When tracking, calculate a back projection of a hue plane of each * input video frame using that pre-computed histogram. Threshold the back * projection to suppress weak colors. It may also make sense to suppress pixels * with non-sufficient color saturation and too dark or too bright pixels. *
- Find connected components in the resulting picture and choose, for * example, the largest component. *
This is an approximate algorithm of the "CamShift" color object tracker.
 * @param images Source arrays. They all should have the same depth, CV_8U or
 * CV_32F, and the same size. Each of them can have an arbitrary number of
 * channels.
 * @param channels The list of channels used to compute the back projection. The
 * number of channels must match the histogram dimensionality. The first array
 * channels are numerated from 0 to images[0].channels()-1, the second array
 * channels are counted from images[0].channels() to
 * images[0].channels() + images[1].channels()-1, and so on.
 * @param hist Input histogram that can be dense or sparse.
 * @param dst Destination back projection array that is a single-channel array
 * of the same size and depth as images[0].
 * @param ranges Array of arrays of the histogram bin boundaries in each
 * dimension. See "calcHist".
 * @param scale Optional scale factor for the output back projection.
*
* @see org.opencv.imgproc.Imgproc.calcBackProject
* @see org.opencv.imgproc.Imgproc#calcHist
*/
public static void calcBackProject(List<Mat> images, MatOfInt channels, Mat hist, Mat dst, MatOfFloat ranges, double scale)
{
    Mat images_mat = Converters.vector_Mat_to_Mat(images);
    Mat channels_mat = channels;
    Mat ranges_mat = ranges;
    calcBackProject_0(images_mat.nativeObj, channels_mat.nativeObj, hist.nativeObj, dst.nativeObj, ranges_mat.nativeObj, scale);
    return;
}
/**
 * Calculates a histogram of a set of arrays.
* *The functions calcHist
calculate the histogram of one or more
* arrays. The elements of a tuple used to increment a histogram bin are taken
 * from the corresponding input arrays at the same location. The sample below
* shows how to compute a 2D Hue-Saturation histogram for a color image.
*
// C++ code:
 * #include <cv.h>
 * #include <highgui.h>
 *
 * using namespace cv;
 *
 * int main(int argc, char** argv)
* * *Mat src, hsv;
* *if(argc != 2 || !(src=imread(argv[1], 1)).data)
* *return -1;
* *cvtColor(src, hsv, CV_BGR2HSV);
* *// Quantize the hue to 30 levels
* *// and the saturation to 32 levels
* *int hbins = 30, sbins = 32;
* *int histSize[] = {hbins, sbins};
* *// hue varies from 0 to 179, see cvtColor
* *float hranges[] = { 0, 180 };
* *// saturation varies from 0 (black-gray-white) to
* *// 255 (pure spectrum color)
* *float sranges[] = { 0, 256 };
* *const float* ranges[] = { hranges, sranges };
* *MatND hist;
* *// we compute the histogram from the 0-th and 1-st channels
* *int channels[] = {0, 1};
* *calcHist(&hsv, 1, channels, Mat(), // do not use mask
* *hist, 2, histSize, ranges,
* *true, // the histogram is uniform
* *false);
* *double maxVal=0;
* *minMaxLoc(hist, 0, &maxVal, 0, 0);
* *int scale = 10;
 * Mat histImg = Mat::zeros(sbins*scale, hbins*10, CV_8UC3);
* *for(int h = 0; h < hbins; h++)
* *for(int s = 0; s < sbins; s++)
 * float binVal = hist.at<float>(h, s);
 * int intensity = cvRound(binVal*255/maxVal);
* *rectangle(histImg, Point(h*scale, s*scale),
* *Point((h+1)*scale - 1, (s+1)*scale - 1),
 * Scalar::all(intensity),
* *CV_FILLED);
* * *namedWindow("Source", 1);
* *imshow("Source", src);
* *namedWindow("H-S Histogram", 1);
* *imshow("H-S Histogram", histImg);
* *waitKey();
* * *Note:
*-
*
- An example for creating histograms of an image can be found at * opencv_source_code/samples/cpp/demhist.cpp *
- (Python) An example for creating color histograms can be found at * opencv_source/samples/python2/color_histogram.py *
- (Python) An example illustrating RGB and grayscale histogram plotting * can be found at opencv_source/samples/python2/hist.py *
 * @param images Source arrays. They all should have the same depth, CV_8U or
 * CV_32F, and the same size. Each of them can have an arbitrary number of
 * channels.
* @param channels List of the dims
channels used to compute the
* histogram. The first array channels are numerated from 0 to images[0].channels()-1
,
* the second array channels are counted from images[0].channels()
* to images[0].channels() + images[1].channels()-1
, and so on.
* @param mask Optional mask. If the matrix is not empty, it must be an 8-bit
* array of the same size as images[i]
. The non-zero mask elements
* mark the array elements counted in the histogram.
* @param hist Output histogram, which is a dense or sparse dims
* -dimensional array.
* @param histSize Array of histogram sizes in each dimension.
* @param ranges Array of the dims
arrays of the histogram bin
* boundaries in each dimension. When the histogram is uniform (uniform
* =true), then for each dimension i
it is enough to specify the
* lower (inclusive) boundary L_0 of the 0-th histogram bin and the
* upper (exclusive) boundary U_(histSize[i]-1) for the last histogram
* bin histSize[i]-1
. That is, in case of a uniform histogram each
* of ranges[i]
is an array of 2 elements. When the histogram is
* not uniform (uniform=false
), then each of ranges[i]
* contains histSize[i]+1
elements: L_0, U_0=L_1, U_1=L_2,...,
* U_(histSize[i]-2)=L_(histSize[i]-1), U_(histSize[i]-1). The array
* elements, that are not between L_0 and U_(histSize[i]-1),
* are not counted in the histogram.
* @param accumulate Accumulation flag. If it is set, the histogram is not
* cleared in the beginning when it is allocated. This feature enables you to
* compute a single histogram from several sets of arrays, or to update the
* histogram in time.
*
* @see org.opencv.imgproc.Imgproc.calcHist
*/
public static void calcHist(List<Mat> images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges, boolean accumulate)
{
    Mat images_mat = Converters.vector_Mat_to_Mat(images);
    Mat channels_mat = channels;
    Mat histSize_mat = histSize;
    Mat ranges_mat = ranges;
    calcHist_0(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj, accumulate);
    return;
}
/**
 * Calculates a histogram of a set of arrays.
* *The functions calcHist
calculate the histogram of one or more
* arrays. The elements of a tuple used to increment a histogram bin are taken
 * from the corresponding input arrays at the same location. The sample below
* shows how to compute a 2D Hue-Saturation histogram for a color image.
*
// C++ code:
 * #include <cv.h>
 * #include <highgui.h>
 *
 * using namespace cv;
 *
 * int main(int argc, char** argv)
* * *Mat src, hsv;
* *if(argc != 2 || !(src=imread(argv[1], 1)).data)
* *return -1;
* *cvtColor(src, hsv, CV_BGR2HSV);
* *// Quantize the hue to 30 levels
* *// and the saturation to 32 levels
* *int hbins = 30, sbins = 32;
* *int histSize[] = {hbins, sbins};
* *// hue varies from 0 to 179, see cvtColor
* *float hranges[] = { 0, 180 };
* *// saturation varies from 0 (black-gray-white) to
* *// 255 (pure spectrum color)
* *float sranges[] = { 0, 256 };
* *const float* ranges[] = { hranges, sranges };
* *MatND hist;
* *// we compute the histogram from the 0-th and 1-st channels
* *int channels[] = {0, 1};
* *calcHist(&hsv, 1, channels, Mat(), // do not use mask
* *hist, 2, histSize, ranges,
* *true, // the histogram is uniform
* *false);
* *double maxVal=0;
* *minMaxLoc(hist, 0, &maxVal, 0, 0);
* *int scale = 10;
 * Mat histImg = Mat::zeros(sbins*scale, hbins*10, CV_8UC3);
* *for(int h = 0; h < hbins; h++)
* *for(int s = 0; s < sbins; s++)
 * float binVal = hist.at<float>(h, s);
 * int intensity = cvRound(binVal*255/maxVal);
* *rectangle(histImg, Point(h*scale, s*scale),
* *Point((h+1)*scale - 1, (s+1)*scale - 1),
 * Scalar::all(intensity),
* *CV_FILLED);
* * *namedWindow("Source", 1);
* *imshow("Source", src);
* *namedWindow("H-S Histogram", 1);
* *imshow("H-S Histogram", histImg);
* *waitKey();
* * *Note:
*-
*
- An example for creating histograms of an image can be found at * opencv_source_code/samples/cpp/demhist.cpp *
- (Python) An example for creating color histograms can be found at * opencv_source/samples/python2/color_histogram.py *
- (Python) An example illustrating RGB and grayscale histogram plotting * can be found at opencv_source/samples/python2/hist.py *
 * @param images Source arrays. They all should have the same depth, CV_8U or
 * CV_32F, and the same size. Each of them can have an arbitrary number of
 * channels.
* @param channels List of the dims
channels used to compute the
* histogram. The first array channels are numerated from 0 to images[0].channels()-1
,
* the second array channels are counted from images[0].channels()
* to images[0].channels() + images[1].channels()-1
, and so on.
* @param mask Optional mask. If the matrix is not empty, it must be an 8-bit
* array of the same size as images[i]
. The non-zero mask elements
* mark the array elements counted in the histogram.
* @param hist Output histogram, which is a dense or sparse dims
* -dimensional array.
* @param histSize Array of histogram sizes in each dimension.
* @param ranges Array of the dims
arrays of the histogram bin
* boundaries in each dimension. When the histogram is uniform (uniform
* =true), then for each dimension i
it is enough to specify the
* lower (inclusive) boundary L_0 of the 0-th histogram bin and the
* upper (exclusive) boundary U_(histSize[i]-1) for the last histogram
* bin histSize[i]-1
. That is, in case of a uniform histogram each
* of ranges[i]
is an array of 2 elements. When the histogram is
* not uniform (uniform=false
), then each of ranges[i]
* contains histSize[i]+1
elements: L_0, U_0=L_1, U_1=L_2,...,
* U_(histSize[i]-2)=L_(histSize[i]-1), U_(histSize[i]-1). The array
* elements, that are not between L_0 and U_(histSize[i]-1),
* are not counted in the histogram.
*
* @see org.opencv.imgproc.Imgproc.calcHist
*/
public static void calcHist(List<Mat> images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges)
{
    Mat images_mat = Converters.vector_Mat_to_Mat(images);
    Mat channels_mat = channels;
    Mat histSize_mat = histSize;
    Mat ranges_mat = ranges;
    calcHist_1(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj);
    return;
}
/**
 * Compares two histograms.
* *The functions compareHist
compare two dense or two sparse
* histograms using the specified method:
 *
 * - Correlation (method=CV_COMP_CORREL):
 *
 *   d(H_1,H_2) = (sum_I (H_1(I) - mean(H_1)) * (H_2(I) - mean(H_2))) /
 *                sqrt(sum_I (H_1(I) - mean(H_1))^2 * sum_I (H_2(I) - mean(H_2))^2)
 *
 *   where mean(H_k) = (1/N) * sum_J H_k(J) and N is the total number of
 *   histogram bins.
 *
 * - Chi-Square (method=CV_COMP_CHISQR):
 *
 *   d(H_1,H_2) = sum_I ((H_1(I) - H_2(I))^2) / H_1(I)
 *
 * - Intersection (method=CV_COMP_INTERSECT):
 *
 *   d(H_1,H_2) = sum_I min(H_1(I), H_2(I))
 *
 * - Bhattacharyya distance (method=CV_COMP_BHATTACHARYYA or
 *   method=CV_COMP_HELLINGER). In fact, OpenCV computes the Hellinger
 *   distance, which is related to the Bhattacharyya coefficient:
 *
 *   d(H_1,H_2) = sqrt(1 - (1/sqrt(mean(H_1)*mean(H_2)*N^2)) * sum_I sqrt(H_1(I)*H_2(I)))
 *
 * The function returns d(H_1, H_2).
* *While the function works well with 1-, 2-, 3-dimensional dense histograms, it * may not be suitable for high-dimensional sparse histograms. In such * histograms, because of aliasing and sampling problems, the coordinates of * non-zero histogram bins can slightly shift. To compare such histograms or * more general sparse configurations of weighted points, consider using the * "EMD" function.
 *
 * @param H1 First compared histogram.
 * @param H2 Second compared histogram of the same size as H1.
 * @param method Comparison method that could be one of the following:
 * - CV_COMP_CORREL Correlation
 * - CV_COMP_CHISQR Chi-Square
 * - CV_COMP_INTERSECT Intersection
 * - CV_COMP_BHATTACHARYYA Bhattacharyya distance
 * - CV_COMP_HELLINGER Synonym for CV_COMP_BHATTACHARYYA
 *
 * @see org.opencv.imgproc.Imgproc.compareHist
 */
public static double compareHist(Mat H1, Mat H2, int method)
{
    double retVal = compareHist_0(H1.nativeObj, H2.nativeObj, method);
    return retVal;
}
/**
 * Calculates a contour area.
* *The function computes a contour area. Similarly to "moments", the area is
* computed using the Green formula. Thus, the returned area and the number of
* non-zero pixels, if you draw the contour using "drawContours" or "fillPoly",
* can be different.
 * Also, the function will most certainly give wrong results for contours with
* self-intersections.
* Example:
// C++ code:
 * vector<Point2f> contour;
contour.push_back(Point2f(0, 0));
* *contour.push_back(Point2f(10, 0));
* *contour.push_back(Point2f(10, 10));
* *contour.push_back(Point2f(5, 4));
* *double area0 = contourArea(contour);
 * vector<Point2f> approx;
approxPolyDP(contour, approx, 5, true);
* *double area1 = contourArea(approx);
* *cout << "area0 =" << area0 << endl <<
* *"area1 =" << area1 << endl <<
* *"approx poly vertices" << approx.size() << endl;
* * @param contour Input vector of 2D points (contour vertices), stored in *std.vector
or Mat
.
* @param oriented Oriented area flag. If it is true, the function returns a
* signed area value, depending on the contour orientation (clockwise or
* counter-clockwise). Using this feature you can determine orientation of a
* contour by taking the sign of an area. By default, the parameter is
* false
, which means that the absolute value is returned.
*
* @see org.opencv.imgproc.Imgproc.contourArea
*/
public static double contourArea(Mat contour, boolean oriented)
{
double retVal = contourArea_0(contour.nativeObj, oriented);
return retVal;
}
/**
* Calculates a contour area.
* *The function computes a contour area. Similarly to "moments", the area is
* computed using the Green formula. Thus, the returned area and the number of
* non-zero pixels, if you draw the contour using "drawContours" or "fillPoly",
* can be different.
 * Also, the function will most certainly give wrong results for contours with
* self-intersections.
* Example:
// C++ code:
 * vector<Point2f> contour;
contour.push_back(Point2f(0, 0));
* *contour.push_back(Point2f(10, 0));
* *contour.push_back(Point2f(10, 10));
* *contour.push_back(Point2f(5, 4));
* *double area0 = contourArea(contour);
 * vector<Point2f> approx;
approxPolyDP(contour, approx, 5, true);
* *double area1 = contourArea(approx);
* *cout << "area0 =" << area0 << endl <<
* *"area1 =" << area1 << endl <<
* *"approx poly vertices" << approx.size() << endl;
* * @param contour Input vector of 2D points (contour vertices), stored in *std.vector
or Mat
.
*
* @see org.opencv.imgproc.Imgproc.contourArea
*/
public static double contourArea(Mat contour)
{
double retVal = contourArea_1(contour.nativeObj);
return retVal;
}
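// Usage sketch (illustrative, not part of the generated bindings): a Java rendering of
// the C++ example in the Javadoc above - compare a contour's area with the area of its
// polygonal approximation. The helper name is hypothetical.
private static void exampleContourArea()
{
    MatOfPoint2f contour = new MatOfPoint2f(
            new Point(0, 0), new Point(10, 0), new Point(10, 10), new Point(5, 4));
    double area0 = contourArea(contour);
    MatOfPoint2f approx = new MatOfPoint2f();
    approxPolyDP(contour, approx, 5, true);
    double area1 = contourArea(approx);
    System.out.println("area0 = " + area0 + ", area1 = " + area1
            + ", approx poly vertices = " + approx.total());
}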
//
// C++: void convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false)
//
/**
* Converts image transformation maps from one representation to another.
* *The function converts a pair of maps for "remap" from one representation to
* another. The following options ((map1.type(), map2.type())
* -> (dstmap1.type(), dstmap2.type())
) are supported:
-
*
- (CV_32FC1, CV_32FC1) -> (CV_16SC2, CV_16UC1). This is the
* most frequently used conversion operation, in which the original
* floating-point maps (see "remap") are converted to a more compact and much
* faster fixed-point representation. The first output array contains the
* rounded coordinates and the second array (created only when
nninterpolation=false
) * contains indices in the interpolation tables. * - (CV_32FC2) -> (CV_16SC2, CV_16UC1). The same as above but the * original maps are stored in one 2-channel matrix. *
- Reverse conversion. Obviously, the reconstructed floating-point maps * will not be exactly the same as the originals. *
 * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2.
* @param map2 The second input map of type CV_16UC1
,
* CV_32FC1
, or none (empty matrix), respectively.
* @param dstmap1 The first output map that has the type dstmap1type
* and the same size as src
.
* @param dstmap2 The second output map.
* @param dstmap1type Type of the first output map that should be
* CV_16SC2
, CV_32FC1
, or CV_32FC2
.
* @param nninterpolation Flag indicating whether the fixed-point maps are used
* for the nearest-neighbor or for a more complex interpolation.
*
* @see org.opencv.imgproc.Imgproc.convertMaps
* @see org.opencv.imgproc.Imgproc#remap
* @see org.opencv.imgproc.Imgproc#initUndistortRectifyMap
* @see org.opencv.imgproc.Imgproc#undistort
*/
public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type, boolean nninterpolation)
{
convertMaps_0(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type, nninterpolation);
return;
}
/**
* Converts image transformation maps from one representation to another.
* *The function converts a pair of maps for "remap" from one representation to
* another. The following options ((map1.type(), map2.type())
* -> (dstmap1.type(), dstmap2.type())
) are supported:
-
*
- (CV_32FC1, CV_32FC1) -> (CV_16SC2, CV_16UC1). This is the
* most frequently used conversion operation, in which the original
* floating-point maps (see "remap") are converted to a more compact and much
* faster fixed-point representation. The first output array contains the
* rounded coordinates and the second array (created only when
nninterpolation=false
) * contains indices in the interpolation tables. * - (CV_32FC2) -> (CV_16SC2, CV_16UC1). The same as above but the * original maps are stored in one 2-channel matrix. *
- Reverse conversion. Obviously, the reconstructed floating-point maps * will not be exactly the same as the originals. *
 * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2.
* @param map2 The second input map of type CV_16UC1
,
* CV_32FC1
, or none (empty matrix), respectively.
* @param dstmap1 The first output map that has the type dstmap1type
* and the same size as src
.
* @param dstmap2 The second output map.
* @param dstmap1type Type of the first output map that should be
* CV_16SC2
, CV_32FC1
, or CV_32FC2
.
*
* @see org.opencv.imgproc.Imgproc.convertMaps
* @see org.opencv.imgproc.Imgproc#remap
* @see org.opencv.imgproc.Imgproc#initUndistortRectifyMap
* @see org.opencv.imgproc.Imgproc#undistort
*/
public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type)
{
convertMaps_1(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type);
return;
}
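// Usage sketch (illustrative, not part of the generated bindings): convert
// floating-point remap tables to the compact fixed-point representation described
// above and then remap with them. mapX/mapY are assumed to be CV_32FC1 maps prepared
// by the caller; CvType is fully qualified because it is not imported in this file.
private static Mat exampleFixedPointRemap(Mat src, Mat mapX, Mat mapY)
{
    Mat fixedXY = new Mat();                  // receives CV_16SC2 rounded coordinates
    Mat interpTable = new Mat();              // receives CV_16UC1 interpolation-table indices
    convertMaps(mapX, mapY, fixedXY, interpTable, org.opencv.core.CvType.CV_16SC2, false);
    Mat dst = new Mat();
    remap(src, dst, fixedXY, interpTable, INTER_LINEAR);
    return dst;
}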
//
// C++: void convexHull(vector_Point points, vector_int& hull, bool clockwise = false, _hidden_ returnPoints = true)
//
/**
* Finds the convex hull of a point set.
* *The functions find the convex hull of a 2D point set using the Sklansky's
* algorithm [Sklansky82] that has *O(N logN)* complexity in the current
* implementation. See the OpenCV sample convexhull.cpp
that
* demonstrates the usage of different function variants.
Note:
*-
*
- An example using the convexHull functionality can be found at * opencv_source_code/samples/cpp/convexhull.cpp *
 * @param points Input 2D point set, stored in std.vector or Mat.
* @param hull Output convex hull. It is either an integer vector of indices or
* vector of points. In the first case, the hull
elements are
* 0-based indices of the convex hull points in the original array (since the
* set of convex hull points is a subset of the original point set). In the
* second case, hull
elements are the convex hull points
* themselves.
* @param clockwise Orientation flag. If it is true, the output convex hull is
* oriented clockwise. Otherwise, it is oriented counter-clockwise. The assumed
* coordinate system has its X axis pointing to the right, and its Y axis
* pointing upwards.
*
* @see org.opencv.imgproc.Imgproc.convexHull
*/
public static void convexHull(MatOfPoint points, MatOfInt hull, boolean clockwise)
{
Mat points_mat = points;
Mat hull_mat = hull;
convexHull_0(points_mat.nativeObj, hull_mat.nativeObj, clockwise);
return;
}
/**
* Finds the convex hull of a point set.
* *The functions find the convex hull of a 2D point set using the Sklansky's
* algorithm [Sklansky82] that has *O(N logN)* complexity in the current
* implementation. See the OpenCV sample convexhull.cpp
that
* demonstrates the usage of different function variants.
Note:
*-
*
- An example using the convexHull functionality can be found at * opencv_source_code/samples/cpp/convexhull.cpp *
 * @param points Input 2D point set, stored in std.vector or Mat.
* @param hull Output convex hull. It is either an integer vector of indices or
* vector of points. In the first case, the hull
elements are
* 0-based indices of the convex hull points in the original array (since the
* set of convex hull points is a subset of the original point set). In the
* second case, hull
elements are the convex hull points
* themselves.
*
* @see org.opencv.imgproc.Imgproc.convexHull
*/
public static void convexHull(MatOfPoint points, MatOfInt hull)
{
Mat points_mat = points;
Mat hull_mat = hull;
convexHull_1(points_mat.nativeObj, hull_mat.nativeObj);
return;
}
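// Usage sketch (illustrative, not part of the generated bindings): hull of a small
// point set, returned as 0-based indices into the input array. The coordinates are
// example values only.
private static MatOfInt exampleConvexHullIndices()
{
    MatOfPoint points = new MatOfPoint(
            new Point(0, 0), new Point(10, 0), new Point(10, 10), new Point(5, 4));
    MatOfInt hull = new MatOfInt();
    convexHull(points, hull);                 // indices of the hull points in "points"
    return hull;
}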
//
// C++: void convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects)
//
/**
* Finds the convexity defects of a contour.
* *The function finds all convexity defects of the input contour and returns a
* sequence of the CvConvexityDefect
structures, where
 * CvConvexityDefect is defined as:
// C++ code:
* *struct CvConvexityDefect
* * *CvPoint* start; // point of the contour where the defect begins
* *CvPoint* end; // point of the contour where the defect ends
* *CvPoint* depth_point; // the farthest from the convex hull point within the * defect
* *float depth; // distance between the farthest point and the convex hull
* *};
* *The figure below displays convexity defects of a hand contour:
* * @param contour Input contour. * @param convexhull Convex hull obtained using "convexHull" that should contain * indices of the contour points that make the hull. * @param convexityDefects The output vector of convexity defects. In C++ and * the new Python/Java interface each convexity defect is represented as * 4-element integer vector (a.k.a.cv.Vec4i
): (start_index,
* end_index, farthest_pt_index, fixpt_depth)
, where indices are 0-based
* indices in the original contour of the convexity defect beginning, end and
* the farthest point, and fixpt_depth
is fixed-point approximation
* (with 8 fractional bits) of the distance between the farthest contour point
 * and the hull. That is, the floating-point value of the depth is
 * fixpt_depth/256.0. In the C interface, a convexity defect is
* represented by CvConvexityDefect
structure - see below.
*
* @see org.opencv.imgproc.Imgproc.convexityDefects
*/
public static void convexityDefects(MatOfPoint contour, MatOfInt convexhull, MatOfInt4 convexityDefects)
{
Mat contour_mat = contour;
Mat convexhull_mat = convexhull;
Mat convexityDefects_mat = convexityDefects;
convexityDefects_0(contour_mat.nativeObj, convexhull_mat.nativeObj, convexityDefects_mat.nativeObj);
return;
}
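// Usage sketch (illustrative, not part of the generated bindings): defects of a contour
// relative to its hull; each defect row is (start_index, end_index, farthest_pt_index,
// fixpt_depth), with depth in pixels equal to fixpt_depth/256.0.
private static MatOfInt4 exampleConvexityDefects(MatOfPoint contour)
{
    MatOfInt hull = new MatOfInt();
    convexHull(contour, hull);                // convexityDefects requires hull indices
    MatOfInt4 defects = new MatOfInt4();
    convexityDefects(contour, hull, defects);
    return defects;
}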
//
// C++: void copyMakeBorder(Mat src, Mat& dst, int top, int bottom, int left, int right, int borderType, Scalar value = Scalar())
//
/**
* Forms a border around an image.
* *The function copies the source image into the middle of the destination
* image. The areas to the left, to the right, above and below the copied source
* image will be filled with extrapolated pixels. This is not what
* "FilterEngine" or filtering functions based on it do (they extrapolate pixels
* on-fly), but what other more complex functions, including your own, may do to
* simplify image boundary handling.
* The function supports the mode when src
is already in the middle
* of dst
. In this case, the function does not copy
* src
itself but simply constructs the border, for example:
// C++ code:
* *// let border be the same in all directions
* *int border=2;
* *// constructs a larger image to fit both the image and the border
* *Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth());
* *// select the middle part of it w/o copying data
 * Mat gray(gray_buf, Rect(border, border, rgb.cols, rgb.rows));
* *// convert image from RGB to grayscale
* *cvtColor(rgb, gray, CV_RGB2GRAY);
* *// form a border in-place
* *copyMakeBorder(gray, gray_buf, border, border,
* *border, border, BORDER_REPLICATE);
* *// now do some custom filtering......
* *Note:
* *When the source image is a part (ROI) of a bigger image, the function will
* try to use the pixels outside of the ROI to form a border. To disable this
* feature and always do extrapolation, as if src
was not a ROI,
* use borderType | BORDER_ISOLATED
.
 *
 * @param src Source image.
 * @param dst Destination image of the same type as src and the
 * size Size(src.cols+left+right, src.rows+top+bottom).
 * @param top Number of pixels to extrapolate above the source image.
 * @param bottom Number of pixels to extrapolate below the source image.
 * @param left Number of pixels to extrapolate to the left of the source image.
 * @param right Parameter specifying how many pixels in each direction from the
 * source image rectangle to extrapolate. For example, top=1, bottom=1,
 * left=1, right=1 mean that a 1 pixel-wide border needs to be built.
* @param borderType Border type. See "borderInterpolate" for details.
* @param value Border value if borderType==BORDER_CONSTANT
.
*
* @see org.opencv.imgproc.Imgproc.copyMakeBorder
* @see org.opencv.imgproc.Imgproc#borderInterpolate
*/
public static void copyMakeBorder(Mat src, Mat dst, int top, int bottom, int left, int right, int borderType, Scalar value)
{
copyMakeBorder_0(src.nativeObj, dst.nativeObj, top, bottom, left, right, borderType, value.val[0], value.val[1], value.val[2], value.val[3]);
return;
}
/**
* Forms a border around an image.
* *The function copies the source image into the middle of the destination
* image. The areas to the left, to the right, above and below the copied source
* image will be filled with extrapolated pixels. This is not what
* "FilterEngine" or filtering functions based on it do (they extrapolate pixels
* on-fly), but what other more complex functions, including your own, may do to
* simplify image boundary handling.
* The function supports the mode when src
is already in the middle
* of dst
. In this case, the function does not copy
* src
itself but simply constructs the border, for example:
// C++ code:
* *// let border be the same in all directions
* *int border=2;
* *// constructs a larger image to fit both the image and the border
* *Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth());
* *// select the middle part of it w/o copying data
 * Mat gray(gray_buf, Rect(border, border, rgb.cols, rgb.rows));
* *// convert image from RGB to grayscale
* *cvtColor(rgb, gray, CV_RGB2GRAY);
* *// form a border in-place
* *copyMakeBorder(gray, gray_buf, border, border,
* *border, border, BORDER_REPLICATE);
* *// now do some custom filtering......
* *Note:
* *When the source image is a part (ROI) of a bigger image, the function will
* try to use the pixels outside of the ROI to form a border. To disable this
* feature and always do extrapolation, as if src
was not a ROI,
* use borderType | BORDER_ISOLATED
.
 *
 * @param src Source image.
 * @param dst Destination image of the same type as src and the
 * size Size(src.cols+left+right, src.rows+top+bottom).
 * @param top Number of pixels to extrapolate above the source image.
 * @param bottom Number of pixels to extrapolate below the source image.
 * @param left Number of pixels to extrapolate to the left of the source image.
 * @param right Parameter specifying how many pixels in each direction from the
 * source image rectangle to extrapolate. For example, top=1, bottom=1,
 * left=1, right=1 mean that a 1 pixel-wide border needs to be built.
* @param borderType Border type. See "borderInterpolate" for details.
*
* @see org.opencv.imgproc.Imgproc.copyMakeBorder
* @see org.opencv.imgproc.Imgproc#borderInterpolate
*/
public static void copyMakeBorder(Mat src, Mat dst, int top, int bottom, int left, int right, int borderType)
{
copyMakeBorder_1(src.nativeObj, dst.nativeObj, top, bottom, left, right, borderType);
return;
}
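// Usage sketch (illustrative, not part of the generated bindings): pad an image with a
// 16-pixel black border on every side. The border width and color are example values.
private static Mat examplePadWithBorder(Mat src)
{
    Mat padded = new Mat();
    copyMakeBorder(src, padded, 16, 16, 16, 16, BORDER_CONSTANT, new Scalar(0, 0, 0));
    return padded;
}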
//
// C++: void cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT)
//
/**
* Calculates eigenvalues and eigenvectors of image blocks for corner detection.
* *For every pixel p, the function cornerEigenValsAndVecs
* considers a blockSize
x blockSize
* neighborhood S(p). It calculates the covariation matrix of
* derivatives over the neighborhood as:
 *
 * M = | sum_{S(p)} (dI/dx)^2         sum_{S(p)} (dI/dx)(dI/dy) |
 *     | sum_{S(p)} (dI/dx)(dI/dy)    sum_{S(p)} (dI/dy)^2      |
 *
* *where the derivatives are computed using the "Sobel" operator.
* *After that, it finds eigenvectors and eigenvalues of M and stores * them in the destination image as (lambda_1, lambda_2, x_1, y_1, x_2, * y_2) where
*-
*
- lambda_1, lambda_2 are the non-sorted eigenvalues of * M *
- x_1, y_1 are the eigenvectors corresponding to * lambda_1 *
- x_2, y_2 are the eigenvectors corresponding to * lambda_2 *
The output of the function can be used for robust edge or corner detection.
* *Note:
*-
*
- (Python) An example on how to use eigenvectors and eigenvalues to * estimate image texture flow direction can be found at opencv_source_code/samples/python2/texture_flow.py *
 * @param src Input single-channel 8-bit or floating-point image.
 * @param dst Image to store the results. It has the same size as src and the
 * type CV_32FC(6).
* @param blockSize Neighborhood size (see details below).
* @param ksize Aperture parameter for the "Sobel" operator.
* @param borderType Pixel extrapolation method. See "borderInterpolate".
*
* @see org.opencv.imgproc.Imgproc.cornerEigenValsAndVecs
* @see org.opencv.imgproc.Imgproc#cornerHarris
* @see org.opencv.imgproc.Imgproc#cornerMinEigenVal
* @see org.opencv.imgproc.Imgproc#preCornerDetect
*/
public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize, int borderType)
{
cornerEigenValsAndVecs_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType);
return;
}
/**
* Calculates eigenvalues and eigenvectors of image blocks for corner detection.
* *For every pixel p, the function cornerEigenValsAndVecs
* considers a blockSize
x blockSize
* neighborhood S(p). It calculates the covariation matrix of
* derivatives over the neighborhood as:
 *
 * M = | sum_{S(p)} (dI/dx)^2         sum_{S(p)} (dI/dx)(dI/dy) |
 *     | sum_{S(p)} (dI/dx)(dI/dy)    sum_{S(p)} (dI/dy)^2      |
 *
* *where the derivatives are computed using the "Sobel" operator.
* *After that, it finds eigenvectors and eigenvalues of M and stores * them in the destination image as (lambda_1, lambda_2, x_1, y_1, x_2, * y_2) where
*-
*
- lambda_1, lambda_2 are the non-sorted eigenvalues of * M *
- x_1, y_1 are the eigenvectors corresponding to * lambda_1 *
- x_2, y_2 are the eigenvectors corresponding to * lambda_2 *
The output of the function can be used for robust edge or corner detection.
* *Note:
*-
*
- (Python) An example on how to use eigenvectors and eigenvalues to * estimate image texture flow direction can be found at opencv_source_code/samples/python2/texture_flow.py *
 * @param src Input single-channel 8-bit or floating-point image.
 * @param dst Image to store the results. It has the same size as src and the
 * type CV_32FC(6).
* @param blockSize Neighborhood size (see details below).
* @param ksize Aperture parameter for the "Sobel" operator.
*
* @see org.opencv.imgproc.Imgproc.cornerEigenValsAndVecs
* @see org.opencv.imgproc.Imgproc#cornerHarris
* @see org.opencv.imgproc.Imgproc#cornerMinEigenVal
* @see org.opencv.imgproc.Imgproc#preCornerDetect
*/
public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize)
{
cornerEigenValsAndVecs_1(src.nativeObj, dst.nativeObj, blockSize, ksize);
return;
}
//
// C++: void cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT)
//
/**
* Harris edge detector.
* *The function runs the Harris edge detector on the image. Similarly to * "cornerMinEigenVal" and "cornerEigenValsAndVecs", for each pixel (x, * y) it calculates a 2x2 gradient covariance matrix * M^((x,y)) over a blockSize x blockSize neighborhood. Then, * it computes the following characteristic:
* *dst(x,y) = det M^((x,y)) - k * (tr M^((x,y)))^2
* *Corners in the image can be found as the local maxima of this response map.
* * @param src Input single-channel 8-bit or floating-point image. * @param dst Image to store the Harris detector responses. It has the type *CV_32FC1
and the same size as src
.
* @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs").
* @param ksize Aperture parameter for the "Sobel" operator.
* @param k Harris detector free parameter. See the formula below.
* @param borderType Pixel extrapolation method. See "borderInterpolate".
*
* @see org.opencv.imgproc.Imgproc.cornerHarris
*/
public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k, int borderType)
{
cornerHarris_0(src.nativeObj, dst.nativeObj, blockSize, ksize, k, borderType);
return;
}
/**
* Harris edge detector.
* *The function runs the Harris edge detector on the image. Similarly to * "cornerMinEigenVal" and "cornerEigenValsAndVecs", for each pixel (x, * y) it calculates a 2x2 gradient covariance matrix * M^((x,y)) over a blockSize x blockSize neighborhood. Then, * it computes the following characteristic:
* *dst(x,y) = det M^((x,y)) - k * (tr M^((x,y)))^2
* *Corners in the image can be found as the local maxima of this response map.
* * @param src Input single-channel 8-bit or floating-point image. * @param dst Image to store the Harris detector responses. It has the type *CV_32FC1
and the same size as src
.
* @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs").
* @param ksize Aperture parameter for the "Sobel" operator.
* @param k Harris detector free parameter. See the formula below.
*
* @see org.opencv.imgproc.Imgproc.cornerHarris
*/
public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k)
{
cornerHarris_1(src.nativeObj, dst.nativeObj, blockSize, ksize, k);
return;
}
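// Usage sketch (illustrative, not part of the generated bindings): Harris response map
// of a single-channel image; blockSize=2, ksize=3 and k=0.04 are common example
// settings, not prescribed values.
private static Mat exampleHarrisResponse(Mat gray)
{
    Mat response = new Mat();                 // CV_32FC1, same size as gray
    cornerHarris(gray, response, 2, 3, 0.04);
    return response;                          // corners are local maxima of this map
}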
//
// C++: void cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT)
//
/**
* Calculates the minimal eigenvalue of gradient matrices for corner detection.
* *The function is similar to "cornerEigenValsAndVecs" but it calculates and * stores only the minimal eigenvalue of the covariance matrix of derivatives, * that is, min(lambda_1, lambda_2) in terms of the formulae in the * "cornerEigenValsAndVecs" description.
* * @param src Input single-channel 8-bit or floating-point image. * @param dst Image to store the minimal eigenvalues. It has the type *CV_32FC1
and the same size as src
.
* @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs").
* @param ksize Aperture parameter for the "Sobel" operator.
* @param borderType Pixel extrapolation method. See "borderInterpolate".
*
* @see org.opencv.imgproc.Imgproc.cornerMinEigenVal
*/
public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize, int borderType)
{
cornerMinEigenVal_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType);
return;
}
/**
* Calculates the minimal eigenvalue of gradient matrices for corner detection.
* *The function is similar to "cornerEigenValsAndVecs" but it calculates and * stores only the minimal eigenvalue of the covariance matrix of derivatives, * that is, min(lambda_1, lambda_2) in terms of the formulae in the * "cornerEigenValsAndVecs" description.
* * @param src Input single-channel 8-bit or floating-point image. * @param dst Image to store the minimal eigenvalues. It has the type *CV_32FC1
and the same size as src
.
* @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs").
* @param ksize Aperture parameter for the "Sobel" operator.
*
* @see org.opencv.imgproc.Imgproc.cornerMinEigenVal
*/
public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize)
{
cornerMinEigenVal_1(src.nativeObj, dst.nativeObj, blockSize, ksize);
return;
}
/**
* Calculates the minimal eigenvalue of gradient matrices for corner detection.
* *The function is similar to "cornerEigenValsAndVecs" but it calculates and * stores only the minimal eigenvalue of the covariance matrix of derivatives, * that is, min(lambda_1, lambda_2) in terms of the formulae in the * "cornerEigenValsAndVecs" description.
* * @param src Input single-channel 8-bit or floating-point image. * @param dst Image to store the minimal eigenvalues. It has the type *CV_32FC1
and the same size as src
.
* @param blockSize Neighborhood size (see the details on "cornerEigenValsAndVecs").
*
* @see org.opencv.imgproc.Imgproc.cornerMinEigenVal
*/
public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize)
{
cornerMinEigenVal_2(src.nativeObj, dst.nativeObj, blockSize);
return;
}
//
// C++: void cornerSubPix(Mat image, vector_Point2f& corners, Size winSize, Size zeroZone, TermCriteria criteria)
//
/**
* Refines the corner locations.
* *The function iterates to find the sub-pixel accurate location of corners or * radial saddle points, as shown on the figure below.
* *Sub-pixel accurate corner locator is based on the observation that every * vector from the center q to a point p located within a * neighborhood of q is orthogonal to the image gradient at p * subject to image and measurement noise. Consider the expression:
* *epsilon _i = (DI_(p_i))^T * (q - p_i)
* *where (DI_(p_i)) is an image gradient at one of the points * p_i in a neighborhood of q. The value of q is to * be found so that epsilon_i is minimized. A system of equations may * be set up with epsilon_i set to zero:
 * sum_i(DI_(p_i) * (DI_(p_i))^T) * q - sum_i(DI_(p_i) * (DI_(p_i))^T * p_i) = 0
* *where the gradients are summed within a neighborhood ("search window") of * q. Calling the first gradient term G and the second * gradient term b gives:
* *q = G^(-1) * b
* *The algorithm sets the center of the neighborhood window at this new center * q and then iterates until the center stays within a set threshold.
* * @param image Input image. * @param corners Initial coordinates of the input corners and refined * coordinates provided for output. * @param winSize Half of the side length of the search window. For example, if *winSize=Size(5,5)
, then a 5*2+1 x 5*2+1 = 11 x 11
* search window is used.
* @param zeroZone Half of the size of the dead region in the middle of the
* search zone over which the summation in the formula below is not done. It is
* used sometimes to avoid possible singularities of the autocorrelation matrix.
* The value of (-1,-1) indicates that there is no such a size.
* @param criteria Criteria for termination of the iterative process of corner
* refinement. That is, the process of corner position refinement stops either
* after criteria.maxCount
iterations or when the corner position
* moves by less than criteria.epsilon
on some iteration.
*
* @see org.opencv.imgproc.Imgproc.cornerSubPix
*/
public static void cornerSubPix(Mat image, MatOfPoint2f corners, Size winSize, Size zeroZone, TermCriteria criteria)
{
Mat corners_mat = corners;
cornerSubPix_0(image.nativeObj, corners_mat.nativeObj, winSize.width, winSize.height, zeroZone.width, zeroZone.height, criteria.type, criteria.maxCount, criteria.epsilon);
return;
}
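// Usage sketch (illustrative, not part of the generated bindings): refine previously
// detected corner coordinates in place. The window sizes and termination criteria are
// example values only.
private static void exampleRefineCorners(Mat gray, MatOfPoint2f corners)
{
    TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 30, 0.01);
    cornerSubPix(gray, corners, new Size(5, 5), new Size(-1, -1), criteria);
}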
//
// C++: Ptr_CLAHE createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8))
//
// Return type 'Ptr_CLAHE' is not supported, skipping the function
//
// C++: void createHanningWindow(Mat& dst, Size winSize, int type)
//
/**
* This function computes a Hanning window coefficients in two dimensions. See * http://en.wikipedia.org/wiki/Hann_function and http://en.wikipedia.org/wiki/Window_function * for more information.
* *An example is shown below:
// C++ code:
* *// create hanning window of size 100x100 and type CV_32F
* *Mat hann;
* *createHanningWindow(hann, Size(100, 100), CV_32F);
 * @param dst Destination array to place Hann coefficients in
 * @param winSize The window size specifications
 * @param type Created array type
 *
 * @see org.opencv.imgproc.Imgproc.createHanningWindow
 * @see org.opencv.imgproc.Imgproc#phaseCorrelate
 */
public static void createHanningWindow(Mat dst, Size winSize, int type)
{
    createHanningWindow_0(dst.nativeObj, winSize.width, winSize.height, type);
    return;
}
//
// C++: void cvtColor(Mat src, Mat& dst, int code, int dstCn = 0)
//
/**
 * Converts an image from one color space to another.
* *The function converts an input image from one color space to another. In case * of a transformation to-from RGB color space, the order of the channels should * be specified explicitly (RGB or BGR). * Note that the default color format in OpenCV is often referred to as RGB but * it is actually BGR (the bytes are reversed). So the first byte in a standard * (24-bit) color image will be an 8-bit Blue component, the second byte will be * Green, and the third byte will be Red. The fourth, fifth, and sixth bytes * would then be the second pixel (Blue, then Green, then Red), and so on.
* *The conventional ranges for R, G, and B channel values are:
*-
*
- 0 to 255 for
CV_8U
images * - 0 to 65535 for
CV_16U
images * - 0 to 1 for
CV_32F
images *
In case of linear transformations, the range does not matter.
* But in case of a non-linear transformation, an input RGB image should be
* normalized to the proper value range to get the correct results, for example,
* for RGB-> L*u*v* transformation. For example, if you have a 32-bit
* floating-point image directly converted from an 8-bit image without any
* scaling, then it will have the 0..255 value range instead of 0..1 assumed by
* the function. So, before calling cvtColor
, you need first to
* scale the image down:
// C++ code:
* *img *= 1./255;
* *cvtColor(img, img, CV_BGR2Luv);
* *If you use cvtColor
with 8-bit images, the conversion will have
* some information lost. For many applications, this will not be noticeable but
* it is recommended to use 32-bit images in applications that need the full
* range of colors or that convert an image before an operation and then convert
* back.
*
If conversion adds the alpha channel, its value will be set to the maximum of
* corresponding channel range: 255 for CV_8U
, 65535 for
* CV_16U
, 1 for CV_32F
.
The function can do the following transformations:
*-
*
- RGB <-> GRAY (
CV_BGR2GRAY, CV_RGB2GRAY, CV_GRAY2BGR, * CV_GRAY2RGB
) Transformations within RGB space like adding/removing the * alpha channel, reversing the channel order, conversion to/from 16-bit RGB * color (R5:G6:B5 or R5:G5:B5), as well as conversion to/from grayscale using: *
RGB[A] to Gray: Y <- 0.299 * R + 0.587 * G + 0.114 * B
* *and
* *Gray to RGB[A]: R <- Y, G <- Y, B <- Y, A <- max(ChannelRange)
* *The conversion from a RGB image to gray is done with:
* *// C++ code:
* *cvtColor(src, bwsrc, CV_RGB2GRAY);
* * * *More advanced channel reordering can also be done with "mixChannels".
*-
*
- RGB <-> CIE XYZ.Rec 709 with D65 white point
* (
CV_BGR2XYZ, CV_RGB2XYZ, CV_XYZ2BGR, CV_XYZ2RGB
): *
 * | X |   | 0.412453  0.357580  0.180423 |   | R |
 * | Y | = | 0.212671  0.715160  0.072169 | * | G |
 * | Z |   | 0.019334  0.119193  0.950227 |   | B |
 *
 * | R |   |  3.240479  -1.53715   -0.498535 |   | X |
 * | G | = | -0.969256   1.875991   0.041556 | * | Y |
 * | B |   |  0.055648  -0.204043   1.057311 |   | Z |
* *X, Y and Z cover the whole value range (in case of * floating-point images, Z may exceed 1).
*-
*
- RGB <-> YCrCb JPEG (or YCC) (
CV_BGR2YCrCb, * CV_RGB2YCrCb, CV_YCrCb2BGR, CV_YCrCb2RGB
) *
Y <- 0.299 * R + 0.587 * G + 0.114 * B
* * * *Cr <- (R-Y) * 0.713 + delta
* * * *Cb <- (B-Y) * 0.564 + delta
* * * *R <- Y + 1.403 * (Cr - delta)
* * * *G <- Y - 0.714 * (Cr - delta) - 0.344 * (Cb - delta)
* * * *B <- Y + 1.773 * (Cb - delta)
* *where
 * delta = 128 for 8-bit images, 32768 for 16-bit images, 0.5 for floating-point images
* *Y, Cr, and Cb cover the whole value range.
*-
*
- RGB <-> HSV (
CV_BGR2HSV, CV_RGB2HSV, CV_HSV2BGR, * CV_HSV2RGB
) In case of 8-bit and 16-bit images, R, G, and B are * converted to the floating-point format and scaled to fit the 0 to 1 range. *
V <- max(R,G,B)
* * * *S <- (V-min(R,G,B))/(V) if V != 0; 0 otherwise
* * * *H <- (60(G - B))/((V-min(R,G,B))) if V=R; (120+60(B - R))/((V-min(R,G,B))) * if V=G; (240+60(R - G))/((V-min(R,G,B))) if V=B
* *If H<0 then H <- H+360. On output 0 <= V <= 1, * 0 <= S <= 1, 0 <= H <= 360.
* *The values are then converted to the destination data type:
*-
*
- 8-bit images *
V <- 255 V, S <- 255 S, H <- H/2(to fit to 0 to 255)
* *-
*
- 16-bit images (currently not supported) *
V <- 65535 V, S <- 65535 S, H <- H
* *-
*
- 32-bit images H, S, and V are left as is *
- RGB <-> HLS (
CV_BGR2HLS, CV_RGB2HLS, CV_HLS2BGR, * CV_HLS2RGB
). *
In case of 8-bit and 16-bit images, R, G, and B are converted to the * floating-point format and scaled to fit the 0 to 1 range.
* *V_(max) <- (max)(R,G,B)
* * * *V_(min) <- (min)(R,G,B)
* * * *L <- (V_(max) + V_(min))/2
 * S <- (V_max - V_min)/(V_max + V_min)          if L < 0.5
 *      (V_max - V_min)/(2 - (V_max + V_min))    if L >= 0.5
 *
 * H <- 60*(G - B)/S          if V_max = R
 *      120 + 60*(B - R)/S    if V_max = G
 *      240 + 60*(R - G)/S    if V_max = B
* *If H<0 then H <- H+360. On output 0 <= L <= 1, * 0 <= S <= 1, 0 <= H <= 360.
* *The values are then converted to the destination data type:
*-
*
- 8-bit images *
V <- 255 * V, S <- 255 * S, H <- H/2(to fit to 0 to 255)
* *-
*
- 16-bit images (currently not supported) *
V <- 65535 * V, S <- 65535 * S, H <- H
* *-
*
- 32-bit images H, S, V are left as is *
- RGB <-> CIE L*a*b* (
CV_BGR2Lab, CV_RGB2Lab, CV_Lab2BGR, * CV_Lab2RGB
). *
In case of 8-bit and 16-bit images, R, G, and B are converted to the * floating-point format and scaled to fit the 0 to 1 range.
 * | X |   | 0.412453  0.357580  0.180423 |   | R |
 * | Y | = | 0.212671  0.715160  0.072169 | * | G |
 * | Z |   | 0.019334  0.119193  0.950227 |   | B |
 *
X <- X/X_n, where X_n = 0.950456
* * * *Z <- Z/Z_n, where Z_n = 1.088754
* * * *L <- 116*Y^(1/3)-16 for Y>0.008856; 903.3*Y for Y <= 0.008856
* * * *a <- 500(f(X)-f(Y)) + delta
* * * *b <- 200(f(Y)-f(Z)) + delta
* *where
* *f(t)= t^(1/3) for t>0.008856; 7.787 t+16/116 for t <= 0.008856
* *and
* *delta = 128 for 8-bit images; 0 for floating-point images
* *This outputs 0 <= L <= 100, -127 <= a <= 127, -127 <= b * <= 127. The values are then converted to the destination data type:
*-
*
- 8-bit images *
L <- L*255/100, a <- a + 128, b <- b + 128
* *-
*
- 16-bit images (currently not supported) *
- 32-bit images L, a, and b are left as is *
- RGB <-> CIE L*u*v* (
CV_BGR2Luv, CV_RGB2Luv, CV_Luv2BGR, * CV_Luv2RGB
). *
In case of 8-bit and 16-bit images, R, G, and B are converted to the * floating-point format and scaled to fit 0 to 1 range.
 * | X |   | 0.412453  0.357580  0.180423 |   | R |
 * | Y | = | 0.212671  0.715160  0.072169 | * | G |
 * | Z |   | 0.019334  0.119193  0.950227 |   | B |
 *
L <- 116 Y^(1/3) for Y>0.008856; 903.3 Y for Y <= 0.008856
* * * *u' <- 4*X/(X + 15*Y + 3 Z)
* * * *v' <- 9*Y/(X + 15*Y + 3 Z)
* * * *u <- 13*L*(u' - u_n) where u_n=0.19793943
* * * *v <- 13*L*(v' - v_n) where v_n=0.46831096
* *This outputs 0 <= L <= 100, -134 <= u <= 220, -140 <= v * <= 122.
* *The values are then converted to the destination data type:
*-
*
- 8-bit images *
L <- 255/100 L, u <- 255/354(u + 134), v <- 255/256(v + 140)
* *-
*
- 16-bit images (currently not supported) *
- 32-bit images L, u, and v are left as is *
The above formulae for converting RGB to/from various color spaces have been * taken from multiple sources on the web, primarily from the Charles Poynton * site http://www.poynton.com/ColorFAQ.html
*-
*
- Bayer -> RGB (
CV_BayerBG2BGR, CV_BayerGB2BGR, * CV_BayerRG2BGR, CV_BayerGR2BGR, CV_BayerBG2RGB, CV_BayerGB2RGB, * CV_BayerRG2RGB, CV_BayerGR2RGB
). The Bayer pattern is widely used in * CCD and CMOS cameras. It enables you to get color pictures from a single * plane where R,G, and B pixels (sensors of a particular component) are * interleaved as follows: The output RGB components of a pixel are interpolated * from 1, 2, or*
// C++ code:
* *4 neighbors of the pixel having the same color. There are several
* *modifications of the above pattern that can be achieved by shifting
* *the pattern one pixel left and/or one pixel up. The two letters
 * C_1 and C_2 in the conversion constants CV_BayerC_1C_22BGR and
 * CV_BayerC_1C_22RGB indicate the particular pattern type. These are
 * components from the second row, second and third columns, respectively.
 * For example, the above pattern has a very popular "BG" type.
* * @param src input image: 8-bit unsigned, 16-bit unsigned (CV_16UC...
),
* or single-precision floating-point.
* @param dst output image of the same size and depth as src
.
* @param code color space conversion code (see the description below).
* @param dstCn number of channels in the destination image; if the parameter is
* 0, the number of the channels is derived automatically from src
* and code
.
*
* @see org.opencv.imgproc.Imgproc.cvtColor
*/
public static void cvtColor(Mat src, Mat dst, int code, int dstCn)
{
cvtColor_0(src.nativeObj, dst.nativeObj, code, dstCn);
return;
}
/**
* Converts an image from one color space to another.
* *The function converts an input image from one color space to another. In case * of a transformation to-from RGB color space, the order of the channels should * be specified explicitly (RGB or BGR). * Note that the default color format in OpenCV is often referred to as RGB but * it is actually BGR (the bytes are reversed). So the first byte in a standard * (24-bit) color image will be an 8-bit Blue component, the second byte will be * Green, and the third byte will be Red. The fourth, fifth, and sixth bytes * would then be the second pixel (Blue, then Green, then Red), and so on.
* *The conventional ranges for R, G, and B channel values are:
*-
*
- 0 to 255 for
CV_8U
images * - 0 to 65535 for
CV_16U
images * - 0 to 1 for
CV_32F
images *
In case of linear transformations, the range does not matter.
* But in case of a non-linear transformation, an input RGB image should be
* normalized to the proper value range to get the correct results, for example,
* for RGB-> L*u*v* transformation. For example, if you have a 32-bit
* floating-point image directly converted from an 8-bit image without any
* scaling, then it will have the 0..255 value range instead of 0..1 assumed by
* the function. So, before calling cvtColor
, you need first to
* scale the image down:
// C++ code:
* *img *= 1./255;
* *cvtColor(img, img, CV_BGR2Luv);
* *If you use cvtColor
with 8-bit images, the conversion will have
* some information lost. For many applications, this will not be noticeable but
* it is recommended to use 32-bit images in applications that need the full
* range of colors or that convert an image before an operation and then convert
* back.
*
If conversion adds the alpha channel, its value will be set to the maximum of
* corresponding channel range: 255 for CV_8U
, 65535 for
* CV_16U
, 1 for CV_32F
.
The function can do the following transformations:
*-
*
- RGB <-> GRAY (
CV_BGR2GRAY, CV_RGB2GRAY, CV_GRAY2BGR, * CV_GRAY2RGB
) Transformations within RGB space like adding/removing the * alpha channel, reversing the channel order, conversion to/from 16-bit RGB * color (R5:G6:B5 or R5:G5:B5), as well as conversion to/from grayscale using: *
RGB[A] to Gray: Y <- 0.299 * R + 0.587 * G + 0.114 * B
* *and
* *Gray to RGB[A]: R <- Y, G <- Y, B <- Y, A <- max(ChannelRange)
* *The conversion from a RGB image to gray is done with:
* *// C++ code:
* *cvtColor(src, bwsrc, CV_RGB2GRAY);
* * * *More advanced channel reordering can also be done with "mixChannels".
*-
*
- RGB <-> CIE XYZ.Rec 709 with D65 white point
* (
CV_BGR2XYZ, CV_RGB2XYZ, CV_XYZ2BGR, CV_XYZ2RGB
): *
[X Y Z] <- | 0.412453  0.357580  0.180423|
           | 0.212671  0.715160  0.072169| * [R G B]
           | 0.019334  0.119193  0.950227|

[R G B] <- | 3.240479 -1.53715  -0.498535|
           |-0.969256  1.875991  0.041556| * [X Y Z]
           | 0.055648 -0.204043  1.057311|
* *X, Y and Z cover the whole value range (in case of * floating-point images, Z may exceed 1).
*-
*
- RGB <-> YCrCb JPEG (or YCC) (
CV_BGR2YCrCb, * CV_RGB2YCrCb, CV_YCrCb2BGR, CV_YCrCb2RGB
) *
Y <- 0.299 * R + 0.587 * G + 0.114 * B
* * * *Cr <- (R-Y) * 0.713 + delta
* * * *Cb <- (B-Y) * 0.564 + delta
* * * *R <- Y + 1.403 * (Cr - delta)
* * * *G <- Y - 0.714 * (Cr - delta) - 0.344 * (Cb - delta)
* * * *B <- Y + 1.773 * (Cb - delta)
* *where
delta = 128 for 8-bit images; 32768 for 16-bit images; 0.5 for floating-point images
* *Y, Cr, and Cb cover the whole value range.
*-
*
- RGB <-> HSV (
CV_BGR2HSV, CV_RGB2HSV, CV_HSV2BGR, * CV_HSV2RGB
) In case of 8-bit and 16-bit images, R, G, and B are * converted to the floating-point format and scaled to fit the 0 to 1 range. *
V <- max(R,G,B)
* * * *S <- (V-min(R,G,B))/(V) if V != 0; 0 otherwise
* * * *H <- (60(G - B))/((V-min(R,G,B))) if V=R; (120+60(B - R))/((V-min(R,G,B))) * if V=G; (240+60(R - G))/((V-min(R,G,B))) if V=B
* *If H<0 then H <- H+360. On output 0 <= V <= 1, * 0 <= S <= 1, 0 <= H <= 360.
* *The values are then converted to the destination data type:
*-
*
- 8-bit images *
V <- 255 V, S <- 255 S, H <- H/2(to fit to 0 to 255)
* *-
*
- 16-bit images (currently not supported) *
V <- 65535 V, S <- 65535 S, H <- H
* *-
*
- 32-bit images H, S, and V are left as is *
- RGB <-> HLS (
CV_BGR2HLS, CV_RGB2HLS, CV_HLS2BGR, * CV_HLS2RGB
). *
In case of 8-bit and 16-bit images, R, G, and B are converted to the * floating-point format and scaled to fit the 0 to 1 range.
* *V_(max) <- (max)(R,G,B)
* * * *V_(min) <- (min)(R,G,B)
* * * *L <- (V_(max) + V_(min))/2
S <- (V_(max) - V_(min))/(V_(max) + V_(min)) if L < 0.5;
     (V_(max) - V_(min))/(2 - (V_(max) + V_(min))) if L >= 0.5

H <- 60*(G - B)/S if V_(max)=R;
     120 + 60*(B - R)/S if V_(max)=G;
     240 + 60*(R - G)/S if V_(max)=B
* *If H<0 then H <- H+360. On output 0 <= L <= 1, * 0 <= S <= 1, 0 <= H <= 360.
* *The values are then converted to the destination data type:
*-
*
- 8-bit images *
V <- 255 * V, S <- 255 * S, H <- H/2(to fit to 0 to 255)
* *-
*
- 16-bit images (currently not supported) *
V <- 65535 * V, S <- 65535 * S, H <- H
* *-
*
- 32-bit images H, S, V are left as is *
- RGB <-> CIE L*a*b* (
CV_BGR2Lab, CV_RGB2Lab, CV_Lab2BGR, * CV_Lab2RGB
). *
In case of 8-bit and 16-bit images, R, G, and B are converted to the * floating-point format and scaled to fit the 0 to 1 range.
[X Y Z] <- |0.412453 0.357580 0.180423|
           |0.212671 0.715160 0.072169| * [R G B]
           |0.019334 0.119193 0.950227|

X <- X/X_n, where X_n = 0.950456

Z <- Z/Z_n, where Z_n = 1.088754

L <- 116*Y^(1/3)-16 for Y>0.008856; 903.3*Y for Y <= 0.008856

a <- 500*(f(X)-f(Y)) + delta

b <- 200*(f(Y)-f(Z)) + delta
* *where
* *f(t)= t^(1/3) for t>0.008856; 7.787 t+16/116 for t <= 0.008856
* *and
* *delta = 128 for 8-bit images; 0 for floating-point images
* *This outputs 0 <= L <= 100, -127 <= a <= 127, -127 <= b * <= 127. The values are then converted to the destination data type:
*-
*
- 8-bit images *
L <- L*255/100, a <- a + 128, b <- b + 128
* *-
*
- 16-bit images (currently not supported) *
- 32-bit images L, a, and b are left as is *
- RGB <-> CIE L*u*v* (
CV_BGR2Luv, CV_RGB2Luv, CV_Luv2BGR, * CV_Luv2RGB
). *
In case of 8-bit and 16-bit images, R, G, and B are converted to the * floating-point format and scaled to fit 0 to 1 range.
[X Y Z] <- |0.412453 0.357580 0.180423|
           |0.212671 0.715160 0.072169| * [R G B]
           |0.019334 0.119193 0.950227|

L <- 116*Y^(1/3) - 16 for Y>0.008856; 903.3*Y for Y <= 0.008856

u' <- 4*X/(X + 15*Y + 3*Z)

v' <- 9*Y/(X + 15*Y + 3*Z)

u <- 13*L*(u' - u_n) where u_n=0.19793943

v <- 13*L*(v' - v_n) where v_n=0.46831096
* *This outputs 0 <= L <= 100, -134 <= u <= 220, -140 <= v * <= 122.
* *The values are then converted to the destination data type:
*-
*
- 8-bit images *
L <- 255/100*L, u <- 255/354*(u + 134), v <- 255/262*(v + 140)
* *-
*
- 16-bit images (currently not supported) *
- 32-bit images L, u, and v are left as is *
The above formulae for converting RGB to/from various color spaces have been * taken from multiple sources on the web, primarily from the Charles Poynton * site http://www.poynton.com/ColorFAQ.html
*-
*
- Bayer -> RGB (
CV_BayerBG2BGR, CV_BayerGB2BGR, * CV_BayerRG2BGR, CV_BayerGR2BGR, CV_BayerBG2RGB, CV_BayerGB2RGB, * CV_BayerRG2RGB, CV_BayerGR2RGB
). The Bayer pattern is widely used in CCD and CMOS cameras. It enables you to
get color pictures from a single plane where R, G, and B pixels (sensors of a
particular component) are interleaved. The output RGB components of a pixel are
interpolated from 1, 2, or 4 neighbors of the pixel having the same color.
There are several modifications of the above pattern that can be achieved by
shifting the pattern one pixel left and/or one pixel up. The two letters
C_1 and C_2 in the conversion constants CV_Bayer C_1 C_2 2BGR and
CV_Bayer C_1 C_2 2RGB indicate the particular pattern type. These are
components from the second row, second and third columns, respectively.
For example, the above pattern has a very popular "BG" type.
* * @param src input image: 8-bit unsigned, 16-bit unsigned (CV_16UC...
),
* or single-precision floating-point.
* @param dst output image of the same size and depth as src
.
* @param code color space conversion code (see the description below).
*
* @see org.opencv.imgproc.Imgproc.cvtColor
*/
public static void cvtColor(Mat src, Mat dst, int code)
{
cvtColor_1(src.nativeObj, dst.nativeObj, code);
return;
}
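
    // Illustrative usage sketch; not part of the auto-generated API above. It shows the
    // scaling step described in the Javadoc: linear conversions (such as BGR -> gray)
    // accept 8-bit input directly, while non-linear ones (such as BGR -> Luv) expect
    // floating-point data in the 0..1 range. COLOR_BGR2GRAY and COLOR_BGR2Luv are
    // assumed to be the conversion-code constants defined elsewhere in this class;
    // "bgr" is assumed to be an 8-bit, 3-channel image supplied by the caller.
    private static void exampleCvtColorUsage(Mat bgr)
    {
        // Linear transformation: the value range does not matter.
        Mat gray = new Mat();
        cvtColor(bgr, gray, COLOR_BGR2GRAY);

        // Non-linear transformation: rescale to 0..1 floating point first.
        Mat bgr32 = new Mat();
        bgr.convertTo(bgr32, org.opencv.core.CvType.CV_32FC3, 1.0 / 255.0);
        Mat luv = new Mat();
        cvtColor(bgr32, luv, COLOR_BGR2Luv);
    }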
//
// C++: void dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
//
/**
* Dilates an image by using a specific structuring element.
* *The function dilates the source image using the specified structuring element * that determines the shape of a pixel neighborhood over which the maximum is * taken:
* *dst(x,y) = max _((x',y'): element(x',y') != 0) src(x+x',y+y')
* *The function supports the in-place mode. Dilation can be applied several
* (iterations
) times. In case of multi-channel images, each
* channel is processed independently.
Note:
*-
*
- An example using the morphological dilate operation can be found at * opencv_source_code/samples/cpp/morphology2.cpp *
* @param src input image; the number of channels can be arbitrary, but the depth
* should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
* @param dst output image of the same size and type as src
.
* @param kernel a kernel
* @param anchor position of the anchor within the element; default value
* (-1, -1)
means that the anchor is at the element center.
* @param iterations number of times dilation is applied.
* @param borderType pixel extrapolation method (see "borderInterpolate" for
* details).
* @param borderValue border value in case of a constant border (see
* "createMorphologyFilter" for details).
*
* @see org.opencv.imgproc.Imgproc.dilate
* @see org.opencv.imgproc.Imgproc#erode
* @see org.opencv.imgproc.Imgproc#morphologyEx
*/
public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue)
{
dilate_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
return;
}
/**
* Dilates an image by using a specific structuring element.
* *The function dilates the source image using the specified structuring element * that determines the shape of a pixel neighborhood over which the maximum is * taken:
* *dst(x,y) = max _((x',y'): element(x',y') != 0) src(x+x',y+y')
* *The function supports the in-place mode. Dilation can be applied several
* (iterations
) times. In case of multi-channel images, each
* channel is processed independently.
Note:
*-
*
- An example using the morphological dilate operation can be found at * opencv_source_code/samples/cpp/morphology2.cpp *
* @param src input image; the number of channels can be arbitrary, but the depth
* should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
* @param dst output image of the same size and type as src
.
* @param kernel a kernel
* @param anchor position of the anchor within the element; default value
* (-1, -1)
means that the anchor is at the element center.
* @param iterations number of times dilation is applied.
*
* @see org.opencv.imgproc.Imgproc.dilate
* @see org.opencv.imgproc.Imgproc#erode
* @see org.opencv.imgproc.Imgproc#morphologyEx
*/
public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations)
{
dilate_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations);
return;
}
/**
* Dilates an image by using a specific structuring element.
* *The function dilates the source image using the specified structuring element * that determines the shape of a pixel neighborhood over which the maximum is * taken:
* *dst(x,y) = max _((x',y'): element(x',y') != 0) src(x+x',y+y')
* *The function supports the in-place mode. Dilation can be applied several
* (iterations
) times. In case of multi-channel images, each
* channel is processed independently.
Note:
*-
*
- An example using the morphological dilate operation can be found at * opencv_source_code/samples/cpp/morphology2.cpp *
* @param src input image; the number of channels can be arbitrary, but the depth
* should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
* @param dst output image of the same size and type as src
.
* @param kernel a kernel
*
* @see org.opencv.imgproc.Imgproc.dilate
* @see org.opencv.imgproc.Imgproc#erode
* @see org.opencv.imgproc.Imgproc#morphologyEx
*/
public static void dilate(Mat src, Mat dst, Mat kernel)
{
dilate_2(src.nativeObj, dst.nativeObj, kernel.nativeObj);
return;
}
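
    // Illustrative usage sketch; not part of the auto-generated API above. It dilates an
    // 8-bit single-channel mask twice with a 3x3 rectangular structuring element.
    // getStructuringElement is assumed to be the kernel factory defined elsewhere in this
    // class, and MORPH_RECT is the shape constant declared at the top of the class.
    private static Mat exampleDilateUsage(Mat mask)
    {
        Mat kernel = getStructuringElement(MORPH_RECT, new Size(3, 3));
        Mat dilated = new Mat();
        // Anchor (-1, -1) places the anchor at the kernel center; iterations = 2.
        dilate(mask, dilated, kernel, new Point(-1, -1), 2);
        return dilated;
    }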
//
// C++: void distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize)
//
/**
* Calculates the distance to the closest zero pixel for each pixel of the * source image.
* *The functions distanceTransform
calculate the approximate or
* precise distance from every binary image pixel to the nearest zero pixel.
* For zero image pixels, the distance will obviously be zero.
When maskSize == CV_DIST_MASK_PRECISE
and distanceType ==
* CV_DIST_L2
, the function runs the algorithm described in
* [Felzenszwalb04]. This algorithm is parallelized with the TBB library.
In other cases, the algorithm [Borgefors86] is used. This means that for a
* pixel the function finds the shortest path to the nearest zero pixel
* consisting of basic shifts: horizontal, vertical, diagonal, or knight's move
* (the latter is available for a 5x5 mask). The overall distance is
* calculated as a sum of these basic distances. Since the distance function
* should be symmetric, all of the horizontal and vertical shifts must have the
* same cost (denoted as a
), all the diagonal shifts must have the
* same cost (denoted as b
), and all knight's moves must have the
* same cost (denoted as c
). For the CV_DIST_C
and
* CV_DIST_L1
types, the distance is calculated precisely, whereas
* for CV_DIST_L2
(Euclidean distance) the distance can be
* calculated only with a relative error (a 5x 5 mask gives more
* accurate results). For a
,b
, and c
,
* OpenCV uses the values suggested in the original paper:
*   CV_DIST_C  (3x3): a = 1,     b = 1
*   CV_DIST_L1 (3x3): a = 1,     b = 2
*   CV_DIST_L2 (3x3): a = 0.955, b = 1.3693
*   CV_DIST_L2 (5x5): a = 1,     b = 1.4,   c = 2.1969
Typically, for a fast, coarse distance estimation CV_DIST_L2
, a
* 3x 3 mask is used. For a more accurate distance estimation
* CV_DIST_L2
, a 5x 5 mask or the precise algorithm is
* used.
* Note that both the precise and the approximate algorithms are linear on the
* number of pixels.
The second variant of the function does not only compute the minimum distance
* for each pixel (x, y) but also identifies the nearest connected
* component consisting of zero pixels (labelType==DIST_LABEL_CCOMP
)
* or the nearest zero pixel (labelType==DIST_LABEL_PIXEL
). Index
* of the component/pixel is stored in labels(x, y).
* When labelType==DIST_LABEL_CCOMP
, the function automatically
* finds connected components of zero pixels in the input image and marks them
* with distinct labels. When labelType==DIST_LABEL_PIXEL
, the
* function scans through the input image and marks all the zero pixels with
* distinct labels.
In this mode, the complexity is still linear.
* That is, the function provides a very fast way to compute the Voronoi diagram
* for a binary image.
* Currently, the second variant can use only the approximate distance transform
* algorithm, i.e. maskSize=CV_DIST_MASK_PRECISE
is not supported
* yet.
Note:
*-
*
- An example on using the distance transform can be found at * opencv_source_code/samples/cpp/distrans.cpp *
- (Python) An example on using the distance transform can be found at * opencv_source/samples/python2/distrans.py *
src
.
* @param distanceType Type of distance. It can be CV_DIST_L1,
* CV_DIST_L2
, or CV_DIST_C
.
* @param maskSize Size of the distance transform mask. It can be 3, 5, or
* CV_DIST_MASK_PRECISE
(the latter option is only supported by the
* first function). In case of the CV_DIST_L1
or CV_DIST_C
* distance type, the parameter is forced to 3 because a 3x 3 mask
* gives the same result as 5x 5 or any larger aperture.
*
* @see org.opencv.imgproc.Imgproc.distanceTransform
*/
public static void distanceTransform(Mat src, Mat dst, int distanceType, int maskSize)
{
distanceTransform_0(src.nativeObj, dst.nativeObj, distanceType, maskSize);
return;
}
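
    // Illustrative usage sketch; not part of the auto-generated API above. It computes the
    // precise Euclidean distance from every non-zero pixel of a binary image to the nearest
    // zero pixel. CV_DIST_L2 and CV_DIST_MASK_PRECISE are the constants declared at the top
    // of this class; "binary" is assumed to be an 8-bit single-channel image.
    private static Mat exampleDistanceTransformUsage(Mat binary)
    {
        Mat dist = new Mat();
        // The result is a 32-bit floating-point image of the same size as the input.
        distanceTransform(binary, dist, CV_DIST_L2, CV_DIST_MASK_PRECISE);
        return dist;
    }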
//
// C++: void distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP)
//
/**
* Calculates the distance to the closest zero pixel for each pixel of the * source image.
* *The functions distanceTransform
calculate the approximate or
* precise distance from every binary image pixel to the nearest zero pixel.
* For zero image pixels, the distance will obviously be zero.
When maskSize == CV_DIST_MASK_PRECISE
and distanceType ==
* CV_DIST_L2
, the function runs the algorithm described in
* [Felzenszwalb04]. This algorithm is parallelized with the TBB library.
In other cases, the algorithm [Borgefors86] is used. This means that for a
* pixel the function finds the shortest path to the nearest zero pixel
* consisting of basic shifts: horizontal, vertical, diagonal, or knight's move
* (the latter is available for a 5x5 mask). The overall distance is
* calculated as a sum of these basic distances. Since the distance function
* should be symmetric, all of the horizontal and vertical shifts must have the
* same cost (denoted as a
), all the diagonal shifts must have the
* same cost (denoted as b
), and all knight's moves must have the
* same cost (denoted as c
). For the CV_DIST_C
and
* CV_DIST_L1
types, the distance is calculated precisely, whereas
* for CV_DIST_L2
(Euclidean distance) the distance can be
* calculated only with a relative error (a 5x 5 mask gives more
* accurate results). For a
,b
, and c
,
* OpenCV uses the values suggested in the original paper:
*   CV_DIST_C  (3x3): a = 1,     b = 1
*   CV_DIST_L1 (3x3): a = 1,     b = 2
*   CV_DIST_L2 (3x3): a = 0.955, b = 1.3693
*   CV_DIST_L2 (5x5): a = 1,     b = 1.4,   c = 2.1969
Typically, for a fast, coarse distance estimation CV_DIST_L2
, a
* 3x 3 mask is used. For a more accurate distance estimation
* CV_DIST_L2
, a 5x 5 mask or the precise algorithm is
* used.
* Note that both the precise and the approximate algorithms are linear on the
* number of pixels.
The second variant of the function does not only compute the minimum distance
* for each pixel (x, y) but also identifies the nearest connected
* component consisting of zero pixels (labelType==DIST_LABEL_CCOMP
)
* or the nearest zero pixel (labelType==DIST_LABEL_PIXEL
). Index
* of the component/pixel is stored in labels(x, y).
* When labelType==DIST_LABEL_CCOMP
, the function automatically
* finds connected components of zero pixels in the input image and marks them
* with distinct labels. When labelType==DIST_LABEL_PIXEL
, the
* function scans through the input image and marks all the zero pixels with
* distinct labels.
In this mode, the complexity is still linear.
* That is, the function provides a very fast way to compute the Voronoi diagram
* for a binary image.
* Currently, the second variant can use only the approximate distance transform
* algorithm, i.e. maskSize=CV_DIST_MASK_PRECISE
is not supported
* yet.
Note:
*-
*
- An example on using the distance transform can be found at * opencv_source_code/samples/cpp/distrans.cpp *
- (Python) An example on using the distance transform can be found at * opencv_source/samples/python2/distrans.py *
src
.
* @param labels Optional output 2D array of labels (the discrete Voronoi
* diagram). It has the type CV_32SC1
and the same size as
* src
. See the details below.
* @param distanceType Type of distance. It can be CV_DIST_L1,
* CV_DIST_L2
, or CV_DIST_C
.
* @param maskSize Size of the distance transform mask. It can be 3, 5, or
* CV_DIST_MASK_PRECISE
(the latter option is only supported by the
* first function). In case of the CV_DIST_L1
or CV_DIST_C
* distance type, the parameter is forced to 3 because a 3x 3 mask
* gives the same result as 5x 5 or any larger aperture.
* @param labelType Type of the label array to build. If labelType==DIST_LABEL_CCOMP
* then each connected component of zeros in src
(as well as all
* the non-zero pixels closest to the connected component) will be assigned the
* same label. If labelType==DIST_LABEL_PIXEL
then each zero pixel
* (and all the non-zero pixels closest to it) gets its own label.
*
* @see org.opencv.imgproc.Imgproc.distanceTransform
*/
public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType)
{
distanceTransformWithLabels_0(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize, labelType);
return;
}
/**
* Calculates the distance to the closest zero pixel for each pixel of the * source image.
* *The functions distanceTransform
calculate the approximate or
* precise distance from every binary image pixel to the nearest zero pixel.
* For zero image pixels, the distance will obviously be zero.
When maskSize == CV_DIST_MASK_PRECISE
and distanceType ==
* CV_DIST_L2
, the function runs the algorithm described in
* [Felzenszwalb04]. This algorithm is parallelized with the TBB library.
In other cases, the algorithm [Borgefors86] is used. This means that for a
* pixel the function finds the shortest path to the nearest zero pixel
* consisting of basic shifts: horizontal, vertical, diagonal, or knight's move
* (the latter is available for a 5x5 mask). The overall distance is
* calculated as a sum of these basic distances. Since the distance function
* should be symmetric, all of the horizontal and vertical shifts must have the
* same cost (denoted as a
), all the diagonal shifts must have the
* same cost (denoted as b
), and all knight's moves must have the
* same cost (denoted as c
). For the CV_DIST_C
and
* CV_DIST_L1
types, the distance is calculated precisely, whereas
* for CV_DIST_L2
(Euclidean distance) the distance can be
* calculated only with a relative error (a 5x 5 mask gives more
* accurate results). For a
,b
, and c
,
* OpenCV uses the values suggested in the original paper:
*   CV_DIST_C  (3x3): a = 1,     b = 1
*   CV_DIST_L1 (3x3): a = 1,     b = 2
*   CV_DIST_L2 (3x3): a = 0.955, b = 1.3693
*   CV_DIST_L2 (5x5): a = 1,     b = 1.4,   c = 2.1969
Typically, for a fast, coarse distance estimation CV_DIST_L2
, a
* 3x 3 mask is used. For a more accurate distance estimation
* CV_DIST_L2
, a 5x 5 mask or the precise algorithm is
* used.
* Note that both the precise and the approximate algorithms are linear on the
* number of pixels.
The second variant of the function does not only compute the minimum distance
* for each pixel (x, y) but also identifies the nearest connected
* component consisting of zero pixels (labelType==DIST_LABEL_CCOMP
)
* or the nearest zero pixel (labelType==DIST_LABEL_PIXEL
). Index
* of the component/pixel is stored in labels(x, y).
* When labelType==DIST_LABEL_CCOMP
, the function automatically
* finds connected components of zero pixels in the input image and marks them
* with distinct labels. When labelType==DIST_LABEL_PIXEL
, the
* function scans through the input image and marks all the zero pixels with
* distinct labels.
In this mode, the complexity is still linear.
* That is, the function provides a very fast way to compute the Voronoi diagram
* for a binary image.
* Currently, the second variant can use only the approximate distance transform
* algorithm, i.e. maskSize=CV_DIST_MASK_PRECISE
is not supported
* yet.
Note:
*-
*
- An example on using the distance transform can be found at * opencv_source_code/samples/cpp/distrans.cpp *
- (Python) An example on using the distance transform can be found at * opencv_source/samples/python2/distrans.py *
src
.
* @param labels Optional output 2D array of labels (the discrete Voronoi
* diagram). It has the type CV_32SC1
and the same size as
* src
. See the details below.
* @param distanceType Type of distance. It can be CV_DIST_L1,
* CV_DIST_L2
, or CV_DIST_C
.
* @param maskSize Size of the distance transform mask. It can be 3, 5, or
* CV_DIST_MASK_PRECISE
(the latter option is only supported by the
* first function). In case of the CV_DIST_L1
or CV_DIST_C
* distance type, the parameter is forced to 3 because a 3x 3 mask
* gives the same result as 5x 5 or any larger aperture.
*
* @see org.opencv.imgproc.Imgproc.distanceTransform
*/
public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize)
{
distanceTransformWithLabels_1(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize);
return;
}
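
    // Illustrative usage sketch; not part of the auto-generated API above. It builds an
    // approximate discrete Voronoi diagram of the zero pixels by requesting per-pixel
    // labels, as described in the Javadoc (only the approximate algorithm supports
    // labels). CV_DIST_L2, CV_DIST_MASK_5 and CV_DIST_LABEL_PIXEL are the constants
    // declared at the top of this class; "binary" is an 8-bit single-channel image.
    private static Mat exampleVoronoiLabels(Mat binary)
    {
        Mat dist = new Mat();
        Mat labels = new Mat();
        distanceTransformWithLabels(binary, dist, labels, CV_DIST_L2, CV_DIST_MASK_5, CV_DIST_LABEL_PIXEL);
        // "labels" is a CV_32SC1 image; each pixel stores the index of its nearest zero pixel.
        return labels;
    }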
//
// C++: void drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = 8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point())
//
/**
* Draws contours outlines or filled contours.
* *The function draws contour outlines in the image if thickness >= 0
* or fills the area bounded by the contours if thickness < 0. The
* example below shows how to retrieve connected components from the binary
* image and label them:
// C++ code:
* *#include "cv.h"
* *#include "highgui.h"
* *using namespace cv;
* *int main(int argc, char** argv)
* * *Mat src;
* *// the first command-line parameter must be a filename of the binary
* *// (black-n-white) image
* *if(argc != 2 || !(src=imread(argv[1], 0)).data)
* *return -1;
* *Mat dst = Mat.zeros(src.rows, src.cols, CV_8UC3);
* *src = src > 1;
* *namedWindow("Source", 1);
* *imshow("Source", src);
* *vector<vector<Point> > contours;
* *vector<Vec4i> hierarchy;
* *findContours(src, contours, hierarchy,
* *CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
* *// iterate through all the top-level contours,
* *// draw each connected component with its own random color
* *int idx = 0;
* *for(; idx >= 0; idx = hierarchy[idx][0])
* * *Scalar color(rand()&255, rand()&255, rand()&255);
* *drawContours(dst, contours, idx, color, CV_FILLED, 8, hierarchy);
* * *namedWindow("Components", 1);
* *imshow("Components", dst);
* *waitKey(0);
* * *Note:
*-
*
- An example using the drawContour functionality can be found at * opencv_source_code/samples/cpp/contours2.cpp *
- An example using drawContours to clean up a background segmentation * result at opencv_source_code/samples/cpp/segment_objects.cpp *
- (Python) An example using the drawContour functionality can be found * at opencv_source/samples/python2/contours.py *
thickness=CV_FILLED
), the contour
* interiors are drawn.
* @param lineType Line connectivity. See "line" for details.
* @param hierarchy Optional information about hierarchy. It is only needed if
* you want to draw only some of the contours (see maxLevel
).
* @param maxLevel Maximal level for drawn contours. If it is 0, only the
* specified contour is drawn. If it is 1, the function draws the contour(s) and
* all the nested contours. If it is 2, the function draws the contours, all the
* nested contours, all the nested-to-nested contours, and so on. This parameter
* is only taken into account when there is hierarchy
available.
* @param offset Optional contour shift parameter. Shift all the drawn contours
* by the specified offset=(dx,dy).
*
* @see org.opencv.imgproc.Imgproc.drawContours
*/
    public static void drawContours(Mat image, List<MatOfPoint> contours, ...)

    /**
     * Draws contours outlines or filled contours.
* *The function draws contour outlines in the image if thickness >= 0
* or fills the area bounded by the contours if thickness < 0. The
* example below shows how to retrieve connected components from the binary
* image and label them:
// C++ code:
* *#include "cv.h"
* *#include "highgui.h"
* *using namespace cv;
* *int main(int argc, char** argv)
* * *Mat src;
* *// the first command-line parameter must be a filename of the binary
* *// (black-n-white) image
* *if(argc != 2 || !(src=imread(argv[1], 0)).data)
* *return -1;
* *Mat dst = Mat.zeros(src.rows, src.cols, CV_8UC3);
* *src = src > 1;
* *namedWindow("Source", 1);
* *imshow("Source", src);
* *vector<vector<Point> > contours;
* *vector<Vec4i> hierarchy;
* *findContours(src, contours, hierarchy,
* *CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
* *// iterate through all the top-level contours,
* *// draw each connected component with its own random color
* *int idx = 0;
* *for(; idx >= 0; idx = hierarchy[idx][0])
* * *Scalar color(rand()&255, rand()&255, rand()&255);
* *drawContours(dst, contours, idx, color, CV_FILLED, 8, hierarchy);
* * *namedWindow("Components", 1);
* *imshow("Components", dst);
* *waitKey(0);
* * *Note:
*-
*
- An example using the drawContour functionality can be found at * opencv_source_code/samples/cpp/contours2.cpp *
- An example using drawContours to clean up a background segmentation * result at opencv_source_code/samples/cpp/segment_objects.cpp *
- (Python) An example using the drawContour functionality can be found * at opencv_source/samples/python2/contours.py *
thickness=CV_FILLED
), the contour
* interiors are drawn.
*
* @see org.opencv.imgproc.Imgproc.drawContours
*/
    public static void drawContours(Mat image, List<MatOfPoint> contours, ...)

    /**
     * Draws contours outlines or filled contours.
* *The function draws contour outlines in the image if thickness >= 0
* or fills the area bounded by the contours if thickness < 0. The
* example below shows how to retrieve connected components from the binary
* image and label them:
// C++ code:
* *#include "cv.h"
* *#include "highgui.h"
* *using namespace cv;
* *int main(int argc, char** argv)
* * *Mat src;
* *// the first command-line parameter must be a filename of the binary
* *// (black-n-white) image
* *if(argc != 2 || !(src=imread(argv[1], 0)).data)
* *return -1;
* *Mat dst = Mat.zeros(src.rows, src.cols, CV_8UC3);
* *src = src > 1;
* *namedWindow("Source", 1);
* *imshow("Source", src);
* *vector<vector<Point> > contours;
* *vector<Vec4i> hierarchy;
* *findContours(src, contours, hierarchy,
* *CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
* *// iterate through all the top-level contours,
* *// draw each connected component with its own random color
* *int idx = 0;
* *for(; idx >= 0; idx = hierarchy[idx][0])
* * *Scalar color(rand()&255, rand()&255, rand()&255);
* *drawContours(dst, contours, idx, color, CV_FILLED, 8, hierarchy);
* * *namedWindow("Components", 1);
* *imshow("Components", dst);
* *waitKey(0);
* * *Note:
*-
*
- An example using the drawContour functionality can be found at * opencv_source_code/samples/cpp/contours2.cpp *
- An example using drawContours to clean up a background segmentation * result at opencv_source_code/samples/cpp/segment_objects.cpp *
- (Python) An example using the drawContour functionality can be found * at opencv_source/samples/python2/contours.py *
Equalizes the histogram of a grayscale image.
* *The function equalizes the histogram of the input image using the following * algorithm:
*-
*
- Calculate the histogram H for
src
. * - Normalize the histogram so that the sum of histogram bins is 255. *
- Compute the integral of the histogram: *
H'_i = sum(by: 0 <= j < i) H(j)
* *-
*
- *
Transform the image using H' as a look-up table: dst(x,y) = * H'(src(x,y))
* *The algorithm normalizes the brightness and increases the contrast of the * image.
*
* @param src Source 8-bit single channel image.
* @param dst Destination image of the same size and type as src
.
*
* @see org.opencv.imgproc.Imgproc.equalizeHist
*/
public static void equalizeHist(Mat src, Mat dst)
{
equalizeHist_0(src.nativeObj, dst.nativeObj);
return;
}
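
    // Illustrative usage sketch; not part of the auto-generated API above. equalizeHist
    // expects an 8-bit single-channel source, so a color image is converted to grayscale
    // first. COLOR_BGR2GRAY is assumed to be the conversion-code constant defined
    // elsewhere in this class; "bgr" is an 8-bit, 3-channel image supplied by the caller.
    private static Mat exampleEqualizeHistUsage(Mat bgr)
    {
        Mat gray = new Mat();
        cvtColor(bgr, gray, COLOR_BGR2GRAY);
        Mat equalized = new Mat();
        equalizeHist(gray, equalized);
        return equalized;
    }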
//
// C++: void erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
//
/**
* Erodes an image by using a specific structuring element.
* *The function erodes the source image using the specified structuring element * that determines the shape of a pixel neighborhood over which the minimum is * taken:
* *dst(x,y) = min _((x',y'): element(x',y') != 0) src(x+x',y+y')
* *The function supports the in-place mode. Erosion can be applied several
* (iterations
) times. In case of multi-channel images, each
* channel is processed independently.
Note:
*-
*
- An example using the morphological erode operation can be found at * opencv_source_code/samples/cpp/morphology2.cpp *
* @param src input image; the number of channels can be arbitrary, but the depth
* should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
* @param dst output image of the same size and type as src
.
* @param kernel a kernel
* @param anchor position of the anchor within the element; default value
* (-1, -1)
means that the anchor is at the element center.
* @param iterations number of times erosion is applied.
* @param borderType pixel extrapolation method (see "borderInterpolate" for
* details).
* @param borderValue border value in case of a constant border (see
* "createMorphologyFilter" for details).
*
* @see org.opencv.imgproc.Imgproc.erode
* @see org.opencv.imgproc.Imgproc#morphologyEx
* @see org.opencv.imgproc.Imgproc#dilate
*/
public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue)
{
erode_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
return;
}
/**
* Erodes an image by using a specific structuring element.
* *The function erodes the source image using the specified structuring element * that determines the shape of a pixel neighborhood over which the minimum is * taken:
* *dst(x,y) = min _((x',y'): element(x',y') != 0) src(x+x',y+y')
* *The function supports the in-place mode. Erosion can be applied several
* (iterations
) times. In case of multi-channel images, each
* channel is processed independently.
Note:
*-
*
- An example using the morphological erode operation can be found at * opencv_source_code/samples/cpp/morphology2.cpp *
* @param src input image; the number of channels can be arbitrary, but the depth
* should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
* @param dst output image of the same size and type as src
.
* @param kernel a kernel
* @param anchor position of the anchor within the element; default value
* (-1, -1)
means that the anchor is at the element center.
* @param iterations number of times erosion is applied.
*
* @see org.opencv.imgproc.Imgproc.erode
* @see org.opencv.imgproc.Imgproc#morphologyEx
* @see org.opencv.imgproc.Imgproc#dilate
*/
public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations)
{
erode_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations);
return;
}
/**
* Erodes an image by using a specific structuring element.
* *The function erodes the source image using the specified structuring element * that determines the shape of a pixel neighborhood over which the minimum is * taken:
* *dst(x,y) = min _((x',y'): element(x',y') != 0) src(x+x',y+y')
* *The function supports the in-place mode. Erosion can be applied several
* (iterations
) times. In case of multi-channel images, each
* channel is processed independently.
Note:
*-
*
- An example using the morphological erode operation can be found at * opencv_source_code/samples/cpp/morphology2.cpp *
* @param src input image; the number of channels can be arbitrary, but the depth
* should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
* @param dst output image of the same size and type as src
.
* @param kernel a kernel
*
* @see org.opencv.imgproc.Imgproc.erode
* @see org.opencv.imgproc.Imgproc#morphologyEx
* @see org.opencv.imgproc.Imgproc#dilate
*/
public static void erode(Mat src, Mat dst, Mat kernel)
{
erode_2(src.nativeObj, dst.nativeObj, kernel.nativeObj);
return;
}
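
    // Illustrative usage sketch; not part of the auto-generated API above. A morphological
    // opening (erosion followed by dilation with the same structuring element) removes
    // small bright speckles while preserving larger shapes. getStructuringElement is
    // assumed to be defined elsewhere in this class; MORPH_ELLIPSE is declared at the top.
    private static Mat exampleErodeThenDilate(Mat mask)
    {
        Mat kernel = getStructuringElement(MORPH_ELLIPSE, new Size(3, 3));
        Mat opened = new Mat();
        erode(mask, opened, kernel);
        dilate(opened, opened, kernel);
        return opened;
    }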
//
// C++: void filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
//
/**
* Convolves an image with the kernel.
* *The function applies an arbitrary linear filter to an image. In-place * operation is supported. When the aperture is partially outside the image, the * function interpolates outlier pixel values according to the specified border * mode.
* *The function does actually compute correlation, not the convolution:
* *dst(x,y) = sum(by: 0 <= x' < kernel.cols, 0 <= y' < kernel.rows) * kernel(x',y')* src(x+x'- anchor.x,y+y'- anchor.y)
* *That is, the kernel is not mirrored around the anchor point. If you need a
* real convolution, flip the kernel using "flip" and set the new anchor to
* (kernel.cols - anchor.x - 1, kernel.rows - anchor.y - 1)
.
The function uses the DFT-based algorithm in case of sufficiently large
* kernels (~11 x 11
or larger) and the direct algorithm (that uses
* the engine retrieved by "createLinearFilter") for small kernels.
src
.
* @param ddepth desired depth of the destination image; if it is negative, it
* will be the same as src.depth()
; the following combinations of
* src.depth()
and ddepth
are supported:
*   - src.depth() = CV_8U:          ddepth = -1/CV_16S/CV_32F/CV_64F
*   - src.depth() = CV_16U/CV_16S:  ddepth = -1/CV_32F/CV_64F
*   - src.depth() = CV_32F:         ddepth = -1/CV_32F/CV_64F
*   - src.depth() = CV_64F:         ddepth = -1/CV_64F
*
* when ddepth=-1, the output image will have the same depth as the
* source.
dst
.
* @param borderType pixel extrapolation method (see "borderInterpolate" for
* details).
*
* @see org.opencv.imgproc.Imgproc.filter2D
* @see org.opencv.imgproc.Imgproc#matchTemplate
* @see org.opencv.core.Core#dft
* @see org.opencv.imgproc.Imgproc#sepFilter2D
*/
public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType)
{
filter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta, borderType);
return;
}
/**
* Convolves an image with the kernel.
* *The function applies an arbitrary linear filter to an image. In-place * operation is supported. When the aperture is partially outside the image, the * function interpolates outlier pixel values according to the specified border * mode.
* *The function does actually compute correlation, not the convolution:
* *dst(x,y) = sum(by: 0 <= x' < kernel.cols, 0 <= y' < kernel.rows) * kernel(x',y')* src(x+x'- anchor.x,y+y'- anchor.y)
* *That is, the kernel is not mirrored around the anchor point. If you need a
* real convolution, flip the kernel using "flip" and set the new anchor to
* (kernel.cols - anchor.x - 1, kernel.rows - anchor.y - 1)
.
The function uses the DFT-based algorithm in case of sufficiently large
* kernels (~11 x 11
or larger) and the direct algorithm (that uses
* the engine retrieved by "createLinearFilter") for small kernels.
src
.
* @param ddepth desired depth of the destination image; if it is negative, it
* will be the same as src.depth()
; the following combinations of
* src.depth()
and ddepth
are supported:
*   - src.depth() = CV_8U:          ddepth = -1/CV_16S/CV_32F/CV_64F
*   - src.depth() = CV_16U/CV_16S:  ddepth = -1/CV_32F/CV_64F
*   - src.depth() = CV_32F:         ddepth = -1/CV_32F/CV_64F
*   - src.depth() = CV_64F:         ddepth = -1/CV_64F
*
* when ddepth=-1, the output image will have the same depth as the
* source.
dst
.
*
* @see org.opencv.imgproc.Imgproc.filter2D
* @see org.opencv.imgproc.Imgproc#matchTemplate
* @see org.opencv.core.Core#dft
* @see org.opencv.imgproc.Imgproc#sepFilter2D
*/
public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta)
{
filter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta);
return;
}
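
    // Illustrative usage sketch; not part of the auto-generated API above. It applies a
    // normalized 3x3 box (averaging) kernel with filter2D; remember that filter2D computes
    // correlation, which for this symmetric kernel equals convolution. The kernel is built
    // from Mat.ones and rescaled in place; "src" is any image with a supported depth.
    private static Mat exampleFilter2DUsage(Mat src)
    {
        // 3x3 kernel of ones, scaled by 1/9 so the weights sum to 1.
        Mat kernel = Mat.ones(3, 3, org.opencv.core.CvType.CV_32F);
        kernel.convertTo(kernel, -1, 1.0 / 9.0);
        Mat dst = new Mat();
        // ddepth = -1 keeps the source depth; anchor (-1, -1) is the kernel center; delta = 0.
        filter2D(src, dst, -1, kernel, new Point(-1, -1), 0);
        return dst;
    }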
/**
* Convolves an image with the kernel.
* *The function applies an arbitrary linear filter to an image. In-place * operation is supported. When the aperture is partially outside the image, the * function interpolates outlier pixel values according to the specified border * mode.
* *The function does actually compute correlation, not the convolution:
* *dst(x,y) = sum(by: 0 <= x' < kernel.cols, 0 <= y' < kernel.rows) * kernel(x',y')* src(x+x'- anchor.x,y+y'- anchor.y)
* *That is, the kernel is not mirrored around the anchor point. If you need a
* real convolution, flip the kernel using "flip" and set the new anchor to
* (kernel.cols - anchor.x - 1, kernel.rows - anchor.y - 1)
.
The function uses the DFT-based algorithm in case of sufficiently large
* kernels (~11 x 11
or larger) and the direct algorithm (that uses
* the engine retrieved by "createLinearFilter") for small kernels.
src
.
* @param ddepth desired depth of the destination image; if it is negative, it
* will be the same as src.depth()
; the following combinations of
* src.depth()
and ddepth
are supported:
*   - src.depth() = CV_8U:          ddepth = -1/CV_16S/CV_32F/CV_64F
*   - src.depth() = CV_16U/CV_16S:  ddepth = -1/CV_32F/CV_64F
*   - src.depth() = CV_32F:         ddepth = -1/CV_32F/CV_64F
*   - src.depth() = CV_64F:         ddepth = -1/CV_64F
*
* when ddepth=-1, the output image will have the same depth as the
* source.
Finds contours in a binary image.
* *The function retrieves contours from the binary image using the algorithm
* [Suzuki85]. The contours are a useful tool for shape analysis and object
* detection and recognition. See squares.c
in the OpenCV sample
* directory.
Note: Source image
is modified by this function. Also, the
* function does not take into account 1-pixel border of the image (it's filled
* with 0's and used for neighbor analysis in the algorithm), therefore the
* contours touching the image border will be clipped.
Note: If you use the new Python interface then the CV_
prefix
* has to be omitted in contour retrieval mode and contour approximation method
* parameters (for example, use cv2.RETR_LIST
and cv2.CHAIN_APPROX_NONE
* parameters). If you use the old Python interface then these parameters have
* the CV_
prefix (for example, use cv.CV_RETR_LIST
* and cv.CV_CHAIN_APPROX_NONE
).
Note:
*-
*
- An example using the findContour functionality can be found at * opencv_source_code/samples/cpp/contours2.cpp *
- An example using findContours to clean up a background segmentation * result at opencv_source_code/samples/cpp/segment_objects.cpp *
- (Python) An example using the findContour functionality can be found * at opencv_source/samples/python2/contours.py *
- (Python) An example of detecting squares in an image can be found at * opencv_source/samples/python2/squares.py *
binary
. You can use "compare", "inRange", "threshold",
* "adaptiveThreshold", "Canny", and others to create a binary image out of a
* grayscale or color one. The function modifies the image
while
* extracting the contours.
* @param contours Detected contours. Each contour is stored as a vector of
* points.
* @param hierarchy Optional output vector, containing information about the
* image topology. It has as many elements as the number of contours. For each
* i-th contour contours[i]
, the elements hierarchy[i][0]
,
* hiearchy[i][1]
, hiearchy[i][2]
, and
* hiearchy[i][3]
are set to 0-based indices in contours
* of the next and previous contours at the same hierarchical level, the first
* child contour and the parent contour, respectively. If for the contour
* i
there are no next, previous, parent, or nested contours, the
* corresponding elements of hierarchy[i]
will be negative.
* @param mode Contour retrieval mode (if you use Python see also a note below).
* -
*
- CV_RETR_EXTERNAL retrieves only the extreme outer contours. It sets
*
hierarchy[i][2]=hierarchy[i][3]=-1
for all the contours. * - CV_RETR_LIST retrieves all of the contours without establishing any * hierarchical relationships. *
- CV_RETR_CCOMP retrieves all of the contours and organizes them into a * two-level hierarchy. At the top level, there are external boundaries of the * components. At the second level, there are boundaries of the holes. If there * is another contour inside a hole of a connected component, it is still put at * the top level. *
- CV_RETR_TREE retrieves all of the contours and reconstructs a full
* hierarchy of nested contours. This full hierarchy is built and shown in the
* OpenCV
contours.c
demo. *
-
*
- CV_CHAIN_APPROX_NONE stores absolutely all the contour points. That
* is, any 2 subsequent points
(x1,y1)
and(x2,y2)
of * the contour will be either horizontal, vertical or diagonal neighbors, that * is,max(abs(x1-x2),abs(y2-y1))==1
. * - CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal * segments and leaves only their end points. For example, an up-right * rectangular contour is encoded with 4 points. *
- CV_CHAIN_APPROX_TC89_L1,CV_CHAIN_APPROX_TC89_KCOS applies one of the * flavors of the Teh-Chin chain approximation algorithm. See [TehChin89] for * details. *
Finds contours in a binary image.
* *The function retrieves contours from the binary image using the algorithm
* [Suzuki85]. The contours are a useful tool for shape analysis and object
* detection and recognition. See squares.c
in the OpenCV sample
* directory.
Note: Source image
is modified by this function. Also, the
* function does not take into account 1-pixel border of the image (it's filled
* with 0's and used for neighbor analysis in the algorithm), therefore the
* contours touching the image border will be clipped.
Note: If you use the new Python interface then the CV_
prefix
* has to be omitted in contour retrieval mode and contour approximation method
* parameters (for example, use cv2.RETR_LIST
and cv2.CHAIN_APPROX_NONE
* parameters). If you use the old Python interface then these parameters have
* the CV_
prefix (for example, use cv.CV_RETR_LIST
* and cv.CV_CHAIN_APPROX_NONE
).
Note:
*-
*
- An example using the findContour functionality can be found at * opencv_source_code/samples/cpp/contours2.cpp *
- An example using findContours to clean up a background segmentation * result at opencv_source_code/samples/cpp/segment_objects.cpp *
- (Python) An example using the findContour functionality can be found * at opencv_source/samples/python2/contours.py *
- (Python) An example of detecting squares in an image can be found at * opencv_source/samples/python2/squares.py *
binary
. You can use "compare", "inRange", "threshold",
* "adaptiveThreshold", "Canny", and others to create a binary image out of a
* grayscale or color one. The function modifies the image
while
* extracting the contours.
* @param contours Detected contours. Each contour is stored as a vector of
* points.
* @param hierarchy Optional output vector, containing information about the
* image topology. It has as many elements as the number of contours. For each
* i-th contour contours[i]
, the elements hierarchy[i][0]
,
* hiearchy[i][1]
, hiearchy[i][2]
, and
* hiearchy[i][3]
are set to 0-based indices in contours
* of the next and previous contours at the same hierarchical level, the first
* child contour and the parent contour, respectively. If for the contour
* i
there are no next, previous, parent, or nested contours, the
* corresponding elements of hierarchy[i]
will be negative.
* @param mode Contour retrieval mode (if you use Python see also a note below).
* -
*
- CV_RETR_EXTERNAL retrieves only the extreme outer contours. It sets
*
hierarchy[i][2]=hierarchy[i][3]=-1
for all the contours. * - CV_RETR_LIST retrieves all of the contours without establishing any * hierarchical relationships. *
- CV_RETR_CCOMP retrieves all of the contours and organizes them into a * two-level hierarchy. At the top level, there are external boundaries of the * components. At the second level, there are boundaries of the holes. If there * is another contour inside a hole of a connected component, it is still put at * the top level. *
- CV_RETR_TREE retrieves all of the contours and reconstructs a full
* hierarchy of nested contours. This full hierarchy is built and shown in the
* OpenCV
contours.c
demo. *
-
*
- CV_CHAIN_APPROX_NONE stores absolutely all the contour points. That
* is, any 2 subsequent points
(x1,y1)
and(x2,y2)
of * the contour will be either horizontal, vertical or diagonal neighbors, that * is,max(abs(x1-x2),abs(y2-y1))==1
. * - CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal * segments and leaves only their end points. For example, an up-right * rectangular contour is encoded with 4 points. *
- CV_CHAIN_APPROX_TC89_L1,CV_CHAIN_APPROX_TC89_KCOS applies one of the * flavors of the Teh-Chin chain approximation algorithm. See [TehChin89] for * details. *
Fits an ellipse around a set of 2D points.
* *The function calculates the ellipse that fits (in a least-squares sense) a * set of 2D points best of all. It returns the rotated rectangle in which the * ellipse is inscribed. The algorithm [Fitzgibbon95] is used. * Developer should keep in mind that it is possible that the returned * ellipse/rotatedRect data contains negative indices, due to the data points * being close to the border of the containing Mat element.
* *Note:
*-
*
- An example using the fitEllipse technique can be found at * opencv_source_code/samples/cpp/fitellipse.cpp *
-
*
-
std.vector<>
orMat
(C++ interface) * -
CvSeq*
orCvMat*
(C interface) * - Nx2 numpy array (Python interface) *
Fits a line to a 2D or 3D point set.
* *The function fitLine
fits a line to a 2D or 3D point set by
* minimizing sum_i rho(r_i) where r_i is a distance between
* the i^(th) point, the line and rho(r) is a distance
* function, one of the following:
-
*
- distType=CV_DIST_L2 *
rho(r) = r^2/2(the simplest and the fastest least-squares method)
* *-
*
- distType=CV_DIST_L1 *
rho(r) = r
* *-
*
- distType=CV_DIST_L12 *
rho(r) = 2 * (sqrt(1 + frac(r^2)2) - 1)
* *-
*
- distType=CV_DIST_FAIR *
rho(r) = C^2 * ((r)/(C) - log((1 + (r)/(C)))) where C=1.3998
* *-
*
- distType=CV_DIST_WELSCH *
rho(r) = (C^2)/2 * (1 - exp((-((r)/(C))^2))) where C=2.9846
* *-
*
- distType=CV_DIST_HUBER *
rho(r) = r^2/2 if r < C; C * (r-C/2) otherwise where C=1.345
* *The algorithm is based on the M-estimator (http://en.wikipedia.org/wiki/M-estimator) * technique that iteratively fits the line using the weighted least-squares * algorithm. After each iteration the weights w_i are adjusted to be * inversely proportional to rho(r_i)... Sample code:
*-
*
- (Python) An example of robust line fitting can be found at * opencv_source_code/samples/python2/fitline.py *
std.vector<>
* or Mat
.
* @param line Output line parameters. In case of 2D fitting, it should be a
* vector of 4 elements (like Vec4f
) - (vx, vy, x0,
* y0)
, where (vx, vy)
is a normalized vector collinear to
* the line and (x0, y0)
is a point on the line. In case of 3D
* fitting, it should be a vector of 6 elements (like Vec6f
) -
* (vx, vy, vz, x0, y0, z0)
, where (vx, vy, vz)
is a
* normalized vector collinear to the line and (x0, y0, z0)
is a
* point on the line.
* @param distType Distance used by the M-estimator (see the discussion below).
* @param param Numerical parameter (C
) for some types of
* distances. If it is 0, an optimal value is chosen.
* @param reps Sufficient accuracy for the radius (distance between the
* coordinate origin and the line).
* @param aeps Sufficient accuracy for the angle. 0.01 would be a good default
* value for reps
and aeps
.
*
* @see org.opencv.imgproc.Imgproc.fitLine
*/
public static void fitLine(Mat points, Mat line, int distType, double param, double reps, double aeps)
{
fitLine_0(points.nativeObj, line.nativeObj, distType, param, reps, aeps);
return;
}
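
    // Illustrative usage sketch; not part of the auto-generated API above. It fits a 2D line
    // to a point set with the least-squares (CV_DIST_L2) estimator. The output Mat holds the
    // four floats (vx, vy, x0, y0) described in the Javadoc; param = 0 lets the function pick
    // an optimal C, and 0.01 is the suggested accuracy for reps and aeps.
    private static Mat exampleFitLineUsage(MatOfPoint2f points)
    {
        Mat line = new Mat();
        fitLine(points, line, CV_DIST_L2, 0, 0.01, 0.01);
        return line;
    }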
//
// C++: int floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4)
//
/**
* Fills a connected component with the given color.
* *The functions floodFill
fill a connected component starting from
* the seed point with the specified color. The connectivity is determined by
* the color/brightness closeness of the neighbor pixels. The pixel at
* (x,y) is considered to belong to the repainted domain if:
-
*
- src(x',y')- loDiff <= src(x,y) <= src(x',y')+ upDiff *
in case of a grayscale image and floating range
*-
*
- src(seedPoint.x, seedPoint.y)- loDiff <= src(x,y) <= * src(seedPoint.x, seedPoint.y)+ upDiff *
in case of a grayscale image and fixed range
*-
*
- src(x',y')_r- loDiff _r <= src(x,y)_r <= src(x',y')_r+ upDiff * _r, * * *
src(x',y')_g- loDiff _g <= src(x,y)_g <= src(x',y')_g+ upDiff _g
* *and
* *src(x',y')_b- loDiff _b <= src(x,y)_b <= src(x',y')_b+ upDiff _b
* *in case of a color image and floating range
*-
*
- src(seedPoint.x, seedPoint.y)_r- loDiff _r <= src(x,y)_r <= * src(seedPoint.x, seedPoint.y)_r+ upDiff _r, * * *
src(seedPoint.x, seedPoint.y)_g- loDiff _g <= src(x,y)_g <= * src(seedPoint.x, seedPoint.y)_g+ upDiff _g
* *and
* *src(seedPoint.x, seedPoint.y)_b- loDiff _b <= src(x,y)_b <= * src(seedPoint.x, seedPoint.y)_b+ upDiff _b
* *in case of a color image and fixed range
* *where src(x',y') is the value of one of pixel neighbors that is * already known to belong to the component. That is, to be added to the * connected component, a color/brightness of the pixel should be close enough * to:
*-
*
- Color/brightness of one of its neighbors that already belong to the * connected component in case of a floating range. *
- Color/brightness of the seed point in case of a fixed range. *
Use these functions to either mark a connected component with the specified * color in-place, or build a mask and then extract the contour, or copy the * region to another image, and so on.
* *Note:
*-
*
- An example using the FloodFill technique can be found at * opencv_source_code/samples/cpp/ffilldemo.cpp *
- (Python) An example using the FloodFill technique can be found at * opencv_source_code/samples/python2/floodfill.cpp *
FLOODFILL_MASK_ONLY
flag
* is set in the second variant of the function. See the details below.
* @param mask (For the second function only) Operation mask that should be a
* single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function
* uses and updates the mask, so you take responsibility of initializing the
* mask
content. Flood-filling cannot go across non-zero pixels in
* the mask. For example, an edge detector output can be used as a mask to stop
* filling at edges. It is possible to use the same mask in multiple calls to
* the function to make sure the filled area does not overlap.
*
* Note: Since the mask is larger than the filled image, a pixel (x, y)
* in image
corresponds to the pixel (x+1, y+1) in the
* mask
.
-
*
- FLOODFILL_FIXED_RANGE If set, the difference between the current pixel * and seed pixel is considered. Otherwise, the difference between neighbor * pixels is considered (that is, the range is floating). *
- FLOODFILL_MASK_ONLY If set, the function does not change the image
* (
newVal
is ignored), but fills the mask. The flag can be used * for the second variant only. *
Fills a connected component with the given color.
* *The functions floodFill
fill a connected component starting from
* the seed point with the specified color. The connectivity is determined by
* the color/brightness closeness of the neighbor pixels. The pixel at
* (x,y) is considered to belong to the repainted domain if:
-
*
- src(x',y')- loDiff <= src(x,y) <= src(x',y')+ upDiff *
in case of a grayscale image and floating range
*-
*
- src(seedPoint.x, seedPoint.y)- loDiff <= src(x,y) <= * src(seedPoint.x, seedPoint.y)+ upDiff *
in case of a grayscale image and fixed range
*-
*
- src(x',y')_r- loDiff _r <= src(x,y)_r <= src(x',y')_r+ upDiff * _r, * * *
src(x',y')_g- loDiff _g <= src(x,y)_g <= src(x',y')_g+ upDiff _g
* *and
* *src(x',y')_b- loDiff _b <= src(x,y)_b <= src(x',y')_b+ upDiff _b
* *in case of a color image and floating range
*-
*
- src(seedPoint.x, seedPoint.y)_r- loDiff _r <= src(x,y)_r <= * src(seedPoint.x, seedPoint.y)_r+ upDiff _r, * * *
src(seedPoint.x, seedPoint.y)_g- loDiff _g <= src(x,y)_g <= * src(seedPoint.x, seedPoint.y)_g+ upDiff _g
* *and
* *src(seedPoint.x, seedPoint.y)_b- loDiff _b <= src(x,y)_b <= * src(seedPoint.x, seedPoint.y)_b+ upDiff _b
* *in case of a color image and fixed range
* *where src(x',y') is the value of one of pixel neighbors that is * already known to belong to the component. That is, to be added to the * connected component, a color/brightness of the pixel should be close enough * to:
*-
*
- Color/brightness of one of its neighbors that already belong to the * connected component in case of a floating range. *
- Color/brightness of the seed point in case of a fixed range. *
 * Use these functions to either mark a connected component with the specified
 * color in-place, or build a mask and then extract the contour, or copy the
 * region to another image, and so on.
 *
 * Note:
 *   - An example using the FloodFill technique can be found at
 *     opencv_source_code/samples/cpp/ffilldemo.cpp
 *   - (Python) An example using the FloodFill technique can be found at
 *     opencv_source_code/samples/python2/floodfill.py
 *
 * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image.
 * It is modified by the function unless the FLOODFILL_MASK_ONLY flag
 * is set in the second variant of the function. See the details below.
 * @param mask (For the second function only) Operation mask that should be a
 * single-channel 8-bit image, 2 pixels wider and 2 pixels taller. The function
 * uses and updates the mask, so you take responsibility of initializing the
 * mask content. Flood-filling cannot go across non-zero pixels in
 * the mask. For example, an edge detector output can be used as a mask to stop
 * filling at edges. It is possible to use the same mask in multiple calls to
 * the function to make sure the filled areas do not overlap.
 *
 * Note: Since the mask is larger than the filled image, a pixel (x, y)
 * in image corresponds to the pixel (x+1, y+1) in the mask.
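A minimal usage sketch of the floodFill binding follows (not part of the auto-generated source; it assumes the usual org.opencv.core imports plus CvType, and uses a placeholder image and seed point):

// Flood-fill a connected region of an 8-bit 3-channel image starting at (50, 50).
Mat img = Mat.zeros(100, 100, CvType.CV_8UC3);                        // placeholder input image
Mat mask = Mat.zeros(img.rows() + 2, img.cols() + 2, CvType.CV_8UC1); // 2 px wider and taller, as required
int filledArea = Imgproc.floodFill(img, mask, new Point(50, 50), new Scalar(0, 255, 0));
// filledArea is the number of repainted pixels; the filled region appears in mask shifted by (1, 1).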
Calculates an affine transform from three pairs of the corresponding points.
 *
 * The function calculates the 2 x 3 matrix of an affine transform so that:
 *
 *   [x'_i]                [x_i]
 *   [y'_i] = map_matrix * [y_i]
 *                         [ 1 ]
 *
 * where
 *
 *   dst(i) = (x'_i, y'_i), src(i) = (x_i, y_i), i = 0, 1, 2
 *
 * @param src Coordinates of triangle vertices in the source image.
 * @param dst Coordinates of the corresponding triangle vertices in the
 * destination image.
 *
 * @see org.opencv.imgproc.Imgproc.getAffineTransform
 * @see org.opencv.imgproc.Imgproc#warpAffine
 * @see org.opencv.core.Core#transform
 */
public static Mat getAffineTransform(MatOfPoint2f src, MatOfPoint2f dst)
{
Mat src_mat = src;
Mat dst_mat = dst;
Mat retVal = new Mat(getAffineTransform_0(src_mat.nativeObj, dst_mat.nativeObj));
return retVal;
}

//
// C++: Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false)
//

/**
 * Returns the default new camera matrix.
 * The function returns the camera matrix that is either an exact copy of the
 * input cameraMatrix (when centerPrincipalPoint=false),
 * or the modified one (when centerPrincipalPoint=true).
 * In the latter case, the new camera matrix will be:
 *
 *   [f_x   0   (imgSize.width  - 1) * 0.5]
 *   [ 0   f_y  (imgSize.height - 1) * 0.5]
 *   [ 0    0                 1           ]
 *
 * where f_x and f_y are the (0,0) and (1,1) elements of cameraMatrix, respectively.
By default, the undistortion functions in OpenCV (see "initUndistortRectifyMap", * "undistort") do not move the principal point. However, when you work with * stereo, it is important to move the principal points in both views to the * same y-coordinate (which is required by most of stereo correspondence * algorithms), and may be to the same x-coordinate too. So, you can form the * new camera matrix for each view where the principal points are located at the * center.
 *
 * @param cameraMatrix Input camera matrix.
 * @param imgsize Camera view image size in pixels.
 * @param centerPrincipalPoint Location of the principal point in the new camera
 * matrix. The parameter indicates whether this location should be at the image
 * center or not.
 *
 * @see org.opencv.imgproc.Imgproc.getDefaultNewCameraMatrix
 */
public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize, boolean centerPrincipalPoint)
{
Mat retVal = new Mat(getDefaultNewCameraMatrix_0(cameraMatrix.nativeObj, imgsize.width, imgsize.height, centerPrincipalPoint));
return retVal;
}

/**
 * Returns the default new camera matrix.
 * The function returns the camera matrix that is either an exact copy of the
 * input cameraMatrix (when centerPrincipalPoint=false),
 * or the modified one (when centerPrincipalPoint=true).
 * In the latter case, the new camera matrix will be:
 *
 *   [f_x   0   (imgSize.width  - 1) * 0.5]
 *   [ 0   f_y  (imgSize.height - 1) * 0.5]
 *   [ 0    0                 1           ]
 *
 * where f_x and f_y are the (0,0) and (1,1) elements of cameraMatrix, respectively.
By default, the undistortion functions in OpenCV (see "initUndistortRectifyMap", * "undistort") do not move the principal point. However, when you work with * stereo, it is important to move the principal points in both views to the * same y-coordinate (which is required by most of stereo correspondence * algorithms), and may be to the same x-coordinate too. So, you can form the * new camera matrix for each view where the principal points are located at the * center.
 *
 * @param cameraMatrix Input camera matrix.
 *
 * @see org.opencv.imgproc.Imgproc.getDefaultNewCameraMatrix
 */
public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix)
{
Mat retVal = new Mat(getDefaultNewCameraMatrix_1(cameraMatrix.nativeObj));
return retVal;
}

//
// C++: void getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F)
//

/**
 * Returns filter coefficients for computing spatial image derivatives.
* *The function computes and returns the filter coefficients for spatial image
* derivatives. When ksize=CV_SCHARR
, the Scharr 3 x 3
* kernels are generated (see "Scharr"). Otherwise, Sobel kernels are generated
* (see "Sobel"). The filters are normally passed to "sepFilter2D" or to
* "createSeparableLinearFilter".
ktype
.
* @param ky Output matrix of column filter coefficients. It has the type
* ktype
.
* @param dx Derivative order in respect of x.
* @param dy Derivative order in respect of y.
* @param ksize Aperture size. It can be CV_SCHARR
, 1, 3, 5, or 7.
* @param normalize Flag indicating whether to normalize (scale down) the filter
* coefficients or not. Theoretically, the coefficients should have the
* denominator =2^(ksize*2-dx-dy-2). If you are going to filter
* floating-point images, you are likely to use the normalized kernels. But if
* you compute derivatives of an 8-bit image, store the results in a 16-bit
* image, and wish to preserve all the fractional bits, you may want to set
* normalize=false
.
 * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F.
*
* @see org.opencv.imgproc.Imgproc.getDerivKernels
*/
public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize, boolean normalize, int ktype)
{
getDerivKernels_0(kx.nativeObj, ky.nativeObj, dx, dy, ksize, normalize, ktype);
return;
}
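A short usage sketch (not part of the auto-generated source): generating separable Sobel kernels for the first x-derivative and handing them to sepFilter2D, assuming the minimal sepFilter2D overload of this class.

Mat kx = new Mat(), ky = new Mat();
Imgproc.getDerivKernels(kx, ky, 1, 0, 3, true, CvType.CV_32F); // normalized 3x3 Sobel d/dx kernels
Mat src = Mat.eye(64, 64, CvType.CV_32F);                      // placeholder floating-point image
Mat dx = new Mat();
Imgproc.sepFilter2D(src, dx, -1, kx, ky);                      // x-derivative via the separable filter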
/**
* Returns filter coefficients for computing spatial image derivatives.
* *The function computes and returns the filter coefficients for spatial image
* derivatives. When ksize=CV_SCHARR
, the Scharr 3 x 3
* kernels are generated (see "Scharr"). Otherwise, Sobel kernels are generated
* (see "Sobel"). The filters are normally passed to "sepFilter2D" or to
* "createSeparableLinearFilter".
ktype
.
* @param ky Output matrix of column filter coefficients. It has the type
* ktype
.
* @param dx Derivative order in respect of x.
* @param dy Derivative order in respect of y.
* @param ksize Aperture size. It can be CV_SCHARR
, 1, 3, 5, or 7.
*
* @see org.opencv.imgproc.Imgproc.getDerivKernels
*/
public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize)
{
getDerivKernels_1(kx.nativeObj, ky.nativeObj, dx, dy, ksize);
return;
}
//
// C++: Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F)
//
public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi, int ktype)
{
Mat retVal = new Mat(getGaborKernel_0(ksize.width, ksize.height, sigma, theta, lambd, gamma, psi, ktype));
return retVal;
}
public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma)
{
Mat retVal = new Mat(getGaborKernel_1(ksize.width, ksize.height, sigma, theta, lambd, gamma));
return retVal;
}
//
// C++: Mat getGaussianKernel(int ksize, double sigma, int ktype = CV_64F)
//
/**
* Returns Gaussian filter coefficients.
* *The function computes and returns the ksize x 1 matrix of Gaussian * filter coefficients:
* *G_i= alpha *e^(-(i-(ksize -1)/2)^2/(2* sigma)^2),
* *where i=0..ksize-1 and alpha is the scale factor chosen so * that sum_i G_i=1.
* *Two of such generated kernels can be passed to "sepFilter2D" or to * "createSeparableLinearFilter". Those functions automatically recognize * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and * handle them accordingly. You may also use the higher-level "GaussianBlur".
 *
 * @param ksize Aperture size. It should be odd (ksize mod 2 = 1) and positive.
 * @param sigma Gaussian standard deviation. If it is non-positive, it is
 * computed from ksize as sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8.
 * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F.
*
* @see org.opencv.imgproc.Imgproc.getGaussianKernel
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.imgproc.Imgproc#sepFilter2D
* @see org.opencv.imgproc.Imgproc#getStructuringElement
* @see org.opencv.imgproc.Imgproc#getDerivKernels
*/
public static Mat getGaussianKernel(int ksize, double sigma, int ktype)
{
Mat retVal = new Mat(getGaussianKernel_0(ksize, sigma, ktype));
return retVal;
}
/**
* Returns Gaussian filter coefficients.
* *The function computes and returns the ksize x 1 matrix of Gaussian * filter coefficients:
* *G_i= alpha *e^(-(i-(ksize -1)/2)^2/(2* sigma)^2),
* *where i=0..ksize-1 and alpha is the scale factor chosen so * that sum_i G_i=1.
* *Two of such generated kernels can be passed to "sepFilter2D" or to * "createSeparableLinearFilter". Those functions automatically recognize * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and * handle them accordingly. You may also use the higher-level "GaussianBlur".
 *
 * @param ksize Aperture size. It should be odd (ksize mod 2 = 1) and positive.
 * @param sigma Gaussian standard deviation. If it is non-positive, it is
 * computed from ksize as sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8.
*
* @see org.opencv.imgproc.Imgproc.getGaussianKernel
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.imgproc.Imgproc#sepFilter2D
* @see org.opencv.imgproc.Imgproc#getStructuringElement
* @see org.opencv.imgproc.Imgproc#getDerivKernels
*/
public static Mat getGaussianKernel(int ksize, double sigma)
{
Mat retVal = new Mat(getGaussianKernel_1(ksize, sigma));
return retVal;
}
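A usage sketch (not part of the auto-generated source): building a separable Gaussian smoother from two 1-D kernels, assuming the minimal sepFilter2D overload of this class.

Mat g = Imgproc.getGaussianKernel(7, -1);     // 7x1 kernel; sigma derived from ksize as described above
Mat src = Mat.eye(64, 64, CvType.CV_32F);     // placeholder image
Mat dst = new Mat();
Imgproc.sepFilter2D(src, dst, -1, g, g);      // roughly equivalent to a 7x7 GaussianBlur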
//
// C++: Mat getPerspectiveTransform(Mat src, Mat dst)
//
/**
* Calculates a perspective transform from four pairs of the corresponding * points.
 * The function calculates the 3 x 3 matrix of a perspective transform so that:
 *
 *   [t_i * x'_i]                [x_i]
 *   [t_i * y'_i] = map_matrix * [y_i]
 *   [   t_i    ]                [ 1 ]
 *
 * where
 *
 *   dst(i) = (x'_i, y'_i), src(i) = (x_i, y_i), i = 0, 1, 2, 3
 *
 * @param src Coordinates of quadrangle vertices in the source image.
 * @param dst Coordinates of the corresponding quadrangle vertices in the
 * destination image.
 *
 * @see org.opencv.imgproc.Imgproc.getPerspectiveTransform
 * @see org.opencv.calib3d.Calib3d#findHomography
 * @see org.opencv.core.Core#perspectiveTransform
 * @see org.opencv.imgproc.Imgproc#warpPerspective
 */
public static Mat getPerspectiveTransform(Mat src, Mat dst)
{
Mat retVal = new Mat(getPerspectiveTransform_0(src.nativeObj, dst.nativeObj));
return retVal;
}

//
// C++: void getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1)
//

/**
 * Retrieves a pixel rectangle from an image with sub-pixel accuracy.
 * The function getRectSubPix extracts pixels from src:
 *
 *   dst(x, y) = src(x + center.x - (dst.cols - 1)*0.5, y + center.y - (dst.rows - 1)*0.5)
* *where the values of the pixels at non-integer coordinates are retrieved using * bilinear interpolation. Every channel of multi-channel images is processed * independently. While the center of the rectangle must be inside the image, * parts of the rectangle may be outside. In this case, the replication border * mode (see "borderInterpolate") is used to extrapolate the pixel values * outside of the image.
 *
 * @param image Source image.
 * @param patchSize Size of the extracted patch.
 * @param center Floating point coordinates of the center of the extracted
 * rectangle within the source image. The center must be inside the image.
 * @param patch Extracted patch that has the size patchSize and the same number
 * of channels as src.
 * @param patchType Depth of the extracted pixels. By default, they have the
 * same depth as src.
*
* @see org.opencv.imgproc.Imgproc.getRectSubPix
* @see org.opencv.imgproc.Imgproc#warpAffine
* @see org.opencv.imgproc.Imgproc#warpPerspective
*/
public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch, int patchType)
{
getRectSubPix_0(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj, patchType);
return;
}
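Usage sketch (not part of the auto-generated source): extracting a patch centered at a sub-pixel location from a placeholder image.

Mat image = Mat.zeros(100, 100, CvType.CV_8UC1);   // placeholder source image
Mat patch = new Mat();
Imgproc.getRectSubPix(image, new Size(21, 21), new Point(40.3, 52.7), patch);
// patch is 21x21 with bilinearly interpolated pixel values.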
/**
* Retrieves a pixel rectangle from an image with sub-pixel accuracy.
 * The function getRectSubPix extracts pixels from src:
 *
 *   dst(x, y) = src(x + center.x - (dst.cols - 1)*0.5, y + center.y - (dst.rows - 1)*0.5)
* *where the values of the pixels at non-integer coordinates are retrieved using * bilinear interpolation. Every channel of multi-channel images is processed * independently. While the center of the rectangle must be inside the image, * parts of the rectangle may be outside. In this case, the replication border * mode (see "borderInterpolate") is used to extrapolate the pixel values * outside of the image.
 *
 * @param image Source image.
 * @param patchSize Size of the extracted patch.
 * @param center Floating point coordinates of the center of the extracted
 * rectangle within the source image. The center must be inside the image.
 * @param patch Extracted patch that has the size patchSize and the same number
 * of channels as src.
 *
 * @see org.opencv.imgproc.Imgproc.getRectSubPix
 * @see org.opencv.imgproc.Imgproc#warpAffine
 * @see org.opencv.imgproc.Imgproc#warpPerspective
 */
public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch)
{
getRectSubPix_1(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj);
return;
}

//
// C++: Mat getRotationMatrix2D(Point2f center, double angle, double scale)
//

/**
 * Calculates an affine matrix of 2D rotation.
* *The function calculates the following matrix:
 *   [ alpha   beta   (1 - alpha) * center.x - beta * center.y ]
 *   [ -beta   alpha   beta * center.x + (1 - alpha) * center.y ]
 *
 * where
 *
 *   alpha = scale * cos(angle), beta = scale * sin(angle)
* *The transformation maps the rotation center to itself. If this is not the * target, adjust the shift.
 *
 * @param center Center of the rotation in the source image.
 * @param angle Rotation angle in degrees. Positive values mean
 * counter-clockwise rotation (the coordinate origin is assumed to be the
 * top-left corner).
 * @param scale Isotropic scale factor.
 *
 * @see org.opencv.imgproc.Imgproc.getRotationMatrix2D
 * @see org.opencv.imgproc.Imgproc#warpAffine
 * @see org.opencv.imgproc.Imgproc#getAffineTransform
 * @see org.opencv.core.Core#transform
 */
public static Mat getRotationMatrix2D(Point center, double angle, double scale)
{
Mat retVal = new Mat(getRotationMatrix2D_0(center.x, center.y, angle, scale));
return retVal;
}

//
// C++: Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1))
//

/**
 * Returns a structuring element of the specified size and shape for
 * morphological operations.
* *The function constructs and returns the structuring element that can be * further passed to "createMorphologyFilter", "erode", "dilate" or * "morphologyEx". But you can also construct an arbitrary binary mask yourself * and use it as the structuring element.
* *Note: When using OpenCV 1.x C API, the created structuring element
* IplConvKernel* element
must be released in the end using
* cvReleaseStructuringElement(&element)
.
 *   - MORPH_RECT - a rectangular structuring element: E_(ij) = 1
 *   - MORPH_ELLIPSE - an elliptic structuring element, that is, a filled
 *     ellipse inscribed into the rectangle Rect(0, 0, esize.width, esize.height)
 *   - MORPH_CROSS - a cross-shaped structuring element:
 *     E_(ij) = 1 if i=anchor.y or j=anchor.x; 0 otherwise
 *   - CV_SHAPE_CUSTOM - custom structuring element (OpenCV 1.x API)
Returns a structuring element of the specified size and shape for * morphological operations.
* *The function constructs and returns the structuring element that can be * further passed to "createMorphologyFilter", "erode", "dilate" or * "morphologyEx". But you can also construct an arbitrary binary mask yourself * and use it as the structuring element.
* *Note: When using OpenCV 1.x C API, the created structuring element
* IplConvKernel* element
must be released in the end using
* cvReleaseStructuringElement(&element)
.
 *   - MORPH_RECT - a rectangular structuring element: E_(ij) = 1
 *   - MORPH_ELLIPSE - an elliptic structuring element, that is, a filled
 *     ellipse inscribed into the rectangle Rect(0, 0, esize.width, esize.height)
 *   - MORPH_CROSS - a cross-shaped structuring element:
 *     E_(ij) = 1 if i=anchor.y or j=anchor.x; 0 otherwise
 *   - CV_SHAPE_CUSTOM - custom structuring element (OpenCV 1.x API)
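Usage sketch (not part of the auto-generated source): creating a 5x5 elliptical element and using it for erosion on a placeholder binary image.

Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
Mat src = Mat.zeros(100, 100, CvType.CV_8UC1);     // placeholder binary image
Mat eroded = new Mat();
Imgproc.erode(src, eroded, element);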
Determines strong corners on an image.
* *The function finds the most prominent corners in the image or in the * specified image region, as described in [Shi94]:
*-
*
- Function calculates the corner quality measure at every source image * pixel using the "cornerMinEigenVal" or "cornerHarris". *
- Function performs a non-maximum suppression (the local maximums in *3 * x 3* neighborhood are retained). *
- The corners with the minimal eigenvalue less than qualityLevel * * max_(x,y) qualityMeasureMap(x,y) are rejected. *
- The remaining corners are sorted by the quality measure in the * descending order. *
- Function throws away each corner for which there is a stronger corner
* at a distance less than
maxDistance
. *
The function can be used to initialize a point-based tracker of an object.
* *Note: If the function is called with different values A
and
* B
of the parameter qualityLevel
, and A
* > {B}, the vector of returned corners with qualityLevel=A
will
* be the prefix of the output vector with qualityLevel=B
.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of
 * image corners. The parameter value is multiplied by the best corner quality
 * measure (the minimal eigenvalue or the Harris function response). The corners
 * with the quality measure less than the product are rejected. For example, if
 * the best corner has the quality measure = 1500, and the qualityLevel=0.01, then
 * all the corners with the quality measure less than 15 are rejected.
* @param minDistance Minimum possible Euclidean distance between the returned
* corners.
* @param mask Optional region of interest. If the image is not empty (it needs
* to have the type CV_8UC1
and the same size as image
),
* it specifies the region in which the corners are detected.
* @param blockSize Size of an average block for computing a derivative
* covariation matrix over each pixel neighborhood. See "cornerEigenValsAndVecs".
* @param useHarrisDetector Parameter indicating whether to use a Harris
* detector (see "cornerHarris") or "cornerMinEigenVal".
* @param k Free parameter of the Harris detector.
*
* @see org.opencv.imgproc.Imgproc.goodFeaturesToTrack
* @see org.opencv.imgproc.Imgproc#cornerHarris
* @see org.opencv.video.Video#estimateRigidTransform
* @see org.opencv.imgproc.Imgproc#cornerMinEigenVal
* @see org.opencv.video.Video#calcOpticalFlowPyrLK
*/
public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, boolean useHarrisDetector, double k)
{
Mat corners_mat = corners;
goodFeaturesToTrack_0(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, useHarrisDetector, k);
return;
}
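Usage sketch (not part of the auto-generated source): detecting up to 100 corners in a placeholder grayscale image and reading them back as points.

Mat gray = Mat.zeros(240, 320, CvType.CV_8UC1);    // placeholder grayscale frame
MatOfPoint corners = new MatOfPoint();
Imgproc.goodFeaturesToTrack(gray, corners, 100, 0.01, 10);
Point[] pts = corners.toArray();                   // strongest corners first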
/**
* Determines strong corners on an image.
* *The function finds the most prominent corners in the image or in the * specified image region, as described in [Shi94]:
*-
*
- Function calculates the corner quality measure at every source image * pixel using the "cornerMinEigenVal" or "cornerHarris". *
- Function performs a non-maximum suppression (the local maximums in *3 * x 3* neighborhood are retained). *
- The corners with the minimal eigenvalue less than qualityLevel * * max_(x,y) qualityMeasureMap(x,y) are rejected. *
- The remaining corners are sorted by the quality measure in the * descending order. *
- Function throws away each corner for which there is a stronger corner
* at a distance less than
maxDistance
. *
The function can be used to initialize a point-based tracker of an object.
* *Note: If the function is called with different values A
and
* B
of the parameter qualityLevel
, and A
* > {B}, the vector of returned corners with qualityLevel=A
will
* be the prefix of the output vector with qualityLevel=B
.
 * @param qualityLevel Parameter characterizing the minimal accepted quality of
 * image corners. The parameter value is multiplied by the best corner quality
 * measure (the minimal eigenvalue or the Harris function response). The corners
 * with the quality measure less than the product are rejected. For example, if
 * the best corner has the quality measure = 1500, and the qualityLevel=0.01, then
 * all the corners with the quality measure less than 15 are rejected.
* @param minDistance Minimum possible Euclidean distance between the returned
* corners.
*
* @see org.opencv.imgproc.Imgproc.goodFeaturesToTrack
* @see org.opencv.imgproc.Imgproc#cornerHarris
* @see org.opencv.video.Video#estimateRigidTransform
* @see org.opencv.imgproc.Imgproc#cornerMinEigenVal
* @see org.opencv.video.Video#calcOpticalFlowPyrLK
*/
public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance)
{
Mat corners_mat = corners;
goodFeaturesToTrack_1(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance);
return;
}
//
// C++: void grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL)
//
/**
* Runs the GrabCut algorithm.
* *The function implements the GrabCut image segmentation algorithm
* (http://en.wikipedia.org/wiki/GrabCut).
* See the sample grabcut.cpp
to learn how to use the function.
Note:
*-
*
- An example using the GrabCut algorithm can be found at * opencv_source_code/samples/cpp/grabcut.cpp *
- (Python) An example using the GrabCut algorithm can be found at * opencv_source_code/samples/python2/grabcut.py *
 * @param mask Input/output 8-bit single-channel mask. The mask is initialized
 * by the function when mode is set to GC_INIT_WITH_RECT.
 * Its elements may have one of the following values:
 *   - GC_BGD defines an obvious background pixel.
 *   - GC_FGD defines an obvious foreground (object) pixel.
 *   - GC_PR_BGD defines a possible background pixel.
 *   - GC_PR_FGD defines a possible foreground pixel.
 * @param rect ROI containing a segmented object. The pixels outside of the ROI
 * are marked as "obvious background". The parameter is only used when
 * mode==GC_INIT_WITH_RECT.
* @param bgdModel Temporary array for the background model. Do not modify it
* while you are processing the same image.
* @param fgdModel Temporary arrays for the foreground model. Do not modify it
* while you are processing the same image.
* @param iterCount Number of iterations the algorithm should make before
* returning the result. Note that the result can be refined with further calls
* with mode==GC_INIT_WITH_MASK
or mode==GC_EVAL
.
* @param mode Operation mode that could be one of the following:
* -
*
- GC_INIT_WITH_RECT The function initializes the state and the mask
* using the provided rectangle. After that it runs
iterCount
* iterations of the algorithm. * - GC_INIT_WITH_MASK The function initializes the state using the
* provided mask. Note that
GC_INIT_WITH_RECT
andGC_INIT_WITH_MASK
* can be combined. Then, all the pixels outside of the ROI are automatically * initialized withGC_BGD
. * - GC_EVAL The value means that the algorithm should just resume. *
Runs the GrabCut algorithm.
* *The function implements the GrabCut image segmentation algorithm
* (http://en.wikipedia.org/wiki/GrabCut).
* See the sample grabcut.cpp
to learn how to use the function.
Note:
*-
*
- An example using the GrabCut algorithm can be found at * opencv_source_code/samples/cpp/grabcut.cpp *
- (Python) An example using the GrabCut algorithm can be found at * opencv_source_code/samples/python2/grabcut.py *
 * @param mask Input/output 8-bit single-channel mask. The mask is initialized
 * by the function when mode is set to GC_INIT_WITH_RECT.
 * Its elements may have one of the following values:
 *   - GC_BGD defines an obvious background pixel.
 *   - GC_FGD defines an obvious foreground (object) pixel.
 *   - GC_PR_BGD defines a possible background pixel.
 *   - GC_PR_FGD defines a possible foreground pixel.
 * @param rect ROI containing a segmented object. The pixels outside of the ROI
 * are marked as "obvious background". The parameter is only used when
 * mode==GC_INIT_WITH_RECT.
* @param bgdModel Temporary array for the background model. Do not modify it
* while you are processing the same image.
* @param fgdModel Temporary arrays for the foreground model. Do not modify it
* while you are processing the same image.
* @param iterCount Number of iterations the algorithm should make before
* returning the result. Note that the result can be refined with further calls
* with mode==GC_INIT_WITH_MASK
or mode==GC_EVAL
.
*
* @see org.opencv.imgproc.Imgproc.grabCut
*/
public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount)
{
grabCut_1(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount);
return;
}
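Usage sketch (not part of the auto-generated source; the image and rectangle are placeholders, and it assumes the overload taking a mode parameter, per the C++ signature above, is exposed as well): initializing GrabCut from a rectangle and running five iterations.

Mat img = Mat.zeros(240, 320, CvType.CV_8UC3);     // placeholder 8-bit 3-channel image
Mat mask = new Mat(), bgdModel = new Mat(), fgdModel = new Mat();
Rect roi = new Rect(40, 40, 200, 150);             // rectangle roughly containing the object
Imgproc.grabCut(img, mask, roi, bgdModel, fgdModel, 5, Imgproc.GC_INIT_WITH_RECT);
// mask now labels each pixel GC_BGD, GC_FGD, GC_PR_BGD or GC_PR_FGD.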
//
// C++: void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
//
/**
* Computes the undistortion and rectification transformation map.
* *The function computes the joint undistortion and rectification transformation
* and represents the result in the form of maps for "remap". The undistorted
* image looks like original, as if it is captured with a camera using the
* camera matrix =newCameraMatrix
and zero distortion. In case of a
* monocular camera, newCameraMatrix
is usually equal to
* cameraMatrix
, or it can be computed by "getOptimalNewCameraMatrix"
* for a better control over scaling. In case of a stereo camera,
* newCameraMatrix
is normally set to P1
or
* P2
computed by "stereoRectify".
Also, this new camera is oriented differently in the coordinate space,
* according to R
. That, for example, helps to align two heads of a
* stereo camera so that the epipolar lines on both images become horizontal and
* have the same y- coordinate (in case of a horizontally aligned stereo
* camera).
The function actually builds the maps for the inverse mapping algorithm that * is used by "remap". That is, for each pixel (u, v) in the * destination (corrected and rectified) image, the function computes the * corresponding coordinates in the source image (that is, in the original image * from camera). The following process is applied:
* *x <- (u - (c')_x)/(f')_x * y <- (v - (c')_y)/(f')_y * ([X Y W]) ^T <- R^(-1)*[x y 1]^T * x' <- X/W * y' <- Y/W * x" <- x' (1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + 2p_1 x' y' + p_2(r^2 + 2 x'^2) * y" <- y' (1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + p_1(r^2 + 2 y'^2) + 2 p_2 x' y' * map_x(u,v) <- x" f_x + c_x * map_y(u,v) <- y" f_y + c_y
* *where (k_1, k_2, p_1, p_2[, k_3]) are the distortion coefficients.
* *In case of a stereo camera, this function is called twice: once for each
* camera head, after "stereoRectify", which in its turn is called after
* "stereoCalibrate". But if the stereo camera was not calibrated, it is still
* possible to compute the rectification transformations directly from the
* fundamental matrix using "stereoRectifyUncalibrated". For each camera, the
* function computes homography H
as the rectification
* transformation in a pixel domain, not a rotation matrix R
in 3D
* space. R
can be computed from H
as
R = cameraMatrix ^(-1) * H * cameraMatrix
* *where cameraMatrix
can be chosen arbitrarily.
 * @param cameraMatrix Input camera matrix
 *   A = |f_x  0   c_x|
 *       |0   f_y  c_y|
 *       |0    0    1 |.
 * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1,
 * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is
 * NULL/empty, the zero distortion coefficients are assumed.
 * @param R Optional rectification transformation in the object space (3x3
 * matrix). R1 or R2, computed by "stereoRectify", can
 * be passed here. If the matrix is empty, the identity transformation is
 * assumed. In cvInitUndistortMap, R is assumed to be an identity
 * matrix.
 * @param newCameraMatrix New camera matrix
 *   A' = |f_x' 0   c_x'|
 *        |0   f_y' c_y'|
 *        |0    0    1  |.
 * @param size Undistorted image size.
 * @param m1type Type of the first output map that can be CV_32FC1
* or CV_16SC2
. See "convertMaps" for details.
* @param map1 The first output map.
* @param map2 The second output map.
*
* @see org.opencv.imgproc.Imgproc.initUndistortRectifyMap
*/
public static void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat map1, Mat map2)
{
initUndistortRectifyMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, newCameraMatrix.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj);
return;
}
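Usage sketch (not part of the auto-generated source; the intrinsics and distortion coefficients are placeholders that would normally come from calibration): building the maps once and applying them per frame with remap.

Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);   // placeholder intrinsics
Mat distCoeffs = Mat.zeros(5, 1, CvType.CV_64F);   // placeholder distortion coefficients
Mat map1 = new Mat(), map2 = new Mat();
Imgproc.initUndistortRectifyMap(cameraMatrix, distCoeffs, new Mat(), cameraMatrix,
        new Size(640, 480), CvType.CV_32FC1, map1, map2);
// For each frame: Imgproc.remap(frame, undistorted, map1, map2, Imgproc.INTER_LINEAR);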
//
// C++: float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat& map1, Mat& map2, int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0)
//
public static float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat map1, Mat map2, int projType, double alpha)
{
float retVal = initWideAngleProjMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, destImageWidth, m1type, map1.nativeObj, map2.nativeObj, projType, alpha);
return retVal;
}
public static float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat map1, Mat map2)
{
float retVal = initWideAngleProjMap_1(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, destImageWidth, m1type, map1.nativeObj, map2.nativeObj);
return retVal;
}
//
// C++: void integral(Mat src, Mat& sum, int sdepth = -1)
//
/**
* Calculates the integral of an image.
* *The functions calculate one or more integral images for the source image as * follows:
* *sum(X,Y) = sum(by: x<X,y<Y) image(x,y)
* * * *sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2
* * * *tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)
* *Using these integral images, you can calculate sum, mean, and standard * deviation over a specific up-right or rotated rectangular region of the image * in a constant time, for example:
* *sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)
* *It makes possible to do a fast blurring or fast block correlation with a * variable window size, for example. In case of multi-channel images, sums for * each channel are accumulated independently.
* *As a practical example, the next figure shows the calculation of the integral
* of a straight rectangle Rect(3,3,3,2)
and of a tilted rectangle
* Rect(5,1,2,3)
. The selected pixels in the original
* image
are shown, as well as the relative pixels in the integral
* images sum
and tilted
.
 * @param sdepth desired depth of the integral and the tilted integral images,
 * CV_32S, CV_32F, or CV_64F.
*
* @see org.opencv.imgproc.Imgproc.integral
*/
public static void integral(Mat src, Mat sum, int sdepth)
{
integral_0(src.nativeObj, sum.nativeObj, sdepth);
return;
}
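Usage sketch (not part of the auto-generated source): computing an integral image and using it for a constant-time block sum, following the formula above.

Mat src = Mat.ones(4, 4, CvType.CV_8UC1);
Mat sum = new Mat();
Imgproc.integral(src, sum, CvType.CV_32S);                 // sum is (rows+1) x (cols+1)
// Sum over the 2x2 block of src with corners (1,1)..(2,2):
double blockSum = sum.get(3, 3)[0] - sum.get(1, 3)[0] - sum.get(3, 1)[0] + sum.get(1, 1)[0]; // = 4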
/**
* Calculates the integral of an image.
* *The functions calculate one or more integral images for the source image as * follows:
* *sum(X,Y) = sum(by: x<X,y<Y) image(x,y)
* * * *sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2
* * * *tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)
* *Using these integral images, you can calculate sum, mean, and standard * deviation over a specific up-right or rotated rectangular region of the image * in a constant time, for example:
* *sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)
* *It makes possible to do a fast blurring or fast block correlation with a * variable window size, for example. In case of multi-channel images, sums for * each channel are accumulated independently.
* *As a practical example, the next figure shows the calculation of the integral
* of a straight rectangle Rect(3,3,3,2)
and of a tilted rectangle
* Rect(5,1,2,3)
. The selected pixels in the original
* image
are shown, as well as the relative pixels in the integral
* images sum
and tilted
.
Calculates the integral of an image.
* *The functions calculate one or more integral images for the source image as * follows:
* *sum(X,Y) = sum(by: x<X,y<Y) image(x,y)
* * * *sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2
* * * *tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)
* *Using these integral images, you can calculate sum, mean, and standard * deviation over a specific up-right or rotated rectangular region of the image * in a constant time, for example:
* *sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)
* *It makes possible to do a fast blurring or fast block correlation with a * variable window size, for example. In case of multi-channel images, sums for * each channel are accumulated independently.
* *As a practical example, the next figure shows the calculation of the integral
* of a straight rectangle Rect(3,3,3,2)
and of a tilted rectangle
* Rect(5,1,2,3)
. The selected pixels in the original
* image
are shown, as well as the relative pixels in the integral
* images sum
and tilted
.
 * @param sdepth desired depth of the integral and the tilted integral images,
 * CV_32S, CV_32F, or CV_64F.
*
* @see org.opencv.imgproc.Imgproc.integral
*/
public static void integral2(Mat src, Mat sum, Mat sqsum, int sdepth)
{
integral2_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, sdepth);
return;
}
/**
* Calculates the integral of an image.
* *The functions calculate one or more integral images for the source image as * follows:
* *sum(X,Y) = sum(by: x<X,y<Y) image(x,y)
* * * *sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2
* * * *tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)
* *Using these integral images, you can calculate sum, mean, and standard * deviation over a specific up-right or rotated rectangular region of the image * in a constant time, for example:
* *sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)
* *It makes possible to do a fast blurring or fast block correlation with a * variable window size, for example. In case of multi-channel images, sums for * each channel are accumulated independently.
* *As a practical example, the next figure shows the calculation of the integral
* of a straight rectangle Rect(3,3,3,2)
and of a tilted rectangle
* Rect(5,1,2,3)
. The selected pixels in the original
* image
are shown, as well as the relative pixels in the integral
* images sum
and tilted
.
Calculates the integral of an image.
* *The functions calculate one or more integral images for the source image as * follows:
* *sum(X,Y) = sum(by: x<X,y<Y) image(x,y)
* * * *sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2
* * * *tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)
* *Using these integral images, you can calculate sum, mean, and standard * deviation over a specific up-right or rotated rectangular region of the image * in a constant time, for example:
* *sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)
* *It makes possible to do a fast blurring or fast block correlation with a * variable window size, for example. In case of multi-channel images, sums for * each channel are accumulated independently.
* *As a practical example, the next figure shows the calculation of the integral
* of a straight rectangle Rect(3,3,3,2)
and of a tilted rectangle
* Rect(5,1,2,3)
. The selected pixels in the original
* image
are shown, as well as the relative pixels in the integral
* images sum
and tilted
.
 * @param tilted integral for the image rotated by 45 degrees; it is an array
 * with the same data type as sum.
 * @param sdepth desired depth of the integral and the tilted integral images,
 * CV_32S, CV_32F, or CV_64F.
*
* @see org.opencv.imgproc.Imgproc.integral
*/
public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted, int sdepth)
{
integral3_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj, sdepth);
return;
}
/**
* Calculates the integral of an image.
* *The functions calculate one or more integral images for the source image as * follows:
* *sum(X,Y) = sum(by: x<X,y<Y) image(x,y)
* * * *sqsum(X,Y) = sum(by: x<X,y<Y) image(x,y)^2
* * * *tilted(X,Y) = sum(by: y<Y,abs(x-X+1) <= Y-y-1) image(x,y)
* *Using these integral images, you can calculate sum, mean, and standard * deviation over a specific up-right or rotated rectangular region of the image * in a constant time, for example:
* *sum(by: x_1 <= x < x_2, y_1 <= y < y_2) image(x,y) = sum(x_2,y_2)- * sum(x_1,y_2)- sum(x_2,y_1)+ sum(x_1,y_1)
* *It makes possible to do a fast blurring or fast block correlation with a * variable window size, for example. In case of multi-channel images, sums for * each channel are accumulated independently.
* *As a practical example, the next figure shows the calculation of the integral
* of a straight rectangle Rect(3,3,3,2)
and of a tilted rectangle
* Rect(5,1,2,3)
. The selected pixels in the original
* image
are shown, as well as the relative pixels in the integral
* images sum
and tilted
.
 * @param tilted integral for the image rotated by 45 degrees; it is an array
 * with the same data type as sum.
*
* @see org.opencv.imgproc.Imgproc.integral
*/
public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted)
{
integral3_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj);
return;
}
//
// C++: float intersectConvexConvex(Mat _p1, Mat _p2, Mat& _p12, bool handleNested = true)
//
public static float intersectConvexConvex(Mat _p1, Mat _p2, Mat _p12, boolean handleNested)
{
float retVal = intersectConvexConvex_0(_p1.nativeObj, _p2.nativeObj, _p12.nativeObj, handleNested);
return retVal;
}
public static float intersectConvexConvex(Mat _p1, Mat _p2, Mat _p12)
{
float retVal = intersectConvexConvex_1(_p1.nativeObj, _p2.nativeObj, _p12.nativeObj);
return retVal;
}
//
// C++: void invertAffineTransform(Mat M, Mat& iM)
//
/**
* Inverts an affine transformation.
* *The function computes an inverse affine transformation represented by 2 x
* 3 matrix M
:
 *   [a_11  a_12  b_1]
 *   [a_21  a_22  b_2]
 *
 * The result is also a 2 x 3 matrix of the same type as M.
Tests a contour convexity.
* *The function tests whether the input contour is convex or not. The contour * must be simple, that is, without self-intersections. Otherwise, the function * output is undefined.
* * @param contour Input vector of 2D points, stored in: *-
*
-
std.vector<>
orMat
(C++ interface) * -
CvSeq*
orCvMat*
(C interface) * - Nx2 numpy array (Python interface) *
Compares two shapes.
* *The function compares two shapes. All three implemented methods use the Hu
* invariants (see "HuMoments") as follows (A denotes object1
,B
* denotes object2
):
-
*
- method=CV_CONTOURS_MATCH_I1 *
I_1(A,B) = sum(by: i=1...7) <= ft|1/(m^A_i) - 1/(m^B_i) right|
* *-
*
- method=CV_CONTOURS_MATCH_I2 *
I_2(A,B) = sum(by: i=1...7) <= ft|m^A_i - m^B_i right|
* *-
*
- method=CV_CONTOURS_MATCH_I3 *
I_3(A,B) = max _(i=1...7)(<= ft| m^A_i - m^B_i right|)/(<= ft| m^A_i * right|)
* *where
* *m^A_i = sign(h^A_i) * log(h^A_i) * m^B_i = sign(h^B_i) * log(h^B_i)
* *and h^A_i, h^B_i are the Hu moments of A and B, * respectively.
* * @param contour1 a contour1 * @param contour2 a contour2 * @param method Comparison method:CV_CONTOURS_MATCH_I1
,
* CV_CONTOURS_MATCH_I2
\
* or CV_CONTOURS_MATCH_I3
(see the details below).
Compares a template against overlapped image regions.
* *The function slides through image
, compares the overlapped
* patches of size w x h against templ
using the specified
* method and stores the comparison results in result
. Here are the
* formulae for the available comparison methods (I denotes
* image
, T template
, R
* result
). The summation is done over template and/or the image
* patch: x' = 0...w-1, y' = 0...h-1
-
*
- method=CV_TM_SQDIFF *
R(x,y)= sum(by: x',y')(T(x',y')-I(x+x',y+y'))^2
* *-
*
- method=CV_TM_SQDIFF_NORMED *
R(x,y)= (sum_(x',y')(T(x',y')-I(x+x',y+y'))^2)/(sqrt(sum_(x',y')T(x',y')^2 * * sum_(x',y') I(x+x',y+y')^2))
* *-
*
- method=CV_TM_CCORR *
R(x,y)= sum(by: x',y')(T(x',y') * I(x+x',y+y'))
* *-
*
- method=CV_TM_CCORR_NORMED *
R(x,y)= (sum_(x',y')(T(x',y') * I(x+x',y+y')))/(sqrt(sum_(x',y')T(x',y')^2 * * sum_(x',y') I(x+x',y+y')^2))
* *-
*
- method=CV_TM_CCOEFF *
R(x,y)= sum(by: x',y')(T'(x',y') * I'(x+x',y+y'))
* *where
* *T'(x',y')=T(x',y') - 1/(w * h) * sum(by: x'',y'') T(x'',y'') * I'(x+x',y+y')=I(x+x',y+y') - 1/(w * h) * sum(by: x'',y'') I(x+x'',y+y'') *
* *-
*
- method=CV_TM_CCOEFF_NORMED *
R(x,y)= (sum_(x',y')(T'(x',y') * I'(x+x',y+y')))/(sqrt(sum_(x',y')T'(x',y')^2 * * sum_(x',y') I'(x+x',y+y')^2))
* *After the function finishes the comparison, the best matches can be found as
* global minimums (when CV_TM_SQDIFF
was used) or maximums (when
* CV_TM_CCORR
or CV_TM_CCOEFF
was used) using the
* "minMaxLoc" function. In case of a color image, template summation in the
* numerator and each sum in the denominator is done over all of the channels
* and separate mean values are used for each channel. That is, the function can
* take a color template and a color image. The result will still be a
* single-channel image, which is easier to analyze.
Note:
*-
*
- (Python) An example on how to match mouse selected regions in an image * can be found at opencv_source_code/samples/python2/mouse_and_match.py *
 * @param result Map of comparison results. It must be single-channel 32-bit
 * floating-point. If image is W x H and templ is w x h, then result
 * is (W-w+1) x (H-h+1).
* @param method Parameter specifying the comparison method (see below).
*
* @see org.opencv.imgproc.Imgproc.matchTemplate
*/
public static void matchTemplate(Mat image, Mat templ, Mat result, int method)
{
matchTemplate_0(image.nativeObj, templ.nativeObj, result.nativeObj, method);
return;
}
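Usage sketch (not part of the auto-generated source; the image and template are placeholders, and org.opencv.core.Core is assumed imported): locating the best match with minMaxLoc.

Mat image = Mat.zeros(240, 320, CvType.CV_8UC1);   // placeholder search image
Mat templ = Mat.zeros(20, 30, CvType.CV_8UC1);     // placeholder template
Mat result = new Mat();
Imgproc.matchTemplate(image, templ, result, Imgproc.TM_CCOEFF_NORMED);
Core.MinMaxLocResult mm = Core.minMaxLoc(result);
Point bestTopLeft = mm.maxLoc;                     // for the TM_SQDIFF* methods use mm.minLoc instead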
//
// C++: void medianBlur(Mat src, Mat& dst, int ksize)
//
/**
* Blurs an image using the median filter.
* *The function smoothes an image using the median filter with the ksize x * ksize aperture. Each channel of a multi-channel image is processed * independently. In-place operation is supported.
* * @param src input 1-, 3-, or 4-channel image; whenksize
is 3 or
* 5, the image depth should be CV_8U
, CV_16U
, or
* CV_32F
, for larger aperture sizes, it can only be
* CV_8U
.
* @param dst destination array of the same size and type as src
.
* @param ksize aperture linear size; it must be odd and greater than 1, for
* example: 3, 5, 7...
*
* @see org.opencv.imgproc.Imgproc.medianBlur
* @see org.opencv.imgproc.Imgproc#boxFilter
* @see org.opencv.imgproc.Imgproc#GaussianBlur
* @see org.opencv.imgproc.Imgproc#bilateralFilter
* @see org.opencv.imgproc.Imgproc#blur
*/
public static void medianBlur(Mat src, Mat dst, int ksize)
{
medianBlur_0(src.nativeObj, dst.nativeObj, ksize);
return;
}
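Usage sketch (not part of the auto-generated source): a 5x5 median filter, a common choice for salt-and-pepper noise.

Mat src = Mat.zeros(100, 100, CvType.CV_8UC3);     // placeholder noisy image
Mat dst = new Mat();
Imgproc.medianBlur(src, dst, 5);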
//
// C++: RotatedRect minAreaRect(vector_Point2f points)
//
/**
* Finds a rotated rectangle of the minimum area enclosing the input 2D point * set.
* *The function calculates and returns the minimum-area bounding rectangle
* (possibly rotated) for a specified point set. See the OpenCV sample
* minarea.cpp
.
* Developer should keep in mind that the returned rotatedRect can contain
 * negative indices when data is close to the containing Mat element boundary.
-
*
-
std.vector<>
orMat
(C++ interface) * -
CvSeq*
orCvMat*
(C interface) * - Nx2 numpy array (Python interface) *
Finds a circle of the minimum area enclosing a 2D point set.
* *The function finds the minimal enclosing circle of a 2D point set using an
* iterative algorithm. See the OpenCV sample minarea.cpp
.
-
*
-
std.vector<>
orMat
(C++ interface) * -
CvSeq*
orCvMat*
(C interface) * - Nx2 numpy array (Python interface) *
Calculates all of the moments up to the third order of a polygon or * rasterized shape.
* *The function computes moments, up to the 3rd order, of a vector shape or a
* rasterized shape. The results are returned in the structure Moments
* defined as:
// C++ code:
* *class Moments
* * *public:
* *Moments();
* *Moments(double m00, double m10, double m01, double m20, double m11,
* *double m02, double m30, double m21, double m12, double m03);
* *Moments(const CvMoments& moments);
* *operator CvMoments() const;
* *// spatial moments
* *double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
* *// central moments
* *double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
* *// central normalized moments
* *double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
* * *In case of a raster image, the spatial moments Moments.m_(ji) are * computed as:
* *m _(ji)= sum(by: x,y)(array(x,y) * x^j * y^i)
* *The central moments Moments.mu_(ji) are computed as:
* *mu _(ji)= sum(by: x,y)(array(x,y) * (x - x")^j * (y - y")^i)
* *where (x", y") is the mass center:
* *x" = (m_10)/(m_(00)), y" = (m_01)/(m_(00))
* *The normalized central moments Moments.nu_(ij) are computed as:
* *nu _(ji)= (mu_(ji))/(m_(00)^((i+j)/2+1)).
* *Note:
* *mu_00=m_00, nu_00=1 nu_10=mu_10=mu_01=mu_10=0, * hence the values are not stored.
* *The moments of a contour are defined in the same way but computed using the * Green's formula (see http://en.wikipedia.org/wiki/Green_theorem). So, due to * a limited raster resolution, the moments computed for a contour are slightly * different from the moments computed for the same rasterized contour.
* *Note:
* *Since the contour moments are computed using Green formula, you may get
* seemingly odd results for contours with self-intersections, e.g. a zero area
* (m00
) for butterfly-shaped contours.
 * @param array Raster image (single-channel, 8-bit or floating-point 2D array)
 * or an array (1xN or Nx1) of 2D points (Point or Point2f).
* @param binaryImage If it is true, all non-zero image pixels are treated as
* 1's. The parameter is used for images only.
*
* @see org.opencv.imgproc.Imgproc.moments
* @see org.opencv.imgproc.Imgproc#contourArea
* @see org.opencv.imgproc.Imgproc#arcLength
*/
public static Moments moments(Mat array, boolean binaryImage)
{
Moments retVal = new Moments(moments_0(array.nativeObj, binaryImage));
return retVal;
}
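Usage sketch (not part of the auto-generated source): computing the centroid of a filled rectangle from its moments, x" = m10/m00 and y" = m01/m00. It assumes the generated Moments wrapper exposes get_m00()/get_m10()/get_m01() accessors and that the drawing helper Core.rectangle is available from org.opencv.core.Core, as in the 2.4 Java API.

Mat blob = Mat.zeros(100, 100, CvType.CV_8UC1);
Core.rectangle(blob, new Point(20, 30), new Point(60, 70), new Scalar(255), -1); // filled white rectangle
Moments m = Imgproc.moments(blob, true);
double cx = m.get_m10() / m.get_m00();             // expected ~40
double cy = m.get_m01() / m.get_m00();             // expected ~50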
/**
* Calculates all of the moments up to the third order of a polygon or * rasterized shape.
* *The function computes moments, up to the 3rd order, of a vector shape or a
* rasterized shape. The results are returned in the structure Moments
* defined as:
// C++ code:
* *class Moments
* * *public:
* *Moments();
* *Moments(double m00, double m10, double m01, double m20, double m11,
* *double m02, double m30, double m21, double m12, double m03);
* *Moments(const CvMoments& moments);
* *operator CvMoments() const;
* *// spatial moments
* *double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
* *// central moments
* *double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
* *// central normalized moments
* *double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
* * *In case of a raster image, the spatial moments Moments.m_(ji) are * computed as:
* *m _(ji)= sum(by: x,y)(array(x,y) * x^j * y^i)
* *The central moments Moments.mu_(ji) are computed as:
* *mu _(ji)= sum(by: x,y)(array(x,y) * (x - x")^j * (y - y")^i)
* *where (x", y") is the mass center:
* *x" = (m_10)/(m_(00)), y" = (m_01)/(m_(00))
* *The normalized central moments Moments.nu_(ij) are computed as:
* *nu _(ji)= (mu_(ji))/(m_(00)^((i+j)/2+1)).
* *Note:
* *mu_00=m_00, nu_00=1 nu_10=mu_10=mu_01=mu_10=0, * hence the values are not stored.
* *The moments of a contour are defined in the same way but computed using the * Green's formula (see http://en.wikipedia.org/wiki/Green_theorem). So, due to * a limited raster resolution, the moments computed for a contour are slightly * different from the moments computed for the same rasterized contour.
* *Note:
* *Since the contour moments are computed using Green formula, you may get
* seemingly odd results for contours with self-intersections, e.g. a zero area
* (m00
) for butterfly-shaped contours.
 * @param array Raster image (single-channel, 8-bit or floating-point 2D array)
 * or an array (1xN or Nx1) of 2D points (Point or Point2f).
*
* @see org.opencv.imgproc.Imgproc.moments
* @see org.opencv.imgproc.Imgproc#contourArea
* @see org.opencv.imgproc.Imgproc#arcLength
*/
public static Moments moments(Mat array)
{
Moments retVal = new Moments(moments_1(array.nativeObj));
return retVal;
}
//
// C++: void morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
//
/**
* Performs advanced morphological transformations.
* *The function can perform advanced morphological transformations using an * erosion and dilation as basic operations.
* *Opening operation:
* *dst = open(src, element)= dilate(erode(src, element))
* *Closing operation:
* *dst = close(src, element)= erode(dilate(src, element))
* *Morphological gradient:
* *dst = morph_grad(src, element)= dilate(src, element)- erode(src, * element)
* *"Top hat":
* *dst = tophat(src, element)= src - open(src, element)
* *"Black hat":
* *dst = blackhat(src, element)= close(src, element)- src
* *Any of the operations can be done in-place. In case of multi-channel images, * each channel is processed independently.
* *Note:
*-
*
- An example using the morphologyEx function for the morphological * opening and closing operations can be found at opencv_source_code/samples/cpp/morphology2.cpp *
 * @param src Source image. The number of channels can be arbitrary. The depth
 * should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst Destination image of the same size and type as src.
* @param op Type of a morphological operation that can be one of the following:
* -
*
- MORPH_OPEN - an opening operation *
- MORPH_CLOSE - a closing operation *
- MORPH_GRADIENT - a morphological gradient *
- MORPH_TOPHAT - "top hat" *
- MORPH_BLACKHAT - "black hat" *
Performs advanced morphological transformations.
* *The function can perform advanced morphological transformations using an * erosion and dilation as basic operations.
* *Opening operation:
* *dst = open(src, element)= dilate(erode(src, element))
* *Closing operation:
* *dst = close(src, element)= erode(dilate(src, element))
* *Morphological gradient:
* *dst = morph_grad(src, element)= dilate(src, element)- erode(src, * element)
* *"Top hat":
* *dst = tophat(src, element)= src - open(src, element)
* *"Black hat":
* *dst = blackhat(src, element)= close(src, element)- src
* *Any of the operations can be done in-place. In case of multi-channel images, * each channel is processed independently.
* *Note:
*-
*
- An example using the morphologyEx function for the morphological * opening and closing operations can be found at opencv_source_code/samples/cpp/morphology2.cpp *
 * @param src Source image. The number of channels can be arbitrary. The depth
 * should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst Destination image of the same size and type as src.
* @param op Type of a morphological operation that can be one of the following:
* -
*
- MORPH_OPEN - an opening operation *
- MORPH_CLOSE - a closing operation *
- MORPH_GRADIENT - a morphological gradient *
- MORPH_TOPHAT - "top hat" *
- MORPH_BLACKHAT - "black hat" *
Performs advanced morphological transformations.
* *The function can perform advanced morphological transformations using an * erosion and dilation as basic operations.
* *Opening operation:
* *dst = open(src, element)= dilate(erode(src, element))
* *Closing operation:
* *dst = close(src, element)= erode(dilate(src, element))
* *Morphological gradient:
* *dst = morph_grad(src, element)= dilate(src, element)- erode(src, * element)
* *"Top hat":
* *dst = tophat(src, element)= src - open(src, element)
* *"Black hat":
* *dst = blackhat(src, element)= close(src, element)- src
* *Any of the operations can be done in-place. In case of multi-channel images, * each channel is processed independently.
* *Note:
*-
*
- An example using the morphologyEx function for the morphological * opening and closing operations can be found at opencv_source_code/samples/cpp/morphology2.cpp *
 * @param src Source image. The number of channels can be arbitrary. The depth
 * should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 * @param dst Destination image of the same size and type as src.
* @param op Type of a morphological operation that can be one of the following:
* -
*
- MORPH_OPEN - an opening operation *
- MORPH_CLOSE - a closing operation *
- MORPH_GRADIENT - a morphological gradient *
- MORPH_TOPHAT - "top hat" *
- MORPH_BLACKHAT - "black hat" *
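Usage sketch (not part of the auto-generated source): a morphological gradient, which outlines object boundaries in a placeholder binary image.

Mat src = Mat.zeros(100, 100, CvType.CV_8UC1);     // placeholder binary image
Mat dst = new Mat();
Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
Imgproc.morphologyEx(src, dst, Imgproc.MORPH_GRADIENT, kernel);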
The function is used to detect translational shifts that occur between two * images. The operation takes advantage of the Fourier shift theorem for * detecting the translational shift in the frequency domain. It can be used for * fast image registration as well as motion estimation. For more information * please see http://en.wikipedia.org/wiki/Phase_correlation.
* *Calculates the cross-power spectrum of two supplied source arrays. The arrays * are padded if needed with "getOptimalDFTSize".
* *Return value: detected phase shift (sub-pixel) between the two arrays.
* *The function performs the following equations
*-
*
- First it applies a Hanning window (see http://en.wikipedia.org/wiki/Hann_function) * to each image to remove possible edge effects. This window is cached until * the array size changes to speed up processing time. *
- Next it computes the forward DFTs of each source array: *
mathbf(G)_a = mathcal(F)(src_1), mathbf(G)_b = mathcal(F)(src_2)
* *where mathcal(F) is the forward DFT.
*-
*
- It then computes the cross-power spectrum of each frequency domain * array: *
R = (mathbf(G)_a mathbf(G)_b^*)/(|mathbf(G)_a mathbf(G)_b^*|)
* *-
*
- Next the cross-correlation is converted back into the time domain via * the inverse DFT: *
r = mathcal(F)^(-1)(R)
* *-
*
- Finally, it computes the peak location and computes a 5x5 weighted * centroid around the peak to achieve sub-pixel accuracy. *
(Delta x, Delta y) = weightedCentroid (arg max_((x, y))(r))
* *-
*
- If non-zero, the response parameter is computed as the sum of the * elements of r within the 5x5 centroid around the peak location. It is * normalized to a maximum of 1 (meaning there is a single peak) and will be * smaller when there are multiple peaks. *
The function is used to detect translational shifts that occur between two * images. The operation takes advantage of the Fourier shift theorem for * detecting the translational shift in the frequency domain. It can be used for * fast image registration as well as motion estimation. For more information * please see http://en.wikipedia.org/wiki/Phase_correlation.
* *Calculates the cross-power spectrum of two supplied source arrays. The arrays * are padded if needed with "getOptimalDFTSize".
* *Return value: detected phase shift (sub-pixel) between the two arrays.
* *The function performs the following equations
*-
*
- First it applies a Hanning window (see http://en.wikipedia.org/wiki/Hann_function) * to each image to remove possible edge effects. This window is cached until * the array size changes to speed up processing time. *
- Next it computes the forward DFTs of each source array: *
mathbf(G)_a = mathcal(F)(src_1), mathbf(G)_b = mathcal(F)(src_2)
* *where mathcal(F) is the forward DFT.
*-
*
- It then computes the cross-power spectrum of each frequency domain * array: *
R = (mathbf(G)_a mathbf(G)_b^*)/(|mathbf(G)_a mathbf(G)_b^*|)
* *-
*
- Next the cross-correlation is converted back into the time domain via * the inverse DFT: *
r = mathcal(F)^(-1)(R)
* *-
*
- Finally, it computes the peak location and computes a 5x5 weighted * centroid around the peak to achieve sub-pixel accuracy. *
(Delta x, Delta y) = weightedCentroid (arg max_((x, y))(r))
* *-
*
- If non-zero, the response parameter is computed as the sum of the * elements of r within the 5x5 centroid around the peak location. It is * normalized to a maximum of 1 (meaning there is a single peak) and will be * smaller when there are multiple peaks. *
Performs a point-in-contour test.
* *The function determines whether the point is inside a contour, outside, or
* lies on an edge (or coincides with a vertex). It returns positive (inside),
* negative (outside), or zero (on an edge) value, correspondingly. When
* measureDist=false
, the return value is +1, -1, and 0,
* respectively. Otherwise, the return value is a signed distance between the
* point and the nearest contour edge.
See below a sample output of the function where each image pixel is tested * against the contour.
 *
 * @param contour Input contour.
 * @param pt Point tested against the contour.
 * @param measureDist If true, the function estimates the signed distance from
 * the point to the nearest contour edge. Otherwise, the function only checks if
 * the point is inside a contour or not.
 *
 * @see org.opencv.imgproc.Imgproc.pointPolygonTest
 */
public static double pointPolygonTest(MatOfPoint2f contour, Point pt, boolean measureDist)
{
Mat contour_mat = contour;
double retVal = pointPolygonTest_0(contour_mat.nativeObj, pt.x, pt.y, measureDist);
return retVal;
}

//
// C++: void preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT)
//

/**
 * Calculates a feature map for corner detection.
* *The function calculates the complex spatial derivative-based function of the * source image
* *dst = (D_x src)^2 * D_(yy) src + (D_y src)^2 * D_(xx) src - 2 D_x src * * D_y src * D_(xy) src
* *where D_x,D_y are the first image derivatives,
* D_(xx),D_(yy) are the second image derivatives, and
* D_(xy) is the mixed derivative.
* The corners can be found as local maximums of the functions, as shown below:
*
// C++ code:
* *Mat corners, dilated_corners;
* *preCornerDetect(image, corners, 3);
* *// dilation with 3x3 rectangular structuring element
* *dilate(corners, dilated_corners, Mat(), 1);
* *Mat corner_mask = corners == dilated_corners;
 *
 * @param src Source single-channel 8-bit or floating-point image.
 * @param dst Output image that has the type CV_32F and the same
 * size as src.
* @param ksize Aperture size of the "Sobel".
* @param borderType Pixel extrapolation method. See "borderInterpolate".
*
* @see org.opencv.imgproc.Imgproc.preCornerDetect
*/
public static void preCornerDetect(Mat src, Mat dst, int ksize, int borderType)
{
preCornerDetect_0(src.nativeObj, dst.nativeObj, ksize, borderType);
return;
}
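A Java rendering of the C++ fragment above (not part of the auto-generated source; Core.compare is assumed available from org.opencv.core):

Mat image = Mat.zeros(100, 100, CvType.CV_8UC1);   // placeholder grayscale image
Mat corners = new Mat(), dilatedCorners = new Mat(), cornerMask = new Mat();
Imgproc.preCornerDetect(image, corners, 3);
Imgproc.dilate(corners, dilatedCorners, new Mat());             // 3x3 rectangular element by default
Core.compare(corners, dilatedCorners, cornerMask, Core.CMP_EQ); // corners == dilated_corners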
/**
* Calculates a feature map for corner detection.
 *
 * The function calculates the complex spatial derivative-based function of the
 * source image
 *
 *   dst = (D_x src)^2 * D_yy src + (D_y src)^2 * D_xx src - 2 * D_x src * D_y src * D_xy src
 *
 * where D_x, D_y are the first image derivatives, D_xx, D_yy are the second
 * image derivatives, and D_xy is the mixed derivative.
 * The corners can be found as local maxima of the function, as shown below:
*
// C++ code:
* *Mat corners, dilated_corners;
* *preCornerDetect(image, corners, 3);
* *// dilation with 3x3 rectangular structuring element
* *dilate(corners, dilated_corners, Mat(), 1);
* *Mat corner_mask = corners == dilated_corners;
 *
 * @param src Source single-channel 8-bit or floating-point image.
 * @param dst Output image that has the type CV_32F and the same size as src.
* @param ksize Aperture size of the "Sobel".
*
* @see org.opencv.imgproc.Imgproc.preCornerDetect
*/
public static void preCornerDetect(Mat src, Mat dst, int ksize)
{
preCornerDetect_1(src.nativeObj, dst.nativeObj, ksize);
return;
}
//
// C++: void pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
//
/**
* Blurs an image and downsamples it.
* *The function performs the downsampling step of the Gaussian pyramid * construction. First, it convolves the source image with the kernel:
 *
 *   1/256 * | 1  4  6  4  1 |
 *           | 4 16 24 16  4 |
 *           | 6 24 36 24  6 |
 *           | 4 16 24 16  4 |
 *           | 1  4  6  4  1 |
* *Then, it downsamples the image by rejecting even rows and columns.
* * @param src input image. * @param dst output image; it has the specified size and the same type as *src
.
* @param dstsize size of the output image; by default, it is computed as
* Size((src.cols+1)/2, (src.rows+1)/2)
, but in any case, the
* following conditions should be satisfied:
*
 *
 *   | dstsize.width*2 - src.cols | <= 2
 *   | dstsize.height*2 - src.rows | <= 2
 *
 * @param borderType a borderType
 *
 * @see org.opencv.imgproc.Imgproc.pyrDown
 */
public static void pyrDown(Mat src, Mat dst, Size dstsize, int borderType)
{
    pyrDown_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType);
    return;
}
/**
 * Blurs an image and downsamples it.
* *The function performs the downsampling step of the Gaussian pyramid * construction. First, it convolves the source image with the kernel:
 *
 *   1/256 * | 1  4  6  4  1 |
 *           | 4 16 24 16  4 |
 *           | 6 24 36 24  6 |
 *           | 4 16 24 16  4 |
 *           | 1  4  6  4  1 |
* *Then, it downsamples the image by rejecting even rows and columns.
* * @param src input image. * @param dst output image; it has the specified size and the same type as *src
.
* @param dstsize size of the output image; by default, it is computed as
* Size((src.cols+1)/2, (src.rows+1)/2)
, but in any case, the
* following conditions should be satisfied:
*
 *
 *   | dstsize.width*2 - src.cols | <= 2
 *   | dstsize.height*2 - src.rows | <= 2
 *
 * @see org.opencv.imgproc.Imgproc.pyrDown
 */
public static void pyrDown(Mat src, Mat dst, Size dstsize)
{
    pyrDown_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height);
    return;
}
/**
 * Blurs an image and downsamples it.
* *The function performs the downsampling step of the Gaussian pyramid * construction. First, it convolves the source image with the kernel:
 *
 *   1/256 * | 1  4  6  4  1 |
 *           | 4 16 24 16  4 |
 *           | 6 24 36 24  6 |
 *           | 4 16 24 16  4 |
 *           | 1  4  6  4  1 |
* *Then, it downsamples the image by rejecting even rows and columns.
* * @param src input image. * @param dst output image; it has the specified size and the same type as *src
.
*
* @see org.opencv.imgproc.Imgproc.pyrDown
*/
public static void pyrDown(Mat src, Mat dst)
{
pyrDown_2(src.nativeObj, dst.nativeObj);
return;
}
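// Usage sketch (illustrative only, not part of the generated bindings): building a few
// levels of a Gaussian pyramid with the default output size Size((src.cols+1)/2, (src.rows+1)/2).
// "src" is assumed to be an already-loaded image Mat.
//
//   List<Mat> pyramid = new ArrayList<Mat>();
//   pyramid.add(src);
//   for (int i = 0; i < 3; i++) {
//       Mat down = new Mat();
//       Imgproc.pyrDown(pyramid.get(i), down);
//       pyramid.add(down);
//   }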
//
// C++: void pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS,5,1))
//
/**
* Performs initial step of meanshift segmentation of an image.
* *The function implements the filtering stage of meanshift segmentation, that
* is, the output of the function is the filtered "posterized" image with color
* gradients and fine-grain texture flattened. At every pixel (X,Y)
* of the input image (or down-sized input image, see below) the function
* executes meanshift iterations, that is, the pixel (X,Y)
* neighborhood in the joint space-color hyperspace is considered:
(x,y): X- sp <= x <= X+ sp, Y- sp <= y <= Y+ sp, ||(R,G,B)-(r,g,b)|| <= * sr
* *where (R,G,B)
and (r,g,b)
are the vectors of color
* components at (X,Y)
and (x,y)
, respectively
* (though, the algorithm does not depend on the color space used, so any
* 3-component color space can be used instead). Over the neighborhood the
* average spatial value (X',Y')
and average color vector
* (R',G',B')
are found and they act as the neighborhood center on
* the next iteration:
(X,Y)~(X',Y'), (R,G,B)~(R',G',B').
 *
 * After the iterations are over, the color components of the initial pixel
 * (that is, the pixel from which the iterations started) are set to the final
 * value (the average color at the last iteration):
* *I(X,Y) <- (R*,G*,B*)
* *When maxLevel > 0
, the gaussian pyramid of maxLevel+1
* levels is built, and the above procedure is run on the smallest layer first.
* After that, the results are propagated to the larger layer and the iterations
* are run again only on those pixels where the layer colors differ by more than
* sr
from the lower-resolution layer of the pyramid. That makes
* boundaries of color regions sharper. Note that the results will be actually
* different from the ones obtained by running the meanshift procedure on the
* whole original image (i.e. when maxLevel==0
).
 * Note:
 * - An example using mean-shift image segmentation can be found at
 *   opencv_source_code/samples/cpp/meanshift_segmentation.cpp
Performs initial step of meanshift segmentation of an image.
* *The function implements the filtering stage of meanshift segmentation, that
* is, the output of the function is the filtered "posterized" image with color
* gradients and fine-grain texture flattened. At every pixel (X,Y)
* of the input image (or down-sized input image, see below) the function
* executes meanshift iterations, that is, the pixel (X,Y)
* neighborhood in the joint space-color hyperspace is considered:
(x,y): X- sp <= x <= X+ sp, Y- sp <= y <= Y+ sp, ||(R,G,B)-(r,g,b)|| <= * sr
* *where (R,G,B)
and (r,g,b)
are the vectors of color
* components at (X,Y)
and (x,y)
, respectively
* (though, the algorithm does not depend on the color space used, so any
* 3-component color space can be used instead). Over the neighborhood the
* average spatial value (X',Y')
and average color vector
* (R',G',B')
are found and they act as the neighborhood center on
* the next iteration:
(X,Y)~(X',Y'), (R,G,B)~(R',G',B').
 *
 * After the iterations are over, the color components of the initial pixel
 * (that is, the pixel from which the iterations started) are set to the final
 * value (the average color at the last iteration):
* *I(X,Y) <- (R*,G*,B*)
* *When maxLevel > 0
, the gaussian pyramid of maxLevel+1
* levels is built, and the above procedure is run on the smallest layer first.
* After that, the results are propagated to the larger layer and the iterations
* are run again only on those pixels where the layer colors differ by more than
* sr
from the lower-resolution layer of the pyramid. That makes
* boundaries of color regions sharper. Note that the results will be actually
* different from the ones obtained by running the meanshift procedure on the
* whole original image (i.e. when maxLevel==0
).
 * Note:
 * - An example using mean-shift image segmentation can be found at
 *   opencv_source_code/samples/cpp/meanshift_segmentation.cpp
Upsamples an image and then blurs it.
* *The function performs the upsampling step of the Gaussian pyramid * construction, though it can actually be used to construct the Laplacian * pyramid. First, it upsamples the source image by injecting even zero rows and * columns and then convolves the result with the same kernel as in "pyrDown" * multiplied by 4.
 *
 * Note:
 * - (Python) An example of Laplacian Pyramid construction and merging can
 *   be found at opencv_source_code/samples/python2/lappyr.py
 *
 * @param src input image.
 * @param dst output image; it has the specified size and the same type as
 * src.
* @param dstsize size of the output image; by default, it is computed as
 * Size(src.cols*2, src.rows*2), but in any case, the following
* conditions should be satisfied:
*
 *
 *   | dstsize.width - src.cols*2 | <= (dstsize.width mod 2)
 *   | dstsize.height - src.rows*2 | <= (dstsize.height mod 2)
 *
 * @param borderType a borderType
 *
 * @see org.opencv.imgproc.Imgproc.pyrUp
 */
public static void pyrUp(Mat src, Mat dst, Size dstsize, int borderType)
{
    pyrUp_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType);
    return;
}
/**
 * Upsamples an image and then blurs it.
* *The function performs the upsampling step of the Gaussian pyramid * construction, though it can actually be used to construct the Laplacian * pyramid. First, it upsamples the source image by injecting even zero rows and * columns and then convolves the result with the same kernel as in "pyrDown" * multiplied by 4.
 *
 * Note:
 * - (Python) An example of Laplacian Pyramid construction and merging can
 *   be found at opencv_source_code/samples/python2/lappyr.py
 *
 * @param src input image.
 * @param dst output image; it has the specified size and the same type as
 * src.
* @param dstsize size of the output image; by default, it is computed as
 * Size(src.cols*2, src.rows*2), but in any case, the following
* conditions should be satisfied:
*
 *
 *   | dstsize.width - src.cols*2 | <= (dstsize.width mod 2)
 *   | dstsize.height - src.rows*2 | <= (dstsize.height mod 2)
 *
 * @see org.opencv.imgproc.Imgproc.pyrUp
 */
public static void pyrUp(Mat src, Mat dst, Size dstsize)
{
    pyrUp_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height);
    return;
}
/**
 * Upsamples an image and then blurs it.
* *The function performs the upsampling step of the Gaussian pyramid * construction, though it can actually be used to construct the Laplacian * pyramid. First, it upsamples the source image by injecting even zero rows and * columns and then convolves the result with the same kernel as in "pyrDown" * multiplied by 4.
 *
 * Note:
 * - (Python) An example of Laplacian Pyramid construction and merging can
 *   be found at opencv_source_code/samples/python2/lappyr.py
 *
 * @param src input image.
 * @param dst output image; it has the specified size and the same type as
 * src.
*
* @see org.opencv.imgproc.Imgproc.pyrUp
*/
public static void pyrUp(Mat src, Mat dst)
{
pyrUp_2(src.nativeObj, dst.nativeObj);
return;
}
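// Usage sketch (illustrative only, not part of the generated bindings): one Laplacian
// pyramid level, computed as the difference between an image and the result of pyrDown
// followed by pyrUp. Core is org.opencv.core.Core; "src" is an already-loaded Mat
// (for a full pyramid you would typically convert to a signed/float type first).
//
//   Mat down = new Mat(), up = new Mat(), laplacianLevel = new Mat();
//   Imgproc.pyrDown(src, down);
//   Imgproc.pyrUp(down, up, src.size());   // upsample back to the original size
//   Core.subtract(src, up, laplacianLevel);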
//
// C++: void remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
//
/**
* Applies a generic geometrical transformation to an image.
* *The function remap
transforms the source image using the
* specified map:
dst(x,y) = src(map_x(x,y),map_y(x,y))
* *where values of pixels with non-integer coordinates are computed using one of
* available interpolation methods.
* map_x and map_y can be encoded as separate floating-point
* maps in map_1 and map_2 respectively, or interleaved
* floating-point maps of (x,y) in map_1, or fixed-point maps
* created by using "convertMaps". The reason you might want to convert from
* floating to fixed-point representations of a map is that they can yield much
* faster (~2x) remapping operations. In the converted case, map_1
* contains pairs (cvFloor(x), cvFloor(y))
and map_2
* contains indices in a table of interpolation coefficients.
This function cannot operate in-place.
 *
 * @param src Source image.
 * @param dst Destination image. It has the same size as map1 and the same
 * type as src.
* @param map1 The first map of either (x,y)
points or just
* x
values having the type CV_16SC2
,
* CV_32FC1
, or CV_32FC2
. See "convertMaps" for
* details on converting a floating point representation to fixed-point for
* speed.
* @param map2 The second map of y
values having the type
* CV_16UC1
, CV_32FC1
, or none (empty map if
* map1
is (x,y)
points), respectively.
* @param interpolation Interpolation method (see "resize"). The method
* INTER_AREA
is not supported by this function.
* @param borderMode Pixel extrapolation method (see "borderInterpolate"). When
* borderMode=BORDER_TRANSPARENT
, it means that the pixels in the
* destination image that corresponds to the "outliers" in the source image are
* not modified by the function.
* @param borderValue Value used in case of a constant border. By default, it is
* 0.
*
* @see org.opencv.imgproc.Imgproc.remap
*/
public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue)
{
remap_0(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
return;
}
/**
* Applies a generic geometrical transformation to an image.
* *The function remap
transforms the source image using the
* specified map:
dst(x,y) = src(map_x(x,y),map_y(x,y))
* *where values of pixels with non-integer coordinates are computed using one of
* available interpolation methods.
* map_x and map_y can be encoded as separate floating-point
* maps in map_1 and map_2 respectively, or interleaved
* floating-point maps of (x,y) in map_1, or fixed-point maps
* created by using "convertMaps". The reason you might want to convert from
* floating to fixed-point representations of a map is that they can yield much
* faster (~2x) remapping operations. In the converted case, map_1
* contains pairs (cvFloor(x), cvFloor(y))
and map_2
* contains indices in a table of interpolation coefficients.
This function cannot operate in-place.
 *
 * @param src Source image.
 * @param dst Destination image. It has the same size as map1 and the same
 * type as src.
* @param map1 The first map of either (x,y)
points or just
* x
values having the type CV_16SC2
,
* CV_32FC1
, or CV_32FC2
. See "convertMaps" for
* details on converting a floating point representation to fixed-point for
* speed.
* @param map2 The second map of y
values having the type
* CV_16UC1
, CV_32FC1
, or none (empty map if
* map1
is (x,y)
points), respectively.
* @param interpolation Interpolation method (see "resize"). The method
* INTER_AREA
is not supported by this function.
*
* @see org.opencv.imgproc.Imgproc.remap
*/
public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation)
{
remap_1(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation);
return;
}
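// Usage sketch (illustrative only, not part of the generated bindings): horizontal flip
// implemented with remap and two CV_32FC1 maps. CvType is org.opencv.core.CvType;
// "src" is an already-loaded Mat. Per-pixel put() is slow but keeps the sketch simple.
//
//   Mat mapX = new Mat(src.size(), CvType.CV_32FC1);
//   Mat mapY = new Mat(src.size(), CvType.CV_32FC1);
//   for (int y = 0; y < src.rows(); y++) {
//       for (int x = 0; x < src.cols(); x++) {
//           mapX.put(y, x, src.cols() - 1 - x);   // mirror the x coordinate
//           mapY.put(y, x, y);                    // keep the y coordinate
//       }
//   }
//   Mat dst = new Mat();
//   Imgproc.remap(src, dst, mapX, mapY, Imgproc.INTER_LINEAR);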
//
// C++: void resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR)
//
/**
* Resizes an image.
* *The function resize
resizes the image src
down to
 * or up to the specified size. Note that the initial dst
type or
* size are not taken into account. Instead, the size and type are derived from
* the src
,dsize
,fx
, and fy
.
* If you want to resize src
so that it fits the pre-created
* dst
, you may call the function as follows:
// C++ code:
* *// explicitly specify dsize=dst.size(); fx and fy will be computed from that.
* *resize(src, dst, dst.size(), 0, 0, interpolation);
* *If you want to decimate the image by factor of 2 in each direction, you can * call the function this way:
* *// specify fx and fy and let the function compute the destination image size.
* *resize(src, dst, Size(), 0.5, 0.5, interpolation);
* *To shrink an image, it will generally look best with CV_INTER_AREA * interpolation, whereas to enlarge an image, it will generally look best with * CV_INTER_CUBIC (slow) or CV_INTER_LINEAR (faster but still looks OK). *
* * @param src input image. * @param dst output image; it has the sizedsize
(when it is
* non-zero) or the size computed from src.size()
, fx
,
* and fy
; the type of dst
is the same as of
* src
.
* @param dsize output image size; if it equals zero, it is computed as:
*
* dsize = Size(round(fx*src.cols), round(fy*src.rows))
* *Either dsize
or both fx
and fy
must be
* non-zero.
 * @param fx scale factor along the horizontal axis; when it equals 0, it is
 * computed as
 *
 * (double)dsize.width/src.cols
 * @param fy scale factor along the vertical axis; when it equals 0, it is
 * computed as
 *
 * (double)dsize.height/src.rows
* @param interpolation interpolation method: *-
*
- INTER_NEAREST - a nearest-neighbor interpolation *
- INTER_LINEAR - a bilinear interpolation (used by default) *
- INTER_AREA - resampling using pixel area relation. It may be a
* preferred method for image decimation, as it gives moire'-free results. But
* when the image is zoomed, it is similar to the
INTER_NEAREST
 * method.
 * - INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood
- INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood *
Resizes an image.
* *The function resize
resizes the image src
down to
 * or up to the specified size. Note that the initial dst
type or
* size are not taken into account. Instead, the size and type are derived from
* the src
,dsize
,fx
, and fy
.
* If you want to resize src
so that it fits the pre-created
* dst
, you may call the function as follows:
// C++ code:
* *// explicitly specify dsize=dst.size(); fx and fy will be computed from that.
* *resize(src, dst, dst.size(), 0, 0, interpolation);
* *If you want to decimate the image by factor of 2 in each direction, you can * call the function this way:
* *// specify fx and fy and let the function compute the destination image size.
* *resize(src, dst, Size(), 0.5, 0.5, interpolation);
* *To shrink an image, it will generally look best with CV_INTER_AREA * interpolation, whereas to enlarge an image, it will generally look best with * CV_INTER_CUBIC (slow) or CV_INTER_LINEAR (faster but still looks OK). *
* * @param src input image. * @param dst output image; it has the sizedsize
(when it is
* non-zero) or the size computed from src.size()
, fx
,
* and fy
; the type of dst
is the same as of
* src
.
* @param dsize output image size; if it equals zero, it is computed as:
*
* dsize = Size(round(fx*src.cols), round(fy*src.rows))
* *Either dsize
or both fx
and fy
must be
* non-zero.
Applies a separable linear filter to an image.
* *The function applies a separable linear filter to the image. That is, first,
* every row of src
is filtered with the 1D kernel
* kernelX
. Then, every column of the result is filtered with the
* 1D kernel kernelY
. The final result shifted by delta
* is stored in dst
.
src
.
* @param ddepth Destination image depth. The following combination of
* src.depth()
and ddepth
are supported:
* -
*
-
src.depth()
=CV_8U
,ddepth
= * -1/CV_16S
/CV_32F
/CV_64F
* -
src.depth()
=CV_16U
/CV_16S
, *ddepth
= -1/CV_32F
/CV_64F
* -
src.depth()
=CV_32F
,ddepth
= * -1/CV_32F
/CV_64F
* -
src.depth()
=CV_64F
,ddepth
= * -1/CV_64F
*
when ddepth=-1
, the destination image will have the same depth
* as the source.
Applies a separable linear filter to an image.
* *The function applies a separable linear filter to the image. That is, first,
* every row of src
is filtered with the 1D kernel
* kernelX
. Then, every column of the result is filtered with the
* 1D kernel kernelY
. The final result shifted by delta
* is stored in dst
.
src
.
* @param ddepth Destination image depth. The following combination of
* src.depth()
and ddepth
are supported:
* -
*
-
src.depth()
=CV_8U
,ddepth
= * -1/CV_16S
/CV_32F
/CV_64F
* -
src.depth()
=CV_16U
/CV_16S
, *ddepth
= -1/CV_32F
/CV_64F
* -
src.depth()
=CV_32F
,ddepth
= * -1/CV_32F
/CV_64F
* -
src.depth()
=CV_64F
,ddepth
= * -1/CV_64F
*
when ddepth=-1
, the destination image will have the same depth
* as the source.
Applies a separable linear filter to an image.
* *The function applies a separable linear filter to the image. That is, first,
* every row of src
is filtered with the 1D kernel
* kernelX
. Then, every column of the result is filtered with the
* 1D kernel kernelY
. The final result shifted by delta
* is stored in dst
.
src
.
* @param ddepth Destination image depth. The following combination of
* src.depth()
and ddepth
are supported:
* -
*
-
src.depth()
=CV_8U
,ddepth
= * -1/CV_16S
/CV_32F
/CV_64F
* -
src.depth()
=CV_16U
/CV_16S
, *ddepth
= -1/CV_32F
/CV_64F
* -
src.depth()
=CV_32F
,ddepth
= * -1/CV_32F
/CV_64F
* -
src.depth()
=CV_64F
,ddepth
= * -1/CV_64F
*
when ddepth=-1
, the destination image will have the same depth
* as the source.
Applies a fixed-level threshold to each array element.
* *The function applies fixed-level thresholding to a single-channel array. The
* function is typically used to get a bi-level (binary) image out of a
* grayscale image ("compare" could be also used for this purpose) or for
 * removing noise, that is, filtering out pixels with too small or too large
* values. There are several types of thresholding supported by the function.
* They are determined by type
:
-
*
- THRESH_BINARY *
dst(x,y) = maxval if src(x,y) > thresh; 0 otherwise
* *-
*
- THRESH_BINARY_INV *
dst(x,y) = 0 if src(x,y) > thresh; maxval otherwise
* *-
*
- THRESH_TRUNC *
dst(x,y) = threshold if src(x,y) > thresh; src(x,y) otherwise
* *-
*
- THRESH_TOZERO *
dst(x,y) = src(x,y) if src(x,y) > thresh; 0 otherwise
* *-
*
- THRESH_TOZERO_INV *
dst(x,y) = 0 if src(x,y) > thresh; src(x,y) otherwise
* *Also, the special value THRESH_OTSU
may be combined with one of
* the above values. In this case, the function determines the optimal threshold
 * value using Otsu's algorithm and uses it instead of the specified
* thresh
.
* The function returns the computed threshold value.
 * Currently, Otsu's method is implemented only for 8-bit images.
 *
 * @param src input array (single-channel, 8-bit or 32-bit floating point).
 * @param dst output array of the same size and type as src.
* @param thresh threshold value.
* @param maxval maximum value to use with the THRESH_BINARY
and
* THRESH_BINARY_INV
thresholding types.
* @param type thresholding type (see the details below).
*
* @see org.opencv.imgproc.Imgproc.threshold
* @see org.opencv.imgproc.Imgproc#findContours
* @see org.opencv.core.Core#max
* @see org.opencv.imgproc.Imgproc#adaptiveThreshold
* @see org.opencv.core.Core#compare
* @see org.opencv.core.Core#min
*/
public static double threshold(Mat src, Mat dst, double thresh, double maxval, int type)
{
double retVal = threshold_0(src.nativeObj, dst.nativeObj, thresh, maxval, type);
return retVal;
}
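// Usage sketch (illustrative only, not part of the generated bindings): Otsu binarization
// of an 8-bit grayscale image. Assumes the 2.4-era org.opencv.highgui.Highgui.imread is
// available; the file name is a hypothetical placeholder.
//
//   Mat gray = Highgui.imread("input.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
//   Mat binary = new Mat();
//   double otsuThresh = Imgproc.threshold(gray, binary, 0, 255,
//           Imgproc.THRESH_BINARY + Imgproc.THRESH_OTSU);
//   // otsuThresh holds the threshold value chosen by Otsu's method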
//
// C++: void undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat())
//
/**
* Transforms an image to compensate for lens distortion.
* *The function transforms an image to compensate radial and tangential lens * distortion.
* *The function is simply a combination of "initUndistortRectifyMap" (with unity
* R
) and "remap" (with bilinear interpolation). See the former
* function for details of the transformation being performed.
 * Those pixels in the destination image for which there are no corresponding
 * pixels in the source image are filled with zeros (black color).
* *A particular subset of the source image that will be visible in the corrected
* image can be regulated by newCameraMatrix
. You can use
* "getOptimalNewCameraMatrix" to compute the appropriate newCameraMatrix
* depending on your requirements.
The camera matrix and the distortion parameters can be determined using * "calibrateCamera". If the resolution of images is different from the * resolution used at the calibration stage, f_x, f_y, c_x and * c_y need to be scaled accordingly, while the distortion coefficients * remain the same.
* * @param src Input (distorted) image. * @param dst Output (corrected) image that has the same size and type as *src
.
* @param cameraMatrix Input camera matrix A =
* |f_x 0 c_x| * |0 f_y c_y| * |0 0 1| *
. * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is * NULL/empty, the zero distortion coefficients are assumed. * @param newCameraMatrix Camera matrix of the distorted image. By default, it * is the same ascameraMatrix
but you may additionally scale and
* shift the result by using a different matrix.
*
* @see org.opencv.imgproc.Imgproc.undistort
*/
public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix)
{
undistort_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, newCameraMatrix.nativeObj);
return;
}
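// Usage sketch (illustrative only, not part of the generated bindings): undistorting an
// image with a camera matrix and distortion coefficients obtained from calibrateCamera.
// CvType is org.opencv.core.CvType; the numeric values and the "distorted" Mat are
// hypothetical placeholders.
//
//   Mat cameraMatrix = new Mat(3, 3, CvType.CV_64F);
//   cameraMatrix.put(0, 0, 800, 0, 320, 0, 800, 240, 0, 0, 1);   // fx 0 cx / 0 fy cy / 0 0 1
//   Mat distCoeffs = new Mat(1, 5, CvType.CV_64F);
//   distCoeffs.put(0, 0, -0.2, 0.05, 0, 0, 0);                   // k1, k2, p1, p2, k3
//   Mat undistorted = new Mat();
//   Imgproc.undistort(distorted, undistorted, cameraMatrix, distCoeffs);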
/**
* Transforms an image to compensate for lens distortion.
* *The function transforms an image to compensate radial and tangential lens * distortion.
* *The function is simply a combination of "initUndistortRectifyMap" (with unity
* R
) and "remap" (with bilinear interpolation). See the former
* function for details of the transformation being performed.
 * Those pixels in the destination image for which there are no corresponding
 * pixels in the source image are filled with zeros (black color).
* *A particular subset of the source image that will be visible in the corrected
* image can be regulated by newCameraMatrix
. You can use
* "getOptimalNewCameraMatrix" to compute the appropriate newCameraMatrix
* depending on your requirements.
The camera matrix and the distortion parameters can be determined using * "calibrateCamera". If the resolution of images is different from the * resolution used at the calibration stage, f_x, f_y, c_x and * c_y need to be scaled accordingly, while the distortion coefficients * remain the same.
* * @param src Input (distorted) image. * @param dst Output (corrected) image that has the same size and type as *src
.
* @param cameraMatrix Input camera matrix A =
* |f_x 0 c_x| * |0 f_y c_y| * |0 0 1| *
 * .
 * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1,
 * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is
 * NULL/empty, the zero distortion coefficients are assumed.
 *
 * @see org.opencv.imgproc.Imgproc.undistort
 */
public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs)
{
    undistort_1(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
    return;
}
//
// C++: void undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat())
//
/**
 * Computes the ideal point coordinates from the observed point coordinates.
* *The function is similar to "undistort" and "initUndistortRectifyMap" but it
* operates on a sparse set of points instead of a raster image. Also the
 * function performs a reverse transformation to "projectPoints". In case of a 3D
* object, it does not reconstruct its 3D coordinates, but for a planar object,
* it does, up to a translation vector, if the proper R
is
* specified.
*
// C++ code:
* *// (u,v) is the input point, (u', v') is the output point
* *// camera_matrix=[fx 0 cx; 0 fy cy; 0 0 1]
* *// P=[fx' 0 cx' tx; 0 fy' cy' ty; 0 0 1 tz]
* *x" = (u - cx)/fx
* *y" = (v - cy)/fy
* *(x',y') = undistort(x",y",dist_coeffs)
* *[X,Y,W]T = R*[x' y' 1]T
* *x = X/W, y = Y/W
* *// only performed if P=[fx' 0 cx' [tx]; 0 fy' cy' [ty]; 0 0 1 [tz]] is * specified
* *u' = x*fx' + cx'
* *v' = y*fy' + cy',
* *where undistort()
is an approximate iterative algorithm that
* estimates the normalized original point coordinates out of the normalized
* distorted point coordinates ("normalized" means that the coordinates do not
* depend on the camera matrix).
*
The function can be used for both a stereo camera head or a monocular camera * (when R is empty).
* * @param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or * CV_64FC2). * @param dst Output ideal point coordinates after undistortion and reverse * perspective transformation. If matrixP
is identity or omitted,
* dst
will contain normalized point coordinates.
* @param cameraMatrix Camera matrix
* |f_x 0 c_x| * |0 f_y c_y| * |0 0 1| *
. * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1, * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is * NULL/empty, the zero distortion coefficients are assumed. * @param R Rectification transformation in the object space (3x3 matrix). *R1
or R2
computed by "stereoRectify" can be passed
* here. If the matrix is empty, the identity transformation is used.
* @param P New camera matrix (3x3) or new projection matrix (3x4).
* P1
or P2
computed by "stereoRectify" can be passed
* here. If the matrix is empty, the identity new camera matrix is used.
*
* @see org.opencv.imgproc.Imgproc.undistortPoints
*/
public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P)
{
Mat src_mat = src;
Mat dst_mat = dst;
undistortPoints_0(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, P.nativeObj);
return;
}
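// Usage sketch (illustrative only, not part of the generated bindings): undistorting a
// handful of pixel coordinates. cameraMatrix and distCoeffs are assumed to come from a
// prior calibration (see the undistort sketch above); with no R/P supplied the output is
// in normalized (camera-matrix-free) coordinates.
//
//   MatOfPoint2f observed = new MatOfPoint2f(new Point(10, 20), new Point(300, 200));
//   MatOfPoint2f ideal = new MatOfPoint2f();
//   Imgproc.undistortPoints(observed, ideal, cameraMatrix, distCoeffs);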
/**
* Computes the ideal point coordinates from the observed point coordinates.
* *The function is similar to "undistort" and "initUndistortRectifyMap" but it
* operates on a sparse set of points instead of a raster image. Also the
 * function performs a reverse transformation to "projectPoints". In case of a 3D
* object, it does not reconstruct its 3D coordinates, but for a planar object,
* it does, up to a translation vector, if the proper R
is
* specified.
*
// C++ code:
* *// (u,v) is the input point, (u', v') is the output point
* *// camera_matrix=[fx 0 cx; 0 fy cy; 0 0 1]
* *// P=[fx' 0 cx' tx; 0 fy' cy' ty; 0 0 1 tz]
* *x" = (u - cx)/fx
* *y" = (v - cy)/fy
* *(x',y') = undistort(x",y",dist_coeffs)
* *[X,Y,W]T = R*[x' y' 1]T
* *x = X/W, y = Y/W
* *// only performed if P=[fx' 0 cx' [tx]; 0 fy' cy' [ty]; 0 0 1 [tz]] is * specified
* *u' = x*fx' + cx'
* *v' = y*fy' + cy',
* *where undistort()
is an approximate iterative algorithm that
* estimates the normalized original point coordinates out of the normalized
* distorted point coordinates ("normalized" means that the coordinates do not
* depend on the camera matrix).
*
The function can be used for both a stereo camera head or a monocular camera * (when R is empty).
* * @param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or * CV_64FC2). * @param dst Output ideal point coordinates after undistortion and reverse * perspective transformation. If matrixP
is identity or omitted,
* dst
will contain normalized point coordinates.
* @param cameraMatrix Camera matrix
* |f_x 0 c_x| * |0 f_y c_y| * |0 0 1| *
 * .
 * @param distCoeffs Input vector of distortion coefficients (k_1, k_2, p_1,
 * p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements. If the vector is
 * NULL/empty, the zero distortion coefficients are assumed.
 *
 * @see org.opencv.imgproc.Imgproc.undistortPoints
 */
public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs)
{
    Mat src_mat = src;
    Mat dst_mat = dst;
    undistortPoints_1(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
    return;
}
//
// C++: void warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
//
/**
 * Applies an affine transformation to an image.
* *The function warpAffine
transforms the source image using the
* specified matrix:
dst(x,y) = src(M_11 x + M_12 y + M_13, M_21 x + M_22 y + M_23)
* *when the flag WARP_INVERSE_MAP
is set. Otherwise, the
* transformation is first inverted with "invertAffineTransform" and then put in
* the formula above instead of M
.
* The function cannot operate in-place.
Note: cvGetQuadrangleSubPix
is similar to cvWarpAffine
,
* but the outliers are extrapolated using replication border mode.
 *
 * @param src input image.
 * @param dst output image that has the size dsize and the same type as
 * src.
 * @param M 2x3 transformation matrix.
* @param dsize size of the output image.
* @param flags combination of interpolation methods (see "resize") and the
* optional flag WARP_INVERSE_MAP
that means that M
is
* the inverse transformation (dst->src).
* @param borderMode pixel extrapolation method (see "borderInterpolate"); when
* borderMode=BORDER_TRANSPARENT
, it means that the pixels in the
* destination image corresponding to the "outliers" in the source image are not
* modified by the function.
* @param borderValue value used in case of a constant border; by default, it is
* 0.
*
* @see org.opencv.imgproc.Imgproc.warpAffine
* @see org.opencv.imgproc.Imgproc#remap
* @see org.opencv.imgproc.Imgproc#warpPerspective
* @see org.opencv.imgproc.Imgproc#getRectSubPix
* @see org.opencv.imgproc.Imgproc#resize
* @see org.opencv.core.Core#transform
*/
public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue)
{
warpAffine_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
return;
}
/**
* Applies an affine transformation to an image.
* *The function warpAffine
transforms the source image using the
* specified matrix:
dst(x,y) = src(M_11 x + M_12 y + M_13, M_21 x + M_22 y + M_23)
* *when the flag WARP_INVERSE_MAP
is set. Otherwise, the
* transformation is first inverted with "invertAffineTransform" and then put in
* the formula above instead of M
.
* The function cannot operate in-place.
Note: cvGetQuadrangleSubPix
is similar to cvWarpAffine
,
* but the outliers are extrapolated using replication border mode.
 *
 * @param src input image.
 * @param dst output image that has the size dsize and the same type as
 * src.
 * @param M 2x3 transformation matrix.
* @param dsize size of the output image.
* @param flags combination of interpolation methods (see "resize") and the
* optional flag WARP_INVERSE_MAP
that means that M
is
* the inverse transformation (dst->src).
*
* @see org.opencv.imgproc.Imgproc.warpAffine
* @see org.opencv.imgproc.Imgproc#remap
* @see org.opencv.imgproc.Imgproc#warpPerspective
* @see org.opencv.imgproc.Imgproc#getRectSubPix
* @see org.opencv.imgproc.Imgproc#resize
* @see org.opencv.core.Core#transform
*/
public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags)
{
warpAffine_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags);
return;
}
/**
* Applies an affine transformation to an image.
* *The function warpAffine
transforms the source image using the
* specified matrix:
dst(x,y) = src(M_11 x + M_12 y + M_13, M_21 x + M_22 y + M_23)
* *when the flag WARP_INVERSE_MAP
is set. Otherwise, the
* transformation is first inverted with "invertAffineTransform" and then put in
* the formula above instead of M
.
* The function cannot operate in-place.
Note: cvGetQuadrangleSubPix
is similar to cvWarpAffine
,
* but the outliers are extrapolated using replication border mode.
 *
 * @param src input image.
 * @param dst output image that has the size dsize and the same type as
 * src.
 * @param M 2x3 transformation matrix.
* @param dsize size of the output image.
*
* @see org.opencv.imgproc.Imgproc.warpAffine
* @see org.opencv.imgproc.Imgproc#remap
* @see org.opencv.imgproc.Imgproc#warpPerspective
* @see org.opencv.imgproc.Imgproc#getRectSubPix
* @see org.opencv.imgproc.Imgproc#resize
* @see org.opencv.core.Core#transform
*/
public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize)
{
warpAffine_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height);
return;
}
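// Usage sketch (illustrative only, not part of the generated bindings): rotating an image
// 30 degrees about its center with a 2x3 matrix from Imgproc.getRotationMatrix2D.
// "src" is an already-loaded Mat.
//
//   Point center = new Point(src.cols() / 2.0, src.rows() / 2.0);
//   Mat rot = Imgproc.getRotationMatrix2D(center, 30, 1.0);   // angle in degrees, scale 1
//   Mat rotated = new Mat();
//   Imgproc.warpAffine(src, rotated, rot, src.size());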
//
// C++: void warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
//
/**
* Applies a perspective transformation to an image.
* *The function warpPerspective
transforms the source image using
* the specified matrix:
dst(x,y) = src((M_11 x + M_12 y + M_13)/(M_31 x + M_32 y + M_33),
               (M_21 x + M_22 y + M_23)/(M_31 x + M_32 y + M_33))
* *when the flag WARP_INVERSE_MAP
is set. Otherwise, the
* transformation is first inverted with "invert" and then put in the formula
* above instead of M
.
* The function cannot operate in-place.
 *
 * @param src input image.
 * @param dst output image that has the size dsize and the same type as
 * src.
 * @param M 3x3 transformation matrix.
* @param dsize size of the output image.
* @param flags combination of interpolation methods (INTER_LINEAR
* or INTER_NEAREST
) and the optional flag WARP_INVERSE_MAP
,
* that sets M
as the inverse transformation (dst->src).
* @param borderMode pixel extrapolation method (BORDER_CONSTANT
or
* BORDER_REPLICATE
).
* @param borderValue value used in case of a constant border; by default, it
* equals 0.
*
* @see org.opencv.imgproc.Imgproc.warpPerspective
* @see org.opencv.imgproc.Imgproc#warpAffine
* @see org.opencv.imgproc.Imgproc#remap
* @see org.opencv.core.Core#perspectiveTransform
* @see org.opencv.imgproc.Imgproc#getRectSubPix
* @see org.opencv.imgproc.Imgproc#resize
*/
public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue)
{
warpPerspective_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]);
return;
}
/**
* Applies a perspective transformation to an image.
* *The function warpPerspective
transforms the source image using
* the specified matrix:
dst(x,y) = src((M_11 x + M_12 y + M_13)/(M_31 x + M_32 y + M_33),
               (M_21 x + M_22 y + M_23)/(M_31 x + M_32 y + M_33))
* *when the flag WARP_INVERSE_MAP
is set. Otherwise, the
* transformation is first inverted with "invert" and then put in the formula
* above instead of M
.
* The function cannot operate in-place.
 *
 * @param src input image.
 * @param dst output image that has the size dsize and the same type as
 * src.
 * @param M 3x3 transformation matrix.
* @param dsize size of the output image.
* @param flags combination of interpolation methods (INTER_LINEAR
* or INTER_NEAREST
) and the optional flag WARP_INVERSE_MAP
,
* that sets M
as the inverse transformation (dst->src).
*
* @see org.opencv.imgproc.Imgproc.warpPerspective
* @see org.opencv.imgproc.Imgproc#warpAffine
* @see org.opencv.imgproc.Imgproc#remap
* @see org.opencv.core.Core#perspectiveTransform
* @see org.opencv.imgproc.Imgproc#getRectSubPix
* @see org.opencv.imgproc.Imgproc#resize
*/
public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags)
{
warpPerspective_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags);
return;
}
/**
* Applies a perspective transformation to an image.
* *The function warpPerspective
transforms the source image using
* the specified matrix:
dst(x,y) = src((M_11 x + M_12 y + M_13)/(M_31 x + M_32 y + M_33),
               (M_21 x + M_22 y + M_23)/(M_31 x + M_32 y + M_33))
* *when the flag WARP_INVERSE_MAP
is set. Otherwise, the
* transformation is first inverted with "invert" and then put in the formula
* above instead of M
.
* The function cannot operate in-place.
 *
 * @param src input image.
 * @param dst output image that has the size dsize and the same type as
 * src.
 * @param M 3x3 transformation matrix.
* @param dsize size of the output image.
*
* @see org.opencv.imgproc.Imgproc.warpPerspective
* @see org.opencv.imgproc.Imgproc#warpAffine
* @see org.opencv.imgproc.Imgproc#remap
* @see org.opencv.core.Core#perspectiveTransform
* @see org.opencv.imgproc.Imgproc#getRectSubPix
* @see org.opencv.imgproc.Imgproc#resize
*/
public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize)
{
warpPerspective_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height);
return;
}
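// Usage sketch (illustrative only, not part of the generated bindings): warping a
// quadrilateral region to a 300x300 square. The corner coordinates are hypothetical;
// getPerspectiveTransform accepts the two 4-point sets as Mats (MatOfPoint2f extends Mat).
//
//   MatOfPoint2f srcQuad = new MatOfPoint2f(
//           new Point(56, 65), new Point(368, 52), new Point(28, 387), new Point(389, 390));
//   MatOfPoint2f dstQuad = new MatOfPoint2f(
//           new Point(0, 0), new Point(300, 0), new Point(0, 300), new Point(300, 300));
//   Mat M = Imgproc.getPerspectiveTransform(srcQuad, dstQuad);
//   Mat warped = new Mat();
//   Imgproc.warpPerspective(src, warped, M, new Size(300, 300));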
//
// C++: void watershed(Mat image, Mat& markers)
//
/**
* Performs a marker-based image segmentation using the watershed algorithm.
* *The function implements one of the variants of watershed, non-parametric * marker-based segmentation algorithm, described in [Meyer92].
* *Before passing the image to the function, you have to roughly outline the
* desired regions in the image markers
with positive
* (>0
) indices. So, every region is represented as one or more
* connected components with the pixel values 1, 2, 3, and so on. Such markers
* can be retrieved from a binary mask using "findContours" and "drawContours"
* (see the watershed.cpp
demo). The markers are "seeds" of the
* future image regions. All the other pixels in markers
, whose
* relation to the outlined regions is not known and should be defined by the
* algorithm, should be set to 0's. In the function output, each pixel in
* markers is set to a value of the "seed" components or to -1 at boundaries
* between the regions.
Visual demonstration and usage example of the function can be found in the
* OpenCV samples directory (see the watershed.cpp
demo).
 * Note: Any two neighboring connected components are not necessarily separated
 * by a watershed boundary (-1 pixels); for example, they can touch each other
 * in the initial marker image passed to the function.
 *
 * Note:
 * - An example using the watershed algorithm can be found at
 *   opencv_source_code/samples/cpp/watershed.cpp
 * - (Python) An example using the watershed algorithm can be found at
 *   opencv_source_code/samples/python2/watershed.py
 *
 * @param image Input 8-bit 3-channel image.
 * @param markers Input/output 32-bit single-channel image (map) of markers. It
 * should have the same size as image.
*
* @see org.opencv.imgproc.Imgproc.watershed
* @see org.opencv.imgproc.Imgproc#findContours
*/
public static void watershed(Mat image, Mat markers)
{
watershed_0(image.nativeObj, markers.nativeObj);
return;
}
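// Usage sketch (illustrative only, not part of the generated bindings): seeding two
// regions by hand and running the watershed segmentation. In practice the markers are
// usually derived from findContours/drawContours as described above. Core is
// org.opencv.core.Core, CvType is org.opencv.core.CvType; "image" is an 8-bit 3-channel
// Mat and the seed coordinates are hypothetical.
//
//   Mat markers = Mat.zeros(image.size(), CvType.CV_32SC1);
//   Core.circle(markers, new Point(50, 50), 10, new Scalar(1), -1);    // seed for region 1
//   Core.circle(markers, new Point(200, 200), 10, new Scalar(2), -1);  // seed for region 2
//   Imgproc.watershed(image, markers);
//   // markers now holds region labels (1, 2) with -1 on watershed boundaries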
// C++: void Canny(Mat image, Mat& edges, double threshold1, double threshold2, int apertureSize = 3, bool L2gradient = false)
private static native void Canny_0(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2, int apertureSize, boolean L2gradient);
private static native void Canny_1(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2);
// C++: void GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT)
private static native void GaussianBlur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY, int borderType);
private static native void GaussianBlur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY);
private static native void GaussianBlur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX);
// C++: void HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0)
private static native void HoughCircles_0(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius);
private static native void HoughCircles_1(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist);
// C++: void HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0)
private static native void HoughLines_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn);
private static native void HoughLines_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold);
// C++: void HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0)
private static native void HoughLinesP_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double minLineLength, double maxLineGap);
private static native void HoughLinesP_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold);
// C++: void HuMoments(Moments m, Mat& hu)
private static native void HuMoments_0(long m_nativeObj, long hu_nativeObj);
// C++: void Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
private static native void Laplacian_0(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta, int borderType);
private static native void Laplacian_1(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta);
private static native void Laplacian_2(long src_nativeObj, long dst_nativeObj, int ddepth);
// C++: double PSNR(Mat src1, Mat src2)
private static native double PSNR_0(long src1_nativeObj, long src2_nativeObj);
// C++: void Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
private static native void Scharr_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta, int borderType);
private static native void Scharr_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta);
private static native void Scharr_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy);
// C++: void Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT)
private static native void Sobel_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType);
private static native void Sobel_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta);
private static native void Sobel_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy);
// C++: void accumulate(Mat src, Mat& dst, Mat mask = Mat())
private static native void accumulate_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj);
private static native void accumulate_1(long src_nativeObj, long dst_nativeObj);
// C++: void accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat())
private static native void accumulateProduct_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj);
private static native void accumulateProduct_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj);
// C++: void accumulateSquare(Mat src, Mat& dst, Mat mask = Mat())
private static native void accumulateSquare_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj);
private static native void accumulateSquare_1(long src_nativeObj, long dst_nativeObj);
// C++: void accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat())
private static native void accumulateWeighted_0(long src_nativeObj, long dst_nativeObj, double alpha, long mask_nativeObj);
private static native void accumulateWeighted_1(long src_nativeObj, long dst_nativeObj, double alpha);
// C++: void adaptiveBilateralFilter(Mat src, Mat& dst, Size ksize, double sigmaSpace, double maxSigmaColor = 20.0, Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT)
private static native void adaptiveBilateralFilter_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaSpace, double maxSigmaColor, double anchor_x, double anchor_y, int borderType);
private static native void adaptiveBilateralFilter_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaSpace, double maxSigmaColor, double anchor_x, double anchor_y);
private static native void adaptiveBilateralFilter_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaSpace);
// C++: void adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C)
private static native void adaptiveThreshold_0(long src_nativeObj, long dst_nativeObj, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C);
// C++: void approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed)
private static native void approxPolyDP_0(long curve_mat_nativeObj, long approxCurve_mat_nativeObj, double epsilon, boolean closed);
// C++: double arcLength(vector_Point2f curve, bool closed)
private static native double arcLength_0(long curve_mat_nativeObj, boolean closed);
// C++: void bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT)
private static native void bilateralFilter_0(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace, int borderType);
private static native void bilateralFilter_1(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace);
// C++: void blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT)
private static native void blur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y, int borderType);
private static native void blur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y);
private static native void blur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height);
// C++: int borderInterpolate(int p, int len, int borderType)
private static native int borderInterpolate_0(int p, int len, int borderType);
// C++: Rect boundingRect(vector_Point points)
private static native double[] boundingRect_0(long points_mat_nativeObj);
// C++: void boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT)
private static native void boxFilter_0(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize, int borderType);
private static native void boxFilter_1(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize);
private static native void boxFilter_2(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height);
// C++: void calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale)
private static native void calcBackProject_0(long images_mat_nativeObj, long channels_mat_nativeObj, long hist_nativeObj, long dst_nativeObj, long ranges_mat_nativeObj, double scale);
// C++: void calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false)
private static native void calcHist_0(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj, boolean accumulate);
private static native void calcHist_1(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj);
// C++: double compareHist(Mat H1, Mat H2, int method)
private static native double compareHist_0(long H1_nativeObj, long H2_nativeObj, int method);
// C++: double contourArea(Mat contour, bool oriented = false)
private static native double contourArea_0(long contour_nativeObj, boolean oriented);
private static native double contourArea_1(long contour_nativeObj);
// C++: void convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false)
private static native void convertMaps_0(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type, boolean nninterpolation);
private static native void convertMaps_1(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type);
// C++: void convexHull(vector_Point points, vector_int& hull, bool clockwise = false, _hidden_ returnPoints = true)
private static native void convexHull_0(long points_mat_nativeObj, long hull_mat_nativeObj, boolean clockwise);
private static native void convexHull_1(long points_mat_nativeObj, long hull_mat_nativeObj);
// C++: void convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects)
private static native void convexityDefects_0(long contour_mat_nativeObj, long convexhull_mat_nativeObj, long convexityDefects_mat_nativeObj);
// C++: void copyMakeBorder(Mat src, Mat& dst, int top, int bottom, int left, int right, int borderType, Scalar value = Scalar())
private static native void copyMakeBorder_0(long src_nativeObj, long dst_nativeObj, int top, int bottom, int left, int right, int borderType, double value_val0, double value_val1, double value_val2, double value_val3);
private static native void copyMakeBorder_1(long src_nativeObj, long dst_nativeObj, int top, int bottom, int left, int right, int borderType);
// C++: void cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT)
private static native void cornerEigenValsAndVecs_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType);
private static native void cornerEigenValsAndVecs_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize);
// C++: void cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT)
private static native void cornerHarris_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k, int borderType);
private static native void cornerHarris_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k);
// C++: void cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT)
private static native void cornerMinEigenVal_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType);
private static native void cornerMinEigenVal_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize);
private static native void cornerMinEigenVal_2(long src_nativeObj, long dst_nativeObj, int blockSize);
// C++: void cornerSubPix(Mat image, vector_Point2f& corners, Size winSize, Size zeroZone, TermCriteria criteria)
private static native void cornerSubPix_0(long image_nativeObj, long corners_mat_nativeObj, double winSize_width, double winSize_height, double zeroZone_width, double zeroZone_height, int criteria_type, int criteria_maxCount, double criteria_epsilon);
// C++: void createHanningWindow(Mat& dst, Size winSize, int type)
private static native void createHanningWindow_0(long dst_nativeObj, double winSize_width, double winSize_height, int type);
// C++: void cvtColor(Mat src, Mat& dst, int code, int dstCn = 0)
private static native void cvtColor_0(long src_nativeObj, long dst_nativeObj, int code, int dstCn);
private static native void cvtColor_1(long src_nativeObj, long dst_nativeObj, int code);
// C++: void dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
private static native void dilate_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
private static native void dilate_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations);
private static native void dilate_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj);
// C++: void distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize)
private static native void distanceTransform_0(long src_nativeObj, long dst_nativeObj, int distanceType, int maskSize);
// C++: void distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP)
private static native void distanceTransformWithLabels_0(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize, int labelType);
private static native void distanceTransformWithLabels_1(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize);
// C++: void drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = 8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point())
private static native void drawContours_0(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj, int maxLevel, double offset_x, double offset_y);
private static native void drawContours_1(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness);
private static native void drawContours_2(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3);
// C++: void equalizeHist(Mat src, Mat& dst)
private static native void equalizeHist_0(long src_nativeObj, long dst_nativeObj);
// C++: void erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
private static native void erode_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
private static native void erode_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations);
private static native void erode_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj);
// C++: void filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
private static native void filter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta, int borderType);
private static native void filter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta);
private static native void filter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj);
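// Usage sketch: a hand-built 3x3 sharpening kernel applied with filter2D; ddepth = -1 keeps
// the source depth, and CvType is fully qualified because this file does not import it
// ("src" is assumed to be prepared elsewhere):
//
//     Mat kernel = new Mat(3, 3, org.opencv.core.CvType.CV_32F);
//     kernel.put(0, 0,  0, -1,  0,
//                      -1,  5, -1,
//                       0, -1,  0);
//     Mat sharpened = new Mat();
//     Imgproc.filter2D(src, sharpened, -1, kernel);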
// C++: void findContours(Mat& image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point())
private static native void findContours_0(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method, double offset_x, double offset_y);
private static native void findContours_1(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method);
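// Usage sketch tying findContours to the drawContours stubs above: contours are returned as
// a List<MatOfPoint>, and findContours modifies its input, so a clone of the binary image
// ("bin", assumed CV_8UC1) is passed in; RETR_EXTERNAL / CHAIN_APPROX_SIMPLE are assumed to
// be among this class's public constants, and "vis" is a colour image to draw on:
//
//     List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
//     Mat hierarchy = new Mat();
//     Imgproc.findContours(bin.clone(), contours, hierarchy,
//                          Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
//     Imgproc.drawContours(vis, contours, -1, new Scalar(0, 255, 0));  // -1 draws every contour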
// C++: RotatedRect fitEllipse(vector_Point2f points)
private static native double[] fitEllipse_0(long points_mat_nativeObj);
// C++: void fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps)
private static native void fitLine_0(long points_nativeObj, long line_nativeObj, int distType, double param, double reps, double aeps);
// C++: int floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4)
private static native int floodFill_0(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3, double upDiff_val0, double upDiff_val1, double upDiff_val2, double upDiff_val3, int flags);
private static native int floodFill_1(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3);
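// Usage sketch: flood-fill the region connected to a seed point with a solid colour; the
// mask must be CV_8UC1 and two pixels larger than the image in each dimension, and "img"
// is assumed to be a CV_8UC3 image that may be modified in place:
//
//     Mat mask = Mat.zeros(img.rows() + 2, img.cols() + 2, org.opencv.core.CvType.CV_8UC1);
//     int filled = Imgproc.floodFill(img, mask, new Point(10, 10), new Scalar(255, 0, 0));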
// C++: Mat getAffineTransform(vector_Point2f src, vector_Point2f dst)
private static native long getAffineTransform_0(long src_mat_nativeObj, long dst_mat_nativeObj);
// C++: Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false)
private static native long getDefaultNewCameraMatrix_0(long cameraMatrix_nativeObj, double imgsize_width, double imgsize_height, boolean centerPrincipalPoint);
private static native long getDefaultNewCameraMatrix_1(long cameraMatrix_nativeObj);
// C++: void getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F)
private static native void getDerivKernels_0(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize, boolean normalize, int ktype);
private static native void getDerivKernels_1(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize);
// C++: Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F)
private static native long getGaborKernel_0(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma, double psi, int ktype);
private static native long getGaborKernel_1(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma);
// C++: Mat getGaussianKernel(int ksize, double sigma, int ktype = CV_64F)
private static native long getGaussianKernel_0(int ksize, double sigma, int ktype);
private static native long getGaussianKernel_1(int ksize, double sigma);
// C++: Mat getPerspectiveTransform(Mat src, Mat dst)
private static native long getPerspectiveTransform_0(long src_nativeObj, long dst_nativeObj);
// C++: void getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1)
private static native void getRectSubPix_0(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj, int patchType);
private static native void getRectSubPix_1(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj);
// C++: Mat getRotationMatrix2D(Point2f center, double angle, double scale)
private static native long getRotationMatrix2D_0(double center_x, double center_y, double angle, double scale);
// C++: Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1))
private static native long getStructuringElement_0(int shape, double ksize_width, double ksize_height, double anchor_x, double anchor_y);
private static native long getStructuringElement_1(int shape, double ksize_width, double ksize_height);
// C++: void goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04)
private static native void goodFeaturesToTrack_0(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, boolean useHarrisDetector, double k);
private static native void goodFeaturesToTrack_1(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance);
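// Usage sketch: Shi-Tomasi corner detection on a grayscale frame ("gray", assumed CV_8UC1);
// the corners come back as a MatOfPoint:
//
//     MatOfPoint corners = new MatOfPoint();
//     Imgproc.goodFeaturesToTrack(gray, corners, 100, 0.01, 10);  // up to 100 corners, 10 px apart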
// C++: void grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL)
private static native void grabCut_0(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount, int mode);
private static native void grabCut_1(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount);
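// Usage sketch: GrabCut initialised from a rectangle around the object; "img" is assumed to
// be CV_8UC3, the rectangle coordinates are placeholders, and GC_INIT_WITH_RECT is assumed
// to be among this class's public constants:
//
//     Mat mask = new Mat(), bgdModel = new Mat(), fgdModel = new Mat();
//     Rect roi = new Rect(50, 50, 200, 200);
//     Imgproc.grabCut(img, mask, roi, bgdModel, fgdModel, 5, Imgproc.GC_INIT_WITH_RECT);
//     // mask now labels each pixel as (probable) background or (probable) foreground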
// C++: void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
private static native void initUndistortRectifyMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long newCameraMatrix_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj);
// C++: float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat& map1, Mat& map2, int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0)
private static native float initWideAngleProjMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, int destImageWidth, int m1type, long map1_nativeObj, long map2_nativeObj, int projType, double alpha);
private static native float initWideAngleProjMap_1(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, int destImageWidth, int m1type, long map1_nativeObj, long map2_nativeObj);
// C++: void integral(Mat src, Mat& sum, int sdepth = -1)
private static native void integral_0(long src_nativeObj, long sum_nativeObj, int sdepth);
private static native void integral_1(long src_nativeObj, long sum_nativeObj);
// C++: void integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1)
private static native void integral2_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, int sdepth);
private static native void integral2_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj);
// C++: void integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1)
private static native void integral3_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj, int sdepth);
private static native void integral3_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj);
// C++: float intersectConvexConvex(Mat _p1, Mat _p2, Mat& _p12, bool handleNested = true)
private static native float intersectConvexConvex_0(long _p1_nativeObj, long _p2_nativeObj, long _p12_nativeObj, boolean handleNested);
private static native float intersectConvexConvex_1(long _p1_nativeObj, long _p2_nativeObj, long _p12_nativeObj);
// C++: void invertAffineTransform(Mat M, Mat& iM)
private static native void invertAffineTransform_0(long M_nativeObj, long iM_nativeObj);
// C++: bool isContourConvex(vector_Point contour)
private static native boolean isContourConvex_0(long contour_mat_nativeObj);
// C++: double matchShapes(Mat contour1, Mat contour2, int method, double parameter)
private static native double matchShapes_0(long contour1_nativeObj, long contour2_nativeObj, int method, double parameter);
// C++: void matchTemplate(Mat image, Mat templ, Mat& result, int method)
private static native void matchTemplate_0(long image_nativeObj, long templ_nativeObj, long result_nativeObj, int method);
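// Usage sketch: normalised cross-correlation template matching, with Core.minMaxLoc fully
// qualified because Core is not imported by this file; "image" and "templ" are assumed to
// be same-type Mats prepared elsewhere, and TM_CCOEFF_NORMED is assumed to be one of this
// class's public constants:
//
//     Mat result = new Mat();
//     Imgproc.matchTemplate(image, templ, result, Imgproc.TM_CCOEFF_NORMED);
//     org.opencv.core.Core.MinMaxLocResult best = org.opencv.core.Core.minMaxLoc(result);
//     Point topLeft = best.maxLoc;  // for TM_CCOEFF_NORMED the maximum is the best match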
// C++: void medianBlur(Mat src, Mat& dst, int ksize)
private static native void medianBlur_0(long src_nativeObj, long dst_nativeObj, int ksize);
// C++: RotatedRect minAreaRect(vector_Point2f points)
private static native double[] minAreaRect_0(long points_mat_nativeObj);
// C++: void minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius)
private static native void minEnclosingCircle_0(long points_mat_nativeObj, double[] center_out, double[] radius_out);
// C++: Moments moments(Mat array, bool binaryImage = false)
private static native long moments_0(long array_nativeObj, boolean binaryImage);
private static native long moments_1(long array_nativeObj);
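// Usage sketch: the centroid of a binary blob from its image moments; "bin" is assumed to
// be CV_8UC1, and the Moments wrapper is assumed to expose the generated getters
// get_m00()/get_m10()/get_m01():
//
//     Moments m = Imgproc.moments(bin, true);
//     double cx = m.get_m10() / m.get_m00();
//     double cy = m.get_m01() / m.get_m00();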
// C++: void morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue())
private static native void morphologyEx_0(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
private static native void morphologyEx_1(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations);
private static native void morphologyEx_2(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj);
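// Usage sketch: morphological opening (erode then dilate) to remove small speckles from a
// binary mask ("bin", assumed CV_8UC1); MORPH_OPEN and MORPH_RECT are declared above:
//
//     Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
//     Mat opened = new Mat();
//     Imgproc.morphologyEx(bin, opened, Imgproc.MORPH_OPEN, kernel);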
// C++: Point2d phaseCorrelate(Mat src1, Mat src2, Mat window = Mat())
private static native double[] phaseCorrelate_0(long src1_nativeObj, long src2_nativeObj, long window_nativeObj);
private static native double[] phaseCorrelate_1(long src1_nativeObj, long src2_nativeObj);
// C++: Point2d phaseCorrelateRes(Mat src1, Mat src2, Mat window, double* response = 0)
private static native double[] phaseCorrelateRes_0(long src1_nativeObj, long src2_nativeObj, long window_nativeObj, double[] response_out);
private static native double[] phaseCorrelateRes_1(long src1_nativeObj, long src2_nativeObj, long window_nativeObj);
// C++: double pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist)
private static native double pointPolygonTest_0(long contour_mat_nativeObj, double pt_x, double pt_y, boolean measureDist);
// C++: void preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT)
private static native void preCornerDetect_0(long src_nativeObj, long dst_nativeObj, int ksize, int borderType);
private static native void preCornerDetect_1(long src_nativeObj, long dst_nativeObj, int ksize);
// C++: void pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
private static native void pyrDown_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType);
private static native void pyrDown_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height);
private static native void pyrDown_2(long src_nativeObj, long dst_nativeObj);
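// Usage sketch: pyrDown blurs with a Gaussian and drops every other row and column, so each
// call roughly halves the resolution ("src" assumed prepared elsewhere):
//
//     Mat half = new Mat();
//     Imgproc.pyrDown(src, half);  // half is about src.cols()/2 x src.rows()/2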
// C++: void pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS,5,1))
private static native void pyrMeanShiftFiltering_0(long src_nativeObj, long dst_nativeObj, double sp, double sr, int maxLevel, int termcrit_type, int termcrit_maxCount, double termcrit_epsilon);
private static native void pyrMeanShiftFiltering_1(long src_nativeObj, long dst_nativeObj, double sp, double sr);
// C++: void pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT)
private static native void pyrUp_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType);
private static native void pyrUp_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height);
private static native void pyrUp_2(long src_nativeObj, long dst_nativeObj);
// C++: void remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
private static native void remap_0(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
private static native void remap_1(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation);
// C++: void resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR)
private static native void resize_0(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx, double fy, int interpolation);
private static native void resize_1(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height);
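// Usage sketch: when a non-zero dsize is passed it takes precedence over fx/fy; INTER_AREA
// (declared above) tends to give the cleanest result when shrinking ("src" assumed prepared
// elsewhere):
//
//     Mat small = new Mat();
//     Imgproc.resize(src, small, new Size(320, 240), 0, 0, Imgproc.INTER_AREA);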
// C++: void sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT)
private static native void sepFilter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta, int borderType);
private static native void sepFilter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta);
private static native void sepFilter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj);
// C++: double threshold(Mat src, Mat& dst, double thresh, double maxval, int type)
private static native double threshold_0(long src_nativeObj, long dst_nativeObj, double thresh, double maxval, int type);
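// Usage sketch: Otsu's method picks the threshold automatically, so the thresh argument is
// effectively ignored; THRESH_BINARY and THRESH_OTSU are assumed to be among this class's
// public constants ("gray" assumed CV_8UC1):
//
//     Mat bin = new Mat();
//     double otsu = Imgproc.threshold(gray, bin, 0, 255,
//                                     Imgproc.THRESH_BINARY + Imgproc.THRESH_OTSU);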
// C++: void undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat())
private static native void undistort_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long newCameraMatrix_nativeObj);
private static native void undistort_1(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj);
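// Usage sketch: removing lens distortion from a single frame; "distorted" is the input image,
// and "cameraMatrix" (3x3) with "distCoeffs" are assumed to come from a prior calibration
// (e.g. Calib3d.calibrateCamera or similar):
//
//     Mat undistorted = new Mat();
//     Imgproc.undistort(distorted, undistorted, cameraMatrix, distCoeffs);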
// C++: void undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat())
private static native void undistortPoints_0(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long P_nativeObj);
private static native void undistortPoints_1(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj);
// C++: void warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
private static native void warpAffine_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
private static native void warpAffine_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags);
private static native void warpAffine_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height);
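// Usage sketch: rotating an image about its centre by combining getRotationMatrix2D (above)
// with warpAffine; the output keeps the source size, so corners may be clipped ("src"
// assumed prepared elsewhere):
//
//     Point centre = new Point(src.cols() / 2.0, src.rows() / 2.0);
//     Mat rot = Imgproc.getRotationMatrix2D(centre, 30, 1.0);  // 30 degrees CCW, no scaling
//     Mat rotated = new Mat();
//     Imgproc.warpAffine(src, rotated, rot, src.size());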
// C++: void warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar())
private static native void warpPerspective_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3);
private static native void warpPerspective_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags);
private static native void warpPerspective_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height);
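// Usage sketch: four-point perspective correction; the corner lists are MatOfPoint2f (a Mat
// subclass, so they can be passed straight to getPerspectiveTransform, declared above), and
// the coordinates here are placeholders ("src" assumed prepared elsewhere):
//
//     MatOfPoint2f srcQuad = new MatOfPoint2f(new Point(50, 80),  new Point(430, 60),
//                                             new Point(470, 330), new Point(30, 350));
//     MatOfPoint2f dstQuad = new MatOfPoint2f(new Point(0, 0),    new Point(400, 0),
//                                             new Point(400, 300), new Point(0, 300));
//     Mat H = Imgproc.getPerspectiveTransform(srcQuad, dstQuad);
//     Mat flat = new Mat();
//     Imgproc.warpPerspective(src, flat, H, new Size(400, 300));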
// C++: void watershed(Mat image, Mat& markers)
private static native void watershed_0(long image_nativeObj, long markers_nativeObj);
}