public void FishEyeCalibrate()
{
    var patternSize = new Size(10, 7);
    using (var image = Image("calibration/00.jpg"))
    using (var corners = new MatOfPoint2f())
    {
        Cv2.FindChessboardCorners(image, patternSize, corners);

        var objectPointsArray = Create3DChessboardCorners(patternSize, 1.0f).ToArray();
        var imagePointsArray = corners.ToArray();

        using (var objectPoints = MatOfPoint3f.FromArray(objectPointsArray))
        using (var imagePoints = MatOfPoint2f.FromArray(imagePointsArray))
        using (var cameraMatrix = new MatOfDouble(Mat.Eye(3, 3, MatType.CV_64FC1)))
        using (var distCoeffs = new MatOfDouble())
        {
            var rms = Cv2.FishEye.Calibrate(
                new[] { objectPoints }, new[] { imagePoints }, image.Size(),
                cameraMatrix, distCoeffs,
                out var rotationVectors, out var translationVectors,
                FishEyeCalibrationFlags.None);

            var distCoeffValues = distCoeffs.ToArray();
            Assert.Equal(55.15, rms, 2);
            Assert.Contains(distCoeffValues, d => Math.Abs(d) > 1e-20);
            Assert.NotEmpty(rotationVectors);
            Assert.NotEmpty(translationVectors);
        }
    }
}
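// A hedged follow-up sketch, not part of the original test: once Calibrate has filled
// cameraMatrix and distCoeffs, the same fisheye model can undistort the input image.
// Cv2.FishEye.UndistortImage is assumed available alongside Cv2.FishEye.Calibrate;
// passing cameraMatrix again as Knew keeps the original focal scale.
public void UndistortWithFishEyeIntrinsics(Mat image, MatOfDouble cameraMatrix, MatOfDouble distCoeffs)
{
    using (var undistorted = new Mat())
    {
        Cv2.FishEye.UndistortImage(image, undistorted, cameraMatrix, distCoeffs, cameraMatrix);
        Cv2.ImWrite("calibration/00_undistorted.jpg", undistorted);
    }
}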
public void Draw2dContour(Mat image, Scalar color)
{
    Point2f[] points2dArray = Points2d.ToArray();
    for (int i = 0; i < points2dArray.Length; i++)
    {
        // Wrap the index with modulo so the last vertex connects back to the first,
        // closing the contour.
        Cv2.Line(image, points2dArray[i], points2dArray[(i + 1) % points2dArray.Length],
            color, 2, LineTypes.AntiAlias, 0);
    }
}
// Fixed FromArray behavior
static Point2d[] MyPerspectiveTransform2(Point2f[] yourData, Mat transformationMatrix)
{
    using (MatOfPoint2f s = MatOfPoint2f.FromArray(yourData))
    using (MatOfPoint2f d = new MatOfPoint2f())
    {
        Cv2.PerspectiveTransform(s, d, transformationMatrix);
        Point2f[] f = d.ToArray();
        return f.Select(Point2fToPoint2d).ToArray();
    }
}
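// Hypothetical usage sketch for MyPerspectiveTransform2: build a 3x3 homography with
// Cv2.GetPerspectiveTransform and push the source quad through it. The corner values
// are illustrative only; Point2fToPoint2d is the conversion helper assumed above.
static Point2d[] TransformQuadExample()
{
    var srcQuad = new[] { new Point2f(0, 0), new Point2f(100, 0), new Point2f(100, 100), new Point2f(0, 100) };
    var dstQuad = new[] { new Point2f(10, 5), new Point2f(95, 0), new Point2f(100, 110), new Point2f(0, 95) };
    using (Mat homography = Cv2.GetPerspectiveTransform(srcQuad, dstQuad))
    {
        return MyPerspectiveTransform2(srcQuad, homography);
    }
}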
private OpenCV.Core.Point[] FindBiggestContourWithFourBorders(IList<MatOfPoint> contours)
{
    OpenCV.Core.Point[] biggestContour = null;
    var sync = new object();
    Parallel.ForEach(contours, c =>
    {
        using (var mat2f = new MatOfPoint2f(c.ToArray()))
        {
            double perimeter = Imgproc.ArcLength(mat2f, true);
            if (perimeter > 1500)
            {
                var approx = new MatOfPoint2f();
                Imgproc.ApproxPolyDP(mat2f, approx, 0.02 * perimeter, true);
                // Keep only convex quadrilaterals.
                if (approx.Total() == 4)
                {
                    using (var approxMat = new MatOfPoint(approx.ToArray()))
                    {
                        if (Imgproc.IsContourConvex(approxMat))
                        {
                            // The candidate state is shared across parallel iterations,
                            // so guard the compare-and-update with a lock.
                            lock (sync)
                            {
                                if (biggestContour == null || oldPerimeter < perimeter)
                                {
                                    biggestContour = approx.ToArray();
                                    detectedBorder = approx;
                                    oldPerimeter = perimeter;
                                }
                            }
                        }
                    }
                }
            }
        }
    });
    return biggestContour;
}
private void GetImageMaxSize(out double maxWidth, out double maxHeight,
    out OpenCV.Core.Point pTopLeft, out OpenCV.Core.Point pBottomLeft,
    out OpenCV.Core.Point pBottomRight, out OpenCV.Core.Point pTopRight)
{
    // detectedBorder is expected to hold the four quad corners in
    // top-left, bottom-left, bottom-right, top-right order.
    var pointArray = detectedBorder.ToArray();
    pTopLeft = new OpenCV.Core.Point(pointArray[0].X, pointArray[0].Y);
    pBottomLeft = new OpenCV.Core.Point(pointArray[1].X, pointArray[1].Y);
    pBottomRight = new OpenCV.Core.Point(pointArray[2].X, pointArray[2].Y);
    pTopRight = new OpenCV.Core.Point(pointArray[3].X, pointArray[3].Y);

    // Take absolute differences so the result does not depend on corner order;
    // the raw differences can be negative (e.g. top-left X minus bottom-right X).
    var w1 = Math.Abs(pTopLeft.X - pBottomRight.X);
    var w2 = Math.Abs(pTopRight.X - pBottomLeft.X);
    var h1 = Math.Abs(pTopRight.Y - pTopLeft.Y);
    var h2 = Math.Abs(pBottomLeft.Y - pBottomRight.Y);

    maxWidth = w1 > w2 ? w1 : w2;
    maxHeight = h1 > h2 ? h1 : h2;
}
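// A hedged sketch, not in the original code, of how detectedBorder and GetImageMaxSize
// would typically feed a perspective warp. Imgproc.GetPerspectiveTransform and
// Imgproc.WarpPerspective are assumed to follow the Java-binding surface used above;
// the destination corners use the same top-left, bottom-left, bottom-right, top-right
// order as detectedBorder.
private Mat WarpToDetectedBorder(Mat source)
{
    GetImageMaxSize(out var maxWidth, out var maxHeight,
        out var pTopLeft, out var pBottomLeft, out var pBottomRight, out var pTopRight);

    using (var src = new MatOfPoint2f(pTopLeft, pBottomLeft, pBottomRight, pTopRight))
    using (var dst = new MatOfPoint2f(
        new OpenCV.Core.Point(0, 0),
        new OpenCV.Core.Point(0, maxHeight - 1),
        new OpenCV.Core.Point(maxWidth - 1, maxHeight - 1),
        new OpenCV.Core.Point(maxWidth - 1, 0)))
    using (var transform = Imgproc.GetPerspectiveTransform(src, dst))
    {
        var warped = new Mat();
        Imgproc.WarpPerspective(source, warped, transform, new OpenCV.Core.Size(maxWidth, maxHeight));
        return warped;
    }
}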
public void CalibrateCameraByArray()
{
    var patternSize = new Size(10, 7);
    using (var image = Image("calibration/00.jpg"))
    using (var corners = new MatOfPoint2f())
    {
        Cv2.FindChessboardCorners(image, patternSize, corners);

        var objectPoints = Create3DChessboardCorners(patternSize, 1.0f);
        var imagePoints = corners.ToArray();
        var cameraMatrix = new double[3, 3] { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 } };
        var distCoeffs = new double[5];

        var rms = Cv2.CalibrateCamera(
            new[] { objectPoints }, new[] { imagePoints }, image.Size(),
            cameraMatrix, distCoeffs,
            out var rotationVectors, out var translationVectors,
            CalibrationFlags.UseIntrinsicGuess | CalibrationFlags.FixK5);

        Assert.Equal(6.16, rms, 2);
        Assert.Contains(distCoeffs, d => Math.Abs(d) > 1e-20);
    }
}
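// A hedged follow-up sketch, not in the original test: lines like these could sit
// right after the Cv2.CalibrateCamera call above to reproject the model points with
// the first estimated pose and sanity-check the reported RMS. It assumes the
// array-based overload returns Vec3d rotation/translation vectors and that
// System.Linq is imported.
var rvec0 = new[] { rotationVectors[0].Item0, rotationVectors[0].Item1, rotationVectors[0].Item2 };
var tvec0 = new[] { translationVectors[0].Item0, translationVectors[0].Item1, translationVectors[0].Item2 };
Cv2.ProjectPoints(objectPoints, rvec0, tvec0, cameraMatrix, distCoeffs,
    out Point2f[] reprojected, out _);
// Mean Euclidean reprojection error; it should be on the order of the reported RMS.
double meanError = imagePoints.Zip(reprojected,
    (p, q) => Math.Sqrt(Math.Pow(p.X - q.X, 2) + Math.Pow(p.Y - q.Y, 2))).Average();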
/// <summary>
/// Performs perspective transformation of each element of a multi-channel input matrix.
/// </summary>
/// <param name="src">The source two-channel or three-channel floating-point array;
/// each element is a 2D/3D vector to be transformed.</param>
/// <param name="m">3x3 or 4x4 transformation matrix.</param>
/// <returns>The destination array; it will have the same size and same type as src.</returns>
public static Point2f[] PerspectiveTransform(IEnumerable<Point2f> src, Mat m)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (m == null)
        throw new ArgumentNullException(nameof(m));

    using (var srcMat = MatOfPoint2f.FromArray(src))
    using (var dstMat = new MatOfPoint2f())
    {
        NativeMethods.core_perspectiveTransform_Mat(srcMat.CvPtr, dstMat.CvPtr, m.CvPtr);
        return dstMat.ToArray();
    }
}
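// Illustrative usage of the wrapper above: an identity matrix (a stand-in for a real
// homography) maps each point to itself, since the perspective divide is by 1.
// The point values are arbitrary.
using (Mat identity = Mat.Eye(3, 3, MatType.CV_64FC1))
{
    Point2f[] unchanged = PerspectiveTransform(new[] { new Point2f(1, 2), new Point2f(3, 4) }, identity);
}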
/// <summary>
/// Computes the convex hull of a set of 2D points.
/// </summary>
/// <param name="points">The input 2D point set, represented by a CV_32SC2 or CV_32FC2 matrix.</param>
/// <param name="clockwise">If true, the output convex hull is oriented clockwise;
/// otherwise it is oriented counter-clockwise. Here, the usual screen coordinate
/// system is assumed: the origin is at the top-left corner, the x axis points right,
/// and the y axis points down.</param>
/// <returns>The output convex hull: a vector of points that form the
/// hull (it has the same type as the input points).</returns>
public Point2f[] ConvexHullFloatPoints(InputArray points, bool clockwise = false)
{
    // Dispose the intermediate matrix; only the managed array is returned.
    using (var dst = new MatOfPoint2f())
    {
        Cv2.ConvexHull(points, dst, clockwise, true);
        return dst.ToArray();
    }
}
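// Hedged usage sketch: wrap a Point2f[] in a MatOfPoint2f (as other snippets here do
// with FromArray) and pass it as the InputArray. The point values are arbitrary.
var cloud = new[] { new Point2f(0, 0), new Point2f(5, 1), new Point2f(2, 6), new Point2f(3, 2) };
using (var cloudMat = MatOfPoint2f.FromArray(cloud))
{
    Point2f[] hull = ConvexHullFloatPoints(cloudMat);
}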
/// <summary>
/// Projects points from the model coordinate space to the image coordinates.
/// Also computes derivatives of the image coordinates w.r.t. the intrinsic
/// and extrinsic camera parameters.
/// </summary>
/// <param name="objectPoints">Array of object points, 3xN/Nx3 1-channel or
/// 1xN/Nx1 3-channel, where N is the number of points in the view.</param>
/// <param name="rvec">Rotation vector (3x1).</param>
/// <param name="tvec">Translation vector (3x1).</param>
/// <param name="cameraMatrix">Camera matrix (3x3).</param>
/// <param name="distCoeffs">Input vector of distortion coefficients
/// (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.
/// If the vector is null, zero distortion coefficients are assumed.</param>
/// <param name="imagePoints">Output array of image points, 2xN/Nx2 1-channel
/// or 1xN/Nx1 2-channel.</param>
/// <param name="jacobian">Optional output 2Nx(10 + numDistCoeffs) jacobian matrix
/// of derivatives of image points with respect to components of the rotation vector,
/// translation vector, focal lengths, coordinates of the principal point and
/// the distortion coefficients. In the old interface, different components of
/// the jacobian are returned via different output parameters.</param>
/// <param name="aspectRatio">Optional "fixed aspect ratio" parameter.
/// If the parameter is not 0, the function assumes that the aspect ratio (fx/fy)
/// is fixed and correspondingly adjusts the jacobian matrix.</param>
public static void ProjectPoints(IEnumerable<Point3f> objectPoints,
    double[] rvec, double[] tvec,
    double[,] cameraMatrix, double[] distCoeffs,
    out Point2f[] imagePoints, out double[,] jacobian,
    double aspectRatio = 0)
{
    if (objectPoints == null)
        throw new ArgumentNullException(nameof(objectPoints));
    if (rvec == null)
        throw new ArgumentNullException(nameof(rvec));
    if (rvec.Length != 3)
        throw new ArgumentException("rvec.Length != 3");
    if (tvec == null)
        throw new ArgumentNullException(nameof(tvec));
    if (tvec.Length != 3)
        throw new ArgumentException("tvec.Length != 3");
    if (cameraMatrix == null)
        throw new ArgumentNullException(nameof(cameraMatrix));
    if (cameraMatrix.GetLength(0) != 3 || cameraMatrix.GetLength(1) != 3)
        throw new ArgumentException("cameraMatrix must be double[3,3]");

    Point3f[] objectPointsArray = EnumerableEx.ToArray(objectPoints);
    using (var objectPointsM = new Mat(objectPointsArray.Length, 1, MatType.CV_32FC3, objectPointsArray))
    using (var rvecM = new Mat(3, 1, MatType.CV_64FC1, rvec))
    using (var tvecM = new Mat(3, 1, MatType.CV_64FC1, tvec))
    using (var cameraMatrixM = new Mat(3, 3, MatType.CV_64FC1, cameraMatrix))
    using (var imagePointsM = new MatOfPoint2f())
    // An empty Mat stands in for null distortion coefficients; either way the
    // matrix is disposed with the rest of the unmanaged resources.
    using (var distCoeffsM = distCoeffs == null
        ? new Mat()
        : new Mat(distCoeffs.Length, 1, MatType.CV_64FC1, distCoeffs))
    using (var jacobianM = new MatOfDouble())
    {
        NativeMethods.calib3d_projectPoints_Mat(objectPointsM.CvPtr, rvecM.CvPtr, tvecM.CvPtr,
            cameraMatrixM.CvPtr, distCoeffsM.CvPtr, imagePointsM.CvPtr, jacobianM.CvPtr, aspectRatio);

        imagePoints = imagePointsM.ToArray();
        jacobian = jacobianM.ToRectangularArray();
    }
}
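// Minimal usage sketch for the overload above (assuming it is exposed on Cv2):
// a zero rotation/translation pose with a unit camera matrix reduces projection to
// the pinhole division x/z, y/z. The point values are arbitrary.
var modelPoints = new[] { new Point3f(1, 0, 2), new Point3f(0, 1, 4) };
var unitK = new double[3, 3] { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 } };
Cv2.ProjectPoints(modelPoints, new double[3], new double[3], unitK, null,
    out Point2f[] projected, out double[,] jacobian);
// projected[0] ≈ (0.5, 0) and projected[1] ≈ (0, 0.25).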