/// <summary>
/// Opens the chessboard images and extracts the corner points.
/// </summary>
/// <param name="fileList">list of chessboard image file names</param>
/// <param name="boardSize">number of inner chessboard corners (x-1, y-1)</param>
/// <returns>number of images in which a full chessboard was found</returns>
private int AddChessboardPoints(List<string> fileList, Size boardSize)
{
    //PointF[][] imageCorners = new PointF[Frame_array_buffer.Length][]; //points on the chessboard
    //PointF[] imageCorners;
    //Emgu.CV.IOutputArray imageCorners;

    //positions of the chessboard corners in 3D space
    MCvPoint3D32f[] objectCorners = new MCvPoint3D32f[boardSize.Height * boardSize.Width]; //3D scene points

    //initialize the inner chessboard corners in 3D space, (x, y, z) = (i, j, 0)
    for (int i = 0; i < boardSize.Height; i++)
    {
        for (int j = 0; j < boardSize.Width; j++)
        {
            objectCorners[i * boardSize.Width + j] = new MCvPoint3D32f(i, j, 0.0f);
        }
    }

    //2D image points:
    Image<Gray, Byte> image;  //image used to load each chessboard picture
    int successes = 0;        //number of images in which the chessboard was found
    //List<VectorOfPointF> corners = new List<VectorOfPointF>();
    GC.Collect();

    //for all input images (viewing angles)
    for (int i = 0; i < fileList.Count; i++)
    {
        var cornerPoints = new VectorOfPointF();    //vector of chessboard corner points
        image = new Image<Gray, Byte>(fileList[i]); //load the image from the list

        //CameraCalibration.FindChessboardCorners(image, boardSize, CALIB_CB_TYPE.DEFAULT);
        //find the chessboard corner points
        bool found = CvInvoke.FindChessboardCorners(image, boardSize, cornerPoints, CalibCbType.Default);
        if (!found)
        {
            continue; //if no corners were found in the current image, take the next one
        }
        //corners.Add(cornerPoints);

        //refine the corner points to sub-pixel accuracy
        //image.FindCornerSubPix(imageCorners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));
        CvInvoke.CornerSubPix(image, cornerPoints, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));
        //CvInvoke.cvFindCornerSubPix(image, imageCorners, boardSize.Height * boardSize.Width, new Size(5, 5), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

        //if enough corners (9*6) were found in the image, add them to the lists
        if (cornerPoints.Size == boardSize.Height * boardSize.Width)
        {
            //call the method that adds the points to the lists
            AddPoints(cornerPoints.ToArray(), objectCorners);
            successes++;
        }
    }

    return successes;
}
/// <summary>
/// Finds the corners/circles (depending on the selected target type).
/// </summary>
/// <param name="image">The image to search.</param>
/// <param name="annotatedImage">The image with the corners/circles annotated.</param>
/// <param name="corners">Vector of the corners/circles.</param>
/// <returns>True if corners/circles were found, false otherwise.</returns>
private bool FindCorners(Image<Bgr, byte> image, ref Image<Bgr, byte> annotatedImage, VectorOfPointF corners)
{
    var found = false;

    // use a simple blob detector when finding a circle grid
    using (var det = new SimpleBlobDetector())
    {
        // set the size of the pattern
        var size = new Size(_cornersPerRow, _cornersPerCol);

        // look for a chessboard or a circle grid
        switch (_targetType)
        {
            case CalibTargetType.ChessBoard:
                found = CvInvoke.FindChessboardCorners(image, size, corners);

                // if corners were found, refine them to sub-pixel accuracy
                if (found)
                {
                    CvInvoke.CornerSubPix(image.Convert<Gray, byte>(), corners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));
                }
                break;
            case CalibTargetType.CirclesGrid:
                found = CvInvoke.FindCirclesGrid(image, size, corners, _circlesGridType, det);
                break;
        }

        // draw the results
        CvInvoke.DrawChessboardCorners(annotatedImage, size, corners, found);
        return found;
    }
}
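FindCorners above relies on several members that are not part of the snippet; a minimal sketch of what they might look like, where every concrete value and the enum definition are assumptions rather than code from the original:

// Hypothetical supporting members for FindCorners above (names kept, values assumed).
private enum CalibTargetType { ChessBoard, CirclesGrid }

private CalibTargetType _targetType = CalibTargetType.ChessBoard; // which calibration target to search for
private int _cornersPerRow = 9;                                   // inner corners per row (assumed)
private int _cornersPerCol = 6;                                   // inner corners per column (assumed)
private CalibCgType _circlesGridType = CalibCgType.SymmetricGrid; // circle-grid layout flag (assumed)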
private List<VectorOfVectorOfPointF> findCorners(float squareEdge, Size patternSize, string[] imagesLeft, string[] imagesRight)
{
    VectorOfVectorOfPointF allCornersLeft = new VectorOfVectorOfPointF();
    VectorOfVectorOfPointF allCornersRight = new VectorOfVectorOfPointF();
    VectorOfPointF cornersLeft = new VectorOfPointF();
    VectorOfPointF cornersRight = new VectorOfPointF();
    Image<Gray, Byte> imageLeft;
    Image<Gray, Byte> imageRight;
    bool findLeft, findRight;

    for (int i = 0; i < imagesLeft.Length; i++)
    {
        imageLeft = new Image<Gray, Byte>(imagesLeft[i]);
        imageRight = new Image<Gray, Byte>(imagesRight[i]);

        // detect the chessboard in both views of the stereo pair
        findLeft = CvInvoke.FindChessboardCorners(imageLeft, patternSize, cornersLeft);
        findRight = CvInvoke.FindChessboardCorners(imageRight, patternSize, cornersRight);

        // skip the pair unless the board was found in both images
        if (!findLeft || !findRight)
        {
            continue;
        }

        // refine the corners to sub-pixel accuracy
        CvInvoke.CornerSubPix(imageLeft, cornersLeft, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));
        CvInvoke.CornerSubPix(imageRight, cornersRight, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

        allCornersLeft.Push(cornersLeft);
        allCornersRight.Push(cornersRight);

        imageLeft.Dispose();
        imageRight.Dispose();
        GC.Collect();
    }

    return new List<VectorOfVectorOfPointF>() { allCornersLeft, allCornersRight };
}
public void TestChessboardCalibrationSolvePnPRansac()
{
    Size patternSize = new Size(9, 6);

    Mat chessboardImage = EmguAssert.LoadMat("left01.jpg", ImreadModes.Grayscale);
    Util.VectorOfPointF corners = new Util.VectorOfPointF();
    bool patternWasFound = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);
    CvInvoke.CornerSubPix(chessboardImage, corners, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(0.05));

    MCvPoint3D32f[] objectPts = CalcChessboardCorners(patternSize, 1.0f);

    using (VectorOfVectorOfPoint3D32F ptsVec = new VectorOfVectorOfPoint3D32F(new MCvPoint3D32f[][] { objectPts }))
    using (VectorOfVectorOfPointF imgPtsVec = new VectorOfVectorOfPointF(corners))
    using (Mat cameraMatrix = new Mat())
    using (Mat distortionCoeff = new Mat())
    using (VectorOfMat rotations = new VectorOfMat())
    using (VectorOfMat translations = new VectorOfMat())
    {
        Mat calMat = CvInvoke.InitCameraMatrix2D(ptsVec, imgPtsVec, chessboardImage.Size, 0);
        Matrix<double> calMatF = new Matrix<double>(calMat.Rows, calMat.Cols, calMat.NumberOfChannels);
        calMat.CopyTo(calMatF);
        double error = CvInvoke.CalibrateCamera(ptsVec, imgPtsVec, chessboardImage.Size, cameraMatrix, distortionCoeff,
            rotations, translations, CalibType.Default, new MCvTermCriteria(30, 1.0e-10));

        using (Mat rotation = new Mat())
        using (Mat translation = new Mat())
        using (VectorOfPoint3D32F vpObject = new VectorOfPoint3D32F(objectPts))
        {
            CvInvoke.SolvePnPRansac(vpObject, corners, cameraMatrix, distortionCoeff, rotation, translation);
        }

        CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);

        using (Mat undistorted = new Mat())
        {
            CvInvoke.Undistort(chessboardImage, undistorted, cameraMatrix, distortionCoeff);
            String title = String.Format("Reprojection error: {0}", error);
            //CvInvoke.NamedWindow(title);
            //CvInvoke.Imshow(title, undistorted);
            //CvInvoke.WaitKey();
            //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));
        }
    }
}
private bool DetectCheckerboard(Mat detectImage, Mat drawImage = null)
{
    bool result = CvInvoke.FindChessboardCorners(detectImage, patternSize, cvImageCorners);
    if (result == false)
    {
        return false;
    }

    CvInvoke.CornerSubPix(detectImage, cvImageCorners, new Size(5, 5), new Size(-1, -1), criteria);

    if (drawImage != null)
    {
        CvInvoke.DrawChessboardCorners(drawImage, patternSize, cvImageCorners, true);
    }

    return true;
}
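DetectCheckerboard above uses three fields that are not shown in the snippet; a minimal sketch of plausible declarations, where the concrete values are assumptions:

// Hypothetical fields assumed by DetectCheckerboard (values are illustrative only).
private Size patternSize = new Size(9, 6);                       // inner corners per row and column (assumed)
private VectorOfPointF cvImageCorners = new VectorOfPointF();    // buffer for the detected corners
private MCvTermCriteria criteria = new MCvTermCriteria(30, 0.1); // termination criteria for CornerSubPix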
public static double CalibrateRGBCAM()
{
    Size patternSize = new Size(width, height);
    string[] fileEntries = Directory.GetFiles(@"..\..\..\..\rgb image", "*.jpg");
    Image<Gray, Byte>[] Frame_array_buffer = new Image<Gray, byte>[fileEntries.Length]; //number of images to calibrate the camera over
    MCvPoint3D32f[][] corners_object_list = new MCvPoint3D32f[Frame_array_buffer.Length][];
    PointF[][] corners_points_list = new PointF[Frame_array_buffer.Length][];
    VectorOfPointF[] _cornersPointsVec = new VectorOfPointF[Frame_array_buffer.Length];
    Mat[] _rvecs, _tvecs;

    for (int k = 0; k < Frame_array_buffer.Length; k++)
    {
        Frame_array_buffer[k] = new Image<Gray, byte>(fileEntries[k]);
        _cornersPointsVec[k] = new VectorOfPointF();
        CvInvoke.FindChessboardCorners(Frame_array_buffer[k], patternSize, _cornersPointsVec[k],
            CalibCbType.AdaptiveThresh | CalibCbType.FilterQuads);

        //refine the corners for accuracy
        CvInvoke.CornerSubPix(Frame_array_buffer[k], _cornersPointsVec[k], new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

        //fill our objects list with the real-world measurements for the intrinsic calculations
        List<MCvPoint3D32f> object_list = new List<MCvPoint3D32f>();
        for (int i = 0; i < height; i++)
        {
            for (int j = 0; j < width; j++)
            {
                object_list.Add(new MCvPoint3D32f(j * 20.0F, i * 20.0F, 0.0F));
            }
        }
        corners_object_list[k] = object_list.ToArray();
        corners_points_list[k] = _cornersPointsVec[k].ToArray();
    }

    //If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
    //If you use the FIX_ASPECT_RATIO and FIX_FOCAL_LENGTH options, these values need to be set in the intrinsic parameters before CalibrateCamera is called; otherwise zero values are used as defaults.
    double error = CvInvoke.CalibrateCamera(corners_object_list, corners_points_list, Frame_array_buffer[0].Size,
        _cameraMatrix, _distCoeffs, CalibType.RationalModel, new MCvTermCriteria(30, 0.1), out _rvecs, out _tvecs);

    Console.WriteLine("Intrinsic Calculation Error: " + error.ToString(), "Results"); //display the results to the user
    return error;
}
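CalibrateRGBCAM above reads `width`, `height`, `_cameraMatrix` and `_distCoeffs` from static members that are not shown; a sketch of what they could be, following the matrix sizes used by the other examples in this section (the corner counts are assumptions):

// Hypothetical static members assumed by CalibrateRGBCAM above.
static int width = 9;                                         // inner corners per row (assumed)
static int height = 6;                                        // inner corners per column (assumed)
static Mat _cameraMatrix = new Mat(3, 3, DepthType.Cv64F, 1); // 3x3 intrinsic camera matrix
static Mat _distCoeffs = new Mat(8, 1, DepthType.Cv64F, 1);   // distortion coefficients (rational model)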
private Pattern FindPattern(Image<Bgr, byte> image)
{
    var corners = new VectorOfPointF();
    var grayImage = image.Convert<Gray, byte>();

    var found = CvInvoke.FindChessboardCorners(grayImage, new ImageSize(_nCornersHorizontal, _nCornersVertical), corners,
        CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);

    if (found)
    {
        CvInvoke.CornerSubPix(grayImage, corners, new ImageSize(11, 11), new ImageSize(-1, -1), new MCvTermCriteria(30, 0.1));
    }
    else
    {
        corners = new VectorOfPointF();
    }

    CvInvoke.DrawChessboardCorners(image, new ImageSize(_nCornersHorizontal, _nCornersVertical), corners, found);

    return new Pattern
    {
        Corners = corners,
        Image = image
    };
}
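FindPattern above depends on an `ImageSize` type and a `Pattern` result class that are not part of the snippet; presumably `ImageSize` is an alias for `System.Drawing.Size`, and a minimal sketch of the result container could be:

// Assumed alias and hypothetical result container for FindPattern above.
// using ImageSize = System.Drawing.Size;
internal class Pattern
{
    public VectorOfPointF Corners { get; set; }  // detected (or empty) corner vector
    public Image<Bgr, byte> Image { get; set; }  // annotated input image
}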
private VectorOfVectorOfPointF findCorners(float squareEdge, Size patternSize, string[] imagePaths)
{
    VectorOfVectorOfPointF allCorners = new VectorOfVectorOfPointF();
    VectorOfPointF corners = new VectorOfPointF();
    Image<Gray, Byte> image;
    bool find;

    for (int i = 0; i < imagePaths.Length; i++)
    {
        image = new Image<Gray, Byte>(imagePaths[i]);

        //detect the chessboard pattern in the current image
        find = CvInvoke.FindChessboardCorners(image, patternSize, corners);
        if (!find)
        {
            continue;
        }

        //refine the detected corners to sub-pixel accuracy
        CvInvoke.CornerSubPix(image, corners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

        allCorners.Push(corners);
        image.Dispose();
        GC.Collect();
    }

    return allCorners;
}
private void onImageGrab(Mat image)
{
    frame = image;
    if (frame == null)
    {
        return;
    }

    //convert to grayscale
    CvInvoke.CvtColor(frame, grayFrame, ColorConversion.Bgr2Gray);

    //point-saving mode
    if (currentMode == Modes.Saving)
    {
        hasCorners = CvInvoke.FindChessboardCorners(grayFrame, patternSize, corners,
            CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);

        //we use this loop so we can show a colour image rather than a gray one:
        if (hasCorners) //chessboard found
        {
            //make measurements more accurate with CornerSubPix
            CvInvoke.CornerSubPix(grayFrame, corners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

            //store the frame for calibration; otherwise we would just display the points
            frameArrayBuffer[frameBufferSavepoint] = grayFrame; //store the image
            frameBufferSavepoint++;                             //increase the buffer position

            //check the state of the buffer
            if (frameBufferSavepoint == frameArrayBuffer.Length)
            {
                currentMode = Modes.CalculateIntristrics; //buffer full
            }

            Thread.Sleep(100);

            CvInvoke.DrawChessboardCorners(frame, patternSize, corners, hasCorners);

            string text = String.Format("{0}", frameBufferSavepoint);
            if (this.countLabel.InvokeRequired)
            {
                StringArgReturningVoidDelegate d = new StringArgReturningVoidDelegate(SetLabelText);
                this.Invoke(d, new object[] { text });
            }
            else
            {
                SetLabelText(text);
            }
        }

        corners = new VectorOfPointF();
        hasCorners = false;
    }

    //calibration
    if (currentMode == Modes.CalculateIntristrics)
    {
        for (int k = 0; k < frameArrayBuffer.Length; k++)
        {
            cornersPointsVec[k] = new VectorOfPointF();
            CvInvoke.FindChessboardCorners(frameArrayBuffer[k], patternSize, cornersPointsVec[k],
                CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);

            //refine the corners for accuracy
            CvInvoke.CornerSubPix(grayFrame, cornersPointsVec[k], new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

            //fill our objects list with the real-world measurements for the intrinsic calculations
            var objectList = new List<MCvPoint3D32f>();
            for (int i = 0; i < height; i++)
            {
                for (int j = 0; j < width; j++)
                {
                    objectList.Add(new MCvPoint3D32f(j * squareSize, i * squareSize, 0.0F));
                }
            }
            //corners_object_list[k] = new MCvPoint3D32f[];
            cornersObjectList[k] = objectList.ToArray();
            cornersPointsList[k] = cornersPointsVec[k].ToArray();
        }

        //the error should be as close to 0 as possible
        double error = CvInvoke.CalibrateCamera(cornersObjectList, cornersPointsList, grayFrame.Size, cameraMatrix, distCoeffs,
            CalibType.RationalModel, new MCvTermCriteria(30, 0.1), out rvecs, out tvecs);
        MessageBox.Show(@"Intrinsic Calculation Error: " + error.ToString(CultureInfo.InvariantCulture), @"Results",
            MessageBoxButtons.OK, MessageBoxIcon.Information); //display the results to the user
        currentMode = Modes.Calibrated;
    }

    if (currentMode == Modes.Calibrated)
    {
        Mat outFrame = grayFrame.Clone();
        CvInvoke.Undistort(image, outFrame, cameraMatrix, distCoeffs);
        grayFrame = outFrame.Clone();

        if (calibrationSaved)
        {
            SaveCalibration();
            calibrationSaved = false;
        }
    }

    SetImageBox(frame);
}
public static void Calibrate()
{
    objectPoints = new MCvPoint3D32f[frames][];
    imgPoints = new VectorOfPointF[frames];
    rVecs = new Mat[frames];
    tVecs = new Mat[frames];
    width = 5;
    height = 5;
    patternSize = new Size(width, height);
    var files = Directory.GetFiles(@"--location--");

    for (int k = 0; k < frames; k++)
    {
        grayFrame = CvInvoke.Imread(files[k], ImreadModes.Grayscale);
        found = CvInvoke.FindChessboardCorners(grayFrame, patternSize, cornerPoints, CalibCbType.AdaptiveThresh);

        if (found)
        {
            //refine the corners; the last three arguments are the search window, the zero zone and the termination criteria
            CvInvoke.CornerSubPix(grayFrame, cornerPoints, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

            // populate the real-world coordinates of the chessboard corners
            var objectList = new List<MCvPoint3D32f>();
            for (int i = 0; i < patternSize.Height; i++)
            {
                for (int j = 0; j < patternSize.Width; j++)
                {
                    objectList.Add(new MCvPoint3D32f(j * squareSize, i * squareSize, 0.0f));
                }
            }
            objectPoints[k] = objectList.ToArray();
            imgPoints[k] = cornerPoints;
        }
    }

    // Calibrate Camera
    double error = CalibrateCamera(objectPoints, imgPoints.Select(a => a.ToArray()).ToArray(), grayFrame.Size,
        cameraMatrix, distCoeffs, CalibType.RationalModel, new MCvTermCriteria(30, 0.1), out rVecs, out tVecs);

    // Get Optimal new Camera Matrix
    var imgSize = CvInvoke.Imread(files[4], ImreadModes.Grayscale).Size;
    Rectangle ROI = new Rectangle();
    newMatrix = CvInvoke.GetOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imgSize, 1, imgSize, ref ROI);

    Mat dupFrame = grayFrame.Clone();

    // Undistort
    CvInvoke.Undistort(grayFrame, dupFrame, newMatrix, distCoeffs);
    var frame = dupFrame.Clone();
    CvInvoke.Imwrite("undistorted.png", frame);

    // Region of Interest
    //var buffer_im = _frame.ToImage<Bgr, byte>();
    //buffer_im.ROI = ROI;
    //Image<Bgr, byte> cropped_im = buffer_im.Copy();
    //cropped_im.Save("cropped.png");

    // Drawing detected chessboard corners
    CvInvoke.DrawChessboardCorners(grayFrame, patternSize, cornerPoints, found);
    //CvInvoke.Imwrite("chessboard.png", _frame);
    CvInvoke.Imwrite("distorted.png", grayFrame);

    Console.WriteLine(MeanError());
}
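The Calibrate example ends by printing `MeanError()`, which is not shown; a minimal sketch of such a reprojection-error helper, assuming the same static fields (`frames`, `objectPoints`, `imgPoints`, `rVecs`, `tVecs`, `cameraMatrix`, `distCoeffs`) and skipping frames where the board was not found, could look like this:

// Hypothetical helper: mean per-point reprojection error over all calibration frames.
static double MeanError()
{
    double totalError = 0;
    int totalPoints = 0;
    for (int k = 0; k < frames; k++)
    {
        if (objectPoints[k] == null || imgPoints[k] == null)
        {
            continue; // board was not found in this frame
        }
        // re-project the 3D board corners with the estimated pose and intrinsics
        PointF[] reprojected = CvInvoke.ProjectPoints(objectPoints[k], rVecs[k], tVecs[k], cameraMatrix, distCoeffs);
        PointF[] detected = imgPoints[k].ToArray();
        for (int i = 0; i < reprojected.Length; i++)
        {
            double dx = reprojected[i].X - detected[i].X;
            double dy = reprojected[i].Y - detected[i].Y;
            totalError += Math.Sqrt(dx * dx + dy * dy);
            totalPoints++;
        }
    }
    return totalPoints > 0 ? totalError / totalPoints : 0;
}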
public void SuperR()
{
    SRC_Img = new Image<Gray, byte>(@"C:\Users\Админ\Downloads\image63341262,2002.png");
    Corrected_Img = SRC_Img.Clone();

    PointF[] corners = new PointF[] {
        new PointF(100, 196), new PointF(261, 190), new PointF(417, 192), new PointF(584, 201),
        new PointF(111, 277), new PointF(284, 287), new PointF(458, 291), new PointF(580, 284),
        new PointF(130, 368), new PointF(276, 395), new PointF(429, 391), new PointF(563, 365)
    };

    /*MCvPoint3D32f[] objCorners = new MCvPoint3D32f[] {
        new MCvPoint3D32f(0, 0, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, 0, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, 0, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, 0, 0.0f),
        new MCvPoint3D32f(0, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, SRC_Img.Height / 2 - 1, 0.0f),
        new MCvPoint3D32f(0, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, SRC_Img.Height - 1, 0.0f)
    };*/

    // X: 0 - 480 / 3 || 0 159 329 479
    // Y: 0 - 210 / 2 || 0 104 209
    MCvPoint3D32f[] objCorners = new MCvPoint3D32f[] {
        new MCvPoint3D32f(0, 0, 0.0f), new MCvPoint3D32f(159, 0, 0.0f), new MCvPoint3D32f(329, 0, 0.0f), new MCvPoint3D32f(479, 0, 0.0f),
        new MCvPoint3D32f(0, 104, 0.0f), new MCvPoint3D32f(159, 104, 0.0f), new MCvPoint3D32f(329, 104, 0.0f), new MCvPoint3D32f(479, 104, 0.0f),
        new MCvPoint3D32f(0, 209, 0.0f), new MCvPoint3D32f(159, 209, 0.0f), new MCvPoint3D32f(329, 209, 0.0f), new MCvPoint3D32f(479, 209, 0.0f)
    };

    VectorOfPointF veccorners = new VectorOfPointF();
    veccorners.Push(corners);
    VectorOfPoint3D32F vecobjcorners = new VectorOfPoint3D32F();
    vecobjcorners.Push(objCorners);

    MCvTermCriteria TermCriteria = new MCvTermCriteria(30, 0.1);
    CvInvoke.CornerSubPix(SRC_Img, veccorners, new Size(2, 2), new Size(-1, -1), TermCriteria);

    IntrinsicCameraParameters intrisic = new IntrinsicCameraParameters();
    ExtrinsicCameraParameters[] extrinsic;
    intrisic.IntrinsicMatrix = new Matrix<double>(new double[,] { { 1, 0, 349.417 }, { 0, 1, 286.417 }, { 0, 0, 1 } });

    try
    {
        Matrix<float> distortCoeffs = new Matrix<float>(1, 4);
        Mat rotationVectors = new Mat();    //rotationVectors[0] = new Mat(3, 1, DepthType.Cv32F, 1);
        Mat translationVectors = new Mat(); //translationVectors[0] = new Mat(1, 3, DepthType.Cv32F, 1);

        /*
        double error = CvInvoke.CalibrateCamera(new MCvPoint3D32f[][] { objCorners }, new PointF[][] { veccorners.ToArray() },
            SRC_Img.Size, intrisic.IntrinsicMatrix, distortCoeffs, CalibType.UserIntrinsicGuess, new MCvTermCriteria(30, 0.01),
            out rotationVectors, out translationVectors);
        */

        /*
        Fisheye.Calibrate(vecobjcorners, veccorners, SRC_Img.Size, intrisic.IntrinsicMatrix, distortCoeffs, rotationVectors, translationVectors,
            Fisheye.CalibrationFlag.UseIntrinsicGuess, TermCriteria);
        */

        Matrix<float> matrix = new Matrix<float>(new float[,] { { 1, 0, 349 }, { 0, 1, 286 }, { 0, 0, 1 } });
        Fisheye.UndistorImage(SRC_Img, Corrected_Img, matrix, new VectorOfFloat(new float[] { 3500, 3500, 0, 0 }));

        Image<Gray, Byte> Res_Img = new Image<Gray, byte>(2 * SRC_Img.Width, SRC_Img.Height);
        CvInvoke.HConcat(SRC_Img, Corrected_Img, Res_Img);

        int error = 0;
        error++;
        //error += 0;
        //Array aa = rotationVectors[0].Data;
        //error += 0;
        //float q = rotationVectors.ElementAt<float>(0);
    }
    catch (Exception)
    {
    }
}
void ProcessFrame(object sender, EventArgs e)
{
    _capture.Retrieve(_frame);
    CvInvoke.CvtColor(_frame, _grayFrame, ColorConversion.Bgr2Gray);

    //apply chessboard detection
    if (_currentMode == Mode.SavingFrames)
    {
        _find = CvInvoke.FindChessboardCorners(_grayFrame, _patternSize, _corners,
            CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);

        //we use this loop so we can show a colour image rather than a gray one:
        if (_find) //chessboard found
        {
            //make measurements more accurate with CornerSubPix
            CvInvoke.CornerSubPix(_grayFrame, _corners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

            //if the go button has been pressed, start acquiring frames; otherwise we just display the points
            if (_startFlag)
            {
                _frameArrayBuffer[_frameBufferSavepoint] = _grayFrame; //store the image
                _frameBufferSavepoint++;                               //increase the buffer position

                //check the state of the buffer
                if (_frameBufferSavepoint == _frameArrayBuffer.Length)
                {
                    _currentMode = Mode.CalculatingIntrinsics; //buffer full
                }
            }

            //draw the results
            CvInvoke.DrawChessboardCorners(_frame, _patternSize, _corners, _find);
            string msg = string.Format("{0}/{1}", _frameBufferSavepoint + 1, _frameArrayBuffer.Length);
            int baseLine = 0;
            var textOrigin = new Point(_frame.Cols - 2 * 120 - 10, _frame.Rows - 2 * baseLine - 10);
            CvInvoke.PutText(_frame, msg, textOrigin, FontFace.HersheyPlain, 3, new MCvScalar(0, 0, 255), 2);

            //calibrate the delay based on the size of the buffer:
            //if the buffer is small you want a big delay, if it is big a small delay
            Thread.Sleep(100); //allow the user to move the board to a different position
        }

        _corners = new VectorOfPointF();
        _find = false;
    }

    if (_currentMode == Mode.CalculatingIntrinsics)
    {
        for (int k = 0; k < _frameArrayBuffer.Length; k++)
        {
            _cornersPointsVec[k] = new VectorOfPointF();
            CvInvoke.FindChessboardCorners(_frameArrayBuffer[k], _patternSize, _cornersPointsVec[k],
                CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);

            //refine the corners for accuracy
            CvInvoke.CornerSubPix(_grayFrame, _cornersPointsVec[k], new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

            //fill our objects list with the real-world measurements for the intrinsic calculations
            var objectList = new List<MCvPoint3D32f>();
            for (int i = 0; i < _height; i++)
            {
                for (int j = 0; j < _width; j++)
                {
                    objectList.Add(new MCvPoint3D32f(j * _squareSize, i * _squareSize, 0.0F));
                }
            }
            //corners_object_list[k] = new MCvPoint3D32f[];
            _cornersObjectList[k] = objectList.ToArray();
            _cornersPointsList[k] = _cornersPointsVec[k].ToArray();
        }

        //the error should be as close to 0 as possible
        double error = CvInvoke.CalibrateCamera(_cornersObjectList, _cornersPointsList, _grayFrame.Size, _cameraMatrix, _distCoeffs,
            CalibType.RationalModel, new MCvTermCriteria(30, 0.1), out _rvecs, out _tvecs);
        MessageBox.Show(@"Intrinsic Calculation Error: " + error.ToString(CultureInfo.InvariantCulture), @"Results",
            MessageBoxButtons.OK, MessageBoxIcon.Information); //display the results to the user
        _currentMode = Mode.Calibrated;
    }

    if (_currentMode == Mode.Calibrated)
    {
        Sub_PicturBox.Image = _frame;
        Mat outFrame = _frame.Clone();
        CvInvoke.Undistort(_frame, outFrame, _cameraMatrix, _distCoeffs);
        _frame = outFrame.Clone();
    }

    Main_Picturebox.Image = _frame;
}
private void ProcessFrame()
{
    if (Ovrvision.imageDataLeft_Mat.Cols == 0 || Ovrvision.imageDataRight_Mat.Cols == 0)
    {
        //Util.WriteLine(ref mScene.rhinoDoc, "waiting camera views");
        return;
    }

    _frame_L = Ovrvision.imageDataLeft_Mat;
    _frame_R = Ovrvision.imageDataRight_Mat;

    CvInvoke.CvtColor(_frame_L, _grayFrame_L, ColorConversion.Bgr2Gray);
    CvInvoke.CvtColor(_frame_R, _grayFrame_R, ColorConversion.Bgr2Gray);

    //calculate the view and projection matrices for OpenGL (left eye)
    _find = CvInvoke.FindChessboardCorners(_grayFrame_L, _patternSize, _corners,
        CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);

    //we use this loop so we can show a colour image rather than a gray one:
    if (_find) //chessboard found
    {
        //Util.WriteLine(ref mScene.rhinoDoc, "left marker found");
        //make measurements more accurate with CornerSubPix
        CvInvoke.CornerSubPix(_grayFrame_L, _corners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(20, 0.001));

        CvInvoke.SolvePnP(objectList.ToArray(), _corners.ToArray(), _cameraMatrix_new, _distCoeffs_new, _rvecAR, _tvecAR);

        // project the cube points and the axis points for drawing
        imagePoints_L = new PointF[cubePoints.Count];
        imagePoints_L = CvInvoke.ProjectPoints(cubePoints.ToArray(), _rvecAR, _tvecAR, _cameraMatrix_new, _distCoeffs_new);
        imagePoints_axis_L = new PointF[axisPoints.Count];
        imagePoints_axis_L = CvInvoke.ProjectPoints(axisPoints.ToArray(), _rvecAR, _tvecAR, _cameraMatrix_new, _distCoeffs_new);

        foundMarker_L = true;

        //calculate the view matrix
        BuildViewMatrix(0);
    }
    else
    {
        if (imagePoints_L != null)
        {
            Array.Clear(imagePoints_L, 0, imagePoints_L.Length);
        }
        if (imagePoints_axis_L != null)
        {
            Array.Clear(imagePoints_axis_L, 0, imagePoints_axis_L.Length);
        }
        foundMarker_L = false;
    }

    //calculate the view and projection matrices for OpenGL (right eye)
    _find = CvInvoke.FindChessboardCorners(_grayFrame_R, _patternSize, _corners,
        CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);

    //we use this loop so we can show a colour image rather than a gray one:
    if (_find) //chessboard found
    {
        //Util.WriteLine(ref mScene.rhinoDoc, "right marker found");
        //make measurements more accurate with CornerSubPix
        CvInvoke.CornerSubPix(_grayFrame_R, _corners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(20, 0.001));

        CvInvoke.SolvePnP(objectList.ToArray(), _corners.ToArray(), _cameraMatrix_new, _distCoeffs_new, _rvecAR, _tvecAR);

        // project the cube points and the axis points for drawing
        imagePoints_R = new PointF[cubePoints.Count];
        imagePoints_R = CvInvoke.ProjectPoints(cubePoints.ToArray(), _rvecAR, _tvecAR, _cameraMatrix_new, _distCoeffs_new);
        imagePoints_axis_R = new PointF[axisPoints.Count];
        imagePoints_axis_R = CvInvoke.ProjectPoints(axisPoints.ToArray(), _rvecAR, _tvecAR, _cameraMatrix_new, _distCoeffs_new);

        foundMarker_R = true;

        //calculate the view matrix
        BuildViewMatrix(1);
    }
    else
    {
        if (imagePoints_R != null)
        {
            Array.Clear(imagePoints_R, 0, imagePoints_R.Length);
        }
        if (imagePoints_axis_R != null)
        {
            Array.Clear(imagePoints_axis_R, 0, imagePoints_axis_R.Length);
        }
        foundMarker_R = false;
    }
}
public static void ProcessFrames()
{
    var cornersObjectList = new List<MCvPoint3D32f[]>();
    var cornersPointsList = new List<PointF[]>();
    var width = 8;  //width of the chessboard: no. of squares in width - 1
    var height = 6; //height of the chessboard: no. of squares in height - 1
    float squareSize = width * height;
    var patternSize = new Size(width, height); //size of the chessboard to be detected
    var corners = new VectorOfPointF();        //corners found on the chessboard
    Mat[] _rvecs, _tvecs;
    var frameArrayBuffer = new List<Mat>();
    var cameraMatrix = new Mat(3, 3, DepthType.Cv64F, 1);
    var distCoeffs = new Mat(8, 1, DepthType.Cv64F, 1);

    // Glob our frames from the static dir and loop over them
    string[] filePaths = Directory.GetFiles(@"/home/dietpi/", "*.jpg");
    var frames = filePaths.Select(path => CvInvoke.Imread(path)).ToList();
    LogUtil.Write("We have " + frames.Count + " frames.");
    var fc = 0;

    foreach (var frame in frames)
    {
        var grayFrame = new Mat();

        // Convert to grayscale
        CvInvoke.CvtColor(frame, grayFrame, ColorConversion.Bgr2Gray);

        //apply chessboard detection
        var boardFound = CvInvoke.FindChessboardCorners(grayFrame, patternSize, corners,
            CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);

        //we use this loop so we can show a colour image rather than a gray one:
        if (boardFound)
        {
            LogUtil.Write("Found board in frame " + fc);

            //make measurements more accurate by using CornerSubPix
            CvInvoke.CornerSubPix(grayFrame, corners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));
            frameArrayBuffer.Add(grayFrame);
        }

        fc++;
        corners = new VectorOfPointF();
    }

    LogUtil.Write("We have " + frameArrayBuffer.Count + " frames to use for mapping.");

    // Loop through the frames in which a board was detected
    foreach (var frame in frameArrayBuffer)
    {
        var frameVect = new VectorOfPointF();
        CvInvoke.FindChessboardCorners(frame, patternSize, frameVect,
            CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);

        //refine the corners for accuracy
        CvInvoke.CornerSubPix(frame, frameVect, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

        //fill our objects list with the real-world measurements for the intrinsic calculations
        var objectList = new List<MCvPoint3D32f>();
        for (int i = 0; i < height; i++)
        {
            for (int j = 0; j < width; j++)
            {
                objectList.Add(new MCvPoint3D32f(j * squareSize, i * squareSize, 0.0F));
            }
        }
        //corners_object_list[k] = new MCvPoint3D32f[];
        cornersObjectList.Add(objectList.ToArray());
        cornersPointsList.Add(frameVect.ToArray());
        frameVect.Dispose();
    }

    //the error should be as close to 0 as possible
    double error = CvInvoke.CalibrateCamera(cornersObjectList.ToArray(), cornersPointsList.ToArray(), frames[0].Size,
        cameraMatrix, distCoeffs, CalibType.RationalModel, new MCvTermCriteria(30, 0.1), out _rvecs, out _tvecs);
    LogUtil.Write("Correction error: " + error);

    var sk = JsonConvert.SerializeObject(cameraMatrix);
    var sd = JsonConvert.SerializeObject(distCoeffs);
    LogUtil.Write("Camera matrix: " + sk);
    LogUtil.Write("Dist coefficient: " + sd);
    DataUtil.SetItem("K", sk);
    DataUtil.SetItem("D", sd);
}
public ChessboardCalibration()
{
    InitializeComponent();

    imageFolder = @"D:\ChessBoardImages";
    cameraMatrixFolder = @"D:\ChessBoardImages\CameraMatrix";
    imageNames = Directory.GetFiles(path: imageFolder);
    nImages = imageNames.Length;
    allImages = new List<Image<Gray, byte>>(capacity: nImages);

    _squareSize = 25.0f; // side length of a chessboard square in millimeters
    _patternSize = new Size(width: _internalCornersWidth, height: _internalCornersHeight);
    _nInternalCorners = _internalCornersWidth * _internalCornersHeight;

    Console.WriteLine("nImages: {0}", nImages.ToString());
    calibration_imageNumUpDown.Minimum = 0;
    calibration_imageNumUpDown.Maximum = nImages - 1;

    foundBoardIdx = new List<int>(capacity: nImages);
    foundFastCorners = new List<VectorOfPointF>(capacity: nImages);

    for (int i = 0; i < nImages; i++)
    {
        allImages.Add(new Image<Gray, byte>(fileName: imageNames[i]));
        VectorOfPointF _corners = new VectorOfPointF();
        bool _find = CvInvoke.FindChessboardCorners(image: allImages[i], patternSize: _patternSize, corners: _corners, flags: CalibCbType.FastCheck);
        Console.WriteLine("imageNum: {0} FastCheck Chessboard Found: {1}", i.ToString(), _find.ToString());

        if (_find)
        {
            CvInvoke.FindChessboardCorners(image: allImages[i], patternSize: _patternSize, corners: _corners, flags: CalibCbType.Accuracy);
            foundBoardIdx.Add(i);
            foundFastCorners.Add(_corners);
        }
    }

    imageSize = new Size(width: allImages[0].Width, height: allImages[0].Height);
    nBoardsFound = foundBoardIdx.Count();
    _cornersObjectArray = new MCvPoint3D32f[nBoardsFound][];
    _cornersPointsArray = new PointF[nBoardsFound][];
    _cornersPointsVec = new VectorOfPointF[nBoardsFound];

    var objectList = new List<MCvPoint3D32f>();
    for (int j = 0; j < _internalCornersWidth; j++)
    {
        for (int k = 0; k < _internalCornersHeight; k++)
        {
            objectList.Add(new MCvPoint3D32f(x: j * _squareSize, y: k * _squareSize, z: 0.0F));
        }
    }

    for (int i = 0; i < nBoardsFound; i++)
    {
        // for higher corner accuracy
        CvInvoke.CornerSubPix(allImages[foundBoardIdx[i]], foundFastCorners[i], win: new Size(11, 11), zeroZone: new Size(-1, -1),
            criteria: new MCvTermCriteria(maxIteration: 30, eps: 0.1));
        Console.WriteLine("SubPix accuracy calculated for image {0}", foundBoardIdx[i].ToString());

        _cornersObjectArray[i] = objectList.ToArray();
        _cornersPointsArray[i] = foundFastCorners[i].ToArray();
    }

    double error = CvInvoke.CalibrateCamera(objectPoints: _cornersObjectArray, imagePoints: _cornersPointsArray, imageSize: imageSize,
        cameraMatrix: _cameraMatrix, distortionCoeffs: _distCoeffs, calibrationType: CalibType.RationalModel,
        termCriteria: new MCvTermCriteria(maxIteration: 30, eps: 0.1), out Mat[] rotationVectors, out Mat[] translationVectors);
}
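The constructor above declares `cameraMatrixFolder` but the shown code never writes anything to it; a hedged sketch of persisting the calibration result, reusing the JSON serialization approach from the ProcessFrames example (the `SaveCalibration` helper and the file names are hypothetical):

// Hypothetical helper: store the calibration result in the folder declared in the constructor above.
private void SaveCalibration(double reprojectionError)
{
    Directory.CreateDirectory(cameraMatrixFolder);
    File.WriteAllText(Path.Combine(cameraMatrixFolder, "cameraMatrix.json"), JsonConvert.SerializeObject(_cameraMatrix));
    File.WriteAllText(Path.Combine(cameraMatrixFolder, "distCoeffs.json"), JsonConvert.SerializeObject(_distCoeffs));
    File.WriteAllText(Path.Combine(cameraMatrixFolder, "reprojectionError.txt"), reprojectionError.ToString(CultureInfo.InvariantCulture));
}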