using System;
using System.Collections.Generic;
using System.Drawing;
using System.IO;
using System.Runtime.InteropServices;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.Util;

public static void initBundleAdjust(string myCalibFile, int xsize, int ysize,
    int npoints, float[] points, int nMeasurements, float[] matrices)
{
    // Camera-related parameters.
    Mat cameraMat;
    Mat distCoeffs;
    LoadCameraFromFile(myCalibFile, out cameraMat, out distCoeffs);
    Size cameraRes = new Size(xsize, ysize);

    // Collect all npoints object points; points is packed as [x0,y0,z0, x1,y1,z1, ...].
    // (The original pushed only the first point and ignored npoints.)
    List<MCvPoint3D32f> objectPoints = new List<MCvPoint3D32f>();
    for (int p = 0; p < npoints; p++)
    {
        objectPoints.Add(new MCvPoint3D32f(points[p * 3], points[p * 3 + 1], points[p * 3 + 2]));
    }
    var points3d = new VectorOfPoint3D32F(objectPoints.ToArray());

    var points2d = new VectorOfVectorOfPointF();
    var visibility = new VectorOfVectorOfInt();
    var cameraMatrix = new VectorOfMat();
    var R_true = new VectorOfMat();
    var T_true = new VectorOfMat();
    var distortionCoeffs = new VectorOfMat();
    // Termination criteria for the (currently disabled) bundle adjustment below.
    MCvTermCriteria criteria = new MCvTermCriteria(70, 1e-10);

    // Define cameras: matrices is packed per measurement as a row-major 3x3
    // rotation followed by a 3x1 translation (12 floats per camera).
    for (int i = 0; i < nMeasurements; i++)
    {
        cameraMatrix.Push(cameraMat);
        distortionCoeffs.Push(distCoeffs);

        Mat _R_true = new Mat(3, 3, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
        double[] matrixArray = new double[9];
        for (int j = 0; j < 3; j++)
        {
            for (int k = 0; k < 3; k++)
            {
                // Index with k, not i: the original "j * 3 + i" scattered the
                // rotation elements into the wrong cells.
                matrixArray[j * 3 + k] = matrices[i * 12 + j * 3 + k];
            }
        }
        Marshal.Copy(matrixArray, 0, _R_true.DataPointer, 9);
        R_true.Push(_R_true);

        Mat _T_true = new Mat(3, 1, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
        double[] vectorArray = new double[3];
        for (int j = 0; j < 3; j++)
        {
            vectorArray[j] = matrices[i * 12 + 9 + j];
        }
        Marshal.Copy(vectorArray, 0, _T_true.DataPointer, 3);
        T_true.Push(_T_true);
    }

    // Project points to image coordinates.
    for (int i = 0; i < nMeasurements; i++)
    {
        // ProjectPoints is documented to take a Rodrigues rotation vector, so
        // convert the 3x3 rotation matrix explicitly.
        using (Mat rvec = new Mat())
        {
            CvInvoke.Rodrigues(R_true[i], rvec);
            var imagePoints2 = new VectorOfPointF();
            CvInvoke.ProjectPoints(points3d, rvec, T_true[i], cameraMatrix[i], distortionCoeffs[i], imagePoints2);

            // A point is visible if its projection falls inside the camera
            // resolution (valid pixel coordinates run 0..size-1, hence "<").
            // Start from an empty vector; the original new VectorOfInt(1)
            // pre-allocated a stray zero element before the Push.
            VectorOfInt vis = new VectorOfInt();
            for (int p = 0; p < imagePoints2.Size; p++)
            {
                bool inShot = 0 <= imagePoints2[p].X && imagePoints2[p].X < cameraRes.Width
                           && 0 <= imagePoints2[p].Y && imagePoints2[p].Y < cameraRes.Height;
                vis.Push(new int[] { inShot ? 1 : 0 });
            }
            points2d.Push(imagePoints2);
            visibility.Push(vis);
        }
    }

    // Emgu.CV does not wrap cv::LevMarqSparse, so the sparse bundle adjustment
    // itself remains disabled:
    // cv::LevMarqSparse lv;
    // lv.bundleAdjust(points_true, imagePoints, visibility, cameraMatrix, R_true, T_true, distCoeffs, criteria);
}
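// LoadCameraFromFile is called above but not shown in the original. A minimal
// sketch, assuming it parses the same big-endian ARToolKit camera_para file
// that DoDetection below reads inline (ARParam, byteSwapInt and byteSwapDouble
// are sketched after DoDetection):
public static void LoadCameraFromFile(string calibFile, out Mat cameraMat, out Mat distCoeffs)
{
    ARParam param = new ARParam();
    using (BinaryReader br = new BinaryReader(File.Open(calibFile, FileMode.Open, FileAccess.Read)))
    {
        param.xsize = byteSwapInt(br.ReadInt32());
        param.ysize = byteSwapInt(br.ReadInt32());
        for (int i = 0; i < 3; i++)
            for (int j = 0; j < 4; j++)
                param.mat[i, j] = byteSwapDouble(br.ReadDouble());
        for (int i = 0; i < 9; i++)
            param.dist_factor[i] = byteSwapDouble(br.ReadDouble());
    }

    // dist_factor[8] is the ARToolKit size-adjustment factor; fold it into
    // the focal-length part of the intrinsic matrix, as DoDetection does.
    double s = param.dist_factor[8];
    param.mat[0, 0] *= s;
    param.mat[0, 1] *= s;
    param.mat[1, 0] *= s;
    param.mat[1, 1] *= s;

    cameraMat = new Mat(3, 3, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
    distCoeffs = new Mat(4, 1, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
    double[] cameraArray = new double[9];
    for (int j = 0; j < 3; j++)
        for (int i = 0; i < 3; i++)
            cameraArray[j * 3 + i] = param.mat[j, i];
    double[] distArray = { param.dist_factor[0], param.dist_factor[1], param.dist_factor[2], param.dist_factor[3] };
    Marshal.Copy(cameraArray, 0, cameraMat.DataPointer, 9);
    Marshal.Copy(distArray, 0, distCoeffs.DataPointer, 4);
}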
public static void DoDetection(string myCalibFile, string myImageFile)
{
    int i, j;
    Mat myImage = CvInvoke.Imread(myImageFile, Emgu.CV.CvEnum.ImreadModes.Color);

    // ARToolKit camera_para files are big-endian, so every value read on a
    // little-endian machine must be byte-swapped.
    FileStream sr = File.Open(myCalibFile, FileMode.Open, FileAccess.Read);
    BinaryReader br = new BinaryReader(sr);
    ARParam param = new ARParam();
    param.xsize = byteSwapInt(br.ReadInt32());
    param.ysize = byteSwapInt(br.ReadInt32());
    for (i = 0; i < 3; i++)
    {
        for (j = 0; j < 4; j++)
        {
            param.mat[i, j] = byteSwapDouble(br.ReadDouble());
        }
    }
    for (i = 0; i < 9; i++)
    {
        param.dist_factor[i] = byteSwapDouble(br.ReadDouble());
    }
    br.Close();
    sr.Close();

    // dist_factor[8] is the ARToolKit size-adjustment factor; fold it into
    // the focal-length part of the intrinsic matrix.
    double s = param.dist_factor[8];
    param.mat[0, 0] *= s;
    param.mat[0, 1] *= s;
    param.mat[1, 0] *= s;
    param.mat[1, 1] *= s;

    Mat cameraMatrix = new Mat(3, 3, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
    Mat distortionCoeffs = new Mat(4, 1, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
    double[] cameraArray = new double[9];
    for (j = 0; j < 3; j++)
    {
        for (i = 0; i < 3; i++)
        {
            cameraArray[j * 3 + i] = param.mat[j, i];
        }
    }
    // dist_factor[0..3] map onto OpenCV's (k1, k2, p1, p2).
    double[] distCoeffArray = new double[4];
    for (i = 0; i < 4; i++)
    {
        distCoeffArray[i] = param.dist_factor[i];
    }
    Marshal.Copy(cameraArray, 0, cameraMatrix.DataPointer, 9);
    Marshal.Copy(distCoeffArray, 0, distortionCoeffs.DataPointer, 4);

    Emgu.CV.Aruco.Dictionary myDict = new Emgu.CV.Aruco.Dictionary(Emgu.CV.Aruco.Dictionary.PredefinedDictionaryName.Dict4X4_1000);
    Emgu.CV.Aruco.GridBoard myBoard = new Emgu.CV.Aruco.GridBoard(2, 1, 0.08f, 0.005f, myDict, 33);

    Mat mappedImage = myImage.Clone();
    CvInvoke.Undistort(myImage, mappedImage, cameraMatrix, distortionCoeffs);

    Emgu.CV.Aruco.DetectorParameters myParams = Emgu.CV.Aruco.DetectorParameters.GetDefault();
    myParams.CornerRefinementMethod = Emgu.CV.Aruco.DetectorParameters.RefinementMethod.Subpix;
    myParams.MarkerBorderBits = 2;
    // Tuning knobs left at their defaults:
    //myParams.AdaptiveThreshWinSizeStep = 10;
    //myParams.AdaptiveThreshWinSizeMax = 23;
    //myParams.AdaptiveThreshWinSizeMin = 3;
    //myParams.MaxMarkerPerimeterRate = 4.0;
    //myParams.MinMarkerPerimeterRate = 0.03;
    //myParams.AdaptiveThreshConstant = 7;
    //myParams.PolygonalApproxAccuracyRate = 0.1;
    //myParams.MinCornerDistanceRate = 0.05;
    //myParams.MinDistanceToBorder = 3;
    //myParams.MinMarkerDistanceRate = 0.05;
    //myParams.CornerRefinementMinAccuracy = 0.1;
    //myParams.CornerRefinementWinSize = 5;
    //myParams.CornerRefinementMaxIterations = 30;
    //myParams.PerspectiveRemoveIgnoredMarginPerCell = 0.13;
    //myParams.PerspectiveRemovePixelPerCell = 8;
    //myParams.MaxErroneousBitsInBorderRate = 0.35;
    //myParams.MinOtsuStdDev = 5.0;
    //myParams.ErrorCorrectionRate = 0.6;

    using (Emgu.CV.Util.VectorOfInt ids = new Emgu.CV.Util.VectorOfInt())
    using (Emgu.CV.Util.VectorOfVectorOfPointF corners = new Emgu.CV.Util.VectorOfVectorOfPointF())
    using (Emgu.CV.Util.VectorOfVectorOfPointF rejected = new Emgu.CV.Util.VectorOfVectorOfPointF())
    {
        Emgu.CV.Aruco.ArucoInvoke.DetectMarkers(mappedImage, myDict, corners, ids, myParams, rejected);
        if (ids.Size > 0)
        {
            //Emgu.CV.Aruco.ArucoInvoke.RefineDetectedMarkers(mappedImage, myBoard, corners, ids, rejected, null, null, 10, 3, true, null, myParams);
            using (Mat rvecs = new Mat(3, 1, Emgu.CV.CvEnum.DepthType.Cv32F, 1))
            using (Mat tvecs = new Mat(3, 1, Emgu.CV.CvEnum.DepthType.Cv32F, 1))
            {
                Emgu.CV.Aruco.ArucoInvoke.EstimatePoseSingleMarkers(corners, 0.08f, cameraMatrix, distortionCoeffs, rvecs, tvecs);
                for (i = 0; i < rvecs.Rows; i++)
                {
                    Emgu.CV.Aruco.ArucoInvoke.DrawAxis(mappedImage, cameraMatrix, distortionCoeffs, rvecs.Row(i), tvecs.Row(i), 0.05f);
                }
            }
            Emgu.CV.Aruco.ArucoInvoke.DrawDetectedMarkers(mappedImage, corners, ids, new MCvScalar(0.0, 200.0, 50.0));
            mappedImage.Save("C:\\Temp\\ArucoDetect.png");
        }
        else if (rejected.Size > 0)
        {
            // Outline every rejected candidate quad. Loop to Length, not
            // GetUpperBound(0): the original "<" against the upper bound
            // skipped the last candidate.
            PointF[][] myPts = rejected.ToArrayOfArray();
            for (i = 0; i < myPts.Length; i++)
            {
                for (j = 0; j < 4; j++)
                {
                    Point p0 = Point.Truncate(myPts[i][j]);
                    Point p1 = Point.Truncate(myPts[i][(j + 1) % 4]);
                    CvInvoke.Line(mappedImage, p0, p1, new MCvScalar(0.0, 0.5, 200.0), 2);
                }
            }
            mappedImage.Save("C:\\Temp\\ArucoReject.png");
        }
    }
}
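// The ARParam container and the byte-swap helpers used above are not shown in
// the original. A minimal sketch, assuming the standard ARToolKit camera_para
// layout (big-endian: xsize, ysize, a 3x4 projection matrix, then 9 distortion
// factors ending in the size-adjustment factor s):
public class ARParam
{
    public int xsize;
    public int ysize;
    public double[,] mat = new double[3, 4];      // 3x4 camera projection matrix
    public double[] dist_factor = new double[9];  // k1, k2, p1, p2, fx, fy, cx, cy, s
}

public static int byteSwapInt(int value)
{
    byte[] bytes = BitConverter.GetBytes(value);
    Array.Reverse(bytes);
    return BitConverter.ToInt32(bytes, 0);
}

public static double byteSwapDouble(double value)
{
    byte[] bytes = BitConverter.GetBytes(value);
    Array.Reverse(bytes);
    return BitConverter.ToDouble(bytes, 0);
}

// Hypothetical usage (file paths are placeholders):
// DoDetection("C:\\Temp\\camera_para.dat", "C:\\Temp\\capture.png");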