Code example #1
        private static double Validate(VectorOfVectorOfPoint3D32F processedObjectPoints, VectorOfVectorOfPointF processedImagePoints, Mat cameraMatrix, Mat distCoeffs, VectorOfPoint3D32F rvecs, VectorOfPoint3D32F tvecs, bool fisheye)
        {
            double error       = 0;
            int    totalpoints = 0;

            if (fisheye)
            {
                for (int i = 0; i < processedObjectPoints.Size; i++)
                {
                    VectorOfPoint3D32F objectFramePoints = processedObjectPoints[i];
                    VectorOfPointF     imageFramePoints  = processedImagePoints[i];
                    RotationVector3D   tvec = new RotationVector3D(new double[] { tvecs[i].X, tvecs[i].Y, tvecs[i].Z });
                    RotationVector3D   rvec = new RotationVector3D(new double[] { rvecs[i].X, rvecs[i].Y, rvecs[i].Z });

                    VectorOfPointF newImageFramePoints = new VectorOfPointF();

                    Fisheye.ProjectPoints(objectFramePoints, newImageFramePoints, rvec, tvec, cameraMatrix, distCoeffs);

                    for (int j = 0; j < newImageFramePoints.Size; j++)
                    {
                        PointF x1 = newImageFramePoints[j];
                        PointF x2 = imageFramePoints[j];
                        totalpoints++;
                        error += Math.Pow(x1.X - x2.X, 2) + Math.Pow(x1.Y - x2.Y, 2);
                    }
                }
            }
            return(Math.Sqrt(error / totalpoints));
        }
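
The method above only measures the fisheye branch; when fisheye is false it divides zero by zero and returns NaN. For a standard pinhole calibration the same per-point RMS can be computed with CvInvoke.ProjectPoints instead of Fisheye.ProjectPoints. The helper below is a minimal sketch under that assumption; the name ValidatePinhole and its body are illustrative and not part of the original project.

        // Sketch (assumption): non-fisheye reprojection error, mirroring the structure of Validate above.
        private static double ValidatePinhole(VectorOfVectorOfPoint3D32F objectPoints, VectorOfVectorOfPointF imagePoints, Mat cameraMatrix, Mat distCoeffs, VectorOfPoint3D32F rvecs, VectorOfPoint3D32F tvecs)
        {
            double error       = 0;
            int    totalpoints = 0;

            for (int i = 0; i < objectPoints.Size; i++)
            {
                RotationVector3D rvec = new RotationVector3D(new double[] { rvecs[i].X, rvecs[i].Y, rvecs[i].Z });
                RotationVector3D tvec = new RotationVector3D(new double[] { tvecs[i].X, tvecs[i].Y, tvecs[i].Z });
                VectorOfPointF   projected = new VectorOfPointF();

                // Project the per-view object points with the pinhole model.
                CvInvoke.ProjectPoints(objectPoints[i], rvec, tvec, cameraMatrix, distCoeffs, projected);

                VectorOfPointF measured = imagePoints[i];
                for (int j = 0; j < projected.Size; j++)
                {
                    error += Math.Pow(projected[j].X - measured[j].X, 2) + Math.Pow(projected[j].Y - measured[j].Y, 2);
                    totalpoints++;
                }
            }

            return Math.Sqrt(error / totalpoints);
        }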
Code example #2
 /// <summary>
 /// Evaluate the camera intrinsic calibration results
 /// </summary>
 private void btnEst_Click(object sender, EventArgs e)
 {
     try
     {
         for (int i = 0; i < left_corners_set.Size; i++)         // compute the left camera calibration error
         {
             objtemp = objectpoints[i];
             CvInvoke.ProjectPoints(objtemp, leftRvecs[i], leftTvecs[i],
                                    leftCamMatrix, leftDistCoeffs, projecttemp);  // back-project the 3D object points
             imgtemp = left_corners_set[i];
             // convert to matrix storage format: needs rethinking!!!
             Image <Bgr, Single> prot = new Image <Bgr, float>(projecttemp.Size, 1);
             Image <Bgr, Single> imgt = new Image <Bgr, float>(imgtemp.Size, 1);
             for (int j = 0; j < projecttemp.Size; j++)
             {
                 prot.Data[0, j, 0] = projecttemp[j].X;
                 prot.Data[0, j, 1] = projecttemp[j].Y;
                 prot.Data[0, j, 2] = 0;
             }
             for (int j = 0; j < imgtemp.Size; j++)
             {
                 imgt.Data[0, j, 0] = imgtemp[j].X;
                 imgt.Data[0, j, 1] = imgtemp[j].Y;
                 imgt.Data[0, j, 2] = 0;
             }
             err        = CvInvoke.Norm(prot, imgt, NormType.L2); // compute the error
             total_err += (err /= (patternSize.Width * patternSize.Height));
         }
         for (int i = 0; i < right_corners_set.Size; i++)                 // compute the right camera calibration error
         {
             objtemp = objectpoints[i];
             CvInvoke.ProjectPoints(objtemp, rightRvecs[i], rightTvecs[i],
                                    rightCamMatrix, rightDistCoeffs, projecttemp);  // back-project the 3D object points
             imgtemp = right_corners_set[i];
             // convert to matrix storage format: needs rethinking!!!
             Image <Bgr, Single> prot = new Image <Bgr, float>(projecttemp.Size, 1);
             Image <Bgr, Single> imgt = new Image <Bgr, float>(imgtemp.Size, 1);
             for (int j = 0; j < projecttemp.Size; j++)
             {
                 prot.Data[0, j, 0] = projecttemp[j].X;
                 prot.Data[0, j, 1] = projecttemp[j].Y;
                 prot.Data[0, j, 2] = 0;
             }
             for (int j = 0; j < imgtemp.Size; j++)
             {
                 imgt.Data[0, j, 0] = imgtemp[j].X;
                 imgt.Data[0, j, 1] = imgtemp[j].Y;
                 imgt.Data[0, j, 2] = 0;
             }
             err        = CvInvoke.Norm(prot, imgt, NormType.L2); // compute the error
             total_err += (err /= (patternSize.Width * patternSize.Height));
         }
         total_err     /= (left_corners_set.Size + right_corners_set.Size);  // take the mean
         Data.LogString = "Average camera intrinsic calibration error: " + total_err.ToString() + "\nPlease save the data~";
     }
     catch (Exception exc)
     {
         Data.LogString = exc.Message;
     }
 }
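
As an aside, the matrix-storage detour above (the step the author flagged as "needs rethinking") can be bypassed by accumulating squared point differences directly. A hedged sketch of the inner error computation, reusing the loop's variable names as assumptions:

             // Sketch (assumption): same per-view error as CvInvoke.Norm(prot, imgt, NormType.L2), without the Image<Bgr, Single> buffers.
             double sq = 0;
             for (int j = 0; j < projecttemp.Size; j++)
             {
                 float dx = projecttemp[j].X - imgtemp[j].X;
                 float dy = projecttemp[j].Y - imgtemp[j].Y;
                 sq += dx * dx + dy * dy;
             }
             err        = Math.Sqrt(sq);                                   // L2 norm of the point differences
             total_err += err / (patternSize.Width * patternSize.Height);  // normalize by the number of corners per view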
Code example #3
        private static double ComputeAngleBetweenCameraNormAndPlaneNorm(VectorOfPoint3D32F trackedFeatures3D, Matrix <double> normal, VectorOfFloat raux, VectorOfFloat taux)
        {
            var tvec = taux.ToArray().Select(i => (double)i).ToArray();

            var rotationMat = new Mat();

            CvInvoke.Rodrigues(raux, rotationMat);
            var rotationMatrix = new Matrix <double>(rotationMat.Rows, rotationMat.Cols, rotationMat.DataPointer);

            // ???
            Utils.Negotiate(ref rotationMatrix);

            var cameraPosition      = rotationMatrix * new Matrix <double>(tvec);
            var cameraPositionPoint = new MCvPoint3D32f((float)cameraPosition[0, 0], (float)cameraPosition[1, 0], (float)cameraPosition[2, 0]);

            var cameraVector = trackedFeatures3D[0] - cameraPositionPoint;

            Func <double, double> radianToDegree = angle => angle * (180.0 / Math.PI);

            double dotProduct = new double[] { cameraVector.X, cameraVector.Y, cameraVector.Z }.Dot(new[] { normal[0, 0], normal[0, 1], normal[0, 2] });
            double acos       = Math.Acos(dotProduct);
            double anglResult = radianToDegree(acos);

            Console.WriteLine($"Normal: [{normal.Data[0, 0]}, {normal.Data[0, 1]}, {normal.Data[0, 2]}]");
            Console.WriteLine($"Angle: {anglResult}");
            Console.WriteLine($"Dot product: {dotProduct}");

            return(anglResult);
        }
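
Note that Math.Acos expects an argument in [-1, 1], while the dot product above is taken between an unnormalized camera-to-feature vector and the plane normal, so it can fall outside that range. A small hedged helper that normalizes both vectors and clamps the cosine before the arc cosine; the name AngleBetweenDegrees is an assumption, not part of the original code:

        // Sketch (assumption): angle in degrees between two 3D vectors, with the cosine clamped to a valid range.
        private static double AngleBetweenDegrees(double[] a, double[] b)
        {
            double dot  = a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
            double lenA = Math.Sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]);
            double lenB = Math.Sqrt(b[0] * b[0] + b[1] * b[1] + b[2] * b[2]);
            double cos  = Math.Max(-1.0, Math.Min(1.0, dot / (lenA * lenB)));
            return Math.Acos(cos) * (180.0 / Math.PI);
        }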
Code example #4
        /// <summary>
        /// Compute pattern pose using PnP algorithm
        /// </summary>
        /// <param name="points"></param>
        /// <param name="calibration"></param>
        /// <param name="points3D"></param>
        /// <param name="raux"></param>
        /// <param name="taux"></param>
        public Transformation ComputePose(VectorOfPoint3D32F points3D, VectorOfPointF points, CameraCalibrationInfo calibration, out VectorOfFloat raux, out VectorOfFloat taux)
        {
            var pose3D = new Transformation();

            var rotationVector32F    = new VectorOfFloat();
            var translationVector32F = new VectorOfFloat();
            var rotationVector       = new Mat();
            var translationVector    = new Mat();

            CvInvoke.SolvePnP(points3D, points, calibration.Intrinsic, calibration.Distortion, rotationVector, translationVector);

            rotationVector.ConvertTo(rotationVector32F, DepthType.Cv32F);
            translationVector.ConvertTo(translationVector32F, DepthType.Cv32F);

            raux = rotationVector32F;
            taux = translationVector32F;

            var rotationMat = new Mat();

            CvInvoke.Rodrigues(rotationVector32F, rotationMat);
            var rotationMatrix = new Matrix <double>(rotationMat.Rows, rotationMat.Cols, rotationMat.DataPointer);

            // Copy to transformation matrix
            for (int col = 0; col < 3; col++)
            {
                for (int row = 0; row < 3; row++)
                {
                    pose3D.SetRotationMatrixValue(row, col, (float)rotationMatrix[row, col]); // Copy rotation component
                }
                pose3D.SetTranslationVectorValue(col, translationVector32F[col]);             // Copy translation component
            }

            // Since solvePnP finds camera location, w.r.t to marker pose, to get marker pose w.r.t to the camera we invert it.
            return(pose3D.GetInverted());
        }
Code example #5
        private static VectorOfByte ComputeNormalAndPlaneInliers(VectorOfPoint3D32F trackedFeatures3D, out Matrix <double> trackedFeatures3Dm, out Mat mean, out Mat eigenvectors, out int numInliers, out Matrix <double> normalOfPlaneMatrix)
        {
            //var tf3D = new double[_trackedFeatures3D.Size, 3];
            //var trackedFeatures3Dm = new Matrix<double>(trackedFeatures3D.Size, 3);
            trackedFeatures3Dm = new Matrix <double>(trackedFeatures3D.Size, 3);
            for (int k = 0; k < trackedFeatures3D.Size; k++)
            {
                trackedFeatures3Dm[k, 0] = trackedFeatures3D[k].X;
                trackedFeatures3Dm[k, 1] = trackedFeatures3D[k].Y;
                trackedFeatures3Dm[k, 2] = trackedFeatures3D[k].Z;

                //tf3D[k, 0] = _trackedFeatures3D[k].X;
                //tf3D[k, 1] = _trackedFeatures3D[k].Y;
                //tf3D[k, 2] = _trackedFeatures3D[k].Z;
            }

            //var eigenvectors = new Mat();
            //var mean = new Mat();
            eigenvectors = new Mat();
            mean         = new Mat();
            CvInvoke.PCACompute(trackedFeatures3Dm, mean, eigenvectors);
            var eigenvectorsMatrix = new Matrix <double>(eigenvectors.Rows, eigenvectors.Cols, eigenvectors.DataPointer);

            //double[] meanArr = tf3D.Mean(0);
            var meanArr = trackedFeatures3Dm.Data.Mean(0);
            var pca     = new PrincipalComponentAnalysis(PrincipalComponentMethod.Center);

            pca.Learn(trackedFeatures3Dm.Data.ToJagged());

            //int numInliers = 0;
            numInliers = 0;
            var normalOfPlane = eigenvectorsMatrix.GetRow(2).ToUMat().ToMat(AccessType.Fast);

            CvInvoke.Normalize(normalOfPlane, normalOfPlane);
            //var normalOfPlaneMatrix = new Matrix<double>(normalOfPlane.Rows, normalOfPlane.Cols, normalOfPlane.DataPointer);
            normalOfPlaneMatrix = new Matrix <double>(normalOfPlane.Rows, normalOfPlane.Cols, normalOfPlane.DataPointer);

            double pToPlaneThresh = Math.Sqrt(pca.Eigenvalues.ElementAt(2));
            var    statusArray    = new byte[trackedFeatures3D.Size];

            for (int k = 0; k < trackedFeatures3D.Size; k++)
            {
                var t1 = new double[] { trackedFeatures3D[k].X, trackedFeatures3D[k].Y, trackedFeatures3D[k].Z };
                var t2 = t1.Subtract(meanArr);
                var w  = new Matrix <double>(new[, ] {
                    { t2[0], t2[1], t2[2] }
                });
                double d = Math.Abs(normalOfPlane.Dot(w));
                if (d < pToPlaneThresh)
                {
                    numInliers++;
                    statusArray[k] = 1;
                }
            }

            var statusVector = new VectorOfByte(statusArray);

            return(statusVector);
        }
Code example #6
File: AutoTestCalibration.cs  Project: wtf3505/emgucv
        public void TestChessboardCalibration()
        {
            Size patternSize = new Size(9, 6);

            Image <Gray, Byte> chessboardImage = EmguAssert.LoadImage <Gray, byte>("left01.jpg");

            Util.VectorOfPointF corners = new Util.VectorOfPointF();
            bool patternWasFound        = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);

            chessboardImage.FindCornerSubPix(
                new PointF[][] { corners.ToArray() },
                new Size(10, 10),
                new Size(-1, -1),
                new MCvTermCriteria(0.05));

            MCvPoint3D32f[] objectPts = CalcChessboardCorners(patternSize, 1.0f);

            using (VectorOfVectorOfPoint3D32F ptsVec = new VectorOfVectorOfPoint3D32F(new MCvPoint3D32f[][] { objectPts }))
                using (VectorOfVectorOfPointF imgPtsVec = new VectorOfVectorOfPointF(corners))
                    using (Mat cameraMatrix = new Mat())
                        using (Mat distortionCoeff = new Mat())
                            using (VectorOfMat rotations = new VectorOfMat())
                                using (VectorOfMat translations = new VectorOfMat())
                                {
                                    Mat             calMat  = CvInvoke.InitCameraMatrix2D(ptsVec, imgPtsVec, chessboardImage.Size, 0);
                                    Matrix <double> calMatF = new Matrix <double>(calMat.Rows, calMat.Cols, calMat.NumberOfChannels);
                                    calMat.CopyTo(calMatF);
                                    double error = CvInvoke.CalibrateCamera(ptsVec, imgPtsVec, chessboardImage.Size, cameraMatrix,
                                                                            distortionCoeff,
                                                                            rotations, translations, CalibType.Default, new MCvTermCriteria(30, 1.0e-10));
                                    using (Mat rotation = new Mat())
                                        using (Mat translation = new Mat())
                                            using (VectorOfPoint3D32F vpObject = new VectorOfPoint3D32F(objectPts))
                                            {
                                                CvInvoke.SolvePnPRansac(
                                                    vpObject,
                                                    corners,
                                                    cameraMatrix,
                                                    distortionCoeff,
                                                    rotation,
                                                    translation,
                                                    true);
                                            }

                                    CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);
                                    using (Mat undistorted = new Mat())
                                    {
                                        CvInvoke.Undistort(chessboardImage, undistorted, cameraMatrix, distortionCoeff);
                                        String title = String.Format("Reprojection error: {0}", error);
                                        //CvInvoke.NamedWindow(title);
                                        //CvInvoke.Imshow(title, undistorted);
                                        //CvInvoke.WaitKey();
                                        //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));
                                    }
                                }
        }
Code example #7
        /// <summary>
        ///
        /// </summary>
        public Pattern()
        {
            Frame       = new Mat();
            GrayImg     = new Mat();
            Keypoints   = new VectorOfKeyPoint();
            Descriptors = new Mat();

            Points2d = new VectorOfPoint();
            Points3d = new VectorOfPoint3D32F();
        }
Code example #8
 /// <summary>
 /// Estimates extrinsic camera parameters using known intrinsic parameters and extrinsic parameters for each view. The coordinates of 3D object points and their correspondent 2D projections must be specified. This function also minimizes back-projection error. 
 /// </summary>
 /// <param name="objectPoints">The array of object points</param>
 /// <param name="imagePoints">The array of corresponding image points</param>
 /// <param name="intrin">The intrinsic parameters</param>
 /// <param name="method">Method for solving a PnP problem</param>
 /// <returns>The extrinsic parameters</returns>
 public static ExtrinsicCameraParameters SolvePnP(
    MCvPoint3D32f[] objectPoints,
    PointF[] imagePoints,
    IntrinsicCameraParameters intrin, 
    CvEnum.SolvePnpMethod method = CvEnum.SolvePnpMethod.Iterative)
 {
    ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
    using (VectorOfPoint3D32F objPtVec = new VectorOfPoint3D32F(objectPoints))
    using (VectorOfPointF imgPtVec = new VectorOfPointF(imagePoints))
       CvInvoke.SolvePnP(objPtVec, imgPtVec, intrin.IntrinsicMatrix, intrin.DistortionCoeffs, p.RotationVector, p.TranslationVector, false, method);
    return p;
 }
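
A hedged usage sketch for this wrapper; the point arrays below are made-up placeholders (a unit square and four rough image positions), and intrin stands for an already populated IntrinsicCameraParameters. None of these values come from the original project; the lines only show the call shape.

 // Sketch (assumption): calling the wrapper with four coplanar object points and their image projections.
 MCvPoint3D32f[] objectPts = { new MCvPoint3D32f(0, 0, 0), new MCvPoint3D32f(1, 0, 0), new MCvPoint3D32f(1, 1, 0), new MCvPoint3D32f(0, 1, 0) };
 PointF[]        imagePts  = { new PointF(100, 100), new PointF(200, 102), new PointF(198, 201), new PointF(99, 199) };
 ExtrinsicCameraParameters extrinsic = SolvePnP(objectPts, imagePts, intrin);   // intrin: known intrinsic parameters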
Code example #9
File: CeresEXT.cs  Project: sxqwycg/CameraCalibratie
        public static PointF[] ProjectPointd2D_Manually(this PinholeCamera cam, MCvPoint3D32f[] points3d, out MCvPoint3D32f[] visible)
        {
            var r   = new List <PointF>();
            var vis = new List <MCvPoint3D32f>();


            var transftk = cam.worldMat.Inverted();
            var transf   = transftk.tocv();


            VectorOfPoint3D32F points3dvec       = new VectorOfPoint3D32F(points3d);
            VectorOfPoint3D32F points3dvectransf = new VectorOfPoint3D32F(points3dvec.Size);

            CvInvoke.Transform(points3dvec, points3dvectransf, transf);


            for (int i = 0; i < points3dvec.Size; i++)
            {
                var camcoord = points3dvectransf[i];

                if (camcoord.Z < 0)
                {
                    continue;
                }


                var x = camcoord.X / camcoord.Z;
                var y = camcoord.Y / camcoord.Z;

                var r2      = x * x + y * y;
                var r4      = r2 * r2;
                var r6      = r4 * r2;
                var r_coeff = ((1) + cam.DistortionR1 * r2 + cam.DistortionR2 * r4 + cam.DistortionR3 * r6);
                var tdistx  = 2 * cam.DistortionT1 * x * y + cam.DistortionT2 * (r2 + 2 * x * x);
                var tdisty  = 2 * cam.DistortionT2 * x * y + cam.DistortionT1 * (r2 + 2 * y * y);
                var xd      = x * r_coeff + tdistx;
                var yd      = y * r_coeff + tdisty;

                var im_x = cam.Intrinsics.fx * xd + cam.Intrinsics.cx;
                var im_y = cam.Intrinsics.fy * yd + cam.Intrinsics.cy;

                if (im_x >= 0 && im_x <= cam.PictureSize.Width && im_y >= 0 && im_y <= cam.PictureSize.Height)
                {
                    vis.Add(points3dvec[i]);
                    r.Add(new PointF((float)im_x, (float)im_y));
                }
            }
            visible = vis.ToArray();
            return(r.ToArray());
        }
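
For reference, the loop above implements the standard pinhole projection with Brown-Conrady distortion: with x = X/Z, y = Y/Z and r² = x² + y², the distorted coordinates are x' = x(1 + k1·r² + k2·r⁴ + k3·r⁶) + 2·p1·x·y + p2·(r² + 2x²) and y' = y(1 + k1·r² + k2·r⁴ + k3·r⁶) + p1·(r² + 2y²) + 2·p2·x·y, and the pixel coordinates are u = fx·x' + cx, v = fy·y' + cy. Here k1..k3 correspond to DistortionR1..R3, p1 and p2 to DistortionT1 and DistortionT2, and fx, fy, cx, cy to the members of cam.Intrinsics.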
Code example #10
File: Utils.cs  Project: Lewis945/RubiksCubeSolver
        public static Mat Get3dPointsMat(VectorOfPoint3D32F points)
        {
            var mat    = new Mat(points.Size, 3, DepthType.Cv64F, 1); // 64-bit depth so the Matrix<double> wrapper below reads the underlying data correctly
            var matrix = new Matrix <double>(points.Size, 3, mat.DataPointer);

            for (int i = 0; i < points.Size; i++)
            {
                matrix[i, 0] = points[i].X;
                matrix[i, 1] = points[i].Y;
                matrix[i, 2] = points[i].Z;
            }

            return(mat);
        }
Code example #11
        private double Angle(PointF point, Calibration calibration)
        {
            var undistorted = new VectorOfPointF();
            var homogeneous = new VectorOfPoint3D32F();

            // Undistort and make homogeneous
            CvInvoke.UndistortPoints(new VectorOfPointF(new[] { point }), undistorted, calibration.CameraMatrix, calibration.DistortionCoefficients);
            CvInvoke.ConvertPointsToHomogeneous(undistorted, homogeneous);

            // Calculate angle
            var sign = homogeneous[0].X / Math.Abs(homogeneous[0].X);

            return(sign * Math.Acos(1 / homogeneous[0].Norm) * 180 / Math.PI);
        }
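
The geometry behind the return value: after undistortion, ConvertPointsToHomogeneous yields a ray direction (x, y, 1), and the angle between that ray and the optical axis (0, 0, 1) satisfies cos θ = 1 / ||(x, y, 1)||, which is what the Math.Acos call evaluates. A tiny standalone check with made-up normalized coordinates (the numbers are illustrative assumptions):

        // Sketch (assumption): angle of an image ray to the optical axis from normalized coordinates (x, y).
        double x = 0.25, y = 0.0;                                        // illustrative normalized coordinates
        double norm = Math.Sqrt(x * x + y * y + 1.0);                    // length of the homogeneous ray (x, y, 1)
        double angleDegrees = Math.Acos(1.0 / norm) * 180.0 / Math.PI;   // about 14 degrees here, i.e. atan(0.25)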
Code example #12
        private static void ComputeRotationAndTranslation(VectorOfPoint3D32F trackedFeatures3D, VectorOfKeyPoint trackedFeatures, CameraCalibrationInfo calibrationInfo, out VectorOfFloat raux, out VectorOfFloat taux)
        {
            var rotationVector32F    = new VectorOfFloat();
            var translationVector32F = new VectorOfFloat();
            var rotationVector       = new Mat();
            var translationVector    = new Mat();

            CvInvoke.SolvePnP(trackedFeatures3D, Utils.GetPointsVector(trackedFeatures), calibrationInfo.Intrinsic, calibrationInfo.Distortion, rotationVector, translationVector);

            rotationVector.ConvertTo(rotationVector32F, DepthType.Cv32F);
            translationVector.ConvertTo(translationVector32F, DepthType.Cv32F);

            raux = rotationVector32F;
            taux = translationVector32F;
        }
Code example #13
        public SimpleAdHocTracker(CameraCalibrationInfo calibrationInfo)
        {
            _calibrationInfo = calibrationInfo;

            _detector = new ORBDetector();

            _prevGray = new Mat();
            _currGray = new Mat();

            _raux = new Mat();
            _taux = new Mat();

            _bootstrapKp       = new VectorOfKeyPoint();
            _trackedFeatures   = new VectorOfKeyPoint();
            _trackedFeatures3D = new VectorOfPoint3D32F();
        }
Code example #14
        public static double ValidateCharuco(int squaresX, int squaresY, float squareLength, float markerLength, PredefinedDictionaryName dictionary, Size imageSize, VectorOfInt charucoIds, VectorOfPointF charucoCorners, VectorOfInt markerCounterPerFrame, bool fisheye, Func <byte[], byte[]> GetRemoteChessboardCorner, Mat cameraMatrix, Mat distCoeffs)
        {
            VectorOfVectorOfPoint3D32F processedObjectPoints = new VectorOfVectorOfPoint3D32F();
            VectorOfVectorOfPointF     processedImagePoints  = new VectorOfVectorOfPointF();
            VectorOfPoint3D32F         rvecs = new VectorOfPoint3D32F();
            VectorOfPoint3D32F         tvecs = new VectorOfPoint3D32F();

            int k = 0;

            for (int i = 0; i < markerCounterPerFrame.Size; i++)
            {
                int                nMarkersInThisFrame       = markerCounterPerFrame[i];
                VectorOfPointF     currentImgPoints          = new VectorOfPointF();
                VectorOfPointF     currentImgPointsUndistort = new VectorOfPointF();
                VectorOfInt        currentIds       = new VectorOfInt();
                VectorOfPoint3D32F currentObjPoints = new VectorOfPoint3D32F();
                Mat                tvec             = new Mat();
                Mat                rvec             = new Mat();

                for (int j = 0; j < nMarkersInThisFrame; j++)
                {
                    currentImgPoints.Push(new PointF[] { charucoCorners[k] });
                    currentIds.Push(new int[] { charucoIds[k] });
                    currentObjPoints.Push(new MCvPoint3D32f[] { GetChessboardCorner(squaresX, squaresY, squareLength, markerLength, charucoIds[k], dictionary, GetRemoteChessboardCorner) });
                    k++;
                }

                Mat distCoeffsNew = new Mat(1, 4, DepthType.Cv64F, 1);
                distCoeffsNew.SetValue(0, 0, 0);
                distCoeffsNew.SetValue(0, 1, 0);
                distCoeffsNew.SetValue(0, 2, 0);
                distCoeffsNew.SetValue(0, 3, 0);

                Fisheye.UndistorPoints(currentImgPoints, currentImgPointsUndistort, cameraMatrix, distCoeffs, Mat.Eye(3, 3, DepthType.Cv64F, 1), Mat.Eye(3, 3, DepthType.Cv64F, 1));
                if (ArucoInvoke.EstimatePoseCharucoBoard(currentImgPointsUndistort, currentIds, CreateBoard(squaresX, squaresY, squareLength, markerLength, new Dictionary(dictionary)), Mat.Eye(3, 3, DepthType.Cv64F, 1), distCoeffsNew, rvec, tvec))
                {
                    rvecs.Push(new MCvPoint3D32f[] { new MCvPoint3D32f((float)rvec.GetValue(0, 0), (float)rvec.GetValue(1, 0), (float)rvec.GetValue(2, 0)) });
                    tvecs.Push(new MCvPoint3D32f[] { new MCvPoint3D32f((float)tvec.GetValue(0, 0), (float)tvec.GetValue(1, 0), (float)tvec.GetValue(2, 0)) });

                    processedImagePoints.Push(currentImgPoints);
                    processedObjectPoints.Push(currentObjPoints);
                }
            }

            return(Validate(processedObjectPoints, processedImagePoints, cameraMatrix, distCoeffs, rvecs, tvecs, fisheye));
        }
Code example #15
File: Utils.cs  Project: Lewis945/RubiksCubeSolver
        public static void KeepVectorsByStatus(ref VectorOfKeyPoint f1, ref VectorOfPoint3D32F f2, VectorOfByte status)
        {
            var newF1 = new VectorOfKeyPoint();
            var newF2 = new VectorOfPoint3D32F();

            for (int i = 0; i < status.Size; i++)
            {
                if (status[i] > 0)
                {
                    newF1.Push(new[] { f1[i] });
                    newF2.Push(new[] { f2[i] });
                }
            }

            f1 = newF1;
            f2 = newF2;
        }
Code example #16
        public static (Mat cameraMatrix, Mat distCoeffs, double rms) CalibrateCharuco(int squaresX, int squaresY, float squareLength, float markerLength, PredefinedDictionaryName dictionary, Size imageSize, VectorOfInt charucoIds, VectorOfPointF charucoCorners, VectorOfInt markerCounterPerFrame, bool fisheye, Func <byte[], byte[]> GetRemoteChessboardCorner)
        {
            Mat    cameraMatrix = new Mat(3, 3, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
            Mat    distCoeffs   = new Mat(1, 4, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
            double rms          = 0.0;

            VectorOfVectorOfPoint3D32F processedObjectPoints = new VectorOfVectorOfPoint3D32F();
            VectorOfVectorOfPointF     processedImagePoints  = new VectorOfVectorOfPointF();

            int k = 0;

            for (int i = 0; i < markerCounterPerFrame.Size; i++)
            {
                int                nMarkersInThisFrame = markerCounterPerFrame[i];
                VectorOfPointF     currentImgPoints    = new VectorOfPointF();
                VectorOfPoint3D32F currentObjPoints    = new VectorOfPoint3D32F();

                for (int j = 0; j < nMarkersInThisFrame; j++)
                {
                    currentImgPoints.Push(new PointF[] { charucoCorners[k] });
                    currentObjPoints.Push(new MCvPoint3D32f[] { GetChessboardCorner(squaresX, squaresY, squareLength, markerLength, charucoIds[k], dictionary, GetRemoteChessboardCorner) });
                    k++;
                }

                processedImagePoints.Push(currentImgPoints);
                processedObjectPoints.Push(currentObjPoints);
            }

            VectorOfPoint3D32F rvecs = new VectorOfPoint3D32F();
            VectorOfPoint3D32F tvecs = new VectorOfPoint3D32F();

            if (fisheye)
            {
                Fisheye.Calibrate(processedObjectPoints, processedImagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, Fisheye.CalibrationFlag.FixSkew | Fisheye.CalibrationFlag.RecomputeExtrinsic, new MCvTermCriteria(400, double.Epsilon));
            }
            else
            {
                CvInvoke.CalibrateCamera(processedObjectPoints, processedImagePoints, imageSize, cameraMatrix, distCoeffs, new Mat(), new Mat(), CalibType.FixK3, new MCvTermCriteria(30, 1e-4));
            }

            rms = Validate(processedObjectPoints, processedImagePoints, cameraMatrix, distCoeffs, rvecs, tvecs, fisheye);

            return(cameraMatrix, distCoeffs, rms);
        }
Code example #17
        public bool calibrate(float squareEdge, Size patternSize, string[] images)
        {
            VectorOfVectorOfPointF corners = findCorners(squareEdge, patternSize, images);

            if (corners.Size == 0)
            {
                Console.WriteLine("Cannot find chessboard!");
                return(false);
            }

            VectorOfPoint3D32F         chessboard   = getChessboardCorners(squareEdge, patternSize);
            VectorOfVectorOfPoint3D32F objectPoints = new VectorOfVectorOfPoint3D32F();

            for (int i = corners.Size; i > 0; i--)
            {
                objectPoints.Push(chessboard);
            }

            CameraParam param = new CameraParam();

            // set mats
            Mat rotationMat    = new Mat();
            Mat translationMat = new Mat();

            Image <Gray, Byte> image = new Image <Gray, Byte>(images[0]);

            imgSize = image.Size;

            CvInvoke.CalibrateCamera(
                objectPoints,
                corners,
                image.Size,
                param.cameraMatrix.Mat,
                param.distortionCoeffs.Mat,
                rotationMat,
                translationMat,
                CalibType.Default,
                new MCvTermCriteria(30, 0.1));

            cameraParam.Clear();
            cameraParam.Add(param);
            return(_isCalibrated = true);
        }
Code example #18
        public PtamLikeAlgorithm(CameraCalibrationInfo calibrationInfo)
        {
            _calibrationInfo = calibrationInfo;

            _detector = new ORBDetector();

            _prevGray = new Mat();
            _currGray = new Mat();

            _raux = new VectorOfFloat();
            _taux = new VectorOfFloat();

            _bootstrapKp       = new VectorOfKeyPoint();
            _trackedFeatures   = new VectorOfKeyPoint();
            _trackedFeatures3D = new VectorOfPoint3D32F();

            InitialP1 = new Matrix <double>(3, 4);
            InitialP1.SetIdentity();
        }
Code example #19
        public bool BootstrapTrack(Mat img, Action <Matrix <double>, VectorOfPoint3D32F, Point[]> onPlaneFound)
        {
            ValidateImages(_prevGray, img);

            CvInvoke.CvtColor(img, _currGray, ColorConversion.Bgr2Gray);

            ComputeOpticalFlowAndValidate(_prevGray, _currGray, ref _trackedFeatures, ref _bootstrapKp, img);

            Matrix <double> homography;

            ComputeHomographyAndValidate(ref _trackedFeatures, ref _bootstrapKp, out homography);

            var bootstrapKpOrig     = new VectorOfKeyPoint(_bootstrapKp.ToArray());
            var trackedFeaturesOrig = new VectorOfKeyPoint(_trackedFeatures.ToArray());

            //Attempt at 3D reconstruction (triangulation) if conditions are right
            var rigidT = CvInvoke.EstimateRigidTransform(Utils.GetPointsVector(_trackedFeatures).ToArray(), Utils.GetPointsVector(_bootstrapKp).ToArray(), false);
            var matrix = new Matrix <double>(rigidT.Rows, rigidT.Cols, rigidT.DataPointer);

            if (CvInvoke.Norm(matrix.GetCol(2)) > 100)
            {
                //camera motion is sufficient
                var result = OpenCvUtilities.CameraPoseAndTriangulationFromFundamental(_calibrationInfo, _trackedFeatures, _bootstrapKp, InitialP1);
                if (result.Result)
                {
                    _trackedFeatures = result.FilteredTrackedFeaturesKp;
                    _bootstrapKp     = result.FilteredBootstrapKp;

                    _trackedFeatures3D = result.TrackedFeatures3D;

                    int             numInliers;
                    Matrix <double> trackedFeatures3Dm;
                    Mat             mean;
                    Mat             eigenvectors;
                    Matrix <double> normalOfPlaneMatrix;
                    var             statusVector = ComputeNormalAndPlaneInliers(_trackedFeatures3D, out trackedFeatures3Dm, out mean, out eigenvectors, out numInliers, out normalOfPlaneMatrix);

                    bool bootstrapping = numInliers / (double)_trackedFeatures3D.Size < 0.75;
                    if (!bootstrapping)
                    {
                        //enough features are coplanar, keep them and flatten them on the XY plane
                        Utils.KeepVectorsByStatus(ref _trackedFeatures, ref _trackedFeatures3D, statusVector);

                        //the PCA has the major axes of the plane
                        var projected = new Mat();
                        CvInvoke.PCAProject(trackedFeatures3Dm, mean, eigenvectors, projected);
                        var projectedMatrix = new Matrix <double>(projected.Rows, projected.Cols, projected.DataPointer);
                        projectedMatrix.GetCol(2).SetValue(0);
                        projectedMatrix.CopyTo(trackedFeatures3Dm);

                        //InitialP1 = result.P2;

                        VectorOfFloat raux;
                        VectorOfFloat taux;

                        ComputeRotationAndTranslation(_trackedFeatures3D, _trackedFeatures, _calibrationInfo, out raux, out taux);

                        double angle = ComputeAngleBetweenCameraNormAndPlaneNorm(_trackedFeatures3D, normalOfPlaneMatrix, raux, taux);
                        if (angle > 65 && angle < 110)
                        {
                            var points2D = FindFaceCorners(_currGray, _trackedFeatures);
                            onPlaneFound(normalOfPlaneMatrix.Clone(), new VectorOfPoint3D32F(_trackedFeatures3D.ToArray()), points2D.Points);

                            var face = ExtractFace(img, points2D);
                            face.Save("C:\\Users\\zakharov\\Documents\\Repos\\Mine\\Rc\\src\\RubiksCube.OpenCV.TestCase\\extracted.jpg");
                        }

                        return(true);
                    }

                    //cerr << "not enough features are coplanar" << "\n";
                    _bootstrapKp     = bootstrapKpOrig;
                    _trackedFeatures = trackedFeaturesOrig;
                }
            }

            return(false);
        }
Code example #20
        /// <summary>
        /// Stereo calibration quality evaluation
        /// </summary>
        private void Stereo_Calib_Quanlity_Check()
        {
            int                board_n = patternSize.Width * patternSize.Height;
            float              a = 0, b = 0, c = 0;
            float              x = 0, y = 0;
            double             err        = 0;
            double             allerr     = 0;
            VectorOfPoint3D32F leftLines  = new VectorOfPoint3D32F();
            VectorOfPoint3D32F rightLines = new VectorOfPoint3D32F();
            // store the rectified point sets
            VectorOfVectorOfPointF leftpoints_rectify  = new VectorOfVectorOfPointF();
            VectorOfVectorOfPointF rightpoints_rectify = new VectorOfVectorOfPointF();
            VectorOfPointF         tempPoints          = new VectorOfPointF();
            VectorOfPointF         rimgPoints_temp     = new VectorOfPointF();
            VectorOfPointF         limgPoints_temp     = new VectorOfPointF();

            for (int i = 0; i < left_corners_set.Size; i++)
            {
                // sparse point rectification, see Learning OpenCV 3
                CvInvoke.UndistortPoints(left_corners_set[i], tempPoints, leftCamMatrix, leftDistCoeffs, null, leftCamMatrix);
                leftpoints_rectify.Push(tempPoints);
            }
            for (int i = 0; i < right_corners_set.Size; i++)
            {
                // sparse point rectification, see Learning OpenCV 3
                CvInvoke.UndistortPoints(right_corners_set[i], tempPoints, rightCamMatrix, rightDistCoeffs, null, rightCamMatrix);
                rightpoints_rectify.Push(tempPoints);
            }
            // distance from left-image points to their epipolar lines
            for (int i = 0; i < left_corners_set.Size; i++)
            {
                rimgPoints_temp = rightpoints_rectify[i];
                limgPoints_temp = leftpoints_rectify[i];
                CvInvoke.ComputeCorrespondEpilines(rimgPoints_temp, 2, F, leftLines);  // compute epipolar lines
                for (int j = 0; j < board_n; j++)
                {
                    // read the stored values
                    a = leftLines[j].X;
                    b = leftLines[j].Y;
                    c = leftLines[j].Z;
                    x = limgPoints_temp[j].X;
                    y = limgPoints_temp[j].Y;
                    // distance from each point to the line (point-to-line distance formula)
                    err     = Math.Abs(a * x + b * y + c) / Math.Sqrt(a * a + b * b);
                    allerr += err;
                }
            }
            // distance from right-image points to their epipolar lines
            for (int i = 0; i < right_corners_set.Size; i++)
            {
                rimgPoints_temp = rightpoints_rectify[i];
                limgPoints_temp = leftpoints_rectify[i];
                CvInvoke.ComputeCorrespondEpilines(limgPoints_temp, 1, F, rightLines);  // compute epipolar lines
                for (int j = 0; j < board_n; j++)
                {
                    // read the stored values
                    a = rightLines[j].X;
                    b = rightLines[j].Y;
                    c = rightLines[j].Z;
                    x = rimgPoints_temp[j].X;
                    y = rimgPoints_temp[j].Y;
                    // distance from each point to the line (point-to-line distance formula)
                    err     = Math.Abs(a * x + b * y + c) / Math.Sqrt(a * a + b * b);
                    allerr += err;
                }
            }
            dist_err = allerr / (2 * (left_corners_set.Size + right_corners_set.Size) * board_n);
        }
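
The epipolar-distance formula used inline in both loops can be factored out. A hedged helper sketch for the distance from point (x, y) to the line a·x + b·y + c = 0; the name PointToLineDistance is an assumption and not part of the original code:

        // Sketch (assumption): point-to-epipolar-line distance, as computed inline above.
        private static double PointToLineDistance(float a, float b, float c, float x, float y)
        {
            return Math.Abs(a * x + b * y + c) / Math.Sqrt(a * a + b * b);
        }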
Code example #21
        public bool calibrate(float squareEdge, Size patternSize, string[] imagesLeft, string[] imagesRight)
        {
            List <VectorOfVectorOfPointF> listCorners = findCorners(squareEdge, patternSize, imagesLeft, imagesRight);

            if (listCorners.Last().Size == 0)
            {
                Console.WriteLine("Cannot find chessboard!");
                return(false);
            }

            VectorOfPoint3D32F         chessboard   = getChessboardCorners(squareEdge, patternSize);
            VectorOfVectorOfPoint3D32F objectPoints = new VectorOfVectorOfPoint3D32F();

            for (int i = listCorners.Last().Size; i > 0; i--)
            {
                objectPoints.Push(chessboard);
            }

            Image <Gray, Byte> image = new Image <Gray, Byte>(imagesLeft[0]);

            CameraParam camLeft  = new CameraParam();
            CameraParam camRight = new CameraParam();

            CvInvoke.StereoCalibrate(
                objectPoints,
                listCorners[0],
                listCorners[1],
                camLeft.cameraMatrix.Mat,
                camLeft.distortionCoeffs.Mat,
                camRight.cameraMatrix.Mat,
                camRight.distortionCoeffs.Mat,
                image.Size,
                R, T, E, F,
                CalibType.Default,
                new MCvTermCriteria(30, 0.1e5));


            Rectangle roi1 = Rectangle.Empty, roi2 = Rectangle.Empty;

            CvInvoke.StereoRectify(
                camLeft.cameraMatrix.Mat,
                camLeft.distortionCoeffs.Mat,
                camRight.cameraMatrix.Mat,
                camRight.distortionCoeffs.Mat,
                image.Size,
                R, T,
                camLeft.rotationMatrix.Mat,
                camRight.rotationMatrix.Mat,
                camLeft.translationMatrix.Mat,
                camRight.translationMatrix.Mat,
                disparityMatrix,
                StereoRectifyType.Default,
                -1, Size.Empty,
                ref roi1,
                ref roi2);


            cameraParam.Clear();
            cameraParam.Add(camLeft);
            cameraParam.Add(camRight);

            rectTransforms = getRectifyTransforms();

            Matrix <double> p = new Matrix <double>(new double[, ] {
                { image.Size.Width / 2 }, { image.Size.Height / 2 }, { 1 }
            });
            Matrix <double> px = rectTransforms[0].Mul(p);

            double[,] dL = p.Sub(px.Mul(1 / px[2, 0])).Data;

            px           = rectTransforms[1].Mul(p);
            double[,] dR = p.Sub(px.Mul(1 / px[2, 0])).Data;

            rectTransforms = getRectifyTransforms(dL, dR);

            rectMask[0] = getRectMask(image.Size, rectTransforms[0]);
            rectMask[1] = getRectMask(image.Size, rectTransforms[1]);

            _isStereo = true;
            return(_isCalibrated = true);
        }
Code example #22
        /// <summary>
        ///
        /// </summary>
        /// <param name="calibrationInfo"></param>
        /// <param name="trackedFeaturesKp"></param>
        /// <param name="bootstrapKp"></param>
        /// <param name="p"></param>
        /// <param name="p1"></param>
        /// <returns></returns>
        public static TriangulateAndCheckReprojResult TriangulateAndCheckReproj(CameraCalibrationInfo calibrationInfo, VectorOfKeyPoint trackedFeaturesKp, VectorOfKeyPoint bootstrapKp, Matrix <double> p, Matrix <double> p1)
        {
            var result = new TriangulateAndCheckReprojResult();

            var trackedFeaturesPoints = Utils.GetPointsVector(trackedFeaturesKp);
            var bootstrapPoints       = Utils.GetPointsVector(bootstrapKp);

            //undistort
            var normalizedTrackedPts   = new VectorOfPointF();
            var normalizedBootstrapPts = new VectorOfPointF();

            CvInvoke.UndistortPoints(trackedFeaturesPoints, normalizedTrackedPts, calibrationInfo.Intrinsic, calibrationInfo.Distortion);
            CvInvoke.UndistortPoints(bootstrapPoints, normalizedBootstrapPts, calibrationInfo.Intrinsic, calibrationInfo.Distortion);

            //triangulate
            var pt3Dh = new Mat();

            CvInvoke.TriangulatePoints(p, p1, normalizedBootstrapPts, normalizedTrackedPts, pt3Dh);
            var pt3DhMatrix = new Matrix <float>(pt3Dh.Rows, pt3Dh.Cols, pt3Dh.DataPointer);

            var pt3DMat = new Mat();

            CvInvoke.ConvertPointsFromHomogeneous(pt3DhMatrix.Transpose(), pt3DMat);
            var pt3D = new Matrix <float>(pt3DMat.Rows, pt3DMat.Cols * pt3DMat.NumberOfChannels, pt3DMat.DataPointer);

            var statusArray = new byte[pt3D.Rows];

            for (int i = 0; i < pt3D.Rows; i++)
            {
                statusArray[i] = (pt3D[i, 2] > 0) ? (byte)1 : (byte)0;
            }

            var status = new VectorOfByte(statusArray);
            int count  = CvInvoke.CountNonZero(status);

            double percentage = count / (double)pt3D.Rows;

            if (percentage > 0.75)
            {
                //calculate reprojection
                var rvec = new VectorOfFloat(new float[] { 0, 0, 0 });
                var tvec = new VectorOfFloat(new float[] { 0, 0, 0 });
                var reprojectedPtSet1 = new VectorOfPointF();
                CvInvoke.ProjectPoints(pt3D, rvec, tvec, calibrationInfo.Intrinsic, calibrationInfo.Distortion,
                                       reprojectedPtSet1);

                double reprojErr = CvInvoke.Norm(reprojectedPtSet1, bootstrapPoints) / bootstrapPoints.Size;
                if (reprojErr < 5)
                {
                    statusArray = new byte[bootstrapPoints.Size];
                    for (int i = 0; i < bootstrapPoints.Size; ++i)
                    {
                        var pointsDiff    = Utils.SubstarctPoints(bootstrapPoints[i], reprojectedPtSet1[i]);
                        var vectorOfPoint = new VectorOfPointF(new[] { pointsDiff });
                        statusArray[i] = CvInvoke.Norm(vectorOfPoint) < 20.0 ? (byte)1 : (byte)0;
                    }

                    status = new VectorOfByte(statusArray);

                    var trackedFeatures3D = new VectorOfPoint3D32F(Utils.Get3dPointsArray(pt3D));

                    Utils.KeepVectorsByStatus(ref trackedFeaturesKp, ref trackedFeatures3D, status);

                    result.Error                     = reprojErr;
                    result.TrackedFeatures3D         = new VectorOfPoint3D32F(trackedFeatures3D.ToArray());
                    result.FilteredTrackedFeaturesKp = new VectorOfKeyPoint(trackedFeaturesKp.ToArray());
                    result.FilteredBootstrapKp       = new VectorOfKeyPoint(bootstrapKp.ToArray());
                    result.Result                    = true;
                }

                rvec.Dispose();
                tvec.Dispose();
                reprojectedPtSet1.Dispose();
            }

            normalizedTrackedPts.Dispose();
            normalizedBootstrapPts.Dispose();
            pt3Dh.Dispose();
            pt3DhMatrix.Dispose();
            pt3DMat.Dispose();
            pt3D.Dispose();
            status.Dispose();

            return(result);
        }
Code example #23
File: CeresEXT.cs  Project: sxqwycg/CameraCalibratie
        public static PointF[] ProjectPointd2D_Manually <T>(this PinholeCamera cam, T[] points3d, out T[] visible) where T : SPoint
        {
            var r   = new List <PointF>();
            var vis = new List <T>();

            //var tt = cam.worldMat.Inverted();
            //var transf = tt.tocv();
            var transf      = cam.WorldMat.Inverted();
            var worldmatinv = cam.worldMat.Inverted();

            var mcvpoints3d = points3d.Select(x => x.toMCvPoint3D32f());

            VectorOfPoint3D32F points3dvec = new VectorOfPoint3D32F(mcvpoints3d.ToArray());
            //VectorOfPoint3D32F points3dvectransf = new VectorOfPoint3D32F(points3dvec.Size);


            var camcoords = new Matrix <double>(3, points3d.Length);

            var phm_mat        = new Mat();
            var camcoords_mat  = new Mat();
            var camcoords_mat2 = new VectorOfPoint3D32F();

            CvInvoke.ConvertPointsToHomogeneous(points3dvec, phm_mat);

            //var phm = new Matrix<float>(4, points3d.Length, phm_mat.DataPointer);
            //var camcoord = cam.WorldMat.Inverted() * phm;
            CvInvoke.Transform(phm_mat, camcoords_mat, transf);
            CvInvoke.ConvertPointsFromHomogeneous(camcoords_mat, camcoords_mat2);


            var cc = cam.toCeresCamera();

            for (int i = 0; i < camcoords_mat2.Size; i++)
            {
                var phmm = new Matrix <double>(3, 1);

                var camcoord = Vector3d.TransformPerspective(new Vector3d(points3d[i].X, points3d[i].Y, points3d[i].Z), worldmatinv);
                var x        = camcoord.X / camcoord.Z;
                var y        = camcoord.Y / camcoord.Z;

                var r2      = x * x + y * y;
                var r4      = r2 * r2;
                var r6      = r4 * r2;
                var r_coeff = ((1) + cam.DistortionR1 * r2 + cam.DistortionR2 * r4 + cam.DistortionR3 * r6);
                var tdistx  = 2 * cam.DistortionT1 * x * y + cam.DistortionT2 * (r2 + 2 * x * x);
                var tdisty  = 2 * cam.DistortionT2 * x * y + cam.DistortionT1 * (r2 + 2 * y * y);
                var xd      = x * r_coeff + tdistx;
                var yd      = y * r_coeff + tdisty;

                var im_x2 = cam.Intrinsics.fx * xd + cam.Intrinsics.cx;
                var im_y2 = cam.Intrinsics.fy * yd + cam.Intrinsics.cy;

                if (camcoord.Z < 0) //camcoords_mat2[i].Z < 0) {
                {
                    continue;
                }

                var pointf = ceresdotnet.CeresTestFunctions.ProjectPoint(cc.Internal, cc.External, points3d[i].Pos);

                var im_x = pointf.X;
                var im_y = pointf.Y;

                if (im_x >= 0 && im_x <= cam.PictureSize.Width && im_y >= 0 && im_y <= cam.PictureSize.Height)
                {
                    vis.Add(points3d[i]);
                    r.Add(pointf);
                }
                if (im_x2 >= 0 && im_x2 <= cam.PictureSize.Width && im_y2 >= 0 && im_y2 <= cam.PictureSize.Height)
                {
                    //vis.Add(points3d[i]);
                    //pointf.X = (float)im_x2;
                    //pointf.Y = (float)im_y2;
                    //r.Add(pointf);
                }
            }
            visible = vis.ToArray();
            return(r.ToArray());
        }
Code example #24
        public bool BootstrapTrack(Mat img)
        {
            #region Trace

            Trace.WriteLine($"BootstrapTrack iteration ({ _trackedFeatures.Size}).");
            Trace.WriteLine("--------------------------");
            Trace.Indent();

            #endregion

            //Track detected features
            if (_prevGray.IsEmpty)
            {
                const string error = "Previous frame is empty. Bootstrap first.";
                Trace.TraceError(error);
                throw new Exception(error);
            }

            if (img.IsEmpty || !img.IsEmpty && img.NumberOfChannels != 3)
            {
                const string error = "Image is not appropriate (Empty or wrong number of channels).";
                Trace.TraceError(error);
                throw new Exception(error);
            }

            var corners = new VectorOfPointF();
            var status  = new VectorOfByte();
            var errors  = new VectorOfFloat();

            var currGray = new Mat();
            CvInvoke.CvtColor(img, currGray, ColorConversion.Bgr2Gray);

            CvInvoke.CalcOpticalFlowPyrLK(_prevGray, currGray, Utils.GetPointsVector(_trackedFeatures), corners,
                                          status, errors, new Size(11, 11), 3, new MCvTermCriteria(20, 0.03));
            currGray.CopyTo(_prevGray);

            #region Trace

            Trace.WriteLine($"Tracked first point: ({_trackedFeatures[0].Point.X}, {_trackedFeatures[0].Point.Y}) / Found first corner = ({corners[0].X}, {corners[0].Y})");
            Trace.WriteLine($"Tracked second point: ({_trackedFeatures[1].Point.X}, {_trackedFeatures[1].Point.Y}) / Found second corner = ({corners[1].X}, {corners[1].Y})");
            Trace.WriteLine($"Tracked third point: ({_trackedFeatures[2].Point.X}, {_trackedFeatures[2].Point.Y}) / Found third corner = ({corners[2].X}, {corners[2].Y})");

            #endregion

            for (int j = 0; j < corners.Size; j++)
            {
                if (status[j] == 1)
                {
                    var p1 = new Point((int)_trackedFeatures[j].Point.X, (int)_trackedFeatures[j].Point.Y);
                    var p2 = new Point((int)corners[j].X, (int)corners[j].Y);

                    CvInvoke.Line(img, p1, p2, new MCvScalar(120, 10, 20));
                }
            }

            if (CvInvoke.CountNonZero(status) < status.Size * 0.8)
            {
                Trace.TraceError("Tracking failed.");
                throw new Exception("Tracking failed.");
            }

            _trackedFeatures = Utils.GetKeyPointsVector(corners);

            Utils.KeepVectorsByStatus(ref _trackedFeatures, ref _bootstrapKp, status);

            Trace.WriteLine($"{_trackedFeatures.Size} features survived optical flow.");

            if (_trackedFeatures.Size != _bootstrapKp.Size)
            {
                const string error = "Tracked features vector size is not equal to bootstrapped one.";
                Trace.TraceError(error);
                throw new Exception(error);
            }

            #region Trace

            Trace.WriteLine($"Bootstrap first point: ({_bootstrapKp[0].Point.X}, {_bootstrapKp[0].Point.Y}) / Found first corner = ({corners[0].X}, {corners[0].Y})");
            Trace.WriteLine($"Bootstrap second point: ({_bootstrapKp[1].Point.X}, {_bootstrapKp[1].Point.Y}) / Found second corner = ({corners[1].X}, {corners[1].Y})");
            Trace.WriteLine($"Bootstrap third point: ({_bootstrapKp[2].Point.X}, {_bootstrapKp[2].Point.Y}) / Found third corner = ({corners[2].X}, {corners[2].Y})");

            #endregion

            //verify features with a homography
            var inlierMask = new VectorOfByte();
            var homography = new Mat();
            if (_trackedFeatures.Size > 4)
            {
                CvInvoke.FindHomography(Utils.GetPointsVector(_trackedFeatures), Utils.GetPointsVector(_bootstrapKp), homography, HomographyMethod.Ransac, RansacThreshold, inlierMask);
            }

            int inliersNum = CvInvoke.CountNonZero(inlierMask);

            var m = new Matrix <double>(homography.Rows, homography.Cols, homography.DataPointer);

            m.Dispose();

            Trace.WriteLine($"{inliersNum} features survived homography.");

            if (inliersNum != _trackedFeatures.Size && inliersNum >= 4 && !homography.IsEmpty)
            {
                Utils.KeepVectorsByStatus(ref _trackedFeatures, ref _bootstrapKp, inlierMask);
            }
            else if (inliersNum < MinInliers)
            {
                Trace.TraceError("Not enough features survived homography.");
                return(false);
            }

            var bootstrapKpOrig     = new VectorOfKeyPoint(_bootstrapKp.ToArray());
            var trackedFeaturesOrig = new VectorOfKeyPoint(_trackedFeatures.ToArray());

            //Attempt at 3D reconstruction (triangulation) if conditions are right
            var rigidT = CvInvoke.EstimateRigidTransform(Utils.GetPointsVector(_trackedFeatures).ToArray(), Utils.GetPointsVector(_bootstrapKp).ToArray(), false);
            var matrix = new Matrix <double>(rigidT.Rows, rigidT.Cols, rigidT.DataPointer);

            #region Trace

            Trace.WriteLine($"Track first point: ({_trackedFeatures[0].Point.X}, {_trackedFeatures[0].Point.Y}) / Bootstrap first point = ({_bootstrapKp[0].Point.X}, {_bootstrapKp[0].Point.Y})");
            Trace.WriteLine($"Track 10th point: ({_trackedFeatures[10].Point.X}, {_trackedFeatures[10].Point.Y}) / Bootstrap 10th point = ({_bootstrapKp[10].Point.X}, {_bootstrapKp[10].Point.Y})");
            Trace.WriteLine($"Track last point: ({_trackedFeatures[_trackedFeatures.Size - 1].Point.X}, {_trackedFeatures[_trackedFeatures.Size - 1].Point.Y}" +
                            $") / Bootstrap third point = ({_bootstrapKp[_bootstrapKp.Size - 1].Point.X}, {_bootstrapKp[_bootstrapKp.Size - 1].Point.Y})");

            Trace.WriteLine($"Rigid matrix: [ [ {matrix[0, 0]}, {matrix[0, 1]}, {matrix[0, 2]} ] [ {matrix[1, 0]}, {matrix[1, 1]}, {matrix[1, 2]} ] ].");
            Trace.WriteLine($"Rigid: {CvInvoke.Norm(matrix.GetCol(2))}");

            #endregion

            if (CvInvoke.Norm(matrix.GetCol(2)) > 100)
            {
                //camera motion is sufficient
                var p1 = new Matrix <double>(3, 4);
                p1.SetIdentity();
                var result = OpenCvUtilities.CameraPoseAndTriangulationFromFundamental(_calibrationInfo, _trackedFeatures, _bootstrapKp, p1);

                _trackedFeatures = result.FilteredTrackedFeaturesKp;
                _bootstrapKp     = result.FilteredBootstrapKp;

                if (result.Result)
                {
                    _trackedFeatures3D = result.TrackedFeatures3D;
                    var trackedFeatures3Dm = Utils.Get3dPointsMat(_trackedFeatures3D);

                    var eigenvectors = new Mat();
                    var mean         = new Mat();
                    CvInvoke.PCACompute(trackedFeatures3Dm, mean, eigenvectors);
                    var eigenvectorsMatrix = new Matrix <double>(eigenvectors.Rows, eigenvectors.Cols, eigenvectors.DataPointer);

                    int numInliers    = 0;
                    var normalOfPlane = eigenvectorsMatrix.GetRow(2).ToUMat().ToMat(AccessType.Fast);
                    //eigenvectors.GetRow(2).CopyTo(normalOfPlane);
                    CvInvoke.Normalize(normalOfPlane, normalOfPlane);

                    var normalOfPlaneMatrix = new Matrix <double>(normalOfPlane.Rows, normalOfPlane.Cols, normalOfPlane.DataPointer);
                    Trace.WriteLine($"normal of plane: {normalOfPlaneMatrix[0, 0]}");
                    //cv::Vec3d x0 = pca.mean;
                    //double p_to_plane_thresh = sqrt(pca.eigenvalues.at<double>(2));
                }

                return(true);
            }

            #region Trace

            Trace.Unindent();
            Trace.WriteLine("--------------------------");

            #endregion

            return(false);
        }
Code example #25
File: CvInvokeCalib3d.cs  Project: neutmute/emgucv
 /// <summary>
 /// Estimates extrinsic camera parameters using known intrinsic parameters and extrinsic parameters for each view. The coordinates of 3D object points and their correspondent 2D projections must be specified. This function also minimizes back-projection error. 
 /// </summary>
 /// <param name="objectPoints">The array of object points</param>
 /// <param name="imagePoints">The array of corresponding image points</param>
 /// <param name="intrinsicMatrix">The camera matrix (A) [fx 0 cx; 0 fy cy; 0 0 1]. </param>
 /// <param name="distortionCoeffs">The vector of distortion coefficients, 4x1 or 1x4 [k1, k2, p1, p2]. If it is IntPtr.Zero, all distortion coefficients are considered 0's.</param>
 /// <param name="rotationVector">The output 3x1 or 1x3 rotation vector (compact representation of a rotation matrix, see cvRodrigues2). </param>
 /// <param name="translationVector">The output 3x1 or 1x3 translation vector</param>
 /// <param name="useExtrinsicGuess">Use the input rotation and translation parameters as a guess</param>
 /// <param name="method">Method for solving a PnP problem</param>
 /// <returns>The extrinsic parameters</returns>
 public static bool SolvePnP(
    MCvPoint3D32f[] objectPoints,
    PointF[] imagePoints,
    IInputArray intrinsicMatrix,
    IInputArray distortionCoeffs,
    IOutputArray rotationVector,
    IOutputArray translationVector,
    bool useExtrinsicGuess = false,
    CvEnum.SolvePnpMethod method = CvEnum.SolvePnpMethod.Iterative)
 {
    using (VectorOfPoint3D32F objPtVec = new VectorOfPoint3D32F(objectPoints))
    using (VectorOfPointF imgPtVec = new VectorOfPointF(imagePoints))
       return CvInvoke.SolvePnP(objPtVec, imgPtVec, intrinsicMatrix, distortionCoeffs, rotationVector,
          translationVector, false, method);
 }
Code example #26
        public void SuperR()
        {
            SRC_Img       = new Image <Gray, byte>(@"C:\Users\Админ\Downloads\image63341262,2002.png");
            Corrected_Img = SRC_Img.Clone();

            PointF[] corners = new PointF[] { new PointF(100, 196), new PointF(261, 190), new PointF(417, 192), new PointF(584, 201),
                                              new PointF(111, 277), new PointF(284, 287), new PointF(458, 291), new PointF(580, 284),
                                              new PointF(130, 368), new PointF(276, 395), new PointF(429, 391), new PointF(563, 365) };

            /*MCvPoint3D32f[] objCorners = new MCvPoint3D32f[] { new MCvPoint3D32f( 0, 0, 0.0f),    new MCvPoint3D32f(SRC_Img.Width / 3 - 1, 0, 0.0f),       new MCvPoint3D32f( 2 * SRC_Img.Width / 3 - 1, 0, 0.0f),    new MCvPoint3D32f( SRC_Img.Width - 1, 0, 0.0f),
             *                                  new MCvPoint3D32f( 0, SRC_Img.Height / 2 - 1, 0.0f),  new MCvPoint3D32f(SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f),     new MCvPoint3D32f( 2 * SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f),  new MCvPoint3D32f( SRC_Img.Width - 1, SRC_Img.Height / 2 - 1, 0.0f),
             *                                  new MCvPoint3D32f( 0, SRC_Img.Height - 1, 0.0f),  new MCvPoint3D32f( SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f),    new MCvPoint3D32f( 2 * SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f),  new MCvPoint3D32f( SRC_Img.Width - 1, SRC_Img.Height - 1, 0.0f)
             *                            };
             */
            // X: 0 - 480 / 3 ||0 159 329 479
            // Y: 0 - 210 / 2 || 0 104 209

            MCvPoint3D32f[] objCorners = new MCvPoint3D32f[] { new MCvPoint3D32f(0, 0, 0.0f), new MCvPoint3D32f(159, 0, 0.0f), new MCvPoint3D32f(329, 0, 0.0f), new MCvPoint3D32f(479, 0, 0.0f),
                                                               new MCvPoint3D32f(0, 104, 0.0f), new MCvPoint3D32f(159, 104, 0.0f), new MCvPoint3D32f(329, 104, 0.0f), new MCvPoint3D32f(479, 104, 0.0f),
                                                               new MCvPoint3D32f(0, 209, 0.0f), new MCvPoint3D32f(159, 209, 0.0f), new MCvPoint3D32f(329, 209, 0.0f), new MCvPoint3D32f(479, 209, 0.0f) };

            VectorOfPointF veccorners = new VectorOfPointF();

            veccorners.Push(corners);
            VectorOfPoint3D32F vecobjcorners = new VectorOfPoint3D32F();

            vecobjcorners.Push(objCorners);

            MCvTermCriteria TermCriteria = new MCvTermCriteria(30, 0.1);

            CvInvoke.CornerSubPix(SRC_Img, veccorners, new Size(2, 2), new Size(-1, -1), TermCriteria);

            IntrinsicCameraParameters intrisic = new IntrinsicCameraParameters();

            ExtrinsicCameraParameters[] extrinsic;
            intrisic.IntrinsicMatrix = new Matrix <double>(new double[, ] {
                { 1, 0, 349.417 }, { 0, 1, 286.417 }, { 0, 0, 1 }
            });
            try
            {
                Matrix <float> distortCoeffs   = new Matrix <float>(1, 4);
                Mat            rotationVectors = new Mat();
                //rotationVectors[0] = new Mat(3,1, DepthType.Cv32F, 1);
                Mat translationVectors = new Mat();
                //translationVectors[0] = new Mat(1, 3, DepthType.Cv32F, 1);

                /*
                 * double error = CvInvoke.CalibrateCamera(new MCvPoint3D32f[][] { objCorners }, new PointF[][] { veccorners.ToArray() },
                 *   SRC_Img.Size, intrisic.IntrinsicMatrix, distortCoeffs, CalibType.UserIntrinsicGuess, new MCvTermCriteria(30, 0.01), out rotationVectors, out translationVectors);
                 */
                /*
                 *
                 * Fisheye.Calibrate(vecobjcorners, veccorners, SRC_Img.Size, intrisic.IntrinsicMatrix, distortCoeffs, rotationVectors, translationVectors,
                 * Fisheye.CalibrationFlag.UseIntrinsicGuess, TermCriteria);
                 * */

                Matrix <float> matrix = new Matrix <float>(new float[, ] {
                    { 1, 0, 349 }, { 0, 1, 286 }, { 0, 0, 1 }
                });
                Fisheye.UndistorImage(SRC_Img, Corrected_Img, matrix, new VectorOfFloat(new float[] { 3500, 3500, 0, 0 }));
                Image <Gray, Byte> Res_Img = new Image <Gray, byte>(2 * SRC_Img.Width, SRC_Img.Height);
                CvInvoke.HConcat(SRC_Img, Corrected_Img, Res_Img);
                int error = 0;
                error++;
                //error += 0;
                //Array aa = rotationVectors[0].Data;
                //error += 0;
                //float q = rotationVectors.ElementAt<float>(0);
            }
            catch (Exception) { }
        }
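Res_Img, the side-by-side comparison of the source and the fisheye-undistorted image, is built but never displayed. One way to inspect it (an added step placed inside the try block right after HConcat, not part of the original snippet) would be:

                CvInvoke.Imshow("original | undistorted", Res_Img);
                CvInvoke.WaitKey(0);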
コード例 #27
0
        public void MainStuff()
        {
            SRC_Img       = new Image <Gray, byte>(@"C:\Users\Админ\Downloads\image63341262,2002.png");
            Corrected_Img = SRC_Img.Clone();

            //CvInvoke.CLAHE(SRC_Img, 40, new Size(8, 8), Corrected_Img);
            //CvInvoke.FindChessboardCorners(SRC_Img, new Size(8,8), vec);
            #region
            PointF[] corners = new PointF[] { new PointF(100, 196), new PointF(261, 190), new PointF(417, 192), new PointF(584, 201),
                                              new PointF(111, 277), new PointF(284, 287), new PointF(458, 291), new PointF(580, 284),
                                              new PointF(130, 368), new PointF(276, 395), new PointF(429, 391), new PointF(563, 365) };
            #endregion
            VectorOfPointF vec = new VectorOfPointF();
            vec.Push(corners);
            // X: 0 - 480 / 3 ||0 159 329 479
            // Y: 0 - 210 / 2 || 0 104 209

            MCvPoint3D32f[] objCorners = new MCvPoint3D32f[] { new MCvPoint3D32f(0, 0, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, 0, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, 0, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, 0, 0.0f),
                                                               new MCvPoint3D32f(0, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, SRC_Img.Height / 2 - 1, 0.0f),
                                                               new MCvPoint3D32f(0, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, SRC_Img.Height - 1, 0.0f) };

            /*
             * for (int i = 0; i < objCorners.Length; i++)
             * {
             * objCorners[i].X += SRC_Img.Width / 2;
             * objCorners[i].Y += SRC_Img.Height / 2;
             * }*/
            //VectorOfPointF objvec = new VectorOfPointF();
            //objvec.Push(objCorners);


            //Corrected_Img = FindTable(SRC_Img);
            Matrix <double> CameraMatrix = new Matrix <double>(3, 3, 1);
            CameraMatrix[0, 0] = 1;
            CameraMatrix[1, 1] = 1;
            CameraMatrix[2, 2] = 1;
            CameraMatrix[0, 2] = 349.417;
            CameraMatrix[1, 2] = 286.417;

            Mat newCameraMatrix = CvInvoke.GetDefaultNewCameraMatrix(CameraMatrix);
            //CvInvoke.Undistort(SRC_Img, Corrected_Img,
            //CvInvoke.FindChessboardCorners(SRC_Img, new System.Drawing.Size(5,5),

            Mat             distCoeffs    = new Mat(1, 5, DepthType.Cv32F, 1);
            Mat             rotCoeffs     = new Mat();
            Mat             translVectors = new Mat();
            MCvTermCriteria TermCriteria  = new MCvTermCriteria(30, 0.1);
            Corrected_Img = SRC_Img.Clone();
            CvInvoke.DrawChessboardCorners(Corrected_Img, new System.Drawing.Size(4, 3), vec, true);
            //CvInvoke.CornerSubPix(SRC_Img, vec, new Size(2, 2), new Size(-1, -1), TermCriteria);
            //CvInvoke.DrawChessboardCorners(SRC_Img, new System.Drawing.Size(4, 3), objvec, true);

            /*
             * try
             * {
             * CvInvoke.Remap(SRC_Img, Corrected_Img, vec, objvec, Inter.Nearest, BorderType.Constant);
             * } catch (Exception ex) { string s = ex.Message; }
             */
            VectorOfPoint3D32F obj3dvec = new VectorOfPoint3D32F();
            obj3dvec.Push(objCorners);

            try
            {
                MCvPoint3D32f[][] corners_object_list = new MCvPoint3D32f[1][];
                PointF[][]        corners_points_list = new PointF[1][];
                corners_object_list[0] = objCorners;
                corners_points_list[0] = corners;
                double r = CvInvoke.CalibrateCamera(obj3dvec,
                                                    vec,
                                                    SRC_Img.Size,
                                                    CameraMatrix,
                                                    distCoeffs,
                                                    rotCoeffs,
                                                    translVectors,
                                                    CalibType.Default,
                                                    TermCriteria);

                //double error = CameraCalibration.CalibrateCamera(corners_object_list, corners_points_list, Gray_Frame.Size, IC, Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_RATIONAL_MODEL, out EX_Param);
                r += 0;
                //Matrix<float> dist = new Matrix<float>( new float[] {

                //CvInvoke.Undistort(SRC_Img, Corrected_Img, cameraMatrix, );
            } catch (Exception ex) { }

            IntrinsicCameraParameters IC = new IntrinsicCameraParameters(8);
            Matrix <float>            Map1, Map2;
            IC.InitUndistortMap(SRC_Img.Width, SRC_Img.Height, out Map1, out Map2);
            Image <Gray, Byte> stuff = Undistort(SRC_Img);

            imageBox1.Image = SRC_Img.Resize(imageBox1.Width, imageBox1.Height, Inter.Linear);
            imageBox2.Image = Corrected_Img.Resize(imageBox1.Width, imageBox1.Height, Inter.Linear);
        }
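The Undistort call commented out in the try block above is left incomplete. A minimal sketch of that step, assuming CalibrateCamera returned successfully and filled CameraMatrix and distCoeffs:

            // Remap the source image using the calibrated intrinsics and distortion coefficients.
            CvInvoke.Undistort(SRC_Img, Corrected_Img, CameraMatrix, distCoeffs);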
コード例 #28
0
        public bool Track(Mat img)
        {
            //Track detected features
            if (_prevGray.IsEmpty)
            {
                Trace.WriteLine("Can't track: empty prev frame."); return(false);
            }

            var corners = new VectorOfPointF();
            var status  = new VectorOfByte();
            var errors  = new VectorOfFloat();

            CvInvoke.CvtColor(img, _currGray, ColorConversion.Bgr2Gray);

            CvInvoke.CalcOpticalFlowPyrLK(_prevGray, _currGray, Utils.GetPointsVector(_trackedFeatures), corners, status, errors, new Size(11, 11), 0, new MCvTermCriteria(100));
            _currGray.CopyTo(_prevGray);

            if (CvInvoke.CountNonZero(status) < status.Size * 0.8)
            {
                Trace.WriteLine("Tracking failed.");
                _bootstrapping = false;
                _canCalcMvm    = false;
                return(false);
            }

            _trackedFeatures = Utils.GetKeyPointsVector(corners);

            Utils.KeepVectorsByStatus(ref _trackedFeatures, ref _trackedFeatures3D, status);

            Console.WriteLine("tracking.");

            _canCalcMvm = (_trackedFeatures.Size >= MinInliers);

            if (_canCalcMvm)
            {
                //Perform camera pose estimation for AR
                var rvec = new Mat();
                var tvec = new Mat();

                CvInvoke.SolvePnP(_trackedFeatures3D, Utils.GetPointsVector(_trackedFeatures), _calibrationInfo.Intrinsic, _calibrationInfo.Distortion, _raux, _taux, !_raux.IsEmpty);

                _raux.ConvertTo(rvec, DepthType.Cv32F);
                _taux.ConvertTo(tvec, DepthType.Cv64F);

                var pts = new MCvPoint3D32f[] {
                    new MCvPoint3D32f(0.01f, 0, 0),
                    new MCvPoint3D32f(0, 0.01f, 0),
                    new MCvPoint3D32f(0, 0, 0.01f)
                };
                var axis = new VectorOfPoint3D32F(pts);

                var imgPoints = new VectorOfPointF();
                CvInvoke.ProjectPoints(axis, _raux, _taux, _calibrationInfo.Intrinsic, _calibrationInfo.Distortion, imgPoints);

                var centerPoint = new Point((int)_trackedFeatures[0].Point.X, (int)_trackedFeatures[0].Point.Y);

                var xPoint = new Point((int)imgPoints[0].X, (int)imgPoints[0].Y);
                var yPoint = new Point((int)imgPoints[1].X, (int)imgPoints[1].Y);
                var zPoint = new Point((int)imgPoints[2].X, (int)imgPoints[2].Y);

                CvInvoke.Line(img, centerPoint, xPoint, new MCvScalar(255, 0, 0), 5); //blue x-ax
                CvInvoke.Line(img, centerPoint, yPoint, new MCvScalar(0, 255, 0), 5); //green y-ax
                CvInvoke.Line(img, centerPoint, zPoint, new MCvScalar(0, 0, 255), 5); //red z-ax

                var rot = new Mat(3, 3, DepthType.Cv32F, 1); // 3x3 single-channel rotation matrix filled by Rodrigues

                CvInvoke.Rodrigues(rvec, rot);
            }

            return(true);
        }
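The method stops after converting rvec into a 3x3 rotation matrix. For AR rendering, the rotation and translation are usually packed into a single 3x4 [R|t] pose. A minimal sketch of that packing (an addition, not part of the original Track method), assuming rot is the CV_32F Rodrigues output and tvec the CV_64F translation produced above:

            var rotMatrix = new Matrix<float>(3, 3, rot.DataPointer);
            var tVector   = new Matrix<double>(3, 1, tvec.DataPointer);
            var pose      = new Matrix<float>(3, 4);   // [R|t]
            for (int r = 0; r < 3; r++)
            {
                for (int c = 0; c < 3; c++)
                {
                    pose[r, c] = rotMatrix[r, c];
                }
                pose[r, 3] = (float)tVector[r, 0];
            }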
コード例 #29
0
        public static unsafe void ceresSolveAruco()
        {
            var                phc          = PinholeCamera.getTestCameraHuawei();
            string             dir          = @"C:\Users\jens\Desktop\calibratie\Huawei p9\aruco\stereo test\";
            List <CeresMarker> ceresmarkers = new List <CeresMarker>();
            List <CeresCamera> cerescameras = new List <CeresCamera>();
            var                files        = Directory.GetFiles(dir).ToList();


            //at least 8 point correspondences are needed
            var markerDictionary = Aruco.findArucoMarkers(files, Path.Combine(dir, "aruco_detected\\"), 1);
            var pairs            = findImagePairsMinMarkers(markerDictionary, 8);

            Matrix K = new Matrix(phc.Intrinsics.Mat);

            var W = new Matrix(new double[] {
                0.0D, -1.0D, 0.0D,
                1.0D, 0.0D, 0.0D,
                0.0D, 0.0D, 1.0D
            });


            var Wt = new Matrix(new double[] {
                0.0D, 1.0D, 0.0D,
                -1.0D, 0.0D, 0.0D,
                0.0D, 0.0D, 1.0D
            });
            var Z = new Matrix(new double[] {
                0.0D, 1.0D, 0.0D,
                -1.0D, 0.0D, 0.0D,
                0.0D, 0.0D, 0D
            });

            var diag = new Matrix(new double[] {
                1.0D, 0.0D, 0.0D,
                0.0D, 1.0D, 0.0D,
                0.0D, 0.0D, 0.0D
            });


            foreach (var stereoPair in pairs)
            {
                var            points_count = stereoPair.intersection.Count;
                VectorOfPointF punten1px, punten2px;
                {
                    int           i  = 0;
                    List <PointF> p1 = new List <PointF>();
                    List <PointF> p2 = new List <PointF>();
                    foreach (KeyValuePair <ArucoMarker, ArucoMarker> kvp in stereoPair.intersection)
                    {
                        p1.Add(kvp.Key.Corner1);
                        p2.Add(kvp.Value.Corner1);
                        i++;
                    }
                    punten1px = new VectorOfPointF(p1.ToArray());
                    punten2px = new VectorOfPointF(p2.ToArray());
                }


                Matrix F      = new Matrix(3, 3);
                CVI.FindFundamentalMat(punten1px, punten2px, F);



                Matrix essential = K.Transpose() * F * K;
                var    decomp = new SVD <double>(essential);
                var    U      = decomp.U;
                var    Vt     = decomp.Vt;

                var R1 = U * W * Vt;
                var R2 = U * W.Transpose() * Vt;
                var T1 = U.GetCol(2);
                var T2 = -1 * U.GetCol(2);

                Matrix[] Ps = new Matrix[4];

                for (int i = 0; i < 4; i++)
                {
                    Ps[i] = new Matrix(3, 4);
                }

                CVI.HConcat(R1, T1, Ps[0]);
                CVI.HConcat(R1, T2, Ps[1]);
                CVI.HConcat(R2, T1, Ps[2]);
                CVI.HConcat(R2, T2, Ps[3]);

                var KPs = new Matrix[4];
                KPs[0] = K * Ps[0];
                KPs[1] = K * Ps[1];
                KPs[2] = K * Ps[2];
                KPs[3] = K * Ps[3];


                var KP0 = K * new Matrix(new double [, ] {
                    { 1, 0, 0, 0 }, { 0, 1, 0, 0 }, { 0, 0, 1, 0 }
                });

                for (int i = 0; i < 4; i++)
                {
                    Matrix <float>     output_hom = new Matrix <float>(4, punten1px.Size);
                    VectorOfPoint3D32F output_3d  = new VectorOfPoint3D32F();

                    CVI.TriangulatePoints(KP0, KPs[i], punten1px, punten2px, output_hom);
                    CVI.ConvertPointsFromHomogeneous(output_hom, output_3d);
                }

                Matrix S = U * diag * W * U.Transpose();

                Matrix R = U * W * decomp.Vt;
            }
        }
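The triangulation loop above evaluates all four candidate projection matrices from the essential-matrix decomposition but never selects one. The standard disambiguation is a cheirality check: keep the candidate that places the most triangulated points in front of both cameras. A minimal sketch of that selection (an assumption-laden addition; it presumes the project's Matrix type exposes a [row, col] indexer on Ps[i]):

            // Hypothetical selection step: re-triangulate per candidate and count points
            // with positive depth in both views; the candidate with the most such points wins.
            int bestIndex = -1, bestInFront = -1;
            for (int i = 0; i < 4; i++)
            {
                var hom   = new Matrix<float>(4, punten1px.Size);
                var pts3d = new VectorOfPoint3D32F();
                CVI.TriangulatePoints(KP0, KPs[i], punten1px, punten2px, hom);
                CVI.ConvertPointsFromHomogeneous(hom, pts3d);

                int inFront = 0;
                foreach (MCvPoint3D32f p in pts3d.ToArray())
                {
                    // depth in the first (reference) camera is simply Z
                    bool inFrontFirst = p.Z > 0;
                    // depth in the second camera: third row of Ps[i] applied to the homogeneous point
                    double z2 = Ps[i][2, 0] * p.X + Ps[i][2, 1] * p.Y + Ps[i][2, 2] * p.Z + Ps[i][2, 3];
                    if (inFrontFirst && z2 > 0)
                    {
                        inFront++;
                    }
                }
                if (inFront > bestInFront)
                {
                    bestInFront = inFront;
                    bestIndex   = i;
                }
            }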
コード例 #30
0
        /// <summary>
        /// Computes the intrinsic parameters of the left and right cameras
        /// </summary>
        void CameraIntrinsicCalculate()
        {
            string[] allimgfiles = File.ReadAllLines(root + @"\calibrationImg.txt");    //read every image file name listed in the file
            string   imgfile     = root + @"\" + allimgfiles[0];                        //must be a full path

            try
            {
                for (int i = 0; i < allimgfiles.Length; i++)
                {
                    imgfile = root + @"\" + allimgfiles[i];
                    if (imgfile.Contains("leftImg"))                 //提取左相机图片角点
                    {
                        srcImg = CvInvoke.Imread(imgfile, ImreadModes.Color);
                        CvInvoke.CvtColor(srcImg, grayImg, ColorConversion.Bgr2Gray);
                        if (false == CvInvoke.FindChessboardCorners(grayImg, patternSize, corners))
                        {
                            throw new Exception("左相机采集图片未识别到所有角点~,请重新采集~");
                        }
                        else
                        {
                            CvInvoke.Find4QuadCornerSubpix(grayImg, corners, new Size(11, 11)); //refine corners to sub-pixel accuracy
                            left_corners_set.Push(corners);                                     //store the extracted corner coordinates
                        }
                    }
                    else if (imgfile.Contains("rightImg"))           //提取右相机图片角点
                    {
                        srcImg = CvInvoke.Imread(imgfile, ImreadModes.Color);
                        CvInvoke.CvtColor(srcImg, grayImg, ColorConversion.Bgr2Gray);
                        if (false == CvInvoke.FindChessboardCorners(grayImg, patternSize, corners))
                        {
                            throw new Exception("右相机采集图片未识别到所有角点~,请重新采集~");
                        }
                        else
                        {
                            CvInvoke.Find4QuadCornerSubpix(grayImg, corners, new Size(11, 11)); //refine corners to sub-pixel accuracy
                            right_corners_set.Push(corners);                                    //store the extracted corner coordinates
                        }
                    }
                    CvInvoke.WaitKey(10);
                }
                for (int k = 0; k < left_corners_set.Size; k++)
                {
                    VectorOfPoint3D32F tempPoint = new VectorOfPoint3D32F();
                    for (int i = 0; i < patternSize.Height; i++)
                    {
                        for (int j = 0; j < patternSize.Width; j++)
                        {
                            MCvPoint3D32f[] objPoint = new MCvPoint3D32f[1];
                            objPoint[0].X = j * SquareSize;
                            objPoint[0].Y = i * SquareSize;
                            objPoint[0].Z = 0;
                            tempPoint.Push(objPoint);
                        }
                    }
                    objectpoints.Push(tempPoint);
                }
                //left camera intrinsic calibration
                CvInvoke.CalibrateCamera(objectpoints, left_corners_set, new Size(320, 240),
                                         leftCamMatrix, leftDistCoeffs, leftRvecs, leftTvecs, CalibType.Default, new MCvTermCriteria(30, 0.00001));
                //right camera intrinsic calibration
                CvInvoke.CalibrateCamera(objectpoints, right_corners_set, new Size(320, 240),
                                         rightCamMatrix, rightDistCoeffs, rightRvecs, rightTvecs, CalibType.Default, new MCvTermCriteria(30, 0.00001));
                Data.LogString = "相机内参标定完成,请保存数据~~";
            }
            catch (Exception e)
            {
                Data.LogString = e.Message;
            }
        }
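The log message asks the user to save the data, but no saving is done in this routine. A minimal sketch using Emgu's FileStorage wrapper, assuming leftCamMatrix, leftDistCoeffs, rightCamMatrix and rightDistCoeffs are Mat fields and that intrinsics.yml under root is an acceptable output path:

        void SaveIntrinsics()
        {
            // Hypothetical persistence step, not part of the original class.
            using (FileStorage fs = new FileStorage(root + @"\intrinsics.yml", FileStorage.Mode.Write))
            {
                fs.Write(leftCamMatrix, "leftCamMatrix");
                fs.Write(leftDistCoeffs, "leftDistCoeffs");
                fs.Write(rightCamMatrix, "rightCamMatrix");
                fs.Write(rightDistCoeffs, "rightDistCoeffs");
            }
        }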