private static double Validate(VectorOfVectorOfPoint3D32F processedObjectPoints, VectorOfVectorOfPointF processedImagePoints, Mat cameraMatrix, Mat distCoeffs, VectorOfPoint3D32F rvecs, VectorOfPoint3D32F tvecs, bool fisheye)
        {
            double error       = 0;
            int    totalpoints = 0;

            if (fisheye)
            {
                for (int i = 0; i < processedObjectPoints.Size; i++)
                {
                    VectorOfPoint3D32F objectFramePoints = processedObjectPoints[i];
                    VectorOfPointF     imageFramePoints  = processedImagePoints[i];
                    // RotationVector3D is used here merely as a convenient 3x1 double
                    // matrix container; tvec holds a translation, not a rotation.
                    RotationVector3D   tvec = new RotationVector3D(new double[] { tvecs[i].X, tvecs[i].Y, tvecs[i].Z });
                    RotationVector3D   rvec = new RotationVector3D(new double[] { rvecs[i].X, rvecs[i].Y, rvecs[i].Z });

                    VectorOfPointF newImageFramePoints = new VectorOfPointF();

                    Fisheye.ProjectPoints(objectFramePoints, newImageFramePoints, rvec, tvec, cameraMatrix, distCoeffs);

                    for (int j = 0; j < newImageFramePoints.Size; j++)
                    {
                        PointF x1 = newImageFramePoints[j];
                        PointF x2 = imageFramePoints[j];
                        totalpoints++;
                        error += Math.Pow(x1.X - x2.X, 2) + Math.Pow(x1.Y - x2.Y, 2);
                    }
                }
            }
            // only the fisheye branch is implemented; with no processed points the
            // RMS is undefined, so make the 0/0 case explicit instead of silent
            return(totalpoints == 0 ? double.NaN : Math.Sqrt(error / totalpoints));
        }
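
        // The Validate method above only implements the fisheye branch; when fisheye
        // is false it has no poses to project. A minimal sketch of a pinhole
        // counterpart, assuming the same per-frame pose vectors, could use
        // CvInvoke.ProjectPoints (the standard, non-fisheye projection). The helper
        // name is ours, not part of the original code.
        private static double ValidatePinhole(VectorOfVectorOfPoint3D32F objectPoints, VectorOfVectorOfPointF imagePoints, Mat cameraMatrix, Mat distCoeffs, VectorOfPoint3D32F rvecs, VectorOfPoint3D32F tvecs)
        {
            double error       = 0;
            int    totalPoints = 0;

            for (int i = 0; i < objectPoints.Size; i++)
            {
                using (RotationVector3D rvec = new RotationVector3D(new double[] { rvecs[i].X, rvecs[i].Y, rvecs[i].Z }))
                using (RotationVector3D tvec = new RotationVector3D(new double[] { tvecs[i].X, tvecs[i].Y, tvecs[i].Z }))
                using (VectorOfPointF projected = new VectorOfPointF())
                {
                    // project the model points with the pinhole camera model
                    CvInvoke.ProjectPoints(objectPoints[i], rvec, tvec, cameraMatrix, distCoeffs, projected);
                    VectorOfPointF observed = imagePoints[i];
                    for (int j = 0; j < projected.Size; j++)
                    {
                        totalPoints++;
                        error += Math.Pow(projected[j].X - observed[j].X, 2) + Math.Pow(projected[j].Y - observed[j].Y, 2);
                    }
                }
            }
            return totalPoints == 0 ? double.NaN : Math.Sqrt(error / totalPoints);
        }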
        // EMGU's CalibrateCamera method has a bug: the per-view rotation and translation
        // vectors are never copied to the output arrays.
        // See: https://stackoverflow.com/questions/33127581/how-do-i-access-the-rotation-and-translation-vectors-after-camera-calibration-in
        public static double CalibrateCamera(MCvPoint3D32f[][] objectPoints, PointF[][] imagePoints, Size imageSize, IInputOutputArray cameraMatrix, IInputOutputArray distortionCoeffs, CalibType calibrationType, MCvTermCriteria termCriteria, out Mat[] rotationVectors, out Mat[] translationVectors)
        {
            System.Diagnostics.Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for object points should equal the number of images for image points");
            int imageCount = objectPoints.Length;

            using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints)) {
                    double reprojectionError;
                    using (VectorOfMat rVecs = new VectorOfMat())
                        using (VectorOfMat tVecs = new VectorOfMat()) {
                            reprojectionError  = CvInvoke.CalibrateCamera(vvObjPts, vvImgPts, imageSize, cameraMatrix, distortionCoeffs, rVecs, tVecs, calibrationType, termCriteria);
                            rotationVectors    = new Mat[imageCount];
                            translationVectors = new Mat[imageCount];
                            for (int i = 0; i < imageCount; i++)
                            {
                                rotationVectors[i] = new Mat();
                                using (Mat matR = rVecs[i])
                                    matR.CopyTo(rotationVectors[i]);
                                translationVectors[i] = new Mat();
                                using (Mat matT = tVecs[i])
                                    matT.CopyTo(translationVectors[i]);
                            }
                        }
                    return(reprojectionError);
                }
        }
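
        // A hedged usage sketch for the wrapper above: each returned rotation Mat is
        // a compact 3x1 Rodrigues vector; CvInvoke.Rodrigues expands it to a 3x3
        // rotation matrix. The helper name is illustrative.
        public static Matrix<double>[] RotationVectorsToMatrices(Mat[] rotationVectors)
        {
            Matrix<double>[] rotations = new Matrix<double>[rotationVectors.Length];
            for (int i = 0; i < rotationVectors.Length; i++)
            {
                rotations[i] = new Matrix<double>(3, 3);
                // Rodrigues: axis-angle vector -> rotation matrix
                CvInvoke.Rodrigues(rotationVectors[i], rotations[i]);
            }
            return rotations;
        }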
        public void TestChessboardCalibration()
        {
            Size patternSize = new Size(9, 6);

            Image <Gray, Byte> chessboardImage = EmguAssert.LoadImage <Gray, byte>("left01.jpg");

            Util.VectorOfPointF corners = new Util.VectorOfPointF();
            bool patternWasFound        = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);

            chessboardImage.FindCornerSubPix(
                new PointF[][] { corners.ToArray() },
                new Size(10, 10),
                new Size(-1, -1),
                new MCvTermCriteria(0.05));

            MCvPoint3D32f[] objectPts = CalcChessboardCorners(patternSize, 1.0f);

            using (VectorOfVectorOfPoint3D32F ptsVec = new VectorOfVectorOfPoint3D32F(new MCvPoint3D32f[][] { objectPts }))
                using (VectorOfVectorOfPointF imgPtsVec = new VectorOfVectorOfPointF(corners))
                    using (Mat cameraMatrix = new Mat())
                        using (Mat distortionCoeff = new Mat())
                            using (VectorOfMat rotations = new VectorOfMat())
                                using (VectorOfMat translations = new VectorOfMat())
                                {
                                    Mat             calMat  = CvInvoke.InitCameraMatrix2D(ptsVec, imgPtsVec, chessboardImage.Size, 0);
                                    Matrix <double> calMatF = new Matrix <double>(calMat.Rows, calMat.Cols, calMat.NumberOfChannels);
                                    calMat.CopyTo(calMatF);
                                    double error = CvInvoke.CalibrateCamera(ptsVec, imgPtsVec, chessboardImage.Size, cameraMatrix,
                                                                            distortionCoeff,
                                                                            rotations, translations, CalibType.Default, new MCvTermCriteria(30, 1.0e-10));
                                    using (Mat rotation = new Mat())
                                        using (Mat translation = new Mat())
                                            using (VectorOfPoint3D32F vpObject = new VectorOfPoint3D32F(objectPts))
                                            {
                                                CvInvoke.SolvePnPRansac(
                                                    vpObject,
                                                    corners,
                                                    cameraMatrix,
                                                    distortionCoeff,
                                                    rotation,
                                                    translation,
                                                    true);
                                            }

                                    CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);
                                    using (Mat undistorted = new Mat())
                                    {
                                        CvInvoke.Undistort(chessboardImage, undistorted, cameraMatrix, distortionCoeff);
                                        String title = String.Format("Reprojection error: {0}", error);
                                        //CvInvoke.NamedWindow(title);
                                        //CvInvoke.Imshow(title, undistorted);
                                        //CvInvoke.WaitKey();
                                        //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));
                                    }
                                }
        }
        public static void Calibrate(string[] imgFiles, out LensParams lensParams)
        {
            Size patternSize = new Size(CHESS_PATTERN_WIDTH, CHESS_PATTERN_HEIGHT);

            VectorOfVectorOfPoint3D32F objPoints   = new VectorOfVectorOfPoint3D32F();
            VectorOfVectorOfPointF     imagePoints = new VectorOfVectorOfPointF();

            Size imageSize = Size.Empty;

            foreach (string file in imgFiles)
            {
                Mat img = CvInvoke.Imread(file, ImreadModes.Grayscale);
                if (imageSize == Size.Empty)
                {
                    imageSize = new Size(img.Width, img.Height);
                }
                //CvInvoke.Imshow("input", img);
                VectorOfPointF corners = new VectorOfPointF(patternSize.Width * patternSize.Height);
                bool           find    = CvInvoke.FindChessboardCorners(img, patternSize, corners);
                if (find)
                {
                    MCvPoint3D32f[] points    = new MCvPoint3D32f[patternSize.Width * patternSize.Height];
                    int             loopIndex = 0;
                    for (int i = 0; i < patternSize.Height; i++)
                    {
                        for (int j = 0; j < patternSize.Width; j++)
                        {
                            points[loopIndex++] = new MCvPoint3D32f(j, i, 0);
                        }
                    }
                    objPoints.Push(new VectorOfPoint3D32F(points));
                    imagePoints.Push(corners);
                }
            }

            Matrix <double> K           = new Matrix <double>(3, 3);
            Matrix <double> D           = new Matrix <double>(4, 1);
            Mat             rotation    = new Mat();
            Mat             translation = new Mat();

            Fisheye.Calibrate(objPoints,
                              imagePoints,
                              imageSize,
                              K,
                              D,
                              rotation,
                              translation,
                              Fisheye.CalibrationFlag.CheckCond,
                              new MCvTermCriteria(30, 0.1)
                              );
            lensParams = new LensParams(K, D);
        }
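
        // A hedged follow-up sketch, assuming the K/D pair produced by Calibrate
        // above: estimate a new camera matrix for the undistorted view, then remap
        // the image through the fisheye model. The Fisheye method names are EmguCV
        // wrappers of cv::fisheye; the helper itself is illustrative.
        public static Mat UndistortFisheye(string imgFile, Matrix<double> K, Matrix<double> D)
        {
            Mat distorted = CvInvoke.Imread(imgFile, ImreadModes.Color);
            Mat newK = new Mat();
            // balance 0.0 keeps only valid pixels; 1.0 keeps the full field of view
            Fisheye.EstimateNewCameraMatrixForUndistortRectify(
                K, D, distorted.Size, Mat.Eye(3, 3, DepthType.Cv64F, 1), newK, 0.0);
            Mat undistorted = new Mat();
            Fisheye.UndistortImage(distorted, undistorted, K, D, newK);
            return undistorted;
        }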
        /// <summary>
        /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="intrinsicParam">The intrisinc parameters, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="calibrationType">cCalibration type</param>
        /// <param name="termCriteria">The termination criteria</param>
        /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
        /// <returns>The final reprojection error</returns>
        public static double CalibrateCamera(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints,
            Size imageSize,
            IntrinsicCameraParameters intrinsicParam,
            CvEnum.CalibType calibrationType,
            MCvTermCriteria termCriteria,
            out ExtrinsicCameraParameters[] extrinsicParams)
        {
            Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for object points should equal the number of images for image points");
            int imageCount = objectPoints.Length;

            using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints))
                {
                    double reprojectionError = -1;
                    using (VectorOfMat rotationVectors = new VectorOfMat())
                        using (VectorOfMat translationVectors = new VectorOfMat())
                        {
                            Mat cameraMat   = new Mat();
                            Mat distorCoeff = new Mat();
                            reprojectionError = CvInvoke.CalibrateCamera(
                                vvObjPts,
                                vvImgPts,
                                imageSize,
                                intrinsicParam.IntrinsicMatrix,
                                intrinsicParam.DistortionCoeffs,
                                rotationVectors,
                                translationVectors,
                                calibrationType,
                                termCriteria);

                            extrinsicParams = new ExtrinsicCameraParameters[imageCount];
                            for (int i = 0; i < imageCount; i++)
                            {
                                ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
                                using (Mat matR = rotationVectors[i])
                                    matR.CopyTo(p.RotationVector);
                                using (Mat matT = translationVectors[i])
                                    matT.CopyTo(p.TranslationVector);
                                extrinsicParams[i] = p;
                            }
                        }
                    return(reprojectionError);
                }
        }
 private VectorOfVectorOfPoint3D32F CreateModelPoints(int length, int chessboardCols, int chessboardRows)
 {
     modelPoints = new VectorOfVectorOfPoint3D32F();
     for (var k = 0; k < length; k++)
     {
         var chessboard = new List<MCvPoint3D32f>();
         for (var y = 0; y < chessboardRows; y++)
         {
             for (var x = 0; x < chessboardCols; x++)
             {
                 chessboard.Add(new MCvPoint3D32f(x, y, 0));
             }
         }
         modelPoints.Push(new VectorOfPoint3D32F(chessboard.ToArray()));
     }
     return modelPoints;
 }
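
 // A hedged metric variant of CreateModelPoints above: the original builds the
 // board in unit squares, so rotations come out correct but translations are in
 // "squares". Scaling each corner by the physical square size (the parameter
 // name is ours) yields extrinsics in real-world units.
 private VectorOfVectorOfPoint3D32F CreateModelPointsMetric(int length, int chessboardCols, int chessboardRows, float squareSize)
 {
     var points = new VectorOfVectorOfPoint3D32F();
     for (var k = 0; k < length; k++)
     {
         var chessboard = new List<MCvPoint3D32f>();
         for (var y = 0; y < chessboardRows; y++)
         {
             for (var x = 0; x < chessboardCols; x++)
             {
                 // grid coordinates scaled into world units (e.g. millimeters)
                 chessboard.Add(new MCvPoint3D32f(x * squareSize, y * squareSize, 0));
             }
         }
         points.Push(new VectorOfPoint3D32F(chessboard.ToArray()));
     }
     return points;
 }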
        public static double ValidateCharuco(int squaresX, int squaresY, float squareLength, float markerLength, PredefinedDictionaryName dictionary, Size imageSize, VectorOfInt charucoIds, VectorOfPointF charucoCorners, VectorOfInt markerCounterPerFrame, bool fisheye, Func <byte[], byte[]> GetRemoteChessboardCorner, Mat cameraMatrix, Mat distCoeffs)
        {
            VectorOfVectorOfPoint3D32F processedObjectPoints = new VectorOfVectorOfPoint3D32F();
            VectorOfVectorOfPointF     processedImagePoints  = new VectorOfVectorOfPointF();
            VectorOfPoint3D32F         rvecs = new VectorOfPoint3D32F();
            VectorOfPoint3D32F         tvecs = new VectorOfPoint3D32F();

            int k = 0;

            for (int i = 0; i < markerCounterPerFrame.Size; i++)
            {
                int                nMarkersInThisFrame       = markerCounterPerFrame[i];
                VectorOfPointF     currentImgPoints          = new VectorOfPointF();
                VectorOfPointF     currentImgPointsUndistort = new VectorOfPointF();
                VectorOfInt        currentIds       = new VectorOfInt();
                VectorOfPoint3D32F currentObjPoints = new VectorOfPoint3D32F();
                Mat                tvec             = new Mat();
                Mat                rvec             = new Mat();

                for (int j = 0; j < nMarkersInThisFrame; j++)
                {
                    currentImgPoints.Push(new PointF[] { charucoCorners[k] });
                    currentIds.Push(new int[] { charucoIds[k] });
                    currentObjPoints.Push(new MCvPoint3D32f[] { GetChessboardCorner(squaresX, squaresY, squareLength, markerLength, charucoIds[k], dictionary, GetRemoteChessboardCorner) });
                    k++;
                }

                // zero distortion coefficients: the image points are undistorted just
                // below, so the pose estimate runs on an ideal (identity) camera
                Mat distCoeffsNew = new Mat(1, 4, DepthType.Cv64F, 1);
                distCoeffsNew.SetValue(0, 0, 0);
                distCoeffsNew.SetValue(0, 1, 0);
                distCoeffsNew.SetValue(0, 2, 0);
                distCoeffsNew.SetValue(0, 3, 0);

                // note: "UndistorPoints" (sic) is the EmguCV wrapper name for cv::fisheye::undistortPoints
                Fisheye.UndistorPoints(currentImgPoints, currentImgPointsUndistort, cameraMatrix, distCoeffs, Mat.Eye(3, 3, DepthType.Cv64F, 1), Mat.Eye(3, 3, DepthType.Cv64F, 1));
                if (ArucoInvoke.EstimatePoseCharucoBoard(currentImgPointsUndistort, currentIds, CreateBoard(squaresX, squaresY, squareLength, markerLength, new Dictionary(dictionary)), Mat.Eye(3, 3, DepthType.Cv64F, 1), distCoeffsNew, rvec, tvec))
                {
                    rvecs.Push(new MCvPoint3D32f[] { new MCvPoint3D32f((float)rvec.GetValue(0, 0), (float)rvec.GetValue(1, 0), (float)rvec.GetValue(2, 0)) });
                    tvecs.Push(new MCvPoint3D32f[] { new MCvPoint3D32f((float)tvec.GetValue(0, 0), (float)tvec.GetValue(1, 0), (float)tvec.GetValue(2, 0)) });

                    processedImagePoints.Push(currentImgPoints);
                    processedObjectPoints.Push(currentObjPoints);
                }
            }

            return(Validate(processedObjectPoints, processedImagePoints, cameraMatrix, distCoeffs, rvecs, tvecs, fisheye));
        }
        public static (Mat cameraMatrix, Mat distCoeffs, double rms) CalibrateCharuco(int squaresX, int squaresY, float squareLength, float markerLength, PredefinedDictionaryName dictionary, Size imageSize, VectorOfInt charucoIds, VectorOfPointF charucoCorners, VectorOfInt markerCounterPerFrame, bool fisheye, Func <byte[], byte[]> GetRemoteChessboardCorner)
        {
            Mat    cameraMatrix = new Mat(3, 3, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
            Mat    distCoeffs   = new Mat(1, 4, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
            double rms          = 0.0;

            VectorOfVectorOfPoint3D32F processedObjectPoints = new VectorOfVectorOfPoint3D32F();
            VectorOfVectorOfPointF     processedImagePoints  = new VectorOfVectorOfPointF();

            int k = 0;

            for (int i = 0; i < markerCounterPerFrame.Size; i++)
            {
                int                nMarkersInThisFrame = markerCounterPerFrame[i];
                VectorOfPointF     currentImgPoints    = new VectorOfPointF();
                VectorOfPoint3D32F currentObjPoints    = new VectorOfPoint3D32F();

                for (int j = 0; j < nMarkersInThisFrame; j++)
                {
                    currentImgPoints.Push(new PointF[] { charucoCorners[k] });
                    currentObjPoints.Push(new MCvPoint3D32f[] { GetChessboardCorner(squaresX, squaresY, squareLength, markerLength, charucoIds[k], dictionary, GetRemoteChessboardCorner) });
                    k++;
                }

                processedImagePoints.Push(currentImgPoints);
                processedObjectPoints.Push(currentObjPoints);
            }

            VectorOfPoint3D32F rvecs = new VectorOfPoint3D32F();
            VectorOfPoint3D32F tvecs = new VectorOfPoint3D32F();

            if (fisheye)
            {
                Fisheye.Calibrate(processedObjectPoints, processedImagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, Fisheye.CalibrationFlag.FixSkew | Fisheye.CalibrationFlag.RecomputeExtrinsic, new MCvTermCriteria(400, double.Epsilon));
            }
            else
            {
                // note: the rotation/translation vectors are discarded here; only the
                // fisheye branch of Validate consumes rvecs/tvecs
                CvInvoke.CalibrateCamera(processedObjectPoints, processedImagePoints, imageSize, cameraMatrix, distCoeffs, new Mat(), new Mat(), CalibType.FixK3, new MCvTermCriteria(30, 1e-4));
            }

            rms = Validate(processedObjectPoints, processedImagePoints, cameraMatrix, distCoeffs, rvecs, tvecs, fisheye);

            return(cameraMatrix, distCoeffs, rms);
        }
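
        // A hedged usage sketch for CalibrateCharuco above; the inputs are assumed
        // to have been collected during ChArUco detection, and this wrapper name is
        // illustrative, not part of the original API.
        public static void ReportCharucoCalibration(int squaresX, int squaresY, float squareLength, float markerLength, PredefinedDictionaryName dictionary, Size imageSize, VectorOfInt charucoIds, VectorOfPointF charucoCorners, VectorOfInt markerCounterPerFrame, Func <byte[], byte[]> GetRemoteChessboardCorner)
        {
            // fisheye: true exercises the Fisheye.Calibrate branch
            var (cameraMatrix, distCoeffs, rms) = CalibrateCharuco(
                squaresX, squaresY, squareLength, markerLength, dictionary,
                imageSize, charucoIds, charucoCorners, markerCounterPerFrame,
                true, GetRemoteChessboardCorner);
            Console.WriteLine($"RMS reprojection error: {rms}");
        }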
        public bool calibrate(float squareEdge, Size patternSize, string[] images)
        {
            VectorOfVectorOfPointF corners = findCorners(squareEdge, patternSize, images);

            if (corners.Size == 0)
            {
                Console.WriteLine("Cannot find chessboard!");
                return(false);
            }

            VectorOfPoint3D32F         chessboard   = getChessboardCorners(squareEdge, patternSize);
            VectorOfVectorOfPoint3D32F objectPoints = new VectorOfVectorOfPoint3D32F();

            for (int i = corners.Size; i > 0; i--)
            {
                objectPoints.Push(chessboard);
            }

            CameraParam param = new CameraParam();

            // set mats
            Mat rotationMat    = new Mat();
            Mat translationMat = new Mat();

            Image <Gray, Byte> image = new Image <Gray, Byte>(images[0]);

            imgSize = image.Size;

            CvInvoke.CalibrateCamera(
                objectPoints,
                corners,
                image.Size,
                param.cameraMatrix.Mat,
                param.distortionCoeffs.Mat,
                rotationMat,
                translationMat,
                CalibType.Default,
                new MCvTermCriteria(30, 0.1));

            cameraParam.Clear();
            cameraParam.Add(param);
            return(_isCalibrated = true);
        }
        public void TestChessboardCalibration()
        {
            Size patternSize = new Size(9, 6);

            Image <Gray, Byte> chessboardImage = EmguAssert.LoadImage <Gray, byte>("left01.jpg");

            Util.VectorOfPointF corners = new Util.VectorOfPointF();
            bool patternWasFound        = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);

            chessboardImage.FindCornerSubPix(
                new PointF[][] { corners.ToArray() },
                new Size(10, 10),
                new Size(-1, -1),
                new MCvTermCriteria(0.05));

            MCvPoint3D32f[]           objectPts = CalcChessboardCorners(patternSize, 1.0f);
            IntrinsicCameraParameters intrisic  = new IntrinsicCameraParameters(8);

            ExtrinsicCameraParameters[] extrinsic;

            using (VectorOfVectorOfPoint3D32F ptsVec = new VectorOfVectorOfPoint3D32F(new MCvPoint3D32f[][] { objectPts }))
                using (VectorOfVectorOfPointF imgPtsVec = new VectorOfVectorOfPointF(corners))
                {
                    Mat             calMat  = CvInvoke.InitCameraMatrix2D(ptsVec, imgPtsVec, chessboardImage.Size, 0);
                    Matrix <double> calMatF = new Matrix <double>(calMat.Rows, calMat.Cols, calMat.NumberOfChannels);
                    calMat.CopyTo(calMatF);
                }

            double error = CameraCalibration.CalibrateCamera(new MCvPoint3D32f[][] { objectPts }, new PointF[][] { corners.ToArray() },
                                                             chessboardImage.Size, intrisic, CvEnum.CalibType.Default, new MCvTermCriteria(30, 1.0e-10), out extrinsic);

            CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);
            //CameraCalibration.DrawChessboardCorners(chessboardImage, patternSize, corners);
            Image <Gray, Byte> undistorted = intrisic.Undistort(chessboardImage);

            //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));

            Mat[] rotationVectors, translationVectors;
            CvInvoke.CalibrateCamera(new MCvPoint3D32f[][] { objectPts }, new PointF[][] { corners.ToArray() },
                                     chessboardImage.Size, intrisic.IntrinsicMatrix, intrisic.DistortionCoeffs, CalibType.Default,
                                     new MCvTermCriteria(30, 1.0e-10),
                                     out rotationVectors, out translationVectors);
        }
        public void Calibrate(VectorOfVectorOfPointF cornersPoints, Size imageSize, int innerCornersPerChessboardCols,
            int innerCornersPerChessboardRows)
        {
            modelPoints = CreateModelPoints(cornersPoints.Size, innerCornersPerChessboardCols,
                innerCornersPerChessboardRows);

            var rotationVectors = new VectorOfMat();
            var translationVectors = new VectorOfMat();

            CvInvoke.CalibrateCamera(modelPoints, cornersPoints, imageSize, cameraMatrix, cameraDistortionCoeffs,
                rotationVectors, translationVectors, CalibType.Default, new MCvTermCriteria(10));

            translation = new Matrix<double>(translationVectors[0].Rows, translationVectors[0].Cols,
                translationVectors[0].DataPointer);

            var rotationMatrix = new Matrix<double>(rotationVectors[0].Rows, rotationVectors[0].Cols,
                rotationVectors[0].DataPointer);

            rotation = new RotationVector3D(new[] {rotationMatrix[0, 0], rotationMatrix[1, 0], rotationMatrix[2, 0]});
        }
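
        // A small follow-up sketch: RotationVector3D exposes the equivalent 3x3
        // rotation matrix through its RotationMatrix property (the Rodrigues
        // expansion), which is convenient for assembling a full [R|t] extrinsic
        // from the rotation and translation computed above. The helper name is ours.
        public Matrix<double> GetRotationMatrix()
        {
            return rotation.RotationMatrix;
        }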
        /// <summary>
        /// Estimates transformation between the 2 cameras making a stereo pair. If we have a stereo camera, where the relative position and orientation of the 2 cameras is fixed, and if we computed poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2), respectively (that can be done with cvFindExtrinsicCameraParams2), obviously, those poses will relate to each other, i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need to know the position and orientation of the 2nd camera relative to the 1st camera. That's what the described function does. It computes (R, T) such that:
        /// R2=R*R1,
        /// T2=R*T1 + T
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="intrinsicParam1">The intrisinc parameters for camera 1, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="intrinsicParam2">The intrisinc parameters for camera 2, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="flags">Different flags</param>
        /// <param name="extrinsicParams">The extrinsic parameters which contains:
        /// R - The rotation matrix between the 1st and the 2nd cameras' coordinate systems;
        /// T - The translation vector between the cameras' coordinate systems. </param>
        /// <param name="essentialMatrix">The essential matrix</param>
        /// <param name="termCrit">Termination criteria for the iterative optimiziation algorithm </param>
        /// <param name="foundamentalMatrix">The fundamental matrix</param>
        public static void StereoCalibrate(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints1,
            PointF[][] imagePoints2,
            IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            CvEnum.CalibType flags,
            MCvTermCriteria termCrit,
            out ExtrinsicCameraParameters extrinsicParams,
            out Matrix <double> foundamentalMatrix,
            out Matrix <double> essentialMatrix)
        {
            Debug.Assert(objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length, "The number of images for object points should equal the number of images for image points");

            using (VectorOfVectorOfPoint3D32F objectPointVec = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF imagePoints1Vec = new VectorOfVectorOfPointF(imagePoints1))
                    using (VectorOfVectorOfPointF imagePoints2Vec = new VectorOfVectorOfPointF(imagePoints2))
                    {
                        extrinsicParams    = new ExtrinsicCameraParameters();
                        essentialMatrix    = new Matrix <double>(3, 3);
                        foundamentalMatrix = new Matrix <double>(3, 3);

                        CvInvoke.StereoCalibrate(
                            objectPointVec,
                            imagePoints1Vec,
                            imagePoints2Vec,

                            intrinsicParam1.IntrinsicMatrix,
                            intrinsicParam1.DistortionCoeffs,
                            intrinsicParam2.IntrinsicMatrix,
                            intrinsicParam2.DistortionCoeffs,
                            imageSize,
                            extrinsicParams.RotationVector,
                            extrinsicParams.TranslationVector,
                            essentialMatrix,
                            foundamentalMatrix,
                            flags,
                            termCrit);
                    }
        }
      /// <summary>
      /// Estimates transformation between the 2 cameras making a stereo pair. If we have a stereo camera, where the relative position and orientation of the 2 cameras is fixed, and if we computed poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2), respectively (that can be done with cvFindExtrinsicCameraParams2), obviously, those poses will relate to each other, i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need to know the position and orientation of the 2nd camera relative to the 1st camera. That's what the described function does. It computes (R, T) such that:
      /// R2=R*R1,
      /// T2=R*T1 + T
      /// </summary>
      /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
      /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
      /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
      /// <param name="cameraMatrix1">The input/output camera matrices [fxk 0 cxk; 0 fyk cyk; 0 0 1]. If CV_CALIB_USE_INTRINSIC_GUESS or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of the elements of the matrices must be initialized</param>
      /// <param name="distCoeffs1">The input/output vectors of distortion coefficients for each camera, 4x1, 1x4, 5x1 or 1x5</param>
      /// <param name="cameraMatrix2">The input/output camera matrices [fxk 0 cxk; 0 fyk cyk; 0 0 1]. If CV_CALIB_USE_INTRINSIC_GUESS or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of the elements of the matrices must be initialized</param>
      /// <param name="distCoeffs2">The input/output vectors of distortion coefficients for each camera, 4x1, 1x4, 5x1 or 1x5</param>
      /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
      /// <param name="r">The rotation matrix between the 1st and the 2nd cameras' coordinate systems </param>
      /// <param name="t">The translation vector between the cameras' coordinate systems</param>
      /// <param name="e">The optional output essential matrix</param>
      /// <param name="f">The optional output fundamental matrix </param>
      /// <param name="termCrit">Termination criteria for the iterative optimization algorithm</param>
      /// <param name="flags">The calibration flags</param>
      public static void StereoCalibrate(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePoints1,
         PointF[][] imagePoints2,
         IInputOutputArray cameraMatrix1,
         IInputOutputArray distCoeffs1,
         IInputOutputArray cameraMatrix2,
         IInputOutputArray distCoeffs2,
         Size imageSize,
         IOutputArray r,
         IOutputArray t,
         IOutputArray e,
         IOutputArray f,
         CvEnum.CalibType flags,
         MCvTermCriteria termCrit)
      {
         System.Diagnostics.Debug.Assert(
            objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length,
            "The number of images for objects points should be equal to the number of images for image points");

         using (VectorOfVectorOfPoint3D32F objectPointVec = new VectorOfVectorOfPoint3D32F(objectPoints))
         using (VectorOfVectorOfPointF imagePoints1Vec = new VectorOfVectorOfPointF(imagePoints1))
         using (VectorOfVectorOfPointF imagePoints2Vec = new VectorOfVectorOfPointF(imagePoints2))
         {
            CvInvoke.StereoCalibrate(
               objectPointVec,
               imagePoints1Vec,
               imagePoints2Vec,
               cameraMatrix1,
               distCoeffs1,
               cameraMatrix2,
               distCoeffs2,
               imageSize,
               r,
               t,
               e,
               f,
               flags,
               termCrit);
         }
      }
      /// <summary>
      /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
      /// </summary>
      /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
      /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
      /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
      /// <param name="rotationVectors">The output 3xM or Mx3 array of rotation vectors (compact representation of rotation matrices, see cvRodrigues2). </param>
      /// <param name="translationVectors">The output 3xM or Mx3 array of translation vectors</param>/// <param name="calibrationType">cCalibration type</param>
      /// <param name="termCriteria">The termination criteria</param>
      /// <param name="cameraMatrix">The output camera matrix (A) [fx 0 cx; 0 fy cy; 0 0 1]. If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATION are specified, some or all of fx, fy, cx, cy must be initialized</param>
      /// <param name="distortionCoeffs">The output 4x1 or 1x4 vector of distortion coefficients [k1, k2, p1, p2]</param>
      /// <returns>The final reprojection error</returns>
      public static double CalibrateCamera(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePoints,
         Size imageSize,
         IInputOutputArray cameraMatrix,
         IInputOutputArray distortionCoeffs,
         CvEnum.CalibType calibrationType,
         MCvTermCriteria termCriteria,
         out Mat[] rotationVectors,
         out Mat[] translationVectors)
      {
         System.Diagnostics.Debug.Assert(objectPoints.Length == imagePoints.Length,
            "The number of images for objects points should be equal to the number of images for image points");
         int imageCount = objectPoints.Length;

         using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
         using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints))
         {
            double reprojectionError;
            using (VectorOfMat rVecs = new VectorOfMat())
            using (VectorOfMat tVecs = new VectorOfMat())
            {
               reprojectionError = CvInvoke.CalibrateCamera(
                  vvObjPts,
                  vvImgPts,
                  imageSize,
                  cameraMatrix,
                  distortionCoeffs,
                  rVecs,
                  tVecs,
                  calibrationType,
                  termCriteria);

               rotationVectors = new Mat[imageCount];
               translationVectors = new Mat[imageCount];
               for (int i = 0; i < imageCount; i++)
               {
                  rotationVectors[i] = new Mat();
                  // copy each per-view vector out of the native VectorOfMat results
                  using (Mat matR = rVecs[i])
                     matR.CopyTo(rotationVectors[i]);
                  translationVectors[i] = new Mat();
                  using (Mat matT = tVecs[i])
                     matT.CopyTo(translationVectors[i]);
               }
            }
            return reprojectionError;
         }
      }
        public bool calibrate(float squareEdge, Size patternSize, string[] imagesLeft, string[] imagesRight)
        {
            List <VectorOfVectorOfPointF> listCorners = findCorners(squareEdge, patternSize, imagesLeft, imagesRight);

            if (listCorners.Last().Size == 0)
            {
                Console.WriteLine("Cannot find chessboard!");
                return(false);
            }

            VectorOfPoint3D32F         chessboard   = getChessboardCorners(squareEdge, patternSize);
            VectorOfVectorOfPoint3D32F objectPoints = new VectorOfVectorOfPoint3D32F();

            for (int i = listCorners.Last().Size; i > 0; i--)
            {
                objectPoints.Push(chessboard);
            }

            Image <Gray, Byte> image = new Image <Gray, Byte>(imagesLeft[0]);

            CameraParam camLeft  = new CameraParam();
            CameraParam camRight = new CameraParam();

            CvInvoke.StereoCalibrate(
                objectPoints,
                listCorners[0],
                listCorners[1],
                camLeft.cameraMatrix.Mat,
                camLeft.distortionCoeffs.Mat,
                camRight.cameraMatrix.Mat,
                camRight.distortionCoeffs.Mat,
                image.Size,
                R, T, E, F,
                CalibType.Default,
                new MCvTermCriteria(30, 1e-5)); // stop after 30 iterations or eps < 1e-5


            Rectangle roi1 = Rectangle.Empty, roi2 = Rectangle.Empty;

            CvInvoke.StereoRectify(
                camLeft.cameraMatrix.Mat,
                camLeft.distortionCoeffs.Mat,
                camRight.cameraMatrix.Mat,
                camRight.distortionCoeffs.Mat,
                image.Size,
                R, T,
                camLeft.rotationMatrix.Mat,
                camRight.rotationMatrix.Mat,
                camLeft.translationMatrix.Mat,
                camRight.translationMatrix.Mat,
                disparityMatrix,
                StereoRectifyType.Default,
                -1, Size.Empty,
                ref roi1,
                ref roi2);


            cameraParam.Clear();
            cameraParam.Add(camLeft);
            cameraParam.Add(camRight);

            rectTransforms = getRectifyTransforms();

            Matrix <double> p = new Matrix <double>(new double[, ] {
                { image.Size.Width / 2 }, { image.Size.Height / 2 }, { 1 }
            });
            Matrix <double> px = rectTransforms[0].Mul(p);

            double[,] dL = p.Sub(px.Mul(1 / px[2, 0])).Data;

            px           = rectTransforms[1].Mul(p);
            double[,] dR = p.Sub(px.Mul(1 / px[2, 0])).Data;

            rectTransforms = getRectifyTransforms(dL, dR);

            rectMask[0] = getRectMask(image.Size, rectTransforms[0]);
            rectMask[1] = getRectMask(image.Size, rectTransforms[1]);

            _isStereo = true;
            return(_isCalibrated = true);
        }
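
        // A hedged continuation of the stereo example above: once StereoRectify has
        // filled R1/R2 (stored in rotationMatrix) and P1/P2 (stored here in
        // translationMatrix), InitUndistortRectifyMap and Remap resample each raw
        // frame into the rectified geometry. The helper itself is illustrative.
        private Image <Gray, Byte> rectify(Image <Gray, Byte> raw, CameraParam cam)
        {
            Mat mapX = new Mat(), mapY = new Mat();
            CvInvoke.InitUndistortRectifyMap(
                cam.cameraMatrix.Mat,
                cam.distortionCoeffs.Mat,
                cam.rotationMatrix.Mat,      // R from StereoRectify
                cam.translationMatrix.Mat,   // P from StereoRectify
                raw.Size,
                DepthType.Cv32F,
                1,
                mapX,
                mapY);
            Image <Gray, Byte> rectified = new Image <Gray, Byte>(raw.Size);
            CvInvoke.Remap(raw, rectified, mapX, mapY, Inter.Linear);
            return rectified;
        }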
        /// <summary>
        /// Performs a single calibration execution.
        /// </summary>
        /// <param name="image">The image to search.</param>
        /// <param name="annotatedImage">The image with the corners/circles annotated.</param>
        private void SnapCalibrate(Image <Bgr, byte> image, ref Image <Bgr, byte> annotatedImage)
        {
            try
            {
                // initialize variables if first snap
                if (_imageIndex == 0)
                {
                    _cameraMatrix = new Mat(3, 3, DepthType.Cv64F, 1);
                    _distCoeffs   = new Mat(8, 1, DepthType.Cv64F, 1);
                    _objectPoints = new VectorOfVectorOfPoint3D32F();
                    _imagePoints  = new VectorOfVectorOfPointF();
                    _rvecs        = new Mat();
                    _tvecs        = new Mat();
                    Find          = false;
                    Undistort     = false;
                }

                // find corners/circles
                var corners = new VectorOfPointF();
                var found   = FindCorners(image, ref annotatedImage, corners);
                if (!found)
                {
                    return;
                }

                // flash (invert) the output image to signal a successful find
                annotatedImage = annotatedImage.Not();

                // add corners to image points vector
                _imagePoints.Push(corners);

                // construct object points
                var objectList = new List <MCvPoint3D32f>();
                for (var col = 0; col < _cornersPerCol; col++)
                {
                    for (var row = 0; row < _cornersPerRow; row++)
                    {
                        objectList.Add(new MCvPoint3D32f(row * _objWidth, col * _objHeight, 0.0F));
                    }
                }

                // add constructed object points to object points vector
                _objectPoints.Push(new VectorOfPoint3D32F(objectList.ToArray()));

                // increment image index
                ImageIndex++;

                // exit if we haven't reached the requested number of images
                if (_imageIndex < _numberOfImages)
                {
                    return;
                }

                // estimate intrinsic/extrinsic parameters
                ProjectionError = CvInvoke.CalibrateCamera(
                    _objectPoints,
                    _imagePoints,
                    image.Size,
                    _cameraMatrix,
                    _distCoeffs,
                    _rvecs,
                    _tvecs,
                    _calibType,
                    new MCvTermCriteria(30, 0.1));

                // latch undistort; reset image index
                Undistort  = true;
                ImageIndex = 0;

                // save parameters to file to be used by undistort image processor
                SaveParameters();
            }
            finally
            {
                Snap = false;
            }
        }
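
        /// <summary>
        /// A hedged companion sketch: applies the parameters estimated by
        /// SnapCalibrate to a frame once Undistort has been latched. The helper
        /// name and wiring are illustrative.
        /// </summary>
        private void UndistortFrame(Image <Bgr, byte> image, ref Image <Bgr, byte> annotatedImage)
        {
            if (!Undistort)
            {
                return;
            }
            Image <Bgr, byte> corrected = new Image <Bgr, byte>(image.Size);
            // remap pixels through the inverse of the estimated lens distortion
            CvInvoke.Undistort(image, corrected, _cameraMatrix, _distCoeffs);
            annotatedImage = corrected;
        }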