Example #1
 /// <summary>
 /// Calibrate a camera using Charuco corners.
 /// </summary>
 /// <param name="charucoCorners">Vector of detected charuco corners per frame</param>
 /// <param name="charucoIds">List of identifiers for each corner in charucoCorners per frame</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="stdDeviationsIntrinsics">Output vector of standard deviations estimated for intrinsic parameters. Order of deviations values: (fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,τx,τy) If one of parameters is not estimated, it's deviation is equals to zero.</param>
 /// <param name="stdDeviationsExtrinsics">Output vector of standard deviations estimated for extrinsic parameters. Order of deviations values: (R1,T1,…,RM,TM) where M is number of pattern views, Ri,Ti are concatenated 1x3 vectors.</param>
 /// <param name="perViewErrors">Output vector of average re-projection errors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraCharuco(
     IInputArrayOfArrays charucoCorners,
     IInputArrayOfArrays charucoIds,
     CharucoBoard board,
     Size imageSize,
     IInputOutputArray cameraMatrix,
     IInputOutputArray distCoeffs,
     IOutputArray rvecs,
     IOutputArray tvecs,
     IOutputArray stdDeviationsIntrinsics,
     IOutputArray stdDeviationsExtrinsics,
     IOutputArray perViewErrors,
     CalibType flags,
     MCvTermCriteria criteria)
 {
     using (InputArray iaCharucoCorners = charucoCorners.GetInputArray())
     using (InputArray iaCharucoIds = charucoIds.GetInputArray())
     using (InputOutputArray ioaCameraMatrix = cameraMatrix.GetInputOutputArray())
     using (InputOutputArray ioaDistCoeffs = distCoeffs.GetInputOutputArray())
     using (OutputArray oaRvecs = rvecs == null ? OutputArray.GetEmpty() : rvecs.GetOutputArray())
     using (OutputArray oaTvecs = tvecs == null ? OutputArray.GetEmpty() : tvecs.GetOutputArray())
     using (OutputArray oaStdDeviationsIntrinsics = stdDeviationsIntrinsics == null ? OutputArray.GetEmpty() : stdDeviationsIntrinsics.GetOutputArray())
     using (OutputArray oaStdDeviationsExtrinsics = stdDeviationsExtrinsics == null ? OutputArray.GetEmpty() : stdDeviationsExtrinsics.GetOutputArray())
     using (OutputArray oaPerViewErrors = perViewErrors == null ? OutputArray.GetEmpty() : perViewErrors.GetOutputArray())
     {
         return cveArucoCalibrateCameraCharuco(
             iaCharucoCorners, iaCharucoIds, board.BoardPtr, ref imageSize,
             ioaCameraMatrix, ioaDistCoeffs, oaRvecs, oaTvecs,
             oaStdDeviationsIntrinsics, oaStdDeviationsExtrinsics, oaPerViewErrors,
             flags, ref criteria);
     }
 }
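A minimal usage sketch for this overload (not from the Emgu CV sources; the per-frame inputs are placeholders gathered beforehand, e.g. via ArucoInvoke.InterpolateCornersCharuco). Passing null for the optional outputs maps them to OutputArray.GetEmpty(), as the body above shows.

 // framesCorners (PointF[][]) and framesIds (int[][]) are hypothetical inputs.
 using (VectorOfVectorOfPointF allCorners = new VectorOfVectorOfPointF(framesCorners))
 using (VectorOfVectorOfInt allIds = new VectorOfVectorOfInt(framesIds))
 using (Mat cameraMatrix = new Mat())
 using (Mat distCoeffs = new Mat())
 {
     double reprojError = CalibrateCameraCharuco(
         allCorners, allIds, board, imageSize,
         cameraMatrix, distCoeffs,
         null, null,       // rvecs/tvecs not needed here
         null, null, null, // std-deviation and per-view-error outputs skipped
         CalibType.Default, new MCvTermCriteria(30, 1e-4));
 }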
Example #2
 /// <summary>
 /// Calibrate a camera using aruco markers.
 /// </summary>
 /// <param name="corners">Vector of detected marker corners in all frames. The corners should have the same format returned by detectMarkers</param>
 /// <param name="ids">List of identifiers for each marker in corners</param>
 /// <param name="counter">Number of markers in each frame so that corners and ids can be split</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraAruco(
     IInputArrayOfArrays corners, IInputArray ids, IInputArray counter, IBoard board, Size imageSize,
     IInputOutputArray cameraMatrix, IInputOutputArray distCoeffs, IOutputArray rvecs, IOutputArray tvecs,
     CalibType flags, MCvTermCriteria criteria)
 {
     return(CalibrateCameraAruco(corners, ids, counter, board, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs,
                                 null, null, null, flags, criteria));
 }
Example #3
        /// <summary>
        /// Initialize internal data.
        /// </summary>
        /// <param name="imgsize"></param>
        private void Initializer(Size imgsize)
        {
            _nPointsPerImage = _ChessBoard.BoardRows * _ChessBoard.BoardColumns; // number of corners per chessboard view
            _nPoints         = _nPointsPerImage * _ChessBoard.NImages;           // total number of chessboard corners
            _imageSize       = imgsize;                                          // image resolution
            _objectPoints    = new List <MCvPoint3D32f[]>(_ChessBoard.NImages);  // world coordinates (3D) of the chessboard corners
            for (int i = 0; i < _ChessBoard.NImages; i++)
            {
                _objectPoints.Add(new MCvPoint3D32f[_nPointsPerImage]);
            }
            for (int currentImage = 0; currentImage < _ChessBoard.NImages; currentImage++)
            {
                for (int currentRow = 0; currentRow < _ChessBoard.BoardRows; currentRow++)
                {
                    for (int currentCol = 0; currentCol < _ChessBoard.BoardColumns; currentCol++)
                    {
                        int nPoint = currentRow * _ChessBoard.BoardColumns + currentCol;
                        _objectPoints[currentImage][nPoint].X = (float)currentCol * _ChessBoard.SquareWidth;
                        _objectPoints[currentImage][nPoint].Y = (float)currentRow * _ChessBoard.SquareWidth;
                        _objectPoints[currentImage][nPoint].Z = 0.0f;
                    }
                }
            }


            _imagePointsL       = new List <PointF[]>();     // pixel coordinates (2D) of the chessboard corners in the left view
            _imagePointsR       = new List <PointF[]>();     // pixel coordinates (2D) of the chessboard corners in the right view
            _q                  = new Matrix <double>(4, 4); // Q matrix used to compute the 3D point cloud
            _roi1               = new Rectangle();           // rectangle bounding the valid region of the left view
            _roi2               = new Rectangle();           // rectangle bounding the valid region of the right view
            _r1                 = new Matrix <double>(3, 3);
            _r2                 = new Matrix <double>(3, 3);
            _p1                 = new Matrix <double>(3, 4);
            _p2                 = new Matrix <double>(3, 4);
            _mx1                = new Matrix <float>(_imageSize);
            _my1                = new Matrix <float>(_imageSize);
            _mx2                = new Matrix <float>(_imageSize);
            _my2                = new Matrix <float>(_imageSize);
            _extrParamsS        = new ExtrinsicCameraParameters(); // stereo camera extrinsic parameters
            _intrParamL         = new IntrinsicCameraParameters(); // left camera intrinsics
            _intrParamR         = new IntrinsicCameraParameters(); // right camera intrinsics
            _extrParamsL        = new ExtrinsicCameraParameters[_ChessBoard.NImages];
            _extrParamsR        = new ExtrinsicCameraParameters[_ChessBoard.NImages];
            _termCriteria       = new MCvTermCriteria(30, 0.05); // termination criteria
            _calibType          = CalibType.FixK3;
            _DoubleCapture.ImgL = new Mat();
            _DoubleCapture.ImgR = new Mat();
        }
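The triple loop above lays the corners out on a planar Z = 0 grid. The same layout, factored into a small standalone helper (a sketch, not part of the original class):

        // Sketch: generate the planar (Z = 0) world coordinates of the chessboard
        // corners, row-major, spaced by the physical square width.
        private static MCvPoint3D32f[] CreateBoardPoints(int rows, int cols, float squareWidth)
        {
            MCvPoint3D32f[] points = new MCvPoint3D32f[rows * cols];
            for (int r = 0; r < rows; r++)
                for (int c = 0; c < cols; c++)
                    points[r * cols + c] = new MCvPoint3D32f(c * squareWidth, r * squareWidth, 0f);
            return points;
        }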
Example #4
 internal static extern double cveArucoCalibrateCameraCharuco(
     IntPtr charucoCorners,
     IntPtr charucoIds,
     IntPtr board,
     ref Size imageSize,
     IntPtr cameraMatrix,
     IntPtr distCoeffs,
     IntPtr rvecs,
     IntPtr tvecs,
     IntPtr stdDeviationsIntrinsics,
     IntPtr stdDeviationsExtrinsics,
     IntPtr perViewErrors,
     CalibType flags,
     ref MCvTermCriteria criteria);
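An extern declaration like this only binds at run time when paired with a DllImport attribute. In the Emgu CV sources it is decorated roughly as follows; the exact constants are an assumption and vary by Emgu CV version:

 // Assumed attribute shape; CvInvoke.ExternLibrary / CvInvoke.CvCallingConvention
 // are the constants Emgu CV typically uses for its native bindings.
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 internal static extern double cveArucoCalibrateCameraCharuco(
     IntPtr charucoCorners, IntPtr charucoIds, IntPtr board, ref Size imageSize,
     IntPtr cameraMatrix, IntPtr distCoeffs, IntPtr rvecs, IntPtr tvecs,
     IntPtr stdDeviationsIntrinsics, IntPtr stdDeviationsExtrinsics,
     IntPtr perViewErrors, CalibType flags, ref MCvTermCriteria criteria);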
Example #5
 /// <summary>
 /// Calibrate a camera using Charuco corners.
 /// </summary>
 /// <param name="charucoCorners">Vector of detected charuco corners per frame</param>
 /// <param name="charucoIds">List of identifiers for each corner in charucoCorners per frame</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;>). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraCharuco(
     IInputArrayOfArrays charucoCorners,
     IInputArrayOfArrays charucoIds,
     CharucoBoard board,
     Size imageSize,
     IInputOutputArray cameraMatrix,
     IInputOutputArray distCoeffs,
     IOutputArray rvecs,
     IOutputArray tvecs,
     CalibType flags,
     MCvTermCriteria criteria)
 {
     return CalibrateCameraCharuco(charucoCorners, charucoIds, board, imageSize, cameraMatrix, distCoeffs,
         rvecs, tvecs, null, null, null, flags, criteria);
 }
Example #6
 /// <summary>
 /// Calibrate a camera using aruco markers.
 /// </summary>
 /// <param name="corners">Vector of detected marker corners in all frames. The corners should have the same format returned by detectMarkers</param>
 /// <param name="ids">List of identifiers for each marker in corners</param>
 /// <param name="counter">Number of markers in each frame so that corners and ids can be split</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;>). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraAruco(
     IInputArray corners, IInputArray ids, IInputArray counter, IBoard board, Size imageSize,
     IInputOutputArray cameraMatrix, IInputOutputArray distCoeffs, IOutputArray rvecs, IOutputArray tvecs,
     CalibType flags, MCvTermCriteria criteria)
 {
     using (InputArray iaCorners = corners.GetInputArray())
         using (InputArray iaIds = ids.GetInputArray())
             using (InputArray iaCounter = counter.GetInputArray())
                 using (InputOutputArray ioaCameraMatrix = cameraMatrix.GetInputOutputArray())
                     using (InputOutputArray ioaDistCoeffs = distCoeffs.GetInputOutputArray())
                         using (OutputArray oaRvecs = rvecs == null ? OutputArray.GetEmpty() : rvecs.GetOutputArray())
                             using (OutputArray oaTvecs = tvecs == null ? OutputArray.GetEmpty() : tvecs.GetOutputArray())
                             {
                                 return(cveArucoCalibrateCameraAruco(iaCorners, iaIds, iaCounter, board.BoardPtr, ref imageSize,
                                                                     ioaCameraMatrix, ioaDistCoeffs, oaRvecs, oaTvecs, flags, ref criteria));
                             }
 }
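A minimal call-site sketch (input names are illustrative): marker corners from all frames are flattened into one vector, with counter recording how many markers belong to each frame, e.g. as accumulated from ArucoInvoke.DetectMarkers.

 // cornersPerMarker (PointF[][]), markerIds (int[]) and markersPerFrame (int[])
 // are hypothetical inputs accumulated over the captured frames.
 using (VectorOfVectorOfPointF allCorners = new VectorOfVectorOfPointF(cornersPerMarker))
 using (VectorOfInt allIds = new VectorOfInt(markerIds))
 using (VectorOfInt counter = new VectorOfInt(markersPerFrame))
 using (Mat cameraMatrix = new Mat())
 using (Mat distCoeffs = new Mat())
 {
     double reprojError = CalibrateCameraAruco(
         allCorners, allIds, counter, board, imageSize,
         cameraMatrix, distCoeffs, null, null,
         CalibType.Default, new MCvTermCriteria(30, 1e-4));
 }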
Example #7
        public Kalibrator(List <string> fileList, Size boardSize)
        {
            _fileList = new List <string>();
            for (int i = 0; i < fileList.Count; i++)
            {
                this._fileList.Add(fileList[i]);
            }
            this._boardSize = boardSize;

            this.Flag = CalibType.Default;
            this.MustInitUndistort = true;
            this.ImagePoints       = new List <PointF[]>(); //PointF[]
            this.ObjectPoints      = new List <MCvPoint3D32f[]>();

            // Detect the chessboard corners in every file, then calibrate using
            // the size of the first image.
            int    v      = AddChessboardPoints(this._fileList, this._boardSize);
            Size   imsize = new Image <Bgr, byte>(this._fileList[0]).Size;
            double result = Calibrate(imsize);
        }
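Hypothetical construction (paths and board size are placeholders; Directory and ToList assume System.IO and System.Linq). The constructor immediately detects the corners and runs the calibration:

            List <string> files = Directory.GetFiles("calibration_images", "*.png").ToList();
            Kalibrator kalibrator = new Kalibrator(files, new Size(9, 6)); // inner-corner count of the chessboard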
Example #8
    // Original method http://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
    //// An example of the code working can be found in VVVV
    //// vvvv git https://github.com/elliotwoods/VVVV.Nodes.EmguCV/blob/master/src/CameraCalibration/CalibrateCamera.cs
    //// to see it working in vvvv, check "{vvvv}\packs\{vvvv.imagepack}\nodes\modules\Image\OpenCV\CalibrateProjector (CV.Transform).v4p"
    //// The general idea is to take the camera calibration and use it for a projector, by adding near/far planes
    //// The code might be outdated, since they changed the EmguCV interface

    // Returns a CameraCalibrationResult with the calibration parameters if calibration was possible, null otherwise
    public static CameraCalibrationResult ComputeCameraCalibration(
        Vector3[] inObjectPoints,                      // N Points in space(x,y,z)
                                                       // * when computing intrinsics, use the checkerboard as reference frame (that means the corners won't move)
                                                       // * when computing extrinsics, you can use the checkerboard corners in world coordinates for global position estimation
        Vector2[] inImagePoints,                       // N*S points on the image plane(u,v) matching the N points in space, where
                                                       // * for intrinsic computation, S = number of samples
                                                       // * for extrinsic computation, S = 1
        Size sensorSize,                               // Size of the image, used only to initialize intrinsic camera matrix
        Matrix <double> IntrinsicMatrix,               // The output camera matrix(A)[fx 0 cx; 0 fy cy; 0 0 1]. If CV_CALIB_USE_INTRINSIC_GUESS and / or CV_CALIB_FIX_ASPECT_RATION are specified, some or all of fx, fy, cx, cy must be initialized
        out string status,                             // OK if everything went well, verbose error description otherwise
        bool intrinsicGuess             = true,        // If intrinsicGuess is true, the intrinsic matrix will be initialized with default values
        bool normalizedImageCoordinates = true,        // if true, the image coordinates are normalized between 0-1
        CalibType flags = CalibType.UseIntrinsicGuess) // Different flags:
                                                       // * If Emgu.CV.CvEnum.CalibType == CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function
                                                       // * if you use FIX_ASPECT_RATIO and FIX_FOCAL_LENGTH options, these values need to be set in the intrinsic parameters before the CalibrateCamera function is called. Otherwise 0 values are used as default.
                                                       //            flags |= CalibType.UseIntrinsicGuess;   // uses the intrinsicMatrix as initial estimation, or generates an initial estimation using imageSize
                                                       //            flags |= CalibType.FixFocalLength;      // if (CV_CALIB_USE_INTRINSIC_GUESS) then: {fx,fy} are constant
                                                       //            flags |= CalibType.FixAspectRatio;      // if (CV_CALIB_USE_INTRINSIC_GUESS) then: fy is a free variable, fx/fy stays constant
                                                       //            flags |= CalibType.FixPrincipalPoint;   // if (CV_CALIB_USE_INTRINSIC_GUESS) then: {cx,cy} are constant
                                                       //            flags |= (CalibType.FixK1               //  Given CalibType.FixK{i}: if (CV_CALIB_USE_INTRINSIC_GUESS) then: K{i} = distortionCoefficients[i], else K{i} = 0
                                                       //                    | CalibType.FixK2
                                                       //                    | CalibType.FixK3
                                                       //                    | CalibType.FixK4
                                                       //                    | CalibType.FixK5
                                                       //                    | CalibType.FixK6);
                                                       //            flags |= CalibType.ZeroTangentDist;     // tangential distortion is zero: {P1,P2} = {0,0}
                                                       //            flags |= CalibType.RationalModel;       // enable K4,k5,k6, disabled by default
    {
        int nPointsPerImage = inObjectPoints.Length;

        if (nPointsPerImage == 0)
        {
            status = "Insufficient points";
            return(null);
        }
        int nImages = inImagePoints.Length / nPointsPerImage;

        Debug.Log("point/images" + nPointsPerImage + "/" + nImages);


        // Intrinsics: an in/out intrinsic matrix and, depending on the calibration type, the distortion coefficients
        if (intrinsicGuess)
        {
            IntrinsicMatrix = new Matrix <double>(3, 3);
            // NOTE: A possible cause of failure is that this matrix might be transposed (given how openCV handles indexes)
            IntrinsicMatrix[0, 0] = sensorSize.Width;
            IntrinsicMatrix[1, 1] = sensorSize.Height;
            IntrinsicMatrix[0, 2] = sensorSize.Width / 2.0d;
            IntrinsicMatrix[1, 2] = sensorSize.Height / 2.0d;
            IntrinsicMatrix[2, 2] = 1;
        }
        Emgu.CV.IInputOutputArray distortionCoeffs = new Matrix <double>(1, 8); // The output 1x8 vector of distortion coefficients (k1, k2, p1, p2, k3, k4, k5, k6)

        // Matching world points (3D) to image points (2D), with the accompanying size of the image in pixels
        MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[nImages][];  //The joint matrix of object points, 3xN or Nx3, where N is the total number of points in all views
        PointF[][]        imagePoints  = new PointF[nImages][];         //The joint matrix of corresponding image points, 2xN or Nx2, where N is the total number of points in all views

        for (int i = 0; i < nImages; i++)
        {
            objectPoints[i] = new MCvPoint3D32f[nPointsPerImage];
            imagePoints[i]  = new PointF[nPointsPerImage];

            for (int j = 0; j < nPointsPerImage; j++)
            {
                objectPoints[i][j].X = inObjectPoints[j].x;
                objectPoints[i][j].Y = inObjectPoints[j].y;
                objectPoints[i][j].Z = inObjectPoints[j].z;

                if (normalizedImageCoordinates)
                {
                    imagePoints[i][j].X = inImagePoints[i * nPointsPerImage + j].x * (sensorSize.Width - 1);
                    imagePoints[i][j].Y = (1 - inImagePoints[i * nPointsPerImage + j].y) * (sensorSize.Height - 1);
                }
                else
                {
                    imagePoints[i][j].X = inImagePoints[i * nPointsPerImage + j].x;
                    imagePoints[i][j].Y = (1 - inImagePoints[i * nPointsPerImage + j].y); // note: flips y without rescaling to pixel coordinates
                }
            }
        }

        //Extrinsics: they are decomposed in position and orientation
        Mat[] rotationVectors;    //The output 3xM or Mx3 array of rotation vectors(compact representation of rotation matrices, see cvRodrigues2).
        Mat[] translationVectors; //The output 3xM or Mx3 array of translation vectors

        // When to end: 10 iterations
        Emgu.CV.Structure.MCvTermCriteria termCriteria = new Emgu.CV.Structure.MCvTermCriteria(10); //The termination criteria

        try
        {
            // To make this method work it was necessary to patch it (see below)
            double reprojectionError = CalibrateCamera(
                objectPoints,
                imagePoints,
                sensorSize,
                IntrinsicMatrix,
                distortionCoeffs,
                flags,
                termCriteria,
                out rotationVectors,
                out translationVectors);


            var rotation = new Matrix <double>(rotationVectors[0].Rows, rotationVectors[0].Cols, rotationVectors[0].DataPointer);

            CameraCalibrationResult calibration = new CameraCalibrationResult(
                sensorSize.Width, sensorSize.Height,
                new CameraCalibrationResult.Extrinsics(MatToVector3(translationVectors[0]), MatToVector3(rotationVectors[0])),
                new CameraCalibrationResult.Intrinsics(IntrinsicMatrix, sensorSize),
                new CameraCalibrationResult.Distortion(distortionCoeffs),
                reprojectionError
                );
            DebugMatrix(IntrinsicMatrix);
            status = "OK! " + reprojectionError;


            return(calibration);
        }
        catch (Exception e)
        {   // Error
            status = e.Message;
            return(null);
        }
    }
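A hypothetical call site for ComputeCameraCalibration (all input arrays are placeholders), using the default intrinsic guess and normalized image coordinates:

    Matrix <double> intrinsics = new Matrix <double>(3, 3);
    string status;
    CameraCalibrationResult calib = ComputeCameraCalibration(
        boardCornersWorld,   // Vector3[]: checkerboard corners in model space
        detectedCornersUv,   // Vector2[]: S samples of matching image points, concatenated
        new Size(1920, 1080),
        intrinsics,
        out status);
    if (calib == null)
        Debug.Log("Calibration failed: " + status);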
Example #9
 internal static extern double cveArucoCalibrateCameraAruco(
     IntPtr corners, IntPtr ids, IntPtr counter, IntPtr board,
     ref Size imageSize, IntPtr cameraMatrix, IntPtr distCoeffs,
     IntPtr rvecs, IntPtr tvecs, CalibType flags,
     ref MCvTermCriteria criteria);
Example #10
        // EMGU's calibrate camera method has a bug.
        // Refer this case: https://stackoverflow.com/questions/33127581/how-do-i-access-the-rotation-and-translation-vectors-after-camera-calibration-in
        public static double CalibrateCamera(MCvPoint3D32f[][] objectPoints, PointF[][] imagePoints, Size imageSize, IInputOutputArray cameraMatrix, IInputOutputArray distortionCoeffs, CalibType calibrationType, MCvTermCriteria termCriteria, out Mat[] rotationVectors, out Mat[] translationVectors)
        {
            System.Diagnostics.Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for objects points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints)) {
                    double reprojectionError;
                    using (VectorOfMat rVecs = new VectorOfMat())
                        using (VectorOfMat tVecs = new VectorOfMat()) {
                            reprojectionError  = CvInvoke.CalibrateCamera(vvObjPts, vvImgPts, imageSize, cameraMatrix, distortionCoeffs, rVecs, tVecs, calibrationType, termCriteria);
                            rotationVectors    = new Mat[imageCount];
                            translationVectors = new Mat[imageCount];
                            for (int i = 0; i < imageCount; i++)
                            {
                                rotationVectors[i] = new Mat();
                                using (Mat matR = rVecs[i])
                                    matR.CopyTo(rotationVectors[i]);
                                translationVectors[i] = new Mat();
                                using (Mat matT = tVecs[i])
                                    matT.CopyTo(translationVectors[i]);
                            }
                        }
                    return(reprojectionError);
                }
        }
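A sketch of calling the patched wrapper, with objectPoints, imagePoints and the remaining arguments prepared as in Example #8:

        Mat[] rotationVectors, translationVectors;
        double reprojErr = CalibrateCamera(
            objectPoints, imagePoints, imageSize,
            cameraMatrix, distCoeffs,
            CalibType.Default, new MCvTermCriteria(30, 0.05),
            out rotationVectors, out translationVectors);
        // rotationVectors[i] / translationVectors[i] now hold the Rodrigues rotation
        // and the translation of the board in view i.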
Example #11
 /// <summary>
 /// Calibrate a camera using aruco markers.
 /// </summary>
 /// <param name="corners">Vector of detected marker corners in all frames. The corners should have the same format returned by detectMarkers</param>
 /// <param name="ids">List of identifiers for each marker in corners</param>
 /// <param name="counter">Number of markers in each frame so that corners and ids can be split</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;>). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraAruco(
    IInputArray corners, IInputArray ids, IInputArray counter, IBoard board, Size imageSize,
    IInputOutputArray cameraMatrix, IInputOutputArray distCoeffs, IOutputArray rvecs, IOutputArray tvecs,
    CalibType flags, MCvTermCriteria criteria)
 {
    using (InputArray iaCorners = corners.GetInputArray())
    using (InputArray iaIds = ids.GetInputArray())
    using (InputArray iaCounter = counter.GetInputArray())
    using (InputOutputArray ioaCameraMatrix = cameraMatrix.GetInputOutputArray())
    using (InputOutputArray ioaDistCoeffs = distCoeffs.GetInputOutputArray())
    using (OutputArray oaRvecs = rvecs == null ? OutputArray.GetEmpty() : rvecs.GetOutputArray())
    using (OutputArray oaTvecs = tvecs == null ? OutputArray.GetEmpty() : tvecs.GetOutputArray())
    {
       return cveArucoCalibrateCameraAruco(iaCorners, iaIds, iaCounter, board.BoardPtr, ref imageSize,
          ioaCameraMatrix, ioaDistCoeffs, oaRvecs, oaTvecs, flags, ref criteria);
    }
 }