Example #1
0
 // Converts this camera calibration into a projector calibration.
 // NOTE(review): unimplemented stub — always returns null, so callers MUST null-check.
 // Presumably the intended implementation reuses the camera intrinsics/extrinsics and adds
 // near/far planes (the VVVV approach referenced elsewhere in this file) — TODO confirm.
 public ProjectorCalibrationResult ToProjectorCalibration(CameraCalibrationResult cameraCalibration)
 {
     //TODO: implement — derive the projector calibration from the given camera calibration
     return(null);
 }
Example #2
0
    // Original method http://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
    //// An example of the code working can be found in VVVV
    //// vvvv git https://github.com/elliotwoods/VVVV.Nodes.EmguCV/blob/master/src/CameraCalibration/CalibrateCamera.cs
    //// to see it working in vvvv, check "{vvvv}\packs\{vvvv.imagepack}\nodes\modules\Image\OpenCV\CalibrateProjector (CV.Transform).v4p"
    //// The general idea is to take the camera calibration and use it for a projector, by adding near/far planes
    //// The code might be outdated, since they changed the EmguCV interface

    // Returns a CameraCalibrationResult with the calibration parameters if calibration was possible, null otherwise
    // Computes a camera calibration (intrinsics + first-view extrinsics) from matched
    // 3D object points and 2D image points, via the (patched) EmguCV CalibrateCamera wrapper.
    // Returns null and sets 'status' to an error description on failure.
    public static CameraCalibrationResult ComputeCameraCalibration(
        Vector3[] inObjectPoints,                      // N Points in space(x,y,z)
                                                       // * when computing intrinsics, use the checkerboard as reference frame (that means the corners won't move)
                                                       // * when computing extrinsics, you can use the checkerboard corners in world coordinates for global position estimation
        Vector2[] inImagePoints,                       // N*S points on the image plane(u,v) matching the N points in space, where
                                                       // * for intrinsic computation, S = number of samples
                                                       // * for extrinsic computation, S = 1
        Size sensorSize,                               // Size of the image, used only to initialize intrinsic camera matrix
        Matrix <double> IntrinsicMatrix,               // The output camera matrix(A)[fx 0 cx; 0 fy cy; 0 0 1]. If CV_CALIB_USE_INTRINSIC_GUESS and / or CV_CALIB_FIX_ASPECT_RATION are specified, some or all of fx, fy, cx, cy must be initialized
                                                       // NOTE(review): parameter is not 'ref'/'out', so when intrinsicGuess replaces it below the
                                                       // caller's reference is NOT updated — the result matrix is only visible via the returned object. Verify callers.
        out string status,                             // OK if everything went well, verbose error description otherwise
        bool intrinsicGuess             = true,        // If intrinsicGuess is true, the intrinsic matrix will be initialized with default values
        bool normalizedImageCoordinates = true,        // if true, the image coordinates are normalized between 0-1
        CalibType flags = CalibType.UseIntrinsicGuess) // Different flags:
                                                       // * If Emgu.CV.CvEnum.CalibType == CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function
                                                       // * if you use FIX_ASPECT_RATIO and FIX_FOCAL_LENGTH options, these values need to be set in the intrinsic parameters before the CalibrateCamera function is called. Otherwise 0 values are used as default.
                                                       //            flags |= CalibType.UseIntrinsicGuess;   // uses the intrinsicMatrix as initial estimation, or generates an initial estimation using imageSize
                                                       //            flags |= CalibType.FixFocalLength;      // if (CV_CALIB_USE_INTRINSIC_GUESS) then: {fx,fy} are constant
                                                       //            flags |= CalibType.FixAspectRatio;      // if (CV_CALIB_USE_INTRINSIC_GUESS) then: fy is a free variable, fx/fy stays constant
                                                       //            flags |= CalibType.FixPrincipalPoint;   // if (CV_CALIB_USE_INTRINSIC_GUESS) then: {cx,cy} are constant
                                                       //            flags |= (CalibType.FixK1               //  Given CalibType.FixK{i}: if (CV_CALIB_USE_INTRINSIC_GUESS) then: K{i} = distortionCoefficents[i], else: ki = 0
                                                       //                    | CalibType.FixK2
                                                       //                    | CalibType.FixK3
                                                       //                    | CalibType.FixK4
                                                       //                    | CalibType.FixK5
                                                       //                    | CalibType.FixK6);
                                                       //            flags |= CalibType.ZeroTangentDist;     // tangential distortion is zero: {P1,P2} = {0,0}
                                                       //            flags |= CalibType.RationalModel;       // enable K4,K5,K6, disabled by default
    {
        int nPointsPerImage = inObjectPoints.Length;

        // Guard: cannot calibrate without any points (also avoids the division by zero below).
        if (nPointsPerImage == 0)
        {
            status = "Insufficient points";
            return(null);
        }
        // Number of views: image points come flattened as S samples of N points each.
        // NOTE(review): assumes inImagePoints.Length is an exact multiple of nPointsPerImage — not validated here.
        int nImages = inImagePoints.Length / nPointsPerImage;

        Debug.Log("point/images" + nPointsPerImage + "/" + nImages);


        // Intrinsics: an in/out intrinsic matrix, and depending on the calibrationType, the distortion coefficients
        if (intrinsicGuess)
        {
            // Replace the incoming matrix with a fresh default guess:
            // fx/fy seeded with the sensor dimensions, principal point at the image center.
            IntrinsicMatrix = new Matrix <double>(3, 3);
            // NOTE: A possible cause of failure is that this matrix might be transposed (given how openCV handles indexes)
            IntrinsicMatrix[0, 0] = sensorSize.Width;
            IntrinsicMatrix[1, 1] = sensorSize.Height;
            IntrinsicMatrix[0, 2] = sensorSize.Width / 2.0d;
            IntrinsicMatrix[1, 2] = sensorSize.Height / 2.0d;
            IntrinsicMatrix[2, 2] = 1;
        }
        // Output distortion coefficients. NOTE(review): allocated as 1x8 (presumably [k1,k2,p1,p2,k3,k4,k5,k6]),
        // although the original comment claimed a 4x1/1x4 [k1,k2,p1,p2] vector — confirm against the patched CalibrateCamera.
        Emgu.CV.IInputOutputArray distortionCoeffs = new Matrix <double>(1, 8);

        // Matching world points (3D) to image points (2D), with the accompanying size of the image in pixels
        MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[nImages][];  //The joint matrix of object points, 3xN or Nx3, where N is the total number of points in all views
        PointF[][]        imagePoints  = new PointF[nImages][];         //The joint matrix of corresponding image points, 2xN or Nx2, where N is the total number of points in all views

        for (int i = 0; i < nImages; i++)
        {
            objectPoints[i] = new MCvPoint3D32f[nPointsPerImage];
            imagePoints[i]  = new PointF[nPointsPerImage];

            for (int j = 0; j < nPointsPerImage; j++)
            {
                // The same object points are reused for every view (checkerboard as reference frame).
                objectPoints[i][j].X = inObjectPoints[j].x;
                objectPoints[i][j].Y = inObjectPoints[j].y;
                objectPoints[i][j].Z = inObjectPoints[j].z;

                if (normalizedImageCoordinates)
                {
                    // Map normalized [0,1] coordinates to pixels; y is flipped because the image origin
                    // differs between Unity (bottom-left) and OpenCV (top-left) — TODO confirm.
                    imagePoints[i][j].X = inImagePoints[i * nPointsPerImage + j].x * (sensorSize.Width - 1);
                    imagePoints[i][j].Y = (1 - inImagePoints[i * nPointsPerImage + j].y) * (sensorSize.Height - 1);
                }
                else
                {
                    imagePoints[i][j].X = inImagePoints[i * nPointsPerImage + j].x;
                    // NOTE(review): suspicious — y is flipped as (1 - y) WITHOUT scaling by (Height - 1),
                    // which only makes sense for normalized input. For pixel input this likely should be
                    // plain .y (or (Height - 1 - y)). Verify against callers before changing.
                    imagePoints[i][j].Y = (1 - inImagePoints[i * nPointsPerImage + j].y);
                }
            }
        }

        //Extrinsics: they are decomposed in position and orientation
        Mat[] rotationVectors;    //The output 3xM or Mx3 array of rotation vectors(compact representation of rotation matrices, see cvRodrigues2).
        Mat[] translationVectors; //The output 3xM or Mx3 array of translation vectors

        // Termination criteria: stop after 10 iterations (no epsilon criterion set).
        Emgu.CV.Structure.MCvTermCriteria termCriteria = new Emgu.CV.Structure.MCvTermCriteria(10);

        try
        {
            // To make this method work it was necessary to patch it (see below)
            double reprojectionError = CalibrateCamera(
                objectPoints,
                imagePoints,
                sensorSize,
                IntrinsicMatrix,
                distortionCoeffs,
                flags,
                termCriteria,
                out rotationVectors,
                out translationVectors);


            // NOTE(review): 'rotation' is never used — MatToVector3(rotationVectors[0]) is used below instead.
            // Candidate for removal.
            var rotation = new Matrix <double>(rotationVectors[0].Rows, rotationVectors[0].Cols, rotationVectors[0].DataPointer);

            // Only the FIRST view's extrinsics are packed into the result; the remaining
            // nImages-1 rotation/translation vectors are discarded.
            CameraCalibrationResult calibration = new CameraCalibrationResult(
                sensorSize.Width, sensorSize.Height,
                new CameraCalibrationResult.Extrinsics(MatToVector3(translationVectors[0]), MatToVector3(rotationVectors[0])),
                new CameraCalibrationResult.Intrinsics(IntrinsicMatrix, sensorSize),
                new CameraCalibrationResult.Distortion(distortionCoeffs),
                reprojectionError
                );
            DebugMatrix(IntrinsicMatrix);
            status = "OK! " + reprojectionError;


            return(calibration);
        }
        catch (Exception e)
        {   // Error path: any failure (including from the native OpenCV call) is reported
            // through 'status' rather than rethrown; callers must check for a null return.
            status = e.Message;
            return(null);
        }
    }