Example 1
0
    /// <summary>
    /// Runs camera calibration on the collected uv/xyz correspondences and, when a
    /// target camera is assigned, applies the resulting extrinsics and projection matrix to it.
    /// </summary>
    /// <param name="pixelWidth">Image width in pixels, used for the calibration image size.</param>
    /// <param name="pixelHeight">Image height in pixels, used for the calibration image size.</param>
    /// <param name="near">Near clip plane used to build the projection matrix from the intrinsics.</param>
    /// <param name="far">Far clip plane used to build the projection matrix from the intrinsics.</param>
    /// <returns>The calibration result (intrinsics, extrinsics, distortion, reprojection error).</returns>
    public Calibration.CameraCalibrationResult ComputeCalibration(int pixelWidth, int pixelHeight, float near, float far)
    {
        string status;

        // Pick the point set to calibrate with. When subsampling is enabled the
        // subsampled lists are only valid AFTER SubSample() runs, so the arrays
        // are filled inside the branch (the previous code also built them before
        // the branch, doing redundant work that was immediately discarded).
        Vector2[] uvPoints;
        Vector3[] xyzPoints;
        if (subsampling)
        {
            SubSample();
            uvPoints  = subsampledUV.ToArray();
            xyzPoints = subsampledXYZ.ToArray();
        }
        else
        {
            uvPoints  = uv.ToArray();
            xyzPoints = xyz.ToArray();
        }

        //Compute calibration
        Debug.Log("Calibration requested: #uv:" + uvPoints.Length + "#xyz:" + xyzPoints.Length);
        Emgu.CV.CvEnum.CalibType flags = Emgu.CV.CvEnum.CalibType.UseIntrinsicGuess;   // uses the intrinsicMatrix as initial estimation, or generates an initial estimation using imageSize
                                                                                       //flags |= CalibType.FixFocalLength;      // if (CV_CALIB_USE_INTRINSIC_GUESS) then: {fx,fy} are constant
                                                                                       // flags |= CalibType.FixAspectRatio;      // if (CV_CALIB_USE_INTRINSIC_GUESS) then: fy is a free variable, fx/fy stays constant
                                                                                       //flags |= CalibType.FixPrincipalPoint;   // if (CV_CALIB_USE_INTRINSIC_GUESS) then: {cx,cy} are constant

        /*flags |= (CalibType.FixK1               //  Given CalibType.FixK{i}: if (CV_CALIB_USE_INTRINSIC_GUESS) then: K{i} = distortionCoefficents[i], else:k ki = 0
         | CalibType.FixK2
         | CalibType.FixK3
         | CalibType.FixK4
         | CalibType.FixK5
         | CalibType.FixK6);
         | // flags |= CalibType.FixIntrinsic;
         | flags |= CalibType.ZeroTangentDist;     // tangential distortion is zero: {P1,P2} = {0,0}
         */
        result = Calibration.ComputeCameraCalibration(xyzPoints, uvPoints, new System.Drawing.Size(pixelWidth, pixelHeight), new Emgu.CV.Matrix <double>(3, 3), out status, true, true, flags);
        Debug.Log(status);

        Debug.Log("distortion:" + result.distortion.ToString());
        error = result.Error;

        // Only touch the target camera when calibration produced a usable
        // (non-identity) projection matrix.
        PerspectiveMatrixAfter = result.intrinsics.ProjectionMatrix(near, far);
        if (targetCamera != null && PerspectiveMatrixAfter != Matrix4x4.identity)
        {
            PerspectiveMatrixBefore = targetCamera.projectionMatrix;
            result.extrinsics.ApplyToTransform(targetCamera.transform);
            targetCamera.projectionMatrix = PerspectiveMatrixAfter;
        }
        return result;
    }
Example 2
0
    /// Patched copy of the EmguCV helper: the stock version had a bug retrieving the
    /// per-view rotation/translation vectors, as reported here:
    /// https://stackoverflow.com/questions/33127581/how-do-i-access-the-rotation-and-translation-vectors-after-camera-calibration-in
    /// 
    /// <summary>
    /// Estimates the intrinsic camera parameters plus the extrinsic parameters of every view.
    /// </summary>
    /// <param name="objectPoints">3D object points; first index selects the image, second the point</param>
    /// <param name="imagePoints">2D image points; first index selects the image, second the point</param>
    /// <param name="imageSize">Image size, used only to seed the intrinsic camera matrix</param>
    /// <param name="cameraMatrix">Output camera matrix (A) [fx 0 cx; 0 fy cy; 0 0 1]. With CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATION, some or all of fx, fy, cx, cy must be pre-initialized</param>
    /// <param name="distortionCoeffs">Output 4x1 or 1x4 distortion coefficient vector [k1, k2, p1, p2]</param>
    /// <param name="calibrationType">Calibration type flags</param>
    /// <param name="termCriteria">Termination criteria for the solver</param>
    /// <param name="rotationVectors">Output rotation vector per view (compact rotation-matrix form, see cvRodrigues2)</param>
    /// <param name="translationVectors">Output translation vector per view</param>
    /// <returns>The final reprojection error</returns>
    public static double CalibrateCamera(
       MCvPoint3D32f[][] objectPoints,
       PointF[][] imagePoints,
       Size imageSize,
       IInputOutputArray cameraMatrix,
       IInputOutputArray distortionCoeffs,
       Emgu.CV.CvEnum.CalibType calibrationType,
       MCvTermCriteria termCriteria,
       out Mat[] rotationVectors,
       out Mat[] translationVectors)
    {
        System.Diagnostics.Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for objects points should be equal to the number of images for image points");
        int viewCount = objectPoints.Length;

        using (VectorOfVectorOfPoint3D32F objPtsVec = new VectorOfVectorOfPoint3D32F(objectPoints))
        using (VectorOfVectorOfPointF imgPtsVec = new VectorOfVectorOfPointF(imagePoints))
        using (VectorOfMat rotVec = new VectorOfMat())
        using (VectorOfMat transVec = new VectorOfMat())
        {
            double reprojError = CvInvoke.CalibrateCamera(
                objPtsVec,
                imgPtsVec,
                imageSize,
                cameraMatrix,
                distortionCoeffs,
                rotVec,
                transVec,
                calibrationType,
                termCriteria);

            rotationVectors = new Mat[viewCount];
            translationVectors = new Mat[viewCount];
            for (int view = 0; view < viewCount; view++)
            {
                // The VectorOfMat indexer hands back a temporary Mat header; copy its
                // data into a Mat we own and dispose the temporary immediately —
                // this is the actual fix over the stock EmguCV implementation.
                Mat ownedRotation = new Mat();
                using (Mat tmp = rotVec[view])
                    tmp.CopyTo(ownedRotation);
                rotationVectors[view] = ownedRotation;

                Mat ownedTranslation = new Mat();
                using (Mat tmp = transVec[view])
                    tmp.CopyTo(ownedTranslation);
                translationVectors[view] = ownedTranslation;
            }

            return reprojError;
        }
    }
Example 3
0
        /// <summary>
        /// Minimal demo driver: builds a dummy one-view set of object/image point
        /// correspondences, seeds an intrinsic-guess camera matrix, and runs the
        /// patched CalibrateCamera wrapper.
        /// </summary>
        static void Main(string[] args)
        {
            // A single view with four identical dummy 3D points (just enough to call the API).
            MCvPoint3D32f objectp_1 = new MCvPoint3D32f(1f, 1f, 1f);
            MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[][] { new MCvPoint3D32f[]
                                                                     { objectp_1, objectp_1, objectp_1, objectp_1 } };

            // Matching dummy 2D image points for the same view.
            PointF imagep_1 = new PointF(1f, 1f);
            PointF[][] imagePoints = new PointF[][] { new PointF[] { imagep_1, imagep_1, imagep_1, imagep_1 } };

            Size imageSize = new Size(500, 500);

            // Intrinsic guess: [fx 0 cx; 0 fy cy; 0 0 1]
            Mat cameraMat = new Mat(new Size(3, 3), DepthType.Cv32F, 1);
            cameraMat.SetValue(0, 0, 302);
            cameraMat.SetValue(0, 1, 0);
            cameraMat.SetValue(0, 2, 101);
            cameraMat.SetValue(1, 0, 0);
            cameraMat.SetValue(1, 1, 411);
            cameraMat.SetValue(1, 2, 106);
            cameraMat.SetValue(2, 0, 0);
            cameraMat.SetValue(2, 1, 0);
            cameraMat.SetValue(2, 2, 1);   // BUGFIX: was SetValue(2, 1, 1), overwriting (2,1) and leaving (2,2) = 0

            // Same intrinsic guess as a Matrix<double>, which is what the wrapper receives.
            Matrix <double> cameraMatrix = new Matrix <double>(new double[, ] {
                { 302, 0, 101 }, { 0, 411, 106 }, { 0, 0, 1 }
            });

            cameraMat.ToImage <Gray, byte>().Save("test.jpg");

            // Use the guess above as the starting point for the optimizer.
            Emgu.CV.CvEnum.CalibType calibrationType = Emgu.CV.CvEnum.CalibType.UseIntrinsicGuess;

            Emgu.CV.Structure.MCvTermCriteria termCriteria = new Emgu.CV.Structure.MCvTermCriteria(50);

            // 1x5 distortion coefficient vector [k1, k2, p1, p2, k3], zero-initialized.
            Mat _distortionCoeffs = new Mat(new Size(1, 5), DepthType.Cv32F, 1);

            Mat[] rotation;
            Mat[] translation;

            var    result = CvInvoke.CalibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, _distortionCoeffs, calibrationType, termCriteria, out rotation, out translation);
            double t      = rotation[0].GetValue(0, 0);
            double t2     = rotation[0].GetValue(2, 0);
        }