public KinectCalibrator(CalibrationResult calib)
        {
            this.calib = calib;

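            // Fixed IR->RGB extrinsics: rotation given in degrees and converted to
            // radians, translation in meters (roughly the ~2.5 cm IR/RGB baseline).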
            var ir2rgbExtrin = new CV.ExtrinsicCameraParameters(
              new CV.RotationVector3D(new double[] { 0.56 * Math.PI / 180.0, 0.07 * Math.PI / 180.0, +0.05 * Math.PI / 180.0 }),
              new CV.Matrix<double>(new double[,] { { -0.0256 }, { 0.00034 }, { 0.00291 } })).ExtrinsicMatrix;

            IR2RGB = DenseMatrix.OfArray(new float[,]
            {
                { (float)ir2rgbExtrin.Data[0,0], (float)ir2rgbExtrin.Data[0,1], (float)ir2rgbExtrin.Data[0,2], (float)ir2rgbExtrin.Data[0,3] },
                { (float)ir2rgbExtrin.Data[1,0], (float)ir2rgbExtrin.Data[1,1], (float)ir2rgbExtrin.Data[1,2], (float)ir2rgbExtrin.Data[1,3] },
                { (float)ir2rgbExtrin.Data[2,0], (float)ir2rgbExtrin.Data[2,1], (float)ir2rgbExtrin.Data[2,2], (float)ir2rgbExtrin.Data[2,3] },
                {0,0,0,1}
            });

            var rt = calib.Extrinsic.ExtrinsicMatrix;
            K2G = DenseMatrix.OfArray(new float[,]
            {
                { (float)rt.Data[0,0], (float)rt.Data[0,1], (float)rt.Data[0,2], (float)rt.Data[0,3] },
                { (float)rt.Data[1,0], (float)rt.Data[1,1], (float)rt.Data[1,2], (float)rt.Data[1,3] },
                { (float)rt.Data[2,0], (float)rt.Data[2,1], (float)rt.Data[2,2], (float)rt.Data[2,3] },
                {0,0,0,1}
            }).Inverse();

            var flip = DenseMatrix.OfArray(new float[,]
            {
                { -1,0,0,0 },
                { 0,-1,0,0 },
                { 0,0,1,0 },
                { 0,0,0,1 }
            });
            EXTRA = K2G * IR2RGB * flip;
        }
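        // Usage sketch (illustrative, not part of the original class): map a homogeneous
        // point from the Kinect depth/IR frame into the global frame with the combined
        // EXTRA transform. Assumes EXTRA is accessible and that
        // MathNet.Numerics.LinearAlgebra.Single is imported; coordinates are placeholders.
        static void TransformExample(KinectCalibrator k)
        {
            var pIr = MathNet.Numerics.LinearAlgebra.Single.DenseVector.OfArray(
                new float[] { 0.10f, 0.25f, 1.50f, 1f });   // x, y, z in meters, w = 1
            var pGlobal = k.EXTRA * pIr;                     // 4x4 homogeneous transform
        }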
        /// <summary>
        /// Computes projections of 3D points to the image plane given intrinsic and extrinsic camera parameters.
        /// Optionally, the function computes jacobians - matrices of partial derivatives of image points as functions of all the input parameters w.r.t. the particular parameters, intrinsic and/or extrinsic.
        /// The jacobians are used during the global optimization in cvCalibrateCamera2 and cvFindExtrinsicCameraParams2.
        /// The function itself is also used to compute the back-projection error with the current intrinsic and extrinsic parameters.
        /// </summary>
        /// <remarks>Note, that with intrinsic and/or extrinsic parameters set to special values, the function can be used to compute just extrinsic transformation or just intrinsic transformation (i.e. distortion of a sparse set of points) </remarks>
        /// <param name="objectPoints">The array of object points.</param>
        /// <param name="extrin">Extrinsic parameters</param>
        /// <param name="intrin">Intrinsic parameters</param>
        /// <param name="mats">Optional matrices supplied in the following order: dpdrot, dpdt, dpdf, dpdc, dpddist (not forwarded by this overload)</param>
        /// <returns>The array of image points which is the projection of <paramref name="objectPoints"/></returns>
        public static PointF[] ProjectPoints(
            MCvPoint3D32f[] objectPoints,
            ExtrinsicCameraParameters extrin,
            IntrinsicCameraParameters intrin,
            params Matrix <float>[] mats)
        {
            PointF[] imagePoints = new PointF[objectPoints.Length];

            // Note: the optional jacobian matrices in "mats" are not forwarded by
            // this overload; CvInvoke.ProjectPoints is invoked without them.
            GCHandle handle1 = GCHandle.Alloc(objectPoints, GCHandleType.Pinned);
            GCHandle handle2 = GCHandle.Alloc(imagePoints, GCHandleType.Pinned);

            using (Matrix <float> pointMatrix = new Matrix <float>(objectPoints.Length, 1, 3, handle1.AddrOfPinnedObject(), 3 * sizeof(float)))
                using (Matrix <float> imagePointMatrix = new Matrix <float>(imagePoints.Length, 1, 2, handle2.AddrOfPinnedObject(), 2 * sizeof(float)))
                    CvInvoke.ProjectPoints(
                        pointMatrix,
                        extrin.RotationVector,
                        extrin.TranslationVector,
                        intrin.IntrinsicMatrix,
                        intrin.DistortionCoeffs,
                        imagePointMatrix);
            handle1.Free();
            handle2.Free();
            return(imagePoints);
        }
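        // Illustrative follow-up (objPts/imgPts and the calibrated extrin/intrin for one
        // view are assumed to come from a prior calibration; names are placeholders):
        // reproject the 3D points and compute the mean reprojection error in pixels.
        public static double MeanReprojectionError(
            MCvPoint3D32f[] objPts,
            PointF[] imgPts,
            ExtrinsicCameraParameters extrin,
            IntrinsicCameraParameters intrin)
        {
            PointF[] reprojected = ProjectPoints(objPts, extrin, intrin);
            double sum = 0;
            for (int i = 0; i < imgPts.Length; i++)
            {
                double dx = reprojected[i].X - imgPts[i].X;
                double dy = reprojected[i].Y - imgPts[i].Y;
                sum += Math.Sqrt(dx * dx + dy * dy);
            }
            return sum / imgPts.Length;   // average pixel error over the view
        }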
 /// <summary>
 /// Default constructor. No camera object is needed here; the current camera frame
 /// is supplied later through <see cref="UpdateTransformation"/>.
 /// </summary>
 public MarkerPositioner()
 {
     _final                         = Matrix.Identity(4, 4);
     _ecp_A                         = null;
     _cpattern                      = null;
     _extrinsicMatrix_A             = Matrix.Identity(4, 4);
     _logger                        = LogManager.GetLogger(typeof(MarkerPositioner));
     _firstCallUpdateTransformation = true;
 }
 void DrawCoordinateFrame(Emgu.CV.Image <Emgu.CV.Structure.Bgr, byte> img)
 {
     Emgu.CV.IntrinsicCameraParameters icp = _icp;
     if (_ec != null && _pattern.PatternFound && icp != null)
     {
         Emgu.CV.ExtrinsicCameraParameters ecp = _ec.Calibrate(_pattern.ImagePoints);
         _pattern.DrawCoordinateFrame(img, ecp, icp);
     }
 }
        /// <summary>
        /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="intrinsicParam">The intrinsic parameters; may contain initial values. The values will be modified by this function.</param>
        /// <param name="calibrationType">The calibration type</param>
        /// <param name="termCriteria">The termination criteria</param>
        /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
        /// <returns>The final reprojection error</returns>
        public static double CalibrateCamera(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints,
            Size imageSize,
            IntrinsicCameraParameters intrinsicParam,
            CvEnum.CALIB_TYPE calibrationType,
            MCvTermCriteria termCriteria,
            out ExtrinsicCameraParameters[] extrinsicParams)
        {
            Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for object points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            #region get the array that represent the point counts
            int[] pointCounts = new int[objectPoints.Length];
            for (int i = 0; i < objectPoints.Length; i++)
            {
                Debug.Assert(objectPoints[i].Length == imagePoints[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
                pointCounts[i] = objectPoints[i].Length;
            }
            #endregion

            double reprojectionError = -1;
            using (Matrix <float> objectPointMatrix = ToMatrix(objectPoints))
                using (Matrix <float> imagePointMatrix = ToMatrix(imagePoints))
                    using (Matrix <int> pointCountsMatrix = new Matrix <int>(pointCounts))
                        using (Matrix <double> rotationVectors = new Matrix <double>(imageCount, 3))
                            using (Matrix <double> translationVectors = new Matrix <double>(imageCount, 3))
                            {
                                reprojectionError = CvInvoke.cvCalibrateCamera2(
                                    objectPointMatrix.Ptr,
                                    imagePointMatrix.Ptr,
                                    pointCountsMatrix.Ptr,
                                    imageSize,
                                    intrinsicParam.IntrinsicMatrix,
                                    intrinsicParam.DistortionCoeffs,
                                    rotationVectors,
                                    translationVectors,
                                    calibrationType,
                                    termCriteria);

                                extrinsicParams = new ExtrinsicCameraParameters[imageCount];
                                IntPtr matPtr = Marshal.AllocHGlobal(StructSize.MCvMat);
                                for (int i = 0; i < imageCount; i++)
                                {
                                    ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
                                    CvInvoke.cvGetRow(rotationVectors.Ptr, matPtr, i);
                                    CvInvoke.cvTranspose(matPtr, p.RotationVector.Ptr);
                                    CvInvoke.cvGetRow(translationVectors.Ptr, matPtr, i);
                                    CvInvoke.cvTranspose(matPtr, p.TranslationVector.Ptr);
                                    extrinsicParams[i] = p;
                                }
                                Marshal.FreeHGlobal(matPtr);
                            }
            return(reprojectionError);
        }
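        // Sketch of a typical call (hedged: boardSize, squareSize and cornersPerView are
        // placeholders; the object points are laid out to match the row-by-row corner
        // order that FindChessboardCorners produces for a planar board).
        public static IntrinsicCameraParameters CalibrateFromChessboards(
            PointF[][] cornersPerView, Size boardSize, float squareSize, Size imageSize)
        {
            var objectPoints = new MCvPoint3D32f[cornersPerView.Length][];
            for (int v = 0; v < objectPoints.Length; v++)
            {
                objectPoints[v] = new MCvPoint3D32f[boardSize.Width * boardSize.Height];
                for (int j = 0; j < objectPoints[v].Length; j++)
                    objectPoints[v][j] = new MCvPoint3D32f(
                        (j % boardSize.Width) * squareSize,   // column -> X
                        (j / boardSize.Width) * squareSize,   // row    -> Y
                        0);                                   // planar board -> Z = 0
            }
            var intrinsics = new IntrinsicCameraParameters();
            ExtrinsicCameraParameters[] extrinsics;
            CalibrateCamera(objectPoints, cornersPerView, imageSize, intrinsics,
                CvEnum.CALIB_TYPE.DEFAULT, new MCvTermCriteria(30, 0.1), out extrinsics);
            return intrinsics;
        }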
 /// <summary>
 /// Estimates extrinsic camera parameters for a view from known intrinsic parameters. The coordinates of the 3D object points and their corresponding 2D projections must be specified. This function also minimizes back-projection error.
 /// </summary>
 /// <param name="objectPoints">The array of object points</param>
 /// <param name="imagePoints">The array of corresponding image points</param>
 /// <param name="intrin">The intrinsic parameters</param>
 /// <param name="method">Method for solving a PnP problem</param>
 /// <returns>The extrinsic parameters</returns>
 public static ExtrinsicCameraParameters SolvePnP(
    MCvPoint3D32f[] objectPoints,
    PointF[] imagePoints,
    IntrinsicCameraParameters intrin, 
    CvEnum.SolvePnpMethod method = CvEnum.SolvePnpMethod.Iterative)
 {
    ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
    using (VectorOfPoint3D32F objPtVec = new VectorOfPoint3D32F(objectPoints))
    using (VectorOfPointF imgPtVec = new VectorOfPointF(imagePoints))
       CvInvoke.SolvePnP(objPtVec, imgPtVec, intrin.IntrinsicMatrix, intrin.DistortionCoeffs, p.RotationVector, p.TranslationVector, false, method);
    return p;
 }
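 // Illustrative wrapper (modelPts/imgPts are placeholders; the default iterative
 // method needs at least 4 non-degenerate correspondences): recover a single pose.
 public static void PrintModelDepth(
    MCvPoint3D32f[] modelPts, PointF[] imgPts, IntrinsicCameraParameters intrinsics)
 {
    ExtrinsicCameraParameters pose = SolvePnP(modelPts, imgPts, intrinsics);
    // RotationVector is a Rodrigues axis-angle vector; TranslationVector is the
    // model origin expressed in the camera frame.
    Console.WriteLine("model depth: {0} m", pose.TranslationVector[2, 0]);
 }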
        /// <summary>
        /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="intrinsicParam">The intrinsic parameters; may contain initial values. The values will be modified by this function.</param>
        /// <param name="flags">Flags</param>
        /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
        /// <returns>The final reprojection error</returns>
        public static double CalibrateCamera(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePoints,
         Size imageSize,
         IntrinsicCameraParameters intrinsicParam,
         CvEnum.CALIB_TYPE flags,
         out ExtrinsicCameraParameters[] extrinsicParams)
        {
            Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for object points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            #region get the array that represent the point counts
            int[] pointCounts = new int[objectPoints.Length];
            for (int i = 0; i < objectPoints.Length; i++)
            {
                Debug.Assert(objectPoints[i].Length == imagePoints[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
                pointCounts[i] = objectPoints[i].Length;
            }
            #endregion

            double reprojectionError = -1;
            using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
            using (Matrix<float> imagePointMatrix = ToMatrix(imagePoints))
            using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
            using (Matrix<double> rotationVectors = new Matrix<double>(imageCount, 3))
            using (Matrix<double> translationVectors = new Matrix<double>(imageCount, 3))
            {
                reprojectionError = CvInvoke.cvCalibrateCamera2(
                    objectPointMatrix.Ptr,
                    imagePointMatrix.Ptr,
                    pointCountsMatrix.Ptr,
                    imageSize,
                    intrinsicParam.IntrinsicMatrix,
                    intrinsicParam.DistortionCoeffs,
                    rotationVectors,
                    translationVectors,
                    flags);

                extrinsicParams = new ExtrinsicCameraParameters[imageCount];
                IntPtr matPtr = Marshal.AllocHGlobal(StructSize.MCvMat);
                for (int i = 0; i < imageCount; i++)
                {
                    ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
                    CvInvoke.cvGetRow(rotationVectors.Ptr, matPtr, i);
                    CvInvoke.cvTranspose(matPtr, p.RotationVector.Ptr);
                    CvInvoke.cvGetRow(translationVectors.Ptr, matPtr, i);
                    CvInvoke.cvTranspose(matPtr, p.TranslationVector.Ptr);
                    extrinsicParams[i] = p;
                }
                Marshal.FreeHGlobal(matPtr);
            }
            return reprojectionError;
        }
        /// <summary>
        /// Estimates the transformation between the 2 cameras making a stereo pair. If we have a stereo camera where the relative position and orientation of the 2 cameras is fixed, and if we computed the poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2) respectively (that can be done with cvFindExtrinsicCameraParams2), those poses relate to each other; i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need the position and orientation of the 2nd camera relative to the 1st camera. That is what this function does. It computes (R, T) such that:
        /// R2=R*R1,
        /// T2=R*T1 + T
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="intrinsicParam1">The intrinsic parameters for camera 1; may contain initial values. The values will be modified by this function.</param>
        /// <param name="intrinsicParam2">The intrinsic parameters for camera 2; may contain initial values. The values will be modified by this function.</param>
        /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="flags">Different flags</param>
        /// <param name="extrinsicParams">The extrinsic parameters which contains:
        /// R - The rotation matrix between the 1st and the 2nd cameras' coordinate systems;
        /// T - The translation vector between the cameras' coordinate systems. </param>
        /// <param name="essentialMatrix">The essential matrix</param>
        /// <param name="termCrit">Termination criteria for the iterative optimization algorithm</param>
        /// <param name="foundamentalMatrix">The fundamental matrix</param>
        public static void StereoCalibrate(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints1,
            PointF[][] imagePoints2,
            IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            CvEnum.CALIB_TYPE flags,
            MCvTermCriteria termCrit,
            out ExtrinsicCameraParameters extrinsicParams,
            out Matrix <double> foundamentalMatrix,
            out Matrix <double> essentialMatrix)
        {
            Debug.Assert(objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length, "The number of images for object points should be equal to the number of images for image points");

            #region get the matrix that represent the point counts
            int[,] pointCounts = new int[objectPoints.Length, 1];
            for (int i = 0; i < objectPoints.Length; i++)
            {
                Debug.Assert(objectPoints[i].Length == imagePoints1[i].Length && objectPoints[i].Length == imagePoints2[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
                pointCounts[i, 0] = objectPoints[i].Length;
            }
            #endregion

            using (Matrix <float> objectPointMatrix = ToMatrix(objectPoints))
                using (Matrix <float> imagePointMatrix1 = ToMatrix(imagePoints1))
                    using (Matrix <float> imagePointMatrix2 = ToMatrix(imagePoints2))
                        using (Matrix <int> pointCountsMatrix = new Matrix <int>(pointCounts))
                        {
                            extrinsicParams    = new ExtrinsicCameraParameters();
                            essentialMatrix    = new Matrix <double>(3, 3);
                            foundamentalMatrix = new Matrix <double>(3, 3);

                            CvInvoke.cvStereoCalibrate(
                                objectPointMatrix.Ptr,
                                imagePointMatrix1.Ptr,
                                imagePointMatrix2.Ptr,
                                pointCountsMatrix.Ptr,
                                intrinsicParam1.IntrinsicMatrix,
                                intrinsicParam1.DistortionCoeffs,
                                intrinsicParam2.IntrinsicMatrix,
                                intrinsicParam2.DistortionCoeffs,
                                imageSize,
                                extrinsicParams.RotationVector,
                                extrinsicParams.TranslationVector,
                                essentialMatrix.Ptr,
                                foundamentalMatrix.Ptr,
                                termCrit,
                                flags);
                        }
        }
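        // Sketch (placeholder inputs; CV_CALIB_FIX_INTRINSIC assumes both cameras were
        // calibrated individually beforehand - check the flag name against your Emgu CV
        // version): recover the fixed rig transform between the two cameras.
        public static ExtrinsicCameraParameters CalibrateRig(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] leftCorners, PointF[][] rightCorners,
            IntrinsicCameraParameters leftIntr, IntrinsicCameraParameters rightIntr,
            Size imageSize)
        {
            ExtrinsicCameraParameters rig;
            Matrix<double> F, E;
            StereoCalibrate(objectPoints, leftCorners, rightCorners, leftIntr, rightIntr,
                imageSize, CvEnum.CALIB_TYPE.CV_CALIB_FIX_INTRINSIC,
                new MCvTermCriteria(100, 1e-5), out rig, out F, out E);
            // rig holds (R, T) of camera 2 relative to camera 1, so that for any object
            // pose: R2 = R * R1 and T2 = R * T1 + T.
            return rig;
        }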
 /// <summary>
 /// Extracts the extrinsic matrix from the cameras' extrinsic parameters
 /// The values of the extrinsic matrix are extracted into a 4x4 Matrix.
 /// This function is static, since it is called first in the deserialization constructor.
 /// </summary>
 private static Matrix ExtractExctrinsicMatrix(ExtrinsicCameraParameters ecp)
 {
   Matrix extrinsicMatrix = Matrix.Identity(4, 4);
   if (ecp != null)
   {
     for (int i = 0; i < 3; i++)
     {
       for (int j = 0; j < 4; j++)
       {
         extrinsicMatrix[i, j] = ecp.ExtrinsicMatrix[i, j];
       }
     }
   }
   return extrinsicMatrix;
 }
        /// <summary>
        /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="intrinsicParam">The intrinsic parameters; may contain initial values. The values will be modified by this function.</param>
        /// <param name="calibrationType">The calibration type</param>
        /// <param name="termCriteria">The termination criteria</param>
        /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
        /// <returns>The final reprojection error</returns>
        public static double CalibrateCamera(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints,
            Size imageSize,
            IntrinsicCameraParameters intrinsicParam,
            CvEnum.CalibType calibrationType,
            MCvTermCriteria termCriteria,
            out ExtrinsicCameraParameters[] extrinsicParams)
        {
            Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for object points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints))
                {
                    double reprojectionError = -1;
                    using (VectorOfMat rotationVectors = new VectorOfMat())
                        using (VectorOfMat translationVectors = new VectorOfMat())
                        {
                            Mat cameraMat   = new Mat();
                            Mat distorCoeff = new Mat();
                            reprojectionError = CvInvoke.CalibrateCamera(
                                vvObjPts,
                                vvImgPts,
                                imageSize,
                                intrinsicParam.IntrinsicMatrix,
                                intrinsicParam.DistortionCoeffs,
                                rotationVectors,
                                translationVectors,
                                calibrationType,
                                termCriteria);

                            extrinsicParams = new ExtrinsicCameraParameters[imageCount];
                            for (int i = 0; i < imageCount; i++)
                            {
                                ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
                                using (Mat matR = rotationVectors[i])
                                    matR.CopyTo(p.RotationVector);
                                using (Mat matT = translationVectors[i])
                                    matT.CopyTo(p.TranslationVector);
                                extrinsicParams[i] = p;
                            }
                        }
                    return(reprojectionError);
                }
        }
        /// <summary>
        /// Estimates extrinsic camera parameters for a view from known intrinsic parameters. The coordinates of the 3D object points and their corresponding 2D projections must be specified. This function also minimizes back-projection error.
        /// </summary>
        /// <param name="objectPoints">The array of object points</param>
        /// <param name="imagePoints">The array of corresponding image points</param>
        /// <param name="intrin">The intrinsic parameters</param>
        /// <returns>The extrinsic parameters</returns>
        public static ExtrinsicCameraParameters FindExtrinsicCameraParams2(
            MCvPoint3D32f[] objectPoints,
            PointF[] imagePoints,
            IntrinsicCameraParameters intrin)
        {
            ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();

            GCHandle handle1 = GCHandle.Alloc(objectPoints, GCHandleType.Pinned);
            GCHandle handle2 = GCHandle.Alloc(imagePoints, GCHandleType.Pinned);

            using (Matrix <float> objectPointMatrix = new Matrix <float>(objectPoints.Length, 3, handle1.AddrOfPinnedObject()))
                using (Matrix <float> imagePointMatrix = new Matrix <float>(imagePoints.Length, 2, handle2.AddrOfPinnedObject()))
                    CvInvoke.cvFindExtrinsicCameraParams2(objectPointMatrix, imagePointMatrix, intrin.IntrinsicMatrix.Ptr, intrin.DistortionCoeffs.Ptr, p.RotationVector.Ptr, p.TranslationVector.Ptr, 0);
            handle1.Free();
            handle2.Free();

            return(p);
        }
        /// <summary>
        /// Estimates the transformation between the 2 cameras making a stereo pair. If we have a stereo camera where the relative position and orientation of the 2 cameras is fixed, and if we computed the poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2) respectively (that can be done with cvFindExtrinsicCameraParams2), those poses relate to each other; i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need the position and orientation of the 2nd camera relative to the 1st camera. That is what this function does. It computes (R, T) such that:
        /// R2=R*R1,
        /// T2=R*T1 + T
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="intrinsicParam1">The intrinsic parameters for camera 1; may contain initial values. The values will be modified by this function.</param>
        /// <param name="intrinsicParam2">The intrinsic parameters for camera 2; may contain initial values. The values will be modified by this function.</param>
        /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="flags">Different flags</param>
        /// <param name="extrinsicParams">The extrinsic parameters which contains:
        /// R - The rotation matrix between the 1st and the 2nd cameras' coordinate systems;
        /// T - The translation vector between the cameras' coordinate systems. </param>
        /// <param name="essentialMatrix">The essential matrix</param>
        /// <param name="termCrit">Termination criteria for the iterative optimization algorithm</param>
        /// <param name="foundamentalMatrix">The fundamental matrix</param>
        public static void StereoCalibrate(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints1,
            PointF[][] imagePoints2,
            IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            CvEnum.CalibType flags,
            MCvTermCriteria termCrit,
            out ExtrinsicCameraParameters extrinsicParams,
            out Matrix <double> foundamentalMatrix,
            out Matrix <double> essentialMatrix)
        {
            Debug.Assert(objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length, "The number of images for object points should be equal to the number of images for image points");

            using (VectorOfVectorOfPoint3D32F objectPointVec = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF imagePoints1Vec = new VectorOfVectorOfPointF(imagePoints1))
                    using (VectorOfVectorOfPointF imagePoints2Vec = new VectorOfVectorOfPointF(imagePoints2))
                    {
                        extrinsicParams    = new ExtrinsicCameraParameters();
                        essentialMatrix    = new Matrix <double>(3, 3);
                        foundamentalMatrix = new Matrix <double>(3, 3);

                        CvInvoke.StereoCalibrate(
                            objectPointVec,
                            imagePoints1Vec,
                            imagePoints2Vec,

                            intrinsicParam1.IntrinsicMatrix,
                            intrinsicParam1.DistortionCoeffs,
                            intrinsicParam2.IntrinsicMatrix,
                            intrinsicParam2.DistortionCoeffs,
                            imageSize,
                            extrinsicParams.RotationVector,
                            extrinsicParams.TranslationVector,
                            essentialMatrix,
                            foundamentalMatrix,
                            flags,
                            termCrit);
                    }
        }
            public Camera(string path, int width, int height, PointF[] imageCorners, MCvPoint3D32f[] worldCorners)
            {
                Width = width;
                Height = height;

                Intrinsics = Markers.Chessboard.Load(path, width, height, out UndistortX, out UndistortY);
                Extrinsics = CameraCalibration.FindExtrinsicCameraParams2(worldCorners, imageCorners, Intrinsics);

                var ext = Extrinsics;
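                // The rows below transpose the OpenCV rotation (world -> camera) and
                // negate its Y and Z components, converting OpenCV's x-right/y-down/
                // z-forward camera frame into XNA's x-right/y-up/z-backward view space.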
                View = new Matrix((float)ext.ExtrinsicMatrix[0, 0], -(float)ext.ExtrinsicMatrix[1, 0], -(float)ext.ExtrinsicMatrix[2, 0], 0,
                                         (float)ext.ExtrinsicMatrix[0, 1], -(float)ext.ExtrinsicMatrix[1, 1], -(float)ext.ExtrinsicMatrix[2, 1], 0,
                                         (float)ext.ExtrinsicMatrix[0, 2], -(float)ext.ExtrinsicMatrix[1, 2], -(float)ext.ExtrinsicMatrix[2, 2], 0,
                                         (float)ext.ExtrinsicMatrix[0, 3], -(float)ext.ExtrinsicMatrix[1, 3], -(float)ext.ExtrinsicMatrix[2, 3], 1);
                Intrinsics.GetIntrinsicMatrixValues(width, height, 0, 0,
                    out FovX, out FovY,
                    out FocalLength, out PrincipalPoint,
                    out PixelAspectRatio);
                World = Matrix.Invert(View);
                Projection = Matrix.CreatePerspectiveFieldOfView(MathHelper.ToRadians((float)FovY), ((float)width) / ((float)height), 0.1f, 4.0f);
                BoundingFrustum = new BoundingFrustum(View * Projection);

                if (Global.No)
                    using (var img = new Image<Bgr, byte>(@"C:\Users\Jaap\My Dropbox\Data\Pentacorn.Vision\Offline\Casio Ex S5\Tape on Melamine\CIMG0606.JPG"))
                    {
                        foreach (var p in imageCorners)
                            img.Draw(new Cross2DF(p, 20, 20), new Bgr(255, 0, 255), 1);

                        var projectedCorners = CameraCalibration.ProjectPoints(worldCorners, Extrinsics, Intrinsics);
                        foreach (var p in projectedCorners)
                            img.Draw(new Cross2DF(p, 6, 6), new Bgr(255, 255, 0), 1);

                        var und = Intrinsics.Undistort(img);

                        img.Save(@"C:\Users\Jaap\Temp\img.png");
                        und.Save(@"C:\Users\Jaap\Temp\und.png");
                    }
            }
        public void CalibrateSingleCamera(int index, int patternX, int patternY,
                                            ref Image<Bgr, byte> slikaVhodna,
                                            ref Image<Gray, Byte> slikaRobovi,
                                            ref Image<Bgr, byte> slikaTransformirana,
                                            ref IntrinsicCameraParameters intrinsic_param )
        {
            setCamCapture(index);

            int chessboard_number = 3;

            int corners_number = patternY * patternX;
            int success = 0;

            int count = 0;
            int board_dt = 20;

            MCvPoint3D32f[][] object_points = new MCvPoint3D32f[chessboard_number][];
            PointF[][] image_points = new PointF[chessboard_number][];

            ExtrinsicCameraParameters[] extrinsic_param;

            Image<Bgr, byte> slika2 = slikaVhodna;

            Image<Gray, Byte> slika = slika2.Convert<Gray, Byte>();
            slikaVhodna = slika2;

            PointF[] corners = new PointF[] { };
            Size patternSize = new Size(patternX, patternY);

            while (success < chessboard_number)
            {

                if ((count++ % board_dt) == 0)
                {

                    bool found = CameraCalibration.FindChessboardCorners(slika, patternSize, CALIB_CB_TYPE.DEFAULT, out corners);

                    CameraCalibration.DrawChessboardCorners(slika, patternSize, corners, found);
                    slikaRobovi = slika;

                    slika.FindCornerSubPix(new PointF[][] { corners }, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(300, 0.05));

                    if (corners.Length == corners_number)
                    {
                        object_points[success] = new MCvPoint3D32f[corners_number];
                        image_points[success] = corners;
                        for (int j = 0; j < corners_number; j++)
                        {
                            // Corners are reported row by row, patternX per row:
                            // column -> x, row -> y, planar board -> z = 0.
                            object_points[success][j].x = j % patternX;
                            object_points[success][j].y = j / patternX;
                            object_points[success][j].z = 0.0f;
                        }

                        success++;
                    }
                }

                slika2 = CamCapture.QueryFrame();
                slika = slika2.Convert<Gray, Byte>();

            }

            intrinsic_param = new IntrinsicCameraParameters();
            extrinsic_param = new ExtrinsicCameraParameters[success];

            for (int i = 0; i < success; i++)
            {
                extrinsic_param[i] = new ExtrinsicCameraParameters();
            }

            CameraCalibration.CalibrateCamera(object_points, image_points, new Size(slika.Width, slika.Height), intrinsic_param, CALIB_TYPE.DEFAULT, out extrinsic_param);

            uintdisort(ref slikaTransformirana, ref intrinsic_param, ref slika2, ref slika);
        }
        public void CalibrateSingleCamera(int index, int patternX, int patternY,
                                            ref Image<Bgr, byte> slikaVhodna,
                                            ref Image<Gray, Byte> slikaRobovi,
                                            ref Image<Bgr, byte> slikaTransformirana)
        {
            setCamCapture(index);

            int chessboard_number = 3;

            int corners_number = patternY * patternX;
            int success = 0;

            int count = 0;
            int board_dt = 20;

            MCvPoint3D32f[][] object_points = new MCvPoint3D32f[chessboard_number][];
            PointF[][] image_points = new PointF[chessboard_number][];

            IntrinsicCameraParameters intrinsic_param;
            ExtrinsicCameraParameters[] extrinsic_param;

            Image<Bgr, byte> slika2 = slikaVhodna;
               // Image<Bgr, byte> slika2 = CamCapture.QueryFrame();
            Image<Gray, Byte> slika = slika2.Convert<Gray, Byte>();
            slikaVhodna = slika2;
            //imageBox1.Image = slika2;
            //   Image<Gray, byte> slika = new Image<Gray, byte>("test.jpg");
            //  Image<Bgr, byte> slika2 = new Image<Bgr, byte>("test.jpg");
            // imageBox1.Image = slika;

            PointF[] corners = new PointF[] { };
            Size patternSize = new Size(patternX, patternY);

            while (success < chessboard_number)
            {

                if ((count++ % board_dt) == 0)
                {

                    if (corners == null || corners.Length == 0)
                    {
                        corners = CameraCalibration.FindChessboardCorners(slika, patternSize, CALIB_CB_TYPE.DEFAULT);
                        continue;
                    }

                    else
                    {

                        CameraCalibration.DrawChessboardCorners(slika, patternSize, corners);
                        slikaRobovi = slika;

                        slika.FindCornerSubPix(new PointF[][] { corners }, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(300, 0.01));

                        if (corners.Length == corners_number)
                        {
                            object_points[success] = new MCvPoint3D32f[corners_number];
                            image_points[success] = corners;
                            for (int j = 0; j < corners_number; j++)
                            {
                                // Corners are reported row by row, patternX per row:
                                // column -> x, row -> y, planar board -> z = 0.
                                object_points[success][j].x = j % patternX;
                                object_points[success][j].y = j / patternX;
                                object_points[success][j].z = 0.0f;
                            }

                            success++;
                        }
                    }
                }

                slika2 = CamCapture.QueryFrame();
                slika = slika2.Convert<Gray, Byte>();
                //imageBox1.Image = slika2;

                // slika = new Image<Gray, Byte>("test.jpg");

            }

            intrinsic_param = new IntrinsicCameraParameters();
            extrinsic_param = new ExtrinsicCameraParameters[success];

            for (int i = 0; i < success; i++)
            {
                extrinsic_param[i] = new ExtrinsicCameraParameters();
            }

            CameraCalibration.CalibrateCamera(object_points, image_points, new Size(slika.Width, slika.Height), intrinsic_param, CALIB_TYPE.DEFAULT, out extrinsic_param);

            Matrix<float> mapx;
            Matrix<float> mapy;

            // InitUndistortMap allocates the maps itself via the out parameters,
            // so no pre-allocation is needed.
            intrinsic_param.InitUndistortMap(slika.Width, slika.Height, out mapx, out mapy);

            Image<Bgr, byte> image_calibrated = slika2.Clone();

            CvInvoke.cvRemap(slika2.Ptr, image_calibrated.Ptr, mapx.Ptr, mapy.Ptr, 8 /*(int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS*/, new MCvScalar(0));

            slikaTransformirana = image_calibrated;
        }
 /// <summary>
 /// single camera calibration
 /// </summary>
 /// <param name="objectPoints">corner positions in world coordinates</param>
 /// <param name="imagePoints">corner positions in image coordinates</param>
 /// <param name="imageSize">image size</param>
 /// <param name="intrinsicParam">camera intrinsics</param>
 /// <param name="extrinsicParams">camera extrinsics (per view)</param>
 /// <returns>reprojection error</returns>
 private double singleCameraCalibration(MCvPoint3D32f[][] objectPoints,
                                         PointF[][] imagePoints,
                                         Size imageSize,
                                         IntrinsicCameraParameters intrinsicParam,
                                         // "out" so the per-view extrinsics actually
                                         // reach the caller; a plain parameter would
                                         // silently discard them
                                         out ExtrinsicCameraParameters[] extrinsicParams
                                     )
 {
     return (CameraCalibration.CalibrateCamera(objectPoints, imagePoints, imageSize, intrinsicParam,
         CALIB_TYPE.CV_CALIB_FIX_K3, criteria, out extrinsicParams));
 }
        /// <summary>
        /// Calculates the transformation matrix, which is used to transform the 3d-object points, which were scanned with reference
        /// to the moved marker coordinate system, back to the initial marker system and henceforth back to the camera system.
        /// The camera object is needed in order to gain the current camera frame. Furthermore, the cameras' intrinsics are needed
        /// to perform an extrinsic calibration. Note, that every kind of pattern can be used.
        ///
        /// The transformation matrix is calculated as follows:
        /// * If 'UpdateTransformation' is being called for the first time, an extrinsic calibration is performed in order to find
        ///   the initial orientation of the used pattern.
        /// * If the initial orientation has already been found, the extrinsic calibration is performed again. Afterwards
        ///   the current orientation is available, represented by the extrinsic matrix.
        /// * Extend the extrinsic matrix (3x4, the current position) to a homogeneous 4x4 matrix.
        /// * The final transformation matrix is calculated as follows: _final = initial * current.Inverse();
        /// </summary>
        public bool UpdateTransformation(Camera the_cam)
        {
            Matrix extrinsicM1 = Matrix.Identity(4, 4);
            ExtrinsicCameraParameters ecp_pattern = null;
            ExtrinsicCalibration      ec_pattern  = null;

            Emgu.CV.Image <Gray, Byte> gray_img = null;
            System.Drawing.PointF[]    currentImagePoints;

            //first call: calculate extrinsics for initial position
            if (_firstCallUpdateTransformation == true && _cpattern != null)
            {
                gray_img = the_cam.Frame().Convert <Gray, Byte>();
                //set the patterns property: intrinsic parameters
                _cpattern.IntrinsicParameters = the_cam.Intrinsics;

                if (_cpattern.FindPattern(gray_img, out currentImagePoints))
                {
                    try
                    {
                        //extr. calibration (initial position)
                        ec_pattern  = new ExtrinsicCalibration(_cpattern.ObjectPoints, the_cam.Intrinsics);
                        ecp_pattern = ec_pattern.Calibrate(currentImagePoints);

                        if (ecp_pattern != null)
                        {
                            _ecp_A             = ecp_pattern;
                            _extrinsicMatrix_A = ExtractExctrinsicMatrix(_ecp_A);

                            _logger.Info("Initial Position found.");
                            _firstCallUpdateTransformation = false;
                        }
                    }
                    catch (Exception e)
                    {
                        _logger.Warn("Initial Position - Caught Exception: {0}.", e);
                        _firstCallUpdateTransformation = true;
                        _ecp_A = null;
                        return(false);
                    }
                }
                else
                {
                    _logger.Warn("Pattern not found.");
                    _firstCallUpdateTransformation = true;
                    _ecp_A = null;

                    return(false);
                }
            }

            //if initial position and pattern are available - calculate the transformation
            if (_ecp_A != null && _cpattern != null)
            {
                gray_img = the_cam.Frame().Convert <Gray, Byte>();

                //try to find composite pattern
                if (_cpattern.FindPattern(gray_img, out currentImagePoints))
                {
                    //extrinsic calibration in order to find the current orientation
                    ec_pattern  = new ExtrinsicCalibration(_cpattern.ObjectPoints, the_cam.Intrinsics);
                    ecp_pattern = ec_pattern.Calibrate(currentImagePoints);

                    if (ecp_pattern != null)
                    {
                        //extract current extrinsic matrix
                        extrinsicM1 = ExtractExctrinsicMatrix(ecp_pattern);
                        _logger.Info("UpdateTransformation: Transformation found.");
                    }
                    else
                    {
                        _logger.Warn("UpdateTransformation: Extrinsics of moved marker system not found.");
                        return(false);
                    }
                }
                else
                {
                    _logger.Warn("UpdateTransformation: Pattern not found.");
                    return(false);
                }

                //now calculate the final transformation matrix
                _final = _extrinsicMatrix_A * extrinsicM1.Inverse();
                return(true);
            }
            else
            {
                _logger.Warn("UpdateTransformation: No Pattern has been chosen.");
                return(false);
            }
        }
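        // Illustrative polling loop (camera/positioner names and the ApplyTransform
        // helper are hypothetical): the first successful call latches the initial
        // marker pose; each later success refreshes _final = initial * current.Inverse(),
        // which re-registers points scanned relative to the moved marker back into the
        // initial marker frame.
        //
        //   while (scanning)
        //   {
        //       if (!positioner.UpdateTransformation(camera))
        //           continue;   // pattern lost this frame - keep the last transform
        //       ApplyTransform(positioner.Final, scannedPoints);
        //   }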
        public void CalibrateCamera(string directory)
        {
            DirectoryInfo di = new DirectoryInfo(directory);
            FileInfo[] rgFiles = di.GetFiles("*.jpg");

            Size chessboardSize = new Size(5, 8);
            int successes = 0;
            int numberOfCorners = chessboardSize.Width * chessboardSize.Height;
            int chessboard_num = rgFiles.Length;
            int counter = 0;

            MCvPoint3D32f[][] object_points1 = new MCvPoint3D32f[chessboard_num][];
            PointF[][] image_points1 = new PointF[chessboard_num][];

             // Process all chessboard images in this directory
             foreach(FileInfo fi in rgFiles)
             {
                // Find all the corners on a given chessboard image
                PointF[] corners;
                _calibrationImage = new Image<Bgr, byte>(fi.FullName);
                Image<Gray, byte> grayImage = _calibrationImage.Convert<Gray, byte>();
                bool patternFound = CameraCalibration.FindChessboardCorners(grayImage, chessboardSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH, out corners);
                // grayImage.FindCornerSubPix(new PointF[][] { corners }, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(0.05));
                CvInvoke.cvDrawChessboardCorners(_calibrationImage.Ptr, chessboardSize, corners, corners.Length, patternFound ? 1 : 0);

                MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 5, 5);
                _calibrationImage.Draw(counter++.ToString() + ":" + fi.Name + " " + patternFound.ToString(), ref font , new Point(0, 200), new Bgr(Color.Red));

                if (OnCalibrationImageChanged != null)
                    OnCalibrationImageChanged(this, EventArgs.Empty);

                // TODO bit of a hack so that the display updates, should probably do this in a BackgroundWorker
                System.Windows.Forms.Application.DoEvents();

                if (patternFound && (corners.Length == numberOfCorners))
                {
                    object_points1[successes] = new MCvPoint3D32f[numberOfCorners];
                    image_points1[successes] = corners;
                    for (int j = 0; j < numberOfCorners; j++)
                    {
                        // Corners arrive row by row, Width per row:
                        // column -> x, row -> y, planar board -> z = 0.
                        object_points1[successes][j].x = j % chessboardSize.Width;
                        object_points1[successes][j].y = j / chessboardSize.Width;
                        object_points1[successes][j].z = 0.0f;
                    }
                    successes++;
                }
             }

             // TODO Hack to get arrays of the correct length, should probably use a collection
             MCvPoint3D32f[][] object_points = new MCvPoint3D32f[successes][];
             PointF[][] image_points = new PointF[successes][];
             for (int i = 0; i < successes; i++)
             {
                 object_points[i] = object_points1[i];
                 image_points[i] = image_points1[i];
             }

             _intrinsicParameters = new IntrinsicCameraParameters();
             ExtrinsicCameraParameters[] extrinsicParameters = new ExtrinsicCameraParameters[successes];
             for (int i = 0; i < successes; i++)
                 extrinsicParameters[i] = new ExtrinsicCameraParameters();

             CameraCalibration.CalibrateCamera(object_points, image_points, new Size(_calibrationImage.Width, _calibrationImage.Height), _intrinsicParameters,Emgu.CV.CvEnum.CALIB_TYPE.DEFAULT, out extrinsicParameters);

             FileStream fs = new FileStream(directory + "\\1.calibration", FileMode.Create);
             System.Xml.Serialization.XmlSerializer x = new System.Xml.Serialization.XmlSerializer(_intrinsicParameters.GetType());
             x.Serialize(fs, _intrinsicParameters);
             fs.Close();

             CorrectCameraDistortion(directory + "\\1.calibration");
        }
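        // Counterpart sketch to the XmlSerializer save above: reload the intrinsics
        // written to "<directory>\1.calibration" (same path convention as this class).
        public void LoadCalibration(string directory)
        {
            using (FileStream fs = new FileStream(directory + "\\1.calibration", FileMode.Open))
            {
                var x = new System.Xml.Serialization.XmlSerializer(typeof(IntrinsicCameraParameters));
                _intrinsicParameters = (IntrinsicCameraParameters)x.Deserialize(fs);
            }
        }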
 void _interactor_InteractionCompleted(object sender, Parsley.UI.InteractionEventArgs e) {
   _last_detected_plane = null;
   _last_error = Double.MaxValue;
   _on_roi = true;
   _r = (Rectangle)e.InteractionResult;
 }
        /// <summary>
        /// Estimates the transformation between the 2 cameras making a stereo pair. If we have a stereo camera where the relative position and orientation of the 2 cameras is fixed, and if we computed the poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2) respectively (that can be done with cvFindExtrinsicCameraParams2), those poses relate to each other; i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need the position and orientation of the 2nd camera relative to the 1st camera. That is what this function does. It computes (R, T) such that:
        /// R2=R*R1,
        /// T2=R*T1 + T
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="intrinsicParam1">The intrinsic parameters for camera 1; may contain initial values. The values will be modified by this function.</param>
        /// <param name="intrinsicParam2">The intrinsic parameters for camera 2; may contain initial values. The values will be modified by this function.</param>
        /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="flags">Different flags</param>
        /// <param name="extrinsicParams">The extrinsic parameters which contains:
        /// R - The rotation matrix between the 1st and the 2nd cameras' coordinate systems; 
        /// T - The translation vector between the cameras' coordinate systems. </param>
        /// <param name="essentialMatrix">essential matrix</param>
        /// <param name="termCrit">Termination criteria for the iterative optimization algorithm</param>
        /// <param name="foundamentalMatrix">fundamental matrix</param>
        public static void StereoCalibrate(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints1,
            PointF[][] imagePoints2,
            IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            CvEnum.CALIB_TYPE flags,
            MCvTermCriteria termCrit,
            out ExtrinsicCameraParameters extrinsicParams,
            out Matrix<double> foundamentalMatrix,
            out Matrix<double> essentialMatrix)
        {
            Debug.Assert(objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length, "The number of images for object points should be equal to the number of images for image points");

            #region get the matrix that represent the point counts
            int[,] pointCounts = new int[objectPoints.Length, 1];
            for (int i = 0; i < objectPoints.Length; i++)
            {
                Debug.Assert(objectPoints[i].Length == imagePoints1[i].Length && objectPoints[i].Length == imagePoints2[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
                pointCounts[i, 0] = objectPoints[i].Length;
            }
            #endregion

            using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
            using (Matrix<float> imagePointMatrix1 = ToMatrix(imagePoints1))
            using (Matrix<float> imagePointMatrix2 = ToMatrix(imagePoints2))
            using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
            {
                extrinsicParams = new ExtrinsicCameraParameters();
                essentialMatrix = new Matrix<double>(3, 3);
                foundamentalMatrix = new Matrix<double>(3, 3);

                CvInvoke.cvStereoCalibrate(
                    objectPointMatrix.Ptr,
                    imagePointMatrix1.Ptr,
                    imagePointMatrix2.Ptr,
                    pointCountsMatrix.Ptr,
                    intrinsicParam1.IntrinsicMatrix,
                    intrinsicParam1.DistortionCoeffs,
                    intrinsicParam2.IntrinsicMatrix,
                    intrinsicParam2.DistortionCoeffs,
                    imageSize,
                    extrinsicParams.RotationVector,
                    extrinsicParams.TranslationVector,
                    essentialMatrix.Ptr,
                    foundamentalMatrix.Ptr,
                    termCrit,
                    flags);
            }
        }
        /// <summary>
        /// Computes projections of 3D points to the image plane given intrinsic and extrinsic camera parameters. 
        /// Optionally, the function computes jacobians - matrices of partial derivatives of image points as functions of all the input parameters w.r.t. the particular parameters, intrinsic and/or extrinsic. 
        /// The jacobians are used during the global optimization in cvCalibrateCamera2 and cvFindExtrinsicCameraParams2. 
        /// The function itself is also used to compute the back-projection error with the current intrinsic and extrinsic parameters.
        /// </summary>
        /// <remarks>Note, that with intrinsic and/or extrinsic parameters set to special values, the function can be used to compute just extrinsic transformation or just intrinsic transformation (i.e. distortion of a sparse set of points) </remarks>
        /// <param name="objectPoints">The array of object points.</param>
        /// <param name="extrin">Extrinsic parameters</param>
        /// <param name="intrin">Intrinsic parameters</param>
        /// <param name="mats">Optional matrices supplied in the following order: dpdrot, dpdt, dpdf, dpdc, dpddist</param>
        /// <returns>The array of image points which is the projection of <paramref name="objectPoints"/></returns>
        public static PointF[] ProjectPoints(
            MCvPoint3D32f[] objectPoints,
            ExtrinsicCameraParameters extrin,
            IntrinsicCameraParameters intrin,
            params Matrix<float>[] mats)
        {
            PointF[] imagePoints = new PointF[objectPoints.Length];

            int matsLength = mats.Length;
            GCHandle handle1 = GCHandle.Alloc(objectPoints, GCHandleType.Pinned);
            GCHandle handle2 = GCHandle.Alloc(imagePoints, GCHandleType.Pinned);
            using (Matrix<float> pointMatrix = new Matrix<float>(objectPoints.Length, 1, 3, handle1.AddrOfPinnedObject(), 3 * sizeof(float)))
            using (Matrix<float> imagePointMatrix = new Matrix<float>(imagePoints.Length, 1, 2, handle2.AddrOfPinnedObject(), 2 * sizeof(float)))
                CvInvoke.cvProjectPoints2(
                    pointMatrix,
                    extrin.RotationVector.Ptr,
                    extrin.TranslationVector.Ptr,
                    intrin.IntrinsicMatrix.Ptr,
                    intrin.DistortionCoeffs.Ptr,
                    imagePointMatrix,
                    matsLength > 0 ? mats[0] : IntPtr.Zero,
                    matsLength > 1 ? mats[1] : IntPtr.Zero,
                    matsLength > 2 ? mats[2] : IntPtr.Zero,
                    matsLength > 3 ? mats[3] : IntPtr.Zero,
                    matsLength > 4 ? mats[4] : IntPtr.Zero);
            handle1.Free();
            handle2.Free();
            return imagePoints;
        }
        public void StereoRectify(IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            ExtrinsicCameraParameters extrinsicParams,
            out Matrix<double> R1,
            out Matrix<double> R2,
            out Matrix<double> P1,
            out Matrix<double> P2,
            out Matrix<double> Q,
            STEREO_RECTIFY_TYPE flags,
            double alpha,
            Size newImageSize,
            ref Rectangle validPixROI1,
            ref Rectangle validPixROI2
            )
        {
            R1 = new Matrix<double>(3, 3);
            R2 = new Matrix<double>(3, 3);
            P1 = new Matrix<double>(3, 4);
            P2 = new Matrix<double>(3, 4);
            Q = new Matrix<double>(4, 4);

            CvInvoke.cvStereoRectify(
                intrinsicParam1.IntrinsicMatrix.Ptr,
                intrinsicParam2.IntrinsicMatrix.Ptr,
                intrinsicParam1.DistortionCoeffs.Ptr,
                intrinsicParam2.DistortionCoeffs.Ptr,
                imageSize,
                extrinsicParams.RotationVector.Ptr,
                extrinsicParams.TranslationVector.Ptr,
                R1.Ptr,
                R2.Ptr,
                P1.Ptr,
                P2.Ptr,
                Q.Ptr,
                flags,
                alpha,
                newImageSize,
                ref validPixROI1,
                ref validPixROI2);
        }
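        // Usage sketch (not part of the original example; all inputs are hypothetical
        // placeholders): rectifies a 640x480 stereo pair. With alpha = 0 the rectified views
        // are cropped to valid pixels only, and the returned Q matrix can later be passed to
        // CvInvoke.cvReprojectImageTo3D to turn a disparity map into 3D points.
        public void StereoRectifyUsageSketch(
            IntrinsicCameraParameters leftIntrinsics,
            IntrinsicCameraParameters rightIntrinsics,
            ExtrinsicCameraParameters leftToRight)
        {
            Matrix<double> R1, R2, P1, P2, Q;
            Rectangle roi1 = Rectangle.Empty, roi2 = Rectangle.Empty;
            Size size = new Size(640, 480);

            StereoRectify(leftIntrinsics, rightIntrinsics, size, leftToRight,
                out R1, out R2, out P1, out P2, out Q,
                STEREO_RECTIFY_TYPE.DEFAULT, 0.0, size, ref roi1, ref roi2);
        }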
Example #23
        public void TestExtrinsicCameraParametersRuntimeSerialize()
        {
            ExtrinsicCameraParameters param = new ExtrinsicCameraParameters();

            param.RotationVector.SetRandUniform(new MCvScalar(), new MCvScalar(1.0));
            param.TranslationVector.SetRandUniform(new MCvScalar(), new MCvScalar(100));

            System.Runtime.Serialization.Formatters.Binary.BinaryFormatter formatter =
                new System.Runtime.Serialization.Formatters.Binary.BinaryFormatter();

            Byte[] bytes;
            using (MemoryStream ms = new MemoryStream())
            {
                formatter.Serialize(ms, param);
                bytes = ms.ToArray(); // ToArray() copies only the written bytes; GetBuffer() may include trailing padding
            }
            using (MemoryStream ms2 = new MemoryStream(bytes))
            {
                ExtrinsicCameraParameters param2 = (ExtrinsicCameraParameters)formatter.Deserialize(ms2);

                Assert.IsTrue(param.Equals(param2));
            }
        }
Example #24
        /// <summary>
        /// Estimates the extrinsic camera parameters for each view using known intrinsic parameters. The coordinates of the 3D object points and their corresponding 2D projections must be specified. The function also minimizes the back-projection error. 
        /// </summary>
        /// <param name="objectPoints">The array of object points</param>
        /// <param name="imagePoints">The array of corresponding image points</param>
        /// <param name="intrin">The intrinsic parameters</param>
        /// <returns>The extrinsic parameters</returns>
        public static ExtrinsicCameraParameters FindExtrinsicCameraParams2(
            MCvPoint3D32f[] objectPoints,
            PointF[] imagePoints,
            IntrinsicCameraParameters intrin)
        {
            ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();

            GCHandle handle1 = GCHandle.Alloc(objectPoints, GCHandleType.Pinned);
            GCHandle handle2 = GCHandle.Alloc(imagePoints, GCHandleType.Pinned);
            using (Matrix<float> objectPointMatrix = new Matrix<float>(objectPoints.Length, 3, handle1.AddrOfPinnedObject()))
            using (Matrix<float> imagePointMatrix = new Matrix<float>(imagePoints.Length, 2, handle2.AddrOfPinnedObject()))
                CvInvoke.cvFindExtrinsicCameraParams2(
                    objectPointMatrix.Ptr,
                    imagePointMatrix.Ptr,
                    intrin.IntrinsicMatrix.Ptr,
                    intrin.DistortionCoeffs.Ptr,
                    p.RotationVector.Ptr,
                    p.TranslationVector.Ptr,
                    0);
            handle1.Free();
            handle2.Free();

            return p;
        }
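        // Usage sketch (not part of the original example): estimates the pose of a hypothetical
        // 9x6 chessboard with 25 mm squares. 'corners' stands in for corner locations detected
        // elsewhere (e.g. via CameraCalibration.FindChessboardCorners) in the same row-major
        // order as the object points.
        public static ExtrinsicCameraParameters EstimateBoardPoseSketch(
            PointF[] corners, IntrinsicCameraParameters intrin)
        {
            MCvPoint3D32f[] boardPoints = new MCvPoint3D32f[9 * 6];
            for (int iy = 0; iy < 6; iy++)
                for (int ix = 0; ix < 9; ix++)
                    boardPoints[iy * 9 + ix] = new MCvPoint3D32f(ix * 25f, iy * 25f, 0f);

            // The returned rotation/translation map board coordinates into the camera frame.
            return FindExtrinsicCameraParams2(boardPoints, corners, intrin);
        }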
Example #25
	protected override CalibrationResult Process()
	{
		ExtrinsicCameraParameters[] ext; // filled by the out parameter below
		CameraCalibration.CalibrateCamera(this.ObjectPoints, this.ImagePoint, this.CheckBoard.Pattern, this.CameraIntrinsic, Emgu.CV.CvEnum.CalibType.Default, this.Criteria, out ext);
		return new CalibrationResult(false, null);
	/*
	def _start(self, progressCallback, afterCallback):
		ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(self.objPointsStack, self.imagePointsStack, self.shape)

		if progressCallback is not None:
			progressCallback(100)

		if ret:
			response = (True, (mtx, dist[0], rvecs, tvecs))
		else:
			response = (False, Error.CalibrationError)

		if afterCallback is not None:
			afterCallback(response)
	*/
	}
Example #26
        public void TestProjectPoints()
        {
            IntrinsicCameraParameters intrin = new IntrinsicCameraParameters();
            intrin.IntrinsicMatrix.SetIdentity();
            ExtrinsicCameraParameters extrin = new ExtrinsicCameraParameters();
            MCvPoint3D32f point = new MCvPoint3D32f();

            PointF[] points = CameraCalibration.ProjectPoints(new MCvPoint3D32f[] { point }, extrin, intrin);
        }
    /// <summary>
    /// Calculates the transformation matrix, which is used to transform the 3d-object points, which were scanned with reference
    /// to the moved marker coordinate system, back to the initial marker system and hence back to the camera system.
    /// The camera object is needed in order to obtain the current camera frame. Furthermore, the camera's intrinsics are needed
    /// to perform an extrinsic calibration. Note that every kind of pattern can be used.
    /// 
    /// The transformation matrix is calculated as follows:
    /// * If 'UpdateTransformation' is being called for the first time, an extrinsic calibration is performed in order to find
    ///   the initial orientation of the used pattern.
    /// * If the initial orientation has already been found, the extrinsic calibration is performed again. Afterwards
    ///   the current orientation is available, represented by the extrinsic matrix.
    /// * Expand the extrinsic matrix (3x4) (current position) to a homogeneous 4x4 matrix.
    /// * The final transformation matrix is calculated as follows: _final = initial * current.Inverse();
    /// </summary>
    public bool UpdateTransformation(Camera the_cam)
    {
      Matrix extrinsicM1 = Matrix.Identity(4, 4);
      ExtrinsicCameraParameters ecp_pattern = null;
      ExtrinsicCalibration ec_pattern = null;
      Emgu.CV.Image<Gray, Byte> gray_img = null;
      System.Drawing.PointF[] currentImagePoints;

      //first call: calculate extrinsics for initial position
      if (_firstCallUpdateTransformation && _cpattern != null)
      {
         gray_img = the_cam.Frame().Convert<Gray, Byte>();
         //set the patterns property: intrinsic parameters
         _cpattern.IntrinsicParameters = the_cam.Intrinsics;

         if (_cpattern.FindPattern(gray_img, out currentImagePoints))
         {
           try
           {
             //extr. calibration (initial position)
             ec_pattern = new ExtrinsicCalibration(_cpattern.ObjectPoints, the_cam.Intrinsics);
             ecp_pattern = ec_pattern.Calibrate(currentImagePoints);

             if (ecp_pattern != null)
             {
               _ecp_A = ecp_pattern;
               _extrinsicMatrix_A = ExtractExctrinsicMatrix(_ecp_A);

               _logger.Info("Initial Position found.");
               _firstCallUpdateTransformation = false;
             }
           }
           catch (Exception e)
           {
             _logger.Warn("Initial Position - Caught Exception: {0}.", e);
             _firstCallUpdateTransformation = true;
             _ecp_A = null;
             return false; 
           }
         }
         else
         {
           _logger.Warn("Pattern not found.");
           _firstCallUpdateTransformation = true;
           _ecp_A = null;

           return false; 
         }
      }

      //if initial position and pattern are available - calculate the transformation
      if (_ecp_A != null && _cpattern != null)
      {
        gray_img = the_cam.Frame().Convert<Gray, Byte>();

        //try to find composite pattern
        if (_cpattern.FindPattern(gray_img, out currentImagePoints))
        {
          //extrinsic calibration in order to find the current orientation
          ec_pattern = new ExtrinsicCalibration(_cpattern.ObjectPoints, the_cam.Intrinsics);
          ecp_pattern = ec_pattern.Calibrate(currentImagePoints);

          if (ecp_pattern != null)
          {
            //extract current extrinsic matrix
            extrinsicM1 = ExtractExctrinsicMatrix(ecp_pattern);
            _logger.Info("UpdateTransformation: Transformation found.");
          }
          else
          {
            _logger.Warn("UpdateTransformation: Extrinsics of moved marker system not found.");
            return false;
          }
        }
        else
        {
          _logger.Warn("UpdateTransformation: Pattern not found.");
          return false;
        }

        //now calculate the final transformation matrix
        _final = _extrinsicMatrix_A * extrinsicM1.Inverse();
        return true;
      }
      else
      {
        _logger.Warn("UpdateTransformation: No Pattern has been chosen.");
        return false;
      }
    }
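    // The ExtractExctrinsicMatrix helper called above is not part of this excerpt. A minimal
    // sketch of what it plausibly does, assuming the MathNet-style Matrix type used for
    // '_final': copy the 3x4 [R|t] extrinsic matrix into the upper three rows of a 4x4
    // identity, yielding the homogeneous transform described in the summary above.
    private Matrix ExtractExtrinsicMatrixSketch(ExtrinsicCameraParameters ecp)
    {
      Matrix homogeneous = Matrix.Identity(4, 4);
      Emgu.CV.Matrix<double> rt = ecp.ExtrinsicMatrix; // 3x4 [R|t]
      for (int r = 0; r < 3; r++)
        for (int c = 0; c < 4; c++)
          homogeneous[r, c] = rt[r, c];
      return homogeneous;
    }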
    protected override void OnFrame(Parsley.Core.BuildingBlocks.FrameGrabber fp, Emgu.CV.Image<Emgu.CV.Structure.Bgr, byte> img) 
    {
      // Constraint checking
      if (!Context.Setup.Camera.HasIntrinsics) 
      {
        _on_roi = false;
        return;
      }

      if (_interactor.State == Parsley.UI.InteractionState.Interacting) 
      {
        _interactor.DrawIndicator(_interactor.Current, img);
      } 
      else 
      {
        _interactor.DrawIndicator(_r, img);
      }

      if (_on_roi && _pattern != null) 
      {
        Image<Gray, Byte> gray = img.Convert<Gray, Byte>();
        _pattern.IntrinsicParameters = Context.Setup.Camera.Intrinsics;

        try 
        {
          _pattern.FindPattern(gray, _r);
          if (_pattern.PatternFound) 
          {
            Parsley.Core.ExtrinsicCalibration ec = new Parsley.Core.ExtrinsicCalibration(_pattern.ObjectPoints, Context.Setup.Camera.Intrinsics);
            ExtrinsicCameraParameters ecp = ec.Calibrate(_pattern.ImagePoints);
            double[] deviations;
            Vector[] points;
            
            Core.ExtrinsicCalibration.CalibrationError(ecp, Context.Setup.Camera.Intrinsics,
                _pattern.ImagePoints, _pattern.ObjectPoints, out deviations, out points);

            double max_error = deviations.Max();
            if (max_error < _last_error) 
            {
              _last_detected_plane = ecp;
              _last_error = max_error;
              this.Logger.Info(String.Format("Extrinsics successfully calculated. Maximum error {0:F3}", _last_error));
            }
          } 
          else if (!_pattern.PatternFound && _last_detected_plane == null)
          {
            this.Logger.Warn("Pattern not found.");
          }
        }
        catch (System.Exception e) 
        {
          this.Logger.Warn(String.Format("Failed to determine extrinsic calibration: {0}", e.Message));
        }
      }
      if (_last_detected_plane != null) 
      {
        Core.Drawing.DrawCoordinateFrame(img, _last_detected_plane, Context.Setup.Camera.Intrinsics);
      }
    }
    private void _btn_save_extrinsics_Click(object sender, EventArgs e) {

      //save modified extrinsic camera parameters
      _last_detected_plane = CalculateShiftedECP();

      if (_last_detected_plane != null && saveFileDialog1.ShowDialog() == DialogResult.OK)
      {
        using (Stream s = File.OpenWrite(saveFileDialog1.FileName))
        {
          if (s != null)
          {
            IFormatter formatter = new BinaryFormatter();
            formatter.Serialize(s, _last_detected_plane);
            s.Close();
            _logger.Info("Extrinsics successfully saved.");
          }
        }
      }
      else
        _logger.Warn("Error saving Extrinsics.");
    }
 private void _btn_load_pattern_Click(object sender, EventArgs e) {
   if (openFileDialog1.ShowDialog() == DialogResult.OK) {
     using (Stream s = File.Open(openFileDialog1.FileName, FileMode.Open)) {
       if (s != null) {
         IFormatter formatter = new BinaryFormatter();
         _pattern = formatter.Deserialize(s) as Core.CalibrationPattern;
         s.Close();
         _last_detected_plane = null;
         _last_error = Double.MaxValue;
         _logger.Info(String.Format("Calibration pattern {0} successfully loaded.", new FileInfo(openFileDialog1.FileName).Name));
       }
     }
   }
 }
Example #31
		public Extrinsics(ExtrinsicCameraParameters extrinsics)
		{
			this.extrinsics = extrinsics;
		}
Example #32
 private void ApplyCalibrationToUnityCamera(IntrinsicCameraParameters intrinsic, ExtrinsicCameraParameters extrinsics)
 {
     Matrix<double> rotationInverse = flipZAxis(extrinsics.RotationVector.RotationMatrix).Transpose(); // transpose is same as inverse for rotation matrix
     Matrix<double> transFinal = (rotationInverse * -1) * extrinsics.TranslationVector;
     _mainCamera.projectionMatrix = LoadProjectionMatrix((float)intrinsic.IntrinsicMatrix[0, 0], (float)intrinsic.IntrinsicMatrix[1, 1], (float)intrinsic.IntrinsicMatrix[0, 2], (float)intrinsic.IntrinsicMatrix[1, 2]);
     ApplyTranslationAndRotationToCamera(transFinal, RotationConversion.RotationMatrixToEulerZXY(rotationInverse));
 }
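 // Note (not from the original example): the inversion above is the standard pinhole
 // extrinsic inversion. If x_cam = R * x_world + t, then x_world = R^T * x_cam - R^T * t,
 // so the camera's world-space rotation is R^T and its position is -R^T * t, which is what
 // rotationInverse and transFinal compute. The flipZAxis helper (defined elsewhere)
 // presumably reconciles OpenCV's right-handed camera frame with Unity's left-handed
 // convention.
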
 /// <summary>
 /// Default Constructor
 /// </summary>
 /// <param name="theCam"> Camera Object needed in order to obtain the current camera frame.</param>
 public MarkerPositioner() {
   _final = Matrix.Identity(4, 4);
   _ecp_A = null;
   _cpattern = null;
   _extrinsicMatrix_A = Matrix.Identity(4, 4);
   _logger = LogManager.GetLogger(typeof(MarkerPositioner));
   _firstCallUpdateTransformation = true;
 }