Example No. 1
 void _interactor_InteractionCompleted(object sender, Parsley.UI.InteractionEventArgs e)
 {
     _last_detected_plane = null;
     _last_error          = Double.MaxValue;
     _on_roi = true;
     _r      = (Rectangle)e.InteractionResult;
 }
Example No. 2
        protected override void OnFrame(Parsley.Core.BuildingBlocks.FrameGrabber fp, Emgu.CV.Image <Emgu.CV.Structure.Bgr, byte> img)
        {
            // Constraint checking
            if (!Context.Setup.Camera.HasIntrinsics)
            {
                _on_roi = false;
                return;
            }

            if (_interactor.State == Parsley.UI.InteractionState.Interacting)
            {
                _interactor.DrawIndicator(_interactor.Current, img);
            }
            else
            {
                _interactor.DrawIndicator(_r, img);
            }

            if (_on_roi && _pattern != null)
            {
                Image <Gray, Byte> gray = img.Convert <Gray, Byte>();
                _pattern.IntrinsicParameters = Context.Setup.Camera.Intrinsics;

                try
                {
                    _pattern.FindPattern(gray, _r);
                    if (_pattern.PatternFound)
                    {
                        Parsley.Core.ExtrinsicCalibration ec  = new Parsley.Core.ExtrinsicCalibration(_pattern.ObjectPoints, Context.Setup.Camera.Intrinsics);
                        ExtrinsicCameraParameters         ecp = ec.Calibrate(_pattern.ImagePoints);
                        double[] deviations;
                        Vector[] points;

                        Core.ExtrinsicCalibration.CalibrationError(ecp, Context.Setup.Camera.Intrinsics, _pattern.ImagePoints,
                                                                   _pattern.ObjectPoints, out deviations, out points);

                        double max_error = deviations.Max();
                        if (max_error < _last_error)
                        {
                            _last_detected_plane = ecp;
                            _last_error          = max_error;
                            this.Logger.Info(String.Format("Extrinsics successfully calculated. Maximum error {0:F3}", _last_error));
                        }
                    }
                    else if (_last_detected_plane == null) //pattern not found in this frame and no plane detected so far
                    {
                        this.Logger.Warn("Pattern not found.");
                    }
                }
                catch (System.Exception e)
                {
                    this.Logger.Warn(String.Format("Failed to determine extrinsic calibration: {0}", e.Message));
                }
            }
            if (_last_detected_plane != null)
            {
                Core.Drawing.DrawCoordinateFrame(img, _last_detected_plane, Context.Setup.Camera.Intrinsics);
            }
        }
Example No. 3
    private void ApplyCalibrationToUnityCamera(IntrinsicCameraParameters intrinsic, ExtrinsicCameraParameters extrinsics)
    {
        Matrix <double> rotationInverse = flipZAxis(extrinsics.RotationVector.RotationMatrix).Transpose(); // transpose is same as inverse for rotation matrix
        Matrix <double> transFinal      = (rotationInverse * -1) * extrinsics.TranslationVector;

        _mainCamera.projectionMatrix = LoadProjectionMatrix((float)intrinsic.IntrinsicMatrix[0, 0], (float)intrinsic.IntrinsicMatrix[1, 1], (float)intrinsic.IntrinsicMatrix[0, 2], (float)intrinsic.IntrinsicMatrix[1, 2]);
        ApplyTranslationAndRotationToCamera(transFinal, RotationConversion.RotationMatrixToEulerZXY(rotationInverse));
    }
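LoadProjectionMatrix is referenced but not shown in this example. A minimal sketch of what such a helper might look like, assuming Unity's Matrix4x4, an assumed image resolution, and assumed near/far clip planes (sign conventions vary between pipelines, so treat this as illustrative only):

    // Hypothetical helper, not part of the original example: builds an
    // OpenGL-style projection matrix from pinhole intrinsics
    // (fx, fy in pixels; cx, cy = principal point in pixels).
    private Matrix4x4 LoadProjectionMatrix(float fx, float fy, float cx, float cy)
    {
        float w = 640f, h = 480f;       // assumed image resolution
        float near = 0.1f, far = 1000f; // assumed clip planes

        Matrix4x4 p = Matrix4x4.zero;
        p[0, 0] = 2f * fx / w;                    // focal length scales x
        p[1, 1] = 2f * fy / h;                    // focal length scales y
        p[0, 2] = 1f - 2f * cx / w;               // principal point offset x
        p[1, 2] = 2f * cy / h - 1f;               // principal point offset y
        p[2, 2] = -(far + near) / (far - near);   // depth remapping
        p[2, 3] = -2f * far * near / (far - near);
        p[3, 2] = -1f;                            // perspective divide
        return p;
    }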
Example No. 4
      /// <summary>
      /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
      /// </summary>
      /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
      /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
      /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
      /// <param name="intrinsicParam">The intrisinc parameters, might contains some initial values. The values will be modified by this function.</param>
      /// <param name="flags">Flags</param>
      /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
      /// <returns>The final reprojection error</returns>
      public static double CalibrateCamera(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePoints,
         Size imageSize,
         IntrinsicCameraParameters intrinsicParam,
         CvEnum.CALIB_TYPE flags,
         out ExtrinsicCameraParameters[] extrinsicParams)
      {
         Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for object points should be equal to the number of images for image points");
         int imageCount = objectPoints.Length;

         #region get the array that represent the point counts
         int[] pointCounts = new int[objectPoints.Length];
         for (int i = 0; i < objectPoints.Length; i++)
         {
            Debug.Assert(objectPoints[i].Length == imagePoints[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
            pointCounts[i] = objectPoints[i].Length;
         }
         #endregion

         double reprojectionError = -1;
         using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
         using (Matrix<float> imagePointMatrix = ToMatrix(imagePoints))
         using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
         using (Matrix<double> rotationVectors = new Matrix<double>(imageCount, 3))
         using (Matrix<double> translationVectors = new Matrix<double>(imageCount, 3))
         {
            reprojectionError = CvInvoke.cvCalibrateCamera2(
                objectPointMatrix.Ptr,
                imagePointMatrix.Ptr,
                pointCountsMatrix.Ptr,
                imageSize,
                intrinsicParam.IntrinsicMatrix,
                intrinsicParam.DistortionCoeffs,
                rotationVectors,
                translationVectors,
                flags);

            extrinsicParams = new ExtrinsicCameraParameters[imageCount];
            IntPtr matPtr = Marshal.AllocHGlobal(StructSize.MCvMat);
            for (int i = 0; i < imageCount; i++)
            {
               ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
               CvInvoke.cvGetRow(rotationVectors.Ptr, matPtr, i);
               CvInvoke.cvTranspose(matPtr, p.RotationVector.Ptr);
               CvInvoke.cvGetRow(translationVectors.Ptr, matPtr, i);
               CvInvoke.cvTranspose(matPtr, p.TranslationVector.Ptr);
               extrinsicParams[i] = p;
            }
            Marshal.FreeHGlobal(matPtr);
         }
         return reprojectionError;
      }
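A minimal usage sketch against the signature above; the per-view model corners and detected image corners (e.g. from CameraCalibration.FindChessboardCorners) are assumed to have been collected already, and CALIB_TYPE.DEFAULT is assumed as the flag value:

      static IntrinsicCameraParameters CalibrateFromViews(
         MCvPoint3D32f[][] objectPoints, // model-space corners, one array per view
         PointF[][] imagePoints,         // detected corners, one array per view
         Size imageSize)
      {
         IntrinsicCameraParameters intrinsics = new IntrinsicCameraParameters();
         ExtrinsicCameraParameters[] extrinsics;
         double error = CameraCalibration.CalibrateCamera(
            objectPoints, imagePoints, imageSize,
            intrinsics, CvEnum.CALIB_TYPE.DEFAULT,
            out extrinsics);
         // the returned value is the final reprojection error of the optimization
         Console.WriteLine("Reprojection error: {0:F3}", error);
         return intrinsics;
      }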
Example No. 5
        //called when data for any output pin is requested
        public void Evaluate(int SpreadMax)
        {
            if (FPinInDo[0])
            {
                bool useVVVVCoords = FPinInCoordSystem[0] == TCoordinateSystem.VVVV;

                SpreadMax = Math.Max(FPinInObject.SliceCount, FPinInImage.SliceCount);

                FPinOutExtrinsics.SliceCount = SpreadMax;
                FPinOutStatus.SliceCount     = SpreadMax;

                for (int i = 0; i < SpreadMax; i++)
                {
                    try
                    {
                        if (FPinInObject[i].SliceCount == 0 || FPinInImage[i].SliceCount == 0)
                        {
                            throw new Exception("No datapoints");
                        }
                        if (FPinInImage[i].SliceCount == 1)
                        {
                            throw new Exception("Only 1 image point is being input per board, check SliceCount!");
                        }
                        if (FPinInObject[i].SliceCount == 1)
                        {
                            throw new Exception("Only 1 object point is being input per board, check SliceCount!");
                        }
                        if (FPinInIntrinsics[i].intrinsics == null)
                        {
                            throw new Exception("Waiting for camera calibration intrinsics");
                        }

                        ExtrinsicCameraParameters extrinsics = CameraCalibration.FindExtrinsicCameraParams2(MatrixUtils.ObjectPoints(FPinInObject[i], useVVVVCoords), MatrixUtils.ImagePoints(FPinInImage[i]), FPinInIntrinsics[i].intrinsics);
                        FPinOutExtrinsics[i] = new Extrinsics(extrinsics);

                        if (useVVVVCoords)
                        {
                            FPinOutView[i] = MatrixUtils.ConvertToVVVV(FPinOutExtrinsics[i].Matrix);
                        }
                        else
                        {
                            FPinOutView[i] = FPinOutExtrinsics[i].Matrix;
                        }

                        FPinOutStatus[i] = "OK";
                    }
                    catch (Exception e)
                    {
                        FPinOutStatus[i] = e.Message;
                    }
                }
            }
        }
Example No. 6
        /// <summary>
        /// Initializes the internal data.
        /// </summary>
        /// <param name="imgsize">The image resolution.</param>
        private void Initializer(Size imgsize)
        {
            _nPointsPerImage = _ChessBoard.BoardRows * _ChessBoard.BoardColumns; // corners per chessboard view
            _nPoints         = _nPointsPerImage * _ChessBoard.NImages;           // total number of chessboard corners
            _imageSize       = imgsize;                                          // image resolution
            _objectPoints    = new List <MCvPoint3D32f[]>(_ChessBoard.NImages);  // world coordinates (3D) of the chessboard corners
            for (int i = 0; i < _ChessBoard.NImages; i++)
            {
                _objectPoints.Add(new MCvPoint3D32f[_nPointsPerImage]);
            }
            int currentImage;

            for (currentImage = 0; currentImage < _ChessBoard.NImages; currentImage++)
            {
                int currentRow;
                for (currentRow = 0; currentRow < _ChessBoard.BoardRows; currentRow++)
                {
                    int currentCol;
                    for (currentCol = 0; currentCol < _ChessBoard.BoardColumns; currentCol++)
                    {
                        int nPoint = currentRow * _ChessBoard.BoardColumns + currentCol;
                        _objectPoints[currentImage][nPoint].X = (float)currentCol * _ChessBoard.SquareWidth;
                        _objectPoints[currentImage][nPoint].Y = (float)currentRow * _ChessBoard.SquareWidth;
                        _objectPoints[currentImage][nPoint].Z = 0.0f;
                    }
                }
            }


            _imagePointsL       = new List <PointF[]>();     // pixel coordinates (2D) of the chessboard corners in the left view
            _imagePointsR       = new List <PointF[]>();     // pixel coordinates (2D) of the chessboard corners in the right view
            _q                  = new Matrix <double>(4, 4); // Q matrix used to compute the 3D point cloud
            _roi1               = new Rectangle();           // rectangle bounding the valid region of the left view
            _roi2               = new Rectangle();           // rectangle bounding the valid region of the right view
            _r1                 = new Matrix <double>(3, 3);
            _r2                 = new Matrix <double>(3, 3);
            _p1                 = new Matrix <double>(3, 4);
            _p2                 = new Matrix <double>(3, 4);
            _mx1                = new Matrix <float>(_imageSize);
            _my1                = new Matrix <float>(_imageSize);
            _mx2                = new Matrix <float>(_imageSize);
            _my2                = new Matrix <float>(_imageSize);
            _extrParamsS        = new ExtrinsicCameraParameters(); // extrinsic parameters of the stereo rig
            _intrParamL         = new IntrinsicCameraParameters(); // intrinsics of the left camera
            _intrParamR         = new IntrinsicCameraParameters(); // intrinsics of the right camera
            _extrParamsL        = new ExtrinsicCameraParameters[_ChessBoard.NImages];
            _extrParamsR        = new ExtrinsicCameraParameters[_ChessBoard.NImages];
            _termCriteria       = new MCvTermCriteria(30, 0.05); // termination criteria
            _calibType          = CalibType.FixK3;
            _DoubleCapture.ImgL = new Mat();
            _DoubleCapture.ImgR = new Mat();
        }
Example No. 7
        public CalibrationResult SolvePnP(Bitmap image, MCvPoint3D32f[] objpoints, IntrinsicCameraParameters camIntrinsic, CheckBoardDefinition pattern)
        {
            //convert to grayscale (the cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) step)
            Image <Gray, Byte> gray = new Image <Gray, byte>(image);

            // the fast check flag reduces significantly the computation time if the pattern is out of sight
            PointF[] corners = CameraCalibration.FindChessboardCorners(gray, pattern.Pattern, Emgu.CV.CvEnum.CalibCbType.FastCheck);
            ExtrinsicCameraParameters ret = null;

            if (corners != null)
            {
                ret = CameraCalibration.SolvePnP(objpoints, corners, camIntrinsic);
            }
            return(new CalibrationResult(ret != null, ret));
        }
Example No. 8
        /// <summary>
        /// Extracts the extrinsic matrix from the cameras' extrinsic parameters
        /// The values of the extrinsic matrix are extracted into a 4x4 Matrix.
        /// This function is static, since it is called first from the deserialization constructor.
        /// </summary>
        private static Matrix ExtractExctrinsicMatrix(ExtrinsicCameraParameters ecp)
        {
            Matrix extrinsicMatrix = Matrix.Identity(4, 4);

            if (ecp != null)
            {
                for (int i = 0; i < 3; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        extrinsicMatrix[i, j] = ecp.ExtrinsicMatrix[i, j];
                    }
                }
            }
            return(extrinsicMatrix);
        }
Example No. 9
 private void _btn_load_pattern_Click(object sender, EventArgs e)
 {
     if (openFileDialog1.ShowDialog() == DialogResult.OK)
     {
         using (Stream s = File.Open(openFileDialog1.FileName, FileMode.Open)) {
             if (s != null)
             {
                 IFormatter formatter = new BinaryFormatter();
                 _pattern = formatter.Deserialize(s) as Core.CalibrationPattern;
                 s.Close();
                 _last_detected_plane = null;
                 _last_error          = Double.MaxValue;
                 _logger.Info(String.Format("Calibration pattern {0} successfully loaded.", new FileInfo(openFileDialog1.FileName).Name));
             }
         }
     }
 }
Example No. 10
        public void StereoRectify
            (IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            ExtrinsicCameraParameters extrinsicParams,
            out Matrix <double> R1,
            out Matrix <double> R2,
            out Matrix <double> P1,
            out Matrix <double> P2,
            out Matrix <double> Q,
            STEREO_RECTIFY_TYPE flags,
            double alpha,
            Size newImageSize,
            ref Rectangle validPixROI1,
            ref Rectangle validPixROI2
            )
        {
            R1 = new Matrix <double>(3, 3);
            R2 = new Matrix <double>(3, 3);
            P1 = new Matrix <double>(3, 4);
            P2 = new Matrix <double>(3, 4);
            Q  = new Matrix <double>(4, 4);

            CvInvoke.cvStereoRectify(
                intrinsicParam1.IntrinsicMatrix.Ptr,
                intrinsicParam2.IntrinsicMatrix.Ptr,
                intrinsicParam1.DistortionCoeffs.Ptr,
                intrinsicParam2.DistortionCoeffs.Ptr,
                imageSize,
                extrinsicParams.RotationVector.Ptr,
                extrinsicParams.TranslationVector.Ptr,
                R1.Ptr,
                R2.Ptr,
                P1.Ptr,
                P2.Ptr,
                Q.Ptr,
                flags,
                alpha,
                newImageSize,
                ref validPixROI1,
                ref validPixROI2);
        }
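A usage sketch for the wrapper above (assumed to live in the same class); the intrinsics of both cameras and the stereo extrinsics are assumed to come from a prior stereo calibration:

        private void RectifyStereoPair(
            IntrinsicCameraParameters leftIntrinsics,
            IntrinsicCameraParameters rightIntrinsics,
            ExtrinsicCameraParameters stereoExtrinsics,
            Size imageSize)
        {
            Matrix <double> R1, R2, P1, P2, Q;
            Rectangle roi1 = new Rectangle(), roi2 = new Rectangle();

            StereoRectify(leftIntrinsics, rightIntrinsics, imageSize, stereoExtrinsics,
                          out R1, out R2, out P1, out P2, out Q,
                          STEREO_RECTIFY_TYPE.DEFAULT,
                          0,         // alpha = 0: crop to valid pixels only
                          imageSize, // keep the original resolution
                          ref roi1, ref roi2);

            // R1/R2 rotate each camera into the rectified frame, P1/P2 are the
            // rectified projection matrices, and Q reprojects disparities to 3D.
        }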
Example No. 11
        //called when data for any output pin is requested
        public void Evaluate(int SpreadMax)
        {
            FPinOutExtrinsics.SliceCount = SpreadMax;
            FPinOutStatus.SliceCount     = SpreadMax;

            if (FPinInImagePoints.IsChanged || FPinInObjectPoints.IsChanged)
            {
                MCvPoint3D32f[] objectPoints;
                PointF[]        imagePoints;
                for (int i = 0; i < SpreadMax; i++)
                {
                    if (FPinInObjectPoints[i].SliceCount != FPinInImagePoints[i].SliceCount)
                    {
                        FPinOutStatus[i] = "Number of object and image points does not match";
                        continue;
                    }

                    if (FPinInIntrinsics[i] == null)
                    {
                        FPinOutStatus[i] = "Waiting for intrinsics";
                        continue;
                    }

                    int nPoints = FPinInObjectPoints[i].SliceCount;

                    objectPoints = new MCvPoint3D32f[nPoints];
                    imagePoints  = new PointF[nPoints];
                    //NOTE: as written, these arrays are never filled from the input pins,
                    //so the solver below receives all-zero points

                    ExtrinsicCameraParameters extrinsics = CameraCalibration.FindExtrinsicCameraParams2(objectPoints, imagePoints, FPinInIntrinsics[i]);

                    if (extrinsics == null)
                    {
                        FPinOutStatus[i] = "Something went wrong";
                        continue;
                    }

                    FPinOutExtrinsics[i] = extrinsics;
                    FPinOutStatus[i]     = "ok";
                }
            }
        }
Example No. 12
        //called when data for any output pin is requested
        public void Evaluate(int SpreadMax)
        {
            SpreadMax = Math.Max(FPinInObject.SliceCount, FPinInImage.SliceCount);

            FPinOutExtrinsics.SliceCount = SpreadMax;
            FPinOutStatus.SliceCount     = SpreadMax;

            for (int i = 0; i < SpreadMax; i++)
            {
                try
                {
                    if (FPinInObject[i].SliceCount == 0 || FPinInImage[i].SliceCount == 0)
                    {
                        throw new Exception("No datapoints");
                    }
                    if (FPinInImage[i].SliceCount == 1)
                    {
                        throw new Exception("Only 1 image point is being input per board, check SliceCount!");
                    }
                    if (FPinInObject[i].SliceCount == 1)
                    {
                        throw new Exception("Only 1 object point is being input per board, check SliceCount!");
                    }
                    if (FPinInIntrinsics[i].intrinsics == null)
                    {
                        throw new Exception("Waiting for camera calibration intrinsics");
                    }

                    ExtrinsicCameraParameters extrinsics = CameraCalibration.FindExtrinsicCameraParams2(MatrixUtils.ObjectPoints(FPinInObject[i]), MatrixUtils.ImagePoints(FPinInImage[i]), FPinInIntrinsics[i].intrinsics);
                    FPinOutExtrinsics[i] = new Extrinsics(extrinsics);
                    FPinOutStatus[i]     = "OK";
                }
                catch (Exception e)
                {
                    FPinOutStatus[i] = e.Message;
                }
            }
        }
Example No. 13
        private void _btn_save_extrinsics_Click(object sender, EventArgs e)
        {
            //save modified extrinsic camera parameters
            _last_detected_plane = CalculateShiftedECP();

            if (_last_detected_plane != null && saveFileDialog1.ShowDialog() == DialogResult.OK)
            {
                using (Stream s = File.OpenWrite(saveFileDialog1.FileName))
                {
                    if (s != null)
                    {
                        IFormatter formatter = new BinaryFormatter();
                        formatter.Serialize(s, _last_detected_plane);
                        s.Close();
                        _logger.Info("Extrinsics successfully saved.");
                    }
                }
            }
            else
            {
                //also reached when the user cancels the save dialog
                _logger.Warn("Extrinsics not saved.");
            }
        }
Example No. 14
        protected override CalibrationResult Process()
        {
            ExtrinsicCameraParameters[] ext; //filled by the out parameter below
            CameraCalibration.CalibrateCamera(this.ObjectPoints, this.ImagePoint, this.CheckBoard.Pattern, this.CameraIntrinsic, Emgu.CV.CvEnum.CalibType.Default, this.Criteria, out ext);
            return(new CalibrationResult(false, null));

            /*
             * Reference implementation (Python):
             *
             * def _start(self, progressCallback, afterCallback):
             *     ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(self.objPointsStack, self.imagePointsStack, self.shape)
             *
             *     if progressCallback is not None:
             *         progressCallback(100)
             *
             *     if ret:
             *         response = (True, (mtx, dist[0], rvecs, tvecs))
             *     else:
             *         response = (False, Error.CalibrationError)
             *
             *     if afterCallback is not None:
             *         afterCallback(response)
             */
        }
Example No. 15
        //called when data for any output pin is requested
        public void Evaluate(int SpreadMax)
        {
            FPinOutTransform.SliceCount = SpreadMax;

            for (int i = 0; i < SpreadMax; i++)
            {
                ExtrinsicCameraParameters extrinsics = FPinInExtrinsics[i];
                if (extrinsics == null)
                {
                    //no extrinsics for this slice: output an identity transform
                    FPinOutTransform[i] = new Matrix4x4();
                }
                else
                {
                    Matrix <double> t = extrinsics.ExtrinsicMatrix;
                    Matrix4x4 m = new Matrix4x4();
                    for (int x = 0; x < 3; x++)
                    {
                        for (int y = 0; y < 4; y++)
                        {
                            m[y, x] = t[x, y];
                        }
                    }

                    m[0, 3] = 0;
                    m[1, 3] = 0;
                    m[2, 3] = 0;
                    m[3, 3] = 1;

                    FPinOutTransform[i] = m;
                }
            }
        }
Example No. 16
        private void ProcessFrame(object sender, EventArgs e)
        {
            Mat image = new Mat();

            _capture.Retrieve(image);
            current_frame = image;

            if (this.Disposing || this.IsDisposed)
            {
                return;
            }

            if (previous_frame == null)
            {
                //CaptureFirst();
                Console.WriteLine("First Frame Captured !!");
                previous_frame = current_frame;
                FindPoints(current_frame, out matchTime, out previousKeyPoints, out previousDescriptors);
                return;
            }

            // Detect and compute points in current frame
            FindPoints(current_frame, out matchTime, out currentKeyPoints, out currentDescriptors);

            ////////////////////////////////////////////////////////////////////////////////////
            //      Check for matches between current and previous frame
            ////////////////////////////////////////////////////////////////////////////////////

            //using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
            //{
            Mat mask;

            FindMatch2(previousDescriptors, currentDescriptors, out matchTime, previousKeyPoints, currentKeyPoints, matches,
                       out mask, out homography);

            Console.WriteLine("Total matches : " + matches.Size);

            //}

            //////////////////////////////////
            //          To-do
            //////////////////////////////////
            //IntrinsicCameraParameters intrin = new IntrinsicCameraParameters(4);
            ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();

            VectorOfMat rotationVectors    = new VectorOfMat();
            VectorOfMat translationVectors = new VectorOfMat();

            //Matrix<VectorOfDMatch> inliers = new Matrix<VectorOfDMatch>(99) ;

            //Matrix<double> intrinc = new Matrix<double>(3, 3);
            //Matrix<double> distorc = new Matrix<double>(8, 1);
            //Mat intrinc = new Mat(3, 3, DepthType.Cv64F, 1);
            //Mat distorc = new Mat(8, 1, DepthType.Cv64F, 1);
            //[6.5475340581882324e+002, 0.0F, 2.2678108877714533e+002, 0.0F, 6.5475340581882324e+002, 1.4283035250823869e+002, 0.0F, 0.0F, 1.0F ];
            //intrinc = new Mat(3, 3, DepthType.Cv64F, 1, intrinc1, 4);

            //Double[] tdat = new double[] { 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F };
            Double[,] tdat = new double[3, 3] {
                { 0.0F, 0.0F, 0.0F }, { 0.0F, 0.0F, 0.0F }, { 0.0F, 0.0F, 0.0F }
            };
            Double[,] tdat2 = new double[1, 4] {
                { 0.0F, 0.0F, 0.0F, 0.0F }
            };
            MCvPoint3D32f[] objpts = new MCvPoint3D32f[matches.Size];
            for (int i = 0; i < matches.Size; i++)
            {
                //... (listing truncated here in the original source)
Example No. 17
      /// <summary>
      /// Estimates the transformation between the 2 cameras making a stereo pair. If we have a stereo camera, where the relative position and orientation of the 2 cameras is fixed, and if we computed the poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2) respectively (that can be done with cvFindExtrinsicCameraParams2), then obviously those poses relate to each other; i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need to know the position and orientation of the 2nd camera relative to the 1st camera. That's what the described function does. It computes (R, T) such that:
      /// R2=R*R1,
      /// T2=R*T1 + T
      /// </summary>
      /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
      /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
      /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
      /// <param name="intrinsicParam1">The intrisinc parameters for camera 1, might contains some initial values. The values will be modified by this function.</param>
      /// <param name="intrinsicParam2">The intrisinc parameters for camera 2, might contains some initial values. The values will be modified by this function.</param>
      /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
      /// <param name="flags">Different flags</param>
      /// <param name="extrinsicParams">The extrinsic parameters which contains:
      /// R - The rotation matrix between the 1st and the 2nd cameras' coordinate systems; 
      /// T - The translation vector between the cameras' coordinate systems. </param>
      /// <param name="essentialMatrix">The essential matrix</param>
      /// <param name="termCrit">Termination criteria for the iterative optimiziation algorithm </param>
      /// <param name="foundamentalMatrix">The fundamental matrix</param>
      public static void StereoCalibrate(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePoints1,
         PointF[][] imagePoints2,
         IntrinsicCameraParameters intrinsicParam1,
         IntrinsicCameraParameters intrinsicParam2,
         Size imageSize,
         CvEnum.CALIB_TYPE flags,
         MCvTermCriteria termCrit,
         out ExtrinsicCameraParameters extrinsicParams,
         out Matrix<double> foundamentalMatrix,
         out Matrix<double> essentialMatrix)
      {
         Debug.Assert(objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length, "The number of images for object points should be equal to the number of images for image points");

         #region get the matrix that represent the point counts
         int[,] pointCounts = new int[objectPoints.Length, 1];
         for (int i = 0; i < objectPoints.Length; i++)
         {
            Debug.Assert(objectPoints[i].Length == imagePoints1[i].Length && objectPoints[i].Length == imagePoints2[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
            pointCounts[i, 0] = objectPoints[i].Length;
         }
         #endregion

         using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
         using (Matrix<float> imagePointMatrix1 = ToMatrix(imagePoints1))
         using (Matrix<float> imagePointMatrix2 = ToMatrix(imagePoints2))
         using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
         {
            extrinsicParams = new ExtrinsicCameraParameters();
            essentialMatrix = new Matrix<double>(3, 3);
            foundamentalMatrix = new Matrix<double>(3, 3);

            CvInvoke.cvStereoCalibrate(
               objectPointMatrix.Ptr,
               imagePointMatrix1.Ptr,
               imagePointMatrix2.Ptr,
               pointCountsMatrix.Ptr,
               intrinsicParam1.IntrinsicMatrix,
               intrinsicParam1.DistortionCoeffs,
               intrinsicParam2.IntrinsicMatrix,
               intrinsicParam2.DistortionCoeffs,
               imageSize,
               extrinsicParams.RotationVector,
               extrinsicParams.TranslationVector,
               essentialMatrix.Ptr,
               foundamentalMatrix.Ptr,
               termCrit,
               flags);
         }
      }
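A usage sketch for StereoCalibrate; both cameras are assumed to be intrinsically calibrated already, so the legacy CV_CALIB_FIX_INTRINSIC flag (an assumption about the enum value to use here) keeps their intrinsics fixed while solving only for the inter-camera pose:

      static ExtrinsicCameraParameters CalibrateStereoRig(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePointsLeft,
         PointF[][] imagePointsRight,
         IntrinsicCameraParameters leftIntrinsics,
         IntrinsicCameraParameters rightIntrinsics,
         Size imageSize)
      {
         ExtrinsicCameraParameters stereoExtrinsics;
         Matrix<double> fundamental, essential;
         CameraCalibration.StereoCalibrate(
            objectPoints, imagePointsLeft, imagePointsRight,
            leftIntrinsics, rightIntrinsics, imageSize,
            CvEnum.CALIB_TYPE.CV_CALIB_FIX_INTRINSIC,
            new MCvTermCriteria(30, 1e-5),
            out stereoExtrinsics, out fundamental, out essential);
         // stereoExtrinsics holds (R, T): the pose of camera 2 relative to camera 1
         return stereoExtrinsics;
      }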
Example No. 18
        /// <summary>
        /// Calculates the transformation matrix, which is used to transform the 3d-object points, which were scanned with reference
        /// to the moved marker coordinate system, back to the initial marker system and from there back to the camera system.
        /// The camera object is needed in order to obtain the current camera frame. Furthermore, the camera's intrinsics are needed
        /// to perform an extrinsic calibration. Note that every kind of pattern can be used.
        ///
        /// The transformation matrix is calculated as follows:
        /// * If 'UpdateTransformation' is being called for the first time, an extrinsic calibration is performed in order to find
        ///   the initial orientation of the used pattern.
        /// * If the initial orientation has already been found, the extrinsic calibration is performed again. Afterwards
        ///   the current orientation is available, represented by the extrinsic matrix.
        /// * Expand the extrinsic matrix (3x4, the current position) to a homogeneous 4x4 matrix.
        /// * The final transformation matrix is calculated as follows: _final = initial * current.Inverse();
        /// </summary>
        public bool UpdateTransformation(Camera the_cam)
        {
            Matrix extrinsicM1 = Matrix.Identity(4, 4);
            ExtrinsicCameraParameters ecp_pattern = null;
            ExtrinsicCalibration      ec_pattern  = null;

            Emgu.CV.Image <Gray, Byte> gray_img = null;
            System.Drawing.PointF[]    currentImagePoints;

            //first call: calculate extrinsics for initial position
            if (_firstCallUpdateTransformation && _cpattern != null)
            {
                gray_img = the_cam.Frame().Convert <Gray, Byte>();
                //set the patterns property: intrinsic parameters
                _cpattern.IntrinsicParameters = the_cam.Intrinsics;

                if (_cpattern.FindPattern(gray_img, out currentImagePoints))
                {
                    try
                    {
                        //extr. calibration (initial position)
                        ec_pattern  = new ExtrinsicCalibration(_cpattern.ObjectPoints, the_cam.Intrinsics);
                        ecp_pattern = ec_pattern.Calibrate(currentImagePoints);

                        if (ecp_pattern != null)
                        {
                            _ecp_A             = ecp_pattern;
                            _extrinsicMatrix_A = ExtractExctrinsicMatrix(_ecp_A);

                            _logger.Info("Initial Position found.");
                            _firstCallUpdateTransformation = false;
                        }
                    }
                    catch (Exception e)
                    {
                        _logger.Warn("Initial Position - Caught Exception: {0}.", e);
                        _firstCallUpdateTransformation = true;
                        _ecp_A = null;
                        return(false);
                    }
                }
                else
                {
                    _logger.Warn("Pattern not found.");
                    _firstCallUpdateTransformation = true;
                    _ecp_A = null;

                    return(false);
                }
            }

            //if initial position and pattern are available - calculate the transformation
            if (_ecp_A != null && _cpattern != null)
            {
                gray_img = the_cam.Frame().Convert <Gray, Byte>();

                //try to find composite pattern
                if (_cpattern.FindPattern(gray_img, out currentImagePoints))
                {
                    //extrinsic calibration in order to find the current orientation
                    ec_pattern  = new ExtrinsicCalibration(_cpattern.ObjectPoints, the_cam.Intrinsics);
                    ecp_pattern = ec_pattern.Calibrate(currentImagePoints);

                    if (ecp_pattern != null)
                    {
                        //extract current extrinsic matrix
                        extrinsicM1 = ExtractExctrinsicMatrix(ecp_pattern);
                        _logger.Info("UpdateTransformation: Transformation found.");
                    }
                    else
                    {
                        _logger.Warn("UpdateTransformation: Extrinsics of moved marker system not found.");
                        return(false);
                    }
                }
                else
                {
                    _logger.Warn("UpdateTransformation: Pattern not found.");
                    return(false);
                }

                //now calculate the final transformation matrix
                _final = _extrinsicMatrix_A * extrinsicM1.Inverse();
                return(true);
            }
            else
            {
                _logger.Warn("UpdateTransformation: No Pattern has been chosen.");
                return(false);
            }
        }
Example No. 19
      /// <summary>
      /// Estimates the extrinsic camera parameters for a view using known intrinsic parameters. The coordinates of the 3D object points and their corresponding 2D projections must be specified. This function also minimizes the back-projection error.
      /// </summary>
      /// <param name="objectPoints">The array of object points</param>
      /// <param name="imagePoints">The array of corresponding image points</param>
      /// <param name="intrin">The intrinsic parameters</param>
      /// <returns>The extrinsic parameters</returns>
      public static ExtrinsicCameraParameters FindExtrinsicCameraParams2(
          MCvPoint3D32f[] objectPoints,
          PointF[] imagePoints,
          IntrinsicCameraParameters intrin)
      {
         ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();

         GCHandle handle1 = GCHandle.Alloc(objectPoints, GCHandleType.Pinned);
         GCHandle handle2 = GCHandle.Alloc(imagePoints, GCHandleType.Pinned);
         using (Matrix<float> objectPointMatrix = new Matrix<float>(objectPoints.Length, 3, handle1.AddrOfPinnedObject()))
         using (Matrix<float> imagePointMatrix = new Matrix<float>(imagePoints.Length, 2, handle2.AddrOfPinnedObject()))
            CvInvoke.cvFindExtrinsicCameraParams2(objectPointMatrix, imagePointMatrix, intrin.IntrinsicMatrix.Ptr, intrin.DistortionCoeffs.Ptr, p.RotationVector.Ptr, p.TranslationVector.Ptr, 0);
         handle1.Free();
         handle2.Free();

         return p;
      }
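A minimal usage sketch: recovering a calibration board's pose from a single view, given its model-space corners and the matching image detections:

      static ExtrinsicCameraParameters FindBoardPose(
         MCvPoint3D32f[] boardPoints,    // board corners in model space
         PointF[] detectedCorners,       // matching detections in the image
         IntrinsicCameraParameters intrinsics)
      {
         ExtrinsicCameraParameters pose =
            CameraCalibration.FindExtrinsicCameraParams2(boardPoints, detectedCorners, intrinsics);
         // pose.ExtrinsicMatrix is the 3x4 [R|t] mapping model space into camera space
         return pose;
      }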
Example No. 20
      /// <summary>
      /// Computes projections of 3D points to the image plane given intrinsic and extrinsic camera parameters. 
      /// Optionally, the function computes jacobians - matrices of partial derivatives of image points as functions of all the input parameters w.r.t. the particular parameters, intrinsic and/or extrinsic. 
      /// The jacobians are used during the global optimization in cvCalibrateCamera2 and cvFindExtrinsicCameraParams2. 
      /// The function itself is also used to compute the back-projection error with the current intrinsic and extrinsic parameters.
      /// </summary>
      /// <remarks>Note, that with intrinsic and/or extrinsic parameters set to special values, the function can be used to compute just extrinsic transformation or just intrinsic transformation (i.e. distortion of a sparse set of points) </remarks>
      /// <param name="objectPoints">The array of object points.</param>
      /// <param name="extrin">Extrinsic parameters</param>
      /// <param name="intrin">Intrinsic parameters</param>
      /// <param name="mats">Optional matrix supplied in the following order: dpdrot, dpdt, dpdf, dpdc, dpddist</param>
      /// <returns>The array of image points which is the projection of <paramref name="objectPoints"/></returns>
      public static PointF[] ProjectPoints(
          MCvPoint3D32f[] objectPoints,
          ExtrinsicCameraParameters extrin,
          IntrinsicCameraParameters intrin,
          params Matrix<float>[] mats)
      {
         PointF[] imagePoints = new PointF[objectPoints.Length];

         int matsLength = mats.Length;
         GCHandle handle1 = GCHandle.Alloc(objectPoints, GCHandleType.Pinned);
         GCHandle handle2 = GCHandle.Alloc(imagePoints, GCHandleType.Pinned);
         using (Matrix<float> pointMatrix = new Matrix<float>(objectPoints.Length, 1, 3, handle1.AddrOfPinnedObject(), 3 * sizeof(float)))
         using (Matrix<float> imagePointMatrix = new Matrix<float>(imagePoints.Length, 1, 2, handle2.AddrOfPinnedObject(), 2 * sizeof(float)))
            CvInvoke.cvProjectPoints2(
                  pointMatrix,
                  extrin.RotationVector.Ptr,
                  extrin.TranslationVector.Ptr,
                  intrin.IntrinsicMatrix.Ptr,
                  intrin.DistortionCoeffs.Ptr,
                  imagePointMatrix,
                  matsLength > 0 ? mats[0] : IntPtr.Zero,
                  matsLength > 1 ? mats[1] : IntPtr.Zero,
                  matsLength > 2 ? mats[2] : IntPtr.Zero,
                  matsLength > 3 ? mats[3] : IntPtr.Zero,
                  matsLength > 4 ? mats[4] : IntPtr.Zero, 
                  0.0);
         handle1.Free();
         handle2.Free();
         return imagePoints;
      }
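ProjectPoints is also the natural way to measure reprojection error, as Example No. 2 does via CalibrationError; a sketch:

      static double MaxReprojectionError(
         MCvPoint3D32f[] objectPoints,
         PointF[] observed,
         ExtrinsicCameraParameters extrin,
         IntrinsicCameraParameters intrin)
      {
         PointF[] projected = CameraCalibration.ProjectPoints(objectPoints, extrin, intrin);
         double max = 0.0;
         for (int i = 0; i < observed.Length; i++)
         {
            double dx = projected[i].X - observed[i].X;
            double dy = projected[i].Y - observed[i].Y;
            max = Math.Max(max, Math.Sqrt(dx * dx + dy * dy)); // pixel distance
         }
         return max;
      }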
Example No. 21
 public Extrinsics(ExtrinsicCameraParameters extrinsics)
 {
     this.extrinsics = extrinsics;
 }
Example No. 22
        public void ProcessData(MotionControllerModel mc)
        {
            if (_camera.Handle != IntPtr.Zero)
            {
                Vector3             rawPosition    = Vector3.zero;
                Vector3             fusionPosition = Vector3.zero;
                PSMoveTrackerStatus trackerStatus  = mc.TrackerStatus[_camera];

                if (!mc.Design)
                {
                    trackerStatus = PsMoveApi.psmove_tracker_get_status(_camera.Handle, mc.Handle);
                }

                if (trackerStatus == PSMoveTrackerStatus.Tracking)
                {
                    float rx, ry, rrad;
                    float fx, fy, fz;
                    PsMoveApi.psmove_tracker_get_position(_camera.Handle, mc.Handle, out rx, out ry, out rrad);
                    PsMoveApi.psmove_fusion_get_position(_camera.Fusion, mc.Handle, out fx, out fy, out fz);
                    rx = (int)(rx + 0.5);
                    ry = (int)(ry + 0.5);

                    rawPosition    = new Vector3(rx, ry, rrad);
                    fusionPosition = new Vector3(fx, fy, fz);
                }
                else if (mc.Design)
                {
                    switch (_camera.Calibration.Index)
                    {
                    case 0:
                        rawPosition = new Vector3(129, 280, 8.970074f);
                        break;

                    case 1:
                        rawPosition = new Vector3(180, 293, 11.9714022f);
                        break;

                    case 2:
                        rawPosition = new Vector3(528, 286, 9.038924f);
                        break;

                    case 3:
                        rawPosition = new Vector3(389, 275, 6.530668f);
                        break;
                    }
                }
                mc.TrackerStatus[_camera]  = trackerStatus;
                mc.RawPosition[_camera]    = rawPosition;
                mc.FusionPosition[_camera] = fusionPosition;

                if (trackerStatus == PSMoveTrackerStatus.Tracking || mc.Design)
                {
                    // controller position -> rectangle surrounding the sphere, in image coordinates
                    PointF[] imgPts = CvHelper.GetImagePointsF(mc.RawPosition[_camera]);

                    ExtrinsicCameraParameters ex = CameraCalibration.FindExtrinsicCameraParams2(
                        _camera.Calibration.ObjectPoints2D,
                        imgPts,
                        _camera.Calibration.IntrinsicParameters);

                    Matrix <double> coordinatesInCameraSpace_homo = new Matrix <double>(new double[]
                    {
                        ex.TranslationVector[0, 0],
                        ex.TranslationVector[1, 0],
                        ex.TranslationVector[2, 0],
                        1
                    });
                    mc.CameraPosition[_camera] = new Vector3(
                        (float)coordinatesInCameraSpace_homo[0, 0],
                        (float)coordinatesInCameraSpace_homo[1, 0],
                        (float)coordinatesInCameraSpace_homo[2, 0]);


                    ex.RotationVector[0, 0] += (Math.PI / 180) * (_camera.Calibration.RotX + _camera.Calibration.XAngle);
                    ex.RotationVector[1, 0] += (Math.PI / 180) * (_camera.Calibration.RotY + _camera.Calibration.YAngle);
                    ex.RotationVector[2, 0] += (Math.PI / 180) * (_camera.Calibration.RotZ + _camera.Calibration.ZAngle);

                    _camera.Calibration.ExtrinsicParameters[mc.Id] = ex;
                    Matrix <double> minusRotation = new Matrix <double>(3, 3);
                    minusRotation = CvHelper.Rotate(
                        -_camera.Calibration.RotX - _camera.Calibration.XAngle,
                        -_camera.Calibration.RotY - _camera.Calibration.YAngle,
                        -_camera.Calibration.RotZ - _camera.Calibration.ZAngle);

                    Matrix <double> R3x3_cameraToWorld = new Matrix <double>(3, 3);
                    R3x3_cameraToWorld = CvHelper.Rotate(
                        _camera.Calibration.RotX,
                        _camera.Calibration.RotY + _camera.Calibration.YAngle,
                        _camera.Calibration.RotZ);

                    Matrix <double> rotInv = new Matrix <double>(3, 3);
                    CvInvoke.cvInvert(ex.RotationVector.RotationMatrix.Ptr, rotInv, SOLVE_METHOD.CV_LU);

                    Matrix <double> test = CvHelper.ConvertToHomogenous(-1 * R3x3_cameraToWorld);

                    _camera.Calibration.ObjectPointsProjected = CameraCalibration.ProjectPoints(
                        _camera.Calibration.ObjectPoints3D,
                        _camera.Calibration.ExtrinsicParameters[mc.Id],
                        _camera.Calibration.IntrinsicParameters);

                    Matrix <double> cameraPositionInWorldSpace4x4 = new Matrix <double>(new double[, ]
                    {
                        { 1, 0, 0, _camera.Calibration.TranslationToWorld[0, 0] },
                        { 0, 1, 0, _camera.Calibration.TranslationToWorld[1, 0] },
                        { 0, 0, 1, _camera.Calibration.TranslationToWorld[2, 0] },
                        { 0, 0, 0, 1 },
                    });

                    Matrix <double> Rt_homo      = CvHelper.ConvertToHomogenous(R3x3_cameraToWorld);
                    Matrix <double> x_world_homo = CvHelper.ConvertToHomogenous(minusRotation) * coordinatesInCameraSpace_homo;
                    Rt_homo[0, 3] = x_world_homo[0, 0];
                    Rt_homo[1, 3] = x_world_homo[1, 0];
                    Rt_homo[2, 3] = x_world_homo[2, 0];
                    x_world_homo  = cameraPositionInWorldSpace4x4 * x_world_homo;
                    Vector3 v3world = new Vector3((float)x_world_homo[0, 0], (float)x_world_homo[1, 0],
                                                  (float)x_world_homo[2, 0]);
                    mc.WorldPosition[_camera] = v3world;

                    for (int i = mc.PositionHistory[_camera].Length - 1; i > 0; --i)
                    {
                        mc.PositionHistory[_camera][i] = mc.PositionHistory[_camera][i - 1];
                    }
                    mc.PositionHistory[_camera][0] = v3world;
                }
            }
        } // ProcessData