/// <summary>
    /// Calculates the transformation matrix, which is used to transform the 3d-object points, which were scanned with reference
    /// to the moved marker coordinate system, back to the initial marker system and henceforth back to the camera system.
    /// The camera object is needed in order to gain the current camera frame. Furthermore, the cameras' intrinsics are needed
    /// to perform an extrinsic calibration. Note, that every kind of pattern can be used.
    ///
    /// The transformation matrix is calculated as follows:
    /// * If 'UpdateTransformation' is being called for the first time, an extrinsic calibration is performed in order to find
    ///   the initial orientation of the used pattern.
    /// * If the initial orientation has already been found, the extrinsic calibration is performed again. Afterwards
    ///   the current orientation is available, represented by the extrinsic matrix.
    /// * Form the extrinsic matrix (4x3) (current position) to a homogeneous 4x4 matrix.
    /// * The final transformation matrix is calculated as follows: _final = initial * current.Inverse();
    /// </summary>
    /// <param name="the_cam"> Camera used to grab the current frame; its intrinsics must be set. </param>
    /// <returns> true if the transformation matrix could be (re)computed; false otherwise. </returns>
    public bool UpdateTransformation(Camera the_cam)
    {
      Matrix extrinsicM1 = Matrix.Identity(4, 4);
      ExtrinsicCameraParameters ecp_pattern = null;
      ExtrinsicCalibration ec_pattern = null;
      Emgu.CV.Image<Gray, Byte> gray_img = null;
      System.Drawing.PointF[] currentImagePoints;

      //first call: calculate extrinsics for initial position
      if (_firstCallUpdateTransformation && _cpattern != null)
      {
        gray_img = the_cam.Frame().Convert<Gray, Byte>();
        //set the patterns property: intrinsic parameters
        _cpattern.IntrinsicParameters = the_cam.Intrinsics;

        if (!_cpattern.FindPattern(gray_img, out currentImagePoints))
        {
          //reset so the next call retries the initial calibration
          _logger.Warn("Pattern not found.");
          _firstCallUpdateTransformation = true;
          _ecp_A = null;
          return false;
        }

        try
        {
          //extr. calibration (initial position)
          ec_pattern = new ExtrinsicCalibration(_cpattern.ObjectPoints, the_cam.Intrinsics);
          ecp_pattern = ec_pattern.Calibrate(currentImagePoints);

          if (ecp_pattern == null)
          {
            //BUGFIX: a null calibration result used to be silently ignored; the method then
            //fell through and reported the misleading "No Pattern has been chosen." warning.
            _logger.Warn("Initial Position - extrinsic calibration returned no result.");
            _firstCallUpdateTransformation = true;
            _ecp_A = null;
            return false;
          }

          //remember the initial pose; subsequent calls only recompute the current pose
          _ecp_A = ecp_pattern;
          _extrinsicMatrix_A = ExtractExctrinsicMatrix(_ecp_A);

          _logger.Info("Initial Position found.");
          _firstCallUpdateTransformation = false;
        }
        catch (Exception e)
        {
          //reset state so a later call can retry the initial calibration
          _logger.Warn("Initial Position - Caught Exception: {0}.", e);
          _firstCallUpdateTransformation = true;
          _ecp_A = null;
          return false;
        }
      }

      //if initial position and pattern are available - calculate the transformation
      if (_ecp_A != null && _cpattern != null)
      {
        //grab a fresh frame: the pattern may have moved since the initial calibration
        gray_img = the_cam.Frame().Convert<Gray, Byte>();

        //try to find composite pattern
        if (_cpattern.FindPattern(gray_img, out currentImagePoints))
        {
          //extrinsic calibration in order to find the current orientation
          ec_pattern = new ExtrinsicCalibration(_cpattern.ObjectPoints, the_cam.Intrinsics);
          ecp_pattern = ec_pattern.Calibrate(currentImagePoints);

          if (ecp_pattern != null)
          {
            //extract current extrinsic matrix (homogeneous 4x4)
            extrinsicM1 = ExtractExctrinsicMatrix(ecp_pattern);
            _logger.Info("UpdateTransformation: Transformation found.");
          }
          else
          {
            _logger.Warn("UpdateTransformation: Extrinsics of moved marker system not found.");
            return false;
          }
        }
        else
        {
          _logger.Warn("UpdateTransformation: Pattern not found.");
          return false;
        }

        //now calculate the final transformation matrix: initial pose * inverse(current pose)
        _final = _extrinsicMatrix_A * extrinsicM1.Inverse();
        return true;
      }
      else
      {
        _logger.Warn("UpdateTransformation: No Pattern has been chosen.");
        return false;
      }
    }
// Example #2 (non-code scrape artifact; original separator text was "Exemple #2" / "0")
        /// <summary>
        /// Tries to find the composite pattern and returns the output parameter image_points.
        /// In case of success the boolean value 'true' is returned.
        /// Note, that CompositePatterns can only be found, if the cameras' intrinsics are set.
        ///
        /// The algorithm is working as follows:
        /// If the main pattern 'patternA' could be found, the algorithm is finished already and the resulting
        /// image_points are known and returned.
        /// If only 'patternB' could be found, the given object_points of 'patternA' are transformed in the
        /// 'patternB' coordinate system, using the predefined transformation matrix.
        /// Furthermore, an extrinsic calibration is performed in order to find the extrinsic matrix, which describes
        /// the relation between camera coordinate system and the coordinate system of 'patternB'.
        /// Finally, the library function 'ProjectPoints' is called in order to project the transformed object_points
        /// (currently expressed in 'patternB'-coordinates) into the camera image plane.
        /// The projected points correspond to the image_points of 'patternA'.
        /// ==> To sum up: the predefined transformation is used to calculate the image_points of 'patternA', even
        /// if 'patternA' is invisible.
        /// </summary>
        /// <param name="img"> Input grayscale image. </param>
        /// <param name="image_points"> 2D output image points. </param>
        /// <returns> true... if pattern has been found; false... otherwise. </returns>
        public override bool FindPattern(Emgu.CV.Image <Emgu.CV.Structure.Gray, byte> img, out System.Drawing.PointF[] image_points)
        {
            if (this.IntrinsicParameters == null || _patternA == null || _patternB == null)
            {
                _logger.Warn("Error: Intrinsics are needed to find a Composite Pattern but not available.");
                image_points = null;
                return(false);
            }

            System.Drawing.PointF[] currentImagePointsA;
            System.Drawing.PointF[] currentImagePointsB;

            //set the object_points of the composite pattern to the object_points of 'patternA'
            this.ObjectPoints = _patternA.ObjectPoints;

            //try to find 'patternA'; if it is visible, its image points are the result already
            if (_patternA.FindPattern(img, out currentImagePointsA))
            {
                image_points = currentImagePointsA;
                return(true);
            }

            //else: try to find 'patternB'
            if (!_patternB.FindPattern(img, out currentImagePointsB))
            {
                //reset the image_points if the pattern could not be found.
                image_points = null;
                return(false);
            }

            try
            {
                //if 'patternB' has been found: find the extrinsic matrix (relation between coordinate systems of 'patternB' and camera)
                ExtrinsicCalibration ec_B = new ExtrinsicCalibration(_patternB.ObjectPoints, this.IntrinsicParameters);
                Emgu.CV.ExtrinsicCameraParameters ecp_B = ec_B.Calibrate(currentImagePointsB);

                if (ecp_B == null)
                {
                    _logger.Warn("Error calculating extrinsic parameters.");
                    image_points = null;
                    return(false);
                }

                //NOTE: the original code additionally built a homogeneous 4x4 'extrinsic_matrix'
                //from ecp_B here, but that local was never read - the dead computation was removed.

                //transform object points of A into B coordinate system.
                Emgu.CV.Structure.MCvPoint3D32f[] transformedCornerPoints =
                    MatrixTransformation.TransformVectorToEmgu(_transformationBToA.Inverse(), 1.0, _patternA.ObjectPoints).ToArray <Emgu.CV.Structure.MCvPoint3D32f>();

                //project the points into the 2D camera plane (image_points)
                image_points = Emgu.CV.CameraCalibration.ProjectPoints(transformedCornerPoints, ecp_B, this.IntrinsicParameters);
                return(true);
            }
            catch (Exception e)
            {
                _logger.Warn("Caught Exception: {0}.", e);
                image_points = null;
                return(false);
            }
        }
        /// <summary>
        /// Computes the transformation that maps 3d-object points, scanned relative to the moved
        /// marker coordinate system, back into the initial marker system and thus into the camera system.
        /// The supplied camera delivers the current frame and its intrinsics, which are required for
        /// the extrinsic calibration. Any pattern type may be used.
        ///
        /// Procedure:
        /// * On the first invocation an extrinsic calibration establishes the initial pattern pose.
        /// * On later invocations the calibration is repeated to obtain the current pose as an
        ///   extrinsic matrix, which is expanded from 4x3 to a homogeneous 4x4 matrix.
        /// * The result is stored as: _final = initial * current.Inverse();
        /// </summary>
        /// <param name="the_cam"> Camera providing the current frame and intrinsics. </param>
        /// <returns> true if the transformation could be computed; false otherwise. </returns>
        public bool UpdateTransformation(Camera the_cam)
        {
            //first call: establish the reference (initial) pose of the pattern
            if (_firstCallUpdateTransformation && _cpattern != null)
            {
                Emgu.CV.Image <Gray, Byte> initialFrame = the_cam.Frame().Convert <Gray, Byte>();

                //the pattern needs the camera intrinsics before it can be searched for
                _cpattern.IntrinsicParameters = the_cam.Intrinsics;

                System.Drawing.PointF[] initialImagePoints;
                if (!_cpattern.FindPattern(initialFrame, out initialImagePoints))
                {
                    _logger.Warn("Pattern not found.");
                    _firstCallUpdateTransformation = true;
                    _ecp_A = null;

                    return(false);
                }

                try
                {
                    //extrinsic calibration for the initial position
                    ExtrinsicCalibration initialCalibration =
                        new ExtrinsicCalibration(_cpattern.ObjectPoints, the_cam.Intrinsics);
                    ExtrinsicCameraParameters initialExtrinsics =
                        initialCalibration.Calibrate(initialImagePoints);

                    //a null result deliberately falls through; the availability
                    //check further down then reports the failure
                    if (initialExtrinsics != null)
                    {
                        _ecp_A             = initialExtrinsics;
                        _extrinsicMatrix_A = ExtractExctrinsicMatrix(_ecp_A);

                        _logger.Info("Initial Position found.");
                        _firstCallUpdateTransformation = false;
                    }
                }
                catch (Exception e)
                {
                    _logger.Warn("Initial Position - Caught Exception: {0}.", e);
                    _firstCallUpdateTransformation = true;
                    _ecp_A = null;
                    return(false);
                }
            }

            //without an initial pose and a pattern nothing can be computed
            if (_ecp_A == null || _cpattern == null)
            {
                _logger.Warn("UpdateTransformation: No Pattern has been chosen.");
                return(false);
            }

            //grab a fresh frame and locate the pattern in its current (possibly moved) position
            Emgu.CV.Image <Gray, Byte> currentFrame = the_cam.Frame().Convert <Gray, Byte>();

            System.Drawing.PointF[] currentImagePoints;
            if (!_cpattern.FindPattern(currentFrame, out currentImagePoints))
            {
                _logger.Warn("UpdateTransformation: Pattern not found.");
                return(false);
            }

            //extrinsic calibration yields the current orientation of the pattern
            ExtrinsicCalibration currentCalibration =
                new ExtrinsicCalibration(_cpattern.ObjectPoints, the_cam.Intrinsics);
            ExtrinsicCameraParameters currentExtrinsics =
                currentCalibration.Calibrate(currentImagePoints);

            if (currentExtrinsics == null)
            {
                _logger.Warn("UpdateTransformation: Extrinsics of moved marker system not found.");
                return(false);
            }

            //current pose as a homogeneous 4x4 matrix
            Matrix currentPose = ExtractExctrinsicMatrix(currentExtrinsics);
            _logger.Info("UpdateTransformation: Transformation found.");

            //final transformation: initial pose * inverse(current pose)
            _final = _extrinsicMatrix_A * currentPose.Inverse();
            return(true);
        }
    /// <summary>
    /// Tries to find the composite pattern and returns the output parameter image_points.
    /// In case of success the boolean value 'true' is returned.
    /// Note, that CompositePatterns can only be found, if the cameras' intrinsics are set.
    /// 
    /// The algorithm is working as follows:
    /// If the main pattern 'patternA' could be found, the algorithm is finished already and the resulting
    /// image_points are known and returned.
    /// If only 'patternB' could be found, the given object_points of 'patternA' are transformed in the 
    /// 'patternB' coordinate system, using the predefined transformation matrix.
    /// Furthermore, an extrinsic calibration is performed in order to find the extrinsic matrix, which describes
    /// the relation between camera coordinate system and the coordinate system of 'patternB'.
    /// Finally, the library function 'ProjectPoints' is called in order to project the transformed object_points
    /// (currently expressed in 'patternB'-coordinates) into the camera image plane.
    /// The projected points correspond to the image_points of 'patternA'.
    /// ==> To sum up: the predefined transformation is used to calculate the image_points of 'patternA', even
    /// if 'patternA' is invisible.
    /// </summary>
    /// <param name="img"> Input grayscale image. </param>
    /// <param name="image_points"> 2D output image points. </param>
    /// <returns> true... if pattern has been found; false... otherwise. </returns>
    public override bool FindPattern(Emgu.CV.Image<Emgu.CV.Structure.Gray, byte> img, out System.Drawing.PointF[] image_points)
    {
      if (this.IntrinsicParameters == null || _patternA == null || _patternB == null)
      {
        _logger.Warn("Error: Intrinsics are needed to find a Composite Pattern but not available.");
        image_points = null;
        return false;
      }

      System.Drawing.PointF[] currentImagePointsA;
      System.Drawing.PointF[] currentImagePointsB;

      //set the object_points of the composite pattern to the object_points of 'patternA'
      this.ObjectPoints = _patternA.ObjectPoints;

      //try to find 'patternA'; if visible, its image points are the result already
      if (_patternA.FindPattern(img, out currentImagePointsA))
      {
        image_points = currentImagePointsA;
        return true;
      }

      //else: try to find 'patternB'
      if (!_patternB.FindPattern(img, out currentImagePointsB))
      {
        //reset the image_points if the pattern could not be found.
        image_points = null;
        return false;
      }

      try
      {
        //if 'patternB' has been found: find the extrinsic matrix (relation between coordinate systems of 'patternB' and camera)
        ExtrinsicCalibration ec_B = new ExtrinsicCalibration(_patternB.ObjectPoints, this.IntrinsicParameters);
        Emgu.CV.ExtrinsicCameraParameters ecp_B = ec_B.Calibrate(currentImagePointsB);

        if (ecp_B == null)
        {
          _logger.Warn("Error calculating extrinsic parameters.");
          image_points = null;
          return false;
        }

        //NOTE: the original code also built a homogeneous 4x4 'extrinsic_matrix' from ecp_B
        //here, but the local was never read afterwards - the dead computation was removed.

        //transform object points of A into B coordinate system.
        Emgu.CV.Structure.MCvPoint3D32f[] transformedCornerPoints =
            MatrixTransformation.TransformVectorToEmgu(_transformationBToA.Inverse(), 1.0, _patternA.ObjectPoints).ToArray<Emgu.CV.Structure.MCvPoint3D32f>();

        //project the points into the 2D camera plane (image_points)
        image_points = Emgu.CV.CameraCalibration.ProjectPoints(transformedCornerPoints, ecp_B, this.IntrinsicParameters);
        return true;
      }
      catch (Exception e)
      {
        _logger.Warn("Caught Exception: {0}.", e);
        image_points = null;
        return false;
      }
    }