Exemplo n.º 1
0
        /// <summary>
        /// Verifies that MathNet (Parsley) vectors and matrices convert correctly
        /// to their Emgu CV counterparts (MCvPoint3D32f, MCvPoint3D64f, Matrix&lt;double&gt;).
        /// </summary>
        public void TestConvertParsleyToEmgu()
        {
            MathNet.Numerics.LinearAlgebra.Vector v = new MathNet.Numerics.LinearAlgebra.Vector(new double[] { 1.0f, 2.0f, 3.0f });

            // Single-precision point conversion.
            Emgu.CV.Structure.MCvPoint3D32f f = v.ToEmguF();
            Assert.AreEqual(1.0, f.x);
            Assert.AreEqual(2.0, f.y);
            Assert.AreEqual(3.0, f.z);

            // Double-precision point conversion.
            Emgu.CV.Structure.MCvPoint3D64f d = v.ToEmgu();
            Assert.AreEqual(1.0, d.x);
            Assert.AreEqual(2.0, d.y);
            Assert.AreEqual(3.0, d.z);

            // Matrix conversion: every cell must survive the round trip.
            double[,] data = new double[2, 3] { { 1.0, 2.0, 3.0 }, { 4.0, 5.0, 6.0 } };
            MathNet.Numerics.LinearAlgebra.Matrix m = MathNet.Numerics.LinearAlgebra.Matrix.Create(data);
            Emgu.CV.Matrix<double> m2 = m.ToEmgu();

            for (int row = 0; row < data.GetLength(0); ++row)
            {
                for (int col = 0; col < data.GetLength(1); ++col)
                {
                    Assert.AreEqual(data[row, col], m2[row, col]);
                }
            }
        }
Exemplo n.º 2
0
 /// <summary>
 /// Converts an Emgu CV 3D single-precision point into a MathNet.Numerics
 /// (Parsley) vector holding the same (x, y, z) components as doubles.
 /// </summary>
 /// <param name="p">The MCvPoint3D32f to convert.</param>
 /// <returns>A three-element MathNet.Numerics.LinearAlgebra.Vector.</returns>
 public static MathNet.Numerics.LinearAlgebra.Vector ToParsley(this Emgu.CV.Structure.MCvPoint3D32f p)
 {
     // Build the vector directly from the point's components.
     return new MathNet.Numerics.LinearAlgebra.Vector(new double[] { p.x, p.y, p.z });
 }
    /// <summary>
    /// Verifies that Emgu CV points and matrices convert correctly to their
    /// MathNet (Parsley) counterparts.
    /// </summary>
    public void TestConvertEmguToParsley() {
      // Point conversion: each component must be carried over.
      Emgu.CV.Structure.MCvPoint3D32f p = new Emgu.CV.Structure.MCvPoint3D32f(1.0f, 2.0f, 3.0f);
      MathNet.Numerics.LinearAlgebra.Vector v = p.ToParsley();

      Assert.AreEqual(1.0, v[0]);
      Assert.AreEqual(2.0, v[1]);
      Assert.AreEqual(3.0, v[2]);

      // Matrix conversion: every cell must survive the round trip.
      double[,] data = new double[2, 3] { { 1.0, 2.0, 3.0 }, { 4.0, 5.0, 6.0 } };

      Emgu.CV.Matrix<double> m = new Emgu.CV.Matrix<double>(data);
      MathNet.Numerics.LinearAlgebra.Matrix m2 = m.ToParsley();

      for (int row = 0; row < data.GetLength(0); ++row) {
        for (int col = 0; col < data.GetLength(1); ++col) {
          Assert.AreEqual(data[row, col], m2[row, col]);
        }
      }
    }
Exemplo n.º 4
0
        /// <summary>
        /// Verifies that Emgu CV points and matrices convert correctly to their
        /// MathNet (Parsley) counterparts.
        /// </summary>
        public void TestConvertEmguToParsley()
        {
            // Point conversion: each component must be carried over.
            Emgu.CV.Structure.MCvPoint3D32f p = new Emgu.CV.Structure.MCvPoint3D32f(1.0f, 2.0f, 3.0f);
            MathNet.Numerics.LinearAlgebra.Vector v = p.ToParsley();

            Assert.AreEqual(1.0, v[0]);
            Assert.AreEqual(2.0, v[1]);
            Assert.AreEqual(3.0, v[2]);

            // Matrix conversion: every cell must survive the round trip.
            double[,] data = new double[2, 3] { { 1.0, 2.0, 3.0 }, { 4.0, 5.0, 6.0 } };

            Emgu.CV.Matrix<double> m = new Emgu.CV.Matrix<double>(data);
            MathNet.Numerics.LinearAlgebra.Matrix m2 = m.ToParsley();

            for (int row = 0; row < data.GetLength(0); ++row)
            {
                for (int col = 0; col < data.GetLength(1); ++col)
                {
                    Assert.AreEqual(data[row, col], m2[row, col]);
                }
            }
        }
Exemplo n.º 5
0
        /// <summary>
        /// Tries to find the composite pattern and returns the output parameter image_points.
        /// In case of success the boolean value 'true' is returned.
        /// Note, that CompositePatterns can only be found, if the cameras' intrinsics are set.
        ///
        /// The algorithm is working as follows:
        /// If the main pattern 'patternA' could be found, the algorithm is finished already and the resulting
        /// image_points are known and returned.
        /// If only 'patternB' could be found, the given object_points of 'patternA' are transformed in the
        /// 'patternB' coordinate system, using the predefined transformation matrix.
        /// Furthermore, an extrinsic calibration is performed in order to find the extrinsic matrix, which describes
        /// the relation between camera coordinate system and the coordinate system of 'patternB'.
        /// Finally, the library function 'ProjectPoints' is called in order to project the transformed object_points
        /// (currently expressed in 'patternB'-coordinates) into the camera image plane.
        /// The projected points correspond to the image_points of 'patternA'.
        /// ==> To sum up: the predefined transformation is used to calculate the image_points of 'patternA', even
        /// if 'patternA' is invisible.
        /// </summary>
        /// <param name="img"> Input grayscale image. </param>
        /// <param name="image_points"> 2D output image points; null whenever false is returned. </param>
        /// <returns> true... if pattern has been found; false... otherwise. </returns>
        public override bool FindPattern(Emgu.CV.Image <Emgu.CV.Structure.Gray, byte> img, out System.Drawing.PointF[] image_points)
        {
            // Intrinsics and both sub-patterns are required; otherwise fail early below.
            if (this.IntrinsicParameters != null && _patternA != null && _patternB != null)
            {
                bool foundA = false;
                System.Drawing.PointF[] currentImagePointsA;
                System.Drawing.PointF[] currentImagePointsB;

                //set the object_points of the composite pattern to the object_points of 'patternA'
                this.ObjectPoints = _patternA.ObjectPoints;

                //try to find 'patternA'
                foundA = _patternA.FindPattern(img, out currentImagePointsA);

                //if 'patternA' could be found: the image_points have been found.
                if (foundA)
                {
                    image_points = currentImagePointsA;
                    //_logger.Info("Pattern found.");
                    return(true);
                }
                else
                //else: try to find 'patternB'
                if (_patternB.FindPattern(img, out currentImagePointsB))
                {
                    ExtrinsicCalibration ec_B = null;
                    Emgu.CV.ExtrinsicCameraParameters ecp_B = null;
                    // NOTE(review): extrinsic_matrix is populated via SetMatrix below but never
                    // read afterward in this method — looks like dead state; confirm before removing.
                    Matrix extrinsic_matrix = Matrix.Identity(4, 4);
                    Matrix temp_matrix      = null;
                    // Pre-allocated here, but unconditionally overwritten by the ToArray call below.
                    Emgu.CV.Structure.MCvPoint3D32f[] transformedCornerPoints = new Emgu.CV.Structure.MCvPoint3D32f[_patternA.ObjectPoints.Length];

                    try
                    {
                        //if 'patternB' has been found: find the extrinsic matrix (relation between coordinate systems of 'patternB' and camera
                        ec_B  = new ExtrinsicCalibration(_patternB.ObjectPoints, this.IntrinsicParameters);
                        ecp_B = ec_B.Calibrate(currentImagePointsB);

                        if (ecp_B != null)
                        {
                            //form the resulting extrinsic matrix to a homogeneous (4x4) matrix.
                            temp_matrix = Parsley.Core.Extensions.ConvertToParsley.ToParsley(ecp_B.ExtrinsicMatrix);
                            extrinsic_matrix.SetMatrix(0, temp_matrix.RowCount - 1, 0, temp_matrix.ColumnCount - 1, temp_matrix);

                            //transform object points of A into B coordinate system.
                            transformedCornerPoints = MatrixTransformation.TransformVectorToEmgu(_transformationBToA.Inverse(), 1.0, _patternA.ObjectPoints).ToArray <Emgu.CV.Structure.MCvPoint3D32f>();

                            //project the points into the 2D camera plane (image_points)
                            image_points = Emgu.CV.CameraCalibration.ProjectPoints(transformedCornerPoints, ecp_B, this.IntrinsicParameters);
                            return(true);
                        }
                        else
                        {
                            _logger.Warn("Error calculating extrinsic parameters.");
                            image_points = null;
                            return(false);
                        }
                    }
                    catch (Exception e)
                    {
                        // Any failure during calibration/projection is logged and reported as "not found".
                        _logger.Warn("Caught Exception: {0}.", e);
                        image_points = null;
                        return(false);
                    }
                }
                else
                {
                    //reset the image_points if the pattern could not be found.
                    image_points = null;
                    return(false);
                }
            }
            else
            {
                _logger.Warn("Error: Intrinsics are needed to find a Composite Pattern but not available.");
                image_points = null;
                return(false);
            }
        }
    /// <summary>
    /// Tries to find the composite pattern and returns the output parameter image_points.
    /// In case of success the boolean value 'true' is returned.
    /// Note, that CompositePatterns can only be found, if the cameras' intrinsics are set.
    ///
    /// Strategy: if the main pattern 'patternA' is visible, its image points are the
    /// result. Otherwise, if 'patternB' is visible, an extrinsic calibration against
    /// 'patternB' is performed, the object points of 'patternA' are transformed into
    /// the 'patternB' coordinate system via the predefined transformation matrix, and
    /// 'ProjectPoints' projects them into the camera image plane. The projected points
    /// correspond to the image points of 'patternA', even though 'patternA' is invisible.
    /// </summary>
    /// <param name="img"> Input grayscale image. </param>
    /// <param name="image_points"> 2D output image points; null whenever false is returned. </param>
    /// <returns> true... if pattern has been found; false... otherwise. </returns>
    public override bool FindPattern(Emgu.CV.Image<Emgu.CV.Structure.Gray, byte> img, out System.Drawing.PointF[] image_points)
    {
      image_points = null;

      // Intrinsics and both sub-patterns are required.
      if (this.IntrinsicParameters == null || _patternA == null || _patternB == null)
      {
        _logger.Warn("Error: Intrinsics are needed to find a Composite Pattern but not available.");
        return false;
      }

      // The composite pattern reports the object points of its main pattern 'patternA'.
      this.ObjectPoints = _patternA.ObjectPoints;

      // Case 1: 'patternA' itself is visible - its image points are the result.
      System.Drawing.PointF[] imagePointsA;
      if (_patternA.FindPattern(img, out imagePointsA))
      {
        image_points = imagePointsA;
        //_logger.Info("Pattern found.");
        return true;
      }

      // Case 2: fall back to 'patternB'.
      System.Drawing.PointF[] imagePointsB;
      if (!_patternB.FindPattern(img, out imagePointsB))
      {
        // Neither pattern is visible; image_points stays null.
        return false;
      }

      Matrix homogeneous = Matrix.Identity(4, 4);

      try
      {
        // Extrinsic calibration yields the relation between the camera coordinate
        // system and the coordinate system of 'patternB'.
        ExtrinsicCalibration calibration = new ExtrinsicCalibration(_patternB.ObjectPoints, this.IntrinsicParameters);
        Emgu.CV.ExtrinsicCameraParameters extrinsics = calibration.Calibrate(imagePointsB);

        if (extrinsics == null)
        {
          _logger.Warn("Error calculating extrinsic parameters.");
          image_points = null;
          return false;
        }

        // Embed the resulting extrinsic matrix into a homogeneous (4x4) matrix.
        Matrix extrinsic = Parsley.Core.Extensions.ConvertToParsley.ToParsley(extrinsics.ExtrinsicMatrix);
        homogeneous.SetMatrix(0, extrinsic.RowCount - 1, 0, extrinsic.ColumnCount - 1, extrinsic);

        // Express the object points of 'patternA' in the coordinate system of 'patternB'.
        Emgu.CV.Structure.MCvPoint3D32f[] transformedCorners =
          MatrixTransformation.TransformVectorToEmgu(_transformationBToA.Inverse(), 1.0, _patternA.ObjectPoints).ToArray<Emgu.CV.Structure.MCvPoint3D32f>();

        // Project the transformed points into the 2D camera plane; these are the
        // image points of the (invisible) 'patternA'.
        image_points = Emgu.CV.CameraCalibration.ProjectPoints(transformedCorners, extrinsics, this.IntrinsicParameters);
        return true;
      }
      catch (Exception e)
      {
        // Any failure during calibration/projection is logged and reported as "not found".
        _logger.Warn("Caught Exception: {0}.", e);
        image_points = null;
        return false;
      }
    }