Example #1
0
 /// <summary>
 /// Exercises chessboard corner detection on the sample image "left01.jpg".
 /// </summary>
 public void TestCalibration()
 {
    Size boardSize = new Size(9, 6);
    Image<Gray, Byte> sample = EmguAssert.LoadImage<Gray, byte>("left01.jpg");
    using (Util.VectorOfPointF detected = new Util.VectorOfPointF())
    {
       CvInvoke.FindChessboardCorners(sample, boardSize, detected);
       PointF[] cornerPoints = detected.ToArray();
    }
 }
Example #2
0
        /// <summary>
        /// Converts this vector of vectors into a jagged array of PointF.
        /// </summary>
        /// <returns>A jagged array where element i contains the points of the i-th inner vector</returns>
        public PointF[][] ToArrayOfArray()
        {
            // Read the element count once up front.
            int size = Size;

            PointF[][] res = new PointF[size][];
            for (int i = 0; i < size; i++)
            {
                // The indexer hands back a disposable vector wrapper; release it as soon
                // as its contents have been copied out.
                using (VectorOfPointF v = this[i])
                {
                    res[i] = v.ToArray();
                }
            }
            return(res);
        }
Example #3
0
        /*
         * public static void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, MCvScalar color)
         * {
         * TestDrawLine(img, startX, startY, endX, endY, color.v0, color.v1, color.v2, color.v3);
         * }
         *
         * [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention, EntryPoint="testDrawLine")]
         * private static extern void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, double v0, double v1, double v2, double v3);
         *
         * /// <summary>
         * /// Implements the chamfer matching algorithm on images taking into account both distance from
         * /// the template pixels to the nearest pixels and orientation alignment between template and image
         * /// contours.
         * /// </summary>
         * /// <param name="img">The edge image where search is performed</param>
         * /// <param name="templ">The template (an edge image)</param>
         * /// <param name="contours">The output contours</param>
         * /// <param name="cost">The cost associated with the matching</param>
         * /// <param name="templScale">The template scale</param>
         * /// <param name="maxMatches">The maximum number of matches</param>
         * /// <param name="minMatchDistance">The minimum match distance</param>
         * /// <param name="padX">PadX</param>
         * /// <param name="padY">PadY</param>
         * /// <param name="scales">Scales</param>
         * /// <param name="minScale">Minimum scale</param>
         * /// <param name="maxScale">Maximum scale</param>
         * /// <param name="orientationWeight">Orientation weight</param>
         * /// <param name="truncate">Truncate</param>
         * /// <returns>The number of matches</returns>
         * public static int ChamferMatching(Mat img, Mat templ,
         * out Point[][] contours, out float[] cost,
         * double templScale = 1, int maxMatches = 20,
         * double minMatchDistance = 1.0, int padX = 3,
         * int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
         * double orientationWeight = 0.5, double truncate = 20)
         * {
         * using (Emgu.CV.Util.VectorOfVectorOfPoint vecOfVecOfPoint = new Util.VectorOfVectorOfPoint())
         * using (Emgu.CV.Util.VectorOfFloat vecOfFloat = new Util.VectorOfFloat())
         * {
         *    int count = cveChamferMatching(img, templ, vecOfVecOfPoint, vecOfFloat, templScale, maxMatches, minMatchDistance, padX, padY, scales, minScale, maxScale, orientationWeight, truncate);
         *    contours = vecOfVecOfPoint.ToArrayOfArray();
         *    cost = vecOfFloat.ToArray();
         *    return count;
         * }
         * }
         * [DllImport(ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
         * private static extern int cveChamferMatching(
         * IntPtr img, IntPtr templ,
         * IntPtr results, IntPtr cost,
         * double templScale, int maxMatches,
         * double minMatchDistance, int padX,
         * int padY, int scales, double minScale, double maxScale,
         * double orientationWeight, double truncate);
         */

        /// <summary>
        /// Locates the centers of a circle-grid calibration pattern in the image.
        /// </summary>
        /// <param name="image">Source chessboard view</param>
        /// <param name="patternSize">The number of inner circle per chessboard row and column</param>
        /// <param name="flags">Various operation flags</param>
        /// <param name="featureDetector">The feature detector. Use a SimpleBlobDetector for default</param>
        /// <returns>The centers of the detected circles, or null when the pattern is not found</returns>
        public static PointF[] FindCirclesGrid(Image <Gray, Byte> image, Size patternSize, CvEnum.CalibCgType flags, Feature2D featureDetector)
        {
            using (Util.VectorOfPointF centers = new Util.VectorOfPointF())
            {
                bool found = FindCirclesGrid(image, patternSize, centers, flags, featureDetector);
                if (!found)
                    return null;
                return(centers.ToArray());
            }
        }
Example #4
0
      /// <summary>
      /// Detects chessboard corners in "left01.jpg", refines them to sub-pixel accuracy,
      /// calibrates the camera from the detected pattern, and undistorts the image with
      /// the recovered intrinsics.
      /// </summary>
      public void TestChessboardCalibration()
      {
         Size patternSize = new Size(9, 6);

         // Image, corner vector and the undistorted result all wrap native memory;
         // dispose them deterministically instead of leaking to the finalizer
         // (matches the using-pattern of the other tests in this file).
         using (Image<Gray, Byte> chessboardImage = EmguAssert.LoadImage<Gray, byte>("left01.jpg"))
         using (Util.VectorOfPointF corners = new Util.VectorOfPointF())
         {
            bool patternWasFound = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);

            chessboardImage.FindCornerSubPix(
               new PointF[][] {corners.ToArray()},
               new Size(10, 10),
               new Size(-1, -1),
               new MCvTermCriteria(0.05));

            MCvPoint3D32f[] objectPts = CalcChessboardCorners(patternSize, 1.0f);
            IntrinsicCameraParameters intrisic = new IntrinsicCameraParameters(8);
            ExtrinsicCameraParameters[] extrinsic;
            double error = CameraCalibration.CalibrateCamera(new MCvPoint3D32f[][] { objectPts }, new PointF[][] { corners.ToArray() },
               chessboardImage.Size, intrisic, CvEnum.CalibType.Default, new MCvTermCriteria(30, 1.0e-10),  out extrinsic);
            CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);
            //CameraCalibration.DrawChessboardCorners(chessboardImage, patternSize, corners);
            using (Image<Gray, Byte> undistorted = intrisic.Undistort(chessboardImage))
            {
               // Completing without throwing is the implicit assertion of this test.
               //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));
            }
         }
      }
        /// <summary>
        /// Detects chessboard corners in the given image and refines them to sub-pixel accuracy.
        /// </summary>
        /// <param name="image">The image to search for the chessboard pattern</param>
        /// <param name="innerCornersPerChessboardCols">Number of inner corners per chessboard column</param>
        /// <param name="innerCornersPerChessboardRows">Number of inner corners per chessboard row</param>
        /// <returns>The refined corner positions, or a vector holding the single sentinel point (0,0) when the full pattern was not found</returns>
        public VectorOfPointF Detect(Image<Gray, byte> image, int innerCornersPerChessboardCols,
            int innerCornersPerChessboardRows)
        {
            // The intermediate vector wraps native memory and is never returned to the
            // caller, so dispose it on every exit path instead of leaking it.
            using (var corners = new VectorOfPointF())
            {
                CvInvoke.FindChessboardCorners(image, new Size(innerCornersPerChessboardCols, innerCornersPerChessboardRows),
                    corners);

                // A partial detection is useless for calibration; report failure via the sentinel.
                if (corners.Size != innerCornersPerChessboardCols*innerCornersPerChessboardRows)
                {
                    return new VectorOfPointF(new[] {new PointF(0, 0)});
                }

                var refinedCorners = new[] {corners.ToArray()};

                // Refine in place to sub-pixel accuracy within an 11x11 search window.
                image.FindCornerSubPix(refinedCorners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(10));

                return new VectorOfPointF(refinedCorners[0]);
            }
        }
Example #6
0
        /// <summary>
        /// Calculates optical flow for a sparse feature set using iterative Lucas-Kanade method in pyramids
        /// </summary>
        /// <param name="prev">First frame, at time t</param>
        /// <param name="curr">Second frame, at time t + dt </param>
        /// <param name="prevFeatures">Array of points for which the flow needs to be found</param>
        /// <param name="winSize">Size of the search window of each pyramid level</param>
        /// <param name="level">Maximal pyramid level number. If 0 , pyramids are not used (single level), if 1 , two levels are used, etc</param>
        /// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped</param>
        /// <param name="flags">Flags</param>
        /// <param name="currFeatures">Array of 2D points containing calculated new positions of input features in the second image</param>
        /// <param name="status">Array. Every element of the array is set to 1 if the flow for the corresponding feature has been found, 0 otherwise</param>
        /// <param name="trackError">Array of double numbers containing difference between patches around the original and moved points</param>
        /// <param name="minEigThreshold">the algorithm calculates the minimum eigen value of a 2x2 normal matrix of optical flow equations (this matrix is called a spatial gradient matrix in [Bouguet00]), divided by number of pixels in a window; if this value is less than minEigThreshold, then a corresponding feature is filtered out and its flow is not processed, so it allows to remove bad points and get a performance boost.</param>
        public static void CalcOpticalFlowPyrLK(
            IInputArray prev,
            IInputArray curr,
            PointF[] prevFeatures,
            Size winSize,
            int level,
            MCvTermCriteria criteria,
            out PointF[] currFeatures,
            out Byte[] status,
            out float[] trackError,
            Emgu.CV.CvEnum.LKFlowFlag flags = CvEnum.LKFlowFlag.Default,
            double minEigThreshold          = 1.0e-4)
        {
            // Marshal the managed point array through native vectors, run the
            // vector-based overload, then copy the results back out.
            using (Util.VectorOfPointF inputPoints = new Util.VectorOfPointF())
            using (Util.VectorOfPointF trackedPoints = new Util.VectorOfPointF())
            using (Util.VectorOfByte foundFlags = new Util.VectorOfByte())
            using (Util.VectorOfFloat patchError = new Util.VectorOfFloat())
            {
                inputPoints.Push(prevFeatures);

                CalcOpticalFlowPyrLK(
                    prev, curr,
                    inputPoints, trackedPoints,
                    foundFlags, patchError,
                    winSize, level, criteria,
                    flags, minEigThreshold);

                currFeatures = trackedPoints.ToArray();
                status       = foundFlags.ToArray();
                trackError   = patchError.ToArray();
            }
        }
Example #7
0
 /// <summary>
 /// Computes the convex hull of a 2D point set (Sklansky's algorithm).
 /// </summary>
 /// <param name="points">The points to find convex hull from</param>
 /// <param name="clockwise">Orientation flag. If it is true, the output convex hull is oriented clockwise. Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing to the right, and its Y axis pointing upwards.</param>
 /// <returns>The convex hull of the points</returns>
 public static PointF[] ConvexHull(PointF[] points, bool clockwise = false)
 {
    using (VectorOfPointF inputVec = new VectorOfPointF(points))
    using (VectorOfPointF hullVec = new VectorOfPointF())
    {
       CvInvoke.ConvexHull(inputVec, hullVec, clockwise, true);
       PointF[] hullPoints = hullVec.ToArray();
       return hullPoints;
    }
 }
Example #8
0
      /*
      #region Kalman Filter
      /// <summary>
      /// Allocates CvKalman and all its matrices and initializes them somehow. 
      /// </summary>
      /// <param name="dynamParams">dimensionality of the state vector</param>
      /// <param name="measureParams">dimensionality of the measurement vector </param>
      /// <param name="controlParams">dimensionality of the control vector </param>
      /// <returns>Pointer to the created Kalman filter</returns>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvCreateKalman(int dynamParams, int measureParams, int controlParams);

      /// <summary>
      /// Adjusts stochastic model state on the basis of the given measurement of the model state.
      /// The function stores adjusted state at kalman->state_post and returns it on output
      /// </summary>
      /// <param name="kalman">Pointer to the structure to be updated</param>
      /// <param name="measurement">Pointer to the structure CvMat containing the measurement vector</param>
      /// <returns>The function stores adjusted state at kalman->state_post and returns it on output</returns>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvKalmanCorrect(ref MCvKalman kalman, IntPtr measurement);


      /// <summary>
      /// Estimates the subsequent stochastic model state by its current state and stores it at kalman->state_pre
      /// The function returns the estimated state
      /// </summary>
      /// <param name="kalman">Kalman filter state</param>
      /// <param name="control">Control vector (uk), should be NULL iff there is no external control (controlParams=0). </param>
      /// <returns>the estimated state</returns>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvKalmanPredict(ref MCvKalman kalman, IntPtr control);

      /// <summary>
      /// Releases the structure CvKalman and all underlying matrices
      /// </summary>
      /// <param name="kalman">reference of the pointer to the Kalman filter structure.</param>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern void cvReleaseKalman(ref IntPtr kalman);
      #endregion
*/
      #region optical flow
      /// <summary>
      /// Calculates optical flow for a sparse feature set using iterative Lucas-Kanade method in pyramids
      /// </summary>
      /// <param name="prev">First frame, at time t</param>
      /// <param name="curr">Second frame, at time t + dt </param>
      /// <param name="prevFeatures">Array of points for which the flow needs to be found</param>
      /// <param name="winSize">Size of the search window of each pyramid level</param>
      /// <param name="level">Maximal pyramid level number. If 0 , pyramids are not used (single level), if 1 , two levels are used, etc</param>
      /// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped</param>
      /// <param name="flags">Flags</param>
      /// <param name="currFeatures">Array of 2D points containing calculated new positions of input features in the second image</param>
      /// <param name="status">Array. Every element of the array is set to 1 if the flow for the corresponding feature has been found, 0 otherwise</param>
      /// <param name="trackError">Array of double numbers containing difference between patches around the original and moved points</param>
      /// <param name="minEigThreshold">the algorithm calculates the minimum eigen value of a 2x2 normal matrix of optical flow equations (this matrix is called a spatial gradient matrix in [Bouguet00]), divided by number of pixels in a window; if this value is less than minEigThreshold, then a corresponding feature is filtered out and its flow is not processed, so it allows to remove bad points and get a performance boost.</param>
      public static void CalcOpticalFlowPyrLK(
         IInputArray prev,
         IInputArray curr,
         PointF[] prevFeatures,
         Size winSize,
         int level,
         MCvTermCriteria criteria,
         out PointF[] currFeatures,
         out Byte[] status,
         out float[] trackError,
         Emgu.CV.CvEnum.LKFlowFlag flags = CvEnum.LKFlowFlag.Default,
         double minEigThreshold = 1.0e-4)
      {
         // Bridge the managed array into native vectors, delegate to the
         // vector-based overload, then surface the results as arrays.
         using (Util.VectorOfPointF sourcePts = new Util.VectorOfPointF())
         using (Util.VectorOfPointF resultPts = new Util.VectorOfPointF())
         using (Util.VectorOfByte statusFlags = new Util.VectorOfByte())
         using (Util.VectorOfFloat errors = new Util.VectorOfFloat())
         {
            sourcePts.Push(prevFeatures);

            CalcOpticalFlowPyrLK(
               prev, curr,
               sourcePts, resultPts,
               statusFlags, errors,
               winSize, level, criteria,
               flags, minEigThreshold);

            currFeatures = resultPts.ToArray();
            status = statusFlags.ToArray();
            trackError = errors.ToArray();
         }
      }
Example #9
0
      /*
      public static void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, MCvScalar color)
      {
         TestDrawLine(img, startX, startY, endX, endY, color.v0, color.v1, color.v2, color.v3);
      }

      [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention, EntryPoint="testDrawLine")]
      private static extern void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, double v0, double v1, double v2, double v3);

      /// <summary>
      /// Implements the chamfer matching algorithm on images taking into account both distance from
      /// the template pixels to the nearest pixels and orientation alignment between template and image
      /// contours.
      /// </summary>
      /// <param name="img">The edge image where search is performed</param>
      /// <param name="templ">The template (an edge image)</param>
      /// <param name="contours">The output contours</param>
      /// <param name="cost">The cost associated with the matching</param>
      /// <param name="templScale">The template scale</param>
      /// <param name="maxMatches">The maximum number of matches</param>
      /// <param name="minMatchDistance">The minimum match distance</param>
      /// <param name="padX">PadX</param>
      /// <param name="padY">PadY</param>
      /// <param name="scales">Scales</param>
      /// <param name="minScale">Minimum scale</param>
      /// <param name="maxScale">Maximum scale</param>
      /// <param name="orientationWeight">Orientation weight</param>
      /// <param name="truncate">Truncate</param>
      /// <returns>The number of matches</returns>
      public static int ChamferMatching(Mat img, Mat templ,
         out Point[][] contours, out float[] cost,
         double templScale = 1, int maxMatches = 20,
         double minMatchDistance = 1.0, int padX = 3,
         int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
         double orientationWeight = 0.5, double truncate = 20)
      {
         using (Emgu.CV.Util.VectorOfVectorOfPoint vecOfVecOfPoint = new Util.VectorOfVectorOfPoint())
         using (Emgu.CV.Util.VectorOfFloat vecOfFloat = new Util.VectorOfFloat())
         {
            int count = cveChamferMatching(img, templ, vecOfVecOfPoint, vecOfFloat, templScale, maxMatches, minMatchDistance, padX, padY, scales, minScale, maxScale, orientationWeight, truncate);
            contours = vecOfVecOfPoint.ToArrayOfArray();
            cost = vecOfFloat.ToArray();
            return count;
         }
      }
      [DllImport(ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      private static extern int cveChamferMatching(
         IntPtr img, IntPtr templ,
         IntPtr results, IntPtr cost,
         double templScale, int maxMatches,
         double minMatchDistance, int padX,
         int padY, int scales, double minScale, double maxScale,
         double orientationWeight, double truncate);
      */

      /// <summary>
      /// Locates the centers of a circle-grid calibration pattern in the image.
      /// </summary>
      /// <param name="image">Source chessboard view</param>
      /// <param name="patternSize">The number of inner circle per chessboard row and column</param>
      /// <param name="flags">Various operation flags</param>
      /// <param name="featureDetector">The feature detector. Use a SimpleBlobDetector for default</param>
      /// <returns>The centers of the detected circles, or null when the pattern is not found</returns>
      public static PointF[] FindCirclesGrid(Image<Gray, Byte> image, Size patternSize, CvEnum.CalibCgType flags, Feature2D featureDetector)
      {
         using (Util.VectorOfPointF detectedCenters = new Util.VectorOfPointF())
         {
            if (!FindCirclesGrid(image, patternSize, detectedCenters, flags, featureDetector))
               return null;
            return detectedCenters.ToArray();
         }
      }
Example #10
0
      /// <summary>
      /// Obtains the list of Voronoi Facets 
      /// </summary>
      /// <param name="idx">Vertex indices to obtain facets for; when null, nothing is pushed and an empty vector is passed to the native call — presumably that returns the facets of all vertices; confirm against cveSubdiv2DGetVoronoiFacetList</param>
      /// <returns>The list of Voronoi Facets</returns>
      public VoronoiFacet[] GetVoronoiFacets(int[] idx = null)
      {
         using (VectorOfInt vi = new VectorOfInt())
         using (VectorOfVectorOfPointF facetVec = new VectorOfVectorOfPointF())
         using (VectorOfPointF centerVec = new VectorOfPointF())
         {
            // Only push indices when the caller supplied some.
            if (idx != null)
               vi.Push(idx);
          
            // Native call fills one polygon (facet) per requested vertex plus its center point.
            CvInvoke.cveSubdiv2DGetVoronoiFacetList(_ptr, vi, facetVec, centerVec);
            PointF[][] vertices = facetVec.ToArrayOfArray();
            PointF[] centers = centerVec.ToArray();

            // Pair facet i's center with its polygon vertices.
            VoronoiFacet[] facets = new VoronoiFacet[centers.Length];
            for (int i = 0; i < facets.Length; i++)
            {
               facets[i] = new VoronoiFacet(centers[i], vertices[i]);
            }
            return facets;
         }
         
      }
Example #11
0
 /// <summary>
 /// Runs the standard Hough line transform on "box.png" and reads back the result.
 /// </summary>
 public void TestHoughLine()
 {
    // Mat wraps native memory; dispose it deterministically instead of leaking it
    // (the original left img undisposed, unlike the other Mats in this method).
    using (Mat img = EmguAssert.LoadMat("box.png"))
    using (Mat imgGray = new Mat())
    using (VectorOfPointF vp = new VectorOfPointF())
    {
       // HoughLines expects a single-channel image; convert only when needed.
       if (img.NumberOfChannels == 1)
          img.CopyTo(imgGray);
       else
          CvInvoke.CvtColor(img, imgGray, ColorConversion.Bgr2Gray);
       CvInvoke.HoughLines(imgGray, vp, 10, Math.PI/30, 5);
       PointF[] pts = vp.ToArray();
    }
 }