Example #1
        public Vectors Calculate(VectorOfPointF cornerPoints, Size imageSize)
        {
            cameraCalibration.Calibrate(new VectorOfVectorOfPointF(cornerPoints), imageSize,
                configuration.InnerCornersPerChessboardCols, configuration.InnerCornersPerChessboardRows);

            var vectors = vectorsFilter.Correct(cameraCalibration.Rotation, cameraCalibration.Translation);

            return vectors;
        }
Example #2
 public void TestCalibration()
 {
    Size patternSize = new Size(9, 6);
    Image<Gray, Byte> left01 = EmguAssert.LoadImage<Gray, byte>("left01.jpg");
    using (Util.VectorOfPointF vec = new Util.VectorOfPointF())
    {
       CvInvoke.FindChessboardCorners(left01, patternSize, vec);
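       // A hedged refinement step (not in the original test): CornerSubPix sharpens
       // the detected corners to sub-pixel accuracy before they are copied out; the
       // window sizes and termination criteria here are assumptions.
       CvInvoke.CornerSubPix(left01, vec, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));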
       PointF[] corners = vec.ToArray();
    } 
 }
 public void TestCirclesGrid()
 {
    Size patternSize = new Size(4, 3);
    Image<Gray, Byte> circlesGridImage = EmguAssert.LoadImage<Gray, byte>("circlesGrid.bmp");
    using (SimpleBlobDetector detector = new SimpleBlobDetector())
    using (Util.VectorOfPointF centers = new Util.VectorOfPointF())
    {
       bool found = CvInvoke.FindCirclesGrid(circlesGridImage, patternSize, centers, CvEnum.CalibCgType.SymmetricGrid | CvEnum.CalibCgType.Clustering, detector);
       CvInvoke.DrawChessboardCorners(circlesGridImage, patternSize, centers, found);
       //UI.ImageViewer.Show(circlesGridImage);
    }
 }
Example #4
 /// <summary>
 /// Create the standard vector of VectorOfPointF
 /// </summary>
 public VectorOfVectorOfPointF(PointF[][] values)
     : this()
 {
     using (VectorOfPointF v = new VectorOfPointF())
     {
         for (int i = 0; i < values.Length; i++)
         {
             v.Push(values[i]);
             Push(v);
             v.Clear();
         }
     }
 }
Example #5
        /// <summary>
        /// Convert the standard vector to arrays of PointF
        /// </summary>
        /// <returns>Arrays of PointF</returns>
        public PointF[][] ToArrayOfArray()
        {
            int size = Size;

            PointF[][] res = new PointF[size][];
            for (int i = 0; i < size; i++)
            {
                using (VectorOfPointF v = this[i])
                {
                    res[i] = v.ToArray();
                }
            }
            return res;
        }
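
A minimal round-trip sketch tying the constructor from Example #4 to ToArrayOfArray above; the jagged input values are arbitrary:

        PointF[][] input =
        {
            new[] { new PointF(0, 0), new PointF(1, 1) },
            new[] { new PointF(2, 2) }
        };
        using (VectorOfVectorOfPointF vvp = new VectorOfVectorOfPointF(input))
        {
            PointF[][] roundTrip = vvp.ToArrayOfArray(); // same shape and values as input
        }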
Example #6
        /*
         * public static void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, MCvScalar color)
         * {
         * TestDrawLine(img, startX, startY, endX, endY, color.v0, color.v1, color.v2, color.v3);
         * }
         *
         * [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention, EntryPoint="testDrawLine")]
         * private static extern void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, double v0, double v1, double v2, double v3);
         *
         * /// <summary>
         * /// Implements the chamfer matching algorithm on images taking into account both distance from
         * /// the template pixels to the nearest pixels and orientation alignment between template and image
         * /// contours.
         * /// </summary>
         * /// <param name="img">The edge image where search is performed</param>
         * /// <param name="templ">The template (an edge image)</param>
         * /// <param name="contours">The output contours</param>
         * /// <param name="cost">The cost associated with the matching</param>
         * /// <param name="templScale">The template scale</param>
         * /// <param name="maxMatches">The maximum number of matches</param>
         * /// <param name="minMatchDistance">The minimum match distance</param>
         * /// <param name="padX">PadX</param>
         * /// <param name="padY">PadY</param>
         * /// <param name="scales">Scales</param>
         * /// <param name="minScale">Minimum scale</param>
         * /// <param name="maxScale">Maximum scale</param>
         * /// <param name="orientationWeight">Orientation weight</param>
         * /// <param name="truncate">Truncate</param>
         * /// <returns>The number of matches</returns>
         * public static int ChamferMatching(Mat img, Mat templ,
         * out Point[][] contours, out float[] cost,
         * double templScale = 1, int maxMatches = 20,
         * double minMatchDistance = 1.0, int padX = 3,
         * int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
         * double orientationWeight = 0.5, double truncate = 20)
         * {
         * using (Emgu.CV.Util.VectorOfVectorOfPoint vecOfVecOfPoint = new Util.VectorOfVectorOfPoint())
         * using (Emgu.CV.Util.VectorOfFloat vecOfFloat = new Util.VectorOfFloat())
         * {
         *    int count = cveChamferMatching(img, templ, vecOfVecOfPoint, vecOfFloat, templScale, maxMatches, minMatchDistance, padX, padY, scales, minScale, maxScale, orientationWeight, truncate);
         *    contours = vecOfVecOfPoint.ToArrayOfArray();
         *    cost = vecOfFloat.ToArray();
         *    return count;
         * }
         * }
         * [DllImport(ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
         * private static extern int cveChamferMatching(
         * IntPtr img, IntPtr templ,
         * IntPtr results, IntPtr cost,
         * double templScale, int maxMatches,
         * double minMatchDistance, int padX,
         * int padY, int scales, double minScale, double maxScale,
         * double orientationWeight, double truncate);
         */

        /// <summary>
        /// Finds centers in the grid of circles
        /// </summary>
        /// <param name="image">Source chessboard view</param>
        /// <param name="patternSize">The number of inner circle per chessboard row and column</param>
        /// <param name="flags">Various operation flags</param>
        /// <param name="featureDetector">The feature detector. Use a SimpleBlobDetector for default</param>
        /// <returns>The center of circles detected if the chess board pattern is found, otherwise null is returned</returns>
        public static PointF[] FindCirclesGrid(Image <Gray, Byte> image, Size patternSize, CvEnum.CalibCgType flags, Feature2D featureDetector)
        {
            using (Util.VectorOfPointF vec = new Util.VectorOfPointF())
            {
                bool patternFound =
                    FindCirclesGrid(
                        image,
                        patternSize,
                        vec,
                        flags,
                        featureDetector
                        );
                return patternFound ? vec.ToArray() : null;
            }
        }
        public VectorOfPointF Detect(Image<Gray, byte> image, int innerCornersPerChessboardCols,
            int innerCornersPerChessboardRows)
        {
            var corners = new VectorOfPointF();

            CvInvoke.FindChessboardCorners(image, new Size(innerCornersPerChessboardCols, innerCornersPerChessboardRows),
                corners);

            if (corners.Size != innerCornersPerChessboardCols*innerCornersPerChessboardRows)
            {
                return new VectorOfPointF(new[] {new PointF(0, 0)});
            }

            var refinedCorners = new[] {corners.ToArray()};

            image.FindCornerSubPix(refinedCorners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(10));

            return new VectorOfPointF(refinedCorners[0]);
        }
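
A hedged usage sketch for Detect above; the detector instance name, the grayscale input, and the 9x6 pattern are assumptions:

        // Hypothetical caller: look for a 9x6 inner-corner grid and treat the
        // single-point vector Detect returns on failure as "not found".
        VectorOfPointF corners = detector.Detect(grayImage, 9, 6);
        bool found = corners.Size == 9 * 6;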
Example #8
      /// <summary>
      /// Insert a collection of points to this planar subdivision
      /// </summary>
      /// <param name="points">The points to be inserted to this planar subdivision</param>
      /// <param name="silent">If true, any exception during insert will be ignored</param>
      public void Insert(PointF[] points, bool silent)
      {
         using (VectorOfPointF vpf = new VectorOfPointF(points))
         {
            if (silent)
            {
#if !UNITY_IPHONE
               //ignore all errors
               IntPtr oldErrorCallback = CvInvoke.RedirectError(CvInvoke.CvErrorHandlerIgnoreError, IntPtr.Zero, IntPtr.Zero);
#endif
               CvInvoke.cveSubdiv2DInsertMulti(_ptr, vpf);
#if !UNITY_IPHONE
               //reset the error handler
               CvInvoke.RedirectError(oldErrorCallback, IntPtr.Zero, IntPtr.Zero);
#endif
            }
            else
            {
               CvInvoke.cveSubdiv2DInsertMulti(_ptr, vpf);
            }
         }
      }
Example #10
      public void TestChessboardCalibration()
      {
         Size patternSize = new Size(9, 6);

         Image<Gray, Byte> chessboardImage = EmguAssert.LoadImage<Gray, byte>("left01.jpg");
         Util.VectorOfPointF corners = new Util.VectorOfPointF();
         bool patternWasFound = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);

         chessboardImage.FindCornerSubPix(
            new PointF[][] {corners.ToArray()},
            new Size(10, 10),
            new Size(-1, -1),
            new MCvTermCriteria(0.05));

         MCvPoint3D32f[] objectPts = CalcChessboardCorners(patternSize, 1.0f);
         IntrinsicCameraParameters intrinsic = new IntrinsicCameraParameters(8);
         ExtrinsicCameraParameters[] extrinsic;
         double error = CameraCalibration.CalibrateCamera(new MCvPoint3D32f[][] { objectPts }, new PointF[][] { corners.ToArray() },
            chessboardImage.Size, intrinsic, CvEnum.CalibType.Default, new MCvTermCriteria(30, 1.0e-10), out extrinsic);
         CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);
         //CameraCalibration.DrawChessboardCorners(chessboardImage, patternSize, corners);
         Image<Gray, Byte> undistorted = intrinsic.Undistort(chessboardImage);
         //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));
      }
Example #11
 public void TestHoughLine()
 {
    Mat img = EmguAssert.LoadMat("box.png");
    
    using (Mat imgGray = new Mat())
    using (VectorOfPointF vp = new VectorOfPointF())
    {
       if (img.NumberOfChannels == 1)
          img.CopyTo(imgGray);
       else
          CvInvoke.CvtColor(img, imgGray, ColorConversion.Bgr2Gray);
       CvInvoke.HoughLines(imgGray, vp, 10, Math.PI/30, 5);
       PointF[] pts = vp.ToArray();
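       // Hedged interpretation sketch (not in the original test): HoughLines packs
       // each detected line as (rho, theta) into a PointF (X = rho, Y = theta);
       // the 1000-pixel extent below is an arbitrary choice for visualization.
       foreach (PointF polarLine in pts)
       {
          double rho = polarLine.X, theta = polarLine.Y;
          double a = Math.Cos(theta), b = Math.Sin(theta);
          Point p1 = new Point((int)(a * rho + 1000 * (-b)), (int)(b * rho + 1000 * a));
          Point p2 = new Point((int)(a * rho - 1000 * (-b)), (int)(b * rho - 1000 * a));
          CvInvoke.Line(img, p1, p2, new MCvScalar(0, 0, 255));
       }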
    }
 }
Example #12
        void ProcessFrame(object sender, EventArgs e)
        {
            _capture.Retrieve(_frame);
            CvInvoke.CvtColor(_frame, _grayFrame, ColorConversion.Bgr2Gray);

            //apply chess board detection
            if (_currentMode == Mode.SavingFrames)
            {
                _find = CvInvoke.FindChessboardCorners(_grayFrame, _patternSize, _corners, CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);
                //we use this branch so we can show a colour image rather than a gray one:
                if (_find) //chess board found
                {

                    //make measurements more accurate by using CornerSubPix
                    CvInvoke.CornerSubPix(_grayFrame, _corners, new Size(11, 11), new Size(-1, -1),
                        new MCvTermCriteria(30, 0.1));

                    //if the go button has been pressed, start acquiring frames; otherwise just display the points
                    if (_startFlag)
                    {
                        _frameArrayBuffer[_frameBufferSavepoint] = _grayFrame.Clone(); //store a copy of the image (storing _grayFrame itself would leave every slot referencing the same frame)
                        _frameBufferSavepoint++; //increase buffer position

                        //check the state of buffer
                        if (_frameBufferSavepoint == _frameArrayBuffer.Length)
                            _currentMode = Mode.CalculatingIntrinsics; //buffer full
                    }

                    //draw the results
                    CvInvoke.DrawChessboardCorners(_frame, _patternSize, _corners, _find);
                    string msg = string.Format("{0}/{1}", _frameBufferSavepoint + 1, _frameArrayBuffer.Length);
                    Console.WriteLine(msg);

                    int baseLine = 0;
                    var textOrigin = new Point(_frame.Cols - 2 * 120 - 10, _frame.Rows - 2 * baseLine - 10);
                    CvInvoke.PutText(_frame, msg, textOrigin, FontFace.HersheyPlain, 3, new MCvScalar(0, 0, 255), 2);

                    //calibrate the delay based on the size of the buffer
                    //if the buffer is small you want a big delay; if it is big, a small delay
                    Thread.Sleep(100); //allow the user to move the board to a different position
                }
                _corners = new VectorOfPointF();
                _find = false;

            }
            if (_currentMode == Mode.CalculatingIntrinsics)
            {
                for (int k = 0; k < _frameArrayBuffer.Length; k++)
                {
                    _cornersPointsVec[k] = new VectorOfPointF();
                    CvInvoke.FindChessboardCorners(_frameArrayBuffer[k], _patternSize, _cornersPointsVec[k], CalibCbType.AdaptiveThresh
                        | CalibCbType.FastCheck | CalibCbType.NormalizeImage);
                    //for accuracy, refine against the buffered frame rather than the live _grayFrame
                    CvInvoke.CornerSubPix(_frameArrayBuffer[k], _cornersPointsVec[k], new Size(11, 11), new Size(-1, -1),
                         new MCvTermCriteria(30, 0.1));

                    //Fill our objects list with the real world measurements for the intrinsic calculations
                    var objectList = new List<MCvPoint3D32f>();
                    for (int i = 0; i < _height; i++)
                    {
                        for (int j = 0; j < _width; j++)
                        {
                            objectList.Add(new MCvPoint3D32f(j * _squareSize, i * _squareSize, 0.0F));
                        }
                    }

                    //corners_object_list[k] = new MCvPoint3D32f[];
                    _cornersObjectList[k] = objectList.ToArray();
                    _cornersPointsList[k] = _cornersPointsVec[k].ToArray();
                }

                //our error should be as close to 0 as possible
                double error = CvInvoke.CalibrateCamera(_cornersObjectList, _cornersPointsList, _grayFrame.Size,
                     _cameraMatrix, _distCoeffs, CalibType.FixFocalLength, new MCvTermCriteria(30, 0.1), out _rvecs, out _tvecs);
                MessageBox.Show(@"Intrinsic Calculation Error: " + error.ToString(CultureInfo.InvariantCulture), @"Results", MessageBoxButtons.OK, MessageBoxIcon.Information); //display the results to the user

                _currentMode = Mode.Calibrated;

                double[,] c = new double[3,3];
                for(int i = 0; i < 3; i++)
                {
                    for(int j = 0; j < 3; j++)
                    {
                        c[i,j] = MatExtension.GetValue(_cameraMatrix,i,j);
                        Console.Write(MatExtension.GetValue(_cameraMatrix,i,j) + "\t");
                    }
                    Console.WriteLine();
                }

                double[] d = new double[8];
                for(int i = 0; i < 8; i++)
                {
                    d[i] = MatExtension.GetValue(_distCoeffs, i, 0);
                    Console.Write(MatExtension.GetValue(_distCoeffs, i, 0) + "\t");
                }
                OutputJsonFile output = new OutputJsonFile(c, d);

                string jsonObject = JsonConvert.SerializeObject(output);
                Console.WriteLine(jsonObject);
                System.IO.File.WriteAllText("parameters.json", jsonObject);

                //       XmlDocument xDoc = Emgu.Util.Toolbox.XmlSerialize<Matrix<float>>((Matrix<float>)_cameraMatrix);
            }
            if (_currentMode == Mode.Calibrated)
            {
                Sub_PicturBox.Image = _frame;
                Mat outFrame = _frame.Clone();
                CvInvoke.Undistort(_frame, outFrame, _cameraMatrix, _distCoeffs);
                _frame = outFrame.Clone();

                PointF[] transformCorners = new PointF[4];
                PointF[] newCorners = new PointF[]{
                    new PointF(0,0),
                    new PointF(_frame.Width - 1,0),
                    new PointF(_frame.Width - 1, _frame.Height - 1),
                    new PointF(0, _frame.Height-1)};

            }

            Main_Picturebox.Image = _frame;
        }
Example #13
 /// <summary>
 /// Push multiple values from the other vector into this vector
 /// </summary>
 /// <param name="other">The other vector, from which the values will be pushed to the current vector</param>
 public void Push(VectorOfPointF other)
 {
     VectorOfPointFPushVector(_ptr, other);
 }
Example #14
      /*
      /// <summary>
      /// A comparator which compares only the X value of the point
      /// </summary>
      private class XValueOfPointComparator : IComparer<PointF>
      {
         public int Compare(PointF p1, PointF p2)
         {
            return p1.X.CompareTo(p2.X);
         }
      }

      /// <summary>
      /// Perform a first degree interpolation to lookup the y coordinate given the x coordinate
      /// </summary>
      /// <param name="points">The collection of points. Must be sorted by the x value.</param>
      /// <param name="index">the x coordinate</param>
      /// <returns>the y coordinate as the result of the first degree interpolation</returns>
      public static float FirstDegreeInterpolate(PointF[] points, float index)
      {
         XValueOfPointComparator comparator = new XValueOfPointComparator();
         int idx = Array.BinarySearch<PointF>(points, new PointF(index, 0.0f), comparator);
         
         if (idx >= 0) // an exact index is matched
            return points[idx].Y;

         // the index fall into a range, in this case we do interpolation
         idx = -idx;

         if (idx == 1)
            // the specific index is smaller than all indexes
            idx = 0;
         else if (idx == points.Length + 1)
            // the specific index is larger than all indexes
            idx = points.Length - 2;
         else
            idx -= 2;

         LineSegment2DF line = new LineSegment2DF(points[idx], points[idx + 1]);
         return line.YByX(index);         
      }

      /// <summary>
      /// Perform a first degree interpolation to lookup the y coordinates given the x coordinates
      /// </summary>
      /// <param name="points">The collection of points, Must be sorted by x value</param>
      /// <param name="indexes">the x coordinates</param>
      /// <returns>The y coordinates as the result of the first degree interpolation</returns>
      public static float[] FirstDegreeInterpolate(PointF[] points, float[] indexes)
      {
         return Array.ConvertAll<float, float>(
             indexes,
             delegate(float d) { return FirstDegreeInterpolate(points, d); });
      }*/

      /*
      /// <summary>
      /// Fit a line to the points collection
      /// </summary>
      /// <param name="points">The points to be fitted</param>
      /// <param name="type">The type of the fitting</param>
      /// <param name="normalizedDirection">The normalized direction of the fitted line</param>
      /// <param name="aPointOnLine">A point on the fitted line</param>
      public static void Line2DFitting(PointF[] points, CvEnum.DistType type, out PointF normalizedDirection, out PointF aPointOnLine)
      {
         float[] data = new float[6];
         IntPtr seq = Marshal.AllocHGlobal(StructSize.MCvSeq);
         IntPtr block = Marshal.AllocHGlobal(StructSize.MCvSeqBlock);
         GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);

         CvInvoke.cvMakeSeqHeaderForArray(
            CvInvoke.MakeType(CvEnum.DepthType.Cv32F, 2),
            StructSize.MCvSeq,
            StructSize.PointF,
            handle.AddrOfPinnedObject(),
            points.Length,
            seq,
            block); 

         CvInvoke.cvFitLine(seq, type, 0.0, 0.01, 0.01, data);

         handle.Free();
         Marshal.FreeHGlobal(seq);
         Marshal.FreeHGlobal(block);
         normalizedDirection = new PointF(data[0], data[1]);
         aPointOnLine = new PointF(data[2], data[3]);
      }*/

      /// <summary>
      /// Fit an ellipse to the points collection
      /// </summary>
      /// <param name="points">The points to be fitted</param>
      /// <returns>An ellipse</returns>
      public static Ellipse EllipseLeastSquareFitting(PointF[] points)
      {
         using (VectorOfPointF vp = new VectorOfPointF(points))
         {
            Ellipse e = new Ellipse(CvInvoke.FitEllipse(vp));

            //The angle returned by FitEllipse has the wrong sign.
            //The returned angle is the clockwise rotation; the MCvBox definition needs the counterclockwise rotation.
            //For this, we need to change the sign of the angle
            RotatedRect b = e.RotatedRect;
            b.Angle = -b.Angle;
            if (b.Angle < 0) b.Angle += 360;
            e.RotatedRect = b;

            return e;
         }
      }
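
A minimal usage sketch for EllipseLeastSquareFitting, sampling noise-free points from a known ellipse; the sample count and geometry are arbitrary:

      // Hypothetical usage: 36 samples of an axis-aligned ellipse centered at (100, 100)
      // with semi-axes 50 and 30; the fit should recover roughly those parameters.
      PointF[] samples = new PointF[36];
      for (int i = 0; i < samples.Length; i++)
      {
         double t = i * 10.0 * Math.PI / 180.0;
         samples[i] = new PointF(
            100 + 50 * (float)Math.Cos(t),
            100 + 30 * (float)Math.Sin(t));
      }
      Ellipse fitted = EllipseLeastSquareFitting(samples);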
Example #15
 /// <summary>
 /// Finds the minimal circumscribed circle for a 2D point set using an iterative algorithm
 /// </summary>
 /// <param name="points">Sequence or array of 2D points</param>
 /// <returns>The minimal circumscribed circle for the 2D point set</returns>
 public static CircleF MinEnclosingCircle(PointF[] points)
 {
    using (VectorOfPointF vp = new VectorOfPointF(points))
       return MinEnclosingCircle(vp);
 }
Example #16
 /// <summary>
 /// Finds convex hull of 2D point set using Sklansky's algorithm
 /// </summary>
 /// <param name="points">The points to find convex hull from</param>
 /// <param name="clockwise">Orientation flag. If it is true, the output convex hull is oriented clockwise. Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing to the right, and its Y axis pointing upwards.</param>
 /// <returns>The convex hull of the points</returns>
 public static PointF[] ConvexHull(PointF[] points, bool clockwise = false)
 {
    using (VectorOfPointF vpf = new VectorOfPointF(points))
    using (VectorOfPointF hull = new VectorOfPointF())
    {
       CvInvoke.ConvexHull(vpf, hull, clockwise, true);
       return hull.ToArray();
    }
 }
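
A combined sketch exercising ConvexHull together with MinEnclosingCircle from Example #15 on a small hand-made point set:

    // Hypothetical usage: the interior point is dropped from the hull, while the
    // enclosing circle must still cover all five points.
    PointF[] pts =
    {
       new PointF(0, 0), new PointF(10, 0), new PointF(10, 10),
       new PointF(0, 10), new PointF(5, 5) // interior point
    };
    PointF[] hull = ConvexHull(pts);          // the four corners
    CircleF circle = MinEnclosingCircle(pts); // center near (5, 5), radius ~7.07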
Example #17
 public DebuggerProxy(VectorOfPointF v)
 {
    _v = v;
 }
Example #19
      /// <summary>
      /// Obtains the list of Voronoi Facets
      /// </summary>
      /// <param name="idx">Vector of vertex IDs whose facets are returned; pass null for all vertices</param>
      /// <returns>The list of Voronoi Facets</returns>
      public VoronoiFacet[] GetVoronoiFacets(int[] idx = null)
      {
         using (VectorOfInt vi = new VectorOfInt())
         using (VectorOfVectorOfPointF facetVec = new VectorOfVectorOfPointF())
         using (VectorOfPointF centerVec = new VectorOfPointF())
         {
            if (idx != null)
               vi.Push(idx);
         
            CvInvoke.cveSubdiv2DGetVoronoiFacetList(_ptr, vi, facetVec, centerVec);
            PointF[][] vertices = facetVec.ToArrayOfArray();
            PointF[] centers = centerVec.ToArray();

            VoronoiFacet[] facets = new VoronoiFacet[centers.Length];
            for (int i = 0; i < facets.Length; i++)
            {
               facets[i] = new VoronoiFacet(centers[i], vertices[i]);
            }
            return facets;
         }
      }
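
A hedged end-to-end sketch combining the Insert overload from Example #8 with GetVoronoiFacets above; the Subdiv2D constructor taking a bounding rectangle is an assumption about the surrounding class:

      // Hypothetical usage: subdivide a 100x100 region with three sites and read
      // back one Voronoi facet per inserted site.
      using (Subdiv2D subdivision = new Subdiv2D(new Rectangle(0, 0, 100, 100)))
      {
         subdivision.Insert(new PointF[] { new PointF(10, 10), new PointF(80, 20), new PointF(50, 90) }, true);
         VoronoiFacet[] facets = subdivision.GetVoronoiFacets();
      }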
Example #20
        public void ParseTestVideo(string testFile)
        {
            //Capture Image
            if(string.IsNullOrWhiteSpace(OutputPath))
                OutputPath = _defaultTestVideoPath;

            List<string> grayImgList = CatchImages(testFile, 0, OutputPath);

            //Get the optical flow of the L-K features
            Image<Gray, Byte> mask = new Image<Gray, Byte>(grayImgList.First());
            Image<Gray, Byte> grayImage1 = new Image<Gray, Byte>(grayImgList.First());

            Image<Gray, Byte> grayImage2 = new Image<Gray, Byte>(grayImgList.Last());
            EmguType features1 = SURFFeatureDetect(grayImage1, mask);

            VectorOfPointF vp1 = new VectorOfPointF(features1.KeyPoints.ToArray().Select(x => x.Point).ToArray());
            VectorOfPointF vp2 = new VectorOfPointF(vp1.Size);
            VectorOfByte vstatus = new VectorOfByte(vp1.Size);
            VectorOfFloat verr = new VectorOfFloat(vp1.Size);
            Size winsize = new Size(grayImage1.Width, grayImage1.Height);
            int maxLevel = 1; // maximal pyramid level; if 0, pyramids are not used (single level)
            MCvTermCriteria criteria = new MCvTermCriteria(10, 1);

            try
            {
                //GFTTDetector gd = new GFTTDetector();
                //MKeyPoint[] gdkp = gd.Detect(grayImage1);
                //VectorOfPointF gdvp1 = new VectorOfPointF(gdkp.Select(x => x.Point).ToArray());

                CvInvoke.CalcOpticalFlowPyrLK(grayImage1, grayImage2, vp1, vp2, vstatus, verr, winsize, maxLevel, criteria);

                Utils.WriteJsonFile(vp1, grayImgList.First() + "p.dat");
                Utils.WriteJsonFile(vp2, grayImgList.Last() + "p.dat");
            }
            catch (Exception e)
            {
                _log.Debug("error: " + e.Message);
            }

            //List<string> grayImgList = CatchImages(testFile, 0, OutputPath);
            /*
            //Get SIFT Feature
            foreach (string grayImgPath in grayImgList)
            {

                //Image<Gray, float> grayImage = new Image<Gray, float>(grayImgPath);
                //List<Feature> features = SiftFeatureDetect(grayImage);
                Image<Gray, Byte> grayImage = new Image<Gray, Byte>(grayImgPath);
                //List<SiftFeature> features = SiftFeatureDetect(image: grayImage, showDetail: true);
                //Write features To File
                EmguType features = SURFFeatureDetect(grayImage);

                Utils.WriteJsonFile(features, grayImgPath + ".dat");

            }
            */
            _parseSuccess = true;
        }
Example #22
 /// <summary>
 /// Push a value into the standard vector
 /// </summary>
 /// <param name="value">The value to be pushed to the vector</param>
 public void Push(VectorOfPointF value)
 {
     VectorOfVectorOfPointFPush(_ptr, value.Ptr);
 }
Example #23
 /// <summary>
 /// Estimates the extrinsic camera parameters (object pose) from known intrinsic parameters and point correspondences for each view. The coordinates of 3D object points and their corresponding 2D projections must be specified. This function also minimizes back-projection error.
 /// </summary>
 /// <param name="objectPoints">The array of object points</param>
 /// <param name="imagePoints">The array of corresponding image points</param>
 /// <param name="intrinsicMatrix">The camera matrix (A) [fx 0 cx; 0 fy cy; 0 0 1]. </param>
 /// <param name="distortionCoeffs">The vector of distortion coefficients, 4x1 or 1x4 [k1, k2, p1, p2]. If it is IntPtr.Zero, all distortion coefficients are considered 0's.</param>
 /// <param name="rotationVector">The output 3x1 or 1x3 rotation vector (compact representation of a rotation matrix, see cvRodrigues2). </param>
 /// <param name="translationVector">The output 3x1 or 1x3 translation vector</param>
 /// <param name="useExtrinsicGuess">Use the input rotation and translation parameters as a guess</param>
 /// <param name="method">Method for solving a PnP problem</param>
 /// <returns>True if the PnP problem was solved successfully, false otherwise</returns>
 public static bool SolvePnP(
    MCvPoint3D32f[] objectPoints,
    PointF[] imagePoints,
    IInputArray intrinsicMatrix,
    IInputArray distortionCoeffs,
    IOutputArray rotationVector,
    IOutputArray translationVector,
    bool useExtrinsicGuess = false,
    CvEnum.SolvePnpMethod method = CvEnum.SolvePnpMethod.Iterative)
 {
    using (VectorOfPoint3D32F objPtVec = new VectorOfPoint3D32F(objectPoints))
    using (VectorOfPointF imgPtVec = new VectorOfPointF(imagePoints))
       return CvInvoke.SolvePnP(objPtVec, imgPtVec, intrinsicMatrix, distortionCoeffs, rotationVector,
          translationVector, useExtrinsicGuess, method);
 }
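
A minimal sketch for this wrapper: a unit square with its origin corner on the principal point of an ideal pinhole camera; the intrinsics and the expected translation are worked assumptions:

    // Hypothetical usage: with fx = fy = 500 and the square spanning 100 px per
    // unit, tvec should come out near (0, 0, 5).
    MCvPoint3D32f[] objPts =
    {
       new MCvPoint3D32f(0, 0, 0), new MCvPoint3D32f(1, 0, 0),
       new MCvPoint3D32f(1, 1, 0), new MCvPoint3D32f(0, 1, 0)
    };
    PointF[] imgPts =
    {
       new PointF(320, 240), new PointF(420, 240),
       new PointF(420, 340), new PointF(320, 340)
    };
    using (Matrix<double> cameraMatrix = new Matrix<double>(new double[,]
       { { 500, 0, 320 }, { 0, 500, 240 }, { 0, 0, 1 } }))
    using (Matrix<double> distCoeffs = new Matrix<double>(4, 1)) // all zeros
    using (Mat rvec = new Mat())
    using (Mat tvec = new Mat())
    {
       bool ok = SolvePnP(objPts, imgPts, cameraMatrix, distCoeffs, rvec, tvec);
    }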
Example #24
      /// <summary>
       /// Fits a line to a 2D point set
      /// </summary>
      /// <param name="points">Input vector of 2D points.</param>
      /// <param name="distType">The distance used for fitting </param>
      /// <param name="param">Numerical parameter (C) for some types of distances, if 0 then some optimal value is chosen</param>
      /// <param name="reps">Sufficient accuracy for radius (distance between the coordinate origin and the line),  0.01 would be a good default</param>
      /// <param name="aeps">Sufficient accuracy for angle, 0.01 would be a good default</param>
      /// <param name="direction">A normalized vector collinear to the line </param>
      /// <param name="pointOnLine">A point on the line.</param>
      public static void FitLine(
          PointF[] points,
          out PointF direction,
          out PointF pointOnLine,
          CvEnum.DistType distType,
          double param,
          double reps,
          double aeps)
      {
         using (VectorOfPointF pv = new VectorOfPointF(points))
         using (VectorOfFloat line = new VectorOfFloat())
         using (InputArray iaPv = pv.GetInputArray())
         using (OutputArray oaLine = line.GetOutputArray())
         {
            cveFitLine(iaPv, oaLine, distType, param, reps, aeps);
            float[] values = line.ToArray();
            direction = new PointF(values[0], values[1]);
            pointOnLine = new PointF(values[2], values[3]);
         }
      }
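
A minimal usage sketch, fitting exact points from y = 2x + 1:

      // Hypothetical usage: direction should be collinear with the normalized (1, 2),
      // and pointOnLine should satisfy y = 2x + 1.
      PointF[] pts = { new PointF(0, 1), new PointF(1, 3), new PointF(2, 5), new PointF(3, 7) };
      PointF direction, pointOnLine;
      FitLine(pts, out direction, out pointOnLine, CvEnum.DistType.L2, 0, 0.01, 0.01);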
Example #25
 /// <summary>
 /// Estimate rigid transformation between 2 point sets.
 /// </summary>
 /// <param name="sourcePoints">The points from the source image</param>
 /// <param name="destinationPoints">The corresponding points from the destination image</param>
 /// <param name="fullAffine">Indicates if full affine should be performed</param>
 /// <returns>If success, the 2x3 rotation matrix that defines the Affine transform. Otherwise null is returned.</returns>
 public static Mat EstimateRigidTransform(PointF[] sourcePoints, PointF[] destinationPoints, bool fullAffine)
 {
    using (VectorOfPointF srcVec = new VectorOfPointF(sourcePoints))
    using (VectorOfPointF dstVec = new VectorOfPointF(destinationPoints))
       return EstimateRigidTransform(srcVec, dstVec, fullAffine);
 }
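
A short sketch, assuming a pure translation between the two point sets:

    // Hypothetical usage: dst is src shifted by (5, 10), so the recovered 2x3
    // matrix should be roughly [1 0 5; 0 1 10]; a null return signals failure.
    PointF[] src = { new PointF(0, 0), new PointF(10, 0), new PointF(0, 10) };
    PointF[] dst = { new PointF(5, 10), new PointF(15, 10), new PointF(5, 20) };
    using (Mat affine = EstimateRigidTransform(src, dst, false))
    {
       bool succeeded = affine != null;
    }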
Example #26
 /// <summary>
 /// Find the minimum-area rotated rectangle enclosing the specified array of points
 /// </summary>
 /// <param name="points">The collection of points</param>
 /// <returns>The minimum-area rotated rectangle for the array of points</returns>
 public static RotatedRect MinAreaRect(PointF[] points)
 {
    using (VectorOfPointF vpf = new VectorOfPointF(points))
    {
       return MinAreaRect(vpf);
    }
 }
Example #27
 /// <summary>
 /// Find the up-right (axis-aligned) bounding rectangle for the specified array of points
 /// </summary>
 /// <param name="points">The collection of points</param>
 /// <returns>The up-right bounding rectangle for the array of points</returns>
 public static Rectangle BoundingRectangle(PointF[] points)
 {
    using (VectorOfPointF ptVec = new VectorOfPointF(points))
    {
       return CvInvoke.BoundingRectangle(ptVec);
    }
 }
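
A short sketch contrasting this axis-aligned rectangle with MinAreaRect from Example #26, using a 45-degree diamond:

    // Hypothetical usage: the axis-aligned box of the diamond is 20x20, while the
    // minimum-area rotated rectangle is roughly 14.1x14.1 at a 45-degree angle.
    PointF[] diamond =
    {
       new PointF(10, 0), new PointF(20, 10), new PointF(10, 20), new PointF(0, 10)
    };
    Rectangle axisAligned = BoundingRectangle(diamond);
    RotatedRect rotated = MinAreaRect(diamond);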
Example #28
      /*
      #region Kalman Filter
      /// <summary>
      /// Allocates CvKalman and all its matrices and initializes them somehow. 
      /// </summary>
      /// <param name="dynamParams">dimensionality of the state vector</param>
      /// <param name="measureParams">dimensionality of the measurement vector </param>
      /// <param name="controlParams">dimensionality of the control vector </param>
      /// <returns>Pointer to the created Kalman filter</returns>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvCreateKalman(int dynamParams, int measureParams, int controlParams);

      /// <summary>
      /// Adjusts stochastic model state on the basis of the given measurement of the model state.
      /// The function stores adjusted state at kalman->state_post and returns it on output
      /// </summary>
      /// <param name="kalman">Pointer to the structure to be updated</param>
      /// <param name="measurement">Pointer to the structure CvMat containing the measurement vector</param>
      /// <returns>The function stores adjusted state at kalman->state_post and returns it on output</returns>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvKalmanCorrect(ref MCvKalman kalman, IntPtr measurement);


      /// <summary>
      /// Estimates the subsequent stochastic model state by its current state and stores it at kalman->state_pre
      /// The function returns the estimated state
      /// </summary>
      /// <param name="kalman">Kalman filter state</param>
      /// <param name="control">Control vector (uk), should be NULL iff there is no external control (controlParams=0). </param>
      /// <returns>the estimated state</returns>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvKalmanPredict(ref MCvKalman kalman, IntPtr control);

      /// <summary>
      /// Releases the structure CvKalman and all underlying matrices
      /// </summary>
      /// <param name="kalman">reference of the pointer to the Kalman filter structure.</param>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern void cvReleaseKalman(ref IntPtr kalman);
      #endregion
*/
      #region optical flow
      /// <summary>
      /// Calculates optical flow for a sparse feature set using iterative Lucas-Kanade method in pyramids
      /// </summary>
      /// <param name="prev">First frame, at time t</param>
      /// <param name="curr">Second frame, at time t + dt </param>
      /// <param name="prevFeatures">Array of points for which the flow needs to be found</param>
      /// <param name="winSize">Size of the search window of each pyramid level</param>
      /// <param name="level">Maximal pyramid level number. If 0 , pyramids are not used (single level), if 1 , two levels are used, etc</param>
      /// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped</param>
      /// <param name="flags">Flags</param>
      /// <param name="currFeatures">Array of 2D points containing calculated new positions of input features in the second image</param>
      /// <param name="status">Array. Every element of the array is set to 1 if the flow for the corresponding feature has been found, 0 otherwise</param>
      /// <param name="trackError">Array of double numbers containing difference between patches around the original and moved points</param>
      /// <param name="minEigThreshold">the algorithm calculates the minimum eigen value of a 2x2 normal matrix of optical flow equations (this matrix is called a spatial gradient matrix in [Bouguet00]), divided by number of pixels in a window; if this value is less than minEigThreshold, then a corresponding feature is filtered out and its flow is not processed, so it allows to remove bad points and get a performance boost.</param>
      public static void CalcOpticalFlowPyrLK(
         IInputArray prev,
         IInputArray curr,
         PointF[] prevFeatures,
         Size winSize,
         int level,
         MCvTermCriteria criteria,
         out PointF[] currFeatures,
         out Byte[] status,
         out float[] trackError,
         Emgu.CV.CvEnum.LKFlowFlag flags = CvEnum.LKFlowFlag.Default,
         double minEigThreshold = 1.0e-4)
      {
         using (Util.VectorOfPointF prevPts = new Util.VectorOfPointF())
         using (Util.VectorOfPointF nextPts = new Util.VectorOfPointF())
         using (Util.VectorOfByte statusVec = new Util.VectorOfByte())
         using (Util.VectorOfFloat errorVec = new Util.VectorOfFloat())
         {
            prevPts.Push(prevFeatures);

            CalcOpticalFlowPyrLK(
               prev,
               curr,
               prevPts,
               nextPts,
               statusVec,
               errorVec,
               winSize,
               level,
               criteria,
               flags,
               minEigThreshold);
            status = statusVec.ToArray();
            trackError = errorVec.ToArray();
            currFeatures = nextPts.ToArray();
         }
      }
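
A hedged usage sketch for the array overload above; prevFrame and currFrame are assumed grayscale inputs, and GFTTDetector supplies the features to track:

      // Hypothetical usage: track good features between two frames and keep only
      // the points whose status flag is 1.
      using (GFTTDetector featureDetector = new GFTTDetector(400))
      {
         MKeyPoint[] keyPoints = featureDetector.Detect(prevFrame);
         PointF[] prevPts = Array.ConvertAll(keyPoints, kp => kp.Point);
         PointF[] currPts;
         byte[] trackStatus;
         float[] trackErr;
         CalcOpticalFlowPyrLK(prevFrame, currFrame, prevPts, new Size(21, 21), 3,
            new MCvTermCriteria(30, 0.01), out currPts, out trackStatus, out trackErr);
         // prevPts[i] -> currPts[i] is a valid track when trackStatus[i] == 1.
      }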
Example #29
        private Image<Bgr, byte> MarkChessboard(Bitmap bitmap, VectorOfPointF corners)
        {
            var width = configuration.InnerCornersPerChessboardCols;
            var height = configuration.InnerCornersPerChessboardRows;

            var colorImage = new Image<Bgr, byte>(bitmap);
            colorImage.Draw(new LineSegment2DF(corners[0], corners[width - 1]), new Bgr(Color.Lime), 2);
            colorImage.Draw(new LineSegment2DF(corners[width - 1], corners[width*height - 1]), new Bgr(Color.Lime),
                2);
            colorImage.Draw(new LineSegment2DF(corners[width*height - 1], corners[width*(height - 1)]),
                new Bgr(Color.Lime), 2);
            colorImage.Draw(new LineSegment2DF(corners[width*(height - 1)], corners[0]), new Bgr(Color.Lime), 2);

            return colorImage;
        }
Example #30
        public void InitOriginalVideo(string initFile)
        {
            //Capture Image
            OutputPath = _defaultInitVideoPath;

            List<string> grayImgList = CatchImages(initFile, 0, OutputPath);

            if (grayImgList.Count < 3)
            {
                return;
            }

            //Get the optical flow of the L-K features
            Image<Gray, Byte> mask = new Image<Gray, Byte>(grayImgList.First());

            Image<Gray, Byte> grayImage1 = null;//new Image<Gray, Byte>(grayImgList[1]);
            Image<Gray, Byte> grayImage2 = null;//new Image<Gray, Byte>(grayImgList.Last());

            for (int i=1; i< grayImgList.Count-1; i++)
            {
                grayImage1 = new Image<Gray, Byte>(grayImgList[i]);
                grayImage2 = new Image<Gray, Byte>(grayImgList[i + 1]);
                EmguType features1 = SURFFeatureDetect(grayImage1, mask);

                Utils.WriteJsonFile(features1, grayImgList[i] + ".dat");

                //VectorOfPointF vp1 = new VectorOfPointF(features1.KeyPoints.ToArray().Select(x => x.Point).ToArray());
                //VectorOfPointF vp2 = new VectorOfPointF(vp1.Size);
                //VectorOfByte vstatus = new VectorOfByte(vp1.Size);
                //VectorOfFloat verr = new VectorOfFloat(vp1.Size);
                Size winsize = new Size(grayImage1.Width, grayImage1.Height);
                int maxLevel = 1; // maximal pyramid level; if 0, pyramids are not used (single level)
                MCvTermCriteria criteria = new MCvTermCriteria(10, 1);

                try
                {
                    if (i % Constants.DETECTIVE_GROUP_COUNT == 1)
                    {
                        GFTTDetector gd = new GFTTDetector();
                        MKeyPoint[] gdkp = gd.Detect(grayImage1, mask);
                        VectorOfPointF gdvp1 = new VectorOfPointF(gdkp.Select(x => x.Point).ToArray());
                        VectorOfPointF gdvp2 = new VectorOfPointF(gdvp1.Size);
                        VectorOfByte vstatus = new VectorOfByte(gdvp1.Size);
                        VectorOfFloat verr = new VectorOfFloat(gdvp1.Size);

                        CvInvoke.CalcOpticalFlowPyrLK(grayImage1, grayImage2, gdvp1, gdvp2, vstatus, verr, winsize, maxLevel, criteria);
                        Utils.WriteJsonFile(gdvp2, grayImgList[i] + "pp.dat");
                    }
                    else
                    {
                        VectorOfPointF gdvp1 = Utils.ReadJsonFile<VectorOfPointF>(grayImgList[i - 1] + "pp.dat");
                        VectorOfPointF gdvp2 = new VectorOfPointF(gdvp1.Size);
                        VectorOfByte vstatus = new VectorOfByte(gdvp1.Size);
                        VectorOfFloat verr = new VectorOfFloat(gdvp1.Size);

                        CvInvoke.CalcOpticalFlowPyrLK(grayImage1, grayImage2, gdvp1, gdvp2, vstatus, verr, winsize, maxLevel, criteria);
                        Utils.WriteJsonFile(gdvp2, grayImgList[i] + "pp.dat");
                    }

                }
                catch (Exception e)
                {
                    _log.Debug("error: " + e.Message);
                }
            }

            /*
            //Get SIFT Feature
            foreach (string grayImgPath in grayImgList)
            {
                Image<Gray, Byte> grayImage = new Image<Gray, Byte>(grayImgPath);
                //List<SiftFeature> features = SiftFeatureDetect(image: grayImage, showDetail: true);

                //Image<Gray, float> grayImage = new Image<Gray, float>(grayImgPath);
                //List<Feature> features = SiftFeatureDetect(grayImage);

                EmguType features = SURFFeatureDetect(grayImage);

                Utils.WriteJsonFile(features, grayImgPath + ".dat");
            }
            */

            _initSuccess = true;
            OutputPath = string.Empty;
        }