Code Example #1
 /// <summary>
 /// Create a BOWImgDescriptorExtractor
 /// </summary>
 /// <param name="descriptorExtractor">Descriptor extractor that is used to compute descriptors for an input image and its key points.</param>
 /// <param name="descriptorMatcher">Descriptor matcher that is used to find the nearest word of the trained vocabulary for each key point descriptor of the image.</param>
 public BOWImgDescriptorExtractor(Feature2D descriptorExtractor, DescriptorMatcher descriptorMatcher)
 {
     _ptr = Features2DInvoke.cveBOWImgDescriptorExtractorCreate(descriptorExtractor.Feature2DPtr, descriptorMatcher);
 }
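A minimal usage sketch for the constructor above (not part of the library source): the helper name ComputeBowDescriptor, the choice of KAZE as the Feature2D, and the SetVocabulary/Compute calls are assumptions based on the OpenCV BOWImgDescriptorExtractor API.

 using Emgu.CV;
 using Emgu.CV.Features2D;
 using Emgu.CV.Util;

 public static class BowUsageSketch
 {
     /// <summary>
     /// Compute a bag-of-words descriptor for one image, given a pre-trained vocabulary
     /// (a Mat with one visual-word center per row).
     /// </summary>
     public static Mat ComputeBowDescriptor(Mat image, Mat vocabulary)
     {
         using (KAZE detector = new KAZE())                            // any Feature2D can act as the extractor
         using (BFMatcher matcher = new BFMatcher(DistanceType.L2))
         using (BOWImgDescriptorExtractor bow = new BOWImgDescriptorExtractor(detector, matcher))
         using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
         {
             bow.SetVocabulary(vocabulary);                // visual words, e.g. from a k-means BOW trainer
             detector.DetectRaw(image, keypoints);         // detect key points first
             Mat bowDescriptor = new Mat();
             bow.Compute(image, keypoints, bowDescriptor); // histogram of the key points over the vocabulary
             return bowDescriptor;
         }
     }
 }
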
Code Example #2
 /// <summary>
 /// Create a BOWImgDescriptorExtractor
 /// </summary>
 /// <param name="descriptorExtractor">Descriptor extractor that is used to compute descriptors for an input image and its key points.</param>
 /// <param name="descriptorMatcher">Descriptor matcher that is used to find the nearest word of the trained vocabulary for each key point descriptor of the image.</param>
 public BOWImgDescriptorExtractor(Feature2D descriptorExtractor, DescriptorMatcher descriptorMatcher)
 {
    _ptr = BOWImgDescriptorExtractorInvoke.CvBOWImgDescriptorExtractorCreate(descriptorExtractor.Feature2DPtr, descriptorMatcher);
 }
Code Example #3
 /// <summary>
 /// Finds centers in the grid of circles
 /// </summary>
 /// <param name="image">Source chessboard view</param>
 /// <param name="patternSize">The number of inner circle per chessboard row and column</param>
 /// <param name="flags">Various operation flags</param>
 /// <param name="featureDetector">The feature detector. Use a SimpleBlobDetector for default</param>
 /// <param name="centers">output array of detected centers.</param>
 /// <returns>True if grid found.</returns>
 public static bool FindCirclesGrid(IInputArray image, Size patternSize, IOutputArray centers, CvEnum.CalibCgType flags, Feature2D featureDetector)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaCenters = centers.GetOutputArray())
       return cveFindCirclesGrid(iaImage, ref patternSize, oaCenters, flags, featureDetector.Feature2DPtr);
 }
Code Example #4
      /*
      public static void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, MCvScalar color)
      {
         TestDrawLine(img, startX, startY, endX, endY, color.v0, color.v1, color.v2, color.v3);
      }

      [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention, EntryPoint="testDrawLine")]
      private static extern void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, double v0, double v1, double v2, double v3);

      /// <summary>
      /// Implements the chamfer matching algorithm on images taking into account both distance from
      /// the template pixels to the nearest pixels and orientation alignment between template and image
      /// contours.
      /// </summary>
      /// <param name="img">The edge image where search is performed</param>
      /// <param name="templ">The template (an edge image)</param>
      /// <param name="contours">The output contours</param>
      /// <param name="cost">The cost associated with the matching</param>
      /// <param name="templScale">The template scale</param>
      /// <param name="maxMatches">The maximum number of matches</param>
      /// <param name="minMatchDistance">The minimum match distance</param>
      /// <param name="padX">PadX</param>
      /// <param name="padY">PadY</param>
      /// <param name="scales">Scales</param>
      /// <param name="minScale">Minimum scale</param>
      /// <param name="maxScale">Maximum scale</param>
      /// <param name="orientationWeight">Orientation weight</param>
      /// <param name="truncate">Truncate</param>
      /// <returns>The number of matches</returns>
      public static int ChamferMatching(Mat img, Mat templ,
         out Point[][] contours, out float[] cost,
         double templScale = 1, int maxMatches = 20,
         double minMatchDistance = 1.0, int padX = 3,
         int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
         double orientationWeight = 0.5, double truncate = 20)
      {
         using (Emgu.CV.Util.VectorOfVectorOfPoint vecOfVecOfPoint = new Util.VectorOfVectorOfPoint())
         using (Emgu.CV.Util.VectorOfFloat vecOfFloat = new Util.VectorOfFloat())
         {
            int count = cveChamferMatching(img, templ, vecOfVecOfPoint, vecOfFloat, templScale, maxMatches, minMatchDistance, padX, padY, scales, minScale, maxScale, orientationWeight, truncate);
            contours = vecOfVecOfPoint.ToArrayOfArray();
            cost = vecOfFloat.ToArray();
            return count;
         }
      }
      [DllImport(ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      private static extern int cveChamferMatching(
         IntPtr img, IntPtr templ,
         IntPtr results, IntPtr cost,
         double templScale, int maxMatches,
         double minMatchDistance, int padX,
         int padY, int scales, double minScale, double maxScale,
         double orientationWeight, double truncate);
      */

      /// <summary>
      /// Finds centers in the grid of circles
      /// </summary>
      /// <param name="image">Source chessboard view</param>
      /// <param name="patternSize">The number of inner circle per chessboard row and column</param>
      /// <param name="flags">Various operation flags</param>
      /// <param name="featureDetector">The feature detector. Use a SimpleBlobDetector for default</param>
      /// <returns>The center of circles detected if the chess board pattern is found, otherwise null is returned</returns>
      public static PointF[] FindCirclesGrid(Image<Gray, Byte> image, Size patternSize, CvEnum.CalibCgType flags, Feature2D featureDetector)
      {
         using (Util.VectorOfPointF vec = new Util.VectorOfPointF())
         {
            bool patternFound =
               FindCirclesGrid(
                  image,
                  patternSize,
                  vec,
                  flags,
                  featureDetector
                  );
            return patternFound ? vec.ToArray() : null;
         }
      }
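As a usage sketch (not part of the library source), the PointF[] overload above can be called directly; the placement on CvInvoke, the 4 x 11 pattern size, and the CalibCgType.AsymmetricGrid flag value below are assumptions and should be adjusted to the actual calibration target.

      using System.Drawing;
      using Emgu.CV;
      using Emgu.CV.Features2D;
      using Emgu.CV.Structure;

      public static class CirclesGridUsageSketch
      {
         public static PointF[] DetectAsymmetricGrid(Image<Gray, byte> view)
         {
            Size patternSize = new Size(4, 11);   // circles per row x circles per column of the target
            using (SimpleBlobDetector blobDetector = new SimpleBlobDetector())
            {
               // Returns the detected centers, or null when the pattern is not found
               return CvInvoke.FindCirclesGrid(view, patternSize, Emgu.CV.CvEnum.CalibCgType.AsymmetricGrid, blobDetector);
            }
         }
      }
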
Code Example #5
 public CreateProjectViewModel()
 {
     _detector = GetNativeDetector(SelectedDetector);
     _descripter = GetNativeDescripter(SelectedDescripter);
 }
Code Example #6
File: AutoTestFeatures2d.cs Project: Delaley/emgucv
      public static bool TestFeature2DTracker(Feature2D keyPointDetector, Feature2D descriptorGenerator)
      {
         //for (int k = 0; k < 1; k++)
         {
            Feature2D feature2D = null;
            if (keyPointDetector == descriptorGenerator)
            {
               feature2D = keyPointDetector as Feature2D;
            }

            Mat modelImage = EmguAssert.LoadMat("box.png");
            //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
            //modelImage = modelImage.Resize(400, 400, true);

            //modelImage._EqualizeHist();

            #region extract features from the object image
            Stopwatch stopwatch = Stopwatch.StartNew();
            VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
            Mat modelDescriptors = new Mat();
            if (feature2D != null)
            {
               feature2D.DetectAndCompute(modelImage, null, modelKeypoints, modelDescriptors, false);
            }
            else
            {
               keyPointDetector.DetectRaw(modelImage, modelKeypoints);
               descriptorGenerator.Compute(modelImage, modelKeypoints, modelDescriptors);
            }
            stopwatch.Stop();
            EmguAssert.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
            #endregion

            //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
            Image<Gray, Byte> observedImage = EmguAssert.LoadImage<Gray, byte>("box_in_scene.png");
            //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
            //image = image.Resize(400, 400, true);

            //observedImage._EqualizeHist();
            #region extract features from the observed image
            stopwatch.Reset();
            stopwatch.Start();
            VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
            using (Mat observedDescriptors = new Mat())
            {
               if (feature2D != null)
               {
                  
                  feature2D.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);
               }
               else
               {
                  keyPointDetector.DetectRaw(observedImage, observedKeypoints);
                  descriptorGenerator.Compute(observedImage, observedKeypoints, observedDescriptors);
               }

               stopwatch.Stop();
               EmguAssert.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
            #endregion

               //Merge the object image and the observed image into one big image for display
               Image<Gray, Byte> res = modelImage.ToImage<Gray, Byte>().ConcateVertical(observedImage);

               Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
               PointF[] pts = new PointF[] {
                  new PointF(rect.Left, rect.Bottom),
                  new PointF(rect.Right, rect.Bottom),
                  new PointF(rect.Right, rect.Top),
                  new PointF(rect.Left, rect.Top)};

               Mat homography = null;

               stopwatch.Reset();
               stopwatch.Start();

               int k = 2;
               DistanceType dt = modelDescriptors.Depth == CvEnum.DepthType.Cv8U ? DistanceType.Hamming : DistanceType.L2;
               //using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
               //using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
               using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
               using (BFMatcher matcher = new BFMatcher(dt))
               {
                  //ParamDef[] parameterDefs = matcher.GetParams();
                  matcher.Add(modelDescriptors);
                  matcher.KnnMatch(observedDescriptors, matches, k, null);

                  Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                  mask.SetTo(new MCvScalar(255));
                  //mask.SetValue(255);
                  Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                  int nonZeroCount = CvInvoke.CountNonZero(mask);
                  if (nonZeroCount >= 4)
                  {
                     nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints, matches, mask, 1.5, 20);
                     if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints, observedKeypoints, matches, mask, 2);
                  }
               }
               stopwatch.Stop();
               EmguAssert.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));

               bool success = false;
               if (homography != null)
               {
                  PointF[] points = pts.Clone() as PointF[];
                  points = CvInvoke.PerspectiveTransform(points, homography);
                  //homography.ProjectPoints(points);

                  for (int i = 0; i < points.Length; i++)
                     points[i].Y += modelImage.Height;
                  
                  res.DrawPolyline(
#if NETFX_CORE
                     Extensions.
#else
                     Array.
#endif
                     ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);

                  success = true;
               }
               //Emgu.CV.UI.ImageViewer.Show(res);
               return success;
            }

            

            /*
            stopwatch.Reset(); stopwatch.Start();
            //set the initial region to be the whole image
            using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
            {
               priorMask.SetValue(1.0);
               homography = tracker.CamShiftTrack(
                  observedFeatures,
                  (RectangleF)observedImage.ROI,
                  priorMask);
            }
            Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));
            
            if (homography != null) //set the initial tracking window to be the whole image
            {
               PointF[] points = pts.Clone() as PointF[];
               homography.ProjectPoints(points);

               for (int i = 0; i < points.Length; i++)
                  points[i].Y += modelImage.Height;
               res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
               return true;
            }
            else
            {
               return false;
            }*/

         }
      }
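A minimal invocation sketch for the test above (an assumption, not part of the original file): KAZE is used here only as an example Feature2D that can both detect key points and compute descriptors, and the method is assumed to sit in the same test class so that TestFeature2DTracker and EmguAssert are in scope.

      public static void TestFeature2DTrackerWithKaze()
      {
         using (KAZE kaze = new KAZE())
         {
            // Passing the same instance for detector and descriptor makes the test take the
            // single DetectAndCompute path instead of DetectRaw followed by Compute.
            bool matched = TestFeature2DTracker(kaze, kaze);
            EmguAssert.WriteLine(String.Format("Model found in observed image: {0}", matched));
         }
      }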