public StopSignDetector(IInputArray stopSignModel)
      {
         _detector = new SURF(500);
         using (Mat redMask = new Mat())
         {
            GetRedPixelMask(stopSignModel, redMask);
            _modelKeypoints = new VectorOfKeyPoint();
            _modelDescriptors = new Mat();
            _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
            if (_modelKeypoints.Size == 0)
               throw new Exception("No image feature has been found in the stop sign model");
         }

         _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
         _modelDescriptorMatcher.Add(_modelDescriptors);

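         //Reference contour of a unit octagon; candidate contours can later be compared
         //against this shape (e.g. via CvInvoke.MatchShapes) to pick out stop sign candidates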
         _octagon = new VectorOfPoint(
            new Point[]
            {
               new Point(1, 0),
               new Point(2, 0),
               new Point(3, 1),
               new Point(3, 2),
               new Point(2, 3),
               new Point(1, 3),
               new Point(0, 2),
               new Point(0, 1)
            });

      }
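A hypothetical way to exercise the constructor (the file name is an assumption, not part of the original):

      using (Mat stopSignModel = CvInvoke.Imread("stop-sign-model.png"))
      {
         StopSignDetector detector = new StopSignDetector(stopSignModel);
      }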
Example #2
      public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
      {
         int k = 2;
         double uniquenessThreshold = 0.80;

         Stopwatch watch;
         homography = null;

         modelKeyPoints = new VectorOfKeyPoint();
         observedKeyPoints = new VectorOfKeyPoint();

         using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
         using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
         {
            KAZE featureDetector = new KAZE();

            //extract features from the object image
            Mat modelDescriptors = new Mat();
            featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            Mat observedDescriptors = new Mat();
            featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            
            matcher.Add(modelDescriptors);

            matcher.KnnMatch(observedDescriptors, matches, k, null);
            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            int nonZeroCount = CvInvoke.CountNonZero(mask);
            if (nonZeroCount >= 4)
            {
               nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                  matches, mask, 1.5, 20);
               if (nonZeroCount >= 4)
                  homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                     observedKeyPoints, matches, mask, 2);
            }

            watch.Stop();

         }
         matchTime = watch.ElapsedMilliseconds;
      }
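A minimal sketch of calling FindMatch above; the file names follow the tests later in this section, and ImreadModes.Grayscale assumes Emgu CV 3.x naming:

      long matchTime;
      VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
      Mat mask, homography;
      using (Mat modelImage = CvInvoke.Imread("box.png", ImreadModes.Grayscale))
      using (Mat observedImage = CvInvoke.Imread("box_in_scene.png", ImreadModes.Grayscale))
      using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
      {
         FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches, out mask, out homography);
         //homography stays null when fewer than 4 matches survive the voting steps
      }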
        public float classify(Image<Bgr, Byte> predImg)
        {
            using (SURF detector = new SURF(30))
            using (BFMatcher matcher = new BFMatcher(DistanceType.L2))
            using (Image<Gray, Byte> testImgGray = predImg.Convert<Gray, Byte>())
            using (VectorOfKeyPoint testKeyPoints = new VectorOfKeyPoint())
            using (Mat testBOWDescriptor = new Mat())
            using (BOWImgDescriptorExtractor bowDE = new BOWImgDescriptorExtractor(detector, matcher))
            {
                float result = 0;
                bowDE.SetVocabulary(vocabulary);
                //SURF expects a single-channel image, so detect and compute on the grayscale copy
                detector.DetectRaw(testImgGray, testKeyPoints, null);
                bowDE.Compute(testImgGray, testKeyPoints, testBOWDescriptor);
                if(!testBOWDescriptor.IsEmpty)
                    result = svmClassifier.Predict(testBOWDescriptor);

                //result will indicate whether test image belongs to trainDescriptor label 1, 2
                return result;
            }
        }
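A hypothetical call (the classifier instance and file name are assumptions):

        using (Image<Bgr, Byte> img = new Image<Bgr, Byte>("test.jpg"))
        {
            float label = bowClassifier.classify(img);   //1-based folder index assigned by computeAndExtract below
        }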
        public void computeAndExtract()
        {
            using (detector = new SURF(30))
            using (matcher = new BFMatcher(DistanceType.L2))
            {
                bowDE = new BOWImgDescriptorExtractor(detector, matcher);
                BOWKMeansTrainer bowTrainer = new BOWKMeansTrainer(100, new MCvTermCriteria(100, 0.01), 3, Emgu.CV.CvEnum.KMeansInitType.PPCenters);

                foreach(FileInfo[] folder in _folders)
                    foreach (FileInfo file in folder)
                    {
                        using (Image<Bgr, Byte> model = new Image<Bgr, byte>(file.FullName))
                        using (VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint())
                        {
                            //Detect SURF key points from the image
                            detector.DetectRaw(model, modelKeyPoints);
                            //Compute detected SURF key points & extract modelDescriptors
                            Mat modelDescriptors = new Mat();
                            detector.Compute(model, modelKeyPoints, modelDescriptors);
                            //Add the extracted BoW modelDescriptors into BOW trainer
                            bowTrainer.Add(modelDescriptors);
                        }
                        input_num++;
                    }

                //Cluster the feature vectors
                bowTrainer.Cluster(vocabulary);

                //Store the vocabulary
                bowDE.SetVocabulary(vocabulary);

                //training descriptors
                tDescriptors = new Mat();

                labels = new Matrix<int>(1, input_num);
                int index = 0;
                //compute and store BOWDescriptors and set labels
                for (int i = 1; i <= _folders.Count; i++)
                {
                    FileInfo[] files = _folders[i-1];
                    for (int j = 0; j < files.Length; j++)
                    {
                        FileInfo file = files[j];
                        using (Image<Bgr, Byte> model = new Image<Bgr, Byte>(file.FullName))
                        using (VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint())
                        using (Mat modelBOWDescriptor = new Mat())
                        {
                            detector.DetectRaw(model, modelKeyPoints);
                            bowDE.Compute(model, modelKeyPoints, modelBOWDescriptor);

                            tDescriptors.PushBack(modelBOWDescriptor);
                            labels[0, index++] = i;

                        }
                    }
                }
            }
        }
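computeAndExtract fills tDescriptors and labels but does not train the classifier; a minimal sketch of training the svmClassifier field used by classify above, assuming Emgu's Emgu.CV.ML.SVM API:

            svmClassifier = new SVM();
            svmClassifier.SetKernel(SVM.SvmKernelType.Linear);
            svmClassifier.Type = SVM.SvmType.CSvc;
            //labels is 1 x input_num; transpose it so there is one response row per descriptor row
            svmClassifier.Train(tDescriptors, DataLayoutType.RowSample, labels.Transpose());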
Example #5
      public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
      {
         int k = 2;
         double uniquenessThreshold = 0.8;
         double hessianThresh = 300;
         
         Stopwatch watch;
         homography = null;

         modelKeyPoints = new VectorOfKeyPoint();
         observedKeyPoints = new VectorOfKeyPoint();

         #if !__IOS__
         if (CudaInvoke.HasCuda)
         {
            CudaSURF surfCuda = new CudaSURF((float) hessianThresh);
            using (GpuMat gpuModelImage = new GpuMat(modelImage))
            //extract features from the object image
            using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
            using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
            using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
            {
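               //Download the model keypoints to the CPU; the voting and homography steps
               //below run on the CPU and need CPU-side keypoints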
               surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
               watch = Stopwatch.StartNew();

               // extract features from the observed image
               using (GpuMat gpuObservedImage = new GpuMat(observedImage))
               using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
               using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
               //using (GpuMat tmp = new GpuMat())
               //using (Stream stream = new Stream())
               {
                  matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                  surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                  mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                  mask.SetTo(new MCvScalar(255));
                  Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                  int nonZeroCount = CvInvoke.CountNonZero(mask);
                  if (nonZeroCount >= 4)
                  {
                     nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                        matches, mask, 1.5, 20);
                     if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                           observedKeyPoints, matches, mask, 2);
                  }
               }
               watch.Stop();
            }
         }
         else
         #endif
         {
            using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
            using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
            {
               SURF surfCPU = new SURF(hessianThresh);
               //extract features from the object image
               UMat modelDescriptors = new UMat();
               surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

               watch = Stopwatch.StartNew();

               // extract features from the observed image
               UMat observedDescriptors = new UMat();
               surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
               BFMatcher matcher = new BFMatcher(DistanceType.L2);
               matcher.Add(modelDescriptors);

               matcher.KnnMatch(observedDescriptors, matches, k, null);
               mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
               mask.SetTo(new MCvScalar(255));
               Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

               int nonZeroCount = CvInvoke.CountNonZero(mask);
               if (nonZeroCount >= 4)
               {
                  nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                     matches, mask, 1.5, 20);
                  if (nonZeroCount >= 4)
                     homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                        observedKeyPoints, matches, mask, 2);
               }

               watch.Stop();
            }
         }
         matchTime = watch.ElapsedMilliseconds;
      }
        public Mat Calculate(Bitmap referenceBitmap, Bitmap currentBitmap)
        {
            Mat homography;
            using (var detector = new SURF(threshold))
            using (var model = new Image<Gray, byte>(referenceBitmap))
            using (var modelMat = model.Mat.ToUMat(AccessType.Read))
            using (var modelKeyPoints = new VectorOfKeyPoint())
            using (var modelDescriptors = new UMat())
            using (var observed = new Image<Gray, byte>(currentBitmap))
            using (var observedMat = observed.Mat.ToUMat(AccessType.Read))
            using (var observedKeyPoints = new VectorOfKeyPoint())
            using (var observedDescriptors = new UMat())
            using (var matcher = new BFMatcher(DistanceType.L2))
            using (var matches = new VectorOfVectorOfDMatch())
            {
                detector.DetectAndCompute(modelMat, null, modelKeyPoints, modelDescriptors, false);
                detector.DetectAndCompute(observedMat, null, observedKeyPoints, observedDescriptors, false);

                matcher.Add(modelDescriptors);
                matcher.KnnMatch(observedDescriptors, matches, k, null);

                homography = TryFindHomography(modelKeyPoints, observedKeyPoints, matches);
            }

            return homography;
        }
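TryFindHomography is not shown with this snippet; a sketch consistent with the voting pipeline used throughout these examples (the uniqueness threshold and scale/rotation bins are assumptions) might be:

        private static Mat TryFindHomography(VectorOfKeyPoint modelKeyPoints, VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches)
        {
            using (Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1))
            {
                mask.SetTo(new MCvScalar(255));
                Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
                if (CvInvoke.CountNonZero(mask) < 4)
                    return null;
                if (Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20) < 4)
                    return null;
                return Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
            }
        }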
        private void CalculateMatch(string username, string keyDescClass)
        {
            try
            {
                string decKeyClass = DecryptText(keyDescClass);

                KeypointsDescriptors kpDescriptors = DeSerializeObject <KeypointsDescriptors>(decKeyClass);

                Matrix<byte> useDescriptors = kpDescriptors.descriptors;
                VectorOfKeyPoint useKeyPoints = kpDescriptors.keyPoints;


                using (var db = new BioMexDatabaseEntities1())
                {
                    BFMatcher matcher = new BFMatcher(DistanceType.L2);
                    Matrix<byte> mask;

                    User use = (from person in db.Users where person.US_USERNAME == username select person).FirstOrDefault();

                    //Make sure the user exists before dereferencing use, otherwise an unknown
                    //username throws a NullReferenceException
                    if (use != null && use.US_USERNAME.Equals(username))
                    {
                        var regUser = new UserClass(use.US_USERNAME, use.US_PASSWORD, use.IdealFeatures.FirstOrDefault().IF_PASSWORD_COUNT, use.IdealFeatures.FirstOrDefault().IF_SHIFT_CLASS,
                                                    use.IdealFeatures.FirstOrDefault().IF_TYPING_SPEED, 0, DeserializeIntegers(use.IdealFeatures.FirstOrDefault().IF_KEY_ORDER.ToString()).ToArray(), DeserializeIntegers(use.IdealFeatures.FirstOrDefault().IF_KEY_LATENCIES.ToString()).ToArray(),
                                                    DeserializeIntegers(use.IdealFeatures.FirstOrDefault().IF_KEY_PRESS_DURATION.ToString()).ToArray(), DeserializeIntegers(use.IdealFeatures.FirstOrDefault().IF_PAIRED_KEYS.ToString()).ToArray());

                        matcher.Add(useDescriptors);

                        Matrix<byte> observedDesc = DeSerializeObject<Matrix<byte>>(use.Features.FirstOrDefault().FaceFeature.FF_DESCRIPTORS);
                        VectorOfKeyPoint observedKeypoints = DeSerializeObject<VectorOfKeyPoint>(use.Features.FirstOrDefault().FaceFeature.FF_KEY_POINTS);

                        VectorOfVectorOfDMatch vectMatch = new VectorOfVectorOfDMatch();
                        matcher.KnnMatch(observedDesc, vectMatch, 2, null);

                        mask = new Matrix<byte>(vectMatch.Size, 1);
                        mask.SetValue(255);
                        //Vote directly on the mask so the votes are written in place;
                        //routing it through ToUMat().ToMat(...) would only update a copy
                        Features2DToolbox.VoteForUniqueness(vectMatch, 0.8, mask.Mat);
                    }
                }
            }
            catch (Exception ex)
            {
                System.Diagnostics.Debug.WriteLine("Error while matching");
                System.Diagnostics.Debug.WriteLine(ex.Message);
            }
        }
Example #8
        private void timer1_Tick(object sender, EventArgs e)
        {
            if (time == 10)
            {
                Mat frame = new Mat();
                capture.Retrieve(frame, 0);
                Mat grayVideo = new Mat();
                CvInvoke.CvtColor(frame, grayVideo, ColorConversion.Bgr2Gray);
                UMat videoDescriptors = new UMat();
                VectorOfKeyPoint videoKeyPoints = new VectorOfKeyPoint();
                calculatedescriptors(grayVideo, videoDescriptors, videoKeyPoints);
                VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

                BFMatcher matcher = new BFMatcher(DistanceType.L2);
                matcher.Add(originalImageDescriptors);
                matcher.KnnMatch(videoDescriptors, matches, 2, null);
                Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                mask.SetTo(new MCvScalar(255));

                Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
                Mat homography = null;   //stays null unless enough matches survive the voting below
                int nonZeroCount = CvInvoke.CountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(originalImageKeyPoints, videoKeyPoints,
                       matches, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(originalImageKeyPoints, videoKeyPoints, matches, mask, 2);
                }

                Mat result = new Mat();
                Features2DToolbox.DrawMatches(grayImage, originalImageKeyPoints, grayVideo, videoKeyPoints,
                   matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
                if (homography != null)
                {
                    //draw a rectangle along the projected model
                    Rectangle rect = new Rectangle(Point.Empty, grayImage.Size);
                    PointF[] pts = new PointF[]
                    {
                              new PointF(rect.Left, rect.Bottom),
                              new PointF(rect.Right, rect.Bottom),
                              new PointF(rect.Right, rect.Top),
                              new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);

                    Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
                    using (VectorOfPoint vp = new VectorOfPoint(points))
                    {
                        CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
                    }
                    viewer.Image = result;
                    
                }

                
                time = 0; 
            }
            else
            {
                time++;
            }
        }
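calculatedescriptors is referenced above but not shown; a minimal sketch (the SURF hessian threshold is an assumption):

        private void calculatedescriptors(Mat image, UMat descriptors, VectorOfKeyPoint keyPoints)
        {
            using (SURF detector = new SURF(300))
            {
                detector.DetectAndCompute(image, null, keyPoints, descriptors, false);
            }
        }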
 protected override void DisposeObject()
 {
    if (_modelKeypoints != null)
    {
       _modelKeypoints.Dispose();
       _modelKeypoints = null;
    }
    if (_modelDescriptors != null)
    {
       _modelDescriptors.Dispose();
       _modelDescriptors = null;
    }
    if (_modelDescriptorMatcher != null)
    {
       _modelDescriptorMatcher.Dispose();
       _modelDescriptorMatcher = null;
    }
    if (_octagon != null)
    {
       _octagon.Dispose();
       _octagon = null;
    }
 }
Example #10
      public void TestBOWKmeansTrainer2()
      {
         Image<Gray, byte> box = EmguAssert.LoadImage<Gray, byte>("box.png");
         Brisk detector = new Brisk(30, 3, 1.0f);
         VectorOfKeyPoint kpts = new VectorOfKeyPoint();
         Mat descriptors = new Mat();
         detector.DetectAndCompute(box, null, kpts, descriptors, false);
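         //BRISK descriptors are 8-bit binary, but k-means clustering needs CV_32F,
         //hence the conversion below; the vocabulary is converted back to CV_8U
         //further down so its type matches the BRISK descriptors during matching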
         Mat descriptorsF = new Mat();
         descriptors.ConvertTo(descriptorsF, CvEnum.DepthType.Cv32F);
         //Matrix<float> descriptorsF = descriptors.Convert<float>();
         BOWKMeansTrainer trainer = new BOWKMeansTrainer(100, new MCvTermCriteria(), 3, CvEnum.KMeansInitType.PPCenters);
         trainer.Add(descriptorsF);
         Mat vocabulary = new Mat();
         trainer.Cluster(vocabulary);

         BFMatcher matcher = new BFMatcher(DistanceType.L2);

         BOWImgDescriptorExtractor extractor = new BOWImgDescriptorExtractor(detector, matcher);
         Mat vocabularyByte = new Mat();
         vocabulary.ConvertTo(vocabularyByte, CvEnum.DepthType.Cv8U);
         extractor.SetVocabulary(vocabularyByte);

         Mat descriptors2 = new Mat();
         extractor.Compute(box, kpts, descriptors2);
      }
Example #11
      public void TestBOWKmeansTrainer()
      {
         Image<Gray, byte> box = EmguAssert.LoadImage<Gray, byte>("box.png");
         SURF detector = new SURF(500);
         VectorOfKeyPoint kpts = new VectorOfKeyPoint();
         Mat descriptors = new Mat();
         detector.DetectAndCompute(box, null, kpts, descriptors, false);
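         //SURF descriptors are already CV_32F, so unlike the BRISK test above no conversion is needed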

         BOWKMeansTrainer trainer = new BOWKMeansTrainer(100, new MCvTermCriteria(), 3, CvEnum.KMeansInitType.PPCenters);
         trainer.Add(descriptors);
         Mat vocabulary = new Mat();
         trainer.Cluster(vocabulary);

         BFMatcher matcher = new BFMatcher(DistanceType.L2);

         BOWImgDescriptorExtractor extractor = new BOWImgDescriptorExtractor(detector, matcher);
         extractor.SetVocabulary(vocabulary);

         Mat descriptors2 = new Mat();
         extractor.Compute(box, kpts, descriptors2);
      }
Example #12
      public static bool TestFeature2DTracker(Feature2D keyPointDetector, Feature2D descriptorGenerator)
      {
         //for (int k = 0; k < 1; k++)
         {
            Feature2D feature2D = null;
            if (keyPointDetector == descriptorGenerator)
            {
               feature2D = keyPointDetector;   //detector and descriptor generator are the same Feature2D
            }

            Mat modelImage = EmguAssert.LoadMat("box.png");
            //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
            //modelImage = modelImage.Resize(400, 400, true);

            //modelImage._EqualizeHist();

            #region extract features from the object image
            Stopwatch stopwatch = Stopwatch.StartNew();
            VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
            Mat modelDescriptors = new Mat();
            if (feature2D != null)
            {
               feature2D.DetectAndCompute(modelImage, null, modelKeypoints, modelDescriptors, false);
            }
            else
            {
               keyPointDetector.DetectRaw(modelImage, modelKeypoints);
               descriptorGenerator.Compute(modelImage, modelKeypoints, modelDescriptors);
            }
            stopwatch.Stop();
            EmguAssert.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
            #endregion

            //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
            Image<Gray, Byte> observedImage = EmguAssert.LoadImage<Gray, byte>("box_in_scene.png");
            //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
            //image = image.Resize(400, 400, true);

            //observedImage._EqualizeHist();
            #region extract features from the observed image
            stopwatch.Reset();
            stopwatch.Start();
            VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
            using (Mat observedDescriptors = new Mat())
            {
               if (feature2D != null)
               {
                  
                  feature2D.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);
               }
               else
               {
                  keyPointDetector.DetectRaw(observedImage, observedKeypoints);
                  descriptorGenerator.Compute(observedImage, observedKeypoints, observedDescriptors);
               }

               stopwatch.Stop();
               EmguAssert.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
            #endregion

               //Merge the object image and the observed image into one big image for display
               Image<Gray, Byte> res = modelImage.ToImage<Gray, Byte>().ConcateVertical(observedImage);

               Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
               PointF[] pts = new PointF[] { 
               new PointF(rect.Left, rect.Bottom),
               new PointF(rect.Right, rect.Bottom),
               new PointF(rect.Right, rect.Top),
               new PointF(rect.Left, rect.Top)};

               Mat homography = null;

               stopwatch.Reset();
               stopwatch.Start();

               int k = 2;
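               //Choose the norm from the descriptor depth: binary (8U) descriptors such as
               //BRISK/ORB need Hamming distance, float descriptors such as SURF/KAZE use L2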
               DistanceType dt = modelDescriptors.Depth == CvEnum.DepthType.Cv8U ? DistanceType.Hamming : DistanceType.L2;
               //using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
               //using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
               using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
               using (BFMatcher matcher = new BFMatcher(dt))
               {
                  //ParamDef[] parameterDefs = matcher.GetParams();
                  matcher.Add(modelDescriptors);
                  matcher.KnnMatch(observedDescriptors, matches, k, null);

                  Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                  mask.SetTo(new MCvScalar(255));
                  //mask.SetValue(255);
                  Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                  int nonZeroCount = CvInvoke.CountNonZero(mask);
                  if (nonZeroCount >= 4)
                  {
                     nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints, matches, mask, 1.5, 20);
                     if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints, observedKeypoints, matches, mask, 2);
                  }
               }
               stopwatch.Stop();
               EmguAssert.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));

               bool success = false;
               if (homography != null)
               {
                  PointF[] points = pts.Clone() as PointF[];
                  points = CvInvoke.PerspectiveTransform(points, homography);
                  //homography.ProjectPoints(points);

                  for (int i = 0; i < points.Length; i++)
                     points[i].Y += modelImage.Height;
                  
                  res.DrawPolyline(
#if NETFX_CORE
                     Extensions.
#else
                     Array.
#endif
                     ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);

                  success = true;
               }
               //Emgu.CV.UI.ImageViewer.Show(res);
               return success;
            }

            

            /*
            stopwatch.Reset(); stopwatch.Start();
            //set the initial region to be the whole image
            using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
            {
               priorMask.SetValue(1.0);
               homography = tracker.CamShiftTrack(
                  observedFeatures,
                  (RectangleF)observedImage.ROI,
                  priorMask);
            }
            Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));
            
            if (homography != null) //set the initial tracking window to be the whole image
            {
               PointF[] points = pts.Clone() as PointF[];
               homography.ProjectPoints(points);

               for (int i = 0; i < points.Length; i++)
                  points[i].Y += modelImage.Height;
               res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
               return true;
            }
            else
            {
               return false;
            }*/

         }
      }
Example #13
        public Image Compare2Features(
            string destFeatureFile,
            string origFeatureFile,
            string vpFileDest,
            string vpFileOrig,
            string destImageFile = "",
            string origImageFile = "",
            bool needMatchedImage = false)
        {
            EmguType destFeatures = Utils.ReadJsonFile<EmguType>(destFeatureFile);
            EmguType origFeatures = Utils.ReadJsonFile<EmguType>(origFeatureFile);

            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            matcher.Add(origFeatures.Descriptors);
            matcher.KnnMatch(destFeatures.Descriptors, matches, 2, null);
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

            Mat homography = null;
            int count = CvInvoke.CountNonZero(mask);      //used to locate the template inside the observed image
            if (count >= 4)
            {
                count = Features2DToolbox.VoteForSizeAndOrientation(origFeatures.KeyPoints, destFeatures.KeyPoints, matches, mask, 1.5, 20);
                if (count >= 4)
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(origFeatures.KeyPoints, destFeatures.KeyPoints, matches, mask, 2);
            }

            Mat showImage = null;
            Mat pointImage = null;
            if (needMatchedImage
                && !string.IsNullOrWhiteSpace(destImageFile)
                && !string.IsNullOrWhiteSpace(origImageFile))
            {

                Image<Gray, Byte> destImage = new Image<Gray, Byte>(destImageFile);
                Image<Gray, Byte> origImage = new Image<Gray, Byte>(origImageFile);

                showImage = new Mat(origImage.Size, DepthType.Cv8U, 3);
                pointImage = new Mat(origImage.Size, DepthType.Cv8U, 3);
                //add optical vp line

                string vpPath = Path.GetDirectoryName(vpFileDest);
                List<FileInfo> files = new List<FileInfo>();
                DirectoryInfo dirInfo = new DirectoryInfo(vpPath);
                FileInfo[] fsinfo = dirInfo.GetFiles();
                FileInfo[] vppFiles = fsinfo.Where(p => p.Name.Contains(".jpgpp.dat")).OrderBy(p => p.Name).ToArray();
                //FileInfo[] vpdFiles = fsinfo.Where(p => p.Name.Contains(".jpgpd.dat")).OrderBy(p => p.Name).ToArray();

                for (int k = 0; k < vppFiles.Length - 1; k++)
                {

                    VectorOfPointF vpDest = Utils.ReadJsonFile<VectorOfPointF>(vppFiles[k+1].FullName);
                    //VectorOfPointF vpOrig = Utils.ReadJsonFile<VectorOfPointF>(vpdFiles[k].FullName);
                    VectorOfPointF vpOrig = Utils.ReadJsonFile<VectorOfPointF>(vppFiles[k].FullName);

                    //Motion tracking restarts at each group boundary, so skip the seam between groups
                    if ((k+1) % Constants.DETECTIVE_GROUP_COUNT == 0)
                        continue;

                    Point[] pointsDest = Array.ConvertAll<PointF, Point>(vpDest.ToArray(), Point.Round);
                    Point[] pointsOrig = Array.ConvertAll<PointF, Point>(vpOrig.ToArray(), Point.Round);

                    for (int i = 0; i < pointsDest.Length; i++)
                    {
                        Point[] ps = { pointsDest[i], pointsOrig[i] };
                        CvInvoke.Polylines(pointImage, ps, true, new MCvScalar(0, 0, 255, 255));
                        //CvInvoke.Circle(pointImage, pointsOrig[i], 1, new MCvScalar(0, 255, 0, 255));
                    }
                }

                Image<Bgr, Byte> firstImg = new Image<Bgr, Byte>(origImageFile);
                Image<Bgr, Byte> lastImg = new Image<Bgr, Byte>("D:\\MyPrj\\mygitcode\\MyCode\\ExamVideoProcess\\ExamVideoProcess\\bin\\x64\\Debug\\initVideo\\30Grayimg.jpg");
                CvInvoke.AddWeighted(firstImg, 0.5, lastImg, 0.5, 0.0, showImage, DepthType.Cv8U);
                CvInvoke.AddWeighted(showImage, 0.5, pointImage, 0.5, 0.0, showImage, DepthType.Cv8U);

                /*
                Features2DToolbox.DrawMatches(origImage.Convert<Gray, Byte>().Mat, origFeatures.KeyPoints, destImage.Convert<Gray, Byte>().Mat, destFeatures.KeyPoints, matches, showImage, new MCvScalar(255, 0, 255), new MCvScalar(0, 255, 255), mask);
                if (homography != null)     //if the template was found in the image, draw it
                {
                    Rectangle rect = new Rectangle(Point.Empty, origImage.Size);
                    PointF[] points = new PointF[]
                {
                  new PointF(rect.Left, rect.Bottom),
                  new PointF(rect.Right, rect.Bottom),
                  new PointF(rect.Right, rect.Top),
                  new PointF(rect.Left, rect.Top)
                };
                    points = CvInvoke.PerspectiveTransform(points, homography);
                    Point[] points2 = Array.ConvertAll<PointF, Point>(points, Point.Round);
                    VectorOfPoint vp = new VectorOfPoint(points2);
                    CvInvoke.Polylines(showImage, vp, true, new MCvScalar(255, 0, 0, 255), 15);

                }
                */
                return showImage.Bitmap;
            }
            return null;
        }