public StopSignDetector(IInputArray stopSignModel)
{
    _detector = new SURF(500);
    using (Mat redMask = new Mat())
    {
        // Isolate the red pixels of the model image and extract SURF features from the mask
        GetRedPixelMask(stopSignModel, redMask);
        _modelKeypoints = new VectorOfKeyPoint();
        _modelDescriptors = new Mat();
        _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
        if (_modelKeypoints.Size == 0)
            throw new Exception("No image feature has been found in the stop sign model");
    }

    _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
    _modelDescriptorMatcher.Add(_modelDescriptors);

    // Reference octagon contour used to test candidate stop-sign shapes
    _octagon = new VectorOfPoint(
        new Point[]
        {
            new Point(1, 0),
            new Point(2, 0),
            new Point(3, 1),
            new Point(3, 2),
            new Point(2, 3),
            new Point(1, 3),
            new Point(0, 2),
            new Point(0, 1)
        });
}
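// A minimal construction sketch ("stop-sign-model.png" is a hypothetical file name;
// assumes a project referencing Emgu.CV):
using (Mat stopSignModel = CvInvoke.Imread("stop-sign-model.png", ImreadModes.Color))
{
    StopSignDetector detector = new StopSignDetector(stopSignModel);
    // The matcher and octagon template are now ready for matching against scene images.
}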
public float classify(Image<Bgr, Byte> predImg)
{
    using (SURF detector = new SURF(30))
    using (BFMatcher matcher = new BFMatcher(DistanceType.L2))
    using (Image<Gray, Byte> testImgGray = predImg.Convert<Gray, Byte>())
    using (VectorOfKeyPoint testKeyPoints = new VectorOfKeyPoint())
    using (Mat testBOWDescriptor = new Mat())
    using (bowDE = new BOWImgDescriptorExtractor(detector, matcher))
    {
        float result = 0;
        bowDE.SetVocabulary(vocabulary);

        // Detect SURF key points on the grayscale copy and compute its bag-of-words descriptor
        detector.DetectRaw(testImgGray, testKeyPoints, null);
        bowDE.Compute(testImgGray, testKeyPoints, testBOWDescriptor);

        if (!testBOWDescriptor.IsEmpty)
            result = svmClassifier.Predict(testBOWDescriptor);

        // result indicates which trainDescriptor label (1 or 2) the test image belongs to
        return result;
    }
}
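// A usage sketch, assuming computeAndExtract() below has already populated the
// vocabulary and svmClassifier fields ("test.jpg" is a hypothetical path):
using (Image<Bgr, Byte> testImg = new Image<Bgr, Byte>("test.jpg"))
{
    float label = classify(testImg);
    Console.WriteLine("Predicted label: " + label);
}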
public void computeAndExtract()
{
    using (detector = new SURF(30))
    using (matcher = new BFMatcher(DistanceType.L2))
    {
        bowDE = new BOWImgDescriptorExtractor(detector, matcher);
        BOWKMeansTrainer bowTrainer = new BOWKMeansTrainer(
            100, new MCvTermCriteria(100, 0.01), 3, Emgu.CV.CvEnum.KMeansInitType.PPCenters);

        foreach (FileInfo[] folder in _folders)
            foreach (FileInfo file in folder)
            {
                using (Image<Bgr, Byte> model = new Image<Bgr, byte>(file.FullName))
                using (VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint())
                {
                    // Detect SURF key points in the image
                    detector.DetectRaw(model, modelKeyPoints);

                    // Compute the SURF descriptors at the detected key points
                    Mat modelDescriptors = new Mat();
                    detector.Compute(model, modelKeyPoints, modelDescriptors);

                    // Add the extracted descriptors to the BoW trainer
                    bowTrainer.Add(modelDescriptors);
                }
                input_num++;
            }

        // Cluster the feature vectors into the vocabulary
        bowTrainer.Cluster(vocabulary);

        // Hand the vocabulary to the BoW descriptor extractor
        bowDE.SetVocabulary(vocabulary);

        // Training descriptors and labels
        tDescriptors = new Mat();
        labels = new Matrix<int>(1, input_num);
        int index = 0;

        // Compute and store the BoW descriptor of every image, labeling images by folder
        for (int i = 1; i <= _folders.Count; i++)
        {
            FileInfo[] files = _folders[i - 1];
            for (int j = 0; j < files.Length; j++)
            {
                FileInfo file = files[j];
                using (Image<Bgr, Byte> model = new Image<Bgr, Byte>(file.FullName))
                using (VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint())
                using (Mat modelBOWDescriptor = new Mat())
                {
                    detector.DetectRaw(model, modelKeyPoints);
                    bowDE.Compute(model, modelKeyPoints, modelBOWDescriptor);
                    tDescriptors.PushBack(modelBOWDescriptor);
                    labels[0, index++] = i;
                }
            }
        }
    }
}
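// computeAndExtract() fills tDescriptors and labels but stops short of training the
// SVM that classify() uses. A follow-up sketch, assuming svmClassifier is an
// Emgu.CV.ML.SVM field; the kernel and parameter values here are illustrative:
svmClassifier = new SVM();
svmClassifier.SetKernel(SVM.SvmKernelType.Linear);
svmClassifier.Type = SVM.SvmType.CSvc;
svmClassifier.C = 1;
svmClassifier.TermCriteria = new MCvTermCriteria(100, 1e-6);
// Each row of tDescriptors is one image; labels may need reshaping to a column
// vector depending on the OpenCV version.
svmClassifier.Train(tDescriptors, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, labels);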
public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime,
    out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints,
    VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
{
    int k = 2;
    double uniquenessThreshold = 0.8;
    double hessianThresh = 300;

    Stopwatch watch;
    homography = null;

    modelKeyPoints = new VectorOfKeyPoint();
    observedKeyPoints = new VectorOfKeyPoint();

#if !__IOS__
    if (CudaInvoke.HasCuda)
    {
        CudaSURF surfCuda = new CudaSURF((float) hessianThresh);
        // extract features from the object image
        using (GpuMat gpuModelImage = new GpuMat(modelImage))
        using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
        using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
        using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
        {
            surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
            watch = Stopwatch.StartNew();

            // extract features from the observed image
            using (GpuMat gpuObservedImage = new GpuMat(observedImage))
            using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
            using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
            {
                matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);
                surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                mask.SetTo(new MCvScalar(255));
                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                int nonZeroCount = CvInvoke.CountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                }
            }
            watch.Stop();
        }
    }
    else
#endif
    {
        using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
        using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
        {
            SURF surfCPU = new SURF(hessianThresh);

            // extract features from the object image
            UMat modelDescriptors = new UMat();
            surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            UMat observedDescriptors = new UMat();
            surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            matcher.Add(modelDescriptors);
            matcher.KnnMatch(observedDescriptors, matches, k, null);

            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            int nonZeroCount = CvInvoke.CountNonZero(mask);
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
            }
            watch.Stop();
        }
    }
    matchTime = watch.ElapsedMilliseconds;
}
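// A typical caller, drawing the matches that survive the voting steps
// ("box.png" and "box_in_scene.png" are hypothetical file names):
using (Mat modelImage = CvInvoke.Imread("box.png", ImreadModes.Grayscale))
using (Mat observedImage = CvInvoke.Imread("box_in_scene.png", ImreadModes.Grayscale))
using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
{
    long matchTime;
    VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
    Mat mask, homography;
    FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints,
        out observedKeyPoints, matches, out mask, out homography);

    Mat result = new Mat();
    Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
        matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
}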
public Mat Calculate(Bitmap referenceBitmap, Bitmap currentBitmap)
{
    Mat homography;

    using (var detector = new SURF(threshold))
    using (var model = new Image<Gray, byte>(referenceBitmap))
    using (var modelMat = model.Mat.ToUMat(AccessType.Read))
    using (var modelKeyPoints = new VectorOfKeyPoint())
    using (var modelDescriptors = new UMat())
    using (var observed = new Image<Gray, byte>(currentBitmap))
    using (var observedMat = observed.Mat.ToUMat(AccessType.Read))
    using (var observedKeyPoints = new VectorOfKeyPoint())
    using (var observedDescriptors = new UMat())
    using (var matcher = new BFMatcher(DistanceType.L2))
    using (var matches = new VectorOfVectorOfDMatch())
    {
        detector.DetectAndCompute(modelMat, null, modelKeyPoints, modelDescriptors, false);
        detector.DetectAndCompute(observedMat, null, observedKeyPoints, observedDescriptors, false);

        matcher.Add(modelDescriptors);
        matcher.KnnMatch(observedDescriptors, matches, k, null);

        homography = TryFindHomography(modelKeyPoints, observedKeyPoints, matches);
    }

    return homography;
}
// Use EmguCV
private EmguType SURFFeatureDetect(Image<Gray, Byte> image, Image<Gray, Byte> mask = null)
{
    const int hessianThresh = 300;
    SURF surfCPU = new SURF(hessianThresh);
    EmguType result = new EmguType();

    UMat matImage = image.Mat.ToUMat(AccessType.Read);

    try
    {
        surfCPU.DetectAndCompute(matImage, mask, result.KeyPoints, result.Descriptors, false);
    }
    catch (Exception e)
    {
        _log.Error("Feature Detect Exception: " + e.Message);
    }

    return result;
}
public void TestSURFBlankImage()
{
    SURF detector = new SURF(500);
    Image<Gray, Byte> img = new Image<Gray, byte>(1024, 900);
    VectorOfKeyPoint vp = new VectorOfKeyPoint();
    Mat descriptors = new Mat();
    detector.DetectAndCompute(img, null, vp, descriptors, false);
}
public void TestSURF()
{
    SURF detector = new SURF(500);
    //ParamDef[] parameters = detector.GetParams();
    EmguAssert.IsTrue(TestFeature2DTracker(detector, detector), "Unable to find homography matrix");
}
public void TestLATCH()
{
    SURF surf = new SURF(300);
    LATCH latch = new LATCH();
    EmguAssert.IsTrue(TestFeature2DTracker(surf, latch), "Unable to find homography matrix");
}
public void TestBOWKmeansTrainer()
{
    Image<Gray, byte> box = EmguAssert.LoadImage<Gray, byte>("box.png");
    SURF detector = new SURF(500);
    VectorOfKeyPoint kpts = new VectorOfKeyPoint();
    Mat descriptors = new Mat();
    detector.DetectAndCompute(box, null, kpts, descriptors, false);

    BOWKMeansTrainer trainer = new BOWKMeansTrainer(100, new MCvTermCriteria(), 3, CvEnum.KMeansInitType.PPCenters);
    trainer.Add(descriptors);
    Mat vocabulary = new Mat();
    trainer.Cluster(vocabulary);

    BFMatcher matcher = new BFMatcher(DistanceType.L2);
    BOWImgDescriptorExtractor extractor = new BOWImgDescriptorExtractor(detector, matcher);
    extractor.SetVocabulary(vocabulary);

    Mat descriptors2 = new Mat();
    extractor.Compute(box, kpts, descriptors2);
}
public void TestDAISY()
{
    SURF surf = new SURF(300);
    DAISY daisy = new DAISY();
    EmguAssert.IsTrue(TestFeature2DTracker(surf, daisy), "Unable to find homography matrix");
}
public void TestSURFDetector2()
{
    //Trace.WriteLine("Size of MCvSURFParams: " + Marshal.SizeOf(typeof(MCvSURFParams)));
    Image<Gray, byte> box = EmguAssert.LoadImage<Gray, byte>("box.png");
    SURF detector = new SURF(400);

    // Time DetectAndCompute (key points and descriptors in one call)
    Stopwatch watch = Stopwatch.StartNew();
    VectorOfKeyPoint vp1 = new VectorOfKeyPoint();
    Mat descriptors1 = new Mat();
    detector.DetectAndCompute(box, null, vp1, descriptors1, false);
    watch.Stop();
    EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

    // Time detection alone
    watch.Reset();
    watch.Start();
    MKeyPoint[] keypoints = detector.Detect(box, null);
    //ImageFeature<float>[] features2 = detector.Compute(box, keypoints);
    watch.Stop();
    EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

    PointF[] pts =
#if NETFX_CORE
        Extensions.
#else
        Array.
#endif
        ConvertAll<MKeyPoint, PointF>(keypoints, delegate(MKeyPoint mkp) { return mkp.Point; });

    foreach (MKeyPoint kp in keypoints)
    {
        box.Draw(new CircleF(kp.Point, kp.Size), new Gray(255), 1);
    }
}