Example #1
 private static extern void CvLDetectorDetectKeyPoints(
     ref LDetector detector,
     IntPtr image,
     IntPtr keypoints,
     int maxCount,
     [MarshalAs(UnmanagedType.I1)]
     bool scaleCoords);
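A declaration like the one above only binds to native code once it carries a [DllImport] attribute pointing at Emgu CV's native extern library. The following is a minimal sketch of such a binding; the library name "cvextern", the Cdecl calling convention, and the Emgu.CV namespace for LDetector are assumptions here (the real bindings keep the library name and calling convention in shared constants):

 using System;
 using System.Runtime.InteropServices;
 using Emgu.CV;   // assumed namespace for the LDetector value type

 internal static class NativeBindingSketch
 {
     // "cvextern" and Cdecl are assumptions; match them to the native
     // library actually shipped with the Emgu CV version in use.
     [DllImport("cvextern", CallingConvention = CallingConvention.Cdecl)]
     internal static extern void CvLDetectorDetectKeyPoints(
         ref LDetector detector,
         IntPtr image,
         IntPtr keypoints,
         int maxCount,
         [MarshalAs(UnmanagedType.I1)]
         bool scaleCoords);
 }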
Example #2
 internal static extern void CvPlanarObjectDetectorTrain(
     IntPtr objectDetector,
     IntPtr image,
     int npoints,
     int patchSize,
     int nstructs,
     int structSize,
     int nviews,
     ref LDetector keyPointDetector,
     ref PatchGenerator patchGenerator);
Example #3
 /// <summary>
 /// Train the planar object detector using the specified image
 /// </summary>
 /// <param name="image">The training image</param>
 /// <param name="npoints">Use 300 for default</param>
 /// <param name="patchSize">Use 31 for default</param>
 /// <param name="nstructs">Use 50 for default</param>
 /// <param name="structSize">Use 9 for default</param>
 /// <param name="nviews">Use 5000 for default</param>
 /// <param name="keyPointDetector">The keypoint detector to be used</param>
 /// <param name="patchGenerator">The patch generator to be used</param>
 public void Train(Image<Gray, byte> image, 
     int npoints,
     int patchSize,
     int nstructs,
     int structSize,
     int nviews,
     ref LDetector keyPointDetector,
     ref PatchGenerator patchGenerator)
 {
     CvPlanarObjectDetectorTrain(Ptr, image, npoints, patchSize, nstructs, structSize, nviews, ref keyPointDetector, ref patchGenerator);
 }
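Grounded in the test shown in Example #7 below, here is a minimal call-site sketch for this Train wrapper. The file name "box.png", the containing class, and the Emgu CV 2.x namespaces are assumptions; the numeric arguments are simply the defaults suggested by the XML comments above:

 using Emgu.CV;
 using Emgu.CV.Structure;

 static class TrainUsageSketch
 {
     static void Main()
     {
         // Minimal sketch, assuming "box.png" is a grayscale model image on disk.
         using (Image<Gray, byte> box = new Image<Gray, byte>("box.png"))
         using (PlanarObjectDetector detector = new PlanarObjectDetector())
         {
             LDetector keypointDetector = new LDetector();
             keypointDetector.SetDefaultParameters();

             PatchGenerator patchGenerator = new PatchGenerator();
             patchGenerator.SetDefaultParameters();

             // Defaults suggested by the XML comments: 300 points, patch size 31,
             // 50 structures of size 9, 5000 synthesized views.
             detector.Train(box, 300, 31, 50, 9, 5000, ref keypointDetector, ref patchGenerator);
         }
     }
 }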
Example #4
 /// <summary>
 /// Train the planar object detector using the specified image
 /// </summary>
 /// <param name="image">The training image</param>
 /// <param name="npoints">Use 300 for default</param>
 /// <param name="patchSize">Use 31 for default</param>
 /// <param name="nstructs">Use 50 for default</param>
 /// <param name="structSize">Use 9 for default</param>
 /// <param name="nviews">Use 5000 for default</param>
 /// <param name="keyPointDetector">The keypoint detector to be used</param>
 /// <param name="patchGenerator">The patch generator to be used</param>
 public void Train(Image<Gray, byte> image,
                   int npoints,
                   int patchSize,
                   int nstructs,
                   int structSize,
                   int nviews,
                   ref LDetector keyPointDetector,
                   ref PatchGenerator patchGenerator)
 {
     CvPlanarObjectDetectorTrain(Ptr, image, npoints, patchSize, nstructs, structSize, nviews, ref keyPointDetector, ref patchGenerator);
 }
Example #5
 private static extern void CvLDetectorDetectKeyPoints(
     ref LDetector detector,
     IntPtr image,
     IntPtr keypoints,
     int maxCount,
     [MarshalAs(UnmanagedType.I1)]
     bool scaleCoords);
Example #6
 private static extern void CvPlanarObjectDetectorTrain(
     IntPtr objectDetector,
     IntPtr image,
     int npoints,
     int patchSize,
     int nstructs,
     int structSize,
     int nviews,
     ref LDetector keyPointDetector,
     ref PatchGenerator patchGenerator);
Example #7
 public void TestPlanarObjectDetector()
 {
     Image<Gray, byte> box = new Image<Gray, byte>("box.png");
     Image<Gray, byte> scene = new Image<Gray, byte>("box_in_scene.png");
     //Image<Gray, Byte> scene = box.Rotate(1, new Gray(), false);

     using (PlanarObjectDetector detector = new PlanarObjectDetector())
     {
         Stopwatch watch = Stopwatch.StartNew();
         LDetector keypointDetector = new LDetector();
         keypointDetector.SetDefaultParameters();

         PatchGenerator pGen = new PatchGenerator();
         pGen.SetDefaultParameters();

         // Train the detector on the model image with the suggested default parameters
         detector.Train(box, 300, 31, 50, 9, 5000, ref keypointDetector, ref pGen);
         watch.Stop();
         Trace.WriteLine(String.Format("Training time: {0} milliseconds.", watch.ElapsedMilliseconds));

         MKeyPoint[] modelPoints = detector.GetModelPoints();
         int i = modelPoints.Length;

         // Detect the trained object in the scene; the estimated homography is stored in h
         HomographyMatrix h = new HomographyMatrix();
         watch = Stopwatch.StartNew();
         PointF[] corners = detector.Detect(scene, h);
         watch.Stop();
         Trace.WriteLine(String.Format("Detection time: {0} milliseconds.", watch.ElapsedMilliseconds));

         // Draw the detected corners and the bounding polygon on the scene image
         foreach (PointF c in corners)
         {
             scene.Draw(new CircleF(c, 2), new Gray(255), 1);
         }
         scene.DrawPolyline(Array.ConvertAll<PointF, Point>(corners, Point.Round), true, new Gray(255), 2);
     }
 }
Example #8
 public void TestLDetectorAndSelfSimDescriptor()
 {
     Image<Gray, byte> box = new Image<Gray, byte>("box.png");
     LDetector detector = new LDetector();
     detector.SetDefaultParameters();

     MKeyPoint[] keypoints = detector.DetectKeyPoints(box, 200, true);

     Point[] pts = Array.ConvertAll<MKeyPoint, Point>(keypoints, delegate(MKeyPoint k) { return Point.Round(k.Point); });

     SelfSimDescriptor descriptor = new SelfSimDescriptor(5, 41, 3, 7, 20);
     int descriptorSize = descriptor.DescriptorSize;

     float[] descriptors = descriptor.Compute(box, new Size(20, 20), pts);

     float absSum = 0;
     foreach (float f in descriptors)
         absSum += Math.Abs(f);

     //TODO: Find out why the self-similarity descriptor always returns descriptors of all zeros. Probably a bug in the OpenCV C++ code.
     //Assert.AreNotEqual(0, absSum, "The sum of the descriptor should not be zero");

     Assert.AreEqual(descriptors.Length / descriptor.DescriptorSize, pts.Length);

     foreach (MKeyPoint kp in keypoints)
     {
         box.Draw(new CircleF(kp.Point, kp.Size), new Gray(255), 1);
     }
 }