/// <summary>
/// Compute the BRIEF descriptor for a single keypoint.
/// </summary>
/// <param name="im">The grayscale image the descriptor is computed from</param>
/// <param name="kp">The keypoint to describe</param>
/// <returns>The raw BRIEF descriptor matrix for the keypoint (may be empty/null if the point could not be described)</returns>
public static Matrix<byte> ExtractBriefFeatureDescriptors(Emgu.CV.Image<Gray, byte> im, MKeyPoint kp)
{
    // Invoked with a single point because otherwise the caller cannot tell
    // which points failed to get descriptors.
    // Both the vector and the extractor wrap native resources, so dispose them;
    // the returned Matrix owns its own data and stays valid after disposal.
    using (VectorOfKeyPoint f = new VectorOfKeyPoint())
    using (BriefDescriptorExtractor extractor = new BriefDescriptorExtractor())
    {
        f.Push(new MKeyPoint[] { kp });
        return extractor.ComputeDescriptorsRaw(im, (Emgu.CV.Image<Gray, byte>)null, f);
    }
}
Beispiel #2
0
 /// <summary>
 /// Push an array of values into the standard vector.
 /// </summary>
 /// <param name="value">The values to be pushed into the vector</param>
 public void Push(MKeyPoint[] value)
 {
    // Nothing to do for an empty array (and nothing to pin).
    if (value.Length > 0)
    {
       // Pin the managed array so the native side can read it directly;
       // free the handle even if the native call throws.
       GCHandle handle = GCHandle.Alloc(value, GCHandleType.Pinned);
       try
       {
          CvInvoke.VectorOfKeyPointPushMulti(_ptr, handle.AddrOfPinnedObject(), value.Length);
       }
       finally
       {
          handle.Free();
       }
    }
 }
Beispiel #3
0
 /// <summary>
 /// Track the given keypoint locations from the previous frame into the
 /// current frame using pyramidal Lucas-Kanade optical flow.
 /// </summary>
 /// <param name="prev">The previous frame</param>
 /// <param name="curr">The current frame</param>
 /// <param name="prevFeatures">The keypoints found in the previous frame</param>
 /// <returns>The tracked point locations in the current frame</returns>
 public PointF[] Calculate(IImage prev, IImage curr, MKeyPoint[] prevFeatures)
 {
     // Flatten the keypoints down to their raw point locations.
     PointF[] previousPoints = Array.ConvertAll(prevFeatures, kp => kp.Point);

     PointF[] trackedPoints;
     byte[] trackStatus;
     float[] trackingError;
     CvInvoke.CalcOpticalFlowPyrLK(prev, curr, previousPoints, winSize, level, criteria,
         out trackedPoints, out trackStatus, out trackingError);

     // Status and error are computed by the native call but not consumed here.
     return trackedPoints;
 }
Beispiel #4
0
        /// <summary>
        /// Compute the SURF descriptor given the image and the point locations.
        /// </summary>
        /// <param name="image">The image where the descriptors will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoints where the descriptors will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            // Guard against an empty request, consistent with the other
            // ComputeDescriptors overloads: the raw-pointer walk below assumes
            // the native vector holds at least one descriptor.
            if (keyPoints.Length == 0) return new ImageFeature[0];

            using (VectorOfFloat descs = new VectorOfFloat())
            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
            {
                kpts.Push(keyPoints);
                // 'ref this' passes the value-type detector parameters to the native call.
                CvSURFDetectorComputeDescriptors(ref this, image, mask, kpts, descs);

                int n = keyPoints.Length;
                long address = descs.StartAddress.ToInt64();

                ImageFeature[] features = new ImageFeature[n];
                // Extended SURF emits 128-dimensional descriptors, basic SURF 64.
                int sizeOfdescriptor = extended == 0 ? 64 : 128;
                for (int i = 0; i < n; i++, address += sizeOfdescriptor * sizeof(float))
                {
                    features[i].KeyPoint = keyPoints[i];
                    float[] desc = new float[sizeOfdescriptor];
                    Marshal.Copy(new IntPtr(address), desc, 0, sizeOfdescriptor);
                    features[i].Descriptor = desc;
                }
                return features;
            }
        }
Beispiel #5
0
        /// <summary>
        /// Compute the SIFT descriptor given the image and the point locations.
        /// </summary>
        /// <param name="image">The image where the descriptors will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoints where the descriptors will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            if (keyPoints.Length == 0) return new ImageFeature[0];

            using (VectorOfFloat descriptorVector = new VectorOfFloat())
            using (VectorOfKeyPoint keyPointVector = new VectorOfKeyPoint())
            {
                keyPointVector.Push(keyPoints);
                CvSIFTDetectorComputeDescriptors(_ptr, image, mask, keyPointVector, descriptorVector);

                // Copy the flat native float buffer into managed memory, then
                // slice it into one descriptor per keypoint.
                float[] flatDescriptors = descriptorVector.ToArray();
                int descriptorLength = DescriptorSize;

                ImageFeature[] result = new ImageFeature[keyPoints.Length];
                for (int i = 0; i < result.Length; i++)
                {
                    result[i].KeyPoint = keyPoints[i];
                    float[] slice = new float[descriptorLength];
                    Array.Copy(flatDescriptors, i * descriptorLength, slice, 0, descriptorLength);
                    result[i].Descriptor = slice;
                }
                return result;
            }
        }
Beispiel #6
0
 /// <summary>
 /// Compute the ImageFeature on the image from the given keypoint locations.
 /// </summary>
 /// <param name="image">The image to compute descriptors from</param>
 /// <param name="keyPoints">The keypoints where the descriptor computation is performed</param>
 /// <returns>The ImageFeature from the given keypoints</returns>
 public ImageFeature[] ComputeDescriptors(Image<Gray, byte> image, MKeyPoint[] keyPoints)
 {
     // Delegate to the masked overload with no mask applied.
     ImageFeature[] features = ComputeDescriptors(image, null, keyPoints);
     return features;
 }
 /// <summary>
 /// Convert the standard vector to an array of KeyPoint.
 /// </summary>
 /// <returns>An array of KeyPoint</returns>
 public MKeyPoint[] ToArray()
 {
     MKeyPoint[] res = new MKeyPoint[Size];
     // Skip the pin + native copy for an empty vector, consistent with the
     // other ToArray implementation in this codebase.
     if (res.Length > 0)
     {
        // Pin the destination so native code can write into it directly;
        // release the handle even if the native call throws.
        GCHandle handle = GCHandle.Alloc(res, GCHandleType.Pinned);
        try
        {
           VectorOfKeyPointCopyData(_ptr, handle.AddrOfPinnedObject());
        }
        finally
        {
           handle.Free();
        }
     }
     return res;
 }
 /*
   /// <summary>
   /// Compute the descriptor given the bgr image and the point location, using opponent color (CGIV 2008 "Color Descriptors for Object Category Recognition").
   /// </summary>
   /// <param name="image">The image where the descriptor will be computed from</param>
   /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
   /// <returns>The descriptors founded on the keypoint location</returns>
   public Matrix<float> ComputeDescriptorsRaw(Image<Bgr, Byte> image, VectorOfKeyPoint keyPoints)
   {
  int count = keyPoints.Size;
  if (count == 0) return null;
  Matrix<float> descriptors = new Matrix<float>(count, DescriptorSize * 3, 1);
  CvSIFTDetectorComputeDescriptorsBGR(_ptr, image, keyPoints, descriptors);
  return descriptors;
   }*/
 /// <summary>
 /// Compute the descriptor given the image and the point locations.
 /// </summary>
 /// <param name="image">The image where the descriptors will be computed from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The keypoints where the descriptors will be computed from</param>
 /// <returns>The descriptors found at the keypoint locations</returns>
 public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
 {
     if (keyPoints.Length == 0) return new ImageFeature[0];

     using (VectorOfKeyPoint keyPointVector = new VectorOfKeyPoint())
     {
         keyPointVector.Push(keyPoints);
         // Compute the raw descriptor matrix, then pair each descriptor row
         // back up with its keypoint.
         using (Matrix<float> rawDescriptors = ComputeDescriptorsRaw(image, mask, keyPointVector))
         {
             return Features2DTracker.ConvertToImageFeature(keyPointVector, rawDescriptors);
         }
     }
 }
Beispiel #9
0
 /// <summary>
 /// Native interop: copy the keypoint at <paramref name="index"/> from the native
 /// keypoint vector into <paramref name="keypoint"/>.
 /// NOTE(review): the DllImport attribute for this extern is outside this view.
 /// </summary>
 internal static extern void VectorOfKeyPointGetItem(IntPtr keypoints, int index, ref MKeyPoint keypoint);
Beispiel #10
0
 /// <summary>
 /// Detect SURF keypoints over the whole frame (no mask) using the static SURF detector.
 /// </summary>
 /// <param name="frame">The grayscale frame to detect keypoints in</param>
 /// <param name="all_keypoints">On return, every keypoint the detector found</param>
 private static void ExternalSURFKeypoints(ref Image<Gray, Byte> frame, out MKeyPoint[] all_keypoints)
 {
     Console.WriteLine("External SURF Keypoint Detector");
     MKeyPoint[] detected = SURF.DetectKeyPoints(frame, null);
     all_keypoints = detected;
 }
Beispiel #11
0
 /// <summary>
 /// Detect BRISK keypoints over the whole frame (no mask) using the shared featureDetector.
 /// </summary>
 /// <param name="frame">The grayscale frame to detect keypoints in</param>
 /// <param name="all_keypoints">On return, every keypoint the detector found</param>
 private static void ExternalBRISKKeypoints(ref Image<Gray, Byte> frame, out MKeyPoint[] all_keypoints)
 {
     // Per-frame console trace intentionally disabled to keep output quiet.
     MKeyPoint[] detected = featureDetector.DetectKeyPoints(frame, null);
     all_keypoints = detected;
 }
Beispiel #12
0
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <param name="featurenumber">On return, the number of mask-accepted matches. Note: the value FindMatch writes into this parameter is overwritten at the end of this method.</param>
        /// <param name="keyPoints">On return, the keypoints detected in the observed image.</param>
        /// <param name="index">On return, the row indices of mask entries equal to 1 (the accepted matches).</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Image<Bgr, Byte> Draw(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime, out double featurenumber, out  MKeyPoint[] keyPoints, out List<int> index)
        {
            //index = new int[100];
            index = new List<int>();
            HomographyMatrix homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;
            Matrix<byte> mask;

            // FindMatch produces the keypoints, match indices, acceptance mask
            // and (when one was found) the model-to-observed homography.
            FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, out indices, out mask, out homography, out featurenumber);
            //foreach (byte data in mask.Data)
            //{
            //    if( data = 0)
            //}
            // Walk the first column of the mask and record accepted match rows.
            int n = 0;
            for (int i = 0; i < mask.Data.GetLength(0); i++)
            {
                if (mask.Data[i, 0] == 1)
                {
                    index.Add(i);
                    n++;
                    Console.WriteLine(i); // NOTE(review): debug trace left enabled — confirm it is wanted
                }
            }

            //Draw the matched keypoints
            Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
               indices, new Bgr(255, 0, 0), new Bgr(255, 0, 255), mask, Features2DToolbox.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);
            #region draw the projected region on the image
            // Project the model image's corner rectangle through the homography
            // and draw it on the combined result.
            Rectangle rect = modelImage.ROI;
            PointF[] pts = new PointF[] { 
               new PointF(rect.Left, rect.Bottom),
               new PointF(rect.Right, rect.Bottom),
               new PointF(rect.Right, rect.Top),
               new PointF(rect.Left, rect.Top)};
            if (homography != null)
            {  //draw a rectangle along the projected model

                homography.ProjectPoints(pts);

                result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);

            }
            #endregion

            keyPoints = observedKeyPoints.ToArray();
            // Overwrites the featurenumber FindMatch produced with the mask-accepted count.
            featurenumber = n;
            return result;
        }
Beispiel #13
0
	  /// <summary>
      /// Create a standard vector of KeyPoint populated with the given initial values.
      /// </summary>
      /// <param name="values">The initial values; pushed into the newly created native vector</param>
	  public VectorOfKeyPoint(MKeyPoint[] values)
         :this()
      {
         Push(values);
      }
Beispiel #14
0
 /// <summary>
 /// Native interop: copy the element at <paramref name="index"/> from the native
 /// keypoint vector into <paramref name="element"/>.
 /// NOTE(review): the DllImport attribute for this extern is outside this view.
 /// </summary>
 internal static extern void VectorOfKeyPointGetItem(IntPtr vec, int index, ref MKeyPoint element);
Beispiel #15
0
 /// <summary>
 /// Convert the standard vector to an array of KeyPoint.
 /// </summary>
 /// <returns>An array of KeyPoint; empty when the vector holds no elements</returns>
 public MKeyPoint[] ToArray()
 {
    MKeyPoint[] copy = new MKeyPoint[Size];
    if (copy.Length == 0)
       return copy;

    // Pin the destination array so the native side can write into it directly.
    GCHandle pin = GCHandle.Alloc(copy, GCHandleType.Pinned);
    CvInvoke.VectorOfKeyPointCopyData(_ptr, pin.AddrOfPinnedObject());
    pin.Free();
    return copy;
 }
Beispiel #16
0
 /// <summary>
 /// Get the item at the specific index.
 /// </summary>
 /// <param name="index">The index</param>
 /// <returns>The item at the specific index</returns>
 public MKeyPoint this[int index]
 {
    get
    {
       // MKeyPoint is a value type; the native call fills it in place via ref.
       MKeyPoint item = new MKeyPoint();
       CvInvoke.VectorOfKeyPointGetItem(_ptr, index, ref item);
       return item;
    }
 }
Beispiel #17
0
 /// <summary>
 /// Compute BRISK descriptors for the given keypoints using the shared featureDetector.
 /// </summary>
 /// <param name="frame">The grayscale frame the descriptors are computed from</param>
 /// <param name="keypoints">The keypoints to describe</param>
 /// <param name="features">On return, the raw descriptor matrix</param>
 private static void ExternalBRISKDescriptors(ref Image<Gray, Byte> frame, MKeyPoint[] keypoints, out Matrix<byte> features)
 {
     //Console.WriteLine("External BRISK Keypoint Descriptor Extractor");
     // VectorOfKeyPoint wraps native memory; dispose it once the descriptors
     // are computed (the returned Matrix owns its own data).
     using (Emgu.CV.Util.VectorOfKeyPoint keypoint_vector = new Emgu.CV.Util.VectorOfKeyPoint())
     {
         keypoint_vector.Push(keypoints);
         features = featureDetector.ComputeDescriptorsRaw(frame, null, keypoint_vector);
     }
 }
 /// <summary>
 /// Compute the descriptor given the image and the point locations.
 /// </summary>
 /// <param name="image">The image where the descriptors will be computed from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The keypoints where the descriptors will be computed from</param>
 /// <returns>The image features found at the keypoint locations</returns>
 public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
 {
     // Empty guard, consistent with the other ComputeDescriptors overloads:
     // nothing to compute and no vector to hand to the native layer.
     if (keyPoints.Length == 0) return new ImageFeature[0];

     using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
     {
         pts.Push(keyPoints);
         // The raw descriptor matrix is only needed long enough to pair each
         // row back up with its keypoint.
         using (Matrix<float> descriptors = ComputeDescriptorsRaw(image, mask, pts))
         {
             return Features2DTracker.ConvertToImageFeature(pts, descriptors);
         }
     }
 }
Beispiel #19
0
      /// <summary>
      /// Compute the SIFT descriptor given the image and the point locations.
      /// </summary>
      /// <param name="image">The image where the descriptors will be computed from</param>
      /// <param name="mask">The optional mask, can be null if not needed</param>
      /// <param name="keyPoints">The keypoints where the descriptors will be computed from</param>
      /// <returns>The image features found at the keypoint locations</returns>
      public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
      {
         if (keyPoints.Length == 0) return new ImageFeature[0];
         using (VectorOfFloat descs = new VectorOfFloat())
         {
            // Pin the keypoint array for the native call and guarantee the
            // handle is released even if the native call throws.
            GCHandle handle = GCHandle.Alloc(keyPoints, GCHandleType.Pinned);
            try
            {
               CvSIFTDetectorComputeDescriptors(_ptr, image, mask, handle.AddrOfPinnedObject(), keyPoints.Length, descs);
            }
            finally
            {
               handle.Free();
            }

            int n = keyPoints.Length;
            long address = descs.StartAddress.ToInt64();

            // Walk the native float buffer one descriptor at a time and copy
            // each slice into managed memory.
            ImageFeature[] features = new ImageFeature[n];
            int sizeOfdescriptor = DescriptorSize;
            for (int i = 0; i < n; i++, address += sizeOfdescriptor * sizeof(float))
            {
               features[i].KeyPoint = keyPoints[i];
               float[] desc = new float[sizeOfdescriptor];
               Marshal.Copy(new IntPtr(address), desc, 0, sizeOfdescriptor);
               features[i].Descriptor = desc;
            }
            return features;
         }
      }