Push() public method

Push an array of values into the standard vector
public Push ( Array value ) : void
value Array The values to be pushed into the vector
return void
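
A minimal usage sketch (the namespaces below are assumptions for an Emgu CV 2.x-style build and may differ between versions):

 using Emgu.CV.Structure;   // MKeyPoint
 using Emgu.CV.Util;        // VectorOfKeyPoint (assumed namespace)

 // Push copies the managed MKeyPoint array into the wrapped native vector.
 using (VectorOfKeyPoint vec = new VectorOfKeyPoint())
 {
    MKeyPoint kp = new MKeyPoint { Point = new System.Drawing.PointF(10, 20), Size = 7 };
    vec.Push(new MKeyPoint[] { kp });
    // vec.Size is now 1, and vec.ToArray() round-trips the pushed keypoints.
 }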
Example No. 1
 public static Matrix<byte> ExtractBriefFeatureDescriptors(Emgu.CV.Image<Gray, byte> im, MKeyPoint kp)
 {
     var f = new VectorOfKeyPoint();
     f.Push(new MKeyPoint[] { kp });
     // I'm going to invoke this with a single point because otherwise I cannot tell which points failed to get descriptors
     return new BriefDescriptorExtractor().ComputeDescriptorsRaw(im, (Emgu.CV.Image<Gray, byte>)null, f);
 }
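
A hedged follow-up sketch showing how the helper above might be driven one keypoint at a time so a failing point can be identified. It assumes the Emgu.CV and Emgu.CV.Structure namespaces from the snippet are in scope, that 'im' and 'detectedKeyPoints' come from an earlier detection step, and that a dropped point surfaces as a null or empty result (an assumption about ComputeDescriptorsRaw):

 // Illustrative only: request a descriptor per keypoint.
 foreach (MKeyPoint kp in detectedKeyPoints)
 {
    Matrix<byte> descriptor = ExtractBriefFeatureDescriptors(im, kp);
    if (descriptor == null || descriptor.Rows == 0)
    {
       // Assumed failure mode: the extractor drops points it cannot describe,
       // e.g. keypoints too close to the image border.
       continue;
    }
    // 'descriptor' now holds one row of BRIEF bytes for this keypoint.
 }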
Example No. 2
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            using (VectorOfFloat descs = new VectorOfFloat())
            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
            {
               kpts.Push(keyPoints);
               CvSURFDetectorComputeDescriptors(ref this, image, mask, kpts, descs);

               int n = keyPoints.Length;
               long address = descs.StartAddress.ToInt64();

               ImageFeature[] features = new ImageFeature[n];
               int sizeOfdescriptor = extended == 0 ? 64 : 128;
               for (int i = 0; i < n; i++, address += sizeOfdescriptor * sizeof(float))
               {
                  features[i].KeyPoint = keyPoints[i];
                  float[] desc = new float[sizeOfdescriptor];
                  Marshal.Copy(new IntPtr(address), desc, 0, sizeOfdescriptor);
                  features[i].Descriptor = desc;
               }
               return features;
            }
        }
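
A short usage sketch for the overload above ('detector', 'image', and 'keyPoints' are assumed to already exist; only the ComputeDescriptors call and the ImageFeature fields come from the snippet):

        // Illustrative only: 'detector' is assumed to be the SURF wrapper defining the
        // ComputeDescriptors overload above, and 'keyPoints' come from a prior detection pass.
        ImageFeature[] features = detector.ComputeDescriptors(image, null, keyPoints);
        foreach (ImageFeature f in features)
        {
           // Each feature pairs an input keypoint with its 64- or 128-element SURF descriptor,
           // depending on the detector's extended flag.
           Console.WriteLine("({0}, {1}) -> {2} floats", f.KeyPoint.Point.X, f.KeyPoint.Point.Y, f.Descriptor.Length);
        }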
Example No. 3
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            if (keyPoints.Length == 0) return new ImageFeature[0];
            using (VectorOfFloat descVec = new VectorOfFloat())
            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
            {
               kpts.Push(keyPoints);
               CvSIFTDetectorComputeDescriptors(_ptr, image, mask, kpts, descVec);

               int n = keyPoints.Length;
               float[] descs = descVec.ToArray();
               //long address = descVec.StartAddress.ToInt64();

               ImageFeature[] features = new ImageFeature[n];
               int sizeOfdescriptor = DescriptorSize;
               for (int i = 0; i < n; i++)
               {
                  features[i].KeyPoint = keyPoints[i];
                  float[] d = new float[sizeOfdescriptor];
                  Array.Copy(descs, i * sizeOfdescriptor, d, 0, sizeOfdescriptor);
                  features[i].Descriptor = d;
               }
               return features;
            }
        }
        /// <summary>
        /// Convert the image features to keypoint vector and descriptor matrix
        /// </summary>
        private static void ConvertFromImageFeature(ImageFeature[] features, out VectorOfKeyPoint keyPoints, out Matrix<float> descriptors)
        {
            keyPoints = new VectorOfKeyPoint();
            keyPoints.Push(Array.ConvertAll<ImageFeature, MKeyPoint>(features, delegate(ImageFeature feature) { return feature.KeyPoint; }));
            descriptors = new Matrix<float>(features.Length, features[0].Descriptor.Length);

            int descriptorLength = features[0].Descriptor.Length;
            float[,] data = descriptors.Data;
            for (int i = 0; i < features.Length; i++)
            {
               for (int j = 0; j < descriptorLength; j++)
                  data[i, j] = features[i].Descriptor[j];
            }
        }
 /*
   /// <summary>
   /// Compute the descriptor given the BGR image and the point location, using opponent color (CGIV 2008 "Color Descriptors for Object Category Recognition").
   /// </summary>
   /// <param name="image">The image where the descriptor will be computed from</param>
   /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
   /// <returns>The descriptors found at the keypoint locations</returns>
   public Matrix<float> ComputeDescriptorsRaw(Image<Bgr, Byte> image, VectorOfKeyPoint keyPoints)
   {
  int count = keyPoints.Size;
  if (count == 0) return null;
  Matrix<float> descriptors = new Matrix<float>(count, DescriptorSize * 3, 1);
  CvSIFTDetectorComputeDescriptorsBGR(_ptr, image, keyPoints, descriptors);
  return descriptors;
   }*/
 /// <summary>
 /// Compute the descriptor given the image and the point location
 /// </summary>
 /// <param name="image">The image where the descriptor will be computed from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
 /// <returns>The descriptors found at the keypoint locations</returns>
 public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
 {
    if (keyPoints.Length == 0) return new ImageFeature[0];
    using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
    {
       kpts.Push(keyPoints);
       using (Matrix<float> descriptor = ComputeDescriptorsRaw(image, mask, kpts))
       {
          return Features2DTracker.ConvertToImageFeature(kpts, descriptor);
       }
    }
 }
 /// <summary>
 /// Compute the descriptor given the image and the point location
 /// </summary>
 /// <param name="image">The image where the descriptor will be computed from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
 /// <returns>The image features found at the keypoint locations</returns>
 public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
 {
    int sizeOfdescriptor = _surfParams.Extended ? 128 : 64;
    using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
    {
       pts.Push(keyPoints);
       using (Matrix<float> descriptors = ComputeDescriptorsRaw(image, mask, pts))
          return Features2DTracker.ConvertToImageFeature(pts, descriptors);
    }
 }
Example No. 7
      public void TestVectorOfKeyPointSerialization()
      {
         using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
         {

            using (MemoryStream ms = new MemoryStream())
            {
               kpts.Push(new MKeyPoint[] { new MKeyPoint() });
               System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
                   formatter = new System.Runtime.Serialization.Formatters.Binary.BinaryFormatter();
               formatter.Serialize(ms, kpts);
               Byte[] bytes = ms.GetBuffer();

               using (MemoryStream ms2 = new MemoryStream(bytes))
               {
                  Object o = formatter.Deserialize(ms2);
                  VectorOfKeyPoint kpts2 = (VectorOfKeyPoint) o;
               }
            }
         }
      }