Wrapper class for the C++ standard vector of float.
Inheritance: Emgu.Util.UnmanagedObject
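
A minimal usage sketch (VectorOfFloat lives in the Emgu.CV.Util namespace; the values are arbitrary):

 using Emgu.CV.Util;

 class VectorOfFloatDemo
 {
     static void Main()
     {
         // The vector owns unmanaged memory, so dispose it deterministically.
         using (VectorOfFloat vec = new VectorOfFloat())
         {
             vec.Push(new float[] { 1.0f, 2.5f, 3.75f }); // append managed values
             float[] copy = vec.ToArray();                // copy back to managed memory
             System.Console.WriteLine(vec.Size);          // prints 3
         }
     }
 }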
Example #1
 /// <summary>
 /// Create a new GpuHOGDescriptor
 /// </summary>
 public GpuHOGDescriptor()
 {
     _ptr = gpuHOGDescriptorCreateDefault();
     _rectStorage = new MemStorage();
     _rectSeq = new Seq<Rectangle>(_rectStorage);
     _vector = new VectorOfFloat();
 }
 /// <summary>
 /// Compute the self-similarity descriptor for the given image
 /// </summary>
 /// <param name="image">The image from which the descriptor will be computed</param>
 /// <param name="winStride">The window stride</param>
 /// <param name="locations">The locations at which the descriptor will be computed</param>
 /// <returns>The computed descriptor values</returns>
 public float[] Compute(Image<Gray, Byte> image, Size winStride, Point[] locations)
 {
     using (VectorOfFloat vof = new VectorOfFloat())
     {
         // Pin the managed array so the native code can read it in place
         GCHandle handle = GCHandle.Alloc(locations, GCHandleType.Pinned);
         try
         {
             CvSelfSimDescriptorCompute(_ptr, image, vof, ref winStride, handle.AddrOfPinnedObject(), locations.Length);
         }
         finally
         {
             handle.Free(); // release the pin even if the native call throws
         }
         return vof.ToArray();
     }
 }
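
The pin/call/free sequence above is the standard pattern for handing a managed array to native code. A minimal standalone sketch, with a hypothetical native entry point ("somenativelib" and ConsumePoints are placeholders, not part of Emgu CV):

 using System;
 using System.Drawing;
 using System.Runtime.InteropServices;

 static class PinningSketch
 {
     // Hypothetical native entry point; stands in for CvSelfSimDescriptorCompute.
     [DllImport("somenativelib", EntryPoint = "consume_points")]
     private static extern void ConsumePoints(IntPtr points, int count);

     static void Process(Point[] locations)
     {
         GCHandle handle = GCHandle.Alloc(locations, GCHandleType.Pinned);
         try
         {
             // While pinned, the GC cannot move the array, so the raw
             // address stays valid for the duration of the native call.
             ConsumePoints(handle.AddrOfPinnedObject(), locations.Length);
         }
         finally
         {
             handle.Free();
         }
     }
 }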
Example #3
        /*
         * public static void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, MCvScalar color)
         * {
         * TestDrawLine(img, startX, startY, endX, endY, color.v0, color.v1, color.v2, color.v3);
         * }
         *
         * [DllImport(CvInvoke.EXTERN_LIBRARY, CallingConvention = CvInvoke.CvCallingConvention, EntryPoint="testDrawLine")]
         * private static extern void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, double v0, double v1, double v2, double v3);
         */

        /// <summary>
        /// Implements the chamfer matching algorithm on images taking into account both distance from
        /// the template pixels to the nearest pixels and orientation alignment between template and image
        /// contours.
        /// </summary>
        /// <param name="img">The edge image where search is performed</param>
        /// <param name="templ">The template (an edge image)</param>
        /// <param name="contours">The output contours</param>
        /// <param name="cost">The cost associated with the matching</param>
        /// <param name="templScale">The template scale, use 1 for default</param>
        /// <param name="maxMatches">The maximum number of matches, use 20 for default</param>
        /// <param name="minMatchDistance">The minimum match distance. use 1.0 for default</param>
        /// <param name="padX">PadX, use 3 for default</param>
        /// <param name="padY">PadY, use 3 for default</param>
        /// <param name="scales">Scales, use 5 for default</param>
        /// <param name="minScale">Minimum scale, use 0.6 for default</param>
        /// <param name="maxScale">Maximum scale, use 1.6 for default</param>
        /// <param name="orientationWeight">Orientation weight, use 0.5 for default</param>
        /// <param name="truncate">Truncate, use 20 for default</param>
        /// <returns>The number of matches</returns>
        public static int cvChamferMatching(Image<Gray, Byte> img, Image<Gray, Byte> templ,
                                            out Point[][] contours, out float[] cost,
                                            double templScale, int maxMatches,
                                            double minMatchDistance, int padX,
                                            int padY, int scales, double minScale, double maxScale,
                                            double orientationWeight, double truncate)
        {
            using (Emgu.CV.Util.VectorOfVectorOfPoint vecOfVecOfPoint = new Util.VectorOfVectorOfPoint())
            using (Emgu.CV.Util.VectorOfFloat vecOfFloat = new Util.VectorOfFloat())
            {
                int count = _cvChamferMatching(img, templ, vecOfVecOfPoint, vecOfFloat, templScale, maxMatches, minMatchDistance, padX, padY, scales, minScale, maxScale, orientationWeight, truncate);
                contours = vecOfVecOfPoint.ToArray();
                cost = vecOfFloat.ToArray();
                return count;
            }
        }
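
A hedged usage sketch of the wrapper above. The file names are placeholders, the Canny overload taking two double thresholds is assumed, and the numeric arguments are the defaults named in the parameter documentation:

        // Hypothetical driver; requires using System.Drawing, Emgu.CV and Emgu.CV.Structure.
        public static void ChamferDemo()
        {
            // Placeholder paths; Canny(lowThresh, highThresh) produces the edge images.
            using (Image<Gray, Byte> templEdges = new Image<Gray, Byte>("template.png").Canny(100, 200))
            using (Image<Gray, Byte> sceneEdges = new Image<Gray, Byte>("scene.png").Canny(100, 200))
            {
                Point[][] contours;
                float[] cost;
                int matches = cvChamferMatching(
                    sceneEdges, templEdges, out contours, out cost,
                    1, 20, 1.0, 3, 3, 5, 0.6, 1.6, 0.5, 20); // the documented defaults
            }
        }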
Example #4
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoints where the descriptors will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            using (VectorOfFloat descs = new VectorOfFloat())
            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
            {
                kpts.Push(keyPoints);
                CvSURFDetectorComputeDescriptors(ref this, image, mask, kpts, descs);

                int n = keyPoints.Length;
                long address = descs.StartAddress.ToInt64();

                ImageFeature[] features = new ImageFeature[n];
                int sizeOfDescriptor = extended == 0 ? 64 : 128; // SURF: 64 floats, or 128 when extended
                for (int i = 0; i < n; i++, address += sizeOfDescriptor * sizeof(float))
                {
                    features[i].KeyPoint = keyPoints[i];
                    float[] desc = new float[sizeOfDescriptor];
                    Marshal.Copy(new IntPtr(address), desc, 0, sizeOfDescriptor);
                    features[i].Descriptor = desc;
                }
                return features;
            }
        }
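
The raw-pointer walk over StartAddress can be avoided with one bulk copy followed by per-keypoint slices; Example #7 below does exactly this. A hypothetical helper sketching that alternative:

        // Pointer-free alternative (a sketch): one ToArray call, then slice per keypoint.
        static float[][] SliceDescriptors(VectorOfFloat descriptors, int count, int sizeOfDescriptor)
        {
            float[] all = descriptors.ToArray();
            float[][] slices = new float[count][];
            for (int i = 0; i < count; i++)
            {
                slices[i] = new float[sizeOfDescriptor];
                Array.Copy(all, i * sizeOfDescriptor, slices[i], 0, sizeOfDescriptor);
            }
            return slices;
        }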
Example #5
        /// <summary>
        /// Calculates optical flow for a sparse feature set using the iterative Lucas-Kanade method in pyramids
        /// </summary>
        /// <param name="prev">First frame, at time t</param>
        /// <param name="curr">Second frame, at time t + dt</param>
        /// <param name="prevFeatures">Array of points for which the flow needs to be found</param>
        /// <param name="winSize">Size of the search window of each pyramid level</param>
        /// <param name="level">Maximal pyramid level number. If 0, pyramids are not used (single level); if 1, two levels are used, etc.</param>
        /// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped</param>
        /// <param name="flags">Flags</param>
        /// <param name="currFeatures">Array of 2D points containing the calculated new positions of the input features in the second image</param>
        /// <param name="status">Array. Every element is set to 1 if the flow for the corresponding feature has been found, 0 otherwise</param>
        /// <param name="trackError">Array of numbers containing the difference between patches around the original and moved points</param>
        /// <param name="minEigThreshold">The algorithm calculates the minimum eigenvalue of a 2x2 normal matrix of optical flow equations (called a spatial gradient matrix in [Bouguet00]), divided by the number of pixels in a window; if this value is less than minEigThreshold, the corresponding feature is filtered out and its flow is not processed, which allows removing bad points and getting a performance boost</param>
        public static void CalcOpticalFlowPyrLK(
            IInputArray prev,
            IInputArray curr,
            PointF[] prevFeatures,
            Size winSize,
            int level,
            MCvTermCriteria criteria,
            out PointF[] currFeatures,
            out Byte[] status,
            out float[] trackError,
            Emgu.CV.CvEnum.LKFlowFlag flags = CvEnum.LKFlowFlag.Default,
            double minEigThreshold = 1.0e-4)
        {
            using (Util.VectorOfPointF prevPts = new Util.VectorOfPointF())
            using (Util.VectorOfPointF nextPts = new Util.VectorOfPointF())
            using (Util.VectorOfByte statusVec = new Util.VectorOfByte())
            using (Util.VectorOfFloat errorVec = new Util.VectorOfFloat())
            {
                prevPts.Push(prevFeatures);

                CalcOpticalFlowPyrLK(
                    prev,
                    curr,
                    prevPts,
                    nextPts,
                    statusVec,
                    errorVec,
                    winSize,
                    level,
                    criteria,
                    flags,
                    minEigThreshold);
                status = statusVec.ToArray();
                trackError = errorVec.ToArray();
                currFeatures = nextPts.ToArray();
            }
        }
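
A hedged usage sketch of the overload above. The file names and feature points are placeholders, and CvInvoke.Imread with the ImreadModes enum is assumed from the Emgu 3.x API:

        // Hypothetical driver; assumes the wrapper above lives on CvInvoke.
        public static void LKDemo()
        {
            using (Mat prevFrame = CvInvoke.Imread("frame0.png", Emgu.CV.CvEnum.ImreadModes.Grayscale))
            using (Mat currFrame = CvInvoke.Imread("frame1.png", Emgu.CV.CvEnum.ImreadModes.Grayscale))
            {
                PointF[] prevPts = { new PointF(10, 10), new PointF(40, 25) }; // placeholder features
                PointF[] currPts;
                byte[] status;
                float[] err;

                CvInvoke.CalcOpticalFlowPyrLK(
                    prevFrame, currFrame, prevPts,
                    new Size(21, 21),              // search window per pyramid level
                    3,                             // level 3 => four pyramid levels
                    new MCvTermCriteria(30, 0.01), // stop after 30 iterations or eps 0.01
                    out currPts, out status, out err);
            }
        }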
Example #6
      /*
      #region Kalman Filter
      /// <summary>
      /// Allocates CvKalman and all its matrices and initializes them somehow. 
      /// </summary>
      /// <param name="dynamParams">dimensionality of the state vector</param>
      /// <param name="measureParams">dimensionality of the measurement vector </param>
      /// <param name="controlParams">dimensionality of the control vector </param>
      /// <returns>Pointer to the created Kalman filter</returns>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvCreateKalman(int dynamParams, int measureParams, int controlParams);

      /// <summary>
      /// Adjusts stochastic model state on the basis of the given measurement of the model state.
      /// The function stores adjusted state at kalman->state_post and returns it on output
      /// </summary>
      /// <param name="kalman">Pointer to the structure to be updated</param>
      /// <param name="measurement">Pointer to the structure CvMat containing the measurement vector</param>
      /// <returns>The function stores adjusted state at kalman->state_post and returns it on output</returns>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvKalmanCorrect(ref MCvKalman kalman, IntPtr measurement);


      /// <summary>
      /// Estimates the subsequent stochastic model state by its current state and stores it at kalman->state_pre
      /// The function returns the estimated state
      /// </summary>
      /// <param name="kalman">Kalman filter state</param>
      /// <param name="control">Control vector (uk), should be NULL iff there is no external control (controlParams=0). </param>
      /// <returns>the estimated state</returns>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvKalmanPredict(ref MCvKalman kalman, IntPtr control);

      /// <summary>
      /// Releases the structure CvKalman and all underlying matrices
      /// </summary>
      /// <param name="kalman">reference of the pointer to the Kalman filter structure.</param>
      [DllImport(OpencvVideoLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern void cvReleaseKalman(ref IntPtr kalman);
      #endregion
*/
      #region optical flow
      /// <summary>
      /// Calculates optical flow for a sparse feature set using the iterative Lucas-Kanade method in pyramids
      /// </summary>
      /// <param name="prev">First frame, at time t</param>
      /// <param name="curr">Second frame, at time t + dt</param>
      /// <param name="prevFeatures">Array of points for which the flow needs to be found</param>
      /// <param name="winSize">Size of the search window of each pyramid level</param>
      /// <param name="level">Maximal pyramid level number. If 0, pyramids are not used (single level); if 1, two levels are used, etc.</param>
      /// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped</param>
      /// <param name="flags">Flags</param>
      /// <param name="currFeatures">Array of 2D points containing the calculated new positions of the input features in the second image</param>
      /// <param name="status">Array. Every element is set to 1 if the flow for the corresponding feature has been found, 0 otherwise</param>
      /// <param name="trackError">Array of numbers containing the difference between patches around the original and moved points</param>
      /// <param name="minEigThreshold">The algorithm calculates the minimum eigenvalue of a 2x2 normal matrix of optical flow equations (called a spatial gradient matrix in [Bouguet00]), divided by the number of pixels in a window; if this value is less than minEigThreshold, the corresponding feature is filtered out and its flow is not processed, which allows removing bad points and getting a performance boost</param>
      public static void CalcOpticalFlowPyrLK(
         IInputArray prev,
         IInputArray curr,
         PointF[] prevFeatures,
         Size winSize,
         int level,
         MCvTermCriteria criteria,
         out PointF[] currFeatures,
         out Byte[] status,
         out float[] trackError,
         Emgu.CV.CvEnum.LKFlowFlag flags = CvEnum.LKFlowFlag.Default,
         double minEigThreshold = 1.0e-4)
      {
         using (Util.VectorOfPointF prevPts = new Util.VectorOfPointF())
         using (Util.VectorOfPointF nextPts = new Util.VectorOfPointF())
         using (Util.VectorOfByte statusVec = new Util.VectorOfByte())
         using (Util.VectorOfFloat errorVec = new Util.VectorOfFloat())
         {
            prevPts.Push(prevFeatures);

            CalcOpticalFlowPyrLK(
               prev,
               curr,
               prevPts,
               nextPts,
               statusVec,
               errorVec,
               winSize,
               level,
               criteria,
               flags,
               minEigThreshold);
            status = statusVec.ToArray();
            trackError = errorVec.ToArray();
            currFeatures = nextPts.ToArray();
         }
      }
Example #7
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoints where the descriptors will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            if (keyPoints.Length == 0) return new ImageFeature[0];

            using (VectorOfFloat descVec = new VectorOfFloat())
            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
            {
                kpts.Push(keyPoints);
                CvSIFTDetectorComputeDescriptors(_ptr, image, mask, kpts, descVec);

                int n = keyPoints.Length;
                float[] descs = descVec.ToArray();

                ImageFeature[] features = new ImageFeature[n];
                int sizeOfDescriptor = DescriptorSize;
                for (int i = 0; i < n; i++)
                {
                    features[i].KeyPoint = keyPoints[i];
                    float[] d = new float[sizeOfDescriptor];
                    Array.Copy(descs, i * sizeOfDescriptor, d, 0, sizeOfDescriptor);
                    features[i].Descriptor = d;
                }
                return features;
            }
        }
Example #8
        /// <summary>
        /// Detect image features from the given image
        /// </summary>
        /// <param name="image">The image to detect features from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <returns>The image features detected from the given image</returns>
        public ImageFeature[] DetectFeatures(Image<Gray, Byte> image, Image<Gray, byte> mask)
        {
            using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
            using (VectorOfFloat descVec = new VectorOfFloat())
            {
                CvSIFTDetectorDetectFeature(_ptr, image, mask, pts, descVec);
                MKeyPoint[] kpts = pts.ToArray();
                float[] desc = descVec.ToArray();
                int n = kpts.Length;
                int sizeOfDescriptor = DescriptorSize;

                ImageFeature[] features = new ImageFeature[n];
                for (int i = 0; i < n; i++)
                {
                    features[i].KeyPoint = kpts[i];
                    float[] d = new float[sizeOfDescriptor];
                    Array.Copy(desc, i * sizeOfDescriptor, d, 0, sizeOfDescriptor);
                    features[i].Descriptor = d;
                }
                return features;
            }
        }
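
A hedged driver for the method above; the parameterless SIFTDetector constructor is an assumption from the Emgu 2.x era (adjust to your version's signature), and the file name is a placeholder:

        // Hypothetical driver; requires using System, Emgu.CV and Emgu.CV.Structure.
        public static void SiftDemo()
        {
            using (SIFTDetector sift = new SIFTDetector()) // assumed default ctor
            using (Image<Gray, Byte> img = new Image<Gray, Byte>("scene.png"))
            {
                ImageFeature[] feats = sift.DetectFeatures(img, null); // null mask: whole image
                Console.WriteLine("{0} features of {1} floats each",
                    feats.Length, feats.Length > 0 ? feats[0].Descriptor.Length : 0);
            }
        }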
Example #9
 /// <summary>
 /// Create a debugger proxy for the given vector of float
 /// </summary>
 /// <param name="v">The vector to be displayed in the debugger</param>
 public DebuggerProxy(VectorOfFloat v)
 {
     _v = v;
 }
Example #10
 /// <summary>
 /// Push multiple values from the other vector into this vector
 /// </summary>
 /// <param name="other">The other vector, from which the values will be pushed to the current vector</param>
 public void Push(VectorOfFloat other)
 {
     VectorOfFloatPushVector(_ptr, other);
 }
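
A minimal concatenation sketch using the method above together with the managed-array Push overload:

 using (VectorOfFloat a = new VectorOfFloat())
 using (VectorOfFloat b = new VectorOfFloat())
 {
     a.Push(new float[] { 1f, 2f });
     b.Push(new float[] { 3f, 4f });
     a.Push(b); // append b's values to a; a.ToArray() is now { 1, 2, 3, 4 }
 }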