Example #1
 /// <summary>
 /// Converts objects array from internal representation to standard vector.
 /// </summary>
 /// <param name="objects">Objects array in internal representation.</param>
 /// <returns>Resulting array.</returns>
 public Rectangle[] Convert(IOutputArray objects)
 {
    using (OutputArray oaObjects = objects.GetOutputArray())
    using (VectorOfRect vr = new VectorOfRect())
    {
       CudaInvoke.cudaCascadeClassifierConvert(_ptr, oaObjects, vr);
       return vr.ToArray();
    }
 }
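
Convert pairs with CudaCascadeClassifier.DetectMultiScale, which writes its raw detections into a GpuMat. A minimal usage sketch, assuming a Haar cascade file name and a grayscale frame already uploaded to the GPU as gpuGray:

    using (CudaCascadeClassifier cascade = new CudaCascadeClassifier("haarcascade_frontalface_default.xml"))
    using (GpuMat detections = new GpuMat())
    {
        cascade.DetectMultiScale(gpuGray, detections);    // raw results in the internal GpuMat layout
        Rectangle[] faces = cascade.Convert(detections);  // standard managed Rectangle array
    }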
Example #2
 /// <summary>
 /// Performs non-maximum suppression given boxes and corresponding scores.
 /// </summary>
 /// <param name="bboxes">A set of bounding boxes to apply NMS.</param>
 /// <param name="scores">A set of corresponding confidences.</param>
 /// <param name="scoreThreshold">A threshold used to filter boxes by score.</param>
 /// <param name="nmsThreshold">A threshold used in non maximum suppression.</param>
 /// <param name="eta">A coefficient in adaptive threshold</param>
 /// <param name="topK">If &gt;0, keep at most top_k picked indices.</param>
 /// <returns>The indices of the boxes to keep after NMS</returns>
 public static int[] NMSBoxes(Rectangle[] bboxes, float[] scores, float scoreThreshold, float nmsThreshold, float eta = 1.0f, int topK = 0)
 {
     using (VectorOfRect vBoxes = new VectorOfRect(bboxes))
         using (VectorOfFloat vScores = new VectorOfFloat(scores))
             using (VectorOfInt indices = new VectorOfInt())
             {
                 NMSBoxes(vBoxes, vScores, scoreThreshold, nmsThreshold, indices, eta, topK);
                 return(indices.ToArray());
             }
 }
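
A hedged usage sketch for the method above, assuming it is exposed as Emgu's DnnInvoke.NMSBoxes: two heavily overlapping boxes go in, and only the index of the higher-scoring one comes back.

    Rectangle[] boxes = { new Rectangle(10, 10, 50, 50), new Rectangle(12, 12, 50, 50) };
    float[] scores = { 0.9f, 0.8f };
    int[] keep = DnnInvoke.NMSBoxes(boxes, scores, 0.5f, 0.4f);
    foreach (int i in keep)
        Console.WriteLine(boxes[i]);  // only the higher-scoring of the two overlapping boxes survives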
Example #3
 /// <summary>
 /// Based on all images, graph segmentations and strategies, computes all possible rects and returns them.
 /// </summary>
 /// <param name="rects">The list of rects. The first ones are more relevant than the last ones.</param>
 public virtual void Process(out Rect[] rects)
 {
     ThrowIfDisposed();
     using (var rectsVec = new VectorOfRect())
     {
         NativeMethods.ximgproc_segmentation_SelectiveSearchSegmentation_process(ptr, rectsVec.CvPtr);
         rects = rectsVec.ToArray();
     }
     GC.KeepAlive(this);
 }
Example #4
 /// <summary>
 /// Get Motion Areas from history
 /// </summary>
 /// <returns>Array of motion areas</returns>
 private Rectangle[] GetMotionAreas()
 {
     Rectangle[] rects;
     using (VectorOfRect boundingRect = new VectorOfRect())
     {
         _motionHistory.GetMotionComponents(_segMask, boundingRect);
         rects = boundingRect.ToArray();
     }
     return(rects);
 }
Example #5
 /// <summary>
 /// Returns array containing proposal boxes.
 /// </summary>
 /// <param name="edgeMap">edge image.</param>
 /// <param name="orientationMap">orientation map.</param>
 /// <returns>Proposal boxes.</returns>
 public Rectangle[] GetBoundingBoxes(IInputArray edgeMap, IInputArray orientationMap)
 {
     using (InputArray iaEdgeMap = edgeMap.GetInputArray())
         using (InputArray iaOrientationMap = orientationMap.GetInputArray())
             using (VectorOfRect vr = new VectorOfRect())
             {
                 XImgprocInvoke.cveEdgeBoxesGetBoundingBoxes(_ptr, iaEdgeMap, iaOrientationMap, vr);
                 return(vr.ToArray());
             }
 }
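
GetBoundingBoxes expects an edge map and an orientation map, typically produced by a structured-forest edge detector. A sketch of that pipeline, assuming a trained model file "model.yml.gz" and floatRgb, a CV_32FC3 image scaled to [0, 1]:

    using (StructuredEdgeDetection edgeDetector = new StructuredEdgeDetection("model.yml.gz", null))
    using (EdgeBoxes edgeBoxes = new EdgeBoxes())
    using (Mat edges = new Mat())
    using (Mat orientation = new Mat())
    {
        edgeDetector.DetectEdges(floatRgb, edges);            // expects a 32-bit float RGB image in [0, 1]
        edgeDetector.ComputeOrientation(edges, orientation);
        Rectangle[] proposals = edgeBoxes.GetBoundingBoxes(edges, orientation);
    }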
Example #6
        public void MSER()
        {
            var sourceBitmap = Samples.sample13;

            var w = sourceBitmap.Width;
            var h = sourceBitmap.Height;

            using var src = new UMat();

            using var srcMat = sourceBitmap.ToMat();
            srcMat.CopyTo(src);

            using var gray = new UMat();

            CvInvoke.CvtColor(src, gray, ColorConversion.Bgra2Gray);

            using var detector = new MSERDetector(
                      minArea: 5, maxArea: 80, edgeBlurSize: 5);
            using var msers  = new VectorOfVectorOfPoint();
            using var bboxes = new VectorOfRect();

            detector.DetectRegions(gray, msers, bboxes);

            var sw = new Stopwatch();

            sw.Start();

            var n = 100;

            for (var i = 0; i < n; i++)
            {
                detector.DetectRegions(gray, msers, bboxes);
            }

            sw.Stop();

            Console.WriteLine($"{(int)(sw.Elapsed.TotalMicroseconds() / n)} us");

            var result = new byte[w * h];

            foreach (var mser in msers.ToArrayOfArray())
            {
                foreach (var point in mser)
                {
                    result[point.Y * w + point.X] = 255;
                }
            }
            // the bounding boxes in bboxes are not used by this test


            Run("samples/sample13.png");
            //result.RunAs(w, h, 1, "mser.png");
        }
Example #7
        public Rectangle[] FindBodyHOG(Mat image, out double[] confidence)
        {
            // If CUDA is not available, fall back to the non-CUDA implementation
            if (!Global.canRunCuda)
            {
                confidence = null;
                return(FindBodyHOG_WithoutGpu(image));
            }
            if (des == null)
            {
                InitalizeBodyTracker();
            }



            List <Rectangle> regions  = new List <Rectangle>();
            VectorOfRect     rects    = new VectorOfRect();
            VectorOfDouble   confColl = new VectorOfDouble();

            confidence = new double[0];
            try
            {
                des.SetSVMDetector(des.GetDefaultPeopleDetector());

                des.GroupThreshold = 1;
                des.HitThreshold   = 0;
                des.NumLevels      = 15;
                des.ScaleFactor    = 1.05;


                using (GpuMat cudaBgr = new GpuMat(image))
                    using (GpuMat cudaBgra = new GpuMat())
                    {
                        CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                        //des.DetectMultiScale(cudaBgra, rects, confColl);
                        des.DetectMultiScale(cudaBgra, rects, null);
                    }


                //confidence = confColl.ToArray();

                for (int i = 0; i < rects.ToArray().Length; i++)
                {
                    //if (confidence[i] > 0.5)
                    regions.Add(rects[i]);
                }
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }

            return(regions.ToArray());//rects.ToArray();
        }
Example #8
        /// <summary>
        /// Find the pedestrian in the image
        /// </summary>
        /// <param name="image">The image</param>
        /// <param name="processingTime">The pedestrian detection time in milliseconds</param>
        /// <returns>The region where pedestrians are detected</returns>
        public static Rectangle[] Find(Mat image, bool tryUseCuda, bool tryUseOpenCL, out long processingTime)
        {
            Stopwatch watch;
            Rectangle[] regions;

#if !(__IOS__ || NETFX_CORE)
            //check if there is a compatible Cuda device to run pedestrian detection
            if (tryUseCuda && CudaInvoke.HasCuda)
            {  //this is the Cuda version
                using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
                {
                    des.SetSVMDetector(des.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();
                    using (GpuMat cudaBgr = new GpuMat(image))
                    using (GpuMat cudaBgra = new GpuMat())
                    using (VectorOfRect vr = new VectorOfRect())
                    {
                        CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                        des.DetectMultiScale(cudaBgra, vr);
                        regions = vr.ToArray();
                    }
                }
            }
            else
#endif
            {
                //Many OpenCL functions require OpenCL compatible GPU devices.
                //As of OpenCV 3.0-alpha, OpenCV will crash if OpenCL is enabled and only an OpenCL compatible CPU device is present.
                //So we need to call CvInvoke.HaveOpenCLCompatibleGpuDevice instead of CvInvoke.HaveOpenCL (which also returns true on a system that only has CPU OpenCL devices).
                CvInvoke.UseOpenCL = tryUseOpenCL && CvInvoke.HaveOpenCLCompatibleGpuDevice;

                //this is the CPU/OpenCL version
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

                    //load the image to umat so it will automatically use opencl if available
                    UMat umat = image.ToUMat(AccessType.Read);

                    watch = Stopwatch.StartNew();

                    MCvObjectDetection[] results = des.DetectMultiScale(umat);
                    regions = new Rectangle[results.Length];
                    for (int i = 0; i < results.Length; i++)
                        regions[i] = results[i].Rect;
                    watch.Stop();
                }
            }

            processingTime = watch.ElapsedMilliseconds;

            return regions;
        }
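
A hedged usage sketch for Find, assuming frame is a BGR Mat grabbed from a camera:

    long detectionTimeMs;
    Rectangle[] people = Find(frame, true, false, out detectionTimeMs);
    foreach (Rectangle r in people)
        CvInvoke.Rectangle(frame, r, new MCvScalar(0, 0, 255), 2);  // outline each pedestrian in red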
Example #9
        private static VectorOfPointF MarkFacialPoints(FacemarkLBF facemark, Image<Gray, byte> image, Rectangle faceRect, out bool isSuccess)
        {
            VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();
            VectorOfRect           faces     = new VectorOfRect(new Rectangle[] { faceRect });

            isSuccess = facemark.Fit(image, faces, landmarks);
            if (isSuccess)
            {
                return(landmarks[0]);     // return the landmarks for the first (and only) face rectangle
            }
            return(new VectorOfPointF()); // return an empty vector
        }
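
A minimal sketch of calling MarkFacialPoints, assuming an "lbfmodel.yaml" model file, a grayscale image grayImage, and a faceRect produced by a prior face detector:

    using (FacemarkLBFParams fParams = new FacemarkLBFParams())
    using (FacemarkLBF facemark = new FacemarkLBF(fParams))
    {
        facemark.LoadModel("lbfmodel.yaml");
        bool found;
        using (VectorOfPointF landmarks = MarkFacialPoints(facemark, grayImage, faceRect, out found))
        {
            if (found)
            {
                PointF[] points = landmarks.ToArray();  // the fitted landmark points
            }
        }
    }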
Example #10
        /// <summary>
        /// Find the pedestrian in the image
        /// </summary>
        /// <param name="image">The image</param>
        /// <param name="processingTime">The processing time in milliseconds</param>
        /// <returns>The region where pedestrians are detected</returns>
        public static Rectangle[] Find(IInputArray image, out long processingTime)
        {
            Stopwatch watch;

            Rectangle[] regions;

            using (InputArray iaImage = image.GetInputArray())
            {
#if !(__IOS__ || NETFX_CORE)
                //if the input array is a GpuMat
                //check if there is a compatible Cuda device to run pedestrian detection
                if (iaImage.Kind == InputArray.Type.CudaGpuMat)
                {
                    //this is the Cuda version
                    using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
                    {
                        des.SetSVMDetector(des.GetDefaultPeopleDetector());

                        watch = Stopwatch.StartNew();
                        using (GpuMat cudaBgra = new GpuMat())
                            using (VectorOfRect vr = new VectorOfRect())
                            {
                                CudaInvoke.CvtColor(image, cudaBgra, ColorConversion.Bgr2Bgra);
                                des.DetectMultiScale(cudaBgra, vr);
                                regions = vr.ToArray();
                            }
                    }
                }
                else
#endif
                {
                    //this is the CPU/OpenCL version
                    using (HOGDescriptor des = new HOGDescriptor())
                    {
                        des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                        watch = Stopwatch.StartNew();

                        MCvObjectDetection[] results = des.DetectMultiScale(image);
                        regions = new Rectangle[results.Length];
                        for (int i = 0; i < results.Length; i++)
                        {
                            regions[i] = results[i].Rect;
                        }
                        watch.Stop();
                    }
                }

                processingTime = watch.ElapsedMilliseconds;

                return(regions);
            }
        }
Example #11
        /// <summary>
        /// Find the pedestrian in the image
        /// </summary>
        /// <param name="image">The image</param>
        /// <param name="processingTime">The pedestrian detection time in milliseconds</param>
        /// <returns>The region where pedestrians are detected</returns>
        public static Rectangle[] Find(Mat image, bool tryUseCuda, out long processingTime)
        {
            Stopwatch watch;

            Rectangle[] regions;

#if !(__IOS__ || NETFX_CORE)
            //check if there is a compatible Cuda device to run pedestrian detection
            if (tryUseCuda && CudaInvoke.HasCuda)
            { //this is the Cuda version
                using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
                {
                    des.SetSVMDetector(des.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();
                    using (GpuMat cudaBgr = new GpuMat(image))
                        using (GpuMat cudaBgra = new GpuMat())
                            using (VectorOfRect vr = new VectorOfRect())
                            {
                                CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                                des.DetectMultiScale(cudaBgra, vr);
                                regions = vr.ToArray();
                            }
                }
            }
            else
#endif
            {
                //this is the CPU/OpenCL version
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

                    //load the image to umat so it will automatically use opencl if available
                    UMat umat = image.ToUMat(AccessType.Read);

                    watch = Stopwatch.StartNew();

                    MCvObjectDetection[] results = des.DetectMultiScale(umat);
                    regions = new Rectangle[results.Length];
                    for (int i = 0; i < results.Length; i++)
                    {
                        regions[i] = results[i].Rect;
                    }
                    watch.Stop();
                }
            }

            processingTime = watch.ElapsedMilliseconds;

            return(regions);
        }
Example #12
 /// <summary>
 /// Finds all the motion segments and marks them in segMask with individual values (1, 2, ...). It also returns a sequence of CvConnectedComp structures, one per motion component. After that, the motion direction for every component can be calculated with cvCalcGlobalOrientation using the extracted mask of the particular component (using cvCmp).
 /// </summary>
 /// <param name="mhi">Motion history image</param>
 /// <param name="segMask">Image where the mask found should be stored, single-channel, 32-bit floating-point</param>
 /// <param name="timestamp">Current time in milliseconds or other units</param>
 /// <param name="segThresh">Segmentation threshold; recommended to be equal to the interval between motion history "steps" or greater</param>
 /// <param name="boundingRects">Vector containing ROIs of motion connected components.</param>
 public static void SegmentMotion(
     IInputArray mhi,
     IOutputArray segMask,
     VectorOfRect boundingRects,
     double timestamp,
     double segThresh)
 {
     using (InputArray iaMhi = mhi.GetInputArray())
         using (OutputArray oaSegMask = segMask.GetOutputArray())
         {
             cveSegmentMotion(iaMhi, oaSegMask, boundingRects, timestamp, segThresh);
         }
 }
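
A hedged sketch of calling SegmentMotion, assuming mhi is the single-channel floating-point motion history image and timestamp/lastTimestamp track the update times (the doc above recommends a segmentation threshold at least equal to the history step):

    using (Mat segMask = new Mat())
    using (VectorOfRect rois = new VectorOfRect())
    {
        SegmentMotion(mhi, segMask, rois, timestamp, timestamp - lastTimestamp);
        Rectangle[] componentRects = rois.ToArray();  // one ROI per motion component
    }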
Example #13
 /// <summary>
 /// Performs soft non-maximum suppression given boxes and corresponding scores. Reference: https://arxiv.org/abs/1704.04503
 /// </summary>
 /// <param name="bboxes">A set of bounding boxes to apply Soft NMS.</param>
 /// <param name="scores">A set of corresponding confidences.</param>
 /// <param name="updatedScores">A set of corresponding updated confidences.</param>
 /// <param name="scoreThreshold">A threshold used to filter boxes by score.</param>
 /// <param name="nmsThreshold">A threshold used in non maximum suppression.</param>
 /// <param name="indices">The kept indices of bboxes after NMS.</param>
 /// <param name="topK">Keep at most <paramref name="topK"/> picked indices.</param>
 /// <param name="sigma">Parameter of Gaussian weighting.</param>
 /// <param name="method">Gaussian or linear.</param>
 public static void SoftNMSBoxes(
     VectorOfRect bboxes,
     VectorOfFloat scores,
     VectorOfFloat updatedScores,
     float scoreThreshold,
     float nmsThreshold,
     VectorOfInt indices,
     int topK             = 0,
     float sigma          = 0.5f,
     SoftNMSMethod method = SoftNMSMethod.Gaussian)
 {
     cveDnnSoftNMSBoxes(bboxes, scores, updatedScores, scoreThreshold, nmsThreshold, indices, topK, sigma, method);
 }
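
A hedged usage sketch for SoftNMSBoxes: unlike plain NMS, overlapping boxes are kept with decayed scores rather than dropped outright.

    using (VectorOfRect boxes = new VectorOfRect(new Rectangle[] { new Rectangle(0, 0, 40, 40), new Rectangle(5, 5, 40, 40) }))
    using (VectorOfFloat scores = new VectorOfFloat(new float[] { 0.9f, 0.8f }))
    using (VectorOfFloat updated = new VectorOfFloat())
    using (VectorOfInt kept = new VectorOfInt())
    {
        SoftNMSBoxes(boxes, scores, updated, 0.5f, 0.3f, kept);
        int[] keptIndices = kept.ToArray();     // indices of the surviving boxes
        float[] newScores = updated.ToArray();  // their decayed confidences
    }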
Example #14
        private void FindFacialFeaturePoints()
        {
            string facePath;

            try
            {
                // get face detect dataset
                facePath = Path.GetFileName(@"data/haarcascade_frontalface_default.xml");

                // get FFP dataset
                facemarkParam = new FacemarkLBFParams();
                facemark      = new FacemarkLBF(facemarkParam);
                facemark.LoadModel(@"data/lbfmodel.yaml");
            }

            catch (Exception ex)
            {
                // preserve the original exception as the inner exception so the stack trace is not lost
                throw new Exception(ex.Message, ex);
            }

            // initialize imageMat
            currImageMat = CurrImageI.Mat;
            nextImageMat = NextImageI.Mat;

            // Current Face
            FacesListCurr = facesArrCurr.OfType <Rectangle>().ToList();

            // Find facial feature points
            VectorOfRect vrLeft = new VectorOfRect(facesArrCurr);

            landmarksCurr = new VectorOfVectorOfPointF();

            facemark.Fit(currImageMat, vrLeft, landmarksCurr);
            ffpCurr = landmarksCurr[curr.SelectedFace];


            // Next Face
            FacesListNext = facesArrNext.OfType <Rectangle>().ToList();

            // Find facial feature points
            VectorOfRect vrRight = new VectorOfRect(facesArrNext);

            landmarksNext = new VectorOfVectorOfPointF();

            facemark.Fit(nextImageMat, vrRight, landmarksNext);
            ffpNext = landmarksNext[next.SelectedFace];

            // Add Corner points
            ffpCurr = AddCornerPoints(ffpCurr, this.curr.ResizedImage.Mat);
            ffpNext = AddCornerPoints(ffpNext, this.next.ResizedImage.Mat);
        }
Example #15
 /// <summary>
 /// Selects ROIs on the given image. Function creates a window and allows user to select a ROIs using mouse. Controls: use space or enter to finish current selection and start a new one, use esc to terminate multiple ROI selection process.
 /// </summary>
 /// <param name="windowName"> Name of the window where selection process will be shown.</param>
 /// <param name="img"> Image to select a ROI.</param>
 /// <param name="showCrosshair"> If true crosshair of selection rectangle will be shown.</param>
 /// <param name="fromCenter"> If true center of selection will match initial mouse position. In opposite case a corner of selection rectangle will correspont to the initial mouse position.</param>
 /// <returns> Selected ROIs.</returns>
 public static Rectangle[] SelectROIs(
     String windowName,
     IInputArray img,
     bool showCrosshair = true,
     bool fromCenter    = false)
 {
     using (VectorOfRect vr = new VectorOfRect())
         using (CvString csWindowName = new CvString(windowName))
             using (InputArray iaImg = img.GetInputArray())
             {
                 cveSelectROIs(csWindowName, iaImg, vr, showCrosshair, fromCenter);
                 return(vr.ToArray());
             }
 }
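
A hedged usage sketch, assuming img is a loaded Mat; each selected region is cropped via Mat's ROI constructor:

    Rectangle[] rois = SelectROIs("select regions", img);
    foreach (Rectangle roi in rois)
    {
        using (Mat cropped = new Mat(img, roi))  // view into the selected region
        {
            CvInvoke.Imwrite($"roi_{roi.X}_{roi.Y}.png", cropped);
        }
    }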
Example #16
 /// <summary>
 /// Performs object detection with increasing detection window.
 /// </summary>
 /// <param name="image">The CudaImage to search in</param>
 /// <param name="hitThreshold">The threshold for the distance between features and classifying plane.</param>
 /// <param name="winStride">Window stride. Must be a multiple of block stride.</param>
 /// <param name="padding">Mock parameter to keep CPU interface compatibility. Must be (0,0).</param>
 /// <param name="scale">Coefficient of the detection window increase.</param>
 /// <param name="groupThreshold">After detection some objects could be covered by many rectangles. This coefficient regulates similarity threshold. 0 means don't perform grouping.</param>
 /// <returns>The regions where positives are found</returns>
 public Rectangle[] DetectMultiScale(
     GpuMat image,
     double hitThreshold = 0,
     Size winStride      = new Size(),
     Size padding        = new Size(),
     double scale        = 1.05,
     int groupThreshold  = 2)
 {
     using (Util.VectorOfRect vr = new VectorOfRect())
     {
         CudaInvoke.cudaHOGDescriptorDetectMultiScale(_ptr, image, vr.Ptr, hitThreshold, ref winStride, ref padding, scale, groupThreshold);
         return(vr.ToArray());
     }
 }
Example #17
      /// <summary>
      /// Find the pedestrian in the image
      /// </summary>
      /// <param name="image">The image</param>
      /// <param name="processingTime">The processing time in milliseconds</param>
      /// <returns>The region where pedestrians are detected</returns>
      public static Rectangle[] Find(IInputArray image, out long processingTime)
      {
         Stopwatch watch;
         Rectangle[] regions;

         using (InputArray iaImage = image.GetInputArray())
         {
#if !(__IOS__ || NETFX_CORE)
            //if the input array is a GpuMat
            //check if there is a compatible Cuda device to run pedestrian detection
            if (iaImage.Kind == InputArray.Type.CudaGpuMat)
            {
               //this is the Cuda version
               using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
               {
                  des.SetSVMDetector(des.GetDefaultPeopleDetector());

                  watch = Stopwatch.StartNew();
                  using (GpuMat cudaBgra = new GpuMat())
                  using (VectorOfRect vr = new VectorOfRect())
                  {
                     CudaInvoke.CvtColor(image, cudaBgra, ColorConversion.Bgr2Bgra);
                     des.DetectMultiScale(cudaBgra, vr);
                     regions = vr.ToArray();
                  }
               }
            }
            else
#endif
            {
               //this is the CPU/OpenCL version
               using (HOGDescriptor des = new HOGDescriptor())
               {
                  des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                  watch = Stopwatch.StartNew();

                  MCvObjectDetection[] results = des.DetectMultiScale(image);
                  regions = new Rectangle[results.Length];
                  for (int i = 0; i < results.Length; i++)
                     regions[i] = results[i].Rect;
                  watch.Stop();
               }
            }

            processingTime = watch.ElapsedMilliseconds;

            return regions;
         }
      }
Example #18
      /// <summary>
      /// Find the pedestrian in the image
      /// </summary>
      /// <param name="image">The image</param>
      /// <param name="processingTime">The pedestrian detection time in milliseconds</param>
      /// <returns>The region where pedestrians are detected</returns>
      public static Rectangle[] Find(Mat image, bool tryUseCuda, out long processingTime)
      {
         Stopwatch watch;
         Rectangle[] regions;

#if !(__IOS__ || NETFX_CORE)
         //check if there is a compatible Cuda device to run pedestrian detection
         if (tryUseCuda && CudaInvoke.HasCuda)
         {  //this is the Cuda version
            using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8,8), new Size(8,8)))
            {
               des.SetSVMDetector(des.GetDefaultPeopleDetector());

               watch = Stopwatch.StartNew();
               using (GpuMat cudaBgr = new GpuMat(image))
               using (GpuMat cudaBgra = new GpuMat() )
               using (VectorOfRect vr = new VectorOfRect())
               {
                  CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                  des.DetectMultiScale(cudaBgra, vr);
                  regions = vr.ToArray();
               }
            }
         }
         else
#endif
         {  
            //this is the CPU/OpenCL version
            using (HOGDescriptor des = new HOGDescriptor())
            {
               des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
               
               //load the image to umat so it will automatically use opencl if available
               UMat umat = image.ToUMat(AccessType.Read);

               watch = Stopwatch.StartNew();
               
               MCvObjectDetection[] results = des.DetectMultiScale(umat);
               regions = new Rectangle[results.Length];
               for (int i = 0; i < results.Length; i++)
                  regions[i] = results[i].Rect;
               watch.Stop();
            }
         }
        
         processingTime = watch.ElapsedMilliseconds;

         return regions;
      }
Example #19
        /// <summary>
        /// Find the pedestrian in the image
        /// </summary>
        /// <param name="image">The image</param>
        /// <param name="processingTime">The pedestrian detection time in milliseconds</param>
        /// <returns>The image with pedestrian highlighted.</returns>
        public static Image<Bgr, Byte> Find(Image<Bgr, Byte> image, out long processingTime)
        {
            Stopwatch watch;
            Rectangle[] regions;

            //check if there is a compatible GPU to run pedestrian detection
            if (CudaInvoke.HasCuda)
            {  //this is the GPU version
                using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
                {
                    des.SetSVMDetector(des.GetDefaultPeopleDetector());

                    watch = Stopwatch.StartNew();
                    using (CudaImage<Bgr, Byte> gpuImg = new CudaImage<Bgr, byte>(image))
                    using (CudaImage<Bgra, Byte> gpuBgra = gpuImg.Convert<Bgra, Byte>())
                    using (VectorOfRect vr = new VectorOfRect())
                    {
                        //gpuImg.Convert<Bgra, Byte>() above already produced a BGRA image; no further color conversion is needed
                        des.DetectMultiScale(gpuBgra, vr);
                        regions = vr.ToArray();
                    }
                }
            }
            else
            {  //this is the CPU version
                using (HOGDescriptor des = new HOGDescriptor())
                {
                    des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                    //load the image to umat so it will automatically use opencl if available
                    UMat umat = image.ToUMat();

                    watch = Stopwatch.StartNew();
                    //regions = des.DetectMultiScale(image);
                    MCvObjectDetection[] results = des.DetectMultiScale(umat);
                    regions = new Rectangle[results.Length];
                    for (int i = 0; i < results.Length; i++)
                        regions[i] = results[i].Rect;
                }
            }
            watch.Stop();

            processingTime = watch.ElapsedMilliseconds;

            foreach (Rectangle pedestrian in regions)
            {
                image.Draw(pedestrian, new Bgr(Color.Red), 1);
            }
            return image;
        }
Example #20
        public List <IImage> ProcessFrame(IImage original)
        {
            Rectangle[] peopleRegion;

            using (InputArray iaImage = original.GetInputArray())
            {
#if !(__IOS__ || NETFX_CORE)
                //if the input array is a GpuMat
                //check if there is a compatible Cuda device to run pedestrian detection
                if (iaImage.Kind == InputArray.Type.CudaGpuMat)
                {
                    //this is the Cuda version
                    using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
                    {
                        des.SetSVMDetector(des.GetDefaultPeopleDetector());

                        using (GpuMat cudaBgra = new GpuMat())
                            using (VectorOfRect vr = new VectorOfRect())
                            {
                                CudaInvoke.CvtColor(original, cudaBgra, ColorConversion.Bgr2Bgra);
                                des.DetectMultiScale(cudaBgra, vr);
                                peopleRegion = vr.ToArray();
                            }
                    }
                }
                else
#endif
                {
                    //this is the CPU/OpenCL version
                    using (HOGDescriptor peopleDescriptor = new HOGDescriptor())
                    {
                        peopleDescriptor.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

                        MCvObjectDetection[] peopleFound = peopleDescriptor.DetectMultiScale(
                            original, 0, default(Size), default(Size),
                            AdjustableParameters["Scale"].CurrentValue,
                            AdjustableParameters["SimilarityThreshold"].CurrentValue,
                            AdjustableParameters["MeanShiftGrouping"].CurrentValue == 1);
                        peopleRegion = new Rectangle[peopleFound.Length];
                        for (int i = 0; i < peopleFound.Length; i++)
                        {
                            peopleRegion[i] = peopleFound[i].Rect;
                        }
                    }
                }

                IImage copy = CopyAndDraw(original, peopleRegion);
                return(new List <IImage> {
                    copy
                });
            }
        }
Example #21
        // Detects faces in the given Mat.
        public List <Rectangle> detectFace(Mat mat)
        {
            // Convert the current frame to grayscale.
            Mat gray_mat = new Mat();

            CvInvoke.CvtColor(mat, gray_mat, Emgu.CV.CvEnum.ColorConversion.Rgb2Gray);

            List <Rectangle> faces = new List <Rectangle>();
            double           w     = (double)mat.Width;
            double           h     = (double)mat.Height;

            // Detect frontal faces.
            Rectangle[] faceDetections = frontalFaceDetector.DetectMultiScale(gray_mat, 1.1, 3,
                                                                              new Size((int)(0.05 * w), (int)(0.05 * h)), new Size((int)(0.8 * w), (int)(0.8 * h)));
            faces.AddRange(faceDetections);
            // Detect right profile faces.
            faceDetections = profileFaceDetector.DetectMultiScale(gray_mat, 1.1, 3,
                                                                  new Size((int)(0.05 * w), (int)(0.05 * h)), new Size((int)(0.8 * w), (int)(0.8 * h)));
            faces.AddRange(faceDetections);

            // Detect left profile faces: flip the image horizontally and reuse the right-profile detector.
            // flipCode = 1 for horizontal flip, along y-axis in mat.
            Mat flipped_mat = new Mat();

            CvInvoke.Flip(gray_mat, flipped_mat, FlipType.Horizontal);
            faceDetections = profileFaceDetector.DetectMultiScale(flipped_mat, 1.1, 3,
                                                                  new Size((int)(0.05 * w), (int)(0.05 * h)), new Size((int)(0.8 * w), (int)(0.8 * h)));
            // map the flipped left-profile detections back to the original image coordinates
            foreach (Rectangle leftProfileInFlippedMat in faceDetections)
            {
                Rectangle leftProfileInOriginalMat = leftProfileInFlippedMat;
                leftProfileInOriginalMat.X = mat.Width - (leftProfileInFlippedMat.X + leftProfileInFlippedMat.Width);
                faces.Add(leftProfileInOriginalMat);
            }
            logger.Debug("CascadeClassifier detected face number: " + faces.Count());

            // Merge overlapped faces.
            //List<Rectangle> allMatOfRect = new List<Rectangle>();
            VectorOfRect allMatOfRect = new VectorOfRect();

            // duplicate every rect so GroupRectangles (groupThreshold = 1) does not discard isolated detections
            faces.AddRange(faces);
            Rectangle[] tmpfaces = faces.ToArray();
            allMatOfRect.Push(tmpfaces);
            CvInvoke.GroupRectangles(allMatOfRect, 1, 0.2);
            logger.Debug("Grouped face number: " + allMatOfRect.Size);

            return(allMatOfRect.ToArray().ToList());
        }
Example #22
        private FaceModel GetFaceModel(Image <Bgr, Byte> image, Image <Gray, byte> grayImage)
        {
            grayImage._EqualizeHist();
            VectorOfRect faces = new VectorOfRect(faceDetector.DetectMultiScale(grayImage));

            Rectangle[]            rects     = faces.ToArray();
            VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();
            bool success = facemark.Fit(grayImage, faces, landmarks);

            PointF[] points = landmarks.ToArrayOfArray()[0];
            if (!success)
            {
                return(null);
            }
            return(new FaceModel(points, rects[0]));
        }
Example #23
        /// <summary>
        /// Draws bounding rectangles around objects in motion.
        /// </summary>
        /// <param name="annotatedImage">The image on which the bounding rectangles are drawn.</param>
        /// <param name="data">The raw data for the bounding rectangles.</param>
        private void DrawMotion(ref Image <Bgr, byte> annotatedImage, ref List <object> data)
        {
            using (var boundingRect = new VectorOfRect())
            {
                // get the motion components (and their bounding rectangles)
                _motionHistory.GetMotionComponents(_segMask, boundingRect);
                var rects = boundingRect.ToArray();

                // draw the rectangles and populate the data
                foreach (var rect in rects.Where(r => r.Width * r.Height >= _minArea))
                {
                    annotatedImage.Draw(rect, new Bgr(_annoColor.Color()), _lineThick);
                    data.Add(new Box(rect));
                }
            }
        }
Example #24
        /// <summary>
        /// Find the pedestrian in the image
        /// </summary>
        /// <param name="image">The image</param>
        /// <returns>The region where pedestrians are detected</returns>
        public static Rectangle[] Find(IInputArray image, HOGDescriptor hog, CudaHOG hogCuda = null)
        {
            //Stopwatch watch;
            Rectangle[] regions;

            using (InputArray iaImage = image.GetInputArray())
            {
                //if the input array is a GpuMat
                //check if there is a compatible Cuda device to run pedestrian detection
                if (iaImage.Kind == InputArray.Type.CudaGpuMat && hogCuda != null)
                {
                    //this is the Cuda version



                    //watch = Stopwatch.StartNew();
                    using (GpuMat cudaBgra = new GpuMat())
                        using (VectorOfRect vr = new VectorOfRect())
                        {
                            CudaInvoke.CvtColor(image, cudaBgra, ColorConversion.Bgr2Bgra);
                            hogCuda.DetectMultiScale(cudaBgra, vr);
                            regions = vr.ToArray();
                        }
                }
                else
                {
                    //this is the CPU/OpenCL version


                    //watch = Stopwatch.StartNew();

                    MCvObjectDetection[] results = hog.DetectMultiScale(image);
                    regions = new Rectangle[results.Length];
                    for (int i = 0; i < results.Length; i++)
                    {
                        regions[i] = results[i].Rect;
                    }
                    //watch.Stop();
                }

                //processingTime = watch.ElapsedMilliseconds;

                return(regions);
            }
        }
Example #25
        /// <summary>
        /// Recognize text using the tesseract-ocr API.
        /// Takes image on input and returns recognized text in the output_text parameter.
        /// Optionally provides also the Rects for individual text elements found (e.g. words),
        /// and the list of those text elements with their confidence values.
        /// </summary>
        /// <param name="image">Input image CV_8UC1 or CV_8UC3</param>
        /// <param name="mask"></param>
        /// <param name="outputText">Output text of the tesseract-ocr.</param>
        /// <param name="componentRects">If provided the method will output a list of Rects for the individual
        /// text elements found(e.g.words or text lines).</param>
        /// <param name="componentTexts">If provided the method will output a list of text strings for the
        /// recognition of individual text elements found(e.g.words or text lines).</param>
        /// <param name="componentConfidences">If provided the method will output a list of confidence values
        /// for the recognition of individual text elements found(e.g.words or text lines).</param>
        /// <param name="componentLevel">OCR_LEVEL_WORD (by default), or OCR_LEVEL_TEXT_LINE.</param>
        public override void Run(
            Mat image,
            Mat mask,
            out string outputText,
            out Rect[] componentRects,
            out string[] componentTexts,
            out float[] componentConfidences,
            ComponentLevels componentLevel = ComponentLevels.Word)
        {
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            if (mask == null)
            {
                throw new ArgumentNullException(nameof(mask));
            }
            image.ThrowIfDisposed();
            mask.ThrowIfDisposed();

            using (var outputTextString = new StdString())
                using (var componentRectsVector = new VectorOfRect())
                    using (var componentTextsVector = new VectorOfString())
                        using (var componentConfidencesVector = new VectorOfFloat())
                        {
                            NativeMethods.text_OCRTesseract_run2(
                                ptr,
                                image.CvPtr,
                                mask.CvPtr,
                                outputTextString.CvPtr,
                                componentRectsVector.CvPtr,
                                componentTextsVector.CvPtr,
                                componentConfidencesVector.CvPtr,
                                (int)componentLevel);

                            outputText           = outputTextString.ToString();
                            componentRects       = componentRectsVector.ToArray();
                            componentTexts       = componentTextsVector.ToArray();
                            componentConfidences = componentConfidencesVector.ToArray();
                        }

            GC.KeepAlive(image);
        }
Example #26
        public Rectangle[] Detect(Image <Gray, byte> grayframe)
        {
            using (CudaCascadeClassifier des = new CudaCascadeClassifier(ConfigurationManager.AppSettings["haarPath"]))
            {
                using (GpuMat cudaBgra = new GpuMat())
                {
                    using (VectorOfRect vr = new VectorOfRect())
                    {
                        //CudaInvoke.CvtColor(grayframe, cudaBgra, ColorConversion.Bgr2Bgra);
                        cudaBgra.Upload(grayframe);
                        //CudaInvoke.CvtColor(grayframe, cudaBgra, ColorConversion.Gray2Bgra);
                        des.DetectMultiScale(cudaBgra, vr);
                        var regions = vr.ToArray();

                        return(regions);
                    }
                }
            }
        }
Example #27
        /// <summary>
        /// Find groups of Extremal Regions that are organized as text blocks.
        /// </summary>
        /// <param name="channels">Array of sinle channel images from wich the regions were extracted</param>
        /// <param name="erstats">Vector of ER’s retreived from the ERFilter algorithm from each channel</param>
        /// <param name="groupingTrainedFileName">The XML or YAML file with the classifier model (e.g. trained_classifier_erGrouping.xml)</param>
        /// <param name="minProbability">The minimum probability for accepting a group.</param>
        /// <returns>The output of the algorithm that indicates the text regions</returns>
        public static System.Drawing.Rectangle[] ERGrouping(IInputArray channels, VectorOfERStat[] erstats, String groupingTrainedFileName, float minProbability = 0.5f)
        {
            IntPtr[] erstatPtrs = new IntPtr[erstats.Length];

            for (int i = 0; i < erstatPtrs.Length; i++)
            {
                erstatPtrs[i] = erstats[i].Ptr;
            }

            using (VectorOfRect regions = new VectorOfRect())
                using (CvString s = new CvString(groupingTrainedFileName))
                    using (InputArray iaChannels = channels.GetInputArray())
                    {
                        GCHandle erstatsHandle = GCHandle.Alloc(erstatPtrs, GCHandleType.Pinned);
                        try
                        {
                            CvERGrouping(iaChannels, erstatsHandle.AddrOfPinnedObject(), erstatPtrs.Length, s, minProbability, regions);
                        }
                        finally
                        {
                            // free the pinned handle even if the native call throws
                            erstatsHandle.Free();
                        }
                        return(regions.ToArray());
                    }
        }
Example #28
 /// <summary>
 /// Performs object detection with increasing detection window.
 /// </summary>
 /// <param name="image">The CudaImage to search in</param>
 /// <returns>The regions where positives are found</returns>
 public MCvObjectDetection[] DetectMultiScale(IInputArray image)
 {
     using (Util.VectorOfRect vr = new VectorOfRect())
         using (Util.VectorOfDouble vd = new VectorOfDouble())
         {
             DetectMultiScale(image, vr, vd);
             Rectangle[]          location = vr.ToArray();
             double[]             weight   = vd.ToArray();
             MCvObjectDetection[] result   = new MCvObjectDetection[location.Length];
             for (int i = 0; i < result.Length; i++)
             {
                 MCvObjectDetection od = new MCvObjectDetection();
                 od.Rect   = location[i];
                 od.Score  = (float)weight[i];
                 result[i] = od;
             }
             return(result);
         }
 }
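
A hedged usage sketch for the method above, assuming hog is the surrounding CudaHOG instance, gpuBgra a BGRA GpuMat, and frame the original Mat used for drawing; the Score field allows confidence filtering that the plain Rectangle[] overloads lose:

    MCvObjectDetection[] detections = hog.DetectMultiScale(gpuBgra);
    foreach (MCvObjectDetection d in detections)
    {
        if (d.Score > 0.5f)  // keep only confident detections
            CvInvoke.Rectangle(frame, d.Rect, new MCvScalar(0, 0, 255), 2);
    }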
Example #29
        public Image <Bgr, Byte> GetFacePoints()
        {
            String facePath = Path.GetFullPath(@"../../data/haarcascade_frontalface_default.xml");

            //CascadeClassifier faceDetector = new CascadeClassifier(@"..\..\Resource\EMGUCV\haarcascade_frontalface_default.xml");
            CascadeClassifier faceDetector = new CascadeClassifier(facePath);
            FacemarkLBFParams fParams      = new FacemarkLBFParams();

            //fParams.ModelFile = @"..\..\Resource\EMGUCV\lbfmodel.yaml";
            fParams.ModelFile  = @"lbfmodel.yaml";
            fParams.NLandmarks = 68; // number of landmark points
            fParams.InitShapeN = 10; // multiplier used for data augmentation of the initial shapes
            fParams.StagesN    = 5;  // number of refinement stages
            fParams.TreeN      = 6;  // number of trees in the model for each landmark point
            fParams.TreeDepth  = 5;  // the depth of each decision tree
            FacemarkLBF facemark = new FacemarkLBF(fParams);
            //facemark.SetFaceDetector(MyDetector);

            Image <Bgr, Byte>  image     = new Image <Bgr, byte>("test.png");
            Image <Gray, byte> grayImage = image.Convert <Gray, byte>();

            grayImage._EqualizeHist();

            VectorOfRect           faces     = new VectorOfRect(faceDetector.DetectMultiScale(grayImage));
            VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();

            facemark.LoadModel(fParams.ModelFile);

            bool success = facemark.Fit(grayImage, faces, landmarks);

            if (success)
            {
                Rectangle[] facesRect = faces.ToArray();
                for (int i = 0; i < facesRect.Length; i++)
                {
                    image.Draw(facesRect[i], new Bgr(Color.Blue), 2);
                    FaceInvoke.DrawFacemarks(image, landmarks[i], new Bgr(Color.Blue).MCvScalar);
                }
                return(image);
            }
            return(null);
        }
Example #30
 /// <summary>
 /// Given the input frame, create input blob, run net and return result detections.
 /// </summary>
 /// <param name="frame">The input image.</param>
 /// <param name="classIds">Class indexes in result detection.</param>
 /// <param name="confidences">A set of corresponding confidences.</param>
 /// <param name="boxes">A set of bounding boxes.</param>
 /// <param name="confThreshold">A threshold used to filter boxes by confidences.</param>
 /// <param name="nmsThreshold">A threshold used in non maximum suppression.</param>
 public void Detect(
     IInputArray frame,
     VectorOfInt classIds,
     VectorOfFloat confidences,
     VectorOfRect boxes,
     float confThreshold = 0.5f,
     float nmsThreshold  = 0.5f)
 {
     using (InputArray iaFrame = frame.GetInputArray())
     {
         DnnInvoke.cveDnnDetectionModelDetect(
             _ptr,
             iaFrame,
             classIds,
             confidences,
             boxes,
             confThreshold,
             nmsThreshold);
     }
 }
Example #31
 /// <summary>
 /// Find bounding boxes of text words given an input image.
 /// </summary>
 /// <param name="inputImage">An image expected to be a CV_U8C3 of any size</param>
 /// <returns>The text regions found.</returns>
 public TextRegion[] Detect(IInputArray inputImage)
 {
     using (InputArray iaImage = inputImage.GetInputArray())
         using (VectorOfRect vr = new VectorOfRect())
             using (VectorOfFloat vf = new VectorOfFloat())
             {
                 TextInvoke.cveTextDetectorCNNDetect(_ptr, iaImage, vr, vf);
                 Rectangle[]  bboxes     = vr.ToArray();
                 float[]      confidents = vf.ToArray();
                 TextRegion[] regions    = new TextRegion[bboxes.Length];
                 for (int i = 0; i < regions.Length; i++)
                 {
                     TextRegion tr = new TextRegion();
                     tr.BBox      = bboxes[i];
                     tr.Confident = confidents[i];
                     regions[i]   = tr;
                 }
                 return(regions);
             }
 }
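
A hedged usage sketch; the TextBoxes architecture/weights file names are assumptions, and the 0.6 confidence cut-off is arbitrary:

    using (TextDetectorCNN detector = new TextDetectorCNN("textbox.prototxt", "TextBoxes_icdar13.caffemodel"))
    {
        TextRegion[] regions = detector.Detect(bgrImage);
        foreach (TextRegion r in regions)
        {
            if (r.Confident > 0.6f)  // keep only confident word boxes
                CvInvoke.Rectangle(bgrImage, r.BBox, new MCvScalar(0, 255, 0), 2);
        }
    }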
Example #32
        public Rectangle[] Detect(Image <Gray, byte> grayframe)
        {
            using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
            {
                des.SetSVMDetector(des.GetDefaultPeopleDetector());

                using (GpuMat cudaBgra = new GpuMat())
                {
                    using (VectorOfRect vr = new VectorOfRect())
                    {
                        //CudaInvoke.CvtColor(grayframe, cudaBgra, ColorConversion.Bgr2Bgra);
                        cudaBgra.Upload(grayframe);
                        //CudaInvoke.CvtColor(grayframe, cudaBgra, ColorConversion.Gray2Bgra);
                        des.DetectMultiScale(cudaBgra, vr);
                        var regions = vr.ToArray();

                        return(regions);
                    }
                }
            }
        }
Example #33
        public void TestHOG2()
        {
            if (CudaInvoke.HasCuda)
            {
                using (CudaHOG hog = new CudaHOG(
                           new Size(48, 96), //winSize
                           new Size(16, 16), //blockSize
                           new Size(8, 8),   //blockStride
                           new Size(8, 8)    //cellSize
                           ))
                    using (Mat pedestrianDescriptor = hog.GetDefaultPeopleDetector())
                        using (Image <Bgr, Byte> image = new Image <Bgr, byte>("pedestrian.png"))
                        {
                            //float[] pedestrianDescriptor = CudaHOGDescriptor.GetPeopleDetector48x96();
                            hog.SetSVMDetector(pedestrianDescriptor);

                            Stopwatch   watch = Stopwatch.StartNew();
                            Rectangle[] rects;
                            using (GpuMat cudaImage = new GpuMat(image))
                                using (GpuMat gpuBgra = new GpuMat())
                                    using (VectorOfRect vRect = new VectorOfRect())
                                    {
                                        CudaInvoke.CvtColor(cudaImage, gpuBgra, ColorConversion.Bgr2Bgra);
                                        hog.DetectMultiScale(gpuBgra, vRect);
                                        rects = vRect.ToArray();
                                    }
                            watch.Stop();

                            //Assert.AreEqual(1, rects.Length);

                            foreach (Rectangle rect in rects)
                            {
                                image.Draw(rect, new Bgr(Color.Red), 1);
                            }
                            Trace.WriteLine(String.Format("HOG detection time: {0} ms", watch.ElapsedMilliseconds));

                            //ImageViewer.Show(image, String.Format("Detection Time: {0}ms", watch.ElapsedMilliseconds));
                        }
            }
        }
Example #34
        /// <summary>
        /// Detect faces from a given image using default or user defined face detector.
        /// Some Algorithm might not provide a default face detector.
        /// </summary>
        /// <param name="image">Input image.</param>
        /// <param name="faces">Output of the function which represent region of interest of the detected faces. Each face is stored in cv::Rect container.</param>
        /// <returns></returns>
        public virtual bool GetFaces(InputArray image, out Rect[] faces)
        {
            ThrowIfDisposed();
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            image.ThrowIfDisposed();

            int ret;

            using (var facesVec = new VectorOfRect())
            {
                ret   = NativeMethods.face_Facemark_getFaces_vectorOfRect(ptr, image.CvPtr, facesVec.CvPtr);
                faces = facesVec.ToArray();
            }

            GC.KeepAlive(this);
            GC.KeepAlive(image);

            return(ret != 0);
        }
Example #35
        /// <summary>
        /// Given the input frame, create input blob, run net and return result detections.
        /// </summary>
        /// <param name="model">The Dnn DetectionModel</param>
        /// <param name="frame">The input image.</param>
        /// <param name="confThreshold">A threshold used to filter boxes by confidences.</param>
        /// <param name="nmsThreshold">A threshold used in non maximum suppression. The default value 0 means we will not perform non-maximum supression.</param>
        /// <param name="labels">Optional labels mapping, if provided, it will use classId as lookup index to get the Label. If null, the Label field of the DetectedObject will be null.</param>
        /// <returns>The array of detected objects</returns>
        public static DetectedObject[] Detect(
            this Emgu.CV.Dnn.DetectionModel model,
            IInputArray frame,
            float confThreshold = 0.5f,
            float nmsThreshold  = 0.5f,
            String[] labels     = null)
        {
            using (VectorOfInt classIds = new VectorOfInt())
                using (VectorOfFloat confidents = new VectorOfFloat())
                    using (VectorOfRect regions = new VectorOfRect())
                    {
                        model.Detect(
                            frame,
                            classIds,
                            confidents,
                            regions,
                            (float)confThreshold,
                            (float)nmsThreshold);
                        var classIdArr   = classIds.ToArray();
                        var confidentArr = confidents.ToArray();
                        var regionArr    = regions.ToArray();
                        List <DetectedObject> results = new List <DetectedObject>();
                        for (int i = 0; i < classIdArr.Length; i++)
                        {
                            DetectedObject o = new DetectedObject();
                            o.ClassId   = classIdArr[i];
                            o.Confident = confidentArr[i];
                            o.Region    = regionArr[i];
                            if (labels != null)
                            {
                                o.Label = labels[o.ClassId];
                            }
                            results.Add(o);
                        }

                        return(results.ToArray());
                    }
        }
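
A hedged usage sketch for the extension method above; the model, config, label, and image file names are assumptions, and model-specific input preprocessing (blob size, scale, mean) is omitted for brevity:

    string[] labels = File.ReadAllLines("coco.names");  // one class name per line
    using (DetectionModel model = new DetectionModel("yolov4.weights", "yolov4.cfg"))
    using (Mat frame = CvInvoke.Imread("street.jpg"))
    {
        DetectedObject[] objects = model.Detect(frame, confThreshold: 0.5f, nmsThreshold: 0.4f, labels: labels);
        foreach (DetectedObject o in objects)
            CvInvoke.Rectangle(frame, o.Region, new MCvScalar(255, 0, 0), 2);
    }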
Example #36
 /// <summary>
 /// Performs object detection with increasing detection window.
 /// </summary>
 /// <param name="image">The CudaImage to search in</param>
 /// <returns>The regions where positives are found</returns>
 public MCvObjectDetection[] DetectMultiScale(IInputArray image)
 {
    using (Util.VectorOfRect vr = new VectorOfRect())
    using (Util.VectorOfDouble vd = new VectorOfDouble())
    {
       DetectMultiScale(image, vr, vd);
       Rectangle[] location = vr.ToArray();
       double[] weight = vd.ToArray();
       MCvObjectDetection[] result = new MCvObjectDetection[location.Length];
       for (int i = 0; i < result.Length; i++)
       {
          MCvObjectDetection od = new MCvObjectDetection();
          od.Rect = location[i];
          od.Score = (float)weight[i];
          result[i] = od;
       }
       return result;
    }
 }
Example #37
      private void ProcessFrame(object sender, EventArgs e)
      {
         Mat image = new Mat();

         _capture.Retrieve(image);
         if (_forgroundDetector == null)
         {
            _forgroundDetector = new BackgroundSubtractorMOG2();
         }

         _forgroundDetector.Apply(image, _forgroundMask);

         //update the motion history
         _motionHistory.Update(_forgroundMask);         

         #region get a copy of the motion mask and enhance its color
         double[] minValues, maxValues;
         Point[] minLoc, maxLoc;
         _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
         Mat motionMask = new Mat();
         using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
            CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
         //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
         #endregion

         //create the motion image 
         Mat motionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
         //display the motion pixels in blue (first channel)
         //motionImage[0] = motionMask;
         CvInvoke.InsertChannel(motionMask, motionImage, 0);

         //Threshold to define a motion area, reduce the value to detect smaller motion
         double minArea = 100;

         //storage.Clear(); //clear the storage
         Rectangle[] rects;
         using (VectorOfRect boundingRect = new VectorOfRect())
         {
            _motionHistory.GetMotionComponents(_segMask, boundingRect);
            rects = boundingRect.ToArray();
         }

         //iterate through each of the motion components
         foreach (Rectangle comp in rects)
         {
            int area = comp.Width * comp.Height;
            //reject the components that have a small area
            if (area < minArea) continue;

            // find the angle and motion pixel count of the specific area
            double angle, motionPixelCount;
            _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

            //reject areas that contain too little motion
            if (motionPixelCount < area * 0.05) continue;

            //Draw each individual motion in red
            DrawMotion(motionImage, comp, angle, new Bgr(Color.Red));
         }

         // find and draw the overall motion angle
         double overallAngle, overallMotionPixelCount;

         _motionHistory.MotionInfo(_forgroundMask, new Rectangle(Point.Empty, motionMask.Size), out overallAngle, out overallMotionPixelCount);
         DrawMotion(motionImage, new Rectangle(Point.Empty, motionMask.Size), overallAngle, new Bgr(Color.Green));

         if (this.Disposing || this.IsDisposed)
            return;

         capturedImageBox.Image = image;
         forgroundImageBox.Image = _forgroundMask;

         //Display the amount of motions found on the current image
         UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", rects.Length, overallMotionPixelCount));

         //Display the image of the motion
         motionImageBox.Image = motionImage;

      }
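For reference, a handler like this is normally wired to the capture's ImageGrabbed event; a minimal setup sketch (Emgu CV, field names as in the snippet above, constructor values illustrative):

      //assumed fields: VideoCapture _capture; MotionHistory _motionHistory; Mat _forgroundMask, _segMask;
      _capture = new VideoCapture(0); //default webcam
      _motionHistory = new MotionHistory(
         1.0,   //mhiDuration: how long a motion is kept in history, in seconds
         0.05,  //maxTimeDelta
         0.5);  //minTimeDelta
      _forgroundMask = new Mat();
      _segMask = new Mat();
      _capture.ImageGrabbed += ProcessFrame;
      _capture.Start();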
Exemplo n.º 38
0
        /// <summary>
        /// Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles.
        /// </summary>
        /// <param name="image">Matrix of the type CV_8U containing an image where objects are detected.</param>
        /// <param name="rejectLevels"></param>
        /// <param name="levelWeights"></param>
        /// <param name="scaleFactor">Parameter specifying how much the image size is reduced at each image scale.</param>
        /// <param name="minNeighbors">Parameter specifying how many neighbors each candidate rectangle should have to retain it.</param>
        /// <param name="flags">Parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects. 
        /// It is not used for a new cascade.</param>
        /// <param name="minSize">Minimum possible object size. Objects smaller than that are ignored.</param>
        /// <param name="maxSize">Maximum possible object size. Objects larger than that are ignored.</param>
        /// <param name="outputRejectLevels"></param>
        /// <returns>Vector of rectangles where each rectangle contains the detected object.</returns>
        public virtual Rect[] DetectMultiScale(
            Mat image,
            out int[] rejectLevels,
            out double[] levelWeights,
            double scaleFactor = 1.1,
            int minNeighbors = 3,
            HaarDetectionType flags = HaarDetectionType.Zero,
            Size? minSize = null,
            Size? maxSize = null,
            bool outputRejectLevels = false)
        {
            if (disposed)
                throw new ObjectDisposedException("CascadeClassifier");
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            Size minSize0 = minSize.GetValueOrDefault(new Size());
            Size maxSize0 = maxSize.GetValueOrDefault(new Size());

            using (var objectsVec = new VectorOfRect())
            using (var rejectLevelsVec = new VectorOfInt32())
            using (var levelWeightsVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_CascadeClassifier_detectMultiScale(
                    ptr, image.CvPtr, objectsVec.CvPtr, rejectLevelsVec.CvPtr, levelWeightsVec.CvPtr,
                    scaleFactor, minNeighbors, (int)flags, minSize0, maxSize0, outputRejectLevels ? 1 : 0);

                rejectLevels = rejectLevelsVec.ToArray();
                levelWeights = levelWeightsVec.ToArray();
                return objectsVec.ToArray();
            }
        }
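A minimal usage sketch (OpenCvSharp; the cascade file and image names are placeholders, and enum spellings vary slightly between versions):

        using System;
        using OpenCvSharp;

        using (var cascade = new CascadeClassifier("haarcascade_frontalface_default.xml"))
        using (var gray = Cv2.ImRead("people.jpg", ImreadModes.Grayscale))
        {
            Rect[] faces = cascade.DetectMultiScale(
                gray, out int[] rejectLevels, out double[] levelWeights,
                scaleFactor: 1.1, minNeighbors: 3, outputRejectLevels: true);

            for (int i = 0; i < faces.Length; i++)
                Console.WriteLine($"{faces[i]} level={rejectLevels[i]} weight={levelWeights[i]}");
        }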
Exemplo n.º 39
0
        /// <summary>
        /// Detects MSER regions in the image.
        /// </summary>
        /// <param name="image">Input image.</param>
        /// <param name="msers">Resulting list of point sets, one per detected region.</param>
        /// <param name="bboxes">Resulting bounding boxes of the detected regions.</param>
        public virtual void DetectRegions(
            InputArray image, out Point[][] msers, out Rect[] bboxes)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null) 
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            using (var msersVec = new VectorOfVectorPoint())
            using (var bboxesVec = new VectorOfRect())
            {
                NativeMethods.features2d_MSER_detectRegions(
                    ptr, image.CvPtr, msersVec.CvPtr, bboxesVec.CvPtr);
                msers = msersVec.ToArray();
                bboxes = bboxesVec.ToArray();
            }

            GC.KeepAlive(image);
        }
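A minimal usage sketch (OpenCvSharp; "scene.jpg" is a placeholder):

        using OpenCvSharp;

        using (var mser = MSER.Create())
        using (var gray = Cv2.ImRead("scene.jpg", ImreadModes.Grayscale))
        {
            mser.DetectRegions(gray, out Point[][] msers, out Rect[] bboxes);
            //draw the bounding box of every detected region
            foreach (Rect box in bboxes)
                Cv2.Rectangle(gray, box, Scalar.White);
        }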
Exemplo n.º 40
0
        /// <summary>
        /// Splits a motion history image into a few parts corresponding to separate independent motions 
        /// (for example, left hand, right hand).
        /// </summary>
        /// <param name="mhi">Motion history image.</param>
        /// <param name="segmask">Image where the found mask should be stored, single-channel, 32-bit floating-point.</param>
        /// <param name="boundingRects">Vector containing ROIs of motion connected components.</param>
        /// <param name="timestamp">Current time in milliseconds or other units.</param>
        /// <param name="segThresh">Segmentation threshold that is recommended to be equal to the interval between motion history “steps” or greater.</param>
        public static void SegmentMotion(
            InputArray mhi, OutputArray segmask,
            out Rect[] boundingRects,
            double timestamp, double segThresh)
        {
            if (mhi == null)
                throw new ArgumentNullException("mhi");
            if (segmask == null)
                throw new ArgumentNullException("segmask");
            mhi.ThrowIfDisposed();
            segmask.ThrowIfNotReady();

            using (var br = new VectorOfRect())
            {
                NativeMethods.video_segmentMotion(
                    mhi.CvPtr, segmask.CvPtr, br.CvPtr, timestamp, segThresh);
                boundingRects = br.ToArray();
            }
            segmask.Fix();
        }
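A minimal call sketch, assuming this static lives on Cv2 (as in older OpenCvSharp builds) and that the caller maintains the motion history image mhi and its timestamp clock (e.g. via Cv2.UpdateMotionHistory):

        using OpenCvSharp;

        static Rect[] MotionComponents(Mat mhi, double timestamp)
        {
            using (var segmask = new Mat())
            {
                Cv2.SegmentMotion(mhi, segmask, out Rect[] rois, timestamp, segThresh: 0.5);
                return rois;
            }
        }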
Exemplo n.º 41
0
        public static CaptureData ProcessFrame(CaptureCalc capCalc, CaptureData capData)
        {
            if (capData == null || capCalc == null)
                return new CaptureData();

            capCalc.CaptureC.Retrieve(capData.CapturedImage);

            capCalc.ForegroundDetectorC.Apply(capData.CapturedImage, capData.ForegroundImage);

            //update the motion history
            capCalc.MotionHistoryC.Update(capData.ForegroundImage);

            #region get a copy of the motion mask and enhance its color
            double[] minValues, maxValues;
            Point[] minLoc, maxLoc;
            capCalc.MotionHistoryC.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
            Mat motionMask = new Mat();
            using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
                CvInvoke.Multiply(capCalc.MotionHistoryC.Mask, sa, motionMask, 1, DepthType.Cv8U);
            //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
            #endregion

            //create the motion image
            //Mat motionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
            capData.MotionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);

            //display the motion pixels in blue (first channel)
            //motionImage[0] = motionMask;
            CvInvoke.InsertChannel(motionMask, capData.MotionImage, (int)Constants.ColorChannel.Blue);

            //Threshold to define a motion area, reduce the value to detect smaller motion
            double minArea = Constants.MIN_DECTIVED_AREA;

            //storage.Clear(); //clear the storage
            //Rectangle[] rects;

            Mat segMask = new Mat();
            using (VectorOfRect boundingRect = new VectorOfRect())
            {
                capCalc.MotionHistoryC.GetMotionComponents(segMask, boundingRect);
                capData.CapturedMotionData.Rects = boundingRect.ToArray();
            }

            double[] angles = new double[capData.CapturedMotionData.Rects.Length];
            double[] motionPixelArr = new double[capData.CapturedMotionData.Rects.Length];
            uint index = 0;
            //iterate through each of the motion components
            foreach (Rectangle comp in capData.CapturedMotionData.Rects)
            {
                int area = comp.Width * comp.Height;
                //reject the components that have a small area
                if (area < minArea)
                    continue;

                // find the angle and motion pixel count of the specific area
                double angle = 0;
                double motionPixelCount = 0;
                capCalc.MotionHistoryC.MotionInfo(capData.ForegroundImage, comp, out angle, out motionPixelCount);

                //reject areas that contain too little motion
                if (motionPixelCount < area * Constants.MIN_MOTION_PIXEL_RATIO_IN_AN_AREA)
                    continue;

                angles[index] = angle;
                motionPixelArr[index] = motionPixelCount;
                index++;
                //Draw each individual motion in red
                DrawMotion(capData.MotionImage, comp, angle, new Bgr(Color.Red));
            }

            // find and draw the overall motion angle
            double overallAngle, overallMotionPixelCount;

            capCalc.MotionHistoryC.MotionInfo(capData.ForegroundImage, new Rectangle(Point.Empty, motionMask.Size), out overallAngle, out overallMotionPixelCount);
            capData.CapturedMotionData.OverallAngle = overallAngle;
            capData.CapturedMotionData.OverallMotionPixelCount = overallMotionPixelCount;
            capData.CapturedMotionData.Angles = angles;
            capData.CapturedMotionData.MotionPixelCountArray = motionPixelArr;

            DrawMotion(capData.MotionImage, new Rectangle(Point.Empty, motionMask.Size), capData.CapturedMotionData.OverallAngle, new Bgr(Color.Green));

            return capData;
        }
Exemplo n.º 42
0
        private async void ProcessFrame(object sender, EventArgs e)
        {
            List<BsonDocument> tempList = new List<BsonDocument>();
            Mat image = new Mat();
            _capture.Retrieve(image);
            if (_forgroundDetector == null)
            {
                _forgroundDetector = new BackgroundSubtractorMOG2();
            }

            _forgroundDetector.Apply(image, _forgroundMask);

            //update the motion history
            _motionHistory.Update(_forgroundMask);

            #region get a copy of the motion mask and enhance its color
            double[] minValues, maxValues;
            Point[] minLoc, maxLoc;
            _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
            Mat motionMask = new Mat();
            using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
                CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
            //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
            #endregion

            //create the motion image 
            Mat motionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
            //display the motion pixels in blue (first channel)
            //motionImage[0] = motionMask;
            CvInvoke.InsertChannel(motionMask, motionImage, 0);

            //Threshold to define a motion area, reduce the value to detect smaller motion
            double minArea = 5000;

            //storage.Clear(); //clear the storage
            System.Drawing.Rectangle[] rects;
            using (VectorOfRect boundingRect = new VectorOfRect())
            {
                _motionHistory.GetMotionComponents(_segMask, boundingRect);
                rects = boundingRect.ToArray();
            }

            int motionCount = 0;
            //iterate through each of the motion components
            foreach (System.Drawing.Rectangle comp in rects)
            {
                int area = comp.Width * comp.Height;

                //reject the components that have a small area
                if (area < minArea) continue;

                if (!CheckArea(comp)) continue;

                //find center point
                Point center = new Point(comp.X + (comp.Width >> 1), comp.Y + (comp.Height >> 1));

                //insert to temp motion list
                var document = new BsonDocument
                {
                    {"Source", Path.GetFileName(videoSource)},
                    {"Time", DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss") },
                    {"Area", GetAreaCode(center).ToString()},
                    {"AreaX", center.X.ToString()},
                    {"AreaY", center.Y.ToString()},
                    {"MotionCout", 1 }
                };
                tempList.Add(document);

                //create heatmap
                myHeatMap.heatPoints.Add(new HeatPoint(center.X, center.Y, 16));

                // find the angle and motion pixel count of the specific area
                double angle, motionPixelCount;
                _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

                //reject areas that contain too little motion (ratio 0.1; the stock sample uses 0.05)
                if (motionPixelCount < area * 0.1) continue;

                //Draw each individual motion in red
                DrawMotion(motionImage, comp, angle, new Bgr(Color.Red));
                motionCount++;
            }

            //pictureBox3.BackgroundImage = myHeatMap.CreateHeatmap();
            Bitmap b = (Bitmap)myHeatMap.CreateHeatmap();
            //Image<Gray, Byte> normalizedMasterImage = new Image<Gray, Byte>(b);
            //motionImage = normalizedMasterImage.Mat;
            pictureBox3.BackgroundImage = b;


            // find and draw the overall motion angle
            double overallAngle, overallMotionPixelCount;

            _motionHistory.MotionInfo(_forgroundMask, new System.Drawing.Rectangle(Point.Empty, motionMask.Size), out overallAngle, out overallMotionPixelCount);
            DrawMotion(motionImage, new System.Drawing.Rectangle(Point.Empty, motionMask.Size), overallAngle, new Bgr(Color.Green));

            if (this.Disposing || this.IsDisposed)
            {
                return;
            }

            //find pedestrian
            //bool tryUseCuda = true;
            //bool tryuseOpenCL = false;
            //long processingTime;
            //System.Drawing.Rectangle[] pedestrianRestult = FindPedestrian.Find(image, tryUseCuda, tryuseOpenCL, out processingTime);
            //foreach (System.Drawing.Rectangle rect in pedestrianRestult)
            //{
            //    CvInvoke.Rectangle(image, rect, new Bgr(Color.Gold).MCvScalar);
            //}

            capturedImageBox.Image = image;
            //forgroundImageBox.Image = _forgroundMask;

            //Display the amount of motions found on the current image
            UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", motionCount, overallMotionPixelCount));

            //write into db
            if (checkTimer)
            {
                foreach(var doc in tempList)
                {
                    await DataAccess.Insert(doc);
                }             
            }

            motionCount = 0;
            //Display the image of the motion
            motionImageBox.Image = motionImage;
        }
Exemplo n.º 43
0
 /// <summary>
 /// Finds all the motion segments and marks them in segMask with individual values (1, 2, ...). It also returns a sequence of CvConnectedComp structures, one for each motion component. After that, the motion direction for every component can be calculated with cvCalcGlobalOrientation using the extracted mask of the particular component (via cvCmp).
 /// </summary>
 /// <param name="mhi">Motion history image</param>
 /// <param name="segMask">Image where the mask found should be stored, single-channel, 32-bit floating-point</param>
 /// <param name="timestamp">Current time in milliseconds or other units</param>
 /// <param name="segThresh">Segmentation threshold; recommended to be equal to the interval between motion history "steps" or greater</param>
 /// <param name="boundingRects">Vector containing ROIs of motion connected components.</param>
 public static void SegmentMotion(
    IInputArray mhi,
    IOutputArray segMask,
    VectorOfRect boundingRects,
    double timestamp,
    double segThresh)
 {
    using (InputArray iaMhi = mhi.GetInputArray())
    using (OutputArray oaSegMask = segMask.GetOutputArray())
    {
       cveSegmentMotion(iaMhi, oaSegMask, boundingRects, timestamp, segThresh);
    }
 }
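A minimal helper sketch around this wrapper (Emgu CV); the motion history image mhi and its timestamp clock are assumed to be maintained by the caller:

 using System.Drawing;
 using Emgu.CV;
 using Emgu.CV.Util;

 static Rectangle[] MotionComponents(Mat mhi, double timestamp, double segThresh)
 {
    using (Mat segMask = new Mat())
    using (VectorOfRect rects = new VectorOfRect())
    {
       CvInvoke.SegmentMotion(mhi, segMask, rects, timestamp, segThresh);
       return rects.ToArray();
    }
 }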
Exemplo n.º 44
0
        private MotionInfo GetMotionInfo(Mat image)
        {
            Mat _forgroundMask = new Mat();
            Mat _segMask = new Mat();
            MotionInfo motionInfoObj = new MotionInfo();
            double minArea, angle, objectCount, totalPixelCount;
            double overallangle = 0;
            double motionPixelCount = 0;
            int motionArea = 0;
            totalPixelCount = 0;
            objectCount = 0;
            minArea = 800;

            if (foregroundDetector == null)
            {
                foregroundDetector = new BackgroundSubtractorMOG2();
            }

            foregroundDetector.Apply(image, _forgroundMask);

            _motionHistory.Update(_forgroundMask);

            ImageForeGroundMaskLast = _forgroundMask.ToImage<Bgr, byte>();

            #region get a copy of the motion mask and enhance its color
            double[] minValues, maxValues;
            Point[] minLoc, maxLoc;
            _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
            Mat motionMask = new Mat();
            using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
                CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
            //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
            #endregion

            //create the motion image
            Image<Bgr, Byte> motionImage = new Image<Bgr, byte>(motionMask.Size);
            //display the motion pixels in blue (first channel)
            //motionImage[0] = motionMask;
            CvInvoke.InsertChannel(motionMask, motionImage, 0);

            //Threshold to define a motion area, reduce the value to detect smaller motion
            minArea = 100;
            //storage.Clear(); //clear the storage
            Rectangle[] rects;

            using (VectorOfRect boundingRect = new VectorOfRect())
            {
                _motionHistory.GetMotionComponents(_segMask, boundingRect);
                rects = boundingRect.ToArray();
            }

            //iterate through each of the motion components
            foreach (Rectangle comp in rects)
            {
                int area = comp.Width * comp.Height;

                // find the angle and motion pixel count of the specific area
                _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

                //reject the components that have a small area
                if (area < minArea) continue;

                overallangle = overallangle + angle;
                totalPixelCount = totalPixelCount + motionPixelCount;
                objectCount = objectCount + 1;
                motionArea = motionArea + area;

                ////Draw each individual motion in red
                //DrawMotion(motionImage, comp, angle, new Bgr(Color.Red));
            }

            motionInfoObj.MotionArea = motionArea;
            motionInfoObj.OverallAngle = overallangle;
            motionInfoObj.BoundingRect = rects;
            motionInfoObj.TotalMotions = rects.Length;
            motionInfoObj.MotionObjects = objectCount;
            motionInfoObj.MotionPixels = totalPixelCount;
            averagetotalPixelCount = 0.75 * averagetotalPixelCount + 0.25 * totalPixelCount;
            if (Math.Abs(averagetotalPixelCount - totalPixelCount) / averagetotalPixelCount > 0.59)
                Console.WriteLine(" GetMotionInfo - Total Motions found: " + rects.Length + "; Motion Pixel count: " + totalPixelCount);
            return motionInfoObj;
        }
Exemplo n.º 45
0
 /// <summary>
 /// Update the current tracking status. The result will be saved in the internal storage.
 /// </summary>
 /// <param name="image">Input image</param>
 /// <param name="boundingBox">the tracking result, represent a list of ROIs of the tracked objects.</param>
 /// <returns>True id update success</returns>
 public bool Update(Mat image, VectorOfRect boundingBox)
 {
    return ContribInvoke.cveMultiTrackerUpdate(_ptr, image, boundingBox);
 }
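A typical frame loop around Update might look like this (sketch only; capture is a VideoCapture and multiTracker is assumed to have had its trackers registered already via MultiTracker.Add, whose signature varies by Emgu version):

 using (Mat frame = new Mat())
 using (VectorOfRect boxes = new VectorOfRect())
 {
    while (capture.Grab())
    {
       capture.Retrieve(frame);
       if (multiTracker.Update(frame, boxes))
       {
          //draw the updated ROI of every tracked object
          foreach (System.Drawing.Rectangle box in boxes.ToArray())
             CvInvoke.Rectangle(frame, box, new Emgu.CV.Structure.MCvScalar(0, 255, 0), 2);
       }
    }
 }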
Exemplo n.º 47
0
        /// <summary>
        /// Performs object detection with a multi-scale window.
        /// </summary>
        /// <param name="img">Source image. CV_8UC1 and CV_8UC4 types are supported for now.</param>
        /// <param name="foundWeights"></param>
        /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Mock parameter to keep the CPU interface compatibility. It must be (0,0).</param>
        /// <param name="scale">Coefficient of the detection window increase.</param>
        /// <param name="groupThreshold">Coefficient to regulate the similarity threshold. 
        /// When detected, some objects can be covered by many rectangles. 0 means not to perform grouping.</param>
        /// <returns>Detected objects boundaries.</returns>
        public virtual Rect[] DetectMultiScale(Mat img, out double[] foundWeights,
            double hitThreshold = 0, Size? winStride = null, Size? padding = null, double scale = 1.05, int groupThreshold = 2)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfRect())
            using (var foundWeightsVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_detectMultiScale(ptr, img.CvPtr, flVec.CvPtr, foundWeightsVec.CvPtr,
                    hitThreshold, winStride0, padding0, scale, groupThreshold);
                foundWeights = foundWeightsVec.ToArray();
                return flVec.ToArray();
            }
        }
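A minimal usage sketch with the built-in people detector (OpenCvSharp; "street.jpg" is a placeholder):

        using System;
        using OpenCvSharp;

        using (var hog = new HOGDescriptor())
        using (var img = Cv2.ImRead("street.jpg"))
        {
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            Rect[] found = hog.DetectMultiScale(img, out double[] weights);
            for (int i = 0; i < found.Length; i++)
                Console.WriteLine($"{found[i]} weight={weights[i]:F2}");
        }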
Exemplo n.º 48
0
        //public virtual bool read( const FileNode& node );

        /// <summary>
        /// Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles.
        /// </summary>
        /// <param name="image">Matrix of the type CV_8U containing an image where objects are detected.</param>
        /// <param name="scaleFactor">Parameter specifying how much the image size is reduced at each image scale.</param>
        /// <param name="minNeighbors">Parameter specifying how many neighbors each candidate rectangle should have to retain it.</param>
        /// <param name="flags">Parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects. 
        /// It is not used for a new cascade.</param>
        /// <param name="minSize">Minimum possible object size. Objects smaller than that are ignored.</param>
        /// <param name="maxSize">Maximum possible object size. Objects larger than that are ignored.</param>
        /// <returns>Vector of rectangles where each rectangle contains the detected object.</returns>
        public virtual Rect[] DetectMultiScale(
            Mat image,
            double scaleFactor = 1.1,
            int minNeighbors = 3,
            HaarDetectionType flags = 0,
            Size? minSize = null,
            Size? maxSize = null)
        {
            if (disposed)
                throw new ObjectDisposedException("CascadeClassifier");
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            Size minSize0 = minSize.GetValueOrDefault(new Size());
            Size maxSize0 = maxSize.GetValueOrDefault(new Size());

            using (var objectsVec = new VectorOfRect())
            {
                NativeMethods.objdetect_CascadeClassifier_detectMultiScale1(
                    ptr, image.CvPtr, objectsVec.CvPtr, 
                    scaleFactor, minNeighbors, (int)flags, minSize0, maxSize0);
                return objectsVec.ToArray();
            }
        }
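A minimal usage sketch of this simpler overload (OpenCvSharp; file names are placeholders):

        using OpenCvSharp;

        using (var cascade = new CascadeClassifier("haarcascade_frontalface_default.xml"))
        using (var gray = Cv2.ImRead("group.jpg", ImreadModes.Grayscale))
        {
            Rect[] faces = cascade.DetectMultiScale(
                gray, scaleFactor: 1.1, minNeighbors: 3, minSize: new Size(30, 30));
            System.Console.WriteLine($"{faces.Length} face(s) found");
        }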
Exemplo n.º 49
0
      public void TestHOG1()
      {
         if (CudaInvoke.HasCuda)
         {
            using (CudaHOG hog = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8,8), 9))
            using (Mat pedestrianDescriptor = hog.GetDefaultPeopleDetector())
            using (Image<Bgr, Byte> image = new Image<Bgr, byte>("pedestrian.png"))
            {
               hog.SetSVMDetector(pedestrianDescriptor);
               //hog.GroupThreshold = 0;
               Stopwatch watch = Stopwatch.StartNew();
               Rectangle[] rects;
               using (CudaImage<Bgr, Byte> CudaImage = new CudaImage<Bgr, byte>(image))
               using (CudaImage<Bgra, Byte> gpuBgra = CudaImage.Convert<Bgra, Byte>())
               using (VectorOfRect vRect = new VectorOfRect())
               {
                  hog.DetectMultiScale(gpuBgra, vRect);
                  rects = vRect.ToArray();
               }
               watch.Stop();

               Assert.AreEqual(1, rects.Length);

               foreach (Rectangle rect in rects)
                  image.Draw(rect, new Bgr(Color.Red), 1);
               Trace.WriteLine(String.Format("HOG detection time: {0} ms", watch.ElapsedMilliseconds));

               //ImageViewer.Show(image, String.Format("Detection Time: {0}ms", watch.ElapsedMilliseconds));
            }
         }
      }
Exemplo n.º 50
0
        /// <summary>
        /// Groups the object candidate rectangles.
        /// </summary>
        /// <param name="rectList">Output vector of grouped rectangles.</param>
        /// <param name="weights">Output vector of weights for the grouped rectangles.</param>
        /// <param name="groupThreshold">Minimum possible number of rectangles in a group, minus 1.</param>
        /// <param name="eps">Relative difference between rectangle sides required to merge them into a group.</param>
        public void GroupRectangles(out Rect[] rectList, out double[] weights, int groupThreshold, double eps)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");

            using (var rectListVec = new VectorOfRect())
            using (var weightsVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_groupRectangles(
                    ptr, rectListVec.CvPtr, weightsVec.CvPtr, groupThreshold, eps);
                rectList = rectListVec.ToArray();
                weights = weightsVec.ToArray();
            }
        }
Exemplo n.º 51
0
      /*
      /// <summary>
      /// Performs object detection with increasing detection window.
      /// </summary>
      /// <param name="image">The CudaImage to search in</param>
      /// <returns>The regions where positives are found</returns>
      public MCvObjectDetection[] DetectMultiScale(IInputArray image)
      {
         using (Util.VectorOfRect vr = new VectorOfRect())
         using (Util.VectorOfDouble vd = new VectorOfDouble())
         {
            DetectMultiScale(image, vr, vd);
            Rectangle[] location = vr.ToArray();
            double[] weight = vd.ToArray();
            MCvObjectDetection[] result = new MCvObjectDetection[location.Length];
            for (int i = 0; i < result.Length; i++)
            {
               MCvObjectDetection od = new MCvObjectDetection();
               od.Rect = location[i];
               od.Score = (float)weight[i];
               result[i] = od;
            }
            return result;
         }
      }*/

      public void DetectMultiScale(IInputArray image, VectorOfRect objects, VectorOfDouble confident = null)
      {
         using (InputArray iaImage = image.GetInputArray())
         {
            CudaInvoke.cudaHOGDetectMultiScale(_ptr, iaImage, objects, confident);
         }
      }
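A minimal call sketch for this vector-based overload; hog and the BGRA GpuMat gpuBgra are assumed to be set up as in the TestHOG examples:

      using (VectorOfRect objects = new VectorOfRect())
      using (VectorOfDouble confidences = new VectorOfDouble())
      {
         hog.DetectMultiScale(gpuBgra, objects, confidences);
         System.Drawing.Rectangle[] rects = objects.ToArray();
         double[] scores = confidences.ToArray();
      }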
Exemplo n.º 52
0
        /// <summary>
        /// Evaluates the specified ROIs and returns confidence values for each location at multiple scales.
        /// </summary>
        /// <param name="img"></param>
        /// <param name="foundLocations"></param>
        /// <param name="locations"></param>
        /// <param name="hitThreshold"></param>
        /// <param name="groupThreshold"></param>
        public void DetectMultiScaleROI(
            Mat img,
            out Rect[] foundLocations,
            out DetectionROI[] locations,
            double hitThreshold = 0,
            int groupThreshold = 0)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            using (var flVec = new VectorOfRect())
            using (var scalesVec = new VectorOfDouble())
            using (var locationsVec = new VectorOfVectorPoint())
            using (var confidencesVec = new VectorOfVectorDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_detectMultiScaleROI(
                    ptr, img.CvPtr, flVec.CvPtr, 
                    scalesVec.CvPtr, locationsVec.CvPtr, confidencesVec.CvPtr,
                    hitThreshold, groupThreshold);
                foundLocations = flVec.ToArray();

                double[] s = scalesVec.ToArray();
                Point[][] l = locationsVec.ToArray();
                double[][] c = confidencesVec.ToArray();

                if(s.Length != l.Length || l.Length != c.Length)
                    throw new OpenCvSharpException("Invalid result data 'locations'");
                locations = new DetectionROI[s.Length];
                for (int i = 0; i < s.Length; i++)
                {
                    locations[i] = new DetectionROI
                    {
                        Scale = s[i],
                        Locations = l[i],
                        Confidences = c[i]
                    };
                }
            }
        }
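A minimal usage sketch (OpenCvSharp; "street.jpg" is a placeholder):

        using System;
        using OpenCvSharp;

        using (var hog = new HOGDescriptor())
        using (var img = Cv2.ImRead("street.jpg"))
        {
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            hog.DetectMultiScaleROI(img, out Rect[] found, out DetectionROI[] rois);
            foreach (DetectionROI roi in rois)
                Console.WriteLine($"scale {roi.Scale}: {roi.Locations.Length} locations");
        }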
Exemplo n.º 53
0
        /// <summary>
        /// Performs object detection with a multi-scale window.
        /// </summary>
        /// <param name="img">Source image.</param>
        /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Padding added around the image before detection.</param>
        /// <param name="scale">Coefficient of the detection window increase.</param>
        /// <param name="groupThreshold">Coefficient to regulate the similarity threshold; 0 means no grouping is performed.</param>
        /// <returns>Detected objects boundaries.</returns>
        public virtual Rect[] DetectMultiScale(Mat img, double hitThreshold = 0, 
            Size? winStride = null, Size? padding = null, double scale = 1.05, int groupThreshold = 2)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfRect())
            {
                NativeMethods.HOGDescriptor_detectMultiScale(ptr, img.CvPtr, flVec.CvPtr, hitThreshold, winStride0, padding0, scale, groupThreshold);
                // copy the native std::vector<cv::Rect> into a managed Rect[] and return it
                return flVec.ToArray();
            }          
        }
Exemplo n.º 54
0
      public void TestHOG2()
      {
         if (CudaInvoke.HasCuda)
         {
            using (CudaHOG hog = new CudaHOG(
               new Size (48, 96), //winSize
               new Size(16, 16), //blockSize
               new Size(8,8), //blockStride
               new Size(8, 8)  //cellSize
               ))
            using (Mat pedestrianDescriptor = hog.GetDefaultPeopleDetector())
            using (Image<Bgr, Byte> image = new Image<Bgr, byte>("pedestrian.png"))
            {
               //float[] pedestrianDescriptor = CudaHOGDescriptor.GetPeopleDetector48x96();
               hog.SetSVMDetector(pedestrianDescriptor);

               Stopwatch watch = Stopwatch.StartNew();
               Rectangle[] rects;
               using (GpuMat cudaImage = new GpuMat(image))
               using (GpuMat gpuBgra = new GpuMat())
               using (VectorOfRect vRect = new VectorOfRect())
               {
                  CudaInvoke.CvtColor(cudaImage, gpuBgra, ColorConversion.Bgr2Bgra);
                  hog.DetectMultiScale(gpuBgra, vRect);
                  rects = vRect.ToArray();
               }
               watch.Stop();

               //Assert.AreEqual(1, rects.Length);

               foreach (Rectangle rect in rects)
                  image.Draw(rect, new Bgr(Color.Red), 1);
               Trace.WriteLine(String.Format("HOG detection time: {0} ms", watch.ElapsedMilliseconds));

               //ImageViewer.Show(image, String.Format("Detection Time: {0}ms", watch.ElapsedMilliseconds));
            }
         }
      }
Exemplo n.º 55
0
 /// <summary>
 /// Performs object detection with increasing detection window.
 /// </summary>
 /// <param name="image">The image to search in</param>
 /// <param name="hitThreshold">
 /// Threshold for the distance between features and SVM classifying plane.
 /// Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
 /// But if the free coefficient is omitted (which is allowed), you can specify it manually here.
 ///</param>
 /// <param name="winStride">Window stride. Must be a multiple of block stride.</param>
 /// <param name="padding"></param>
 /// <param name="scale">Coefficient of the detection window increase.</param>
 /// <param name="finalThreshold">After detection some objects could be covered by many rectangles. This coefficient regulates similarity threshold. 0 means don't perform grouping. Should be an integer if not using meanshift grouping. Use 2.0 for default</param>
 /// <param name="useMeanshiftGrouping">If true, it will use meanshift grouping.</param>
 /// <returns>The regions where positives are found</returns>
 public MCvObjectDetection[] DetectMultiScale(
    IInputArray image,
    double hitThreshold = 0,
    Size winStride = new Size(),
    Size padding = new Size(),
    double scale = 1.05,
    double finalThreshold = 2.0,
    bool useMeanshiftGrouping = false)
 {
    using (Util.VectorOfRect vr = new VectorOfRect())
    using (Util.VectorOfDouble vd = new VectorOfDouble())
    using (InputArray iaImage = image.GetInputArray())
    {
       CvInvoke.cveHOGDescriptorDetectMultiScale(_ptr, iaImage, vr, vd, hitThreshold, ref winStride, ref padding, scale,
          finalThreshold, useMeanshiftGrouping);
       Rectangle[] location = vr.ToArray();
       double[] weight = vd.ToArray();
       MCvObjectDetection[] result = new MCvObjectDetection[location.Length];
       for (int i = 0; i < result.Length; i++)
       {
          MCvObjectDetection od = new MCvObjectDetection();
          od.Rect = location[i];
          od.Score = (float)weight[i];
          result[i] = od;
       }
       return result;
    }
 }
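A minimal CPU-side usage sketch (Emgu CV; "street.jpg" is a placeholder):

 using Emgu.CV;
 using Emgu.CV.Structure;

 using (HOGDescriptor hog = new HOGDescriptor())
 using (Mat image = CvInvoke.Imread("street.jpg"))
 {
    hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
    foreach (MCvObjectDetection detection in hog.DetectMultiScale(image))
       CvInvoke.Rectangle(image, detection.Rect, new MCvScalar(0, 0, 255), 2);
 }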
Exemplo n.º 56
0
 /// <summary>
 /// Get a sequence of motion components
 /// </summary>
 /// <param name="segMask">Image where the found mask should be stored, single-channel, 32-bit floating-point</param>
 /// <param name="boundingRects">Vector containing ROIs of the motion connected components</param>
 public void GetMotionComponents(IOutputArray segMask, VectorOfRect boundingRects)
 {
    TimeSpan ts = _lastTime.Subtract(_initTime);
    
    CvInvoke.SegmentMotion(_mhi, segMask, boundingRects, ts.TotalSeconds, _maxTimeDelta);
 }
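A minimal usage sketch; _motionHistory is assumed to be updated every frame with a foreground mask, as in the ProcessFrame examples above:

 using (Mat segMask = new Mat())
 using (VectorOfRect boundingRects = new VectorOfRect())
 {
    _motionHistory.GetMotionComponents(segMask, boundingRects);
    foreach (System.Drawing.Rectangle roi in boundingRects.ToArray())
       System.Console.WriteLine($"motion ROI: {roi}");
 }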
Exemplo n.º 57
-1
      /// <summary>
      /// Find groups of Extremal Regions that are organized as text blocks.
      /// </summary>
      /// <param name="image">The image where ER grouping is to be perform on</param>
      /// <param name="channels">Array of single channel images from which the regions were extracted</param>
      /// <param name="erstats">Vector of ER’s retrieved from the ERFilter algorithm from each channel</param>
      /// <param name="groupingTrainedFileName">The XML or YAML file with the classifier model (e.g. trained_classifier_erGrouping.xml)</param>
      /// <param name="minProbability">The minimum probability for accepting a group.</param>
      /// <param name="groupMethods">The grouping methods</param>
      /// <returns>The output of the algorithm that indicates the text regions</returns>
      public static System.Drawing.Rectangle[] ERGrouping(IInputArray image, IInputArrayOfArrays channels, VectorOfERStat[] erstats, GroupingMethod groupMethods = GroupingMethod.OrientationHoriz, String groupingTrainedFileName = null, float minProbability = 0.5f)
      {
         IntPtr[] erstatPtrs = new IntPtr[erstats.Length];

         for (int i = 0; i < erstatPtrs.Length; i++)
         {
            erstatPtrs[i] = erstats[i].Ptr;
         }

         using (VectorOfVectorOfPoint regionGroups = new VectorOfVectorOfPoint())
         using (VectorOfRect groupsBoxes = new VectorOfRect())
         using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaChannels = channels.GetInputArray())
         using (CvString s = (groupingTrainedFileName == null ? new CvString() : new CvString(groupingTrainedFileName)))
         {
            GCHandle erstatsHandle = GCHandle.Alloc(erstatPtrs, GCHandleType.Pinned);
            CvERGrouping(
               iaImage, iaChannels,
               erstatsHandle.AddrOfPinnedObject(), erstatPtrs.Length,
               regionGroups, groupsBoxes,
               groupMethods,
               s, minProbability);

            erstatsHandle.Free();
            return groupsBoxes.ToArray();
         }
      }