/// <summary>
/// Find the pedestrians in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="tryUseCuda">If true, use CUDA when a compatible device is available</param>
/// <param name="tryUseOpenCL">If true, use OpenCL when a compatible GPU device is available</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds</param>
/// <returns>The regions where pedestrians are detected</returns>
public static Rectangle[] Find(Mat image, bool tryUseCuda, bool tryUseOpenCL, out long processingTime)
{
   Stopwatch watch;
   Rectangle[] regions;

#if !(IOS || NETFX_CORE)
   //check if there is a compatible Cuda device to run pedestrian detection
   if (tryUseCuda && CudaInvoke.HasCuda)
   {
      //this is the Cuda version
      using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
      {
         des.SetSVMDetector(des.GetDefaultPeopleDetector());

         watch = Stopwatch.StartNew();
         using (GpuMat cudaBgr = new GpuMat(image))
         using (GpuMat cudaBgra = new GpuMat())
         using (VectorOfRect vr = new VectorOfRect())
         {
            //the CUDA HOG detector expects a BGRA image
            CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
            des.DetectMultiScale(cudaBgra, vr);
            regions = vr.ToArray();
         }
         watch.Stop();
      }
   }
   else
#endif
   {
      //Many OpenCL functions require OpenCL-compatible GPU devices.
      //As of OpenCV 3.0-alpha, OpenCV will crash if OpenCL is enabled and only an OpenCL-compatible CPU device is present.
      //So we need to call CvInvoke.HaveOpenCLCompatibleGpuDevice instead of CvInvoke.HaveOpenCL
      //(which also returns true on a system that only has CPU OpenCL devices).
      CvInvoke.UseOpenCL = tryUseOpenCL && CvInvoke.HaveOpenCLCompatibleGpuDevice;

      //this is the CPU/OpenCL version
      using (HOGDescriptor des = new HOGDescriptor())
      {
         des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

         //load the image into a UMat so it will automatically use OpenCL if available
         using (UMat umat = image.ToUMat(AccessType.Read))
         {
            watch = Stopwatch.StartNew();
            MCvObjectDetection[] results = des.DetectMultiScale(umat);
            regions = new Rectangle[results.Length];
            for (int i = 0; i < results.Length; i++)
               regions[i] = results[i].Rect;
            watch.Stop();
         }
      }
   }

   processingTime = watch.ElapsedMilliseconds;
   return regions;
}
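A minimal caller for the Find overload above might look like the following sketch. The file name is a placeholder, and the example assumes an Emgu CV version where CvInvoke.Imread takes an ImreadModes flag (older 3.0 builds used a differently named enum); the drawing call uses CvInvoke.Rectangle from the same API as the snippets.

//minimal usage sketch (file name and load flags are assumptions, not part of the original sample)
long detectionTime;
using (Mat frame = CvInvoke.Imread("pedestrian.png", ImreadModes.Color))
{
   Rectangle[] pedestrians = Find(frame, true, true, out detectionTime);

   //draw every detected region on the frame in red
   foreach (Rectangle r in pedestrians)
      CvInvoke.Rectangle(frame, r, new MCvScalar(0, 0, 255), 2);

   //detectionTime now holds the HOG detection time in milliseconds
}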
private void objectDetection(Image<Bgr, Byte> myImage)
{
   MCvObjectDetection[] regions;
   using (HOGDescriptor des = new HOGDescriptor())
   {
      //use the default people detector that ships with OpenCV
      des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
      regions = des.DetectMultiScale(myImage);
   }

   //draw every detected pedestrian on the image
   foreach (MCvObjectDetection pedestrian in regions)
   {
      myImage.Draw(pedestrian.Rect, new Bgr(Color.Red), 1);
   }
}
/// <summary>
/// Find the pedestrians in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="tryUseCuda">If true, use CUDA when a compatible device is available</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds</param>
/// <returns>The regions where pedestrians are detected</returns>
public static Rectangle[] Find(Mat image, bool tryUseCuda, out long processingTime)
{
   Stopwatch watch;
   Rectangle[] regions;

#if !(__IOS__ || NETFX_CORE)
   //check if there is a compatible Cuda device to run pedestrian detection
   if (tryUseCuda && CudaInvoke.HasCuda)
   {
      //this is the Cuda version
      using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
      {
         des.SetSVMDetector(des.GetDefaultPeopleDetector());

         watch = Stopwatch.StartNew();
         using (GpuMat cudaBgr = new GpuMat(image))
         using (GpuMat cudaBgra = new GpuMat())
         using (VectorOfRect vr = new VectorOfRect())
         {
            //the CUDA HOG detector expects a BGRA image
            CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
            des.DetectMultiScale(cudaBgra, vr);
            regions = vr.ToArray();
         }
         watch.Stop();
      }
   }
   else
#endif
   {
      //this is the CPU/OpenCL version
      using (HOGDescriptor des = new HOGDescriptor())
      {
         des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

         //load the image into a UMat so it will automatically use OpenCL if available
         using (UMat umat = image.ToUMat(AccessType.Read))
         {
            watch = Stopwatch.StartNew();
            MCvObjectDetection[] results = des.DetectMultiScale(umat);
            regions = new Rectangle[results.Length];
            for (int i = 0; i < results.Length; i++)
               regions[i] = results[i].Rect;
            watch.Stop();
         }
      }
   }

   processingTime = watch.ElapsedMilliseconds;
   return regions;
}
/// <summary>
/// Find the pedestrians in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="processingTime">The processing time in milliseconds</param>
/// <returns>The regions where pedestrians are detected</returns>
public static Rectangle[] Find(IInputArray image, out long processingTime)
{
   Stopwatch watch;
   Rectangle[] regions;

   using (InputArray iaImage = image.GetInputArray())
   {
#if !(__IOS__ || NETFX_CORE)
      //if the input array is a GpuMat, run the CUDA version of the pedestrian detection
      if (iaImage.Kind == InputArray.Type.CudaGpuMat)
      {
         //this is the Cuda version
         using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
         {
            des.SetSVMDetector(des.GetDefaultPeopleDetector());

            watch = Stopwatch.StartNew();
            using (GpuMat cudaBgra = new GpuMat())
            using (VectorOfRect vr = new VectorOfRect())
            {
               //the CUDA HOG detector expects a BGRA image
               CudaInvoke.CvtColor(image, cudaBgra, ColorConversion.Bgr2Bgra);
               des.DetectMultiScale(cudaBgra, vr);
               regions = vr.ToArray();
            }
            watch.Stop();
         }
      }
      else
#endif
      {
         //this is the CPU/OpenCL version
         using (HOGDescriptor des = new HOGDescriptor())
         {
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

            watch = Stopwatch.StartNew();
            MCvObjectDetection[] results = des.DetectMultiScale(image);
            regions = new Rectangle[results.Length];
            for (int i = 0; i < results.Length; i++)
               regions[i] = results[i].Rect;
            watch.Stop();
         }
      }

      processingTime = watch.ElapsedMilliseconds;
      return regions;
   }
}
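Because this overload dispatches on the kind of the input array, the caller selects the CUDA or CPU/OpenCL path simply by passing a GpuMat or a Mat. The sketch below assumes a BGR Mat named frame that was loaded elsewhere; it is an illustration of the two call patterns, not part of the original sample.

//assumed usage: the same Find overload fed either a Mat (CPU/OpenCL path) or a GpuMat (CUDA path)
long cpuTime, gpuTime;
Rectangle[] cpuResult = Find(frame, out cpuTime);            //input kind is not CudaGpuMat -> HOGDescriptor branch

if (CudaInvoke.HasCuda)
{
   using (GpuMat gpuFrame = new GpuMat(frame))               //upload the frame to the GPU
   {
      Rectangle[] gpuResult = Find(gpuFrame, out gpuTime);   //input kind is CudaGpuMat -> CudaHOG branch
   }
}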
/// <summary>
/// Find the pedestrians in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds</param>
/// <returns>The image with the pedestrians highlighted.</returns>
public static Image<Bgr, Byte> Find(Image<Bgr, Byte> image, out long processingTime)
{
   Stopwatch watch;
   Rectangle[] regions;

   //check if there is a compatible GPU to run pedestrian detection
   if (CudaInvoke.HasCuda)
   {
      //this is the GPU version
      using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
      {
         des.SetSVMDetector(des.GetDefaultPeopleDetector());

         watch = Stopwatch.StartNew();
         using (CudaImage<Bgr, Byte> gpuImg = new CudaImage<Bgr, byte>(image))
         //Convert<Bgra, Byte>() already performs the BGR to BGRA conversion the CUDA HOG detector needs,
         //so no additional CvtColor call is required
         using (CudaImage<Bgra, Byte> gpuBgra = gpuImg.Convert<Bgra, Byte>())
         using (VectorOfRect vr = new VectorOfRect())
         {
            des.DetectMultiScale(gpuBgra, vr);
            regions = vr.ToArray();
         }
      }
   }
   else
   {
      //this is the CPU version
      using (HOGDescriptor des = new HOGDescriptor())
      {
         des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

         //load the image into a UMat so it will automatically use OpenCL if available
         UMat umat = image.ToUMat();

         watch = Stopwatch.StartNew();
         //regions = des.DetectMultiScale(image);
         MCvObjectDetection[] results = des.DetectMultiScale(umat);
         regions = new Rectangle[results.Length];
         for (int i = 0; i < results.Length; i++)
            regions[i] = results[i].Rect;
      }
   }
   watch.Stop();
   processingTime = watch.ElapsedMilliseconds;

   //draw every detected pedestrian on the image
   foreach (Rectangle pedestrian in regions)
   {
      image.Draw(pedestrian, new Bgr(Color.Red), 1);
   }
   return image;
}
public static Image<Bgr, Byte> Find(Image<Bgr, Byte> image, out long processingTime)
{
   Stopwatch watch;
   Rectangle[] regions;

   //use the CUDA-accelerated HOG detector when a compatible GPU is present
   if (GpuInvoke.HasCuda)
   {
      using (GpuHOGDescriptor des = new GpuHOGDescriptor())
      {
         des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());

         watch = Stopwatch.StartNew();
         using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
         using (GpuImage<Bgra, Byte> gpuBgraImage = gpuImage.Convert<Bgra, Byte>())
         {
            regions = des.DetectMultiScale(gpuBgraImage);
         }
      }
   }
   else
   {
      //fall back to the CPU HOG detector
      using (HOGDescriptor des = new HOGDescriptor())
      {
         des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

         watch = Stopwatch.StartNew();
         regions = des.DetectMultiScale(image);
      }
   }
   watch.Stop();
   processingTime = watch.ElapsedMilliseconds;

   //draw the detected pedestrians on the image
   foreach (Rectangle rect in regions)
   {
      image.Draw(rect, new Bgr(Color.Red), 1);
   }
   return image;
}
public float[] GetVector(Image<Bgr, Byte> im)
{
   using (HOGDescriptor hog = new HOGDescriptor()) // with default values
   {
      Image<Bgr, Byte> imageOfInterest = Resize(im);

      //build a location list containing every pixel of the resized image
      Point[] p = new Point[imageOfInterest.Width * imageOfInterest.Height];
      int k = 0;
      for (int i = 0; i < imageOfInterest.Width; i++)
      {
         for (int j = 0; j < imageOfInterest.Height; j++)
         {
            p[k++] = new Point(i, j);
         }
      }

      //compute the HOG descriptor at the given locations with an 8x8 window stride and no padding
      return hog.Compute(imageOfInterest, new Size(8, 8), new Size(0, 0), p);
   }
}
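A short usage sketch for GetVector follows. The image name is hypothetical, and the resulting descriptor length depends on the HOGDescriptor window, block, cell and stride sizes as well as the number of locations, so the sketch only prints it rather than asserting a value.

//hypothetical usage of the GetVector helper above
using (Image<Bgr, Byte> sample = new Image<Bgr, Byte>("sample.jpg"))
{
   float[] hogVector = GetVector(sample);
   Console.WriteLine("HOG feature vector length: " + hogVector.Length);
}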
public static Image<Bgr, Byte> Find(Image<Bgr, Byte> image, out long processingTime)
{
   Stopwatch watch;
   Rectangle[] regions;   //unused: kept only for the commented-out DetectMultiScale call below
   float[] result;

   using (HOGDescriptor des = new HOGDescriptor())
   {
      watch = Stopwatch.StartNew();
      //compute the HOG descriptor over the whole image with a 16x16 window stride and no padding
      result = des.Compute(image, new Size(16, 16), Size.Empty, null);
      watch.Stop();

      //keep only the non-zero descriptor entries
      result = result.Where(x => x != 0).ToArray();
      //regions = des.DetectMultiScale(image);
   }
   processingTime = watch.ElapsedMilliseconds;
   return image;
}
//=================================================== Feature Descriptor (HOG) - Chair Training Data ===========================================
public static Rectangle[] FindObject(Image<Bgr, Byte> image, out long processingTime, Size winSizeObject, string dataFile)
{
   Stopwatch watch;
   Rectangle[] regions;

   //check if there is a compatible GPU to run the detection
   if (GpuInvoke.HasCuda)
   {
      //this is the GPU version (note: it uses the default people detector, not the custom trained data)
      using (GpuHOGDescriptor des = new GpuHOGDescriptor())
      {
         des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());

         watch = Stopwatch.StartNew();
         using (GpuImage<Bgr, Byte> gpuImg = new GpuImage<Bgr, byte>(image))
         using (GpuImage<Bgra, Byte> gpuBgra = gpuImg.Convert<Bgra, Byte>())
         {
            regions = des.DetectMultiScale(gpuBgra);
         }
      }
   }
   else
   {
      //this is the CPU version, configured with the object window size and the trained SVM data
      using (HOGDescriptor des = new HOGDescriptor(winSizeObject, blockSize, blockStride, cellSize, nbins, 1, -1, 0.2, true))
      {
         des.SetSVMDetector(GetDataObjects(dataFile));
         //des.SetSVMDetector(GetData2());

         watch = Stopwatch.StartNew();
         regions = des.DetectMultiScale(image);
      }
   }
   watch.Stop();
   processingTime = watch.ElapsedMilliseconds;
   return regions;
}
public void classify(BitmapSource frame)
{
   Console.WriteLine(relativeURI);
   //byte[] classifiedImage = frame;
   //WriteableBitmap frameImage = new WriteableBitmap(frameWidth, frameHeight, 96, 96, PixelFormats.Bgr32, null);
   //BitmapSource frameImage = BitmapSource.Create(frameWidth, frameHeight, 96, 96, PixelFormats.Bgr32, null, frame, stride);
   /*
   resultsPtr = CvInvoke.cvHaarDetectObjects(
      Marshal.GetIUnknownForObject(frame),
      classifier,
      resultsPtr,
      1.1,
      3,
      Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
      new System.Drawing.Size(0, 0),
      new System.Drawing.Size(0, 0));
   Console.WriteLine("Classified?!? Pointer below: ");
   Console.WriteLine(resultsPtr.ToString());
   */
   //return classifiedImage;

   //convert the BitmapSource frame into a System.Drawing.Bitmap via an in-memory BMP stream
   Console.WriteLine(" - - - Converting Bitmap...");
   System.Drawing.Bitmap bitmapFrame;
   using (MemoryStream outStream = new MemoryStream())
   {
      BitmapEncoder enc = new BmpBitmapEncoder();
      enc.Frames.Add(BitmapFrame.Create(frame));
      enc.Save(outStream);
      bitmapFrame = new System.Drawing.Bitmap(outStream);
   }
   Console.WriteLine(" - - - Bitmap converted!");

   Image<Bgr, Byte> image = new Image<Bgr, Byte>(bitmapFrame);
   Console.WriteLine(" - - - Image set");

   Console.WriteLine(" - - - Check CUDA...");
   if (GpuInvoke.HasCuda)
   {
      Console.WriteLine(" - - - Has CUDA!");
      using (GpuCascadeClassifier target = new GpuCascadeClassifier(classifierURI))
      {
         using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
         using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
         {
            Console.WriteLine(" - - - Detecting!");
            Rectangle[] targetSet = target.DetectMultiScale(gpuGray, 1.1, 10, System.Drawing.Size.Empty);
            Console.WriteLine(" - - - Detected :D :D :D Printing rectangle set: ");
            foreach (Rectangle f in targetSet)
            {
               Console.WriteLine("Rectangle found at: " + f.ToString());
               //draw the face detected in the 0th (gray) channel with blue color
               image.Draw(f, new Bgr(System.Drawing.Color.Blue), 2);
            }
            Console.WriteLine(" - - - DONE");
         }
      }
   }
   else
   {
      using (HOGDescriptor des = new HOGDescriptor())
      {
         //des.SetSVMDetector
      }
      Console.WriteLine(" - - - No CUDA :( ");
      Console.WriteLine(" - - - Devices available: " + GpuInvoke.GetCudaEnabledDeviceCount());
   }
}
public void TestHOG2()
{
   using (HOGDescriptor hog = new HOGDescriptor())
   using (Image<Bgr, Byte> image = new Image<Bgr, byte>("lena.jpg"))
   {
      float[] pedestrianDescriptor = HOGDescriptor.GetDefaultPeopleDetector();
      hog.SetSVMDetector(pedestrianDescriptor);

      Stopwatch watch = Stopwatch.StartNew();
      Rectangle[] rects = hog.DetectMultiScale(image);
      watch.Stop();

      //the test expects no pedestrian detections in lena.jpg
      Assert.AreEqual(0, rects.Length);
      foreach (Rectangle rect in rects)
         image.Draw(rect, new Bgr(Color.Red), 1);

      Trace.WriteLine(String.Format("HOG detection time: {0} ms", watch.ElapsedMilliseconds));
      //ImageViewer.Show(image, String.Format("Detection Time: {0}ms", watch.ElapsedMilliseconds));
   }
}
public void TestGrabCut2()
{
   Image<Bgr, Byte> img = new Image<Bgr, byte>("pedestrian.png");

   HOGDescriptor desc = new HOGDescriptor();
   desc.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

   Rectangle[] humanRegions = desc.DetectMultiScale(img);

   Image<Gray, byte> pedestrianMask = new Image<Gray, byte>(img.Size);
   foreach (Rectangle rect in humanRegions)
   {
      //generate the mask where 3 indicates foreground and 2 indicates background
      using (Image<Gray, byte> mask = img.GrabCut(rect, 2))
      {
         //get the mask of the foreground
         CvInvoke.cvCmpS(mask, 3, mask, Emgu.CV.CvEnum.CMP_TYPE.CV_CMP_EQ);
         pedestrianMask._Or(mask);
      }
   }
}
public void TestGrabCut2()
{
   Image<Bgr, Byte> img = EmguAssert.LoadImage<Bgr, Byte>("pedestrian.png");

   HOGDescriptor desc = new HOGDescriptor();
   desc.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

   MCvObjectDetection[] humanRegions = desc.DetectMultiScale(img);

   Image<Gray, byte> pedestrianMask = new Image<Gray, byte>(img.Size);
   foreach (MCvObjectDetection rect in humanRegions)
   {
      //generate the mask where 3 indicates foreground and 2 indicates background
      using (Image<Gray, byte> mask = img.GrabCut(rect.Rect, 2))
      {
         //get the mask of the foreground
         using (ScalarArray ia = new ScalarArray(3))
            CvInvoke.Compare(mask, ia, mask, Emgu.CV.CvEnum.CmpType.Equal);
         pedestrianMask._Or(mask);
      }
   }
}
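If the accumulated GrabCut mask built in the test above is meant to be used further, one hedged possibility (not part of the original test) is to copy the source image through it inside the same method; Image<,>.Copy(mask) keeps only the pixels where the mask is non-zero.

//assumed follow-up: extract the segmented pedestrian pixels using the accumulated mask
Image<Bgr, Byte> foreground = img.Copy(pedestrianMask);
//foreground now contains the GrabCut foreground pixels; everything outside the mask is black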
public void TestHOGTrainAnySize()
{
   using (Image<Bgr, byte> image = EmguAssert.LoadImage<Bgr, Byte>("lena.jpg"))
   using (HOGDescriptor hog = new HOGDescriptor(image))
   {
      Stopwatch watch = Stopwatch.StartNew();
      MCvObjectDetection[] rects = hog.DetectMultiScale(image);
      watch.Stop();

      foreach (MCvObjectDetection rect in rects)
         image.Draw(rect.Rect, new Bgr(0, 0, 255), 1);

      EmguAssert.WriteLine(String.Format("Detection Time: {0}ms", watch.ElapsedMilliseconds));
      //ImageViewer.Show(image, String.Format("Detection Time: {0}ms", watch.ElapsedMilliseconds));
   }
}
public void TestHOG1()
{
   using (HOGDescriptor hog = new HOGDescriptor())
   using (Image<Bgr, Byte> image = EmguAssert.LoadImage<Bgr, Byte>("pedestrian.png"))
   {
      float[] pedestrianDescriptor = HOGDescriptor.GetDefaultPeopleDetector();
      hog.SetSVMDetector(pedestrianDescriptor);

      Stopwatch watch = Stopwatch.StartNew();
      MCvObjectDetection[] rects = hog.DetectMultiScale(image);
      watch.Stop();

      //the test expects exactly one pedestrian detection in pedestrian.png
      EmguAssert.AreEqual(1, rects.Length);
      foreach (MCvObjectDetection rect in rects)
         image.Draw(rect.Rect, new Bgr(0, 0, 255), 1);

      EmguAssert.WriteLine(String.Format("HOG detection time: {0} ms", watch.ElapsedMilliseconds));
      //Emgu.CV.UI.ImageViewer.Show(image, String.Format("Detection Time: {0}ms", watch.ElapsedMilliseconds));
   }
}