/// <summary>
/// Find pedestrians in the given image.
/// </summary>
/// <param name="image">The input image; may be a GpuMat when CUDA is available.</param>
/// <returns>The regions where pedestrians were detected.</returns>
public Rectangle[] Find(IInputArray image)
{
    using (InputArray iaImage = image.GetInputArray())
    {
        // Use the CUDA detector only when the input already lives on the GPU
        // and a CUDA HOG instance is available.
        bool useCuda = iaImage.Kind == InputArray.Type.CudaGpuMat && _hogCuda != null;
        if (!useCuda)
        {
            // CPU/OpenCL path.
            MCvObjectDetection[] detections = _hog.DetectMultiScale(image);
            Rectangle[] found = new Rectangle[detections.Length];
            for (int idx = 0; idx < detections.Length; idx++)
            {
                found[idx] = detections[idx].Rect;
            }
            return found;
        }

        // CUDA path: the CUDA HOG expects a BGRA input image.
        using (GpuMat bgra = new GpuMat())
        using (VectorOfRect rects = new VectorOfRect())
        {
            CudaInvoke.CvtColor(image, bgra, ColorConversion.Bgr2Bgra);
            _hogCuda.DetectMultiScale(bgra, rects);
            return rects.ToArray();
        }
    }
}
/// <summary>
/// Find pedestrians in the image and highlight them with red rectangles.
/// </summary>
/// <param name="image">The image to search; it is drawn on and returned.</param>
/// <returns>The image with pedestrians highlighted.</returns>
public static Image <Bgr, Byte> Find(Image <Bgr, Byte> image)
{
    // BUG FIX: the regions array was hard-coded to 5 elements, which threw
    // IndexOutOfRangeException when more than 5 pedestrians were detected and
    // drew default (empty) rectangles when fewer were. Size it from the
    // actual detection count instead.
    Rectangle[] regions;

    // CPU version of the HOG pedestrian detector.
    using (HOGDescriptor des = new HOGDescriptor())
    {
        des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
        Emgu.CV.Structure.MCvObjectDetection[] objects = des.DetectMultiScale(image);
        regions = new Rectangle[objects.Length];
        for (int i = 0; i < objects.Length; i++)
        {
            regions[i] = objects[i].Rect;
            // Count only confident detections toward the global counter.
            if (objects[i].Score > 0.50)
            {
                FormVideo.Counter++;
            }
        }
    }

    foreach (Rectangle pedestrain in regions)
    {
        image.Draw(pedestrain, new Bgr(Color.Red), 1);
    }
    return (image);
}
/// <summary>
/// Detect human bodies in the frame using the CPU HOG people detector.
/// </summary>
/// <param name="image">Frame to scan.</param>
/// <returns>Detected body rectangles, or null when detection failed.</returns>
public Rectangle[] FindBodyHOG_WithoutGpu(Mat image)
{
    Rectangle[] found = null;
    using (HOGDescriptor hog = new HOGDescriptor())
    {
        try
        {
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            MCvObjectDetection[] detections = hog.DetectMultiScale(image);
            found = new Rectangle[detections.Length];
            for (int idx = 0; idx < detections.Length; idx++)
            {
                found[idx] = detections[idx].Rect;
            }
        }
        catch (Exception ex)
        {
            // Best-effort: surface the failure to the user rather than crash.
            MessageBox.Show(ex.Message);
        }
    }
    return found;
}
/// <summary>
/// Find pedestrians in the image and report how long detection took.
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="processTime">Detection time in milliseconds.</param>
/// <returns>Detected pedestrian regions.</returns>
private static Rectangle[] FindPedestrian(IInputArray image, out long processTime)
{
    Stopwatch timer;
    Rectangle[] found;
    using (InputArray iaImage = image.GetInputArray())
    using (HOGDescriptor hog = new HOGDescriptor())
    {
        hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
        timer = Stopwatch.StartNew();
        MCvObjectDetection[] detections = hog.DetectMultiScale(image);
        found = new Rectangle[detections.Length];
        for (int idx = 0; idx < detections.Length; idx++)
        {
            found[idx] = detections[idx].Rect;
        }
        timer.Stop();
    }
    processTime = timer.ElapsedMilliseconds;
    return found;
}
/// <summary>
/// Camera new-frame handler: runs HOG pedestrian detection on the frame and
/// shows the annotated result, but only when an update was requested.
/// </summary>
private void videoSourcePlayerCamera_NewFrame(object sender, ref Bitmap image)
{
    // Only process when a refresh has been requested; clear the flag first so
    // subsequent frames are skipped until the next request.
    if (needUpdateCamera)
    {
        needUpdateCamera = false;
        if (image != null)
        {
            Image <Bgr, Byte> img = new Image <Bgr, Byte>(image);
            // FIX: HOGDescriptor is IDisposable and was previously leaked on
            // every processed frame; dispose it with a using block.
            using (HOGDescriptor hogd = new HOGDescriptor())
            {
                Application.DoEvents();
                hogd.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                // Custom win-stride/padding/scale used by the original code.
                MCvObjectDetection[] mObj = hogd.DetectMultiScale(img, 0, new Size(4, 4), new Size(8, 8), 1.05);
                foreach (MCvObjectDetection obj in mObj)
                {
                    img.Draw(obj.Rect, new Bgr(System.Drawing.Color.Yellow), 2);
                }
            }
            ibCameraDetection.Image = img;
        }
    }
    //motiondetector.ProcessFrame(image);
}
/// <summary>
/// Detect pedestrians using the default HOG people detector.
/// </summary>
/// <param name="image">Image to scan.</param>
/// <returns>Detected pedestrian rectangles.</returns>
public override Rectangle[] find(Image <Bgr, Byte> image)
{
    HOGDescriptor hog = new HOGDescriptor();
    hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
    return hog.DetectMultiScale(image);
}
//=============================Feature Descriptor (HOG) Data Training Tanaman=============================
/// <summary>
/// Detect trained objects via HOG — on the GPU with the built-in people
/// detector when CUDA is available, otherwise on the CPU with a custom
/// detector loaded from a data file.
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="processingTime">Detection time in milliseconds.</param>
/// <param name="winSize">HOG window size for the CPU detector.</param>
/// <param name="dataFile">File holding the trained SVM detector data.</param>
/// <returns>Detected object regions.</returns>
public static Rectangle[] findObjects(Image <Bgr, Byte> image, out long processingTime, Size winSize, string dataFile)
{
    Stopwatch timer;
    Rectangle[] found;
    if (GpuInvoke.HasCuda)
    {
        // GPU path: requires a BGRA image.
        using (GpuHOGDescriptor gpuHog = new GpuHOGDescriptor())
        {
            gpuHog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage <Bgr, Byte> gpuImg = new GpuImage <Bgr, byte>(image))
            using (GpuImage <Bgra, Byte> gpuBgra = gpuImg.Convert <Bgra, Byte>())
            {
                found = gpuHog.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        // CPU path: custom HOG geometry plus a detector trained from dataFile.
        using (HOGDescriptor cpuHog = new HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins, 1, -1, 0.2, true))
        {
            cpuHog.SetSVMDetector(GetDataObjects(dataFile));
            timer = Stopwatch.StartNew();
            found = cpuHog.DetectMultiScale(image);
        }
    }
    timer.Stop();
    processingTime = timer.ElapsedMilliseconds;
    return found;
}
/// <summary>
/// Find the pedestrian in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="tryUseCuda">If true and a compatible CUDA device exists, run the CUDA detector.</param>
/// <param name="tryUseOpenCL">If true and an OpenCL-compatible GPU exists, enable OpenCL for the CPU path.</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds</param>
/// <returns>The region where pedestrians are detected</returns>
public static Rectangle[] Find(Mat image, bool tryUseCuda, bool tryUseOpenCL, out long processingTime)
{
    Stopwatch watch;
    Rectangle[] regions;
#if !(IOS || NETFX_CORE)
    //check if there is a compatible Cuda device to run pedestrian detection
    if (tryUseCuda && CudaInvoke.HasCuda)
    {
        //this is the Cuda version
        using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
        {
            des.SetSVMDetector(des.GetDefaultPeopleDetector());
            watch = Stopwatch.StartNew();
            using (GpuMat cudaBgr = new GpuMat(image))
            using (GpuMat cudaBgra = new GpuMat())
            using (VectorOfRect vr = new VectorOfRect())
            {
                //CUDA HOG requires a BGRA input image
                CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                des.DetectMultiScale(cudaBgra, vr);
                regions = vr.ToArray();
            }
            //NOTE(review): the stopwatch is never stopped on this branch;
            //ElapsedMilliseconds is still readable while running.
        }
    }
    else
#endif
    {
        //Many opencl functions require opencl compatible gpu devices.
        //As of opencv 3.0-alpha, opencv will crash if opencl is enable and only opencv compatible cpu device is presented
        //So we need to call CvInvoke.HaveOpenCLCompatibleGpuDevice instead of CvInvoke.HaveOpenCL (which also returns true on a system that only have cpu opencl devices).
        CvInvoke.UseOpenCL = tryUseOpenCL && CvInvoke.HaveOpenCLCompatibleGpuDevice;
        //this is the CPU/OpenCL version
        using (HOGDescriptor des = new HOGDescriptor())
        {
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            //load the image to umat so it will automatically use opencl is available
            //NOTE(review): umat is not disposed here — consider a using block.
            UMat umat = image.ToUMat(AccessType.Read);
            watch = Stopwatch.StartNew();
            MCvObjectDetection[] results = des.DetectMultiScale(umat);
            regions = new Rectangle[results.Length];
            for (int i = 0; i < results.Length; i++)
            {
                regions[i] = results[i].Rect;
            }
            watch.Stop();
        }
    }
    processingTime = watch.ElapsedMilliseconds;
    return (regions);
}
/// <summary>
/// Find the pedestrian in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="processingTime">The processing time in milliseconds</param>
/// <returns>The region where pedestrians are detected</returns>
public static Rectangle[] Find(IInputArray image, out long processingTime)
{
    Stopwatch watch;
    Rectangle[] regions;
    using (InputArray iaImage = image.GetInputArray())
    {
#if !(__IOS__ || NETFX_CORE)
        //if the input array is a GpuMat
        //check if there is a compatible Cuda device to run pedestrian detection
        if (iaImage.Kind == InputArray.Type.CudaGpuMat)
        {
            //this is the Cuda version
            using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
            {
                des.SetSVMDetector(des.GetDefaultPeopleDetector());
                watch = Stopwatch.StartNew();
                using (GpuMat cudaBgra = new GpuMat())
                using (VectorOfRect vr = new VectorOfRect())
                {
                    //CUDA HOG requires a BGRA input image
                    CudaInvoke.CvtColor(image, cudaBgra, ColorConversion.Bgr2Bgra);
                    des.DetectMultiScale(cudaBgra, vr);
                    regions = vr.ToArray();
                }
                //NOTE(review): the stopwatch is never stopped on this branch;
                //ElapsedMilliseconds is still readable while running.
            }
        }
        else
#endif
        {
            //this is the CPU/OpenCL version
            using (HOGDescriptor des = new HOGDescriptor())
            {
                des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                watch = Stopwatch.StartNew();
                MCvObjectDetection[] results = des.DetectMultiScale(image);
                regions = new Rectangle[results.Length];
                for (int i = 0; i < results.Length; i++)
                {
                    regions[i] = results[i].Rect;
                }
                watch.Stop();
            }
        }
        processingTime = watch.ElapsedMilliseconds;
        return (regions);
    }
}
/// <summary>
/// Find the pedestrian in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="tryUseCuda">If true and a compatible CUDA device exists, run the CUDA detector.</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds</param>
/// <returns>The region where pedestrians are detected</returns>
public static Rectangle[] Find(Mat image, bool tryUseCuda, out long processingTime)
{
    Stopwatch watch;
    Rectangle[] regions;
#if !(__IOS__ || NETFX_CORE)
    //check if there is a compatible Cuda device to run pedestrian detection
    if (tryUseCuda && CudaInvoke.HasCuda)
    {
        //this is the Cuda version
        using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
        {
            des.SetSVMDetector(des.GetDefaultPeopleDetector());
            watch = Stopwatch.StartNew();
            using (GpuMat cudaBgr = new GpuMat(image))
            using (GpuMat cudaBgra = new GpuMat())
            using (VectorOfRect vr = new VectorOfRect())
            {
                //CUDA HOG requires a BGRA input image
                CudaInvoke.CvtColor(cudaBgr, cudaBgra, ColorConversion.Bgr2Bgra);
                des.DetectMultiScale(cudaBgra, vr);
                regions = vr.ToArray();
            }
            //NOTE(review): the stopwatch is never stopped on this branch;
            //ElapsedMilliseconds is still readable while running.
        }
    }
    else
#endif
    {
        //this is the CPU/OpenCL version
        using (HOGDescriptor des = new HOGDescriptor())
        {
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            //load the image to umat so it will automatically use opencl is available
            //NOTE(review): umat is not disposed here — consider a using block.
            UMat umat = image.ToUMat(AccessType.Read);
            watch = Stopwatch.StartNew();
            MCvObjectDetection[] results = des.DetectMultiScale(umat);
            regions = new Rectangle[results.Length];
            for (int i = 0; i < results.Length; i++)
            {
                regions[i] = results[i].Rect;
            }
            watch.Stop();
        }
    }
    processingTime = watch.ElapsedMilliseconds;
    return (regions);
}
/// <summary>
/// Run the configured HOG pedestrian detector over a BGR frame.
/// </summary>
/// <param name="bgrImage">Frame to scan; may be null when capture/conversion failed.</param>
/// <returns>Detected pedestrian regions (empty when the input is null).</returns>
private Rectangle[] DetectPedestrian(Image <Bgr, byte> bgrImage)
{
    // Guard: a null frame means capture or conversion failed upstream.
    if (bgrImage == null)
    {
        Status = "Can't get image or convert it";
        return new Rectangle[0];
    }
    //bgrImage._EqualizeHist(); // does it make it relly better?
    return hog.DetectMultiScale(bgrImage, hitThreshold, winStride, padding, scale, finalThreshold, useMeanShiftGrouping);
}
/// <summary>
/// Detect people in the frame (CUDA when the input is a GpuMat, otherwise
/// CPU/OpenCL with tunable parameters) and return a copy with the detections drawn.
/// </summary>
/// <param name="original">Input frame.</param>
/// <returns>A single-element list holding the annotated copy of the frame.</returns>
public List <IImage> ProcessFrame(IImage original)
{
    Rectangle[] peopleRegion;
    using (InputArray iaImage = original.GetInputArray())
    {
#if !(__IOS__ || NETFX_CORE)
        //if the input array is a GpuMat
        //check if there is a compatible Cuda device to run pedestrian detection
        if (iaImage.Kind == InputArray.Type.CudaGpuMat)
        {
            //this is the Cuda version
            using (CudaHOG des = new CudaHOG(new Size(64, 128), new Size(16, 16), new Size(8, 8), new Size(8, 8)))
            {
                des.SetSVMDetector(des.GetDefaultPeopleDetector());
                using (GpuMat cudaBgra = new GpuMat())
                using (VectorOfRect vr = new VectorOfRect())
                {
                    //CUDA HOG requires a BGRA input image
                    CudaInvoke.CvtColor(original, cudaBgra, ColorConversion.Bgr2Bgra);
                    des.DetectMultiScale(cudaBgra, vr);
                    peopleRegion = vr.ToArray();
                }
            }
        }
        else
#endif
        {
            //this is the CPU/OpenCL version
            using (HOGDescriptor peopleDescriptor = new HOGDescriptor())
            {
                peopleDescriptor.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                //detection parameters come from the user-adjustable settings
                MCvObjectDetection[] peopleFound = peopleDescriptor
                    .DetectMultiScale(original, 0, default(Size), default(Size), AdjustableParameters["Scale"].CurrentValue, AdjustableParameters["SimilarityThreshold"].CurrentValue, AdjustableParameters["MeanShiftGrouping"].CurrentValue == 1);
                peopleRegion = new Rectangle[peopleFound.Length];
                for (int i = 0; i < peopleFound.Length; i++)
                {
                    peopleRegion[i] = peopleFound[i].Rect;
                }
            }
        }
        IImage copy = CopyAndDraw(original, peopleRegion);
        return (new List <IImage> { copy });
    }
}
/// <summary>
/// Detect bodies in the frame via HOG and replace the current-bodies list
/// with the new detections. Empty frames are ignored.
/// </summary>
void HogProcessing(Mat image, HOGDescriptor hog)
{
    if (image.Empty())
    {
        return;
    }
    var detections = hog.DetectMultiScale(image, 0, new Size(8, 8), new Size(24, 16), 1.05, 2);
    currentBodies.Clear();
    foreach (var detection in detections)
    {
        currentBodies.Add(detection);
    }
}
/// <summary>
/// Demo entry point: detect pedestrians in "pedestrian.png" (GPU when CUDA is
/// available, otherwise CPU) and display the timed result.
/// </summary>
static void Run()
{
    Image <Bgr, Byte> image = new Image <Bgr, byte>("pedestrian.png");
    Stopwatch timer;
    Rectangle[] found;
    if (GpuInvoke.HasCuda)
    {
        // GPU path: requires a BGRA image.
        using (GpuHOGDescriptor gpuHog = new GpuHOGDescriptor())
        {
            gpuHog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage <Bgr, Byte> gpuImg = new GpuImage <Bgr, byte>(image))
            using (GpuImage <Bgra, Byte> gpuBgra = gpuImg.Convert <Bgra, Byte>())
            {
                found = gpuHog.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        // CPU path.
        using (HOGDescriptor cpuHog = new HOGDescriptor())
        {
            cpuHog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            found = cpuHog.DetectMultiScale(image);
        }
    }
    timer.Stop();
    foreach (Rectangle pedestrain in found)
    {
        image.Draw(pedestrain, new Bgr(Color.Red), 1);
    }
    ImageViewer.Show(
        image,
        String.Format("Pedestrain detection using {0} in {1} milliseconds.",
                      GpuInvoke.HasCuda ? "GPU" : "CPU",
                      timer.ElapsedMilliseconds));
}
/// <summary>
/// Find pedestrians in the image using the supplied detectors.
/// </summary>
/// <param name="image">Input image; may be a GpuMat.</param>
/// <param name="hog">CPU/OpenCL HOG detector.</param>
/// <param name="hogCuda">Optional CUDA HOG detector, used only when the input is a GpuMat.</param>
/// <returns>The regions where pedestrians were detected.</returns>
public static Rectangle[] Find(IInputArray image, HOGDescriptor hog, CudaHOG hogCuda = null)
{
    using (InputArray iaImage = image.GetInputArray())
    {
        if (iaImage.Kind == InputArray.Type.CudaGpuMat && hogCuda != null)
        {
            // CUDA path: the CUDA HOG expects a BGRA input image.
            using (GpuMat bgra = new GpuMat())
            using (VectorOfRect rects = new VectorOfRect())
            {
                CudaInvoke.CvtColor(image, bgra, ColorConversion.Bgr2Bgra);
                hogCuda.DetectMultiScale(bgra, rects);
                return rects.ToArray();
            }
        }

        // CPU/OpenCL path.
        MCvObjectDetection[] detections = hog.DetectMultiScale(image);
        Rectangle[] found = new Rectangle[detections.Length];
        for (int idx = 0; idx < detections.Length; idx++)
        {
            found[idx] = detections[idx].Rect;
        }
        return found;
    }
}
/// <summary>
/// Detect people in the given frame using the histogram of oriented gradients algorithm.
/// </summary>
/// <param name="frame">Input frame in which people should get detected.</param>
/// <returns>Regions of detected people whose score meets the configured threshold.</returns>
public MCvObjectDetection[] Detect(Image <Bgr, byte> frame)
{
    // Run the HOG detector, then keep only sufficiently confident detections.
    MCvObjectDetection[] candidates = descriptor.DetectMultiScale(frame, 0, new Size(2, 2), new Size(8, 8));
    List <MCvObjectDetection> accepted = new List <MCvObjectDetection>();
    foreach (MCvObjectDetection candidate in candidates)
    {
        if (candidate.Score >= ScoreThreshold)
        {
            accepted.Add(candidate);
        }
    }
    return accepted.ToArray();
}
/// <summary>
/// Detect bodies twice — with the HOG SVM detector (drawn green) and with the
/// cascade classifier (drawn purple) — and update the detection count.
/// </summary>
public void getBodies()
{
    bodiesCount = 0;
    // HOG-based detection.
    SVMbodies = descriptor.DetectMultiScale(Gray_Frame, scale: 1.3, useMeanshiftGrouping: false);
    bodiesCount = SVMbodies.Length;
    foreach (MCvObjectDetection detection in SVMbodies)
    {
        img.Draw(detection.Rect, new Bgr(System.Drawing.Color.Green), 2);
    }
    // Cascade-based detection.
    bodies = _body.DetectMultiScale(Gray_Frame, 1.5, 5, new System.Drawing.Size(50, 50), System.Drawing.Size.Empty);
    foreach (Rectangle rect in bodies)
    {
        img.Draw(rect, new Bgr(System.Drawing.Color.Purple), 2);
    }
}
/// <summary>
/// Grab a frame, detect pedestrians, and highlight any whose center falls
/// inside the configured polygon; invokes the callback per such detection and
/// renders the annotated frame into the picture box.
/// </summary>
public void Do(PictureBox pictureBox, Action callback)
{
    using (var imageFrame = _capture.QueryFrame().ToImage <Bgr, Byte>())
    {
        if (imageFrame == null)
        {
            return;
        }
        var grayframe = imageFrame.Convert <Gray, byte>();
        using (HOGDescriptor des = new HOGDescriptor())
        {
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            MCvObjectDetection[] detections = des.DetectMultiScale(grayframe);
            var faces = new Rectangle[detections.Length];
            for (int idx = 0; idx < detections.Length; idx++)
            {
                faces[idx] = detections[idx].Rect;
            }
            foreach (var face in faces)
            {
                // Only react when the detection center lies inside the polygon.
                var center = CenterRect(face);
                if (Helper.IsPointInPolygon(center, _points))
                {
                    imageFrame.Draw(face, new Bgr(Color.Chartreuse), 1); //the detected face(s) is highlighted here using a box that is drawn around it/them
                    if (callback != null)
                    {
                        callback();
                    }
                }
            }
            imageFrame.DrawPolyline(_points, true, new Bgr(Color.Crimson), 1);
            // Scale the annotated frame to the picture box for display.
            var bmp = EmguHelper.ResizeImage(imageFrame.ToBitmap(), new Size(pictureBox.Width, pictureBox.Height));
            pictureBox.Image = bmp;
        }
    }
}
/// <summary>
/// People-detector sample: run HOG over a test image, shrink the detection
/// boxes slightly, and display the result fullscreen.
/// </summary>
public void Run()
{
    var image = Cv2.ImRead(FilePath.Asahiyama, LoadMode.Color);

    var hog = new HOGDescriptor();
    hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
    bool sizeOk = hog.CheckDetectorSize();
    Console.WriteLine("CheckDetectorSize: {0}", sizeOk);

    // run the detector with default parameters. to get a higher hit-rate
    // (and more false alarms, respectively), decrease the hitThreshold and
    // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
    var stopwatch = Stopwatch.StartNew();
    Rect[] found = hog.DetectMultiScale(image, 0, new Size(8, 8), new Size(24, 16), 1.05, 2);
    stopwatch.Stop();

    Console.WriteLine("Detection time = {0}ms", stopwatch.ElapsedMilliseconds);
    Console.WriteLine("{0} region(s) found", found.Length);

    foreach (Rect rect in found)
    {
        // the HOG detector returns slightly larger rectangles than the real objects.
        // so we slightly shrink the rectangles to get a nicer output.
        var shrunk = new Rect
        {
            X = rect.X + (int)Math.Round(rect.Width * 0.1),
            Y = rect.Y + (int)Math.Round(rect.Height * 0.1),
            Width = (int)Math.Round(rect.Width * 0.8),
            Height = (int)Math.Round(rect.Height * 0.8)
        };
        image.Rectangle(shrunk.TopLeft, shrunk.BottomRight, Scalar.Red, 3, LineType.Link8, 0);
    }

    using (var window = new Window("people detector", WindowMode.None, image))
    {
        window.SetProperty(WindowProperty.Fullscreen, 1);
        Cv.WaitKey(0);
    }
}
/// <summary>
/// Find pedestrians (GPU when CUDA is available, otherwise CPU), draw them on
/// the image, and report the detection time.
/// </summary>
/// <param name="image">Image to scan; it is drawn on and returned.</param>
/// <param name="processingTime">Detection time in milliseconds.</param>
/// <returns>The image with pedestrians outlined in red.</returns>
public static Image <Bgr, Byte> Find(Image <Bgr, Byte> image, out long processingTime)
{
    Stopwatch timer;
    Rectangle[] found;
    if (GpuInvoke.HasCuda)
    {
        // GPU path: requires a BGRA image.
        using (GpuHOGDescriptor gpuHog = new GpuHOGDescriptor())
        {
            gpuHog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(image))
            using (GpuImage <Bgra, Byte> gpuBgra = gpuImage.Convert <Bgra, Byte>())
            {
                found = gpuHog.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        // CPU path.
        using (HOGDescriptor cpuHog = new HOGDescriptor())
        {
            cpuHog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            found = cpuHog.DetectMultiScale(image);
        }
    }
    timer.Stop();
    processingTime = timer.ElapsedMilliseconds;
    foreach (Rectangle rect in found)
    {
        image.Draw(rect, new Bgr(Color.Red), 1);
    }
    return image;
}
/// <summary>
/// Detect people in a copy of the input image, draw the detections, and
/// report their count and center positions.
/// </summary>
/// <param name="image">Source image (left unmodified; a copy is processed).</param>
/// <param name="detections">Number of detected regions.</param>
/// <param name="positions">Center points of the detected regions.</param>
/// <returns>The annotated copy of the input image.</returns>
public static Image <Bgr, Byte> findPerson(Image <Bgr, Byte> image, out int detections, out List <PointF> positions)
{
    // BUG FIX: the copy and the position-list reset were previously done only
    // inside the CUDA branch, so the CPU path ran on a stale/null
    // imageToProcess and accumulated positions across calls. Hoist both.
    imageToProcess = new Image <Bgr, byte>(image.Bitmap); //Value is coppied so the refference of the input image is not changed outside of this class
    Person_Detector.positions.Clear();

    //If the gpu has nvidia CUDA
    if (GpuInvoke.HasCuda)
    {
        using (GpuHOGDescriptor descriptor = new GpuHOGDescriptor())
        {
            descriptor.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(imageToProcess)) //Create gpuImage from image
            using (GpuImage <Bgra, Byte> bgraImage = gpuImage.Convert <Bgra, Byte>())
            {
                regeions = descriptor.DetectMultiScale(bgraImage); //Returns all detected regions in a rectangle array
            }
        }
    }
    else
    {
        using (HOGDescriptor des = new HOGDescriptor())
        {
            // BUG FIX: the CPU branch never set an SVM detector, so
            // DetectMultiScale had no people model to run.
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            regeions = des.DetectMultiScale(imageToProcess);
        }
    }

    detections = regeions.Length;
    //Draws detected rectangles onto the image being returned
    foreach (Rectangle ped in regeions)
    {
        imageToProcess.Draw(ped, new Bgr(Color.Red), 5);
        imageToProcess.Draw(new Cross2DF(new PointF(ped.Location.X + (ped.Width / 2), ped.Location.Y + (ped.Height / 2)), 30, 30), new Bgr(Color.Green), 3);
        Person_Detector.positions.Add(new PointF(ped.Location.X + (ped.Width / 2), ped.Location.Y + (ped.Height / 2))); //Sets the putput variable
    }
    positions = Person_Detector.positions;
    return imageToProcess;
}
/// <summary>
/// Detect pedestrians in a grayscale frame with the default HOG people detector.
/// </summary>
/// <param name="grayframe">Grayscale frame to scan.</param>
/// <returns>Rectangles of the detected pedestrians (empty when none found).</returns>
public Rectangle[] Detect(Image <Gray, byte> grayframe)
{
    using (HOGDescriptor hog = new HOGDescriptor())
    {
        hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
        MCvObjectDetection[] detections = hog.DetectMultiScale(grayframe);
        // Copy just the rectangles out of the scored detections.
        Rectangle[] rects = new Rectangle[detections.Length];
        for (int idx = 0; idx < detections.Length; idx++)
        {
            rects[idx] = detections[idx].Rect;
        }
        return rects;
    }
}
/// <summary>
/// Find pedestrians and return a highlighted copy of the source image.
/// </summary>
/// <param name="source">Source image (left unmodified).</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds.</param>
/// <returns>A copy of the source with detected pedestrians outlined in red.</returns>
public static Image <Bgr, Byte> Find(Image <Bgr, byte> source, out long processingTime)
{
    Image <Bgr, Byte> image = source.Copy();
    Stopwatch timer;
    Rectangle[] found;
    if (GpuInvoke.HasCuda)
    {
        // GPU path: requires a BGRA image.
        using (GpuHOGDescriptor gpuHog = new GpuHOGDescriptor())
        {
            gpuHog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage <Bgr, Byte> gpuImg = new GpuImage <Bgr, byte>(image))
            using (GpuImage <Bgra, Byte> gpuBgra = gpuImg.Convert <Bgra, Byte>())
            {
                found = gpuHog.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        // CPU path.
        using (HOGDescriptor cpuHog = new HOGDescriptor())
        {
            cpuHog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            found = cpuHog.DetectMultiScale(image);
        }
    }
    timer.Stop();
    processingTime = timer.ElapsedMilliseconds;
    foreach (Rectangle pedestrain in found)
    {
        image.Draw(pedestrain, new Bgr(Color.Red), 1);
    }
    return image;
}
/// <summary>
/// Load the chosen image file, detect pedestrians with HOG, draw them in
/// yellow, and show the result in the picture box.
/// </summary>
private void ProcessImageAndUpdateGUI()
{
    Image <Bgr, Byte> imgImage = new Image <Bgr, Byte>(lblChosenFile.Text);
    // FIX: HOGDescriptor is IDisposable and was previously leaked on every
    // call; dispose it with a using block.
    using (HOGDescriptor hogd = new HOGDescriptor())
    {
        Application.DoEvents();
        hogd.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
        MCvObjectDetection[] mObj = hogd.DetectMultiScale(imgImage);
        foreach (MCvObjectDetection obj in mObj)
        {
            imgImage.Draw(obj.Rect, new Bgr(System.Drawing.Color.Yellow), 2);
        }
    }
    ibDetectionPietonImage.Image = imgImage;
}
/// <summary>
/// Run HOG over a copy of the current source image, draw shrunken detection
/// boxes, and write the result back to the source.
/// </summary>
private void HOGDrawObjects()
{
    Mat img = new Mat(src, true);
    CvRect[] found = hog.DetectMultiScale(img, 0, new CvSize(8, 8), new CvSize(24, 16), 1.05, 2);
    foreach (CvRect rect in found)
    {
        // the HOG detector returns slightly larger rectangles than the real objects.
        // so we slightly shrink the rectangles to get a nicer output.
        CvRect shrunk = new CvRect
        {
            X = rect.X + (int)Math.Round(rect.Width * 0.1),
            Y = rect.Y + (int)Math.Round(rect.Height * 0.1),
            Width = (int)Math.Round(rect.Width * 0.8),
            Height = (int)Math.Round(rect.Height * 0.8)
        };
        img.Rectangle(shrunk.TopLeft, shrunk.BottomRight, CvColor.Red, 3);
    }
    src = img.ToIplImage();
}
// New frame event handler, which is invoked on each new available video frame
/// <summary>
/// Detect pedestrians in the incoming video frame (when an update was
/// requested) and show the annotated frame in the picture box.
/// </summary>
private void video_NewFrame(object sender, AForge.Video.NewFrameEventArgs eventArgs)
{
    // get new frame
    bitmapPlayed = eventArgs.Frame;
    if (needUpdate)
    {
        needUpdate = false;
        if (bitmapPlayed != null)
        {
            Image <Bgr, Byte> img = new Image <Bgr, Byte>(bitmapPlayed);
            // Clear the preview while the new frame is being processed.
            ibVideoPieton.Image = null;
            // FIX: HOGDescriptor is IDisposable and was previously leaked on
            // every processed frame; dispose it with a using block.
            using (HOGDescriptor hogd = new HOGDescriptor())
            {
                Application.DoEvents();
                hogd.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
                MCvObjectDetection[] mObj = hogd.DetectMultiScale(img);
                foreach (MCvObjectDetection obj in mObj)
                {
                    img.Draw(obj.Rect, new Bgr(System.Drawing.Color.Yellow), 2);
                }
            }
            ibVideoPieton.Image = img;
        }
    }
    //motiondetector.ProcessFrame(eventArgs.Frame); // process the frame
}
/// <summary>
/// Find the pedestrian in the image
/// </summary>
/// <param name="image">The image</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds</param>
/// <returns>The region where pedestrians are detected</returns>
public static Rectangle[] Find(Image <Bgr, Byte> image, out long processingTime)
{
    Stopwatch watch;
    Rectangle[] regions;
#if !IOS
    //check if there is a compatible GPU to run pedestrian detection
    if (GpuInvoke.HasCuda)
    {
        //this is the GPU version
        using (GpuHOGDescriptor des = new GpuHOGDescriptor())
        {
            des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            watch = Stopwatch.StartNew();
            //the GPU detector works on BGRA images
            using (GpuImage <Bgr, Byte> gpuImg = new GpuImage <Bgr, byte>(image))
            using (GpuImage <Bgra, Byte> gpuBgra = gpuImg.Convert <Bgra, Byte>())
            {
                regions = des.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
#endif
    {
        //this is the CPU version
        using (HOGDescriptor des = new HOGDescriptor())
        {
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            watch = Stopwatch.StartNew();
            regions = des.DetectMultiScale(image);
        }
    }
    watch.Stop();
    processingTime = watch.ElapsedMilliseconds;
    return (regions);
}
// Body Function
/// <summary>
/// Detect human bodies in the named image file using HOG — on the GPU when
/// CUDA is available, otherwise on the CPU.
/// </summary>
/// <param name="fileName">Path of the image file to scan.</param>
/// <returns>Rectangles of the detected bodies.</returns>
public static Rectangle[] GetBodies(string fileName)
{
    Image <Bgr, Byte> image = new Image <Bgr, byte>(fileName);
    Rectangle[] regions;
    // FIX: removed a Stopwatch that was started/stopped but whose timing was
    // never reported, and deleted the commented-out DetectFeatures method.
    if (GpuInvoke.HasCuda)
    {
        // GPU path: requires a BGRA image.
        using (GpuHOGDescriptor des = new GpuHOGDescriptor())
        {
            des.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            using (GpuImage <Bgr, Byte> gpuImg = new GpuImage <Bgr, byte>(image))
            using (GpuImage <Bgra, Byte> gpuBgra = gpuImg.Convert <Bgra, Byte>())
            {
                regions = des.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        // CPU path.
        using (HOGDescriptor des = new HOGDescriptor())
        {
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            regions = des.DetectMultiScale(image);
        }
    }
    return (regions);
}
/// <summary>
/// Test button handler: loads a trained SVM from the chosen file and runs a
/// HOG detector built from it over an image.
/// NOTE(review): this handler looks incomplete/experimental — resultMat is
/// allocated but never filled from the SVM (so mydetector is all zeros),
/// svmMat/alphaMat are unused, the rho slot is commented out, and vImage is
/// an empty Mat. Confirm intent before relying on its results.
/// </summary>
private void btnTest_Click(object sender, EventArgs e)
{
    MyRect[] regions = null;
    Emgu.CV.ML.SVM vSVM = new Emgu.CV.ML.SVM();
    String cExportFileName = txtFileName.Text;
    //load the trained SVM model from the exported file
    FileStorage fileStorage = new FileStorage(cExportFileName, FileStorage.Mode.Read);
    vSVM.Read(fileStorage.GetFirstTopLevelNode());
    int iHeight = vSVM.GetSupportVectors().Height;
    int iWidth = vSVM.GetSupportVectors().Width;
    var svmMat = new Matrix <float>(iWidth, iHeight);
    Matrix <float> resultMat = new Matrix <float>(1, iWidth);
    Matrix <float> alphaMat = new Matrix <float>(1, iHeight);
    //detector vector is one longer than the feature width (last slot is rho)
    float[] mydetector = new float[iWidth + 1];
    for (int i = 0; i < iWidth; i++)
    {
        mydetector[i] = resultMat[0, i];
    }
    //mydetector[iWidth] = rhoValue;
    Mat vImage = new Mat();
    //HOG geometry matching the training window (36x36, 6x6 cells)
    HOGDescriptor hog = new HOGDescriptor(new Size(36, 36), new Size(36, 36), new Size(6, 6), new Size(6, 6));
    hog.SetSVMDetector(mydetector);
    MCvObjectDetection[] results = hog.DetectMultiScale(vImage);
    regions = new MyRect[results.Length];
    for (int i = 0; i < results.Length; i++)
    {
        regions[i] = new MyRect();
        regions[i].Rect = results[i].Rect;
        regions[i].Score = results[i].Score;
    }
}
/// <summary>
/// Capture loop demo: trains/round-trips an Eigen face recognizer, then reads
/// frames from the default camera, draws face (red) and pedestrian (green)
/// detections, and shows them in a window until an empty frame is read.
/// </summary>
public void Run()
{
    //Haar cascade used for face detection (file expected next to the executable)
    CascadeClassifier cascadeClassifier = new CascadeClassifier("haarcascade_frontalface_default.xml");
    // Opens MP4 file (ffmpeg is probably needed)
    //NOTE(review): VideoCapture(0) actually opens the default camera, not an
    //MP4 file — the comment above is kept from the original.
    var capture = new VideoCapture(0);
    //frame delay derived from the capture FPS (fall back to 60 when unknown)
    int sleepTime = (int)Math.Round(1000 / ((capture.Fps == 0) ? 60 : capture.Fps));
    var hog = new HOGDescriptor();
    hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
    var recognizer = OpenCvSharp.Face.EigenFaceRecognizer.Create(2);
    var img1 = Cv2.ImRead(SamplesCore.FilePath.Image.Girl, ImreadModes.Grayscale);
    var img2 = Cv2.ImRead(SamplesCore.FilePath.Image.Lenna, ImreadModes.Grayscale);
    System.Console.WriteLine("img1 width: " + img1.Size().Width + " height:" + img1.Size().Height);
    System.Console.WriteLine("img2 width: " + img2.Size().Width + " height:" + img2.Size().Height);
    recognizer.Train(new List <Mat>() { img1, img2 }, new List <int>() { 1, 2 });
    //round-trip the trained model through a file
    recognizer.Write("trainfile1.dat");
    recognizer.Read("trainfile1.dat");
    System.Console.WriteLine("before predict");
    System.Console.WriteLine("img1 width: " + img1.Size().Width + " height:" + img1.Size().Height);
    System.Console.WriteLine("img2 width: " + img2.Size().Width + " height:" + img2.Size().Height);
    recognizer.Predict(InputArray.Create(img1), out int label, out double confidence);
    System.Console.WriteLine("label: " + label + " confidence: " + confidence);
    using (var window = new Window("capture"))
    {
        // Frame image buffer
        Mat image = new Mat();
        // When the movie playback reaches end, Mat.data becomes NULL.
        while (true)
        {
            capture.Read(image); // same as cvQueryFrame
            if (image.Empty())
            {
                break;
            }
            //face detection (drawn red)
            var rect = cascadeClassifier.DetectMultiScale(image, 1.3, 5, HaarDetectionType.FindBiggestObject);
            if (rect.Count() > 0)
            {
                for (int i = 0; i < rect.Count(); i++)
                {
                    Cv2.Rectangle(image, rect[i], Scalar.Red, 2);
                }
            }
            //pedestrian detection (drawn green)
            var found = hog.DetectMultiScale(image, 0, new Size(8, 8), new Size(24, 16), 1.05, 2);
            if (found.Count() > 0)
            {
                for (int i = 0; i < found.Count(); i++)
                {
                    Cv2.Rectangle(image, found[i], Scalar.Green, 2);
                }
            }
            window.ShowImage(image);
            Cv2.WaitKey(sleepTime);
        }
    }
    //mat.SaveImage("ahihi.png");
}
/// <summary>
/// MVC action: detect people in an uploaded image with HOG, draw shrunken
/// green boxes, and expose both the original and annotated images to the
/// view as base64 PNG strings.
/// </summary>
/// <param name="imageData">The uploaded image file.</param>
/// <returns>The view; on failure the exception is logged and the view is rendered without results.</returns>
public ActionResult DetectHuman(HttpPostedFileBase imageData)
{
    try
    {
        if (imageData == null)
        {
            throw new ArgumentException("File is not exist.");
        }
        using (var img = Mat.FromStream(imageData.InputStream, LoadMode.Color))
        {
            // FIX: removed unused locals (ExecutingAssemblyPath, scale, and
            // the gray/smallImg Mats) that allocated resources never read.
            byte[] imgBytes = img.ToBytes(".png");
            string base64Img = Convert.ToBase64String(imgBytes);
            ViewBag.Base64Img = base64Img;

            var hog = new HOGDescriptor();
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            var rects = hog.DetectMultiScale(img);
            foreach (var rect in rects)
            {
                // Shrink each box ~10% per side; HOG boxes are slightly
                // larger than the actual person.
                var r = rect;
                r.X += Cv.Round(rect.Width * 0.1);
                r.Width = Cv.Round(rect.Width * 0.8);
                r.Y += Cv.Round(rect.Height * 0.1);
                r.Height = Cv.Round(rect.Height * 0.8);
                Cv2.Rectangle(img, r, new Scalar(0, 255, 0), 3);
            }

            byte[] resultBytes = img.ToBytes(".png");
            string base64Result = Convert.ToBase64String(resultBytes);
            ViewBag.Base64OrgResult = base64Result;
        }
    }
    catch (Exception ex)
    {
        // Best-effort: log and fall through to render the view without results.
        Console.WriteLine(ex.ToString());
    }
    return View();
}