//============================= Feature Descriptor (HOG) Data Training Tanaman =============================
/// <summary>
/// Runs HOG multi-scale object detection over <paramref name="image"/>.
/// Uses the GPU when CUDA is present, otherwise the CPU with a custom-trained SVM detector.
/// NOTE(review): the GPU branch uses the default people detector and ignores
/// <paramref name="winSize"/>/<paramref name="dataFile"/> — confirm this asymmetry is intended.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="processingTime">Elapsed detection time in milliseconds.</param>
/// <param name="winSize">HOG window size used by the CPU descriptor.</param>
/// <param name="dataFile">File containing the trained SVM detector (CPU path).</param>
/// <returns>Bounding rectangles of the detections.</returns>
public static Rectangle[] findObjects(Image<Bgr, Byte> image, out long processingTime, Size winSize, string dataFile)
{
    Stopwatch timer;
    Rectangle[] found;

    if (GpuInvoke.HasCuda)
    {
        // GPU path: the GPU HOG implementation works on a BGRA copy of the image.
        using (GpuHOGDescriptor hog = new GpuHOGDescriptor())
        {
            hog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuSource = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Bgra, Byte> gpuBgra = gpuSource.Convert<Bgra, Byte>())
            {
                found = hog.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        // CPU path: descriptor geometry comes from the caller, detector from dataFile.
        using (HOGDescriptor hog = new HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins, 1, -1, 0.2, true))
        {
            hog.SetSVMDetector(GetDataObjects(dataFile));
            timer = Stopwatch.StartNew();
            found = hog.DetectMultiScale(image);
        }
    }

    timer.Stop();
    processingTime = timer.ElapsedMilliseconds;
    return found;
}
/// <summary>
/// Detects faces on the GPU, draws a red box around each hit on a copy of the
/// input, and records each face's center point.
/// </summary>
/// <param name="image">Input BGR image (left unmodified; a copy is annotated).</param>
/// <param name="detections">Number of faces found.</param>
/// <param name="positions">Center points of the detected faces.</param>
/// <returns>The annotated copy of the input image.</returns>
public static Image<Bgr, byte> detectFace(Image<Bgr, byte> image, out int detections, out List<PointF> positions)
{
    // Annotate a copy so the caller's image stays untouched.
    Image<Bgr, byte> annotated = new Image<Bgr, byte>(image.Bitmap);
    Person_Detector.facePositions.Clear();

    using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
    using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>()) // cascade classifier requires grayscale input
    {
        faces = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);

        // Box each detection and remember its center.
        foreach (Rectangle region in faces)
        {
            annotated.Draw(region, new Bgr(Color.Red), 4);
            Person_Detector.facePositions.Add(new PointF(region.Location.X + (region.Width / 2), region.Location.Y + (region.Height / 2)));
        }

        detections = faces.Length;
        positions = facePositions;
    }

    return annotated;
}
/// <summary>
/// GPU face-and-eye detection: finds faces, then searches each face sub-image
/// for eyes, appending results to the supplied lists.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="face">GPU cascade used for face detection.</param>
/// <param name="eye">GPU cascade used for eye detection.</param>
/// <param name="faces">Receives face rectangles (image coordinates).</param>
/// <param name="eyes">Receives eye rectangles (image coordinates).</param>
/// <param name="detectionTime">Elapsed detection time in milliseconds.</param>
public static void UsingGPU(Image<Bgr, Byte> image, GpuCascadeClassifier face, GpuCascadeClassifier eye, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
{
    watch = Stopwatch.StartNew();
    using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
    using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
    {
        Rectangle[] faceHits = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
        faces.AddRange(faceHits);

        foreach (Rectangle faceRect in faceHits)
        {
            using (GpuImage<Gray, Byte> faceView = gpuGray.GetSubRect(faceRect))
            {
                // A clone of the sub-rect is required here; detecting on the view
                // directly misbehaves (possible GpuCascadeClassifier bug in opencv).
                using (GpuImage<Gray, Byte> faceCopy = faceView.Clone())
                {
                    foreach (Rectangle eyeHit in eye.DetectMultiScale(faceCopy, 1.1, 10, Size.Empty))
                    {
                        Rectangle eyeRect = eyeHit;
                        eyeRect.Offset(faceRect.X, faceRect.Y); // face-relative -> image coordinates
                        eyes.Add(eyeRect);
                    }
                }
            }
        }
    }
    watch.Stop();
    detectionTime = watch.ElapsedMilliseconds;
}
/// <summary>
/// Detects objects with the named Haar cascade, on the GPU when CUDA is
/// available, otherwise on the CPU after histogram equalization.
/// NOTE(review): the GPU branch ignores detectionType and maxSize — confirm intended.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="cascadeFile">Cascade name resolved through CascadeManager.</param>
/// <param name="scaleFactor">Pyramid scale step.</param>
/// <param name="minNeighbors">Minimum neighboring detections to keep a hit.</param>
/// <param name="detectionType">Haar detection flags (CPU path only).</param>
/// <param name="minSize">Minimum object side in pixels; 0 means unrestricted.</param>
/// <param name="maxSize">Maximum object side in pixels; 0 means unrestricted.</param>
/// <returns>Bounding rectangles of the detections.</returns>
public static Rectangle[] Detect(Image<Bgr, Byte> image, string cascadeFile, double scaleFactor = 1.3, int minNeighbors = 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE detectionType = Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, int minSize = 20, int maxSize = 0)
{
    string cascadeFilePath = CascadeManager.GetCascade(cascadeFile);

    // 0 means "no bound" for both limits.
    Size minimumSize = minSize == 0 ? Size.Empty : new Size(minSize, minSize);
    Size maximumSize = maxSize == 0 ? Size.Empty : new Size(maxSize, maxSize);

    if (GpuInvoke.HasCuda)
    {
        using (GpuCascadeClassifier cascade = new GpuCascadeClassifier(cascadeFilePath))
        using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
        using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
        {
            return cascade.DetectMultiScale(gpuGray, scaleFactor, minNeighbors, minimumSize);
        }
    }

    using (HaarCascade cascade = new HaarCascade(cascadeFilePath))
    using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>())
    {
        // Normalize brightness / boost contrast before detecting.
        gray._EqualizeHist();
        MCvAvgComp[] detected = cascade.Detect(gray, scaleFactor, minNeighbors, detectionType, minimumSize, maximumSize);
        return detected.Select(x => x.rect).ToArray();
    }
}
/// <summary>
/// Detects objects with the named Haar cascade — GPU when CUDA is available,
/// otherwise CPU with histogram equalization first.
/// NOTE(review): detectionType and maxSize are only honored on the CPU path.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="cascadeFile">Cascade name resolved through CascadeManager.</param>
/// <param name="scaleFactor">Pyramid scale step.</param>
/// <param name="minNeighbors">Minimum neighboring detections to keep a hit.</param>
/// <param name="detectionType">Haar detection flags (CPU path only).</param>
/// <param name="minSize">Minimum object side in pixels; 0 means unrestricted.</param>
/// <param name="maxSize">Maximum object side in pixels; 0 means unrestricted.</param>
/// <returns>Bounding rectangles of the detections.</returns>
public static Rectangle[] Detect(Image<Bgr, Byte> image, string cascadeFile, double scaleFactor = 1.3, int minNeighbors = 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE detectionType = Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, int minSize = 20, int maxSize = 0)
{
    string cascadeFilePath = CascadeManager.GetCascade(cascadeFile);

    // A size of 0 translates to Size.Empty, i.e. no limit.
    Size minimumSize = minSize == 0 ? Size.Empty : new Size(minSize, minSize);
    Size maximumSize = maxSize == 0 ? Size.Empty : new Size(maxSize, maxSize);

    if (GpuInvoke.HasCuda)
    {
        using (GpuCascadeClassifier cascade = new GpuCascadeClassifier(cascadeFilePath))
        using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
        using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
        {
            return cascade.DetectMultiScale(gpuGray, scaleFactor, minNeighbors, minimumSize);
        }
    }

    using (HaarCascade cascade = new HaarCascade(cascadeFilePath))
    using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>())
    {
        gray._EqualizeHist(); // normalize brightness / increase contrast
        MCvAvgComp[] detected = cascade.Detect(gray, scaleFactor, minNeighbors, detectionType, minimumSize, maximumSize);
        return detected.Select(x => x.rect).ToArray();
    }
}
/// <summary>
/// Uploads a BGR image to the GPU and returns its grayscale conversion.
/// Returns null when <paramref name="source"/> is not an Image&lt;Bgr, byte&gt;.
/// </summary>
/// <param name="source">Candidate input image.</param>
/// <param name="parameter">Preparation parameters (unused by this implementation).</param>
/// <returns>A GPU grayscale image, or null for unsupported input types.</returns>
public IImage GetResult(IImage source, PrepareImageParameter parameter)
{
    var bgrSource = source as Image<Bgr, byte>;
    if (bgrSource == null)
    {
        return null;
    }

    using (var gpuImage = new GpuImage<Bgr, byte>(bgrSource))
    {
        return gpuImage.Convert<Gray, byte>();
    }
}
/// <summary>
/// Demo entry point: detects pedestrians in "pedestrian.png" (GPU HOG when
/// CUDA is available, CPU HOG otherwise), boxes them in red, and shows the
/// result with the elapsed time in the window title.
/// </summary>
static void Run()
{
    Image<Bgr, Byte> image = new Image<Bgr, byte>("pedestrian.png");
    Stopwatch timer;
    Rectangle[] found;

    // Check for a compatible GPU before choosing the code path.
    if (GpuInvoke.HasCuda)
    {
        // GPU version: requires a BGRA copy of the frame.
        using (GpuHOGDescriptor hog = new GpuHOGDescriptor())
        {
            hog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImg = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Bgra, Byte> gpuBgra = gpuImg.Convert<Bgra, Byte>())
            {
                found = hog.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        // CPU version.
        using (HOGDescriptor hog = new HOGDescriptor())
        {
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            found = hog.DetectMultiScale(image);
        }
    }
    timer.Stop();

    foreach (Rectangle hit in found)
    {
        image.Draw(hit, new Bgr(Color.Red), 1);
    }

    ImageViewer.Show(
        image,
        String.Format("Pedestrain detection using {0} in {1} milliseconds.",
                      GpuInvoke.HasCuda ? "GPU" : "CPU",
                      timer.ElapsedMilliseconds));
}
/// <summary>
/// Detects pedestrians with the default HOG people detector (GPU if CUDA is
/// available), draws red boxes directly on <paramref name="image"/>, and
/// returns the same image instance.
/// </summary>
/// <param name="image">Image to scan; modified in place with detection boxes.</param>
/// <param name="processingTime">Detection time in milliseconds.</param>
/// <returns>The input image, annotated.</returns>
public static Image<Bgr, Byte> Find(Image<Bgr, Byte> image, out long processingTime)
{
    Stopwatch timer;
    Rectangle[] found;

    if (GpuInvoke.HasCuda)
    {
        using (GpuHOGDescriptor hog = new GpuHOGDescriptor())
        {
            hog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            // GPU HOG consumes a BGRA copy of the frame.
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Bgra, Byte> gpuBgraImage = gpuImage.Convert<Bgra, Byte>())
            {
                found = hog.DetectMultiScale(gpuBgraImage);
            }
        }
    }
    else
    {
        using (HOGDescriptor hog = new HOGDescriptor())
        {
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            found = hog.DetectMultiScale(image);
        }
    }
    timer.Stop();
    processingTime = timer.ElapsedMilliseconds;

    foreach (Rectangle hit in found)
    {
        image.Draw(hit, new Bgr(Color.Red), 1);
    }
    return image;
}
/// <summary>
/// Detects faces with the given cascade file, on the GPU when CUDA is
/// available and on the CPU (with histogram equalization) otherwise.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="faceFileName">Path to the face cascade XML.</param>
/// <param name="faces">Receives the detected face rectangles.</param>
/// <param name="detectionTime">Detection time in milliseconds.</param>
public static void Detect(Image<Bgr, Byte> image, String faceFileName, List<Rectangle> faces, out long detectionTime)
{
    Stopwatch timer;

    if (GpuInvoke.HasCuda)
    {
        using (GpuCascadeClassifier faceCascade = new GpuCascadeClassifier(faceFileName))
        {
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
            {
                faces.AddRange(faceCascade.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty));
            }
            timer.Stop();
        }
    }
    else
    {
        // Read the HaarCascade objects
        using (CascadeClassifier faceCascade = new CascadeClassifier(faceFileName))
        {
            timer = Stopwatch.StartNew();
            using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) // cascade works on grayscale
            {
                // Normalize brightness and increase contrast before detection.
                gray._EqualizeHist();
                faces.AddRange(faceCascade.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty));
            }
            timer.Stop();
        }
    }

    detectionTime = timer.ElapsedMilliseconds;
}
/// <summary>
/// Face detection with a Haar cascade: GPU path when CUDA is present,
/// otherwise CPU path with grayscale conversion and histogram equalization.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="faceFileName">Path to the face cascade XML.</param>
/// <param name="faces">Receives the detected face rectangles.</param>
/// <param name="detectionTime">Detection time in milliseconds.</param>
public static void Detect(Image<Bgr, Byte> image, String faceFileName, List<Rectangle> faces, out long detectionTime)
{
    Stopwatch timer;

    if (GpuInvoke.HasCuda)
    {
        using (GpuCascadeClassifier cascade = new GpuCascadeClassifier(faceFileName))
        {
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
            {
                Rectangle[] hits = cascade.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                faces.AddRange(hits);
            }
            timer.Stop();
        }
    }
    else
    {
        // Read the HaarCascade objects
        using (CascadeClassifier cascade = new CascadeClassifier(faceFileName))
        {
            timer = Stopwatch.StartNew();
            using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>())
            {
                gray._EqualizeHist(); // normalize brightness / increase contrast
                Rectangle[] hits = cascade.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
                faces.AddRange(hits);
            }
            timer.Stop();
        }
    }

    detectionTime = timer.ElapsedMilliseconds;
}
/// <summary>
/// Detects people in <paramref name="image"/> with HOG (GPU when CUDA is
/// available), draws boxes and center crosses on a working copy, and reports
/// detection count and center positions.
/// </summary>
/// <param name="image">Input BGR image (never modified; a copy is annotated).</param>
/// <param name="detections">Number of people detected.</param>
/// <param name="positions">Center points of the detections.</param>
/// <returns>The annotated working copy.</returns>
public static Image<Bgr, Byte> findPerson(Image<Bgr, Byte> image, out int detections, out List<PointF> positions)
{
    // BUG FIX: the copy and the position reset used to happen only inside the
    // CUDA branch, so the CPU path operated on a stale imageToProcess and
    // accumulated positions across calls. Do both unconditionally up front.
    imageToProcess = new Image<Bgr, byte>(image.Bitmap); // copy so the caller's image is untouched
    Person_Detector.positions.Clear();

    if (GpuInvoke.HasCuda)
    {
        using (GpuHOGDescriptor descriptor = new GpuHOGDescriptor())
        {
            descriptor.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(imageToProcess)) // create gpuImage from the copy
            using (GpuImage<Bgra, Byte> bgraImage = gpuImage.Convert<Bgra, Byte>())
            {
                regeions = descriptor.DetectMultiScale(bgraImage); // all detected regions as rectangles
            }
        }
    }
    else
    {
        using (HOGDescriptor des = new HOGDescriptor())
        {
            // BUG FIX: the CPU descriptor previously had no SVM detector loaded,
            // so DetectMultiScale could not find anything. Mirror the GPU path.
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            regeions = des.DetectMultiScale(imageToProcess);
        }
    }

    detections = regeions.Length;

    // Draw each detection (box + center cross) and record its center.
    foreach (Rectangle ped in regeions)
    {
        PointF center = new PointF(ped.Location.X + (ped.Width / 2), ped.Location.Y + (ped.Height / 2));
        imageToProcess.Draw(ped, new Bgr(Color.Red), 5);
        imageToProcess.Draw(new Cross2DF(center, 30, 30), new Bgr(Color.Green), 3);
        Person_Detector.positions.Add(center); // sets the output variable
    }
    positions = Person_Detector.positions;
    return imageToProcess;
}
/// <summary>
/// HOG pedestrian detection with the default people detector — GPU when CUDA
/// is present, CPU otherwise. Detections are boxed in red on the input image.
/// </summary>
/// <param name="image">Image to scan; annotated in place.</param>
/// <param name="processingTime">Detection time in milliseconds.</param>
/// <returns>The input image with detections drawn.</returns>
public static Image<Bgr, Byte> Find(Image<Bgr, Byte> image, out long processingTime)
{
    Stopwatch timer;
    Rectangle[] hits;

    if (GpuInvoke.HasCuda)
    {
        using (GpuHOGDescriptor descriptor = new GpuHOGDescriptor())
        {
            descriptor.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Bgra, Byte> gpuBgra = gpuImage.Convert<Bgra, Byte>()) // GPU HOG needs BGRA
            {
                hits = descriptor.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        using (HOGDescriptor descriptor = new HOGDescriptor())
        {
            descriptor.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            hits = descriptor.DetectMultiScale(image);
        }
    }
    timer.Stop();
    processingTime = timer.ElapsedMilliseconds;

    foreach (Rectangle hit in hits)
    {
        image.Draw(hit, new Bgr(Color.Red), 1);
    }
    return image;
}
/// <summary>
/// Find the pedestrians in the image.
/// </summary>
/// <param name="source">The source image; a copy is annotated, the original is untouched.</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds.</param>
/// <returns>A copy of the image with pedestrians highlighted.</returns>
public static Image<Bgr, Byte> Find(Image<Bgr, byte> source, out long processingTime)
{
    Image<Bgr, Byte> canvas = source.Copy();
    Stopwatch timer;
    Rectangle[] found;

    // Check if there is a compatible GPU to run pedestrian detection.
    if (GpuInvoke.HasCuda)
    {
        // GPU version.
        using (GpuHOGDescriptor hog = new GpuHOGDescriptor())
        {
            hog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImg = new GpuImage<Bgr, byte>(canvas))
            using (GpuImage<Bgra, Byte> gpuBgra = gpuImg.Convert<Bgra, Byte>())
            {
                found = hog.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        // CPU version.
        using (HOGDescriptor hog = new HOGDescriptor())
        {
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            found = hog.DetectMultiScale(canvas);
        }
    }
    timer.Stop();
    processingTime = timer.ElapsedMilliseconds;

    foreach (Rectangle hit in found)
    {
        canvas.Draw(hit, new Bgr(Color.Red), 1);
    }
    return canvas;
}
/// <summary>
/// Detects faces into <paramref name="faces"/> using the class's face cascade.
/// On non-IOS builds with CUDA the GPU classifier is used; otherwise the CPU
/// classifier runs on an equalized grayscale copy.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="faces">Receives the detected face rectangles.</param>
private void DetectFace(Image<Bgr, Byte> image, List<Rectangle> faces)
{
#if !IOS
    if (GpuInvoke.HasCuda)
    {
        using (GpuCascadeClassifier gpuCascade = new GpuCascadeClassifier(_faceFileName))
        {
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
            {
                faces.AddRange(gpuCascade.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty));
            }
        }
    }
    else
#endif
    {
        // Read the HaarCascade objects
        using (CascadeClassifier cascade = new CascadeClassifier(_faceFileName))
        {
            using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) // cascade requires grayscale
            {
                // Normalize brightness and increase contrast before detecting.
                gray._EqualizeHist();
                faces.AddRange(cascade.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty));
            }
        }
    }
}
/// <summary>
/// Find the pedestrians in the image.
/// </summary>
/// <param name="image">The image.</param>
/// <param name="processingTime">The pedestrian detection time in milliseconds.</param>
/// <returns>The regions where pedestrians were detected.</returns>
public static Rectangle[] Find(Image<Bgr, Byte> image, out long processingTime)
{
    Stopwatch timer;
    Rectangle[] found;
#if !IOS
    // Check if there is a compatible GPU to run pedestrian detection.
    if (GpuInvoke.HasCuda)
    {
        // GPU version.
        using (GpuHOGDescriptor hog = new GpuHOGDescriptor())
        {
            hog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImg = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Bgra, Byte> gpuBgra = gpuImg.Convert<Bgra, Byte>())
            {
                found = hog.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
#endif
    {
        // CPU version.
        using (HOGDescriptor hog = new HOGDescriptor())
        {
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            found = hog.DetectMultiScale(image);
        }
    }
    timer.Stop();
    processingTime = timer.ElapsedMilliseconds;
    return found;
}
//=================================================== Feature Descriptor (HOG) Data Training Kursi ===========================================
/// <summary>
/// HOG multi-scale detection: GPU with the default people detector when CUDA
/// is available, otherwise CPU with a custom-trained detector loaded from
/// <paramref name="dataFile"/>.
/// NOTE(review): the GPU branch ignores winSizeObject/dataFile — confirm intended.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="processingTime">Detection time in milliseconds.</param>
/// <param name="winSizeObject">HOG window size for the CPU descriptor.</param>
/// <param name="dataFile">File holding the trained SVM detector data (CPU path).</param>
/// <returns>Bounding rectangles of the detections.</returns>
public static Rectangle[] FindObject(Image<Bgr, Byte> image, out long processingTime, Size winSizeObject, string dataFile)
{
    Stopwatch timer;
    Rectangle[] found;

    // Check if there is a compatible GPU available.
    if (GpuInvoke.HasCuda)
    {
        // GPU version.
        using (GpuHOGDescriptor hog = new GpuHOGDescriptor())
        {
            hog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImg = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Bgra, Byte> gpuBgra = gpuImg.Convert<Bgra, Byte>())
            {
                found = hog.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        // CPU version: geometry from the caller, detector from dataFile.
        using (HOGDescriptor hog = new HOGDescriptor(winSizeObject, blockSize, blockStride, cellSize, nbins, 1, -1, 0.2, true))
        {
            hog.SetSVMDetector(GetDataObjects(dataFile));
            timer = Stopwatch.StartNew();
            found = hog.DetectMultiScale(image);
        }
    }

    timer.Stop();
    processingTime = timer.ElapsedMilliseconds;
    return found;
}
// Body Function
/// <summary>
/// Loads the image at <paramref name="fileName"/> and returns HOG pedestrian
/// detections, using the GPU when CUDA is available and the CPU otherwise.
/// </summary>
/// <param name="fileName">Path of the image file to scan.</param>
/// <returns>Bounding rectangles of the detected bodies.</returns>
public static Rectangle[] GetBodies(string fileName)
{
    Image<Bgr, Byte> image = new Image<Bgr, byte>(fileName);
    Stopwatch timer;
    Rectangle[] found;

    // Check if there is a compatible GPU to run pedestrian detection.
    if (GpuInvoke.HasCuda)
    {
        // GPU version.
        using (GpuHOGDescriptor hog = new GpuHOGDescriptor())
        {
            hog.SetSVMDetector(GpuHOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImg = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Bgra, Byte> gpuBgra = gpuImg.Convert<Bgra, Byte>())
            {
                found = hog.DetectMultiScale(gpuBgra);
            }
        }
    }
    else
    {
        // CPU version.
        using (HOGDescriptor hog = new HOGDescriptor())
        {
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
            timer = Stopwatch.StartNew();
            found = hog.DetectMultiScale(image);
        }
    }
    timer.Stop(); // elapsed time is measured but not reported by this overload
    return found;
}
/// <summary>
/// Detects faces and, within each face, eyes. Uses the GPU classifiers when
/// CUDA is available, otherwise CPU classifiers on an equalized grayscale copy.
/// Eye rectangles are reported in whole-image coordinates.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="faceFileName">Path to the face cascade XML.</param>
/// <param name="eyeFileName">Path to the eye cascade XML.</param>
/// <param name="faces">Receives the detected face rectangles.</param>
/// <param name="eyes">Receives the detected eye rectangles.</param>
/// <param name="detectionTime">Detection time in milliseconds.</param>
public static void Detect(Image<Bgr, Byte> image, String faceFileName, String eyeFileName, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
{
    Stopwatch timer;

    if (GpuInvoke.HasCuda)
    {
        using (GpuCascadeClassifier faceCascade = new GpuCascadeClassifier(faceFileName))
        using (GpuCascadeClassifier eyeCascade = new GpuCascadeClassifier(eyeFileName))
        {
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
            {
                Rectangle[] faceHits = faceCascade.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                faces.AddRange(faceHits);

                foreach (Rectangle faceRect in faceHits)
                {
                    using (GpuImage<Gray, Byte> faceView = gpuGray.GetSubRect(faceRect))
                    {
                        // A clone is required here; detecting directly on the sub-rect
                        // view misbehaves (possible GpuCascadeClassifier bug in opencv).
                        using (GpuImage<Gray, Byte> faceCopy = faceView.Clone())
                        {
                            foreach (Rectangle eyeHit in eyeCascade.DetectMultiScale(faceCopy, 1.1, 10, Size.Empty))
                            {
                                Rectangle eyeRect = eyeHit;
                                eyeRect.Offset(faceRect.X, faceRect.Y); // face-relative -> image coordinates
                                eyes.Add(eyeRect);
                            }
                        }
                    }
                }
            }
            timer.Stop();
        }
    }
    else
    {
        // Read the HaarCascade objects
        using (CascadeClassifier faceCascade = new CascadeClassifier(faceFileName))
        using (CascadeClassifier eyeCascade = new CascadeClassifier(eyeFileName))
        {
            timer = Stopwatch.StartNew();
            using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>())
            {
                gray._EqualizeHist(); // normalize brightness / increase contrast

                Rectangle[] faceHits = faceCascade.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
                faces.AddRange(faceHits);

                foreach (Rectangle faceRect in faceHits)
                {
                    // Restrict the eye search to the face region via ROI.
                    gray.ROI = faceRect;
                    Rectangle[] eyeHits = eyeCascade.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
                    gray.ROI = Rectangle.Empty;

                    foreach (Rectangle eyeHit in eyeHits)
                    {
                        Rectangle eyeRect = eyeHit;
                        eyeRect.Offset(faceRect.X, faceRect.Y);
                        eyes.Add(eyeRect);
                    }
                }
            }
            timer.Stop();
        }
    }
    detectionTime = timer.ElapsedMilliseconds;
}
/// <summary>
/// Detects faces and runs eye detection inside each face.
/// NOTE(review): the statements that would append eye rectangles are commented
/// out in the original, so <paramref name="eyes"/> is never populated — the eye
/// detections are computed and discarded. Confirm whether this is intentional.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="faceFileName">Path to the face cascade XML.</param>
/// <param name="eyeFileName">Path to the eye cascade XML.</param>
/// <param name="faces">Receives the detected face rectangles.</param>
/// <param name="eyes">Currently never filled (see note above).</param>
/// <param name="detectionTime">Detection time in milliseconds.</param>
public static void Detect(Image<Bgr, Byte> image, String faceFileName, String eyeFileName, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
{
    Stopwatch timer;

    if (GpuInvoke.HasCuda)
    {
        using (GpuCascadeClassifier faceCascade = new GpuCascadeClassifier(faceFileName))
        using (GpuCascadeClassifier eyeCascade = new GpuCascadeClassifier(eyeFileName))
        {
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
            {
                Rectangle[] faceHits = faceCascade.DetectMultiScale(gpuGray, 1.4, 4, Size.Empty);
                faces.AddRange(faceHits);

                foreach (Rectangle faceRect in faceHits)
                {
                    using (GpuImage<Gray, Byte> faceView = gpuGray.GetSubRect(faceRect))
                    {
                        // A clone is required; detecting on the sub-rect view directly
                        // misbehaves (possible GpuCascadeClassifier bug in opencv).
                        using (GpuImage<Gray, Byte> faceCopy = faceView.Clone(null))
                        {
                            // Result intentionally discarded — the offset/add logic is
                            // commented out in the original implementation.
                            eyeCascade.DetectMultiScale(faceCopy, 1.4, 4, Size.Empty);
                        }
                    }
                }
            }
            timer.Stop();
        }
    }
    else
    {
        // Read the HaarCascade objects
        using (CascadeClassifier faceCascade = new CascadeClassifier(faceFileName))
        using (CascadeClassifier eyeCascade = new CascadeClassifier(eyeFileName))
        {
            timer = Stopwatch.StartNew();
            using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>())
            {
                gray._EqualizeHist(); // normalize brightness / increase contrast

                Rectangle[] faceHits = faceCascade.DetectMultiScale(gray, 1.3, 4, new Size(20, 20), Size.Empty);
                faces.AddRange(faceHits);

                foreach (Rectangle faceRect in faceHits)
                {
                    // Restrict the eye search to the face region via ROI.
                    gray.ROI = faceRect;
                    // Result intentionally discarded — see note in the summary.
                    eyeCascade.DetectMultiScale(gray, 1.4, 10, new Size(20, 20), Size.Empty);
                    gray.ROI = Rectangle.Empty;
                }
            }
            timer.Stop();
        }
    }
    detectionTime = timer.ElapsedMilliseconds;
}
/// <summary>
/// Converts a WPF frame to an EmguCV image and, when CUDA is available, runs
/// cascade detection with the configured classifier, drawing each hit in blue.
/// NOTE(review): without CUDA this method only logs — the CPU HOG fallback is
/// created empty and never used.
/// </summary>
/// <param name="frame">Incoming WPF bitmap frame.</param>
public void classify(BitmapSource frame)
{
    Console.WriteLine(relativeURI);

    // Convert the WPF BitmapSource to a GDI+ bitmap via an in-memory BMP stream.
    Console.WriteLine(" - - - Converting Bitmap...");
    System.Drawing.Bitmap bitmapFrame;
    using (MemoryStream outStream = new MemoryStream())
    {
        BitmapEncoder enc = new BmpBitmapEncoder();
        enc.Frames.Add(BitmapFrame.Create(frame));
        enc.Save(outStream);
        bitmapFrame = new System.Drawing.Bitmap(outStream);
    }
    Console.WriteLine(" - - - Bitmap converted!");

    Image<Bgr, Byte> image = new Image<Bgr, Byte>(bitmapFrame);
    Console.WriteLine(" - - - Image set");
    Console.WriteLine(" - - - Check CUDA...");

    if (GpuInvoke.HasCuda)
    {
        Console.WriteLine(" - - - Has CUDA!");
        using (GpuCascadeClassifier target = new GpuCascadeClassifier(classifierURI))
        {
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
            {
                Console.WriteLine(" - - - Detecting!");
                Rectangle[] targetSet = target.DetectMultiScale(gpuGray, 1.1, 10, System.Drawing.Size.Empty);
                Console.WriteLine(" - - - Detected :D :D :D Printing rectangle set: ");
                foreach (Rectangle f in targetSet)
                {
                    Console.WriteLine("Rectangle found at: " + f.ToString());
                    // Draw the detection in blue on the image.
                    image.Draw(f, new Bgr(System.Drawing.Color.Blue), 2);
                }
                Console.WriteLine(" - - - DONE");
            }
        }
    }
    else
    {
        using (HOGDescriptor des = new HOGDescriptor())
        {
            // NOTE(review): descriptor created/disposed with no SVM detector set —
            // the CPU fallback performs no classification.
        }
        Console.WriteLine(" - - - No CUDA :( ");
        Console.WriteLine(" - - - Devices available: " + GpuInvoke.GetCudaEnabledDeviceCount());
    }
}
/// <summary>
/// Converts a WPF frame to an EmguCV BGR image and runs GPU cascade detection
/// when CUDA is available, outlining each detection in blue.
/// NOTE(review): the non-CUDA branch only logs; its HOG descriptor is never used.
/// </summary>
/// <param name="frame">Incoming WPF bitmap frame.</param>
public void classify(BitmapSource frame)
{
    Console.WriteLine(relativeURI);

    // WPF BitmapSource -> GDI+ Bitmap, round-tripped through an in-memory BMP.
    Console.WriteLine(" - - - Converting Bitmap...");
    System.Drawing.Bitmap bitmapFrame;
    using (MemoryStream outStream = new MemoryStream())
    {
        BitmapEncoder enc = new BmpBitmapEncoder();
        enc.Frames.Add(BitmapFrame.Create(frame));
        enc.Save(outStream);
        bitmapFrame = new System.Drawing.Bitmap(outStream);
    }
    Console.WriteLine(" - - - Bitmap converted!");

    Image<Bgr, Byte> image = new Image<Bgr, Byte>(bitmapFrame);
    Console.WriteLine(" - - - Image set");
    Console.WriteLine(" - - - Check CUDA...");

    if (GpuInvoke.HasCuda)
    {
        Console.WriteLine(" - - - Has CUDA!");
        using (GpuCascadeClassifier target = new GpuCascadeClassifier(classifierURI))
        {
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
            {
                Console.WriteLine(" - - - Detecting!");
                Rectangle[] targetSet = target.DetectMultiScale(gpuGray, 1.1, 10, System.Drawing.Size.Empty);
                Console.WriteLine(" - - - Detected :D :D :D Printing rectangle set: ");
                foreach (Rectangle f in targetSet)
                {
                    Console.WriteLine("Rectangle found at: " + f.ToString());
                    // Outline the detection in blue.
                    image.Draw(f, new Bgr(System.Drawing.Color.Blue), 2);
                }
                Console.WriteLine(" - - - DONE");
            }
        }
    }
    else
    {
        using (HOGDescriptor des = new HOGDescriptor())
        {
            // NOTE(review): no SVM detector is ever set; this branch does nothing
            // besides logging below.
        }
        Console.WriteLine(" - - - No CUDA :( ");
        Console.WriteLine(" - - - Devices available: " + GpuInvoke.GetCudaEnabledDeviceCount());
    }
}
/// <summary>
/// Detects faces and, inside each face, eyes. Non-IOS builds with CUDA use the
/// GPU cascades; otherwise the CPU cascades run on an equalized grayscale copy.
/// Eye rectangles are offset into whole-image coordinates.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="faces">Receives the detected face rectangles.</param>
/// <param name="eyes">Receives the detected eye rectangles.</param>
private void DetectFaceAndEyes(Image<Bgr, byte> image, List<Rectangle> faces, List<Rectangle> eyes)
{
#if !IOS
    if (GpuInvoke.HasCuda)
    {
        using (var gpuFaceCascade = new GpuCascadeClassifier(_faceFileName))
        using (var gpuEyeCascade = new GpuCascadeClassifier(_eyeFileName))
        {
            using (var gpuImage = new GpuImage<Bgr, byte>(image))
            using (var gpuGray = gpuImage.Convert<Gray, byte>())
            {
                var faceHits = gpuFaceCascade.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                faces.AddRange(faceHits);

                foreach (var faceRect in faceHits)
                {
                    using (var faceView = gpuGray.GetSubRect(faceRect))
                    {
                        // A clone is required; detecting on the sub-rect view directly
                        // misbehaves (possible GpuCascadeClassifier bug in opencv).
                        using (var faceCopy = faceView.Clone())
                        {
                            foreach (var eyeHit in gpuEyeCascade.DetectMultiScale(faceCopy, 1.1, 10, Size.Empty))
                            {
                                var eyeRect = eyeHit;
                                eyeRect.Offset(faceRect.X, faceRect.Y); // face-relative -> image coordinates
                                eyes.Add(eyeRect);
                            }
                        }
                    }
                }
            }
        }
    }
    else
#endif
    {
        using (var faceCascade = new CascadeClassifier(_faceFileName))
        using (var eyeCascade = new CascadeClassifier(_eyeFileName))
        {
            using (var gray = image.Convert<Gray, byte>()) // cascade requires grayscale
            {
                gray._EqualizeHist(); // normalize brightness / increase contrast

                var faceHits = faceCascade.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
                faces.AddRange(faceHits);

                foreach (var faceRect in faceHits)
                {
                    // Restrict the eye search to the face region via ROI.
                    gray.ROI = faceRect;
                    var eyeHits = eyeCascade.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
                    gray.ROI = Rectangle.Empty;

                    foreach (var eyeHit in eyeHits)
                    {
                        var eyeRect = eyeHit;
                        eyeRect.Offset(faceRect.X, faceRect.Y);
                        eyes.Add(eyeRect);
                    }
                }
            }
        }
    }
}
/// <summary>
/// GPU face detection that also locates one eye on each side of the face.
/// The eye search is restricted to the upper-middle band of the face box,
/// split into two halves; the last match in each half wins.
/// NOTE(review): eyeR comes from the half nearer the image's left edge
/// (presumably the subject's right eye) — confirm the left/right convention.
/// </summary>
/// <param name="image">Input BGR image.</param>
/// <param name="faceFileName">Path to the face cascade XML.</param>
/// <param name="eyesFileName">Path to the eye cascade XML.</param>
/// <param name="facesList">Receives the detected face rectangles.</param>
/// <param name="eyeL">Left-eye rectangle (empty if none found).</param>
/// <param name="eyeR">Right-eye rectangle (empty if none found).</param>
/// <param name="detectionTime">Detection time in milliseconds.</param>
public static void detectFaceGPU(Image<Bgr, Byte> image, String faceFileName, String eyesFileName, List<Rectangle> facesList, out Rectangle eyeL, out Rectangle eyeR, out long detectionTime)
{
    Stopwatch timer;
    Rectangle leftEyeFound = new Rectangle(), rightEyeFound = new Rectangle();

    using (GpuCascadeClassifier faceCascade = new GpuCascadeClassifier(faceFileName))
    using (GpuCascadeClassifier eyesCascade = new GpuCascadeClassifier(eyesFileName))
    {
        timer = Stopwatch.StartNew();
        using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
        using (GpuImage<Gray, Byte> grayImage = gpuImage.Convert<Gray, Byte>())
        {
            Rectangle[] faceHits = faceCascade.DetectMultiScale(grayImage, 1.1, 10, new Size(image.Width / 8, image.Height / 8));
            facesList.AddRange(faceHits);

            foreach (Rectangle faceRect in faceHits)
            {
                #region Eye search regions
                // Band covering the second quarter of the face height, full width.
                Point eyesBoxTopLeft = new Point(faceRect.X, faceRect.Top + faceRect.Height / 4);
                Size eyesBoxArea = new Size(faceRect.Width, faceRect.Height / 4);
                Rectangle eyesBox = new Rectangle(eyesBoxTopLeft, eyesBoxArea);
                // Split the band into two halves, one per eye.
                Size rightEyeBoxArea = new Size(eyesBox.Width / 2, eyesBox.Height);
                Rectangle rightEyeBox = new Rectangle(eyesBoxTopLeft, rightEyeBoxArea);
                Size leftEyeBoxArea = new Size(eyesBox.Width / 2, eyesBox.Height);
                Rectangle leftEyeBox = new Rectangle(new Point(eyesBoxTopLeft.X + leftEyeBoxArea.Width, eyesBoxTopLeft.Y), leftEyeBoxArea);
                #endregion

                #region Right eye
                using (GpuImage<Gray, Byte> rightView = grayImage.GetSubRect(rightEyeBox))
                {
                    // Clone required: the cascade misbehaves on a sub-rect view.
                    using (GpuImage<Gray, Byte> rightCopy = rightView.Clone())
                    {
                        foreach (Rectangle hit in eyesCascade.DetectMultiScale(rightCopy, 1.1, 10, new Size(30, 30)))
                        {
                            Rectangle eyeRect = hit;
                            eyeRect.Offset(rightEyeBox.X, rightEyeBox.Y); // -> image coordinates
                            rightEyeFound = eyeRect; // last hit wins
                        }
                    }
                }
                #endregion

                #region Left eye
                using (GpuImage<Gray, Byte> leftView = grayImage.GetSubRect(leftEyeBox))
                {
                    using (GpuImage<Gray, Byte> leftCopy = leftView.Clone())
                    {
                        foreach (Rectangle hit in eyesCascade.DetectMultiScale(leftCopy, 1.1, 10, new Size(30, 30)))
                        {
                            Rectangle eyeRect = hit;
                            eyeRect.Offset(leftEyeBox.X, leftEyeBox.Y);
                            leftEyeFound = eyeRect; // last hit wins
                        }
                    }
                }
                #endregion
            }
        }
        timer.Stop();
    }

    detectionTime = timer.ElapsedMilliseconds;
    eyeL = leftEyeFound;
    eyeR = rightEyeFound;
}
/// <summary>
/// Demo entry point: loads "lena.jpg", detects faces (blue) and eyes (red), and shows
/// the annotated image together with the elapsed time. Uses the CUDA cascade path when
/// a CUDA device is present, otherwise the CPU Haar cascade path.
/// </summary>
static void Run()
{
    Image<Bgr, Byte> sourceImage = new Image<Bgr, byte>("lena.jpg"); // 8-bit BGR input
    String faceFileName = "haarcascade_frontalface_default.xml";
    String eyeFileName = "haarcascade_eye.xml";
    Stopwatch timer;

    if (GpuInvoke.HasCuda)
    {
        using (GpuCascadeClassifier faceDetector = new GpuCascadeClassifier(faceFileName))
        using (GpuCascadeClassifier eyeDetector = new GpuCascadeClassifier(eyeFileName))
        {
            timer = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuFrame = new GpuImage<Bgr, byte>(sourceImage))
            using (GpuImage<Gray, Byte> gpuGrayFrame = gpuFrame.Convert<Gray, Byte>())
            {
                foreach (Rectangle faceRect in faceDetector.DetectMultiScale(gpuGrayFrame, 1.1, 10, Size.Empty))
                {
                    // Outline the face in blue.
                    sourceImage.Draw(faceRect, new Bgr(Color.Blue), 2);

                    using (GpuImage<Gray, Byte> faceView = gpuGrayFrame.GetSubRect(faceRect))
                    // For some reason a clone is required.
                    // Might be a bug of GpuCascadeClassifier in opencv.
                    using (GpuImage<Gray, Byte> faceCopy = faceView.Clone())
                    {
                        foreach (Rectangle eyeRect in eyeDetector.DetectMultiScale(faceCopy, 1.1, 10, Size.Empty))
                        {
                            Rectangle shifted = eyeRect;
                            shifted.Offset(faceRect.X, faceRect.Y); // face-local -> image coordinates
                            sourceImage.Draw(shifted, new Bgr(Color.Red), 2);
                        }
                    }
                }
            }
            timer.Stop();
        }
    }
    else
    {
        using (HaarCascade faceDetector = new HaarCascade(faceFileName))
        using (HaarCascade eyeDetector = new HaarCascade(eyeFileName))
        {
            timer = Stopwatch.StartNew();
            using (Image<Gray, Byte> grayFrame = sourceImage.Convert<Gray, Byte>())
            {
                // Normalize brightness and increase contrast before detection.
                grayFrame._EqualizeHist();

                MCvAvgComp[] faceHits = faceDetector.Detect(
                    grayFrame, 1.1, 10,
                    Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                    new Size(20, 20));

                foreach (MCvAvgComp faceHit in faceHits)
                {
                    sourceImage.Draw(faceHit.rect, new Bgr(Color.Blue), 2);

                    // Restrict the eye search to the detected face, then clear the ROI.
                    grayFrame.ROI = faceHit.rect;
                    MCvAvgComp[] eyeHits = eyeDetector.Detect(
                        grayFrame, 1.1, 10,
                        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                        new Size(20, 20));
                    grayFrame.ROI = Rectangle.Empty;

                    foreach (MCvAvgComp eyeHit in eyeHits)
                    {
                        Rectangle shifted = eyeHit.rect;
                        shifted.Offset(faceHit.rect.X, faceHit.rect.Y); // face-local -> image coordinates
                        sourceImage.Draw(shifted, new Bgr(Color.Red), 2);
                    }
                }
            }
            timer.Stop();
        }
    }

    // Display the annotated image with the timing summary.
    ImageViewer.Show(sourceImage, String.Format(
        "Completed face and eye detection using {0} in {1} milliseconds",
        GpuInvoke.HasCuda ? "GPU" : "CPU",
        timer.ElapsedMilliseconds));
}
// Demo entry point: loads "lena.jpg", draws detected faces (blue) and eyes (red), and
// shows the annotated image with the elapsed time. CUDA cascade path when available,
// CPU Haar cascade path otherwise.
// NOTE(review): this appears to be a byte-for-byte duplicate of another Run() in this
// file (likely pasted from separate sample files) — consider consolidating.
static void Run()
{
    Image<Bgr, Byte> image = new Image<Bgr, byte>("lena.jpg"); //Read the files as an 8-bit Bgr image
    Stopwatch watch;
    String faceFileName = "haarcascade_frontalface_default.xml";
    String eyeFileName = "haarcascade_eye.xml";
    if (GpuInvoke.HasCuda)
    {
        // GPU path: CUDA cascade classifiers over a GPU-resident grayscale copy.
        using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
        using (GpuCascadeClassifier eye = new GpuCascadeClassifier(eyeFileName))
        {
            watch = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
            {
                Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                foreach (Rectangle f in faceRegion)
                {
                    //draw the face detected in the 0th (gray) channel with blue color
                    image.Draw(f, new Bgr(Color.Blue), 2);
                    using (GpuImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                    {
                        //For some reason a clone is required.
                        //Might be a bug of GpuCascadeClassifier in opencv
                        using (GpuImage<Gray, Byte> clone = faceImg.Clone())
                        {
                            Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);
                            foreach (Rectangle e in eyeRegion)
                            {
                                Rectangle eyeRect = e;
                                // Eye coordinates are face-local; shift into image coordinates.
                                eyeRect.Offset(f.X, f.Y);
                                image.Draw(eyeRect, new Bgr(Color.Red), 2);
                            }
                        }
                    }
                }
            }
            watch.Stop();
        }
    }
    else
    {
        //Read the HaarCascade objects
        using (HaarCascade face = new HaarCascade(faceFileName))
        using (HaarCascade eye = new HaarCascade(eyeFileName))
        {
            watch = Stopwatch.StartNew();
            using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to Grayscale
            {
                //normalizes brightness and increases contrast of the image
                gray._EqualizeHist();
                //Detect the faces from the gray scale image and store the locations as rectangle
                //The first dimensional is the channel
                //The second dimension is the index of the rectangle in the specific channel
                MCvAvgComp[] facesDetected = face.Detect(
                    gray, 1.1, 10,
                    Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                    new Size(20, 20));
                foreach (MCvAvgComp f in facesDetected)
                {
                    //draw the face detected in the 0th (gray) channel with blue color
                    image.Draw(f.rect, new Bgr(Color.Blue), 2);
                    //Set the region of interest on the faces
                    gray.ROI = f.rect;
                    MCvAvgComp[] eyesDetected = eye.Detect(
                        gray, 1.1, 10,
                        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                        new Size(20, 20));
                    // Clear the ROI so the next face search sees the whole frame.
                    gray.ROI = Rectangle.Empty;
                    foreach (MCvAvgComp e in eyesDetected)
                    {
                        Rectangle eyeRect = e.rect;
                        // ROI-local -> image coordinates.
                        eyeRect.Offset(f.rect.X, f.rect.Y);
                        image.Draw(eyeRect, new Bgr(Color.Red), 2);
                    }
                }
            }
            watch.Stop();
        }
    }
    //display the image
    ImageViewer.Show(image, String.Format(
        "Completed face and eye detection using {0} in {1} milliseconds",
        GpuInvoke.HasCuda ? "GPU": "CPU",
        watch.ElapsedMilliseconds));
}
private static Rectangle[] DetectFace(Image<Bgr, Byte> image, string faceFileName) { try { if (GpuInvoke.HasCuda) { using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName)) { using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image)) using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>()) { Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty); return faceRegion; } } } else { //Read the HaarCascade objects using (CascadeClassifier face = new CascadeClassifier(faceFileName)) { using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to Grayscale { //normalizes brightness and increases contrast of the image gray._EqualizeHist(); //Detect the faces from the gray scale image and store the locations as rectangle //The first dimensional is the channel //The second dimension is the index of the rectangle in the specific channel Rectangle[] facesDetected = face.DetectMultiScale( gray, 1.1, 10, new Size(filterWidth, filterHeight), Size.Empty); return facesDetected; } } } } catch (Exception ex) { throw ex; } }