public static void Detect(
    Mat image,
    Dictionary<String, String> facialFeatures,
    Dictionary<string, List<Rectangle>> recObjects,
    out long detectionTime)
{
    Stopwatch watch;

    //Many OpenCL functions require OpenCL compatible GPU devices.
    //As of OpenCV 3.0-alpha, OpenCV will crash if OpenCL is enabled and only an OpenCL compatible CPU device is present.
    //So we need to call CvInvoke.HaveOpenCLCompatibleGpuDevice instead of CvInvoke.HaveOpenCL (which also returns true on a system that only has CPU OpenCL devices).
    //CvInvoke.UseOpenCL = tryUseOpenCL && CvInvoke.HaveOpenCLCompatibleGpuDevice;

    //Read the HaarCascade objects
    using (CascadeClassifier face = new CascadeClassifier(facialFeatures["face"]))
    using (CascadeClassifier eyeR = new CascadeClassifier(facialFeatures["reye"]))
    using (CascadeClassifier eyeL = new CascadeClassifier(facialFeatures["leye"]))
    using (CascadeClassifier mouth = new CascadeClassifier(facialFeatures["mouth"]))
    using (CascadeClassifier nose = new CascadeClassifier(facialFeatures["nose"]))
    using (CascadeClassifier bodyU = new CascadeClassifier(facialFeatures["ubody"]))
    using (CascadeClassifier bodyL = new CascadeClassifier(facialFeatures["lbody"]))
    using (CascadeClassifier body = new CascadeClassifier(facialFeatures["body"]))
    {
        watch = Stopwatch.StartNew();

        //Disabled: HOG-based people detection experiment.
        /*List<Rectangle> regions = new List<Rectangle>();
        using (HOGDescriptor des = new HOGDescriptor())
        {
            des.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

            watch = Stopwatch.StartNew();
            Emgu.CV.Structure.MCvObjectDetection[] test = des.DetectMultiScale(image);

            foreach (MCvObjectDetection obj in test)
            {
                regions.Add(obj.Rect);
            }
            recObjects["people"].AddRange(regions);
        }*/

        using (UMat ugray = new UMat())
        {
            CvInvoke.CvtColor(image, ugray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);

            //normalizes brightness and increases contrast of the image
            CvInvoke.EqualizeHist(ugray, ugray);

            Rectangle[] bodiesDetectedU = bodyU.DetectMultiScale(
                ugray,
                1.01,
                25,
                new Size(600, 600));
            recObjects["upperBodies"].AddRange(bodiesDetectedU);

            Rectangle[] bodiesDetectedL = bodyL.DetectMultiScale(
                ugray,
                1.01,
                25,
                new Size(600, 600));
            recObjects["lowerBodies"].AddRange(bodiesDetectedL);

            Rectangle[] bodiesDetected = body.DetectMultiScale(
                ugray,
                1.01,
                25,
                new Size(600, 600));
            recObjects["bodies"].AddRange(bodiesDetected);

            //Detect the faces from the gray scale image and store the locations as rectangles.
            //If nothing is found, retry with a smaller scale factor (a finer scale pyramid) until it would drop below 1.0.
            Rectangle[] facesDetected = { };
            float reduceBy = 1.35f;
            while (facesDetected.Length == 0 && reduceBy >= 1f)
            {
                facesDetected = face.DetectMultiScale(
                    ugray,
                    reduceBy,
                    10,
                    new Size(50, 50));
                reduceBy -= .1f;
            }
            recObjects["faces"].AddRange(facesDetected);

            foreach (Rectangle f in facesDetected)
            {
                //rightHalf covers the left half of the face in image coordinates (the subject's right side);
                //leftHalf covers the right half (the subject's left side).
                Rectangle rightHalf = new Rectangle(f.X, f.Y, f.Width / 2, f.Height);
                Rectangle leftHalf = new Rectangle(f.X + (f.Width / 2), f.Y, f.Width / 2, f.Height);

                //Get the region of interest on the faces
                using (UMat faceRegion = new UMat(ugray, rightHalf))
                {
                    Rectangle[] eyesDetectedR = eyeR.DetectMultiScale(
                        faceRegion,
                        1.05,
                        10,
                        new Size(20, 20));

                    //Keep the largest candidate, offset back into whole-image coordinates.
                    Rectangle eyeRect = new Rectangle();
                    foreach (Rectangle e in eyesDetectedR)
                    {
                        if (e.Height >= eyeRect.Height && e.Width >= eyeRect.Width)
                        {
                            eyeRect = e;
                            eyeRect.Offset(rightHalf.X, rightHalf.Y);
                        }
                    }

                    //Disabled: code that grew the rectangle into a square.
                    /*if (eyeRect.Width > eyeRect.Height)
                    {
                        bool flip = true;
                        while (eyeRect.Width > eyeRect.Height)
                        {
                            if (flip)
                            {
                                eyeRect.Height++;
                                flip = false;
                            }
                            else
                            {
                                eyeRect.Y--;
                                flip = true;
                            }
                        }
                    }
                    else if (eyeRect.Width < eyeRect.Height)
                    {
                        bool flip = true;
                        while (eyeRect.Width < eyeRect.Height)
                        {
                            if (flip)
                            {
                                eyeRect.Width++;
                                flip = false;
                            }
                            else
                            {
                                eyeRect.X--;
                                flip = true;
                            }
                        }
                    }*/

                    recObjects["rightEyes"].Add(eyeRect);
                }

                using (UMat faceRegion = new UMat(ugray, leftHalf))
                {
                    Rectangle[] eyesDetectedL = eyeL.DetectMultiScale(
                        faceRegion,
                        1.01,
                        5,
                        new Size(10, 10));

                    Rectangle eyeRect = new Rectangle();
                    foreach (Rectangle e in eyesDetectedL)
                    {
                        if (e.Height >= eyeRect.Height && e.Width >= eyeRect.Width)
                        {
                            eyeRect = e;
                            eyeRect.Offset(leftHalf.X, leftHalf.Y);
                        }
                    }

                    recObjects["leftEyes"].Add(eyeRect);
                }

                using (UMat faceRegion = new UMat(ugray, f))
                {
                    {
                        Rectangle[] mouthDetected = mouth.DetectMultiScale(
                            faceRegion,
                            1.05,
                            30,
                            new Size(20, 20));

                        //Compare candidates against the eye rectangles just added for this face
                        //(the most recent entries), not the first face's eyes.
                        Rectangle rightEye = recObjects["rightEyes"][recObjects["rightEyes"].Count - 1];
                        Rectangle leftEye = recObjects["leftEyes"][recObjects["leftEyes"].Count - 1];

                        Rectangle mouthRect = new Rectangle();
                        foreach (Rectangle e in mouthDetected)
                        {
                            //Work on a copy: the foreach iteration variable cannot be reliably mutated in place.
                            Rectangle candidate = e;
                            candidate.Offset(f.X, f.Y);

                            bool intersectsRightEye = candidate.IntersectsWith(rightEye);
                            bool intersectsLeftEye = candidate.IntersectsWith(leftEye);

                            if (candidate.Height >= mouthRect.Height &&
                                candidate.Width >= mouthRect.Width &&
                                !(intersectsRightEye || intersectsLeftEye))
                            {
                                mouthRect = candidate;
                            }
                        }

                        //Disabled: code that grew the rectangle into a square.
                        /*if (mouthRect.Width > mouthRect.Height)
                        {
                            bool flip = true;
                            while (mouthRect.Width > mouthRect.Height)
                            {
                                if (flip)
                                {
                                    mouthRect.Height++;
                                    flip = false;
                                }
                                else
                                {
                                    mouthRect.Y--;
                                    flip = true;
                                }
                            }
                        }
                        else if (mouthRect.Width < mouthRect.Height)
                        {
                            bool flip = true;
                            while (mouthRect.Width < mouthRect.Height)
                            {
                                if (flip)
                                {
                                    mouthRect.Width++;
                                    flip = false;
                                }
                                else
                                {
                                    mouthRect.X--;
                                    flip = true;
                                }
                            }
                        }*/

                        recObjects["mouths"].Add(mouthRect);
                    }

                    {
                        Rectangle[] noseDetected = nose.DetectMultiScale(
                            faceRegion,
                            1.05,
                            10,
                            new Size(50, 50));

                        Rectangle noseRect = new Rectangle();
                        foreach (Rectangle e in noseDetected)
                        {
                            if (e.Height >= noseRect.Height && e.Width >= noseRect.Width)
                            {
                                noseRect = e;
                                noseRect.Offset(f.X, f.Y);
                            }
                        }

                        //Disabled: code that grew the rectangle into a square.
                        /*if (noseRect.Width > noseRect.Height)
                        {
                            bool flip = true;
                            while (noseRect.Width > noseRect.Height)
                            {
                                if (flip)
                                {
                                    noseRect.Height++;
                                    flip = false;
                                }
                                else
                                {
                                    noseRect.Y--;
                                    flip = true;
                                }
                            }
                        }
                        else if (noseRect.Width < noseRect.Height)
                        {
                            bool flip = true;
                            while (noseRect.Width < noseRect.Height)
                            {
                                if (flip)
                                {
                                    noseRect.Width++;
                                    flip = false;
                                }
                                else
                                {
                                    noseRect.X--;
                                    flip = true;
                                }
                            }
                        }*/

                        recObjects["noses"].Add(noseRect);
                    }
                }
            }
        }
        watch.Stop();
    }
    detectionTime = watch.ElapsedMilliseconds;
}
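
//Illustrative usage sketch for the dictionary-based Detect overload above; it is not part of the
//original code. The cascade file names are the stock OpenCV haarcascade XML files and the image
//path is a placeholder, so both are assumptions. The dictionary keys are the ones the method
//itself reads ("face", "reye", "leye", "mouth", "nose", "ubody", "lbody", "body") and writes
//("upperBodies", "lowerBodies", "bodies", "faces", "rightEyes", "leftEyes", "mouths", "noses").
public static void DetectUsageSketch()
{
    Dictionary<String, String> facialFeatures = new Dictionary<String, String>
    {
        { "face",  "haarcascade_frontalface_default.xml" },
        { "reye",  "haarcascade_righteye_2splits.xml" },
        { "leye",  "haarcascade_lefteye_2splits.xml" },
        { "mouth", "haarcascade_mcs_mouth.xml" },
        { "nose",  "haarcascade_mcs_nose.xml" },
        { "ubody", "haarcascade_upperbody.xml" },
        { "lbody", "haarcascade_lowerbody.xml" },
        { "body",  "haarcascade_fullbody.xml" }
    };

    //Every list the method appends to must already exist in the result dictionary.
    Dictionary<string, List<Rectangle>> recObjects = new Dictionary<string, List<Rectangle>>();
    foreach (string key in new[] { "upperBodies", "lowerBodies", "bodies", "faces",
                                   "rightEyes", "leftEyes", "mouths", "noses" })
    {
        recObjects[key] = new List<Rectangle>();
    }

    long detectionTime;
    //Placeholder path; ImreadModes is the Emgu CV 3.1+ enum name (older releases call it LoadImageType).
    using (Mat image = CvInvoke.Imread("people.jpg", Emgu.CV.CvEnum.ImreadModes.Color))
    {
        Detect(image, facialFeatures, recObjects, out detectionTime);
    }

    Console.WriteLine("Found {0} face(s) in {1} ms", recObjects["faces"].Count, detectionTime);
}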
public static void Detect(
    Image<Bgr, Byte> image, String faceFileName, String eyeFileName,
    List<Rectangle> faces, List<Rectangle> eyes,
    out long detectionTime)
{
    Stopwatch watch;

    if (GpuInvoke.HasCuda)
    {
        using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
        using (GpuCascadeClassifier eye = new GpuCascadeClassifier(eyeFileName))
        {
            watch = Stopwatch.StartNew();
            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
            {
                Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                faces.AddRange(faceRegion);
                foreach (Rectangle f in faceRegion)
                {
                    using (GpuImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                    {
                        //For some reason a clone is required.
                        //Might be a bug of GpuCascadeClassifier in OpenCV.
                        using (GpuImage<Gray, Byte> clone = faceImg.Clone())
                        {
                            Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);
                            foreach (Rectangle e in eyeRegion)
                            {
                                Rectangle eyeRect = e;
                                eyeRect.Offset(f.X, f.Y);
                                eyes.Add(eyeRect);
                            }
                        }
                    }
                }
            }
            watch.Stop();
        }
    }
    else
    {
        //Read the HaarCascade objects
        using (CascadeClassifier face = new CascadeClassifier(faceFileName))
        //using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
        {
            watch = Stopwatch.StartNew();
            using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to grayscale
            {
                //normalizes brightness and increases contrast of the image
                gray._EqualizeHist();

                //Detect the faces from the gray scale image and store the locations as rectangles
                Rectangle[] facesDetected = face.DetectMultiScale(
                    gray,
                    1.1,
                    10,
                    new Size(30, 30),
                    Size.Empty);
                faces.AddRange(facesDetected);

                //foreach (Rectangle f in facesDetected)
                //{
                //    //Set the region of interest on the faces
                //    gray.ROI = f;
                //    Rectangle[] eyesDetected = eye.DetectMultiScale(
                //        gray,
                //        1.1,
                //        10,
                //        new Size(20, 20),
                //        Size.Empty);
                //    gray.ROI = Rectangle.Empty;
                //    foreach (Rectangle e in eyesDetected)
                //    {
                //        Rectangle eyeRect = e;
                //        eyeRect.Offset(f.X, f.Y);
                //        eyes.Add(eyeRect);
                //    }
                //}
            }
            watch.Stop();
        }
    }
    detectionTime = watch.ElapsedMilliseconds;
}
public static void Detect(
    IInputArray image, String faceFileName, String eyeFileName,
    List<Rectangle> faces, List<Rectangle> eyes,
    out long detectionTime)
{
    Stopwatch watch;

    using (InputArray iaImage = image.GetInputArray())
    {
#if !(__IOS__ || NETFX_CORE)
        if (iaImage.Kind == InputArray.Type.CudaGpuMat && CudaInvoke.HasCuda)
        {
            using (CudaCascadeClassifier face = new CudaCascadeClassifier(faceFileName))
            {
                face.ScaleFactor = 1.1;
                face.MinNeighbors = 10;
                face.MinObjectSize = Size.Empty;

                watch = Stopwatch.StartNew();
                using (CudaImage<Bgr, Byte> gpuImage = new CudaImage<Bgr, byte>(image))
                using (CudaImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
                using (GpuMat region = new GpuMat())
                {
                    face.DetectMultiScale(gpuGray, region);
                    Rectangle[] faceRegion = face.Convert(region);
                    faces.AddRange(faceRegion);
                }
                watch.Stop();
            }
        }
        else
#endif
        {
            //Read the HaarCascade objects
            using (CascadeClassifier face = new CascadeClassifier(faceFileName))
            {
                watch = Stopwatch.StartNew();
                using (UMat ugray = new UMat())
                {
                    CvInvoke.CvtColor(image, ugray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);

                    //normalizes brightness and increases contrast of the image
                    CvInvoke.EqualizeHist(ugray, ugray);

                    //Detect the faces from the gray scale image and store the locations as rectangles
                    Rectangle[] facesDetected = face.DetectMultiScale(
                        ugray,
                        1.1,
                        10,
                        new Size(20, 20));

                    faces.AddRange(facesDetected);
                }
                watch.Stop();
            }
        }
        detectionTime = watch.ElapsedMilliseconds;
    }
}
public static void Detect(
    IInputArray image, String faceFileName, String eyeFileName,
    List<Rectangle> faces, List<Rectangle> eyes,
    out long detectionTime)
{
    Stopwatch watch;

    using (InputArray iaImage = image.GetInputArray())
    {
        if (iaImage.Kind == InputArray.Type.CudaGpuMat && CudaInvoke.HasCuda)
        {
            using (CudaCascadeClassifier face = new CudaCascadeClassifier(faceFileName))
            using (CudaCascadeClassifier eye = new CudaCascadeClassifier(eyeFileName))
            {
                face.ScaleFactor = 1.1;
                face.MinNeighbors = 10;
                face.MinObjectSize = Size.Empty;

                eye.ScaleFactor = 1.1;
                eye.MinNeighbors = 10;
                eye.MinObjectSize = Size.Empty;

                watch = Stopwatch.StartNew();
                using (CudaImage<Bgr, Byte> gpuImage = new CudaImage<Bgr, byte>(image))
                using (CudaImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
                using (GpuMat region = new GpuMat())
                {
                    face.DetectMultiScale(gpuGray, region);
                    Rectangle[] faceRegion = face.Convert(region);
                    faces.AddRange(faceRegion);

                    foreach (Rectangle f in faceRegion)
                    {
                        using (CudaImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                        {
                            //For some reason a clone is required.
                            //Might be a bug of CudaCascadeClassifier in OpenCV.
                            using (CudaImage<Gray, Byte> clone = faceImg.Clone(null))
                            using (GpuMat eyeRegionMat = new GpuMat())
                            {
                                eye.DetectMultiScale(clone, eyeRegionMat);
                                Rectangle[] eyeRegion = eye.Convert(eyeRegionMat);
                                foreach (Rectangle e in eyeRegion)
                                {
                                    Rectangle eyeRect = e;
                                    eyeRect.Offset(f.X, f.Y);
                                    eyes.Add(eyeRect);
                                }
                            }
                        }
                    }
                }
                watch.Stop();
            }
        }
        else
        {
            //Read the HaarCascade objects
            using (CascadeClassifier face = new CascadeClassifier(faceFileName))
            using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
            {
                watch = Stopwatch.StartNew();
                using (UMat ugray = new UMat())
                {
                    CvInvoke.CvtColor(image, ugray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);

                    //normalizes brightness and increases contrast of the image
                    CvInvoke.EqualizeHist(ugray, ugray);

                    //Detect the faces from the gray scale image and store the locations as rectangles
                    Rectangle[] facesDetected = face.DetectMultiScale(
                        ugray,
                        1.1,
                        10,
                        new Size(20, 20));

                    faces.AddRange(facesDetected);

                    foreach (Rectangle f in facesDetected)
                    {
                        //Get the region of interest on the faces
                        using (UMat faceRegion = new UMat(ugray, f))
                        {
                            Rectangle[] eyesDetected = eye.DetectMultiScale(
                                faceRegion,
                                1.1,
                                10,
                                new Size(20, 20));

                            foreach (Rectangle e in eyesDetected)
                            {
                                Rectangle eyeRect = e;
                                eyeRect.Offset(f.X, f.Y);
                                eyes.Add(eyeRect);
                            }
                        }
                    }
                }
                watch.Stop();
            }
        }
        detectionTime = watch.ElapsedMilliseconds;
    }
}
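
//Illustrative usage sketch for the IInputArray overload above; it is not part of the original code.
//The cascade file names and the image paths are assumptions: "haarcascade_frontalface_default.xml"
//and "haarcascade_eye.xml" are the cascades typically shipped with the Emgu CV face detection sample.
public static void DetectAndDrawSketch()
{
    List<Rectangle> faces = new List<Rectangle>();
    List<Rectangle> eyes = new List<Rectangle>();
    long detectionTime;

    //Placeholder path; ImreadModes is the Emgu CV 3.1+ enum name (older releases call it LoadImageType).
    using (Mat image = CvInvoke.Imread("lena.jpg", Emgu.CV.CvEnum.ImreadModes.Color))
    {
        Detect(image, "haarcascade_frontalface_default.xml", "haarcascade_eye.xml",
               faces, eyes, out detectionTime);

        //Draw the detected regions back onto the original image.
        foreach (Rectangle f in faces)
            CvInvoke.Rectangle(image, f, new Bgr(Color.Red).MCvScalar, 2);
        foreach (Rectangle e in eyes)
            CvInvoke.Rectangle(image, e, new Bgr(Color.Blue).MCvScalar, 2);

        CvInvoke.Imwrite("result.jpg", image); //placeholder output path
    }
}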