/// <summary>
/// Detects features on a grayscale image.
/// </summary>
/// <param name="img"></param>
/// <param name="storage"></param>
/// <returns></returns>
protected override List<Face> DetectFeatures(IplImage img, CvMemStorage storage)
{
    //Determine minimum face size
    var minSize = (int)Math.Round((double)MinSizePercent / 100.0 * Math.Min(img.Width, img.Height));

    //Detect faces (frontal). TODO: side
    Stopwatch watch = Stopwatch.StartNew();
    CvAvgComp[] faces = Cv.HaarDetectObjects(img, Cascades["FaceCascade"], storage, 1.0850, MinConfidenceLevel, 0,
        new CvSize(minSize, minSize)).ToArrayAndDispose();
    watch.Stop();
    Debug.WriteLine("Face detection time = " + watch.ElapsedMilliseconds);

    //Sort by accuracy
    Array.Sort<CvAvgComp>(faces, CompareByNeighbors);

    //Convert into feature objects list
    List<Face> features = new List<Face>(faces.Length);
    foreach (CvAvgComp face in faces)
    {
        features.Add(new Face(PolygonMath.ScaleRect(face.Rect.ToRectangleF(), ExpandX, ExpandY), face.Neighbors));
    }

    //Unless we're below MinFaces, filter out the low confidence matches.
    while (features.Count > MinFaces && features[features.Count - 1].Accuracy < ConfidenceLevelThreshold)
    {
        features.RemoveAt(features.Count - 1);
    }

    //Never return more than [MaxFaces]
    return (features.Count > MaxFaces) ? features.GetRange(0, MaxFaces) : features;
}
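// NOTE: ToArrayAndDispose() above is an extension helper that isn't shown in this
// listing. The sketch below is a guess at a minimal implementation, not the plugin's
// actual code: it assumes CvSeq<CvAvgComp> exposes Total and a nullable indexer (as
// in OpenCvSharp's face-detection samples), and it treats disposal of the sequence
// wrapper defensively, since the unmanaged memory is owned by the CvMemStorage that
// was passed to Cv.HaarDetectObjects.

using System;
using OpenCvSharp;

internal static class CvSeqExtensions
{
    public static CvAvgComp[] ToArrayAndDispose(this CvSeq<CvAvgComp> seq)
    {
        try
        {
            //Copy the detection results into a managed array
            var result = new CvAvgComp[seq.Total];
            for (int i = 0; i < seq.Total; i++)
            {
                result[i] = seq[i].Value;
            }
            return result;
        }
        finally
        {
            //Release the managed wrapper if it is disposable; the underlying
            //storage is reclaimed when the owning CvMemStorage is cleared.
            (seq as IDisposable)?.Dispose();
        }
    }
}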
/// <summary>
/// Detects features on a grayscale image.
/// </summary>
/// <param name="img"></param>
/// <param name="storage"></param>
/// <returns></returns>
protected override List<Face> DetectFeatures(IplImage img, CvMemStorage storage)
{
    //Determine minimum face size
    var minSize = Math.Max(12, (int)Math.Round((double)MinSizePercent / 100.0 * Math.Min(img.Width, img.Height)));

    //Detect faces (frontal).
    Stopwatch watch = Stopwatch.StartNew();
    CvAvgComp[] faces = BorrowCascade("FaceCascadeAlt",
        c => Cv.HaarDetectObjects(img, c, storage, 1.0850, MinConfidenceLevel, HaarDetectionType.DoCannyPruning,
            new CvSize(minSize, minSize), new CvSize(0, 0)).ToArrayAndDispose());

    //Sort by accuracy
    Array.Sort<CvAvgComp>(faces, CompareByNeighbors);

    //Convert into feature objects list
    List<Face> features = new List<Face>(faces.Length);
    foreach (CvAvgComp face in faces)
    {
        features.Add(new Face(PolygonMath.ScaleRect(face.Rect.ToRectangleF(), ExpandX, ExpandY), face.Neighbors));
    }

    //Profile (side-view) detection doesn't add much, and would have to be deduplicated.
    //CvAvgComp[] profiles = BorrowCascade("FaceProfile",
    //    c => Cv.HaarDetectObjects(img, c, storage, 1.2, MinConfidenceLevel + 2,
    //        HaarDetectionType.FindBiggestObject | HaarDetectionType.DoRoughSearch | HaarDetectionType.DoCannyPruning,
    //        new CvSize(img.Width / 8, img.Height / 8), new CvSize(0, 0)).ToArrayAndDispose());
    //foreach (CvAvgComp face in profiles)
    //    features.Add(new Face(PolygonMath.ScaleRect(face.Rect.ToRectangleF(), ExpandX, ExpandY), face.Neighbors));

    //Test for eyes in the upper portion of each face wider than 20 pixels
    foreach (var face in features)
    {
        var w = (int)(face.X2 - face.X);
        var h = (int)((face.Y2 - face.Y) * 0.6);
        if (w > 20)
        {
            img.SetROI((int)face.X, (int)face.Y, w, h);
            storage.Clear();
            CvAvgComp[] eyes = BorrowCascade("Eye",
                c => Cv.HaarDetectObjects(img, c, storage, 1.0850, 4,
                    HaarDetectionType.FindBiggestObject | HaarDetectionType.DoRoughSearch,
                    new CvSize(4, 4), new CvSize(img.Width / 2, img.Height / 2)).ToArrayAndDispose());
            img.ResetROI(); //Restore the full-image ROI before the next face
            if (eyes.Length == 0)
            {
                //Halve the estimated accuracy if there are no eyes detected.
                //We never want to boost accuracy, because the walls have eyes.
                face.Accuracy = face.Accuracy / 2;
            }
        }
    }

    //Unless we're below MinFaces, filter out the low confidence matches.
    while (features.Count > MinFaces && features[features.Count - 1].Accuracy < ConfidenceLevelThreshold)
    {
        features.RemoveAt(features.Count - 1);
    }

    watch.Stop();
    totalTime += watch.ElapsedMilliseconds;
    count++;
    Debug.WriteLine($"Face detection time: {watch.ElapsedMilliseconds}ms (avg {totalTime / count}ms)");

    //Never return more than [MaxFaces]
    return (features.Count > MaxFaces) ? features.GetRange(0, MaxFaces) : features;
}
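// NOTE: BorrowCascade() is not shown above. The sketch below illustrates one way such a
// helper could work; the class name, file-map constructor, and single-lock strategy are
// assumptions for illustration, not the plugin's actual implementation. The idea is to
// load each Haar cascade lazily from its XML file and serialize access to it, because a
// CvHaarClassifierCascade instance is not safe to share across concurrent detections.

using System;
using System.Collections.Generic;
using OpenCvSharp;

internal sealed class CascadePool : IDisposable
{
    private readonly object sync = new object();
    private readonly IDictionary<string, string> files; //logical name -> cascade XML path
    private readonly Dictionary<string, CvHaarClassifierCascade> loaded =
        new Dictionary<string, CvHaarClassifierCascade>();

    public CascadePool(IDictionary<string, string> cascadeFiles)
    {
        files = cascadeFiles;
    }

    //Runs the callback against the named cascade, loading it from disk on first use.
    public T Borrow<T>(string name, Func<CvHaarClassifierCascade, T> action)
    {
        lock (sync)
        {
            CvHaarClassifierCascade cascade;
            if (!loaded.TryGetValue(name, out cascade))
            {
                cascade = CvHaarClassifierCascade.FromFile(files[name]);
                loaded[name] = cascade;
            }
            return action(cascade);
        }
    }

    public void Dispose()
    {
        lock (sync)
        {
            foreach (var cascade in loaded.Values) cascade.Dispose();
            loaded.Clear();
        }
    }
}

// Example usage (hypothetical file mapping): with such a pool in place, the detection
// call above would read:
//   pool.Borrow("FaceCascadeAlt", c => Cv.HaarDetectObjects(img, c, storage, 1.0850,
//       MinConfidenceLevel, HaarDetectionType.DoCannyPruning,
//       new CvSize(minSize, minSize), new CvSize(0, 0)).ToArrayAndDispose());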