/// <summary>
/// Detects exactly one face in the given pixel buffer and returns it trimmed
/// and resized to <paramref name="outputSize"/>; returns null unless exactly
/// one face is found.
/// </summary>
/// <param name="pixels">Raw pixel data for the input image.</param>
/// <param name="initialSize">Dimensions of the input image.</param>
/// <param name="outputSize">Dimensions of the returned face crop.</param>
/// <param name="haarcascadePath">Path to the Haar cascade XML file.</param>
/// <returns>The trimmed, resized face, or null when not exactly one face was found.</returns>
public static Image<Gray, byte> DetectAndTrimFace(int[] pixels, Size initialSize, Size outputSize, String haarcascadePath)
{
    // BUG FIX: the original passed initialSize.Width for BOTH dimensions,
    // ignoring the height of the incoming image.
    using (var inBitmap = ConvertToBitmap(pixels, initialSize.Width, initialSize.Height))
    // FIX: dispose on every path — the original leaked grayframe/haar when
    // the face count was not exactly one.
    using (var grayframe = new Image<Gray, byte>(inBitmap))
    using (var haar = new HaarCascade(haarcascadePath))
    {
        //for testing purposes the picture can be saved to a folder:
        //inBitmap.Save(@"E:\data\phototest\received.bmp");
        var faces = haar.Detect(grayframe, 1.2, 3, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(30, 30));
        // Only an unambiguous single-face result is accepted.
        if (faces.Length != 1)
        {
            return null;
        }
        var face = faces[0];
        // Copy() detaches the pixel data, so disposing grayframe afterwards is safe.
        return grayframe.Copy(face.rect).Resize(outputSize.Width, outputSize.Height, INTER.CV_INTER_CUBIC);
    }
}
/// <summary>
/// Grabs a camera frame each timer tick and overlays rectangles for every
/// feature detector selected via the checkboxes, then shows the result.
/// </summary>
private void timer1_Tick(object sender, EventArgs e)
{
    original = capture.QueryFrame();
    gray = original.Convert<Gray, byte>();
    if (cbFace.Checked)
    {
        foreach (MCvAvgComp item in faceCascade.Detect(gray))
        {
            original.Draw(item.rect, new Bgr(Color.Lime), 5);
        }
    }
    if (cbNose.Checked)
    {
        // BUG FIX: the original converted the unrelated 'edit' image into
        // 'gray' (clobbering the current frame's grayscale) and drew the
        // nose rectangles on 'edit', which is never displayed. Detect on the
        // grayscale of the current frame and draw on 'original' like the
        // other two branches so the result is actually visible.
        foreach (var nose in noseCascade.Detect(gray))
        {
            original.Draw(nose.rect, new Bgr(Color.Red), 5);
        }
    }
    if (cbEyes.Checked)
    {
        foreach (MCvAvgComp item in eyeCascade.Detect(gray))
        {
            original.Draw(item.rect, new Bgr(Color.White), 5);
        }
    }
    pictureBox.Image = original.Bitmap;
}
/// <summary>
/// Timer callback: captures a frame, outlines every detected face, and —
/// while the "is home" flag is still "false" — fires the home command, logs
/// the recognition, and saves a snapshot. The annotated frame is then pushed
/// to the preview control.
/// </summary>
private void facialRecTimer_Tick(object sender, EventArgs e)
{
    if (JarvisData.webcam == "false")
    {
        return;
    }
    currentFrame = capture.QueryFrame(); //Load the Image
    if (currentFrame == null)
    {
        return;
    }
    gray = currentFrame.Convert<Gray, Byte>();
    foreach (MCvAvgComp face in haarCascade.Detect(gray))
    {
        // Outline the face in bright green.
        currentFrame.Draw(face.rect, new Bgr(0, double.MaxValue, 0), 3);
        if (JarvisData.isHome == "false")
        {
            command.processCommand("nick home", this);
            this.log.Items.Add("Nick Recognized");
            currentFrame.Save(@"D:\xampp\htdocs\jarvis\imgs\img.jpeg");
        }
    }
    imageBox.Source = ToBitmapSource(currentFrame);
}
/// <summary>
/// Searches the upper band of the detected face for eyes. First verifies
/// that an eye pair exists in the band, then runs the single-eye cascade
/// over the same band and returns its hits.
/// </summary>
/// <param name="data">Frame data holding the gray frame and the detected face.</param>
/// <returns>Single-eye detections within the eye band.</returns>
/// <exception cref="NoEyesDetectedException">No eye pair found in the search band.</exception>
public MCvAvgComp[] DetectEyes(FrameData data)
{
    MCvAvgComp face = data.Face;
    // Eyes are expected in a band starting 3/11 of the way down the face,
    // spanning another 3/11 of its height.
    Int32 yCoordStartSearchEyes = face.rect.Top + (face.rect.Height * 3 / 11);
    Point startingPointSearchEyes = new Point(face.rect.X, yCoordStartSearchEyes);
    Size searchEyesAreaSize = new Size(face.rect.Width, (face.rect.Height * 3 / 11));
    Rectangle possibleROI_eyes = new Rectangle(startingPointSearchEyes, searchEyesAreaSize);

    data.GrayFrame.ROI = possibleROI_eyes;
    MCvAvgComp[] eyesDetected = _eyes.Detect(data.GrayFrame, 1.15, 3, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_ROUGH_SEARCH, new Size(20, 20));
    data.GrayFrame.ROI = Rectangle.Empty;

    if (eyesDetected.Length != 0)
    {
        // CLEANUP: the original computed the first eye's absolute rectangle,
        // assigned it to GrayFrame.ROI, and then immediately overwrote the
        // ROI with possibleROI_eyes — a dead store. Only the band ROI is
        // actually used by the single-eye pass below, so the dead code was
        // removed.
        data.GrayFrame.ROI = possibleROI_eyes;
        data.EyesROI = possibleROI_eyes;
        MCvAvgComp[] singleEyesDetected = _singleEyes.Detect(data.GrayFrame, 1.5, 3, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_ROUGH_SEARCH, new Size(20, 20));
        data.GrayFrame.ROI = Rectangle.Empty;
        return singleEyesDetected;
    }
    throw new NoEyesDetectedException();
}
/// <summary>
/// Detects a mouth inside the lower portion of the face stored in
/// <paramref name="data"/>. On success the frame ROI is left set to the
/// absolute mouth rectangle.
/// NOTE(review): unlike the failure path, the ROI is NOT reset to Empty
/// before returning — presumably a downstream consumer relies on it; confirm.
/// </summary>
/// <param name="data">Frame data holding the gray frame and the detected face.</param>
/// <returns>The first detected mouth component.</returns>
/// <exception cref="NoMouthDetectedException">Thrown when no usable mouth is found.</exception>
public MCvAvgComp DetectMouth(FrameData data)
{
    MCvAvgComp face = data.Face;
    // The mouth search band starts 7/11 of the way down the face and covers
    // the remaining 4/11 of its height.
    Int32 yCoordStartSearchMouth = face.rect.Top + (face.rect.Height * 7 / 11);
    Point startingPointSearchMouth = new Point(face.rect.X, yCoordStartSearchMouth);
    Size searchMouthAreaSize = new Size(face.rect.Width, (face.rect.Height * 4 / 11));
    Rectangle possibleROI_mouth = new Rectangle(startingPointSearchMouth, searchMouthAreaSize);
    // Restrict detection to the band and remember it on the frame data.
    data.GrayFrame.ROI = possibleROI_mouth;
    data.MouthROI = possibleROI_mouth;
    MCvAvgComp[] mouthDetected = _mouth.Detect(data.GrayFrame, 1.15, 3, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_ROUGH_SEARCH, new Size(30, 20));
    data.GrayFrame.ROI = Rectangle.Empty;
    if (mouthDetected.Length > 0)
    {
        // Ignore degenerate (zero-sized) hits.
        if (mouthDetected[0].rect.Height != 0 && mouthDetected[0].rect.Width != 0)
        {
            var mouthRect = mouthDetected[0].rect;
            // Convert the band-relative rectangle to frame coordinates.
            mouthRect.Offset(possibleROI_mouth.X, possibleROI_mouth.Y);
            data.GrayFrame.ROI = mouthRect;
            return(mouthDetected[0]);
        }
    }
    throw new NoMouthDetectedException();
}
/// <summary>
/// Runs the enabled cascade detectors over the current frame and shows a
/// copy with the hits outlined; does nothing when no frame is loaded.
/// </summary>
private void detect()
{
    if (original == null)
    {
        return;
    }
    copy = original.Clone();
    gray = new Image <Gray, byte>(original.Width, original.Height);
    CvInvoke.cvCvtColor(original, gray, COLOR_CONVERSION.CV_BGR2GRAY);
    if (faceDetect)
    {
        foreach (MCvAvgComp hit in face.Detect(gray))
        {
            copy.Draw(hit.rect, new Bgr(Color.LightGray), 10);
        }
    }
    if (eyeDetect)
    {
        foreach (MCvAvgComp hit in eye.Detect(gray))
        {
            copy.Draw(hit.rect, new Bgr(Color.LightSalmon), 10);
        }
    }
    pictureBox.Image = copy.ToBitmap();
}
/// <summary>
/// Grabs the next Kinect color frame, detects faces, and pastes a scaled
/// "happy-man.png" overlay (black pixels treated as transparent) centered
/// over each face, then dispatches the composited image to the UI thread.
/// NOTE(review): 'frame' and 'happyMan' are never disposed — confirm whether
/// that is acceptable at this call rate.
/// </summary>
public void Pulse()
{
    using (HaarCascade face = new HaarCascade(faceFileName))
    {
        var frame = _kinectSensor.ColorStream.OpenNextFrame(100);
        var image = frame.ToOpenCVImage <Rgb, Byte>();
        using (Image <Gray, Byte> gray = image.Convert <Gray, Byte>()) //Convert it to Grayscale
        {
            //normalizes brightness and increases contrast of the image
            gray._EqualizeHist();
            //Detect the faces from the gray scale image and store the locations as rectangle
            //The first dimensional is the channel
            //The second dimension is the index of the rectangle in the specific channel
            MCvAvgComp[] facesDetected = face.Detect(
                gray,
                1.1,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new System.Drawing.Size(20, 20));
            Image <Rgb, Byte> happyMan = new Image <Rgb, byte>("happy-man.png");
            foreach (MCvAvgComp f in facesDetected)
            {
                //image.Draw(f.rect, new Rgb(System.Drawing.Color.Blue), 2);
                // Overlay rectangle is centered on the face but twice its size.
                var rect = new System.Drawing.Rectangle(f.rect.X - f.rect.Width / 2
                                                        , f.rect.Y - f.rect.Height / 2
                                                        , f.rect.Width * 2
                                                        , f.rect.Height * 2);
                var newImage = happyMan.Resize(rect.Width, rect.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
                // Per-pixel copy of the overlay into the frame.
                for (int i = 0; i < (rect.Height); i++)
                {
                    for (int j = 0; j < (rect.Width); j++)
                    {
                        // mask black image background with 'or' logic
                        if (newImage[i, j].Blue != 0 || newImage[i, j].Red != 0 || newImage[i, j].Green != 0)
                        {
                            if (j + rect.X < image.Width && j + rect.X > 0)
                            {
                                var dot = newImage[i, j];
                                // additional safety logic
                                // don't attempt if we are outside the bounds of the image
                                // NOTE(review): these guards use '>' rather than '>=',
                                // so an index equal to Height/Width can still pass —
                                // verify against the Image indexer's bounds.
                                if (i + rect.Y > image.Height || i + rect.Y < 0 || j + rect.X > image.Width || j + rect.X < 0)
                                {
                                    continue;
                                }
                                image[i + rect.Y, j + rect.X] = dot;
                            }
                        }
                    }
                }
            }
            // Hand the finished frame to the UI thread.
            Dispatcher.BeginInvoke(new Action(() => { rgbImage.Source = image.ToBitmapSource(); }));
        }
    }
}
/// <summary>
/// Returns true when the mouth cascade fires anywhere in the bottom third
/// of the supplied face image.
/// NOTE: The idea is to eventually return true only while the user is
/// speaking (mouth open).
/// </summary>
/// <param name="face">Grayscale face crop to inspect.</param>
/// <returns>True if at least one mouth candidate was detected.</returns>
public bool IsMouthDetected(Image <Gray, byte> face)
{
    // The mouth can only be in the lower third of a face crop.
    var lowerThird = new Rectangle(0, face.Height * 2 / 3, face.Width, face.Height / 3);
    var searchArea = face.GetSubRect(lowerThird);
    return Mouth.Detect(searchArea, 1.2, 10, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(5, 5)).Any();
}
/// <summary>
/// Detects faces on the current grayscale frame and outlines each one on
/// the color frame.
/// </summary>
private void faceDetection()
{
    var hits = cascadeFace.Detect(grayscale, 1.1, 3, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(50, 50));
    foreach (var hit in hits)
    {
        original.Draw(hit.rect, new Bgr(Color.Aquamarine), 5);
    }
}
/// <summary>
/// Runs a Haar cascade over <paramref name="image"/> and returns the hit
/// rectangles, preferring the CUDA implementation when available.
/// </summary>
/// <param name="image">Color input image.</param>
/// <param name="cascadeFile">Cascade file name resolved via CascadeManager.</param>
/// <param name="scaleFactor">Pyramid scale step between detection passes.</param>
/// <param name="minNeighbors">Minimum neighbor count to accept a hit.</param>
/// <param name="detectionType">Haar pruning mode (applied on the CPU path only).</param>
/// <param name="minSize">Minimum window edge; 0 means unconstrained.</param>
/// <param name="maxSize">Maximum window edge; 0 means unconstrained (CPU path only).</param>
/// <returns>Detected object rectangles.</returns>
public static Rectangle[] Detect(Image <Bgr, Byte> image, string cascadeFile,
                                 double scaleFactor = 1.3, int minNeighbors = 10,
                                 Emgu.CV.CvEnum.HAAR_DETECTION_TYPE detectionType = Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                 int minSize = 20, int maxSize = 0)
{
    string cascadeFilePath = CascadeManager.GetCascade(cascadeFile);

    // A zero edge length means "no constraint" for the underlying API.
    Size minimumSize = minSize == 0 ? Size.Empty : new Size(minSize, minSize);
    Size maximumSize = maxSize == 0 ? Size.Empty : new Size(maxSize, maxSize);

    if (GpuInvoke.HasCuda)
    {
        // GPU path — note that detectionType and maximumSize are not passed
        // to the GPU classifier here.
        using (GpuCascadeClassifier cascade = new GpuCascadeClassifier(cascadeFilePath))
        using (GpuImage <Bgr, Byte> gpuImage = new GpuImage <Bgr, byte>(image))
        using (GpuImage <Gray, Byte> gpuGray = gpuImage.Convert <Gray, Byte>())
        {
            return cascade.DetectMultiScale(gpuGray, scaleFactor, minNeighbors, minimumSize);
        }
    }

    // CPU fallback.
    using (HaarCascade cascade = new HaarCascade(cascadeFilePath))
    using (Image <Gray, Byte> gray = image.Convert <Gray, Byte>())
    {
        gray._EqualizeHist();
        MCvAvgComp[] detected = cascade.Detect(gray, scaleFactor, minNeighbors, detectionType, minimumSize, maximumSize);
        return detected.Select(x => x.rect).ToArray();
    }
}
/// <summary>
/// Runs the selected detectors over the loaded image and shows every chosen
/// overlay at once. BUG FIX: the original cloned 'ori' inside each checkbox
/// branch, so enabling several detectors discarded all but the last branch's
/// drawings; the clone is now taken once so overlays accumulate.
/// </summary>
private void button2_Click(object sender, EventArgs e)
{
    if (!checkBox1.Checked && !checkBox2.Checked && !checkBox3.Checked && !checkBox4.Checked)
    {
        label1.Text = "Please choose the object detection type";
        return;
    }
    label1.Text = "";
    CvInvoke.cvCvtColor(ori, gray, COLOR_CONVERSION.CV_BGR2GRAY);
    // One clone shared by every branch so all selected overlays are drawn.
    edit = ori.Clone();
    if (checkBox1.Checked)
    {
        foreach (var face in detect.Detect(gray))
        {
            edit.Draw(face.rect, new Bgr(Color.Red), 5);
        }
    }
    if (checkBox2.Checked)
    {
        foreach (var nose in detect2.Detect(gray))
        {
            edit.Draw(nose.rect, new Bgr(Color.Blue), 5);
        }
    }
    if (checkBox3.Checked)
    {
        foreach (var mouth in detect3.Detect(gray))
        {
            edit.Draw(mouth.rect, new Bgr(Color.Yellow), 5);
        }
    }
    if (checkBox4.Checked)
    {
        foreach (var upperbody in detect4.Detect(gray))
        {
            edit.Draw(upperbody.rect, new Bgr(Color.Green), 5);
        }
    }
    pictureBox2.Image = edit.Bitmap;
}
/// <summary>
/// Per-tick capture: grabs a frame, runs whichever detectors are enabled,
/// and displays the annotated frame.
/// </summary>
private void timer1_Tick(object sender, EventArgs e)
{
    video_play = true;
    ori = cap.QueryFrame();
    if (ori == null)
    {
        return;
    }
    gray = new Image <Gray, byte>(ori.Width, ori.Height);
    CvInvoke.cvCvtColor(ori, gray, COLOR_CONVERSION.CV_BGR2GRAY);
    if (detectFace)
    {
        foreach (var hit in faceclassifier.Detect(gray))
        {
            ori.Draw(hit.rect, new Bgr(Color.Blue), 3);
        }
    }
    if (detectEyes)
    {
        foreach (var hit in eyesclassifier.Detect(gray))
        {
            ori.Draw(hit.rect, new Bgr(Color.Orange), 3);
        }
    }
    if (canny_edge_status)
    {
        // NOTE(review): the minimum window here is the full frame size, so
        // this pass can only ever match a face that fills the entire image —
        // confirm that is intended.
        foreach (var hit in cascade.Detect(gray, 1.1, 2, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(ori.Width, ori.Height)))
        {
            ori.Draw(hit.rect, new Bgr(Color.Red), 3);
        }
    }
    pictureBox1.Image = ori.ToBitmap();
}
/// <summary>
/// Background worker: scans a directory for *.jpg files, finds the biggest
/// face in each, resizes it to 100x100, and saves it under a name derived
/// from the file name (expected "&lt;first&gt; &lt;last&gt; &lt;number&gt;.jpg").
/// Progress is reported once per processed file.
/// </summary>
/// <param name="sender">Background worker.</param>
/// <param name="e">e.Argument holds the directory to scan.</param>
private void addImagesWorker_DoWork(object sender, System.ComponentModel.DoWorkEventArgs e)
{
    String dir = (String)e.Argument;
    FileInfo [] files = new DirectoryInfo(dir).GetFiles("*.jpg");
    int count = 0;
    // FIX: dispose the cascade when done (the original leaked it).
    using (var haar = new HaarCascade("haarcascade_frontalface_default.xml"))
    {
        foreach (FileInfo file in files)
        {
            // FIX: the image is loaded directly as grayscale; the original's
            // extra Convert<Gray, byte>() call was redundant and doubled the
            // memory per file.
            using (var img = new Image <Gray, Byte>(dir + "\\" + file.Name))
            {
                var faces = haar.Detect(img, 1.4, 4, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(img.Width / 8, img.Height / 8));
                if (faces.Length > 0)
                {
                    //find the biggest face in detected
                    Rectangle rect = faces [0].rect;
                    for (int i = 1; i < faces.Length; i++)
                    {
                        if (faces [i].rect.Width > rect.Width && faces [i].rect.Height > rect.Height)
                        {
                            rect = faces [i].rect;
                        }
                    }
                    img.ROI = rect;
                    Image <Gray, Byte> face = img.Clone();
                    Image <Gray, Byte> faceResized = face.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
                    if (faceResized != null)
                    {
                        var splitted = file.Name.Substring(0, file.Name.Length - 4).Split(' ');
                        int tmp;
                        // FIX: guard against names with fewer than three parts
                        // (the original threw IndexOutOfRangeException), and
                        // reuse the already-parsed number instead of parsing
                        // it a second time with int.Parse.
                        if (splitted.Length >= 3 && int.TryParse(splitted[2], out tmp))
                        {
                            faceResized.Save(Learn.createFileName(splitted [0], splitted [1], tmp));
                        }
                    } //if resized
                } //if found any face
            }
            count++;
            addImagesWorker.ReportProgress(count, DateTime.Now);
        } //check each jpg file
    }
}
/// <summary>
/// Walks every subdirectory of <paramref name="mainDirectory"/>, detects the
/// single face in each unprocessed image (files without "_" in the name),
/// crops and resizes it, optionally equalizes / rotates / flips it, and
/// saves the derived images next to the original.
/// </summary>
/// <param name="mainDirectory">Root directory containing one folder per subject.</param>
/// <param name="newSize">Edge length of the saved square face crop.</param>
/// <param name="equalize">Also save a histogram-equalized variant.</param>
/// <param name="rotation">Rotation in degrees for an extra variant; 0 disables it.</param>
/// <param name="flip">Also save a horizontally flipped variant.</param>
public static void CreateTrainingSet(String mainDirectory, int newSize, bool equalize, int rotation, bool flip)
{
    string[] subdirEntries = Directory.GetDirectories(mainDirectory);
    // FIX: the cascade is loop-invariant; the original created (and leaked)
    // one HaarCascade per image. Create it once and dispose it when done.
    using (var haar = new HaarCascade(FileAccessUtil.GetHaarCascade()))
    {
        foreach (var directory in subdirEntries)
        {
            string[] fileEntries = Directory.GetFiles(directory);
            foreach (var file in fileEntries.Where(x => (!x.Contains("_"))))
            {
                Image <Gray, byte> image = new Image <Gray, byte>(file);
                //the images are big - reduce the size to the half
                image = image.Resize(0.5, INTER.CV_INTER_CUBIC);
                var faces = haar.Detect(image);
                // Only images with exactly one face are usable for training.
                if (faces.Length == 1)
                {
                    var face = faces[0];
                    //crop the face and resize it to the requested edge length
                    var faceImg = image.Copy(face.rect).Resize(newSize, newSize, INTER.CV_INTER_CUBIC);
                    String imgName = file.Insert(file.IndexOf("."), "_" + newSize.ToString());
                    if (equalize)
                    {
                        // NOTE(review): the insert position is computed from
                        // 'file', not 'imgName', so markers land BEFORE the
                        // size suffix (e.g. "a_N_100.jpg") — confirm this
                        // naming is intended; kept unchanged to preserve
                        // existing file names.
                        imgName = imgName.Insert(file.IndexOf("."), "_N");
                        var equalized = EqualizeHist(faceImg);
                        faceImg = equalized;
                    }
                    faceImg.Save(imgName);
                    //create rotated image if it was demanded
                    if (rotation != 0)
                    {
                        var rotated = faceImg.Rotate(rotation, new Gray(0.3));
                        var rotatedName = imgName.Insert(file.IndexOf("."), "_R");
                        rotated.Save(rotatedName);
                    }
                    if (flip)
                    {
                        var fliped = faceImg.Flip(FLIP.HORIZONTAL);
                        var flipedName = imgName.Insert(file.IndexOf("."), "_F");
                        fliped.Save(flipedName);
                    }
                }
            }
        }
    }
}
/// <summary>
/// Captures a frame, outlines every detected face in green, and displays it.
/// </summary>
private void timer1_Tick(object sender, EventArgs e)
{
    ori = new Image <Bgr, byte>(cap.QueryFrame().ToBitmap());
    gray = new Image <Gray, byte>(ori.Size);
    CvInvoke.cvCvtColor(ori, gray, COLOR_CONVERSION.CV_BGR2GRAY);
    foreach (var hit in hc.Detect(gray))
    {
        ori.Draw(hit.rect, new Bgr(Color.Green), 1);
    }
    pictureBox2.Image = ori.Bitmap;
}
/// <summary>
/// Returns a copy of <paramref name="edit"/> with every detected face
/// outlined. BUG FIX: the original converted the unrelated field 'original'
/// to grayscale instead of the 'edit' parameter it was asked to process,
/// so detection ran on the wrong image whenever the two differed.
/// </summary>
/// <param name="edit">Color image to run face detection on.</param>
/// <returns>A clone of the input with face rectangles drawn.</returns>
Image <Bgr, byte> detect(Image <Bgr, byte> edit)
{
    gray = new Image <Gray, byte>(edit.Width, edit.Height);
    CvInvoke.cvCvtColor(edit, gray, COLOR_CONVERSION.CV_BGR2GRAY);
    var faces = cascade.Detect(gray);
    // Draw on a clone so the caller's image is untouched.
    edit = edit.Clone();
    foreach (var face in faces)
    {
        edit.Draw(face.rect, new Bgr(Color.Aqua), 5);
    }
    return edit;
}
/// <summary>
/// Analyzes the captured image: runs the given Haar cascade and reports the
/// face count in the status label. Returns null when either argument is
/// missing.
/// </summary>
/// <param name="img">Color image to search.</param>
/// <param name="haar">Loaded cascade to run.</param>
/// <returns>Detected faces, or null when input is missing.</returns>
private MCvAvgComp[] getFaces(Image <Bgr, byte> img, HaarCascade haar)
{
    if (haar == null || img == null)
    {
        return null;
    }
    var faces = haar.Detect(img.Convert <Gray, byte>());
    if (faces != null)
    {
        // Status text is intentionally kept in Chinese (user-facing string).
        FaceNumTxt.Text = string.Format("本次识别 {0} 个人脸.", faces.Length);
    }
    return faces;
}
/// <summary>
/// Detects the face in the current gray frame using the FIND_BIGGEST_OBJECT
/// strategy and returns the first hit. When several faces are reported, a
/// warning is logged and the first one is still used.
/// </summary>
/// <param name="data">Frame data holding the grayscale frame.</param>
/// <returns>The first detected face.</returns>
/// <exception cref="NoFaceDetectedException">No face was found.</exception>
public MCvAvgComp DetectFace(FrameData data)
{
    MCvAvgComp[] facesDetected = _faces.Detect(data.GrayFrame, 1.05, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT, new Size(20, 20));
    if (facesDetected.Length == 0)
    {
        throw new NoFaceDetectedException();
    }
    if (facesDetected.Length > 1)
    {
        // BUG FIX: the original message said "more than two faces" although
        // the condition fires for more than ONE face.
        Console.WriteLine("HaarDetectors.DetectFace: more than one face detected. Possible errors");
    }
    return facesDetected[0];
}
/// <summary>
/// Frame handler: detects faces on the captured frame and displays it with
/// each face outlined. FIX: the original constructed a new HaarCascade on
/// every frame and never disposed it (nor the gray frame) — both are now
/// released via 'using'.
/// </summary>
private void ProcessFrame(object sender, EventArgs e)
{
    if (viewer != null && capture != null)
    {
        img = capture.QueryFrame();
        using (HaarCascade haar = new HaarCascade("../../haarcascade_frontalface_default.xml"))
        using (Image <Gray, byte> grayframe = img.Convert <Gray, byte>())
        {
            // Window limited to between 20x20 and half the frame size.
            var faces = haar.Detect(grayframe, 1.2, 3, HAAR_DETECTION_TYPE.DO_ROUGH_SEARCH, new Size(20, 20), new Size(img.Width / 2, img.Height / 2));
            foreach (var face in faces)
            {
                img.Draw(face.rect, new Bgr(0, double.MaxValue, 0), 3);
            }
        }
        viewer.Image = img;
    }
}
/// <summary>
/// Detects the first object matching the cascade in <paramref name="Image"/>
/// and returns its rectangle; falls back to the full image bounds when
/// nothing is found. FIX: the cascade and the grayscale copy are now
/// disposed (the original leaked both on every call); the pointless
/// ROI reset on the freshly created gray image was removed.
/// </summary>
/// <param name="FileName">Path of the Haar cascade XML file.</param>
/// <param name="Image">Color image to search.</param>
/// <returns>The first hit's rectangle, or the full image bounds.</returns>
public Rectangle getRectFromImage(string FileName, Image <Bgr, Byte> Image)
{
    using (var cascade = new HaarCascade(FileName))
    using (var gray = Image.Convert <Gray, Byte>())
    {
        var MouthDetected = cascade.Detect(gray, 1.1, 10, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
        // Default to the whole image when no match is found.
        var Rect = new Rectangle(0, 0, Image.Width, Image.Height);
        if (MouthDetected.Length > 0)
        {
            Rect = MouthDetected[0].rect;
        }
        return Rect;
    }
}
/// <summary>
/// Recognizes the person on the supplied frame: converts it to grayscale,
/// picks the largest detected face, crops and scales it to 100x100, and runs
/// it through the recognizer. Returns an empty string when no face is found.
/// </summary>
/// <param name="frame">Color frame expected to contain a face.</param>
/// <returns>The recognized label, or "" when no face was detected.</returns>
public String recognize(Image <Bgr, Byte> frame)
{
    // 1. Grayscale conversion + detection over the whole frame.
    Image <Gray, byte> grayframe = frame.Convert <Gray, byte>();
    var faces = haar.Detect(grayframe, 1.4, 4, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(frame.Width / 8, frame.Height / 8));
    if (faces.Length == 0)
    {
        // No face detected — return an empty label.
        return "";
    }
    // 2. Keep the largest face. A candidate must beat the current best in
    //    BOTH width and height (this matches the original selection rule).
    Rectangle rect = faces [0].rect;
    for (int i = 1; i < faces.Length; i++)
    {
        if (faces [i].rect.Width > rect.Width && faces [i].rect.Height > rect.Height)
        {
            rect = faces [i].rect;
        }
    }
    // 3. Crop via ROI, normalize to 100x100, and classify.
    grayframe.ROI = rect;
    Image <Gray, Byte> face = grayframe.Clone();
    Image <Gray, Byte> faceResized = face.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
    return eor.Recognize(faceResized);
}
/// <summary>
/// Returns a copy of the input with every eye-cascade hit outlined in blue.
/// </summary>
/// <param name="inp">Color image to scan.</param>
private Image <Bgr, byte> scanEye(Image <Bgr, byte> inp)
{
    var annotated = inp.Clone();
    var grayscale = new Image <Gray, byte>(annotated.Width, annotated.Height);
    CvInvoke.cvCvtColor(annotated, grayscale, COLOR_CONVERSION.CV_BGR2GRAY);
    foreach (var hit in eyecascade.Detect(grayscale))
    {
        annotated.Draw(hit.rect, new Bgr(Color.MediumBlue), 2);
    }
    return annotated;
}
/// <summary>
/// Detection pass: copies the input into FGrayScale, equalizes it, runs the
/// Haar cascade, and rebuilds FTrackingObjects with normalized positions and
/// scales. FIX: removed the stride/align computation — its result was never
/// used anywhere in the method (dead code).
/// </summary>
public override void Process()
{
    //TODO: Status = "Load Haar file"
    if (FHaarCascade == null)
    {
        return;
    }
    FInput.Image.GetImage(FGrayScale);

    //Can not work, bcs src and dest are the same.
    CvInvoke.cvEqualizeHist(FGrayScale.CvMat, FGrayScale.CvMat);

    //MCvAvgComp[] objectsDetected = FHaarCascade.Detect(grayImage, 1.8, 1, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(grayImage.Width / 10, grayImage.Height / 10));
    MCvAvgComp[] objectsDetected = FHaarCascade.Detect(FGrayScale.GetImage() as Image <Gray, byte>, FScaleFactor, FMinNeighbors,
                                                       HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                                       new Size(FMinWidth, FMinHeight));

    FTrackingObjects.Clear();
    foreach (MCvAvgComp f in objectsDetected)
    {
        TrackingObject trackingObject = new TrackingObject();
        // Map the hit's center and size from pixel space into the
        // configured destination range.
        Vector2D objectCenterPosition = new Vector2D(f.rect.X + f.rect.Width / 2, f.rect.Y + f.rect.Height / 2);
        Vector2D maximumSourceXY = new Vector2D(FGrayScale.Width, FGrayScale.Height);
        trackingObject.Position = VMath.Map(objectCenterPosition, FMinimumSourceXY, maximumSourceXY, FMinimumDestXY, FMaximumDestXY, TMapMode.Float);
        trackingObject.Scale = VMath.Map(new Vector2D(f.rect.Width, f.rect.Height), FMinimumSourceXY.x, maximumSourceXY.x, 0, 1, TMapMode.Float);
        FTrackingObjects.Add(trackingObject);
    }
}
/// <summary>
/// Per-tick capture: optionally outlines detected faces, then shows the
/// frame. FIX: the cascade is now disposed each tick (the original leaked
/// one HaarCascade per timer tick, whether or not the checkbox was set),
/// and a null frame from the capture is skipped.
/// </summary>
private void timer1_Tick(object sender, EventArgs e)
{
    Image <Bgr, byte> ori = cap.QueryFrame();
    if (ori == null)
    {
        // Capture can yield no frame (e.g. end of stream) — nothing to show.
        return;
    }
    if (checkBox1.Checked)
    {
        using (HaarCascade face = new HaarCascade("haarcascade_frontalface_alt.xml"))
        {
            foreach (var item in face.Detect(ori.Convert <Gray, byte>()))
            {
                ori.Draw(item.rect, new Bgr(Color.Red), 1);
            }
        }
    }
    pictureBox2.Image = ori.Bitmap;
}
/// <summary>
/// Worker loop: whenever the source frame changes, grabs it, equalizes the
/// grayscale copy, detects faces, and rebuilds the shared Faces list with
/// normalized positions/scales. FIX: removed the stride/align computation —
/// its result was never used (dead code).
/// NOTE(review): 'lock (this)' is an anti-pattern, but callers elsewhere may
/// lock on this instance, so it is kept; consider a private lock object.
/// NOTE(review): the loop busy-spins when no frame change is pending —
/// confirm whether a short wait is needed.
/// </summary>
void fnFindFacesThread()
{
    while (IsRunning)
    {
        if (FSource.FrameChanged)
        {
            lock (this)
            {
                FGrayImage = FSource.Img.Convert <Gray, Byte>();
                // Normalize brightness before detection.
                FGrayImage._EqualizeHist();
                MCvAvgComp[] faceDetected = FHaarCascade.Detect(FGrayImage, 1.8, 4, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(FGrayImage.Width / 8, FGrayImage.Height / 8));
                Faces.Clear();
                foreach (MCvAvgComp f in faceDetected)
                {
                    FaceTrackingFace face = new FaceTrackingFace();
                    // Map face center and size into the configured destination range.
                    var faceVector = new Vector2D(f.rect.X + f.rect.Width / 2, f.rect.Y + f.rect.Height / 2);
                    Vector2D CMaximumSourceXY = new Vector2D(FGrayImage.Width, FGrayImage.Height);
                    face.Position = VMath.Map(faceVector, CMinimumSourceXY, CMaximumSourceXY, CMinimumDestXY, CMaximumDestXY, TMapMode.Float);
                    face.Scale = VMath.Map(new Vector2D(f.rect.Width, f.rect.Height), CMinimumSourceXY.x, CMaximumSourceXY.x, 0, 2, TMapMode.Float);
                    Faces.Add(face);
                }
            }
        }
    }
}
/// <summary>
/// Converts raw pixel data to an image, detects faces, and returns the first
/// face as an equalized grayscale crop of FaceDataWidth x FaceDataHeight;
/// returns null when no face is found.
/// NOTE: only the first detected face is used, and the result of the
/// IsMouthDetected call is not consumed here.
/// </summary>
/// <param name="pixelData">Raw pixel buffer.</param>
/// <param name="height">Frame height in pixels.</param>
/// <param name="width">Frame width in pixels.</param>
/// <returns>Normalized grayscale face crop, or null.</returns>
public Image <Gray, byte> GetDetectedFace(byte[] pixelData, int height, int width)
{
    var bitmap = BytesToBitmap(pixelData, height, width);
    var image = new Image <Bgr, byte>(bitmap);
    var grayImage = image.Convert <Gray, Byte>();
    //Face Detector
    var facesDetected = Face.Detect(grayImage, 1.2, 10, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
    foreach (var candidate in facesDetected)
    {
        var cropped = image.Copy(candidate.rect).Convert <Gray, byte>().Resize(FaceDataWidth, FaceDataHeight, INTER.CV_INTER_CUBIC);
        cropped._EqualizeHist();
        IsMouthDetected(cropped);
        return cropped;
    }
    return null;
}
/// <summary>
/// Shows the camera feed, re-running face detection every 10th frame. The
/// most recent detection is drawn on every frame, and the distance estimate
/// (derived from the face height) is pushed to the label on the UI thread.
/// </summary>
private void ProcessFrame(object sender, EventArgs e)
{
    Image <Bgr, byte> frame = capture.RetrieveBgrFrame();
    // Re-draw the last known face on every frame so the box persists
    // between detection passes.
    if (Faces != null && Faces.Length != 0)
    {
        frame.Draw(Faces[0].rect, new Bgr(0, 255, 127), 5);
    }
    if (count == 10)
    {
        // Detection pass (only every 10th frame to keep the UI responsive).
        Image <Gray, byte> grayFrame = frame.Convert <Gray, byte>();
        //finding faces
        Faces = faceCascade.Detect(grayFrame);
        if (Faces.Length != 0) //when found calculate distance
        {
            frame.Draw(Faces[0].rect, new Bgr(0, 255, 127), 5);
            d = calculate(Faces[0].rect.Height); //calculate
            //updating window info
            Action showDistance = () => labeld.Text = d.ToString("0.00") + " cm";
            labeld.Invoke(showDistance);
        }
        else
        {
            Action showDistance = () => labeld.Text = "NaN";
            labeld.Invoke(showDistance);
        }
        count = 0;
    }
    webcamOut.Image = frame.ToBitmap();
    count++;
}
/// <summary>
/// Detects faces on the given bitmap and returns a new bitmap with each face
/// outlined in yellow, or null when the input is null or no face is found.
/// FIX: the cascade and intermediate image are now disposed (the original
/// leaked a HaarCascade on every call), and the null input is checked BEFORE
/// constructing the image (the original's null check was unreachable —
/// construction would already have thrown).
/// </summary>
/// <param name="bt">Input bitmap; may be null.</param>
/// <returns>Annotated bitmap, or null.</returns>
public static Bitmap emguHaarDetect(Bitmap bt)
{
    if (bt == null)
    {
        return null;
    }
    using (Image <Bgr, byte> img = new Image <Bgr, byte>(bt))
    using (HaarCascade haar = new HaarCascade(haarXmlPath))
    {
        MCvAvgComp[] faces = haar.Detect(img.Convert <Gray, byte>(), 1.4, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
        if (faces.Length == 0)
        {
            return null;
        }
        foreach (MCvAvgComp face in faces)
        {
            img.Draw(face.rect, new Bgr(Color.Yellow), 2);
        }
        return img.ToBitmap();
    }
}
/// <summary>
/// Detects exactly one face in the pixel buffer (a square image of edge
/// <paramref name="initialSize"/>) and returns it trimmed and resized to
/// outputSize x outputSize; returns null unless exactly one face is found.
/// FIX: the cascade, grayscale frame, and bitmap are now disposed on every
/// path — the original released nothing, leaking unmanaged memory per call.
/// </summary>
/// <param name="pixels">Raw pixel data for the input image.</param>
/// <param name="initialSize">Edge length of the square input image.</param>
/// <param name="outputSize">Edge length of the returned square face crop.</param>
/// <returns>The trimmed, resized face, or null.</returns>
public static Image <Gray, byte> DetectAndTrimFace(int[] pixels, int initialSize, int outputSize)
{
    using (var inBitmap = ConvertToBitmap(pixels, initialSize))
    using (var grayframe = new Image <Gray, byte>(inBitmap))
    using (var haar = new HaarCascade(FileAccessUtil.GetHaarCascade()))
    {
        //inBitmap.Save(@"E:\data\phototest\received.bmp");
        var faces = haar.Detect(grayframe, 1.2, 3, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(30, 30));
        // Only an unambiguous single-face result is accepted.
        if (faces.Length != 1)
        {
            return null;
        }
        // Copy() detaches the pixel data, so disposing grayframe afterwards is safe.
        return grayframe.Copy(faces[0].rect).Resize(outputSize, outputSize, INTER.CV_INTER_CUBIC);
    }
}
/// <summary>
/// Combined processing callback for the two application modes.
/// Shape mode (shapeBTN): loads an image (file dialog or camera), smooths
/// it, then runs Canny + Hough circle/line transforms and contour
/// approximation to find circles, lines, triangles, rectangles, and other
/// polygons, drawing each category in its own color on a blank copy.
/// Face mode (faceBTN): loads an image, detects faces with the Haar cascade,
/// outlines them, and copies each face region into its own bitmap.
/// The game button becomes visible once both modes have run at least once.
/// NOTE(review): win_size is used for BOTH the min and max detection window,
/// so only faces of exactly that window size can match — confirm intended.
/// NOTE(review): 'extracted_face'/'face_canva' are created per face and never
/// disposed or used afterwards — confirm they are consumed elsewhere.
/// </summary>
private void processFunction(object obj, EventArgs e)
{
    if (shapeBTN.Checked)
    {
        ready4Play++;
        faceBOX.Visible = false;
        // Acquire the source image from the chosen input.
        if (rd_bt_choose_image.Checked)
        {
            original_Image = new Image <Bgr, byte>(openFileDialog1.FileName);
        }
        else if (rd_bt_open_cam.Checked)
        {
            original_Image = capture_cam.QueryFrame();
        }
        // Denoise before edge detection.
        smoothed_Image = original_Image.PyrDown().PyrUp();
        smoothed_Image._SmoothGaussian(3);
        Gray gray_Cannay_threshold = new Gray(160);
        Gray gray_Circle_Cannay_threshold = new Gray(100);
        Gray gray_threshold_Linking = new Gray(80);
        gray_Image = original_Image.Convert <Gray, byte>();
        cannay_Image = gray_Image.Canny(gray_Cannay_threshold, gray_threshold_Linking);
        detected_Image = original_Image.CopyBlank();
        // Circle detection via Hough transform (first channel only).
        double db_Accum_res = 2.0;
        double min_distance_between_circles = gray_Image.Height / 4;
        int min_radius = 10;
        int max_radius = 4000;
        CircleF[] circles = gray_Image.HoughCircles(gray_Cannay_threshold, gray_Circle_Cannay_threshold, db_Accum_res, min_distance_between_circles, min_radius, max_radius)[0];
        foreach (CircleF circle in circles)
        {
            detected_Image.Draw(circle, new Bgr(Color.Red), 2);
        }
        // Line detection on the Canny edge image.
        double db_line_res = 1.0;
        double db_line_angle_res = 4.0 * (Math.PI / 180.0);
        int line_threshold = 20;
        double min_line_width = 30.0;
        double max_line_height = 10.0;
        LineSegment2D[] lines = cannay_Image.HoughLinesBinary(db_line_res, db_line_angle_res, line_threshold, min_line_width, max_line_height)[0];
        foreach (LineSegment2D line in lines)
        {
            detected_Image.Draw(line, new Bgr(Color.Green), 2);
        }
        // Walk the contour list and classify by vertex count / edge angles.
        Contour <Point> contours = cannay_Image.FindContours();
        List <Triangle2DF> triangles = new List <Triangle2DF>();
        List <MCvBox2D> rectangles = new List <MCvBox2D>();
        List <Contour <Point> > polygon = new List <Contour <Point> >();
        while (contours != null)
        {
            Contour <Point> contour = contours.ApproxPoly(10.0);
            // Ignore small contours (noise).
            if (contour.Area > 250.0)
            {
                if (contour.Total == 3)
                {
                    Point[] points = contour.ToArray();
                    triangles.Add(new Triangle2DF(points[0], points[1], points[2]));
                }
                else if (contour.Total >= 4 && contour.Total <= 6)
                {
                    Point[] points = contour.ToArray();
                    bool isRect = true;
                    if (contour.Total == 4)
                    {
                        // A rectangle's exterior angles must all be ~90 degrees.
                        LineSegment2D[] edges = PointCollection.PolyLine(points, true);
                        for (int i = 0; i < edges.Length; i++)
                        {
                            double angle = Math.Abs(edges[(i + 1) % edges.Length].GetExteriorAngleDegree(edges[i]));
                            if (angle < 80 || angle > 100)
                            {
                                isRect = false;
                            }
                        }
                    }
                    else
                    {
                        isRect = false;
                    }
                    if (isRect)
                    {
                        rectangles.Add(contour.GetMinAreaRect());
                    }
                    else
                    {
                        polygon.Add(contour);
                    }
                }
            }
            contours = contours.HNext;
        }
        // Draw each shape category in its own color.
        foreach (Triangle2DF triangle in triangles)
        {
            detected_Image.Draw(triangle, new Bgr(Color.Yellow), 2);
        }
        foreach (MCvBox2D rect in rectangles)
        {
            detected_Image.Draw(rect, new Bgr(Color.Purple), 2);
        }
        foreach (Contour <Point> poly in polygon)
        {
            detected_Image.Draw(poly, new Bgr(Color.Orange), 2);
        }
        //if (rd_bt_choose_image.Checked)
        //{
        imageBox2.Image = original_Image;
        imageBox3.Image = detected_Image;
        //}
    }
    ////////////////////////////////////////////////////////////////////////
    if (faceBTN.Checked)
    {
        ready4Play2++;
        // Hide the shape-mode controls when switching to face mode.
        if (imageBox2.Visible == true || imageBox2.Visible == true || imageBox3.Visible == true)
        {
            imageBox2.Visible = false;
            imageBox3.Visible = false;
            tableLayoutPanel1.Visible = false;
        }
        faceBOX.Visible = true;
        if (rd_bt_choose_image.Checked)
        {
            original_img = new Image <Bgr, byte>(txt_bx_img_path.Text);
        }
        else if (rd_bt_open_cam.Checked)
        {
            original_img = capture_cam.QueryFrame();
        }
        original_gray = original_img.Convert <Gray, byte>();
        detected_faces = haar_obj.Detect(original_gray, scale, min_neighbor, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(win_size, win_size), new Size(win_size, win_size));
        //MessageBox.Show(detected_faces.Length.ToString());
        if (detected_faces.Length > 0)
        {
            Bitmap input = original_gray.ToBitmap();
            Bitmap extracted_face;
            Graphics face_canva;
            foreach (MCvAvgComp face in detected_faces)
            {
                // Outline the face and copy its region into a standalone bitmap.
                original_img.Draw(face.rect, new Bgr(Color.Red), 3);
                extracted_face = new Bitmap(face.rect.Width, face.rect.Height);
                face_canva = Graphics.FromImage(extracted_face);
                face_canva.DrawImage(input, 0, 0, face.rect, GraphicsUnit.Pixel);
            }
        }
        faceBOX.Image = original_img.Resize(faceBOX.Width, faceBOX.Height, INTER.CV_INTER_LINEAR);
    }
    // Enable the game once both modes have been exercised.
    if (ready4Play > 0 && ready4Play2 > 0)
    {
        gameBTN.Visible = true;
    }
}