/// <summary>
/// Handler for the preprocessing buttons: selects the source bitmap based on the
/// numeric suffix of the sender's Name ("buttonN") and displays the preprocessed result.
/// </summary>
private void button8_Click(object sender, EventArgs e)
{
    // Buttons are named "buttonN"; the suffix after "button" (6 chars) selects the source.
    int buttonIndex = int.Parse(((Control)sender).Name.Substring(6));

    // Indices below 9 restart from the original bitmap; otherwise reuse what is currently shown.
    bmp = buttonIndex < 9 ? bmp0 : (Bitmap)this.picBox_Original.Image;

    this.picBox_Original.Image = ImageProcessing.ImagePreProcessing(bmp);
}
/// <summary>
/// Detects a face on the current capture frame, preprocesses it, adds it as a
/// training sample for <paramref name="label"/> and previews it in <paramref name="pcb"/>.
/// </summary>
/// <param name="label">Name of the person the captured face belongs to.</param>
/// <param name="pcb">PictureBox used to preview the captured (grayscale) face.</param>
/// <returns>true when a face was detected and added; false otherwise (including
/// when the capture device is not running, which surfaces as an exception).</returns>
public bool CatchFace(string label, PictureBox pcb)
{
    try
    {
        bool detected = false;

        // Get a gray frame from the capture device.
        gray = grabber.QueryGrayFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

        // Haar-cascade face detector.
        MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
            face,
            1.2,
            10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));

        // Keep only the first face found on the frame.
        foreach (MCvAvgComp f in facesDetected[0])
        {
            TrainedFace = currentFrame.Copy(f.rect).Convert<Gray, byte>();
            detected = true;
            break;
        }

        if (!detected)
        {
            return false;
        }

        // Resize the detected face to the canonical 100x100 training size with
        // cubic interpolation so it matches the size used at recognition time.
        // BUG FIX: the original resized the stale 'result' field (written by
        // FrameGrabber, never by this method) instead of the face just captured
        // into TrainedFace — compare with SaveString, which resizes TrainedFace.
        TrainedFace = TrainedFace.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
        TrainedFace = new Image<Gray, byte>(ImageProcessing.ImagePreProcessing(TrainedFace.ToBitmap()));

        // Show the face that was added, in grayscale.
        pcb.Image = TrainedFace.ToBitmap();
        UpdateRecognizer();
        MessageBox.Show(label + "´s face detected and added :)", "Training OK",
            MessageBoxButtons.OK, MessageBoxIcon.Information);
        return true;
    }
    catch
    {
        // QueryGrayFrame throws when the capture device has not been started.
        MessageBox.Show("Enable the face detection first", "Training Fail",
            MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
        return false;
    }
}
/// <summary>
/// Loads an image from disk, detects a face on it, preprocesses the face and adds
/// it as a training sample with the given label, then saves it to disk.
/// </summary>
/// <param name="inputpath">Path of the image file to load.</param>
/// <param name="label">Name of the person the face belongs to.</param>
/// <param name="index">Running image index, advanced by BasicOperations.SaveImage.</param>
/// <returns>The saved image's path/name from BasicOperations.SaveImage, or
/// string.Empty when no face was detected or loading/detection failed.</returns>
public string SaveString(string inputpath, string label, ref int index)
{
    try
    {
        bool detected = false;
        gray = new Image<Gray, byte>(inputpath);

        // Haar-cascade face detector.
        MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
            face,
            1.2,
            10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new Size(20, 20));

        // Keep only the first face found.
        foreach (MCvAvgComp f in facesDetected[0])
        {
            TrainedFace = gray.Copy(f.rect).Convert<Gray, byte>();
            detected = true;
            break;
        }

        if (!detected)
        {
            return string.Empty;
        }

        // BUG FIX: count the new training sample only once a face was actually
        // found — the original incremented ContTrain up front, so failed and
        // throwing calls inflated the counter.
        ContTrain = ContTrain + 1;

        // Resize to the canonical 100x100 training size (cubic interpolation).
        TrainedFace = TrainedFace.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
        TrainedFace = new Image<Gray, byte>(ImageProcessing.ImagePreProcessing(TrainedFace.ToBitmap()));

        trainingImages.Add(TrainedFace);
        labels.Add(label);
        UpdateRecognizer();
        return BasicOperations.SaveImage(TrainedFace.ToBitmap(), ref index);
    }
    catch
    {
        // Bad path / unreadable image / detector failure: report "nothing saved".
        return string.Empty;
    }
}
/// <summary>
/// Per-tick frame handler: grabs a frame, detects faces, recognizes each against
/// the trained set, draws rectangles/labels on the frame and shows it.
/// </summary>
public void FrameGrabber(object sender, EventArgs e)
{
    lbl3 = "0";
    lbl4 = "";
    NamePersons.Add("");

    // Get the current frame from the capture device.
    try
    {
        currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
    }
    catch
    {
        // Transient grab failure: keep the previous frame, if any.
    }

    // BUG FIX: if the very first grab fails, currentFrame is still null and the
    // original crashed with a NullReferenceException on Convert below.
    if (currentFrame == null)
    {
        NamePersons.Clear();
        return;
    }

    // Convert to grayscale for detection.
    gray = currentFrame.Convert<Gray, Byte>();

    // Haar-cascade face detector.
    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
        face,
        1.2,
        10,
        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
        new Size(20, 20));

    // Recognize and annotate each detected face.
    foreach (MCvAvgComp f in facesDetected[0])
    {
        t = t + 1;
        result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

        // Draw the detected face rectangle on the color frame.
        currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);

        if (trainingImages.ToArray().Length != 0)
        {
            name = recognizer.Recognize(new Image<Gray, byte>(ImageProcessing.ImagePreProcessing(result.ToBitmap())));

            // Draw the label for each face detected and recognized.
            currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
        }

        // NOTE(review): when no training images exist, 'name' keeps its value
        // from a previous tick and is still recorded here — confirm intended.
        NamePersons[t - 1] = name;
        NamePersons.Add("");

        // Number of faces detected on the scene (overwritten with names below —
        // NOTE(review): lbl3 doubles as count and name list; confirm intended).
        lbl3 = facesDetected[0].Length.ToString();
    }
    t = 0;

    // Concatenate the names of the persons recognized on this frame.
    for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
    {
        names = names + NamePersons[nnn] + ", ";
    }

    // Show the processed and annotated frame.
    pictureBoxFrameGrabber.Image = currentFrame.ToBitmap();
    lbl3 = names;
    names = "";

    // Clear the per-frame list of names.
    NamePersons.Clear();
}