// ==== Code example #1 ====
        /// <summary>
        /// "Add face" button: grabs a frame, detects the first face with the
        /// Haar cascade, stores it as a 100x100 gray training image labelled
        /// with the text in textBox1, and rewrites the TrainedFaces folder.
        /// </summary>
        private void button2_Click(object sender, EventArgs e)
        {
            try
            {
                // One more training sample has been captured.
                ContTrain = ContTrain + 1;

                // Grab a 320x240 gray frame from the camera for detection.
                gray = grabber.QueryGrayFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

                //TestImageBox.Image = gray;
                MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                    face,
                    1.2,               // scale factor between cascade passes
                    10,                // min neighbouring rectangles to accept a hit
                    Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                    new Size(20, 20)); // minimum face size to search for

                // Keep only the first detected face.
                bool faceFound = false;
                foreach (MCvAvgComp f in facesDetected[0])
                {
                    TrainedFace = currentFrame.Copy(f.rect).Convert<Gray, byte>();
                    faceFound   = true;
                    break;
                }

                // FIX: previously a stale 'result' field (written by the
                // frame-grabber loop) was saved even when no face was found
                // in this frame; bail out instead of storing stale data.
                if (!faceFound)
                {
                    MessageBox.Show("No face detected, please try again", "Training Fail", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
                    return;
                }

                // FIX: resize the face captured above; the original resized
                // 'result' instead, making the copy in the loop a dead store.
                TrainedFace = TrainedFace.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                trainingImages.Add(TrainedFace);
                // The label for this face comes from the name text box.
                labels.Add(textBox1.Text);


                imageBox2.Image = TrainedFace;

                // Rewrite the label file: sample count first, then one
                // '%'-terminated label per stored face (appended below).
                File.WriteAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", trainingImages.ToArray().Length.ToString() + "%");

                for (int i = 1; i < trainingImages.ToArray().Length + 1; i++)
                {
                    trainingImages.ToArray()[i - 1].Save(Application.StartupPath + "/TrainedFaces/face" + i + ".bmp");
                    File.AppendAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", labels.ToArray()[i - 1] + "%");
                }

                MessageBox.Show(textBox1.Text + "´s face detected and added :)", "Training OK", MessageBoxButtons.OK, MessageBoxIcon.Information);
            }
            catch
            {
                // Camera / cascade not initialised yet - inform the user
                // instead of crashing the UI thread.
                MessageBox.Show("Enable the face detection first", "Training Fail", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
            }
        }//////////// face detection /////////////
// ==== Code example #2 ====
        /// <summary>
        /// Per-frame callback: grabs a frame from the capture device, detects
        /// faces on a grayscale copy, outlines each face in red and shows the
        /// annotated frame in imageBox1.
        /// </summary>
        private void prosesframe(object sender, EventArgs arg)
        {
            Image <Bgr, Byte> imageframe = capture.QueryFrame();

            // FIX: QueryFrame() returns null when no frame is available
            // (e.g. device stopped or end of stream) - bail out instead of
            // throwing a NullReferenceException.
            if (imageframe == null)
            {
                return;
            }

            Image <Gray, Byte> grayframe = imageframe.Convert <Gray, Byte>();
            var face = grayframe.DetectHaarCascade(Haar, 1.4, 4, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20))[0];
            foreach (var faces in face)
            {
                imageframe.Draw(faces.rect, new Bgr(Color.Red), 3);
            }

            // FIX: assign the image box once, after drawing; the original
            // assigned the raw frame first and immediately overwrote it.
            imageBox1.Image = imageframe;
        }
// ==== Code example #3 ====
 /// <summary>
 /// "Add face" button: detects the first face in the current camera frame,
 /// stores it as a 100x100 gray training image and rewrites the Faces
 /// folder (bitmaps plus the comma-separated label file).
 /// </summary>
 private void button2_Click(object sender, EventArgs e)
 {
     count += 1; // one more stored training face

     // Grab a 320x240 gray frame and run the Haar face cascade on it.
     grayFace = camera.QueryGrayFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
     MCvAvgComp[][] Detectedfaces = grayFace.DetectHaarCascade(facedetected, 1.2, 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));

     // Keep only the first detected face.
     bool found = false;
     foreach (MCvAvgComp f in Detectedfaces[0])
     {
         TrainedFace = Frame.Copy(f.rect).Convert <Gray, byte>();
         found       = true;
         break;
     }

     // FIX: no face in this frame - do not persist stale data.
     if (!found)
     {
         return;
     }

     // FIX: resize the face captured above; the original resized the
     // unrelated 'result' field, making the copy in the loop a dead store.
     TrainedFace = TrainedFace.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
     trainningimages.Add(TrainedFace);

     // Persist: sample count first, then every face bitmap and its
     // comma-terminated label.
     File.WriteAllText(Application.StartupPath + "/Faces/Faces.txt", trainningimages.ToArray().Length.ToString() + ",");
     for (int i = 1; i < trainningimages.ToArray().Length + 1; i++)
     {
         trainningimages.ToArray()[i - 1].Save(Application.StartupPath + "/Faces/faces" + i + ".bmp");
         File.AppendAllText(Application.StartupPath + "/Faces/Faces.txt", lables.ToArray()[i - 1] + ",");
     }
 }
// ==== Code example #4 ====
        /// <summary>
        /// Per-tick CamShift face tracker. First pass (isTrack == false):
        /// detect a face with the Haar cascade and build a hue histogram from
        /// its rectangle. Subsequent passes: back-project that histogram onto
        /// the new frame and let cvCamShift follow the face window.
        /// </summary>
        private void timer1_Tick(object sender, EventArgs e)
        {
            using (Image nextframe = cap.QueryFrame())
            {
                if (nextframe != null)
                {
                    if (isTrack == false)
                    {
                        // ---- initialisation pass: find the face to track ----
                        Image grayframe = nextframe.Convert();
                        grayframe._EqualizeHist();

                        // [0] selects the hit list of the first channel.
                        var faces = grayframe.DetectHaarCascade(haar, 1.4, 4, HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT | HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(40, 40))[0];

                        // NOTE(review): this allocation is dead - it is
                        // immediately overwritten by Convert() below.
                        hsv = new Image(grayframe.Width, grayframe.Height);
                        hsv = nextframe.Convert();
                        hsv._EqualizeHist();

                        hue         = new Image(grayframe.Width, grayframe.Height);
                        mask        = new Image(grayframe.Width, grayframe.Height);
                        backproject = new Image(grayframe.Width, grayframe.Height);

                        // Build a saturation/value mask. NOTE(review):
                        // Math.Min(10, 255) and Math.Max(10, 255) fold to the
                        // constants 10 and 255 - presumably these were meant
                        // to clamp user-tunable limits; confirm.
                        Emgu.CV.CvInvoke.cvInRangeS(hsv, new MCvScalar(0, 30, Math.Min(10, 255), 0), new MCvScalar(180, 256, Math.Max(10, 255), 0), mask);
                        // Extract the hue plane from the HSV image.
                        Emgu.CV.CvInvoke.cvSplit(hsv, hue, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero);

                        picHue.Image = hue.ToBitmap();


                        // Restrict hue/mask to the detected face rectangle and
                        // remember it as the initial track window (if several
                        // faces are found, the last one wins).
                        foreach (var face in faces)
                        {
                            // Rectangle roi = new Rectangle(face.rect.X + face.rect.Width / 4, face.rect.Y + face.rect.Height / 4, face.rect.Width / 2, face.rect.Height / 2);
                            // Rectangle roi = new Rectangle(face.rect.X, face.rect.Y, face.rect.Width / 2, face.rect.Height / 2);

                            Emgu.CV.CvInvoke.cvSetImageROI(hue, face.rect);
                            Emgu.CV.CvInvoke.cvSetImageROI(mask, face.rect);

                            nextframe.Draw(face.rect, new Bgr(0, double.MaxValue, 1), 2);
                            picMask.Image = mask.ToBitmap();
                            trackwin      = face.rect;
                        }
                        // cvCalcHist takes an array of image pointers.
                        img = new IntPtr[1]
                        {
                            hue
                        };

                        // Histogram of the hue values inside the face ROI.
                        Emgu.CV.CvInvoke.cvCalcHist(img, hist, false, mask);

                        Emgu.CV.CvInvoke.cvResetImageROI(hue);
                        Emgu.CV.CvInvoke.cvResetImageROI(mask);

                        CapImg.Image = nextframe.ToBitmap();
                        isTrack      = true;
                        // isTrack = true;
                    }
                    else
                    {
                        // ---- tracking pass ----
                        // NOTE(review): if trackwin is a value-type Rectangle,
                        // this null comparison is always true - confirm.
                        if (trackwin != null)
                        {
                            hsv = nextframe.Convert();
                            Emgu.CV.CvInvoke.cvInRangeS(hsv, new MCvScalar(0, 30, 10, 0), new MCvScalar(180, 256, 256, 0), mask);
                            Emgu.CV.CvInvoke.cvSplit(hsv, hue, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero);
                            picMask.Image = mask.ToBitmap();
                            picHue.Image  = hue.ToBitmap();
                        }

                        img = new IntPtr[1]
                        {
                            hue
                        };

                        // Back-project the stored hue histogram, then AND with
                        // the mask so only in-range pixels remain.
                        Emgu.CV.CvInvoke.cvCalcBackProject(img, backproject, hist);
                        Emgu.CV.CvInvoke.cvAnd(backproject, mask, backproject, IntPtr.Zero);

                        Image grayframe = nextframe.Convert();
                        grayframe._EqualizeHist();


                        // Re-detect faces just to outline them in black; the
                        // detection result is not used for tracking.
                        var faces = grayframe.DetectHaarCascade(haar, 1.4, 4, HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT | HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(40, 40))[0];
                        foreach (var face in faces)
                        {
                            nextframe.Draw(face.rect, new Bgr(Color.Black), 2);
                        }


                        // cvCamShift rejects an empty window - enforce a floor.
                        if (trackwin.Width == 0)
                        {
                            trackwin.Width = 40;
                        }
                        if (trackwin.Height == 0)
                        {
                            trackwin.Height = 40;
                        }

                        Emgu.CV.CvInvoke.cvCamShift(backproject, trackwin, new MCvTermCriteria(10, 0.1), out trackcomp, out trackbox);
                        trackwin = trackcomp.rect;

                        // CvInvoke.cvEllipseBox(nextframe, trackbox, new MCvScalar(0, 255, 0), 2, LINE_TYPE.CV_AA, 0);


                        // Show the tracked window and crop it out as the face.
                        nextframe.Draw(trackwin, new Bgr(Color.Blue), 3);
                        CapImg.Image  = nextframe.ToBitmap();
                        faceS         = nextframe.Copy(trackwin);
                        picFace.Image = faceS.ToBitmap();
                    }
                }
            }
        }
// ==== Code example #5 ====
        /// <summary>
        /// Per-frame callback: grabs a frame, runs three Haar cascades (face,
        /// eye, mouth), recognises the face and eye crops against the trained
        /// eigen-image set, updates the name labels and shows the annotated
        /// frame in imageBox1.
        /// </summary>
        public void FrameGrabber(object sender, EventArgs e)
        {
            NamePersons.Add("");

            // Capture a frame from the device and shrink it to 320x240.
            currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            gray         = currentFrame.Convert <Gray, Byte>();
            //(TestImageBox.Image = currentFrame);
            // Run each cascade on the gray frame; index [0] holds the hits.
            MCvAvgComp[][] facesDetected  = gray.DetectHaarCascade(face, 1.2, 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20)); //face
            MCvAvgComp[][] StoreEyes      = gray.DetectHaarCascade(eye, 1.2, 2, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(1, 1));                    //eye
            MCvAvgComp[][] Mouthdetection = gray.DetectHaarCascade(mouth, 3, 3, HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(1, 1));

            foreach (MCvAvgComp mouths in Mouthdetection[0]) // loop over all the detected mouths
            {
                //currentFrame.Draw(mouths.rect, new Bgr(Color.Blue), 1);
                // NOTE(review): this loop only computes a label anchor point;
                // the drawing calls are commented out, so it is currently a
                // no-op.
                int   mouthLableX = mouths.rect.X;                       // X of the mouth's top-left corner
                int   mouthLableY = mouths.rect.Y;                       // Y of the mouth's top-left corner
                Point p           = new Point(mouthLableX, mouthLableY); // anchor point for a "MOUTH" label (unused)
            }

            foreach (MCvAvgComp f in facesDetected[0])
            {
                t      = t + 1;
                // Crop the face, convert to gray and normalise to 100x100 for
                // the eigen recogniser.
                result = currentFrame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

                currentFrame.Draw(f.rect, new Bgr(Color.Green), 2); // outline the detected face in green
                if (trainingImages.ToArray().Length != 0)
                {
                    // The recogniser is rebuilt from scratch every frame;
                    // 3000 is the eigen-distance threshold for a match.
                    MCvTermCriteria       termCrit   = new MCvTermCriteria(ContTrain, 0.001);
                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(trainingImages.ToArray(), labels.ToArray(), 3000, ref termCrit);

                    name = recognizer.Recognize(result); // recognised label, null when no match
                    if (name == null)
                    {
                        // NOTE(review): unmatched faces are redrawn in the
                        // same green as matched ones, so this branch has no
                        // visible effect - likely meant to use another colour.
                        currentFrame.Draw(f.rect, new Bgr(Color.Green), 2);
                    }
                    //the colour of  the face label name
                }
                NamePersons[t - 1] = name;
                NamePersons.Add("");
                label3.Text = facesDetected[0].Length.ToString();
            }
            t = 0;
            // Concatenate all recognised names into one display string.
            for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
            {
                names = names + NamePersons[nnn];
            }

            label3.Text = names;
            names       = "";
            NamePersons.Clear();

            NamesPersons.Add("");
            foreach (MCvAvgComp eyes in StoreEyes[0]) // more than one eye may be detected so for each eye there will be a rectangle
            {
                tem     = tem + 1;
                results = currentFrame.Copy(eyes.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                currentFrame.Draw(eyes.rect, new Bgr(Color.Red), 2);
                // draw a red rectangle around each detected eye
                int   eyeLableX = eyes.rect.X;                     // X of the eye's top-left corner
                int   eyeLableY = eyes.rect.Y;                     // Y of the eye's top-left corner
                Point p         = new Point(eyeLableX, eyeLableY); // anchor point for an "EYE" label (currently unused)
                //currentFrame.Draw(name, ref font, p, new Bgr(Color.Red)); // add a string "EYE" to the image captured from webcam just to be distingushable
                if (trainingImages.ToArray().Length != 0)
                {
                    // NOTE(review): eye crops are matched against the FACE
                    // training set (trainingImages/labels) - confirm this is
                    // intentional.
                    MCvTermCriteria       termCrit   = new MCvTermCriteria(ContTrain, 0.001);
                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(trainingImages.ToArray(), labels.ToArray(), 3000, ref termCrit);
                    eyename = recognizer.Recognize(results); // recognised label for this eye crop
                }

                NamesPersons[tem - 1] = eyename;
                NamesPersons.Add("");
                label10.Text = StoreEyes[0].Length.ToString();
            }
            tem = 0;
            // Concatenate the per-eye labels for display.
            for (int ee = 0; ee < StoreEyes[0].Length; ee++)
            {
                eyenames = eyenames + NamesPersons[ee];
            }

            label10.Text = eyenames;
            eyenames     = "";
            NamesPersons.Clear();


            // Show the annotated frame.
            imageBox1.Image = currentFrame;
        }