Example 1
        public static int RecognizeFace(Image<Gray, byte> image)
        {
            image = ImageHandler.ResizeGrayImage(image);
            EigenFaceRecognizer eigen = OldEigen();

            EigenFaceRecognizer.PredictionResult result = eigen.Predict(image);

            /*
             * if(result.Distance > threshold)
             * {
             *  return result.Label;
             * } else
             * {
             *  return 0;
             * }
             */

            return result.Label;
        }
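In OpenCV's Eigenfaces implementation a smaller Distance means a closer match, so the commented-out check above would normally accept the label when the distance falls below the threshold rather than above it. A minimal sketch of that variant, reusing ImageHandler.ResizeGrayImage and OldEigen() from the example; the threshold value is an assumption to be tuned for the training set, and returning 0 for "unknown" follows the commented-out code:

        // Hypothetical overload with the threshold check enabled. The caller supplies
        // a threshold tuned for its own training data; 0 is returned for "unknown",
        // matching the commented-out code in the example above.
        public static int RecognizeFace(Image<Gray, byte> image, double threshold)
        {
            image = ImageHandler.ResizeGrayImage(image);
            EigenFaceRecognizer eigen = OldEigen();

            EigenFaceRecognizer.PredictionResult result = eigen.Predict(image);

            // A smaller distance means a closer match, so accept the predicted
            // label only when the distance is below the threshold.
            return result.Distance < threshold ? result.Label : 0;
        }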
Example 2
        private void videoStream(object sender, EventArgs e)
        {
            using (var imageFrame = capture.QueryFrame().ToImage<Bgr, Byte>())
            {
                if (imageFrame != null)
                {
                    var greyFrame    = imageFrame.Convert<Gray, byte>();
                    var frontalFaces = cascadeClassifier.DetectMultiScale(greyFrame, 1.1, 10, Size.Empty);
                    foreach (var face in frontalFaces)
                    {
                        imageFrame.Draw(face, new Bgr(Color.Green), 2);

                        // Note: mcvCrit is created here but never used.
                        MCvTermCriteria mcvCrit = new MCvTermCriteria(30, 0.001);

                        EigenFaceRecognizer recognizer = new EigenFaceRecognizer(200, double.PositiveInfinity);

                        Image<Gray, byte>[] copyTrainingImages = new Image<Gray, byte>[nbRecordedFaces];
                        int[] copyLabels = new int[nbRecordedFaces];

                        for (int i = 0; i < nbRecordedFaces; i++)
                        {
                            copyTrainingImages[i] = trainingImages[i];
                            copyLabels[i]         = labels[i];
                        }
                        if (nbRecordedFaces > 0)
                        {
                            recognizer.Train(copyTrainingImages, copyLabels);

                            var grayFace = imageFrame.Copy(face).Resize(64, 64, Emgu.CV.CvEnum.Inter.Cubic).Convert<Gray, Byte>();

                            EigenFaceRecognizer.PredictionResult result = recognizer.Predict(grayFace);

                            String name;

                            if (result.Label == 0)
                            {
                                name = "????";
                            }
                            else
                            {
                                name = recordedFaces[result.Label - 1].name;
                            }

                            CvInvoke.PutText(imageFrame, name, new Point(face.Location.X + 10, face.Location.Y - 10),
                                             Emgu.CV.CvEnum.FontFace.HersheyComplex, 0.6, new Bgr(0, 255, 0).MCvScalar);
                        }
                    }

                    var profileFaces = cascadeClassifierProfile.DetectMultiScale(greyFrame, 1.1, 10, Size.Empty);
                    foreach (var face in profileFaces)
                    {
                        imageFrame.Draw(face, new Bgr(Color.Yellow), 2);
                    }

                    if (detectAUX)
                    {
                        var eyes = cascadeClassifierEyes.DetectMultiScale(greyFrame, 1.2, 10, Size.Empty);
                        foreach (var eye in eyes)
                        {
                            imageFrame.Draw(eye, new Bgr(Color.Red), 2);
                        }

                        var noses = cascadeClassifierNose.DetectMultiScale(greyFrame, 1.2, 10, Size.Empty);
                        foreach (var nose in noses)
                        {
                            imageFrame.Draw(nose, new Bgr(Color.Black), 2);
                        }

                        var mouths = cascadeClassifierMouth.DetectMultiScale(greyFrame, 1.2, 10, Size.Empty);
                        foreach (var mouth in mouths)
                        {
                            imageFrame.Draw(mouth, new Bgr(Color.Purple), 2);
                        }
                    }
                }

                imgCamUser.Image = imageFrame;
            }
        }
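Example 2 constructs and trains a new EigenFaceRecognizer for every detected face in every frame, even though Train rebuilds the whole eigenface model each time. A minimal sketch of training once (for instance whenever a face is recorded) and reusing the model inside videoStream; the cached recognizer field is an assumption, while trainingImages, labels and nbRecordedFaces are the fields already used in the example:

        // Hypothetical one-time training step: call it when the recorded faces change,
        // then reuse the cached recognizer inside videoStream instead of retraining
        // per frame and per face.
        private EigenFaceRecognizer recognizer;

        private void TrainRecognizer()
        {
            if (nbRecordedFaces == 0)
            {
                recognizer = null;
                return;
            }

            var images      = new Image<Gray, byte>[nbRecordedFaces];
            var imageLabels = new int[nbRecordedFaces];
            for (int i = 0; i < nbRecordedFaces; i++)
            {
                images[i]      = trainingImages[i];
                imageLabels[i] = labels[i];
            }

            // Same parameters as the example: keep up to 200 eigenfaces,
            // no distance cut-off (PositiveInfinity).
            recognizer = new EigenFaceRecognizer(200, double.PositiveInfinity);
            recognizer.Train(images, imageLabels);
        }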
Example 3
        private void ProcessFrame(object sender, EventArgs e)
        {
            // Step 1: Video Capture
            videoCapture.Retrieve(frame, 0);
            currentFrame = frame.ToImage<Bgr, Byte>().Resize(picCapture.Width, picCapture.Height, Inter.Cubic);

            // Step 2: Face Detection
            if (facesDetectionEnabled)
            {
                // Convert from Bgr to Gray Image
                Mat grayImage = new Mat();
                CvInvoke.CvtColor(currentFrame, grayImage, ColorConversion.Bgr2Gray);
                // Enhance the image to get better result
                CvInvoke.EqualizeHist(grayImage, grayImage);

                Rectangle[] faces = faceCascadeClassifier.DetectMultiScale(grayImage, 1.1, 3, Size.Empty, Size.Empty);
                // If Faces detected
                if (faces.Length > 0)
                {
                    // Draw a square around the face
                    foreach (var face in faces)
                    {
                        CvInvoke.Rectangle(currentFrame, face, new Bgr(Color.Red).MCvScalar, 2);

                        // Step 3: Add Person
                        // Assign the face to the picture Box face picDetected
                        Image<Bgr, byte> resultImage = currentFrame.Convert<Bgr, byte>();
                        resultImage.ROI      = face;
                        picDetected.SizeMode = PictureBoxSizeMode.StretchImage;
                        picDetected.Image    = resultImage.Bitmap;

                        if (EnableSaveImage)
                        {
                            save.setResultImage(resultImage);
                        }

                        // Show the results for the trained images
                        if (isTrained)
                        {
                            Image<Gray, Byte> grayFaceResult = resultImage.Convert<Gray, byte>().Resize(200, 200, Inter.Cubic);
                            //var result = recognizer.Predict(grayFaceResult);
                            EigenFaceRecognizer.PredictionResult res = recognizer.Predict(grayFaceResult);
                            pictureBox3.Image = trainingSizedImages[res.Label].Bitmap;
                            Console.WriteLine(res.Label);
                            Console.WriteLine(res.Distance);
                            // Accept the match only when the distance is below the threshold
                            // (a lower distance means a closer match); 10000 is a tuning value.
                            if (res.Distance < 10000)
                            {
                                CvInvoke.PutText(currentFrame, PersonName[res.Label], new Point(face.X - 2, face.Y - 2),
                                                 FontFace.HersheyComplex, 1.0, new Bgr(Color.Orange).MCvScalar);
                            }
                            else
                            {
                                CvInvoke.PutText(currentFrame, "Unknown", new Point(face.X - 2, face.Y - 2),
                                                 FontFace.HersheyComplex, 1.0, new Bgr(Color.Orange).MCvScalar);
                            }
                        }
                    }
                }
            }
            // Render the video capture into the Picturebox Capture
            picCapture.Image = currentFrame.Bitmap;
        }
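Example 3 assumes the recognizer has already been trained (isTrained) and that PersonName and trainingSizedImages can be indexed by the predicted label. A minimal training sketch consistent with those assumptions; the field types, the "TrainedFaces" folder layout and the recognizer parameters are hypothetical, and it requires System.IO and System.Collections.Generic usings:

        // Hypothetical training step that ProcessFrame relies on. Labels are assigned
        // in the same order that PersonName and trainingSizedImages are filled,
        // so both lists can be indexed by res.Label.
        private List<Image<Gray, byte>> trainingSizedImages = new List<Image<Gray, byte>>();
        private List<string> PersonName = new List<string>();
        private EigenFaceRecognizer recognizer;
        private bool isTrained = false;

        private void TrainRecognizer()
        {
            trainingSizedImages.Clear();
            PersonName.Clear();
            var labelIds = new List<int>();

            // Assumed layout: one .jpg per person in a "TrainedFaces" folder,
            // with the file name used as the person's display name.
            foreach (string file in Directory.GetFiles("TrainedFaces", "*.jpg"))
            {
                trainingSizedImages.Add(new Image<Gray, byte>(file).Resize(200, 200, Inter.Cubic));
                PersonName.Add(Path.GetFileNameWithoutExtension(file));
                labelIds.Add(labelIds.Count);
            }

            if (trainingSizedImages.Count > 0)
            {
                // Same constructor parameters and Train overload as in Example 2.
                recognizer = new EigenFaceRecognizer(200, double.PositiveInfinity);
                recognizer.Train(trainingSizedImages.ToArray(), labelIds.ToArray());
                isTrained = true;
            }
        }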