コード例 #1
0
        /// <summary>
        /// Runs Haar-cascade face detection on <c>smallGrayFrame</c>, saves each detected
        /// face region to disk, queries the external detector (<c>fs</c>) for age/gender
        /// attributes, and finally triggers <c>processingData</c> if anything was found.
        /// </summary>
        public void FaceppThread( )
        {
            facesDetected = smallGrayFrame.DetectHaarCascade(
                haar,
                1.1,    // scale factor
                10,     // min neighbours
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20))[0];
            int NUM = 0;

            for (int i = 0; i < facesDetected.Length; i++)
            {
                MCvAvgComp f = facesDetected[i];
                DETECTED = true;
                this.smallGrayFrame.ROI = f.rect;
                // FIX: dispose the temporary face image instead of leaking its unmanaged buffer.
                using (Image <Gray, Byte> roiImage = new Image <Gray, Byte> (f.rect.Size))
                {
                    this.smallGrayFrame.Copy(roiImage, smallGrayFrame);
                    fsFileName = "DECT.jpg";
                    roiImage.Save(fsFileName);
                }
                // FIX: the ROI was never reset, so it leaked into later iterations and
                // into any later use of smallGrayFrame.
                this.smallGrayFrame.ROI = Rectangle.Empty;
                // FIX: removed a second Detection_DetectImg call whose result was discarded.
                DetectResult res = fs.Detection_DetectImg(System.Environment.CurrentDirectory + "\\" + fsFileName);
                if (res.face.Count > 0)
                {
                    // NOTE(review): no bounds check against the age/gender buffers —
                    // confirm they are sized for the maximum face count.
                    age[NUM]    = res.face[0].attribute.age;
                    gender[NUM] = res.face[0].attribute.gender;
                    NUM++;
                }
            }
            if (DETECTED == true)
            {
                processingData( );
            }
        }
コード例 #2
0
        /// <summary>
        /// Overlays the current detection results on <paramref name="image"/>:
        /// a rectangle per face, the recognized name below it, and the trained
        /// thumbnail (when available) in <c>imageTrainedFace</c>.
        /// </summary>
        /// <param name="image">Frame the annotations are drawn onto.</param>
        private void DrawFaces(Image <Bgr, Byte> image)
        {
            MCvAvgComp[]         Faces  = Sensor.FaceManager.Faces;
            String[]             Names  = Sensor.FaceManager.Names;
            Image <Gray, byte>[] Thumbs = Sensor.FaceManager.Thumbs;
            for (int i = 0; i < Faces.Length; i++)
            {
                MCvAvgComp f = Faces[i];
                // FIX: Rectangle is a value type, so the original "f.rect == null"
                // comparison was always false; skip empty rectangles instead.
                if (f.rect.IsEmpty)
                {
                    continue;
                }
                // Draw the face rectangle.
                image.Draw(f.rect, border, 2);

                // Draw the recognized name just below the face rectangle.
                var rect = new Rectangle(f.rect.X, f.rect.Y + f.rect.Height + 20, f.rect.Width, f.rect.Height);
                DrawText(image, rect, Names[i] ?? "");

                // Show the trained thumbnail, if one exists for this face.
                if (Thumbs[i] != null)
                {
                    this.imageTrainedFace.Source = ToBitmapSource(Thumbs[i]);
                }
            }
        }
コード例 #3
0
        /// <summary>
        /// Searches the upper band of the detected face (3/11 of its height, starting
        /// 3/11 down) for eyes. When the coarse cascade finds at least one eye, a
        /// second (single-eye) cascade is run over the same band and its hits returned.
        /// </summary>
        /// <param name="data">Frame data holding the grayscale frame and the detected face.</param>
        /// <returns>Detections from the single-eye cascade (may be empty).</returns>
        /// <exception cref="NoEyesDetectedException">The coarse pass found no eyes.</exception>
        public MCvAvgComp[] DetectEyes(FrameData data)
        {
            MCvAvgComp face = data.Face;
            Int32      yCoordStartSearchEyes   = face.rect.Top + (face.rect.Height * 3 / 11);
            Point      startingPointSearchEyes = new Point(face.rect.X, yCoordStartSearchEyes);
            Size       searchEyesAreaSize      = new Size(face.rect.Width, (face.rect.Height * 3 / 11));
            Rectangle  possibleROI_eyes        = new Rectangle(startingPointSearchEyes, searchEyesAreaSize);

            // Coarse pass: is there any eye inside the search band?
            data.GrayFrame.ROI = possibleROI_eyes;
            MCvAvgComp[] eyesDetected = _eyes.Detect(data.GrayFrame, 1.15, 3, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_ROUGH_SEARCH, new Size(20, 20));
            data.GrayFrame.ROI = Rectangle.Empty;

            if (eyesDetected.Length != 0)
            {
                // FIX: removed dead code that offset eyesDetected[0].rect and assigned
                // it to GrayFrame.ROI, only to overwrite the ROI on the very next line.
                data.GrayFrame.ROI = possibleROI_eyes;
                data.EyesROI       = possibleROI_eyes;
                MCvAvgComp[] singleEyesDetected = _singleEyes.Detect(data.GrayFrame, 1.5, 3, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_ROUGH_SEARCH, new Size(20, 20));
                data.GrayFrame.ROI = Rectangle.Empty;

                return(singleEyesDetected);
            }
            throw new NoEyesDetectedException();
        }
コード例 #4
0
        /// <summary>
        /// Searches the lower part of the detected face (the bottom 4/11 of its
        /// height, starting 7/11 down) for a mouth and returns the first hit.
        /// On success the frame ROI is left set to the detected mouth rectangle.
        /// </summary>
        /// <param name="data">Frame data holding the grayscale frame and the detected face.</param>
        /// <returns>The first mouth detection with a non-degenerate rectangle.</returns>
        /// <exception cref="NoMouthDetectedException">No usable mouth was found.</exception>
        public MCvAvgComp DetectMouth(FrameData data)
        {
            MCvAvgComp face = data.Face;
            // Mouth search window: full face width, starting 7/11 down the face.
            int       mouthTop        = face.rect.Top + (face.rect.Height * 7 / 11);
            Point     searchOrigin    = new Point(face.rect.X, mouthTop);
            Size      searchSize      = new Size(face.rect.Width, (face.rect.Height * 4 / 11));
            Rectangle mouthSearchArea = new Rectangle(searchOrigin, searchSize);

            data.GrayFrame.ROI = mouthSearchArea;
            data.MouthROI      = mouthSearchArea;
            MCvAvgComp[] hits = _mouth.Detect(data.GrayFrame, 1.15, 3, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_ROUGH_SEARCH, new Size(30, 20));
            data.GrayFrame.ROI = Rectangle.Empty;

            if (hits.Length > 0 && hits[0].rect.Height != 0 && hits[0].rect.Width != 0)
            {
                // Map the ROI-relative detection back to full-frame coordinates.
                Rectangle mouthRect = hits[0].rect;
                mouthRect.Offset(mouthSearchArea.X, mouthSearchArea.Y);
                data.GrayFrame.ROI = mouthRect;
                return(hits[0]);
            }
            throw new NoMouthDetectedException();
        }
コード例 #5
0
        /// <summary>
        /// Detects the first face in <paramref name="Image"/>, stores a normalized
        /// 100x100 grayscale crop in <c>result</c>, and returns the frame with the
        /// face outlined in white.
        /// </summary>
        /// <param name="Image">Input image (GDI+), converted internally to Bgr.</param>
        /// <returns>The frame, with any detected face drawn on it.</returns>
        public Image <Bgr, Byte> FaceDetection(Image Image)
        {
            // NOTE(review): the cascade is reloaded from disk on every call — consider
            // caching it once in the field; confirm lifecycle before changing.
            face = new HaarCascade("haarcascade_frontalface_default.xml");
            Utility UTl = new Utility();

            //Get the current frame form capture device
            Image <Bgr, Byte> currentFrame = UTl.ImageToBgrByte(Image);

            //Convert it to Grayscale
            gray = currentFrame.Convert <Gray, Byte>();

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(face, 1.2, 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));

            try
            {
                // FIX: the original indexed facesDetected[0][0] unconditionally and
                // relied on IndexOutOfRangeException as its "no face" path, popping a
                // misleading "Camera Error" dialog; guard the access instead.
                if (facesDetected.Length > 0 && facesDetected[0].Length > 0)
                {
                    MCvAvgComp f = facesDetected[0][0];

                    result = currentFrame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                    //draw the face detected in the 0th (gray) channel with blue color
                    currentFrame.Draw(f.rect, new Bgr(Color.White), 2);
                }
            }
            catch (Exception ex)
            {
                // Genuine capture failures (e.g. empty frames from the device).
                MessageBox.Show("Camera Error: Empty frames arrived" + ex.Message.ToString(), "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
            }
            return(currentFrame);
        }
コード例 #6
0
        /// <summary>
        /// Returns the pair of ROIs that are most "aligned", i.e. have the smallest
        /// difference in their Y coordinates, considering only pairs that do not
        /// intersect and are horizontally far apart (more than 2.5 widths).
        /// </summary>
        /// <param name="ROIs">Candidate regions of interest.</param>
        /// <returns>A two-element array with the best-aligned pair.</returns>
        private MCvAvgComp[] AlignedROIs(MCvAvgComp[] ROIs)
        {
            int count = ROIs.Length;
            // Every pairwise distance starts at a large sentinel so that unqualified
            // pairs never win the minimum search.
            Matrix <int> yDistances = new Matrix <int>(count, count).Add(100000);

            for (int row = 0; row < count; row++)
            {
                for (int col = row + 1; col < count; col++)
                {
                    // Only pairs that do not intersect and are at least 2.5 widths
                    // apart horizontally are eligible.
                    bool farApart = ROIs[col].rect.IntersectsWith(ROIs[row].rect) == false &&
                                    Math.Abs(ROIs[col].rect.X - ROIs[row].rect.X) > ROIs[col].rect.Width * 2.5;
                    if (farApart)
                    {
                        yDistances[row, col] = Math.Abs(ROIs[col].rect.Y - ROIs[row].rect.Y);
                    }
                }
            }

            // Locate the cell with the smallest Y distance; its column/row index the
            // two chosen ROIs.
            double minimum;
            double maximum;
            Point  minimumLocation;
            Point  maximumLocation;
            yDistances.MinMax(out minimum, out maximum, out minimumLocation, out maximumLocation);

            return(new MCvAvgComp[] { ROIs[minimumLocation.X], ROIs[minimumLocation.Y] });
        }
コード例 #7
0
ファイル: Form1.cs プロジェクト: catbox56790/ChromePlusRecord
        /// <summary>
        /// Per-frame handler: detects the biggest face in the captured frame and
        /// nudges the mouse cursor in the direction of the face's offset from the
        /// image centre. A central "stable area" suppresses small movements.
        /// </summary>
        public void ProcessImage(object sender, EventArgs e)
        {
            Image <Bgr, Byte>  frame     = _capture.QueryFrame();
            Image <Gray, Byte> grayImage = frame.Convert <Gray, Byte>();

            // Equalize to make detection more robust to lighting changes.
            grayImage._EqualizeHist();

            System.Drawing.Rectangle imageArea = grayImage.ROI;

            // Central 20% x 20% region in which face movement is ignored.
            System.Drawing.Rectangle mouseStableArea =
                new System.Drawing.Rectangle((int)(imageArea.Width * 0.4), (int)(imageArea.Height * 0.4), (int)(imageArea.Width * 0.2), (int)(imageArea.Height * 0.2));

            //draw the stable area where the face will not trigger a movement;
            frame.Draw(mouseStableArea, new Bgr(255, 0, 0), 1);

            MCvAvgComp[] faces = grayImage.DetectHaarCascade(_face)[0];
            if (faces.Length > 0)
            { //if there is at least one face
                #region find the biggest face
                MCvAvgComp biggestFace = faces[0];
                for (int i = 1; i < faces.Length; i++)
                {
                    // Compare faces by rectangle area.
                    if (faces[i].rect.Width * faces[i].rect.Height > biggestFace.rect.Width * biggestFace.rect.Height)
                    {
                        biggestFace = faces[i];
                    }
                }
                #endregion

                //draw a yellow rectangle around the face
                frame.Draw(biggestFace.rect, new Bgr(255, 255, 0.0), 1);

                Point biggestFaceCenter = new Point(biggestFace.rect.X + biggestFace.rect.Width / 2, biggestFace.rect.Y + biggestFace.rect.Height / 2);
                Point imageAreaCenter   = new Point(imageArea.X + imageArea.Width / 2, imageArea.Y + imageArea.Height / 2);
                //draw a green cross at the center of the biggest face
                frame.Draw(
                    new Cross2DF(biggestFaceCenter, biggestFace.rect.Width * 0.1f, biggestFace.rect.Height * 0.1f),
                    new Bgr(0, 255, 0), 1);

                if (!mouseStableArea.Contains(biggestFaceCenter))
                { //the point is far enough from the center to triger a movement
                    //horizontal fraction is a value in [-0.5, 0.5] where
                    //-0.5 refer to the far left and
                    //0.5 refer to the far right
                    double horizontalFraction = (double)(biggestFaceCenter.X - imageAreaCenter.X) / imageArea.Width;
                    //do the same for vertical fraction
                    double verticalFraction = (double)(biggestFaceCenter.Y - imageAreaCenter.Y) / imageArea.Height;

                    Rectangle            rect          = System.Windows.Forms.Screen.PrimaryScreen.Bounds;
                    int                  maxMouseSpeed = rect.Width / 20;
                    System.Drawing.Point p;
                    // Move the cursor proportionally to the face offset, clamped to
                    // the primary screen bounds (Win32 Get/SetCursorPos interop).
                    GetCursorPos(out p);
                    p.X = Math.Min(Math.Max(0, p.X + (int)((maxMouseSpeed / 2) * horizontalFraction)), rect.Width);
                    p.Y = Math.Min(Math.Max(0, p.Y + (int)((maxMouseSpeed / 2) * verticalFraction)), rect.Height);
                    SetCursorPos(p.X, p.Y);
                }
            }

            imageBox1.Image = frame;
        }
コード例 #8
0
        /// <summary>
        /// Per-frame pipeline: grab a frame, detect face, mouth and eyes, update the
        /// head-rotation estimate and notify listeners. Frames where the pipeline did
        /// not complete are forwarded flagged as empty.
        /// </summary>
        void Process(object sender, EventArgs e)
        {
            FrameData data      = CaptureFrame();
            Boolean   processed = false;

            if (data != null)
            {
                data.Epoch = currentEpoch++;
                try
                {
                    MCvAvgComp face = detectors.DetectFace(data);
                    data.Face = face;
                    MCvAvgComp mouth = detectors.DetectMouth(data);
                    data.Mouth = mouth;
                    MCvAvgComp[] eyes = detectors.DetectEyes(data);
                    data.Eyes      = eyes;
                    data.EyesCount = eyes.Length;
                    if (data.EyesCount == 2)
                    {
                        framesWithoutEyes = 0;
                        bool rotated = UpdateRotation(data.Eyes);
                        data.Rotated  = rotated;
                        data.Rotation = rotateAngle;
                        EventListeners(data);
                        processed = true;
                    }
                    else if (data.EyesCount > 2)
                    {
                        // FIX: the message claimed "more than three" for any count above two.
                        Console.WriteLine("More than two eyes detected. Possible errors");
                    }
                    else
                    {
                        // Not enough eyes: after a streak of misses, reset the rotation estimate.
                        framesWithoutEyes++;
                        if (framesWithoutEyes > FRAMES_TO_RESET_ROTATION)
                        {
                            framesWithoutEyes = 0;
                            this.rotateAngle  = 0.0;
                        }
                    }
                }
                // Detection misses are expected per-frame; fall through to the
                // "empty frame" notification below.
                catch (NoFaceDetectedException)
                {
                }
                catch (NoEyesDetectedException)
                {
                }
                catch (NoMouthDetectedException)
                {
                }
            }
            // FIX: the original dereferenced data unconditionally here, throwing
            // NullReferenceException whenever CaptureFrame() returned null.
            if (data != null && !processed)
            {
                data.Empty = true;
                EventListeners(data);
            }
        }
コード例 #9
0
        /// <summary>
        /// Adds a 50x50 clickable thumbnail of the detected face to the flow panel.
        /// The face rectangle and name are stashed in the Tag for the click handler.
        /// </summary>
        /// <param name="result">Frame the face was detected in.</param>
        /// <param name="f">Detection whose rectangle is cropped for the thumbnail.</param>
        /// <param name="name">Name associated with the face.</param>
        private void addToFlow(Image <Bgr, byte> result, MCvAvgComp f, string name)
        {
            var faceBounds = new int[] { f.rect.X, f.rect.Y, f.rect.Width, f.rect.Height };
            var pbox = new PictureBox
            {
                Tag              = new object[] { faceBounds, name },
                ContextMenuStrip = contextMenuStrip1,
                SizeMode         = PictureBoxSizeMode.StretchImage,
                Height           = 50,
                Width            = 50,
                Cursor           = Cursors.Hand,
                Image            = result.Copy(f.rect).Bitmap
            };
            pbox.Click += new EventHandler(pbox_Click);
            flowLayoutPanel1.Controls.Add(pbox);
        }
コード例 #10
0
ファイル: FaceDetector.cs プロジェクト: dsp56001/Facesketball
        /// <summary>
        /// Wraps a Haar detection result as a game-space controller: position is the
        /// top-left corner of the detected rectangle and scale is its width.
        /// </summary>
        /// <param name="f">Haar detection to wrap.</param>
        /// <param name="confidence">Raw confidence, cast to ConfidenceAmount.</param>
        public FaceController(MCvAvgComp f, int confidence)
        {
            // Position is the top-left of the detection (not its centre).
            this.location.X = f.rect.Location.X;
            this.location.Y = f.rect.Location.Y;

            this.Rect = new Microsoft.Xna.Framework.Rectangle(
                (int)this.Location.X,
                (int)this.Location.Y,
                f.rect.Width,
                f.rect.Height);

            this.Confidence = (ConfidenceAmount)confidence;
            // rect.Right - rect.Left is by definition the rectangle's width.
            this.Scale      = f.rect.Width;
        }
コード例 #11
0
 /// <summary>
 /// Detects a face in the grayscale frame using a biggest-object search and
 /// returns the first detection.
 /// </summary>
 /// <param name="data">Frame data holding the grayscale frame.</param>
 /// <returns>The first detected face.</returns>
 /// <exception cref="NoFaceDetectedException">No face was found.</exception>
 public MCvAvgComp DetectFace(FrameData data)
 {
     MCvAvgComp[] facesDetected = _faces.Detect(data.GrayFrame, 1.05, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT, new Size(20, 20));
     if (facesDetected.Length == 0)
     {
         throw new NoFaceDetectedException();
     }
     if (facesDetected.Length > 1)
     {
         // FIX: the message claimed "more than two faces" for any count above one.
         Console.WriteLine("HaarDetectors.DetectFace: more than one face detected. Possible errors");
     }
     return(facesDetected[0]);
 }
コード例 #12
0
        /// <summary>
        /// Detects the first face in <paramref name="frame"/> and either trains the
        /// recognizer with it (when <paramref name="newLabel"/> is given) or tries to
        /// recognize it against the configured faces.
        /// </summary>
        /// <param name="frame">Colour frame to scan.</param>
        /// <param name="newLabel">Optional label to train the detected face with.</param>
        /// <returns>The detection result, or null when no face was found.</returns>
        public DetectedFace DetectFirstFace(Image <Bgr, Byte> frame, string newLabel = null)
        {
            var result = new DetectedFace();

            MCvAvgComp[][] facesDetected;
            // FIX: dispose the grayscale copy even when detection throws.
            using (var gray = frame.Convert <Gray, Byte>())
            {
                facesDetected = gray.DetectHaarCascade(
                    _face,
                    1.2,
                    10,
                    HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                    new Size(20, 20));
            }

            if (!facesDetected[0].Any())
            {
                return(null);
            }

            MCvAvgComp faceInfo = facesDetected[0].First();

            result.FaceInfo = faceInfo;

            // Normalized 100x100 grayscale crop used for training/recognition.
            var faceImage = frame.Copy(faceInfo.rect)
                            .Convert <Gray, byte>()
                            .Resize(100, 100,
                                    INTER.CV_INTER_CUBIC);

            if (!String.IsNullOrEmpty(newLabel))
            {
                // Training path: faceImage is handed over to the recognizer and the
                // repository, so it is intentionally not disposed here.
                _recognizerProvider.AddNewLabel(newLabel, faceImage);
                _rep.Save(newLabel, faceImage);
                result.Label = newLabel;
                return(result);
            }

            if (_recognizerProvider.HasConfiguredFaces())
            {
                result.Label = _recognizerProvider.GetRecognizer()
                               .Recognize(faceImage);
            }
            else
            {
                result.Label = UnknownLabel;
            }
            faceImage.Dispose();
            return(result);
        }
コード例 #13
0
        // The actual work run on the background worker thread: detect a face in the
        // supplied grayscale frame and derive candidate left/right eye search areas.
        void worker_DoWork(object sender, DoWorkEventArgs e)
        {
            Image <Gray, Byte> grayFrame = (Image <Gray, Byte>)e.Argument;

            // Run the Haar cascade face detector (biggest object only).
            MCvAvgComp[][] facesDetected = grayFrame.DetectHaarCascade(_faces, 1.1, 0, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT, new Size(20, 20));
            if (facesDetected[0].Length != 0)
            {
                face = facesDetected[0][0];

                #region 얼굴 인식한것을 토대로 눈 찾기
                // (Locate the eyes based on the detected face.)
                // The eye band is assumed to start 3/11 down the face rectangle.
                Int32 yCoordStartSearchEyes = face.rect.Top + (face.rect.Height * 3 / 11);
                System.Drawing.Point startingPointSearchEyes = new System.Drawing.Point(face.rect.X, yCoordStartSearchEyes);
                System.Drawing.Point endingPointSearchEyes   = new System.Drawing.Point((face.rect.X + face.rect.Width), yCoordStartSearchEyes);

                // The search band is 2/9 of the face height; each eye gets half the width.
                Size searchEyesAreaSize = new Size(face.rect.Width, (face.rect.Height * 2 / 9));
                Size eyeAreaSize        = new Size(face.rect.Width / 2, (face.rect.Height * 2 / 9));
                System.Drawing.Point lowerEyesPointOptimized       = new System.Drawing.Point(face.rect.X, yCoordStartSearchEyes + searchEyesAreaSize.Height);
                System.Drawing.Point startingLeftEyePointOptimized = new System.Drawing.Point(face.rect.X + face.rect.Width / 2, yCoordStartSearchEyes);

                // NOTE(review): the pixel offsets (25/10/5/33/20) look like empirically
                // tuned margins — confirm before changing.
                Rectangle leftEyeArea = new Rectangle(new System.Drawing.Point(startingPointSearchEyes.X + 25, startingPointSearchEyes.Y + 10),
                                                      new Size(eyeAreaSize.Width - 25, eyeAreaSize.Height - 20));
                Rectangle rightEyeArea = new Rectangle(new System.Drawing.Point(startingLeftEyePointOptimized.X + 5, startingLeftEyePointOptimized.Y + 10),
                                                       new Size(eyeAreaSize.Width - 33, eyeAreaSize.Height - 20));
                #endregion

                //#region (drawing of the eye search area, kept for reference)
                //a = new LineSegment2D(startingPointSearchEyes, endingPointSearchEyes);
                //b = new LineSegment2D(new System.Drawing.Point(lowerEyesPointOptimized.X, lowerEyesPointOptimized.Y),
                //                      new System.Drawing.Point((lowerEyesPointOptimized.X + face.rect.Width), (yCoordStartSearchEyes + searchEyesAreaSize.Height)));
                //d = new Bgr(Color.Chocolate);

                //frame.Draw(a, d, 3);
                //frame.Draw(b, d, 3);
                //#endregion

                #region 눈 영역 검출한 Rectangle의 크기가 양수일 경우에만 눈 영역 적출하기
                // (Only accept eye areas whose width and height are positive.)
                if (leftEyeArea.Width > 0 && leftEyeArea.Height > 0 && rightEyeArea.Width > 0 && rightEyeArea.Height > 0)
                {
                    possibleROI_leftEye  = leftEyeArea;
                    possibleROI_rightEye = rightEyeArea;
                }
                #endregion
            } // end: face detected
        }
コード例 #14
0
ファイル: Eyestracker.cs プロジェクト: odins1970/Ogama
        /// <summary>
        /// Establishes a region of interest around the eyes by detecting a single
        /// face in the input frame and widening its rectangle.
        /// </summary>
        /// <param name="input">Grayscale frame to scan.</param>
        /// <param name="trackData">Receives the computed eyes ROI on success.</param>
        /// <returns>True when a usable eyes region was found.</returns>
        private bool DoEyesRegionExtraction(Image <Gray, Byte> input, TrackData trackData)
        {
            // We assume there's only one face in the video
            MCvAvgComp[][] facesDetected = input.DetectHaarCascade(
                haarCascade,
                Settings.Instance.Eyestracker.ScaleFactor,
                2, //Min. neighbours, higher value reduces false detection
                HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT,
                Settings.Instance.Eyestracker.SizeMin);

            if (facesDetected[0].Length == 1)
            {
                MCvAvgComp face = facesDetected[0][0];

                if (face.rect.X != 0 && face.rect.Width != 0)
                {
                    // Faces shorter than 100 px are considered unreliable.
                    if (face.rect.Height < 100)
                    {
                        return(false);
                    }

                    roiEyes = face.rect;
                    // Add some margin around the detected face rectangle.
                    //roiEyes.Y = Convert.ToInt32(roiEyes.Y * 0.90);
                    roiEyes.X         = Convert.ToInt32(roiEyes.X * 0.85);
                    roiEyes.Height    = Convert.ToInt32(roiEyes.Height * 1.2);
                    roiEyes.Width     = Convert.ToInt32(roiEyes.Width * 1.4);
                    foundEyes         = true;
                    trackData.EyesROI = roiEyes;
                }
                // NOTE(review): when exactly one face is found but its rect fails the
                // X/Width check above, foundEyes and roiEyes keep their previous
                // (possibly stale) values — confirm this is intended.
            }
            else
            {
                foundEyes = false;
                roiEyes   = new Rectangle(new Point(0, 0), new Size(0, 0));
            }

            Performance.Now.Stamp("Eyes X:" + roiEyes.X + " Y:" + roiEyes.Y + " W:" + roiEyes.Width + " H:" +
                                  roiEyes.Height);

            return(foundEyes);
        }
コード例 #15
0
        /// <summary>
        /// Looks for exactly one face in <paramref name="currentFrame"/> and, when
        /// found, stores an equalized 100x100 grayscale crop in
        /// <c>currentDetectedFace</c> (null otherwise). <c>nCurrentlyDetectedFaces</c>
        /// is updated with the raw detection count.
        /// </summary>
        /// <param name="currentFrame">Colour frame to scan.</param>
        private void detectFacesInFrame(Image <Bgr, byte> currentFrame)
        {
            currentDetectedFace = null;
            // FIX: dispose the per-call grayscale frame; it was leaking unmanaged
            // image memory on every processed frame.
            using (Image <Gray, byte> grayFrame = currentFrame.Convert <Gray, Byte>())
            {
                MCvAvgComp[][] facesDetected = grayFrame.DetectHaarCascade(
                    FaceCascade, 1.2, 10,
                    Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                    new System.Drawing.Size(20, 20));

                nCurrentlyDetectedFaces = facesDetected[0].Length;

                // Only process when there is exactly one face.
                if (nCurrentlyDetectedFaces == 1)
                {
                    MCvAvgComp faceFound = facesDetected[0][0];
                    currentDetectedFace = currentFrame.Copy(faceFound.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                    currentDetectedFace._EqualizeHist();
                }
            }
        }
コード例 #16
0
        // Captures the currently detected face and stores it in the database.
        private void capture(string name, string phone, string email, DateTime dob)
        {
            // Grab the current camera frame, resized to the preview box.
            gray = grabber.QueryGrayFrame().Resize(imageBoxFrameGrabber.Width, imageBoxFrameGrabber.Height, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            // Detect faces in the grayscale frame.
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                face,
                1.2,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));

            // If any face was detected...
            if (facesDetected[0].Length > 0)
            {
                // ...take the first detection and crop it from the colour frame.
                MCvAvgComp f = facesDetected[0][0];
                TrainedFace = currentFrame.Copy(f.rect).Convert <Gray, byte>();

                // Resize to the standard comparison size.
                // NOTE(review): this overwrites the crop above with a resize of the
                // `result` field, so the crop is discarded — looks like it should be
                // TrainedFace.Resize(...). Confirm what `result` holds at this point.
                TrainedFace = result.Resize(Constants.DETECTED_FRAME_WIDTH, Constants.DETECTED_FRAME_HEIGHT, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

                // Build the Face entity.
                Face newFace = new Face(name, phone, email, dob, TrainedFace);
                try
                {
                    // Persist to the database.
                    SQLServerHelper.getInstance().insertFace(newFace);

                    // Add the face thumbnail to the training panel.
                    lstFaces.Controls.Add(CreatePanel(TrainedFace));
                }
                catch (Exception ex)
                {
                    MessageBox.Show("Error:" + ex.Message, "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
                }
            }
        }
コード例 #17
0
        /// <summary>
        /// Runs the recognizer over every detected face, filling <c>Thumbs</c> with
        /// 100x100 grayscale crops and <c>Names</c> with the recognized labels.
        /// </summary>
        /// <returns>The refreshed <c>Names</c> array, or null when not initialized.</returns>
        public String[] Recognize()
        {
            if (null == Faces || null == Gray || null == recognizer || initEigen)
            {
                return(null);
            }
            Array.Clear(Names, 0, Names.Length);

            for (int i = 0; i < Faces.Length; i++)
            {
                MCvAvgComp f = Faces[i];
                // FIX: Rectangle is a value type, so "null == f.rect" was always
                // false; skip degenerate (empty) rectangles instead.
                if (f.rect.IsEmpty)
                {
                    continue;
                }

                // Build a thumbnail
                Thumbs[i] = Gray.Copy(f.rect).Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

                // Recognize
                Names[i] = recognizer.Recognize(Thumbs[i]);
            }
            return(Names);
        }
コード例 #18
0
        /// <summary>
        /// Detects an eye-pair region in the image, then refines the individual
        /// left/right eye rectangles with the dedicated left/right cascades. Results
        /// are stored in m_rcBestLeftEye, m_rcBestRightEye and m_rcBestEyeDetected,
        /// expressed in the coordinate space of the image's original ROI.
        /// </summary>
        /// <param name="image">Colour image to scan; its ROI is temporarily changed and restored.</param>
        /// <exception cref="ArgumentNullException">m_harrDetector was not initialized.</exception>
        public void DetectObj(Emgu.CV.Image <Emgu.CV.Structure.Bgr, byte> image)
        {
            if (m_harrDetector == null)
            {
                throw new ArgumentNullException("HaarCascade", "haarCascade not initilized.");
            }

            MCvAvgComp[][] EyeDetected      = null;
            MCvAvgComp[][] LeftEyeDetected  = null;
            MCvAvgComp[][] RightEyeDetected = null;

            // Stage 1: detect the eye-pair region with the main cascade.
            EyeDetected = image.DetectHaarCascade(
                m_harrDetector,
                1.1,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                m_MinEyeSize);

            // NOTE(review): EyeDetected.Length counts result groups, not detections;
            // this condition was probably meant to be EyeDetected[0].Length > 0 —
            // confirm against the Emgu version in use.
            if (EyeDetected.Length > 1)
            {
                // Remember the caller's ROI so it can be restored at the end.
                Rectangle rcROI = image.ROI;

                MCvAvgComp eyepair = EyeDetected[0][0];

                // Grow the detected pair region by 50% to give the per-eye cascades margin.
                int width  = Convert.ToInt32(eyepair.rect.Width * 1.5);
                int height = Convert.ToInt32(eyepair.rect.Height * 1.5);

                Rectangle rectEye = new Rectangle(eyepair.rect.X - 10,
                                                  eyepair.rect.Y - 10, width, height);
                Rectangle rectLeftEye = new Rectangle(eyepair.rect.X - 10,
                                                      eyepair.rect.Y - 10, width, height);
                Rectangle rectRightEye = new Rectangle(eyepair.rect.X - 10 + width / 2,
                                                       eyepair.rect.Y - 10, width, height);

                Rectangle rcLeft  = Rectangle.Empty;
                Rectangle rcRight = Rectangle.Empty;

                // Detection coordinates are relative to the current ROI; shift the
                // search rectangles into absolute image space.
                if (rcROI != Rectangle.Empty)
                {
                    rectEye.X += rcROI.X;
                    rectEye.Y += rcROI.Y;

                    rectLeftEye.X += rcROI.X;
                    rectLeftEye.Y += rcROI.Y;

                    rectRightEye.X += rcROI.X;
                    rectRightEye.Y += rcROI.Y;
                }
                image.ROI = rectLeftEye;

                // Stage 2a: run the left-eye cascade inside the widened region.
                if (m_LeftEyeDetector != null)
                {
                    LeftEyeDetected = image.DetectHaarCascade(
                        m_LeftEyeDetector,
                        1.1,
                        10,
                        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                        m_MinEyeSize);

                    //Project into no ROI space
                    for (int i = 0; i < LeftEyeDetected[0].Length; i++)
                    {
                        LeftEyeDetected[0][i].rect.X += (rectLeftEye.X);
                        LeftEyeDetected[0][i].rect.Y += (rectLeftEye.Y);
                    }
                }

                image.ROI = rectEye;
                // NOTE(review): LeftEyeDetected is dereferenced here even when
                // m_LeftEyeDetector was null above, which would throw
                // NullReferenceException — confirm m_LeftEyeDetector is always set.
                if (LeftEyeDetected[0].Length > 1)
                {// if already detected more than one eyes, then both eye should already be detected
                    // Leftmost hit becomes the left eye, rightmost the right eye.
                    foreach (MCvAvgComp eye in LeftEyeDetected[0])
                    {
                        if (rcLeft == Rectangle.Empty || rcLeft.X > eye.rect.X)
                        {
                            rcLeft = eye.rect;
                        }
                        if (rcRight == Rectangle.Empty || rcRight.X < eye.rect.X)
                        {
                            rcRight = eye.rect;
                        }
                    }
                }
                else if (m_RightEyeDetector != null)
                {
                    // Stage 2b: fall back to the right-eye cascade over the pair region.
                    RightEyeDetected = image.DetectHaarCascade(
                        m_RightEyeDetector,
                        1.1,
                        10,
                        Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                        m_MinEyeSize);
                    //Project into no ROI space
                    for (int i = 0; i < RightEyeDetected[0].Length; i++)
                    {
                        RightEyeDetected[0][i].rect.X += rectEye.X;
                        RightEyeDetected[0][i].rect.Y += rectEye.Y;
                    }
                    // we only need the right eye
                    rcRight = Rectangle.Empty;
                    foreach (MCvAvgComp eye in RightEyeDetected[0])
                    {
                        if (rcRight == Rectangle.Empty || rcRight.X < eye.rect.X)
                        {
                            rcRight = eye.rect;
                        }
                    }
                }
                m_rcBestLeftEye     = rcLeft;
                m_rcBestRightEye    = rcRight;
                m_rcBestEyeDetected = EyeDetected[0][0].rect;

                //Project into the original ROI
                if (rcROI != Rectangle.Empty)
                {
                    m_rcBestLeftEye.X -= rcROI.X;
                    m_rcBestLeftEye.Y -= rcROI.Y;

                    m_rcBestRightEye.X -= rcROI.X;
                    m_rcBestRightEye.Y -= rcROI.Y;
                }
                image.ROI = rcROI;
            }
        }
コード例 #19
0
ファイル: ComputerVision.cs プロジェクト: Fedejg/SelfieLogin
 /// <summary>
 /// Draws a green rectangle around the detected face on the current frame.
 /// </summary>
 /// <param name="face">Detection whose rectangle is highlighted.</param>
 public void DibujarRecognitionRectangle(MCvAvgComp face)
 {
     Bgr highlight = new Bgr(Color.Green);
     _ActualFrame.Draw(face.rect, highlight, 2);
 }
コード例 #20
0
        /// <summary>
        /// Detects eyes inside the given face rectangle and draws a red box around
        /// each hit on the colour image, mapped back to full-frame coordinates.
        /// </summary>
        /// <param name="image">Colour image the boxes are drawn onto.</param>
        /// <param name="gray">Grayscale image used for detection; its ROI is restored.</param>
        /// <param name="f">Face detection limiting the search area.</param>
        /// <param name="eye">Eye cascade to run.</param>
        private static void DetectAndDrawEyes(Image <Bgr, byte> image, Image <Gray, byte> gray, MCvAvgComp f, HaarCascade eye)
        {
            // Restrict the search to the face region, then restore the full frame.
            gray.ROI = f.rect;
            MCvAvgComp[][] hits = gray.DetectHaarCascade(
                eye,
                1.1,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));
            gray.ROI = Rectangle.Empty;

            foreach (MCvAvgComp detectedEye in hits[0])
            {
                // Detection coordinates are relative to the ROI; shift them back.
                Rectangle box = detectedEye.rect;
                box.Offset(f.rect.X, f.rect.Y);
                image.Draw(box, new Bgr(Color.Red), 2);
            }
        }
コード例 #21
0
        /// <summary>
        /// Per-frame capture handler: detects a single face, estimates left
        /// and right eye search regions from face metrics, confirms the face
        /// by finding an eye in each region, then marks pupil candidates with
        /// Hough circles.
        /// </summary>
        /// <remarks>
        /// NOTE(review): the Emgu images created each frame are never
        /// disposed; they wrap native memory, so leaving cleanup to the GC
        /// may grow memory under load — verify.
        /// </remarks>
        void FrameGrabber(object sender, EventArgs e)
        {
            //We are acquiring a new frame
            Image <Bgr, Byte> frame = _capture.QueryFrame();
            //We convert it to grayscale
            Image <Gray, Byte> grayFrame = frame.Convert <Gray, Byte>();

            //Equalization step
            grayFrame._EqualizeHist();

            // We assume there's only one face in the video
            MCvAvgComp[][] facesDetected = grayFrame.DetectHaarCascade(_faces, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT, new Size(20, 20));

            if (facesDetected[0].Length == 1)
            {
                MCvAvgComp face = facesDetected[0][0];
                //Set the region of interest on the faces

                #region Luca Del Tongo Search Roi based on Face Metric Estimation --- based on empirical measuraments on a couple of photos ---  a really trivial heuristic

                // Our Region of interest where find eyes will start with a sample estimation using face metric
                // (eyes assumed to start ~3/11 down the face; each search band is ~2/9 of the face height)
                Int32 yCoordStartSearchEyes   = face.rect.Top + (face.rect.Height * 3 / 11);
                Point startingPointSearchEyes = new Point(face.rect.X, yCoordStartSearchEyes);
                Point endingPointSearchEyes   = new Point((face.rect.X + face.rect.Width), yCoordStartSearchEyes);

                Size  searchEyesAreaSize            = new Size(face.rect.Width, (face.rect.Height * 2 / 9));
                Point lowerEyesPointOptimized       = new Point(face.rect.X, yCoordStartSearchEyes + searchEyesAreaSize.Height);
                Size  eyeAreaSize                   = new Size(face.rect.Width / 2, (face.rect.Height * 2 / 9));
                Point startingLeftEyePointOptimized = new Point(face.rect.X + face.rect.Width / 2, yCoordStartSearchEyes);

                Rectangle possibleROI_eyes     = new Rectangle(startingPointSearchEyes, searchEyesAreaSize);
                Rectangle possibleROI_rightEye = new Rectangle(startingPointSearchEyes, eyeAreaSize);
                Rectangle possibleROI_leftEye  = new Rectangle(startingLeftEyePointOptimized, eyeAreaSize);

                #endregion

                #region Drawing Utilities
                // Let's draw our search area, first the upper line
                frame.Draw(new LineSegment2D(startingPointSearchEyes, endingPointSearchEyes), new Bgr(Color.White), 3);
                // draw the bottom line
                frame.Draw(new LineSegment2D(lowerEyesPointOptimized, new Point((lowerEyesPointOptimized.X + face.rect.Width), (yCoordStartSearchEyes + searchEyesAreaSize.Height))), new Bgr(Color.White), 3);
                // draw the eyes search vertical line
                frame.Draw(new LineSegment2D(startingLeftEyePointOptimized, new Point(startingLeftEyePointOptimized.X, (yCoordStartSearchEyes + searchEyesAreaSize.Height))), new Bgr(Color.White), 3);

                MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.6d, 0.6d);
                frame.Draw("Search Eyes Area", ref font, new Point((startingLeftEyePointOptimized.X - 80), (yCoordStartSearchEyes + searchEyesAreaSize.Height + 15)), new Bgr(Color.Yellow));
                frame.Draw("Right Eye Area", ref font, new Point(startingPointSearchEyes.X, startingPointSearchEyes.Y - 10), new Bgr(Color.Yellow));
                frame.Draw("Left Eye Area", ref font, new Point(startingLeftEyePointOptimized.X + searchEyesAreaSize.Height / 2, startingPointSearchEyes.Y - 10), new Bgr(Color.Yellow));
                #endregion

                // Eye cascade inside each candidate region; detections are
                // ROI-relative, so the ROI is reset after each pass.
                grayFrame.ROI = possibleROI_leftEye;
                MCvAvgComp[][] leftEyesDetected = grayFrame.DetectHaarCascade(_eyes, 1.15, 0, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
                grayFrame.ROI = Rectangle.Empty;

                grayFrame.ROI = possibleROI_rightEye;
                MCvAvgComp[][] rightEyesDetected = grayFrame.DetectHaarCascade(_eyes, 1.15, 0, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
                grayFrame.ROI = Rectangle.Empty;

                //If we are able to find eyes inside the possible face, it should be a face, maybe we find also a couple of eyes
                if (leftEyesDetected[0].Length != 0 && rightEyesDetected[0].Length != 0)
                {
                    //draw the face
                    frame.Draw(face.rect, new Bgr(Color.Violet), 2);


                    #region Hough Circles Eye Detection

                    // Circle centers come back ROI-relative; translate them to
                    // frame coordinates before drawing.
                    grayFrame.ROI = possibleROI_leftEye;
                    CircleF[] leftEyecircles = grayFrame.HoughCircles(new Gray(180), new Gray(70), 5.0, 10.0, 1, 200)[0];
                    grayFrame.ROI = Rectangle.Empty;
                    foreach (CircleF circle in leftEyecircles)
                    {
                        float x = circle.Center.X + startingLeftEyePointOptimized.X;
                        float y = circle.Center.Y + startingLeftEyePointOptimized.Y;
                        frame.Draw(new CircleF(new PointF(x, y), circle.Radius), new Bgr(Color.RoyalBlue), 4);
                    }

                    grayFrame.ROI = possibleROI_rightEye;
                    CircleF[] rightEyecircles = grayFrame.HoughCircles(new Gray(180), new Gray(70), 5.0, 10.0, 1, 200)[0];
                    grayFrame.ROI = Rectangle.Empty;

                    foreach (CircleF circle in rightEyecircles)
                    {
                        float x = circle.Center.X + startingPointSearchEyes.X;
                        float y = circle.Center.Y + startingPointSearchEyes.Y;
                        frame.Draw(new CircleF(new PointF(x, y), circle.Radius), new Bgr(Color.RoyalBlue), 4);
                    }

                    #endregion

                    //Uncomment this to draw all rectangle eyes
                    //foreach (MCvAvgComp eyeLeft in leftEyesDetected[0])
                    //{
                    //    Rectangle eyeRect = eyeLeft.rect;
                    //    eyeRect.Offset(startingLeftEyePointOptimized.X, startingLeftEyePointOptimized.Y);
                    //    frame.Draw(eyeRect, new Bgr(Color.Red), 2);
                    //}
                    //foreach (MCvAvgComp eyeRight in rightEyesDetected[0])
                    //{
                    //    Rectangle eyeRect = eyeRight.rect;
                    //    eyeRect.Offset(startingPointSearchEyes.X, startingPointSearchEyes.Y);
                    //    frame.Draw(eyeRect, new Bgr(Color.Red), 2);
                    //}
                }
                // The image box is refreshed only when exactly one face was
                // found this frame.
                imageBoxCapturedFrame.Image = frame;
            }
        }
コード例 #22
0
        /// <summary>
        /// Detects faces in the current camera frame; the first hit starts a
        /// countdown timer, and the follow-up pass crops the face, rotates
        /// it, applies a semi-transparency color matrix and animates it into
        /// the photo grid.
        /// </summary>
        /// <remarks>
        /// NOTE(review): in_taiwan() and picout() are defined elsewhere in
        /// this class; their exact placement behavior cannot be confirmed
        /// from this view.
        /// </remarks>
        private void face_detect()
        {
            gray = face_image.Convert <Gray, byte>();//Convert the camera frame to grayscale (EmguCV call)

            //Face detection
            faces =
                gray.DetectHaarCascade(haar, 1.2, 10,
                                       HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                       new Size(25, 25))[0];


            Image Input = face_image.ToBitmap();//Convert the camera frame to a Bitmap

            //Proceed only when at least one face was detected
            if (faces.Length > 0)
            {
                if (c_flag == 0)
                {
                    Camera_timer.Stop(); //Stop the camera timer once a face is captured

                    Count_timer.Start(); //Start the countdown image display
                }
                else
                {
                    //Array.Clear(pic, 0, pic.Length);
                    Countbox.Image = timePhotos[c_flag];
                    c_flag         = 0;
                    //Array.Clear(pic, 0, pic.Length);
                    Camera_timer.Stop();        //Stop the camera timer once a face is captured

                    MCvAvgComp face = faces[0]; //Take the first detected face
                    //face_image.Draw(face.rect, new Bgr(Color.Red), 3);//(disabled) outline the detected face in red

                    //Crop the detected face and store it as a separate Bitmap
                    ExtractedFace = new Bitmap(face.rect.Width, face.rect.Height);
                    FaceCanvas    = Graphics.FromImage(ExtractedFace);
                    FaceCanvas.DrawImage(Input, 0, 0, face.rect, GraphicsUnit.Pixel);
                    ExtractedFace.RotateFlip(RotateFlipType.Rotate270FlipNone); //Rotate the cropped image

                    pictureBoxCenter.Visible = true;                            //Show the enlarged face

                    pictureBoxCenter.Image    = ExtractedFace;                  //Load the cropped face into the center picture box
                    pictureBoxCenter.SizeMode = PictureBoxSizeMode.Zoom;

                    Invalidate();//Clear the box and repaint

                    //Semi-transparency: redraw the face through a color matrix
                    matrix = new ColorMatrix(nArray);
                    attributes.SetColorMatrix(matrix, ColorMatrixFlag.Default, ColorAdjustType.Bitmap);
                    Image    srcImage    = (Image)ExtractedFace;
                    Bitmap   resultImage = new Bitmap(srcImage.Width, srcImage.Height);
                    Graphics g           = Graphics.FromImage(resultImage);
                    g.DrawImage(srcImage, new Rectangle(0, 0, srcImage.Width, srcImage.Height), 0, 0, srcImage.Width, srcImage.Height, GraphicsUnit.Pixel, attributes);
                    ExtFaces = resultImage;

                    in_taiwan();

                    //Per-frame animation step distances toward the grid slot
                    vx = (pictureBoxCenter.Location.X - pic[faceNo].Location.X) / fps;
                    vy = (pictureBoxCenter.Location.Y - pic[faceNo].Location.Y) / fps;

                    picout();

                    faceNo = (faceNo + 1) % (cnt_x * cnt_y); //Advance faceNo, wrapping to cap the number of grid images
                    faces  = null;                           //Clear detections after each capture to free memory

                    Countbox.Visible = false;
                }
            }

            //Display the camera feed
            pictureBoxCamera.Image    = face_image.ToBitmap();
            pictureBoxCamera.SizeMode = PictureBoxSizeMode.Zoom;
        }
コード例 #23
0
ファイル: FaceDetector.cs プロジェクト: dsp56001/Facesketball
 /// <summary>
 /// Creates a controller for the detected face <paramref name="f"/>,
 /// delegating to the two-argument constructor with a second argument of 0.
 /// </summary>
 /// <param name="f">The Haar-cascade face detection to control.</param>
 public FaceController(MCvAvgComp f) : this(f, 0)
 {
 }
コード例 #24
0
        /// <summary>
        /// Per-frame handler: detects a single face, estimates eye search
        /// regions from face metrics, runs the eye cascade in each region,
        /// and draws the face and both eye rectangles on the frame.
        /// </summary>
        /// <remarks>
        /// Bug fix: the right eye was previously read from the LEFT eye
        /// detection results (leftEyesDetected[0][0]), so the left eye box
        /// was drawn twice; it now uses rightEyesDetected[0][0].
        /// </remarks>
        private void ProcessFrame(object sender, EventArgs arg)
        {
            Image<Bgr, Byte> ImageFrame = this.cameracapture.getFrame();
            if (ImageFrame == null)
            {
                return; // no frame available this tick
            }

            countframes += 1;
            Image<Gray, byte> grayframe = cameracapture.getGrayFrame(ImageFrame);
            grayframe._EqualizeHist(); // improve cascade robustness to lighting

            MCvAvgComp[][] facesDetected = this.face.detectFace(grayframe, haarface);

            // Only the single-face case is handled.
            if (facesDetected[0].Length != 1)
            {
                return;
            }

            MCvAvgComp face = facesDetected[0][0];

            // Eye search regions estimated from face metrics (empirical
            // heuristic): eyes start ~3/11 down the face, each band spans
            // ~2/9 of its height, and each eye gets half the face width.
            Int32 yCoordStartSearchEyes = face.rect.Top + (face.rect.Height * 3 / 11);
            Point startingPointSearchEyes = new Point(face.rect.X, yCoordStartSearchEyes);

            Size eyeAreaSize = new Size(face.rect.Width / 2, (face.rect.Height * 2 / 9));
            Point startingLeftEyePointOptimized = new Point(face.rect.X + face.rect.Width / 2, yCoordStartSearchEyes);

            Rectangle possibleROI_rightEye = new Rectangle(startingPointSearchEyes, eyeAreaSize);
            Rectangle possibleROI_leftEye = new Rectangle(startingLeftEyePointOptimized, eyeAreaSize);

            // Run the eye cascade inside each candidate region; detections
            // are ROI-relative, so the ROI is always reset afterwards.
            grayframe.ROI = possibleROI_leftEye;
            MCvAvgComp[][] leftEyesDetected = this.eye.detectEyes(grayframe, haareyes);
            grayframe.ROI = Rectangle.Empty;

            grayframe.ROI = possibleROI_rightEye;
            MCvAvgComp[][] rightEyesDetected = this.eye.detectEyes(grayframe, haareyes);
            grayframe.ROI = Rectangle.Empty;

            // A face with an eye found in each half is treated as confirmed.
            if (leftEyesDetected[0].Length != 0 && rightEyesDetected[0].Length != 0)
            {
                eyes_count += 1;
                //draw the face
                ImageFrame.Draw(face.rect, new Bgr(Color.Red), 2);

                MCvAvgComp eyeLeft = leftEyesDetected[0][0];
                // BUG FIX: was leftEyesDetected[0][0].
                MCvAvgComp eyeRight = rightEyesDetected[0][0];

                // Draw the Left Eye (offset from ROI to frame coordinates)
                Rectangle eyeRectL = eyeLeft.rect;
                eyeRectL.Offset(startingLeftEyePointOptimized.X, startingLeftEyePointOptimized.Y);
                ImageFrame.Draw(eyeRectL, new Bgr(Color.Red), 2);

                //Draw the Right Eye
                Rectangle eyeRectR = eyeRight.rect;
                eyeRectR.Offset(startingPointSearchEyes.X, startingPointSearchEyes.Y);
                ImageFrame.Draw(eyeRectR, new Bgr(Color.Red), 2);
            }
            imageBox1.Image = ImageFrame;
        }
コード例 #25
0
        /// <summary>
        /// Per-frame handler: detects a single face, derives eye search
        /// regions from face metrics, maps the estimated eye position to a
        /// screen cursor inside a navigation rectangle, and triggers a mouse
        /// click after a 3-second dwell with no left eye detected.
        /// </summary>
        /// <remarks>
        /// NOTE(review): QueryFrame() is called twice per tick, so the frame
        /// that is processed and shown is one grab later than
        /// currentFrameCopy — confirm this is intended.
        /// </remarks>
        void FaceGrabber(object sender, EventArgs e)
        {
            currentFrame = grabber.QueryFrame();
            if (currentFrame != null)
            {
                label1.Show();
                currentFrameCopy = currentFrame.Copy();

                Image <Bgr, Byte>  frame     = grabber.QueryFrame();
                Image <Gray, Byte> grayFrame = frame.Convert <Gray, Byte>();
                grayFrame._EqualizeHist();

                MCvAvgComp[][] facesDetected = grayFrame.DetectHaarCascade(_face, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT, new Size(20, 20));
                //  MCvAvgComp[][] lefteyeDeteced = grayFrame.DetectHaarCascade(leye, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT, new Size(20, 20));
                if (facesDetected[0].Length == 1)
                {
                    MCvAvgComp face = facesDetected[0][0];

                    #region on Face Metric Estimation --- based on empirical measuraments on a couple of photos ---  a really trivial heuristic

                    //// Our Region of interest where find eyes will start with a sample estimation using face metric
                    Int32 yCoordStartSearchEyes   = face.rect.Top + (face.rect.Height * 3 / 11);
                    Point startingPointSearchEyes = new Point(face.rect.X, yCoordStartSearchEyes);
                    //Point endingPointSearchEyes = new Point((face.rect.X + face.rect.Width), yCoordStartSearchEyes);
                    //richTextBox1.Text = face.rect.Top.ToString();
                    Size searchEyesAreaSize = new Size(face.rect.Width * 5 / 2, (face.rect.Height * 2 / 5));
                    //Point lowerEyesPointOptimized = new Point(face.rect.X, yCoordStartSearchEyes + searchEyesAreaSize.Height);
                    Size  eyeAreaSize = new Size(face.rect.Width / 2, (face.rect.Height * 2 / 9));
                    Point startingLeftEyePointOptimized = new Point(face.rect.X + face.rect.Width / 2, yCoordStartSearchEyes);

                    Rectangle possibleROI_eyes     = new Rectangle(startingPointSearchEyes, searchEyesAreaSize);
                    Rectangle possibleROI_rightEye = new Rectangle(startingPointSearchEyes, eyeAreaSize);
                    Rectangle possibleROI_leftEye  = new Rectangle(startingLeftEyePointOptimized, eyeAreaSize);

                    #endregion


                    // Navigation rectangle: the on-screen region the eye
                    // cursor is mapped into (sized from the search area).
                    int widthNav  = (frame.Width / 10 * 2);
                    int heightNav = (frame.Height / 10 * 2);

                    Rectangle nav = new Rectangle(new Point(frame.Width / 25 - widthNav / 2, frame.Height / 2 - heightNav / 2), searchEyesAreaSize);
                    frame.Draw(nav, new Bgr(Color.Lavender), 3);
                    Point cursor = new Point(face.rect.X + searchEyesAreaSize.Width / 2, yCoordStartSearchEyes);


                    // Eye detection over the whole search band; detections are
                    // ROI-relative, so the ROI is reset afterwards.
                    grayFrame.ROI = possibleROI_eyes;
                    MCvAvgComp[][] EyesDetected = grayFrame.DetectHaarCascade(eyes, 1.15, 3, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
                    grayFrame.ROI = Rectangle.Empty;

                    if (EyesDetected[0].Length != 0)
                    {
                        // Eyes visible: reset the dwell stopwatch.
                        sw.Reset();
                        frame.Draw(face.rect, new Bgr(Color.Yellow), 1);

                        foreach (MCvAvgComp eye in EyesDetected[0])
                        {
                            Rectangle eyeRect = eye.rect;
                            eyeRect.Offset(possibleROI_eyes.X, possibleROI_eyes.Y);
                            grayFrame.ROI = eyeRect;
                            frame.Draw(eyeRect, new Bgr(Color.DarkSeaGreen), 2);
                            frame.Draw(possibleROI_eyes, new Bgr(Color.DeepPink), 2);

                            // Map the cursor point into screen coordinates.
                            // NOTE(review): the scale factors (20 * nav.Width,
                            // 3 * nav.Height, 2 * right) look empirical —
                            // verify against the target display.
                            if (nav.Left < cursor.X && cursor.X < (nav.Left + 20 * nav.Width) && nav.Top < cursor.Y && cursor.Y < nav.Top + 3 * nav.Height)
                            {
                                LineSegment2D CursorDraw = new LineSegment2D(cursor, new Point(cursor.X, cursor.Y + 1));
                                frame.Draw(CursorDraw, new Bgr(Color.White), 3);
                                int right  = (frame.Width * (cursor.X - nav.Right)) / nav.Width;
                                int xCoord = (frame.Width * (cursor.X - nav.Left)) / nav.Width + 2 * right;
                                int yCoord = (frame.Width * (cursor.Y - nav.Top)) / nav.Height;
                                Cursor.Position = new Point(xCoord, yCoord);
                            }
                        }
                    }
                    // Per-eye cascades used to confirm the face.
                    grayFrame.ROI = possibleROI_leftEye;
                    MCvAvgComp[][] leftEyesDetected = grayFrame.DetectHaarCascade(leye, 1.15, 3, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
                    grayFrame.ROI = Rectangle.Empty;

                    grayFrame.ROI = possibleROI_rightEye;
                    MCvAvgComp[][] rightEyesDetected = grayFrame.DetectHaarCascade(reye, 1.15, 3, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
                    grayFrame.ROI = Rectangle.Empty;

                    //If we are able to find eyes inside the possible face, it should be a face, maybe we find also a couple of eyes
                    if (leftEyesDetected[0].Length != 0 && rightEyesDetected[0].Length != 0)
                    {
                        sw.Stop();
                        //draw the face
                        frame.Draw(face.rect, new Bgr(Color.Violet), 2);
                        grayFrame.ROI = possibleROI_leftEye;
                        grayFrame.ROI = Rectangle.Empty;
                        grayFrame.ROI = possibleROI_rightEye;
                        grayFrame.ROI = Rectangle.Empty;
                    }
                    else

                    // No left eye detected (a wink): run the dwell stopwatch;
                    // once the label reads 3 seconds, emit a mouse click.
                    if (leftEyesDetected[0].Length == 0)
                    {
                        timer1.Enabled = true;

                        sw.Start();
                        //timer1_Tick();
                        if (label1.Text == "0:00:03")
                        {
                            DoMouseClick();
                            sw.Reset();
                        }
                    }



                    imageBoxSkin.Image = frame;
                }
            }
        }
コード例 #26
0
        /// <summary>
        /// Per-frame handler: grabs and upscales a frame, detects (at most)
        /// one face, derives eye search regions from face metrics, runs the
        /// eye cascades inside those regions (unless frozen via STopROI),
        /// and schedules the Hough-circle eye trackers on the UI idle loop.
        /// </summary>
        void FrameGrabber(object sender, EventArgs e)
        {
            // Acquire the next frame and upscale it for detection.
            frame = _capture.QueryFrame().Resize(800, 800, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            // Grayscale + histogram equalization improves cascade robustness.
            grayFrame = frame.Convert <Gray, Byte>();
            grayFrame._EqualizeHist();

            // Face detection can be frozen via StopFace; in that case the
            // previous detections (stored in a field) are reused.
            if (StopFace == false)
            {
                facesDetected = grayFrame.DetectHaarCascade(_faces, faceScaleRate, faceMinNeighbourTheshold, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(25, 25));
            }

            // BUG FIX: guard against a null field — StopFace may be true
            // before the first detection ever ran, which previously threw a
            // NullReferenceException on facesDetected[0].
            if (facesDetected == null || facesDetected[0].Length != 1)
            {
                return;
            }

            MCvAvgComp face = facesDetected[0][0];

            #region Search Roi based on Face Metric Estimation --- based on empirical measuraments on a couple of photos ---  a really trivial heuristic

            // Eyes assumed to start ~3/11 down the face; each search band is
            // ~2/9 of the face height and each eye gets half the face width.
            Int32 yCoordStartSearchEyes = face.rect.Top + (face.rect.Height * 3 / 11);
            startingPointSearchEyes = new Point(face.rect.X, yCoordStartSearchEyes);
            Point endingPointSearchEyes = new Point((face.rect.X + face.rect.Width), yCoordStartSearchEyes);

            Size  searchEyesAreaSize      = new Size(face.rect.Width, (face.rect.Height * 2 / 9));
            Point lowerEyesPointOptimized = new Point(face.rect.X, yCoordStartSearchEyes + searchEyesAreaSize.Height);
            Size  eyeAreaSize             = new Size(face.rect.Width / 2, (face.rect.Height * 2 / 9));
            startingLeftEyePointOptimized = new Point(face.rect.X + face.rect.Width / 2, yCoordStartSearchEyes);

            possibleROI_rightEye = new Rectangle(startingPointSearchEyes, eyeAreaSize);
            possibleROI_leftEye  = new Rectangle(startingLeftEyePointOptimized, eyeAreaSize);

            #endregion

            #region Drawing Utilities
            // Upper boundary of the eye search band.
            frame.Draw(new LineSegment2D(startingPointSearchEyes, endingPointSearchEyes), new Bgr(Color.White), 3);
            // Lower boundary.
            frame.Draw(new LineSegment2D(lowerEyesPointOptimized, new Point((lowerEyesPointOptimized.X + face.rect.Width), (yCoordStartSearchEyes + searchEyesAreaSize.Height))), new Bgr(Color.White), 3);
            // Vertical split between the two eye regions.
            frame.Draw(new LineSegment2D(startingLeftEyePointOptimized, new Point(startingLeftEyePointOptimized.X, (yCoordStartSearchEyes + searchEyesAreaSize.Height))), new Bgr(Color.White), 3);

            MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.6d, 0.6d);
            frame.Draw("", ref font, new Point((startingLeftEyePointOptimized.X - 80), (yCoordStartSearchEyes + searchEyesAreaSize.Height + 15)), new Bgr(Color.Yellow));
            frame.Draw("Right Eye", ref font, new Point(startingPointSearchEyes.X, startingPointSearchEyes.Y - 10), new Bgr(Color.Yellow));
            frame.Draw("Left Eye", ref font, new Point(startingLeftEyePointOptimized.X + searchEyesAreaSize.Height / 2, startingPointSearchEyes.Y - 10), new Bgr(Color.Yellow));
            #endregion


            if (STopROI == false)
            {
                // Detections are ROI-relative; always reset the ROI after.
                grayFrame.ROI     = possibleROI_rightEye;
                rightEyesDetected = grayFrame.DetectHaarCascade(_eyes, rightScaleRate, rightMinimumNeighbourThreshold, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(60, 60));
                grayFrame.ROI     = Rectangle.Empty;

                grayFrame.ROI    = possibleROI_leftEye;
                leftEyesDetected = grayFrame.DetectHaarCascade(_eyes, leftScaleRate, leftMinimumNeighbourThreshold, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(60, 60));
                grayFrame.ROI    = Rectangle.Empty;
            }

            //draw the face
            frame.Draw(face.rect, new Bgr(Color.Violet), 2);


            #region Hough Circles Eye Detection

            // BUG FIX: this handler runs once per frame, and the original
            // bare "+=" stacked a new copy of each tracker on every frame
            // (handler leak: trackers invoked N times per Idle event).
            // Removing any existing subscription first keeps exactly one
            // copy attached; "-=" is a harmless no-op when not subscribed.
            Application.Idle -= calcRightEye;
            Application.Idle += calcRightEye;

            Application.Idle -= calcLeftEye;
            Application.Idle += calcLeftEye;

            #endregion


            imageBox1.Image = frame;
        }
コード例 #27
0
ファイル: ComputerVision.cs プロジェクト: Fedejg/SelfieLogin
 /// <summary>
 /// Crops the detected face out of the current frame and stores a 100x100
 /// grayscale version in _TrainedFace, which is later passed to the
 /// recognition step via recognizer.Recognize(_TrainedFace).
 /// </summary>
 /// <param name="face">The Haar-cascade face detection to crop.</param>
 public void CrearTrainedFace(MCvAvgComp face)
 {
     var cropped  = _ActualFrame.Copy(face.rect);
     var grayFace = cropped.Convert <Gray, byte>();
     _TrainedFace = grayFace.Resize(100, 100, INTER.CV_INTER_CUBIC);
 }
コード例 #28
0
        /// <summary>
        /// Crops the detected face, runs eigenface recognition against the
        /// training set, tracks the person in the recognized_* lists, and
        /// enrolls the face as a new training sample when it is far from all
        /// known faces (or when no training data exists yet).
        /// </summary>
        void Recognize(MCvAvgComp f)
        {
            // Normalize the face crop to the 100x100 grayscale format used
            // for training images.
            result = current_frame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            //outline the detected face on the frame in red
            current_frame.Draw(f.rect, new Bgr(Color.Red), 2);


            if (training_images.ToArray().Length != 0)
            {
                float[] distances = recognizer.GetEigenDistances(result);
                name = recognizer.Recognize(result);
                // An empty result means no match: mint a placeholder identity.
                if (name == null || name == "")
                {
                    name = "someone_" + unknown_people;
                    unknown_people++;
                    MessageBox.Show("Unknown person detected.");
                }
                if (!recognized_people.Exists(n => n == name))
                {
                    // First sighting of this identity in the session.
                    recognized_people.Add(name);
                    recognized_faces.Add(result);
                    last_positions.Add(f.rect);
                    recognized_flags.Add(true);
                    m_Terminal.Express("Person recognized: " + name, Expression_Types.Information);
                }
                else
                {
                    // Known identity: refresh its stored face image.
                    int index = recognized_people.IndexOf(name);
                    recognized_faces[index] = result;
                    // Vision.voice.Speak("Hi " + name + "!, good to see you again.");
                    recognized_flags[index] = true;
                }
                detected_faces.Add(result);
                //Draw the label for each face detected and recognized
                current_frame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));



                if (distances.Length > 0)
                {
                    //Cerebro.consola.println(distances[0].ToString());
                    // Find the distance to the nearest training image.
                    float min_distance = 9999999.0f;
                    foreach (float distance in distances)
                    {
                        if (distance < min_distance)
                        {
                            min_distance = distance;
                        }
                    }
                    // Far from every known face: enroll it as training data.
                    // NOTE(review): 1600 is an empirical eigen-distance
                    // threshold, and this Add can insert `name` into
                    // recognized_people a second time (it was usually added
                    // above) — confirm the duplicate is intended.
                    if (min_distance > 1600)
                    {
                        training_images.Add(result);
                        face_labels.Add(name);
                        recognized_people.Add(name);
                        Save_Data();
                    }
                }
            }
            else
            {
                // No training data yet: bootstrap with this face.
                training_images.Add(result);
                face_labels.Add("Unknown");
                recognized_people.Add("Unknown");
                Save_Data();
            }
            if (unknown_people > 0)
            {
                m_Terminal.Express(unknown_people + " unknown people detected.", Expression_Types.Information);
            }
        }
コード例 #29
0
ファイル: FaceRecognition.cs プロジェクト: joao2605/cdap
        /// <summary>
        /// Per-frame handler: grabs a frame (falling back to a video file on
        /// capture failure), detects a face, recognizes it with an eigenface
        /// model built from the training images, and shows the frame plus
        /// the recognized name.
        /// </summary>
        /// <remarks>
        /// NOTE(review): the Haar cascade and the EigenObjectRecognizer are
        /// rebuilt on every frame, which is expensive — consider caching.
        /// NOTE(review): if detection throws, `result` keeps the previous
        /// frame's face and is still fed to the recognizer — confirm intent.
        /// </remarks>
        public void FrameGrabber2(object sender, EventArgs e)
        {
            NamePersons.Add("");

            face = new HaarCascade("haarcascade_frontalface_default.xml");
            //Utility UTl = new Utility();

            //Get the current frame form capture device
            //Image<Bgr, Byte> currentFrame = UTl.ImageToBgrByte(Image);
            try
            {
                currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            }
            catch (Exception exp)
            {
                // Camera grab failed: switch to reading the bundled video.
                grabber = new Capture("video002.mp4");
            }
            //Convert it to Grayscale
            gray = currentFrame.Convert <Gray, Byte>();

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(face, 1.2, 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));

            //Action for element detected
            try
            {
                MCvAvgComp f = facesDetected[0][0];

                // Crop the first face to the 100x100 grayscale format the
                // recognizer expects.
                result = currentFrame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                //outline the detected face on the frame in white
                currentFrame.Draw(f.rect, new Bgr(Color.White), 2);
            }
            catch (Exception ex)
            {
                // Swallowed: an empty detection array just means no face in
                // this frame. NOTE(review): this also hides real errors.
                //MessageBox.Show("Camera Error: Empty frames arrived" + ex.Message.ToString(), "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
            }


            if (trainingImages.ToArray().Length != 0)
            {
                //TermCriteria for face recognition with numbers of trained images like maxIteration
                MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                //Eigen face recognizer
                EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                    trainingImages.ToArray(),
                    labels.ToArray(),
                    3000,
                    ref termCrit);

                name = recognizer.Recognize(result);

                //Draw the label for each face detected and recognized
                //currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
            }

            //NamePersons[t - 1] = name;
            NamePersons.Add("");



            t = 0;

            //Names concatenation of persons recognized
            //for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
            //{
            //    names = names + NamePersons[nnn] + ", ";
            //}
            //Show the faces procesed and recognized
            emguImgFace.Image   = currentFrame;
            lblCandidateID.Text = name;
            name = "";
            //Clear the list(vector) of names
            NamePersons.Clear();
        }