Example No. 1
        /// <summary>
        /// Displays facial feature points
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void FFPCheckbox_Clicked(object sender, RoutedEventArgs e)
        {
            Mat    tmp        = new Mat();
            string buttonName = ((RadioButton)sender).Uid;

            switch (buttonName)
            {
            case "ffpRBCurr":
                if (_preprocessor.ffpCurr != null)
                {
                    currImageI.Mat.CopyTo(tmp);
                    this.currFFPImg = tmp.ToImage <Bgr, byte>();

                    FaceInvoke.DrawFacemarks(currFFPImg, _preprocessor.ffpCurr, new MCvScalar(255, 0, 0));
                    currImage.Source     = BitmapSourceConvert.ToBitmapSource(currFFPImg);
                    activeButtonEnumCurr = ActiveButtonEnum.FFP;
                }
                break;

            case "ffpRBNext":
                if (_preprocessor.ffpNext != null)
                {
                    nextImageI.Mat.CopyTo(tmp);
                    this.nextFFPImg = tmp.ToImage <Bgr, byte>();
                    FaceInvoke.DrawFacemarks(nextFFPImg, _preprocessor.ffpNext, new MCvScalar(255, 0, 0));
                    nextImage.Source     = BitmapSourceConvert.ToBitmapSource(nextFFPImg);
                    activeButtonEnumNext = ActiveButtonEnum.FFP;
                }
                break;

            default:
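                // The Uid did not match any known radio button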
                throw new MissingFieldException();
            }
        }
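Examples No. 1 and No. 8 switch on an ActiveButtonEnum that is not shown in these snippets; a minimal sketch of a definition consistent with the members they actually reference:

        public enum ActiveButtonEnum
        {
            None,     // show the unmodified image
            FFP,      // overlay facial feature points
            Delaunay  // overlay the Delaunay triangulation
        }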
Example No. 2
        public Image <Bgr, Byte> GetFacePoints()
        {
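            // Assumed context for this snippet: faceDetector (a CascadeClassifier),
            // facemark (a loaded FacemarkLBF) and imgInput (the source image) are
            // fields of the surrounding class; see Example No. 4 for a full setup.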
            //facemark.SetFaceDetector(MyDetector);

            Image <Gray, byte> grayImage = imgInput.Convert <Gray, byte>();

            grayImage._EqualizeHist();

            VectorOfRect           faces     = new VectorOfRect(faceDetector.DetectMultiScale(grayImage));
            VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();


            bool success = facemark.Fit(grayImage, faces, landmarks);

            if (success)
            {
                Rectangle[] facesRect = faces.ToArray();
                for (int i = 0; i < facesRect.Length; i++)
                {
                    imgInput.Draw(facesRect[i], new Bgr(Color.Blue), 2);
                    FaceInvoke.DrawFacemarks(imgInput, landmarks[i], new Bgr(Color.Blue).MCvScalar);
                }
                return(imgInput);
            }
            return(null);
        }
Example No. 3
        /// <summary>
        /// Process the input image and render into the output image
        /// </summary>
        /// <param name="imageIn">The input image</param>
        /// <param name="imageOut">The output image, can be the same as imageIn, in which case we will render directly into the input image</param>
        /// <returns>The messages that we want to display.</returns>
        public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            if (imageOut != imageIn)
            {
                using (InputArray iaImageIn = imageIn.GetInputArray())
                {
                    iaImageIn.CopyTo(imageOut);
                }
            }

            Stopwatch watch = Stopwatch.StartNew();

            List <DetectedObject> fullFaceRegions    = new List <DetectedObject>();
            List <DetectedObject> partialFaceRegions = new List <DetectedObject>();

            _faceDetector.Detect(imageIn, fullFaceRegions, partialFaceRegions);

            if (partialFaceRegions.Count > 0)
            {
                foreach (DetectedObject face in partialFaceRegions)
                {
                    CvInvoke.Rectangle(imageOut, face.Region, new MCvScalar(0, 255, 0));
                }
            }

            if (fullFaceRegions.Count > 0)
            {
                foreach (DetectedObject face in fullFaceRegions)
                {
                    CvInvoke.Rectangle(imageOut, face.Region, new MCvScalar(0, 255, 0));
                }

                var fullFaceRegionsArr = fullFaceRegions.ToArray();
                var rectRegionArr      = Array.ConvertAll(fullFaceRegionsArr, r => r.Region);

                using (VectorOfVectorOfPointF landmarks = _facemarkDetector.Detect(imageIn, rectRegionArr))
                {
                    int len = landmarks.Size;
                    for (int i = 0; i < len; i++)
                    {
                        using (VectorOfPointF vpf = landmarks[i])
                            FaceInvoke.DrawFacemarks(imageOut, vpf, new MCvScalar(255, 0, 0));
                    }
                }
            }
            watch.Stop();
            return(String.Format("Detected in {0} milliseconds.", watch.ElapsedMilliseconds));
        }
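A minimal caller sketch for the method above (the file name and window title are assumptions; _faceDetector and _facemarkDetector are assumed to be initialized before the call):

            Mat frame = CvInvoke.Imread("face.jpg");        // hypothetical input image
            Mat annotated = new Mat();
            string message = ProcessAndRender(frame, annotated);
            CvInvoke.Imshow("Facial landmarks", annotated); // hypothetical window title
            CvInvoke.WaitKey(0);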
Example No. 4
        public Image <Bgr, Byte> GetFacePoints()
        {
            String facePath = Path.GetFullPath(@"../../data/haarcascade_frontalface_default.xml");

            //CascadeClassifier faceDetector = new CascadeClassifier(@"..\..\Resource\EMGUCV\haarcascade_frontalface_default.xml");
            CascadeClassifier faceDetector = new CascadeClassifier(facePath);
            FacemarkLBFParams fParams      = new FacemarkLBFParams();

            //fParams.ModelFile = @"..\..\Resource\EMGUCV\lbfmodel.yaml";
            fParams.ModelFile  = @"lbfmodel.yaml";
            fParams.NLandmarks = 68; // number of landmark points
            fParams.InitShapeN = 10; // multiplier used for data augmentation
            fParams.StagesN    = 5;  // number of refinement stages
            fParams.TreeN      = 6;  // number of trees in the model for each landmark point
            fParams.TreeDepth  = 5;  // the depth of each decision tree
            FacemarkLBF facemark = new FacemarkLBF(fParams);
            //facemark.SetFaceDetector(MyDetector);

            Image <Bgr, Byte>  image     = new Image <Bgr, byte>("test.png");
            Image <Gray, byte> grayImage = image.Convert <Gray, byte>();

            grayImage._EqualizeHist();

            VectorOfRect           faces     = new VectorOfRect(faceDetector.DetectMultiScale(grayImage));
            VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();

            facemark.LoadModel(fParams.ModelFile);

            bool success = facemark.Fit(grayImage, faces, landmarks);

            if (success)
            {
                Rectangle[] facesRect = faces.ToArray();
                for (int i = 0; i < facesRect.Length; i++)
                {
                    image.Draw(facesRect[i], new Bgr(Color.Blue), 2);
                    FaceInvoke.DrawFacemarks(image, landmarks[i], new Bgr(Color.Blue).MCvScalar);
                }
                return(image);
            }
            return(null);
        }
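The lbfmodel.yaml referenced above is a pretrained FacemarkLBF model that must be obtained separately and placed next to the executable before LoadModel is called. A minimal caller sketch (the output file name is an assumption):

            Image <Bgr, Byte> result = GetFacePoints();
            if (result != null)
            {
                result.Save("test_landmarks.png"); // hypothetical output path
            }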
Example No. 5
        public FaceLandmarkDetectionPage()
            : base()
        {
            var button = this.GetButton();

            button.Text     = "Perform Face Landmark Detection";
            button.Clicked += OnButtonClicked;

            OnImagesLoaded += async (sender, image) =>
            {
                if (image == null || image[0] == null)
                {
                    return;
                }
                SetMessage("Please wait...");
                SetImage(null);
                Task <Tuple <IInputArray, long> > t = new Task <Tuple <IInputArray, long> >(
                    () =>
                {
                    InitFaceDetector();
                    InitFacemark();

                    int imgDim        = 300;
                    MCvScalar meanVal = new MCvScalar(104, 177, 123);
                    Stopwatch watch   = Stopwatch.StartNew();
                    Size imageSize    = image[0].Size;
                    using (Mat inputBlob = DnnInvoke.BlobFromImage(
                               image[0],
                               1.0,
                               new Size(imgDim, imgDim),
                               meanVal,
                               false,
                               false))
                        _faceDetector.SetInput(inputBlob, "data");
                    using (Mat detection = _faceDetector.Forward("detection_out"))
                    {
                        float confidenceThreshold = 0.5f;

                        List <Rectangle> faceRegions = new List <Rectangle>();

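                        // The detector's "detection_out" blob has shape [1, 1, N, 7]; each row i
                        // holds [imageId, classId, confidence, left, top, right, bottom], with the
                        // box coordinates normalized to [0, 1] relative to the input image.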
                        float[,,,] values = detection.GetData(true) as float[,,,];
                        for (int i = 0; i < values.GetLength(2); i++)
                        {
                            float confident = values[0, 0, i, 2];

                            if (confident > confidenceThreshold)
                            {
                                float xLeftBottom       = values[0, 0, i, 3] * imageSize.Width;
                                float yLeftBottom       = values[0, 0, i, 4] * imageSize.Height;
                                float xRightTop         = values[0, 0, i, 5] * imageSize.Width;
                                float yRightTop         = values[0, 0, i, 6] * imageSize.Height;
                                RectangleF objectRegion = new RectangleF(
                                    xLeftBottom,
                                    yLeftBottom,
                                    xRightTop - xLeftBottom,
                                    yRightTop - yLeftBottom);
                                Rectangle faceRegion = Rectangle.Round(objectRegion);
                                faceRegions.Add(faceRegion);
                            }
                        }

                        using (VectorOfRect vr = new VectorOfRect(faceRegions.ToArray()))
                            using (VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF())
                            {
                                _facemark.Fit(image[0], vr, landmarks);

                                foreach (Rectangle face in faceRegions)
                                {
                                    CvInvoke.Rectangle(image[0], face, new MCvScalar(0, 255, 0));
                                }

                                int len = landmarks.Size;
                                for (int i = 0; i < len; i++)
                                {
                                    using (VectorOfPointF vpf = landmarks[i])
                                        FaceInvoke.DrawFacemarks(image[0], vpf, new MCvScalar(255, 0, 0));
                                }
                            }
                        watch.Stop();
                        return(new Tuple <IInputArray, long>(image[0], watch.ElapsedMilliseconds));
                    }
                });
                t.Start();

                var result = await t;
                SetImage(result.Item1);
                String computeDevice = CvInvoke.UseOpenCL ? "OpenCL: " + Ocl.Device.Default.Name : "CPU";

                SetMessage(String.Format("Detected with {0} in {1} milliseconds.", computeDevice, result.Item2));
            };
        }
Example No. 6
        private void DetectAndRender(Mat image)
        {
            int       imgDim  = 300;
            MCvScalar meanVal = new MCvScalar(104, 177, 123);

            Size imageSize = image.Size;

            using (Mat inputBlob = DnnInvoke.BlobFromImage(
                       image,
                       1.0,
                       new Size(imgDim, imgDim),
                       meanVal,
                       false,
                       false))
                _faceDetector.SetInput(inputBlob, "data");
            using (Mat detection = _faceDetector.Forward("detection_out"))
            {
                float confidenceThreshold = 0.5f;

                List <Rectangle> faceRegions = new List <Rectangle>();

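                // detection_out rows again follow the SSD layout:
                // [imageId, classId, confidence, left, top, right, bottom], normalized to [0, 1]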
                float[,,,] values = detection.GetData(true) as float[,,,];
                for (int i = 0; i < values.GetLength(2); i++)
                {
                    float confident = values[0, 0, i, 2];

                    if (confident > confidenceThreshold)
                    {
                        float      xLeftBottom  = values[0, 0, i, 3] * imageSize.Width;
                        float      yLeftBottom  = values[0, 0, i, 4] * imageSize.Height;
                        float      xRightTop    = values[0, 0, i, 5] * imageSize.Width;
                        float      yRightTop    = values[0, 0, i, 6] * imageSize.Height;
                        RectangleF objectRegion = new RectangleF(
                            xLeftBottom,
                            yLeftBottom,
                            xRightTop - xLeftBottom,
                            yRightTop - yLeftBottom);
                        Rectangle faceRegion = Rectangle.Round(objectRegion);
                        faceRegions.Add(faceRegion);
                    }
                }

                using (VectorOfRect vr = new VectorOfRect(faceRegions.ToArray()))
                    using (VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF())
                    {
                        _facemark.Fit(image, vr, landmarks);

                        foreach (Rectangle face in faceRegions)
                        {
                            CvInvoke.Rectangle(image, face, new MCvScalar(0, 255, 0));
                        }

                        int len = landmarks.Size;
                        for (int i = 0; i < len; i++)
                        {
                            using (VectorOfPointF vpf = landmarks[i])
                                FaceInvoke.DrawFacemarks(image, vpf, new MCvScalar(255, 0, 0));
                        }
                    }
            }
        }
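Example No. 5 calls InitFaceDetector and InitFacemark, and Examples No. 5 and No. 6 both use _faceDetector and _facemark fields defined elsewhere; a plausible setup sketch using the OpenCV res10 SSD face detector and the LBF landmark model (every file name here is an assumption):

        private Net _faceDetector;
        private FacemarkLBF _facemark;

        private void InitFaceDetector()
        {
            // SSD face detector expecting 300x300 inputs, which matches imgDim above
            _faceDetector = DnnInvoke.ReadNetFromCaffe(
                "deploy.prototxt",
                "res10_300x300_ssd_iter_140000.caffemodel");
        }

        private void InitFacemark()
        {
            _facemark = new FacemarkLBF(new FacemarkLBFParams());
            _facemark.LoadModel("lbfmodel.yaml");
        }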
Example No. 7
    void Track()
    {
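        // Carry over the current landmarks as the previous frame's positions
        // (lastPositions is assumed to be initialized elsewhere; while it is null nothing is kept)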
        if (lastPositions != null)
        {
            lastPositions = landmarks;
        }

        // We fetch webcam texture data
        convertedTexture.SetPixels(webcamTexture.GetPixels());
        convertedTexture.Apply();

        // We convert the webcam texture2D into the OpenCV image format
        UMat img = new UMat();

        TextureConvert.Texture2dToOutputArray(convertedTexture, img);
        CvInvoke.Flip(img, img, FlipType.Vertical);

        using (CascadeClassifier classifier = new CascadeClassifier(filePath)) {
            using (UMat gray = new UMat()) {
                // We convert the image to the grayscale format the detection API expects, then detect the faces
                CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
                facesVV   = new VectorOfRect(classifier.DetectMultiScale(gray));
                landmarks = new VectorOfVectorOfPointF();

                // We fit facial landmarks onto the face data
                if (facemark.Fit(gray, facesVV, landmarks))
                {
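                    // landmarks[0] is the 68-point shape fitted to the first detected face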
                    FaceInvoke.DrawFacemarks(img, landmarks[0], new MCvScalar(255, 255, 0, 255));

                    // We calculate the nose position to use as a capture center
                    noseOffset = new Vector3(landmarks[0][67].X, landmarks[0][67].Y * -1f, 0f);

                    // We draw markers and compute positions
                    for (int j = 0; j < 68; j++)
                    {
                        Vector3 markerPos = new Vector3(landmarks[0][j].X, landmarks[0][j].Y * -1f, 0f);

                        if (displayOffsetMarkers)
                        {
                            Debug.DrawLine(markerPos, markerPos + (Vector3.forward * 3f), UnityEngine.Color.green, trackingInterval);
                        }

                        AdjustCalibration(j, markerPos);
                    }
                    recording = true;
                }
                else
                {
                    recording = false;
                }

                if (displayCalibrationMarkers)
                {
                    DisplayCalibration();
                }
            }
        }

        // We render out the calculation result into the debug image
        if (debugImage)
        {
            Texture2D texture = TextureConvert.InputArrayToTexture2D(img, FlipType.Vertical);
            debugImage.sprite = Sprite.Create(texture, new Rect(0, 0, texture.width, texture.height), new Vector2(0.5f, 0.5f));
        }
    }
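Example No. 7 depends on fields initialized elsewhere in the surrounding MonoBehaviour; a sketch of declarations inferred from the usage above (all types not visible in the snippet are assumptions):

    WebCamTexture webcamTexture;            // live camera feed
    Texture2D convertedTexture;             // CPU-side copy of the current frame
    string filePath;                        // path to the Haar cascade XML for face detection
    FacemarkLBF facemark;                   // landmark fitter with its model already loaded
    VectorOfRect facesVV;                   // detected face rectangles
    VectorOfVectorOfPointF landmarks;       // fitted landmark sets, one per face
    VectorOfVectorOfPointF lastPositions;   // landmarks carried over from the previous frame
    Vector3 noseOffset;                     // capture center taken from landmark 67
    bool displayOffsetMarkers, displayCalibrationMarkers, recording;
    float trackingInterval;                 // lifetime of the per-frame debug lines
    UnityEngine.UI.Image debugImage;        // optional target for the rendered debug frame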
Example No. 8
        /// <summary>
        /// Makes sure that settings are kept when switching images
        /// </summary>
        private void DisplayImagesCorrectCB()
        {
            Mat tmp = new Mat();

            switch (activeButtonEnumCurr)
            {
            case ActiveButtonEnum.None:
                currImage.Source = BitmapSourceConvert.ToBitmapSource(currImageI);
                break;

            case ActiveButtonEnum.FFP:
                currImageI.Mat.CopyTo(tmp);
                this.currFFPImg = tmp.ToImage <Bgr, byte>();
                FaceInvoke.DrawFacemarks(currFFPImg, _preprocessor.ffpCurr, new MCvScalar(255, 0, 0));
                currImage.Source = BitmapSourceConvert.ToBitmapSource(currFFPImg);
                break;

            case ActiveButtonEnum.Delaunay:
                currImageI.Mat.CopyTo(tmp);
                this.currDelaunayImg = tmp.ToImage <Bgr, byte>();

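                // Outline each cached Delaunay triangle as a closed white polyline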
                foreach (Triangle2DF triangle in _preprocessor.delaunayTrianglesCurr)
                {
                    System.Drawing.Point[] vertices = Array.ConvertAll <PointF, System.Drawing.Point>(triangle.GetVertices(), System.Drawing.Point.Round);
                    using (VectorOfPoint vp = new VectorOfPoint(vertices))
                    {
                        CvInvoke.Polylines(currDelaunayImg, vp, true, new Bgr(255, 255, 255).MCvScalar);
                    }
                }
                currImage.Source = BitmapSourceConvert.ToBitmapSource(currDelaunayImg);
                break;

            default:
                break;
            }

            switch (activeButtonEnumNext)
            {
            case ActiveButtonEnum.None:
                nextImage.Source = BitmapSourceConvert.ToBitmapSource(nextImageI);
                break;

            case ActiveButtonEnum.FFP:
                nextImageI.Mat.CopyTo(tmp);
                this.nextFFPImg = tmp.ToImage <Bgr, byte>();
                FaceInvoke.DrawFacemarks(nextFFPImg, _preprocessor.ffpNext, new MCvScalar(255, 0, 0));
                nextImage.Source = BitmapSourceConvert.ToBitmapSource(nextFFPImg);
                break;

            case ActiveButtonEnum.Delaunay:
                nextImageI.Mat.CopyTo(tmp);
                this.nextDelaunayImg = tmp.ToImage <Bgr, byte>();

                foreach (Triangle2DF triangle in _preprocessor.delaunayTrianglesNext)
                {
                    System.Drawing.Point[] vertices = Array.ConvertAll <PointF, System.Drawing.Point>(triangle.GetVertices(), System.Drawing.Point.Round);
                    using (VectorOfPoint vp = new VectorOfPoint(vertices))
                    {
                        CvInvoke.Polylines(nextDelaunayImg, vp, true, new Bgr(255, 255, 255).MCvScalar);
                    }
                }
                nextImage.Source = BitmapSourceConvert.ToBitmapSource(nextDelaunayImg);
                break;

            default:
                break;
            }
        }