Example #1
File: CLNF.cs Project: zjmsky/SharpFace
 // Copy constructor: clones the underlying native CLNF model through the SWIG-generated wrapper
 public CLNF(CLNF other) : this(LandmarkDetectorPINVOKE.new_CLNF__SWIG_2(CLNF.getCPtr(other)), true)
 {
     if (LandmarkDetectorPINVOKE.SWIGPendingException.Pending)
     {
         throw LandmarkDetectorPINVOKE.SWIGPendingException.Retrieve();
     }
 }
Example #2
        static void Main()
        {
            // initialize application
            Application.EnableVisualStyles();
            Application.SetCompatibleTextRenderingDefault(false);

            // set up the game form
            form             = new GameForm();
            form.OnGazeMove += new OnGazeMoveDelegate(OnGazeMove);

            // grab the first camera
            var baseFolder = AppDomain.CurrentDomain.BaseDirectory;
            var camera     = SequenceReader.GetCameras(baseFolder).First();

            // set up a sequence reader
            reader = new SequenceReader(camera.Item1, 640, 400);

            // set up the face model
            String root = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, @"..\..\");

            faceModel = new FaceModelParameters(root, false);
            faceModel.optimiseForVideo();

            // set up a face detector, a landmark detector, and a gaze analyser
            faceDetector     = new FaceDetector();
            landmarkDetector = new CLNF(faceModel);
            gazeAnalyser     = new GazeAnalyserManaged();

            // run the game
            Application.Run(form);
        }
Example #3
        // Runs landmark detection on one frame and feeds the result to the gaze analyser
        private bool ProcessFrame(CLNF landmark_detector, GazeAnalyserManaged gaze_analyser, FaceModelParameters model_params, RawImage frame, RawImage grayscale_frame, float fx, float fy, float cx, float cy)
        {
            bool detection_succeeding = landmark_detector.DetectLandmarksInVideo(frame, model_params, grayscale_frame);

            gaze_analyser.AddNextFrame(landmark_detector, detection_succeeding, fx, fy, cx, cy);
            return(detection_succeeding);
        }
Example #4
        public MainWindow()
        {
            InitializeComponent();
            this.DataContext = this; // For WPF data binding

            // Set the icon
            Uri iconUri = new Uri("logo1.ico", UriKind.RelativeOrAbsolute);

            this.Icon = BitmapFrame.Create(iconUri);

            String root = AppDomain.CurrentDomain.BaseDirectory;

            face_model_params = new FaceModelParameters(root, LandmarkDetectorCECLM, LandmarkDetectorCLNF, LandmarkDetectorCLM);
            // Initialize the face detector
            face_detector = new FaceDetector(face_model_params.GetHaarLocation(), face_model_params.GetMTCNNLocation());

            // If MTCNN model not available, use HOG
            if (!face_detector.IsMTCNNLoaded())
            {
                FaceDetCNN.IsEnabled = false;
                DetectorCNN          = false;
                DetectorHOG          = true;
            }
            face_model_params.SetFaceDetector(DetectorHaar, DetectorHOG, DetectorCNN);

            landmark_detector = new CLNF(face_model_params);

            gaze_analyser = new GazeAnalyserManaged();
        }
Example #5
        // If the landmark detector model has changed, it needs to be reloaded
        private void ReloadLandmarkDetector()
        {
            bool reload = false;

            if (face_model_params.IsCECLM() && !LandmarkDetectorCECLM)
            {
                reload = true;
            }
            else if (face_model_params.IsCLNF() && !LandmarkDetectorCLNF)
            {
                reload = true;
            }
            else if (face_model_params.IsCLM() && !LandmarkDetectorCLM)
            {
                reload = true;
            }

            if (reload)
            {
                String root = AppDomain.CurrentDomain.BaseDirectory;

                face_model_params = new FaceModelParameters(root, LandmarkDetectorCECLM, LandmarkDetectorCLNF, LandmarkDetectorCLM);
                landmark_detector = new CLNF(face_model_params);
            }
        }
Example #6
 // Draws the tracked landmarks of the CLNF model onto the given OpenCV image via the native wrapper
 public static void Draw(SWIGTYPE_p_cv__Mat img, CLNF clnf_model)
 {
     LandmarkDetectorPINVOKE.Draw__SWIG_2(SWIGTYPE_p_cv__Mat.getCPtr(img), CLNF.getCPtr(clnf_model));
     if (LandmarkDetectorPINVOKE.SWIGPendingException.Pending)
     {
         throw LandmarkDetectorPINVOKE.SWIGPendingException.Retrieve();
     }
 }
Example #7
        private Bitmap ProcessImage(SequenceReader reader)
        {
            // set up the face model
            String root      = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, @"..\..\");
            var    faceModel = new FaceModelParameters(root, false);

            faceModel.optimiseForImages();

            // set up a face detector and a landmark detector
            var faceDetector     = new FaceDetector();
            var landmarkDetector = new CLNF(faceModel);

            // read the image from the sequence reader
            var frame     = new RawImage(reader.GetNextImage());
            var grayFrame = new RawImage(reader.GetCurrentFrameGray());

            // detect faces
            var faces       = new List <Rect>();
            var confidences = new List <double>();

            faceDetector.DetectFacesHOG(faces, grayFrame, confidences);

            // detect landmarks
            var landmarks = new List <List <Tuple <double, double> > >();

            foreach (var face in faces)
            {
                landmarkDetector.DetectFaceLandmarksInImage(grayFrame, face, faceModel);
                var points = landmarkDetector.CalculateAllLandmarks();
                landmarks.Add(points);
            }

            // draw rectangles and confidence values on image
            var image = frame.ToBitmap();

            using (Graphics g = Graphics.FromImage(image))
            {
                int index = 0;
                var pen   = new System.Drawing.Pen(System.Drawing.Color.LightGreen, 4);
                var pen2  = new System.Drawing.Pen(System.Drawing.Color.Red, 4);
                var font  = new Font(FontFamily.GenericSansSerif, 30);
                foreach (var face in faces)
                {
                    g.DrawRectangle(pen, (int)face.X, (int)face.Y, (int)face.Width, (int)face.Height);
                    g.DrawString($"{confidences[index]:0.00}", font, Brushes.Black, (int)face.X + 36, (int)face.Y - 36);

                    // draw landmark points
                    foreach (var p in landmarks[index])
                    {
                        g.DrawRectangle(pen2, new Rectangle((int)p.Item1, (int)p.Item2, 4, 4));
                    }
                    index++;
                }
            }

            return(image);
        }
Example #8
        private Bitmap ProcessImage(SequenceReader reader)
        {
            // set up the face model
            var root      = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, @"..\..\");
            var faceModel = new FaceModelParameters(root, false);

            faceModel.optimiseForImages();

            // set up a face detector, a landmark detector, and a face analyser
            var faceDetector     = new FaceDetector();
            var landmarkDetector = new CLNF(faceModel);
            var faceAnalyser     = new FaceAnalyserManaged(root, true, 0);

            // read the image from the sequence reader
            var frame     = new RawImage(reader.GetNextImage());
            var grayFrame = new RawImage(reader.GetCurrentFrameGray());

            // detect faces
            var faces       = new List <Rect>();
            var confidences = new List <double>();

            faceDetector.DetectFacesHOG(faces, grayFrame, confidences);

            // detect landmarks
            var image = frame.ToBitmap();

            foreach (var face in faces)
            {
                landmarkDetector.DetectFaceLandmarksInImage(grayFrame, face, faceModel);
                var points = landmarkDetector.CalculateAllLandmarks();

                // calculate action units
                var features = faceAnalyser.PredictStaticAUsAndComputeFeatures(grayFrame, points);

                // find the action units
                var actionUnits = (from au in features.Item2
                                   where au.Value > 0
                                   orderby au.Key
                                   select au.Key);

                // get top emotions
                var topEmotions = GetTopEmotions(actionUnits);

                // draw the emotion on the face
                using (Graphics g = Graphics.FromImage(image))
                {
                    string name = string.Join(Environment.NewLine, topEmotions);
                    Font   fnt  = new Font("Verdana", 15, GraphicsUnit.Pixel);
                    Brush  brs  = new SolidBrush(Color.Black);
                    var    bump = 36;
                    System.Drawing.SizeF stringSize = g.MeasureString(name, fnt);
                    g.FillRectangle(new SolidBrush(Color.Yellow), (int)face.X + bump, (int)face.Y, stringSize.Width, stringSize.Height);
                    g.DrawString(name, fnt, brs, (int)face.X + bump, (int)face.Y);
                }
            }
            return(image);
        }
Example #9
        // Wraps the native CalculateLandmarks call and rethrows any exception pending on the unmanaged side
        public static CVPoint2DList CalculateLandmarks(CLNF clnf_model)
        {
            CVPoint2DList ret = new CVPoint2DList(LandmarkDetectorPINVOKE.CalculateLandmarks__SWIG_1(CLNF.getCPtr(clnf_model)), true);

            if (LandmarkDetectorPINVOKE.SWIGPendingException.Pending)
            {
                throw LandmarkDetectorPINVOKE.SWIGPendingException.Retrieve();
            }
            return(ret);
        }
Example #10
        unsafe void visualise_tracking(Mat captured_image, ref CLNF face_model, ref FaceModelParameters det_parameters, int frame_count, double fx, double fy, double cx, double cy)
        {
            //Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
            double detection_certainty = face_model.detection_certainty;
            bool   detection_success   = face_model.detection_success;

            double visualisation_boundary = 0.2;

            // Only draw if the reliability is reasonable, the value is slightly ad-hoc
            if (detection_certainty < visualisation_boundary)
            {
                LandmarkDetector.Draw(new SWIGTYPE_p_cv__Mat(captured_image.CvPtr), face_model);

                double vis_certainty = detection_certainty;
                if (vis_certainty > 1)
                {
                    vis_certainty = 1;
                }
                if (vis_certainty < -1)
                {
                    vis_certainty = -1;
                }

                vis_certainty = (vis_certainty + 1) / (visualisation_boundary + 1);

                //A rough heuristic for box around the face width

                int thickness = (int)Math.Ceiling(2.0 * ((double)captured_image.Cols) / 640.0);

                Vec6d pose_estimate_to_draw = *((Vec6d *)LandmarkDetector.GetCorrectedPoseWorld(new SWIGTYPE_p_CLNF(CLNF.getCPtr(face_model)), fx, fy, cx, cy).Pointer);

                //Draw it in reddish if uncertain, blueish if certain
                var color = new Scalar((1 - vis_certainty) * 255.0, 0, vis_certainty * 255);
                LandmarkDetector.DrawBox(new SWIGTYPE_p_cv__Mat(captured_image.CvPtr), new SWIGTYPE_p_cv__Vec6d(new IntPtr(&pose_estimate_to_draw)), new SWIGTYPE_p_cv__Scalar(new IntPtr(&(color))), thickness, (float)fx, (float)fy, (float)cx, (float)cy);
            }

            //Work out the framerate
            if (frame_count % 10 == 0)
            {
                double t1 = Cv2.GetTickCount();
                fps_tracker = 10.0 / ((double)(t1 - t0) / Cv2.GetTickFrequency());
                t0          = (long)t1;
            }

            using (Mat resize = captured_image.Resize(new Size(Size.Width * SizeFactor, Size.Height * SizeFactor)))
            {
                string fpsSt = "FPS: " + Math.Round(fps_tracker).ToString("0.0");
                Cv2.PutText(resize, fpsSt, new Point(10, 20), HersheyFonts.HersheyPlain, 1, new Scalar(0, 255, 0), 1, LineTypes.AntiAlias);
                Cv2.ImShow("tracking_result", resize);
            }
        }
Example #11
        public MainWindow()
        {
            InitializeComponent();
            this.DataContext = this; // For WPF data binding

            // Set the icon
            Uri iconUri = new Uri("logo1.ico", UriKind.RelativeOrAbsolute);

            this.Icon = BitmapFrame.Create(iconUri);

            String root = AppDomain.CurrentDomain.BaseDirectory;

            face_model_params = new FaceModelParameters(root, false);
            landmark_detector = new CLNF(face_model_params);

            gaze_analyser = new GazeAnalyserManaged();
        }
Example #12
        protected ExtractorBase(FaceModelParameters faceModelParameters)
        {
            if (_initialized)
            {
                return;
            }

            ModelParams  = faceModelParameters;
            GazeAnalyzer = new GazeAnalyserManaged();

            var face_detector = new FaceDetector(ModelParams.GetHaarLocation(), ModelParams.GetMTCNNLocation());

            if (!face_detector.IsMTCNNLoaded()) // If MTCNN model not available, use HOG
            {
                ModelParams.SetFaceDetector(false, true, false);
            }

            FaceModel    = new CLNF(ModelParams);
            _initialized = true;
        }
Example #13
        private void Initialize(object sender, PipelineRunEventArgs e)
        {
            string rootDirectory = AppDomain.CurrentDomain.BaseDirectory;

            faceModelParameters = new FaceModelParameters(rootDirectory, true, false, false);
            faceModelParameters.optimiseForVideo();

            faceDetector = new FaceDetector(faceModelParameters.GetHaarLocation(), faceModelParameters.GetMTCNNLocation());
            if (!faceDetector.IsMTCNNLoaded())
            {
                faceModelParameters.SetFaceDetector(false, true, false);
            }

            landmarkDetector = new CLNF(faceModelParameters);
            faceAnalyser     = new FaceAnalyser(rootDirectory, dynamic: true, output_width: 112, mask_aligned: true);
            gazeAnalyser     = new GazeAnalyser();

            landmarkDetector.Reset();
            faceAnalyser.Reset();
        }
Example #14
        public MainWindow()
        {
            InitializeComponent();

            // Set the icon
            Uri iconUri = new Uri("logo1.ico", UriKind.RelativeOrAbsolute);

            this.Icon = BitmapFrame.Create(iconUri);

            String root = AppDomain.CurrentDomain.BaseDirectory;

            // TODO: create a demo version of parameters
            face_model_params = new FaceModelParameters(root, false);
            landmark_detector = new CLNF(face_model_params);
            face_analyser     = new FaceAnalyserManaged(root, true, 112, true);
            gaze_analyser     = new GazeAnalyserManaged();

            Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
            {
                headPosePlot.AssocColor(0, Colors.Blue);
                headPosePlot.AssocColor(1, Colors.Red);
                headPosePlot.AssocColor(2, Colors.Green);

                headPosePlot.AssocName(1, "Turn");
                headPosePlot.AssocName(2, "Tilt");
                headPosePlot.AssocName(0, "Up/Down");

                headPosePlot.AssocThickness(0, 2);
                headPosePlot.AssocThickness(1, 2);
                headPosePlot.AssocThickness(2, 2);

                gazePlot.AssocColor(0, Colors.Red);
                gazePlot.AssocColor(1, Colors.Blue);

                gazePlot.AssocName(0, "Left-right");
                gazePlot.AssocName(1, "Up-down");
                gazePlot.AssocThickness(0, 2);
                gazePlot.AssocThickness(1, 2);

                smilePlot.AssocColor(0, Colors.Green);
                smilePlot.AssocColor(1, Colors.Red);
                smilePlot.AssocName(0, "Smile");
                smilePlot.AssocName(1, "Frown");
                smilePlot.AssocThickness(0, 2);
                smilePlot.AssocThickness(1, 2);

                browPlot.AssocColor(0, Colors.Green);
                browPlot.AssocColor(1, Colors.Red);
                browPlot.AssocName(0, "Raise");
                browPlot.AssocName(1, "Furrow");
                browPlot.AssocThickness(0, 2);
                browPlot.AssocThickness(1, 2);

                eyePlot.AssocColor(0, Colors.Green);
                eyePlot.AssocColor(1, Colors.Red);
                eyePlot.AssocName(0, "Eye widen");
                eyePlot.AssocName(1, "Nose wrinkle");
                eyePlot.AssocThickness(0, 2);
                eyePlot.AssocThickness(1, 2);
            }));
        }
Example #15
        // Capturing and processing the video frame by frame
        private void VideoLoop(UtilitiesOF.SequenceReader reader)
        {
            Thread.CurrentThread.IsBackground = true;

            String root = AppDomain.CurrentDomain.BaseDirectory;
            FaceModelParameters model_params = new FaceModelParameters(root, true, false, false);

            // Initialize the face detector
            FaceDetector face_detector = new FaceDetector(model_params.GetHaarLocation(), model_params.GetMTCNNLocation());

            // If MTCNN model not available, use HOG
            if (!face_detector.IsMTCNNLoaded())
            {
                model_params.SetFaceDetector(false, true, false);
            }

            CLNF face_model = new CLNF(model_params);
            GazeAnalyserManaged gaze_analyser = new GazeAnalyserManaged();

            DateTime? startTime = CurrentTime;

            var lastFrameTime = CurrentTime;

            while (running)
            {
                //////////////////////////////////////////////
                // CAPTURE FRAME AND DETECT LANDMARKS FOLLOWED BY THE REQUIRED IMAGE PROCESSING
                //////////////////////////////////////////////

                RawImage frame = reader.GetNextImage();

                lastFrameTime = CurrentTime;
                processing_fps.AddFrame();

                var grayFrame = reader.GetCurrentFrameGray();

                if (mirror_image)
                {
                    frame.Mirror();
                    grayFrame.Mirror();
                }

                bool detectionSucceeding = ProcessFrame(face_model, gaze_analyser, model_params, frame, grayFrame, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());

                lock (recording_lock)
                {
                    if (recording)
                    {
                        // Add objects to recording queues
                        List <float> pose = new List <float>();
                        face_model.GetPose(pose, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
                        RawImage image = new RawImage(frame);
                        recording_objects.Enqueue(new Tuple <RawImage, bool, List <float> >(image, detectionSucceeding, pose));
                    }
                }

                List <Tuple <System.Windows.Point, System.Windows.Point> > lines = null;
                List <Tuple <float, float> > eye_landmarks = null;
                List <System.Windows.Point>  landmarks     = new List <System.Windows.Point>();
                List <Tuple <System.Windows.Point, System.Windows.Point> > gaze_lines = null;
                Tuple <float, float> gaze_angle = new Tuple <float, float>(0, 0);
                var    visibilities             = face_model.GetVisibilities();
                double scale = face_model.GetRigidParams()[0];

                if (detectionSucceeding)
                {
                    List <Tuple <float, float> > landmarks_doubles = face_model.CalculateAllLandmarks();

                    foreach (var p in landmarks_doubles)
                    {
                        landmarks.Add(new System.Windows.Point(p.Item1, p.Item2));
                    }

                    eye_landmarks = face_model.CalculateVisibleEyeLandmarks();

                    gaze_lines = gaze_analyser.CalculateGazeLines(reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
                    gaze_angle = gaze_analyser.GetGazeAngle();

                    lines = face_model.CalculateBox(reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());
                }

                if (reset)
                {
                    face_model.Reset();
                    reset = false;
                }

                // Visualisation updating
                try
                {
                    Dispatcher.Invoke(DispatcherPriority.Render, new TimeSpan(0, 0, 0, 0, 200), (Action)(() =>
                    {
                        if (latest_img == null)
                        {
                            latest_img = frame.CreateWriteableBitmap();
                        }

                        List <float> pose = new List <float>();
                        face_model.GetPose(pose, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());

                        int yaw = (int)(pose[4] * 180 / Math.PI + 0.5);
                        int yaw_abs = Math.Abs(yaw);

                        int roll = (int)(pose[5] * 180 / Math.PI + 0.5);
                        int roll_abs = Math.Abs(roll);

                        int pitch = (int)(pose[3] * 180 / Math.PI + 0.5);
                        int pitch_abs = Math.Abs(pitch);

                        YawLabel.Content = yaw_abs + "°";
                        RollLabel.Content = roll_abs + "°";
                        PitchLabel.Content = pitch_abs + "°";

                        if (yaw > 0)
                        {
                            YawLabelDir.Content = "Right";
                        }
                        else if (yaw < 0)
                        {
                            YawLabelDir.Content = "Left";
                        }
                        else
                        {
                            YawLabelDir.Content = "Straight";
                        }

                        if (pitch > 0)
                        {
                            PitchLabelDir.Content = "Down";
                        }
                        else if (pitch < 0)
                        {
                            PitchLabelDir.Content = "Up";
                        }
                        else
                        {
                            PitchLabelDir.Content = "Straight";
                        }

                        if (roll > 0)
                        {
                            RollLabelDir.Content = "Left";
                        }
                        else if (roll < 0)
                        {
                            RollLabelDir.Content = "Right";
                        }
                        else
                        {
                            RollLabelDir.Content = "Straight";
                        }

                        XPoseLabel.Content = (int)pose[0] + " mm";
                        YPoseLabel.Content = (int)pose[1] + " mm";
                        ZPoseLabel.Content = (int)pose[2] + " mm";

                        String x_angle = String.Format("{0:F0}°", gaze_angle.Item1 * (180.0 / Math.PI));
                        String y_angle = String.Format("{0:F0}°", gaze_angle.Item2 * (180.0 / Math.PI));
                        YawLabelGaze.Content = x_angle;
                        PitchLabelGaze.Content = y_angle;

                        if (gaze_angle.Item1 > 0)
                        {
                            YawLabelGazeDir.Content = "Right";
                        }
                        else if (gaze_angle.Item1 < 0)
                        {
                            YawLabelGazeDir.Content = "Left";
                        }
                        else
                        {
                            YawLabelGazeDir.Content = "Straight";
                        }

                        if (gaze_angle.Item2 > 0)
                        {
                            PitchLabelGazeDir.Content = "Down";
                        }
                        else if (gaze_angle.Item2 < 0)
                        {
                            PitchLabelGazeDir.Content = "Up";
                        }
                        else
                        {
                            PitchLabelGazeDir.Content = "Straight";
                        }

                        double confidence = face_model.GetConfidence();

                        if (confidence < 0)
                        {
                            confidence = 0;
                        }
                        else if (confidence > 1)
                        {
                            confidence = 1;
                        }

                        frame.UpdateWriteableBitmap(latest_img);
                        webcam_img.Clear();

                        webcam_img.Source = latest_img;
                        webcam_img.Confidence.Add(confidence);
                        webcam_img.FPS = processing_fps.GetFPS();
                        if (detectionSucceeding)
                        {
                            webcam_img.OverlayLines.Add(lines);
                            webcam_img.OverlayPoints.Add(landmarks);
                            webcam_img.OverlayPointsVisibility.Add(visibilities);
                            webcam_img.FaceScale.Add(scale);

                            List <System.Windows.Point> eye_landmark_points = new List <System.Windows.Point>();
                            foreach (var p in eye_landmarks)
                            {
                                eye_landmark_points.Add(new System.Windows.Point(p.Item1, p.Item2));
                            }


                            webcam_img.OverlayEyePoints.Add(eye_landmark_points);
                            webcam_img.GazeLines.Add(gaze_lines);

                            // Publish the information for other applications
                            String str_head_pose = String.Format("{0}:{1:F2}, {2:F2}, {3:F2}, {4:F2}, {5:F2}, {6:F2}", "HeadPose", pose[0], pose[1], pose[2],
                                                                 pose[3] * 180 / Math.PI, pose[4] * 180 / Math.PI, pose[5] * 180 / Math.PI);

                            zero_mq_socket.Send(new ZFrame(str_head_pose, Encoding.UTF8));

                            String str_gaze = String.Format("{0}:{1:F2}, {2:F2}", "GazeAngle", gaze_angle.Item1 * (180.0 / Math.PI), gaze_angle.Item2 * (180.0 / Math.PI));

                            zero_mq_socket.Send(new ZFrame(str_gaze, Encoding.UTF8));
                        }
                    }));

                    while (running && pause)
                    {
                        Thread.Sleep(10);
                    }
                }
                catch (TaskCanceledException)
                {
                    // Quitting
                    break;
                }
            }
            reader.Close();
            System.Console.Out.WriteLine("Thread finished");
        }
Example #16
        public override int Run()
        {
            //Convert arguments to more convenient vector form
            var arguments = new StringList {
                "./"
            };

            // Search paths
            var configPath = "some/config/path";
            var parentPath = arguments[0];

            // Some initial parameters that can be overriden from command line
            StringList files                   = new StringList(),
                       outputImages            = new StringList(),
                       outputLandmarkLocations = new StringList(),
                       outputPoseLocations     = new StringList();

            //            // Bounding boxes for a face in each image (optional)
            //            vector<cv::Rect_<double>> bounding_boxes;
            CVDoubleRectList boundingBoxes = new CVDoubleRectList();

            //            LandmarkDetector::get_image_input_output_params(files, output_landmark_locations, output_pose_locations, output_images, bounding_boxes, arguments);
            LandmarkDetector.get_image_input_output_params(files, outputLandmarkLocations, outputPoseLocations, outputImages, boundingBoxes, arguments);

            //            LandmarkDetector::FaceModelParameters det_parameters(arguments);
            FaceModelParameters detParameters = new FaceModelParameters(arguments);

            //            // No need to validate detections, as we're not doing tracking
            //            det_parameters.validate_detections = false;
            detParameters.validate_detections = false;

            //            // Grab camera parameters if provided (only used for pose and eye gaze and are quite important for accurate estimates)
            //            float fx = 0, fy = 0, cx = 0, cy = 0;
            //            int device = -1;
            //            LandmarkDetector::get_camera_params(device, fx, fy, cx, cy, arguments);
            float fx = 0, fy = 0, cx = 0, cy = 0;
            int   device = -1;

            LandmarkDetector.get_camera_params(out device, out fx, out fy, out cx, out cy, arguments);

            //            // If cx (optical axis centre) is undefined will use the image size/2 as an estimate
            //            bool cx_undefined = false;
            //            bool fx_undefined = false;
            //            if (cx == 0 || cy == 0)
            //            {
            //                cx_undefined = true;
            //            }
            //            if (fx == 0 || fy == 0)
            //            {
            //                fx_undefined = true;
            //            }

            bool cx_undefined = false;
            bool fx_undefined = false;

            if (cx == 0 || cy == 0)
            {
                cx_undefined = true;
            }
            if (fx == 0 || fy == 0)
            {
                fx_undefined = true;
            }


            //            // The modules that are being used for tracking
            //            cout << "Loading the model" << endl;
            //            LandmarkDetector::CLNF clnf_model(det_parameters.model_location);
            //            cout << "Model loaded" << endl;

            INFO_STREAM("Loading the model");
            CLNF clnfModel = new CLNF(detParameters.model_location);

            INFO_STREAM("Model loaded");

            // TODO: what is the managed equivalent of dlib::frontal_face_detector?

            //            cv::CascadeClassifier classifier(det_parameters.face_detector_location);
            //            dlib::frontal_face_detector face_detector_hog = dlib::get_frontal_face_detector();
            CascadeClassifier classifier = new CascadeClassifier(detParameters.face_detector_location);
            SWIGTYPE_p_dlib__frontal_face_detector faceDetectorHog;

            //            // Loading the AU prediction models
            //            string au_loc = "AU_predictors/AU_all_static.txt";
            string auLoc = "AU_predictors/AU_all_static.txt";

            //            boost::filesystem::path au_loc_path = boost::filesystem::path(au_loc);


            //            if (boost::filesystem::exists(au_loc_path))
            //            {
            //                au_loc = au_loc_path.string();
            //            }
            //            else if (boost::filesystem::exists(parent_path / au_loc_path))
            //            {
            //                au_loc = (parent_path / au_loc_path).string();
            //            }
            //            else if (boost::filesystem::exists(config_path / au_loc_path))
            //            {
            //                au_loc = (config_path / au_loc_path).string();
            //            }
            //            else
            //            {
            //                cout << "Can't find AU prediction files, exiting" << endl;
            //                return 1;
            //            }

            //            // Used for image masking for AUs
            //            string tri_loc;
            //            boost::filesystem::path tri_loc_path = boost::filesystem::path("model/tris_68_full.txt");
            //            if (boost::filesystem::exists(tri_loc_path))
            //            {
            //                tri_loc = tri_loc_path.string();
            //            }
            //            else if (boost::filesystem::exists(parent_path / tri_loc_path))
            //            {
            //                tri_loc = (parent_path / tri_loc_path).string();
            //            }
            //            else if (boost::filesystem::exists(config_path / tri_loc_path))
            //            {
            //                tri_loc = (config_path / tri_loc_path).string();
            //            }
            //            else
            //            {
            //                cout << "Can't find triangulation files, exiting" << endl;
            //                return 1;
            //            }

            //            FaceAnalysis::FaceAnalyser face_analyser(vector<cv::Vec3d>(), 0.7, 112, 112, au_loc, tri_loc);

            //            bool visualise = !det_parameters.quiet_mode;

            //            // Do some image loading
            //            for (size_t i = 0; i < files.size(); i++)
            //            {
            //                string file = files.at(i);

            //                // Loading image
            //                cv::Mat read_image = cv::imread(file, -1);

            //                if (read_image.empty())
            //                {
            //                    cout << "Could not read the input image" << endl;
            //                    return 1;
            //                }

            //                // Making sure the image is in uchar grayscale
            //                cv::Mat_<uchar> grayscale_image;
            //                convert_to_grayscale(read_image, grayscale_image);


            //                // If optical centers are not defined just use center of image
            //                if (cx_undefined)
            //                {
            //                    cx = grayscale_image.cols / 2.0f;
            //                    cy = grayscale_image.rows / 2.0f;
            //                }
            //                // Use a rough guess-timate of focal length
            //                if (fx_undefined)
            //                {
            //                    fx = 500 * (grayscale_image.cols / 640.0);
            //                    fy = 500 * (grayscale_image.rows / 480.0);

            //                    fx = (fx + fy) / 2.0;
            //                    fy = fx;
            //                }


            //                // if no pose defined we just use a face detector
            //                if (bounding_boxes.empty())
            //                {

            //                    // Detect faces in an image
            //                    vector<cv::Rect_<double>> face_detections;

            //                    if (det_parameters.curr_face_detector == LandmarkDetector::FaceModelParameters::HOG_SVM_DETECTOR)
            //                    {
            //                        vector<double> confidences;
            //                        LandmarkDetector::DetectFacesHOG(face_detections, grayscale_image, face_detector_hog, confidences);
            //                    }
            //                    else
            //                    {
            //                        LandmarkDetector::DetectFaces(face_detections, grayscale_image, classifier);
            //                    }

            //                    // Detect landmarks around detected faces
            //                    int face_det = 0;
            //                    // perform landmark detection for every face detected
            //                    for (size_t face = 0; face < face_detections.size(); ++face)
            //                    {
            //                        // if there are multiple detections go through them
            //                        bool success = LandmarkDetector::DetectLandmarksInImage(grayscale_image, face_detections[face], clnf_model, det_parameters);

            //                        // Estimate head pose and eye gaze
            //                        cv::Vec6d headPose = LandmarkDetector::GetCorrectedPoseWorld(clnf_model, fx, fy, cx, cy);

            //                        // Gaze tracking, absolute gaze direction
            //                        cv::Point3f gazeDirection0(0, 0, -1);
            //            cv::Point3f gazeDirection1(0, 0, -1);

            //            if (success && det_parameters.track_gaze)
            //            {
            //                FaceAnalysis::EstimateGaze(clnf_model, gazeDirection0, fx, fy, cx, cy, true);
            //                FaceAnalysis::EstimateGaze(clnf_model, gazeDirection1, fx, fy, cx, cy, false);

            //            }

            //            auto ActionUnits = face_analyser.PredictStaticAUs(read_image, clnf_model, false);

            //            // Writing out the detected landmarks (in an OS independent manner)
            //            if (!output_landmark_locations.empty())
            //            {
            //                char name[100];
            //                // append detection number (in case multiple faces are detected)
            //                sprintf(name, "_det_%d", face_det);

            //                // Construct the output filename
            //                boost::filesystem::path slash("/");
            //                std::string preferredSlash = slash.make_preferred().string();

            //                boost::filesystem::path out_feat_path(output_landmark_locations.at(i));
            //                boost::filesystem::path dir = out_feat_path.parent_path();
            //                boost::filesystem::path fname = out_feat_path.filename().replace_extension("");
            //                boost::filesystem::path ext = out_feat_path.extension();
            //                string outfeatures = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
            //                write_out_landmarks(outfeatures, clnf_model, headPose, gazeDirection0, gazeDirection1, ActionUnits.first, ActionUnits.second);
            //            }

            //            if (!output_pose_locations.empty())
            //            {
            //                char name[100];
            //                // append detection number (in case multiple faces are detected)
            //                sprintf(name, "_det_%d", face_det);

            //                // Construct the output filename
            //                boost::filesystem::path slash("/");
            //                std::string preferredSlash = slash.make_preferred().string();

            //                boost::filesystem::path out_pose_path(output_pose_locations.at(i));
            //                boost::filesystem::path dir = out_pose_path.parent_path();
            //                boost::filesystem::path fname = out_pose_path.filename().replace_extension("");
            //                boost::filesystem::path ext = out_pose_path.extension();
            //                string outfeatures = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
            //                write_out_pose_landmarks(outfeatures, clnf_model.GetShape(fx, fy, cx, cy), headPose, gazeDirection0, gazeDirection1);

            //            }

            //            if (det_parameters.track_gaze)
            //            {
            //                cv::Vec6d pose_estimate_to_draw = LandmarkDetector::GetCorrectedPoseWorld(clnf_model, fx, fy, cx, cy);

            //                // Draw it in reddish if uncertain, blueish if certain
            //                LandmarkDetector::DrawBox(read_image, pose_estimate_to_draw, cv::Scalar(255.0, 0, 0), 3, fx, fy, cx, cy);
            //                FaceAnalysis::DrawGaze(read_image, clnf_model, gazeDirection0, gazeDirection1, fx, fy, cx, cy);
            //            }

            //            // displaying detected landmarks
            //            cv::Mat display_image;
            //            create_display_image(read_image, display_image, clnf_model);

            //            if (visualise && success)
            //            {
            //                imshow("colour", display_image);
            //                cv::waitKey(1);
            //            }

            //            // Saving the display images (in an OS independent manner)
            //            if (!output_images.empty() && success)
            //            {
            //                string outimage = output_images.at(i);
            //                if (!outimage.empty())
            //                {
            //                    char name[100];
            //                    sprintf(name, "_det_%d", face_det);

            //                    boost::filesystem::path slash("/");
            //                    std::string preferredSlash = slash.make_preferred().string();

            //                    // append detection number
            //                    boost::filesystem::path out_feat_path(outimage);
            //                    boost::filesystem::path dir = out_feat_path.parent_path();
            //                    boost::filesystem::path fname = out_feat_path.filename().replace_extension("");
            //                    boost::filesystem::path ext = out_feat_path.extension();
            //                    outimage = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
            //                    create_directory_from_file(outimage);
            //                    bool write_success = cv::imwrite(outimage, display_image);

            //                    if (!write_success)
            //                    {
            //                        cout << "Could not output a processed image" << endl;
            //                        return 1;
            //                    }

            //                }

            //            }

            //            if (success)
            //            {
            //                face_det++;
            //            }

            //        }
            //    }
            //		else
            //		{
            //			// Have provided bounding boxes
            //			LandmarkDetector::DetectLandmarksInImage(grayscale_image, bounding_boxes[i], clnf_model, det_parameters);

            //			// Estimate head pose and eye gaze
            //			cv::Vec6d headPose = LandmarkDetector::GetCorrectedPoseWorld(clnf_model, fx, fy, cx, cy);

            //    // Gaze tracking, absolute gaze direction
            //    cv::Point3f gazeDirection0(0, 0, -1);
            //    cv::Point3f gazeDirection1(0, 0, -1);

            //			if (det_parameters.track_gaze)
            //			{
            //				FaceAnalysis::EstimateGaze(clnf_model, gazeDirection0, fx, fy, cx, cy, true);
            //				FaceAnalysis::EstimateGaze(clnf_model, gazeDirection1, fx, fy, cx, cy, false);
            //			}

            //auto ActionUnits = face_analyser.PredictStaticAUs(read_image, clnf_model, false);

            //			// Writing out the detected landmarks
            //			if(!output_landmark_locations.empty())
            //			{
            //				string outfeatures = output_landmark_locations.at(i);

            //                write_out_landmarks(outfeatures, clnf_model, headPose, gazeDirection0, gazeDirection1, ActionUnits.first, ActionUnits.second);
            //			}

            //			// Writing out the detected landmarks
            //			if (!output_pose_locations.empty())
            //			{
            //				string outfeatures = output_pose_locations.at(i);

            //                write_out_pose_landmarks(outfeatures, clnf_model.GetShape(fx, fy, cx, cy), headPose, gazeDirection0, gazeDirection1);
            //			}

            //			// displaying detected stuff
            //			cv::Mat display_image;

            //			if (det_parameters.track_gaze)
            //			{
            //				cv::Vec6d pose_estimate_to_draw = LandmarkDetector::GetCorrectedPoseWorld(clnf_model, fx, fy, cx, cy);

            //// Draw it in reddish if uncertain, blueish if certain
            //LandmarkDetector::DrawBox(read_image, pose_estimate_to_draw, cv::Scalar(255.0, 0, 0), 3, fx, fy, cx, cy);
            //				FaceAnalysis::DrawGaze(read_image, clnf_model, gazeDirection0, gazeDirection1, fx, fy, cx, cy);
            //			}


            //            create_display_image(read_image, display_image, clnf_model);

            //			if(visualise)
            //			{

            //                imshow("colour", display_image);
            //cv::waitKey(1);
            //			}

            //			if(!output_images.empty())
            //			{
            //				string outimage = output_images.at(i);
            //				if(!outimage.empty())
            //				{

            //                    create_directory_from_file(outimage);
            //bool write_success = imwrite(outimage, display_image);

            //					if (!write_success)
            //					{
            //						cout << "Could not output a processed image" << endl;
            //						return 1;
            //					}
            //				}
            //			}
            //		}

            //	}

            return(0);
        }
Example #17
        public MainWindow()
        {
            bool bPush = false;

            string configfile_name = "config.xml";

            FileStream  stream   = new FileStream(configfile_name, FileMode.Open);
            XmlDocument document = new XmlDocument();

            document.Load(stream);

            string serveraddress = "localhost";
            int    port          = 5570;


            XmlNodeList list = document.GetElementsByTagName("Mode");

            if (list.Count > 0 && ((XmlElement)list[0]).InnerText.ToLower().Equals("push"))
            {
                bPush = true;
            }
            list = document.GetElementsByTagName("IP");
            if (list.Count > 0)
            {
                serveraddress = ((XmlElement)list[0]).InnerText;
            }
            list = document.GetElementsByTagName("Port");
            if (list.Count > 0)
            {
                port = Int32.Parse(((XmlElement)list[0]).InnerText);
            }
            list = document.GetElementsByTagName("Topic");
            if (list.Count > 0)
            {
                topic = ((XmlElement)list[0]).InnerText;
            }


            String hostName = Dns.GetHostName();

            IPAddress[] addresses = Dns.GetHostAddresses(hostName);

            string myaddress = "localhost";


            foreach (IPAddress address in addresses)
            {
                if (address.AddressFamily == System.Net.Sockets.AddressFamily.InterNetwork)
                {
                    myaddress = address.ToString();
                }
            }

            InitializeComponent();
            this.DataContext = this; // For WPF data binding

            // Set the icon
            Uri iconUri = new Uri("logo1.ico", UriKind.RelativeOrAbsolute);

            this.Icon = BitmapFrame.Create(iconUri);

            String root = AppDomain.CurrentDomain.BaseDirectory;

            face_model_params = new FaceModelParameters(root, false);
            landmark_detector = new CLNF(face_model_params);

            gaze_analyser = new GazeAnalyserManaged();

            frame_no = 0;

            // Added by Huang
            pubSocket = new PublisherSocket();
            pubSocket.Options.SendHighWatermark = 1000;
            if (bPush)
            {
                pubSocket.Connect("tcp://" + serveraddress + ":" + port);
            }
            else
            {
                pubSocket.Bind("tcp://" + myaddress + ":" + port);
            }
        }
Example #18
File: CLNF.cs Project: zjmsky/SharpFace
 // Returns the native handle held by a CLNF instance, or a null handle when obj is null
 internal static global::System.Runtime.InteropServices.HandleRef getCPtr(CLNF obj)
 {
     return((obj == null) ? new global::System.Runtime.InteropServices.HandleRef(null, global::System.IntPtr.Zero) : obj.swigCPtr);
 }
Example #19
 // Simple container pairing a captured frame with the face model state and the detection result
 public LandmarkData(FrameData frame, CLNF faceModel, bool detectionSucceeded)
 {
     FrameData          = frame;
     FaceModel          = faceModel;
     DetectionSucceeded = detectionSucceeded;
 }
Example #20
        public override int Run()
        {
            int device = 0;

            var argument = new StringList {
                "./"
            };
            FaceModelParameters det_parameters = new FaceModelParameters(argument);

            //vector<string> files, depth_directories, output_video_files, out_dummy;
            StringList files = new StringList(), output_video_files = new StringList(), out_dummy = new StringList();
            bool       u;
            string     output_codec;

            LandmarkDetector.get_video_input_output_params(files, out_dummy, output_video_files, out u, out output_codec, argument);

            CLNF clnf_model = new CLNF(det_parameters.model_location);

            float fx = 0, fy = 0, cx = 0, cy = 0;

            LandmarkDetector.get_camera_params(out device, out fx, out fy, out cx, out cy, argument);

            // If cx (optical axis centre) is undefined will use the image size/2 as an estimate
            bool cx_undefined = false;
            bool fx_undefined = false;

            if (cx == 0 || cy == 0)
            {
                cx_undefined = true;
            }
            if (fx == 0 || fy == 0)
            {
                fx_undefined = true;
            }

            //// Do some grabbing
            INFO_STREAM("Attempting to capture from device: " + device);
            using (VideoCapture video_capture = new VideoCapture(device))
            {
                using (Mat dummy = new Mat())
                    video_capture.Read(dummy);

                if (!video_capture.IsOpened())
                {
                    FATAL_STREAM("Failed to open video source");
                    return(1);
                }
                else
                {
                    INFO_STREAM("Device or file opened");
                }

                int frame_count    = 0;
                Mat captured_image = new Mat();
                video_capture.Read(captured_image);
                Size = new Size(captured_image.Width / SizeFactor, captured_image.Height / SizeFactor);
                using (var resized_image = captured_image.Resize(Size))
                {
                    // If optical centers are not defined just use center of image
                    if (cx_undefined)
                    {
                        cx = resized_image.Cols / 2.0f;
                        cy = resized_image.Rows / 2.0f;
                    }
                    // Use a rough guess-timate of focal length
                    if (fx_undefined)
                    {
                        fx = (float)(500 * (resized_image.Cols / 640.0));
                        fy = (float)(500 * (resized_image.Rows / 480.0));

                        fx = (float)((fx + fy) / 2.0);
                        fy = fx;
                    }
                }

                // Use for timestamping if using a webcam
                long t_initial = Cv2.GetTickCount();

                INFO_STREAM("Starting tracking");
                while (video_capture.Read(captured_image))
                {
                    using (var resized_image = captured_image.Resize(Size))
                    {
                        // Reading the images
                        MatOfByte grayscale_image = new MatOfByte();

                        if (resized_image.Channels() == 3)
                        {
                            Cv2.CvtColor(resized_image, grayscale_image, ColorConversionCodes.BGR2GRAY);
                        }
                        else
                        {
                            grayscale_image = (MatOfByte)resized_image.Clone();
                        }

                        // The actual facial landmark detection / tracking
                        bool detection_success = LandmarkDetector.DetectLandmarksInVideo(new SWIGTYPE_p_cv__Mat_T_uchar_t(grayscale_image.CvPtr), new SWIGTYPE_p_CLNF(CLNF.getCPtr(clnf_model)), new SWIGTYPE_p_FaceModelParameters(FaceModelParameters.getCPtr(det_parameters)));

                        // Visualising the results
                        // Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
                        double detection_certainty = clnf_model.detection_certainty;

                        visualise_tracking(resized_image, ref clnf_model, ref det_parameters, frame_count, fx, fy, cx, cy);

                        // detect key presses
                        char character_press = (char)Cv2.WaitKey(15);
                        switch (character_press)
                        {
                        case 'r':
                            clnf_model.Reset();
                            break;

                        case 'q':
                            return(0);
                        }

                        // Update the frame count
                        frame_count++;

                        grayscale_image.Dispose();
                        grayscale_image = null;
                    }
                }
            }

            return(0);
        }