Example #1
        private void RecordObservation(RecorderOpenFace recorder, RawImage vis_image, int face_id, bool success, float fx, float fy, float cx, float cy, double timestamp, int frame_number)
        {
            recorder.SetObservationTimestamp(timestamp);

            // Landmark detection confidence for the current frame
            double confidence = landmark_detector.GetConfidence();

            // Head pose estimated using the camera intrinsics (fx, fy, cx, cy)
            List<float> pose = new List<float>();
            landmark_detector.GetPose(pose, fx, fy, cx, cy);
            recorder.SetObservationPose(pose);

            // 2D and 3D facial landmarks, plus rigid and non-rigid shape parameters
            List<Tuple<float, float>> landmarks_2D = landmark_detector.CalculateAllLandmarks();
            List<Tuple<float, float, float>> landmarks_3D = landmark_detector.Calculate3DLandmarks(fx, fy, cx, cy);
            List<float> global_params = landmark_detector.GetRigidParams();
            List<float> local_params = landmark_detector.GetNonRigidParams();

            recorder.SetObservationLandmarks(landmarks_2D, landmarks_3D, global_params, local_params, confidence, success);

            // Eye gaze vectors for both eyes and the overall gaze angle
            var gaze = gaze_analyser.GetGazeCamera();
            var gaze_angle = gaze_analyser.GetGazeAngle();

            // 2D and 3D eye-region landmarks
            var landmarks_2d_eyes = landmark_detector.CalculateAllEyeLandmarks();
            var landmarks_3d_eyes = landmark_detector.CalculateAllEyeLandmarks3D(fx, fy, cx, cy);

            recorder.SetObservationGaze(gaze.Item1, gaze.Item2, gaze_angle, landmarks_2d_eyes, landmarks_3d_eyes);

            // Action Unit intensities (regression) and presence (classification)
            var au_regs = face_analyser.GetCurrentAUsReg();
            var au_classes = face_analyser.GetCurrentAUsClass();

            recorder.SetObservationActionUnits(au_regs, au_classes);

            recorder.SetObservationFaceID(face_id);
            recorder.SetObservationFrameNumber(frame_number);

            // Aligned face image and its HOG appearance descriptor
            recorder.SetObservationFaceAlign(face_analyser.GetLatestAlignedFace());

            var hog_feature = face_analyser.GetLatestHOGFeature();

            recorder.SetObservationHOG(success, hog_feature, face_analyser.GetHOGRows(), face_analyser.GetHOGCols(), face_analyser.GetHOGChannels());

            recorder.SetObservationVisualization(vis_image);

            // Flush the assembled observation to the output files
            recorder.WriteObservation();
        }
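For context, a minimal sketch of how a caller might drive RecordObservation for a single frame; the surrounding objects (reader, rec_params, visualizer_of, record_root and the detector/analyser fields) are assumed to be set up as in the examples below, and only calls that appear in these examples are used:

        // Hypothetical caller: assemble and write one observation, then finalize the recording.
        RecorderOpenFace recorder = new RecorderOpenFace(reader.GetName(), rec_params, record_root);
        bool success = landmark_detector.DetectLandmarksInVideo(frame, face_model_params, gray_frame);
        RecordObservation(recorder, visualizer_of.GetVisImage(), 0, success,
                          reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(),
                          reader.GetTimestamp(), reader.GetFrameNumber());
        recorder.Close();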
Example #2
        private void ProcessIndividualImages(ImageReader reader)
        {
            // Make sure the GUI is set up appropriately
            SetupFeatureExtractionMode();

            // Indicate we will start running the thread
            thread_running = true;

            // Reload the face landmark detector if needed
            ReloadLandmarkDetector();

            if (!landmark_detector.isLoaded())
            {
                DetectorNotFoundWarning();
                EndMode();
                thread_running = false;
                return;
            }

            // Setup the parameters optimized for working on individual images rather than sequences
            face_model_params.optimiseForImages();

            // Setup the visualization
            Visualizer visualizer_of = new Visualizer(ShowTrackedVideo || RecordTracked, ShowAppearance, ShowAppearance, false);

            // Initialize the face detector if it has not been initialized yet
            if (face_detector == null)
            {
                face_detector = new FaceDetector(face_model_params.GetHaarLocation(), face_model_params.GetMTCNNLocation());
            }

            // Initialize the face analyser
            face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, false, image_output_size, MaskAligned);

            // Load the first image and its grayscale version
            var frame      = new RawImage(reader.GetNextImage());
            var gray_frame = new RawImage(reader.GetCurrentFrameGray());

            // For FPS tracking
            DateTime? startTime = CurrentTime;
            var lastFrameTime = CurrentTime;

            // This will be false when the image is not available
            while (reader.isOpened())
            {
                if (!thread_running)
                {
                    break;
                }

                // Setup recording
                RecorderOpenFaceParameters rec_params = new RecorderOpenFaceParameters(false, false,
                                                                                       Record2DLandmarks, Record3DLandmarks, RecordModelParameters, RecordPose, RecordAUs,
                                                                                       RecordGaze, RecordHOG, RecordTracked, RecordAligned, true,
                                                                                       reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), 0);

                RecorderOpenFace recorder = new RecorderOpenFace(reader.GetName(), rec_params, record_root);

                // Detect faces here and return bounding boxes
                List<Rect> face_detections = new List<Rect>();
                List<float> confidences = new List<float>();
                if (DetectorHOG)
                {
                    face_detector.DetectFacesHOG(face_detections, gray_frame, confidences);
                }
                else if (DetectorCNN)
                {
                    face_detector.DetectFacesMTCNN(face_detections, frame, confidences);
                }
                else if (DetectorHaar)
                {
                    face_detector.DetectFacesHaar(face_detections, gray_frame, confidences);
                }

                // For visualization
                double progress = reader.GetProgress();

                for (int i = 0; i < face_detections.Count; ++i)
                {
                    bool detection_succeeding = landmark_detector.DetectFaceLandmarksInImage(frame, face_detections[i], face_model_params, gray_frame);

                    var landmarks = landmark_detector.CalculateAllLandmarks();

                    // Predict action units
                    var au_preds = face_analyser.PredictStaticAUsAndComputeFeatures(frame, landmarks);

                    // Predict eye gaze
                    gaze_analyser.AddNextFrame(landmark_detector, detection_succeeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());

                    // Only the final face will contain the details
                    VisualizeFeatures(frame, visualizer_of, landmarks, landmark_detector.GetVisibilities(), detection_succeeding, i == 0, true, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), progress);

                    // Record an observation
                    RecordObservation(recorder, visualizer_of.GetVisImage(), i, detection_succeeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), 0, 0);
                }

                frame      = new RawImage(reader.GetNextImage());
                gray_frame = new RawImage(reader.GetCurrentFrameGray());

                // Write out the tracked image
                if (RecordTracked)
                {
                    recorder.WriteObservationTracked();
                }

                // Do not carry state across images
                landmark_detector.Reset();
                face_analyser.Reset();
                recorder.Close();

                lastFrameTime = CurrentTime;
                processing_fps.AddFrame();

                // TODO how to report errors from the reader here? exceptions? logging? Problem for future versions?
            }

            EndMode();
        }
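The three-way detector branch in the loop above could be factored into a small helper; a sketch assuming the DetectorHOG/DetectorCNN/DetectorHaar flags are mutually exclusive booleans, as in the example (the helper name DetectFaces is hypothetical):

        // Hypothetical helper wrapping the detector selection from the loop above.
        // Fills 'confidences' in place and returns the detected bounding boxes.
        private List<Rect> DetectFaces(RawImage frame, RawImage gray_frame, List<float> confidences)
        {
            List<Rect> face_detections = new List<Rect>();
            if (DetectorHOG)
            {
                face_detector.DetectFacesHOG(face_detections, gray_frame, confidences);
            }
            else if (DetectorCNN)
            {
                face_detector.DetectFacesMTCNN(face_detections, frame, confidences);
            }
            else if (DetectorHaar)
            {
                face_detector.DetectFacesHaar(face_detections, gray_frame, confidences);
            }
            return face_detections;
        }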
Example #3
        // The main function call for processing sequences
        private void ProcessSequence(SequenceReader reader)
        {
            Thread.CurrentThread.Priority = ThreadPriority.Highest;

            SetupFeatureExtractionMode();

            thread_running = true;

            // Reload the face landmark detector if needed
            ReloadLandmarkDetector();

            if (!landmark_detector.isLoaded())
            {
                DetectorNotFoundWarning();
                EndMode();
                thread_running = false;
                return;
            }

            // Set the face detector
            face_model_params.SetFaceDetector(DetectorHaar, DetectorHOG, DetectorCNN);
            face_model_params.optimiseForVideo();

            // Setup the visualization
            Visualizer visualizer_of = new Visualizer(ShowTrackedVideo || RecordTracked, ShowAppearance, ShowAppearance, false);

            // Initialize the face analyser
            face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, DynamicAUModels, image_output_size, MaskAligned);

            // Reset the tracker
            landmark_detector.Reset();

            // Grab the first frame and its grayscale version
            var frame      = new RawImage(reader.GetNextImage());
            var gray_frame = new RawImage(reader.GetCurrentFrameGray());

            // Setup recording
            RecorderOpenFaceParameters rec_params = new RecorderOpenFaceParameters(true, reader.IsWebcam(),
                                                                                   Record2DLandmarks, Record3DLandmarks, RecordModelParameters, RecordPose, RecordAUs,
                                                                                   RecordGaze, RecordHOG, RecordTracked, RecordAligned, false,
                                                                                   reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), reader.GetFPS());

            RecorderOpenFace recorder = new RecorderOpenFace(reader.GetName(), rec_params, record_root);

            // For FPS tracking
            DateTime? startTime = CurrentTime;
            var lastFrameTime = CurrentTime;

            // An empty image indicates that the stream is over
            while (!gray_frame.IsEmpty)
            {
                if (!thread_running)
                {
                    break;
                }

                double progress = reader.GetProgress();

                bool detection_succeeding = landmark_detector.DetectLandmarksInVideo(frame, face_model_params, gray_frame);

                // The face analysis step (for AUs and eye gaze)
                face_analyser.AddNextFrame(frame, landmark_detector.CalculateAllLandmarks(), detection_succeeding, false);

                gaze_analyser.AddNextFrame(landmark_detector, detection_succeeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());

                // Only the final face will contain the details
                VisualizeFeatures(frame, visualizer_of, landmark_detector.CalculateAllLandmarks(), landmark_detector.GetVisibilities(), detection_succeeding, true, false, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), progress);

                // Record an observation
                RecordObservation(recorder, visualizer_of.GetVisImage(), 0, detection_succeeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), reader.GetTimestamp(), reader.GetFrameNumber());

                if (RecordTracked)
                {
                    recorder.WriteObservationTracked();
                }

                while (thread_running && thread_paused && skip_frames == 0)
                {
                    Thread.Sleep(10);
                }

                if (skip_frames > 0)
                {
                    skip_frames--;
                }

                frame      = new RawImage(reader.GetNextImage());
                gray_frame = new RawImage(reader.GetCurrentFrameGray());

                lastFrameTime = CurrentTime;
                processing_fps.AddFrame();
            }

            // Finalize the recording and flush to disk
            recorder.Close();

            // Post-process the AU recordings
            if (RecordAUs)
            {
                face_analyser.PostProcessOutputFile(recorder.GetCSVFile());
            }

            // Close the open video/webcam
            reader.Close();

            EndMode();
        }
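The thread_paused/skip_frames handshake polled inside the loop above is driven from another thread, typically the UI; a minimal sketch of that side, assuming both fields are shared with the worker (the method names here are hypothetical):

        // Hypothetical UI-side controls for the pause/step flags polled in ProcessSequence.
        private void TogglePause()
        {
            thread_paused = !thread_paused; // the worker sleeps in 10 ms slices while this is set
        }

        private void StepOneFrame()
        {
            skip_frames++; // lets a paused worker advance exactly one frame
        }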