Example #1
        unsafe void visualise_tracking(Mat captured_image, ref CLNF face_model, ref FaceModelParameters det_parameters, int frame_count, double fx, double fy, double cx, double cy)
        {
            // Draw the facial landmarks on the face and the bounding box around it if tracking is successful and initialised
            double detection_certainty = face_model.detection_certainty;
            bool   detection_success   = face_model.detection_success;

            double visualisation_boundary = 0.2;

            // Only draw if the reliability is reasonable, the value is slightly ad-hoc
            if (detection_certainty < visualisation_boundary)
            {
                LandmarkDetector.Draw(new SWIGTYPE_p_cv__Mat(captured_image.CvPtr), face_model);

                double vis_certainty = detection_certainty;
                if (vis_certainty > 1)
                {
                    vis_certainty = 1;
                }
                if (vis_certainty < -1)
                {
                    vis_certainty = -1;
                }

                vis_certainty = (vis_certainty + 1) / (visualisation_boundary + 1);

                // A rough heuristic: scale the box thickness with the image width

                int thickness = (int)Math.Ceiling(2.0 * ((double)captured_image.Cols) / 640.0);

                // Head pose from the model: (X, Y, Z, rot_x, rot_y, rot_z) in world coordinates
                Vec6d pose_estimate_to_draw = *((Vec6d *)LandmarkDetector.GetCorrectedPoseWorld(new SWIGTYPE_p_CLNF(CLNF.getCPtr(face_model)), fx, fy, cx, cy).Pointer);

                // Draw it in reddish if uncertain, blueish if certain
                var color = new Scalar((1 - vis_certainty) * 255.0, 0, vis_certainty * 255);
                LandmarkDetector.DrawBox(new SWIGTYPE_p_cv__Mat(captured_image.CvPtr), new SWIGTYPE_p_cv__Vec6d(new IntPtr(&pose_estimate_to_draw)), new SWIGTYPE_p_cv__Scalar(new IntPtr(&(color))), thickness, (float)fx, (float)fy, (float)cx, (float)cy);
            }

            // Work out the framerate every 10 frames
            if (frame_count % 10 == 0)
            {
                long t1 = Cv2.GetTickCount();
                fps_tracker = 10.0 / ((t1 - t0) / Cv2.GetTickFrequency());
                t0 = t1;
            }

            // Scale the frame back up to the original capture size for display
            using (Mat resize = captured_image.Resize(new Size(Size.Width * SizeFactor, Size.Height * SizeFactor)))
            {
                string fpsSt = "FPS: " + fps_tracker.ToString("0.0");
                Cv2.PutText(resize, fpsSt, new Point(10, 20), HersheyFonts.HersheyPlain, 1, new Scalar(0, 255, 0), 1, LineTypes.AntiAlias);
                Cv2.ImShow("tracking_result", resize);
            }
        }
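
The method above reads several instance members that are not shown in the listing (fps_tracker, t0, Size, and SizeFactor). A minimal sketch of plausible declarations, with assumed initial values, so the snippet compiles in context:

        private double fps_tracker = 0.0; // smoothed frames-per-second estimate for the overlay
        private long t0 = 0;              // tick count at the start of the current 10-frame window
        private Size Size;                // processing size, set in Run() from the first captured frame (see Example #2)
        private const int SizeFactor = 2; // assumed downscale factor; the real value is not shown in these listings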
Example #2
        public override int Run()
        {
            int device = 0;

            var argument = new StringList {
                "./"
            };
            FaceModelParameters det_parameters = new FaceModelParameters(argument);

            //vector<string> files, depth_directories, output_video_files, out_dummy;
            StringList files = new StringList(), output_video_files = new StringList(), out_dummy = new StringList();
            bool       u;
            string     output_codec;

            LandmarkDetector.get_video_input_output_params(files, out_dummy, output_video_files, out u, out output_codec, argument);

            CLNF clnf_model = new CLNF(det_parameters.model_location);

            float fx = 0, fy = 0, cx = 0, cy = 0;

            LandmarkDetector.get_camera_params(out device, out fx, out fy, out cx, out cy, argument);

            // If cx (optical axis centre) is undefined, use the image centre (size / 2) as an estimate
            bool cx_undefined = false;
            bool fx_undefined = false;

            if (cx == 0 || cy == 0)
            {
                cx_undefined = true;
            }
            if (fx == 0 || fy == 0)
            {
                fx_undefined = true;
            }

            // Do some grabbing
            INFO_STREAM("Attempting to capture from device: " + device);
            using (VideoCapture video_capture = new VideoCapture(device))
            {
                using (Mat dummy = new Mat())
                    video_capture.Read(dummy);

                if (!video_capture.IsOpened())
                {
                    FATAL_STREAM("Failed to open video source");
                    return(1);
                }
                else
                {
                    INFO_STREAM("Device or file opened");
                }

                int frame_count    = 0;
                Mat captured_image = new Mat();
                video_capture.Read(captured_image);
                Size = new Size(captured_image.Width / SizeFactor, captured_image.Height / SizeFactor);
                using (var resized_image = captured_image.Resize(Size))
                {
                    // If optical centers are not defined just use center of image
                    if (cx_undefined)
                    {
                        cx = resized_image.Cols / 2.0f;
                        cy = resized_image.Rows / 2.0f;
                    }
                    // Use a rough guess-timate of focal length
                    if (fx_undefined)
                    {
                        fx = (float)(500 * (resized_image.Cols / 640.0));
                        fy = (float)(500 * (resized_image.Rows / 480.0));

                        fx = (float)((fx + fy) / 2.0);
                        fy = fx;
                    }
                }

                // Use for timestamping if using a webcam
                long t_initial = Cv2.GetTickCount();

                INFO_STREAM("Starting tracking");
                while (video_capture.Read(captured_image))
                {
                    using (var resized_image = captured_image.Resize(Size))
                    {
                        // Reading the images
                        MatOfByte grayscale_image = new MatOfByte();

                        if (resized_image.Channels() == 3)
                        {
                            Cv2.CvtColor(resized_image, grayscale_image, ColorConversionCodes.BGR2GRAY);
                        }
                        else
                        {
                            // Already single-channel: copy into the byte matrix (casting Clone() to MatOfByte would throw at runtime)
                            resized_image.CopyTo(grayscale_image);
                        }

                        // The actual facial landmark detection / tracking
                        bool detection_success = LandmarkDetector.DetectLandmarksInVideo(new SWIGTYPE_p_cv__Mat_T_uchar_t(grayscale_image.CvPtr), new SWIGTYPE_p_CLNF(CLNF.getCPtr(clnf_model)), new SWIGTYPE_p_FaceModelParameters(FaceModelParameters.getCPtr(det_parameters)));

                        // Visualising the results
                        // Drawing the facial landmarks on the face and the bounding box around it if tracking is successful and initialised

                        visualise_tracking(resized_image, ref clnf_model, ref det_parameters, frame_count, fx, fy, cx, cy);

                        // detect key presses
                        char character_press = (char)Cv2.WaitKey(15);
                        switch (character_press)
                        {
                        case 'r':
                            clnf_model.Reset();
                            break;

                        case 'q':
                            return(0);
                        }

                        // Update the frame count
                        frame_count++;

                        grayscale_image.Dispose();
                        grayscale_image = null;
                    }
                }
            }

            return(0);
        }
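
Both examples call INFO_STREAM and FATAL_STREAM, which are not defined in these listings; they mirror the logging macros used by the original OpenFace C++ samples. A minimal sketch of plausible definitions (the actual implementation in the source project is not shown):

        private static void INFO_STREAM(string msg)
        {
            Console.WriteLine("INFO: " + msg);
        }

        private static void FATAL_STREAM(string msg)
        {
            Console.WriteLine("FATAL: " + msg);
        }

Note also that visualise_tracking in Example #1 is declared unsafe (it dereferences raw SWIG pointers), so the project must be built with unsafe code enabled.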
Example #3
        public override int Run()
        {
            // Convert arguments to a more convenient vector form
            var arguments = new StringList {
                "./"
            };

            // Search paths
            var configPath = "some/config/path";
            var parentPath = arguments[0];

            // Some initial parameters that can be overridden from the command line
            StringList files                   = new StringList(),
                       outputImages            = new StringList(),
                       outputLandmarkLocations = new StringList(),
                       outputPoseLocations     = new StringList();

            //            // Bounding boxes for a face in each image (optional)
            //            vector<cv::Rect_<double>> bounding_boxes;
            CVDoubleRectList boundingBoxes = new CVDoubleRectList();

            //            LandmarkDetector::get_image_input_output_params(files, output_landmark_locations, output_pose_locations, output_images, bounding_boxes, arguments);
            LandmarkDetector.get_image_input_output_params(files, outputLandmarkLocations, outputPoseLocations, outputImages, boundingBoxes, arguments);

            //            LandmarkDetector::FaceModelParameters det_parameters(arguments);
            FaceModelParameters detParameters = new FaceModelParameters(arguments);

            //            // No need to validate detections, as we're not doing tracking
            //            det_parameters.validate_detections = false;
            detParameters.validate_detections = false;

            //            // Grab camera parameters if provided (only used for pose and eye gaze and are quite important for accurate estimates)
            //            float fx = 0, fy = 0, cx = 0, cy = 0;
            //            int device = -1;
            //            LandmarkDetector::get_camera_params(device, fx, fy, cx, cy, arguments);
            float fx = 0, fy = 0, cx = 0, cy = 0;
            int   device = -1;

            LandmarkDetector.get_camera_params(out device, out fx, out fy, out cx, out cy, arguments);

            //            // If cx (optical axis centre) is undefined will use the image size/2 as an estimate
            //            bool cx_undefined = false;
            //            bool fx_undefined = false;
            //            if (cx == 0 || cy == 0)
            //            {
            //                cx_undefined = true;
            //            }
            //            if (fx == 0 || fy == 0)
            //            {
            //                fx_undefined = true;
            //            }

            bool cx_undefined = false;
            bool fx_undefined = false;

            if (cx == 0 || cy == 0)
            {
                cx_undefined = true;
            }
            if (fx == 0 || fy == 0)
            {
                fx_undefined = true;
            }


            //            // The modules that are being used for tracking
            //            cout << "Loading the model" << endl;
            //            LandmarkDetector::CLNF clnf_model(det_parameters.model_location);
            //            cout << "Model loaded" << endl;

            INFO_STREAM("Loading the model");
            CLNF clnfModel = new CLNF(detParameters.model_location);

            INFO_STREAM("Model loaded");

            // TODO: how should dlib::frontal_face_detector be ported? No wrapper exists yet.

            //            cv::CascadeClassifier classifier(det_parameters.face_detector_location);
            //            dlib::frontal_face_detector face_detector_hog = dlib::get_frontal_face_detector();
            CascadeClassifier classifier = new CascadeClassifier(detParameters.face_detector_location);
            SWIGTYPE_p_dlib__frontal_face_detector faceDetectorHog; // never initialised; see the TODO above

            //            // Loading the AU prediction models
            //            string au_loc = "AU_predictors/AU_all_static.txt";
            string auLoc = "AU_predictors/AU_all_static.txt";

            //            boost::filesystem::path au_loc_path = boost::filesystem::path(au_loc);


            //            if (boost::filesystem::exists(au_loc_path))
            //            {
            //                au_loc = au_loc_path.string();
            //            }
            //            else if (boost::filesystem::exists(parent_path / au_loc_path))
            //            {
            //                au_loc = (parent_path / au_loc_path).string();
            //            }
            //            else if (boost::filesystem::exists(config_path / au_loc_path))
            //            {
            //                au_loc = (config_path / au_loc_path).string();
            //            }
            //            else
            //            {
            //                cout << "Can't find AU prediction files, exiting" << endl;
            //                return 1;
            //            }

            //            // Used for image masking for AUs
            //            string tri_loc;
            //            boost::filesystem::path tri_loc_path = boost::filesystem::path("model/tris_68_full.txt");
            //            if (boost::filesystem::exists(tri_loc_path))
            //            {
            //                tri_loc = tri_loc_path.string();
            //            }
            //            else if (boost::filesystem::exists(parent_path / tri_loc_path))
            //            {
            //                tri_loc = (parent_path / tri_loc_path).string();
            //            }
            //            else if (boost::filesystem::exists(config_path / tri_loc_path))
            //            {
            //                tri_loc = (config_path / tri_loc_path).string();
            //            }
            //            else
            //            {
            //                cout << "Can't find triangulation files, exiting" << endl;
            //                return 1;
            //            }

            //            FaceAnalysis::FaceAnalyser face_analyser(vector<cv::Vec3d>(), 0.7, 112, 112, au_loc, tri_loc);

            //            bool visualise = !det_parameters.quiet_mode;

            //            // Do some image loading
            //            for (size_t i = 0; i < files.size(); i++)
            //            {
            //                string file = files.at(i);

            //                // Loading image
            //                cv::Mat read_image = cv::imread(file, -1);

            //                if (read_image.empty())
            //                {
            //                    cout << "Could not read the input image" << endl;
            //                    return 1;
            //                }

            //                // Making sure the image is in uchar grayscale
            //                cv::Mat_<uchar> grayscale_image;
            //                convert_to_grayscale(read_image, grayscale_image);


            //                // If optical centers are not defined just use center of image
            //                if (cx_undefined)
            //                {
            //                    cx = grayscale_image.cols / 2.0f;
            //                    cy = grayscale_image.rows / 2.0f;
            //                }
            //                // Use a rough guess-timate of focal length
            //                if (fx_undefined)
            //                {
            //                    fx = 500 * (grayscale_image.cols / 640.0);
            //                    fy = 500 * (grayscale_image.rows / 480.0);

            //                    fx = (fx + fy) / 2.0;
            //                    fy = fx;
            //                }


            //                // if no pose defined we just use a face detector
            //                if (bounding_boxes.empty())
            //                {

            //                    // Detect faces in an image
            //                    vector<cv::Rect_<double>> face_detections;

            //                    if (det_parameters.curr_face_detector == LandmarkDetector::FaceModelParameters::HOG_SVM_DETECTOR)
            //                    {
            //                        vector<double> confidences;
            //                        LandmarkDetector::DetectFacesHOG(face_detections, grayscale_image, face_detector_hog, confidences);
            //                    }
            //                    else
            //                    {
            //                        LandmarkDetector::DetectFaces(face_detections, grayscale_image, classifier);
            //                    }

            //                    // Detect landmarks around detected faces
            //                    int face_det = 0;
            //                    // perform landmark detection for every face detected
            //                    for (size_t face = 0; face < face_detections.size(); ++face)
            //                    {
            //                        // if there are multiple detections go through them
            //                        bool success = LandmarkDetector::DetectLandmarksInImage(grayscale_image, face_detections[face], clnf_model, det_parameters);

            //                        // Estimate head pose and eye gaze
            //                        cv::Vec6d headPose = LandmarkDetector::GetCorrectedPoseWorld(clnf_model, fx, fy, cx, cy);

            //                        // Gaze tracking, absolute gaze direction
            //                        cv::Point3f gazeDirection0(0, 0, -1);
            //                        cv::Point3f gazeDirection1(0, 0, -1);

            //            if (success && det_parameters.track_gaze)
            //            {
            //                FaceAnalysis::EstimateGaze(clnf_model, gazeDirection0, fx, fy, cx, cy, true);
            //                FaceAnalysis::EstimateGaze(clnf_model, gazeDirection1, fx, fy, cx, cy, false);

            //            }

            //            auto ActionUnits = face_analyser.PredictStaticAUs(read_image, clnf_model, false);

            //            // Writing out the detected landmarks (in an OS independent manner)
            //            if (!output_landmark_locations.empty())
            //            {
            //                char name[100];
            //                // append detection number (in case multiple faces are detected)
            //                sprintf(name, "_det_%d", face_det);

            //                // Construct the output filename
            //                boost::filesystem::path slash("/");
            //                std::string preferredSlash = slash.make_preferred().string();

            //                boost::filesystem::path out_feat_path(output_landmark_locations.at(i));
            //                boost::filesystem::path dir = out_feat_path.parent_path();
            //                boost::filesystem::path fname = out_feat_path.filename().replace_extension("");
            //                boost::filesystem::path ext = out_feat_path.extension();
            //                string outfeatures = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
            //                write_out_landmarks(outfeatures, clnf_model, headPose, gazeDirection0, gazeDirection1, ActionUnits.first, ActionUnits.second);
            //            }

            //            if (!output_pose_locations.empty())
            //            {
            //                char name[100];
            //                // append detection number (in case multiple faces are detected)
            //                sprintf(name, "_det_%d", face_det);

            //                // Construct the output filename
            //                boost::filesystem::path slash("/");
            //                std::string preferredSlash = slash.make_preferred().string();

            //                boost::filesystem::path out_pose_path(output_pose_locations.at(i));
            //                boost::filesystem::path dir = out_pose_path.parent_path();
            //                boost::filesystem::path fname = out_pose_path.filename().replace_extension("");
            //                boost::filesystem::path ext = out_pose_path.extension();
            //                string outfeatures = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
            //                write_out_pose_landmarks(outfeatures, clnf_model.GetShape(fx, fy, cx, cy), headPose, gazeDirection0, gazeDirection1);

            //            }

            //            if (det_parameters.track_gaze)
            //            {
            //                cv::Vec6d pose_estimate_to_draw = LandmarkDetector::GetCorrectedPoseWorld(clnf_model, fx, fy, cx, cy);

            //                // Draw it in reddish if uncertain, blueish if certain
            //                LandmarkDetector::DrawBox(read_image, pose_estimate_to_draw, cv::Scalar(255.0, 0, 0), 3, fx, fy, cx, cy);
            //                FaceAnalysis::DrawGaze(read_image, clnf_model, gazeDirection0, gazeDirection1, fx, fy, cx, cy);
            //            }

            //            // displaying detected landmarks
            //            cv::Mat display_image;
            //            create_display_image(read_image, display_image, clnf_model);

            //            if (visualise && success)
            //            {
            //                imshow("colour", display_image);
            //                cv::waitKey(1);
            //            }

            //            // Saving the display images (in an OS independent manner)
            //            if (!output_images.empty() && success)
            //            {
            //                string outimage = output_images.at(i);
            //                if (!outimage.empty())
            //                {
            //                    char name[100];
            //                    sprintf(name, "_det_%d", face_det);

            //                    boost::filesystem::path slash("/");
            //                    std::string preferredSlash = slash.make_preferred().string();

            //                    // append detection number
            //                    boost::filesystem::path out_feat_path(outimage);
            //                    boost::filesystem::path dir = out_feat_path.parent_path();
            //                    boost::filesystem::path fname = out_feat_path.filename().replace_extension("");
            //                    boost::filesystem::path ext = out_feat_path.extension();
            //                    outimage = dir.string() + preferredSlash + fname.string() + string(name) + ext.string();
            //                    create_directory_from_file(outimage);
            //                    bool write_success = cv::imwrite(outimage, display_image);

            //                    if (!write_success)
            //                    {
            //                        cout << "Could not output a processed image" << endl;
            //                        return 1;
            //                    }

            //                }

            //            }

            //            if (success)
            //            {
            //                face_det++;
            //            }

            //        }
            //    }
            //		else
            //		{
            //			// Have provided bounding boxes
            //			LandmarkDetector::DetectLandmarksInImage(grayscale_image, bounding_boxes[i], clnf_model, det_parameters);

            //			// Estimate head pose and eye gaze
            //			cv::Vec6d headPose = LandmarkDetector::GetCorrectedPoseWorld(clnf_model, fx, fy, cx, cy);

            //            // Gaze tracking, absolute gaze direction
            //            cv::Point3f gazeDirection0(0, 0, -1);
            //            cv::Point3f gazeDirection1(0, 0, -1);

            //			if (det_parameters.track_gaze)
            //			{
            //				FaceAnalysis::EstimateGaze(clnf_model, gazeDirection0, fx, fy, cx, cy, true);
            //				FaceAnalysis::EstimateGaze(clnf_model, gazeDirection1, fx, fy, cx, cy, false);
            //			}

            //            auto ActionUnits = face_analyser.PredictStaticAUs(read_image, clnf_model, false);

            //			// Writing out the detected landmarks
            //			if(!output_landmark_locations.empty())
            //			{
            //				string outfeatures = output_landmark_locations.at(i);

            //                write_out_landmarks(outfeatures, clnf_model, headPose, gazeDirection0, gazeDirection1, ActionUnits.first, ActionUnits.second);
            //			}

            //			// Writing out the detected landmarks
            //			if (!output_pose_locations.empty())
            //			{
            //				string outfeatures = output_pose_locations.at(i);

            //                write_out_pose_landmarks(outfeatures, clnf_model.GetShape(fx, fy, cx, cy), headPose, gazeDirection0, gazeDirection1);
            //			}

            //			// displaying detected stuff
            //			cv::Mat display_image;

            //			if (det_parameters.track_gaze)
            //			{
            //				cv::Vec6d pose_estimate_to_draw = LandmarkDetector::GetCorrectedPoseWorld(clnf_model, fx, fy, cx, cy);

            //                // Draw it in reddish if uncertain, blueish if certain
            //                LandmarkDetector::DrawBox(read_image, pose_estimate_to_draw, cv::Scalar(255.0, 0, 0), 3, fx, fy, cx, cy);
            //				FaceAnalysis::DrawGaze(read_image, clnf_model, gazeDirection0, gazeDirection1, fx, fy, cx, cy);
            //			}


            //            create_display_image(read_image, display_image, clnf_model);

            //			if(visualise)
            //			{

            //                imshow("colour", display_image);
            //                cv::waitKey(1);
            //			}

            //			if(!output_images.empty())
            //			{
            //				string outimage = output_images.at(i);
            //				if(!outimage.empty())
            //				{

            //                    create_directory_from_file(outimage);
            //                    bool write_success = imwrite(outimage, display_image);

            //					if (!write_success)
            //					{
            //						cout << "Could not output a processed image" << endl;
            //						return 1;
            //					}
            //				}
            //			}
            //		}

            //	}

            return(0);
        }
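
Most of Example #3 is the original C++ sample kept as a commented-out porting reference. A minimal sketch of how the start of that image-loading loop might look in C#, under the same OpenCvSharp conventions as the examples above; only the image loading, grayscale preparation, and optical-centre fallback are shown, and the per-image landmark detection wrappers are assumed to follow the DetectLandmarksInVideo pattern from Example #2:

        // Hypothetical port of the beginning of the commented-out image loop
        for (int i = 0; i < files.Count; i++)
        {
            // Load the image unchanged (the C++ sample uses cv::imread(file, -1))
            using (Mat readImage = Cv2.ImRead(files[i], ImreadModes.Unchanged))
            {
                if (readImage.Empty())
                {
                    INFO_STREAM("Could not read the input image");
                    return(1);
                }

                // Make sure the image is in uchar grayscale
                using (var grayscaleImage = new MatOfByte())
                {
                    if (readImage.Channels() == 3)
                    {
                        Cv2.CvtColor(readImage, grayscaleImage, ColorConversionCodes.BGR2GRAY);
                    }
                    else
                    {
                        readImage.CopyTo(grayscaleImage);
                    }

                    // If optical centres are not defined, just use the centre of the image
                    if (cx_undefined)
                    {
                        cx = grayscaleImage.Cols / 2.0f;
                        cy = grayscaleImage.Rows / 2.0f;
                    }

                    // ... face detection and per-image landmark fitting would continue here,
                    // following the commented-out C++ above.
                }
            }
        }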