static void Main(string[] args)
{
    // Configuration for the images-folder pipeline
    string imagesFolder = "D:\\My Work\\VR\\images";
    string outputFilepath = "D:\\My Work\\VR\\test-data-norm.txt";

    // Configuration for the CSV-dataset pipeline
    Size imageSize = new Size(350, 350);
    string csvInput = "D:\\My Work\\VR\\dataset\\facial_emotions_2_ready.csv";
    string csvOutput = "D:\\My Work\\VR\\dataset\\faceexpress_dataset_v2_2.csv";

    // Model files
    string faceDetectorModel = "D:\\My Work\\VR\\resources\\haarcascade_frontalface_alt2.xml";
    string facemarkModel = "D:\\My Work\\VR\\resources\\lbfmodel.yaml";

    // Load the Haar cascade face detector and the LBF facemark model
    CascadeClassifier faceDetector = new CascadeClassifier(faceDetectorModel);
    FacemarkLBFParams facemarkParams = new FacemarkLBFParams();
    FacemarkLBF facemark = new FacemarkLBF(facemarkParams);
    facemark.LoadModel(facemarkModel);

    // runWithCsvDataset is assumed to be a class-level flag selecting the pipeline
    if (runWithCsvDataset)
    {
        RunWithCsv(faceDetector, facemark, csvInput, csvOutput, imageSize);
    }
    else
    {
        RunWithImagesFolder(imagesFolder, outputFilepath, faceDetector, facemark);
    }

    Console.WriteLine("Program finished successfully!");
}
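The RunWithImagesFolder helper called above is not included in the snippet. A minimal sketch of what such a helper could do follows; the output format and the first-face-only choice are assumptions, not the original code, while the detect-then-fit calls mirror those used in GetFacePoints below.

// Hypothetical sketch of RunWithImagesFolder (the real helper is not shown):
// runs face detection and landmark fitting on every image in a folder and
// writes the raw landmark coordinates of the first face to a text file.
static void RunWithImagesFolder(string imagesFolder, string outputFilepath,
                                CascadeClassifier faceDetector, FacemarkLBF facemark)
{
    using (StreamWriter writer = new StreamWriter(outputFilepath))
    {
        foreach (string file in Directory.GetFiles(imagesFolder))
        {
            using (Image<Gray, byte> gray = new Image<Gray, byte>(file))
            {
                gray._EqualizeHist();
                VectorOfRect faces = new VectorOfRect(faceDetector.DetectMultiScale(gray));
                VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();
                if (faces.Size > 0 && facemark.Fit(gray, faces, landmarks))
                {
                    // Write "filename x1;y1 x2;y2 ..." for the first face only
                    PointF[] pts = landmarks[0].ToArray();
                    writer.WriteLine(file + " " +
                        string.Join(" ", pts.Select(p => p.X + ";" + p.Y)));
                }
            }
        }
    }
}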
void Start()
{
    //cascadePath = Path.Combine (Directory.GetCurrentDirectory (), AssetDatabase.GetAssetPath (cascadeFile));

    // Initialize the webcam texture
    webcamTexture = new WebCamTexture();
    webcamTexture.Play();
    width = webcamTexture.width;
    height = webcamTexture.height;

    // Build the file paths that OpenCV will load the models from
    filePath = Path.Combine(Application.persistentDataPath, cascadeModel.name + ".xml");
    fmFilePath = Path.Combine(Application.persistentDataPath, "lbfmodel.yaml");

    // Initialize the facemark system used to recognize the face
    fParams = new FacemarkLBFParams();
    fParams.ModelFile = fmFilePath;
    facemark = new FacemarkLBF(fParams);
    facemark.LoadModel(fParams.ModelFile);

    // Write the bundled cascade bytes to disk so the CascadeClassifier can read them
    File.WriteAllBytes(filePath, cascadeModel.bytes);
    convertedTexture = new Texture2D(width, height);

    Debug.Log("Tracking Started! Recording with " + webcamTexture.deviceName
              + " at " + webcamTexture.width + "x" + webcamTexture.height);
    InvokeRepeating("Track", trackingInterval, trackingInterval);
}
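The Track callback registered with InvokeRepeating is not shown in the snippet. A minimal Unity-side sketch follows; everything past the pixel copy is an assumption about where the OpenCV work would happen.

// Hypothetical sketch of the Track callback registered above (not the
// original implementation): copies the current webcam frame into the
// reusable Texture2D before handing it to the OpenCV pipeline.
void Track()
{
    if (!webcamTexture.didUpdateThisFrame)
        return;                                   // no new frame yet

    // Copy the live webcam pixels into convertedTexture
    convertedTexture.SetPixels32(webcamTexture.GetPixels32());
    convertedTexture.Apply();

    // ...convert convertedTexture to an OpenCV image and run the
    // cascade + facemark.Fit pipeline here (omitted)...
}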
public FaceLandmarksDetector(string faceDetectorModel, string faceLandmarkerModel)
{
    // Load the face detector model
    faceDetector = new CascadeClassifier(faceDetectorModel);

    // Load the facemark model (face landmarker)
    FacemarkLBFParams facemarkParams = new FacemarkLBFParams();
    facemark = new FacemarkLBF(facemarkParams);
    facemark.LoadModel(faceLandmarkerModel);
}
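A companion detection method is not part of this snippet. The following sketch (DetectLandmarks is a hypothetical name, and the Image&lt;Gray, byte&gt; input is an assumption) wires the two loaded models together the same way GetFacePoints below does.

// Hypothetical companion method for this class (not in the original):
// runs the cascade detector, then fits LBF landmarks on each detected face.
public List<VectorOfPointF> DetectLandmarks(Image<Gray, byte> grayImage)
{
    grayImage._EqualizeHist();

    // Detect faces, then fit landmark points per face
    VectorOfRect faces = new VectorOfRect(faceDetector.DetectMultiScale(grayImage));
    VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();

    List<VectorOfPointF> result = new List<VectorOfPointF>();
    if (facemark.Fit(grayImage, faces, landmarks))
    {
        for (int i = 0; i < landmarks.Size; i++)
        {
            result.Add(landmarks[i]);
        }
    }
    return result;
}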
private void FindFacialFeaturePoints()
{
    string facePath;
    try
    {
        // Path to the face detection cascade
        facePath = Path.GetFileName(@"data/haarcascade_frontalface_default.xml");

        // Load the facial feature point (FFP) model
        facemarkParam = new FacemarkLBFParams();
        facemark = new FacemarkLBF(facemarkParam);
        facemark.LoadModel(@"data/lbfmodel.yaml");
    }
    catch (Exception ex)
    {
        // Keep the original exception as the inner exception instead of discarding it
        throw new Exception(ex.Message, ex);
    }

    // Initialize the image matrices
    currImageMat = CurrImageI.Mat;
    nextImageMat = NextImageI.Mat;

    // Current face: find facial feature points
    FacesListCurr = facesArrCurr.OfType<Rectangle>().ToList();
    VectorOfRect vrCurr = new VectorOfRect(facesArrCurr);
    landmarksCurr = new VectorOfVectorOfPointF();
    facemark.Fit(currImageMat, vrCurr, landmarksCurr);
    ffpCurr = landmarksCurr[curr.SelectedFace];

    // Next face: find facial feature points
    FacesListNext = facesArrNext.OfType<Rectangle>().ToList();
    VectorOfRect vrNext = new VectorOfRect(facesArrNext);
    landmarksNext = new VectorOfVectorOfPointF();
    facemark.Fit(nextImageMat, vrNext, landmarksNext);
    ffpNext = landmarksNext[next.SelectedFace];

    // Add corner points of the images to the landmark sets
    ffpCurr = AddCornerPoints(ffpCurr, this.curr.ResizedImage.Mat);
    ffpNext = AddCornerPoints(ffpNext, this.next.ResizedImage.Mat);
}
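AddCornerPoints is referenced but not shown. A plausible sketch follows; it is an assumption about the helper's behavior, not the original implementation.

// Hypothetical sketch of AddCornerPoints: appends the four image corners
// to the landmark set so downstream processing spans the whole image.
private VectorOfPointF AddCornerPoints(VectorOfPointF points, Mat image)
{
    List<PointF> all = new List<PointF>(points.ToArray());
    int w = image.Width, h = image.Height;
    all.Add(new PointF(0, 0));
    all.Add(new PointF(w - 1, 0));
    all.Add(new PointF(0, h - 1));
    all.Add(new PointF(w - 1, h - 1));
    return new VectorOfPointF(all.ToArray());
}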
private void InitModel()
{
    faceDetector = new CascadeClassifier(Constants.FACE_DETECTOR_PATH);

    FacemarkLBFParams fParams = new FacemarkLBFParams();
    fParams.ModelFile = Constants.LANDMARK_DETECTOR_PATH;
    fParams.NLandmarks = 68;  // number of landmark points
    fParams.InitShapeN = 10;  // multiplier used to augment the initial shapes
    fParams.StagesN = 5;      // number of refinement stages
    fParams.TreeN = 6;        // number of trees in the model for each landmark point
    fParams.TreeDepth = 5;    // depth of each decision tree

    facemark = new FacemarkLBF(fParams);
    facemark.LoadModel(fParams.ModelFile);
}
public Image<Bgr, Byte> GetFacePoints()
{
    String facePath = Path.GetFullPath(@"../../data/haarcascade_frontalface_default.xml");
    //CascadeClassifier faceDetector = new CascadeClassifier(@"..\..\Resource\EMGUCV\haarcascade_frontalface_default.xml");
    CascadeClassifier faceDetector = new CascadeClassifier(facePath);

    FacemarkLBFParams fParams = new FacemarkLBFParams();
    //fParams.ModelFile = @"..\..\Resource\EMGUCV\lbfmodel.yaml";
    fParams.ModelFile = @"lbfmodel.yaml";
    fParams.NLandmarks = 68;  // number of landmark points
    fParams.InitShapeN = 10;  // multiplier used to augment the initial shapes
    fParams.StagesN = 5;      // number of refinement stages
    fParams.TreeN = 6;        // number of trees in the model for each landmark point
    fParams.TreeDepth = 5;    // depth of each decision tree

    FacemarkLBF facemark = new FacemarkLBF(fParams);
    //facemark.SetFaceDetector(MyDetector);

    // Load the test image and prepare a histogram-equalized grayscale copy
    Image<Bgr, Byte> image = new Image<Bgr, byte>("test.png");
    Image<Gray, byte> grayImage = image.Convert<Gray, byte>();
    grayImage._EqualizeHist();

    // Detect faces, then fit the landmark model to each detected face
    VectorOfRect faces = new VectorOfRect(faceDetector.DetectMultiScale(grayImage));
    VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();
    facemark.LoadModel(fParams.ModelFile);
    bool success = facemark.Fit(grayImage, faces, landmarks);

    if (success)
    {
        // Draw each face rectangle and its fitted landmarks
        Rectangle[] facesRect = faces.ToArray();
        for (int i = 0; i < facesRect.Length; i++)
        {
            image.Draw(facesRect[i], new Bgr(Color.Blue), 2);
            FaceInvoke.DrawFacemarks(image, landmarks[i], new Bgr(Color.Blue).MCvScalar);
        }
        return image;
    }
    return null;
}
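For reference, a sketch of the namespaces the snippets above rely on; exact usings vary per project, and the Unity snippet additionally needs UnityEngine.

// Common namespaces assumed by the Emgu CV snippets above (sketch):
using System;
using System.Collections.Generic;   // List<>
using System.Drawing;               // Rectangle, PointF, Color, Size
using System.IO;                    // Path, File, StreamWriter, Directory
using System.Linq;                  // OfType, ToList, Select
using Emgu.CV;                      // CascadeClassifier, Mat, Image<,>
using Emgu.CV.Face;                 // FacemarkLBF, FacemarkLBFParams, FaceInvoke
using Emgu.CV.Structure;            // Bgr, Gray
using Emgu.CV.Util;                 // VectorOfRect, VectorOfPointF, VectorOfVectorOfPointF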