public static void LoadSavedData()
{
    // Load the saved faces and labels only once, while the picture collection is still empty
    if (File.Exists(TARGET_PATH + LABELS_FILE) && PicturesVM.Pictures.Count <= 0)
    {
        ImageFiles = Directory.GetFiles(TARGET_PATH, "*." + IMAGE_EXT);

        // Labels are persisted as a single '%'-separated string, one label per saved image,
        // in the same order as the image files; stop at the first empty token
        Names = File.ReadAllText(TARGET_PATH + LABELS_FILE)
                    .Split('%')
                    .TakeWhile(x => !string.IsNullOrEmpty(x))
                    .ToList();

        int countLabels = 0;
        foreach (var f in ImageFiles)
        {
            #region Filling VModel
            PictureModel pm = new PictureModel();
            pm.ImgSource = new WriteableBitmap(new BitmapImage(new Uri(f)));
            pm.Name = Names[countLabels];
            pm.ID = countLabels;
            PicturesVM.Pictures.Add(pm);
            #endregion

            #region Filling Training Images
            TrainingImages.Add(new Image<Gray, byte>(f));
            #endregion

            countLabels++;
        }
    }
}
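The '%'-separated format parsed above implies a matching write step whenever a face is registered. A minimal sketch of that counterpart (the SaveLabel name is hypothetical; the only requirement is one '%'-terminated label appended per saved image, in the same order as the image files):

public static void SaveLabel(string name)
{
    // Hypothetical helper: append one '%'-terminated label per saved face image,
    // so LoadSavedData can pair Names[i] with ImageFiles[i]
    File.AppendAllText(TARGET_PATH + LABELS_FILE, name + "%");
}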
private async void CompositionTarget_Rendering(object sender, EventArgs e)
{
    // Runs on every rendered frame: grab a frame and, until faces have been found, try to detect some
    currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

    if (_rects == null || _rects.Length == 0)
    {
        gray = currentFrame.Convert<Gray, Byte>();
        var size = new System.Drawing.Size(20, 20);
        var window = new System.Drawing.Size(grabber.Width, grabber.Height);
        _rects = _faceClassifier.DetectMultiScale(gray, 1.2, 10, size, window);
        _vmodel.PersonRecognized = _rects.Length;

        if (_rects.Length > 0)
        {
            // 1) save the currently rendered frame as a snapshot
            // 2) upload the snapshot to the Face API to detect its faces
            // 3) verify the detected faces against the trained images
            string snapshot = CommonData.TARGET_SNAPSHOT_PAHT
                + DateTime.Now.ToString().Replace('/', '_').Replace(':', '_')
                + "." + CommonData.IMAGE_EXT;
            currentFrame.Save(snapshot);

            _progressRec.IsIndeterminate = true; // show activity while the async calls run
            _progressRec.IsEnabled = true;

            // Detect all faces in the snapshot
            var detectedFaces = await UploadAndDetectFaces(snapshot);

            // Upload every trained image and collect the faces the service finds in them;
            // clear first so the list stays aligned with CommonData.ImageFiles
            _trainedFacesAI.Clear();
            foreach (var trainedFile in CommonData.ImageFiles)
            {
                using (var fileStream = File.OpenRead(trainedFile))
                {
                    var f = await faceServiceClient.DetectAsync(fileStream);
                    _trainedFacesAI.AddRange(f.ToList());
                }
            }

            // Verify each detected face against every trained face in the db
            foreach (var face in detectedFaces)
            {
                int i = 0; // index of the trained image/label being compared
                foreach (var secondFace in _trainedFacesAI)
                {
                    var res = await faceServiceClient.VerifyAsync(face.FaceId, secondFace.FaceId);

                    PictureModel _model = new PictureModel();
                    _model.ImgSource = new WriteableBitmap(new BitmapImage(new Uri(CommonData.ImageFiles[i])));
                    _model.AIID = face.FaceId.ToString();

                    if (res.IsIdentical)
                    {
                        _model.Name = CommonData.Names[i];
                        _model.ID = i;
                        _model.IsVerified = true;
                        _model.Confidence = res.Confidence;
                    }
                    else
                    {
                        _model.Name = "Unknown";
                        _model.IsVerified = false;
                        _model.Confidence = res.Confidence;
                    }

                    // Add the result only if this face has not been added already
                    if (!_vmodel.Pictures.Any(x => x.AIID == _model.AIID))
                    {
                        _vmodel.Pictures.Add(_model);
                    }
                    i++;
                }
            }

            _progressRec.IsIndeterminate = false;
            _progressRec.IsEnabled = false;
        }
    }

    _imgSource.Source = ImageHelper.ToBitmapSource(currentFrame);
}
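UploadAndDetectFaces is called above but not shown. A minimal sketch of what it needs to do, assuming the same Face API client already used for DetectAsync and VerifyAsync (the exact signature is a guess from the call site):

private async Task<Face[]> UploadAndDetectFaces(string imageFilePath)
{
    // Hypothetical body: stream the saved snapshot to the Face API and return
    // one Face (carrying the FaceId that VerifyAsync needs) per detected face
    using (var imageFileStream = File.OpenRead(imageFilePath))
    {
        return await faceServiceClient.DetectAsync(imageFileStream);
    }
}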
private void RegisterFaces()
{
    // Iterate until at least one face is detected in the grabbed frame
    while (facesDetected == null || facesDetected[0].Length == 0)
    {
        gray = grabber.QueryGrayFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

        // Face detector
        facesDetected = gray.DetectHaarCascade(
            face,
            1.2,
            10,
            Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
            new System.Drawing.Size(20, 20));

        foreach (var f in TrainFaces(facesDetected))
        {
            // Normalize every detected face to 100x100 before saving it as a training image
            var t = f.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            t.Save(CommonData.TARGET_PATH + "face" + _countFaces + "." + CommonData.IMAGE_EXT);

            PictureModel _model = new PictureModel();
            _model.ImgSource = ImageHelper.ToBitmapSource(t).ToWriteableBitmap();
            _model.ID = _countFaces;
            CommonData.PicturesVM.Pictures.Add(_model);
            _countFaces++;
        }

        Thread.Sleep(100); // give the CPU some breathing room between detection attempts
    }
}
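TrainFaces is referenced above without its body. A minimal sketch under the Emgu 2.x API, assuming it simply crops each detected region out of the current gray frame so the loop above can resize and save it (name taken from the call site, body is an assumption):

private IEnumerable<Image<Gray, byte>> TrainFaces(MCvAvgComp[][] detected)
{
    // DetectHaarCascade returns one array of detections per channel; the
    // gray image has a single channel, so detected[0] holds all the hits
    foreach (MCvAvgComp comp in detected[0])
    {
        // comp.rect is the bounding box of one detected face in the gray frame
        yield return gray.Copy(comp.rect);
    }
}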