internal Image<Bgr, byte> DetectFace(Mat frame, int width, int height)
{
    var image = frame.ToImage<Bgr, byte>();

    // The actual face detection happens here.
    var faces = _cascadeClassifier.DetectMultiScale(image, 1.2, 10);

    for (var i = 0; i < faces.Length; i++)
    {
        var face = faces[i];
        int xPos = face.X;
        int yPos = face.Y;
        var grayFace = image.Copy(face).Resize(width, height, Inter.Cubic).Convert<Gray, byte>();
        // grayFace._EqualizeHist();
        image.Draw(face, new Bgr(Color.LightBlue), 3);

        if (!IsTraining && _recognizer != null)
        {
            FaceRecognizer.PredictionResult result = _recognizer.Predict(grayFace);
            // float result = svmModel.Predict(grayFace);
            if (result.Label != -1 && faceMapping.ContainsKey(result.Label))
            {
                string message = faceMapping[result.Label];
                DrawText(message, image, xPos, yPos);
                Console.WriteLine("[" + result.Distance + "] " + message);
            }
            else
            {
                Console.Write(".");
            }
        }
    }

    return image;
}
public bool Recognise(Image<Gray, byte> Input_image, int Eigen_Thresh = -1)
{
    try
    {
        // Note: this retrains the recognizer on every call, which is expensive;
        // see the sketch below for the usual train-once pattern.
        recognizer.Train(TrainingImages.ToArray(), ImageLabels.ToArray());
        FaceRecognizer.PredictionResult ER = recognizer.Predict(Input_image);
        return ER.Label != -1;
    }
    catch
    {
        return false;
    }
}
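// The Predict calls throughout these snippets assume a recognizer that has already
// been trained on same-sized grayscale crops. A minimal, self-contained sketch of
// that setup, assuming the same Emgu CV API surface the snippets use (the
// Train(Image<Gray, byte>[], int[]) overload and FaceRecognizer.PredictionResult);
// the file names and label assignments here are hypothetical.
using System;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Face;
using Emgu.CV.Structure;

class TrainOnceSketch
{
    static void Main()
    {
        // A few aligned training crops per person; the int label identifies the person.
        var images = new[]
        {
            new Image<Gray, byte>("alice_1.jpg").Resize(100, 100, Inter.Cubic),
            new Image<Gray, byte>("alice_2.jpg").Resize(100, 100, Inter.Cubic),
            new Image<Gray, byte>("bob_1.jpg").Resize(100, 100, Inter.Cubic),
        };
        int[] labels = { 0, 0, 1 };

        using (var recognizer = new EigenFaceRecognizer())
        {
            recognizer.Train(images, labels);

            // Probe images must match the training size (100x100 here).
            var probe = new Image<Gray, byte>("probe.jpg").Resize(100, 100, Inter.Cubic);
            FaceRecognizer.PredictionResult result = recognizer.Predict(probe);
            Console.WriteLine($"label={result.Label}, distance={result.Distance}");
        }
    }
}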
public string Recognize(Image<Bgr, byte> display)
{
    var faces = _cascade.DetectMultiScale(display.Convert<Gray, byte>(), 1.2, 0);
    Image<Gray, byte> faceImage;
    try
    {
        faceImage = display.Convert<Gray, byte>().Copy(faces[0]).Resize(100, 100, Inter.Cubic);
    }
    catch (IndexOutOfRangeException ex)
    {
        Console.WriteLine(ex);
        return null;
    }

    var result = _recognizer.Predict(faceImage);
    //File.AppendAllText("D:\\Distance.txt", result.Distance + Environment.NewLine);
    if (result.Distance <= Distance)
    {
        return _namesList.ElementAt(result.Label);
    }
    return null;
}
public int RecognizeUser(Image<Gray, byte> userImage)
{
    faceRecognizer.Load(recognizerPath);
    var res = faceRecognizer.Predict(userImage.Resize(100, 100, Inter.Cubic));
    return res.Label;
}
public string Recognize(Image<Bgr, Byte> display)
{
    Rectangle[] faces = cascade.DetectMultiScale(display.Convert<Gray, Byte>(), 1.2, 0);
    Image<Gray, Byte> faceImage;
    try
    {
        faceImage = display.Convert<Gray, Byte>().Copy(faces[0]).Resize(100, 100, Emgu.CV.CvEnum.Inter.Cubic);
    }
    catch (IndexOutOfRangeException e)
    {
        Console.WriteLine(e.Message);
        return null;
    }

    FaceRecognizer.PredictionResult result = recognizer.Predict(faceImage);
    // For testing purposes.
    Console.WriteLine(result.Distance);
    if (result.Distance <= 3000)
    {
        // Labels appear to be assigned five training images per person,
        // so integer division recovers the name index.
        return namesList.ElementAt(result.Label / 5);
    }
    return null;
}
/// <summary>
/// Recognise a Grayscale Image using the trained Eigen Recogniser
/// </summary>
/// <param name="Input_image"></param>
/// <returns></returns>
public string Recognise(Image<Gray, byte> Input_image, int Eigen_Thresh = -1)
{
    if (!_IsTrained)
    {
        return "";
    }

    FaceRecognizer.PredictionResult ER = recognizer.Predict(Input_image);
    if (ER.Label == -1)
    {
        Eigen_label = "Unknown";
        Eigen_Distance = 0;
        return Eigen_label;
    }

    Eigen_label = Names_List[ER.Label];
    Eigen_Distance = (float)ER.Distance;
    if (Eigen_Thresh > -1)
    {
        PCAThreshold = Eigen_Thresh;
    }
    return Eigen_label; // The threshold set in training controls unknowns.
}
public string Recognise(Image<Gray, byte> Input_image, int Eigen_Thresh = -1)
{
    try
    {
        if (!_IsTrained)
        {
            return "";
        }

        Set_Eigen_Threshold = recognizeTreshold;
        FaceRecognizer.PredictionResult ER = recognizer.Predict(Input_image);
        Console.WriteLine(ER.Label);
        if (ER.Label == -1)
        {
            Eigen_label = "UnknownNull";
            Eigen_Distance = 0;
        }
        else
        {
            Eigen_label = allname[ER.Label];
            Eigen_Distance = (float)ER.Distance;
        }
        return Eigen_label + " " + Eigen_Distance.ToString();
    }
    catch (Exception e)
    {
        Console.WriteLine(e);
        return "";
    }
}
private void Detect(FaceRecognizer recognizer, CascadeClassifier haar_cascade, Mat original,
                    List<NetworkStream> displays, string[] indexToName)
{
    var sw = new Stopwatch();
    sw.Start();

    var gray = original.CvtColor(ColorConversion.BgrToGray);
    var users = new List<string>();
    var faces = haar_cascade.DetectMultiScale(gray);
    foreach (var faceRect in faces)
    {
        var face = gray.SubMat(faceRect);
        var faceResized = face.Resize(new OpenCvSharp.CPlusPlus.Size(100, 100), 1, 1, Interpolation.Cubic);

        int label;
        double confidence;
        recognizer.Predict(faceResized, out label, out confidence);
        //if (confidence > 600)
        {
            Debug.WriteLine("{0} {1}", label, confidence);
            users.Add(indexToName[label]);
        }

        original.Rectangle(faceRect, new Scalar(0, 255, 0), 3);
        original.PutText(label.ToString(), faceRect.Location, FontFace.HersheyPlain, 1, new Scalar(0, 255, 0));
        // faceResized.SaveImage("data/people/hekwal/" + Guid.NewGuid() + ".jpg");
    }

    // Broadcast the recognized names as a length-prefixed JSON array.
    var json = JArray.FromObject(users).ToString();
    foreach (var display in displays)
    {
        try
        {
            var arr = BitConverter.GetBytes(json.Length);
            display.Write(arr, 0, arr.Length);
            arr = Encoding.UTF8.GetBytes(json);
            display.Write(arr, 0, arr.Length);
        }
        catch (Exception ex)
        {
            Debug.WriteLine(ex);
        }
    }

    sw.Stop();
    Debug.WriteLine("Processed frame in " + sw.ElapsedMilliseconds);

    // Restart (not Start) so the UI timing below is not cumulative with the frame time.
    sw.Restart();
    pictureBox1.Image = Bitmap.FromStream(new MemoryStream(
        original.Resize(new OpenCvSharp.CPlusPlus.Size(256, 256), 1, 1, Interpolation.Cubic).ToBytes()));
    // pictureBox1.Image = new Bitmap(original.Cols, original.Rows, original.ElemSize(), System.Drawing.Imaging.PixelFormat.Format24bppRgb, original.Data);
    sw.Stop();
    Debug.WriteLine("Updated UI in " + sw.ElapsedMilliseconds);
}
public FaceRecognizer.PredictionResult Who(Image<Gray, Byte> face)
{
    FaceRecognizer.PredictionResult result = recognizerEMGUCV.Predict(face);
    if (result.Label != -1)
    {
        result.Label = (int)trainingImage.mapToCount[result.Label];
    }
    return result;
}
private void button2_Click(object sender, EventArgs e)
{
    learn();
    using (var imageFrame = capture.QuerySmallFrame().ToImage<Bgr, Byte>())
    {
        if (imageFrame == null)
        {
            return;
        }

        var grayframe = imageFrame.Convert<Gray, byte>();
        // The actual face detection happens here.
        var Faces = cascadeClassifier.DetectMultiScale(grayframe, 1.1, 10, Size.Empty);
        bool found = false;
        foreach (var face in Faces)
        {
            // The detected face is highlighted with a box drawn around it.
            imageFrame.Draw(face, new Bgr(Color.BurlyWood), 3);
            // Predict on the detected face region; the original predicted on the
            // whole resized frame, which defeats the per-face loop.
            var faceCrop = grayframe.Copy(face).Resize(400, 400, Inter.Cubic);
            FaceRecognizer.PredictionResult pre = recognizer.Predict(faceCrop);
            if (pre.Label != -1)
            {
                for (int j = 0; j < faces.Count; j++)
                {
                    if (Int32.Parse(faces[j].cin) == pre.Label)
                    {
                        login.ValidatedEns.cin = faces[j].cin;
                        login.ValidatedEns.mots_de_passe = faces[j].mots_de_passe;
                        login.ValidatedEns.nom = faces[j].nom;
                        login.ValidatedEns.prenom = faces[j].prenom;
                        login.ValidatedEns.mail = faces[j].mail;
                        login.ValidatedEns.photo = faces[j].photo;
                        login.ValidatedEns.code_a_bar = faces[j].code_a_bar;
                        found = true;
                        break;
                    }
                }
            }
            else
            {
                MessageBox.Show("No matching face in the database!");
                break;
            }

            if (found)
            {
                var ac = new Accueil_Enseignant();
                ac.Show();
                Close();
                break;
            }
        }
        pictureBox2.Image = imageFrame.ToBitmap();
    }
}
public PredictionResult Predict(GrayImage grayScaleImage)
{
    grayScaleImage.Processing();
    // The original wrapped this call in try { } catch (Exception e) { throw e; },
    // which only destroys the stack trace; the wrapper added nothing, so it is dropped.
    return faceRecognizer.Predict(grayScaleImage);
}
public string RecognizerUser(Image<Gray, byte> userImage)
{
    faceRecognizer.Load(recognizeFilePath);
    var result = faceRecognizer.Predict(userImage.Resize(100, 100, Inter.Cubic));
    Console.WriteLine(result.Label);
    string name = dataStoreAccess.GetUserName(result.Label);
    return name;
}
private void Cap_ImageGrabbed(object sender, EventArgs e)
{
    for (int i = 0; i < tfstudent.Count; ++i)
    {
        tfstudent[i] = false;
    }

    FaceRecognizer.PredictionResult predictedLabel = new FaceRecognizer.PredictionResult();
    CascadeClassifier fcas = new CascadeClassifier(fcasname);
    Mat img = new Mat(), imgg = new Mat();
    cap.Retrieve(img);
    image = img.ToImage<Bgr, byte>();

    List<System.Drawing.Rectangle> faces = new List<System.Drawing.Rectangle>();
    CvInvoke.CvtColor(img, imgg, ColorConversion.Bgr2Gray);
    CvInvoke.EqualizeHist(imgg, imgg);
    System.Drawing.Rectangle[] facedetect = fcas.DetectMultiScale(imgg, 1.1, 10, new System.Drawing.Size(20, 20));
    faces.AddRange(facedetect);

    Mat s_img = new Mat();
    List<coord_id> hs = new List<coord_id>();
    foreach (System.Drawing.Rectangle f in faces)
    //Parallel.ForEach(faces, (f) =>
    {
        Image<Gray, byte> image2 = new Image<Gray, byte>(image.ToBitmap());
        image2.ROI = f;
        //image2.ToBitmap(100, 100).Save(junk.ToString() + ".jpg", ImageFormat.Jpeg);
        // Resize returns a new image; the original discarded the return value,
        // so the recognizer received the un-resized crop.
        image2 = image2.Resize(MainWindow.widthheight, MainWindow.widthheight, Emgu.CV.CvEnum.Inter.Linear, false);
        s_img = image2.Mat;
        predictedLabel = face.Predict(s_img);
        //Ghi(predictedLabel.Label.ToString(), 1);
        //Dispatcher.BeginInvoke(new ThreadStart(() => textbox2.Text = junk.ToString()));
        try
        {
            tfstudent[predictedLabel.Label] = true;
            hs.Add(new coord_id(predictedLabel.Label, f));
        }
        catch
        {
            image.Draw(f, new Bgr(0, 183, 149), 15);
            //Dispatcher.BeginInvoke(new ThreadStart(() => textbox1.Text = predictedLabel.Label.ToString()));
            continue;
        }
    }
    //++junk;

    if (hs.Count > 0)
    {
        checkseat(hs);
    }

    Dispatcher.Invoke(() =>
    {
        Small_Camera.Source = CreateBitmapSourceFromGdiBitmap(image.Flip(FlipType.Horizontal).Bitmap);
    });
    //Thread.Sleep(500);
}
public bool IsMatched(Image<Gray, Byte> face, Image<Gray, Byte>[] trainedImages)
{
    try
    {
        int[] ids = new int[trainedImages.Length];
        for (int i = 0; i < trainedImages.Length; i++)
        {
            ids[i] = i + 1;
        }

        if (trainedImages.Length != 0)
        {
            recognizer.Train(trainedImages, ids);
            FaceRecognizer.PredictionResult pr = recognizer.Predict(face.Resize(100, 100, Emgu.CV.CvEnum.Inter.Cubic));

            // Empirical per-algorithm cut-offs; note the comparison direction differs by recognizer.
            if (recognizer is EigenFaceRecognizer)
            {
                return pr.Distance > 100;
            }
            if (recognizer is LBPHFaceRecognizer)
            {
                return pr.Distance < 110;
            }
            return pr.Distance > 300;
            //return (pr.Distance > threshold);
        }
    }
    catch (Exception ex)
    {
        // Thrown when no person is registered yet.
        MessageBox.Show(ex.Message);
    }
    return false;
}
/// <summary>
/// Recognizes the user.
/// </summary>
/// <param name="grayframe">The grayframe.</param>
/// <returns></returns>
public static int RecognizeUser(Image<Gray, byte> grayframe)
{
    if (!_trained || _faceRecognizer == null)
    {
        return -1;
    }

    var result = _faceRecognizer.Predict(grayframe);
    return result.Label;
}
public int RecognizeUser(Image<Gray, byte> userImage)
{
    /* Stream stream = new MemoryStream();
     * stream.Write(userImage, 0, userImage.Length);
     * var faceImage = new Image<Gray, byte>(new Bitmap(stream)); */
    _faceRecognizer.Load(_recognizerFilePath);
    var result = _faceRecognizer.Predict(userImage.Resize(100, 100, INTER.CV_INTER_CUBIC));
    return result.Label;
}
/// <summary>
/// Method which recognizes the user from the given image.
/// </summary>
/// <param name="userImage"></param>
/// <returns></returns>
public FaceRecognizer.PredictionResult RecognizeUser(Image<Gray, byte> userImage)
{
    _faceRecognizer.Load(_recognizerFilePath);
    //// Normalize brightness.
    //userImage._EqualizeHist();
    var result = _faceRecognizer.Predict(userImage.Resize(100, 100, Inter.Cubic));
    return result;
}
public String[] Recognize()
{
    // Reset names and areas.
    Array.Clear(RecognizeNames, 0, RecognizeNames.Length);
    for (int i = 0; i < RecognizeArea.Length; i++)
    {
        RecognizeArea[i].X = 0;
        RecognizeArea[i].Y = 0;
        RecognizeArea[i].Width = 0;
        RecognizeArea[i].Height = 0;
    }

    // Guard against missing detection results.
    if (null == DetectFaces || null == GrayImage)
    {
        return null;
    }

    // Set names and areas.
    for (int i = 0; i < DetectFaces.Length && i < RecognizeNames.Length; i++)
    {
        // Build a thumbnail.
        RecognizeThumbs[i] = GrayImage.Copy(DetectFaces[i]).Resize(100, 100, Emgu.CV.CvEnum.Inter.Cubic);
        RecognizeThumbs[i]._EqualizeHist();

        // Crop only; skip recognition if not trained.
        if (!trained)
        {
            return RecognizeNames;
        }

        // Recognize.
        FaceRecognizer.PredictionResult ER = Recognizer.Predict(RecognizeThumbs[i]);
        RecognizeNames[i] = UNKNOWN;
        if (ER.Label >= 0)
        {
            RecognizeDistance = (float)ER.Distance;
            if (RecognizeDistance > Threshold)
            {
                RecognizeNames[i] = trainedLabels[ER.Label];

                // Scale the area back to the full-resolution frame.
                Rectangle r = DetectFaces[i];
                RecognizeArea[i].X = r.X * ratio;
                RecognizeArea[i].Y = r.Y * ratio;
                RecognizeArea[i].Width = r.Width * ratio;
                RecognizeArea[i].Height = r.Height * ratio;
            }
        }
    }
    return RecognizeNames;
}
/// <summary>
/// Recognise a Grayscale Image using the trained Eigen Recogniser
/// </summary>
/// <param name="Input_image"></param>
/// <returns></returns>
public string Recognise(Image<Gray, byte> Input_image, int Eigen_Thresh = -1)
{
    if (!_IsTrained)
    {
        return "";
    }

    FaceRecognizer.PredictionResult ER = recognizer.Predict(Input_image);
    if (ER.Label == -1)
    {
        label = "Unknown";
        Distance = 0;
        return label;
    }

    label = Names_List[ER.Label];
    Distance = (float)ER.Distance;
    if (Eigen_Thresh > -1)
    {
        Eigen_threshold = Eigen_Thresh;
    }

    // Only apply the post-prediction threshold for the Eigen recognizer, since the
    // Fisher and LBPH thresholds set in the constructor already handle unknowns.
    switch (Recognizer_Type)
    {
        /* Old variant (25.03):
         * case ("EMGU.CV.EigenFaceRecognizer"):
         *     if (Distance > Eigen_threshold) return Eigen_label;
         *     else return "Unknown"; */
        case "EMGU.CV.EigenFaceRecognizer":
            // Note: this variant accepts when the distance exceeds the threshold.
            return Distance > Eigen_threshold ? label : "Unknown";

        case "EMGU.CV.LBPHFaceRecognizer":
        case "EMGU.CV.FisherFaceRecognizer":
        default:
            return label; // The threshold set in training controls unknowns.
    }
}
/* Returns the person's name if recognized. */
public string Recognize(Image<Gray, Byte> detectedFace)
{
    var result = faceRecognizer.Predict(detectedFace);
    if (result.Label != -1 && result.Distance < eigenThresh)
    {
        return faceLabels[result.Label];
    }
    return "";
}
public int RecognizeUser(Image<Gray, byte> userImage)
{
    // The original also round-tripped userImage.Bytes through a MemoryStream into a
    // Bitmap but never used the result (raw pixel bytes are not a valid image file,
    // so that Bitmap constructor would throw anyway); the dead code is dropped.
    _faceRecognizer.Read(_recognizerFilePath);
    var result = _faceRecognizer.Predict(userImage.Resize(100, 100, Inter.Cubic));
    return result.Label;
}
/// <summary>
/// Recognize a Grayscale Image using the trained Eigen Recognizer
/// </summary>
/// <param name="Input_image"></param>
/// <returns></returns>
public string Recognize(Image<Gray, byte> Input_image, int Eigen_Thresh = -1)
{
    if (!_IsTrained)
    {
        return "";
    }

    FaceRecognizer.PredictionResult ER = recognizer.Predict(Input_image);
    if (ER.Label == -1)
    {
        Eigen_label = "Unknown";
        Eigen_Distance = 0;
        return Eigen_label;
    }

    Eigen_label = Rfid_List[ER.Label];
    Eigen_Distance = (float)ER.Distance;
    Debug.WriteLine("DISTANCE: " + Eigen_Distance);
    if (Eigen_Thresh > -1)
    {
        Eigen_threshold = Eigen_Thresh;
    }
    //if (trainingImages.Count() < 30) return Eigen_label;

    // Only apply the post-prediction threshold for the Eigen recognizer, since the
    // Fisher and LBPH thresholds set in the constructor already handle unknowns.
    switch (Recognizer_Type)
    {
        case "EMGU.CV.EigenFaceRecognizer":
            return Eigen_Distance < Eigen_threshold ? Eigen_label : "Unknown";

        case "EMGU.CV.LBPHFaceRecognizer":
        case "EMGU.CV.FisherFaceRecognizer":
        default:
            return Eigen_label; // The threshold set in training controls unknowns.
    }
}
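// The snippets above disagree on the comparison direction for the Eigen distance:
// some accept a face when Distance exceeds the threshold, others when it falls
// below it. Emgu's FaceRecognizer.Predict returns Distance as a dissimilarity
// (smaller means a closer match), so "accept if below the cut-off" is the usual
// reading. A small helper capturing that convention; the names and parameters
// here are illustrative, not taken from any snippet above.
using System.Collections.Generic;
using Emgu.CV.Face;

static class PredictionGate
{
    public static string LabelOrUnknown(FaceRecognizer.PredictionResult result,
                                        IReadOnlyList<string> names,
                                        double maxDistance)
    {
        // Label -1 means the recognizer's own internal threshold already rejected the face.
        if (result.Label < 0 || result.Label >= names.Count || result.Distance > maxDistance)
        {
            return "Unknown";
        }
        return names[result.Label];
    }
}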
public int Recognize(Image<Gray, byte> userImage)
{
    faceRecognizer_.Load(recognizerFilePath_);
    var result = faceRecognizer_.Predict(userImage.Resize(100, 100, Inter.Cubic));
    int id = 0;
    if (result.Distance <= 150)
    {
        id = result.Label;
    }
    return id;
}
// Method that performs the face recognition.
public string Recognition(Image<Gray, byte> Input_image)
{
    if (_DizinKontrol) // If the directory check passed, proceed with recognition.
    {
        // Predict takes the face to be recognized as its input.
        FaceRecognizer.PredictionResult ER = recognizer.Predict(Input_image);

        // A label of -1 means the person seen by the camera was not recognized.
        if (ER.Label == -1)
        {
            AdSoyad_label = "TANINMADI"; // "NOT RECOGNIZED"
            YuzDistance = 0;
            return AdSoyad_label;
        }

        AdSoyad_label = AdSoyadList[ER.Label]; // Full name of the recognized person.
        YuzDistance = (float)ER.Distance;      // Distance of the recognized face.
        // if (Eigen_Thresh > -1) OzyuzEsikDeger = Eigen_Thresh; // Threshold override when using Eigenfaces.

        // TanimaTuru holds the recognition method selected on the form.
        switch (TanimaTuru)
        {
            case "EMGU.CV.EigenFaceRecognizer":
                // Return the name only if the distance clears the Eigen threshold.
                return YuzDistance > OzyuzEsikDeger ? AdSoyad_label : "TANINMADI";

            case "EMGU.CV.LBPHFaceRecognizer":
            case "EMGU.CV.FisherFaceRecognizer":
            default:
                return AdSoyad_label;
        }
    }
    return "";
}
private bool MatchFaces(Bitmap bitmap)
{
    var imageReceived = DetectFaceTrainer(new List<Bitmap> { bitmap });
    if (imageReceived == null || imageReceived.Count() == 0)
    {
        return false;
    }

    LBPHFaceRecognizer.PredictionResult ER = recognizer.Predict(imageReceived[0]);
    return ER.Label > -1;
}
private void Guess(Image<Bgr, byte> capturedImage)
{
    // We need the grayscale image, so convert a copy to gray.
    Image<Gray, byte> grayImage = capturedImage.Copy().Convert<Gray, byte>();

    // Detect every face in the image.
    var faces = classifier.DetectMultiScale(grayImage, 1.3, 5, new Size(64, 64), Size.Empty);

    // For every rectangle returned by DetectMultiScale, make a guess.
    foreach (var rect in faces)
    {
        PredictionResult result;
        try
        {
            result = faceRecognizer.Predict(grayImage.Copy(rect));
        }
        catch
        {
            // If anything throws, mark this face as unusable with a -1 label.
            result.Label = -1;
        }

        if (result.Label >= 0)
        {
            // Draw the face rectangle.
            capturedImage.Draw(rect, new Bgr(Color.OrangeRed), 3);
            // Draw a filled rectangle under the face rectangle.
            capturedImage.Draw(new Rectangle(new Point(rect.X, rect.Y + rect.Height), new Size(rect.Width - 2, 20)),
                               new Bgr(Color.OrangeRed), 20);
            // Draw the prediction result over the filled rectangle.
            capturedImage.Draw(listOfFileNames[result.Label], ref mCvFont,
                               new Point(rect.X + 4, rect.Y + rect.Height + 15), new Bgr(Color.White));
        }
        else
        {
            capturedImage.Draw(rect, new Bgr(Color.Red), 3);
            // Draw a filled rectangle under the face rectangle.
            capturedImage.Draw(new Rectangle(new Point(rect.X, rect.Y + rect.Height), new Size(rect.Width - 2, 20)),
                               new Bgr(Color.Red), 20);
            capturedImage.Draw("Unknown", ref mCvFont,
                               new Point(rect.X + 4, rect.Y + rect.Height + 15), new Bgr(Color.White));
        }
    }

    foreach (var item in grid.ControlsList)
    {
        (item as ImageBox).Image = capturedImage;
    }
}
public int RecognizePerson(Image<Gray, byte> userImage)
{
    /* Stream stream = new MemoryStream();
     * stream.Write(userImage, 0, userImage.Length);
     * var faceImage = new Image<Gray, byte>(new Bitmap(stream)); */
    _faceRecognizer.Load(_recognizerFilePath);
    try
    {
        var result = _faceRecognizer.Predict(userImage.Resize(100, 100, Inter.Cubic));
        return result.Label;
    }
    catch (Exception ex)
    {
        Debugger.writeLine(ex.Message);
    }
    return -1;
}
public static String Recognize(Image<Gray, byte> source, int threshold = -1)
{
    // If the threshold parameter is set (not the default -1), use it;
    // otherwise keep EigenThreshold's current value (2000).
    if (threshold > -1)
    {
        EigenThreshold = threshold;
    }

    // Normalize and resize the image.
    Image<Gray, byte> face = source.Resize(width, height, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC, false);
    face._EqualizeHist();

    try
    {
        FaceRecognizer.PredictionResult result = faceRecognizer.Predict(face);
        if (result.Label == -1)
        {
            return "Unknown -1";
        }

        // The result label is an int; we need the corresponding string.
        String eigenLabel = labels[result.Label];
        float eigenDistance = (float)result.Distance;
        if (eigenDistance > EigenThreshold)
        {
            return eigenLabel;
        }
        return "Unknown " + eigenDistance;
    }
    catch (Exception e)
    {
        MessageBox.Show(e.Message);
    }
    return "Unknown -2";
}
// Method that performs the face recognition.
public string Recognize_me(VideoCapture capture)
{
    // Holds the face recognition result.
    FaceRecognizer.PredictionResult result;
    string return_label;

    // Read the trained model file.
    faceRecognizer.Read(recognizerFilePath);

    // Predict on the current camera frame.
    result = faceRecognizer.Predict(capture.QueryFrame().ToImage<Gray, byte>().Resize(100, 100, Inter.Cubic));

    // Convert the predicted label to a string and return it.
    return_label = result.Label.ToString();
    return return_label;
}
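// Several snippets call Load/Read inside the recognition method itself, which
// re-reads the model file on every prediction. A sketch of the usual train-once /
// load-once split; whether the persistence methods are named Save/Load or
// Write/Read depends on the Emgu CV version (both namings appear in the snippets
// above), and the model path here is hypothetical.
using Emgu.CV;
using Emgu.CV.Face;
using Emgu.CV.Structure;

static class RecognizerStore
{
    const string ModelPath = "recognizer.yaml";

    // Training side: run once, whenever the enrollment images change.
    public static void TrainAndSave(Image<Gray, byte>[] images, int[] labels)
    {
        using (var trainer = new LBPHFaceRecognizer())
        {
            trainer.Train(images, labels);
            trainer.Write(ModelPath); // trainer.Save(ModelPath) on older Emgu versions
        }
    }

    // Recognition side: load once at start-up, then call Predict per frame.
    public static LBPHFaceRecognizer LoadOnce()
    {
        var recognizer = new LBPHFaceRecognizer();
        recognizer.Read(ModelPath); // recognizer.Load(ModelPath) on older versions
        return recognizer;
    }
}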
public async Task<List<EMGUFace>> detect(Image<Gray, byte> img)
{
    List<EMGUFace> faces = new List<EMGUFace>();
    if (img == null)
    {
        return faces;
    }

    // var rect = cascade.DetectMultiScale(img, 1.4, 0, new Size(100, 100), new Size(800, 800));
    var rect = cascadeFace.DetectMultiScale(img, 1.2, 10);
    foreach (var r in rect)
    {
        Image<Gray, byte> imgBox = img.GetSubRect(r);
        var eyes = cascadeEyes.DetectMultiScale(imgBox, 1.2, 10);
        if (imgBox.Height != 200 || imgBox.Width != 200)
        {
            imgBox = imgBox.Resize(200, 200, Emgu.CV.CvEnum.Inter.Cubic);
        }

        // Here Predict classifies gender rather than identity: label 0 = male, 1 = female.
        var res = model.Predict(imgBox);

        EMGUFace face = new EMGUFace();
        face.x = r.X;
        face.y = r.Y;
        face.width = r.Width;
        face.height = r.Height;
        face.gender = (res.Label == 0) ? Gender.Male : Gender.Female;
        face.confidence = res.Distance;
        foreach (var eye in eyes)
        {
            face.eyes.Add(new System.Windows.Point(eye.X + eye.Width / 2, eye.Y + eye.Height / 2));
        }
        faces.Add(face);
    }
    return faces;
}