/// <summary>
/// Trains the LBPH face recognizer on every stored face image and persists the
/// trained model to <c>_recognizerFilePath</c>.
/// </summary>
/// <returns>
/// Always <c>true</c> — NOTE(review): also when <c>CallFaces</c> returned null and
/// nothing was trained; callers cannot distinguish the two cases. Consider
/// returning <c>false</c> when there is no data.
/// </returns>
public bool TrainRecognizer()
{
    var allFaces = _dataStoreAccess.CallFaces("ALL_USERS");
    if (allFaces != null)
    {
        var faceImages = new Image<Gray, byte>[allFaces.Count];
        var faceLabels = new int[allFaces.Count];
        for (int i = 0; i < allFaces.Count; i++)
        {
            // BUG FIX: the original wrote the bytes into an empty MemoryStream and
            // passed it to new Bitmap(stream) without rewinding, so the Bitmap
            // constructor read from the END of the stream. Wrapping the byte[]
            // directly starts at position 0 and avoids the copy.
            // The usings also fix the stream/bitmap leak (one pair per image).
            using (var stream = new MemoryStream(allFaces[i].Image))
            using (var bitmap = new Bitmap(stream))
            using (var faceImage = new Image<Gray, byte>(bitmap))
            {
                // The recognizer needs uniformly sized samples; normalize to 200x200.
                faceImages[i] = faceImage.Resize(200, 200, Inter.Cubic);
            }
            faceLabels[i] = allFaces[i].UserId;
        }
        //LBPHFaceRecognizer--------------------------
        _LBPHFaceRecognizer.Train(faceImages, faceLabels);
        _LBPHFaceRecognizer.Save(_recognizerFilePath);
        ////EigenFaceRecognizer-------------------------
        //_faceRecognizer.Train(faceImages, faceLabels);
        //_faceRecognizer.Save(_recognizerFilePath);
    }
    return true;
}
// Trains a recognizer using the labeled images in inputDir and saves the result
// to outputPath. Each file's integer label is the part of its file name before
// the first '_' (e.g. "12_front.jpg" -> label 12).
static void TrainRecognizer(string inputDir, string outputPath)
{
    var imageFiles = Directory.EnumerateFiles(inputDir).ToList();
    var images = new Image<Gray, byte>[imageFiles.Count];
    var labels = new int[imageFiles.Count];

    // Load every file and the label encoded in its name.
    Console.WriteLine("Loading data...");
    for (int i = 0; i < imageFiles.Count; i++)
    {
        string file = imageFiles[i];
        string labelText = Path.GetFileNameWithoutExtension(file).Split('_')[0];
        labels[i] = int.Parse(labelText);
        images[i] = CvInvoke.Imread(file, Emgu.CV.CvEnum.LoadImageType.Grayscale).ToImage<Gray, byte>();
    }
    Console.WriteLine("Loading complete.");

    // Train the recognizer and persist the trained state.
    Console.WriteLine("Training...");
    var recognizer = new LBPHFaceRecognizer();
    recognizer.Train(images, labels);
    Console.WriteLine("Training done. Saving results...");
    recognizer.Save(outputPath);
}
/// <summary>
/// Trains the shared FaceRecognizer on every stored photo's face image
/// (skipping the photo with Id == 1 — presumably a placeholder; verify against
/// the seeding code) and saves the trained state under ~/App_Data/Faces.
/// </summary>
private void Train()
{
    var inputImages = new List<Image<Gray, byte>>();
    var labelsList = new List<int>();
    foreach (var photo in PhotoContext.Photos.Where(x => x.Id != 1).ToList())
    {
        // BUG FIX: the original never disposed the MemoryStream, the decoded
        // Image, or the Bitmap. Image<Gray, byte> copies the pixel data, so the
        // intermediates can be released immediately.
        using (var str = new MemoryStream(photo.Face.Image))
        using (var decoded = Image.FromStream(str))
        using (var b = new Bitmap(decoded))
        {
            Image<Gray, byte> img = new Image<Gray, byte>(b);
            // Equalize histogram to reduce lighting variation between samples.
            img._EqualizeHist();
            inputImages.Add(img);
            labelsList.Add(photo.OwnerId);
        }
    }
    DataPath = Server.MapPath("~/App_Data/Faces");
    FaceRecognizer.Train(inputImages.ToArray(), labelsList.ToArray());
    // NOTE(review): DataPath looks like a directory, but Save expects a file
    // path — confirm "~/App_Data/Faces" is really the intended target.
    FaceRecognizer.Save(DataPath);
}
/// <summary>
/// Saves the trained recognizer to <paramref name="filename"/> and writes the
/// known label names to "Labels.xml" in the same directory.
/// </summary>
/// <param name="filename">Destination file for the recognizer state; its
/// directory also receives Labels.xml.</param>
public void Save(string filename)
{
    recognizer.Save(filename);
    string path = Path.GetDirectoryName(filename);
    // BUG FIX: File.OpenWrite does not truncate an existing file, so a shorter
    // document would leave stale trailing bytes; File.Create truncates.
    // The usings also guarantee the stream is closed even if writing throws
    // (the original leaked the FileStream on failure).
    using (FileStream labels = File.Create(Path.Combine(path, "Labels.xml")))
    using (XmlWriter writer = XmlWriter.Create(labels))
    {
        writer.WriteStartDocument();
        writer.WriteStartElement("labels");
        for (int i = 0; i < names.Count; ++i)
        {
            writer.WriteStartElement("label");
            writer.WriteElementString("name", names[i]);
            writer.WriteEndElement();
        }
        writer.WriteEndElement();
        writer.WriteEndDocument();
    }
}
/***
 * Function: private void TrainMachine(FaceIdentity faceIdentity, String name)
 * Parameter(s): FaceIdentity faceIdentity
 *                  Privilege of the face that is being trained to store in the ASSET_INDEX.dat file.
 *               String name
 *                  The name of the individual that is being trained; currently, it is not used, but it exists so that in the next version,
 *                  the machine can be more customized.
 * Return Value: void
 ***/
private void TrainMachine(FaceIdentity faceIdentity, String name)
{
    // Notify the user that training has begun.
    MessageBox.Show(trainingBeginning, this.Title, MessageBoxButton.OK);

    // Load every captured image from the training directory as-is.
    String[] fileList = Directory.GetFiles(FileUtilities.TrainingDirectoryName);
    List<Mat> matList = new List<Mat>();
    foreach (String file in fileList)
    {
        matList.Add(new Mat(file, Emgu.CV.CvEnum.LoadImageType.Unchanged));
    }
    List<Image<Gray, Byte>> list = new List<Image<Gray, Byte>>();
    // Detect each face in each image. Images are downscaled by scale_factor
    // before detection; the detected crops are collected for training.
    foreach (Mat mat in matList)
    {
        Image<Gray, Byte> image = mat.ToImage<Gray, Byte>().Resize(1 / (double)scale_factor, Emgu.CV.CvEnum.Inter.Cubic);
        Rectangle[] faceList = faceClassifier.DetectMultiScale(image);
        foreach (Rectangle rect in faceList)
        {
            list.Add(image.Copy(rect).Convert<Gray, Byte>());
        }
    }
    // Make sure that there is at least one face to train.
    if (list.Count() == 0)
    {
        PanicAndTerminateProgram();
    }
    // If a height exists in the CORE_IMAGE_DATA.dat file, resize to that, useful for future training.
    // Otherwise derive dimensions from the first crop (scaled back up) and
    // persist them as "height|width" for subsequent runs.
    int height = facialRecognitionHeight == 0 ? list[0].Height * scale_factor : facialRecognitionHeight;
    int width = facialRecognitionWidth == 0 ?
                list[0].Width * scale_factor : facialRecognitionWidth;
    if (facialRecognitionHeight == 0 || facialRecognitionWidth == 0)
    {
        List<String> lines = new List<String>();
        lines.Add(height + "|" + width);
        File.WriteAllLines(FileUtilities.DirectoryName + "\\" + FileUtilities.CoreImageData, lines.ToArray());
    }
    // Normalize every face crop to the common training size.
    List<Image<Gray, Byte>> listFinal = new List<Image<Gray, Byte>>();
    foreach (Image<Gray, Byte> image in list)
    {
        listFinal.Add(image.Resize(width, height, Emgu.CV.CvEnum.Inter.Cubic));
    }
    // Map the privilege level to a numeric label, a file prefix, and an
    // identifier string for the index file.
    // NOTE(review): FaceAsset is assigned auxAdminIdentifier and FaceAuxAdmin
    // is assigned assetIdentifier — this looks swapped relative to the prefix
    // assignments on the same branches; confirm against the identifier
    // declarations before relying on the ASSET_INDEX.dat contents.
    List<int> labelList = new List<int>();
    int integer = 0;
    String prefix = "";
    String ident = "";
    if (faceIdentity == FaceIdentity.FaceAdmin)
    {
        integer = 0;
        prefix = FileUtilities.AdminTrainedPrefix;
        ident = adminIdentifier;
    }
    else if (faceIdentity == FaceIdentity.FaceAsset)
    {
        integer = 2;
        prefix = FileUtilities.AssetTrainedPrefix;
        ident = auxAdminIdentifier;
    }
    else if (faceIdentity == FaceIdentity.FaceAuxAdmin)
    {
        integer = 1;
        prefix = FileUtilities.AuxAdminTrainedPrefix;
        ident = assetIdentifier;
    }
    else
    {
        PanicAndTerminateProgram();
    }
    // Every sample in this run belongs to the same person/privilege label.
    for (int i = 0; i < list.Count(); i++)
    {
        labelList.Add(integer);
    }
    // Train the machine and write its trained state to a file.
    LBPHFaceRecognizer lbphFaceRecognizer = new LBPHFaceRecognizer();
    lbphFaceRecognizer.Train<Gray, Byte>(listFinal.ToArray(), labelList.ToArray());
    Directory.Delete(FileUtilities.TrainingDirectoryName, true);
    String temp = categories[integer];
    String fname = FileUtilities.DirectoryName + "\\" + prefix + temp.ToUpper().Replace(' ', '_') + FileUtilities.FileExtension;
    lbphFaceRecognizer.Save(fname);
    // Write everything to the ASSET_INDEX.dat file.
    // NOTE(review): the training directory was already deleted above via
    // Directory.Delete; this helper appears to delete it again — verify it
    // tolerates the directory being absent.
    FileUtilities.TrainingDirectoryDeletion();
    List<String> aboutTraining = new List<String>();
    aboutTraining.Add(name + "^" + ident + "^" + fname);
    File.AppendAllLines(FileUtilities.DirectoryName + "\\" + FileUtilities.AssetIndexData, aboutTraining.ToArray());
    // Notify the user that training has ended.
    MessageBox.Show(trainingEnded, this.Title, MessageBoxButton.OK);
}
/// <summary>
/// Trains the recognizer selected in comboBoxAlgorithm (EigenFaces, FisherFaces
/// or LBPHFaces) from the *.jpg images under .\TrainedFaces. Each file becomes
/// one sample: fileName(file) is its string label and its load order the
/// integer label. Only the LBPH branch persists its trained state to disk.
/// </summary>
private void button1_Click(object sender, EventArgs e)
{
    if (comboBoxAlgorithm.Text == "EigenFaces")
    {
        try
        {
            string dataDirectory = Directory.GetCurrentDirectory() + "\\TrainedFaces";
            string[] files = Directory.GetFiles(dataDirectory, "*.jpg", SearchOption.AllDirectories);
            eigenTrainedImageCounter = 0;
            foreach (var file in files)
            {
                Image<Bgr, Byte> TrainedImage = new Image<Bgr, Byte>(file);
                // Equalize before converting so the stored grayscale sample
                // reflects the equalization.
                if (eqHisChecked.Checked == true)
                {
                    TrainedImage._EqualizeHist();
                }
                eigenTrainingImages.Add(TrainedImage.Convert<Gray, Byte>());
                eigenlabels.Add(fileName(file));
                eigenIntlabels.Add(eigenTrainedImageCounter);
                eigenTrainedImageCounter++;
                richTextBox1.Text += fileName(file) + "\n";
            }
            eigenFaceRecognizer = new EigenFaceRecognizer(eigenTrainedImageCounter, 2000);
            eigenFaceRecognizer.Train(eigenTrainingImages.ToArray(), eigenIntlabels.ToArray());
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.ToString());
            MessageBox.Show("Nothing in binary database, please add at least a face(Simply train the prototype with the Add Face Button).", "Trained faces load", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
        }
    }
    else if (comboBoxAlgorithm.Text == "FisherFaces")
    {
        try
        {
            string dataDirectory = Directory.GetCurrentDirectory() + "\\TrainedFaces";
            string[] files = Directory.GetFiles(dataDirectory, "*.jpg", SearchOption.AllDirectories);
            fisherTrainedImageCounter = 0;
            foreach (var file in files)
            {
                Image<Bgr, Byte> TrainedImage = new Image<Bgr, Byte>(file);
                // BUG FIX: the original equalized AFTER the grayscale copy had
                // already been added to fisherTrainingImages, so equalization
                // never affected the training data. Equalize first, matching
                // the Eigen and LBPH branches.
                if (eqHisChecked.Checked == true)
                {
                    TrainedImage._EqualizeHist();
                }
                fisherTrainingImages.Add(TrainedImage.Convert<Gray, Byte>());
                fisherlabels.Add(fileName(file));
                fisherIntlabels.Add(fisherTrainedImageCounter);
                fisherTrainedImageCounter++;
                richTextBox1.Text += fileName(file) + "\n";
            }
            fisherFaceRecognizer = new FisherFaceRecognizer(fisherTrainedImageCounter, 2000);
            fisherFaceRecognizer.Train(fisherTrainingImages.ToArray(), fisherIntlabels.ToArray());
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.ToString());
            MessageBox.Show("Nothing in binary database, please add at least a face(Simply train the prototype with the Add Face Button).", "Trained faces load", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
        }
    }
    else if (comboBoxAlgorithm.Text == "LBPHFaces")
    {
        try
        {
            string dataDirectory = Directory.GetCurrentDirectory() + "\\TrainedFaces";
            string[] files = Directory.GetFiles(dataDirectory, "*.jpg", SearchOption.AllDirectories);
            lbphTrainedImageCounter = 0;
            foreach (var file in files)
            {
                Image<Bgr, Byte> TrainedImage = new Image<Bgr, Byte>(file);
                if (eqHisChecked.Checked == true)
                {
                    TrainedImage._EqualizeHist();
                }
                lbphTrainingImages.Add(TrainedImage.Convert<Gray, Byte>());
                lbphlabels.Add(fileName(file));
                lbphIntlabels.Add(lbphTrainedImageCounter);
                lbphTrainedImageCounter++;
                richTextBox1.Text += fileName(file) + "\n";
            }
            // radius=1, neighbors=8, 8x8 grid, threshold=400.
            lbphFaceRecognizer = new LBPHFaceRecognizer(1, 8, 8, 8, 400);
            lbphFaceRecognizer.Train(lbphTrainingImages.ToArray(), lbphIntlabels.ToArray());
            lbphFaceRecognizer.Save(dataDirectory + "\\trainedDataLBPH.dat");
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.ToString());
            MessageBox.Show("Nothing in binary database, please add at least a face(Simply train the prototype with the Add Face Button).", "Trained faces load", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
        }
    }
}