public Algorithm ToAlgorithm(string pluginUUID, bool enabled = true, string elp = "")
{
    // Use the stored overrides when present, otherwise fall back to the caller-supplied defaults.
    var setEnabled = Enabled ?? enabled;
    var setElp = ExtraLaunchParameters ?? elp;

    var ret = new Algorithm(pluginUUID, IDs.ToArray())
    {
        Enabled = setEnabled,
        ExtraLaunchParameters = setElp
    };
    return ret;
}
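// Sketch (not part of the original source): one way the members read by ToAlgorithm above could be
// declared on the containing settings type, plus a hypothetical call site. The class name, the
// property types, and the element type of IDs (AlgorithmType) are assumptions; only the Algorithm
// constructor usage, Enabled, ExtraLaunchParameters, and IDs come from the snippet above.
public class PluginAlgorithmSettings
{
    public bool? Enabled { get; set; }                  // null means "use the caller's default"
    public string ExtraLaunchParameters { get; set; }   // null means "use the caller's default"
    public List<AlgorithmType> IDs { get; set; } = new List<AlgorithmType>();

    // ToAlgorithm(...) from above would be a member here.
}

// Hypothetical usage: stored overrides win over the defaults passed in.
// var algo = settings.ToAlgorithm("plugin-uuid", enabled: false, elp: "--intensity 20");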
private void Timer_Tick(object sender, EventArgs e)
{
    // Grab the latest frame from the webcam and convert it to grayscale for detection.
    webCam.Retrieve(frame);
    var imageFrame = frame.ToImage<Gray, byte>();

    if (TimerCounter < TimeLimit)
    {
        TimerCounter++;
        if (imageFrame != null)
        {
            var faces = faceDetection.DetectMultiScale(imageFrame, 1.3, 5);
            if (faces.Length > 0)
            {
                // Crop to the first detected face and resize it to the fixed training size.
                var processedImage = imageFrame.Copy(faces[0])
                    .Resize(ProcessedImageWidth, ProcessedImageHeight, Emgu.CV.CvEnum.Inter.Cubic);
                Faces.Add(processedImage);
                IDs.Add(Convert.ToInt32(IdBox.Text));
                ScanCounter++;
                OutPutBox.AppendText($"{ScanCounter} Successful Scans Taken...{Environment.NewLine}");
                OutPutBox.ScrollToCaret();
            }
        }
    }
    else
    {
        // Enough samples collected: train the recognizer on the captured faces and persist the model.
        var faceImages = Faces.Select(c => c.Mat).ToArray();
        faceRecognition.Train(faceImages, IDs.ToArray());
        faceRecognition.Write(YMLPath);
        timer.Stop();
        Trainbottom.Enabled = !Trainbottom.Enabled;
        IdBox.Enabled = !IdBox.Enabled;
        OutPutBox.AppendText($"Training Complete!{Environment.NewLine}");
        MessageBox.Show("Training Completed!");
        PredictButton.Enabled = true;
    }
}
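// Sketch (not part of the original source): field declarations and setup that the handler above
// appears to assume, written for Emgu CV with WinForms. The field names match the snippet; the
// cascade file, recognizer type (EigenFaceRecognizer), image size, time limit, timer interval, and
// YML path are assumptions for illustration. The form controls (IdBox, OutPutBox, Trainbottom,
// PredictButton) would come from the WinForms designer and are not shown.
// using Emgu.CV; using Emgu.CV.Face; using Emgu.CV.Structure; using System.Windows.Forms;
VideoCapture webCam;
Mat frame = new Mat();
CascadeClassifier faceDetection;
FaceRecognizer faceRecognition;
List<Image<Gray, byte>> Faces = new List<Image<Gray, byte>>();
List<int> IDs = new List<int>();
Timer timer;
int TimerCounter = 0, TimeLimit = 30, ScanCounter = 0;
const int ProcessedImageWidth = 128, ProcessedImageHeight = 150;
string YMLPath = @"TrainedFaces.yml"; // hypothetical output path

void StartTraining()
{
    webCam = new VideoCapture();        // default camera
    webCam.Start();                     // begin grabbing frames; Retrieve reads the latest one
    faceDetection = new CascadeClassifier("haarcascade_frontalface_default.xml"); // assumed cascade file
    faceRecognition = new EigenFaceRecognizer();   // assumed recognizer choice
    timer = new Timer { Interval = 500 };          // fire Timer_Tick every 0.5 s
    timer.Tick += Timer_Tick;
    timer.Start();
}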
private void Timer_Tick(object sender, EventArgs e)
{
    // Grab the latest frame from the webcam and convert it to grayscale for detection.
    Webcam.Retrieve(Frame);
    var imageFrame = Frame.ToImage<Gray, byte>();

    if (TimerCounter < TimeLimit)
    {
        TimerCounter++;
        if (imageFrame != null)
        {
            Rectangle[] faces = FaceDetection.DetectMultiScale(imageFrame, 1.3, 5);
            if (faces.Any())
            {
                // Crop to the first detected face rectangle and resize it to the fixed training size.
                var processedImage = imageFrame.Copy(faces[0])
                    .Resize(ProcessedImageWidth, ProcessedImageHeight, Inter.Cubic);
                Faces.Add(processedImage.Mat);
                IDs.Add(userId);
                ScanCounter++;
                OutputBox.AppendText($"{ScanCounter} Successful Scans Taken...{Environment.NewLine}");
                OutputBox.ScrollToCaret();
            }
        }
    }
    else
    {
        // Train on the face and ID collections just captured, then write the model to YMLPath.
        FaceRecognition.Train(new VectorOfMat(Faces.ToArray()), new VectorOfInt(IDs.ToArray()));
        FaceRecognition.Write(YMLPath);
        Timer.Stop();
        TimerCounter = 0;
        //IDBox.Clear();
        nameBox.Clear();
        TrainButton.Enabled = !TrainButton.Enabled;
        //IDBox.Enabled = !IDBox.Enabled;
        nameBox.Enabled = !nameBox.Enabled;
        OutputBox.AppendText($"Training Complete!{Environment.NewLine}");
        //MessageBox.Show("Training Complete");
        doneTraining = true;

        // Re-arm the timer with the recognition handler, firing every 0.5 s.
        Timer = new Timer();
        Timer.Interval = 500;
        Timer.Tick += Timer_Tick1;
        Timer.Start();
    }
}
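// Sketch (not part of the original source): the handler above re-arms the timer with Timer_Tick1
// after training, but that method is not shown here. A minimal recognition tick could look like the
// following; FaceRecognizer.Predict and its PredictionResult (Label/Distance) are standard
// Emgu.CV.Face API, while the distance threshold and the output messages are assumptions.
private void Timer_Tick1(object sender, EventArgs e)
{
    Webcam.Retrieve(Frame);
    var imageFrame = Frame.ToImage<Gray, byte>();
    if (imageFrame == null) return;

    Rectangle[] faces = FaceDetection.DetectMultiScale(imageFrame, 1.3, 5);
    if (!faces.Any()) return;

    // Prepare the detected face exactly like the training samples before predicting.
    var processedImage = imageFrame.Copy(faces[0])
        .Resize(ProcessedImageWidth, ProcessedImageHeight, Inter.Cubic);
    var result = FaceRecognition.Predict(processedImage);

    // A smaller distance means a closer match; 2000 is an arbitrary example threshold.
    if (result.Distance < 2000)
        OutputBox.AppendText($"Recognized ID {result.Label}{Environment.NewLine}");
    else
        OutputBox.AppendText($"Unknown face{Environment.NewLine}");
}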