Example #1
        public MeshData Convert()
        {
            MeshData instance = new MeshData();

            instance.Faces  = Faces.ToArray();
            instance.Strips = Strips.ToArray();
            return instance;
        }
        private void Timer_Tick(object sender, EventArgs e)
        {
            Webcam.Retrieve(Frame);
            var imageFrame = Frame.ToImage<Gray, byte>();

            if (TimerCounter < TimeLimit)
            {
                TimerCounter++;

                if (imageFrame != null)
                {
                    Rectangle[] faces = FaceDetection.DetectMultiScale(imageFrame, 1.3, 5);

                    if (faces.Any())
                    {
                        var processedImage = imageFrame.Copy(faces[0]).Resize(ProcessedImageWidth, ProcessedImageHeight, Inter.Cubic); // Crop to the first detected face and resize it to the training resolution.
                        Faces.Add(processedImage.Mat);
                        IDs.Add(userId);
                        ScanCounter++;
                        OutputBox.AppendText($"{ScanCounter} Successful Scans Taken...{Environment.NewLine}");
                        OutputBox.ScrollToCaret();
                    }
                }
            }
            else
            {
                FaceRecognition.Train(new VectorOfMat(Faces.ToArray()), new VectorOfInt(IDs.ToArray())); // Train on the face and ID collections just captured; the model is then written to YMLPath.
                FaceRecognition.Write(YMLPath);

                Timer.Stop();
                TimerCounter = 0;

                //IDBox.Clear();
                nameBox.Clear();

                TrainButton.Enabled = !TrainButton.Enabled;
                //IDBox.Enabled = !IDBox.Enabled;
                nameBox.Enabled = !nameBox.Enabled;

                OutputBox.AppendText($"Training Complete! {Environment.NewLine}");
                //MessageBox.Show("Training Complete");

                doneTraining = true;

                Timer          = new Timer();
                Timer.Interval = 500;         // ticks every 0.5 sec
                Timer.Tick    += Timer_Tick1; // this method gets called every time the timer fires.
                Timer.Start();
            }
        }
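The handler above hands recognition off to a second handler, Timer_Tick1, which this example does not include. Below is a minimal sketch of what such a recognition-phase handler could look like, assuming the FaceRecognition model trained above (Emgu CV's FaceRecognizer.Predict returns a label and a distance); the output format is an assumption.

        // Hypothetical recognition-phase handler (not part of the original example).
        // Assumes FaceRecognition holds the model trained in Timer_Tick above.
        private void Timer_Tick1(object sender, EventArgs e)
        {
            Webcam.Retrieve(Frame);
            var imageFrame = Frame.ToImage<Gray, byte>();
            if (imageFrame == null) return;

            Rectangle[] faces = FaceDetection.DetectMultiScale(imageFrame, 1.3, 5);
            if (!faces.Any()) return;

            var processedImage = imageFrame.Copy(faces[0]).Resize(ProcessedImageWidth, ProcessedImageHeight, Inter.Cubic);
            var result = FaceRecognition.Predict(processedImage);   // label + distance of the closest match
            OutputBox.AppendText($"Recognized label {result.Label} (distance {result.Distance:F1}){Environment.NewLine}");
        }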
Example #3
        private void Timer_Tick(object sender, EventArgs e)
        {
            WebCam.Retrieve(Frame);
            var imageFrame = Frame.ToImage<Gray, byte>();

            if (TimerCounter < TimeLimit)
            {
                TimerCounter++;
                if (imageFrame != null)
                {
                    //imageBox1.Image = imageFrame;
                    var faces = FaceDetection.DetectMultiScale(imageFrame, 1.3, 5);
                    if (faces.Length > 0)
                    {
                        var procImage = imageFrame.Copy(faces[0]).Resize(ProcessImageWidth, ProcessImageHeight, Emgu.CV.CvEnum.Inter.Cubic);
                        Faces.Add(procImage);
                        //Ids.Add(Convert.ToInt32(tbID.Text));
                        //Names.Add(tbName.Text);
                        ScanCounter++;
                        //tbOutput.AppendText($"{ScanCounter} successful scans taken... {Environment.NewLine}");
                        //tbOutput.ScrollToCaret();
                    }
                }
            }
            else
            {
                // train faces !!!
                FaceRecognition.Train(Faces.ToArray(), Ids.ToArray());
                FaceRecognition.Write(YMLPath);

                Timer.Stop();
                TimerCounter = 0;
                //tbID.Enabled = true;
                //btnBeginTraining.Enabled = true;
                //tbOutput.AppendText($"Training Complete {Environment.NewLine}");
                MessageBox.Show("Training completed");
            }
        }
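None of these handlers show the fields they depend on. Below is a minimal sketch of the declarations such a form typically needs, assuming Emgu CV 4.x with WinForms; the recognizer choice, cascade file, tick limit and YML path are assumptions.

        // Hypothetical field declarations backing the handlers above (not shown in the original).
        private VideoCapture WebCam;                              // camera source for Retrieve()
        private readonly Mat Frame = new Mat();                   // frame buffer filled by Retrieve()
        private CascadeClassifier FaceDetection;                  // Haar cascade face detector
        private FaceRecognizer FaceRecognition;                   // e.g. EigenFaceRecognizer
        private readonly List<Image<Gray, byte>> Faces = new List<Image<Gray, byte>>();
        private readonly List<int> Ids = new List<int>();
        private Timer Timer;                                      // System.Windows.Forms.Timer
        private int TimerCounter, ScanCounter;
        private const int TimeLimit = 50;                         // assumed number of capture ticks
        private const string YMLPath = "trainingData.yml";        // assumed model path

        private void InitCapture()
        {
            WebCam          = new VideoCapture(0);                // default camera
            FaceDetection   = new CascadeClassifier("haarcascade_frontalface_default.xml");
            FaceRecognition = new EigenFaceRecognizer();
            Timer           = new Timer { Interval = 500 };
            Timer.Tick     += Timer_Tick;
            Timer.Start();
        }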
Example #4
        private void Timer_Tick(object sender, EventArgs e)
        {
            Camera.Retrieve(Frame);
            var ImgFrame = Frame.ToImage<Gray, byte>();

            if (TimerCounter < TimeLimit)
            {
                TimerCounter++;

                if (ImgFrame != null)
                {
                    var faces = FaceDetection.DetectMultiScale(ImgFrame, 1.3, 5);

                    if (faces.Length > 0)
                    {
                        var processedImage = ImgFrame.Copy(faces[0]).Resize(ProcessedImageWidth, ProcessedImageHeight, Emgu.CV.CvEnum.Inter.Cubic);
                        Faces.Add(processedImage);
                        Ids.Add(Convert.ToInt32(IdBox.Text));
                        ScanCounter++;
                        OutputBox.AppendText($"{ScanCounter} Successful Scans Taken...{Environment.NewLine}");
                        OutputBox.ScrollToCaret();
                    }
                }
            }
            else
            {
                FacialRecognition.Train(Faces.ToArray(), Ids.ToArray());
                FacialRecognition.Write(YMLPath);
                Timer.Stop();
                TimerCounter     = 0;
                btnTrain.Enabled = !btnTrain.Enabled;
                IdBox.Enabled    = !IdBox.Enabled;
                OutputBox.AppendText($"Training Complete! {Environment.NewLine}");
                MessageBox.Show("Training Complete");
            }
        }
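The capture loop above is normally started by a button handler that validates the ID box and enables the timer; that handler is not shown here. A minimal sketch under that assumption follows (the validation and reset logic are guesses, not the original code).

        // Hypothetical start-of-training handler (not part of the original example).
        private void btnTrain_Click(object sender, EventArgs e)
        {
            if (!int.TryParse(IdBox.Text, out _))       // require a numeric user ID
            {
                MessageBox.Show("Please enter a numeric ID before training.");
                return;
            }

            Faces.Clear();                              // start a fresh capture session
            Ids.Clear();
            ScanCounter  = 0;
            TimerCounter = 0;

            btnTrain.Enabled = false;                   // re-enabled by the toggle in Timer_Tick
            IdBox.Enabled    = false;

            Timer.Start();                              // Timer_Tick above runs every Interval ms
        }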
Example #5
 public override void GetObjectData(SerializationInfo info, StreamingContext context)
 {
     base.GetObjectData(info, context);
     info.AddValue("Faces", Faces.ToArray());
 }
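GetObjectData stores the face list under the key "Faces"; the matching deserialization constructor is not part of this example. A minimal sketch of what it might look like follows (the class name FaceGroup and the element type Face are assumptions).

 // Hypothetical deserialization counterpart; the enclosing class name and the
 // element type of Faces are assumptions, since the original only shows GetObjectData.
 protected FaceGroup(SerializationInfo info, StreamingContext context)
     : base(info, context)
 {
     // Read back the array written by GetObjectData under the same key.
     var faces = (Face[])info.GetValue("Faces", typeof(Face[]));
     Faces = new List<Face>(faces);
 }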
Example #6
 public MeshData GetMesh()
 {
     return new MeshData(Faces.ToArray(), Vertices.ToArray(), VertexNomals.ToArray(), TextureCoords.ToArray());
 }
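GetMesh assumes a MeshData constructor taking faces, vertices, normals and texture coordinates in that order; the type itself is not shown. A minimal sketch of a matching container follows (member types such as Vector3/Vector2 from System.Numerics are assumptions).

 // Hypothetical MeshData container matching the constructor call above
 // (not part of the original example; all member types are assumptions).
 public class MeshData
 {
     public int[]     Faces;          // face index list
     public Vector3[] Vertices;       // vertex positions
     public Vector3[] Normals;        // per-vertex normals
     public Vector2[] TextureCoords;  // UV coordinates

     public MeshData(int[] faces, Vector3[] vertices, Vector3[] normals, Vector2[] textureCoords)
     {
         Faces         = faces;
         Vertices      = vertices;
         Normals       = normals;
         TextureCoords = textureCoords;
     }
 }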