Code Example #1
        /// <summary>
        /// Routine to train face recognizer with sample images
        /// </summary>

        /*private void TrainRecognizer(string root)
         * {
         *      // This routine was actually used to train the recognizer. I didn't put much effort into it and was satisfied once it
         *      // distinguished all detected faces on the sample image; for a real-world application you might want to
         *      // refer to the following documentation:
         *      // OpenCV documentation and samples: http://docs.opencv.org/3.0-beta/modules/face/doc/facerec/tutorial/facerec_video_recognition.html
         *      // Training sets overview: https://www.kairos.com/blog/60-facial-recognition-databases
         *      // Another OpenCV doc: http://docs.opencv.org/2.4/modules/contrib/doc/facerec/facerec_tutorial.html#face-database
         *
         *      int id = 0;
         *      var ids = new List<int>();
         *      var mats = new List<Mat>();
         *      var namesList = new List<string>();
         *
         *      foreach (string dir in Directory.GetDirectories(root))
         *      {
         *              string name = System.IO.Path.GetFileNameWithoutExtension(dir);
         *              if (name.StartsWith("-"))
         *                      continue;
         *
         *              namesList.Add(name);
         *              UnityEngine.Debug.LogFormat("{0} = {1}", id, name);
         *
         *              foreach (string file in Directory.GetFiles(dir))
         *              {
         *                      var bytes = File.ReadAllBytes(file);
         *                      var texture = new UnityEngine.Texture2D(2, 2);
         *                      texture.LoadImage(bytes); // <--- this call changed in the Unity 2017 API and must be updated on that version
         *
         *                      ids.Add(id);
         *
         *                      // each loaded texture is converted to an OpenCV Mat, turned to grayscale (assuming an RGB source) and resized
         *                      var mat = Unity.TextureToMat(texture);
         *                      mat = mat.CvtColor(ColorConversionCodes.BGR2GRAY);
         *                      if (requiredSize.Width > 0 && requiredSize.Height > 0)
         *                              mat = mat.Resize(requiredSize);
         *                      mats.Add(mat);
         *              }
         *              id++;
         *      }
         *
         *      names = namesList.ToArray();
         *
         *      // train the recognizer and save the result for future re-use; while this isn't strictly necessary on small training
         *      // sets, on a bigger set it should give a serious performance boost
         *      recognizer.Train(mats, ids);
         *      recognizer.Save(root + "/face-recognizer.xml");
         * }*/
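        // Note (not part of the original sample): the LoadImage call flagged in the commented-out routine above
        // changed in the Unity 2017 API; a minimal sketch of the equivalent call via the ImageConversion class:
        //
        //     var bytes = File.ReadAllBytes(file);
        //     var texture = new UnityEngine.Texture2D(2, 2);
        //     UnityEngine.ImageConversion.LoadImage(texture, bytes);   // replaces texture.LoadImage(bytes)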
        #endregion

        /// <summary>
        /// Initializes scene
        /// </summary>
        protected virtual void Awake()
        {
            // classifier
            FileStorage storageFaces = new FileStorage(faces.text, FileStorage.Mode.Read | FileStorage.Mode.Memory);

            cascadeFaces = new CascadeClassifier();
            if (!cascadeFaces.Read(storageFaces.GetFirstTopLevelNode()))
            {
                throw new System.Exception("FaceProcessor.Initialize: Failed to load faces cascade classifier");
            }

            // recognizer
            // There are three face recognition algorithms available in the current version of the OpenCV library (please refer to the OpenCV documentation for details).
            // Our particular training set was trained and saved with FisherFaceRecognizer() and will not work with the others; however, you can refer to the "TrainRecognizer"
            // method defined above for instructions and sample code on training your own recognizer from scratch.
            //recognizer = FaceRecognizer.CreateLBPHFaceRecognizer();
            //recognizer = FaceRecognizer.CreateEigenFaceRecognizer();
            recognizer = FaceRecognizer.CreateFisherFaceRecognizer();

            // This pre-trained set was quite tiny and contained only the six persons listed below, who are detected and recognized in the image. We took five photos of each
            // person from public images on Google; for a real-world application you will need much more sample data per person. For more info refer to the OpenCV documentation
            // (there are some links in the "TrainRecognizer" sample function above).
            recognizer.Load(new FileStorage(recognizerXml.text, FileStorage.Mode.Read | FileStorage.Mode.Memory));

            // label names
            names = new string[] { "Cooper", "DeGeneres", "Nyongo", "Pitt", "Roberts", "Spacey" };
        }
Code Example #2
File: Program.cs Project: xeliot/OpenCVSharp-Samples
        static void Main(string[] args)
        {
            var images = new List<ImageInfo>();

            var imageId = 0;

            foreach (var dir in new DirectoryInfo(@"..\..\Images").GetDirectories())
            {
                var groupId = int.Parse(dir.Name.Replace("s", string.Empty)) - 1;
                foreach (var imageFile in dir.GetFiles("*.pgm"))
                {
                    images.Add(new ImageInfo
                    {
                        Image        = new Mat(imageFile.FullName, ImreadModes.GrayScale),
                        ImageId      = imageId++,
                        ImageGroupId = groupId
                    });
                }
            }

            var model = FaceRecognizer.CreateFisherFaceRecognizer();

            model.Train(images.Select(x => x.Image), images.Select(x => x.ImageGroupId));

            var rnd           = new Random();
            var randomImageId = rnd.Next(0, images.Count - 1);
            var testSample    = images[randomImageId];

            Console.WriteLine("Actual group: {0}", testSample.ImageGroupId);
            Cv2.ImShow("actual", testSample.Image);

            var predictedGroupId = model.Predict(testSample.Image);

            Console.WriteLine("Predicted group: {0}", predictedGroupId);

            Cv2.WaitKey(0);
        }
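As a small follow-up to the sample above (a sketch, not part of the original project): Predict also has an overload that returns the distance to the closest match, which can be used to gauge how reliable the prediction is.

            // Hedged sketch reusing `model` and `testSample` from Main() above
            int predictedGroup;
            double distance;
            model.Predict(testSample.Image, out predictedGroup, out distance);
            Console.WriteLine("Predicted group: {0}, distance: {1:F1}", predictedGroup, distance);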
Code Example #3
File: Cv2_face.cs Project: zfq308/opencvsharp
 /// <summary>
 /// Creates a FisherFaces face recognizer.
 /// </summary>
 /// <param name="numComponents">Number of components (Fisherfaces) kept by the algorithm; 0 means all components are kept.</param>
 /// <param name="threshold">Prediction threshold; if the distance to the nearest neighbor is larger than this value, the face is treated as unknown.</param>
 /// <returns>A BasicFaceRecognizer instance</returns>
 public static BasicFaceRecognizer CreateFisherFaceRecognizer(
     int numComponents = 0, double threshold = Double.MaxValue)
 {
     return(FaceRecognizer.CreateFisherFaceRecognizer(numComponents, threshold));
 }
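A hypothetical call site for the wrapper above, assuming it is exposed as a static member of the Cv2 class (as the file name Cv2_face.cs suggests); the argument values are illustrative only:

     // Keep 80 Fisherfaces and treat matches with a distance above 123.0 as unknown (illustrative values)
     var recognizer = Cv2.CreateFisherFaceRecognizer(numComponents: 80, threshold: 123.0);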
Code Example #4
        /*
         * Method called to generate a question
         */
        void GenerateQuestion()
        {
            if (QnA.Count > 0)
            {
                currentQuestion   = Random.Range(0, QnA.Count);
                QuestionText.text = QnA[currentQuestion].Questiion;
                Image.texture     = QnA[currentQuestion].sample;

                Mat image = Unity.TextureToMat(QnA[currentQuestion].sample);

                // Detect faces
                var gray = image.CvtColor(ColorConversionCodes.BGR2GRAY);
                Cv2.EqualizeHist(gray, gray);
                // detect matching regions (face bounding boxes)

                FileStorage storageFaces = new FileStorage(Faces.text, FileStorage.Mode.Read | FileStorage.Mode.Memory);
                cascadeFaces = new CascadeClassifier();
                if (!cascadeFaces.Read(storageFaces.GetFirstTopLevelNode()))
                {
                    throw new System.Exception("FaceProcessor.Initialize: Failed to load Faces cascade classifier");
                }

                recognizer = FaceRecognizer.CreateFisherFaceRecognizer();
                recognizer.Load(new FileStorage(RecognizerXml.text, FileStorage.Mode.Read | FileStorage.Mode.Memory));
                // label names
                names = new string[] { "Cooper", "DeGeneres", "Nyongo", "Pitt", "Roberts", "Spacey" };

                OpenCvSharp.Rect[] rawFaces = cascadeFaces.DetectMultiScale(gray, 1.1, 6);

                foreach (var faceRect in rawFaces)
                {
                    var grayFace = new Mat(gray, faceRect);
                    if (requiredSize.Width > 0 && requiredSize.Height > 0)
                    {
                        grayFace = grayFace.Resize(requiredSize);
                    }

                    int label = -1;

                    /*
                     * Now try to recognize the face:
                     * "confidence" here is actually misleading: in fact it is the distance from the sample to the closest known face,
                     * with 0 being an "ideal match".
                     */

                    double confidence = 0.0;
                    recognizer.Predict(grayFace, out label, out confidence);
                    faceName = names[label];

                    int          line        = 0;
                    const int    textPadding = 2;
                    const double textScale   = 2.0;
                    string       message     = String.Format("{0} ({1})", names[label], (int)confidence);
                    var          textSize    = Cv2.GetTextSize(message, HersheyFonts.HersheyPlain, textScale, 1, out line);
                    var          textBox     = new OpenCvSharp.Rect(
                        faceRect.X + (faceRect.Width - textSize.Width) / 2 - textPadding,
                        faceRect.Bottom,
                        textSize.Width + textPadding * 2,
                        textSize.Height + textPadding * 2
                        );
                    faceName = names[label];
                    Debug.Log(faceName);
                }
                // Assign the image to the texture on the Image component in the scene
                var texture  = Unity.MatToTexture(image);
                var rawImage = Image;
                rawImage.texture = texture;

                var transform = Image.GetComponent<RectTransform>();
                transform.sizeDelta = new Vector2(image.Width, image.Height);

                for (int i = 0; i < Options.Length; i++)
                {
                    var optionText = Options[i].transform.GetChild(0).GetComponent<Text>();
                    optionText.text = QnA[currentQuestion].Answers[i];

                    // Mark the option as correct when its text matches the recognized face name
                    Options[i].GetComponent<AnswerScript>().isCorrect = (faceName == optionText.text);
                }
            }
            else
            {
                GameOver();
            }
        }
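One structural note on the example above: the cascade classifier, the recognizer and the label names are reloaded on every call to GenerateQuestion(). A minimal sketch of hoisting that one-time setup into Awake(), assuming the same Faces / RecognizerXml TextAssets and the fields used above:

        void Awake()
        {
            // Load the face cascade once instead of per question
            var storageFaces = new FileStorage(Faces.text, FileStorage.Mode.Read | FileStorage.Mode.Memory);
            cascadeFaces = new CascadeClassifier();
            if (!cascadeFaces.Read(storageFaces.GetFirstTopLevelNode()))
            {
                throw new System.Exception("Failed to load Faces cascade classifier");
            }

            // Load the pre-trained Fisher recognizer and the label names once as well
            recognizer = FaceRecognizer.CreateFisherFaceRecognizer();
            recognizer.Load(new FileStorage(RecognizerXml.text, FileStorage.Mode.Read | FileStorage.Mode.Memory));
            names = new string[] { "Cooper", "DeGeneres", "Nyongo", "Pitt", "Roberts", "Spacey" };
        }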
Code Example #5
        public void CreateAndDisposeFisher()
        {
            var recognizer = FaceRecognizer.CreateFisherFaceRecognizer();

            recognizer.Dispose();
        }
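Since the recognizer is disposable (which is what the test above exercises), the same create-and-release pattern can also be written with a using block, which guarantees the native handle is freed even if an exception is thrown; a minimal sketch:

        public void CreateAndDisposeFisherWithUsing()
        {
            // Disposed automatically when the block exits, even on exceptions
            using (var recognizer = FaceRecognizer.CreateFisherFaceRecognizer())
            {
                // use the recognizer here
            }
        }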