// Start training from the collected faces.
        // The face recognition algorithm can be one of these, and perhaps more, depending on your version of OpenCV, which must be at least v2.4.1:
        //    "FaceRecognizer.Eigenfaces":  Eigenfaces, also referred to as PCA (Turk and Pentland, 1991).
        //    "FaceRecognizer.Fisherfaces": Fisherfaces, also referred to as LDA (Belhumeur et al, 1997).
        //    "FaceRecognizer.LBPH":        Local Binary Pattern Histograms (Ahonen et al, 2006).
        public static BasicFaceRecognizer LearnCollectedFaces(List <Mat> preprocessedFaces, List <int> faceLabels, string facerecAlgorithm = "FaceRecognizer.Eigenfaces")
        {
            BasicFaceRecognizer model = null;

            Debug.Log("Learning the collected faces using the [" + facerecAlgorithm + "] algorithm ...");

            if (facerecAlgorithm == "FaceRecognizer.Fisherfaces")
            {
                model = FisherFaceRecognizer.create();
            }
            else if (facerecAlgorithm == "FaceRecognizer.Eigenfaces")
            {
                model = EigenFaceRecognizer.create();
            }

            if (model == null)
            {
                Debug.LogError("ERROR: The FaceRecognizer algorithm [" + facerecAlgorithm + "] is not available in your version of OpenCV. Please update to OpenCV v2.4.1 or newer.");
                return null;
            }

            // Do the actual training from the collected faces. Might take several seconds or minutes depending on input!
            MatOfInt labels = new MatOfInt();

            labels.fromList(faceLabels);
            model.train(preprocessedFaces, labels);

            return(model);
        }
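
        // A minimal usage sketch for LearnCollectedFaces(), with hypothetical variable names;
        // in the real app the faces and labels are gathered by the collection mode shown further below:
        //
        //     BasicFaceRecognizer model = LearnCollectedFaces(preprocessedFaces, faceLabels, "FaceRecognizer.Eigenfaces");
        //     if (model != null)
        //     {
        //         model.predict(testFace, predictedLabel, predictedConfidence);
        //     }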
        private void Run()
        {
            List <Mat> images     = new List <Mat> ();
            List <int> labelsList = new List <int> ();
            MatOfInt   labels     = new MatOfInt();

            images.Add(Imgcodecs.imread(image_0_filepath, 0));
            images.Add(Imgcodecs.imread(image_1_filepath, 0));
            labelsList.Add(0);
            labelsList.Add(1);
            labels.fromList(labelsList);

            Mat testSampleMat   = Imgcodecs.imread(sample_image_filepath, 0);
            int testSampleLabel = 0;


            //foreach (Mat item in images) {
            //    Debug.Log("images.ToString " + item.ToString());
            //}
            //foreach (int item in labelsList) {
            //    Debug.Log("labels.ToString " + item.ToString());
            //}

            int[]    predictedLabel      = new int[1];
            double[] predictedConfidence = new double[1];


            BasicFaceRecognizer faceRecognizer = EigenFaceRecognizer.create();

            faceRecognizer.train(images, labels);

            faceRecognizer.predict(testSampleMat, predictedLabel, predictedConfidence);


            Debug.Log("Predicted class: " + predictedLabel [0] + " / " + "Actual class: " + testSampleLabel);
            Debug.Log("Confidence: " + predictedConfidence [0]);


            Mat predictedMat = images [predictedLabel [0]];

            Mat baseMat = new Mat(testSampleMat.rows(), predictedMat.cols() + testSampleMat.cols(), CvType.CV_8UC1);

            predictedMat.copyTo(baseMat.submat(new OpenCVForUnity.CoreModule.Rect(0, 0, predictedMat.cols(), predictedMat.rows())));
            testSampleMat.copyTo(baseMat.submat(new OpenCVForUnity.CoreModule.Rect(predictedMat.cols(), 0, testSampleMat.cols(), testSampleMat.rows())));

            Imgproc.putText(baseMat, "Predicted", new Point(10, 15), Imgproc.FONT_HERSHEY_SIMPLEX, 0.4, new Scalar(255), 1, Imgproc.LINE_AA, false);
            Imgproc.putText(baseMat, "Confidence:", new Point(5, 25), Imgproc.FONT_HERSHEY_SIMPLEX, 0.2, new Scalar(255), 1, Imgproc.LINE_AA, false);
            Imgproc.putText(baseMat, "   " + predictedConfidence [0], new Point(5, 33), Imgproc.FONT_HERSHEY_SIMPLEX, 0.2, new Scalar(255), 1, Imgproc.LINE_AA, false);
            Imgproc.putText(baseMat, "TestSample", new Point(predictedMat.cols() + 10, 15), Imgproc.FONT_HERSHEY_SIMPLEX, 0.4, new Scalar(255), 1, Imgproc.LINE_AA, false);


            Texture2D texture = new Texture2D(baseMat.cols(), baseMat.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(baseMat, texture);

            gameObject.GetComponent <Renderer> ().material.mainTexture = texture;
        }
        /// <summary>
        /// Raises the load button event.
        /// </summary>
        public void OnLoadButton()
        {
            Debug.Log("User clicked [Load] button.");

            // Restart everything!
            dispose();

            if (facerecAlgorithm == "FaceRecognizer.Fisherfaces")
            {
                model = Face.createFisherFaceRecognizer();
            }
            else if (facerecAlgorithm == "FaceRecognizer.Eigenfaces")
            {
                model = Face.createEigenFaceRecognizer();
            }

            if (model == null)
            {
                Debug.LogError("ERROR: The FaceRecognizer algorithm [" + facerecAlgorithm + "] is not available in your version of OpenCV. Please update to OpenCV v2.4.1 or newer.");
                m_mode = MODES.MODE_DETECTION;
                return;
            }

            // load the train data.
            model.load(Application.temporaryCachePath + "/traindata.yml");

            int maxLabel = (int)Core.minMaxLoc(model.getLabels()).maxVal;

            if (maxLabel < 0)
            {
                Debug.Log("load failure.");
                model.Dispose();
                model  = null;
                m_mode = MODES.MODE_DETECTION;
                return;
            }

            // Restore the save data.
            m_numPersons = maxLabel + 1;
            for (int i = 0; i < m_numPersons; ++i)
            {
                m_latestFaces.Add(i);
                preprocessedFaces.Add(Imgcodecs.imread(Application.temporaryCachePath + "/preprocessedface" + i + ".jpg", 0));
                if (preprocessedFaces [i].empty())
                {
                    preprocessedFaces [i] = new Mat(faceHeight, faceWidth, CvType.CV_8UC1, new Scalar(128));
                }
                faceLabels.Add(i);
            }

            // go to the recognition mode!
            m_mode = MODES.MODE_RECOGNITION;
        }
    private void Run()
    {
        List <Mat> images     = new List <Mat>();
        List <int> labelsList = new List <int>();
        MatOfInt   labels     = new MatOfInt();

        images.Add(Imgcodecs.imread(facerec_0_bmp_filepath, 0));
        images.Add(Imgcodecs.imread(facerec_1_bmp_filepath, 0));

        labelsList.Add(0); // positive
        labelsList.Add(1); // negative
        labels.fromList(labelsList);

        Mat testSampleMat = Imgcodecs.imread(facerec_sample_bmp_filepath, 0);

        int testSampleLabel = 0;

        int[]    predictedLabel      = new int[1];
        double[] predictedConfidence = new double[1];

        BasicFaceRecognizer faceRecognizer = EigenFaceRecognizer.create();

        // NOTE: FaceRecognizer.update() is only supported by the LBPH algorithm; for
        // Eigenfaces and Fisherfaces it throws an error, so train from scratch instead.
        faceRecognizer.train(images, labels); // train() clears any previously learned model
        faceRecognizer.predict(testSampleMat, predictedLabel, predictedConfidence);
        //faceRecognizer.save(Application.dataPath + "/Cascades/train_face.txt"); // writes the model out as a text file

        Debug.Log("Predicted class: " + predictedLabel[0] + " / " + "Actual class: " + testSampleLabel);
        Debug.Log("Confidence: " + predictedConfidence[0]);

        Mat predictedMat = images[predictedLabel[0]];
        Mat baseMat      = new Mat(testSampleMat.rows(), predictedMat.cols() + testSampleMat.cols(), CvType.CV_8UC1);

        predictedMat.copyTo(baseMat.submat(new OpenCVForUnity.Rect(0, 0, predictedMat.cols(), predictedMat.rows())));
        testSampleMat.copyTo(baseMat.submat(new OpenCVForUnity.Rect(predictedMat.cols(), 0, testSampleMat.cols(), testSampleMat.rows())));

        Imgproc.putText(baseMat, "Predicted", new Point(10, 15), Core.FONT_HERSHEY_SIMPLEX, 0.4, new Scalar(255), 1, Imgproc.LINE_AA, false);
        Imgproc.putText(baseMat, "Confidence:", new Point(5, 25), Core.FONT_HERSHEY_SIMPLEX, 0.2, new Scalar(255), 1, Imgproc.LINE_AA, false);
        Imgproc.putText(baseMat, "   " + predictedConfidence[0], new Point(5, 33), Core.FONT_HERSHEY_SIMPLEX, 0.2, new Scalar(255), 1, Imgproc.LINE_AA, false);
        Imgproc.putText(baseMat, "TestSample", new Point(predictedMat.cols() + 10, 15), Core.FONT_HERSHEY_SIMPLEX, 0.4, new Scalar(255), 1, Imgproc.LINE_AA, false);

        Texture2D t2d = new Texture2D(baseMat.width(), baseMat.height());

        Utils.matToTexture2D(baseMat, t2d);
        Sprite sp = Sprite.Create(t2d, new UnityEngine.Rect(0, 0, t2d.width, t2d.height), Vector2.zero);

        m_srcImage.sprite         = sp;
        m_srcImage.preserveAspect = true;
    }
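
    // If incremental training is actually needed (load a saved model, then add new samples
    // without retraining from scratch), note that FaceRecognizer.update() is only supported
    // by the LBPH algorithm. A minimal sketch, assuming a previously saved LBPH model file:
    //
    //     LBPHFaceRecognizer lbph = LBPHFaceRecognizer.create();
    //     lbph.read(savedModelPath);          // restore the saved model
    //     lbph.update(newImages, newLabels);  // adds samples without clearing the loaded model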
        // Dispose of all collected data and models, so everything can be restarted.
        private void dispose()
        {
            m_selectedPerson = -1;
            m_numPersons     = 0;
            m_latestFaces.Clear();
            faceLabels.Clear();
            prev_identity   = -1;
            prev_similarity = 100000000.0d;

            foreach (Mat face in preprocessedFaces)
            {
                if (face != null && !face.IsDisposed)
                {
                    face.Dispose();
                }
            }
            preprocessedFaces.Clear();

            if (old_prepreprocessedFace != null && !old_prepreprocessedFace.IsDisposed)
            {
                old_prepreprocessedFace.Dispose();
            }
            old_prepreprocessedFace = null;

            if (prev_prepreprocessedFace != null && !prev_prepreprocessedFace.IsDisposed)
            {
                prev_prepreprocessedFace.Dispose();
            }
            prev_prepreprocessedFace = null;

            if (reconstructedFace != null && !reconstructedFace.IsDisposed)
            {
                reconstructedFace.Dispose();
            }
            reconstructedFace = null;

            if (model != null && !model.IsDisposed)
            {
                model.Dispose();
                model = null;
            }
        }
        // Generate an approximately reconstructed face by back-projecting the eigenvectors & eigenvalues of the given (preprocessed) face.
        public static Mat ReconstructFace(BasicFaceRecognizer model, Mat preprocessedFace)
        {
            // Since we can only reconstruct the face for some types of FaceRecognizer models (i.e. Eigenfaces or Fisherfaces),
            // we should surround the OpenCV calls by a try/catch block so we don't crash for other models.
            try
            {
                // Get some required data from the FaceRecognizer model.
                Mat eigenvectors   = model.getEigenVectors();
                Mat averageFaceRow = model.getMean();

                int faceHeight = preprocessedFace.rows();

                // Project the input image onto the PCA subspace.
                Mat projection = subspaceProject(eigenvectors, averageFaceRow, preprocessedFace.reshape(1, 1));
                //printMatInfo(projection, "projection");

                // Generate the reconstructed face back from the PCA subspace.
                Mat reconstructionRow = subspaceReconstruct(eigenvectors, averageFaceRow, projection);
                //printMatInfo(reconstructionRow, "reconstructionRow");

                // Convert the float row matrix to a regular 8-bit image. Note that we
                // shouldn't use "getImageFrom1DFloatMat()" because we don't want to normalize
                // the data since it is already at the perfect scale.

                // Make it a rectangular shaped image instead of a single row.
                Mat reconstructionMat = reconstructionRow.reshape(1, faceHeight);
                // Convert the floating-point pixels to regular 8-bit uchar pixels.
                Mat reconstructedFace = new Mat(reconstructionMat.size(), CvType.CV_8UC1);
                reconstructionMat.convertTo(reconstructedFace, CvType.CV_8UC1, 1, 0);
                //printMatInfo(reconstructedFace, "reconstructedFace");

                return(reconstructedFace);
            }
            catch (CvException e)
            {
                Debug.Log("WARNING: Missing FaceRecognizer properties. " + e);
                return(new Mat());
            }
        }
        // Show the internal face recognition data, to help debugging.
        public static void ShowTrainingDebugData(BasicFaceRecognizer model, int faceWidth, int faceHeight)
        {
            // TODO...
        }
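
        // Recognition.GetSimilarity(), called by the recognition loop below, is not shown in
        // this listing. A minimal sketch of it, assuming the usual Mastering-OpenCV definition
        // (the relative L2 error between two equally sized, preprocessed face images):
        public static double GetSimilarity(Mat A, Mat B)
        {
            if (A.rows() > 0 && A.rows() == B.rows() && A.cols() > 0 && A.cols() == B.cols())
            {
                // Calculate the L2 relative error between the two images.
                double errorL2 = Core.norm(A, B, Core.NORM_L2);
                // Scale it down, since the L2 error is summed across all pixels of the image.
                return errorL2 / (double)(A.rows() * A.cols());
            }
            // The images are different sizes; treat them as completely dissimilar.
            return 100000000.0d;
        }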
        // Main loop that runs forever, until the user hits Escape to quit.
        private void recognizeAndTrainUsingWebcam(Mat cameraFrame, CascadeClassifier faceCascade, CascadeClassifier eyeCascade1, CascadeClassifier eyeCascade2)
        {
            if (cameraFrame == null || cameraFrame.empty())
            {
                Debug.LogError("ERROR: Couldn't grab the next camera frame.");
                return;
            }

            // Draw directly onto the camera frame (Mat assignment shares the pixel data; this is not a deep copy).
            Mat displayedFrame = cameraFrame;

            int   cx;
            float current_processingTime     = Time.realtimeSinceStartup;
            float processingTimeDiff_seconds = (current_processingTime - old_processingTime);

            if (processingTimeDiff_seconds > CHANGE_IN_SECONDS_FOR_PROCESSING)
            {
                // Run the face recognition system on the camera image. It will draw some things onto the given image, so make sure it is not read-only memory!
                int identity = -1;

                // Find a face and preprocess it to have a standard size and contrast & brightness.
                Rect  faceRect = new Rect();                                       // Position of detected face.
                Rect  searchedLeftEye = new Rect(), searchedRightEye = new Rect(); // top-left and top-right regions of the face, where eyes were searched.
                Point leftEye = new Point(), rightEye = new Point();               // Position of the detected eyes.

                Mat preprocessedFace = PreprocessFace.GetPreprocessedFace(displayedFrame, faceWidth, faceCascade, eyeCascade1, eyeCascade2, preprocessLeftAndRightSeparately, ref faceRect, ref leftEye, ref rightEye, ref searchedLeftEye, ref searchedRightEye);

                bool gotFaceAndEyes = false;

                if (preprocessedFace != null && !preprocessedFace.empty())
                {
                    gotFaceAndEyes = true;
                }

                // Draw an anti-aliased rectangle around the detected face.
                if (faceRect.width > 0)
                {
                    Imgproc.rectangle(displayedFrame, faceRect.tl(), faceRect.br(), YELLOW, 2, Imgproc.LINE_AA, 0);

                    // Draw light-blue anti-aliased circles for the 2 eyes.
                    Scalar eyeColor = LIGHT_BLUE;
                    if (leftEye.x >= 0)     // Check if the eye was detected
                    {
                        Imgproc.circle(displayedFrame, new Point(faceRect.x + leftEye.x, faceRect.y + leftEye.y), 6, eyeColor, 1, Imgproc.LINE_AA, 0);
                    }
                    if (rightEye.x >= 0)     // Check if the eye was detected
                    {
                        Imgproc.circle(displayedFrame, new Point(faceRect.x + rightEye.x, faceRect.y + rightEye.y), 6, eyeColor, 1, Imgproc.LINE_AA, 0);
                    }
                }

                prev_prepreprocessedFace = preprocessedFace;

                if (m_mode == MODES.MODE_DETECTION)
                {
                    // Don't do anything special.
                }
                else if (m_mode == MODES.MODE_COLLECT_FACES)
                {
                    // Check if we have detected a face.
                    if (gotFaceAndEyes)
                    {
                        // Check if this face looks somewhat different from the previously collected face.
                        double imageDiff = 10000000000.0d;
                        if (old_prepreprocessedFace != null && !old_prepreprocessedFace.empty())
                        {
                            imageDiff = Recognition.GetSimilarity(preprocessedFace, old_prepreprocessedFace);
                        }

                        // Also record when it happened.
                        double current_time     = Time.realtimeSinceStartup;
                        double timeDiff_seconds = (current_time - old_time);

                        // Only process the face if it is noticeably different from the previous frame and there has been noticeable time gap.
                        if ((imageDiff > CHANGE_IN_IMAGE_FOR_COLLECTION) && (timeDiff_seconds > CHANGE_IN_SECONDS_FOR_COLLECTION))
                        {
                            // Also add the mirror image to the training set, so we have more training data, as well as to deal with faces looking to the left or right.
                            Mat mirroredFace = new Mat();
                            Core.flip(preprocessedFace, mirroredFace, 1);

                            // Add the face images to the list of detected faces.
                            preprocessedFaces.Add(preprocessedFace);
                            preprocessedFaces.Add(mirroredFace);
                            faceLabels.Add(m_selectedPerson);
                            faceLabels.Add(m_selectedPerson);

                            // Keep a reference to the latest face of each person.
                            m_latestFaces [m_selectedPerson] = preprocessedFaces.Count - 2;  // Point to the non-mirrored face.
                            // Show the number of collected faces. But since we also store mirrored faces, just show how many the user thinks they stored.
                            Debug.Log("Saved face " + (preprocessedFaces.Count / 2) + " for person " + m_selectedPerson);

                            // Make a white flash on the face, so the user knows a photo has been taken.
                            using (Mat displayedFaceRegion = new Mat(displayedFrame, faceRect)) {
                                Core.add(displayedFaceRegion, DARK_GRAY, displayedFaceRegion);
                            }

                            // Keep a copy of the processed face, to compare on next iteration.
                            old_prepreprocessedFace = preprocessedFace;
                            old_time = current_time;
                        }
                    }
                }
                else if (m_mode == MODES.MODE_TRAINING)
                {
                    // Check if there is enough data to train from. For Eigenfaces, we can learn from just one person if we want, but for Fisherfaces,
                    // we need at least 2 people, otherwise it will crash!
                    bool haveEnoughData = true;
                    if (facerecAlgorithm == "FaceRecognizer.Fisherfaces")
                    {
                        if ((m_numPersons < 2) || (m_numPersons == 2 && m_latestFaces [1] < 0))
                        {
                            Debug.Log("Warning: Fisherfaces needs at least 2 people, otherwise there is nothing to differentiate! Collect more data ...");
                            haveEnoughData = false;
                        }
                    }
                    if (m_numPersons < 1 || preprocessedFaces.Count <= 0 || preprocessedFaces.Count != faceLabels.Count)
                    {
                        Debug.Log("Warning: Need some training data before it can be learnt! Collect more data ...");
                        haveEnoughData = false;
                    }

                    if (haveEnoughData)
                    {
                        // Start training from the collected faces using Eigenfaces or a similar algorithm.
                        model = Recognition.LearnCollectedFaces(preprocessedFaces, faceLabels, facerecAlgorithm);

                        // Show the internal face recognition data, to help debugging.
                        //if (m_debug)
                        //Recognition.ShowTrainingDebugData(model, faceWidth, faceHeight);

                        // Now that training is over, we can start recognizing!
                        m_mode = MODES.MODE_RECOGNITION;
                    }
                    else
                    {
                        // Since there isn't enough training data, go back to the face collection mode!
                        m_mode = MODES.MODE_COLLECT_FACES;
                    }
                }
                else if (m_mode == MODES.MODE_RECOGNITION)
                {
                    prev_identity   = -1;
                    prev_similarity = 100000000.0d;
                    if (reconstructedFace != null && !reconstructedFace.IsDisposed)
                    {
                        reconstructedFace.Dispose();
                    }
                    reconstructedFace = null;

                    if (gotFaceAndEyes && (preprocessedFaces.Count > 0) && (preprocessedFaces.Count == faceLabels.Count))
                    {
                        // Generate a face approximation by back-projecting the eigenvectors & eigenvalues.
                        reconstructedFace = Recognition.ReconstructFace(model, preprocessedFace);

                        // Verify whether the reconstructed face looks like the preprocessed face, otherwise it is probably an unknown person.
                        double similarity = Recognition.GetSimilarity(preprocessedFace, reconstructedFace);
                        double confidence = 0.0d;

                        string outputStr;
                        if (similarity < UNKNOWN_PERSON_THRESHOLD)
                        {
                            int[]    predictedLabel      = new int [1];
                            double[] predictedConfidence = new double [1];
                            // Identify who the person is in the preprocessed face image.
                            model.predict(preprocessedFace, predictedLabel, predictedConfidence);
                            identity   = predictedLabel [0];
                            confidence = predictedConfidence [0];

                            outputStr     = identity.ToString();
                            prev_identity = identity;
                        }
                        else
                        {
                            // Since the confidence is low, assume it is an unknown person.
                            outputStr = "Unknown";
                        }
                        prev_similarity = similarity;
                        Debug.Log("Identity: " + outputStr + ". Similarity: " + similarity + ". Confidence: " + confidence);
                    }
                }
                else if (m_mode == MODES.MODE_DELETE_ALL)
                {
                    // Restart everything!
                    dispose();

                    // Restart in Detection mode.
                    m_mode = MODES.MODE_DETECTION;
                }
                else
                {
                    Debug.LogError("ERROR: Invalid run mode " + m_mode);
                    //exit(1);
                }

                old_processingTime = current_processingTime;
            }

            // Show the help, while also showing the number of collected faces. Since we also collect mirrored faces, we should just
            // tell the user how many faces they think we saved (ignoring the mirrored faces), hence divide by 2.
            strBuilder.Length = 0;
            Rect rcHelp = new Rect();

            if (m_mode == MODES.MODE_DETECTION)
            {
                strBuilder.Append("Click [Add Person] when ready to collect faces.");
            }
            else if (m_mode == MODES.MODE_COLLECT_FACES)
            {
                strBuilder.Append("Click anywhere to train from your ");
                strBuilder.Append(preprocessedFaces.Count / 2);
                strBuilder.Append(" faces of ");
                strBuilder.Append(m_numPersons);
                strBuilder.Append(" people.");
            }
            else if (m_mode == MODES.MODE_TRAINING)
            {
                strBuilder.Append("Please wait while your ");
                strBuilder.Append(preprocessedFaces.Count / 2);
                strBuilder.Append(" faces of ");
                strBuilder.Append(m_numPersons);
                strBuilder.Append(" people are trained.");
            }
            else if (m_mode == MODES.MODE_RECOGNITION)
            {
                strBuilder.Append("Click people on the right to add more faces to them, or [Add Person] for someone new.");
            }

            if (strBuilder.Length > 0)
            {
                // Draw it with a black background and then again with a white foreground.
                // Since BORDER may be 0 and we need a negative position, subtract 2 from the border so it is always negative.
                float txtSize = 0.4f;
                drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER, -BORDER - 2), BLACK, txtSize);              // Black shadow.
                rcHelp = drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER + 1, -BORDER - 1), WHITE, txtSize); // White text.
            }

            // Show the current mode.
            strBuilder.Length = 0;
            if (m_mode >= 0 && m_mode < MODES.MODE_END)
            {
                strBuilder.Append(MODE_NAMES [(int)m_mode]);
                drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER, -BORDER - 2 - rcHelp.height), BLACK);     // Black shadow
                drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER + 1, -BORDER - 1 - rcHelp.height), GREEN); // Green text
            }

            // Show the current preprocessed face in the top-center of the display.
            cx = (displayedFrame.cols() - faceWidth) / 2;
            if (prev_prepreprocessedFace != null && !prev_prepreprocessedFace.empty())
            {
                // Get an RGBA version of the face, since the output is RGBA color.
                using (Mat srcRGBA = new Mat(prev_prepreprocessedFace.size(), CvType.CV_8UC4)) {
                    Imgproc.cvtColor(prev_prepreprocessedFace, srcRGBA, Imgproc.COLOR_GRAY2RGBA);
                    // Get the destination ROI (and make sure it is within the image!).
                    Rect dstRC = new Rect(cx, BORDER, faceWidth, faceHeight);
                    using (Mat dstROI = new Mat(displayedFrame, dstRC)) {
                        // Copy the pixels from src to dst.
                        srcRGBA.copyTo(dstROI);
                    }
                }
            }

            // Draw an anti-aliased border around the face, even if it is not shown.
            Imgproc.rectangle(displayedFrame, new Point(cx - 1, BORDER - 1), new Point(cx - 1 + faceWidth + 2, BORDER - 1 + faceHeight + 2), LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);

            // Show the most recent face for each of the collected people, on the right side of the display.
            m_gui_faces_left = displayedFrame.cols() - BORDER - faceWidth;
            m_gui_faces_top  = BORDER;
            for (int i = 0; i < m_numPersons; i++)
            {
                int index = m_latestFaces [i];
                if (index >= 0 && index < preprocessedFaces.Count)
                {
                    Mat srcGray = preprocessedFaces [index];
                    if (srcGray != null && !srcGray.empty())
                    {
                        // Get an RGBA version of the face, since the output is RGBA color.
                        using (Mat srcRGBA = new Mat(srcGray.size(), CvType.CV_8UC4)) {
                            Imgproc.cvtColor(srcGray, srcRGBA, Imgproc.COLOR_GRAY2RGBA);
                            // Get the destination ROI (and make sure it is within the image!).
                            int  y     = Mathf.Min(m_gui_faces_top + i * faceHeight, displayedFrame.rows() - faceHeight);
                            Rect dstRC = new Rect(m_gui_faces_left, y, faceWidth, faceHeight);
                            using (Mat dstROI = new Mat(displayedFrame, dstRC)) {
                                // Copy the pixels from src to dst.
                                srcRGBA.copyTo(dstROI);
                            }
                        }
                    }
                }
            }

            // Highlight the person being collected, using a red rectangle around their face.
            if (m_mode == MODES.MODE_COLLECT_FACES)
            {
                if (m_selectedPerson >= 0 && m_selectedPerson < m_numPersons)
                {
                    int  y  = Mathf.Min(m_gui_faces_top + m_selectedPerson * faceHeight, displayedFrame.rows() - faceHeight);
                    Rect rc = new Rect(m_gui_faces_left, y, faceWidth, faceHeight);
                    Imgproc.rectangle(displayedFrame, rc.tl(), rc.br(), RED, 3, Imgproc.LINE_AA, 0);
                }
            }

            // Highlight the person that has been recognized, using a green rectangle around their face.
            if (m_mode == MODES.MODE_RECOGNITION && prev_identity >= 0 && prev_identity < 1000)
            {
                int  y  = Mathf.Min(m_gui_faces_top + prev_identity * faceHeight, displayedFrame.rows() - faceHeight);
                Rect rc = new Rect(m_gui_faces_left, y, faceWidth, faceHeight);
                Imgproc.rectangle(displayedFrame, rc.tl(), rc.br(), GREEN, 3, Imgproc.LINE_AA, 0);
            }

            if (m_mode == MODES.MODE_RECOGNITION)
            {
                if (m_debug)
                {
                    if (reconstructedFace != null && !reconstructedFace.empty())
                    {
                        cx = (displayedFrame.cols() - faceWidth) / 2;
                        Point rfDebugBottomRight = new Point(cx + faceWidth * 2 + 5, BORDER + faceHeight);
                        Point rfDebugTopLeft     = new Point(cx + faceWidth + 5, BORDER);
                        Rect  rfDebugRC          = new Rect(rfDebugTopLeft, rfDebugBottomRight);
                        using (Mat srcRGBA = new Mat(reconstructedFace.size(), CvType.CV_8UC4)) {
                            Imgproc.cvtColor(reconstructedFace, srcRGBA, Imgproc.COLOR_GRAY2RGBA);
                            using (Mat dstROI = new Mat(displayedFrame, rfDebugRC)) {
                                srcRGBA.copyTo(dstROI);
                            }
                        }
                        Imgproc.rectangle(displayedFrame, rfDebugTopLeft, rfDebugBottomRight, LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);
                    }
                }

                // Show the confidence rating for the recognition in the mid-top of the display.
                cx = (displayedFrame.cols() - faceWidth) / 2;
                Point ptBottomRight = new Point(cx - 5, BORDER + faceHeight);
                Point ptTopLeft     = new Point(cx - 15, BORDER);
                // Draw a gray line showing the threshold for an "unknown" person.
                Point ptThreshold = new Point(ptTopLeft.x, ptBottomRight.y - (1.0 - UNKNOWN_PERSON_THRESHOLD) * faceHeight);
                Imgproc.rectangle(displayedFrame, ptThreshold, new Point(ptBottomRight.x, ptThreshold.y), LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);
                // Crop the confidence rating between 0.0 to 1.0, to show in the bar.
                double confidenceRatio = 1.0d - Math.Min(Math.Max(prev_similarity, 0.0d), 1.0d);
                Point  ptConfidence    = new Point(ptTopLeft.x, ptBottomRight.y - confidenceRatio * faceHeight);
                // Show the light-blue confidence bar.
                Imgproc.rectangle(displayedFrame, ptConfidence, ptBottomRight, LIGHT_BLUE, Core.FILLED, Imgproc.LINE_AA, 0);
                // Show the gray border of the bar.
                Imgproc.rectangle(displayedFrame, ptTopLeft, ptBottomRight, LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);
            }

            /*
             * // If the user wants all the debug data, show it to them!
             * if (m_debug)
             * {
             *  Mat face = new Mat();
             *  if (faceRect.width > 0)
             *  {
             *      face = new Mat(cameraFrame, faceRect);
             *      if (searchedLeftEye.width > 0 && searchedRightEye.width > 0)
             *      {
             *          Mat topLeftOfFace = new Mat(face, searchedLeftEye);
             *          Mat topRightOfFace = new Mat(face, searchedRightEye);
             *          //imshow("topLeftOfFace", topLeftOfFace);
             *          //imshow("topRightOfFace", topRightOfFace);
             *      }
             *  }
             *
             *  //if (model != null)
             *      //showTrainingDebugData(model, faceWidth, faceHeight);
             * }
             */
        }
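
        // drawString(), used throughout recognizeAndTrainUsingWebcam(), is defined elsewhere in
        // this sample. A minimal sketch of it, assuming the Mastering-OpenCV behavior: negative
        // coordinates measure from the bottom/right edge, and the bounding Rect of the drawn
        // text is returned so callers can stack lines of text above each other.
        private Rect drawString(Mat img, string text, Point coord, Scalar color, float fontScale = 0.6f)
        {
            int   fontFace  = Imgproc.FONT_HERSHEY_COMPLEX;
            int   thickness = 1;
            int[] baseline  = new int[1];
            Size  textSize  = Imgproc.getTextSize(text, fontFace, fontScale, thickness, baseline);

            Point org = new Point(coord.x, coord.y);
            if (org.y >= 0)
                org.y += textSize.height;                 // measure down from the top edge
            else
                org.y += img.rows() - baseline [0] + 1;   // measure up from the bottom edge
            if (org.x < 0)
                org.x += img.cols() - textSize.width + 1; // measure left from the right edge

            Imgproc.putText(img, text, org, fontFace, fontScale, color, thickness, Imgproc.LINE_AA, false);

            // Return the bounding rect around the drawn text.
            return new Rect((int)org.x, (int)(org.y - textSize.height), (int)textSize.width, (int)(textSize.height + baseline [0]));
        }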
        /// <summary>
        /// Raises the load button click event.
        /// </summary>
        public void LoadModel()
        {
            string loadDirectoryPath = Path.Combine(Application.persistentDataPath, saveDirectoryName);

            if (!Directory.Exists(loadDirectoryPath))
            {
                Debug.Log("Load failure: the saved train data file does not exist.");
                return;
            }

            // Restart everything!
            dispose();

            if (facerecAlgorithm == "FaceRecognizer.Fisherfaces")
            {
                model = FisherFaceRecognizer.create();
            }
            else if (facerecAlgorithm == "FaceRecognizer.Eigenfaces")
            {
                model = EigenFaceRecognizer.create();
            }

            if (model == null)
            {
                Debug.LogError("ERROR: The FaceRecognizer algorithm [" + facerecAlgorithm + "] is not available in your version of OpenCV. Please update to OpenCV v2.4.1 or newer.");
                m_mode = R_MODES.MODE_DETECTION;
                return;
            }

            // load the train data.
            model.read(Path.Combine(loadDirectoryPath, "traindata.yml"));

            int maxLabel = (int)Core.minMaxLoc(model.getLabels()).maxVal;

            if (maxLabel < 0)
            {
                Debug.Log("load failure.");
                model.Dispose();
                return;
            }

            // Restore the save data.
            #if UNITY_WEBGL && !UNITY_EDITOR
            string format = "jpg";
            #else
            string format = "png";
            #endif
            m_numPersons = maxLabel + 1;
            personsNames = new string[m_numPersons];

            for (int i = 0; i < m_numPersons; ++i)
            {
                personsNames[i] = GameManager.instance.personsNames[i];

                m_latestFaces.Add(i);
                preprocessedFaces.Add(Imgcodecs.imread(Path.Combine(loadDirectoryPath, "preprocessedface" + i + "." + format), 0));
                if (preprocessedFaces[i].total() == 0)
                {
                    preprocessedFaces[i] = new Mat(faceHeight, faceWidth, CvType.CV_8UC1, new Scalar(128));
                }
                faceLabels.Add(i);
            }


            // go to the recognition mode!
            m_mode = R_MODES.MODE_RECOGNITION;
        }
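
        // A hypothetical SaveModel() counterpart to LoadModel() above, sketching where the
        // "traindata.yml" and "preprocessedface<i>" files are assumed to come from.
        public void SaveModel()
        {
            string saveDirectoryPath = Path.Combine(Application.persistentDataPath, saveDirectoryName);

            if (!Directory.Exists(saveDirectoryPath))
            {
                Directory.CreateDirectory(saveDirectoryPath);
            }

            // Save the trained model.
            model.write(Path.Combine(saveDirectoryPath, "traindata.yml"));

            #if UNITY_WEBGL && !UNITY_EDITOR
            string format = "jpg";
            #else
            string format = "png";
            #endif

            // Save the latest preprocessed face of each person, so LoadModel() can restore them.
            for (int i = 0; i < m_numPersons; ++i)
            {
                int index = m_latestFaces[i];
                if (index >= 0 && index < preprocessedFaces.Count && preprocessedFaces[index].total() > 0)
                {
                    Imgcodecs.imwrite(Path.Combine(saveDirectoryPath, "preprocessedface" + i + "." + format), preprocessedFaces[index]);
                }
            }
        }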