// Processes each grabbed camera frame: finds a face, then recognizes or trains depending on the current mode.
private void recognizeAndTrainUsingWebcam(Mat cameraFrame, CascadeClassifier faceCascade, CascadeClassifier eyeCascade1, CascadeClassifier eyeCascade2)
{
    if (cameraFrame == null || cameraFrame.total() == 0)
    {
        Debug.LogError("ERROR: Couldn't grab the next camera frame.");
        return;
    }

    // Work directly on the camera frame. Note this is a reference, not a copy:
    // everything drawn onto displayedFrame is drawn onto cameraFrame too.
    Mat displayedFrame = cameraFrame;

    //int cx;  // Used by the commented-out GUI overlay code below; uncomment together with it.

    float current_processingTime = Time.realtimeSinceStartup;
    float processingTimeDiff_seconds = (current_processingTime - old_processingTime);
    if (processingTimeDiff_seconds > CHANGE_IN_SECONDS_FOR_PROCESSING)
    {
        // Run the face recognition system on the camera image. It will draw some things onto the given image, so make sure it is not read-only memory!
        int identity = -1;

        // Find a face and preprocess it to have a standard size and contrast & brightness.
        Rect faceRect = new Rect();  // Position of the detected face.
        Rect searchedLeftEye = new Rect(), searchedRightEye = new Rect();  // Top-left and top-right regions of the face, where the eyes were searched for.
        Point leftEye = new Point(), rightEye = new Point();  // Positions of the detected eyes.
        Mat preprocessedFace = PreprocessFace.GetPreprocessedFace(displayedFrame, faceWidth, faceCascade, eyeCascade1, eyeCascade2, preprocessLeftAndRightSeparately, ref faceRect, ref leftEye, ref rightEye, ref searchedLeftEye, ref searchedRightEye);

        bool gotFaceAndEyes = false;
        if (preprocessedFace != null && preprocessedFace.total() > 0)
            gotFaceAndEyes = true;

        // Draw an anti-aliased rectangle around the detected face.
        if (faceRect.width > 0)
        {
            Imgproc.rectangle(displayedFrame, faceRect.tl(), faceRect.br(), YELLOW, 2, Imgproc.LINE_AA, 0);

            // Draw light-blue anti-aliased circles for the 2 eyes.
            Scalar eyeColor = LIGHT_BLUE;
            if (leftEye.x >= 0)
            {
                // The eye was detected.
                Imgproc.circle(displayedFrame, new Point(faceRect.x + leftEye.x, faceRect.y + leftEye.y), 6, eyeColor, 1, Imgproc.LINE_AA, 0);
            }
            if (rightEye.x >= 0)
            {
                // The eye was detected.
                Imgproc.circle(displayedFrame, new Point(faceRect.x + rightEye.x, faceRect.y + rightEye.y), 6, eyeColor, 1, Imgproc.LINE_AA, 0);
            }
        }

        prev_prepreprocessedFace = preprocessedFace;

        if (m_mode == R_MODES.MODE_DETECTION)
        {
            // Don't do anything special.
        }
        else if (m_mode == R_MODES.MODE_RECOGNITION)
        {
            prev_identity = -1;
            prev_similarity = 100000000.0d;
            if (reconstructedFace != null && !reconstructedFace.IsDisposed)
                reconstructedFace.Dispose();
            reconstructedFace = null;

            if (gotFaceAndEyes && (preprocessedFaces.Count > 0) && (preprocessedFaces.Count == faceLabels.Count))
            {
                // Generate a face approximation by back-projecting the eigenvectors & eigenvalues.
                reconstructedFace = Recognition.ReconstructFace(model, preprocessedFace);

                // Verify whether the reconstructed face looks like the preprocessed face, otherwise it is probably an unknown person.
                // (Hedged sketches of these two Recognition helpers appear after this method.)
                double similarity = Recognition.GetSimilarity(preprocessedFace, reconstructedFace);

                double confidence = 0.0d;
                string outputStr;
                if (similarity < UNKNOWN_PERSON_THRESHOLD)
                {
                    int[] predictedLabel = new int[1];
                    double[] predictedConfidence = new double[1];
                    // Identify who the person is in the preprocessed face image.
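                    // FaceRecognizer.predict() returns its results through these one-element
                    // arrays rather than as return values; for Eigenfaces/Fisherfaces the reported
                    // "confidence" is a distance in subspace, so LOWER values mean a closer match.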
                    model.predict(preprocessedFace, predictedLabel, predictedConfidence);
                    identity = predictedLabel[0];
                    confidence = predictedConfidence[0];
                    outputStr = identity.ToString();
                    prev_identity = identity;

                    // Display the recognized person's name above their face.
                    strBuilder.Length = 0;
                    strBuilder.Append(GameManager.instance.personsNames[prev_identity]);
                    if (strBuilder.Length > 0)
                    {
                        float txtSize = 2.5f;
                        drawString(displayedFrame, strBuilder.ToString(), new Point(faceRect.tl().x, faceRect.tl().y), YELLOW, txtSize);  // Yellow text.
                    }
                }
                else
                {
                    // Since the confidence is low, assume it is an unknown person.
                    outputStr = "Unknown";
                }
                prev_similarity = similarity;
                Debug.Log("Identity: " + outputStr + ". Similarity: " + similarity + ". Confidence: " + confidence);
            }
        }
        else if (m_mode == R_MODES.MODE_DELETE_ALL)
        {
            // Restart everything!
            dispose();

            // Restart in Detection mode.
            m_mode = R_MODES.MODE_DETECTION;
        }
        else
        {
            Debug.LogError("ERROR: Invalid run mode " + m_mode);
        }

        old_processingTime = current_processingTime;
    }

    // Show the help text, along with the number of collected faces. Since a mirrored copy of each
    // face is also stored, divide the count by 2 so the user only sees how many faces they collected.
    /*strBuilder.Length = 0;
     * Rect rcHelp = new Rect();
     * if (m_mode == R_MODES.MODE_DETECTION)
     * {
     *     strBuilder.Append("Click [Add Person] when ready to collect faces.");
     * }
     * else if (m_mode == R_MODES.MODE_COLLECT_FACES)
     * {
     *     strBuilder.Append(preprocessedFaces.Count / 2);
     *     strBuilder.Append(" faces of ");
     *     strBuilder.Append(m_numPersons);
     *     strBuilder.Append(" people.");
     * }
     * else if (m_mode == R_MODES.MODE_RECOGNITION)
     *     strBuilder.Append("Click people on the right to add more faces to them, or [Add Person] for someone new.");
     *
     * if (strBuilder.Length > 0)
     * {
     *     // Draw it with a black background and then again with a white foreground.
     *     // Since BORDER may be 0 and we need a negative position, subtract 2 from the border so it is always negative.
     *     float txtSize = 0.7f;
     *     drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER, -BORDER - 2), BLACK, txtSize);  // Black shadow.
     *     rcHelp = drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER + 1, -BORDER - 1), WHITE, txtSize);  // White text.
     * }
     *
     * // Show the current mode.
     * if (m_mode >= 0 && m_mode < R_MODES.MODE_END)
     * {
     *     strBuilder.Length = 0;
     *     strBuilder.Append(MODE_NAMES[(int)m_mode]);
     *     drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER, -BORDER - 2 - rcHelp.height), BLACK);  // Black shadow.
     *     drawString(displayedFrame, strBuilder.ToString(), new Point(BORDER + 1, -BORDER - 1 - rcHelp.height), GREEN);  // Green text.
     * }*/
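    // drawString() used above (and for the name label in the live code) is a custom helper defined
    // elsewhere in this class, not an OpenCV call. For reference, a hedged sketch of its likely
    // behavior, kept commented out so it cannot clash with the real definition; it assumes the
    // convention that negative coordinates are measured from the right/bottom image edges, and
    // returns the text's bounding Rect so callers can stack lines of text:
    /*private static Rect drawString(Mat img, string text, Point coord, Scalar color, float fontScale = 0.6f)
     * {
     *     int fontFace = Imgproc.FONT_HERSHEY_COMPLEX;
     *     int thickness = 1;
     *     int[] baseline = new int[1];
     *     Size textSize = Imgproc.getTextSize(text, fontFace, fontScale, thickness, baseline);
     *     baseline[0] += thickness;
     *     // Negative coords position the text relative to the bottom/right image edges.
     *     if (coord.y >= 0)
     *         coord.y += textSize.height;
     *     else
     *         coord.y += img.rows() - baseline[0] + 1;
     *     if (coord.x < 0)
     *         coord.x += img.cols() - textSize.width + 1;
     *     Rect boundingRect = new Rect((int)coord.x, (int)(coord.y - textSize.height), (int)textSize.width, (int)(textSize.height + baseline[0]));
     *     Imgproc.putText(img, text, coord, fontFace, fontScale, color, thickness, Imgproc.LINE_AA, false);
     *     return boundingRect;
     * }*/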
    // Show the current preprocessed face in the top-center of the display.
    /*cx = (displayedFrame.cols() - faceWidth) / 2;
     * if (prev_prepreprocessedFace != null && prev_prepreprocessedFace.total() > 0)
     * {
     *     // Get a RGBA version of the face, since the output is RGBA color.
     *     using (Mat srcRGBA = new Mat(prev_prepreprocessedFace.size(), CvType.CV_8UC4))
     *     {
     *         Imgproc.cvtColor(prev_prepreprocessedFace, srcRGBA, Imgproc.COLOR_GRAY2RGBA);
     *         // Get the destination ROI (and make sure it is within the image!).
     *         Rect dstRC = new Rect(cx, BORDER, faceWidth, faceHeight);
     *         using (Mat dstROI = new Mat(displayedFrame, dstRC))
     *         {
     *             // Copy the pixels from src to dst.
     *             srcRGBA.copyTo(dstROI);
     *         }
     *     }
     * }
     *
     * // Draw an anti-aliased border around the face, even if it is not shown.
     * Imgproc.rectangle(displayedFrame, new Point(cx - 1, BORDER - 1), new Point(cx - 1 + faceWidth + 2, BORDER - 1 + faceHeight + 2), LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);
     */

    // Show the most recent face for each of the collected people, on the right side of the display.
    /*m_gui_faces_left = displayedFrame.cols() - BORDER - faceWidth;
     * m_gui_faces_top = BORDER;
     * for (int i = 0; i < m_numPersons; i++)
     * {
     *     int index = m_latestFaces[i];
     *     if (index >= 0 && index < preprocessedFaces.Count)
     *     {
     *         Mat srcGray = preprocessedFaces[index];
     *         if (srcGray != null && srcGray.total() > 0)
     *         {
     *             // Get a RGBA version of the face, since the output is RGBA color.
     *             using (Mat srcRGBA = new Mat(srcGray.size(), CvType.CV_8UC4))
     *             {
     *                 Imgproc.cvtColor(srcGray, srcRGBA, Imgproc.COLOR_GRAY2RGBA);
     *                 // Get the destination ROI (and make sure it is within the image!).
     *                 int y = Mathf.Min(m_gui_faces_top + i * faceHeight, displayedFrame.rows() - faceHeight);
     *                 Rect dstRC = new Rect(m_gui_faces_left, y, faceWidth, faceHeight);
     *                 using (Mat dstROI = new Mat(displayedFrame, dstRC))
     *                 {
     *                     // Copy the pixels from src to dst.
     *                     srcRGBA.copyTo(dstROI);
     *                 }
     *             }
     *         }
     *     }
     * }*/

    // Highlight the person being collected, using a red rectangle around their face.
    /*if (m_mode == R_MODES.MODE_COLLECT_FACES)
     * {
     *     if (m_selectedPerson >= 0 && m_selectedPerson < m_numPersons)
     *     {
     *         int y = Mathf.Min(m_gui_faces_top + m_selectedPerson * faceHeight, displayedFrame.rows() - faceHeight);
     *         Rect rc = new Rect(m_gui_faces_left, y, faceWidth, faceHeight);
     *         Imgproc.rectangle(displayedFrame, rc.tl(), rc.br(), RED, 3, Imgproc.LINE_AA, 0);
     *     }
     * }*/

    // Highlight the person that has been recognized, using a green rectangle around their face.
    /*if (m_mode == R_MODES.MODE_RECOGNITION && prev_identity >= 0 && prev_identity < 1000)
     * {
     *     int y = Mathf.Min(m_gui_faces_top + prev_identity * faceHeight, displayedFrame.rows() - faceHeight);
     *     Rect rc = new Rect(m_gui_faces_left, y, faceWidth, faceHeight);
     *     Imgproc.rectangle(displayedFrame, rc.tl(), rc.br(), GREEN, 3, Imgproc.LINE_AA, 0);
     * }
     *
     * if (m_mode == R_MODES.MODE_RECOGNITION)
     * {*/

    /*if (m_debug)
     * {
     *     if (reconstructedFace != null && reconstructedFace.total() > 0)
     *     {
     *         cx = (displayedFrame.cols() - faceWidth) / 2;
     *         Point rfDebugBottomRight = new Point(cx + faceWidth * 2 + 5, BORDER + faceHeight);
     *         Point rfDebugTopLeft = new Point(cx + faceWidth + 5, BORDER);
     *         Rect rfDebugRC = new Rect(rfDebugTopLeft, rfDebugBottomRight);
     *         using (Mat srcRGBA = new Mat(reconstructedFace.size(), CvType.CV_8UC4))
     *         {
     *             Imgproc.cvtColor(reconstructedFace, srcRGBA, Imgproc.COLOR_GRAY2RGBA);
     *             using (Mat dstROI = new Mat(displayedFrame, rfDebugRC))
     *             {
     *                 srcRGBA.copyTo(dstROI);
     *             }
     *         }
     *         Imgproc.rectangle(displayedFrame, rfDebugTopLeft, rfDebugBottomRight, LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);
     *     }
     * }*/
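    // Note on the next block: prev_similarity is an error measure where LOWER means a closer
    // match, so the bar height is computed as confidenceRatio = 1 - clamp(prev_similarity, 0, 1);
    // a near-perfect match fills the bar, and the gray line marks where the similarity equals
    // UNKNOWN_PERSON_THRESHOLD.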
    // Show the confidence rating for the recognition in the mid-top of the display.
    /*cx = (displayedFrame.cols() - faceWidth) / 2;
     * Point ptBottomRight = new Point(cx - 5, BORDER + faceHeight);
     * Point ptTopLeft = new Point(cx - 15, BORDER);
     * // Draw a gray line showing the threshold for an "unknown" person.
     * Point ptThreshold = new Point(ptTopLeft.x, ptBottomRight.y - (1.0 - UNKNOWN_PERSON_THRESHOLD) * faceHeight);
     * Imgproc.rectangle(displayedFrame, ptThreshold, new Point(ptBottomRight.x, ptThreshold.y), LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);
     * // Clamp the confidence rating between 0.0 and 1.0, to show in the bar.
     * double confidenceRatio = 1.0d - Math.Min(Math.Max(prev_similarity, 0.0d), 1.0d);
     * Point ptConfidence = new Point(ptTopLeft.x, ptBottomRight.y - confidenceRatio * faceHeight);
     * // Show the light-blue confidence bar.
     * Imgproc.rectangle(displayedFrame, ptConfidence, ptBottomRight, LIGHT_BLUE, Core.FILLED, Imgproc.LINE_AA, 0);
     * // Show the gray border of the bar.
     * Imgproc.rectangle(displayedFrame, ptTopLeft, ptBottomRight, LIGHT_GRAY, 1, Imgproc.LINE_AA, 0);*/
    //}

    /* // If the user wants all the debug data, show it to them!
     * if (m_debug)
     * {
     *     Mat face = new Mat();
     *     if (faceRect.width > 0)
     *     {
     *         face = new Mat(cameraFrame, faceRect);
     *         if (searchedLeftEye.width > 0 && searchedRightEye.width > 0)
     *         {
     *             Mat topLeftOfFace = new Mat(face, searchedLeftEye);
     *             Mat topRightOfFace = new Mat(face, searchedRightEye);
     *             //imshow("topLeftOfFace", topLeftOfFace);
     *             //imshow("topRightOfFace", topRightOfFace);
     *         }
     *     }
     *
     *     //if (model != null)
     *     //    showTrainingDebugData(model, faceWidth, faceHeight);
     * }
     */
}
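
// The Recognition helper class called above (ReconstructFace, GetSimilarity) is not shown in this
// section. The sketches below are minimal, hedged approximations of what those two helpers could
// look like for an Eigenfaces/Fisherfaces model, using only BasicFaceRecognizer getters
// (getEigenVectors(), getMean()); treat them as illustrative, not as this project's actual
// implementation.

// Similarity as an L2 error normalized by image size, so the result is a small per-pixel value
// that a constant like UNKNOWN_PERSON_THRESHOLD can be compared against.
public static double GetSimilarity(Mat A, Mat B)
{
    if (A.rows() > 0 && A.rows() == B.rows() && A.cols() > 0 && A.cols() == B.cols())
    {
        double errorL2 = Core.norm(A, B, Core.NORM_L2);
        return errorL2 / (double)(A.rows() * A.cols());
    }
    return 100000000.0;  // The images have different sizes: return a huge "not similar" value.
}

// Reconstruction by projecting the face into the eigenspace and back-projecting it out again:
// proj = (x - mean) * W, then recon = proj * W^T + mean, where W holds one eigenvector per column.
public static Mat ReconstructFace(BasicFaceRecognizer model, Mat preprocessedFace)
{
    using (Mat eigenvectors = model.getEigenVectors())  // d x k, CV_64F.
    using (Mat mean = model.getMean())                  // 1 x d, CV_64F.
    using (Mat faceRow = new Mat())
    using (Mat projection = new Mat())
    using (Mat reconstructionRow = new Mat())
    using (Mat empty = new Mat())
    {
        // Flatten the 8-bit face into a 1 x d double-precision row vector.
        preprocessedFace.reshape(1, 1).convertTo(faceRow, CvType.CV_64FC1);
        Core.subtract(faceRow, mean, faceRow);
        Core.gemm(faceRow, eigenvectors, 1.0, empty, 0.0, projection);
        Core.gemm(projection, eigenvectors, 1.0, empty, 0.0, reconstructionRow, Core.GEMM_2_T);
        Core.add(reconstructionRow, mean, reconstructionRow);
        // Reshape back to image dimensions and convert to 8-bit so the result can be
        // compared against (and displayed like) the preprocessed face.
        Mat reconstructedFace = new Mat();
        reconstructionRow.reshape(1, preprocessedFace.rows()).convertTo(reconstructedFace, CvType.CV_8UC1);
        return reconstructedFace;
    }
}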
/// <summary>
/// Loads the trained face recognizer and the saved preprocessed face images from disk,
/// then switches to recognition mode.
/// </summary>
public void LoadModel()
{
    string loadDirectoryPath = Path.Combine(Application.persistentDataPath, saveDirectoryName);
    if (!Directory.Exists(loadDirectoryPath))
    {
        Debug.Log("Load failed: the saved training data does not exist.");
        return;
    }

    // Restart everything!
    dispose();

    if (facerecAlgorithm == "FaceRecognizer.Fisherfaces")
    {
        model = FisherFaceRecognizer.create();
    }
    else if (facerecAlgorithm == "FaceRecognizer.Eigenfaces")
    {
        model = EigenFaceRecognizer.create();
    }

    if (model == null)
    {
        Debug.LogError("ERROR: The FaceRecognizer algorithm [" + facerecAlgorithm + "] is not available in your version of OpenCV. Make sure the contrib \"face\" module is included in your build.");
        m_mode = R_MODES.MODE_DETECTION;
        return;
    }

    // Load the training data.
    model.read(Path.Combine(loadDirectoryPath, "traindata.yml"));
    int maxLabel = (int)Core.minMaxLoc(model.getLabels()).maxVal;
    if (maxLabel < 0)
    {
        Debug.Log("Load failed: the model contains no labels.");
        model.Dispose();
        return;
    }

    // Restore the saved data.
#if UNITY_WEBGL && !UNITY_EDITOR
    string format = "jpg";
#else
    string format = "png";
#endif
    m_numPersons = maxLabel + 1;
    personsNames = new string[m_numPersons];
    for (int i = 0; i < m_numPersons; ++i)
    {
        personsNames[i] = GameManager.instance.personsNames[i];
        m_latestFaces.Add(i);
        preprocessedFaces.Add(Imgcodecs.imread(Path.Combine(loadDirectoryPath, "preprocessedface" + i + "." + format), 0));
        if (preprocessedFaces[i].total() == 0)
        {
            // The image file was missing or unreadable; fall back to a plain gray placeholder.
            preprocessedFaces[i] = new Mat(faceHeight, faceWidth, CvType.CV_8UC1, new Scalar(128));
        }
        faceLabels.Add(i);
    }

    // Switch to recognition mode!
    m_mode = R_MODES.MODE_RECOGNITION;
}
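
/// <summary>
/// Hypothetical counterpart to LoadModel, sketched here for illustration only: the section above
/// shows just the loading side. This sketch assumes FaceRecognizer.write() is available and
/// mirrors the file layout LoadModel expects ("traindata.yml" plus one "preprocessedface{i}"
/// image per person).
/// </summary>
public void SaveModel()
{
    string saveDirectoryPath = Path.Combine(Application.persistentDataPath, saveDirectoryName);
    if (!Directory.Exists(saveDirectoryPath))
        Directory.CreateDirectory(saveDirectoryPath);

#if UNITY_WEBGL && !UNITY_EDITOR
    string format = "jpg";
#else
    string format = "png";
#endif

    // Persist the trained recognizer itself.
    model.write(Path.Combine(saveDirectoryPath, "traindata.yml"));

    // Persist the latest face of each person, so LoadModel can restore the thumbnails.
    for (int i = 0; i < m_numPersons; ++i)
    {
        Imgcodecs.imwrite(Path.Combine(saveDirectoryPath, "preprocessedface" + i + "." + format), preprocessedFaces[m_latestFaces[i]]);
    }
}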