コード例 #1
0
        /// <summary>
        /// Memorizes a human face
        /// </summary>
        /// <param name="person">Name to asign the recognized human</param>
        /// <returns>True if a person was detected and name could be stores. False otherwise</returns>
        /// <summary>
        /// Memorizes a human face by enrolling it into the known-faces database
        /// </summary>
        /// <param name="person">Name to assign to the recognized human</param>
        /// <returns>True if a person was detected and the name could be stored. False otherwise</returns>
        private bool RememberHuman(string person)
        {
            // Captured image
            NImage image = null;
            // Grayscale image used for face recognition
            NGrayscaleImage gray = null;
            // Extraction has succeeded
            bool extractionSucceeded = false;
            // Stores the detection details of the detected face
            VleDetectionDetails details = new VleDetectionDetails();
            // Stores the features of the detected face
            byte[] features = null;
            // Stores the detected and recognized face
            Face face = null;

            sleepCapture = false;
            lastOperationTime = DateTime.Now;
            UseResources();

            try
            {
                // Start enrolling
                vlExtractor.ExtractStart(settings.AttemptsWhileEnrolling);
                try
                {
                    // Attempt counter
                    for (int attempts = 0; running && (attempts < settings.AttemptsWhileEnrolling); ++attempts)
                    {
                        // Release the images from the previous attempt before capturing again
                        if (image != null) image.Dispose();
                        if (gray != null) gray.Dispose();
                        image = null;
                        gray = null;

                        // NImage.FromBitmap throws on a null bitmap, so check the capture first
                        Bitmap captured = imageSource.GetImage(100);
                        if (captured == null) continue;
                        image = NImage.FromBitmap(captured);
                        // Convert image to a gray one
                        gray = (NGrayscaleImage)NImage.FromImage(NPixelFormat.Grayscale, 0, image);

                        // Extract face details (if any). ExtractNext may throw while the
                        // extractor has not locked onto a face yet; treat that as a failed attempt.
                        try
                        {
                            extractionSucceeded = vlExtractor.ExtractNext(gray, out details, out features);
                        }
                        catch { extractionSucceeded = false; }

                        // Check if we have all data (extraction succeeded).
                        // VleDetectionDetails is a value type, so no null comparison is needed.
                        if (extractionSucceeded && details.FaceAvailable && (features != null) && (features.Length > 0))
                        {
                            // Create the face
                            face = new Face(features, details);
                            break;
                        }
                    }
                }
                finally
                {
                    // Always reset the extractor and release shared resources,
                    // even if an enrollment attempt threw
                    vlExtractor.Reset();
                    ReleaseResources();
                }

                // If there is no face, enrollment failed
                if (face == null)
                    return false;

                // Update video output (convert the captured image only once)
                Bitmap bitmap = image.ToBitmap();
                RecognitionResultUpdate(bitmap, face);

                // Get a rectangle a bit larger than the one where the face has been recognized,
                // because sometimes the face cannot be recognized again in the exact same area
                Rectangle rect = new Rectangle(
                    face.VlFace.Rectangle.X - face.VlFace.Rectangle.Width / 2,
                    face.VlFace.Rectangle.Y - face.VlFace.Rectangle.Height / 2,
                    face.VlFace.Rectangle.Width * 2,
                    face.VlFace.Rectangle.Height * 2);
                // Bitmap to draw the detected face region into
                Bitmap croppedBitmap = new Bitmap(rect.Width, rect.Height);
                // Dispose the Graphics object to avoid leaking a GDI handle
                using (Graphics g = Graphics.FromImage(croppedBitmap))
                {
                    g.DrawImage(bitmap, 0, 0, rect, GraphicsUnit.Pixel);
                }
                face.SetBitmap(croppedBitmap);
                face.Name = person;

                // Register and save db
                knownFaces.Add(face);
                SaveKnownFaces();
                LoadKnownFaces();
                return true;
            }
            finally
            {
                // Guarantee image disposal on every exit path
                if (image != null) image.Dispose();
                if (gray != null) gray.Dispose();
            }
        }
コード例 #2
0
		/// <summary>
		/// Updates the output video control
		/// </summary>
		/// <param name="image">Base Image</param>
		/// <param name="faces">List of faces detected</param>
		/// <summary>
		/// Updates the output video control with a single detected face
		/// </summary>
		/// <param name="image">Base image</param>
		/// <param name="face">Detected face; when null, nothing is updated</param>
		private void RecognitionResultUpdate(Bitmap image, Face face)
		{
			if (face == null)
				return;
			// Marshal to the UI thread when called from a worker thread
			if (this.InvokeRequired)
			{
				if (!this.IsHandleCreated || this.Disposing || this.IsDisposed)
					return;
				this.BeginInvoke(dlgRecognitionResultUpdate3, image, face);
				return;
			}
			// Wrap the single face in the array form the video control expects
			vcRecognitionResult.Image = image;
			vcRecognitionResult.Faces = new VleFace[] { face.VlFace };
			vcRecognitionResult.DetectionDetails = new VleDetectionDetails[] { face.DetectionDetails };
			// Show the field of view when the face carries one
			if (face.HasFOV)
				vcRecognitionResult.String = "FOV: (" + face.HFoV.ToString("0.00") + "," + face.VFoV.ToString("0.00") + ")";
		}
コード例 #3
0
		/// <summary>
		/// Updates the output video control
		/// </summary>
		/// <param name="image">Base Image</param>
		/// <param name="faces">List of faces detected</param>
		/// <summary>
		/// Updates the output video control with a list of detected faces
		/// </summary>
		/// <param name="image">Base Image</param>
		/// <param name="faces">List of faces detected; null or empty arrays are ignored</param>
		private void RecognitionResultUpdate(Bitmap image, Face[] faces)
		{
			// Nothing to show without faces (consistent with the single-face overload's null guard)
			if ((faces == null) || (faces.Length < 1))
				return;
			// Marshal to the UI thread when called from a worker thread
			if (this.InvokeRequired)
			{
				if (!this.IsHandleCreated || this.Disposing || this.IsDisposed)
					return;
				this.BeginInvoke(dlgRecognitionResultUpdate4, image, faces);
				return;
			}
			VleFace[] vlFaces = new VleFace[faces.Length];
			VleDetectionDetails[] vlDetails = new VleDetectionDetails[faces.Length];
			for (int i = 0; i < faces.Length; ++i)
			{
				// Tolerate null entries, leaving their slots at the default value
				if (faces[i] != null)
				{
					vlFaces[i] = faces[i].VlFace;
					vlDetails[i] = faces[i].DetectionDetails;
				}
			}
			vcRecognitionResult.Image = image;
			vcRecognitionResult.Faces = vlFaces;
			vcRecognitionResult.DetectionDetails = vlDetails;
			// Guard against a null first element before reading its field of view;
			// the loop above tolerated nulls but the original faces[0] access did not
			if ((faces[0] != null) && faces[0].HasFOV)
			{
				vcRecognitionResult.String = "(" + faces[0].HFoV.ToString("0.00") + "," + faces[0].VFoV.ToString("0.00") + ")";
			}
		}
コード例 #4
0
		/// <summary>
		/// Updates the output video control
		/// </summary>
		/// <param name="image">Base Image</param>
		/// <param name="faces">List of faces detected</param>
		/// <param name="details">Recognition details asociated to face detected</param>
		/// <summary>
		/// Updates the output video control with raw detection results
		/// </summary>
		/// <param name="image">Base image</param>
		/// <param name="faces">Faces detected</param>
		/// <param name="details">Recognition details associated with each detected face</param>
		private void RecognitionResultUpdate(Bitmap image, VleFace[] faces, VleDetectionDetails[] details)
		{
			// Marshal to the UI thread when called from a worker thread
			if (this.InvokeRequired)
			{
				if (!this.IsHandleCreated || this.Disposing || this.IsDisposed)
					return;
				this.BeginInvoke(dlgRecognitionResultUpdate2, image, faces, details);
				return;
			}
			// Already on the UI thread: push the results straight into the control
			vcRecognitionResult.Image = image;
			vcRecognitionResult.Faces = faces;
			vcRecognitionResult.DetectionDetails = details;
		}