Code example #1
        private static void RunFaceDetectionsPipeline()
        {
            // TODO
            // Complete Pipeline
            var images = Directory.GetFiles("../../Data/Images");

            var destination = "./Images/Output";

            if (!Directory.Exists(destination))
            {
                Directory.CreateDirectory(destination);
            }

            // TODO:
            //      try different concurrent implementations
            //      look at the file /ImageDetection/FaceDetection.cs; there are
            //      some methods that you can use
            //
            //      implement the pipeline (the components used in /ImageDetection/FaceDetection.cs)
            foreach (var image in images)
            {
                Console.WriteLine($"Processing {Path.GetFileNameWithoutExtension(image)}");

                FaceDetection.DetectFaces(image, destination);
            }
        }
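The TODO above asks for a concurrent version of this loop. The sketch below is one possible take, not code from the project: it rebuilds the same work as a two-stage TPL Dataflow pipeline (it needs the System.Threading.Tasks.Dataflow package and a using System.Threading.Tasks.Dataflow; directive) and assumes FaceDetection.DetectFaces(image, destination) keeps the synchronous signature used above and is safe to call from several threads at once; the method name and the degree of parallelism are illustrative choices.

        // Illustrative only: a concurrent variant of the pipeline using TPL Dataflow.
        // Assumes FaceDetection.DetectFaces(string, string) is thread-safe.
        private static void RunFaceDetectionsPipelineDataflow()
        {
            var images = Directory.GetFiles("../../Data/Images");
            var destination = "./Images/Output";

            if (!Directory.Exists(destination))
            {
                Directory.CreateDirectory(destination);
            }

            // Stage 1: log which file is being processed and pass it on.
            var logBlock = new TransformBlock<string, string>(image =>
            {
                Console.WriteLine($"Processing {Path.GetFileNameWithoutExtension(image)}");
                return image;
            });

            // Stage 2: run face detection on several images at a time.
            var detectBlock = new ActionBlock<string>(
                image => FaceDetection.DetectFaces(image, destination),
                new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = Environment.ProcessorCount });

            logBlock.LinkTo(detectBlock, new DataflowLinkOptions { PropagateCompletion = true });

            foreach (var image in images)
            {
                logBlock.Post(image);
            }

            // Signal that no more images are coming and wait for the pipeline to drain.
            logBlock.Complete();
            detectBlock.Completion.Wait();
        }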
Code example #2
File: Eyes.cs Project: aleksandartraja/robbie
        /// <summary>
        /// Delegate to define the actions taken upon the photo capture of the camera class.
        /// It uses the memory stream of the photo capture to identify the faces in the image, using the local face detection instance.
        /// </summary>
        /// <param name="memoryStream">The memory stream containing the captured image.</param>
        private async Task IdentifyFaces_Delegate(MemoryStream memoryStream)
        {
            var faces = await faceDetection.DetectFaces(memoryStream);

            if (faces.Length > 0)
            {
                // you could call the face API once for all faces together
                // but there's no way to map the found persons to the detected face rectangles
                // so we have to call the API per detected face rectangle
                foreach (var face in faces)
                {
                    try
                    {
                        var persons = await faceDetection.IdentifyFace(new[] { face });

                        // set identities when there is at least one person within the viewport
                        if (persons.Count > 0)
                        {
                            var person = persons.FirstOrDefault();
                            if (person != null)
                            {
                                identityInterpolation.IdentifiedFace(face, person);
                            }
                        }

                        // remove the current identity when there is no person identified
                        else
                        {
                            currentIdentity = null;
                        }
                    }
                    catch (FaceAPIException)
                    {
                        // if the person group is not yet trained or face identification fails for any other reason, continue
                    }
                }

                // after adding new identified faces, switch to interact with the possibly new person within the viewport,
                // so make sure to get the new largest face object (event won't be fired again!)
                var trackedIdentity = identityInterpolation.GetLargestFace();
                if (trackedIdentity != null)
                {
                    // if there is no current identity, handle the new person as found
                    if (currentIdentity == null)
                    {
                        OnPersonFound(trackedIdentity);
                    }
                    else
                    {
                        // if there is an identity currently, check if it has actually changed before calling person found
                        if (currentIdentity.PersonId != trackedIdentity.PersonId)
                        {
                            OnPersonFound(trackedIdentity);
                        }
                    }
                }
            }
        }
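One note on the empty catch in the loop above: because the Face API is called once per detected face rectangle, a frame with many faces can also run into the service's request rate limit. The fragment below is only a sketch of how that case could be told apart from the person-group-not-trained case; it assumes the FaceAPIException type used here (from the older Microsoft.ProjectOxford.Face client) exposes an HttpStatus property, and the one-second back-off is an arbitrary illustrative value.

                    try
                    {
                        var persons = await faceDetection.IdentifyFace(new[] { face });
                        // ... handle persons exactly as in the loop above ...
                    }
                    catch (FaceAPIException ex) when ((int)ex.HttpStatus == 429)
                    {
                        // assumption: FaceAPIException exposes HttpStatus; 429 means the rate limit was exceeded
                        // back off briefly before moving on to the next face
                        await Task.Delay(TimeSpan.FromSeconds(1));
                    }
                    catch (FaceAPIException)
                    {
                        // person group not yet trained, or any other identification failure: skip this face
                    }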