public async Task CaptureFaceToFileAsync(StorageFile photoFile)
        {
            // Scale the frame down to a fixed height to speed up detection.
            const int height = 480;
            double    scale  = height / (double)_previewProperties.Height;

            // Capture a color (BGRA8) frame for saving
            var videoFrame = new VideoFrame(BitmapPixelFormat.Bgra8, (int)(_previewProperties.Width * scale), height);
            var frame      = await VideoCapture.GetPreviewFrameAsync(videoFrame);

            // Capture a grayscale (Gray8) frame for face detection
            var grayVideoFrame = new VideoFrame(BitmapPixelFormat.Gray8, (int)(_previewProperties.Width * scale), height);
            await VideoCapture.GetPreviewFrameAsync(grayVideoFrame);

            // Detect faces
            IList<DetectedFace> faces = null;

            if (FaceDetector.IsBitmapPixelFormatSupported(grayVideoFrame.SoftwareBitmap.BitmapPixelFormat))
            {
                faces = await _faceTracker.ProcessNextFrameAsync(grayVideoFrame);
            }
            else
            {
                throw new InvalidOperationException($"PixelFormat '{grayVideoFrame.SoftwareBitmap.BitmapPixelFormat}' is not supported by FaceDetector.");
            }

            if (faces.Any())
            {
                var mainFace   = faces.OrderByDescending(f => f.FaceBox.Height * f.FaceBox.Width).First();
                var faceBounds = GetFaceBoundsFromFrame(mainFace.FaceBox, 1);
                TryExtendFaceBounds(
                    videoFrame.SoftwareBitmap.PixelWidth,
                    videoFrame.SoftwareBitmap.PixelHeight,
                    Constants.FaceBoxRatio,
                    ref faceBounds);
                await SaveBoundedBoxToFileAsync(photoFile, frame.SoftwareBitmap, BitmapEncoder.BmpEncoderId, faceBounds);
            }
        }
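The helpers GetFaceBoundsFromFrame and TryExtendFaceBounds are not shown here. Below is a minimal sketch of what TryExtendFaceBounds might look like, assuming the bounds are a Windows.Graphics.Imaging.BitmapBounds and that Constants.FaceBoxRatio is a grow factor applied around the box center; both the signature and the semantics are assumptions, not the original implementation.

        // Hypothetical sketch: grow faceBounds by 'ratio' around its center,
        // then clamp the result so it stays inside the frame.
        private static void TryExtendFaceBounds(int frameWidth, int frameHeight, double ratio, ref BitmapBounds faceBounds)
        {
            uint extraW = (uint)(faceBounds.Width * (ratio - 1) / 2);
            uint extraH = (uint)(faceBounds.Height * (ratio - 1) / 2);

            uint x = faceBounds.X > extraW ? faceBounds.X - extraW : 0;
            uint y = faceBounds.Y > extraH ? faceBounds.Y - extraH : 0;

            faceBounds.X      = x;
            faceBounds.Y      = y;
            faceBounds.Width  = Math.Min(faceBounds.Width + 2 * extraW, (uint)frameWidth - x);
            faceBounds.Height = Math.Min(faceBounds.Height + 2 * extraH, (uint)frameHeight - y);
        }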
Example #2
        /// <summary>
        /// Detect faces and process them
        /// </summary>
        /// <param name="timer"></param>
        private async void ProcessCurrentVideoFrameAsync(ThreadPoolTimer timer)
        {
            // fill the frame
            await _mediaCapture.GetPreviewFrameAsync(_previewFrame);

            // collection for faces
            IList<DetectedFace> faces;

            if (FaceDetector.IsBitmapPixelFormatSupported(_previewFrame.SoftwareBitmap.BitmapPixelFormat))
            {
                // get detected faces on the frame
                faces = await _faceTracker.ProcessNextFrameAsync(_previewFrame);
            }
            else
            {
                throw new NotSupportedException($"PixelFormat {BitmapPixelFormat.Nv12} is not supported by FaceDetector.");
            }

            // get the size of frame webcam provided, we need it to scale image on the screen
            var previewFrameSize = new Size(_previewFrame.SoftwareBitmap.PixelWidth, _previewFrame.SoftwareBitmap.PixelHeight);

            ProcessFrameFaces(previewFrameSize, faces);

            // arrange the next processing time
            ThreadPoolTimer.CreateTimer(ProcessCurrentVideoFrameAsync, _frameProcessingTimerInterval);
        }
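Note that instead of a periodic timer, the callback above re-arms itself with a one-shot timer, so a new frame is never scheduled while the current one is still being processed. A minimal kickoff sketch, assuming _previewFrame and _frameProcessingTimerInterval are the fields used above and that the frame format and dimensions are chosen elsewhere (all assumptions):

        // Assumed one-time setup: allocate the reusable NV12 frame and start the chain.
        private void StartFaceTracking()
        {
            _previewFrame = new VideoFrame(BitmapPixelFormat.Nv12, 640, 480); // dimensions are an assumption
            _frameProcessingTimerInterval = TimeSpan.FromMilliseconds(66);    // ~15 fps
            ThreadPoolTimer.CreateTimer(ProcessCurrentVideoFrameAsync, _frameProcessingTimerInterval);
        }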
 /// <summary>
 /// Detects faces in a frame of a running video or stream, using a video frame object as a source.
 /// The detected faces are stored in the detectedFaces class member.
 ///
 /// Use this method if you want to continuously track faces in a stream or video.
 /// </summary>
 /// <param name="currentFrame">The video frame object representing the latest snapshot frame to detect the faces in.</param>
 public async void Track(VideoFrame currentFrame)
 {
     if (currentFrame != null && currentFrame.SoftwareBitmap.BitmapPixelFormat == BitmapPixelFormat.Nv12)
     {
         detectedFaces = await tracker.ProcessNextFrameAsync(currentFrame);
     }
 }
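A usage sketch for Track: the tracker field is created once with FaceTracker.CreateAsync, and frames are then fed in on a timer. The helper that fetches an NV12 preview frame and the 66 ms interval are assumptions.

 // Assumed setup: create the tracker once, then feed it frames periodically.
 tracker = await FaceTracker.CreateAsync();

 ThreadPoolTimer.CreatePeriodicTimer(async _ =>
 {
     // GetLatestNv12FrameAsync is a hypothetical helper returning the newest preview frame.
     VideoFrame frame = await GetLatestNv12FrameAsync();
     Track(frame);
 }, TimeSpan.FromMilliseconds(66));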
Example #4
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (captureManager.CameraStreamState != CameraStreamState.Streaming ||
                !frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try
            {
                IEnumerable<DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;

                using (VideoFrame currentFrame = new VideoFrame(InputPixelFormat, (int)videoProperties.Width, (int)videoProperties.Height))
                {
                    await captureManager.GetPreviewFrameAsync(currentFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(currentFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await faceTracker.ProcessNextFrameAsync(currentFrame);

                        if (FilterOutSmallFaces)
                        {
                            // We filter out small faces here.
                            faces = faces.Where(f => CoreUtil.IsFaceBigEnoughForDetection((int)f.FaceBox.Height, (int)videoProperties.Height));
                        }

                        NumFacesOnLastFrame = faces.Count();

                        if (EnableAutoCaptureMode)
                        {
                            UpdateAutoCaptureState(faces);
                        }

                        // Create our visualization using the frame dimensions and face results but run it on the UI thread.
                        var currentFrameSize = new Windows.Foundation.Size(currentFrame.SoftwareBitmap.PixelWidth, currentFrame.SoftwareBitmap.PixelHeight);

                        var rgbaBitmap = SoftwareBitmap.Convert(currentFrame.SoftwareBitmap, BitmapPixelFormat.Rgba8);

                        HandleFaces(currentFrameSize, faces, rgbaBitmap);
                    }
                }
            }
            catch (Exception)
            {
                // Face tracking failed for this frame; skip it and try again on the next timer tick.
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
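CoreUtil.IsFaceBigEnoughForDetection is not shown above. A plausible sketch, assuming the check is a minimum face-height-to-frame-height ratio (the threshold value is an assumption):

        // Hypothetical sketch: treat a face as big enough when its height is at
        // least some fraction of the frame height.
        public static bool IsFaceBigEnoughForDetection(int faceHeight, int frameHeight)
        {
            const double minRatio = 0.1; // assumed threshold

            if (frameHeight <= 0)
            {
                return false;
            }

            return faceHeight / (double)frameHeight >= minRatio;
        }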
Example #5
        public void ProcessFrame()
        {
            MediaFrameReference frame           = videoFrameProcessor.GetLatestFrame();
            VideoMediaFrame     videoMediaFrame = frame?.VideoMediaFrame;

            if (videoMediaFrame == null)
            {
                return;
            }
            // Validate that the incoming frame format is compatible with the FaceTracker
            bool isBitmapPixelFormatSupported = videoMediaFrame.SoftwareBitmap != null && FaceTracker.IsBitmapPixelFormatSupported(videoMediaFrame.SoftwareBitmap.BitmapPixelFormat);

            if (!isBitmapPixelFormatSupported)
            {
                return;
            }
            // Ask the FaceTracker to process this frame asynchronously
            IAsyncOperation<IList<DetectedFace>> processFrameTask = faceTracker.ProcessNextFrameAsync(videoMediaFrame.GetVideoFrame());

            try
            {
                IList<DetectedFace> faces = processFrameTask.GetResults();

                lock (@lock)
                {
                    if (faces.Count == 0)
                    {
                        ++numFramesWithoutFaces;

                        // The FaceTracker might lose track of faces for a few frames, for example,
                        // if the person momentarily turns their head away from the camera. To smooth
                        // out the tracking, we allow 30 video frames (~1 second) without faces before
                        // we say that we're no longer tracking any faces.
                        if (numFramesWithoutFaces > 30 && latestFaces.Any())
                        {
                            latestFaces.Clear();
                        }
                    }
                    else
                    {
                        numFramesWithoutFaces = 0;
                        latestFaces.Clear();
                        foreach (var face in faces)
                        {
                            latestFaces.Add(face.FaceBox);
                        }
                    }
                }
            }
            catch (Exception e)
            {
                // The task might be canceled if the face analysis failed.
                Debug.LogException(e);
            }
        }
Example #6
        private async Task ProcessVideoFrame()
        {
            using (VideoFrame videoFrame = new VideoFrame(_faceTrackerSupportedPixelFormat, (int)_cameraCapture.FrameWidth, (int)_cameraCapture.FrameHeight))
            {
                await _cameraCapture.MediaCapture.GetPreviewFrameAsync(videoFrame);

                var faces = await _faceTracker.ProcessNextFrameAsync(videoFrame);

                DisplayFaces(videoFrame.SoftwareBitmap, faces);
            }
        }
        private async Task ProcessCurrentVideoFrameAsync()
        {
            // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
            // GetPreviewFrame will convert the native webcam frame into this format.
            const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;

            IList<DetectedFace> faces;

            using (var previewFrame = new VideoFrame(InputPixelFormat, (int)_videoProperties.Width, (int)_videoProperties.Height))
            {
                try
                {
                    await _mediaCapture.GetPreviewFrameAsync(previewFrame);
                }
                catch (UnauthorizedAccessException)
                {
                    // Lost access to the camera
                    AbandonStreaming();
                    NavigateToPermissionsPage();
                    return;
                }
                catch (Exception exception)
                {
                    await DisplayMessage($"Error en GetPreviewFrameAsync: {exception.Message}");

                    return;
                }

                if (!FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                {
                    Console.WriteLine($"PixelFormat '{previewFrame.SoftwareBitmap.BitmapPixelFormat}' is not supported by FaceDetector");
                    return;
                }

                try
                {
                    faces = await _faceTracker.ProcessNextFrameAsync(previewFrame);
                }
                catch (Exception exception)
                {
                    await DisplayMessage($"Error al procesar el frame del reconocimiento facial: {exception.Message}");

                    return;
                }

                var previewFrameSize = new Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                var ignored          = Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    SetupVisualization(previewFrameSize, faces);
                });
            }
        }
Example #8
        public async void ProcessCurrentVideoFrame()
        {
            if (!_frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            VideoFrame currentFrame = await GetVideoFrameSafe();

            // Use FaceDetector.GetSupportedBitmapPixelFormats and IsBitmapPixelFormatSupported to dynamically
            // determine supported formats
            const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Nv12;

            if (currentFrame == null || currentFrame.SoftwareBitmap.BitmapPixelFormat != faceDetectionPixelFormat)
            {
                currentFrame?.Dispose();
                _frameProcessingSemaphore.Release();
                return;
            }

            try
            {
                IList<DetectedFace> detectedFaces = await _faceTracker.ProcessNextFrameAsync(currentFrame);

                if (detectedFaces.Count == 0)
                {
                    NoFaceDetected?.Invoke(this, null);
                }
                else if (detectedFaces.Count != _detectedFacesInLastFrame)
                {
                    OnPreAnalysis?.Invoke(this, null);

                    var output = await AnalysisFunction(currentFrame.SoftwareBitmap.ToByteArray());

                    UsersIdentified?.Invoke(this, output);
                }

                _detectedFacesInLastFrame = detectedFaces.Count;
            }
            catch (Exception ex)
            {
                // Face tracking failed
                Debug.WriteLine(ex);
            }
            finally
            {
                _frameProcessingSemaphore.Release();
            }

            currentFrame.Dispose();
        }
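As the comment above suggests, the detector's supported formats can be queried at run time instead of hard-coding NV12. A minimal sketch using the FaceTracker static helper (the fallback choice is an assumption):

        // Query the supported pixel formats and pick the first, falling back to NV12.
        private static BitmapPixelFormat PickFaceTrackingFormat()
        {
            IReadOnlyList<BitmapPixelFormat> supported = FaceTracker.GetSupportedBitmapPixelFormats();
            return supported.Count > 0 ? supported[0] : BitmapPixelFormat.Nv12;
        }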
        /// <summary>
        /// Grabs the current preview frame, runs face detection on it, and starts recording when a face is found.
        /// </summary>
        /// <param name="timer"></param>
        private async void CurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (!semaphore.Wait(0))
            {
                return;
            }

            try
            {
                IList<DetectedFace>     faces            = null;
                const BitmapPixelFormat inputPixelFormat = BitmapPixelFormat.Nv12;

                using (VideoFrame previewFrame = new VideoFrame(inputPixelFormat, 320, 240))
                {
                    await capture.GetPreviewFrameAsync(previewFrame);

                    // Run face detection
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                    else
                    {
                        throw new System.NotSupportedException("PixelFormat '" + inputPixelFormat.ToString() + "' is not supported by FaceDetector");
                    }

                    // Start recording once a face is detected
                    if (faces.Count != 0)
                    {
                        Debug.WriteLine("Found Face");
                        await startRecoding();
                    }
                }
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
            }
            finally
            {
                semaphore.Release();
            }
        }
Example #10
        private async Task ProcessVideoFrame()
        {
            using (VideoFrame videoFrame = new VideoFrame(faceTrackerSupportedPixelFormat,
                                                          (int)cameraCapture.FrameWidth, (int)cameraCapture.FrameHeight))
            {
                await cameraCapture.MediaCapture.GetPreviewFrameAsync(videoFrame);

                var faces = await faceTracker.ProcessNextFrameAsync(videoFrame);

                if (ledArray == null)
                {
                    DisplayFaces(videoFrame.SoftwareBitmap, faces);
                }
                else
                {
                    TrackFace(faces);
                }
            }
        }
Example #11
        /// <summary>
        /// Processes the current video frame: runs face tracking and pushes the converted frame to the UI.
        /// </summary>
        /// <param name="timer"></param>
        public async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            VideoFrame currentFrame = await GetLatestFrame(BitmapPixelFormat.Nv12);

            // Use FaceDetector.GetSupportedBitmapPixelFormats and IsBitmapPixelFormatSupported to dynamically
            // determine supported formats
            const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Nv12;

            if (currentFrame.SoftwareBitmap.BitmapPixelFormat != faceDetectionPixelFormat)
            {
                currentFrame.Dispose();
                frameProcessingSemaphore.Release();
                return;
            }

            try
            {
                detectedFaces = await faceTracker.ProcessNextFrameAsync(currentFrame);

                SoftwareBitmap currentFrameBGRA = SoftwareBitmap.Convert(currentFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8);

                //To modify the UI we have to run this on the UI Thread
                var ignored = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                {
                    ShowDetectedFaces(currentFrameBGRA);
                });
            }
            catch (Exception e)
            {
                System.Diagnostics.Debug.WriteLine(e.Message);
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }

            currentFrame.Dispose();
        }
Example #12
        public async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            VideoFrame currentFrame = await GetLatestFrame();

            // Use FaceDetector.GetSupportedBitmapPixelFormats and IsBitmapPixelFormatSupported to dynamically
            // determine supported formats
            const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Nv12;

            if (currentFrame.SoftwareBitmap.BitmapPixelFormat != faceDetectionPixelFormat)
            {
                currentFrame.Dispose();
                frameProcessingSemaphore.Release();
                return;
            }

            try
            {
                IList<DetectedFace> detectedFaces = await faceTracker.ProcessNextFrameAsync(currentFrame);

                var previewFrameSize = new Windows.Foundation.Size(currentFrame.SoftwareBitmap.PixelWidth, currentFrame.SoftwareBitmap.PixelHeight);
                var ignored          = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                {
                    this.SetupVisualization(previewFrameSize, detectedFaces);
                });
            }
            catch (Exception)
            {
                // Face tracking failed; skip this frame.
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }

            currentFrame.Dispose();
        }
    private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
    {
        if (m_mediaCapture == null)
        {
            return;
        }

        if (m_mediaCapture.CameraStreamState != CameraStreamState.Streaming)
        {
            return;
        }

        if (!m_faceProcessingSemaphore.Wait(0))
        {
            return;
        }

        IList<DetectedFace> faces = null;

        const BitmapPixelFormat inputPixelFormat = BitmapPixelFormat.Nv12;

        using (VideoFrame previewFrame = new VideoFrame(inputPixelFormat, (int)m_videoProperties.Width, (int)m_videoProperties.Height))
        {
            await m_mediaCapture.GetPreviewFrameAsync(previewFrame);

            if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
            {
                faces = await m_faceTracker.ProcessNextFrameAsync(previewFrame);
            }
        }

        // 'faces' stays null when the pixel format is unsupported, so guard before iterating.
        if (faces != null)
        {
            foreach (DetectedFace face in faces)
            {
                Debug.Log(string.Format("x={0}, y={1}, w={2}, h={3}", face.FaceBox.X, face.FaceBox.Y, face.FaceBox.Width, face.FaceBox.Height));
            }
        }

        m_faceProcessingSemaphore.Release();
    }
Example #14
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            // If state is not Streaming, return.
            if (_state != StreamingState.Streaming)
            {
                return;
            }

            // If there has a process still running, return.
            if (!_semaphoreSlim.Wait(0))
            {
                return;
            }

            const BitmapPixelFormat PixelFormat = BitmapPixelFormat.Nv12;

            try
            {
                using (VideoFrame currentFrame = new VideoFrame(PixelFormat, (int)_videoProperties.Width, (int)_videoProperties.Height))
                {
                    // Get the current preview frame from _mediaCapture and copy it into currentFrame.
                    await _mediaCapture.GetPreviewFrameAsync(currentFrame);

                    // Detect faces with _faceTracker.
                    IList<DetectedFace> builtinFaces = await _faceTracker.ProcessNextFrameAsync(currentFrame);

                    SoftwareBitmap tempBitmap = SoftwareBitmap.Convert(currentFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8);

                    if (builtinFaces.Count != 0)
                    {
                        var frameSize = new Size(currentFrame.SoftwareBitmap.PixelWidth, currentFrame.SoftwareBitmap.PixelHeight);
                        //await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        //{
                        //    ShowResult(frameSize, builtinFaces);
                        //});

                        // Get picture from videoframe.
                        IRandomAccessStream stream  = new InMemoryRandomAccessStream();
                        BitmapEncoder       encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);

                        encoder.SetSoftwareBitmap(tempBitmap);
                        await encoder.FlushAsync();

                        CustomFaceModel[] customFaces = await _faceApiHelper.GetDetectEmojiAsync(stream.AsStream());

                        CustomFaceEmojiModel customFaceEmojiModel = new CustomFaceEmojiModel();
                        EmojiNum             emojiNum = new EmojiNum();
                        float upperLeft = 0, upperRight = 0, bottomLeft = 0, bottomRight = 0, averageX = 0, averageY = 0;
                        foreach (var faceModel in customFaces)
                        {
                            averageX += faceModel.Left;
                            averageY += faceModel.Top;
                        }
                        averageX /= customFaces.Length;
                        averageY /= customFaces.Length;

                        for (int i = 0; i < customFaces.Length; i++)
                        {
                            // Score each face: negative emotions count against, positive ones count for.
                            emojiNum.Emoji = -1 * (customFaces[i].Anger + customFaces[i].Contempt + customFaces[i].Disgust + customFaces[i].Fear + customFaces[i].Sadness) + customFaces[i].Happiness + customFaces[i].Neutral + customFaces[i].Suprise;
                            EmojiNum model = new EmojiNum
                            {
                                Emoji = emojiNum.Emoji
                            };
                            customFaceEmojiModel.EmojiSum += model.Emoji;

                            // Accumulate the score into the quadrant this face occupies,
                            // relative to the average face position.
                            if (customFaces[i].Left < averageX && customFaces[i].Top > averageY)
                            {
                                upperLeft += emojiNum.Emoji;
                            }
                            else if (customFaces[i].Left < averageX && customFaces[i].Top < averageY)
                            {
                                bottomLeft += emojiNum.Emoji;
                            }
                            else if (customFaces[i].Left > averageX && customFaces[i].Top > averageY)
                            {
                                upperRight += emojiNum.Emoji;
                            }
                            else if (customFaces[i].Left > averageX && customFaces[i].Top < averageY)
                            {
                                bottomRight += emojiNum.Emoji;
                            }
                        }
                        customFaceEmojiModel.UpperLeft  /= upperLeft;
                        customFaceEmojiModel.ButtomLeft /= bottomLeft;
                        customFaceEmojiModel.UpperRight /= upperRight;
                        customFaceEmojiModel.ButtoRight /= bottomRight;

                        //CustomFaceEmojiModel customFaceEmojiModel = await _faceApiHelper.GetEmojiResult(customFaces);
                        await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                                  ShowFromFaceApi(frameSize, customFaces, emojiNum));

                        await _eventHubHelper.SendMessagesToEventHub(customFaceEmojiModel);
                    }
                    else
                    {
                        await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                                  PaintingCanvas.Children.Clear());
                    }
                }
            }
            catch (Microsoft.ProjectOxford.Face.FaceAPIException faceEx)
            {
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                          ShowErrorHelper.ShowDialog(faceEx.ErrorMessage, faceEx.ErrorCode));
            }
            catch (Exception ex)
            {
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                          ShowErrorHelper.ShowDialog(ex.Message));
            }
            finally
            {
                _semaphoreSlim.Release();
            }
        }
        /// <summary>
        /// Extracts a frame from the camera stream and detects if any faces are found. Used as a precursor to making an expensive API
        /// call to get proper face details.
        /// </summary>
        /// <remarks>
        /// Keep in mind this method is called from a Timer and not synchronized with the camera stream. Also, the processing time of FaceTracker
        /// will vary depending on the size of each frame and the number of faces being tracked. That is, a large image with several tracked faces may
        /// take longer to process.
        /// </remarks>
        private async Task <ApiRequestParameters> ProcessCurrentVideoFrameAsync()
        {
            // If a lock is being held it means we're still waiting for processing work on the previous frame to complete.
            // In this situation, don't wait on the semaphore but exit immediately.
            if (!frameProcessingSemaphore.Wait(0))
            {
                return null;
            }

            try
            {
                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (var previewFrame = new VideoFrame(InputPixelFormat, (int)videoProperties.Width, (int)videoProperties.Height))
                {
                    await mediaCapture.GetPreviewFrameAsync(previewFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (!FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        throw new NotSupportedException("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector");
                    }


                    var faces = await faceTracker.ProcessNextFrameAsync(previewFrame);

                    if (faces.Any())
                    {
                        // Found faces so create a bounding rectangle and store the parameters to make the API call and process the response.
                        using (var ms = new MemoryStream())
                        {
                            // It'll be faster to send a smaller rectangle of the faces found instead of the whole image. This is what we do here.
                            var encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, ms.AsRandomAccessStream());

                            // To use the encoder to resize we need to change the bitmap format. There might be a better way to do this.
                            var converted = SoftwareBitmap.Convert(previewFrame.SoftwareBitmap, BitmapPixelFormat.Rgba16);

                            encoder.SetSoftwareBitmap(converted);
                            //var bounds = boundingBoxCreator.BoundingBoxForFaces(faces, converted.PixelWidth, converted.PixelHeight);
                            //encoder.BitmapTransform.Bounds = bounds;
                            await encoder.FlushAsync();

                            LogStatusMessage($"Found face(s) on camera: {faces.Count}", StatusSeverity.Info, false);


                            return new ApiRequestParameters
                            {
                                Image = ms.ToArray(),
                                Faces = faces
                            };
                        }
                    }
                    return null;
                }
            }
            catch (Exception ex)
            {
                LogStatusMessage("Unable to process current frame: " + ex.ToString(), StatusSeverity.Error, false);
                return null;
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
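Per the remarks above, this method is driven by a timer that is not synchronized with the camera stream. A minimal kickoff sketch, assuming a ~66 ms interval, a frameProcessingTimer field, and a hypothetical SubmitToFaceApiAsync consumer of the returned parameters (all assumptions):

        // Assumed setup: poll the camera on a periodic thread-pool timer. Each tick
        // either exits quickly (semaphore already held) or processes one frame.
        private void StartFrameProcessingLoop()
        {
            frameProcessingTimer = ThreadPoolTimer.CreatePeriodicTimer(async _ =>
            {
                ApiRequestParameters parameters = await ProcessCurrentVideoFrameAsync();
                if (parameters != null)
                {
                    await SubmitToFaceApiAsync(parameters); // hypothetical consumer
                }
            }, TimeSpan.FromMilliseconds(66));
        }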
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            // If state is not Streaming, return.
            if (_state != StreamingState.Streaming)
            {
                return;
            }

            // If there has a process still running, return.
            if (!_semaphoreSlim.Wait(0))
            {
                return;
            }

            const BitmapPixelFormat PixelFormat = BitmapPixelFormat.Nv12;

            try
            {
                using (VideoFrame currentFrame = new VideoFrame(PixelFormat, (int)_videoProperties.Width, (int)_videoProperties.Height))
                {
                    // Get the current preview frame from _mediaCapture and copy it into currentFrame.
                    await _mediaCapture.GetPreviewFrameAsync(currentFrame);

                    // Detect faces with _faceTracker.
                    IList<DetectedFace> builtinFaces = await _faceTracker.ProcessNextFrameAsync(currentFrame);

                    SoftwareBitmap tempBitmap = SoftwareBitmap.Convert(currentFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8);

                    if (builtinFaces.Count != 0)
                    {
                        var frameSize = new Size(currentFrame.SoftwareBitmap.PixelWidth, currentFrame.SoftwareBitmap.PixelHeight);
                        await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            ShowFromBuiltIn(frameSize, builtinFaces);
                        });

                        // Get picture from videoframe.
                        IRandomAccessStream stream  = new InMemoryRandomAccessStream();
                        BitmapEncoder       encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);

                        encoder.SetSoftwareBitmap(tempBitmap);
                        await encoder.FlushAsync();

                        CustomFaceModel customFaces = await _faceApiHelper.GetIdentifySingleResultAsync(stream.AsStream());


                        if (customFaces != null)
                        {
                            await _dataHelper.ChangeAttendStatusAsync(customFaces.Name, true);

                            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                                      ShowLoginSuccess(customFaces));
                        }
                        //await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        //    ShowFromFaceApi(frameSize, customFaces));
                    }
                    else
                    {
                        await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                                  PaintingCanvas.Children.Clear());
                    }
                }
            }
            catch (Microsoft.ProjectOxford.Face.FaceAPIException faceEx)
            {
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                          ShowAlertHelper.ShowDialog(faceEx.ErrorMessage, faceEx.ErrorCode));
            }
            catch (Exception ex)
            {
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                          ShowAlertHelper.ShowDialog(ex.Message));
            }
            finally
            {
                _semaphoreSlim.Release();
            }
        }
Example #17
        async void ProcessCurrentVideoFrame(object sender, object e)
        {
            // If a lock is being held it means we're still waiting for processing work on the previous frame to complete.
            // In this situation, don't wait on the semaphore but exit immediately.
            if (!_isStreaming || !_frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try
            {
                using (var previewFrame = new VideoFrame(BitmapPixelFormat.Nv12,
                                                         (int)_videoProperties.Width,
                                                         (int)_videoProperties.Height))
                {
                    await _mediaManager.GetPreviewFrameAsync(previewFrame);

                    IList<DetectedFace> faces = null;

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await _faceTracker.ProcessNextFrameAsync(previewFrame);
                    }

                    // Create our visualization using the frame dimensions and face results, but run it on the UI thread.
                    var previewFrameSize = new Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, async () =>
                    {
                        SetupVisualization(previewFrameSize, faces);

                        if (_isProcessing)
                        {
                            return;
                        }

                        var emotions = await CaptureEmotionAsync();
                        if (emotions.IsNullOrEmpty() == false)
                        {
                            var mostProbable =
                                emotions.ToResults()
                                .Where(result => result != Result.Empty)
                                .FirstOrDefault();

                            if (mostProbable == null)
                            {
                                _messageLabel.Text = string.Empty;
                                _emoticon.Text     = string.Empty;
                            }
                            else
                            {
                                _emoticon.Text = Emoticons.From(mostProbable.Emotion);

                                var current = _messageLabel.Text;
                                var message = EmotionMessages.Messages[mostProbable.Emotion].RandomElement();
                                while (current == message)
                                {
                                    message = EmotionMessages.Messages[mostProbable.Emotion].RandomElement();
                                }
                                _messageLabel.Text = message;
                                await _speechEngine.SpeakAsync(message, _speaker);

                                ++_captureCounter;
                                if (_captureCounter >= MaxCaptureBeforeReset)
                                {
                                    await ChangeStreamStateAsync(false);
                                }
                            }
                        }
                    });
                }
            }
            catch (Exception ex) when(DebugHelper.IsHandled <MainPage>(ex))
            {
            }
            finally
            {
                _frameProcessingSemaphore.Release();
            }
        }
        private async void ProcessVideoFrame(ThreadPoolTimer timer)
        {
            if (!frameProcessingSimaphore.Wait(0))
            {
                // We are already doing something
                return;
            }

            try
            {
                IEnumerable<DetectedFace> faces = null;

                const BitmapPixelFormat inputPixelFormat = BitmapPixelFormat.Nv12;

                Face[] globalFaces = null;

                using (var previewFrame = new VideoFrame(inputPixelFormat, (int)videoProperties.Width, (int)videoProperties.Height))
                {
                    await mediaCapture.GetPreviewFrameAsync(previewFrame);

                    if (FaceTracker.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await faceTracker.ProcessNextFrameAsync(previewFrame);

                        if (!facesExistInFrame)
                        {
                            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                // Enable the Train feature and disable the other buttons
                                PageYes.Visibility = Visibility.Collapsed;
                                PageNo.Visibility  = Visibility.Collapsed;
                                TrainMe.Visibility = Visibility.Visible;
                            });
                        }

                        if (faces.Any())
                        {
                            if (!facesExistInFrame)
                            {
                                facesExistInFrame = true;

                                await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                                {
                                    // Enable the Yes/No buttons.  Disable the Train Button
                                    PageYes.Visibility = Visibility.Visible;
                                    PageNo.Visibility  = Visibility.Visible;
                                    TrainMe.Visibility = Visibility.Collapsed;
                                });

                                await ShowMessage("Will you help me?  If so, make sure I can see you face and click \"Yse\"", 1);
                            }

                            if (faces.Count() > 1)
                            {
                                await ShowMessage("Can only identify when multiple faces are visible.");

                                await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                                {
                                    // Disable the Yes/No buttons.
                                    PageYes.Visibility = Visibility.Collapsed;
                                    PageNo.Visibility  = Visibility.Collapsed;
                                });
                            }
                            else
                            {
                                await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                                {
                                    // Enable the Yes/No Buttons
                                    PageYes.Visibility = Visibility.Visible;
                                    PageNo.Visibility  = Visibility.Visible;
                                    TrainMe.Visibility = Visibility.Collapsed;
                                });

                                var captureStream = new MemoryStream();
                                await mediaCapture.CapturePhotoToStreamAsync(ImageEncodingProperties.CreatePng(), captureStream.AsRandomAccessStream());

                                captureStream.AsRandomAccessStream().Seek(0);

                                // ask the face api what it sees
                                // See: https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/howtodetectfacesinimage
                                globalFaces = await faceServiceClient.DetectAsync(captureStream, true, true, requiredFaceAttributes);

                                if (random.Next(3) == 0 && imageNeededCount > 0)
                                {
                                    imageNeededCount--;
                                    SavePicture(mediaCapture);

                                    if (imageNeededCount == 0)
                                    {
                                        await ShowMessage("Ok, you have been recognized...", 1000);

                                        AddToFaceIdList();
                                    }
                                }
                            }

                            var previewFrameSize = new Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                ShowFaceTracking(faces, previewFrameSize);
                                ShowIdentificationiStatus(globalFaces);
                            });

                        }
                        else
                        {
                            facesExistInFrame = false;
                            // reset the stuff because there are no faces to analyze.

                            await ShowMessage(String.Empty);

                            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                ShowFaceTracking(faces, new Size());
                            });
                        }

                    }
                }
            }
            catch (Exception ex)
            {
                // Face detection failed for some reason; log it and let the next timer tick retry.
                Debug.WriteLine(ex);
            }
            finally
            {
                frameProcessingSimaphore.Release();
            }
        }