/// <summary>
        /// Runs face tracking on the current preview frame; when a face is found,
        /// draws its position and evaluates the frame against the Custom Vision model.
        /// </summary>
        /// <param name="timer">Timer object invoking this call</param>
        private async void CurrentVideoFrame(ThreadPoolTimer timer)
        {
            //Skip this tick if the previous frame is still being processed
            if (!semaphore.Wait(0))
            {
                return;
            }

            try
            {
                IList<DetectedFace> faces = null;

                //FaceTracker only supports the Nv12 format
                using (VideoFrame previewFrame = new VideoFrame(BitmapPixelFormat.Nv12, 320, 240))
                    //The model exported from Custom Vision expects 227x227 input in BGRA8 format
                    using (VideoFrame inputFrame = new VideoFrame(BitmapPixelFormat.Bgra8, 227, 227))
                    {
                        //Get the video frames; the formats differ, so fetch each frame separately
                        await mediaCapture.GetPreviewFrameAsync(previewFrame);

                        await mediaCapture.GetPreviewFrameAsync(inputFrame);


                        if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                        {
                            //Run face detection
                            faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);
                        }
                        else
                        {
                            throw new NotSupportedException("PixelFormat 'Nv12' is not supported by FaceDetector");
                        }

                        //Face recognition runs only when face tracking has found a face.
                        if (faces.Count > 0)
                        {
                            //Get the size of the frame used for detection
                            var previewFrameSize = new Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);

                            //Face tracking runs off the UI thread, so switch to the UI thread to draw the face positions
                            var ignored = this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                FaceDraw(previewFrameSize, faces, previewFrame.SoftwareBitmap);
                            });

                            //Face recognition
                            await EvaluateVideoFrameAsync(inputFrame);
                        }
                    }
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
            }
            finally
            {
                semaphore.Release();
            }
        }
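
The snippet above assumes members initialized elsewhere (semaphore, faceTracker, mediaCapture and the timer that drives it). A minimal sketch of that setup, with names matching the snippet and the ~15 fps interval used by the samples further down:

        private readonly SemaphoreSlim semaphore = new SemaphoreSlim(1);
        private FaceTracker faceTracker;
        private ThreadPoolTimer frameTimer;

        private async Task InitFaceTrackingAsync()
        {
            // FaceTracker creation is comparatively slow; create it once and reuse it.
            this.faceTracker = await FaceTracker.CreateAsync();

            // Tick roughly 15 times per second; CurrentVideoFrame itself drops
            // overlapping ticks via semaphore.Wait(0).
            this.frameTimer = ThreadPoolTimer.CreatePeriodicTimer(
                this.CurrentVideoFrame, TimeSpan.FromMilliseconds(66));
        }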
        public async Task CaptureFaceToFileAsync(StorageFile photoFile)
        {
            // Get video frame
            const int height = 480;
            double    scale  = height / (double)_previewProperties.Height;

            // Capture a color (BGRA8) frame for saving
            var videoFrame = new VideoFrame(BitmapPixelFormat.Bgra8, (int)(_previewProperties.Width * scale), height);
            var frame      = await VideoCapture.GetPreviewFrameAsync(videoFrame);

            // Capture a gray (Gray8) frame for face detection
            var grayVideoFrame = new VideoFrame(BitmapPixelFormat.Gray8, (int)(_previewProperties.Width * scale), height);
            await VideoCapture.GetPreviewFrameAsync(grayVideoFrame);

            // Detect faces
            IList<DetectedFace> faces = null;

            if (FaceDetector.IsBitmapPixelFormatSupported(grayVideoFrame.SoftwareBitmap.BitmapPixelFormat))
            {
                faces = await _faceTracker.ProcessNextFrameAsync(grayVideoFrame);
            }

            // faces stays null when the pixel format is unsupported; surface that as an error.
            if ((faces ?? throw new InvalidOperationException()).Any())
            {
                var mainFace   = faces.OrderByDescending(f => f.FaceBox.Height * f.FaceBox.Width).First();
                var faceBounds = GetFaceBoundsFromFrame(mainFace.FaceBox, 1);
                TryExtendFaceBounds(
                    videoFrame.SoftwareBitmap.PixelWidth,
                    videoFrame.SoftwareBitmap.PixelHeight,
                    Constants.FaceBoxRatio,
                    ref faceBounds);
                await SaveBoundedBoxToFileAsync(photoFile, frame.SoftwareBitmap, BitmapEncoder.BmpEncoderId, faceBounds);
            }
        }
        private async Task<Rect> CreateCropRegion(SoftwareBitmap bitmap)
        {
            const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;

            if (!FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
            {
                return new Rect(0, 0, bitmap.PixelWidth, bitmap.PixelHeight);
            }

            using var detectorInput = SoftwareBitmap.Convert(bitmap, InputPixelFormat);

            var faces = await faceDetector.DetectFacesAsync(detectorInput);

            var first = faces.FirstOrDefault();

            if (first == null)
            {
                return new Rect(0, 0, bitmap.PixelWidth, bitmap.PixelHeight);
            }

            var faceBox = first.FaceBox;
            int margin  = 150;

            int x = Math.Max(0, (int)faceBox.X - margin);
            int y = Math.Max(0, (int)faceBox.Y - margin);

            int width  = Math.Min(bitmap.PixelWidth - x, (int)faceBox.Width + (margin * 2));
            int height = Math.Min(bitmap.PixelHeight - y, (int)faceBox.Height + (margin * 2));

            return new Rect(x, y, width, height);
        }
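
CreateCropRegion only computes the rectangle; a hedged usage sketch that applies it while saving (BitmapEncoder.BitmapTransform.Bounds crops during encode; the method and file names are illustrative):

        private async Task SaveCroppedFaceAsync(SoftwareBitmap bitmap, StorageFile file)
        {
            Rect crop = await CreateCropRegion(bitmap);

            using (IRandomAccessStream stream = await file.OpenAsync(FileAccessMode.ReadWrite))
            {
                // Bounds crops during encode; assumes a Bgra8 source bitmap.
                BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);
                encoder.SetSoftwareBitmap(bitmap);
                encoder.BitmapTransform.Bounds = new BitmapBounds
                {
                    X      = (uint)crop.X,
                    Y      = (uint)crop.Y,
                    Width  = (uint)crop.Width,
                    Height = (uint)crop.Height
                };
                await encoder.FlushAsync();
            }
        }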
Example #4
 public static void ImageToFace()
 {
     //Console.WriteLine("Detecting face..");
     Task.Run(async () =>
     {
         var faceDetector   = await FaceDetector.CreateAsync();
         var screenBitmap   = GetBitmapFromScreen();
         var softwareBitmap = await GetSoftwareBitmapFromBitmap(screenBitmap);
         if (!FaceDetector.IsBitmapPixelFormatSupported(softwareBitmap.BitmapPixelFormat))
         {
             //Console.WriteLine("Converting to supported bitmap pixel format..");
             //Console.WriteLine("srcBitmap Width={0}, Height={1}", screenBitmap.Width, screenBitmap.Height);
             //Console.WriteLine("dstBitmap Width={0}, Height={1}", softwareBitmap.PixelWidth, softwareBitmap.PixelHeight);
             softwareBitmap = SoftwareBitmap.Convert(softwareBitmap, FaceDetector.GetSupportedBitmapPixelFormats().First());
             //Console.WriteLine("Converted successfully");
         }
         //Console.WriteLine(screenBitmap.PixelFormat);
         //Console.WriteLine(softwareBitmap.BitmapPixelFormat);
         screenBitmap = await GetBitmapFromSoftwareBitmap(softwareBitmap);
         //Console.WriteLine(screenBitmap.PixelFormat);
         //Console.WriteLine(softwareBitmap.BitmapPixelFormat);
         using (var g = Graphics.FromImage(screenBitmap))
         {
             var detectedFaces = await faceDetector.DetectFacesAsync(softwareBitmap);
             //Console.WriteLine("Detected faces: {0}", detectedFaces.Count);
             foreach (var detectedFace in detectedFaces)
             {
                 var facebox = detectedFace.FaceBox;
                 g.DrawRectangle(Pens.Red, new Rectangle((int)facebox.X, (int)facebox.Y, (int)facebox.Width, (int)facebox.Height));
                 //Console.WriteLine("Face at X={0}, Y={1}, Width={2}, Height={3}", facebox.X, facebox.Y, facebox.Width, facebox.Height);
             }
         }
         //screenBitmap.Save("screenbitmap" + DateTime.Now.Ticks + ".png", ImageFormat.Png);
     }).Wait();
 }
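
The GetSoftwareBitmapFromBitmap helper called above is not shown; a plausible sketch (an assumption, not the original) round-trips the GDI+ bitmap through an in-memory PNG stream:

 private static async Task<SoftwareBitmap> GetSoftwareBitmapFromBitmap(System.Drawing.Bitmap bitmap)
 {
     using (var ms = new System.IO.MemoryStream())
     {
         // Encode to PNG in memory, then let BitmapDecoder produce a SoftwareBitmap.
         bitmap.Save(ms, System.Drawing.Imaging.ImageFormat.Png);
         ms.Position = 0;
         var decoder = await BitmapDecoder.CreateAsync(ms.AsRandomAccessStream());
         return await decoder.GetSoftwareBitmapAsync();
     }
 }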
Example #5
        internal void SetSample(SoftwareBitmap input)
        {
            if (!Activated)
            {
                return;
            }

            if (Sample == null)
            {
                lock (SampleLock)
                {
                    // Re-check under the lock so two racing threads don't both cache a frame.
                    if (Sample != null)
                    {
                        return;
                    }

                    if (!SupportedBitmapPixelFormat.HasValue)
                    {
                        if (FaceDetector.IsBitmapPixelFormatSupported(input.BitmapPixelFormat))
                        {
                            SupportedBitmapPixelFormat = input.BitmapPixelFormat;
                        }
                        else
                        {
                            SupportedBitmapPixelFormat = FaceDetector.GetSupportedBitmapPixelFormats().First();
                        }
                    }

                    Sample = SoftwareBitmap.Copy(input);
                }
            }
        }
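
A hedged sketch of the consuming side SetSample implies: take the cached sample under the lock, convert it to the negotiated format if needed, then detect (the method name is an assumption):

        internal async Task<IList<DetectedFace>> DetectOnSampleAsync(FaceDetector detector)
        {
            SoftwareBitmap sample;
            lock (SampleLock)
            {
                sample = Sample;
                Sample = null; // let SetSample cache the next frame
            }

            if (sample == null)
            {
                return new List<DetectedFace>();
            }

            // Convert to the format negotiated in SetSample before detecting.
            if (sample.BitmapPixelFormat != SupportedBitmapPixelFormat.Value)
            {
                sample = SoftwareBitmap.Convert(sample, SupportedBitmapPixelFormat.Value);
            }

            return await detector.DetectFacesAsync(sample);
        }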
Example #6
        /// <summary>
        /// Detect faces and process them
        /// </summary>
        /// <param name="timer"></param>
        private async void ProcessCurrentVideoFrameAsync(ThreadPoolTimer timer)
        {
            // fill the frame
            await _mediaCapture.GetPreviewFrameAsync(_previewFrame);

            // collection for faces
            IList<DetectedFace> faces;

            if (FaceDetector.IsBitmapPixelFormatSupported(_previewFrame.SoftwareBitmap.BitmapPixelFormat))
            {
                // get detected faces on the frame
                faces = await _faceTracker.ProcessNextFrameAsync(_previewFrame);
            }
            else
            {
                throw new NotSupportedException($"PixelFormat {BitmapPixelFormat.Nv12} is not supported by FaceDetector.");
            }

            // get the size of frame webcam provided, we need it to scale image on the screen
            var previewFrameSize = new Size(_previewFrame.SoftwareBitmap.PixelWidth, _previewFrame.SoftwareBitmap.PixelHeight);

            ProcessFrameFaces(previewFrameSize, faces);

            // arrange the next processing time
            ThreadPoolTimer.CreateTimer(ProcessCurrentVideoFrameAsync, _frameProcessingTimerInterval);
        }
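
This variant reuses a preallocated _previewFrame and reschedules itself with a one-shot timer. The assumed setup, with _videoProperties standing in for the preview stream's encoding properties (both names are assumptions):

        private VideoFrame _previewFrame;

        private void StartFrameProcessing()
        {
            // Reuse one preallocated NV12 frame across ticks instead of allocating per frame.
            _previewFrame = new VideoFrame(BitmapPixelFormat.Nv12,
                (int)_videoProperties.Width, (int)_videoProperties.Height);

            // One-shot timer; the handler reschedules itself after each frame.
            ThreadPoolTimer.CreateTimer(ProcessCurrentVideoFrameAsync, _frameProcessingTimerInterval);
        }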
        private async void ProcessCurrentVideoFrame(DispatcherTimer timer)
        {
            if (captureManager.CameraStreamState != Windows.Media.Devices.CameraStreamState.Streaming)
            {
                return;
            }
            if (!await frameProcessingSemaphore.WaitAsync(250))
            {
                return;
            }

            try
            {
                IEnumerable<DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.captureManager.GetPreviewFrameAsync(previewFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);

                        if (this.FilterOutSmallFaces)
                        {
                            // We filter out small faces here.
                            faces = faces.Where(f => CoreUtil.IsFaceBigEnoughForDetection((int)f.FaceBox.Height, (int)this.videoProperties.Height));
                        }

                        this.NumFacesOnLastFrame = faces.Count();

                        if (this.EnableAutoCaptureMode)
                        {
                            this.UpdateAutoCaptureState(faces);
                        }

                        // Create our visualization using the frame dimensions and face results but run it on the UI thread.
                        var previewFrameSize = new Windows.Foundation.Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                        var ignored          = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            this.ShowFaceTrackingVisualization(previewFrameSize, faces);
                        });
                    }
                }
            }
            catch (Exception x)
            {
                Debug.WriteLine(x.Message);
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
Example #8
        private async Task<IList<DetectedFace>> DetectFaces(SoftwareBitmap inputBitmap)
        {
            if (!FaceDetector.IsBitmapPixelFormatSupported(inputBitmap.BitmapPixelFormat))
            {
                inputBitmap = SoftwareBitmap.Convert(inputBitmap, faceDetectorSupportedPixelFormat);
            }

            return await faceDetector.DetectFacesAsync(inputBitmap);
        }
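
The faceDetector and faceDetectorSupportedPixelFormat fields above are assumed to be initialized once, along these lines:

        private FaceDetector faceDetector;
        private BitmapPixelFormat faceDetectorSupportedPixelFormat;

        private async Task InitFaceDetectorAsync()
        {
            // Create the detector once; pick any format it reports as supported
            // as the fallback conversion target.
            faceDetector = await FaceDetector.CreateAsync();
            faceDetectorSupportedPixelFormat = FaceDetector.GetSupportedBitmapPixelFormats().First();
        }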
Example #9
        /// <summary>
        /// Initializes a new MediaCapture instance and starts the Preview streaming to the CamPreview UI element.
        /// </summary>
        /// <returns>Async Task object returning true if initialization and streaming were successful and false if an exception occurred.</returns>
        //private async Task<bool> StartWebcamStreaming()
        //{
        //    bool successful = true;

        //    try
        //    {
        //        this.mediaCapture = new MediaCapture();

        //        // For this scenario, we only need Video (not microphone) so specify this in the initializer.
        //        // NOTE: the appxmanifest only declares "webcam" under capabilities and if this is changed to include
        //        // microphone (default constructor) you must add "microphone" to the manifest or initialization will fail.
        //        MediaCaptureInitializationSettings settings = new MediaCaptureInitializationSettings();
        //        settings.StreamingCaptureMode = StreamingCaptureMode.Video;
        //        await this.mediaCapture.InitializeAsync(settings);
        //        this.mediaCapture.Failed += this.MediaCapture_CameraStreamFailed;

        //        // Cache the media properties as we'll need them later.
        //        var deviceController = this.mediaCapture.VideoDeviceController;
        //        this.videoProperties = deviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

        //        // Immediately start streaming to our CaptureElement UI.
        //        // NOTE: CaptureElement's Source must be set before streaming is started.
        //        this.CamPreview.Source = this.mediaCapture;
        //        await this.mediaCapture.StartPreviewAsync();

        //        // Ensure the Semaphore is in the signalled state.
        //        this.frameProcessingSemaphore.Release();

        //        // Use a 66 millisecond interval for our timer, i.e. 15 frames per second
        //        TimeSpan timerInterval = TimeSpan.FromMilliseconds(66);
        //        this.frameProcessingTimer = Windows.System.Threading.ThreadPoolTimer.CreatePeriodicTimer(new Windows.System.Threading.TimerElapsedHandler(ProcessCurrentVideoFrame), timerInterval);
        //    }
        //    catch (System.UnauthorizedAccessException)
        //    {
        //        // If the user has disabled their webcam this exception is thrown; provide a descriptive message to inform the user of this fact.
        //        //this.rootPage.NotifyUser("Webcam is disabled or access to the webcam is disabled for this app.\nEnsure Privacy Settings allow webcam usage.", NotifyType.ErrorMessage);

        //        successful = false;
        //    }
        //    catch (Exception ex)
        //    {
        //        //this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
        //        successful = false;
        //    }

        //    return successful;
        //}

        /// <summary>
        /// Safely stops webcam streaming (if running) and releases MediaCapture object.
        /// </summary>
        //private async void ShutdownWebCam()
        //{
        //    if (this.frameProcessingTimer != null)
        //    {
        //        this.frameProcessingTimer.Cancel();
        //    }

        //    if (this.mediaCapture != null)
        //    {
        //        if (this.mediaCapture.CameraStreamState == Windows.Media.Devices.CameraStreamState.Streaming)
        //        {
        //            try
        //            {
        //                await this.mediaCapture.StopPreviewAsync();
        //            }
        //            catch (Exception)
        //            {
        //                ;   // Since we're going to destroy the MediaCapture object there's nothing to do here
        //            }
        //        }
        //        this.mediaCapture.Dispose();
        //    }

        //    this.frameProcessingTimer = null;
        //    this.CamPreview.Source = null;
        //    this.mediaCapture = null;
        //    this.CameraStreamingButton.IsEnabled = true;

        //}

        /// <summary>
        /// This method is invoked by a ThreadPoolTimer to execute the FaceTracker and Visualization logic at approximately 15 frames per second.
        /// </summary>
        /// <remarks>
        /// Keep in mind this method is called from a Timer and not synchronized with the camera stream. Also, the processing time of FaceTracker
        /// will vary depending on the size of each frame and the number of faces being tracked. That is, a large image with several tracked faces may
        /// take longer to process.
        /// </remarks>
        /// <param name="timer">Timer object invoking this call</param>
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            //if (this.currentState != ScenarioState.Streaming)
            //{
            //    return;
            //}

            // If a lock is being held it means we're still waiting for processing work on the previous frame to complete.
            // In this situation, don't wait on the semaphore but exit immediately.
            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try
            {
                IList<DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                    else
                    {
                        throw new System.NotSupportedException("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector");
                    }

                    // Create our visualization using the frame dimensions and face results but run it on the UI thread.
                    var previewFrameSize = new Windows.Foundation.Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);

                    var ignored = rootPage.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                    {
                        this.SetupVisualization(previewFrameSize, faces);
                    });
                }
            }
            catch (Exception ex)
            {
                //var ignored = rootPage.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                //{
                //    this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
                //});
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
Example #10
        private async Task<IList<DetectedFace>> DetectFaces(SoftwareBitmap inputBitmap)
        {
            var converted = inputBitmap;

            if (!FaceDetector.IsBitmapPixelFormatSupported(inputBitmap.BitmapPixelFormat))
            {
                converted = SoftwareBitmap.Convert(inputBitmap, _faceDectorSupportedPixelFormat);
            }
            return await _faceDetector.DetectFacesAsync(converted);
        }
Example #11
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (captureManager.CameraStreamState != CameraStreamState.Streaming ||
                !frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try
            {
                IEnumerable<DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;

                using (VideoFrame currentFrame = new VideoFrame(InputPixelFormat, (int)videoProperties.Width, (int)videoProperties.Height))
                {
                    await captureManager.GetPreviewFrameAsync(currentFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(currentFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await faceTracker.ProcessNextFrameAsync(currentFrame);

                        if (FilterOutSmallFaces)
                        {
                            // We filter out small faces here.
                            faces = faces.Where(f => CoreUtil.IsFaceBigEnoughForDetection((int)f.FaceBox.Height, (int)videoProperties.Height));
                        }

                        NumFacesOnLastFrame = faces.Count();

                        if (EnableAutoCaptureMode)
                        {
                            UpdateAutoCaptureState(faces);
                        }

                        // Create our visualization using the frame dimensions and face results but run it on the UI thread.
                        var currentFrameSize = new Windows.Foundation.Size(currentFrame.SoftwareBitmap.PixelWidth, currentFrame.SoftwareBitmap.PixelHeight);

                        var rgbaBitmap = SoftwareBitmap.Convert(currentFrame.SoftwareBitmap, BitmapPixelFormat.Rgba8);

                        HandleFaces(currentFrameSize, faces, rgbaBitmap);
                    }
                }
            }
            catch (Exception)
            {
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
Example #12
        public static async Task<IList<DetectedFace>> FaceDetectAsync(this SoftwareBitmap source)
        {
            var dest = source;
            // NOTE: creating a FaceDetector is relatively expensive; cache it if calling per frame.
            var detector = await FaceDetector.CreateAsync();

            if (!FaceDetector.IsBitmapPixelFormatSupported(dest.BitmapPixelFormat))
            {
                dest = SoftwareBitmap.Convert(dest, BitmapPixelFormat.Gray8);
            }

            return await detector.DetectFacesAsync(dest);
        }
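
Usage is then a one-liner from any async method; note that, as written, the extension creates a fresh FaceDetector on every call, which is convenient for one-off images but costly per frame:

        IList<DetectedFace> faces = await softwareBitmap.FaceDetectAsync();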
        private async Task ProcessCurrentVideoFrameAsync()
        {
            // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
            // GetPreviewFrame will convert the native webcam frame into this format.
            const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;

            IList<DetectedFace> faces;

            using (var previewFrame = new VideoFrame(InputPixelFormat, (int)_videoProperties.Width, (int)_videoProperties.Height))
            {
                try
                {
                    await _mediaCapture.GetPreviewFrameAsync(previewFrame);
                }
                catch (UnauthorizedAccessException)
                {
                    // Lost access to the camera
                    AbandonStreaming();
                    NavigateToPermissionsPage();
                    return;
                }
                catch (Exception exception)
                {
                    await DisplayMessage($"Error en GetPreviewFrameAsync: {exception.Message}");

                    return;
                }

                if (!FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                {
                    Console.WriteLine($"PixelFormat '{previewFrame.SoftwareBitmap.BitmapPixelFormat}' is not supported by FaceDetector");
                    return;
                }

                try
                {
                    faces = await _faceTracker.ProcessNextFrameAsync(previewFrame);
                }
                catch (Exception exception)
                {
                    await DisplayMessage($"Error al procesar el frame del reconocimiento facial: {exception.Message}");

                    return;
                }

                var previewFrameSize = new Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                var ignored          = Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                {
                    SetupVisualization(previewFrameSize, faces);
                });
            }
        }
Example #14
        /// <summary>
        /// Captures a single frame from the running webcam stream and executes the FaceDetector on the image. If successful calls SetupVisualization to display the results.
        /// </summary>
        /// <returns>Async Task object returning true if the capture was successful and false if an exception occurred.</returns>
        private async Task<bool> TakeSnapshotAndFindFaces()
        {
            bool successful = true;

            try
            {
                if (this.currentState != ScenarioState.Streaming)
                {
                    return false;
                }

                WriteableBitmap      displaySource = null;
                IList<DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        String filename = saveSoftwareBitmap(previewFrame.SoftwareBitmap);
                        faces = await this.faceDetector.DetectFacesAsync(previewFrame.SoftwareBitmap);
                    }
                    else
                    {
//                        this.rootPage.NotifyUser("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector", NotifyType.ErrorMessage);
                    }

                    // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer.
                    // Note that WriteableBitmap doesn't support NV12 and we have to convert it to 32-bit BGRA.
                    using (SoftwareBitmap convertedSource = SoftwareBitmap.Convert(previewFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8))
                    {
                        displaySource = new WriteableBitmap(convertedSource.PixelWidth, convertedSource.PixelHeight);
                        convertedSource.CopyToBuffer(displaySource.PixelBuffer);
                    }

                    // Create our display using the available image and face results.
                    this.SetupVisualization(displaySource, faces);
                }
            }
            catch (Exception ex)
            {
//                this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
                successful = false;
            }

            return successful;
        }
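
The saveSoftwareBitmap helper called above is not shown; an async sketch of what it might do (assumed, not the original), converting the NV12 frame to BGRA8 for PNG encoding:

        private async Task<string> SaveSoftwareBitmapAsync(SoftwareBitmap bitmap)
        {
            string filename = $"frame-{DateTime.Now.Ticks}.png";
            StorageFile file = await ApplicationData.Current.LocalFolder.CreateFileAsync(
                filename, CreationCollisionOption.ReplaceExisting);

            using (IRandomAccessStream stream = await file.OpenAsync(FileAccessMode.ReadWrite))
            // PNG encoding expects BGRA8, so convert the NV12 preview frame first.
            using (SoftwareBitmap converted = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Bgra8))
            {
                BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.PngEncoderId, stream);
                encoder.SetSoftwareBitmap(converted);
                await encoder.FlushAsync();
            }

            return filename;
        }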
Example #15
        /// <summary>
        /// This method is called to execute the FaceTracker and Visualization logic at each timer tick.
        /// </summary>
        /// <remarks>
        /// Keep in mind this method is called from a Timer and not synchronized with the camera stream. Also, the processing time of FaceTracker
        /// will vary depending on the size of each frame and the number of faces being tracked. That is, a large image with several tracked faces may
        /// take longer to process.
        /// </remarks>
        private async Task ProcessCurrentVideoFrameAsync()
        {
            // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
            // GetPreviewFrame will convert the native webcam frame into this format.
            const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;

            using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
            {
                try
                {
                    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);
                }
                catch (UnauthorizedAccessException)
                {
                    // Lost access to the camera.
                    AbandonStreaming();
                    return;
                }
                catch (Exception)
                {
                    this.rootPage.NotifyUser($"PreviewFrame with format '{InputPixelFormat}' is not supported by your Webcam", NotifyType.ErrorMessage);
                    return;
                }

                // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                if (!FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                {
                    this.rootPage.NotifyUser($"PixelFormat '{previewFrame.SoftwareBitmap.BitmapPixelFormat}' is not supported by FaceDetector", NotifyType.ErrorMessage);
                    return;
                }

                IList<DetectedFace> faces;
                try
                {
                    faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);
                }
                catch (Exception ex)
                {
                    this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
                    return;
                }

                // Create our visualization using the frame dimensions and face results but run it on the UI thread.
                var previewFrameSize = new Windows.Foundation.Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                var ignored          = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                {
                    this.SetupVisualization(previewFrameSize, faces);
                });
            }
        }
Example #16
        public async void ProcessFrame(FrameData frame)
        {
            if (callbacks.Count == 0 || busy)
            {
                return;
            }
            else
            {
                busy = true;
            }

            try
            {
                var bitmap = frame.bitmap;

                if (!FaceDetector.IsBitmapPixelFormatSupported(bitmap.BitmapPixelFormat))
                {
                    bitmap = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Gray8);
                }

                var detectedFaces = await faceDetector.DetectFacesAsync(bitmap);

                int frameKey = -1;

                if (detectedFaces.Count > 0)
                {
                    frameKey = SceneCameraManager.Inst.AddFrameToCache(frame);
                }

                ProjectRuntime.Inst.DispatchRuntimeCode(() => {
                    var jsImg = JavaScriptValue.CreateObject();
                    jsImg.SetProperty(JavaScriptPropertyId.FromString("id"), JavaScriptValue.FromInt32(frameKey), true);
                    Native.JsSetObjectBeforeCollectCallback(jsImg, IntPtr.Zero, jsObjectCallback);

                    var faces    = JavaScriptValue.CreateArray(0);
                    var pushFunc = faces.GetProperty(JavaScriptPropertyId.FromString("push"));
                    foreach (var face in detectedFaces)
                    {
                        var pos    = GetEstimatedPositionFromFaceBounds(face.FaceBox, frame.bitmap);
                        var jsFace = JavaScriptContext.RunScript($"new Face(new Position({pos.X}, {pos.Y}, {pos.Z}), {0});");
                        jsFace.SetProperty(JavaScriptPropertyId.FromString("frame"), jsImg, true);
                        jsFace.SetProperty(JavaScriptPropertyId.FromString("bounds"), face.FaceBox.ToJavaScriptValue(), true);
                        pushFunc.CallFunction(faces, jsFace);
                    }
                    foreach (var callback in callbacks)
                    {
                        callback.CallFunction(callback, faces);
                    }
                });
            }
            finally
            {
                // Reset the flag even if detection throws, so processing isn't disabled forever.
                busy = false;
            }
        }
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (captureManager.CameraStreamState != Windows.Media.Devices.CameraStreamState.Streaming ||
                !frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try
            {
                IEnumerable<DetectedFace> faces = null;

                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.captureManager.GetPreviewFrameAsync(previewFrame);

                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);

                        if (this.FilterOutSmallFaces)
                        {
                            faces = faces.Where(f => CoreUtil.IsFaceBigEnoughForDetection((int)f.FaceBox.Height, (int)this.videoProperties.Height));
                        }

                        this.NumFacesOnLastFrame = faces.Count();

                        if (this.EnableAutoCaptureMode)
                        {
                            this.UpdateAutoCaptureState(faces);
                        }

                        var previewFrameSize = new Windows.Foundation.Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                        var ignored          = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            this.ShowFaceTrackingVisualization(previewFrameSize, faces);
                        });
                    }
                }
            }
            catch (Exception)
            {
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
Example #18
        private async Task<BitmapBounds?> FindFace(SoftwareBitmap bitmap)
        {
            if (!FaceDetector.IsBitmapPixelFormatSupported(bitmap.BitmapPixelFormat))
            {
                bitmap = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Gray8);
            }

            var faces = await faceDetector.DetectFacesAsync(bitmap);

            if (faces.Count != 1)
            {
                return null;
            }

            return faces[0].FaceBox;
        }
Example #19
        public async Task<IList<DetectedFace>> DetectFaces(Stream fileStream)
        {
            var stream        = fileStream.AsRandomAccessStream();
            var bitmapDecoder = await BitmapDecoder.CreateAsync(stream);

            using SoftwareBitmap bitmap = await bitmapDecoder.GetSoftwareBitmapAsync();

            var bmp = FaceDetector.IsBitmapPixelFormatSupported(bitmap.BitmapPixelFormat)
                ? bitmap : SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Gray8);

            var faceDetector = await FaceDetector.CreateAsync();

            var detectedFaces = await faceDetector.DetectFacesAsync(bmp);

            return detectedFaces;
        }
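
Example call from an async method (the file name is illustrative):

        using (var fileStream = System.IO.File.OpenRead("photo.jpg"))
        {
            IList<DetectedFace> faces = await DetectFaces(fileStream);
            Console.WriteLine($"Found {faces.Count} face(s)");
        }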
Example #20
        private async void DetectFaces()
        {
            if (file != null)
            {
                // Open the image file and decode the bitmap into memory.
                // We'll need to make 2 bitmap copies: one for the FaceDetector and another to display.
                using (IRandomAccessStream fileStream = await file.OpenAsync(FileAccessMode.Read))
                {
                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

                    BitmapTransform transform = this.ComputeScalingTransformForSourceImage(decoder);

                    using (SoftwareBitmap originalBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage))
                    {
                        // We need to convert the image into a format that's compatible with FaceDetector.
                        // Gray8 should be a good type but verify it against FaceDetector’s supported formats.
                        const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                        if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                        {
                            using (detectorInput = SoftwareBitmap.Convert(originalBitmap, InputPixelFormat))
                            {
                                // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer.
                                displaySource = new WriteableBitmap(originalBitmap.PixelWidth, originalBitmap.PixelHeight);
                                originalBitmap.CopyToBuffer(displaySource.PixelBuffer);

                                NotifyUser("Detecting...", NotifyType.StatusMessage);

                                // Initialize our FaceDetector and execute it against our input image.
                                // NOTE: FaceDetector initialization can take a long time, and in most cases
                                // you should create a member variable and reuse the object.
                                // However, for simplicity in this scenario we instantiate a new instance each time.
                                FaceDetector detector = await FaceDetector.CreateAsync();

                                faces = await detector.DetectFacesAsync(detectorInput);

                                // Create our display using the available image and face results.
                                DrawDetectedFaces(displaySource, faces);
                            }
                        }
                        else
                        {
                            NotifyUser("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector", NotifyType.ErrorMessage);
                        }
                    }
                }
            }
        }
        public async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            IList<DetectedFace> detectedFaces = null;

            try
            {
                const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Nv12;

                using (VideoFrame previewFrame = new VideoFrame(faceDetectionPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);

                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        detectedFaces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                    else
                    {
                        // Don't release here; the finally block below releases the semaphore.
                        return;
                    }

                    var previewFrameSize = new Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                    var ignored          = this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        this.SetupVisualization(previewFrameSize, detectedFaces);
                    });
                }
            }
            catch (Exception)
            {
                // Face tracking failed
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
        /// <summary>
        /// Runs face tracking on the current preview frame and starts recording
        /// when a face is detected.
        /// </summary>
        /// <param name="timer">Timer object invoking this call</param>
        private async void CurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (!semaphore.Wait(0))
            {
                return;
            }

            try
            {
                IList<DetectedFace>     faces            = null;
                const BitmapPixelFormat inputPixelFormat = BitmapPixelFormat.Nv12;

                using (VideoFrame previewFrame = new VideoFrame(inputPixelFormat, 320, 240))
                {
                    await capture.GetPreviewFrameAsync(previewFrame);

                    //Run face detection
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                    else
                    {
                        throw new System.NotSupportedException("PixelFormat '" + inputPixelFormat.ToString() + "' is not supported by FaceDetector");
                    }

                    //Start recording when a face is detected
                    if (faces.Count != 0)
                    {
                        Debug.WriteLine("Found Face");
                        await startRecoding();
                    }
                }
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
            }
            finally
            {
                semaphore.Release();
            }
        }
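
startRecoding is defined elsewhere; a minimal sketch of what it might do with the same MediaCapture instance (file name and encoding profile are assumptions):

        private async Task startRecoding()
        {
            // Requires the Videos Library capability in the app manifest.
            StorageFile file = await KnownFolders.VideosLibrary.CreateFileAsync(
                "face-triggered.mp4", CreationCollisionOption.GenerateUniqueName);
            MediaEncodingProfile profile = MediaEncodingProfile.CreateMp4(VideoEncodingQuality.Auto);
            await capture.StartRecordToStorageFileAsync(profile, file);
        }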
        private async void _timer_Tick(object sender, object e)
        {
            try
            {
                this.FaceCanvas.Children.Clear();
                IEnumerable<DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, 1280, 720))
                {
                    await this._mediaCapture.GetPreviewFrameAsync(previewFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                }
                if (faces != null)
                {
                    foreach (DetectedFace face in faces)
                    {
                        Face.Margin = new Thickness(face.FaceBox.X, face.FaceBox.Y, 0, 0);

                        //faceBorder.ShowFaceRectangle(0, 0, (uint)(face.FaceBox.Width), (uint)(face.FaceBox.Height ));
                        FaceText.Text = $"{face.FaceBox.X}, {face.FaceBox.Y}";
                    }
                }


                PicBtn.Content = DateTime.Now.ToString();
                await Task.Delay(50);
            }
            catch (Exception)
            {
            }
        }
    private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
    {
        if (m_mediaCapture == null)
        {
            return;
        }

        if (m_mediaCapture.CameraStreamState != CameraStreamState.Streaming)
        {
            return;
        }

        if (!m_faceProcessingSemaphore.Wait(0))
        {
            return;
        }

        IList<DetectedFace> faces = null;

        const BitmapPixelFormat inputPixelFormat = BitmapPixelFormat.Nv12;

        using (VideoFrame previewFrame = new VideoFrame(inputPixelFormat, (int)m_videoProperties.Width, (int)m_videoProperties.Height))
        {
            await m_mediaCapture.GetPreviewFrameAsync(previewFrame);

            if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
            {
                faces = await m_faceTracker.ProcessNextFrameAsync(previewFrame);
            }
        }

        // faces stays null when the pixel format is unsupported, so guard the loop.
        if (faces != null)
        {
            foreach (DetectedFace face in faces)
            {
                Debug.Log(string.Format("x={0}, y={1}, w={2}, h={3}", face.FaceBox.X, face.FaceBox.Y, face.FaceBox.Width, face.FaceBox.Height));
            }
        }

        m_faceProcessingSemaphore.Release();
    }
Example #25
        private async void OpenImg_Click(object sender, RoutedEventArgs e)
        {
            IList<DetectedFace> faces         = null;
            SoftwareBitmap       detectorInput = null;
            WriteableBitmap      displaySource = null;

            try
            {
                FileOpenPicker photoPicker = new FileOpenPicker();
                photoPicker.ViewMode = PickerViewMode.Thumbnail;
                photoPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary;
                photoPicker.FileTypeFilter.Add(".jpg");
                photoPicker.FileTypeFilter.Add(".jpeg");
                photoPicker.FileTypeFilter.Add(".png");
                photoPicker.FileTypeFilter.Add(".bmp");
                photoFile = await photoPicker.PickSingleFileAsync();

                if (photoFile == null)
                {
                    return;
                }

                using (IRandomAccessStream fileStream = await photoFile.OpenAsync(Windows.Storage.FileAccessMode.Read))
                {
                    BitmapImage bitmapImage = new BitmapImage();
                    bitmapImage.SetSource(fileStream);
                    sourceImg.Source = bitmapImage;

                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

                    BitmapTransform transform = this.ComputeScalingTransformForSourceImage(decoder);

                    using (SoftwareBitmap originalBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage))
                    {
                        // FaceDetector can operate directly on Gray8 bitmaps
                        const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                        if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                        {
                            using (detectorInput = SoftwareBitmap.Convert(originalBitmap, InputPixelFormat))
                            {
                                // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer.
                                displaySource = new WriteableBitmap(originalBitmap.PixelWidth, originalBitmap.PixelHeight);
                                originalBitmap.CopyToBuffer(displaySource.PixelBuffer);
                                FaceDetector detector = await FaceDetector.CreateAsync();  // in production, create once and reuse the detector

                                faces = await detector.DetectFacesAsync(detectorInput);

                                // Create our display using the available image and face results.
                                this.SetupVisualization(displaySource, faces);
                            }
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                this.ClearVisualization();
            }
        }
Example #26
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (this.currentState != ScenarioState.Streaming)
            {
                return;
            }

            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }


            try
            {
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;

                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);


                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        var previewFrameSize = new Windows.Foundation.Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                        var ignored          = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.High, () =>
                        {
                            this.SetupVisualization(previewFrameSize, faces);


                            //this.imagenCompletar.Source = bitmpatSRC;
                            //bitmpatSRC.SetBitmapAsync(previewFrameBMO);
                        });
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);

                        if (faces.Count != 0 && IdentidadEncontrada == "")
                        {
                            string nombre   = "";
                            int    contador = 0;
                            foreach (var caraEncontrad in faces)
                            {
                                var cara = caraEncontrad.FaceBox.ToString();

                                nombre = await ObtenerIdentidad();

                                contador           += 1;
                                IdentidadEncontrada = nombre;
                                identidades.Identidad(new GenericEventArgs <string>(IdentidadEncontrada));
                            }
                        }
                    }
                    else
                    {
                        throw new System.NotSupportedException("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector");
                    }
                }
            }
            catch (Exception ex)
            {
                var ignored = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                {
                    //this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
                });
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
        /// <summary>
        /// Loads an image file (selected by the user) and runs the FaceDetector on the loaded bitmap. If successful calls SetupVisualization to display the results.
        /// </summary>
        /// <param name="sender">Button user clicked</param>
        /// <param name="e">Event data</param>
        private async void OpenFile_Click(object sender, RoutedEventArgs e)
        {
            SoftwareBitmap detectorInput = null;

            try
            {
                FileOpenPicker photoPicker = new FileOpenPicker();
                photoPicker.ViewMode = PickerViewMode.Thumbnail;
                photoPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary;
                photoPicker.FileTypeFilter.Add(".jpg");
                photoPicker.FileTypeFilter.Add(".jpeg");
                photoPicker.FileTypeFilter.Add(".png");
                photoPicker.FileTypeFilter.Add(".bmp");

                StorageFile photoFile = await photoPicker.PickSingleFileAsync();

                if (photoFile == null)
                {
                    return;
                }

                this.ClearVisualization();
                //this.rootPage.NotifyUser("Opening...", NotifyType.StatusMessage);

                // Open the image file and decode the bitmap into memory.
                // We'll need to make 2 bitmap copies: one for the FaceDetector and another to display.
                using (IRandomAccessStream fileStream = await photoFile.OpenAsync(Windows.Storage.FileAccessMode.Read))
                {
                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

                    BitmapTransform transform = this.ComputeScalingTransformForSourceImage(decoder);

                    using (SoftwareBitmap originalBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage))
                    {
                        // We need to convert the image into a format that's compatible with FaceDetector.
                        // Gray8 should be a good type but verify it against FaceDetector’s supported formats.
                        const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                        if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                        {
                            using (detectorInput = SoftwareBitmap.Convert(originalBitmap, InputPixelFormat))
                            {
                                // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer.
                                this.displaySource = new WriteableBitmap(originalBitmap.PixelWidth, originalBitmap.PixelHeight);
                                originalBitmap.CopyToBuffer(displaySource.PixelBuffer);

                                //this.rootPage.NotifyUser("Detecting...", NotifyType.StatusMessage);

                                // Initialize our FaceDetector and execute it against our input image.
                                // NOTE: FaceDetector initialization can take a long time, and in most cases
                                // you should create a member variable and reuse the object.
                                // However, for simplicity in this scenario we instantiate a new instance each time.
                                FaceDetector detector = await FaceDetector.CreateAsync();

                                this.faces = await detector.DetectFacesAsync(detectorInput);

                                // Create our display using the available image and face results.
                                this.SetupVisualization(displaySource, faces);
                            }
                        }
                        else
                        {
                            //this.rootPage.NotifyUser("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector", NotifyType.ErrorMessage);
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                this.ClearVisualization();
                //this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
            }
        }
Example #28
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            // If a lock is being held it means we're still waiting for processing work on the previous frame to complete.
            // In this situation, don't wait on the semaphore but exit immediately.
            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try
            {
                IList <DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);

                    //Setting the format of the picture to send to cognitive services
                    var imageEncodingProp = ImageEncodingProperties.CreateJpeg();

                    var stream = new InMemoryRandomAccessStream();

                    //Capture the photo and store it in main memory
                    await mediaCapture.CapturePhotoToStreamAsync(imageEncodingProp, stream);

                    stream.Seek(0);

                    //Make a copy of the byte stream so it can be sent to a second cognitive service
                    var age_stream = stream.CloneStream();

                    //Get the emotions of the detected faces
                    var emotions = await GetEmotions(stream.AsStreamForRead());

                    //Get the gender and age of the detected faces
                    var ageandgender = await GetFaces(age_stream.AsStreamForRead());

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        //Detect faces locally with the .NET media analysis APIs; no internet connection is needed here
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                    else
                    {
                        throw new System.NotSupportedException("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector");
                    }

                    // Create our visualization using the frame dimensions and face results but run it on the UI thread.
                    var previewFrameSize = new Windows.Foundation.Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                    var ignored          = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                    {
                        this.SetupVisualization(previewFrameSize, faces, emotions, ageandgender);
                    });
                }
            }
            catch (Exception ex)
            {
                // Log and swallow per-frame failures so the timer callback keeps running.
                Debug.WriteLine(ex.Message);
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
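
GetEmotions and GetFaces are not included in this listing either. A minimal sketch of GetEmotions against the Azure Face API detect endpoint (the region, the key placeholder, and the raw-JSON return type are assumptions; the real method presumably deserializes the response into typed results before SetupVisualization consumes it):

        // Hedged sketch, not the listing's actual implementation.
        // Requires System.Net.Http and System.Net.Http.Headers.
        private static async Task<string> GetEmotions(Stream imageStream)
        {
            using (var client = new HttpClient())
            using (var content = new StreamContent(imageStream))
            {
                // Placeholder key and region; substitute your own Face API resource values.
                client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", "<your-face-api-key>");
                content.Headers.ContentType = new MediaTypeHeaderValue("application/octet-stream");

                var uri = "https://westus.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceAttributes=emotion";
                HttpResponseMessage response = await client.PostAsync(uri, content);
                return await response.Content.ReadAsStringAsync();
            }
        }

GetFaces would follow the same pattern with returnFaceAttributes=age,gender.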
Example #29
        private async Task Capture()
        {
            if (_waiting)
            {
                return;
            }

            Stream imageStream = await _camera.CaptureImageAsync();

            if (imageStream == null)
            {
                Log("Capture failed.");
                return;
            }

            MemoryStream bitmapStream = new MemoryStream();

            imageStream.CopyTo(bitmapStream);
            SoftwareBitmap image = await _camera.ConvertStreamToBitmap(bitmapStream);

            SoftwareBitmap convertedImage = SoftwareBitmap.Convert(image, BitmapPixelFormat.Nv12);

            if (!FaceDetector.IsBitmapPixelFormatSupported(convertedImage.BitmapPixelFormat))
            {
                Log($"Unsupported pixel format! ({convertedImage.BitmapPixelFormat})");
                return;
            }

            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, async () => {
                var imgSrc = new SoftwareBitmapSource();
                await imgSrc.SetBitmapAsync(image);
                imgCapture.Source = imgSrc;
            });


            var fd = await FaceDetector.CreateAsync();

            var faces = await fd.DetectFacesAsync(convertedImage);

            if (faces.Count == 0)
            {
                // no faces, nothing to do
                Log("No faces detected.", false);
                return;
            }

            Log($"{faces.Count} faces.", false);

            _waiting = true; // block any other processing

            if (_config.EnableUpload)
            {
                // face detected locally, send to Storage
                imageStream.Seek(0, SeekOrigin.Begin);
                string fileName = await StorageService.UploadImageAsync(imageStream);

                Log($"Sent to processing.");

                int serverFacesDetected;
                while ((serverFacesDetected = await CheckService.FacesDetectedAsync(fileName)) == -1)
                {
                    await Task.Delay(TimeSpan.FromSeconds(2));
                }

                if (serverFacesDetected > 0)
                {
                    // something detected on server, activate delay
                    Log($"Faces found on server. Waiting {_vm.Delay} seconds.");
                    await Task.Delay(TimeSpan.FromSeconds(_vm.Delay));
                }
                else
                {
                    Log("No faces found on server. Skipping delay.");
                }
            }

            imageStream.Dispose();
            bitmapStream.Dispose();

            _waiting = false;
        }
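
_camera.ConvertStreamToBitmap is not shown here. A plausible sketch, assuming it simply decodes the encoded image into a BGRA8 SoftwareBitmap; note the rewind, since bitmapStream is positioned at its end after the CopyTo call above:

        // Plausible sketch (the real camera helper is not in this listing).
        public async Task<SoftwareBitmap> ConvertStreamToBitmap(MemoryStream stream)
        {
            // Rewind: the caller filled this stream with CopyTo, which leaves the position at the end.
            stream.Seek(0, SeekOrigin.Begin);

            BitmapDecoder decoder = await BitmapDecoder.CreateAsync(stream.AsRandomAccessStream());
            return await decoder.GetSoftwareBitmapAsync(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);
        }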
        /// <summary>
        /// Render ObjectDetector skill results for one CCTV stream
        /// </summary>
        /// <param name="frame"></param>
        /// <param name="CCTVIndex"></param>
        /// <returns></returns>
        private async Task DisplayFrameAndResultAsync(VideoFrame frame, int CCTVIndex)
        {
            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, async () =>
            {
                try
                {
                    SoftwareBitmap savedBmp = null;
                    if (frame.SoftwareBitmap != null)
                    {
                        await m_processedBitmapSource[CCTVIndex].SetBitmapAsync(frame.SoftwareBitmap);
                        savedBmp = frame.SoftwareBitmap;
                    }
                    else
                    {
                        var bitmap = await SoftwareBitmap.CreateCopyFromSurfaceAsync(frame.Direct3DSurface, BitmapAlphaMode.Ignore);
                        await m_processedBitmapSource[CCTVIndex].SetBitmapAsync(bitmap);
                        savedBmp = bitmap;
                    }

                    // Retrieve and filter results if requested
                    IReadOnlyList <ObjectDetectorResult> objectDetections = m_binding.DetectedObjects;
                    if (m_objectKinds?.Count > 0)
                    {
                        objectDetections = objectDetections.Where(det => m_objectKinds.Contains(det.Kind)).ToList();
                    }
                    if (objectDetections != null)
                    {
                        // Update displayed results
                        m_bboxRenderer[CCTVIndex].Render(objectDetections);
                        bool PersonDetected = false;
                        int PersonCount     = 0;
                        var rects           = new List <Rect>();
                        foreach (var obj in objectDetections)
                        {
                            if (obj.Kind.ToString().ToLower() == "person")
                            {
                                PersonCount++;
                                PersonDetected = true;
                                rects.Add(obj.Rect);
                            }
                        }
                        if (PersonDetected)
                        {
                            bool KeepDistance = false;
                            if ((bool)ChkSocialDistancing.IsChecked)
                            {
                                //make sure there is more than 1 person
                                if (rects.Count > 1)
                                {
                                    var res = SocialDistanceHelpers.Detect(rects.ToArray());
                                    if (res.Result)
                                    {
                                        KeepDistance = true;
                                        m_bboxRenderer[CCTVIndex].DistanceLineRender(res.Lines);
                                        await speech.Read($"Please keep distance in {DataConfig.RoomName[CCTVIndex]}");
                                    }
                                }
                                else
                                {
                                    m_bboxRenderer[CCTVIndex].ClearLineDistance();
                                }
                            }
                            else
                            {
                                m_bboxRenderer[CCTVIndex].ClearLineDistance();
                            }
                            var msg = $"I saw {PersonCount} person in {DataConfig.RoomName[CCTVIndex]}";
                            if ((bool)ChkMode.IsChecked)
                            {
                                // Random.Next's upper bound is exclusive; use Sounds.Count so the last sound can play.
                                PlaySound(Sounds[Rnd.Next(0, Sounds.Count)]);
                            }
                            else if (!KeepDistance)
                            {
                                await speech.Read(msg);
                            }
                            if ((bool)ChkPatrol.IsChecked)
                            {
                                await NotificationService.SendMail("Person Detected in BMSpace", msg, DataConfig.MailTo, DataConfig.MailFrom);
                                await NotificationService.SendSms(DataConfig.SmsTo, msg);
                            }
                            bool IsFaceDetected = false;
                            if ((bool)ChkDetectMask.IsChecked)
                            {
                                SoftwareBitmap softwareBitmapInput = frame.SoftwareBitmap;
                                // Retrieve a SoftwareBitmap to run face detection
                                if (softwareBitmapInput == null)
                                {
                                    if (frame.Direct3DSurface == null)
                                    {
                                        throw new ArgumentNullException("An invalid input frame has been bound");
                                    }
                                    softwareBitmapInput = await SoftwareBitmap.CreateCopyFromSurfaceAsync(frame.Direct3DSurface);
                                }
                                // We need to convert the image into a format that's compatible with FaceDetector.
                                // Gray8 should be a good type but verify it against FaceDetector’s supported formats.
                                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                                if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                                {
                                    using (var detectorInput = SoftwareBitmap.Convert(softwareBitmapInput, InputPixelFormat))
                                    {
                                        // Run face detection and retrieve face detection result
                                        var faceDetectionResult = await m_faceDetector.DetectFacesAsync(detectorInput);

                                        // If a face is found, update face rectangle feature
                                        if (faceDetectionResult.Count > 0)
                                        {
                                            IsFaceDetected = true;
                                            // Retrieve the face bound and enlarge it by half the face width in every direction, clamping to the frame dimensions
                                            BitmapBounds faceBound = faceDetectionResult[0].FaceBox;
                                            var additionalOffset   = faceBound.Width / 2;
                                            faceBound.X            = Math.Max(0, faceBound.X - additionalOffset);
                                            faceBound.Y            = Math.Max(0, faceBound.Y - additionalOffset);
                                            faceBound.Width        = (uint)Math.Min(faceBound.Width + 2 * additionalOffset, softwareBitmapInput.PixelWidth - faceBound.X);
                                            faceBound.Height       = (uint)Math.Min(faceBound.Height + 2 * additionalOffset, softwareBitmapInput.PixelHeight - faceBound.Y);

                                            var maskdetect  = await MaskDetect.PredictImageAsync(frame);
                                            var noMaskCount = maskdetect.Where(x => x.TagName == "no-mask").Count();
                                            if (noMaskCount > 0)
                                            {
                                                if (!KeepDistance)
                                                {
                                                    await speech.Read($"please wear a face mask in {DataConfig.RoomName[CCTVIndex]}");
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                            if (!IsFaceDetected)
                            {
                                m_bboxRenderer[CCTVIndex].ClearMaskLabel();
                            }
                            // Save a capture to the Pictures library at most once per capture interval.
                            var TS = DateTime.Now - LastSaved[CCTVIndex];
                            if (savedBmp != null && TS.TotalSeconds > DataConfig.CaptureIntervalSecs && (bool)ChkCapture.IsChecked)
                            {
                                var myPictures = await Windows.Storage.StorageLibrary.GetLibraryAsync(Windows.Storage.KnownLibraryId.Pictures);
                                Windows.Storage.StorageFolder rootFolder    = myPictures.SaveFolder;
                                Windows.Storage.StorageFolder storageFolder = rootFolder;
                                var folderName = "cctv";
                                try
                                {
                                    storageFolder = await rootFolder.GetFolderAsync(folderName);
                                }
                                catch
                                {
                                    storageFolder = await rootFolder.CreateFolderAsync(folderName);
                                }
                                // Create the capture file, replacing it if it already exists.
                                Windows.Storage.StorageFile sampleFile =
                                    await storageFolder.CreateFileAsync($"cctv_{DateTime.Now.ToString("dd_MM_yyyy_HH_mm_ss")}_{CCTVIndex}.jpg",
                                                                        Windows.Storage.CreationCollisionOption.ReplaceExisting);
                                ImageHelpers.SaveSoftwareBitmapToFile(savedBmp, sampleFile);
                                LastSaved[CCTVIndex] = DateTime.Now;
                            }
                        }
                    }

                    // Update the displayed performance text
                    StatusLbl.Text = $"bind: {m_bindTime:F2}ms, eval: {m_evalTime:F2}ms";
                }
                catch (TaskCanceledException)
                {
                    // no-op: we expect this exception when we change media sources
                    // and can safely ignore/continue
                }
                catch (Exception ex)
                {
                    NotifyUser($"Exception while rendering results: {ex.Message}");
                }
            });
        }
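
ImageHelpers.SaveSoftwareBitmapToFile, called near the end of the method above, is likewise external to this listing. A hedged sketch using BitmapEncoder (JPEG matches the .jpg file name; the signature is an assumption, and if the real helper is async the call site above should await it):

        // Hedged sketch: encode a SoftwareBitmap as JPEG into the given StorageFile.
        public static async Task SaveSoftwareBitmapToFile(SoftwareBitmap bitmap, StorageFile file)
        {
            using (IRandomAccessStream stream = await file.OpenAsync(FileAccessMode.ReadWrite))
            {
                // BitmapEncoder expects a BGRA8 bitmap, so convert defensively first.
                SoftwareBitmap encodable = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Ignore);
                BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);
                encoder.SetSoftwareBitmap(encodable);
                await encoder.FlushAsync();
            }
        }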