private async Task <Rect> CreateCropRegion(SoftwareBitmap bitmap)
        {
            const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;

            if (!FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
            {
                return(new Rect(0, 0, bitmap.PixelWidth, bitmap.PixelHeight));
            }

            using var detectorInput = SoftwareBitmap.Convert(bitmap, InputPixelFormat);

            var faces = await faceDetector.DetectFacesAsync(detectorInput);

            var first = faces.FirstOrDefault();

            if (first == null)
            {
                return(new Rect(0, 0, bitmap.PixelWidth, bitmap.PixelHeight));
            }

            var faceBox = first.FaceBox;
            int margin  = 150;

            int x = Math.Max(0, (int)faceBox.X - margin);
            int y = Math.Max(0, (int)faceBox.Y - margin);

            int width  = Math.Min(bitmap.PixelWidth - x, (int)faceBox.Width + (margin * 2));
            int height = Math.Min(bitmap.PixelHeight - y, (int)faceBox.Height + (margin * 2));

            return(new Rect(x, y, width, height));
        }
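
A minimal follow-on sketch (not part of the example above) showing one way the returned crop Rect could be applied: re-encode the bitmap with BitmapEncoder and set BitmapTransform.Bounds to the crop region. The helper name ApplyCropAsync and the JPEG choice are assumptions.

        private static async Task<InMemoryRandomAccessStream> ApplyCropAsync(SoftwareBitmap bitmap, Rect crop)
        {
            var stream  = new InMemoryRandomAccessStream();
            var encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);

            // Convert to Bgra8, which the JPEG encoder reliably accepts.
            encoder.SetSoftwareBitmap(SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Bgra8));
            encoder.BitmapTransform.Bounds = new BitmapBounds
            {
                X      = (uint)crop.X,
                Y      = (uint)crop.Y,
                Width  = (uint)crop.Width,
                Height = (uint)crop.Height
            };

            await encoder.FlushAsync();
            stream.Seek(0);
            return stream;
        }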
Example #2
        private async Task <List <BitmapBounds> > DetectFace(SoftwareBitmap image)
        {
            if (imageData == null || imageData.Length != image.PixelHeight * image.PixelWidth)
            {
                imageData = new byte[image.PixelHeight * image.PixelWidth];
            }
            unsafe
            {
                fixed(byte *grayPointer = imageData)
                {
                    // Note: casting the pointer to Int32 truncates it in 64-bit processes; this only
                    // works if the native ColorToGrayscale helper genuinely expects a 32-bit address.
                    FaceProcessing.ImageProcessing.ColorToGrayscale(image, (Int32)(grayPointer));
                }
            }

            SoftwareBitmap grayImage = SoftwareBitmap.CreateCopyFromBuffer(imageData.AsBuffer(), BitmapPixelFormat.Gray8, image.PixelWidth, image.PixelHeight);
            var            faces     = await faceDetector.DetectFacesAsync(grayImage);

            List <BitmapBounds> boundingBoxes = new List <BitmapBounds>();

            for (int i = 0; i < faces.Count; i++)
            {
                boundingBoxes.Add(faces[i].FaceBox);
            }

            return(boundingBoxes);
        }
        public async Task <IList <DetectedFace> > GetFaces()
        {
            // Use FaceDetector.GetSupportedBitmapPixelFormats and IsBitmapPixelFormatSupported to dynamically
            // determine supported formats
            const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Gray8;

            SoftwareBitmap convertedBitmap = null;

            if (image.BitmapPixelFormat != faceDetectionPixelFormat)
            {
                convertedBitmap = SoftwareBitmap.Convert(image, faceDetectionPixelFormat);
            }
            else
            {
                convertedBitmap = image;
            }
            using (convertedBitmap)
            {
                FaceDetector faceDetector = await FaceDetector.CreateAsync();

                IList <DetectedFace> detectedFaces = await faceDetector.DetectFacesAsync(convertedBitmap);

                return(detectedFaces);
            }
        }
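
The comment above mentions the dynamic alternative to hard-coding Gray8; a small sketch of that idea follows (the helper name PickSupportedFormat is illustrative).

        private static BitmapPixelFormat PickSupportedFormat(SoftwareBitmap source)
        {
            // Prefer the source's own format if FaceDetector accepts it; otherwise fall back
            // to the first format the detector reports as supported.
            if (FaceDetector.IsBitmapPixelFormatSupported(source.BitmapPixelFormat))
            {
                return source.BitmapPixelFormat;
            }

            return FaceDetector.GetSupportedBitmapPixelFormats().First();
        }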
        /// <summary>
        /// Detect a face with the local (on-device) FaceDetector, capturing frames until exactly one face is found.
        /// </summary>
        private async Task <InMemoryRandomAccessStream> DetectFaceAsync()
        {
            var imgFormat = ImageEncodingProperties.CreateJpeg();

            while (true)
            {
                try
                {
                    var stream = new InMemoryRandomAccessStream();
                    await mediaCapture.CapturePhotoToStreamAsync(imgFormat, stream);

                    var image = await ImageConverter.ConvertToSoftwareBitmapAsync(stream);

                    detectedFaces = await faceDetector.DetectFacesAsync(image);

                    if (detectedFaces.Count == 0)
                    {
                        continue;
                    }
                    else if (detectedFaces.Count != 1)
                    {
                        Message = "too many faces!";
                    }
                    else
                    {
                        return(stream);
                    }
                }
                catch (Exception)
                {
                    // Swallow transient capture/detection failures and retry on the next loop iteration.
                }
            }
        }
        /// <summary>
        /// Detects faces in a single frame, using a software bitmap object as a source.
        /// The detected faces will be stored in the corresponding local class member.
        /// </summary>
        /// <param name="bitmap">The software bitmap object to detect the faces in.</param>
        public async void Detect(SoftwareBitmap bitmap)
        {
            var convertedBitmap = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Gray8);

            detectedFaces = await detector.DetectFacesAsync(convertedBitmap);

            convertedBitmap.Dispose();
        }
Example #6
        private async Task <IList <DetectedFace> > DetectFaces(SoftwareBitmap inputBitmap)
        {
            if (!FaceDetector.IsBitmapPixelFormatSupported(inputBitmap.BitmapPixelFormat))
            {
                inputBitmap = SoftwareBitmap.Convert(inputBitmap, faceDetectorSupportedPixelFormat);
            }

            return(await faceDetector.DetectFacesAsync(inputBitmap));
        }
Example #7
        private async Task <IList <DetectedFace> > DetectFaces(SoftwareBitmap inputbitMapBitmap)
        {
            var convertedIfRequired = inputbitMapBitmap;

            if (!FaceDetector.IsBitmapPixelFormatSupported(inputbitMapBitmap.BitmapPixelFormat))
            {
                convertedIfRequired = SoftwareBitmap.Convert(inputbitMapBitmap, _faceDectorSupportedPixelFormat);
            }
            return(await _faceDetector.DetectFacesAsync(convertedIfRequired));
        }
Example #8
        public async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (!_frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            var latestFrame = await _camera.GetLatestFrame();

            SoftwareBitmap currentFrame = latestFrame as SoftwareBitmap;

            // Use FaceDetector.GetSupportedBitmapPixelFormats and IsBitmapPixelFormatSupported to dynamically
            // determine supported formats
            const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Nv12;

            if (currentFrame == null || currentFrame.BitmapPixelFormat != faceDetectionPixelFormat)
            {
                _frameProcessingSemaphore.Release();
                return;
            }

            try
            {
                IList <DetectedFace> detectedFaces = await _faceDetector.DetectFacesAsync(currentFrame);

                if (detectedFaces.Count == 0)
                {
                    NoFaceDetected?.Invoke(this, null);
                }
                else if (detectedFaces.Count != _detectedFacesInLastFrame)
                {
                    OnPreAnalysis?.Invoke(this, null);

                    var bytes = await _camera.GetEncodedBytesAsync(currentFrame);

                    var output = await AnalysisFunction(bytes);// currentFrame.SoftwareBitmap.ToByteArray());

                    UsersIdentified?.Invoke(this, output);
                }

                _detectedFacesInLastFrame = detectedFaces.Count;
            }
            catch (Exception ex)
            {
                // Face tracking failed
                Debug.WriteLine(ex);
            }
            finally
            {
                _frameProcessingSemaphore.Release();
            }

            currentFrame.Dispose();
        }
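
For context, a sketch of how a handler with this signature is usually scheduled; the field name _frameProcessingTimer and the 66 ms period are assumptions, not part of the original class.

        private void StartProcessingLoop()
        {
            // ProcessCurrentVideoFrame matches the TimerElapsedHandler signature, so it can be
            // passed to a periodic ThreadPoolTimer directly (~15 fps here; tune for your device).
            _frameProcessingTimer = ThreadPoolTimer.CreatePeriodicTimer(
                ProcessCurrentVideoFrame,
                TimeSpan.FromMilliseconds(66));
        }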
Example #9
        private async Task <string> DetectEmotionWithWinML()
        {
            var videoFrame = lastFrame;

            if (faceDetector == null)
            {
                faceDetector = await FaceDetector.CreateAsync();
            }

            var detectedFaces = await faceDetector.DetectFacesAsync(videoFrame.SoftwareBitmap);

            if (detectedFaces != null && detectedFaces.Any())
            {
                var face = detectedFaces.OrderByDescending(s => s.FaceBox.Height * s.FaceBox.Width).First();
                using (var randomAccessStream = new InMemoryRandomAccessStream())
                {
                    var encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.BmpEncoderId, randomAccessStream);

                    var softwareBitmap = SoftwareBitmap.Convert(videoFrame.SoftwareBitmap, BitmapPixelFormat.Rgba16);
                    Debug.WriteLine(softwareBitmap.BitmapPixelFormat);
                    encoder.SetSoftwareBitmap(softwareBitmap);
                    encoder.BitmapTransform.Bounds = new BitmapBounds
                    {
                        X      = face.FaceBox.X,
                        Y      = face.FaceBox.Y,
                        Width  = face.FaceBox.Width,
                        Height = face.FaceBox.Height
                    };

                    await encoder.FlushAsync();

                    var decoder = await BitmapDecoder.CreateAsync(randomAccessStream);

                    var croppedImage = await decoder.GetSoftwareBitmapAsync(softwareBitmap.BitmapPixelFormat, softwareBitmap.BitmapAlphaMode);

                    videoFrame = VideoFrame.CreateWithSoftwareBitmap(croppedImage);
                }
            }

            var input   = ImageFeatureValue.CreateFromVideoFrame(videoFrame);
            var emotion = await model.EvaluateAsync(new FER_Emotion_RecognitionInput()
            {
                Input3 = input
            });

            var list  = new List <float>(emotion.Plus692_Output_0.GetAsVectorView());
            var index = list.IndexOf(list.Max());
            var label = labels[index];

            return(label);
        }
Example #10
        public async void ProcessFrame(FrameData frame)
        {
            if (callbacks.Count == 0 || busy)
            {
                return;
            }
            else
            {
                busy = true;
            }

            var bitmap = frame.bitmap;

            if (!FaceDetector.IsBitmapPixelFormatSupported(bitmap.BitmapPixelFormat))
            {
                bitmap = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Gray8);
            }

            var detectedFaces = await faceDetector.DetectFacesAsync(bitmap);

            int frameKey = -1;

            if (detectedFaces.Count > 0)
            {
                frameKey = SceneCameraManager.Inst.AddFrameToCache(frame);
            }

            ProjectRuntime.Inst.DispatchRuntimeCode(() => {
                var jsImg = JavaScriptValue.CreateObject();
                jsImg.SetProperty(JavaScriptPropertyId.FromString("id"), JavaScriptValue.FromInt32(frameKey), true);
                Native.JsSetObjectBeforeCollectCallback(jsImg, IntPtr.Zero, jsObjectCallback);

                var faces    = JavaScriptValue.CreateArray(0);
                var pushFunc = faces.GetProperty(JavaScriptPropertyId.FromString("push"));
                foreach (var face in detectedFaces)
                {
                    var pos    = GetEstimatedPositionFromFaceBounds(face.FaceBox, frame.bitmap);
                    var jsFace = JavaScriptContext.RunScript($"new Face(new Position({pos.X}, {pos.Y}, {pos.Z}), {0});");
                    jsFace.SetProperty(JavaScriptPropertyId.FromString("frame"), jsImg, true);
                    jsFace.SetProperty(JavaScriptPropertyId.FromString("bounds"), face.FaceBox.ToJavaScriptValue(), true);
                    pushFunc.CallFunction(faces, jsFace);
                }
                foreach (var callback in callbacks)
                {
                    callback.CallFunction(callback, faces);
                }
            });

            busy = false;
        }
Example #11
        private async Task <BitmapBounds?> FindFace(SoftwareBitmap bitmap)
        {
            if (!FaceDetector.IsBitmapPixelFormatSupported(bitmap.BitmapPixelFormat))
            {
                bitmap = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Gray8);
            }

            var faces = await faceDetector.DetectFacesAsync(bitmap);

            if (faces.Count != 1)
            {
                return(null);
            }

            return(faces[0].FaceBox);
        }
Example #12
        private async void DetectFaces()
        {
            if (file != null)
            {
                // Open the image file and decode the bitmap into memory.
                // We'll need to make 2 bitmap copies: one for the FaceDetector and another to display.
                using (IRandomAccessStream fileStream = await file.OpenAsync(FileAccessMode.Read))
                {
                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

                    BitmapTransform transform = this.ComputeScalingTransformForSourceImage(decoder);

                    using (SoftwareBitmap originalBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage))
                    {
                        // We need to convert the image into a format that's compatible with FaceDetector.
                        // Gray8 should be a good type but verify it against FaceDetector’s supported formats.
                        const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                        if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                        {
                            using (detectorInput = SoftwareBitmap.Convert(originalBitmap, InputPixelFormat))
                            {
                                // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer.
                                displaySource = new WriteableBitmap(originalBitmap.PixelWidth, originalBitmap.PixelHeight);
                                originalBitmap.CopyToBuffer(displaySource.PixelBuffer);

                                NotifyUser("Detecting...", NotifyType.StatusMessage);

                                // Initialize our FaceDetector and execute it against our input image.
                                // NOTE: FaceDetector initialization can take a long time, and in most cases
                                // you should create a member variable and reuse the object.
                                // However, for simplicity in this scenario we instantiate a new instance each time.
                                FaceDetector detector = await FaceDetector.CreateAsync();

                                faces = await detector.DetectFacesAsync(detectorInput);

                                // Create our display using the available image and face results.
                                DrawDetectedFaces(displaySource, faces);
                            }
                        }
                        else
                        {
                            NotifyUser("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector", NotifyType.ErrorMessage);
                        }
                    }
                }
            }
        }
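
ComputeScalingTransformForSourceImage is called above but not included in this excerpt. A plausible sketch, assuming the same 1280 px height-limit pattern used by the other examples on this page:

        private BitmapTransform ComputeScalingTransformForSourceImage(BitmapDecoder decoder)
        {
            var transform = new BitmapTransform();
            const float sourceImageHeightLimit = 1280;

            if (decoder.PixelHeight > sourceImageHeightLimit)
            {
                // Scale the decode down so the result is no taller than the limit.
                float scalingFactor    = sourceImageHeightLimit / decoder.PixelHeight;
                transform.ScaledWidth  = (uint)Math.Floor(decoder.PixelWidth * scalingFactor);
                transform.ScaledHeight = (uint)Math.Floor(decoder.PixelHeight * scalingFactor);
            }

            return transform;
        }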
Example #13
        public async Task ProcessImageAsync(BitmapDecoder bitmapDecoder, IRandomAccessStream imageStream,
                                            string cameraId)
        {
            try
            {
                SoftwareBitmap image =
                    await
                    bitmapDecoder.GetSoftwareBitmapAsync(bitmapDecoder.BitmapPixelFormat,
                                                         BitmapAlphaMode.Premultiplied);

                const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Gray8;
                if (image.BitmapPixelFormat != faceDetectionPixelFormat)
                {
                    image = SoftwareBitmap.Convert(image, faceDetectionPixelFormat);
                }
                IEnumerable <DetectedFace> detectedFaces = await _faceDetector.DetectFacesAsync(image);

                if (detectedFaces != null)
                {
                    List <Stream> faceImages = new List <Stream>();
                    foreach (DetectedFace face in detectedFaces)
                    {
                        MemoryStream faceImageStream = new MemoryStream();
                        Image        faceImage = new Image(imageStream.AsStreamForRead());
                        int          width, height, xStartPosition, yStartPosition;
                        EnlargeFaceBoxSize(face, image, out width, out height, out xStartPosition,
                                           out yStartPosition);
                        faceImage.Crop(width, height,
                                       new Rectangle(xStartPosition, yStartPosition,
                                                     width, height)).SaveAsJpeg(faceImageStream, 80);
                        faceImages.Add(faceImageStream);
                    }


                    await _imagePersiter.PersistAsync(faceImages, cameraId);
                }
            }
            catch (Exception ex)
            {
                //ToDo Logging
                Debug.WriteLine(ex.Message);
            }
        }
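
EnlargeFaceBoxSize is referenced above but not shown. A sketch under the assumption that it follows the same margin-and-clamp pattern as CreateCropRegion at the top of this page (the 50 px margin is illustrative):

        private static void EnlargeFaceBoxSize(DetectedFace face, SoftwareBitmap image,
                                               out int width, out int height,
                                               out int xStartPosition, out int yStartPosition)
        {
            const int margin = 50;

            // Grow the face box by the margin on every side, then clamp to the image bounds.
            xStartPosition = Math.Max(0, (int)face.FaceBox.X - margin);
            yStartPosition = Math.Max(0, (int)face.FaceBox.Y - margin);
            width          = Math.Min(image.PixelWidth - xStartPosition, (int)face.FaceBox.Width + 2 * margin);
            height         = Math.Min(image.PixelHeight - yStartPosition, (int)face.FaceBox.Height + 2 * margin);
        }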
        public async Task <int> DetectFacesAsync(byte[] photoByteArray)
        {
            BitmapDecoder decoder = await BitmapDecoder.CreateAsync(photoByteArray.ToRandomAccessMemory());

            BitmapTransform transform = new BitmapTransform();
            const float     sourceImageHeightLimit = 1280;

            if (decoder.PixelHeight > sourceImageHeightLimit)
            {
                float scalingFactor = (float)sourceImageHeightLimit / (float)decoder.PixelHeight;
                transform.ScaledWidth  = (uint)Math.Floor(decoder.PixelWidth * scalingFactor);
                transform.ScaledHeight = (uint)Math.Floor(decoder.PixelHeight * scalingFactor);
            }

            SoftwareBitmap sourceBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Premultiplied, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage);

            SoftwareBitmap convertedBitmap = sourceBitmap;

            if (sourceBitmap.BitmapPixelFormat != faceDetectionPixelFormat)
            {
                convertedBitmap = SoftwareBitmap.Convert(sourceBitmap, faceDetectionPixelFormat);
            }

            FaceDetector detector = await FaceDetector.CreateAsync();

            IList <DetectedFace> faces = null;

            faces = await detector.DetectFacesAsync(convertedBitmap);

            /* ICollection<System.Drawing.Rectangle> rectangles = new List<System.Drawing.Rectangle>();
             *
             * foreach(DetectedFace face in faces)
             *   rectangles.Add(new System.Drawing.Rectangle(Convert.ToInt32(face.FaceBox.X), Convert.ToInt32(face.FaceBox.Y), Convert.ToInt32(face.FaceBox.Width), Convert.ToInt32(face.FaceBox.Height)));
             */
            sourceBitmap.Dispose();
            convertedBitmap.Dispose();

            return(faces.Count);
        }
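
ToRandomAccessMemory is an extension used above but not defined in this excerpt; a minimal sketch, assuming it simply wraps the byte array in an in-memory WinRT stream. It would live in a separate top-level static class, and it blocks for brevity; an async variant is preferable in production code.

public static class ByteArrayExtensions
{
    public static IRandomAccessStream ToRandomAccessMemory(this byte[] data)
    {
        var stream = new InMemoryRandomAccessStream();
        stream.WriteAsync(data.AsBuffer()).AsTask().Wait();   // copy the bytes into the stream
        stream.Seek(0);                                        // rewind so the decoder reads from the start
        return stream;
    }
}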
Example #15
        private async Task <string> DetectEmotionWithWinML()
        {
            var videoFrame = lastFrame;

            if (faceDetector == null)
            {
                faceDetector = await FaceDetector.CreateAsync();
            }

            var detectedFaces = await faceDetector.DetectFacesAsync(videoFrame.SoftwareBitmap);

            if (detectedFaces != null && detectedFaces.Any())
            {
                var face = detectedFaces.OrderByDescending(s => s.FaceBox.Height * s.FaceBox.Width).First();
                using var randomAccessStream = new InMemoryRandomAccessStream();

                // The snippet as captured decoded from an empty stream, which would fail at runtime;
                // encode the current frame into the stream first so the decoder below has data to crop.
                var encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.BmpEncoderId, randomAccessStream);
                encoder.SetSoftwareBitmap(SoftwareBitmap.Convert(videoFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8));
                await encoder.FlushAsync();

                var decoder = await BitmapDecoder.CreateAsync(randomAccessStream);

                var croppedImage = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, new BitmapTransform()
                {
                    Bounds = new BitmapBounds()
                    {
                        X = face.FaceBox.X, Y = face.FaceBox.Y, Width = face.FaceBox.Width, Height = face.FaceBox.Height
                    }
                }, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage);

                videoFrame = VideoFrame.CreateWithSoftwareBitmap(croppedImage);
            }

            var emotion = await model.EvaluateAsync(new CNTKGraphModelInput()
            {
                Input338 = videoFrame
            });

            var    index = emotion.Plus692_Output_0.IndexOf(emotion.Plus692_Output_0.Max());
            string label = labels[index];

            return(label);
        }
        private async Task <bool> TakePictureAsync()
        {
            bool successful = true;

            if (_state != StreamingState.Streaming)
            {
                return(false);
            }

            try
            {
                IList <DetectedFace>    faces;
                const BitmapPixelFormat PixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame currentFrame = new VideoFrame(PixelFormat, (int)_properties.Width, (int)_properties.Height))
                {
                    await _mediaCapture.GetPreviewFrameAsync(currentFrame);

                    faces = await _faceDetector.DetectFacesAsync(currentFrame.SoftwareBitmap);

                    Size size = new Size(currentFrame.SoftwareBitmap.PixelWidth, currentFrame.SoftwareBitmap.PixelHeight);

                    if (faces.Count == 0 || faces.Count > 1)
                    {
                        throw new Exception("Too many people. (Or no one.)");
                    }

                    using (SoftwareBitmap bitmap = SoftwareBitmap.Convert(currentFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8))
                    {
                        WriteableBitmap source = new WriteableBitmap(bitmap.PixelWidth, bitmap.PixelHeight);
                        bitmap.CopyToBuffer(source.PixelBuffer);

                        IRandomAccessStream stream  = new InMemoryRandomAccessStream();
                        BitmapEncoder       encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);

                        encoder.SetSoftwareBitmap(bitmap);
                        await encoder.FlushAsync();

                        ShowUp(size, faces, source);

                        if (UserName.Text.Equals(string.Empty))
                        {
                            throw new Exception("Enter your name.");
                        }

                        if (await _faceApiHelper.CreatePersonAsync(stream.AsStream(), UserName.Text))
                        {
                            ShowAlertHelper.ShowDialog("Hi " + UserName.Text + ".", "Success");
                        }
                        else
                        {
                            ShowAlertHelper.ShowDialog("Something went wrong. Try again.");
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                ShowAlertHelper.ShowDialog(ex.Message);
                successful = false;
            }

            return(successful);
        }
        /// <summary>
        /// Loads an image file (selected by the user) and runs the FaceDetector on the loaded bitmap. If successful, calls SetupVisualization to display the results.
        /// </summary>
        /// <param name="sender">Button user clicked</param>
        /// <param name="e">Event data</param>
        private async void OpenFile_Click(object sender, RoutedEventArgs e)
        {
            SoftwareBitmap detectorInput = null;

            try
            {
                FileOpenPicker photoPicker = new FileOpenPicker();
                photoPicker.ViewMode = PickerViewMode.Thumbnail;
                photoPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary;
                photoPicker.FileTypeFilter.Add(".jpg");
                photoPicker.FileTypeFilter.Add(".jpeg");
                photoPicker.FileTypeFilter.Add(".png");
                photoPicker.FileTypeFilter.Add(".bmp");

                StorageFile photoFile = await photoPicker.PickSingleFileAsync();

                if (photoFile == null)
                {
                    return;
                }

                this.ClearVisualization();
                //this.rootPage.NotifyUser("Opening...", NotifyType.StatusMessage);

                // Open the image file and decode the bitmap into memory.
                // We'll need to make 2 bitmap copies: one for the FaceDetector and another to display.
                using (IRandomAccessStream fileStream = await photoFile.OpenAsync(Windows.Storage.FileAccessMode.Read))
                {
                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

                    BitmapTransform transform = this.ComputeScalingTransformForSourceImage(decoder);

                    using (SoftwareBitmap originalBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage))
                    {
                        // We need to convert the image into a format that's compatible with FaceDetector.
                        // Gray8 should be a good type but verify it against FaceDetector’s supported formats.
                        const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                        if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                        {
                            using (detectorInput = SoftwareBitmap.Convert(originalBitmap, InputPixelFormat))
                            {
                                // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer.
                                this.displaySource = new WriteableBitmap(originalBitmap.PixelWidth, originalBitmap.PixelHeight);
                                originalBitmap.CopyToBuffer(displaySource.PixelBuffer);

                                //this.rootPage.NotifyUser("Detecting...", NotifyType.StatusMessage);

                                // Initialize our FaceDetector and execute it against our input image.
                                // NOTE: FaceDetector initialization can take a long time, and in most cases
                                // you should create a member variable and reuse the object.
                                // However, for simplicity in this scenario we instantiate a new instance each time.
                                FaceDetector detector = await FaceDetector.CreateAsync();

                                this.faces = await detector.DetectFacesAsync(detectorInput);

                                // Create our display using the available image and face results.
                                this.SetupVisualization(displaySource, faces);
                            }
                        }
                        else
                        {
                            //this.rootPage.NotifyUser("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector", NotifyType.ErrorMessage);
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                this.ClearVisualization();
                //this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
            }
        }
        /// <summary>
        /// Runs the skill against a binding object, executing the skill logic on the associated input features and populating the output ones
        /// This skill proceeds in 2 steps:
        /// 1) Run FaceDetector against the image and populate the face bound feature in the binding object
        /// 2) If a face was detected, proceeds with sentiment analysis of that portion of the image using Windows ML, then updates the score
        /// of each possible sentiment returned as a result
        /// </summary>
        /// <param name="binding"></param>
        /// <returns></returns>
        public IAsyncAction EvaluateAsync(ISkillBinding binding)
        {
            FaceSentimentAnalyzerBinding bindingObj = binding as FaceSentimentAnalyzerBinding;

            if (bindingObj == null)
            {
                throw new ArgumentException("Invalid ISkillBinding parameter: This skill handles evaluation of FaceSentimentAnalyzerBinding instances only");
            }

            return(AsyncInfo.Run(async(token) =>
            {
                // Retrieve input frame from the binding object
                VideoFrame inputFrame = (binding[FaceSentimentAnalyzerConst.SKILL_INPUTNAME_IMAGE].FeatureValue as SkillFeatureImageValue).VideoFrame;
                SoftwareBitmap softwareBitmapInput = inputFrame.SoftwareBitmap;

                // Retrieve a SoftwareBitmap to run face detection
                if (softwareBitmapInput == null)
                {
                    if (inputFrame.Direct3DSurface == null)
                    {
                        throw (new ArgumentNullException("An invalid input frame has been bound"));
                    }
                    softwareBitmapInput = await SoftwareBitmap.CreateCopyFromSurfaceAsync(inputFrame.Direct3DSurface);
                }

                // Run face detection and retrieve face detection result
                var faceDetectionResult = await m_faceDetector.DetectFacesAsync(softwareBitmapInput);

                // Retrieve face rectangle feature from the binding object
                var faceRectangleFeature = binding[FaceSentimentAnalyzerConst.SKILL_OUTPUTNAME_FACERECTANGLE];

                // Retrieve face sentiment scores feature from the binding object
                var faceSentimentScores = binding[FaceSentimentAnalyzerConst.SKILL_OUTPUTNAME_FACESENTIMENTSCORES];

                // If a face is found, update face rectangle feature
                if (faceDetectionResult.Count > 0)
                {
                    // Retrieve the face bound and enlarge it by a factor of 1.5x while also ensuring clamping to frame dimensions
                    BitmapBounds faceBound = faceDetectionResult[0].FaceBox;
                    var additionalOffset = faceBound.Width / 2;
                    faceBound.X = Math.Max(0, faceBound.X - additionalOffset);
                    faceBound.Y = Math.Max(0, faceBound.Y - additionalOffset);
                    faceBound.Width = (uint)Math.Min(faceBound.Width + 2 * additionalOffset, softwareBitmapInput.PixelWidth - faceBound.X);
                    faceBound.Height = (uint)Math.Min(faceBound.Height + 2 * additionalOffset, softwareBitmapInput.PixelHeight - faceBound.Y);

                    // Set the face rectangle SkillFeatureValue in the skill binding object
                    // note that values are in normalized coordinates between [0, 1] for ease of use
                    await faceRectangleFeature.SetFeatureValueAsync(
                        new List <float>()
                    {
                        (float)faceBound.X / softwareBitmapInput.PixelWidth,                      // left
                        (float)faceBound.Y / softwareBitmapInput.PixelHeight,                     // top
                        (float)(faceBound.X + faceBound.Width) / softwareBitmapInput.PixelWidth,  // right
                        (float)(faceBound.Y + faceBound.Height) / softwareBitmapInput.PixelHeight // bottom
                    });

                    // Bind the WinML input frame with the adequate face bounds specified as metadata
                    bindingObj.m_winmlBinding.Bind(
                        FaceSentimentAnalyzerConst.WINML_MODEL_INPUTNAME, // WinML feature name
                        inputFrame,                                       // VideoFrame
                        new PropertySet()                                 // VideoFrame bounds
                    {
                        { "BitmapBounds",
                          PropertyValue.CreateUInt32Array(new uint[] { faceBound.X, faceBound.Y, faceBound.Width, faceBound.Height }) }
                    });

                    // Run WinML evaluation
                    var winMLEvaluationResult = await m_winmlSession.EvaluateAsync(bindingObj.m_winmlBinding, "");
                    var winMLModelResult = (winMLEvaluationResult.Outputs[FaceSentimentAnalyzerConst.WINML_MODEL_OUTPUTNAME] as TensorFloat).GetAsVectorView();
                    var predictionScores = SoftMax(winMLModelResult);

                    // Set the SkillFeatureValue in the skill binding object related to the face sentiment scores for each possible SentimentType
                    // note that we SoftMax the output of WinML to give a score normalized between [0, 1] for ease of use
                    await faceSentimentScores.SetFeatureValueAsync(predictionScores);
                }
                else // if no face found, reset output SkillFeatureValues with 0s
                {
                    await faceRectangleFeature.SetFeatureValueAsync(FaceSentimentAnalyzerConst.ZeroFaceRectangleCoordinates);
                    await faceSentimentScores.SetFeatureValueAsync(FaceSentimentAnalyzerConst.ZeroFaceSentimentScores);
                }
            }));
        }
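
SoftMax is used above (and by the frame-capture handlers further down) but is not part of this excerpt; a standard implementation sketch that normalizes the raw model scores so they sum to 1:

        private static List<float> SoftMax(IReadOnlyList<float> inputs)
        {
            // Subtract the maximum before exponentiating for numerical stability.
            float max          = inputs.Max();
            var   exponentials = inputs.Select(v => (float)Math.Exp(v - max)).ToList();
            float sum          = exponentials.Sum();

            return exponentials.Select(v => v / sum).ToList();
        }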
        private async void Current_SoftwareBitmapFrameCaptured(object sender, SoftwareBitmapEventArgs e)
        {
            Debug.WriteLine("FrameCaptured");
            Debug.WriteLine($"Frame evaluation started {DateTime.Now}");
            if (e.SoftwareBitmap != null)
            {
                BitmapPixelFormat bpf = e.SoftwareBitmap.BitmapPixelFormat;

                var uncroppedBitmap = SoftwareBitmap.Convert(e.SoftwareBitmap, BitmapPixelFormat.Nv12);
                var faces           = await _faceDetector.DetectFacesAsync(uncroppedBitmap);

                if (faces.Count > 0)
                {
                    //crop image to focus on face portion
                    var        faceBox    = faces[0].FaceBox;
                    VideoFrame inputFrame = VideoFrame.CreateWithSoftwareBitmap(e.SoftwareBitmap);
                    VideoFrame tmp        = null;
                    tmp = new VideoFrame(e.SoftwareBitmap.BitmapPixelFormat, (int)(faceBox.Width + faceBox.Width % 2) - 2, (int)(faceBox.Height + faceBox.Height % 2) - 2);
                    await inputFrame.CopyToAsync(tmp, faceBox, null);

                    //crop image to fit model input requirements
                    VideoFrame croppedInputImage = new VideoFrame(BitmapPixelFormat.Gray8, (int)_inputImageDescriptor.Shape[3], (int)_inputImageDescriptor.Shape[2]);
                    var        srcBounds         = GetCropBounds(
                        tmp.SoftwareBitmap.PixelWidth,
                        tmp.SoftwareBitmap.PixelHeight,
                        croppedInputImage.SoftwareBitmap.PixelWidth,
                        croppedInputImage.SoftwareBitmap.PixelHeight);
                    await tmp.CopyToAsync(croppedInputImage, srcBounds, null);

                    ImageFeatureValue imageTensor = ImageFeatureValue.CreateFromVideoFrame(croppedInputImage);

                    _binding = new LearningModelBinding(_session);

                    TensorFloat  outputTensor        = TensorFloat.Create(_outputTensorDescriptor.Shape);
                    List <float> _outputVariableList = new List <float>();

                    // Bind inputs + outputs
                    _binding.Bind(_inputImageDescriptor.Name, imageTensor);
                    _binding.Bind(_outputTensorDescriptor.Name, outputTensor);

                    // Evaluate results (use a fresh GUID as the correlation id; "new Guid()" is always all zeros)
                    var results = await _session.EvaluateAsync(_binding, Guid.NewGuid().ToString());

                    Debug.WriteLine("ResultsEvaluated: " + results.ToString());

                    var outputTensorList = outputTensor.GetAsVectorView();
                    var resultsList      = new List <float>(outputTensorList.Count);
                    for (int i = 0; i < outputTensorList.Count; i++)
                    {
                        resultsList.Add(outputTensorList[i]);
                    }

                    var softMaxexOutputs = SoftMax(resultsList);

                    double maxProb  = 0;
                    int    maxIndex = 0;

                    // Comb through the evaluation results
                    for (int i = 0; i < Constants.POTENTIAL_EMOJI_NAME_LIST.Count(); i++)
                    {
                        // Record the dominant emotion probability & its location
                        if (softMaxexOutputs[i] > maxProb)
                        {
                            maxIndex = i;
                            maxProb  = softMaxexOutputs[i];
                        }
                    }

                    Debug.WriteLine($"Probability = {maxProb}, Threshold set to = {Constants.CLASSIFICATION_CERTAINTY_THRESHOLD}, Emotion = {Constants.POTENTIAL_EMOJI_NAME_LIST[maxIndex]}");

                    // For evaluations run on the MainPage, update the emoji carousel
                    if (maxProb >= Constants.CLASSIFICATION_CERTAINTY_THRESHOLD)
                    {
                        Debug.WriteLine("first page emoji should start to update");
                        IntelligenceServiceEmotionClassified?.Invoke(this, new ClassifiedEmojiEventArgs(CurrentEmojis._emojis.Emojis[maxIndex]));
                    }

                    // Dispose of resources
                    if (e.SoftwareBitmap != null)
                    {
                        e.SoftwareBitmap.Dispose();
                        e.SoftwareBitmap = null;
                    }
                }
            }
            IntelligenceServiceProcessingCompleted?.Invoke(this, null);
            Debug.WriteLine($"Frame evaluation finished {DateTime.Now}");
        }
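
GetCropBounds is called above but not included here. A plausible sketch, and an assumption about the original helper's behavior: take the largest centered region of the source that matches the model input's aspect ratio.

        public static BitmapBounds GetCropBounds(int srcWidth, int srcHeight, int targetWidth, int targetHeight)
        {
            // Scale the target box up until it fills the source in one dimension, then center it.
            double scale      = Math.Min((double)srcWidth / targetWidth, (double)srcHeight / targetHeight);
            uint   cropWidth  = (uint)(targetWidth * scale);
            uint   cropHeight = (uint)(targetHeight * scale);

            return new BitmapBounds
            {
                X      = (uint)((srcWidth - cropWidth) / 2),
                Y      = (uint)((srcHeight - cropHeight) / 2),
                Width  = cropWidth,
                Height = cropHeight
            };
        }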
Example #20
        async Task SetAsync(SoftwareBitmap input)
        {
            BitmapBounds bounds = default;

            try
            {
                var faces = await FaceDetector.DetectFacesAsync(input);

                if (faces.Any())
                {
                    bounds = faces.First().FaceBox;
                    if (!IsRegionSet)
                    {
                        IsRegionSet = true;
                    }

                    var hOffset = (bounds.X + bounds.Width == input.PixelWidth) ? 1 : 0;
                    var vOffset = (bounds.Y + bounds.Height == input.PixelHeight) ? 1 : 0;

                    var rectBounds = new Rect(
                        Math.Max(bounds.X / (double)input.PixelWidth, float.Epsilon),
                        Math.Max(bounds.Y / (double)input.PixelHeight, float.Epsilon),
                        (bounds.Width - hOffset) / (double)input.PixelWidth,
                        (bounds.Height - vOffset) / (double)input.PixelHeight);

                    if ((ExampleMediaCapture.InfraredEnclosureLocation?.RotationAngleInDegreesClockwise ?? 0) != 0)
                    {
                        rectBounds = rectBounds.Rotate((int)ExampleMediaCapture.InfraredEnclosureLocation.RotationAngleInDegreesClockwise);
                    }

                    var region = new[] {
                        new RegionOfInterest()
                        {
                            AutoExposureEnabled = true,
                            Bounds           = rectBounds,
                            BoundsNormalized = true,
                            Type             = RegionOfInterestType.Face,
                            Weight           = 100,
                        }
                    };

                    if (!Paused)
                    {
                        await RegionsOfInterestControl.SetRegionsAsync(region);

                        // Camera.FaceBox.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                        // {
                        //     Camera.FaceBox.Width = rectBounds.Width * Camera.FaceCanvas.Width;
                        //     Camera.FaceBox.Height = rectBounds.Height * Camera.FaceCanvas.Height;
                        //     Canvas.SetTop(Camera.FaceBox, rectBounds.Top * Camera.FaceCanvas.Height);
                        //     Canvas.SetLeft(Camera.FaceBox, rectBounds.Left * Camera.FaceCanvas.Width);
                        // });

                        SetEpoch = Stopwatch.Elapsed;
                    }
                }
            }
            catch (COMException e) when(e.HResult.Equals(unchecked ((int)0x8000000b)))  // E_BOUNDS
            {
                // TODO RegionOfInterest.OutOfBounds
            }
            catch (Exception e) when(e.HResult.Equals(unchecked ((int)0x80070016)))     // ERROR_BAD_COMMAND
            {
                // UseNativeFallback = true;
                // TODO RegionOfInterest.Set.Failed
            }
            catch (Exception)
            {
                // TODO swallow
            }
        }
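
Rect.Rotate is an extension used above but not defined in this excerpt. A sketch for normalized [0, 1] rectangles rotated clockwise in 90 degree steps; the direction convention is an assumption and should be verified against the actual enclosure orientation.

public static class RectExtensions
{
    public static Rect Rotate(this Rect r, int clockwiseDegrees)
    {
        // Normalize the angle to 0, 90, 180 or 270 and remap the rectangle accordingly.
        switch (((clockwiseDegrees % 360) + 360) % 360)
        {
            case 90:  return new Rect(1 - (r.Y + r.Height), r.X, r.Height, r.Width);
            case 180: return new Rect(1 - (r.X + r.Width), 1 - (r.Y + r.Height), r.Width, r.Height);
            case 270: return new Rect(r.Y, 1 - (r.X + r.Width), r.Height, r.Width);
            default:  return r;
        }
    }
}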
        //Start the process
        private async void button_Click(object sender, RoutedEventArgs e)
        {
            FolderPicker folderPicker = new FolderPicker();

            folderPicker.FileTypeFilter.Add(".jpg");
            folderPicker.FileTypeFilter.Add(".jpeg");
            folderPicker.FileTypeFilter.Add(".png");
            folderPicker.FileTypeFilter.Add(".bmp");
            folderPicker.ViewMode = PickerViewMode.Thumbnail;

            StorageFolder photoFolder = await folderPicker.PickSingleFolderAsync();

            if (photoFolder == null)
            {
                return;
            }

            var files = await photoFolder.GetFilesAsync();

            List <Scores> E = new List <Scores>();

            int[] num = new int[files.Count];

            for (int i = 0; i < files.Count; i++)
            {
                IRandomAccessStream fileStream = await files[i].OpenAsync(FileAccessMode.Read);
                BitmapDecoder       decoder    = await BitmapDecoder.CreateAsync(fileStream);

                BitmapTransform transform = new BitmapTransform();
                const float     sourceImageHeightLimit = 1280;

                if (decoder.PixelHeight > sourceImageHeightLimit)
                {
                    float scalingFactor = (float)sourceImageHeightLimit / (float)decoder.PixelHeight;
                    transform.ScaledWidth  = (uint)Math.Floor(decoder.PixelWidth * scalingFactor);
                    transform.ScaledHeight = (uint)Math.Floor(decoder.PixelHeight * scalingFactor);
                }

                SoftwareBitmap sourceBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Premultiplied, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage);

                const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Gray8;

                SoftwareBitmap convertedBitmap;

                if (sourceBitmap.BitmapPixelFormat != faceDetectionPixelFormat)
                {
                    convertedBitmap = SoftwareBitmap.Convert(sourceBitmap, faceDetectionPixelFormat);
                }
                else
                {
                    convertedBitmap = sourceBitmap;
                }

                if (faceDetector == null)
                {
                    faceDetector = await FaceDetector.CreateAsync();
                }

                detectedFaces = await faceDetector.DetectFacesAsync(convertedBitmap);

                Scores sc = null;

                if (detectedFaces.Count > 0)
                {
                    num[i] = detectedFaces.Count;
                    FaceRectangle rectID = new FaceRectangle();
                    rectID = await UploadAndDetectFaces(files[i].Path);

                    if (rectID != null)
                    {
                        sc = await EstimateEmotions(files[i].Path, rectID);
                    }
                }

                E.Add(sc);
                if (sc != null)
                {
                    Items.Add(new DataItem(i.ToString(), (int)(sc.Happiness * 100)));
                }

                sourceBitmap.Dispose();
                fileStream.Dispose();
                convertedBitmap.Dispose();
            }
        }
Example #22
        public async void DetectFaces()
        {
            //<SnippetPicker>
            FileOpenPicker photoPicker = new FileOpenPicker();

            photoPicker.ViewMode = PickerViewMode.Thumbnail;
            photoPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary;
            photoPicker.FileTypeFilter.Add(".jpg");
            photoPicker.FileTypeFilter.Add(".jpeg");
            photoPicker.FileTypeFilter.Add(".png");
            photoPicker.FileTypeFilter.Add(".bmp");

            StorageFile photoFile = await photoPicker.PickSingleFileAsync();

            if (photoFile == null)
            {
                return;
            }
            //</SnippetPicker>

            //<SnippetDecode>
            IRandomAccessStream fileStream = await photoFile.OpenAsync(FileAccessMode.Read);

            BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

            BitmapTransform transform = new BitmapTransform();
            const float     sourceImageHeightLimit = 1280;

            if (decoder.PixelHeight > sourceImageHeightLimit)
            {
                float scalingFactor = (float)sourceImageHeightLimit / (float)decoder.PixelHeight;
                transform.ScaledWidth  = (uint)Math.Floor(decoder.PixelWidth * scalingFactor);
                transform.ScaledHeight = (uint)Math.Floor(decoder.PixelHeight * scalingFactor);
            }

            SoftwareBitmap sourceBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Premultiplied, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage);

            //</SnippetDecode>


            //<SnippetFormat>
            // Use FaceDetector.GetSupportedBitmapPixelFormats and IsBitmapPixelFormatSupported to dynamically
            // determine supported formats
            const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Gray8;

            SoftwareBitmap convertedBitmap;

            if (sourceBitmap.BitmapPixelFormat != faceDetectionPixelFormat)
            {
                convertedBitmap = SoftwareBitmap.Convert(sourceBitmap, faceDetectionPixelFormat);
            }
            else
            {
                convertedBitmap = sourceBitmap;
            }
            //</SnippetFormat>

            //<SnippetDetect>
            if (faceDetector == null)
            {
                faceDetector = await FaceDetector.CreateAsync();
            }

            detectedFaces = await faceDetector.DetectFacesAsync(convertedBitmap);

            ShowDetectedFaces(sourceBitmap, detectedFaces);
            //</SnippetDetect>

            //<SnippetDispose>
            sourceBitmap.Dispose();
            fileStream.Dispose();
            convertedBitmap.Dispose();
            //</SnippetDispose>
        }
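
ShowDetectedFaces is called in the snippet above but not shown. A minimal sketch, assuming a XAML Image control named imageControl that displays the photo and an overlaid Canvas named facesCanvas (both names are assumptions):

        private void ShowDetectedFaces(SoftwareBitmap sourceBitmap, IList<DetectedFace> faces)
        {
            facesCanvas.Children.Clear();

            // Scale face boxes from bitmap pixel coordinates to the displayed image's size.
            double widthScale  = imageControl.ActualWidth / sourceBitmap.PixelWidth;
            double heightScale = imageControl.ActualHeight / sourceBitmap.PixelHeight;

            foreach (DetectedFace face in faces)
            {
                var box = new Windows.UI.Xaml.Shapes.Rectangle
                {
                    Width           = face.FaceBox.Width * widthScale,
                    Height          = face.FaceBox.Height * heightScale,
                    Stroke          = new SolidColorBrush(Windows.UI.Colors.LimeGreen),
                    StrokeThickness = 2
                };

                Canvas.SetLeft(box, face.FaceBox.X * widthScale);
                Canvas.SetTop(box, face.FaceBox.Y * heightScale);
                facesCanvas.Children.Add(box);
            }
        }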
Example #23
        // get frame and analyze
        private async void Preview_FrameArrived(object sender, FrameEventArgs e)
        {
            if (!alarmOn)
            {
                return;
            }

            var bitmap = e.VideoFrame.SoftwareBitmap;

            if (bitmap == null)
            {
                return;
            }

            // FaceDetector requires Gray8 or Nv12
            var convertedBitmap = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Gray8);
            var faces           = await faceDetector.DetectFacesAsync(convertedBitmap);

            // if there is a face in the frame, evaluate the emotion
            var detectedFace = faces.FirstOrDefault();

            if (detectedFace != null)
            {
                var boundingBox = new Rect(detectedFace.FaceBox.X,
                                           detectedFace.FaceBox.Y,
                                           detectedFace.FaceBox.Width,
                                           detectedFace.FaceBox.Height);

                var croppedFace = Crop(convertedBitmap, boundingBox);

                emotion_ferplusInput input = new emotion_ferplusInput();
                input.Input338 = VideoFrame.CreateWithSoftwareBitmap(croppedFace);

                var emotionResults = await model.EvaluateAsync(input);

                // to get percentages, you'd need to run the output through a softmax function
                // we don't need percentages, we just need max value
                var emotionIndex = emotionResults.Plus692_Output_0.IndexOf(emotionResults.Plus692_Output_0.Max());

                if (emotionIndex == currentEmotionIndex)
                {
                    // if the user has been dooing the same emotion for over 3 seconds - turn off alarm
                    if (lastTimeEmotionMatched != null && DateTime.Now - lastTimeEmotionMatched >= TimeSpan.FromSeconds(3))
                    {
                        alarmOn = false;
                    }

                    if (lastTimeEmotionMatched == null)
                    {
                        lastTimeEmotionMatched = DateTime.Now;
                    }
                }
                else
                {
                    lastTimeEmotionMatched = null;
                }
            }
            else
            {
                // can't find face
                lastTimeEmotionMatched = null;
            }
        }
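
Crop is used above but not defined in this excerpt. The original appears to be synchronous; shown here instead is an async BitmapEncoder/BitmapDecoder-based variant (the name CropAsync is illustrative) that would need an await at the call site:

        private static async Task<SoftwareBitmap> CropAsync(SoftwareBitmap source, Rect bounds)
        {
            using var stream = new InMemoryRandomAccessStream();

            // Encode the source, cropping to the requested bounds during the flush.
            var encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.BmpEncoderId, stream);
            encoder.SetSoftwareBitmap(SoftwareBitmap.Convert(source, BitmapPixelFormat.Bgra8));
            encoder.BitmapTransform.Bounds = new BitmapBounds
            {
                X      = (uint)bounds.X,
                Y      = (uint)bounds.Y,
                Width  = (uint)bounds.Width,
                Height = (uint)bounds.Height
            };
            await encoder.FlushAsync();

            // Decode the cropped region back into a SoftwareBitmap. Bgra8/Premultiplied is broadly
            // supported; the generated model wrapper typically converts formats when the frame is bound.
            var decoder = await BitmapDecoder.CreateAsync(stream);
            return await decoder.GetSoftwareBitmapAsync(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);
        }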
        private async void Current_SoftwareBitmapFrameCaptured(object sender, SoftwareBitmapEventArgs e)
        {
            Debug.WriteLine("FrameCaptured");
            Debug.WriteLine($"Frame evaluation started {DateTime.Now}");
            if (e.SoftwareBitmap != null)
            {
                BitmapPixelFormat bpf = e.SoftwareBitmap.BitmapPixelFormat;

                var uncroppedBitmap = SoftwareBitmap.Convert(e.SoftwareBitmap, BitmapPixelFormat.Nv12);
                var faces           = await _faceDetector.DetectFacesAsync(uncroppedBitmap);

                if (faces.Count > 0)
                {
                    //crop image to focus on face portion
                    var        faceBox    = faces[0].FaceBox;
                    VideoFrame inputFrame = VideoFrame.CreateWithSoftwareBitmap(e.SoftwareBitmap);
                    VideoFrame tmp        = null;
                    tmp = new VideoFrame(e.SoftwareBitmap.BitmapPixelFormat, (int)(faceBox.Width + faceBox.Width % 2) - 2, (int)(faceBox.Height + faceBox.Height % 2) - 2);
                    await inputFrame.CopyToAsync(tmp, faceBox, null);

                    //crop image to fit model input requirements
                    VideoFrame croppedInputImage = new VideoFrame(BitmapPixelFormat.Gray8, (int)_inputImageDescriptor.Shape[3], (int)_inputImageDescriptor.Shape[2]);
                    var        srcBounds         = GetCropBounds(
                        tmp.SoftwareBitmap.PixelWidth,
                        tmp.SoftwareBitmap.PixelHeight,
                        croppedInputImage.SoftwareBitmap.PixelWidth,
                        croppedInputImage.SoftwareBitmap.PixelHeight);
                    await tmp.CopyToAsync(croppedInputImage, srcBounds, null);

                    ImageFeatureValue imageTensor = ImageFeatureValue.CreateFromVideoFrame(croppedInputImage);

                    _binding = new LearningModelBinding(_session);

                    TensorFloat  outputTensor        = TensorFloat.Create(_outputTensorDescriptor.Shape);
                    List <float> _outputVariableList = new List <float>();

                    // Bind inputs + outputs
                    _binding.Bind(_inputImageDescriptor.Name, imageTensor);
                    _binding.Bind(_outputTensorDescriptor.Name, outputTensor);

                    // Evaluate results (use a fresh GUID as the correlation id; "new Guid()" is always all zeros)
                    var results = await _session.EvaluateAsync(_binding, Guid.NewGuid().ToString());

                    Debug.WriteLine("ResultsEvaluated: " + results.ToString());

                    var outputTensorList = outputTensor.GetAsVectorView();
                    var resultsList      = new List <float>(outputTensorList.Count);
                    for (int i = 0; i < outputTensorList.Count; i++)
                    {
                        resultsList.Add(outputTensorList[i]);
                    }

                    var softMaxexOutputs = SoftMax(resultsList);

                    double maxProb  = 0;
                    int    maxIndex = 0;

                    // Comb through the evaluation results
                    for (int i = 0; i < Constants.POTENTIAL_EMOJI_NAME_LIST.Count(); i++)
                    {
                        // Record the dominant emotion probability & its location
                        if (softMaxexOutputs[i] > maxProb)
                        {
                            maxIndex = i;
                            maxProb  = softMaxexOutputs[i];
                        }

                        //for evaluations run on the EmotionPage, record info about single specific emotion of interest
                        if (CurrentEmojis._currentEmoji != null && Constants.POTENTIAL_EMOJI_NAME_LIST[i].Equals(CurrentEmojis._currentEmoji.Name))
                        {
                            SoftwareBitmap potentialBestPic;

                            try
                            {
                                potentialBestPic = SoftwareBitmap.Convert(uncroppedBitmap, BitmapPixelFormat.Bgra8);
                            }
                            catch (Exception ex)
                            {
                                Debug.WriteLine($"Error converting SoftwareBitmap. Details:{ex.Message}. Attempting to continue...");
                                return;
                            }

                            await Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                                                                                                        async() =>
                            {
                                // Give user immediate visual feedback by updating success gauge
                                ScoreUpdated?.Invoke(this, new EmotionPageGaugeScoreEventArgs()
                                {
                                    Score = softMaxexOutputs[i]
                                });

                                // Save original pic for each emotion no matter how bad it is (and record its associated info)
                                double bestScore = CurrentEmojis._emojis.Emojis[CurrentEmojis._currentEmojiIndex].BestScore;
                                if (softMaxexOutputs[i] > bestScore)
                                {
                                    CurrentEmojis._emojis.Emojis[CurrentEmojis._currentEmojiIndex].BestScore = softMaxexOutputs[i];

                                    var source = new SoftwareBitmapSource();

                                    await source.SetBitmapAsync(potentialBestPic);

                                    // Create format of potentialBestPic to be displayed in a gif later
                                    SoftwareBitmap tmpBitmap = potentialBestPic;
                                    WriteableBitmap wb       = new WriteableBitmap(tmpBitmap.PixelWidth, tmpBitmap.PixelHeight);
                                    tmpBitmap.CopyToBuffer(wb.PixelBuffer);

                                    CurrentEmojis._emojis.Emojis[CurrentEmojis._currentEmojiIndex].BestPic      = source;
                                    CurrentEmojis._emojis.Emojis[CurrentEmojis._currentEmojiIndex].ShowOopsIcon = false;
                                    CurrentEmojis._emojis.Emojis[CurrentEmojis._currentEmojiIndex].BestPicWB    = wb;
                                }
                            }
                                                                                                                        );
                        }
                    }

                    Debug.WriteLine($"Probability = {maxProb}, Threshold set to = {Constants.CLASSIFICATION_CERTAINTY_THRESHOLD}, Emotion = {Constants.POTENTIAL_EMOJI_NAME_LIST[maxIndex]}");

                    // For evaluations run on the MainPage, update the emoji carousel
                    if (maxProb >= Constants.CLASSIFICATION_CERTAINTY_THRESHOLD)
                    {
                        Debug.WriteLine("first page emoji should start to update");
                        IntelligenceServiceEmotionClassified?.Invoke(this, new ClassifiedEmojiEventArgs(CurrentEmojis._emojis.Emojis[maxIndex]));
                    }

                    // Dispose of resources
                    if (e.SoftwareBitmap != null)
                    {
                        e.SoftwareBitmap.Dispose();
                        e.SoftwareBitmap = null;
                    }
                }
            }
            IntelligenceServiceProcessingCompleted?.Invoke(this, null);
            Debug.WriteLine($"Frame evaluation finished {DateTime.Now}");
        }
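
        // The SoftwareBitmap-to-WriteableBitmap copy above is a recurring pattern; this is a minimal
        // sketch of a reusable helper (assumption: the source bitmap is already Bgra8, as produced by
        // the Convert call above; convert it first otherwise).
        private static WriteableBitmap ToWriteableBitmap(SoftwareBitmap bitmap)
        {
            // WriteableBitmap exposes a BGRA8 pixel buffer sized to the bitmap dimensions.
            var wb = new WriteableBitmap(bitmap.PixelWidth, bitmap.PixelHeight);
            bitmap.CopyToBuffer(wb.PixelBuffer);
            return wb;
        }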
        /// <summary>
        /// Render ObjectDetector skill results
        /// </summary>
        /// <param name="frame"></param>
        /// <param name="objectDetections"></param>
        /// <returns></returns>
        private async Task DisplayFrameAndResultAsync(VideoFrame frame, int CCTVIndex)
        {
            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, async() =>
            {
                try
                {
                    SoftwareBitmap savedBmp = null;
                    if (frame.SoftwareBitmap != null)
                    {
                        await m_processedBitmapSource[CCTVIndex].SetBitmapAsync(frame.SoftwareBitmap);
                        savedBmp = frame.SoftwareBitmap;
                    }
                    else
                    {
                        var bitmap = await SoftwareBitmap.CreateCopyFromSurfaceAsync(frame.Direct3DSurface, BitmapAlphaMode.Ignore);
                        await m_processedBitmapSource[CCTVIndex].SetBitmapAsync(bitmap);
                        savedBmp = bitmap;
                    }

                    // Retrieve and filter results if requested
                    IReadOnlyList <ObjectDetectorResult> objectDetections = m_binding.DetectedObjects;
                    if (m_objectKinds?.Count > 0)
                    {
                        objectDetections = objectDetections.Where(det => m_objectKinds.Contains(det.Kind)).ToList();
                    }
                    if (objectDetections != null)
                    {
                        // Update displayed results
                        m_bboxRenderer[CCTVIndex].Render(objectDetections);
                        bool PersonDetected = false;
                        int PersonCount     = 0;
                        var rects           = new List <Rect>();
                        foreach (var obj in objectDetections)
                        {
                            if (obj.Kind.ToString().ToLower() == "person")
                            {
                                PersonCount++;
                                PersonDetected = true;
                                rects.Add(obj.Rect);
                            }
                        }
                        if (PersonDetected)
                        {
                            bool KeepDistance = false;
                            if ((bool)ChkSocialDistancing.IsChecked)
                            {
                                //make sure there is more than 1 person
                                if (rects.Count > 1)
                                {
                                    var res = SocialDistanceHelpers.Detect(rects.ToArray());
                                    if (res.Result)
                                    {
                                        KeepDistance = true;
                                        m_bboxRenderer[CCTVIndex].DistanceLineRender(res.Lines);
                                        await speech.Read($"Please keep distance in {DataConfig.RoomName[CCTVIndex]}");
                                    }
                                }
                                else
                                {
                                    m_bboxRenderer[CCTVIndex].ClearLineDistance();
                                }
                            }
                            else
                            {
                                m_bboxRenderer[CCTVIndex].ClearLineDistance();
                            }
                            var msg = $"I saw {PersonCount} person in {DataConfig.RoomName[CCTVIndex]}";
                            if ((bool)ChkMode.IsChecked)
                            {
                                PlaySound(Sounds[Rnd.Next(Sounds.Count)]); // Random.Next's upper bound is exclusive, so every sound can be picked
                            }
                            else if (!KeepDistance)
                            {
                                await speech.Read(msg);
                            }
                            if ((bool)ChkPatrol.IsChecked)
                            {
                                await NotificationService.SendMail("Person Detected in BMSpace", msg, DataConfig.MailTo, DataConfig.MailFrom);
                                await NotificationService.SendSms(DataConfig.SmsTo, msg);
                            }
                            bool IsFaceDetected = false;
                            if ((bool)ChkDetectMask.IsChecked)
                            {
                                SoftwareBitmap softwareBitmapInput = frame.SoftwareBitmap;
                                // Retrieve a SoftwareBitmap to run face detection
                                if (softwareBitmapInput == null)
                                {
                                    if (frame.Direct3DSurface == null)
                                    {
                                        throw (new ArgumentNullException("An invalid input frame has been bound"));
                                    }
                                    softwareBitmapInput = await SoftwareBitmap.CreateCopyFromSurfaceAsync(frame.Direct3DSurface);
                                }
                                // We need to convert the image into a format that's compatible with FaceDetector.
                                // Gray8 should be a good type but verify it against FaceDetector’s supported formats.
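                                 // A minimal hedged sketch: instead of hard-coding Gray8, the supported set can be queried at run time, e.g.:
                                 //   var supported = FaceDetector.GetSupportedBitmapPixelFormats();
                                 //   var pixelFormat = supported.Contains(BitmapPixelFormat.Gray8) ? BitmapPixelFormat.Gray8 : supported.First();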
                                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                                if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                                {
                                    using (var detectorInput = SoftwareBitmap.Convert(softwareBitmapInput, InputPixelFormat))
                                    {
                                        // Run face detection and retrieve face detection result
                                        var faceDetectionResult = await m_faceDetector.DetectFacesAsync(detectorInput);

                                        // If a face is found, update face rectangle feature
                                        if (faceDetectionResult.Count > 0)
                                        {
                                            IsFaceDetected = true;
                                             // Retrieve the face bound and enlarge it by half its width on each side while clamping to the frame dimensions.
                                             // Note: BitmapBounds fields are unsigned, so guard the subtraction against underflow instead of relying on Math.Max.
                                             BitmapBounds faceBound = faceDetectionResult[0].FaceBox;
                                             var additionalOffset   = faceBound.Width / 2;
                                             faceBound.X            = faceBound.X > additionalOffset ? faceBound.X - additionalOffset : 0;
                                             faceBound.Y            = faceBound.Y > additionalOffset ? faceBound.Y - additionalOffset : 0;
                                             faceBound.Width        = (uint)Math.Min(faceBound.Width + 2 * additionalOffset, softwareBitmapInput.PixelWidth - faceBound.X);
                                             faceBound.Height       = (uint)Math.Min(faceBound.Height + 2 * additionalOffset, softwareBitmapInput.PixelHeight - faceBound.Y);
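                                             // Worked example (hypothetical numbers): a 100x100 face at (30, 40) gives an offset of 50,
                                             // so X and Y clamp to 0 and the crop grows toward 200x200, capped at the frame's PixelWidth/PixelHeight.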

                                            var maskdetect  = await MaskDetect.PredictImageAsync(frame);
                                            var noMaskCount = maskdetect.Where(x => x.TagName == "no-mask").Count();
                                            if (noMaskCount > 0)
                                            {
                                                if (!KeepDistance)
                                                {
                                                    await speech.Read($"please wear a face mask in {DataConfig.RoomName[CCTVIndex]}");
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                            if (!IsFaceDetected)
                            {
                                m_bboxRenderer[CCTVIndex].ClearMaskLabel();
                            }
                            // Save a snapshot to the Pictures library, at most once per capture interval
                            var TS = DateTime.Now - LastSaved[CCTVIndex];
                            if (savedBmp != null && TS.TotalSeconds > DataConfig.CaptureIntervalSecs && (bool)ChkCapture.IsChecked)
                            {
                                var myPictures = await Windows.Storage.StorageLibrary.GetLibraryAsync(Windows.Storage.KnownLibraryId.Pictures);
                                Windows.Storage.StorageFolder rootFolder    = myPictures.SaveFolder;
                                Windows.Storage.StorageFolder storageFolder = rootFolder;
                                var folderName = "cctv";
                                try
                                {
                                    storageFolder = await rootFolder.GetFolderAsync(folderName);
                                }
                                catch
                                {
                                    storageFolder = await rootFolder.CreateFolderAsync(folderName);
                                }
                                // Create the destination file; replace it if it already exists
                                Windows.Storage.StorageFile sampleFile =
                                    await storageFolder.CreateFileAsync($"cctv_{DateTime.Now.ToString("dd_MM_yyyy_HH_mm_ss")}_{CCTVIndex}.jpg",
                                                                        Windows.Storage.CreationCollisionOption.ReplaceExisting);
                                ImageHelpers.SaveSoftwareBitmapToFile(savedBmp, sampleFile);
                                LastSaved[CCTVIndex] = DateTime.Now;
                            }
                        }
                    }

                    // Update the displayed performance text
                    StatusLbl.Text = $"bind: {m_bindTime.ToString("F2")}ms, eval: {m_evalTime.ToString("F2")}ms";
                }
                catch (TaskCanceledException)
                {
                    // no-op: we expect this exception when we change media sources
                    // and can safely ignore/continue
                }
                catch (Exception ex)
                {
                    NotifyUser($"Exception while rendering results: {ex.Message}");
                }
            });
        }
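
        // ImageHelpers.SaveSoftwareBitmapToFile is referenced above but not shown in this snippet; the
        // following is a minimal hedged sketch of what such a helper might look like (an assumption, not
        // the project's actual implementation), encoding the bitmap into the target file as JPEG.
        private static async Task SaveSoftwareBitmapToFileSketchAsync(SoftwareBitmap bitmap, Windows.Storage.StorageFile file)
        {
            using (var stream = await file.OpenAsync(Windows.Storage.FileAccessMode.ReadWrite))
            {
                var encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);

                // BitmapEncoder expects Bgra8; convert if the source bitmap uses another pixel format.
                var toEncode = bitmap.BitmapPixelFormat == BitmapPixelFormat.Bgra8
                    ? bitmap
                    : SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);

                encoder.SetSoftwareBitmap(toEncode);
                await encoder.FlushAsync();
            }
        }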
Exemple #26
0
        private async void OpenImg_Click(object sender, RoutedEventArgs e)
        {
            IList <DetectedFace> faces         = null;
            SoftwareBitmap       detectorInput = null;
            WriteableBitmap      displaySource = null;

            try
            {
                FileOpenPicker photoPicker = new FileOpenPicker();
                photoPicker.ViewMode = PickerViewMode.Thumbnail;
                photoPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary;
                photoPicker.FileTypeFilter.Add(".jpg");
                photoPicker.FileTypeFilter.Add(".jpeg");
                photoPicker.FileTypeFilter.Add(".png");
                photoPicker.FileTypeFilter.Add(".bmp");
                photoFile = await photoPicker.PickSingleFileAsync();

                if (photoFile == null)
                {
                    return;
                }

                using (IRandomAccessStream fileStream = await photoFile.OpenAsync(Windows.Storage.FileAccessMode.Read))
                {
                    BitmapImage bitmapImage = new BitmapImage();
                    bitmapImage.SetSource(fileStream);
                    sourceImg.Source = bitmapImage;

                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

                    BitmapTransform transform = this.ComputeScalingTransformForSourceImage(decoder);

                    using (SoftwareBitmap originalBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage))
                    {
                        // FaceDetector works on Gray8 bitmaps; confirm support before converting
                        const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                        if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                        {
                            using (detectorInput = SoftwareBitmap.Convert(originalBitmap, InputPixelFormat))
                            {
                                // Create a WriteableBitmap for our visualization display; copy the original bitmap pixels to displaySource's buffer.
                                displaySource = new WriteableBitmap(originalBitmap.PixelWidth, originalBitmap.PixelHeight);
                                originalBitmap.CopyToBuffer(displaySource.PixelBuffer);
                                FaceDetector detector = await FaceDetector.CreateAsync();  // NOTE: for repeated detections, reuse a single FaceDetector instance rather than creating one per image

                                faces = await detector.DetectFacesAsync(detectorInput);

                                // Create our display using the available image and face results.
                                this.SetupVisualization(displaySource, faces);
                            }
                        }
                        else
                        {
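                            // Gray8 is unexpectedly unsupported on this device; leave the visualization untouched.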
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                Debug.WriteLine($"Error detecting faces in the selected image: {ex.Message}");
                this.ClearVisualization();
            }
        }
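
        // ComputeScalingTransformForSourceImage is referenced above but not shown in this snippet; a
        // minimal hedged sketch of the usual approach (an assumption, not necessarily this project's
        // exact helper): cap the decoded height so face detection runs on a reasonably sized image.
        private BitmapTransform ComputeScalingTransformForSourceImageSketch(BitmapDecoder sourceDecoder)
        {
            const float sourceImageHeightLimit = 1280;
            BitmapTransform transform = new BitmapTransform();

            if (sourceDecoder.PixelHeight > sourceImageHeightLimit)
            {
                // Scale both dimensions by the same factor to preserve the aspect ratio.
                float scalingFactor = sourceImageHeightLimit / sourceDecoder.PixelHeight;
                transform.ScaledWidth  = (uint)Math.Floor(sourceDecoder.PixelWidth * scalingFactor);
                transform.ScaledHeight = (uint)Math.Floor(sourceDecoder.PixelHeight * scalingFactor);
            }

            return transform;
        }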
Exemple #27
0
        private async Task InitMirrorAsync(MediaCapture mediaCapture, CancellationToken cancellationToken)
        {
            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal,
                                      async() => { await Geolocator.RequestAccessAsync(); });

            FaceServiceClient client       = new FaceServiceClient(OxfordApiKey);
            FaceDetector      faceDetector = FaceDetector.IsSupported ? await FaceDetector.CreateAsync() : null;

            while (!cancellationToken.IsCancellationRequested)
            {
                Stream photoStream;
                try
                {
                    photoStream = await GetPhotoStreamAsync(mediaCapture);
                }
                catch (Exception ex)
                {
                    HockeyClient.Current.TrackEvent("InitMirrorAsync (GetPhotoStreamAsync) - Exception",
                                                    new Dictionary <string, string> {
                        { "Message", ex.Message }, { "Stack", ex.StackTrace }
                    });
                    continue;
                }
                if (FaceDetector.IsSupported && faceDetector != null)
                {
                    SoftwareBitmap image = await ConvertImageForFaceDetection(photoStream.AsRandomAccessStream());

                    IList <DetectedFace> localFace;
                    try
                    {
                        localFace = await faceDetector.DetectFacesAsync(image);
                    }
                    catch (Exception ex)
                    {
                        HockeyClient.Current.TrackEvent("InitMirrorAsync (DetectFacesAsync Locally) - Exception",
                                                        new Dictionary <string, string> {
                            { "Message", ex.Message }, { "Stack", ex.StackTrace }
                        });
                        continue;
                    }

                    if (!localFace.Any())
                    {
                        await ClearScrean();

                        continue;
                    }
                    HockeyClient.Current.TrackEvent("Face Detected Locally");
                }
                try
                {
                    await ShowPersonalizedInformation(client, photoStream);
                }
                catch (Exception ex)
                {
                    HockeyClient.Current.TrackEvent("InitMirrorAsync (ShowPersonalizedInformation) - Exception",
                                                    new Dictionary <string, string> {
                        { "Message", ex.Message }, { "Stack", ex.StackTrace }
                    });
                }
            }
        }
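
        // ConvertImageForFaceDetection is used above but not shown in this snippet; a minimal hedged
        // sketch of what it might do (an assumption, not the project's actual implementation): decode
        // the captured photo stream and convert it to a pixel format FaceDetector accepts.
        private static async Task<SoftwareBitmap> ConvertImageForFaceDetectionSketch(IRandomAccessStream photoStream)
        {
            BitmapDecoder decoder = await BitmapDecoder.CreateAsync(photoStream);

            using (SoftwareBitmap decoded = await decoder.GetSoftwareBitmapAsync())
            {
                // Gray8 is widely supported; check FaceDetector.IsBitmapPixelFormatSupported if unsure.
                return SoftwareBitmap.Convert(decoded, BitmapPixelFormat.Gray8);
            }
        }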