DirectXPixelFormat GetDirectXPixelFormatUsedForBitmapPixelFormat(BitmapPixelFormat format)
        {
            //
            // Although BitmapPixelFormat looks like it corresponds directly to DirectXPixelFormat,
            // it turns out that some of the types are generally intended to be used differently.  Win2D
            // provides these conversions.
            //
            switch (format)
            {
            // The BitmapPixelFormat entries for these types use the plain UInt versions.  However,
            // they were really meant to use the UIntNormalized types, so Win2D treats them as such.
            case BitmapPixelFormat.Rgba16: return DirectXPixelFormat.R16G16B16A16UIntNormalized;

            case BitmapPixelFormat.Rgba8: return DirectXPixelFormat.R8G8B8A8UIntNormalized;

            // The BitmapPixelFormat entry for Gray8 suggests R8UInt.  However, it's intended to be
            // used as UIntNormalized, plus D2D only supports A8UIntNormalized, so that's what is
            // used here.
            case BitmapPixelFormat.Gray8: return DirectXPixelFormat.A8UIntNormalized;

            // Other pixel formats are directly castable to the DirectXPixelFormat.
            default: return (DirectXPixelFormat)format;
            }
        }
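        // A minimal usage sketch, assuming Win2D's CanvasBitmap.CreateFromBytes overload
        // that takes a DirectXPixelFormat (CanvasBitmap/CanvasDevice come from
        // Microsoft.Graphics.Canvas): the mapping above lets raw SoftwareBitmap pixel
        // data back a Direct2D surface with the format D2D actually expects.
        CanvasBitmap CreateCanvasBitmapFromPixels(CanvasDevice device, byte[] pixels,
                                                  int width, int height,
                                                  BitmapPixelFormat sourceFormat)
        {
            DirectXPixelFormat dxFormat = GetDirectXPixelFormatUsedForBitmapPixelFormat(sourceFormat);
            return CanvasBitmap.CreateFromBytes(device, pixels, width, height, dxFormat);
        }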
        private async Task<bool> AreFacesStillPresent()
        {
            try
            {
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (var previewFrame = new VideoFrame(InputPixelFormat, (int)videoProperties.Width, (int)videoProperties.Height))
                {
                    await mediaCapture.GetPreviewFrameAsync(previewFrame);

                    var faces = await faceTracker.ProcessNextFrameAsync(previewFrame);

                    return faces.Any();
                }
            }
            catch (Exception ex)
            {
                LogStatusMessage("Unable to process current frame: " + ex.ToString(), StatusSeverity.Error, false);
                return false;  // On failure, report no faces (TODO: confirm this fallback is the desired behavior)
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
Example #3
        public async static Task SaveImageAsync(BitmapSource image, IRandomAccessStream file)
        {
            WriteableBitmap writeable = image as WriteableBitmap;

            if (writeable == null)
            {
                return;
            }

            // Create the encoder only once we know there is pixel data to write.
            BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.PngEncoderId, file);

            // remember the format of this image
            BitmapPixelFormat format = PixelBufferObject.GetBitmapPixelFormat(writeable);
            BitmapAlphaMode   mode   = PixelBufferObject.GetBitmapAlphaMode(writeable);

            IBuffer    buffer     = writeable.PixelBuffer;
            DataReader dataReader = DataReader.FromBuffer(buffer);

            byte[] pixels = new byte[buffer.Length];
            dataReader.ReadBytes(pixels);

            encoder.SetPixelData(format, mode, (uint)image.PixelWidth, (uint)image.PixelHeight, 96, 96, pixels);
            await encoder.FlushAsync();
        }
        private async void CaptureButton_Click(object sender, RoutedEventArgs e)
        {
            if (mediaCapture == null)
            {
                return;
            }

            const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;

            using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
            {
                await this.mediaCapture.GetPreviewFrameAsync(previewFrame);

                var softwareBitmap = previewFrame?.SoftwareBitmap;

                if (softwareBitmap != null)
                {
                    if (softwareBitmap.BitmapPixelFormat != BitmapPixelFormat.Bgra8 || softwareBitmap.BitmapAlphaMode == BitmapAlphaMode.Straight)
                    {
                        softwareBitmap = SoftwareBitmap.Convert(softwareBitmap, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);
                    }

                    var savePicker = new FileSavePicker
                    {
                        SuggestedStartLocation = PickerLocationId.Desktop
                    };
                    savePicker.FileTypeChoices.Add("Jpg Image", new[] { ".jpg" });
                    savePicker.SuggestedFileName = "example.jpg";
                    StorageFile sFile = await savePicker.PickSaveFileAsync();

                    await WriteToStorageFile(softwareBitmap, sFile);

                    await SetCapturedImage(sFile);
                }
            }
        }
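        // WriteToStorageFile is referenced above but not shown. A minimal sketch of
        // such a helper, assuming a JPEG target to match the picker's ".jpg" choice:
        private static async Task WriteToStorageFile(SoftwareBitmap bitmap, StorageFile file)
        {
            if (file == null)
            {
                return; // the user cancelled the save picker
            }

            using (IRandomAccessStream stream = await file.OpenAsync(FileAccessMode.ReadWrite))
            {
                BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);
                encoder.SetSoftwareBitmap(bitmap);
                await encoder.FlushAsync();
            }
        }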
Example #5
        private async void StartOnLineDetection()
        {
            try
            {
                if (_mediaCapture != null)
                {
                    // For online detection we need the frame in a different format (Bgra8)
                    const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Bgra8;
                    using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)_videoWidth, (int)_videoHeight))
                    {
                        await _mediaCapture.GetPreviewFrameAsync(previewFrame);

                        _faceMetaData?.DetectFaces(previewFrame.SoftwareBitmap);
                    }
                }
            }
            catch (Exception ex)
            {
                var ignored = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                {
                    Debug.WriteLine("StartOnLineDetection failed: " + ex.Message);
                });
            }
        }
        private async void Current_SoftwareBitmapFrameCaptured(object sender, SoftwareBitmapEventArgs e)
        {
            Debug.WriteLine("FrameCaptured");
            Debug.WriteLine($"Frame evaluation started {DateTime.Now}");
            if (e.SoftwareBitmap != null)
            {
                BitmapPixelFormat bpf = e.SoftwareBitmap.BitmapPixelFormat;

                var uncroppedBitmap = SoftwareBitmap.Convert(e.SoftwareBitmap, BitmapPixelFormat.Nv12);
                var faces           = await _faceDetector.DetectFacesAsync(uncroppedBitmap);

                if (faces.Count > 0)
                {
                    //crop image to focus on face portion
                    var        faceBox    = faces[0].FaceBox;
                    VideoFrame inputFrame = VideoFrame.CreateWithSoftwareBitmap(e.SoftwareBitmap);
                    VideoFrame tmp = new VideoFrame(e.SoftwareBitmap.BitmapPixelFormat, (int)(faceBox.Width + faceBox.Width % 2) - 2, (int)(faceBox.Height + faceBox.Height % 2) - 2);
                    await inputFrame.CopyToAsync(tmp, faceBox, null);

                    //crop image to fit model input requirements
                    VideoFrame croppedInputImage = new VideoFrame(BitmapPixelFormat.Gray8, (int)_inputImageDescriptor.Shape[3], (int)_inputImageDescriptor.Shape[2]);
                    var        srcBounds         = GetCropBounds(
                        tmp.SoftwareBitmap.PixelWidth,
                        tmp.SoftwareBitmap.PixelHeight,
                        croppedInputImage.SoftwareBitmap.PixelWidth,
                        croppedInputImage.SoftwareBitmap.PixelHeight);
                    await tmp.CopyToAsync(croppedInputImage, srcBounds, null);

                    ImageFeatureValue imageTensor = ImageFeatureValue.CreateFromVideoFrame(croppedInputImage);

                    _binding = new LearningModelBinding(_session);

                    TensorFloat outputTensor = TensorFloat.Create(_outputTensorDescriptor.Shape);

                    // Bind inputs + outputs
                    _binding.Bind(_inputImageDescriptor.Name, imageTensor);
                    _binding.Bind(_outputTensorDescriptor.Name, outputTensor);

                    // Evaluate results, using a unique id to correlate this evaluation
                    var results = await _session.EvaluateAsync(_binding, Guid.NewGuid().ToString());

                    Debug.WriteLine("ResultsEvaluated: " + results.ToString());

                    var outputTensorList = outputTensor.GetAsVectorView();
                    var resultsList      = new List <float>(outputTensorList.Count);
                    for (int i = 0; i < outputTensorList.Count; i++)
                    {
                        resultsList.Add(outputTensorList[i]);
                    }

                    var softMaxexOutputs = SoftMax(resultsList);

                    double maxProb  = 0;
                    int    maxIndex = 0;

                    // Comb through the evaluation results
                    for (int i = 0; i < Constants.POTENTIAL_EMOJI_NAME_LIST.Count(); i++)
                    {
                        // Record the dominant emotion probability & its location
                        if (softMaxexOutputs[i] > maxProb)
                        {
                            maxIndex = i;
                            maxProb  = softMaxexOutputs[i];
                        }
                    }

                    Debug.WriteLine($"Probability = {maxProb}, Threshold set to = {Constants.CLASSIFICATION_CERTAINTY_THRESHOLD}, Emotion = {Constants.POTENTIAL_EMOJI_NAME_LIST[maxIndex]}");

                    // For evaluations run on the MainPage, update the emoji carousel
                    if (maxProb >= Constants.CLASSIFICATION_CERTAINTY_THRESHOLD)
                    {
                        Debug.WriteLine("first page emoji should start to update");
                        IntelligenceServiceEmotionClassified?.Invoke(this, new ClassifiedEmojiEventArgs(CurrentEmojis._emojis.Emojis[maxIndex]));
                    }

                    // Dispose of resources
                    if (e.SoftwareBitmap != null)
                    {
                        e.SoftwareBitmap.Dispose();
                        e.SoftwareBitmap = null;
                    }
                }
            }
            IntelligenceServiceProcessingCompleted?.Invoke(this, null);
            Debug.WriteLine($"Frame evaluation finished {DateTime.Now}");
        }
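        // SoftMax is called above but not defined in this listing. A standard
        // implementation consistent with its usage (List<float> in, indexable
        // probabilities out) might look like this; subtracting the max keeps
        // Math.Exp from overflowing for large scores.
        private static List<float> SoftMax(List<float> inputs)
        {
            float max = inputs.Max();
            List<float> exps = inputs.Select(v => (float)Math.Exp(v - max)).ToList();
            float sum = exps.Sum();
            return exps.Select(v => v / sum).ToList();
        }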
Example #7
        // Start the process
        private async void button_Click(object sender, RoutedEventArgs e)
        {
            FolderPicker folderPicker = new FolderPicker();

            folderPicker.FileTypeFilter.Add(".jpg");
            folderPicker.FileTypeFilter.Add(".jpeg");
            folderPicker.FileTypeFilter.Add(".png");
            folderPicker.FileTypeFilter.Add(".bmp");
            folderPicker.ViewMode = PickerViewMode.Thumbnail;

            StorageFolder photoFolder = await folderPicker.PickSingleFolderAsync();

            if (photoFolder == null)
            {
                return;
            }

            var files = await photoFolder.GetFilesAsync();

            List<Scores> emotionScores = new List<Scores>();

            int[] num = new int[files.Count];

            for (int i = 0; i < files.Count; i++)
            {
                IRandomAccessStream fileStream = await files[i].OpenAsync(FileAccessMode.Read);
                BitmapDecoder       decoder    = await BitmapDecoder.CreateAsync(fileStream);

                BitmapTransform transform = new BitmapTransform();
                const float     sourceImageHeightLimit = 1280;

                if (decoder.PixelHeight > sourceImageHeightLimit)
                {
                    float scalingFactor = (float)sourceImageHeightLimit / (float)decoder.PixelHeight;
                    transform.ScaledWidth  = (uint)Math.Floor(decoder.PixelWidth * scalingFactor);
                    transform.ScaledHeight = (uint)Math.Floor(decoder.PixelHeight * scalingFactor);
                }

                SoftwareBitmap sourceBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Premultiplied, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage);

                const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Gray8;

                SoftwareBitmap convertedBitmap;

                if (sourceBitmap.BitmapPixelFormat != faceDetectionPixelFormat)
                {
                    convertedBitmap = SoftwareBitmap.Convert(sourceBitmap, faceDetectionPixelFormat);
                }
                else
                {
                    convertedBitmap = sourceBitmap;
                }

                if (faceDetector == null)
                {
                    faceDetector = await FaceDetector.CreateAsync();
                }

                detectedFaces = await faceDetector.DetectFacesAsync(convertedBitmap);

                Scores sc = null;

                if (detectedFaces.Count > 0)
                {
                    num[i] = detectedFaces.Count;
                    FaceRectangle rectID = await UploadAndDetectFaces(files[i].Path);

                    if (rectID != null)
                    {
                        sc = await EstimateEmotions(files[i].Path, rectID);
                    }
                }

                emotionScores.Add(sc);
                if (sc != null)
                {
                    Items.Add(new DataItem(i.ToString(), (int)(sc.Happiness * 100)));
                }

                sourceBitmap.Dispose();
                fileStream.Dispose();
                convertedBitmap.Dispose();
            }
        }
Example #9
        public async Task<BitmapImage> CreateImageFromBuffer(int width, int height, Byte[] pixData, BitmapPixelFormat format = BitmapPixelFormat.Bgra8)
        {
            var image = new BitmapImage();

            using (var stream = new InMemoryRandomAccessStream())
            {
                var enc = await BitmapEncoder.CreateAsync(BitmapEncoder.BmpEncoderId, stream);

                // SetPixelData's fifth and sixth arguments are horizontal and vertical DPI,
                // not pixel dimensions, so pass a standard 96 DPI here.
                enc.SetPixelData(format, BitmapAlphaMode.Ignore, (uint)width, (uint)height, 96.0, 96.0, pixData);

                await enc.FlushAsync();

                await image.SetSourceAsync(stream);
            }
            return image;
        }
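        // Usage sketch: build a small opaque red Bgra8 buffer and display it.
        // PreviewImage is an assumed XAML Image control, not part of the sample.
        public async Task ShowSolidColorAsync()
        {
            const int width = 2, height = 2;
            byte[] pixels = new byte[width * height * 4]; // Bgra8: 4 bytes per pixel
            for (int i = 0; i < pixels.Length; i += 4)
            {
                pixels[i + 0] = 0;   // B
                pixels[i + 1] = 0;   // G
                pixels[i + 2] = 255; // R
                pixels[i + 3] = 255; // A
            }

            BitmapImage image = await CreateImageFromBuffer(width, height, pixels);
            PreviewImage.Source = image;
        }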
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (captureManager.CameraStreamState != Windows.Media.Devices.CameraStreamState.Streaming ||
                !frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try
            {
                IEnumerable<Windows.Media.FaceAnalysis.DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.captureManager.GetPreviewFrameAsync(previewFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);

                        if (this.FilterOutSmallFaces)
                        {
                            // We filter out small faces here.
                            faces = faces.Where(f => CoreUtil.IsFaceBigEnoughForDetection((int)f.FaceBox.Height, (int)this.videoProperties.Height));
                        }

                        this.NumFacesOnLastFrame = faces.Count();

                        if (this.EnableAutoCaptureMode)
                        {
                            this.UpdateAutoCaptureState(faces);
                        }

                        if (this.ShowFaceTracking)
                        {
                            // Create our visualization using the frame dimensions and face results but run it on the UI thread.
                            var previewFrameSize = new Windows.Foundation.Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                            var ignored          = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                            {
                                this.ShowFaceTrackingVisualization(previewFrameSize, faces);
                            });
                        }

                        if (CameraFrameProcessor != null)
                        {
                            await CameraFrameProcessor.ProcessFrame(previewFrame, this.FaceTrackingVisualizationCanvas);
                        }
                    }
                }
            }
            catch (Exception)
            {
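                // Ignore per-frame failures (e.g. transient camera errors);
                // the next timer tick will try again.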
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
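        // CoreUtil.IsFaceBigEnoughForDetection is external to this listing. One
        // plausible implementation (the 10% threshold is an assumption) treats a
        // face as detectable when its height is a minimum fraction of the frame:
        public static class CoreUtil
        {
            private const double MinFaceToFrameHeightRatio = 0.1; // assumed threshold

            public static bool IsFaceBigEnoughForDetection(int faceHeight, int frameHeight)
            {
                return frameHeight > 0 &&
                       (double)faceHeight / frameHeight >= MinFaceToFrameHeightRatio;
            }
        }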
Example #11
        public static async Task CropandScaleAsync(StorageFile source, StorageFile dest, Point startPoint, Size size, double m_scaleFactor)
        {
            uint startPointX = (uint)Math.Floor(startPoint.X);
            uint startPointY = (uint)Math.Floor(startPoint.Y);
            uint height      = (uint)Math.Floor(size.Height);
            uint width       = (uint)Math.Floor(size.Width);

            using (IRandomAccessStream sourceStream = await source.OpenReadAsync(),
                   destStream = await dest.OpenAsync(FileAccessMode.ReadWrite))
            {
                BitmapDecoder decoder = await BitmapDecoder.CreateAsync(sourceStream);

                var m_displayHeightNonScaled = decoder.OrientedPixelHeight;
                var m_displayWidthNonScaled  = decoder.OrientedPixelWidth;

                // Use the native (no orientation applied) image dimensions because we want to handle
                // orientation ourselves.
                BitmapTransform transform = new BitmapTransform();
                BitmapBounds    bounds    = new BitmapBounds()
                {
                    X      = (uint)(startPointX * m_scaleFactor),
                    Y      = (uint)(startPointY * m_scaleFactor),
                    Height = (uint)(height * m_scaleFactor),
                    Width  = (uint)(width * m_scaleFactor)
                };
                transform.Bounds = bounds;

                // Scaling occurs before flip/rotation, therefore use the original dimensions
                // (no orientation applied) as parameters for scaling.
                transform.ScaledHeight = (uint)(decoder.PixelHeight * m_scaleFactor);
                transform.ScaledWidth  = (uint)(decoder.PixelWidth * m_scaleFactor);
                transform.Rotation     = BitmapRotation.None;

                // Fant is a relatively high quality interpolation mode.
                transform.InterpolationMode = BitmapInterpolationMode.Fant;
                BitmapPixelFormat format = decoder.BitmapPixelFormat;
                BitmapAlphaMode   alpha  = decoder.BitmapAlphaMode;

                // Get the pixel data from the decoder, applying the crop and scale transform.
                PixelDataProvider pixelProvider = await decoder.GetPixelDataAsync(
                    format,
                    alpha,
                    transform,
                    ExifOrientationMode.IgnoreExifOrientation,
                    ColorManagementMode.ColorManageToSRgb
                    );

                byte[] pixels = pixelProvider.DetachPixelData();


                Guid encoderID = Guid.Empty;

                switch (dest.FileType.ToLower())
                {
                case ".png":
                    encoderID = BitmapEncoder.PngEncoderId;
                    break;

                case ".bmp":
                    encoderID = BitmapEncoder.BmpEncoderId;
                    break;

                default:
                    encoderID = BitmapEncoder.JpegEncoderId;
                    break;
                }

                // Write the pixel data onto the encoder. Note that we can't simply use the
                // BitmapTransform.ScaledWidth and ScaledHeight members as the user may have
                // requested a rotation (which is applied after scaling).
                var encoder = await BitmapEncoder.CreateAsync(encoderID, destStream);

                encoder.SetPixelData(
                    format,
                    alpha,
                    (bounds.Width),
                    (bounds.Height),
                    decoder.DpiX,
                    decoder.DpiY,
                    pixels
                    );

                await encoder.FlushAsync();
            }
        }
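        // Usage sketch: crop a 200x200 region starting at (50, 50) from a source
        // file at native scale. The two StorageFiles are assumed to come from
        // pickers or the app's local folder.
        public static async Task CropExampleAsync(StorageFile source, StorageFile dest)
        {
            await CropandScaleAsync(source, dest, new Point(50, 50), new Size(200, 200), 1.0);
        }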
Example #12
        // Return a list of software bitmaps (at most maxFrames), selected by the user and decoded from their streams
        public async Task<List<SoftwareBitmap>> GetSelectedSoftwareBitmaps()
        {
            // <SnippetGetMaxLLFFrames>
            // Query the supported max number of input bitmap frames for Low Light Fusion
            int maxFrames = LowLightFusion.MaxSupportedFrameCount;

            // The bitmap frames to perform Low Light Fusion on.
            var framelist = new List <SoftwareBitmap>(maxFrames);
            // </SnippetGetMaxLLFFrames>



            // <SnippetGetFrames>
            var fileOpenPicker = new FileOpenPicker();

            fileOpenPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary;
            fileOpenPicker.FileTypeFilter.Add(".png");
            fileOpenPicker.ViewMode = PickerViewMode.Thumbnail;

            var inputFiles = await fileOpenPicker.PickMultipleFilesAsync();

            if (inputFiles == null || inputFiles.Count == 0)
            {
                // The user cancelled the picking operation
                return null;
            }
            if (inputFiles.Count > maxFrames)
            {
                Debug.WriteLine("You can only choose up to {0} image(s) to input.", maxFrames);
            }
            // </SnippetGetFrames>


            // <SnippetDecodeFrames>
            SoftwareBitmap softwareBitmap;

            // Decode the images into bitmaps
            for (int i = 0; i < inputFiles.Count; i++)
            {
                using (IRandomAccessStream stream = await inputFiles[i].OpenAsync(FileAccessMode.Read))
                {
                    // Create the decoder from the stream
                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(stream);

                    // Get the SoftwareBitmap representation of the file
                    softwareBitmap = await decoder.GetSoftwareBitmapAsync();

                    framelist.Add(softwareBitmap);
                }
            }

            // Ensure that the selected bitmap frames have an acceptable pixel format for Low Light Fusion.
            // For this sample, we'll use the pixel format at index 0.
            IReadOnlyList <BitmapPixelFormat> formats = LowLightFusion.SupportedBitmapPixelFormats;
            BitmapPixelFormat llfFormat = formats[0];

            for (int i = 0; i < framelist.Count; i++)
            {
                if (framelist[i].BitmapPixelFormat != llfFormat)
                {
                    // Convert the pixel format
                    framelist[i] = SoftwareBitmap.Convert(framelist[i], llfFormat);
                }
            }
            return framelist;
            // </SnippetDecodeFrames>
        }
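        // Follow-up sketch: once every frame shares a supported pixel format, the
        // set can be fused. LowLightFusion.FuseAsync and LowLightFusionResult.Frame
        // are the Windows.Media.Core APIs this sample appears to target.
        public async Task<SoftwareBitmap> FuseSelectedFramesAsync()
        {
            List<SoftwareBitmap> frames = await GetSelectedSoftwareBitmaps();
            if (frames == null || frames.Count == 0)
            {
                return null;
            }

            LowLightFusionResult result = await LowLightFusion.FuseAsync(frames);
            return result.Frame;
        }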
Example #13
        /// <summary>
        /// Render ObjectDetector skill results for one CCTV stream
        /// </summary>
        /// <param name="frame">Frame whose detections are displayed</param>
        /// <param name="CCTVIndex">Index of the CCTV stream to render into</param>
        /// <returns></returns>
        private async Task DisplayFrameAndResultAsync(VideoFrame frame, int CCTVIndex)
        {
            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, async () =>
            {
                try
                {
                    SoftwareBitmap savedBmp = null;
                    if (frame.SoftwareBitmap != null)
                    {
                        await m_processedBitmapSource[CCTVIndex].SetBitmapAsync(frame.SoftwareBitmap);
                        savedBmp = frame.SoftwareBitmap;
                    }
                    else
                    {
                        var bitmap = await SoftwareBitmap.CreateCopyFromSurfaceAsync(frame.Direct3DSurface, BitmapAlphaMode.Ignore);
                        await m_processedBitmapSource[CCTVIndex].SetBitmapAsync(bitmap);
                        savedBmp = bitmap;
                    }

                    // Retrieve and filter results if requested
                    IReadOnlyList <ObjectDetectorResult> objectDetections = m_binding.DetectedObjects;
                    if (m_objectKinds?.Count > 0)
                    {
                        objectDetections = objectDetections.Where(det => m_objectKinds.Contains(det.Kind)).ToList();
                    }
                    if (objectDetections != null)
                    {
                        // Update displayed results
                        m_bboxRenderer[CCTVIndex].Render(objectDetections);
                        bool PersonDetected = false;
                        int PersonCount     = 0;
                        var rects           = new List <Rect>();
                        foreach (var obj in objectDetections)
                        {
                            if (obj.Kind.ToString().ToLower() == "person")
                            {
                                PersonCount++;
                                PersonDetected = true;
                                rects.Add(obj.Rect);
                            }
                        }
                        if (PersonDetected)
                        {
                            bool KeepDistance = false;
                            if ((bool)ChkSocialDistancing.IsChecked)
                            {
                                //make sure there is more than 1 person
                                if (rects.Count > 1)
                                {
                                    var res = SocialDistanceHelpers.Detect(rects.ToArray());
                                    if (res.Result)
                                    {
                                        KeepDistance = true;
                                        m_bboxRenderer[CCTVIndex].DistanceLineRender(res.Lines);
                                        await speech.Read($"Please keep distance in {DataConfig.RoomName[CCTVIndex]}");
                                    }
                                }
                                else
                                {
                                    m_bboxRenderer[CCTVIndex].ClearLineDistance();
                                }
                            }
                            else
                            {
                                m_bboxRenderer[CCTVIndex].ClearLineDistance();
                            }
                            var msg = $"I saw {PersonCount} person in {DataConfig.RoomName[CCTVIndex]}";
                            if ((bool)ChkMode.IsChecked)
                            {
                                PlaySound(Sounds[Rnd.Next(Sounds.Count)]); // Next's upper bound is exclusive
                            }
                            else if (!KeepDistance)
                            {
                                await speech.Read(msg);
                            }
                            if ((bool)ChkPatrol.IsChecked)
                            {
                                await NotificationService.SendMail("Person Detected in BMSpace", msg, DataConfig.MailTo, DataConfig.MailFrom);
                                await NotificationService.SendSms(DataConfig.SmsTo, msg);
                            }
                            bool IsFaceDetected = false;
                            if ((bool)ChkDetectMask.IsChecked)
                            {
                                SoftwareBitmap softwareBitmapInput = frame.SoftwareBitmap;
                                // Retrieve a SoftwareBitmap to run face detection
                                if (softwareBitmapInput == null)
                                {
                                    if (frame.Direct3DSurface == null)
                                    {
                                        throw new ArgumentNullException("An invalid input frame has been bound");
                                    }
                                    softwareBitmapInput = await SoftwareBitmap.CreateCopyFromSurfaceAsync(frame.Direct3DSurface);
                                }
                                // We need to convert the image into a format that's compatible with FaceDetector.
                                // Gray8 should be a good type but verify it against FaceDetector’s supported formats.
                                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                                if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                                {
                                    using (var detectorInput = SoftwareBitmap.Convert(softwareBitmapInput, InputPixelFormat))
                                    {
                                        // Run face detection and retrieve face detection result
                                        var faceDetectionResult = await m_faceDetector.DetectFacesAsync(detectorInput);

                                        // If a face is found, update face rectangle feature
                                        if (faceDetectionResult.Count > 0)
                                        {
                                            IsFaceDetected = true;
                                            // Retrieve the face bound and enlarge it by a factor of 1.5x while also ensuring clamping to frame dimensions
                                            BitmapBounds faceBound = faceDetectionResult[0].FaceBox;
                                            var additionalOffset   = faceBound.Width / 2;
                                            faceBound.X            = Math.Max(0, faceBound.X - additionalOffset);
                                            faceBound.Y            = Math.Max(0, faceBound.Y - additionalOffset);
                                            faceBound.Width        = (uint)Math.Min(faceBound.Width + 2 * additionalOffset, softwareBitmapInput.PixelWidth - faceBound.X);
                                            faceBound.Height       = (uint)Math.Min(faceBound.Height + 2 * additionalOffset, softwareBitmapInput.PixelHeight - faceBound.Y);

                                            var maskdetect  = await MaskDetect.PredictImageAsync(frame);
                                            var noMaskCount = maskdetect.Where(x => x.TagName == "no-mask").Count();
                                            if (noMaskCount > 0)
                                            {
                                                if (!KeepDistance)
                                                {
                                                    await speech.Read($"please wear a face mask in {DataConfig.RoomName[CCTVIndex]}");
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                            if (!IsFaceDetected)
                            {
                                m_bboxRenderer[CCTVIndex].ClearMaskLabel();
                            }
                            //save to picture libs

                            /*
                             * String path = Environment.GetFolderPath(Environment.SpecialFolder.MyPictures);
                             * path += "\\CCTV";
                             * if (!Directory.Exists(path))
                             * {
                             *  Directory.CreateDirectory(path);
                             * }*/
                            var TS = DateTime.Now - LastSaved[CCTVIndex];
                            if (savedBmp != null && TS.TotalSeconds > DataConfig.CaptureIntervalSecs && (bool)ChkCapture.IsChecked)
                            {
                                var myPictures = await Windows.Storage.StorageLibrary.GetLibraryAsync(Windows.Storage.KnownLibraryId.Pictures);
                                Windows.Storage.StorageFolder rootFolder    = myPictures.SaveFolder;
                                Windows.Storage.StorageFolder storageFolder = rootFolder;
                                var folderName = "cctv";
                                try
                                {
                                    storageFolder = await rootFolder.GetFolderAsync(folderName);
                                }
                                catch
                                {
                                    storageFolder = await rootFolder.CreateFolderAsync(folderName);
                                }
                                // Create sample file; replace if it exists.
                                Windows.Storage.StorageFile sampleFile =
                                    await storageFolder.CreateFileAsync($"cctv_{DateTime.Now.ToString("dd_MM_yyyy_HH_mm_ss")}_{CCTVIndex}.jpg",
                                                                        Windows.Storage.CreationCollisionOption.ReplaceExisting);
                                ImageHelpers.SaveSoftwareBitmapToFile(savedBmp, sampleFile);
                                LastSaved[CCTVIndex] = DateTime.Now;
                            }
                        }
                    }

                    // Update the displayed performance text
                    StatusLbl.Text = $"bind: {m_bindTime.ToString("F2")}ms, eval: {m_evalTime.ToString("F2")}ms";
                }
                catch (TaskCanceledException)
                {
                    // no-op: we expect this exception when we change media sources
                    // and can safely ignore/continue
                }
                catch (Exception ex)
                {
                    NotifyUser($"Exception while rendering results: {ex.Message}");
                }
            });
        }
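        // SocialDistanceHelpers is not part of this listing. A sketch of one
        // possible shape for it (the center-distance heuristic and the tuple-based
        // Lines type are assumptions): flag any pair of person boxes whose centers
        // are closer than the average box width.
        public static class SocialDistanceHelpers
        {
            public class DistanceResult
            {
                public bool Result { get; set; }
                public List<(Point From, Point To)> Lines { get; } = new List<(Point From, Point To)>();
            }

            public static DistanceResult Detect(Rect[] rects)
            {
                var result = new DistanceResult();
                double minDistance = rects.Average(r => r.Width); // rough stand-in for a safe distance

                for (int i = 0; i < rects.Length; i++)
                {
                    for (int j = i + 1; j < rects.Length; j++)
                    {
                        var a = new Point(rects[i].X + rects[i].Width / 2, rects[i].Y + rects[i].Height / 2);
                        var b = new Point(rects[j].X + rects[j].Width / 2, rects[j].Y + rects[j].Height / 2);
                        double distance = Math.Sqrt(Math.Pow(b.X - a.X, 2) + Math.Pow(b.Y - a.Y, 2));
                        if (distance < minDistance)
                        {
                            result.Result = true;
                            result.Lines.Add((a, b));
                        }
                    }
                }

                return result;
            }
        }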
        private async void Current_SoftwareBitmapFrameCaptured(object sender, SoftwareBitmapEventArgs e)
        {
            Debug.WriteLine("FrameCaptured");
            Debug.WriteLine($"Frame evaluation started {DateTime.Now}");
            if (e.SoftwareBitmap != null)
            {
                BitmapPixelFormat bpf = e.SoftwareBitmap.BitmapPixelFormat;

                var uncroppedBitmap = SoftwareBitmap.Convert(e.SoftwareBitmap, BitmapPixelFormat.Nv12);
                var faces           = await _faceDetector.DetectFacesAsync(uncroppedBitmap);

                if (faces.Count > 0)
                {
                    //crop image to focus on face portion
                    var        faceBox    = faces[0].FaceBox;
                    VideoFrame inputFrame = VideoFrame.CreateWithSoftwareBitmap(e.SoftwareBitmap);
                    VideoFrame tmp = new VideoFrame(e.SoftwareBitmap.BitmapPixelFormat, (int)(faceBox.Width + faceBox.Width % 2) - 2, (int)(faceBox.Height + faceBox.Height % 2) - 2);
                    await inputFrame.CopyToAsync(tmp, faceBox, null);

                    //crop image to fit model input requirements
                    VideoFrame croppedInputImage = new VideoFrame(BitmapPixelFormat.Gray8, (int)_inputImageDescriptor.Shape[3], (int)_inputImageDescriptor.Shape[2]);
                    var        srcBounds         = GetCropBounds(
                        tmp.SoftwareBitmap.PixelWidth,
                        tmp.SoftwareBitmap.PixelHeight,
                        croppedInputImage.SoftwareBitmap.PixelWidth,
                        croppedInputImage.SoftwareBitmap.PixelHeight);
                    await tmp.CopyToAsync(croppedInputImage, srcBounds, null);

                    ImageFeatureValue imageTensor = ImageFeatureValue.CreateFromVideoFrame(croppedInputImage);

                    _binding = new LearningModelBinding(_session);

                    TensorFloat outputTensor = TensorFloat.Create(_outputTensorDescriptor.Shape);

                    // Bind inputs + outputs
                    _binding.Bind(_inputImageDescriptor.Name, imageTensor);
                    _binding.Bind(_outputTensorDescriptor.Name, outputTensor);

                    // Evaluate results, using a unique id to correlate this evaluation
                    var results = await _session.EvaluateAsync(_binding, Guid.NewGuid().ToString());

                    Debug.WriteLine("ResultsEvaluated: " + results.ToString());

                    var outputTensorList = outputTensor.GetAsVectorView();
                    var resultsList      = new List <float>(outputTensorList.Count);
                    for (int i = 0; i < outputTensorList.Count; i++)
                    {
                        resultsList.Add(outputTensorList[i]);
                    }

                    var softMaxexOutputs = SoftMax(resultsList);

                    double maxProb  = 0;
                    int    maxIndex = 0;

                    // Comb through the evaluation results
                    for (int i = 0; i < Constants.POTENTIAL_EMOJI_NAME_LIST.Count(); i++)
                    {
                        // Record the dominant emotion probability & its location
                        if (softMaxexOutputs[i] > maxProb)
                        {
                            maxIndex = i;
                            maxProb  = softMaxexOutputs[i];
                        }

                        //for evaluations run on the EmotionPage, record info about single specific emotion of interest
                        if (CurrentEmojis._currentEmoji != null && Constants.POTENTIAL_EMOJI_NAME_LIST[i].Equals(CurrentEmojis._currentEmoji.Name))
                        {
                            SoftwareBitmap potentialBestPic;

                            try
                            {
                                potentialBestPic = SoftwareBitmap.Convert(uncroppedBitmap, BitmapPixelFormat.Bgra8);
                            }
                            catch (Exception ex)
                            {
                                Debug.WriteLine($"Error converting SoftwareBitmap. Details:{ex.Message}. Attempting to continue...");
                                return;
                            }

                            await Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(
                                CoreDispatcherPriority.Normal, async () =>
                            {
                                // Give user immediate visual feedback by updating success gauge
                                ScoreUpdated?.Invoke(this, new EmotionPageGaugeScoreEventArgs()
                                {
                                    Score = softMaxexOutputs[i]
                                });

                                // Save original pic for each emotion no matter how bad it is (and record its associated info)
                                double bestScore = CurrentEmojis._emojis.Emojis[CurrentEmojis._currentEmojiIndex].BestScore;
                                if (softMaxexOutputs[i] > bestScore)
                                {
                                    CurrentEmojis._emojis.Emojis[CurrentEmojis._currentEmojiIndex].BestScore = softMaxexOutputs[i];

                                    var source = new SoftwareBitmapSource();

                                    await source.SetBitmapAsync(potentialBestPic);

                                    // Create format of potentialBestPic to be displayed in a gif later
                                    SoftwareBitmap tmpBitmap = potentialBestPic;
                                    WriteableBitmap wb       = new WriteableBitmap(tmpBitmap.PixelWidth, tmpBitmap.PixelHeight);
                                    tmpBitmap.CopyToBuffer(wb.PixelBuffer);

                                    CurrentEmojis._emojis.Emojis[CurrentEmojis._currentEmojiIndex].BestPic      = source;
                                    CurrentEmojis._emojis.Emojis[CurrentEmojis._currentEmojiIndex].ShowOopsIcon = false;
                                    CurrentEmojis._emojis.Emojis[CurrentEmojis._currentEmojiIndex].BestPicWB    = wb;
                                }
                            }
                            );
                        }
                    }

                    Debug.WriteLine($"Probability = {maxProb}, Threshold set to = {Constants.CLASSIFICATION_CERTAINTY_THRESHOLD}, Emotion = {Constants.POTENTIAL_EMOJI_NAME_LIST[maxIndex]}");

                    // For evaluations run on the MainPage, update the emoji carousel
                    if (maxProb >= Constants.CLASSIFICATION_CERTAINTY_THRESHOLD)
                    {
                        Debug.WriteLine("first page emoji should start to update");
                        IntelligenceServiceEmotionClassified?.Invoke(this, new ClassifiedEmojiEventArgs(CurrentEmojis._emojis.Emojis[maxIndex]));
                    }

                    // Dispose of resources
                    if (e.SoftwareBitmap != null)
                    {
                        e.SoftwareBitmap.Dispose();
                        e.SoftwareBitmap = null;
                    }
                }
            }
            IntelligenceServiceProcessingCompleted?.Invoke(this, null);
            Debug.WriteLine($"Frame evaluation finished {DateTime.Now}");
        }
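        // GetCropBounds is used above but not included. A plausible version (an
        // assumption) returns the largest centered source rectangle with the
        // destination's aspect ratio, so CopyToAsync crops instead of stretching:
        private static BitmapBounds GetCropBounds(int srcWidth, int srcHeight, int dstWidth, int dstHeight)
        {
            double scale = Math.Min((double)srcWidth / dstWidth, (double)srcHeight / dstHeight);
            int cropWidth  = (int)(dstWidth * scale);
            int cropHeight = (int)(dstHeight * scale);

            return new BitmapBounds
            {
                X      = (uint)((srcWidth - cropWidth) / 2),
                Y      = (uint)((srcHeight - cropHeight) / 2),
                Width  = (uint)cropWidth,
                Height = (uint)cropHeight
            };
        }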
 public static Task<WriteableBitmap> FromStream(this WriteableBitmap bmp, IRandomAccessStream stream, BitmapPixelFormat pixelFormat = BitmapPixelFormat.Unknown)
 {
     return BitmapFactory.FromStream(stream, pixelFormat);
 }
 public static Task<WriteableBitmap> FromContent(this WriteableBitmap bmp, Uri uri, BitmapPixelFormat pixelFormat = BitmapPixelFormat.Unknown)
 {
     return BitmapFactory.FromContent(uri, pixelFormat);
 }
Example #17
 /// <summary>
 /// Loads an image from the applications content and returns a new WriteableBitmap.
 /// </summary>
 /// <param name="uri">The URI to the content file.</param>
 /// <param name="pixelFormat">The pixel format of the stream data. If Unknown is provided as param, the default format of the BitmapDecoder is used.</param>
 /// <returns>A new WriteableBitmap containing the pixel data.</returns>
 public static async Task<WriteableBitmap> FromContent(Uri uri, BitmapPixelFormat pixelFormat = BitmapPixelFormat.Unknown)
 {
     // Decode pixel data
     var file = await StorageFile.GetFileFromApplicationUriAsync(uri);
     using (var stream = await file.OpenAsync(FileAccessMode.Read))
     {
         return await FromStream(stream, pixelFormat);
     }
 }
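 // Usage sketch: load a packaged asset into a WriteableBitmap. The asset path
 // below is a placeholder; ms-appx URIs resolve inside the app package.
 public static async Task<WriteableBitmap> LoadLogoAsync()
 {
     return await FromContent(new Uri("ms-appx:///Assets/Logo.png"));
 }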
Example #18
        /// <summary>
        /// Loads the data from an image stream and returns a new WriteableBitmap.
        /// </summary>
        /// <param name="stream">The stream with the image data.</param>
        /// <param name="pixelFormat">The pixel format of the stream data. If Unknown is provided as param, the default format of the BitmapDecoder is used.</param>
        /// <returns>A new WriteableBitmap containing the pixel data.</returns>
        public static async Task<WriteableBitmap> FromStream(IRandomAccessStream stream, BitmapPixelFormat pixelFormat = BitmapPixelFormat.Unknown)
        {
            var decoder = await BitmapDecoder.CreateAsync(stream);
            var transform = new BitmapTransform();
            if (pixelFormat == BitmapPixelFormat.Unknown)
            {
                pixelFormat = decoder.BitmapPixelFormat;
            }
            var pixelData = await decoder.GetPixelDataAsync(pixelFormat, decoder.BitmapAlphaMode, transform, ExifOrientationMode.RespectExifOrientation, ColorManagementMode.ColorManageToSRgb);
            var pixels = pixelData.DetachPixelData();

            // Copy to WriteableBitmap
            var bmp = new WriteableBitmap((int)decoder.OrientedPixelWidth, (int)decoder.OrientedPixelHeight);
            using (var bmpStream = bmp.PixelBuffer.AsStream())
            {
                bmpStream.Seek(0, SeekOrigin.Begin);
                bmpStream.Write(pixels, 0, (int)bmpStream.Length);
                return bmp;
            }
        }
Example #19
 /// <summary>
 /// Loads the data from an image stream and returns a new WriteableBitmap.
 /// </summary>
 /// <param name="stream">The stream with the image data.</param>
 /// <param name="pixelFormat">The pixel format of the stream data. If Unknown is provided as param, the default format of the BitmapDecoder is used.</param>
 /// <returns>A new WriteableBitmap containing the pixel data.</returns>
 public static async Task<WriteableBitmap> FromStream(Stream stream, BitmapPixelFormat pixelFormat = BitmapPixelFormat.Unknown)
 {
     using (var dstStream = new InMemoryRandomAccessStream())
     {
         await RandomAccessStream.CopyAsync(stream.AsInputStream(), dstStream);
         return await FromStream(dstStream, pixelFormat);
     }
 }
Example #20
        public async Task<StorageFile> SaveToFile(WriteableBitmap image, string file_name, Windows.Storage.CreationCollisionOption collision, BitmapPixelFormat image_format, BitmapAlphaMode alpha_mode)
        {
            var file = await KnownFolders.PicturesLibrary.CreateFileAsync(file_name, collision);

            using (IRandomAccessStream image_stream = await file.OpenAsync(FileAccessMode.ReadWrite))
            {
                BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.BmpEncoderId, image_stream);

                Stream pixel_stream = image.PixelBuffer.AsStream();

                byte[] pixel_array = new byte[pixel_stream.Length];

                await pixel_stream.ReadAsync(pixel_array, 0, pixel_array.Length);

                encoder.SetPixelData(image_format, alpha_mode,
                                     (uint)image.PixelWidth,
                                     (uint)image.PixelHeight,
                                     96.0,
                                     96.0,
                                     pixel_array);
                await encoder.FlushAsync();
            }
            return file;
        }
Example #21
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            // If state is not Streaming, return.
            if (_state != StreamingState.Streaming)
            {
                return;
            }

            // If a previous frame is still being processed, return.
            if (!_semaphoreSlim.Wait(0))
            {
                return;
            }

            const BitmapPixelFormat PixelFormat = BitmapPixelFormat.Nv12;

            try
            {
                using (VideoFrame currentFrame = new VideoFrame(PixelFormat, (int)_videoProperties.Width, (int)_videoProperties.Height))
                {
                    // Get the current preview frame from _mediaCapture and copy it into currentFrame.
                    await _mediaCapture.GetPreviewFrameAsync(currentFrame);

                    // Detect faces with _faceTracker.
                    IList<DetectedFace> builtinFaces = await _faceTracker.ProcessNextFrameAsync(currentFrame);

                    SoftwareBitmap tempBitmap = SoftwareBitmap.Convert(currentFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8);

                    if (builtinFaces.Count != 0)
                    {
                        var frameSize = new Size(currentFrame.SoftwareBitmap.PixelWidth, currentFrame.SoftwareBitmap.PixelHeight);
                        //await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        //{
                        //    ShowResult(frameSize, builtinFaces);
                        //});

                        // Get picture from videoframe.
                        IRandomAccessStream stream  = new InMemoryRandomAccessStream();
                        BitmapEncoder       encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);

                        encoder.SetSoftwareBitmap(tempBitmap);
                        await encoder.FlushAsync();

                        CustomFaceModel[] customFaces = await _faceApiHelper.GetDetectEmojiAsync(stream.AsStream());

                        CustomFaceEmojiModel customFaceEmojiModel = new CustomFaceEmojiModel();
                        EmojiNum             emojiNum = new EmojiNum();
                        float upperLeft = 0, upperRight = 0, bottomLeft = 0, bottomRight = 0, averageX = 0, averageY = 0;
                        foreach (var faceModel in customFaces)
                        {
                            averageX += faceModel.Left;
                            averageY += faceModel.Top;
                        }
                        averageX /= customFaces.Length;
                        averageY /= customFaces.Length;

                        for (int i = 0; i < customFaces.Length; i++)
                        {
                            emojiNum.Emoji = -1 * (customFaces[i].Anger + customFaces[i].Contempt + customFaces[i].Disgust + customFaces[i].Fear + customFaces[i].Sadness) + customFaces[i].Happiness + customFaces[i].Neutral + customFaces[i].Suprise;
                            EmojiNum model = new EmojiNum
                            {
                                Emoji = -1 * (customFaces[i].Anger + customFaces[i].Contempt + customFaces[i].Disgust + customFaces[i].Fear + customFaces[i].Sadness) + customFaces[i].Happiness + customFaces[i].Neutral + customFaces[i].Suprise
                            };
                            //customFaceEmojiModel.Emojis[i] = model;
                            //customFaceEmojiModel.Emojis[i].Emoji = -1 * (customFaces[i].Anger + customFaces[i].Contempt + customFaces[i].Disgust + customFaces[i].Fear + customFaces[i].Sadness) + customFaces[i].Happiness + customFaces[i].Neutral + customFaces[i].Suprise;
                            customFaceEmojiModel.EmojiSum += model.Emoji;
                            //customFaceEmojiModel.EmojiSum += customFaceEmojiModel.Emojis[i].Emoji;
                            if (customFaces[i].Left < averageX && customFaces[i].Top > averageY)
                            {
                                upperLeft += emojiNum.Emoji;
                            }
                            else if (customFaces[i].Left < averageX && customFaces[i].Top < averageY)
                            {
                                bottomLeft += emojiNum.Emoji;
                            }
                            else if (customFaces[i].Left > averageX && customFaces[i].Top > averageY)
                            {
                                upperRight += emojiNum.Emoji;
                            }
                            else if (customFaces[i].Left > averageX && customFaces[i].Top < averageY)
                            {
                                bottomRight += emojiNum.Emoji;
                            }
                        }
                        customFaceEmojiModel.UpperLeft  /= upperLeft;
                        customFaceEmojiModel.ButtomLeft /= bottomLeft;
                        customFaceEmojiModel.UpperRight /= upperRight;
                        customFaceEmojiModel.ButtoRight /= bottomRight;

                        //CustomFaceEmojiModel customFaceEmojiModel = await _faceApiHelper.GetEmojiResult(customFaces);
                        await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                                  ShowFromFaceApi(frameSize, customFaces, emojiNum));

                        await _eventHubHelper.SendMessagesToEventHub(customFaceEmojiModel);
                    }
                    else
                    {
                        await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                                  PaintingCanvas.Children.Clear());
                    }
                }
            }
            catch (Microsoft.ProjectOxford.Face.FaceAPIException faceEx)
            {
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                          ShowErrorHelper.ShowDialog(faceEx.ErrorMessage, faceEx.ErrorCode));
            }
            catch (Exception ex)
            {
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                          ShowErrorHelper.ShowDialog(ex.Message));
            }
            finally
            {
                _semaphoreSlim.Release();
            }
        }
Example #22
        private async Task <bool> TakePictureAsync()
        {
            bool successful = true;

            if (_state != StreamingState.Streaming)
            {
                return false;
            }

            try
            {
                IList<DetectedFace> faces;
                const BitmapPixelFormat PixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame currentFrame = new VideoFrame(PixelFormat, (int)_properties.Width, (int)_properties.Height))
                {
                    await _mediaCapture.GetPreviewFrameAsync(currentFrame);

                    faces = await _faceDetector.DetectFacesAsync(currentFrame.SoftwareBitmap);

                    Size size = new Size(currentFrame.SoftwareBitmap.PixelWidth, currentFrame.SoftwareBitmap.PixelHeight);

                    if (faces.Count != 1)
                    {
                        throw new Exception("Too many people. (Or no one.)");
                    }

                    using (SoftwareBitmap bitmap = SoftwareBitmap.Convert(currentFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8))
                    {
                        WriteableBitmap source = new WriteableBitmap(bitmap.PixelWidth, bitmap.PixelHeight);
                        bitmap.CopyToBuffer(source.PixelBuffer);

                        IRandomAccessStream stream  = new InMemoryRandomAccessStream();
                        BitmapEncoder       encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);

                        encoder.SetSoftwareBitmap(bitmap);
                        await encoder.FlushAsync();

                        ShowUp(size, faces, source);

                        if (string.IsNullOrEmpty(UserName.Text))
                        {
                            throw new Exception("Enter your name.");
                        }

                        stream.Seek(0); // rewind so CreatePersonAsync reads the encoded image from the start
                        if (await _faceApiHelper.CreatePersonAsync(stream.AsStream(), UserName.Text))
                        {
                            ShowAlertHelper.ShowDialog("Hi " + UserName.Text + ".", "Success");
                        }
                        else
                        {
                            ShowAlertHelper.ShowDialog("Something went wrong. Try again.");
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                ShowAlertHelper.ShowDialog(ex.Message);
                successful = false;
            }

            return(successful);
        }
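A plausible call site for the method above, e.g. a capture button handler (the handler name and the Debug logging are illustrative, not part of the original sample):

        private async void Capture_Click(object sender, RoutedEventArgs e)
        {
            // TakePictureAsync reports false for camera errors as well as validation failures.
            bool ok = await TakePictureAsync();
            if (!ok)
            {
                Debug.WriteLine("Capture failed; see the dialog for details.");
            }
        }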
Example #23
        /// <summary>
        /// Resizes and center-crops an image from a stream to the target dimensions,
        /// optionally converting the pixel format and alpha mode.
        /// </summary>
        /// <param name="stream">Stream containing the source image.</param>
        /// <param name="targetWidth">Target width in pixels.</param>
        /// <param name="targetHeight">Target height in pixels.</param>
        /// <param name="format">Desired pixel format; Unknown keeps the decoder's format.</param>
        /// <param name="alphaMode">Desired alpha mode for the output bitmap.</param>
        /// <returns>The resized <see cref="SoftwareBitmap"/>.</returns>
        public static async Task <SoftwareBitmap> ResizeImageAsync(IRandomAccessStream stream,
                                                                   int targetWidth,
                                                                   int targetHeight,
                                                                   BitmapPixelFormat format  = BitmapPixelFormat.Unknown,
                                                                   BitmapAlphaMode alphaMode = BitmapAlphaMode.Ignore)
        {
            BitmapDecoder decoder = await BitmapDecoder.CreateAsync(stream);

            var originalPixelWidth  = decoder.PixelWidth;
            var originalPixelHeight = decoder.PixelHeight;

            using (var outputStream = new InMemoryRandomAccessStream())
            {
                //create encoder based on decoder of the source file
                var encoder = await BitmapEncoder.CreateForTranscodingAsync(outputStream, decoder);

                double widthRatio = (double)targetWidth / originalPixelWidth;
                double heightRatio = (double)targetHeight / originalPixelHeight;
                uint   aspectHeight = (uint)targetHeight;
                uint   aspectWidth = (uint)targetWidth;
                uint   cropX = 0, cropY = 0;
                var    scaledWidth  = (uint)targetWidth;
                var    scaledHeight = (uint)targetHeight;
                if (originalPixelWidth > originalPixelHeight)
                {
                    aspectWidth = (uint)(heightRatio * originalPixelWidth);
                    cropX       = (aspectWidth - aspectHeight) / 2;
                }
                else
                {
                    aspectHeight = (uint)(widthRatio * originalPixelHeight);
                    cropY        = (aspectHeight - aspectWidth) / 2;
                }
                //you can adjust interpolation and other options here, so far linear is fine for thumbnails
                encoder.BitmapTransform.InterpolationMode = BitmapInterpolationMode.Linear;
                encoder.BitmapTransform.ScaledHeight      = aspectHeight;
                encoder.BitmapTransform.ScaledWidth       = aspectWidth;
                encoder.BitmapTransform.Bounds            = new BitmapBounds()
                {
                    Width  = scaledWidth,
                    Height = scaledHeight,
                    X      = cropX,
                    Y      = cropY,
                };
                await encoder.FlushAsync();

                // get resized image
                var outputDecoder = await BitmapDecoder.CreateAsync(outputStream);

                var outputImg = await outputDecoder.GetSoftwareBitmapAsync();

                // apply alpha mode
                outputImg = SoftwareBitmap.Convert(outputImg, outputImg.BitmapPixelFormat, alphaMode);
                // apply pixel format
                if (format != BitmapPixelFormat.Unknown && format != decoder.BitmapPixelFormat)
                {
                    return(SoftwareBitmap.Convert(outputImg, format));
                }
                return(outputImg);
            }
        }
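For example, producing a 200 x 200 center-cropped thumbnail from a picked file might look like this (a sketch; the 'file' variable is assumed to be a StorageFile):

            using (IRandomAccessStream stream = await file.OpenAsync(FileAccessMode.Read))
            {
                // Bgra8 with premultiplied alpha is what most XAML image sources expect.
                SoftwareBitmap thumb = await ResizeImageAsync(stream, 200, 200,
                                                              BitmapPixelFormat.Bgra8,
                                                              BitmapAlphaMode.Premultiplied);
            }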
Example #24
 /// <summary>
 /// Instantiates a <see cref="SoftwareBitmap"/> with the provided total byte size and pixel format.
 /// </summary>
 /// <param name="size">Total size of the bitmap in bytes.</param>
 /// <param name="config">Bitmap pixel format.</param>
 public static SoftwareBitmap CreateForSize(int size, BitmapPixelFormat config)
 {
     Preconditions.CheckArgument(size % BitmapUtil.GetPixelSizeForBitmapConfig(config) == 0);
     return(Create(1, size / BitmapUtil.GetPixelSizeForBitmapConfig(config), config));
 }
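A quick sanity check of the byte-size contract (a sketch; assumes GetPixelSizeForBitmapConfig reports 4 bytes per Bgra8 pixel):

 // 4096 bytes of Bgra8 is 1024 pixels, laid out as a 1 x 1024 bitmap.
 var bitmap = CreateForSize(4096, BitmapPixelFormat.Bgra8);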
Example #25
        private async void Button_Click(object sender, RoutedEventArgs e)
        {
            Roll.Text = "Roll no.(s) of Identified Persons: ";
            VisualizationCanvas.Children.Clear();
            FileOpenPicker photoPicker = new FileOpenPicker();

            photoPicker.ViewMode = PickerViewMode.Thumbnail;
            photoPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary;
            photoPicker.FileTypeFilter.Add(".jpg");
            photoPicker.FileTypeFilter.Add(".jpeg");
            photoPicker.FileTypeFilter.Add(".png");
            photoPicker.FileTypeFilter.Add(".bmp");

            StorageFile photoFile = await photoPicker.PickSingleFileAsync();

            if (photoFile == null)
            {
                return;
            }
            string filePath = photoFile.Path;

            using (var stream = await photoFile.OpenAsync(FileAccessMode.Read))
            {
                var faces = await faceServiceClient.DetectAsync(stream.AsStream());

                var faceIds = faces.Select(face => face.FaceId).ToArray();
                var results = await faceServiceClient.IdentifyAsync("student", faceIds);

                foreach (var identifyResult in results)
                {
                    //Console.WriteLine("Result of face: {0}", identifyResult.FaceId);
                    if (identifyResult.Candidates.Length == 0)
                    {
                        Roll.Text += "Not identified, ";
                    }
                    else
                    {
                        // Get top 1 among all candidates returned
                        Person[] x = await faceServiceClient.ListPersonsAsync("student");

                        Candidate name        = identifyResult.Candidates[0];
                        var       candidateId = name.PersonId;
                        bool isKnown = x.Any(person => person.PersonId == candidateId);
                        if (!isKnown)
                        {
                            Roll.Text += " Not identified, ";
                            continue;
                        }
                        var person = await faceServiceClient.GetPersonAsync("student", candidateId);

                        Roll.Text += person.Name + ", ";
                        string date = $"{DateTime.Today.Month}/{DateTime.Today.Day}/{DateTime.Today.Year}";
                        TodoItem item  = new TodoItem {
                            Roll = person.Name,
                            Date = date
                        };
                        await MobileService.GetTable <TodoItem>().InsertAsync(item);
                    }
                }

                BitmapImage bitmapSource = new BitmapImage();

                IRandomAccessStream fileStream = await photoFile.OpenAsync(FileAccessMode.Read);

                BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

                BitmapTransform transform = new BitmapTransform();
                const float     sourceImageHeightLimit = 1280;

                if (decoder.PixelHeight > sourceImageHeightLimit)
                {
                    float scalingFactor = (float)sourceImageHeightLimit / (float)decoder.PixelHeight;
                    transform.ScaledWidth  = (uint)Math.Floor(decoder.PixelWidth * scalingFactor);
                    transform.ScaledHeight = (uint)Math.Floor(decoder.PixelHeight * scalingFactor);
                }

                SoftwareBitmap sourceBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Premultiplied, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage);

                const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Gray8;

                SoftwareBitmap convertedBitmap;

                if (sourceBitmap.BitmapPixelFormat != faceDetectionPixelFormat)
                {
                    convertedBitmap = SoftwareBitmap.Convert(sourceBitmap, faceDetectionPixelFormat);
                }
                else
                {
                    convertedBitmap = sourceBitmap;
                }

                SolidColorBrush lineBrush     = new SolidColorBrush(Windows.UI.Colors.Yellow);
                double          lineThickness = 2.0;
                SolidColorBrush fillBrush     = new SolidColorBrush(Windows.UI.Colors.Transparent);

                ImageBrush           brush        = new ImageBrush();
                SoftwareBitmapSource bitmapsource = new SoftwareBitmapSource();
                await bitmapsource.SetBitmapAsync(sourceBitmap);

                brush.ImageSource = bitmapsource;
                brush.Stretch     = Stretch.Fill;
                this.VisualizationCanvas.Background = brush;
                double widthScale  = sourceBitmap.PixelWidth / this.VisualizationCanvas.ActualWidth;
                double heightScale = sourceBitmap.PixelHeight / this.VisualizationCanvas.ActualHeight;

                foreach (var face in faces)
                {
                    // Create a rectangle element for displaying the face box but since we're using a Canvas
                    // we must scale the rectangles according to the image’s actual size.
                    // The original FaceBox values are saved in the Rectangle's Tag field so we can update the
                    // boxes when the Canvas is resized.
                    Rectangle box = new Rectangle
                    {
                        Tag             = face.FaceRectangle,
                        Width           = (uint)(face.FaceRectangle.Width / widthScale),
                        Height          = (uint)(face.FaceRectangle.Height / heightScale),
                        Fill            = fillBrush,
                        Stroke          = lineBrush,
                        StrokeThickness = lineThickness,
                        Margin          = new Thickness((uint)(face.FaceRectangle.Left / widthScale), (uint)(face.FaceRectangle.Top / heightScale), 0, 0)
                    };
                    this.VisualizationCanvas.Children.Add(box);
                }
            }
        }
Example #26
 /// <summary>
 /// Gets the bitmap size in bytes for the provided dimensions and pixel format.
 /// </summary>
 /// <param name="width">Width of the bitmap.</param>
 /// <param name="height">Height of the bitmap.</param>
 /// <param name="config">Bitmap pixel format.</param>
 public static int BitmapSize(int width, int height, BitmapPixelFormat config)
 {
     return(BitmapUtil.GetSizeInByteForBitmap(width, height, config));
 }
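A worked example (assuming 4 bytes per Bgra8 pixel):

 // 1920 * 1080 * 4 = 8,294,400 bytes, roughly 7.9 MB.
 int bytes = BitmapSize(1920, 1080, BitmapPixelFormat.Bgra8);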
Example #27
        /// <summary>
        /// Processes and saves an image.
        /// </summary>
        /// <param name="inputFile">The input file.</param>
        /// <param name="outputFile">The output file.</param>
        /// <param name="longSide">Target length of the long side, in pixels.</param>
        /// <returns>True on success; otherwise false.</returns>
        private async Task <bool> LoadSaveFileAsync(StorageFile inputFile, StorageFile outputFile, uint longSide)
        {
            try
            {
                Guid encoderId;
                switch (outputFile.FileType)
                {
                case ".png":
                    encoderId = BitmapEncoder.PngEncoderId;
                    break;

                case ".bmp":
                    encoderId = BitmapEncoder.BmpEncoderId;
                    break;

                case ".jpg":
                case ".jpeg":
                default:
                    encoderId = BitmapEncoder.JpegEncoderId;
                    break;
                }

                // Image processing section.
                using (IRandomAccessStream inputStream = await inputFile.OpenAsync(FileAccessMode.Read),
                       outputStream = await outputFile.OpenAsync(FileAccessMode.ReadWrite))
                {
                    // BitmapEncoder expects an empty output stream; the user may have selected a pre-existing file, so reset its size to zero.
                    outputStream.Size = 0;

                    // Get pixel data from the decoder. We apply the user-requested transforms on the decoded pixels to take advantage of potential optimizations in the decoder.
                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(inputStream);

                    BitmapTransform transform = new BitmapTransform();

                    // The original image is smaller than the requested size.
                    if (decoder.PixelHeight < longSide && decoder.PixelWidth < longSide)
                    {
                        throw new Exception("The requested size is larger than the original image!");
                    }
                    // Determine the long side and scale the other side by the original aspect ratio.
                    if (decoder.PixelHeight > decoder.PixelWidth)
                    {
                        transform.ScaledHeight = longSide;
                        transform.ScaledWidth  = (uint)(decoder.PixelWidth * ((float)longSide / decoder.PixelHeight));
                    }
                    else
                    {
                        transform.ScaledHeight = (uint)(decoder.PixelHeight * ((float)longSide / decoder.PixelWidth));
                        transform.ScaledWidth  = longSide;
                    }

                    // Fant is a relatively high-quality interpolation mode.
                    transform.InterpolationMode = BitmapInterpolationMode.Fant;

                    // The BitmapDecoder indicates which pixel format and alpha mode best match the natively stored image data. This can provide a performance and/or quality gain.
                    BitmapPixelFormat format = decoder.BitmapPixelFormat;
                    BitmapAlphaMode   alpha  = decoder.BitmapAlphaMode;

                    // PixelDataProvider provides access to the pixel data within a bitmap frame.
                    PixelDataProvider pixelProvider = await decoder.GetPixelDataAsync(
                        format,
                        alpha,
                        transform,
                        ExifOrientationMode.RespectExifOrientation,
                        ColorManagementMode.ColorManageToSRgb
                        );

                    byte[] pixels = pixelProvider.DetachPixelData();

                    // Write the pixel data to the encoder.
                    BitmapEncoder encoder = await BitmapEncoder.CreateAsync(encoderId, outputStream);

                    // Set the pixel data.
                    encoder.SetPixelData(
                        format,
                        alpha,
                        transform.ScaledWidth,
                        transform.ScaledHeight,
                        decoder.DpiX,
                        decoder.DpiY,
                        pixels
                        );

                    await encoder.FlushAsync(); // Asynchronously commit and flush all image data (this step writes the image to the file).

                    Debug.WriteLine("保存成功:" + outputFile.Path);

                    // Display the image.
                    BitmapImage src = new BitmapImage();
                    //    IRandomAccessStream stream = await outputFile.OpenAsync(FileAccessMode.Read);
                    // Re-decode the output stream to read back the final dimensions.
                    BitmapDecoder decoder0 = await BitmapDecoder.CreateAsync(outputStream);

                    Compress_Size_info.Text = "Compressed dimensions: " + decoder0.OrientedPixelWidth + " x " + decoder0.OrientedPixelHeight;

                    string[] units       = new String[] { "B", "KB", "MB", "GB", "TB" };
                    int      digitGroups = (int)(Math.Log10(outputStream.Size) / Math.Log10(1024));
                    Compress_Save.Text = "Compressed size: " + String.Format("{0:F}", (outputStream.Size / Math.Pow(1024, digitGroups))) + " " + units[digitGroups];
                    // Total number of compressed bytes.
                    Compress_byte = outputStream.Size;

                    await src.SetSourceAsync(outputStream);

                    Compress_Image.Source = src;

                    // Compression summary.
                    Now_save();
                    Total_save();

                    return(true);
                }
            }
            catch (Exception err)
            {
                Debug.WriteLine(err.Message);
                return(false);
            }
        }
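A hypothetical call site (the 'picker' variable and the output file name are illustrative):

            StorageFile input  = await picker.PickSingleFileAsync(); // 'picker' is a configured FileOpenPicker
            StorageFile output = await KnownFolders.PicturesLibrary.CreateFileAsync("resized.jpg", CreationCollisionOption.GenerateUniqueName);
            bool ok = await LoadSaveFileAsync(input, output, 1280);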
Example #28
        public async Task <string> ObtenerIdentidad()
        {
            byte[] arrayImage;
            var    PersonName = "";


            try
            {
                const BitmapPixelFormat InputPixelFormat1 = BitmapPixelFormat.Bgra8;

                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat1, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    var valor = await this.mediaCapture.GetPreviewFrameAsync(previewFrame);



                    SoftwareBitmap softwareBitmapPreviewFrame = valor.SoftwareBitmap;

                    Size  sizeCrop      = new Size(softwareBitmapPreviewFrame.PixelWidth, softwareBitmapPreviewFrame.PixelHeight);
                    Point point         = new Point(0, 0);
                    Rect  rect          = new Rect(0, 0, softwareBitmapPreviewFrame.PixelWidth, softwareBitmapPreviewFrame.PixelHeight);
                    var   arrayByteData = await EncodedBytes(softwareBitmapPreviewFrame, BitmapEncoder.JpegEncoderId);

                    SoftwareBitmap softwareBitmapCropped = await CreateFromBitmap(softwareBitmapPreviewFrame, (uint)softwareBitmapPreviewFrame.PixelWidth, (uint)softwareBitmapPreviewFrame.PixelHeight);

                    SoftwareBitmap displayableImage = SoftwareBitmap.Convert(softwareBitmapCropped, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);

                    arrayImage = await EncodedBytes(displayableImage, BitmapEncoder.JpegEncoderId);

                    var nuevoStreamFace = new MemoryStream(arrayImage);



                    //var ignored1 = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                    //{
                    //    softwareBitmapSource.SetBitmapAsync(displayableImage);

                    //    imagenCamaraWeb.Source = softwareBitmapSource;

                    //});

                    string subscriptionKey      = "a6fa05b6601b4ea398aa2039d601d983";
                    string subscriptionEndpoint = "https://southcentralus.api.cognitive.microsoft.com/face/v1.0";
                    var    faceServiceClient    = new FaceServiceClient(subscriptionKey, subscriptionEndpoint);

                    try
                    {
                        // using (var fsStream = File.OpenRead(sampleFile))
                        // {
                        IEnumerable <FaceAttributeType> faceAttributes =
                            new FaceAttributeType[] { FaceAttributeType.Gender, FaceAttributeType.Age, FaceAttributeType.Smile, FaceAttributeType.Emotion, FaceAttributeType.Glasses, FaceAttributeType.Hair };


                        var faces = await faceServiceClient.DetectAsync(nuevoStreamFace, true, false, faceAttributes);

                        string edad   = string.Empty;
                        string genero = string.Empty;
                        var    resultadoIdentificacion = await faceServiceClient.IdentifyAsync(faces.Select(ff => ff.FaceId).ToArray(), largePersonGroupId: this.GroupId);

                        var ignored2 = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            try
                            {
                                var status = faces.Length;
                                txtResultServicio.Text = "Faces found: " + status;
                            }
                            catch (Exception ex)
                            {
                                txtResultServicio.Text = "Error 1: " + ex.Message.ToString();

                                throw;
                            }
                        });


                        for (int idx = 0; idx < faces.Length; idx++)
                        {
                            // Update identification result for rendering
                            edad   = faces[idx].FaceAttributes.Age.ToString();
                            genero = faces[idx].FaceAttributes.Gender.ToString();
                            if (genero != string.Empty)
                            {
                                if (genero == "male")
                                {
                                    genero = "Masculino";
                                }
                                else
                                {
                                    genero = "Femenino";
                                }
                            }



                            var res = resultadoIdentificacion[idx];

                            if (res.Candidates.Length > 0)
                            {
                                var nombrePersona = await faceServiceClient.GetPersonInLargePersonGroupAsync(GroupId, res.Candidates[0].PersonId);

                                PersonName = nombrePersona.Name.ToString();
                                //var estadoAnimo =
                                var ignored3 = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                {
                                    txtResult.Text = nombrePersona.Name.ToString() + " / " + genero.ToString();
                                });
                            }
                            else
                            {
                                // Keep UI updates on the dispatcher thread, matching the other writes in this method.
                                var ignoredUnknown = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                {
                                    txtResult.Text = "Unknown";
                                });
                            }
                        }
                        //}
                    }
                    catch (Exception ex)
                    {
                        var error    = ex.Message.ToString();
                        var ignored3 = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            txtResultServicio.Text = "Error 2: " + error;
                        });
                    }
                }
            }
            catch (Exception ex)
            {
                var mensaje  = ex.Message.ToString();
                var ignored4 = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                {
                    txtResultServicio.Text = "Error 3: " + mensaje;
                });
            }
            return(PersonName);
        }
Example #29
        void TestCreateFromSoftwareBitmap(CanvasDevice device, BitmapPixelFormat pixelFormat, BitmapAlphaMode alphaMode)
        {
            if (pixelFormat == BitmapPixelFormat.Unknown)
                return;

            int anyWidth = 3;
            int anyHeight = 5;

            var softwareBitmap = new SoftwareBitmap(pixelFormat, anyWidth, anyHeight, alphaMode);

            if (!IsFormatSupportedByWin2D(pixelFormat, alphaMode))
            {
                Assert.ThrowsException<Exception>(() =>
                {
                    CanvasBitmap.CreateFromSoftwareBitmap(device, softwareBitmap);
                });
                return;
            }
            
            var canvasBitmap = CanvasBitmap.CreateFromSoftwareBitmap(device, softwareBitmap);

            Assert.AreEqual(anyWidth, (int)canvasBitmap.SizeInPixels.Width);
            Assert.AreEqual(anyHeight, (int)canvasBitmap.SizeInPixels.Height);
            Assert.AreEqual(GetDirectXPixelFormatUsedForBitmapPixelFormat(pixelFormat), canvasBitmap.Format);

            CanvasAlphaMode expectedAlphaMode = CanvasAlphaMode.Straight;
            switch (alphaMode)
            {
                case BitmapAlphaMode.Ignore: expectedAlphaMode = CanvasAlphaMode.Ignore; break;
                case BitmapAlphaMode.Premultiplied: expectedAlphaMode = CanvasAlphaMode.Premultiplied; break;
                case BitmapAlphaMode.Straight: expectedAlphaMode = CanvasAlphaMode.Straight; break;
            }

            Assert.AreEqual(expectedAlphaMode, canvasBitmap.AlphaMode);
        }
        unsafe private TensorFloat CustomTensorize(List <VideoFrame> frameList, List <float> mean, List <float> std, bool toRGB = false)
        {
            int               temp_len       = frameList.Count();
            SoftwareBitmap    softwareBitmap = frameList[0].SoftwareBitmap;
            Int32             height         = softwareBitmap.PixelHeight;
            Int32             width          = softwareBitmap.PixelWidth;
            BitmapPixelFormat pixelFormat    = softwareBitmap.BitmapPixelFormat;

            Int32 channels = BitmapPixelFormat.Gray8 == pixelFormat ? 1 : 3;

            List <Int64> shape = new List <Int64>()
            {
                1, temp_len, channels, height, width
            };                                                                              // B,T,C,H,W

            // The channels of image stored in buffer is in order of BGRA-BGRA-BGRA-BGRA.
            // Then we transform it to the order of BBBBB....GGGGG....RRRR....AAAA(dropped)
            TensorFloat tf = TensorFloat.Create(shape);
            byte *      pCPUTensorbyte;
            float *     pCPUTensor;
            uint        uCapacity;

            var tfr  = tf.CreateReference();
            var tfr2 = (IMemoryBufferByteAccess)tfr;

            tfr2.GetBuffer(out pCPUTensorbyte, out uCapacity);
            pCPUTensor = (float *)pCPUTensorbyte;

            for (Int32 t = 0; t < temp_len; t += 1)
            {
                VideoFrame     frame           = frameList[t];
                SoftwareBitmap softwareBitmap2 = frame.SoftwareBitmap;
                // 1. Get the access to buffer of softwarebitmap
                BitmapBuffer           spBitmapBuffer = softwareBitmap2.LockBuffer(BitmapBufferAccessMode.Read);
                IMemoryBufferReference reference      = spBitmapBuffer.CreateReference();

                byte *pData;
                uint  size;
                ((IMemoryBufferByteAccess)reference).GetBuffer(out pData, out size);

                // 2. Transform the data in buffer to a vector of float
                var offset = (height * width * channels) * t;
                if (BitmapPixelFormat.Bgra8 == pixelFormat)
                {
                    for (UInt32 i = 0; i < size; i += 4)
                    {
                        if (toRGB)
                        {
                            // The model expects an RGB image, so swap the B and R planes
                            // (source index 0 is B, 1 is G, 2 is R, 3 is alpha, dropped).
                            UInt32 pixelInd = i / 4;
                            pCPUTensor[offset + (height * width * 0) + pixelInd] = (((float)pData[i + 2]) - mean[0]) / std[0];
                            pCPUTensor[offset + (height * width * 1) + pixelInd] = (((float)pData[i + 1]) - mean[1]) / std[1];
                            pCPUTensor[offset + (height * width * 2) + pixelInd] = (((float)pData[i + 0]) - mean[2]) / std[2];
                        }
                        else
                        {
                            // The model expects a BGR image, so keep the source plane order
                            // (index 0 is B, 1 is G, 2 is R, 3 is alpha, dropped).
                            UInt32 pixelInd = i / 4;
                            pCPUTensor[offset + (height * width * 0) + pixelInd] = (((float)pData[i + 0]) - mean[0]) / std[0];
                            pCPUTensor[offset + (height * width * 1) + pixelInd] = (((float)pData[i + 1]) - mean[1]) / std[1];
                            pCPUTensor[offset + (height * width * 2) + pixelInd] = (((float)pData[i + 2]) - mean[2]) / std[2];
                        }
                    }
                }
                else if (BitmapPixelFormat.Rgba8 == pixelFormat)
                {
                    for (UInt32 i = 0; i < size; i += 4)
                    {
                        // For Rgba8 the source index 0 is R, 1 is G, 2 is B, 3 is alpha (dropped).
                        if (toRGB)
                        {
                            // The model expects an RGB image, so the source plane order is kept.
                            UInt32 pixelInd = i / 4;
                            pCPUTensor[offset + (height * width * 0) + pixelInd] = (((float)pData[i + 0]) - mean[0]) / std[0];
                            pCPUTensor[offset + (height * width * 1) + pixelInd] = (((float)pData[i + 1]) - mean[1]) / std[1];
                            pCPUTensor[offset + (height * width * 2) + pixelInd] = (((float)pData[i + 2]) - mean[2]) / std[2];
                        }
                        else
                        {
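                            // The model expects a BGR image, so swap the R and B planes.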
                            UInt32 pixelInd = i / 4;
                            pCPUTensor[offset + (height * width * 0) + pixelInd] = (((float)pData[i + 2]) - mean[0]) / std[0];
                            pCPUTensor[offset + (height * width * 1) + pixelInd] = (((float)pData[i + 1]) - mean[1]) / std[1];
                            pCPUTensor[offset + (height * width * 2) + pixelInd] = (((float)pData[i + 0]) - mean[2]) / std[2];
                        }
                    }
                }
                else if (BitmapPixelFormat.Gray8 == pixelFormat)
                {
                    // Gray8 stores one byte per pixel, so walk the buffer byte by byte
                    // and apply the same mean/std normalization as the color paths.
                    for (UInt32 i = 0; i < size; i += 1)
                    {
                        pCPUTensor[offset + i] = (((float)pData[i]) - mean[0]) / std[0];
                    }
                }
            }

            // To prevent the following error, copy the data into a new instance and use that as the model input:
            // "The tensor has outstanding memory buffer references that must be closed prior to evaluation!"
            TensorFloat ret = TensorFloat.CreateFromIterable(
                tf.Shape,
                tf.GetAsVectorView());

            return(ret);
        }
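A hypothetical call, using ImageNet-style statistics scaled to the 0-255 byte range (the exact values depend on the model being fed):

            List<float> mean = new List<float> { 0.485f * 255, 0.456f * 255, 0.406f * 255 };
            List<float> std  = new List<float> { 0.229f * 255, 0.224f * 255, 0.225f * 255 };
            TensorFloat input = CustomTensorize(frameList, mean, std, toRGB: true); // frameList: List<VideoFrame>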
Example #31
        bool IsFormatSupportedByWin2D(BitmapPixelFormat format, BitmapAlphaMode alphaMode)
        {
            // The only format Win2D pairs with straight alpha is A8UIntNormalized, and a
            // SoftwareBitmap can never carry that format, so straight alpha is never supported here.
            if (alphaMode == BitmapAlphaMode.Straight)
                return false;

            switch (format)
            {
                case BitmapPixelFormat.Gray16:
                case BitmapPixelFormat.Nv12:
                case BitmapPixelFormat.Yuy2:
                    // Direct2D doesn't support these formats
                    return false;

                case BitmapPixelFormat.Gray8:
                    // Gray8 maps to the alpha-only A8UIntNormalized format, so Ignore is not meaningful.
                    return alphaMode != BitmapAlphaMode.Ignore;
            }

            return true;
        }
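A sketch of how the helper above and TestCreateFromSoftwareBitmap might be driven across the whole format/alpha matrix (the enumeration wrapper is an assumption, not part of the original test):

        [TestMethod]
        public void CanvasBitmap_CreateFromSoftwareBitmap_AllFormatsAndAlphaModes()
        {
            var device = new CanvasDevice();
            foreach (BitmapPixelFormat format in Enum.GetValues(typeof(BitmapPixelFormat)))
            {
                foreach (BitmapAlphaMode alphaMode in Enum.GetValues(typeof(BitmapAlphaMode)))
                {
                    // TestCreateFromSoftwareBitmap skips Unknown and expects unsupported
                    // combinations to throw, so every pairing can be fed through it.
                    TestCreateFromSoftwareBitmap(device, format, alphaMode);
                }
            }
        }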
        /// <summary>
        /// Loads an image from the application's content and returns a new WriteableBitmap. The passed WriteableBitmap is not used.
        /// </summary>
        /// <param name="bmp">The WriteableBitmap.</param>
        /// <param name="uri">The URI to the content file.</param>
        /// <param name="pixelFormat">The pixel format of the stream data. If Unknown is provided as param, the default format of the BitmapDecoder is used.</param>
        /// <returns>A new WriteableBitmap containing the pixel data.</returns>
        public static async Task <WriteableBitmap> FromContent(this WriteableBitmap bmp, Uri uri, BitmapPixelFormat pixelFormat = BitmapPixelFormat.Unknown)
        {
            // Decode pixel data
            var file = await StorageFile.GetFileFromApplicationUriAsync(uri);

            using (var stream = await file.OpenAsync(FileAccessMode.Read))
            {
                return(await FromStream(bmp, stream, pixelFormat));
            }
        }
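Typical usage (the asset URI is illustrative):

        // Load a packaged asset; the WriteableBitmap the extension is called on is just a call target.
        var bmp = await new WriteableBitmap(1, 1).FromContent(new Uri("ms-appx:///Assets/Logo.png"));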
Example #33
 /// <summary>
 /// Returns the size in bytes of an image with a specific size and
 /// <see cref="BitmapPixelFormat"/>.
 /// </summary>
 /// <param name="width">The width of the image.</param>
 /// <param name="height">The height of the image.</param>
 /// <param name="bitmapConfig">
 /// The <see cref="BitmapPixelFormat"/> for which the size in byte will
 /// be returned.
 /// </param>
 public static int GetSizeInByteForBitmap(int width, int height, BitmapPixelFormat bitmapConfig)
 {
     return(width * height * GetPixelSizeForBitmapConfig(bitmapConfig));
 }
        /// <summary>
        /// Loads the data from an image stream and returns a new WriteableBitmap. The passed WriteableBitmap is not used.
        /// </summary>
        /// <param name="bmp">The WriteableBitmap.</param>
        /// <param name="stream">The stream with the image data.</param>
        /// <param name="pixelFormat">The pixel format of the stream data. If Unknown is provided as param, the default format of the BitmapDecoder is used.</param>
        /// <returns>A new WriteableBitmap containing the pixel data.</returns>
        public static async Task <WriteableBitmap> FromStream(this WriteableBitmap bmp, Stream stream, BitmapPixelFormat pixelFormat = BitmapPixelFormat.Unknown)
        {
            using (var dstStream = new InMemoryRandomAccessStream())
            {
                await RandomAccessStream.CopyAsync(stream.AsInputStream(), dstStream);

                return(await FromStream(bmp, dstStream, pixelFormat));
            }
        }
Example #35
        /// <summary>
        /// Applies the user-provided scale and rotation operation to a new image file picked by the user.
        /// This method writes the edited pixel data to the new file without
        /// any regard to existing metadata or other information in the original file.
        /// </summary>
        private async void SaveAs_Click(object sender, RoutedEventArgs e)
        {
            try
            {
                rootPage.NotifyUser("Saving to a new file...", NotifyType.StatusMessage);

                StorageFile inputFile = await m_futureAccess.GetFileAsync(m_fileToken);

                StorageFile outputFile = await Helpers.GetFileFromSavePickerAsync();

                Guid encoderId;

                switch (outputFile.FileType)
                {
                case ".png":
                    encoderId = BitmapEncoder.PngEncoderId;
                    break;

                case ".bmp":
                    encoderId = BitmapEncoder.BmpEncoderId;
                    break;

                case ".jpg":
                default:
                    encoderId = BitmapEncoder.JpegEncoderId;
                    break;
                }

                using (IRandomAccessStream inputStream = await inputFile.OpenAsync(FileAccessMode.Read),
                       outputStream = await outputFile.OpenAsync(FileAccessMode.ReadWrite))
                {
                    // BitmapEncoder expects an empty output stream; the user may have selected a
                    // pre-existing file.
                    outputStream.Size = 0;

                    // Get pixel data from the decoder. We apply the user-requested transforms on the
                    // decoded pixels to take advantage of potential optimizations in the decoder.
                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(inputStream);

                    BitmapTransform transform = new BitmapTransform();

                    // Note that we are requesting the oriented pixel dimensions, and not applying
                    // EXIF orientation in the BitmapTransform. We will request oriented pixel data
                    // later in the BitmapDecoder.GetPixelDataAsync() call.
                    transform.ScaledHeight = (uint)(decoder.OrientedPixelHeight * m_scaleFactor);
                    transform.ScaledWidth  = (uint)(decoder.OrientedPixelWidth * m_scaleFactor);
                    transform.Rotation     = Helpers.ConvertToBitmapRotation(m_userRotation);

                    // The BitmapDecoder indicates what pixel format and alpha mode best match the
                    // natively stored image data. This can provide a performance and/or quality gain.
                    BitmapPixelFormat format = decoder.BitmapPixelFormat;
                    BitmapAlphaMode   alpha  = decoder.BitmapAlphaMode;

                    PixelDataProvider pixelProvider = await decoder.GetPixelDataAsync(
                        format,
                        alpha,
                        transform,
                        ExifOrientationMode.RespectExifOrientation,
                        ColorManagementMode.ColorManageToSRgb
                        );

                    byte[] pixels = pixelProvider.DetachPixelData();

                    // Write the pixel data onto the encoder. Note that we can't simply use the
                    // BitmapTransform.ScaledWidth and ScaledHeight members as the user may have
                    // requested a rotation (which is applied after scaling).
                    BitmapEncoder encoder = await BitmapEncoder.CreateAsync(encoderId, outputStream);

                    encoder.SetPixelData(
                        format,
                        alpha,
                        (uint)((double)m_displayWidthNonScaled * m_scaleFactor),
                        (uint)((double)m_displayHeightNonScaled * m_scaleFactor),
                        decoder.DpiX,
                        decoder.DpiY,
                        pixels
                        );

                    await encoder.FlushAsync();

                    rootPage.NotifyUser("Successfully saved a copy: " + outputFile.Name, NotifyType.StatusMessage);
                }
            }
            catch (Exception err)
            {
                rootPage.NotifyUser("Error: " + err.Message, NotifyType.ErrorMessage);
                ResetPersistedState();
                ResetSessionState();
            }
        }
        /// <summary>
        /// Loads the data from an image stream and returns a new WriteableBitmap. The passed WriteableBitmap is not used.
        /// </summary>
        /// <param name="bmp">The WriteableBitmap.</param>
        /// <param name="stream">The stream with the image data.</param>
        /// <param name="pixelFormat">The pixel format of the stream data. If Unknown is provided as param, the default format of the BitmapDecoder is used.</param>
        /// <returns>A new WriteableBitmap containing the pixel data.</returns>
        public static async Task <WriteableBitmap> FromStream(this WriteableBitmap bmp, IRandomAccessStream stream, BitmapPixelFormat pixelFormat = BitmapPixelFormat.Unknown)
        {
            var decoder = await BitmapDecoder.CreateAsync(stream);

            var transform = new BitmapTransform();

            if (pixelFormat == BitmapPixelFormat.Unknown)
            {
                pixelFormat = decoder.BitmapPixelFormat;
            }
            var pixelData = await decoder.GetPixelDataAsync(pixelFormat, decoder.BitmapAlphaMode, transform, ExifOrientationMode.RespectExifOrientation, ColorManagementMode.ColorManageToSRgb);

            var pixels = pixelData.DetachPixelData();

            // Copy to WriteableBitmap
            bmp = new WriteableBitmap((int)decoder.OrientedPixelWidth, (int)decoder.OrientedPixelHeight);
            using (var bmpStream = bmp.PixelBuffer.AsStream())
            {
                bmpStream.Seek(0, SeekOrigin.Begin);
                bmpStream.Write(pixels, 0, (int)bmpStream.Length);
                return(bmp);
            }
        }
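A round trip through the overload above (a sketch; 'file' is assumed to be a picked StorageFile):

        using (IRandomAccessStream stream = await file.OpenAsync(FileAccessMode.Read))
        {
            // Decode with the stream's native pixel format; pass a BitmapPixelFormat to force one.
            WriteableBitmap bmp = await new WriteableBitmap(1, 1).FromStream(stream);
        }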
Example #37
        private async void OpenImg_Click(object sender, RoutedEventArgs e)
        {
            IList <DetectedFace> faces         = null;
            SoftwareBitmap       detectorInput = null;
            WriteableBitmap      displaySource = null;

            try
            {
                FileOpenPicker photoPicker = new FileOpenPicker();
                photoPicker.ViewMode = PickerViewMode.Thumbnail;
                photoPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary;
                photoPicker.FileTypeFilter.Add(".jpg");
                photoPicker.FileTypeFilter.Add(".jpeg");
                photoPicker.FileTypeFilter.Add(".png");
                photoPicker.FileTypeFilter.Add(".bmp");
                photoFile = await photoPicker.PickSingleFileAsync();

                if (photoFile == null)
                {
                    return;
                }

                using (IRandomAccessStream fileStream = await photoFile.OpenAsync(Windows.Storage.FileAccessMode.Read))
                {
                    BitmapImage bitmapImage = new BitmapImage();
                    bitmapImage.SetSource(fileStream);
                    sourceImg.Source = bitmapImage;

                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

                    BitmapTransform transform = this.ComputeScalingTransformForSourceImage(decoder);

                    using (SoftwareBitmap originalBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage))
                    {
                        // FaceDetector can operate on Gray8 bitmaps.
                        const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                        if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                        {
                            using (detectorInput = SoftwareBitmap.Convert(originalBitmap, InputPixelFormat))
                            {
                                // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer.
                                displaySource = new WriteableBitmap(originalBitmap.PixelWidth, originalBitmap.PixelHeight);
                                originalBitmap.CopyToBuffer(displaySource.PixelBuffer);
                                FaceDetector detector = await FaceDetector.CreateAsync();  // ideally create the detector once and reuse it

                                faces = await detector.DetectFacesAsync(detectorInput);

                                // Create our display using the available image and face results.
                                this.SetupVisualization(displaySource, faces);
                            }
                        }
                    }
                }
            }
            catch (Exception)
            {
                this.ClearVisualization();
            }
        }
        public async Task <StorageFile> SaveToFile(byte[] image_array, string file_name, CreationCollisionOption collision, BitmapPixelFormat image_format, BitmapAlphaMode alpha_mode)
        {
            //  create new bitmap
            WriteableBitmap image = new WriteableBitmap(ImageWidth, ImageHeight);

            // 'using' ensures that the data stream will be disposed after operation is finished
            using (Stream image_stream = image.PixelBuffer.AsStream())
            {
                await image_stream.WriteAsync(image_array, 0, ImageHeight * ImageWidth * 4);
            }

            // create new file in 'Pictures' folder with sent name and collision setting
            var file = await KnownFolders.PicturesLibrary.CreateFileAsync(file_name, collision);

            // opening file by data stream
            using (IRandomAccessStream image_stream = await file.OpenAsync(FileAccessMode.ReadWrite))
            {
                // encoding image from created data stream
                BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.BmpEncoderId, image_stream);

                Stream pixel_stream = image.PixelBuffer.AsStream();

                // creating an array with data stream's length
                byte[] pixel_array = new byte[pixel_stream.Length];

                // reading the data
                await pixel_stream.ReadAsync(pixel_array, 0, pixel_array.Length);

                // encoding the image with parameters below:
                encoder.SetPixelData(image_format, alpha_mode, // format and alpha channel
                                     (uint)image.PixelWidth,   // image width
                                     (uint)image.PixelHeight,  // image height
                                     96.0,                     // DPI in width
                                     96.0,                     // DPI in height
                                     pixel_array);             // byte stream
                await encoder.FlushAsync();                    // end of encoding
            }
            return(file);                                      // returning file
        }
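A hypothetical call, assuming 'pixels' holds ImageWidth x ImageHeight BGRA bytes:

            StorageFile saved = await SaveToFile(pixels, "capture.bmp",
                                                 CreationCollisionOption.ReplaceExisting,
                                                 BitmapPixelFormat.Bgra8,
                                                 BitmapAlphaMode.Ignore);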