        private void SampleInputsGridView_SelectionChanged(object sender, SelectionChangedEventArgs e)
        {
            var gridView  = sender as GridView;
            var thumbnail = gridView.SelectedItem as WinMLSamplesGallery.Controls.Thumbnail;

            if (thumbnail != null)
            {
                var image          = thumbnail.ImageUri;
                var file           = StorageFile.GetFileFromApplicationUriAsync(new Uri(image)).GetAwaiter().GetResult();
                var softwareBitmap = CreateSoftwareBitmapFromStorageFile(file);


                tensorizationSession_ =
                    CreateLearningModelSession(
                        TensorizationModels.ReshapeFlatBufferNHWC(
                            1,
                            4,
                            softwareBitmap.PixelHeight,
                            softwareBitmap.PixelWidth,
                            416,
                            416));


                // Tensorize
                var stream            = file.OpenAsync(FileAccessMode.Read).GetAwaiter().GetResult();
                var decoder           = BitmapDecoder.CreateAsync(stream).GetAwaiter().GetResult();
                var bitmap            = decoder.GetSoftwareBitmapAsync(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied).GetAwaiter().GetResult();
                var pixelDataProvider = decoder.GetPixelDataAsync().GetAwaiter().GetResult();
                var bytes             = pixelDataProvider.DetachPixelData();
                var buffer            = bytes.AsBuffer(); // AsBuffer wraps the managed byte array in an IBuffer; the pixel data is not copied here
                var inputRawTensor    = TensorUInt8Bit.CreateFromBuffer(new long[] { 1, buffer.Length }, buffer);

                // 3 channel NHWC output (1 x 416 x 416 x 3), matching ReshapeFlatBufferNHWC above
                var tensorizeOutput = TensorFloat.Create(new long[] { 1, 416, 416, 3 });
                var b = new LearningModelBinding(tensorizationSession_);
                b.Bind(tensorizationSession_.Model.InputFeatures[0].Name, inputRawTensor);
                b.Bind(tensorizationSession_.Model.OutputFeatures[0].Name, tensorizeOutput);
                tensorizationSession_.Evaluate(b, "");

                // Run the object detection model on the tensorized input
                var detectionBinding = new LearningModelBinding(_session);
                detectionBinding.Bind(_session.Model.InputFeatures[0].Name, tensorizeOutput);
                var results = _session.Evaluate(detectionBinding, "");

                var output1 = results.Outputs.First().Value as TensorFloat;

                var data       = output1.GetAsVectorView();
                var detections = ParseResult(data.ToArray());

                Comparer cp = new Comparer();
                detections.Sort(cp);
                var final = NMS(detections);

                RenderImageInMainPanel(softwareBitmap);
            }
        }
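        // The CreateSoftwareBitmapFromStorageFile helper used above is not shown in this listing.
        // A minimal sketch, assuming it simply decodes the file into the BGRA8 premultiplied format
        // that the rendering and tensorization code expects:
        private static SoftwareBitmap CreateSoftwareBitmapFromStorageFile(StorageFile file)
        {
            // Decode the image file into a SoftwareBitmap.
            var stream  = file.OpenAsync(FileAccessMode.Read).GetAwaiter().GetResult();
            var decoder = BitmapDecoder.CreateAsync(stream).GetAwaiter().GetResult();
            return decoder.GetSoftwareBitmapAsync(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied).GetAwaiter().GetResult();
        }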
        private void DecryptAndEvaluate()
        {
            // Load the encrypted model.
            // The encrypted model (encrypted.onnx) is embedded as a resource in
            // the native binary: WinMLSamplesGalleryNative.dll.
            var inferenceModel      = WinMLSamplesGalleryNative.EncryptedModels.LoadEncryptedResource(DecryptionKey.Password);
            var postProcessingModel = TensorizationModels.SoftMaxThenTopK(10);

            // Update the status
            var isModelDecrypted = inferenceModel != null;

            UpdateStatus(isModelDecrypted);

            // If loading the decrypted model failed (e.g., due to an invalid key/password),
            // then skip performing evaluate.
            if (!isModelDecrypted)
            {
                return;
            }

            // Draw the image to classify in the Image control
            var decoder = ImageHelper.CreateBitmapDecoderFromPath("ms-appx:///InputData/hummingbird.jpg");

            // Create sessions
            var device  = new LearningModelDevice(LearningModelDeviceKind.Cpu);
            var options = new LearningModelSessionOptions()
            {
                CloseModelOnSessionCreation = true // Close the model to prevent extra memory usage
            };
            var inferenceSession      = new LearningModelSession(inferenceModel, device, options);
            var postProcessingSession = new LearningModelSession(postProcessingModel, device, options);

            // Classify the current image
            var softwareBitmap = decoder.GetSoftwareBitmapAsync().GetAwaiter().GetResult();
            var input          = VideoFrame.CreateWithSoftwareBitmap(softwareBitmap);

            // Inference
            var inferenceResults = Evaluate(inferenceSession, input);
            var inferenceOutput  = inferenceResults.Outputs.First().Value;

            // PostProcess
            var postProcessedOutputs = Evaluate(postProcessingSession, inferenceOutput);
            var topKValues           = (TensorFloat)postProcessedOutputs.Outputs["TopKValues"];
            var topKIndices          = (TensorInt64Bit)postProcessedOutputs.Outputs["TopKIndices"];

            // Return results
            var probabilities = topKValues.GetAsVectorView();
            var indices       = topKIndices.GetAsVectorView();
            var labels        = indices.Select((index) => ClassificationLabels.ImageNet[index]);

            // Render the classification and probabilities
            RenderInferenceResults(labels, probabilities);
        }
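        // The single-input Evaluate helper used above is not shown in this listing. A minimal
        // sketch, assuming it binds the model's first input feature and evaluates synchronously,
        // letting WinML allocate the outputs:
        private static LearningModelEvaluationResult Evaluate(LearningModelSession session, object input)
        {
            var binding   = new LearningModelBinding(session);
            var inputName = session.Model.InputFeatures[0].Name;
            binding.Bind(inputName, input);
            return session.Evaluate(binding, "");
        }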
        private void RecreateSessions()
        {
            tensorizationSession_?.Dispose();
            tensorizationSession_ =
                CreateLearningModelSession(
                    TensorizationModels.ReshapeFlatBufferToNCHW(
                        1,
                        4,
                        currentImageHeight_,
                        currentImageWidth_));

            resizeEffectSession_?.Dispose();
            resizeEffectSession_ = GetEffect(ResizeToggleSplitButton, ResizePicker);

            pixelSwizzleEffectSession_?.Dispose();
            pixelSwizzleEffectSession_ = GetPixelSwizzleEffect();

            blurSharpenEffectSession_?.Dispose();
            blurSharpenEffectSession_ = GetEffect(BlurSharpenToggleSplitButton, BlurSharpenPicker);

            contrastEffectSession_?.Dispose();
            contrastEffectSession_ = ContrastToggleSplitButton.IsChecked ?
                                     CreateLearningModelSession(TensorizationModels.AdjustBrightnessAndContrast(
                                                                    1,
                                                                    3,
                                                                    resizeEffectSession_ != null ? 224 : currentImageHeight_,
                                                                    resizeEffectSession_ != null ? 224 : currentImageWidth_)) :
                                     null;

            artisticEffectsEffectSession_?.Dispose();
            artisticEffectsEffectSession_ = GetEffect(ArtisticEffectsToggleSplitButton, ArtisticEffectsPicker);

            orientationEffectSession_?.Dispose();
            orientationEffectSession_ = GetOrientationEffect();

            shapeSession_?.Dispose();
            shapeSession_ = CreateLearningModelSession(TensorizationModels.ShapeNCHW(1, 3, currentImageHeight_, currentImageWidth_));

            detensorizationSession_?.Dispose();
            detensorizationSession_ = CreateLearningModelSession(TensorizationModels.IdentityNCHW(
                                                                     1,
                                                                     3,
                                                                     resizeEffectSession_ != null ? 224 : currentImageHeight_,
                                                                     resizeEffectSession_ != null ? 224 : currentImageWidth_));
        }
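        // GetEffect, GetPixelSwizzleEffect, and GetOrientationEffect are not shown in this listing.
        // A sketch of GetEffect under the assumption that each toggle button enables its effect and
        // the paired picker selects which variant to build via the GetEffectSession factory shown
        // later in this listing (the ComboBox type and the hypothetical EffectViewModel.Tag property
        // mirror the OrientationPicker usage further below):
        private LearningModelSession GetEffect(ToggleSplitButton toggleButton, ComboBox picker)
        {
            if (!toggleButton.IsChecked)
            {
                return null;
            }
            var effect = (picker.SelectedItem as EffectViewModel).Tag;
            return GetEffectSession(effect);
        }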
        public OpenCVInterop()
        {
            this.InitializeComponent();
            CurrentImagePath = null;
            InferenceChoice  = ClassifyChoice.Denoised;

            // Load inference session
            var modelName = "squeezenet1.1-7.onnx";
            var modelPath = Path.Join(Windows.ApplicationModel.Package.Current.InstalledLocation.Path, "Models", modelName);
            var model     = LearningModel.LoadFromFilePath(modelPath);

            _inferenceSession = CreateLearningModelSession(model);

            // Load post processing session
            _postProcessingSession = CreateLearningModelSession(TensorizationModels.SoftMaxThenTopK(TopK));

            BasicGridView.SelectedIndex = 0;
        }
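        // The CreateLearningModelSession helper used throughout these samples is not shown in this
        // listing. A minimal sketch, assuming it mirrors the session setup in DecryptAndEvaluate
        // above (the default device kind chosen here is an assumption):
        private LearningModelSession CreateLearningModelSession(LearningModel model, LearningModelDeviceKind deviceKind = LearningModelDeviceKind.Cpu)
        {
            var device  = new LearningModelDevice(deviceKind);
            var options = new LearningModelSessionOptions()
            {
                CloseModelOnSessionCreation = true // Close the model to prevent extra memory usage
            };
            return new LearningModelSession(model, device, options);
        }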
        private void TryPerformInference(bool reloadImages = true)
        {
            if (CurrentImagePath != null)
            {
                if (reloadImages)
                {
                    Original = WinMLSamplesGalleryNative.OpenCVImage.CreateFromPath(CurrentImagePath);
                    Noisy    = WinMLSamplesGalleryNative.OpenCVImage.AddSaltAndPepperNoise(Original);
                    Denoised = WinMLSamplesGalleryNative.OpenCVImage.DenoiseMedianBlur(Noisy);

                    var baseImageBitmap = Original.AsSoftwareBitmap();
                    RenderingHelpers.BindSoftwareBitmapToImageControl(InputImage, baseImageBitmap);
                    RenderingHelpers.BindSoftwareBitmapToImageControl(NoisyImage, Noisy.AsSoftwareBitmap());
                    RenderingHelpers.BindSoftwareBitmapToImageControl(DenoisedImage, Denoised.AsSoftwareBitmap());

                    var tensorizationModel = TensorizationModels.CastResizeAndTranspose11(Height, Width, 1, 3, baseImageBitmap.PixelHeight, baseImageBitmap.PixelWidth, "nearest");
                    _tensorizationSession = CreateLearningModelSession(tensorizationModel, SelectedDeviceKind);
                }

                WinMLSamplesGalleryNative.OpenCVImage classificationImage = null;
                switch (InferenceChoice)
                {
                case ClassifyChoice.Original:
                    classificationImage = Original;
                    break;

                case ClassifyChoice.Noisy:
                    classificationImage = Noisy;
                    break;

                case ClassifyChoice.Denoised:
                    classificationImage = Denoised;
                    break;
                }

                // Classify
                var (labels, probabilities) = Classify(classificationImage);

                // Render the classification and probabilities
                RenderInferenceResults(labels, probabilities);
            }
        }
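        // Classify is not shown in this listing. A sketch, assuming it chains the tensorization,
        // inference, and post-processing sessions the same way as DecryptAndEvaluate above; the
        // pixel-copy step and the use of the single-input Evaluate helper are assumptions:
        private (IEnumerable<string> labels, IReadOnlyList<float> probabilities) Classify(WinMLSamplesGalleryNative.OpenCVImage image)
        {
            // Tensorize: copy the BGRA pixels into a flat uint8 tensor and run the tensorization model.
            var bitmap    = image.AsSoftwareBitmap();
            var byteCount = 4u * (uint)(bitmap.PixelWidth * bitmap.PixelHeight);
            var buffer    = new Windows.Storage.Streams.Buffer(byteCount) { Length = byteCount };
            bitmap.CopyToBuffer(buffer);
            var rawTensor  = TensorUInt8Bit.CreateFromBuffer(new long[] { 1, byteCount }, buffer);
            var tensorized = Evaluate(_tensorizationSession, rawTensor).Outputs.First().Value;

            // Inference followed by SoftMax + TopK post-processing.
            var inferenceOutput = Evaluate(_inferenceSession, tensorized).Outputs.First().Value;
            var postProcessed   = Evaluate(_postProcessingSession, inferenceOutput);

            // Map the top-k indices to their ImageNet labels.
            var topKValues  = (TensorFloat)postProcessed.Outputs["TopKValues"];
            var topKIndices = (TensorInt64Bit)postProcessed.Outputs["TopKIndices"];
            var labels      = topKIndices.GetAsVectorView().Select(index => ClassificationLabels.ImageNet[index]);
            return (labels, topKValues.GetAsVectorView());
        }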
        public ImageSharpInterop()
        {
            this.InitializeComponent();

            var tensorizationModel = TensorizationModels.BasicTensorization(Height, Width, BatchSize, Channels, Height, Width, "nearest");

            _tensorizationSession = CreateLearningModelSession(tensorizationModel, SelectedDeviceKind);

            var inferenceModelName = "squeezenet1.1-7.onnx";
            var inferenceModelPath = Path.Join(Windows.ApplicationModel.Package.Current.InstalledLocation.Path, "Models", inferenceModelName);
            var inferenceModel     = LearningModel.LoadFromFilePath(inferenceModelPath);

            _inferenceSession = CreateLearningModelSession(inferenceModel);

            var postProcessingModel = TensorizationModels.SoftMaxThenTopK(TopK);

            _postProcessingSession = CreateLearningModelSession(postProcessingModel);

            BasicGridView.SelectedIndex = 0;
        }
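        // The constructors above reference several class-level constants that are not shown in
        // this listing. Plausible declarations (the concrete values are assumptions: a single-batch
        // 224x224 RGB input for squeezenet1.1-7 and a top-10 classification readout):
        private const int BatchSize = 1;
        private const int Channels  = 3;
        private const int Height    = 224;
        private const int Width     = 224;
        private const int TopK      = 10;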
        private LearningModelSession GetEffectSession(Effect effect)
        {
            switch (effect)
            {
            case Effect.Blur3x3:
                return CreateLearningModelSession(TensorizationModels.AveragePool(3));

            case Effect.Blur5x5:
                return CreateLearningModelSession(TensorizationModels.AveragePool(5));

            case Effect.Blur7x7:
                return CreateLearningModelSession(TensorizationModels.AveragePool(7));

            case Effect.ResizeCubicFill:
                return CreateLearningModelSession(TensorizationModels.UniformScaleAndCenterFill(currentImageHeight_, currentImageWidth_, 224, 224, "cubic"));

            case Effect.ResizeCubicFit:
                return CreateLearningModelSession(TensorizationModels.UniformScaleAndCenterFit(currentImageHeight_, currentImageWidth_, 224, 224, "cubic"));

            case Effect.ResizeLinearFill:
                return CreateLearningModelSession(TensorizationModels.UniformScaleAndCenterFill(currentImageHeight_, currentImageWidth_, 224, 224, "linear"));

            case Effect.ResizeLinearFit:
                return CreateLearningModelSession(TensorizationModels.UniformScaleAndCenterFit(currentImageHeight_, currentImageWidth_, 224, 224, "linear"));

            case Effect.ResizeNearestFill:
                return CreateLearningModelSession(TensorizationModels.UniformScaleAndCenterFill(currentImageHeight_, currentImageWidth_, 224, 224, "nearest"));

            case Effect.ResizeNearestFit:
                return CreateLearningModelSession(TensorizationModels.UniformScaleAndCenterFit(currentImageHeight_, currentImageWidth_, 224, 224, "nearest"));

            case Effect.Sobel:
                return CreateLearningModelSession(TensorizationModels.Sobel());

            case Effect.PixelSwizzle_123_321:
                return CreateLearningModelSession(TensorizationModels.Swizzle(3, 2, 1));

            case Effect.PixelSwizzle_123_312:
                return CreateLearningModelSession(TensorizationModels.Swizzle(3, 1, 2));

            case Effect.PixelSwizzle_123_213:
                return CreateLearningModelSession(TensorizationModels.Swizzle(2, 1, 3));

            case Effect.PixelSwizzle_123_231:
                return CreateLearningModelSession(TensorizationModels.Swizzle(2, 3, 1));

            case Effect.PixelSwizzle_123_132:
                return CreateLearningModelSession(TensorizationModels.Swizzle(1, 3, 2));

            case Effect.MirrorHorizontal:
                return CreateLearningModelSession(TensorizationModels.MirrorHorizontalNCHW());

            case Effect.MirrorVertical:
                return CreateLearningModelSession(TensorizationModels.MirrorVerticalNCHW());

            case Effect.RotateRight90:
                return CreateLearningModelSession(TensorizationModels.RotateRight90());

            case Effect.RotateLeft90:
                return CreateLearningModelSession(TensorizationModels.RotateLeft90());

            default:
                return null;
            }
        }
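        // For reference, the cases handled above imply an Effect enumeration along these lines;
        // the exact declaration (including a None member for the unchecked/default state) is an
        // assumption reconstructed from this switch:
        public enum Effect
        {
            None,
            Blur3x3, Blur5x5, Blur7x7,
            ResizeCubicFill, ResizeCubicFit,
            ResizeLinearFill, ResizeLinearFit,
            ResizeNearestFill, ResizeNearestFit,
            Sobel,
            PixelSwizzle_123_321, PixelSwizzle_123_312, PixelSwizzle_123_213,
            PixelSwizzle_123_231, PixelSwizzle_123_132,
            MirrorHorizontal, MirrorVertical,
            RotateRight90, RotateLeft90
        }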
#pragma warning disable CA1416 // Validate platform compatibility
        private void ApplyEffects(bool recreateSession = true)
        {
            if (!initialized_ || decoder_ == null)
            {
                return;
            }

            PerformanceMetricsMonitor.ClearLog();

            if (recreateSession)
            {
                RecreateSessions();
            }

            // TensorizeWithVideoFrame();

            long start, stop;

            // Tensorize
            start = HighResolutionClock.UtcNow();
            var pixelDataProvider = decoder_.GetPixelDataAsync().GetAwaiter().GetResult();
            var bytes             = pixelDataProvider.DetachPixelData();
            var buffer            = bytes.AsBuffer(); // AsBuffer wraps the managed byte array in an IBuffer; the pixel data is not copied here
            var inputRawTensor    = TensorUInt8Bit.CreateFromBuffer(new long[] { 1, buffer.Length }, buffer);
            // 3 channel NCHW
            var nextOutputShape      = new long[] { 1, 3, currentImageHeight_, currentImageWidth_ };
            var intermediateTensor   = TensorFloat.Create(nextOutputShape);
            var tensorizationBinding = Evaluate(tensorizationSession_, inputRawTensor, intermediateTensor);

            stop = HighResolutionClock.UtcNow();
            var tensorizationDuration = HighResolutionClock.DurationInMs(start, stop);

            // Resize
            start = HighResolutionClock.UtcNow();
            TensorFloat          resizeOutput  = null;
            LearningModelBinding resizeBinding = null;

            if (resizeEffectSession_ != null)
            {
                nextOutputShape    = new long[] { 1, 3, 224, 224 };
                resizeOutput       = TensorFloat.Create(nextOutputShape);
                resizeBinding      = Evaluate(resizeEffectSession_, intermediateTensor, resizeOutput);
                intermediateTensor = resizeOutput;
            }
            stop = HighResolutionClock.UtcNow();
            var resizeDuration = HighResolutionClock.DurationInMs(start, stop);

            // Pixel Swizzle
            start = HighResolutionClock.UtcNow();
            TensorFloat          swizzleOutput  = null;
            LearningModelBinding swizzleBinding = null;

            if (pixelSwizzleEffectSession_ != null)
            {
                swizzleOutput      = TensorFloat.Create(nextOutputShape);
                swizzleBinding     = Evaluate(pixelSwizzleEffectSession_, intermediateTensor, swizzleOutput);
                intermediateTensor = swizzleOutput;
            }
            stop = HighResolutionClock.UtcNow();
            var swizzleDuration = HighResolutionClock.DurationInMs(start, stop);

            // Blur
            start = HighResolutionClock.UtcNow();
            TensorFloat          blurOutput  = null;
            LearningModelBinding blurBinding = null;

            if (blurSharpenEffectSession_ != null)
            {
                blurOutput         = TensorFloat.Create(nextOutputShape);
                blurBinding        = Evaluate(blurSharpenEffectSession_, intermediateTensor, blurOutput);
                intermediateTensor = blurOutput;
            }
            stop = HighResolutionClock.UtcNow();
            var blurDuration = HighResolutionClock.DurationInMs(start, stop);

            // Contrast
            start = HighResolutionClock.UtcNow();
            TensorFloat          contrastOutput  = null;
            LearningModelBinding contrastBinding = null;

            if (contrastEffectSession_ != null)
            {
                contrastOutput     = TensorFloat.Create(nextOutputShape);
                contrastBinding    = EvaluateContrastAndBrightnessSession(intermediateTensor, contrastOutput);
                intermediateTensor = contrastOutput;
            }
            stop = HighResolutionClock.UtcNow();
            var contrastDuration = HighResolutionClock.DurationInMs(start, stop);

            // Artistic Effects
            start = HighResolutionClock.UtcNow();
            LearningModelBinding artisticEffectsBinding = null;

            if (artisticEffectsEffectSession_ != null)
            {
                var output = TensorFloat.Create(nextOutputShape);
                artisticEffectsBinding = Evaluate(artisticEffectsEffectSession_, intermediateTensor, output);
                intermediateTensor     = output;
            }
            stop = HighResolutionClock.UtcNow();
            var artisticEffectsDuration = HighResolutionClock.DurationInMs(start, stop);

            // Orientation
            start = HighResolutionClock.UtcNow();
            TensorFloat          orientationOutput  = null;
            LearningModelBinding orientationBinding = null;

            if (orientationEffectSession_ != null)
            {
                var orientationEffect = (OrientationPicker.SelectedItem as OrientationViewModel).Tag;
                if (orientationEffect == Effect.RotateLeft90 ||
                    orientationEffect == Effect.RotateRight90)
                {
                    nextOutputShape   = new long[] { 1, 3, nextOutputShape[3], nextOutputShape[2] };
                    orientationOutput = TensorFloat.Create(nextOutputShape);
                }
                else
                {
                    orientationOutput = TensorFloat.Create(nextOutputShape);
                }
                orientationBinding = Evaluate(orientationEffectSession_, intermediateTensor, orientationOutput);
                intermediateTensor = orientationOutput;
            }
            stop = HighResolutionClock.UtcNow();
            var orientationDuration = HighResolutionClock.DurationInMs(start, stop);

            // Detensorize
            start = HighResolutionClock.UtcNow();
            var shape = intermediateTensor.Shape;
            var n     = (int)shape[0];
            var c     = (int)shape[1];
            var h     = (int)shape[2];
            var w     = (int)shape[3];

            // Rather than writing the data into the software bitmap ourselves from a tensor (which may live on the GPU),
            // we run an identity model so that WinML de-tensorization moves the GPU memory back to the CPU.
            var outputImage = new SoftwareBitmap(BitmapPixelFormat.Bgra8, w, h, BitmapAlphaMode.Premultiplied);
            var outputFrame = VideoFrame.CreateWithSoftwareBitmap(outputImage);

            var descriptor        = detensorizationSession_.Model.InputFeatures[0] as TensorFeatureDescriptor;
            var detensorizerShape = descriptor.Shape;

            if (c != detensorizerShape[1] || h != detensorizerShape[2] || w != detensorizerShape[3])
            {
                detensorizationSession_ = CreateLearningModelSession(TensorizationModels.IdentityNCHW(n, c, h, w));
            }
            var detensorizationBinding = Evaluate(detensorizationSession_, intermediateTensor, outputFrame, true);

            stop = HighResolutionClock.UtcNow();
            var detensorizationDuration = HighResolutionClock.DurationInMs(start, stop);

            // Render
            var softwareBitmap = outputFrame.SoftwareBitmap;

            RenderingHelpers.BindSoftwareBitmapToImageControl(InputImage, softwareBitmap);

            PerformanceMetricsMonitor.Log("Tensorize", tensorizationDuration);
            PerformanceMetricsMonitor.Log("Resize Effect", resizeDuration);
            PerformanceMetricsMonitor.Log("Swizzle Effect", swizzleDuration);
            PerformanceMetricsMonitor.Log("Blur Effect", blurDuration);
            PerformanceMetricsMonitor.Log("Contrast Effect", contrastDuration);
            PerformanceMetricsMonitor.Log("Artistic Effect", artisticEffectsDuration);
            PerformanceMetricsMonitor.Log("Orientation Effect", orientationDuration);
            PerformanceMetricsMonitor.Log("Detensorize", detensorizationDuration);
        }
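        // The input/output Evaluate overload used throughout ApplyEffects is not shown in this
        // listing. A minimal sketch, assuming it binds the single input and single output feature
        // and evaluates synchronously, returning the binding so the caller keeps the bound
        // resources alive; the trailing 'wait' flag presumably forces GPU results to be
        // synchronized back to CPU-visible memory, which this sketch does not model:
        private LearningModelBinding Evaluate(LearningModelSession session, object input, object output, bool wait = false)
        {
            var binding = new LearningModelBinding(session);
            binding.Bind(session.Model.InputFeatures[0].Name, input);
            binding.Bind(session.Model.OutputFeatures[0].Name, output);
            session.Evaluate(binding, "");
            return binding;
        }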