private void SampleInputsGridView_SelectionChanged(object sender, SelectionChangedEventArgs e)
        {
            var gridView  = sender as GridView;
            var thumbnail = gridView.SelectedItem as WinMLSamplesGallery.Controls.Thumbnail;

            if (thumbnail != null)
            {
                var image          = thumbnail.ImageUri;
                var file           = StorageFile.GetFileFromApplicationUriAsync(new Uri(image)).GetAwaiter().GetResult();
                var softwareBitmap = CreateSoftwareBitmapFromStorageFile(file);

                tensorizationSession_ =
                    CreateLearningModelSession(
                        TensorizationModels.ReshapeFlatBufferNHWC(
                            1,
                            4,
                            softwareBitmap.PixelHeight,
                            softwareBitmap.PixelWidth,
                            416,
                            416));

                // Tensorize
                var stream            = file.OpenAsync(FileAccessMode.Read).GetAwaiter().GetResult();
                var decoder           = BitmapDecoder.CreateAsync(stream).GetAwaiter().GetResult();
                var bitmap            = decoder.GetSoftwareBitmapAsync(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied).GetAwaiter().GetResult();
                var pixelDataProvider = decoder.GetPixelDataAsync().GetAwaiter().GetResult();
                var bytes             = pixelDataProvider.DetachPixelData();
                var buffer            = bytes.AsBuffer(); // AsBuffer wraps the existing byte array in an IBuffer without copying
                var inputRawTensor    = TensorUInt8Bit.CreateFromBuffer(new long[] { 1, buffer.Length }, buffer);

                // 3-channel NHWC output tensor
                var tensorizeOutput = TensorFloat.Create(new long[] { 1, 416, 416, 3 });
                var b = new LearningModelBinding(tensorizationSession_);
                b.Bind(tensorizationSession_.Model.InputFeatures[0].Name, inputRawTensor);
                b.Bind(tensorizationSession_.Model.OutputFeatures[0].Name, tensorizeOutput);
                tensorizationSession_.Evaluate(b, "");

                // Run the detection model on the tensorized input
                var resizeBinding = new LearningModelBinding(_session);
                resizeBinding.Bind(_session.Model.InputFeatures[0].Name, tensorizeOutput);
                var results = _session.Evaluate(resizeBinding, "");

                var output1 = results.Output(0) as TensorFloat;

                var data       = output1.GetAsVectorView();
                var detections = ParseResult(data.ToArray());

                Comparer cp = new Comparer();
                detections.Sort(cp);
                var final = NMS(detections);

                RenderImageInMainPanel(softwareBitmap);
            }
        }
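Each GetAwaiter().GetResult() call above blocks the UI thread. If the handler were declared async void, the same I/O steps could be awaited instead (a minimal sketch under that assumption, using the same types as above):

        var file    = await StorageFile.GetFileFromApplicationUriAsync(new Uri(image));
        var stream  = await file.OpenAsync(FileAccessMode.Read);
        var decoder = await BitmapDecoder.CreateAsync(stream);
        var bitmap  = await decoder.GetSoftwareBitmapAsync(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);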
Example #2
        private void ThisAddIn_Startup(object sender, System.EventArgs e)
        {
            richTextBox1 = new RichTextBox();
            richTextBox1.Dock = DockStyle.Fill;

            richTextBox1.SelectionFont = new Font("Verdana", 12, FontStyle.Bold);
            richTextBox1.SelectionColor = Color.Red;

            // Route the image through the clipboard into the RichTextBox
            Clipboard.SetImage(Image.FromFile(_imagePath));
            richTextBox1.Paste();

            // Load and create the model 
            outToLog($"Loading model file '{_modelPath}' on the '{_deviceName}' device");

            int ticks = Environment.TickCount;
            _model = LearningModel.LoadFromFilePath(_modelPath);
            ticks = Environment.TickCount - ticks;
            outToLog($"model file loaded in {ticks} ticks");

            // Create the evaluation session with the model (no explicit device, so the default is used)
            _session = new LearningModelSession(_model);

            outToLog("Getting color management mode...");
            ColorManagementMode colorManagementMode = GetColorManagementMode();

            outToLog("Loading the image...");
            ImageFeatureValue imageTensor = LoadImageFile(colorManagementMode);

            // create a binding object from the session
            outToLog("Binding...");
            LearningModelBinding binding = new LearningModelBinding(_session);
            binding.Bind(_model.InputFeatures.ElementAt(0).Name, imageTensor);

            outToLog("Running the model...");
            ticks = Environment.TickCount;
            var results = _session.Evaluate(binding, "RunId");
            ticks = Environment.TickCount - ticks;
            outToLog($"model run took {ticks} ticks");

            // retrieve results from evaluation
            var resultTensor = results.Outputs[_model.OutputFeatures.ElementAt(0).Name] as TensorFloat;
            var resultVector = resultTensor.GetAsVectorView();

            PrintResults(resultVector);

            Form form1 = new Form();
            form1.Size = new Size(800, 800);
            form1.Controls.Add(richTextBox1);
            //form1.Show();
            form1.ShowDialog();
        }
Example #3
 private void EvaluateInternal(LearningModelSession session, LearningModelBinding binding, bool wait = false)
 {
     if (IsCpu)
     {
         session.Evaluate(binding, "");
     }
     else
     {
         var results = session.EvaluateAsync(binding, "");
         if (wait)
         {
             results.GetAwaiter().GetResult();
         }
     }
 }
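On DirectX devices the un-awaited EvaluateAsync calls queue work on the GPU, so a caller can submit several evaluations and only block on the last one. A hypothetical pipelined caller (frames, inputName, binding, and session are assumptions, not part of the sample):

     for (int i = 0; i < frames.Count; i++)
     {
         binding.Bind(inputName, frames[i]);
         // Only block on the final submission so the GPU queue stays full
         EvaluateInternal(session, binding, wait: i == frames.Count - 1);
     }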
        // usage: SqueezeNet [modelfile] [imagefile] [cpu|directx]
        static int Main(string[] args)
        {
            if (!ParseArgs(args))
            {
                Console.WriteLine("Usage: [executable_name] [modelfile] [imagefile] [cpu|directx]");
                return -1;
            }

            // Load and create the model
            Console.WriteLine($"Loading model file '{_modelPath}' on the '{_deviceName}' device");

            int ticks = Environment.TickCount;

            _model = LearningModel.LoadFromFilePath(_modelPath);
            ticks  = Environment.TickCount - ticks;
            Console.WriteLine($"model file loaded in {ticks} ticks");

            // Create the evaluation session with the model and device
            _session = new LearningModelSession(_model, new LearningModelDevice(_deviceKind));

            Console.WriteLine("Getting color management mode...");
            ColorManagementMode colorManagementMode = GetColorManagementMode();

            Console.WriteLine("Loading the image...");
            ImageFeatureValue imageTensor = LoadImageFile(colorManagementMode);

            // create a binding object from the session
            Console.WriteLine("Binding...");
            LearningModelBinding binding = new LearningModelBinding(_session);

            binding.Bind(_model.InputFeatures.ElementAt(0).Name, imageTensor);

            Console.WriteLine("Running the model...");
            ticks = Environment.TickCount;
            var results = _session.Evaluate(binding, "RunId");

            ticks = Environment.TickCount - ticks;
            Console.WriteLine($"model run took {ticks} ticks");

            // retrieve results from evaluation
            var resultTensor = results.Outputs[_model.OutputFeatures.ElementAt(0).Name] as TensorFloat;
            var resultVector = resultTensor.GetAsVectorView();

            PrintResults(resultVector);
            return 0;
        }
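ParseArgs is not shown in the sample; a plausible sketch, assuming the three positional arguments from the usage string (the fields mirror the names used above; _imagePath is a hypothetical field presumably consumed by LoadImageFile):

        static bool ParseArgs(string[] args)
        {
            if (args.Length < 3) return false;
            _modelPath  = args[0];
            _imagePath  = args[1];  // hypothetical field, read by LoadImageFile
            _deviceName = args[2];
            _deviceKind = args[2].Equals("directx", StringComparison.OrdinalIgnoreCase)
                ? LearningModelDeviceKind.DirectX
                : LearningModelDeviceKind.Cpu;
            return true;
        }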
Example #5
        internal string Evaluate()
        {
            // input tensor shape is [1x4]
            long[] shape = new long[2];
            shape[0] = 1;
            shape[1] = 4;

            // set up the input tensor
            float[] input_data = new float[4];
            input_data[0] = _sepal_length;
            input_data[1] = _sepal_width;
            input_data[2] = _petal_length;
            input_data[3] = _petal_width;
            TensorFloat tensor_float = TensorFloat.CreateFromArray(shape, input_data);

            // bind the tensor to "input"
            var binding = new LearningModelBinding(_session);

            binding.Bind("input", tensor_float);

            // evaluate
            var results = _session.Evaluate(binding, "");

            // get the results
            TensorFloat prediction      = (TensorFloat)results.Outputs.First().Value;
            var         prediction_data = prediction.GetAsVectorView();

            // find the highest predicted value
            int   max_index = 0;
            float max_value = float.MinValue; // start below any possible output (handles all-negative logits)

            for (int i = 0; i < prediction_data.Count; i++)
            {
                var val = prediction_data.ElementAt(i);
                if (val > max_value)
                {
                    max_value = val;
                    max_index = i;
                }
            }

            // return the label corresponding to the highest predicted value
            return _labels.ElementAt(max_index);
        }
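The argmax loop above could also be written as a single LINQ expression; an equivalent sketch (it keeps the first occurrence of the maximum, like the loop):

        int best = prediction_data
            .Select((value, index) => (value, index))
            .Aggregate((a, b) => b.value > a.value ? b : a)
            .index;
        return _labels.ElementAt(best);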
Example #6
        public override List<ONNXTensor> Run(IEnumerable<string> outputs, Dictionary<string, ONNXTensor> feedDict)
        {
            var binding = new LearningModelBinding(sess);

            foreach (var item in feedDict)
            {
                object tensor;
                if (!IsFP16)
                {
                    tensor = TensorFloat.CreateFromArray(item.Value.Shape, item.Value.Buffer);
                    if (IsGPU)
                    {
                        //TODO: Move SoftwareTensor to DX12Tensor
                        tensor = MoveToGPU((TensorFloat)tensor);
                    }
                }
                else
                {
                    tensor = TensorFloat16Bit.CreateFromArray(item.Value.Shape, item.Value.Buffer);
                }
                binding.Bind(item.Key, tensor);
            }

            var result = sess.Evaluate(binding, $"eval{++evalCount}");

            var ret = new List<ONNXTensor>();

            foreach (var item in outputs)
            {
                var tensor = result.Outputs[item] as TensorFloat;
                var vector = tensor.GetAsVectorView().ToArray();
                ret.Add(new ONNXTensor()
                {
                    Buffer = vector, Shape = tensor.Shape.ToArray()
                });
            }

            return ret;
        }
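A hypothetical call site, assuming ONNXTensor carries the long[] Shape and float[] Buffer the code above implies (the tensor names and "runner" instance are placeholders):

        var feed = new Dictionary<string, ONNXTensor>
        {
            ["input"] = new ONNXTensor { Shape = new long[] { 1, 4 }, Buffer = new float[] { 5.1f, 3.5f, 1.4f, 0.2f } },
        };
        var results = runner.Run(new[] { "output" }, feed); // "runner" is an instance of this class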
Example #7
        private static float Evaluate(LearningModelSession session, List<VideoFrame> input)
        {
            string inputName     = session.Model.InputFeatures[0].Name;
            float  totalDuration = 0;
            var    binding       = new LearningModelBinding(session);

            for (int j = 0; j < input.Count; j++)
            {
                if (navigatingAwayFromPage)
                {
                    break;
                }

                var start = HighResolutionClock.UtcNow();
                binding.Bind(inputName, input[j]);
                session.Evaluate(binding, "");
                var stop     = HighResolutionClock.UtcNow();
                var duration = HighResolutionClock.DurationInMs(start, stop);
                totalDuration += duration;
            }
            return totalDuration;
        }
Example #8
        private static float EvaluateBatched(LearningModelSession session, List<VideoFrame> input, int batchSize)
        {
            int    numBatches    = (int)Math.Ceiling((decimal)input.Count / batchSize);
            string inputName     = session.Model.InputFeatures[0].Name;
            float  totalDuration = 0;
            var    binding       = new LearningModelBinding(session);

            for (int i = 0; i < numBatches; i++)
            {
                if (navigatingAwayFromPage)
                {
                    break;
                }

                int rangeStart = batchSize * i;
                List<VideoFrame> batch;
                // Add padding to the last batch if necessary
                if (rangeStart + batchSize > input.Count)
                {
                    int numInputsRemaining = input.Count - rangeStart;
                    int paddingAmount      = batchSize - numInputsRemaining;
                    batch = input.GetRange(rangeStart, numInputsRemaining);
                    batch.AddRange(input.GetRange(0, paddingAmount));
                }
                else
                {
                    batch = input.GetRange(rangeStart, batchSize);
                }
                var start = HighResolutionClock.UtcNow();
                binding.Bind(inputName, batch);
                session.Evaluate(binding, "");
                var stop     = HighResolutionClock.UtcNow();
                var duration = HighResolutionClock.DurationInMs(start, stop);
                totalDuration += duration;
            }
            return totalDuration;
        }
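For example, with 10 input frames and batchSize = 4, numBatches = ceil(10/4) = 3; the final batch holds the two remaining frames plus frames 0 and 1 recycled as padding, so every Evaluate call sees a full batch.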
        private static LearningModelEvaluationResult Evaluate(LearningModelSession session, object input)
        {
            // Create the binding
            var binding = new LearningModelBinding(session);

            // Create an empty output that will keep the output resources on the GPU.
            // It will be chained into the post-processing on the GPU as well.
            var output = TensorFloat.Create();

            // Bind inputs and outputs
            // For squeezenet these evaluate to "data", and "squeezenet0_flatten0_reshape0"
            string inputName  = session.Model.InputFeatures[0].Name;
            string outputName = session.Model.OutputFeatures[0].Name;

            binding.Bind(inputName, input);

            var outputBindProperties = new PropertySet();

            outputBindProperties.Add("DisableTensorCpuSync", PropertyValue.CreateBoolean(true));
            binding.Bind(outputName, output, outputBindProperties);

            // Evaluate
            return session.Evaluate(binding, "");
        }
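Because the bound output is never synced to the CPU, a second session can consume it directly on the GPU. A hypothetical chaining step (postProcessSession is an assumption, not part of the sample):

        var result      = Evaluate(session, input);
        var gpuOutput   = result.Outputs[session.Model.OutputFeatures[0].Name];
        var postBinding = new LearningModelBinding(postProcessSession);
        postBinding.Bind(postProcessSession.Model.InputFeatures[0].Name, gpuOutput);
        postProcessSession.Evaluate(postBinding, "");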
        private async Task EvaluateVideoFrameAsync()
        {
            Debug.WriteLine("EvaluateVideoFrameAsync");
            if (_appModel.InputFrame.Direct3DSurface != null)
            {
                Debug.WriteLine("Has Direct3dsurface");
            }
            if ((_appModel.InputFrame != null) &&
                (_appModel.InputFrame.SoftwareBitmap != null || _appModel.InputFrame.Direct3DSurface != null))
            {
                _appModel.InputFrame = await ImageHelper.CenterCropImageAsync(_appModel.InputFrame, _inWidth, _inHeight);

                await InputSoftwareBitmapSource.SetBitmapAsync(_appModel.InputFrame.SoftwareBitmap);

                // Lock so eval + binding not destroyed mid-evaluation
                Debug.Write("Eval Begin | ");
                LearningModelEvaluationResult results;
                lock (_processLock)
                {
                    Debug.Write("Eval Lock | ");
                    _binding.Bind(_inputImageDescription, ImageFeatureValue.CreateFromVideoFrame(_appModel.InputFrame));
                    _binding.Bind(_outputImageDescription, ImageFeatureValue.CreateFromVideoFrame(_appModel.OutputFrame));
                    results = _session.Evaluate(_binding, "test");
                }
                Debug.Write("Eval Unlock\n");

                // Parse Results
                IReadOnlyDictionary<string, object> outputs = results.Outputs;
                foreach (var output in outputs)
                {
                    Debug.WriteLine($"{output.Key} : {output.Value} -> {output.Value.GetType()}");
                }

                await OutputSoftwareBitmapSource.SetBitmapAsync(_appModel.OutputFrame.SoftwareBitmap);
            }
        }
Example #11
        /// <summary>
        /// 1) Bind input and output features
        /// 2) Run evaluation of the model
        /// 3) Report the result
        /// </summary>
        /// <param name="inputVideoFrame"></param>
        /// <returns></returns>
        private async Task EvaluateVideoFrameAsync(VideoFrame inputVideoFrame)
        {
            Debug.WriteLine("EvaluateVideoFrameAsync");
            LearningModelSession session     = null;
            bool isReadyForEval              = false;
            bool showInitialImageAndProgress = true;
            bool proceedWithEval             = false;

            _evaluationLock.Wait();
            {
                session                     = m_session;
                isReadyForEval              = _isReadyForEval;
                _isReadyForEval             = false;
                showInitialImageAndProgress = _showInitialImageAndProgress;
                proceedWithEval             = _proceedWithEval;
            }
            _evaluationLock.Release();

            if ((inputVideoFrame != null) &&
                (inputVideoFrame.SoftwareBitmap != null || inputVideoFrame.Direct3DSurface != null) &&
                isReadyForEval &&
                (session != null) &&
                proceedWithEval)
            {
                try
                {
                    _perfStopwatch.Restart();
                    NotifyUser("Processing...", NotifyType.StatusMessage);
                    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        if (showInitialImageAndProgress)
                        {
                            UIProcessingProgressRing.IsActive   = true;
                            UIProcessingProgressRing.Visibility = Visibility.Visible;
                            UIButtonSaveImage.IsEnabled         = false;
                        }
                    });

                    // Crop the input image to communicate appropriately to the user what is being evaluated
                    _inputFrame = await ImageHelper.CenterCropImageAsync(inputVideoFrame, m_inWidth, m_inHeight);

                    _perfStopwatch.Stop();
                    Int64 cropTime = _perfStopwatch.ElapsedMilliseconds;
                    Debug.WriteLine($"Image handling: {cropTime}ms");

                    // Bind and Eval
                    if (_inputFrame != null)
                    {
                        _evaluationLock.Wait();
                        try
                        {
                            _perfStopwatch.Restart();

                            // create bindings for the input and output buffers
                            // ###### BUG 4794 - Reusing the same binding object currently fails to update output on 2nd+ eval call as of 07/17/2018
                            //if (m_binding == null)
                            {
                                m_binding = new LearningModelBinding(session);
                                ImageFeatureValue outputImageFeatureValue = ImageFeatureValue.CreateFromVideoFrame(_outputFrame);
                                m_binding.Bind(m_outName, outputImageFeatureValue);
                            }

                            ImageFeatureValue inputImageFeatureValue = ImageFeatureValue.CreateFromVideoFrame(_inputFrame);
                            m_binding.Bind(m_inName, inputImageFeatureValue);

                            Int64 bindTime = _perfStopwatch.ElapsedMilliseconds;
                            Debug.WriteLine($"Binding: {bindTime}ms");

                            // render the input frame
                            if (showInitialImageAndProgress)
                            {
                                await ImageHelper.RenderFrameAsync(_inputFrameRenderer, _inputFrame);
                            }

                            // Process the frame with the model
                            _perfStopwatch.Restart();

                            var results = m_session.Evaluate(m_binding, "test");

                            _perfStopwatch.Stop();
                            Int64 evalTime = _perfStopwatch.ElapsedMilliseconds;
                            Debug.WriteLine($"Eval: {evalTime}ms");

                            // Parse result
                            IReadOnlyDictionary<string, object> outputs = results.Outputs;
                            foreach (var output in outputs)
                            {
                                Debug.WriteLine($"{output.Key} : {output.Value} -> {output.Value.GetType()}");
                            }

                            // Display result
                            //ImageFeatureValue outputImage = (results.Outputs[m_outputTensorDescription.Name] as ImageFeatureValue);
                            //if(outputImage != null)
                            //{
                            //    _outputFrame = outputImage.VideoFrame;
                            //}
                            await ImageHelper.RenderFrameAsync(_resultframeRenderer, _outputFrame);
                        }
                        catch (Exception ex)
                        {
                            NotifyUser(ex.Message, NotifyType.ErrorMessage);
                            Debug.WriteLine(ex.ToString());
                        }
                        finally
                        {
                            _evaluationLock.Release();
                        }

                        if (showInitialImageAndProgress)
                        {
                            await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                            {
                                UIProcessingProgressRing.IsActive   = false;
                                UIProcessingProgressRing.Visibility = Visibility.Collapsed;
                                UIButtonSaveImage.IsEnabled         = true;
                            });
                        }

                        NotifyUser("Done!", NotifyType.StatusMessage);
                    }
                    else
                    {
                        Debug.WriteLine("Skipped eval, null input frame");
                    }
                }
                catch (Exception ex)
                {
                    NotifyUser(ex.Message, NotifyType.ErrorMessage);
                    Debug.WriteLine(ex.ToString());
                }

                _evaluationLock.Wait();
                {
                    _isReadyForEval = true;
                }
                _evaluationLock.Release();

                _perfStopwatch.Reset();
            }
        }
Example #12
        static SoftwareBitmap GetMelspectrogramFromSignal(
            IEnumerable<float> rawSignal,
            int batchSize    = 1,
            int windowSize   = 256,
            int dftSize      = 256,
            int hopSize      = 3,
            int nMelBins     = 1024,
            int samplingRate = 8192,
            int amplitude    = 5000
            )
        {
            float[] signal = rawSignal.ToArray();

            // Scale the signal by a given amplitude
            for (int i = 0; i < signal.Length; i++)
            {
                signal[i] = signal[i] * amplitude;
            }

            int signalSize      = signal.Length;
            var nDFT            = 1 + (signalSize - dftSize) / hopSize;
            var onesidedDftSize = (dftSize >> 1) + 1;

            long[] signalShape         = { batchSize, signalSize };
            long[] melSpectrogramShape = { batchSize, 1, nDFT, nMelBins };

            var builder = LearningModelBuilder.Create(13)
                .Inputs.Add(LearningModelBuilder.CreateTensorFeatureDescriptor("Input.TimeSignal", TensorKind.Float, signalShape))
                .Outputs.Add(LearningModelBuilder.CreateTensorFeatureDescriptor("Output.MelSpectrogram", TensorKind.Float, melSpectrogramShape))
                .Operators.Add(new Operator("HannWindow", MicrosoftExperimentalDomain)
                    .SetConstant("size", TensorInt64Bit.CreateFromArray(new List<long>(), new long[] { windowSize }))
                    .SetOutput("output", "hann_window"))
                .Operators.Add(new Operator("STFT", MicrosoftExperimentalDomain)
                    .SetName("STFT_NAMED_NODE")
                    .SetInput("signal", "Input.TimeSignal")
                    .SetInput("window", "hann_window")
                    .SetConstant("frame_length", TensorInt64Bit.CreateFromArray(new List<long>(), new long[] { dftSize }))
                    .SetConstant("frame_step", TensorInt64Bit.CreateFromArray(new List<long>(), new long[] { hopSize }))
                    .SetOutput("output", "stft_output"))
                .Operators.Add(new Operator("ReduceSumSquare")
                    .SetInput("data", "stft_output")
                    .SetAttribute("axes", TensorInt64Bit.CreateFromArray(new List<long>() { 1 }, new long[] { 3 }))
                    .SetAttribute("keepdims", TensorInt64Bit.CreateFromArray(new List<long>(), new long[] { 0 }))
                    .SetOutput("reduced", "magnitude_squared"))
                .Operators.Add(new Operator("Div")
                    .SetInput("A", "magnitude_squared")
                    .SetConstant("B", TensorFloat.CreateFromArray(new List<long>(), new float[] { dftSize }))
                    .SetOutput("C", "power_frames"))
                .Operators.Add(new Operator("MelWeightMatrix", MicrosoftExperimentalDomain)
                    .SetConstant("num_mel_bins", TensorInt64Bit.CreateFromArray(new List<long>(), new long[] { nMelBins }))
                    .SetConstant("dft_length", TensorInt64Bit.CreateFromArray(new List<long>(), new long[] { dftSize }))
                    .SetConstant("sample_rate", TensorInt64Bit.CreateFromArray(new List<long>(), new long[] { samplingRate }))
                    .SetConstant("lower_edge_hertz", TensorFloat.CreateFromArray(new List<long>(), new float[] { 0 }))
                    .SetConstant("upper_edge_hertz", TensorFloat.CreateFromArray(new List<long>(), new float[] { (float)(samplingRate / 2.0) }))
                    .SetOutput("output", "mel_weight_matrix"))
                .Operators.Add(new Operator("Reshape")
                    .SetInput("data", "power_frames")
                    .SetConstant("shape", TensorInt64Bit.CreateFromArray(new List<long>() { 2 }, new long[] { batchSize * nDFT, onesidedDftSize }))
                    .SetOutput("reshaped", "reshaped_output"))
                .Operators.Add(new Operator("MatMul")
                    .SetInput("A", "reshaped_output")
                    .SetInput("B", "mel_weight_matrix")
                    .SetOutput("Y", "mel_spectrogram"))
                .Operators.Add(new Operator("Reshape")
                    .SetInput("data", "mel_spectrogram")
                    .SetConstant("shape", TensorInt64Bit.CreateFromArray(new List<long>() { 4 }, melSpectrogramShape))
                    .SetOutput("reshaped", "Output.MelSpectrogram"));

            var model = builder.CreateModel();

            LearningModelSession session = new LearningModelSession(model);
            LearningModelBinding binding = new LearningModelBinding(session);

            // Bind input
            binding.Bind("Input.TimeSignal", TensorFloat.CreateFromArray(signalShape, signal));

            // Bind output
            var outputImage = new VideoFrame(
                BitmapPixelFormat.Bgra8,
                nMelBins,
                nDFT);

            binding.Bind("Output.MelSpectrogram", outputImage);

            // Evaluate
            var sw     = Stopwatch.StartNew();
            var result = session.Evaluate(binding, "");

            sw.Stop();
            Console.WriteLine($"Evaluate Took: {sw.ElapsedMilliseconds} ms");

            return outputImage.SoftwareBitmap;
        }
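A hypothetical driver for the function above: five seconds of a 440 Hz sine at the default 8192 Hz sampling rate (the tone frequency and duration are arbitrary).

        var signal = Enumerable.Range(0, 8192 * 5)
            .Select(i => (float)Math.Sin(2 * Math.PI * 440 * i / 8192.0));
        SoftwareBitmap melSpectrogram = GetMelspectrogramFromSignal(signal);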
Example #13
        static void Main(string[] args)
        {
            Console.WriteLine("Load squeezenet.onnx.");
            using (var model = LearningModel.LoadFromFilePath("squeezenet.onnx"))
            {
                Console.WriteLine("Load kitten_224.png as StorageFile.");
                var name       = AppDomain.CurrentDomain.BaseDirectory + "kitten_224.png";
                var image_task = Windows.Storage.StorageFile.GetFileFromPathAsync(name);
                image_task.AsTask().Wait();
                var image = image_task.GetResults();
                Console.WriteLine("Load StorageFile into Stream.");
                var stream_task = image.OpenReadAsync();
                System.Threading.Thread.Sleep(1000);
                // stream_task.AsTask().Wait();
                //
                // Unable to call AsTask on IAsyncOperation<IRandomAccessStreamWithContentType>...
                // System.TypeInitializationException: 'The type initializer for 'ABI.Windows.Foundation.AsyncOperationCompletedHandler`1' threw an exception.'
                // This exception was originally thrown at this call stack:
                //   System.RuntimeType.ThrowIfTypeNeverValidGenericArgument(System.RuntimeType)
                //   System.RuntimeType.SanityCheckGenericArguments(System.RuntimeType[], System.RuntimeType[])
                //   System.RuntimeType.MakeGenericType(System.Type[])
                //   System.Linq.Expressions.Compiler.DelegateHelpers.MakeNewDelegate(System.Type[])
                //   System.Linq.Expressions.Compiler.DelegateHelpers.MakeDelegateType(System.Type[])
                //   ABI.Windows.Foundation.AsyncOperationCompletedHandler<TResult>.AsyncOperationCompletedHandler()
                //
                // So sleep instead...
                using (var stream = stream_task.GetResults())
                {
                    Console.WriteLine("Create SoftwareBitmap from decoded Stream.");
                    var decoder_task = Windows.Graphics.Imaging.BitmapDecoder.CreateAsync(stream);
                    System.Threading.Thread.Sleep(1000);
                    // decoder_task.AsTask().Wait() fails with the same
                    // TypeInitializationException as above, so sleep instead...
                    var decoder = decoder_task.GetResults();
                    var software_bitmap_task = decoder.GetSoftwareBitmapAsync();
                    System.Threading.Thread.Sleep(1000);
                    // software_bitmap_task.AsTask().Wait() fails with the same
                    // TypeInitializationException as above, so sleep instead...
                    using (var software_bitmap = software_bitmap_task.GetResults())
                    {
                        Console.WriteLine("Create VideoFrame.");
                        var frame = Windows.Media.VideoFrame.CreateWithSoftwareBitmap(software_bitmap);

                        Console.WriteLine("Create LearningModelSession.");
                        using (var session = new LearningModelSession(model))
                        {
                            Console.WriteLine("Create LearningModelBinding.");
                            var binding = new LearningModelBinding(session);
                            Console.WriteLine("Bind data_0.");
                            binding.Bind("data_0", frame);
                            Console.WriteLine("Evaluate.");
                            var results = session.Evaluate(binding, "");
                        }
                        Console.WriteLine("Success!\n");
                    }
                }
            }
        }
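The Thread.Sleep polling above works around an old C#/WinRT projection bug; on a projection where the AsTask conversion works (e.g. .NET 5+ with CsWinRT), each operation can be blocked on directly. A sketch under that assumption:

        var stream          = image.OpenReadAsync().GetAwaiter().GetResult();
        var decoder         = Windows.Graphics.Imaging.BitmapDecoder.CreateAsync(stream).GetAwaiter().GetResult();
        var software_bitmap = decoder.GetSoftwareBitmapAsync().GetAwaiter().GetResult();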