示例#1
0
 /// <summary>
 /// Releases the inference session first, then the model it was built on,
 /// dropping both references so a later call is a harmless no-op.
 /// </summary>
 public override void Dispose()
 {
     if (sess != null)
     {
         sess.Dispose();
         sess = null;
     }

     if (model != null)
     {
         model.Dispose();
         model = null;
     }
 }
        /// <summary>
        /// Loads the label file and the ONNX model, then creates the evaluation
        /// session. Once the model is loaded, subsequent calls return immediately.
        /// On failure the status text shows the error and _model is reset so a
        /// later call can retry.
        /// </summary>
        /// <returns>A task that completes when loading finished or failed.</returns>
        private async Task LoadModelAsync()
        {
            // just load the model one time.
            if (_model != null)
            {
                return;
            }

            StatusBlock.Text = $"Loading {_kModelFileName} ... patience ";

            try
            {
                // Parse labels from label json file.  We know the file's
                // entries are already sorted in order.
                // Clear first: a retry after a failed attempt previously
                // appended duplicate labels to the partially-filled list.
                _labels.Clear();
                var fileString = File.ReadAllText($"Assets/{_kLabelsFileName}");
                var fileDict   = JsonConvert.DeserializeObject <Dictionary <string, string> >(fileString);
                foreach (var kvp in fileDict)
                {
                    _labels.Add(kvp.Value);
                }

                // Load and create the model
                var modelFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri($"ms-appx:///Assets/{_kModelFileName}"));

                _model = await LearningModel.LoadFromStorageFileAsync(modelFile);

                // Create the evaluation session with the model and device
                _session = new LearningModelSession(_model, new LearningModelDevice(GetDeviceKind()));
            }
            catch (Exception ex)
            {
                // Surface the failure to the UI and reset so the next call retries.
                StatusBlock.Text = $"error: {ex.Message}";
                _model           = null;
            }
        }
        /// <summary>
        /// Tears down any existing binding/session, reloads the ONNX model named
        /// by the app model, and recreates the session and binding on the device
        /// selected by UseGpu (DirectX vs. CPU).
        /// </summary>
        private async Task LoadModelAsync()
        {
            Debug.Write("LoadModelBegin | ");

            Debug.Write("LoadModel Lock | ");

            // Tear down under the same lock that guards creation: the old code
            // cleared the binding and disposed the session OUTSIDE the lock,
            // racing with a concurrent Dispose (the lock's stated purpose).
            lock (_processLock)
            {
                _binding?.Clear();
                _session?.Dispose();
            }

            StorageFile modelFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri($"ms-appx:///Assets/{_appModel.ModelSource}.onnx"));

            _learningModel = await LearningModel.LoadFromStorageFileAsync(modelFile);

            _inferenceDeviceSelected = UseGpu ? LearningModelDeviceKind.DirectX : LearningModelDeviceKind.Cpu;

            // Lock so can't create a new session or binding while also being disposed
            lock (_processLock)
            {
                _session = new LearningModelSession(_learningModel, new LearningModelDevice(_inferenceDeviceSelected));
                _binding = new LearningModelBinding(_session);
            }

            debugModelIO();
            _inputImageDescription  = _learningModel.InputFeatures.ToList().First().Name;
            _outputImageDescription = _learningModel.OutputFeatures.ToList().First().Name;

            Debug.Write("LoadModel Unlock\n");
        }
示例#4
0
        /// <summary>
        /// Loads the model on first use (cached in _model), binds the supplied
        /// video frame as input and a float tensor as output, then evaluates.
        /// On failure the error is logged and _model is reset for a retry.
        /// </summary>
        /// <param name="_inputFrame">Frame to run inference on.</param>
        /// <param name="_modelFileName">App-package-relative path of the ONNX model.</param>
        private async Task LoadAndEvaluateModelAsync(VideoFrame _inputFrame, string _modelFileName)
        {
            LearningModelBinding _binding = null;
            LearningModelSession _session;

            try
            {
                //Load and create the model
                if (_model == null)
                {
                    var modelFile =
                        await StorageFile.GetFileFromApplicationUriAsync(new Uri($"ms-appx:///{_modelFileName}"));

                    _model = await LearningModel.LoadFromStorageFileAsync(modelFile);
                }

                // Create the evaluation session with the model
                _session = new LearningModelSession(_model);

                // Get input and output features of the model
                var inputFeatures  = _model.InputFeatures.ToList();
                var outputFeatures = _model.OutputFeatures.ToList();

                // Create binding and then bind input/ output features
                _binding = new LearningModelBinding(_session);

                _inputImageDescriptor =
                    inputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Tensor) as TensorFeatureDescriptor;

                _outputTensorDescriptor =
                    outputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Tensor) as TensorFeatureDescriptor;

                TensorFloat       outputTensor = TensorFloat.Create(_outputTensorDescriptor.Shape);
                ImageFeatureValue imageTensor  = ImageFeatureValue.CreateFromVideoFrame(_inputFrame);

                // Bind inputs +outputs
                _binding.Bind(_inputImageDescriptor.Name, imageTensor);
                _binding.Bind(_outputTensorDescriptor.Name, outputTensor);


                // Evaluate and get the results
                var results = await _session.EvaluateAsync(_binding, "test");

                Debug.WriteLine("ResultsEvaluated: " + results.ToString());

                // Copy the output tensor into a plain list of scores
                // (replaces the old hand-rolled element-copy loop).
                var resultsList = outputTensor.GetAsVectorView().ToList();
            }
            catch (Exception ex)
            {
                Debug.WriteLine($"error: {ex.Message}");
                // Reset so the next call retries the model load.
                _model = null;
            }
        }
 /// <summary>
 /// Releases the session and model in reverse order of creation and nulls
 /// the fields. Null-conditional dispose makes this safe to call even if
 /// initialization never completed (the old code threw NullReferenceException).
 /// </summary>
 void Reset()
 {
     // let everything go in reverse order, taking care to dispose
     _session?.Dispose();
     _session = null;
     _model?.Dispose();
     _model = null;
 }
示例#6
0
        /// <summary>
        /// Initialize: loads the given ONNX file and creates the inference session.
        /// This wrapper only supports single-input / single-output models.
        /// </summary>
        /// <param name="file">The ONNX file</param>
        public async Task Init(StorageFile file)
        {
            var loadedModel = await LearningModel.LoadFromStorageFileAsync(file);
            this.model = loadedModel;

            this.session = new LearningModelSession(loadedModel);

            // Sanity-check the model shape this wrapper assumes.
            Debug.Assert(this.model.InputFeatures.Count == 1, "The number of input must be 1");
            Debug.Assert(this.model.OutputFeatures.Count == 1, "The number of output must be 1");
        }
        /// <summary>
        /// Loads the ONNX model referenced by parameters[0] (an ms-appx URI)
        /// and builds the session and binding used for evaluation.
        /// </summary>
        public async Task InitializeAsync(ModelType modelType, params string[] parameters)
        {
            var modelUri = new Uri(parameters[0]);
            var file     = await StorageFile.GetFileFromApplicationUriAsync(modelUri);

            model = await LearningModel.LoadFromStreamAsync(file);

            session = new LearningModelSession(model);
            binding = new LearningModelBinding(session);
        }
示例#8
0
        /// <summary>
        /// Loads Network.onnx from the app package and creates the evaluation session.
        /// </summary>
        public async Task Initialize()
        {
            // Load and create the model and session
            var modelUri  = new Uri($"ms-appx:///Assets//Network.onnx");
            var modelFile = await StorageFile.GetFileFromApplicationUriAsync(modelUri);

            _learning_model = await LearningModel.LoadFromStorageFileAsync(modelFile);

            _session = new LearningModelSession(_learning_model);
        }
        /// <summary>
        /// Runs the full detection pipeline on the newly selected sample image:
        /// decode -> tensorize (reshape model) -> resize/inference session ->
        /// parse detections -> non-max suppression -> render.
        /// NOTE(review): this handler blocks the UI thread with
        /// GetAwaiter().GetResult() on async I/O — acceptable for a sample,
        /// but a deadlock risk pattern in general.
        /// </summary>
        private void SampleInputsGridView_SelectionChanged(object sender, SelectionChangedEventArgs e)
        {
            var gridView  = sender as GridView;
            var thumbnail = gridView.SelectedItem as WinMLSamplesGallery.Controls.Thumbnail;

            if (thumbnail != null)
            {
                var image          = thumbnail.ImageUri;
                var file           = StorageFile.GetFileFromApplicationUriAsync(new Uri(image)).GetAwaiter().GetResult();
                var softwareBitmap = CreateSoftwareBitmapFromStorageFile(file);


                // Session that reshapes the flat BGRA8 pixel buffer into NHWC
                // (1 x H x W x 4), resized toward the 416x416 model input.
                tensorizationSession_ =
                    CreateLearningModelSession(
                        TensorizationModels.ReshapeFlatBufferNHWC(
                            1,
                            4,
                            softwareBitmap.PixelHeight,
                            softwareBitmap.PixelWidth,
                            416,
                            416));


                // Tensorize
                var stream            = file.OpenAsync(FileAccessMode.Read).GetAwaiter().GetResult();
                var decoder           = BitmapDecoder.CreateAsync(stream).GetAwaiter().GetResult();
                // NOTE(review): `bitmap` below is decoded but never used —
                // softwareBitmap from CreateSoftwareBitmapFromStorageFile is
                // what gets rendered; confirm whether this decode is needed.
                var bitmap            = decoder.GetSoftwareBitmapAsync(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied).GetAwaiter().GetResult();
                var pixelDataProvider = decoder.GetPixelDataAsync().GetAwaiter().GetResult();
                var bytes             = pixelDataProvider.DetachPixelData();
                var buffer            = bytes.AsBuffer(); // Does this do a copy??
                var inputRawTensor    = TensorUInt8Bit.CreateFromBuffer(new long[] { 1, buffer.Length }, buffer);

                // 3 channel NCHW
                var tensorizeOutput = TensorFloat.Create(new long[] { 1, 416, 416, 3 });
                var b = new LearningModelBinding(tensorizationSession_);
                b.Bind(tensorizationSession_.Model.InputFeatures[0].Name, inputRawTensor);
                b.Bind(tensorizationSession_.Model.OutputFeatures[0].Name, tensorizeOutput);
                tensorizationSession_.Evaluate(b, "");

                // Resize
                var resizeBinding = new LearningModelBinding(_session);
                resizeBinding.Bind(_session.Model.InputFeatures[0].Name, tensorizeOutput);
                var results = _session.Evaluate(resizeBinding, "");

                var output1 = results.Output(0) as TensorFloat;

                var data       = output1.GetAsVectorView();
                var detections = ParseResult(data.ToList <float>().ToArray());

                // Sort detections then suppress overlapping boxes.
                Comparer cp = new Comparer();
                detections.Sort(cp);
                // NOTE(review): `final` (the NMS output) is never consumed —
                // verify whether the surviving boxes should be rendered.
                var final = NMS(detections);

                RenderImageInMainPanel(softwareBitmap);
            }
        }
        /// <summary>
        /// FaceSentimentAnalyzerBinding constructor
        /// </summary>
        /// <param name="descriptor">Descriptor of the skill this binding belongs to.</param>
        /// <param name="device">Execution device handed to the vision binding helper.</param>
        /// <param name="session">WinML session the binding is created against.</param>
        internal FaceSentimentAnalyzerBinding(
            ISkillDescriptor descriptor,
            ISkillExecutionDevice device,
            LearningModelSession session)
        {
            m_bindingHelper = new VisionSkillBindingHelper(descriptor, device);

            // Create WinML binding
            m_winmlBinding = new LearningModelBinding(session);
        }
示例#11
0
        /// <summary>
        /// NeuralStyleTransformerBinding constructor
        /// </summary>
        /// <param name="descriptor">Descriptor of the skill this binding belongs to.</param>
        /// <param name="device">Execution device handed to the vision binding helper.</param>
        /// <param name="session">WinML session the binding is created against.</param>
        internal NeuralStyleTransformerBinding(
            ISkillDescriptor descriptor,
            ISkillExecutionDevice device,
            LearningModelSession session)
        {
            m_bindingHelper = new VisionSkillBindingHelper(descriptor, device);

            // Create WinML binding
            m_winmlBinding = new LearningModelBinding(session);
        }
示例#12
0
        /// <summary>
        /// Loads the ONNX model from the app package and creates the evaluation
        /// session on the currently selected device kind.
        /// </summary>
        /// <returns>Always true once initialization has finished.</returns>
        public async Task <bool> IniciarModelo()
        {
            var modelUri  = new Uri($"ms-appx:///Assets/{_kModelFileName}");
            var modelFile = await StorageFile.GetFileFromApplicationUriAsync(modelUri);

            _model = await LearningModel.LoadFromStorageFileAsync(modelFile);

            var device = new LearningModelDevice(LearningModelDeviceKindSelected);
            _session   = new LearningModelSession(_model, device);

            return true;
        }
        /// <summary>
        /// Creates a session for the model on the requested device kind, falling
        /// back to the currently selected device when none is supplied.
        /// </summary>
        private LearningModelSession CreateLearningModelSession(LearningModel model, Nullable <LearningModelDeviceKind> kind = null)
        {
            var deviceKind = kind ?? SelectedDeviceKind;
            var options    = new LearningModelSessionOptions();

            // Close the model to prevent extra memory usage
            options.CloseModelOnSessionCreation = true;

            return new LearningModelSession(model, new LearningModelDevice(deviceKind), options);
        }
示例#14
0
        /// <summary>
        /// Loads Yolo.onnx from the app package, then builds a CPU-backed
        /// session and its binding.
        /// </summary>
        internal async Task InitModelAsync()
        {
            var modelUri   = new Uri("ms-appx:///Assets//Yolo.onnx");
            var model_file = await StorageFile.GetFileFromApplicationUriAsync(modelUri);

            _model = await LearningModel.LoadFromStorageFileAsync(model_file);

            _session = new LearningModelSession(_model, new LearningModelDevice(LearningModelDeviceKind.Cpu));
            _binding = new LearningModelBinding(_session);
        }
示例#15
0
        /// <summary>
        /// Builds a session on the CPU or DirectML device depending on IsCpu.
        /// </summary>
        /// <param name="model">Model to wrap in the session.</param>
        /// <param name="closeModel">Close the model after session creation to save memory.</param>
        private LearningModelSession CreateLearningModelSession(LearningModel model, bool closeModel = true)
        {
            var options = new LearningModelSessionOptions
            {
                CloseModelOnSessionCreation = closeModel, // Close the model to prevent extra memory usage
                BatchSizeOverride           = 0
            };

            return new LearningModelSession(model, IsCpu ? cpuDevice : dmlDevice, options);
        }
示例#16
0
        /// <summary>
        /// Initialize: loads model.onnx from the app package, then creates the
        /// evaluation session and binding. Only single-input / single-output
        /// models are supported.
        /// </summary>
        public async Task Init()
        {
            // Single literal replaces the old pointless $"..." + "..." concat;
            // the resulting URI string is identical.
            StorageFile ModelFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Assets/model.onnx"));

            this.model = await LearningModel.LoadFromStorageFileAsync(ModelFile);

            this.session = new LearningModelSession(this.model);
            this.binding = new LearningModelBinding(this.session);

            Debug.Assert(this.model.InputFeatures.Count == 1, "The number of input must be 1");
            Debug.Assert(this.model.OutputFeatures.Count == 1, "The number of output must be 1");
        }
        /// <summary>
        /// Decrypts the embedded model with the current password, and if
        /// decryption succeeds classifies a bundled image: inference, then
        /// SoftMax + Top-K post-processing, then renders the top labels and
        /// probabilities. Updates the UI status either way.
        /// </summary>
        private void DecryptAndEvauluate()
        {
            // Load the encrypted model.
            // The encrypted model (encrypted.onnx) is embedded as a resource in
            // the native binary: WinMLSamplesGalleryNative.dll.
            var inferenceModel      = WinMLSamplesGalleryNative.EncryptedModels.LoadEncryptedResource(DecryptionKey.Password);
            var postProcessingModel = TensorizationModels.SoftMaxThenTopK(10);

            // Update the status
            var isModelDecrypted = inferenceModel != null;

            UpdateStatus(isModelDecrypted);

            // If loading the decrypted model failed (ie: due to an invalid key/password),
            // then skip performing evaluate.
            if (!isModelDecrypted)
            {
                return;
            }

            // Draw the image to classify in the Image control
            var decoder = ImageHelper.CreateBitmapDecoderFromPath("ms-appx:///InputData/hummingbird.jpg");

            // Create sessions
            var device  = new LearningModelDevice(LearningModelDeviceKind.Cpu);
            var options = new LearningModelSessionOptions()
            {
                CloseModelOnSessionCreation = true // Close the model to prevent extra memory usage
            };
            var inferenceSession      = new LearningModelSession(inferenceModel, device, options);
            var postProcessingSession = new LearningModelSession(postProcessingModel, device, options);

            // Classify the current image
            var softwareBitmap = decoder.GetSoftwareBitmapAsync().GetAwaiter().GetResult();
            var input          = VideoFrame.CreateWithSoftwareBitmap(softwareBitmap);

            // Inference
            var inferenceResults = Evaluate(inferenceSession, input);
            var inferenceOutput  = inferenceResults.Outputs.First().Value;

            // PostProcess: feed the raw logits through SoftMax + TopK(10)
            var postProcessedOutputs = Evaluate(postProcessingSession, inferenceOutput);
            var topKValues           = (TensorFloat)postProcessedOutputs.Outputs["TopKValues"];
            var topKIndices          = (TensorInt64Bit)postProcessedOutputs.Outputs["TopKIndices"];

            // Return results: map each class index to its ImageNet label
            var probabilities = topKValues.GetAsVectorView();
            var indices       = topKIndices.GetAsVectorView();
            var labels        = indices.Select((index) => ClassificationLabels.ImageNet[index]);

            // Render the classification and probabilities
            RenderInferenceResults(labels, probabilities);
        }
示例#18
0
        /// <summary>
        /// Runs the given frame through the model and returns the detected
        /// identity (the last entry of the "classLabel" string tensor), or an
        /// empty string when the frame is null, the output is empty, or
        /// evaluation fails.
        /// </summary>
        /// <param name="videoFrame">Frame to analyze; may be null.</param>
        /// <param name="_session">Session to evaluate against.</param>
        public async Task <string> ObtenerIdentidadOnnX(VideoFrame videoFrame, LearningModelSession _session)
        {
            identidadEncontradaTexto = string.Empty;
            if (videoFrame != null)
            {
                try
                {
                    LearningModelBinding binding     = new LearningModelBinding(_session);
                    ImageFeatureValue    imageTensor = ImageFeatureValue.CreateFromVideoFrame(videoFrame);
                    binding.Bind("data", imageTensor);

                    // Process the frame with the model
                    var results = await _session.EvaluateAsync(binding, $"Run { ++_runCount } ");

                    var label        = results.Outputs["classLabel"] as TensorString;
                    var resultVector = label.GetAsVectorView();

                    // The old code looped over every entry with a dead
                    // (fully commented-out) inner top-3 loop; its observable
                    // effect was simply keeping the LAST label in the vector.
                    if (resultVector.Count > 0)
                    {
                        identidadEncontradaTexto = resultVector[resultVector.Count - 1].ToString();
                    }
                }
                catch (Exception)
                {
                    // Best-effort: any evaluation failure yields an empty identity.
                    identidadEncontradaTexto = "";
                }
            }
            return identidadEncontradaTexto;
        }
示例#19
0
        /// <summary>
        /// Add-in startup handler: builds a rich-text view with the input image,
        /// loads the ONNX model from _modelPath, evaluates it against the image
        /// (tensorized with the display's color management mode), logs timing,
        /// prints the results, and finally shows everything in a dialog.
        /// </summary>
        private void ThisAddIn_Startup(object sender, System.EventArgs e)
        {
            richTextBox1 = new RichTextBox();
            richTextBox1.Dock = DockStyle.Fill;

            richTextBox1.SelectionFont = new Font("Verdana", 12, FontStyle.Bold);
            richTextBox1.SelectionColor = Color.Red;

            // Paste the input image into the rich text box via the clipboard.
            Clipboard.SetImage(Image.FromFile(_imagePath));
            richTextBox1.Paste();

            // Load and create the model 
            outToLog($"Loading modelfile '{_modelPath}' on the '{_deviceName}' device");

            // Environment.TickCount deltas below are rough wall-clock timings.
            int ticks = Environment.TickCount;
            _model = LearningModel.LoadFromFilePath(_modelPath);
            ticks = Environment.TickCount - ticks;
            outToLog($"model file loaded in { ticks } ticks");

            // Create the evaluation session with the model and device
            _session = new LearningModelSession(_model);

            outToLog("Getting color management mode...");
            ColorManagementMode colorManagementMode = GetColorManagementMode();

            outToLog("Loading the image...");
            ImageFeatureValue imageTensor = LoadImageFile(colorManagementMode);

            // create a binding object from the session
            outToLog("Binding...");
            LearningModelBinding binding = new LearningModelBinding(_session);
            binding.Bind(_model.InputFeatures.ElementAt(0).Name, imageTensor);

            outToLog("Running the model...");
            ticks = Environment.TickCount;
            var results = _session.Evaluate(binding, "RunId");
            ticks = Environment.TickCount - ticks;
            outToLog($"model run took { ticks } ticks");

            // retrieve results from evaluation
            var resultTensor = results.Outputs[_model.OutputFeatures.ElementAt(0).Name] as TensorFloat;
            var resultVector = resultTensor.GetAsVectorView();

            PrintResults(resultVector);

            // Present the pasted image (and any printed output) modally.
            Form form1 = new Form();
            form1.Size = new Size(800, 800);
            form1.Controls.Add(richTextBox1);
            //form1.Show();
            form1.ShowDialog();
        }
示例#20
0
        /// <summary>
        /// Creates a session for the model on the device chosen in the device
        /// combo box, optionally forcing a batch size.
        /// </summary>
        /// <param name="model">Model to create the session for.</param>
        /// <param name="batchSizeOverride">Batch size to force; ignored when not positive.</param>
        private Task <LearningModelSession> CreateLearningModelSession(LearningModel model, int batchSizeOverride = -1)
        {
            var deviceKind = DeviceComboBox.GetDeviceKind();
            var device     = new LearningModelDevice(deviceKind);
            var options    = new LearningModelSessionOptions();

            if (batchSizeOverride > 0)
            {
                options.BatchSizeOverride = (uint)batchSizeOverride;
            }
            var session = new LearningModelSession(model, device, options);

            // Nothing here is asynchronous; returning Task.FromResult keeps the
            // Task-returning signature (all awaiting callers still work) while
            // fixing the CS1998 "async method lacks await" warning.
            return Task.FromResult(session);
        }
示例#21
0
 /// <summary>
 /// Runs one evaluation on the given session/binding. CPU sessions evaluate
 /// synchronously; GPU sessions are started asynchronously and only awaited
 /// when <paramref name="wait"/> is true (otherwise fire-and-forget).
 /// </summary>
 private void EvaluateInternal(LearningModelSession session, LearningModelBinding binding, bool wait = false)
 {
     if (!IsCpu)
     {
         var evaluation = session.EvaluateAsync(binding, "");
         if (wait)
         {
             // Block until the GPU evaluation completes.
             evaluation.GetAwaiter().GetResult();
         }
         return;
     }

     session.Evaluate(binding, "");
 }
        /// <summary>
        /// Constructs the detector page: initializes the UI, creates the CPU
        /// and DirectX devices, loads yolov4.onnx from the package's Models
        /// folder, and opens the inference session.
        /// </summary>
        public ObjectDetector()
        {
            this.InitializeComponent();

            dmlDevice = new LearningModelDevice(LearningModelDeviceKind.DirectX);
            cpuDevice = new LearningModelDevice(LearningModelDeviceKind.Cpu);

            var modelName = "yolov4.onnx";
            var modelPath = Path.Join(Windows.ApplicationModel.Package.Current.InstalledLocation.Path, "Models", modelName);
            var model     = LearningModel.LoadFromFilePath(modelPath);

            _session = CreateLearningModelSession(model);

            initialized_ = true;
        }
        /// <summary>
        /// Creates a session on the device selected in the combo box:
        /// index 0 is CPU, anything else the high-performance DirectX adapter.
        /// </summary>
        private LearningModelSession CreateLearningModelSession(LearningModel model)
        {
            LearningModelDeviceKind kind;
            if (DeviceComboBox.SelectedIndex == 0)
            {
                kind = LearningModelDeviceKind.Cpu;
            }
            else
            {
                kind = LearningModelDeviceKind.DirectXHighPerformance;
            }

            var options = new LearningModelSessionOptions
            {
                CloseModelOnSessionCreation = true              // Close the model to prevent extra memory usage
            };

            return new LearningModelSession(model, new LearningModelDevice(kind), options);
        }
示例#24
0
        // usage: SqueezeNet [modelfile] [imagefile] [cpu|directx]
        /// <summary>
        /// Console entry point: parses the arguments, loads the model on the
        /// requested device, tensorizes the image with the display's color
        /// management mode, runs one evaluation, and prints the scores.
        /// Returns 0 on success, -1 on bad arguments.
        /// </summary>
        static int Main(string[] args)
        {
            if (!ParseArgs(args))
            {
                Console.WriteLine("Usage: [executable_name] [modelfile] [imagefile] [cpu|directx]");
                return(-1);
            }

            // Load and create the model
            Console.WriteLine($"Loading modelfile '{_modelPath}' on the '{_deviceName}' device");

            // Environment.TickCount deltas give rough wall-clock timings.
            int ticks = Environment.TickCount;

            _model = LearningModel.LoadFromFilePath(_modelPath);
            ticks  = Environment.TickCount - ticks;
            Console.WriteLine($"model file loaded in { ticks } ticks");

            // Create the evaluation session with the model and device
            _session = new LearningModelSession(_model, new LearningModelDevice(_deviceKind));

            Console.WriteLine("Getting color management mode...");
            ColorManagementMode colorManagementMode = GetColorManagementMode();

            Console.WriteLine("Loading the image...");
            ImageFeatureValue imageTensor = LoadImageFile(colorManagementMode);

            // create a binding object from the session
            Console.WriteLine("Binding...");
            LearningModelBinding binding = new LearningModelBinding(_session);

            binding.Bind(_model.InputFeatures.ElementAt(0).Name, imageTensor);

            Console.WriteLine("Running the model...");
            ticks = Environment.TickCount;
            var results = _session.Evaluate(binding, "RunId");

            ticks = Environment.TickCount - ticks;
            Console.WriteLine($"model run took { ticks } ticks");

            // retrieve results from evaluation
            var resultTensor = results.Outputs[_model.OutputFeatures.ElementAt(0).Name] as TensorFloat;
            var resultVector = resultTensor.GetAsVectorView();

            PrintResults(resultVector);
            return(0);
        }
示例#25
0
        private bool disposedValue = false; // To detect redundant calls

        /// <summary>
        /// Standard dispose pattern: releases the WinML session and model
        /// (previously they were only nulled out, leaking their native
        /// resources — exactly what the old TODO comments asked for) and drops
        /// all references so they can be collected.
        /// </summary>
        /// <param name="disposing">True when called from Dispose(); false from a finalizer.</param>
        void Dispose(bool disposing)
        {
            if (!disposedValue)
            {
                if (disposing)
                {
                    // Release managed state: dispose the session before the
                    // model it was created from.
                    session?.Dispose();
                    model?.Dispose();
                }

                // Set large fields to null.
                model   = null;
                session = null;
                binding = null;

                disposedValue = true;
            }
        }
示例#26
0
        /// <summary>
        /// Disposes and rebuilds every effect session for the current image
        /// dimensions and toggle states. Order matters: resizeEffectSession_
        /// is recreated first because later sessions pick 224x224 vs. the raw
        /// image size based on whether a resize is active.
        /// </summary>
        private void RecreateSessions()
        {
            // Flat buffer -> NCHW tensorization for the current image size.
            tensorizationSession_?.Dispose();
            tensorizationSession_ =
                CreateLearningModelSession(
                    TensorizationModels.ReshapeFlatBufferToNCHW(
                        1,
                        4,
                        currentImageHeight_,
                        currentImageWidth_));

            resizeEffectSession_?.Dispose();
            resizeEffectSession_ = GetEffect(ResizeToggleSplitButton, ResizePicker);

            pixelSwizzleEffectSession_?.Dispose();
            pixelSwizzleEffectSession_ = GetPixelSwizzleEffect();

            blurSharpenEffectSession_?.Dispose();
            blurSharpenEffectSession_ = GetEffect(BlurSharpenToggleSplitButton, BlurSharpenPicker);

            // Contrast is only built when its toggle is on; its input size
            // depends on whether the resize effect is active (224 when it is).
            contrastEffectSession_?.Dispose();
            contrastEffectSession_ = ContrastToggleSplitButton.IsChecked ?
                                     CreateLearningModelSession(TensorizationModels.AdjustBrightnessAndContrast(
                                                                    1,
                                                                    3,
                                                                    resizeEffectSession_ != null ? 224 : currentImageHeight_,
                                                                    resizeEffectSession_ != null ? 224 : currentImageWidth_)) :
                                     null;

            artisticEffectsEffectSession_?.Dispose();
            artisticEffectsEffectSession_ = GetEffect(ArtisticEffectsToggleSplitButton, ArtisticEffectsPicker);

            orientationEffectSession_?.Dispose();
            orientationEffectSession_ = GetOrientationEffect();

            shapeSession_?.Dispose();
            shapeSession_ = CreateLearningModelSession(TensorizationModels.ShapeNCHW(1, 3, currentImageHeight_, currentImageWidth_));

            // NCHW -> image detensorization, again sized by the resize state.
            detensorizationSession_?.Dispose();
            detensorizationSession_ = CreateLearningModelSession(TensorizationModels.IdentityNCHW(
                                                                     1,
                                                                     3,
                                                                     resizeEffectSession_ != null ? 224 : currentImageHeight_,
                                                                     resizeEffectSession_ != null ? 224 : currentImageWidth_));
        }
        /// <summary>
        /// Loads the ONNX model from the package, creates a default (CPU)
        /// session, and extracts the first tensor input/output descriptors.
        /// </summary>
        /// <returns>True when the model and its features loaded; false otherwise.</returns>
        private async Task <bool> LoadModelAsync()
        {
            var modelStorageFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri(Constants.MODEL_PATH));

            try
            {
                _model = await LearningModel.LoadFromStorageFileAsync(modelStorageFile);
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
            }

            // Bail out instead of dereferencing a null model below: the old
            // code went on to create the session and read InputFeatures, which
            // threw NullReferenceException after any load failure.
            if (_model == null)
            {
                return(false);
            }

            // since we do not specify the device, we are using the default CPU option
            _session = new LearningModelSession(_model);

            if (_model.InputFeatures == null || _model.OutputFeatures == null)
            {
                return(false);
            }

            var inputFeatures  = _model.InputFeatures.ToList();
            var outputFeatures = _model.OutputFeatures.ToList();

            _inputImageDescriptor =
                inputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Tensor) as TensorFeatureDescriptor;

            _outputTensorDescriptor =
                outputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Tensor) as TensorFeatureDescriptor;

            return(true);
        }
        /// <summary>
        /// Detaches the frame-capture handler and releases the WinML session
        /// and model. May only be called after successful initialization.
        /// </summary>
        /// <exception cref="InvalidOperationException">Thrown when the service was never initialized.</exception>
        public void CleanUp()
        {
            if (!_isInitialized)
            {
                throw new InvalidOperationException("Service not initialized.");
            }

            CameraService.Current.SoftwareBitmapFrameCaptured -= Current_SoftwareBitmapFrameCaptured;

            _current = null;

            // Dispose in reverse order of creation: the session first (it was
            // built on the model), then the model itself — the old code did
            // this the other way around.
            _session.Dispose();
            _model.Dispose();
            _model   = null;
            _session = null;

            Debug.WriteLine("The evaluation event handler should have been removed");
            _isInitialized = false;
        }
        /// <summary>
        /// Constructs the page: initializes the UI, loads squeezenet1.1-7.onnx
        /// from the package's Models folder into the inference session, builds
        /// the SoftMax + Top-K post-processing session, and selects the first
        /// sample image.
        /// </summary>
        public OpenCVInterop()
        {
            this.InitializeComponent();
            CurrentImagePath = null;
            InferenceChoice  = ClassifyChoice.Denoised;

            // Load inference session
            var modelName = "squeezenet1.1-7.onnx";
            var modelPath = Path.Join(Windows.ApplicationModel.Package.Current.InstalledLocation.Path, "Models", modelName);
            var model     = LearningModel.LoadFromFilePath(modelPath);

            _inferenceSession = CreateLearningModelSession(model);

            // Load post processing session
            _postProcessingSession = CreateLearningModelSession(TensorizationModels.SoftMaxThenTopK(TopK));

            BasicGridView.SelectedIndex = 0;
        }
示例#30
0
        /// <summary>
        /// Writes the model stream to a numbered .tmp cache file, loads it as a
        /// LearningModel, logs its metadata, and opens a session on the
        /// requested device.
        /// </summary>
        /// <param name="stream">Stream containing the ONNX model bytes.</param>
        /// <param name="isGPU">True to run on DirectX (GPU), false for CPU.</param>
        public WindowsONNXSession(Stream stream, bool isGPU = false)
        {
            IsGPU = isGPU;

            var filename = $"ONNXCache_{++cacheCount}.tmp";

            if (stream.CanSeek)
            {
                stream.Seek(0, SeekOrigin.Begin);
            }
            File.WriteAllBytes(filename, stream.ReadAll());
            // NOTE(review): the cache file is written to the working directory
            // and never deleted — consider a temp path with cleanup.
            model = LearningModel.LoadFromFilePath(filename);
            foreach (var item in model.Metadata)
            {
                Logger.Log($"{item.Key}, {item.Value.GetType().FullName}, {item.Value}");
            }

            // Honor the isGPU flag: the old code always created a CPU device,
            // silently ignoring the caller's GPU request.
            var deviceKind = isGPU ? LearningModelDeviceKind.DirectX : LearningModelDeviceKind.Cpu;
            sess = new LearningModelSession(model, new LearningModelDevice(deviceKind));
        }