        public void debugModelIO()
        {
            string _inName, _outName;

            foreach (var inputF in _learningModel.InputFeatures)
            {
                Debug.WriteLine($"input | kind:{inputF.Kind}, name:{inputF.Name}, type:{inputF.GetType()}");
                ImageFeatureDescriptor  imgDesc = inputF as ImageFeatureDescriptor;
                TensorFeatureDescriptor tfDesc  = inputF as TensorFeatureDescriptor;
                _inWidth  = (uint)(imgDesc == null ? tfDesc.Shape[3] : imgDesc.Width);
                _inHeight = (uint)(imgDesc == null ? tfDesc.Shape[2] : imgDesc.Height);
                _inName   = inputF.Name;

                Debug.WriteLine($"N: {(imgDesc == null ? tfDesc.Shape[0] : 1)}, " +
                                $"Channel: {(imgDesc == null ? tfDesc.Shape[1].ToString() : imgDesc.BitmapPixelFormat.ToString())}, " +
                                $"Height:{(imgDesc == null ? tfDesc.Shape[2] : imgDesc.Height)}, " +
                                $"Width: {(imgDesc == null ? tfDesc.Shape[3] : imgDesc.Width)}");
            }
            foreach (var outputF in _learningModel.OutputFeatures)
            {
                Debug.WriteLine($"output | kind:{outputF.Kind}, name:{outputF.Name}, type:{outputF.GetType()}");
                ImageFeatureDescriptor  imgDesc = outputF as ImageFeatureDescriptor;
                TensorFeatureDescriptor tfDesc  = outputF as TensorFeatureDescriptor;
                _outWidth  = (uint)(imgDesc == null ? tfDesc.Shape[3] : imgDesc.Width);
                _outHeight = (uint)(imgDesc == null ? tfDesc.Shape[2] : imgDesc.Height);
                _outName   = outputF.Name;

                Debug.WriteLine($"N: {(imgDesc == null ? tfDesc.Shape[0] : 1)}, " +
                                $"Channel: {(imgDesc == null ? tfDesc.Shape[1].ToString() : imgDesc.BitmapPixelFormat.ToString())}, " +
                                $"Height:{(imgDesc == null ? tfDesc.Shape[2] : imgDesc.Height)}, " +
                                $"Width: {(imgDesc == null ? tfDesc.Shape[3] : imgDesc.Width)}");
            }
        }
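        // The Shape[2]/Shape[3] indexing above assumes the usual NCHW tensor layout (N, Channels, Height, Width).
        // Below is a minimal, hypothetical caller for the method above; the "Assets/model.onnx" path and the
        // _learningModel field name are illustrative assumptions, not part of the original sample.
        private async Task InspectModelAsync()
        {
            var modelFile = await StorageFile.GetFileFromApplicationUriAsync(
                new Uri("ms-appx:///Assets/model.onnx"));

            _learningModel = await LearningModel.LoadFromStorageFileAsync(modelFile);

            // Dump every input/output feature of the loaded model
            debugModelIO();
        }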
Example #2
        private async Task LoadAndEvaluateModelAsync(VideoFrame _inputFrame, string _modelFileName)
        {
            LearningModelBinding _binding     = null;
            LearningModelSession _session;

            try
            {
                //Load and create the model
                if (_model == null)
                {
                    var modelFile =
                        await StorageFile.GetFileFromApplicationUriAsync(new Uri($"ms-appx:///{_modelFileName}"));

                    _model = await LearningModel.LoadFromStorageFileAsync(modelFile);
                }

                // Create the evaluation session with the model
                _session = new LearningModelSession(_model);

                // Get input and output features of the model
                var inputFeatures  = _model.InputFeatures.ToList();
                var outputFeatures = _model.OutputFeatures.ToList();

                // Create binding and then bind input/ output features
                _binding = new LearningModelBinding(_session);

                _inputImageDescriptor =
                    inputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Tensor) as TensorFeatureDescriptor;

                _outputTensorDescriptor =
                    outputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Tensor) as TensorFeatureDescriptor;

                TensorFloat       outputTensor = TensorFloat.Create(_outputTensorDescriptor.Shape);
                ImageFeatureValue imageTensor  = ImageFeatureValue.CreateFromVideoFrame(_inputFrame);

                // Bind input and output features
                _binding.Bind(_inputImageDescriptor.Name, imageTensor);
                _binding.Bind(_outputTensorDescriptor.Name, outputTensor);


                // Evaluate and get the results
                var results = await _session.EvaluateAsync(_binding, "test");

                Debug.WriteLine("ResultsEvaluated: " + results.ToString());

                var outputTensorList = outputTensor.GetAsVectorView();
                var resultsList      = new List <float>(outputTensorList.Count);
                for (int i = 0; i < outputTensorList.Count; i++)
                {
                    resultsList.Add(outputTensorList[i]);
                }
            }
            catch (Exception ex)
            {
                Debug.WriteLine($"error: {ex.Message}");
                _model = null;
            }
        }
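        // Hypothetical usage sketch for the method above: decode a packaged image into a VideoFrame and
        // evaluate it. The BitmapDecoder flow is standard UWP; the file argument and method name are
        // assumptions for illustration.
        private async Task RunOnImageAsync(StorageFile imageFile, string modelFileName)
        {
            using (var stream = await imageFile.OpenAsync(FileAccessMode.Read))
            {
                var decoder = await BitmapDecoder.CreateAsync(stream);
                var bitmap  = await decoder.GetSoftwareBitmapAsync(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);

                using (var frame = VideoFrame.CreateWithSoftwareBitmap(bitmap))
                {
                    await LoadAndEvaluateModelAsync(frame, modelFileName);
                }
            }
        }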
        private async Task <bool> LoadModelAsync()
        {
            var modelStorageFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri(Constants.MODEL_PATH));

            try
            {
                _model = await LearningModel.LoadFromStorageFileAsync(modelStorageFile);
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
                // Bail out here; creating a session below with a null model would throw
                return(false);
            }

            // since we do not specify the device, we are using the default CPU option
            _session = new LearningModelSession(_model);

            List <ILearningModelFeatureDescriptor> inputFeatures;
            List <ILearningModelFeatureDescriptor> outputFeatures;

            if (_model.InputFeatures == null)
            {
                return(false);
            }
            else
            {
                inputFeatures = _model.InputFeatures.ToList();
            }

            if (_model.OutputFeatures == null)
            {
                return(false);
            }
            else
            {
                outputFeatures = _model.OutputFeatures.ToList();
            }

            _inputImageDescriptor =
                inputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Tensor) as TensorFeatureDescriptor;

            _outputTensorDescriptor =
                outputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Tensor) as TensorFeatureDescriptor;

            return(true);
        }
Example #4
        public static async Task <CustomVisionModel> CreateFromStorageFile(StorageFile file)
        {
            CustomVisionModel learningModel = new CustomVisionModel();

            learningModel.model = await LearningModel.LoadFromStorageFileAsync(file);

            IReadOnlyList <ILearningModelFeatureDescriptor> input = learningModel.model.InputFeatures.ToList();
            MapFeatureDescriptor imgDesc = input[0] as MapFeatureDescriptor;

            TensorFeatureDescriptor tfDesc = input[0] as TensorFeatureDescriptor;

            learningModel.inputParameterName = input[0].Name;
            // NCHW layout: Shape[2] is the height dimension, Shape[3] is the width dimension
            learningModel.inputWidth         = (int)tfDesc.Shape[3];
            learningModel.inputHeight        = (int)tfDesc.Shape[2];
            IReadOnlyList <ILearningModelFeatureDescriptor> output = learningModel.model.OutputFeatures.ToList();
            MapFeatureDescriptor    imgDesc1 = output[0] as MapFeatureDescriptor;
            TensorFeatureDescriptor tfDesc1  = output[0] as TensorFeatureDescriptor;

            learningModel.outputParameterName = output[0].Name;
            learningModel.session             = new LearningModelSession(learningModel.model);
            learningModel.binding             = new LearningModelBinding(learningModel.session);
            return(learningModel);
        }
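        /// <summary>
        /// Hypothetical evaluation sketch built on the members initialized above (session, binding,
        /// inputParameterName, outputParameterName); the method name and correlation id are assumptions.
        /// </summary>
        public async Task<LearningModelEvaluationResult> EvaluateFrameAsync(VideoFrame frame)
        {
            binding.Bind(inputParameterName, ImageFeatureValue.CreateFromVideoFrame(frame));

            // Outputs can then be read from result.Outputs[outputParameterName]; the concrete type
            // (e.g. TensorFloat, TensorString or a sequence of maps) depends on the exported model.
            return await session.EvaluateAsync(binding, "CustomVisionModel");
        }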
        /// <summary>
        /// Evaluate the VideoFrame passed in as arg
        /// </summary>
        /// <param name="inputFrame"></param>
        /// <returns></returns>
        private async Task EvaluateVideoFrameAsync(VideoFrame inputFrame)
        {
            if (inputFrame != null)
            {
                try
                {
                    StatusBlock.Text = "Binding image...";

                    // create a binding object from the session
                    LearningModelBinding binding = new LearningModelBinding(_session);

                    // bind the input image
                    ImageFeatureValue imageTensor = ImageFeatureValue.CreateFromVideoFrame(inputFrame);
                    binding.Bind("data_0", imageTensor);

                    // temp: there is a bug where winml doesn't allow unbound outputs yet, prebind the output!
                    {
                        TensorFeatureDescriptor outputTensorDescription = _model.OutputFeatures.FirstOrDefault(
                            feature => feature.Name == "softmaxout_1"
                            ) as TensorFeatureDescriptor;
                        TensorFloat outputTensor = TensorFloat.Create(outputTensorDescription.Shape);
                        binding.Bind("softmaxout_1", outputTensor);
                    }

                    StatusBlock.Text = "Running model...";

                    int ticks = Environment.TickCount;

                    // Process the frame with the model
                    var results = await _session.EvaluateAsync(binding, $"Run { ++_runCount } ");

                    ticks = Environment.TickCount - ticks;

                    // retrieve results from evaluation
                    var resultTensor = results.Outputs["softmaxout_1"] as TensorFloat;
                    var resultVector = resultTensor.GetAsVectorView();

                    // Find the top 3 probabilities
                    List <float> topProbabilities = new List <float>()
                    {
                        0.0f, 0.0f, 0.0f
                    };
                    List <int> topProbabilityLabelIndexes = new List <int>()
                    {
                        0, 0, 0
                    };
                    // SqueezeNet returns 1000 class scores, one probability per label; scan them all
                    for (int i = 0; i < resultVector.Count; i++)
                    {
                        // is it one of the top 3?
                        for (int j = 0; j < 3; j++)
                        {
                            if (resultVector[i] > topProbabilities[j])
                            {
                                // shift lower-ranked entries down before inserting at position j,
                                // so the top-3 list stays sorted and no entry is lost
                                for (int k = 2; k > j; k--)
                                {
                                    topProbabilities[k]           = topProbabilities[k - 1];
                                    topProbabilityLabelIndexes[k] = topProbabilityLabelIndexes[k - 1];
                                }
                                topProbabilityLabelIndexes[j] = i;
                                topProbabilities[j]           = resultVector[i];
                                break;
                            }
                        }
                    }

                    // Display the result
                    string message = $"Run took { ticks } ticks";
                    for (int i = 0; i < 3; i++)
                    {
                        message += $"\n\"{ _labels[topProbabilityLabelIndexes[i]]}\" with confidence of { topProbabilities[i]}";
                    }
                    StatusBlock.Text = message;
                }
                catch (Exception ex)
                {
                    StatusBlock.Text = $"error: {ex.Message}";
                }

                ButtonRun.IsEnabled = true;
            }
        }
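        // The _labels list indexed above is assumed to be populated elsewhere. A minimal, hypothetical loader,
        // assuming a packaged "Assets/Labels.json" file with one "index": "label" entry per line:
        private async Task LoadLabelsAsync()
        {
            var labelsFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Assets/Labels.json"));
            var lines      = await FileIO.ReadLinesAsync(labelsFile);

            foreach (var line in lines)
            {
                var indexAndLabel = line.Split(':');
                if (indexAndLabel.Length == 2)
                {
                    // strip quotes, commas and whitespace around the label text
                    _labels.Add(indexAndLabel[1].Trim(' ', '"', ','));
                }
            }
        }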
Example #6
        /// <summary>
        /// Load the labels and model and initialize WinML
        /// </summary>
        /// <returns></returns>
        private async Task LoadModelAsync(string modelFileName)
        {
            Debug.WriteLine("LoadModelAsync");
            _evaluationLock.Wait();
            {
                m_binding       = null;
                m_model         = null;
                m_session       = null;
                _isReadyForEval = false;

                try
                {
                    // Start stopwatch
                    _perfStopwatch.Restart();

                    // Load Model
                    StorageFile modelFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri($"ms-appx:///Assets/{modelFileName}.onnx"));

                    m_model = await LearningModel.LoadFromStorageFileAsync(modelFile);

                    // Stop stopwatch
                    _perfStopwatch.Stop();

                    // Setting preferred inference device given user's intent
                    m_inferenceDeviceSelected = _useGPU ? LearningModelDeviceKind.DirectXHighPerformance : LearningModelDeviceKind.Cpu;
                    m_session = new LearningModelSession(m_model, new LearningModelDevice(m_inferenceDeviceSelected));

                    // Debugging logic to see the input and output of the model and retrieve dimensions of input/output variables
                    // ### DEBUG ###
                    foreach (var inputF in m_model.InputFeatures)
                    {
                        Debug.WriteLine($"input | kind:{inputF.Kind}, name:{inputF.Name}, type:{inputF.GetType()}");
                        ImageFeatureDescriptor  imgDesc = inputF as ImageFeatureDescriptor;
                        TensorFeatureDescriptor tfDesc  = inputF as TensorFeatureDescriptor;
                        m_inWidth  = (uint)(imgDesc == null ? tfDesc.Shape[3] : imgDesc.Width);
                        m_inHeight = (uint)(imgDesc == null ? tfDesc.Shape[2] : imgDesc.Height);
                        m_inName   = inputF.Name;

                        Debug.WriteLine($"N: {(imgDesc == null ? tfDesc.Shape[0] : 1)}, " +
                                        $"Channel: {(imgDesc == null ? tfDesc.Shape[1].ToString() : imgDesc.BitmapPixelFormat.ToString())}, " +
                                        $"Height:{(imgDesc == null ? tfDesc.Shape[2] : imgDesc.Height)}, " +
                                        $"Width: {(imgDesc == null ? tfDesc.Shape[3] : imgDesc.Width)}");
                    }
                    foreach (var outputF in m_model.OutputFeatures)
                    {
                        Debug.WriteLine($"output | kind:{outputF.Kind}, name:{outputF.Name}, type:{outputF.GetType()}");
                        ImageFeatureDescriptor  imgDesc = outputF as ImageFeatureDescriptor;
                        TensorFeatureDescriptor tfDesc  = outputF as TensorFeatureDescriptor;
                        m_outWidth  = (uint)(imgDesc == null ? tfDesc.Shape[3] : imgDesc.Width);
                        m_outHeight = (uint)(imgDesc == null ? tfDesc.Shape[2] : imgDesc.Height);
                        m_outName   = outputF.Name;

                        Debug.WriteLine($"N: {(imgDesc == null ? tfDesc.Shape[0] : 1)}, " +
                                        $"Channel: {(imgDesc == null ? tfDesc.Shape[1].ToString() : imgDesc.BitmapPixelFormat.ToString())}, " +
                                        $"Height:{(imgDesc == null ? tfDesc.Shape[2] : imgDesc.Height)}, " +
                                        $"Width: {(imgDesc == null ? tfDesc.Shape[3] : imgDesc.Width)}");
                    }
                    // ### END OF DEBUG ###

                    // Create output frame
                    _outputFrame?.Dispose();
                    _outputFrame = new VideoFrame(BitmapPixelFormat.Bgra8, (int)m_outWidth, (int)m_outHeight);

                    Debug.WriteLine($"Elapsed time: {_perfStopwatch.ElapsedMilliseconds} ms");

                    _isReadyForEval = true;
                }
                catch (Exception ex)
                {
                    NotifyUser($"error: {ex.Message}", NotifyType.ErrorMessage);
                    Debug.WriteLine($"error: {ex.Message}");
                }
            }
            _evaluationLock.Release();
        }
Example #7
    public async Task LoadModelAsync(bool shouldUseGpu, bool resourceLoad)
    {
        try
        {
            // Parse labels from label file
            var labelsTextAsset = Resources.Load(LabelsFileName) as TextAsset;
            using (var streamReader = new StringReader(labelsTextAsset.text))
            {
                string line       = "";
                char[] charToTrim = { '\"', ' ' };
                while (streamReader.Peek() >= 0)
                {
                    line = streamReader.ReadLine();
                    line = line.Trim(charToTrim);
                    var indexAndLabel = line.Split(':');
                    if (indexAndLabel.Count() == 2)
                    {
                        _labels.Add(indexAndLabel[1]);
                    }
                }
            }

#if ENABLE_WINMD_SUPPORT
            if (resourceLoad)
            {
                // Load from Unity Resources via awkward UWP streams and initialize model
                using (var modelStream = new InMemoryRandomAccessStream())
                {
                    var dataWriter    = new DataWriter(modelStream);
                    var modelResource = Resources.Load(ModelFileName) as TextAsset;
                    dataWriter.WriteBytes(modelResource.bytes);
                    await dataWriter.StoreAsync();

                    var randomAccessStream = RandomAccessStreamReference.CreateFromStream(modelStream);

                    _model = await LearningModel.LoadFromStreamAsync(randomAccessStream);

                    var deviceKind = shouldUseGpu ? LearningModelDeviceKind.DirectXHighPerformance : LearningModelDeviceKind.Cpu;
                    _session = new LearningModelSession(_model, new LearningModelDevice(deviceKind));
                }
            }
            else
            {
                try
                {
                    var modelFile = await StorageFile.GetFileFromApplicationUriAsync(
                        new Uri($"ms-appx:///Data/StreamingAssets/SqueezeNet.onnx"));

                    _model = await LearningModel.LoadFromStorageFileAsync(modelFile);

                    var deviceKind = shouldUseGpu ? LearningModelDeviceKind.DirectXHighPerformance : LearningModelDeviceKind.Cpu;
                    _session = new LearningModelSession(_model, new LearningModelDevice(deviceKind));
                }
                catch (Exception e)
                {
                    var exceptionStr = e.ToString();
                    //StatusBlock.text = exceptionStr;
                }
            }

            // Get model input and output descriptions
            var inputImageDescription =
                _model.InputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Image)
                as ImageFeatureDescriptor;
            // Check if input is passed as image if not try to interpret it as generic tensor
            if (inputImageDescription != null)
            {
                InputWidth  = inputImageDescription.Width;
                InputHeight = inputImageDescription.Height;
            }
            else
            {
                var inputTensorDescription =
                    _model.InputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Tensor)
                    as TensorFeatureDescriptor;
                InputWidth  = (uint)inputTensorDescription.Shape[3];
                InputHeight = (uint)inputTensorDescription.Shape[2];
            }

            _outputDescription =
                _model.OutputFeatures.FirstOrDefault(feature => feature.Kind == LearningModelFeatureKind.Tensor)
                as TensorFeatureDescriptor;
#endif
        }
        catch
        {
#if ENABLE_WINMD_SUPPORT
            _model = null;
#endif
            throw;
        }
    }
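    // Hypothetical Unity entry point for the loader above; the MonoBehaviour Start hook and the
    // argument values (GPU preferred, load from StreamingAssets) are illustrative assumptions.
    private async void Start()
    {
        await LoadModelAsync(shouldUseGpu: true, resourceLoad: false);
    }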