private void DecryptAndEvauluate()
{
    // The encrypted model (encrypted.onnx) is embedded as a resource in the
    // native binary (WinMLSamplesGalleryNative.dll); decrypt it with the
    // user-supplied password. Also build the softmax + top-10 post-processing
    // model up front.
    var decryptedModel = WinMLSamplesGalleryNative.EncryptedModels.LoadEncryptedResource(DecryptionKey.Password);
    var topKModel = TensorizationModels.SoftMaxThenTopK(10);

    // Surface decryption success/failure in the UI.
    var decryptionSucceeded = decryptedModel != null;
    UpdateStatus(decryptionSucceeded);

    // An invalid key/password yields no model — nothing further to evaluate.
    if (!decryptionSucceeded)
    {
        return;
    }

    // Decode the sample image that will be classified and shown in the Image control.
    var decoder = ImageHelper.CreateBitmapDecoderFromPath("ms-appx:///InputData/hummingbird.jpg");

    // Build both sessions on the CPU device.
    var cpuDevice = new LearningModelDevice(LearningModelDeviceKind.Cpu);
    var sessionOptions = new LearningModelSessionOptions()
    {
        CloseModelOnSessionCreation = true // Close the model to prevent extra memory usage
    };
    var inferenceSession = new LearningModelSession(decryptedModel, cpuDevice, sessionOptions);
    var postProcessingSession = new LearningModelSession(topKModel, cpuDevice, sessionOptions);

    // NOTE(review): GetAwaiter().GetResult() blocks the calling thread on the
    // async decode — acceptable for this sample, but confirm it is never
    // invoked on a context where blocking could deadlock.
    var bitmap = decoder.GetSoftwareBitmapAsync().GetAwaiter().GetResult();
    var inputFrame = VideoFrame.CreateWithSoftwareBitmap(bitmap);

    // Run inference, then softmax + top-K post-processing on its first output.
    var rawOutput = Evaluate(inferenceSession, inputFrame).Outputs.First().Value;
    var postProcessed = Evaluate(postProcessingSession, rawOutput);
    var topKValues = (TensorFloat)postProcessed.Outputs["TopKValues"];
    var topKIndices = (TensorInt64Bit)postProcessed.Outputs["TopKIndices"];

    // Map the top-K class indices to ImageNet labels and render the results.
    var probabilities = topKValues.GetAsVectorView();
    var labels = topKIndices.GetAsVectorView().Select((index) => ClassificationLabels.ImageNet[index]);
    RenderInferenceResults(labels, probabilities);
}
public OpenCVInterop()
{
    this.InitializeComponent();

    // No image selected yet; default to classifying the denoised output.
    CurrentImagePath = null;
    InferenceChoice = ClassifyChoice.Denoised;

    // Build the inference session from the SqueezeNet model shipped with the package.
    var modelName = "squeezenet1.1-7.onnx";
    var modelPath = Path.Join(
        Windows.ApplicationModel.Package.Current.InstalledLocation.Path,
        "Models",
        modelName);
    _inferenceSession = CreateLearningModelSession(LearningModel.LoadFromFilePath(modelPath));

    // Build the softmax + top-K post-processing session.
    _postProcessingSession = CreateLearningModelSession(TensorizationModels.SoftMaxThenTopK(TopK));

    // Select the first gallery item by default.
    BasicGridView.SelectedIndex = 0;
}
public ImageSharpInterop()
{
    this.InitializeComponent();

    // Session that tensorizes input images to the network's expected
    // batch/channel/height/width layout using nearest-neighbor sampling.
    _tensorizationSession = CreateLearningModelSession(
        TensorizationModels.BasicTensorization(Height, Width, BatchSize, Channels, Height, Width, "nearest"),
        SelectedDeviceKind);

    // Inference session over the SqueezeNet model shipped with the package.
    var inferenceModelName = "squeezenet1.1-7.onnx";
    var inferenceModelPath = Path.Join(
        Windows.ApplicationModel.Package.Current.InstalledLocation.Path,
        "Models",
        inferenceModelName);
    _inferenceSession = CreateLearningModelSession(LearningModel.LoadFromFilePath(inferenceModelPath));

    // Softmax + top-K post-processing session.
    _postProcessingSession = CreateLearningModelSession(TensorizationModels.SoftMaxThenTopK(TopK));

    // Select the first gallery item by default.
    BasicGridView.SelectedIndex = 0;
}