public Task<MachineLearningResult> AnalizeImageAsync(string mlModel, Stream imageStream)
{
    // Cancel any classification still in flight and release its request handler.
    if (_machineLearningTask != null)
    {
        _machineLearningTask.TrySetCanceled();
        _classifierRequestHandler?.Dispose();
        _classifierRequestHandler = null;
    }

    _machineLearningTask = new TaskCompletionSource<MachineLearningResult>();

    try
    {
        // Wrap the image stream for Vision and run the classification request for the given model.
        var options = new VNImageOptions();
        _classifierRequestHandler = new VNImageRequestHandler(NSData.FromStream(imageStream), options);
        _classifierRequestHandler.Perform(new VNRequest[] { GetClassificationRequest(mlModel) }, out var err);
        if (err != null)
        {
            Debug.WriteLine(err);
            _machineLearningTask.TrySetResult(null);
        }
    }
    catch (Exception error)
    {
        Debug.WriteLine(error);
        _machineLearningTask.TrySetResult(null);
    }

    return _machineLearningTask.Task;
}
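// A minimal caller sketch, assuming a hypothetical "visionService" instance that exposes
// AnalizeImageAsync and a compiled model named "MyClassifier"; both names are illustrative,
// not part of the snippet above. A null result signals that classification failed.
public async Task ClassifyPhotoAsync(Stream photoStream)
{
    MachineLearningResult result = await visionService.AnalizeImageAsync("MyClassifier", photoStream);
    if (result == null)
    {
        Debug.WriteLine("Classification failed or was superseded by a newer request.");
        return;
    }

    // Use the result here, e.g. surface the predicted label in the UI.
}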
public Task<ScanResult> ProcessImage(Stream bitmapStream)
{
    // Load the bitmap so its CGImage can be handed to the Vision framework.
    var imageData = NSData.FromStream(bitmapStream);
    var image = UIImage.LoadFromData(imageData);
    var options = new VNImageOptions();

    if (image?.CGImage == null)
    {
        throw new Exception("No image");
    }

    // TODO: Find a way to make orientation foolproof
    // (probably convert the stream to a UIImage, which has the orientation encoded).
    var requestHandler = new VNImageRequestHandler(image.CGImage, options);
    var completionSource = new TaskCompletionSource<ScanResult>();

    var request = new VNRecognizeTextRequest((vnRequest, error) =>
    {
        // Keep the best candidate for each recognized text observation.
        var results = vnRequest.GetResults<VNRecognizedTextObservation>();
        var scanResult = new ScanResult();
        foreach (var textObservation in results)
        {
            var candidate = textObservation.TopCandidates(1).FirstOrDefault();
            if (candidate != null)
            {
                scanResult.Add(GetBlock(candidate, textObservation));
            }
        }

        completionSource.TrySetResult(scanResult);
    });

    requestHandler.Perform(new VNRequest[] { request }, out var nsError);

    // ReSharper disable once ConstantConditionalAccessQualifier
    if (!string.IsNullOrEmpty(nsError?.Description))
    {
        throw new Exception(nsError.Description);
    }

    return completionSource.Task;
}
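// A minimal caller sketch, assuming a hypothetical "textScanner" instance exposing ProcessImage;
// that name, and the idea that ScanResult is consumed afterwards, are assumptions for illustration.
// Note that Perform errors surface as a synchronous exception rather than a faulted task.
public async Task ScanDocumentAsync(Stream bitmapStream)
{
    try
    {
        ScanResult scan = await textScanner.ProcessImage(bitmapStream);
        // Consume the recognized text blocks here (their exact shape depends on ScanResult).
    }
    catch (Exception ex)
    {
        Debug.WriteLine($"Text recognition failed: {ex.Message}");
    }
}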
public void DidOutputSampleBuffer(AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
{
    try
    {
        var currentDate = DateTime.Now;
        //Console.WriteLine("DidOutputSampleBuffer: " + currentDate + " " + lastAnalysis + " " + currentDate.Subtract(lastAnalysis).Milliseconds);

        // Control the pace of the machine vision work to protect battery life.
        if (currentDate - lastAnalysis >= pace)
        {
            lastAnalysis = currentDate;
        }
        else
        {
            //Console.WriteLine("-- skip --");
            return; // Don't run the classifier more often than we need to.
        }

        // Keep track of performance and log the frame rate.
        if (trackPerformance)
        {
            frameCount = frameCount + 1;
            if (frameCount % framesPerSample == 0)
            {
                var diff = currentDate.Subtract(startDate);
                if (diff.Seconds > 0 && pace > TimeSpan.Zero)
                {
                    Console.WriteLine("WARNING: Frame rate of image classification is being limited by \"pace\" setting. Set to 0.0 for fastest possible rate.");
                }
                Console.WriteLine($"{diff.Seconds / framesPerSample}s per frame (average)");
            }
            startDate = currentDate;
        }

        // Crop and resize the image data.
        // Note: this uses a Core Image pipeline that could be extended with other pre-processing.
        // If nothing custom is needed, this step can be removed and the Vision framework left to
        // handle the crop and resize, as long as the orientation is passed properly.
        using (var croppedBuffer = CroppedSampleBuffer(sampleBuffer, targetImageSize))
        {
            if (croppedBuffer == null)
            {
                return;
            }

            try
            {
                var options = new VNImageOptions();
                classifierRequestHandler = new VNImageRequestHandler(croppedBuffer, options);
                classifierRequestHandler.Perform(ClassificationRequest, out NSError err);
                if (err != null)
                {
                    Console.WriteLine(err);
                }
            }
            catch (Exception error)
            {
                Console.WriteLine(error);
            }
        }
    }
    finally
    {
        sampleBuffer.Dispose();
    }
}
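// A sketch of how the ClassificationRequest array used above could be built from a compiled
// Core ML model. The model URL, the setup method, and the completion handling are assumptions;
// the snippet above does not show how its ClassificationRequest field is actually created.
void SetupClassificationRequest(NSUrl compiledModelUrl)
{
    var mlModel = MLModel.Create(compiledModelUrl, out NSError modelError);
    if (modelError != null)
    {
        Console.WriteLine(modelError);
        return;
    }

    var vnModel = VNCoreMLModel.FromMLModel(mlModel, out NSError visionError);
    if (visionError != null)
    {
        Console.WriteLine(visionError);
        return;
    }

    var request = new VNCoreMLRequest(vnModel, (vnRequest, error) =>
    {
        // Log the top classification, if any.
        var best = vnRequest.GetResults<VNClassificationObservation>()?.FirstOrDefault();
        if (best != null)
        {
            Console.WriteLine($"{best.Identifier} ({best.Confidence:P0})");
        }
    });
    request.ImageCropAndScaleOption = VNImageCropAndScaleOption.CenterCrop;

    ClassificationRequest = new VNRequest[] { request };
}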