private async Task<IReadOnlyList<ImageClassification>> Classify(UIImage source)
{
    var tcs = new TaskCompletionSource<IEnumerable<ImageClassification>>();
    var request = new VNCoreMLRequest(_model, (response, e) =>
    {
        if (e != null)
        {
            tcs.SetException(new NSErrorException(e));
        }
        else
        {
            var results = response.GetResults<VNClassificationObservation>();
            tcs.SetResult(results.Select(r => new ImageClassification(r.Identifier, r.Confidence)).ToList());
        }
    });

    // Pre-process the image (scale it down to the model's input size).
    var buffer = source.ToCVPixelBuffer(_targetImageSize);
    var requestHandler = new VNImageRequestHandler(buffer, new NSDictionary());
    requestHandler.Perform(new[] { request }, out NSError error);

    // Check the synchronous error before awaiting: if Perform failed, the
    // completion callback never runs and the task would never complete.
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    var classifications = await tcs.Task;
    return classifications.OrderByDescending(p => p.Probability)
                          .ToList()
                          .AsReadOnly();
}
public async Task<IEnumerable<ImageClassification>> Evaluate(CoreMlInput source)
{
    var tcs = new TaskCompletionSource<IEnumerable<ImageClassification>>();
    var request = new VNCoreMLRequest(_model, (response, e) =>
    {
        if (e != null)
        {
            tcs.SetException(new NSErrorException(e));
        }
        else
        {
            var results = response.GetResults<VNClassificationObservation>();
            tcs.SetResult(results.Select(r => new ImageClassification(r.Identifier, r.Confidence)).ToList());
        }
    });

    // Pre-process image (scale down)
    var buffer = source.Image.ToCVPixelBuffer(_targetImageSize);
    var requestHandler = new VNImageRequestHandler(buffer, new NSDictionary());
    requestHandler.Perform(new[] { request }, out NSError error);

    // Fail fast if Perform itself errored; otherwise the callback never fires
    // and the await below would hang.
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    return await tcs.Task;
}
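Both methods above call a ToCVPixelBuffer extension on UIImage that is not shown here. A minimal sketch of what such a helper might look like, assuming the model expects a 32BGRA buffer at _targetImageSize; the class name and drawing details are illustrative:

using CoreGraphics;
using CoreVideo;
using UIKit;

public static class UIImageExtensions
{
    // Renders the UIImage into a new CVPixelBuffer of the requested size.
    public static CVPixelBuffer ToCVPixelBuffer(this UIImage image, CGSize size)
    {
        var attrs = new CVPixelBufferAttributes
        {
            CGImageCompatibility = true,
            CGBitmapContextCompatibility = true
        };
        var pixelBuffer = new CVPixelBuffer((nint)size.Width, (nint)size.Height,
            CVPixelFormatType.CV32BGRA, attrs);

        pixelBuffer.Lock(CVPixelBufferLock.None);
        using (var colorSpace = CGColorSpace.CreateDeviceRGB())
        using (var context = new CGBitmapContext(pixelBuffer.BaseAddress,
            (nint)size.Width, (nint)size.Height, 8, pixelBuffer.BytesPerRow,
            colorSpace, CGImageAlphaInfo.NoneSkipFirst))
        {
            // Draw (and implicitly scale) the image into the buffer.
            context.DrawImage(new CGRect(0, 0, size.Width, size.Height), image.CGImage);
        }
        pixelBuffer.Unlock(CVPixelBufferLock.None);

        return pixelBuffer;
    }
}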
void LoadMLModel()
{
    // Load the compiled ML model from the app bundle (the resource name here is a generated GUID).
    var assetPath = NSBundle.MainBundle.GetUrlForResource("44105f291f4648b2b0ad7d42d639cb20", "mlmodelc");
    var mlModel = MLModel.Create(assetPath, out NSError mlErr);
    var vModel = VNCoreMLModel.FromMLModel(mlModel, out NSError vnErr);
    ClassificationRequest = new VNCoreMLRequest(vModel, HandleClassification);
}
VNRequest GetClassificationRequest(string resourceName)
{
    resourceName = resourceName.Replace(".mlmodel", "").Replace(".mlmodelc", "");
    // Note: GetUrlForResource takes the bare extension, without a leading dot.
    var modelPath = NSBundle.MainBundle.GetUrlForResource(resourceName, "mlmodelc");
    var mlModel = MLModel.Create(modelPath, out NSError createErr);
    var model = VNCoreMLModel.FromMLModel(mlModel, out NSError mlErr);
    var classificationRequest = new VNCoreMLRequest(model, HandleClassifications);
    return classificationRequest;
}
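Several of these snippets wire callbacks such as HandleClassification, HandleClassifications, and HandleVNRequest without showing them. A minimal sketch of what a typical handler looks like, assuming it lives in a view controller; the logging is illustrative:

void HandleClassifications(VNRequest request, NSError error)
{
    if (error != null)
    {
        Console.WriteLine($"Classification error: {error.LocalizedDescription}");
        return;
    }

    var observations = request.GetResults<VNClassificationObservation>();
    if (observations == null || observations.Length == 0)
    {
        return;
    }

    // Vision returns classification observations ordered by confidence.
    var best = observations[0];
    InvokeOnMainThread(() => Console.WriteLine($"{best.Identifier} ({best.Confidence:P1})"));
}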
public override void ViewDidLoad()
{
    base.ViewDidLoad();

    // Compile the bundled .mlmodel, then wrap it for Vision.
    var modelUrl = NSBundle.MainBundle.GetUrlForResource("HotDogOrNot", "mlmodel");
    var compiledModelUrl = MLModel.CompileModel(modelUrl, out var error);
    if (error == null)
    {
        model = MLModel.Create(compiledModelUrl, out error);
        Console.WriteLine($"MODEL LOADED: {model}");
        if (error == null)
        {
            var nvModel = VNCoreMLModel.FromMLModel(model, out error);
            if (error == null)
            {
                classificationRequest = new VNCoreMLRequest(nvModel, HandleVNRequest);
            }
        }
    }
    if (error != null)
    {
        Console.WriteLine($"ERROR LOADING MODEL: {error}");
    }

    // Fall back to a plain image view on devices without ARKit support.
    arkitSupported = ARConfiguration.IsSupported;
    if (arkitSupported)
    {
        arView = new ARSCNView
        {
            Frame = View.Bounds,
            AutoresizingMask = UIViewAutoresizing.FlexibleDimensions,
        };
        arView.AddGestureRecognizer(new UITapGestureRecognizer(HandleARTapped));
        View.AddSubview(arView);
    }
    else
    {
        imgView = new UIImageView(View.Bounds)
        {
            BackgroundColor = UIColor.Black,
            ContentMode = UIViewContentMode.ScaleAspectFill,
            UserInteractionEnabled = true,
            AutoresizingMask = UIViewAutoresizing.FlexibleDimensions,
        };
        imgView.AddGestureRecognizer(new UITapGestureRecognizer(HandleImageTapped));
        View.AddSubview(imgView);
    }
}
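The ViewDidLoad above attaches HandleARTapped but does not show it. A plausible sketch that feeds the current ARKit camera frame to the classificationRequest built above; the field names come from that snippet, the rest is an assumption:

void HandleARTapped()
{
    // ARFrame exposes the camera image as a CVPixelBuffer.
    using (var frame = arView.Session?.CurrentFrame)
    {
        if (frame == null || classificationRequest == null)
        {
            return;
        }

        var handler = new VNImageRequestHandler(frame.CapturedImage, new NSDictionary());
        handler.Perform(new[] { classificationRequest }, out NSError error);
        if (error != null)
        {
            Console.WriteLine($"Vision error: {error}");
        }
    }
}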
public void Classify(byte[] bytes)
{
    // Note: this compiles and loads the model on every call; see the caching sketch below.
    var modelUrl = NSBundle.MainBundle.GetUrlForResource("people-or-not", "mlmodel");
    var compiledUrl = MLModel.CompileModel(modelUrl, out var error);
    var compiledModel = MLModel.Create(compiledUrl, out error);
    var vnCoreModel = VNCoreMLModel.FromMLModel(compiledModel, out error);
    var classificationRequest = new VNCoreMLRequest(vnCoreModel, HandleVNRequest);

    var data = NSData.FromArray(bytes);
    var handler = new VNImageRequestHandler(data, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
    handler.Perform(new[] { classificationRequest }, out error);
}
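Because Classify above recompiles the .mlmodel on every invocation, repeated calls pay the full compilation cost each time. A sketch of caching the Vision model in a field instead; the member names are illustrative:

VNCoreMLModel _cachedVnModel;

VNCoreMLModel GetOrCreateModel()
{
    if (_cachedVnModel == null)
    {
        var modelUrl = NSBundle.MainBundle.GetUrlForResource("people-or-not", "mlmodel");
        var compiledUrl = MLModel.CompileModel(modelUrl, out var error);
        var compiledModel = MLModel.Create(compiledUrl, out error);
        _cachedVnModel = VNCoreMLModel.FromMLModel(compiledModel, out error);
    }
    return _cachedVnModel;
}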
public static async Task<MLMultiArray> PlayMN(VNCoreMLModel _VNMLModel, CVPixelBuffer image)
{
    if (_VNMLModel == null)
    {
        // Bail out before building the request; the original only cancelled the
        // task here and then dereferenced the null model anyway.
        throw new ArgumentNullException(nameof(_VNMLModel));
    }

    var tcs = new TaskCompletionSource<MLMultiArray>();
    var request = new VNCoreMLRequest(_VNMLModel, (response, e) =>
    {
        if (e != null)
        {
            tcs.SetException(new NSErrorException(e));
        }
        else
        {
            var results = response.GetResults<VNCoreMLFeatureValueObservation>();
            var r = results.FirstOrDefault();
            if (r != null)
            {
                var featureValue = r.FeatureValue;
                tcs.SetResult(featureValue.MultiArrayValue);
                featureValue.Dispose();
            }
            else
            {
                tcs.SetCanceled();
            }
        }
    })
    {
        ImageCropAndScaleOption = VNImageCropAndScaleOption.ScaleFill
    };

    var requestHandler = new VNImageRequestHandler(image, new NSDictionary());
    requestHandler.Perform(new[] { request }, out NSError error);

    // Surface synchronous failures before awaiting, or the task may never complete.
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    return await tcs.Task;
}
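A sketch of how PlayMN might be called, assuming a VNCoreMLModel loaded elsewhere and a camera pixel buffer; the variable names are illustrative:

var output = await PlayMN(vnModel, pixelBuffer);
// MLMultiArray exposes its dimensions as an NSNumber[]; what each dimension
// means depends on the model.
Console.WriteLine($"Output shape: {string.Join("x", output.Shape.Select(n => n.ToString()))}");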
public override void ViewDidLoad()
{
    base.ViewDidLoad();

    // Configure UI
    CameraButton.Enabled = UIImagePickerController.IsSourceTypeAvailable(UIImagePickerControllerSourceType.Camera);
    GalleryButton.Enabled = UIImagePickerController.IsSourceTypeAvailable(UIImagePickerControllerSourceType.SavedPhotosAlbum);

    // Load the ML model
    var bundle = NSBundle.MainBundle;
    var assetPath = bundle.GetUrlForResource("MNISTClassifier", "mlmodelc");
    var mlModel = MLModel.Create(assetPath, out NSError mlErr);
    var model = VNCoreMLModel.FromMLModel(mlModel, out NSError vnErr);

    // Initialize the Vision requests: find rectangles first, then classify their contents.
    RectangleRequest = new VNDetectRectanglesRequest(HandleRectangles);
    ClassificationRequest = new VNCoreMLRequest(model, HandleClassification);
}
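HandleRectangles, referenced above, is not shown. A simplified sketch that crops the first detected rectangle out of the input image and hands it to the classifier; the InputImage field is an assumption, and the full sample would also apply perspective correction before classifying:

void HandleRectangles(VNRequest request, NSError error)
{
    var observations = request.GetResults<VNRectangleObservation>();
    if (observations == null || observations.Length == 0)
    {
        return;
    }

    // Vision bounding boxes are normalized to [0..1]; scale to image pixels.
    var box = observations[0].BoundingBox;
    var w = InputImage.Extent.Width;
    var h = InputImage.Extent.Height;
    var cropped = InputImage.ImageByCroppingToRect(
        new CGRect(box.X * w, box.Y * h, box.Width * w, box.Height * h));

    var handler = new VNImageRequestHandler(cropped, new VNImageOptions());
    handler.Perform(new VNRequest[] { ClassificationRequest }, out NSError performError);
}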
/// <summary>
/// Vision classification request and model
/// </summary>
private VNCoreMLRequest ClassificationRequest()
{
    var model = VNCoreMLModel.FromMLModel(new Inceptionv3().Model, out NSError error);
    if (error == null)
    {
        var request = new VNCoreMLRequest(model, (internalRequest, internalError) => this.ProcessClassifications(internalRequest, internalError));

        // Crop input images to a square area at the center, matching the way the ML model was trained.
        request.ImageCropAndScaleOption = VNImageCropAndScaleOption.CenterCrop;

        // Use the CPU for Vision processing to ensure that there are adequate GPU resources for rendering.
        request.UsesCpuOnly = true;

        return request;
    }

    throw new Exception($"Failed to load Vision ML model: {error}");
}
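A sketch of driving the request built above from a CIImage. ProcessClassifications is the callback wired in by ClassificationRequest; the method name, queue, and orientation here are assumptions:

void UpdateClassifications(CIImage ciImage)
{
    // Vision work can be slow; keep it off the main thread.
    DispatchQueue.DefaultGlobalQueue.DispatchAsync(() =>
    {
        var handler = new VNImageRequestHandler(ciImage, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
        handler.Perform(new VNRequest[] { ClassificationRequest() }, out NSError error);
        if (error != null)
        {
            Console.WriteLine($"Failed to perform classification: {error.LocalizedDescription}");
        }
    });
}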
public async Task<Tuple<float[], float[]>> PlayMNSSD(object pixelBuffer, int labelsCount, int bbCount)
{
    if (_VNMLModel == null)
    {
        throw new InvalidOperationException("Model not loaded.");
    }

    var image = pixelBuffer as CVPixelBuffer;
    PreprocessTime = TimeSpan.FromTicks(0);
    var startInferTime = DateTimeOffset.UtcNow;

    var tcs = new TaskCompletionSource<MLMultiArray[]>();
    var request = new VNCoreMLRequest(_VNMLModel, (response, e) =>
    {
        if (e != null)
        {
            tcs.SetException(new NSErrorException(e));
        }
        else
        {
            // The model is expected to emit two outputs: class scores and box coordinates.
            var results = response.GetResults<VNCoreMLFeatureValueObservation>();
            var probs = results[0].FeatureValue;
            var bboxes = results[1].FeatureValue;
            if (bboxes != null && probs != null)
            {
                tcs.SetResult(new MLMultiArray[] { probs.MultiArrayValue, bboxes.MultiArrayValue });
                bboxes.Dispose();
                probs.Dispose();
            }
            else
            {
                tcs.SetCanceled();
            }
        }
    });
    request.ImageCropAndScaleOption = VNImageCropAndScaleOption.ScaleFill;

    var requestHandler = new VNImageRequestHandler(image, new NSDictionary());
    requestHandler.Perform(new[] { request }, out NSError error);

    // Check the synchronous error before awaiting, or the task may never complete.
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    var outs = await tcs.Task;
    InferenceTime = DateTimeOffset.UtcNow - startInferTime;

    var startPostprocTime = DateTimeOffset.UtcNow;

    // Copy the raw buffers out of the MLMultiArrays; this assumes the model's
    // output data type is double (Float64).
    var outProbs = new double[bbCount * labelsCount];
    var outBBoxes = new double[bbCount * 4];
    Marshal.Copy(outs[0].DataPointer, outProbs, 0, outProbs.Length);
    Marshal.Copy(outs[1].DataPointer, outBBoxes, 0, outBBoxes.Length);
    outs[0].Dispose();
    outs[1].Dispose();

    float[] probsAll = Array.ConvertAll(outProbs, x => (float)x);
    float[] bboxesAll = Array.ConvertAll(outBBoxes, x => (float)x);
    PostprocessTime = DateTimeOffset.UtcNow - startPostprocTime;

    return Tuple.Create(probsAll, bboxesAll);
}
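A sketch of consuming PlayMNSSD's flattened outputs. Whether the score layout is box-major or class-major depends on the model; this sketch assumes box-major, the 91/1917 values are the usual COCO MobileNet-SSD sizes used only as placeholders, and full SSD decoding (anchor boxes, non-maximum suppression) is model-specific and omitted:

var outputs = await PlayMNSSD(pixelBuffer, labelsCount: 91, bbCount: 1917);
float[] probs = outputs.Item1;   // bbCount * labelsCount class scores
float[] boxes = outputs.Item2;   // bbCount * 4 box coordinates

// Score for box b and class c under a box-major layout.
float Score(int b, int c) => probs[b * 91 + c];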
private async void PickCamera()
{
    // CrossMedia.Current is a static singleton; just make sure the plugin is initialized.
    await CrossMedia.Current.Initialize();

    if (!CrossMedia.Current.IsCameraAvailable || !CrossMedia.Current.IsTakePhotoSupported || !CrossMedia.Current.IsPickPhotoSupported)
    {
        UserDialogs.Instance.Alert("Device options not supported.", null, "OK");
        return;
    }

    Console.WriteLine("Picking photo...");
    var file = await CrossMedia.Current.PickPhotoAsync();
    if (file == null)
    {
        UserDialogs.Instance.Alert("You didn't pick a photo.", null, "OK");
        return;
    }

    // Read the picked photo into a byte array.
    Stream s = file.GetStream();
    byte[] result = null;
    var buffer = new byte[16 * 1024];
    using (MemoryStream ms = new MemoryStream())
    {
        int read;
        while ((read = s.Read(buffer, 0, buffer.Length)) > 0)
        {
            ms.Write(buffer, 0, read);
        }
        result = ms.ToArray();
    }

    // Update the UI, then run the ML model.
    SEEFOOD.Hidden = true;
    stat.Hidden = false;
    selectCamRollButton.Hidden = true;
    takePhotoButton.Hidden = true;
    image.Hidden = false;
    spinnyboy.Hidden = false;
    this.hotdogLbl.Hidden = true;
    this.ramenLbl.Hidden = true;
    this.ramenOrHotdog.Hidden = true;
    spinnyboy.StartAnimating();
    returnToMenu.Hidden = false;
    this.stat.Text = "Analyzing...";
    this.stat.TextColor = UIKit.UIColor.Black;
    showDebugInfo.Hidden = false;

    var data = NSData.FromArray(result);
    image.Image = UIImage.LoadFromData(data);
    await Task.Delay(1000);

    // Run whichever model matches the selected mode.
    Console.WriteLine("Selected: " + ViewController.Type.ToString());
    if (ViewController.Type.Equals("Hotdog"))
    {
        var assetPath = NSBundle.MainBundle.GetUrlForResource("model", "mlmodel");
        var compiledUrl = MLModel.CompileModel(assetPath, out NSError compErr);
        MLModel model = MLModel.Create(compiledUrl, out NSError createErr);
        var vnModel = VNCoreMLModel.FromMLModel(model, out NSError modelErr);
        var ciImage = new CIImage(image.Image);
        var classificationRequest = new VNCoreMLRequest(vnModel);

        var handler = new VNImageRequestHandler(ciImage, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
        handler.Perform(new[] { classificationRequest }, out NSError perfError);

        var results = classificationRequest.GetResults<VNClassificationObservation>();
        var top = results[0];
        Console.WriteLine("Hotdog OUT " + top.Identifier);
        switch (top.Identifier)
        {
            case "hotdog":
                if (top.Confidence > 0.85f)
                {
                    this.stat.Text = "✅ Hotdog";
                    this.stat.TextColor = UIKit.UIColor.Green;
                }
                else
                {
                    this.stat.Text = "❌ Not Hotdog";
                    this.stat.TextColor = UIKit.UIColor.Red;
                }
                this.stat.TextAlignment = UITextAlignment.Center;
                spinnyboy.Hidden = true;
                spinnyboy.StopAnimating();
                break;
            case "nothotdog":
                this.stat.Text = "❌ Not Hotdog";
                this.stat.TextColor = UIKit.UIColor.Red;
                this.stat.TextAlignment = UITextAlignment.Center;
                spinnyboy.Hidden = true;
                spinnyboy.StopAnimating();
                break;
        }
        this.confidence = top.Confidence;
        Vibration.Vibrate(500);
    }
    else
    {
        NSUrl modelPath = NSBundle.MainBundle.GetUrlForResource("Ramen", "mlmodel");
        if (modelPath == null)
        {
            Console.WriteLine("Ramen model not found in bundle.");
        }
        var compiledUrl = MLModel.CompileModel(modelPath, out NSError compErr);
        MLModel model = MLModel.Create(compiledUrl, out NSError createErr);
        var vnModel = VNCoreMLModel.FromMLModel(model, out NSError modelErr);
        var ciImage = new CIImage(image.Image);
        var classificationRequest = new VNCoreMLRequest(vnModel);

        var handler = new VNImageRequestHandler(ciImage, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
        handler.Perform(new[] { classificationRequest }, out NSError perfError);

        var results = classificationRequest.GetResults<VNClassificationObservation>();
        var top = results[0];
        Console.WriteLine("Ramen OUT " + top.Identifier);
        switch (top.Identifier)
        {
            case "ramen":
                if (top.Confidence > 0.85f)
                {
                    this.stat.Text = "✅ Ramen";
                    this.stat.TextColor = UIKit.UIColor.Green;
                }
                else
                {
                    this.stat.Text = "❌ Not Ramen";
                    this.stat.TextColor = UIKit.UIColor.Red;
                }
                this.stat.TextAlignment = UITextAlignment.Center;
                spinnyboy.Hidden = true;
                spinnyboy.StopAnimating();
                break;
            case "notramen":
                this.stat.Text = "❌ Not Ramen";
                this.stat.TextColor = UIKit.UIColor.Red;
                this.stat.TextAlignment = UITextAlignment.Center;
                spinnyboy.Hidden = true;
                spinnyboy.StopAnimating();
                break;
        }
        this.confidence = top.Confidence;
        Vibration.Vibrate(500);
    }
}
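The hotdog and ramen branches above duplicate the same compile/load/classify pipeline; a sketch of factoring it into a single helper (name and shape illustrative):

VNClassificationObservation ClassifyTop(string resourceName, CIImage ciImage)
{
    var modelUrl = NSBundle.MainBundle.GetUrlForResource(resourceName, "mlmodel");
    var compiledUrl = MLModel.CompileModel(modelUrl, out NSError compileErr);
    var mlModel = MLModel.Create(compiledUrl, out NSError createErr);
    var vnModel = VNCoreMLModel.FromMLModel(mlModel, out NSError modelErr);

    var request = new VNCoreMLRequest(vnModel);
    var handler = new VNImageRequestHandler(ciImage, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
    handler.Perform(new[] { request }, out NSError performErr);

    // Observations come back sorted by confidence; return the best one (or null).
    return request.GetResults<VNClassificationObservation>()?.FirstOrDefault();
}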