/// <summary>
/// Compiles the named Core ML model and wraps it for use with Vision.
/// </summary>
/// <param name="modelName">Resource name of the raw .mlmodel to compile.</param>
/// <returns>The compiled model wrapped as a <see cref="VNCoreMLModel"/>.</returns>
/// <exception cref="ImageClassifierException">Thrown when the model resource cannot be found.</exception>
/// <exception cref="NSErrorException">Thrown when model creation or Vision wrapping fails.</exception>
private static VNCoreMLModel LoadModel(string modelName)
{
    var compiledUrl = CompileModel(modelName);
    if (compiledUrl == null)
    {
        throw new ImageClassifierException($"Model {modelName} does not exist");
    }

    // Load the compiled model from disk.
    var coreMlModel = MLModel.Create(compiledUrl, out NSError error);
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    // Wrap it for the Vision framework.
    var visionModel = VNCoreMLModel.FromMLModel(coreMlModel, out error);
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    return visionModel;
}
/// <summary>
/// Resolves the named model — preferring a pre-compiled .mlmodelc already in the
/// app bundle, falling back to compiling the raw .mlmodel — and wraps it for Vision.
/// </summary>
/// <param name="modelName">Resource name of the model.</param>
/// <returns>The model wrapped as a <see cref="VNCoreMLModel"/>.</returns>
/// <exception cref="ImageClassifierException">Thrown when the model cannot be located.</exception>
/// <exception cref="NSErrorException">Thrown when model creation or Vision wrapping fails.</exception>
private VNCoreMLModel LoadModel(string modelName)
{
    // Prefer a model compiled at build time; compile on-device otherwise.
    var modelPath = NSBundle.MainBundle.GetUrlForResource(modelName, "mlmodelc") ?? CompileModel(modelName);
    if (modelPath == null)
    {
        throw new ImageClassifierException($"Model {modelName} does not exist");
    }

    var coreMlModel = MLModel.Create(modelPath, out NSError error);
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    var visionModel = VNCoreMLModel.FromMLModel(coreMlModel, out error);
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    return visionModel;
}
/// <summary>
/// Loads the bundled "FriesOrNotFries" compiled Core ML model and wraps it for Vision.
/// </summary>
/// <exception cref="NSErrorException">Thrown when the model cannot be created or wrapped.</exception>
public PhotoDetector()
{
    var assetPath = NSBundle.MainBundle.GetUrlForResource("FriesOrNotFries", "mlmodelc");

    // BUG FIX: the original discarded both NSError outputs, so a missing or
    // corrupt model surfaced later as a NullReferenceException; fail fast instead.
    _mlModel = MLModel.Create(assetPath, out NSError createError);
    if (createError != null)
    {
        throw new NSErrorException(createError);
    }

    _model = VNCoreMLModel.FromMLModel(_mlModel, out NSError visionError);
    if (visionError != null)
    {
        throw new NSErrorException(visionError);
    }
}
/// <summary>
/// Static initializer: loads the bundled pre-compiled "jankenmodel" and exposes
/// it as the Vision model <c>_vnmodel</c>.
/// NOTE(review): both NSError outputs are discarded, so a missing/corrupt model
/// leaves <c>_vnmodel</c> null — confirm callers tolerate that. (Kept as-is:
/// throwing from a static constructor would surface as TypeInitializationException.)
/// </summary>
static JankenJudgeService()
{
    // Load the ML model from the app bundle.
    var modelUrl = NSBundle.MainBundle.GetUrlForResource("jankenmodel", "mlmodelc");
    var jankenModel = MLModel.Create(modelUrl, out _);
    _vnmodel = VNCoreMLModel.FromMLModel(jankenModel, out _);
}
/// <summary>
/// Static initializer: loads the pre-compiled fries classifier from the app
/// bundle and exposes it through <c>VModel</c>.
/// NOTE(review): both NSError outputs are discarded; a missing or corrupt model
/// leaves <c>VModel</c> null — confirm callers tolerate that.
/// </summary>
static FriesOrNotFriesService()
{
    // Load the ML model (resource name is an opaque generated identifier).
    var modelUrl = NSBundle.MainBundle.GetUrlForResource("e3e4e645c0944c6ca84f9a000e501b22", "mlmodelc");
    var friesModel = MLModel.Create(modelUrl, out _);
    VModel = VNCoreMLModel.FromMLModel(friesModel, out _);
}
/// <summary>
/// Static initializer: loads the pre-compiled "detectBalls" model from the app
/// bundle and exposes it as the Vision model <c>VModel</c>.
/// NOTE(review): both NSError outputs are discarded; a missing model leaves
/// <c>VModel</c> null — confirm callers handle that.
/// </summary>
static DetectService()
{
    // Load the ML model from the app bundle.
    var modelUrl = NSBundle.MainBundle.GetUrlForResource("detectBalls", "mlmodelc");
    var ballDetector = MLModel.Create(modelUrl, out _);
    VModel = VNCoreMLModel.FromMLModel(ballDetector, out _);
}
/// <summary>
/// Loads the bundled compiled model and builds the Vision classification request.
/// </summary>
/// <exception cref="NSErrorException">Thrown when the model cannot be created or wrapped.</exception>
void LoadMLModel()
{
    // Load the ML model (resource name is an opaque generated identifier).
    var assetPath = NSBundle.MainBundle.GetUrlForResource("44105f291f4648b2b0ad7d42d639cb20", "mlmodelc");

    // BUG FIX: the original captured mlErr/vnErr but never inspected them, so a
    // load failure only showed up later as a NullReferenceException.
    var mlModel = MLModel.Create(assetPath, out NSError mlErr);
    if (mlErr != null)
    {
        throw new NSErrorException(mlErr);
    }

    var vModel = VNCoreMLModel.FromMLModel(mlModel, out NSError vnErr);
    if (vnErr != null)
    {
        throw new NSErrorException(vnErr);
    }

    ClassificationRequest = new VNCoreMLRequest(vModel, HandleClassification);
}
/// <summary>
/// Loads the bundled SSD MobileNet feature extractor and wraps it for Vision.
/// </summary>
/// <exception cref="NSErrorException">Thrown when the model cannot be created or wrapped.</exception>
public iOSMNSSD()
{
    var assetPath = NSBundle.MainBundle.GetUrlForResource("ssd_mobilenet_feature_extractor", "mlmodelc");

    var mlModel = MLModel.Create(assetPath, out NSError mlError);
    if (mlError != null)
    {
        throw new NSErrorException(mlError);
    }

    // BUG FIX: the original silently left _VNMLModel null on failure; fail fast
    // so the error is not deferred to the first detection call.
    _VNMLModel = VNCoreMLModel.FromMLModel(mlModel, out mlError);
    if (mlError != null)
    {
        throw new NSErrorException(mlError);
    }
}
/// <summary>
/// Builds a Vision classification request from a pre-compiled model in the app bundle.
/// </summary>
/// <param name="resourceName">Model resource name, with or without a model extension.</param>
/// <returns>A configured <see cref="VNCoreMLRequest"/>.</returns>
/// <exception cref="NSErrorException">Thrown when loading or wrapping the model fails.</exception>
VNRequest GetClassificationRequest(string resourceName)
{
    // BUG FIX: strip ".mlmodelc" before ".mlmodel" — the original order turned
    // "foo.mlmodelc" into "fooc" because ".mlmodel" is a prefix of ".mlmodelc".
    resourceName = resourceName.Replace(".mlmodelc", "").Replace(".mlmodel", "");

    // BUG FIX: the extension must be passed without a leading dot; with the
    // original ".mlmodelc" the bundle lookup always returned null.
    var modelPath = NSBundle.MainBundle.GetUrlForResource(resourceName, "mlmodelc");

    // BUG FIX: the original declared createErr/mlErr but never checked them.
    var mlModel = MLModel.Create(modelPath, out NSError createErr);
    if (createErr != null)
    {
        throw new NSErrorException(createErr);
    }

    var model = VNCoreMLModel.FromMLModel(mlModel, out NSError mlErr);
    if (mlErr != null)
    {
        throw new NSErrorException(mlErr);
    }

    var classificationRequest = new VNCoreMLRequest(model, HandleClassifications);
    return classificationRequest;
}
/// <summary>
/// Initializes the classifier by loading the named model.
/// </summary>
/// <param name="modelName">Name of the model to load.</param>
/// <exception cref="ImageClassifierException">
/// Wraps any load failure; inspect <c>InnerException</c> for the root cause.
/// </exception>
internal void Init(string modelName)
{
    try
    {
        _model = LoadModel(modelName);
    }
    catch (Exception ex)
    {
        // Surface every failure mode behind a single, caller-friendly exception type.
        throw new ImageClassifierException("Failed to load the model - check the inner exception for more details", ex);
    }
}
/// <summary>
/// Loads the named pre-compiled model from the app bundle and wraps it for Vision.
/// </summary>
/// <param name="modelName">Bundle resource name of the compiled (.mlmodelc) model.</param>
/// <param name="outSize">Model output size, stored for later use — presumably the
/// classification vector length; confirm against callers.</param>
/// <exception cref="NSErrorException">Thrown when the model cannot be created or wrapped.</exception>
public iOSImageClassifier(string modelName, int outSize)
{
    _outSize = outSize;
    var assetPath = NSBundle.MainBundle.GetUrlForResource(modelName, "mlmodelc");

    var mlModel = MLModel.Create(assetPath, out NSError mlError);
    if (mlError != null)
    {
        throw new NSErrorException(mlError);
    }

    // BUG FIX: the original silently left _VNMLModel null on failure; fail fast
    // so the error is not deferred to first use.
    _VNMLModel = VNCoreMLModel.FromMLModel(mlModel, out mlError);
    if (mlError != null)
    {
        throw new NSErrorException(mlError);
    }
}
/// <summary>
/// Runs the base-class initialization, then loads the named model.
/// </summary>
/// <param name="modelName">Name of the model to load.</param>
/// <param name="modelType">Kind of model being loaded.</param>
/// <param name="inputSize">Input size expected by the model (default 227) —
/// presumably the image edge length in pixels; confirm against the model.</param>
/// <exception cref="ImageClassifierException">
/// Wraps any load failure; inspect <c>InnerException</c> for the root cause.
/// </exception>
public override void Init(string modelName, ModelType modelType, int inputSize = 227)
{
    base.Init(modelName, modelType, inputSize);

    try
    {
        _model = LoadModel(modelName);
    }
    catch (Exception ex)
    {
        // Surface every failure mode behind a single, caller-friendly exception type.
        throw new ImageClassifierException("Failed to load the model - check the inner exception for more details", ex);
    }
}
/// <summary>
/// Compiles the bundled "HotDogOrNot" model, builds the Vision classification
/// request, then installs either an AR scene view (when ARKit is supported) or
/// a plain tappable image view.
/// </summary>
public override void ViewDidLoad()
{
    base.ViewDidLoad();
    // Compile the raw .mlmodel on-device, then chain model -> Vision wrapper ->
    // classification request, re-checking `error` after each step.
    var modelUrl = NSBundle.MainBundle.GetUrlForResource("HotDogOrNot", "mlmodel");
    var compiledModelUrl = MLModel.CompileModel(modelUrl, out var error);
    if (error == null)
    {
        model = MLModel.Create(compiledModelUrl, out error);
        Console.WriteLine($"MODEL LOADED: {model}");
        if (error == null)
        {
            var nvModel = VNCoreMLModel.FromMLModel(model, out error);
            if (error == null)
            {
                classificationRequest = new VNCoreMLRequest(nvModel, HandleVNRequest);
            }
        }
    }
    // Any failure above lands here; the view still loads, just without a request.
    if (error != null)
    {
        Console.WriteLine($"ERROR LOADING MODEL: {error}");
    }
    // Prefer an AR scene view when ARKit is available; otherwise fall back to a
    // full-screen tappable image view.
    arkitSupported = ARConfiguration.IsSupported;
    if (arkitSupported)
    {
        arView = new ARSCNView()
        {
            Frame = View.Bounds,
            AutoresizingMask = UIViewAutoresizing.FlexibleDimensions,
        };
        arView.AddGestureRecognizer(new UITapGestureRecognizer(HandleARTapped));
        View.AddSubview(arView);
    }
    else
    {
        imgView = new UIImageView(View.Bounds)
        {
            BackgroundColor = UIColor.Black,
            ContentMode = UIViewContentMode.ScaleAspectFill,
            UserInteractionEnabled = true,
            // NOTE(review): Frame is set both via the constructor argument and here.
            Frame = View.Bounds,
            AutoresizingMask = UIViewAutoresizing.FlexibleDimensions,
        };
        imgView.AddGestureRecognizer(new UITapGestureRecognizer(HandleImageTapped));
        View.AddSubview(imgView);
    }
}
/// <summary>
/// Classifies the supplied encoded image bytes with the "people-or-not" model.
/// </summary>
/// <param name="bytes">Encoded image data (e.g. JPEG/PNG).</param>
/// <exception cref="NSErrorException">Thrown when compiling, loading, wrapping,
/// or running the model fails.</exception>
public void Classify(byte[] bytes)
{
    // NOTE(review): the model is compiled from source on every call, which is
    // expensive — consider caching the compiled model and request.
    var modelUrl = NSBundle.MainBundle.GetUrlForResource("people-or-not", "mlmodel");

    // BUG FIX: the original overwrote `error` at every step without ever
    // checking it, turning any failure into a downstream NullReferenceException.
    var compiledUrl = MLModel.CompileModel(modelUrl, out var error);
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    var compiledModel = MLModel.Create(compiledUrl, out error);
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    var vnCoreModel = VNCoreMLModel.FromMLModel(compiledModel, out error);
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    var classificationRequest = new VNCoreMLRequest(vnCoreModel, HandleVNRequest);
    var data = NSData.FromArray(bytes);
    var handler = new VNImageRequestHandler(data, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
    handler.Perform(new[] { classificationRequest }, out error);
    if (error != null)
    {
        throw new NSErrorException(error);
    }
}
/// <summary>
/// Downloads a raw .mlmodel from <paramref name="modelUrl"/>, compiles it
/// on-device, and wraps it as a Vision model.
/// NOTE(review): the download is synchronous and blocks the calling thread.
/// </summary>
/// <param name="modelUrl">HTTP(S) URL of the raw .mlmodel to fetch.</param>
/// <returns>The compiled model wrapped as a <see cref="VNCoreMLModel"/>.</returns>
/// <exception cref="NSErrorException">Thrown when compiling, loading, or wrapping fails.</exception>
private VNCoreMLModel LoadModel(string modelUrl)
{
    // Download the raw model into the app's documents folder.
    string documentsPath = Environment.GetFolderPath(Environment.SpecialFolder.MyDocuments);
    string localPath = Path.Combine(documentsPath, "current.mlmodel");

    // BUG FIX: WebClient is IDisposable; the original leaked it.
    using (var webClient = new WebClient())
    {
        webClient.DownloadFile(modelUrl, localPath);
    }

    var fileUrl = NSUrl.FromFilename(localPath);

    // Compile the downloaded model for on-device use.
    var compiledUrl = MLModel.CompileModel(fileUrl, out NSError compileErr);
    if (compileErr != null)
    {
        throw new NSErrorException(compileErr);
    }

    var mlModel = MLModel.Create(compiledUrl, out NSError createErr);
    if (createErr != null)
    {
        throw new NSErrorException(createErr);
    }

    var model = VNCoreMLModel.FromMLModel(mlModel, out NSError vnErr);
    if (vnErr != null)
    {
        throw new NSErrorException(vnErr);
    }

    return model;
}
/// <summary>
/// Runs the given Vision model against a pixel buffer and returns the first
/// feature-value observation's multi-array output.
/// </summary>
/// <param name="_VNMLModel">Vision-wrapped model to run; a null model cancels the task.</param>
/// <param name="image">Pixel buffer to classify.</param>
/// <returns>The model's <see cref="MLMultiArray"/> output.</returns>
/// <exception cref="NSErrorException">Thrown when the request fails to run or the model reports an error.</exception>
public static async Task<MLMultiArray> PlayMN(VNCoreMLModel _VNMLModel, CVPixelBuffer image)
{
    // Run continuations asynchronously so completing the TCS from Vision's
    // callback thread does not execute the awaiter inline on that thread.
    var tcs = new TaskCompletionSource<MLMultiArray>(TaskCreationOptions.RunContinuationsAsynchronously);

    if (_VNMLModel == null)
    {
        // BUG FIX: the original cancelled the task but then carried on and
        // passed the null model to VNCoreMLRequest; bail out instead.
        tcs.TrySetCanceled();
        return await tcs.Task;
    }

    var request = new VNCoreMLRequest(_VNMLModel, (response, e) =>
    {
        if (e != null)
        {
            tcs.TrySetException(new NSErrorException(e));
            return;
        }

        var observation = response.GetResults<VNCoreMLFeatureValueObservation>().FirstOrDefault();
        if (observation == null)
        {
            tcs.TrySetCanceled();
            return;
        }

        var multiArray = observation.FeatureValue.MultiArrayValue;
        tcs.TrySetResult(multiArray);
        observation.FeatureValue.Dispose();
    })
    {
        ImageCropAndScaleOption = VNImageCropAndScaleOption.ScaleFill
    };

    var requestHandler = new VNImageRequestHandler(image, new NSDictionary());
    requestHandler.Perform(new[] { request }, out NSError error);

    // BUG FIX: check the Perform error *before* awaiting — if Perform failed,
    // the completion callback may never run and the original awaited forever.
    if (error != null)
    {
        throw new NSErrorException(error);
    }

    return await tcs.Task;
}
/// <summary>
/// Configures the picker buttons, loads the bundled MNIST classifier, and
/// builds the Vision rectangle-detection and classification requests.
/// </summary>
/// <exception cref="NSErrorException">Thrown when the model cannot be loaded or wrapped.</exception>
public override void ViewDidLoad()
{
    base.ViewDidLoad();

    // Configure UI: only enable sources the device actually supports.
    CameraButton.Enabled = UIImagePickerController.IsSourceTypeAvailable(UIImagePickerControllerSourceType.Camera);
    GalleryButton.Enabled = UIImagePickerController.IsSourceTypeAvailable(UIImagePickerControllerSourceType.SavedPhotosAlbum);

    // Load the ML model.
    var bundle = NSBundle.MainBundle;
    var assetPath = bundle.GetUrlForResource("MNISTClassifier", "mlmodelc");

    // BUG FIX: the original captured mlErr/vnErr but never inspected them, so a
    // load failure surfaced later as a NullReferenceException in the requests.
    var mlModel = MLModel.FromUrl(assetPath, out NSError mlErr);
    if (mlErr != null)
    {
        throw new NSErrorException(mlErr);
    }

    var model = VNCoreMLModel.FromMLModel(mlModel, out NSError vnErr);
    if (vnErr != null)
    {
        throw new NSErrorException(vnErr);
    }

    // Initialize the Vision requests.
    RectangleRequest = new VNDetectRectanglesRequest(HandleRectangles);
    ClassificationRequest = new VNCoreMLRequest(model, HandleClassification);
}
/// <summary>
/// Resolves <paramref name="modelName"/> — a file-system path or a bundle
/// resource, already-compiled (.mlmodelc) or raw — to a Vision Core ML model.
/// </summary>
/// <param name="modelName">Model path or resource name.</param>
/// <returns>The model wrapped as a <see cref="VNCoreMLModel"/>.</returns>
/// <exception cref="ImageClassifierException">Thrown when the model cannot be located.</exception>
/// <exception cref="NSErrorException">Thrown when loading or wrapping the model fails.</exception>
private VNCoreMLModel LoadModel(string modelName)
{
    var isPath = File.Exists(modelName);

    // BUG FIX: Path.GetExtension returns the extension *with* its leading dot
    // (".mlmodelc"), so the original Equals("mlmodelc") was always false and
    // already-compiled models were sent to CompileModel every time.
    var isCompiled = Path.GetExtension(modelName).Equals(".mlmodelc", StringComparison.OrdinalIgnoreCase);

    NSUrl modelPath = null;
    if (isCompiled)
    {
        if (isPath)
        {
            modelPath = new NSUrl(modelName, false);
        }
        else
        {
            // Bundle lookup wants the resource name without its extension.
            modelPath = NSBundle.MainBundle.GetUrlForResource(Path.GetFileNameWithoutExtension(modelName), "mlmodelc");
        }
    }
    else
    {
        modelPath = CompileModel(modelName, isPath);
    }

    if (modelPath == null)
    {
        throw new ImageClassifierException($"Model {modelName} does not exist");
    }

    var mlModel = MLModel.Create(modelPath, out NSError err);
    if (err != null)
    {
        throw new NSErrorException(err);
    }

    var model = VNCoreMLModel.FromMLModel(mlModel, out err);
    if (err != null)
    {
        throw new NSErrorException(err);
    }

    return model;
}
/// <summary>
/// Builds the Vision classification request backed by the bundled Inceptionv3 model.
/// </summary>
/// <returns>A configured <see cref="VNCoreMLRequest"/>.</returns>
/// <exception cref="NSErrorException">Thrown when the Vision model cannot be created.</exception>
private VNCoreMLRequest ClassificationRequest()
{
    var model = VNCoreMLModel.FromMLModel(new Inceptionv3().Model, out NSError error);
    if (error != null)
    {
        // BUG FIX: throw the specific NSErrorException (still catchable as
        // Exception) rather than the base Exception type; guard clause replaces
        // the original success-path nesting.
        throw new NSErrorException(error);
    }

    var request = new VNCoreMLRequest(model, (internalRequest, internalError) =>
    {
        this.ProcessClassifications(internalRequest, internalError);
    });

    // Crop input images to square area at center, matching the way the ML model was trained.
    request.ImageCropAndScaleOption = VNImageCropAndScaleOption.CenterCrop;

    // Use CPU for Vision processing to ensure that there are adequate GPU resources for rendering.
    request.UsesCpuOnly = true;

    return request;
}
/// <summary>
/// Lets the user pick a photo, shows an "analyzing" UI, then classifies the
/// image with the model selected via <c>ViewController.Type</c> ("Hotdog" or
/// the ramen model) and displays the verdict.
/// </summary>
private async void PickCamera()
{
    // BUG FIX: the original only called Initialize() when CrossMedia.Current was
    // null — i.e. it dereferenced the very reference it had just found to be null.
    await CrossMedia.Current.Initialize();

    if (!CrossMedia.Current.IsCameraAvailable || !CrossMedia.Current.IsTakePhotoSupported || !CrossMedia.Current.IsPickPhotoSupported)
    {
        UserDialogs.Instance.Alert("Device options not supported.", null, "OK");
        return;
    }

    Console.WriteLine("Picking Photo..");
    var file = await CrossMedia.Current.PickPhotoAsync();
    if (file == null)
    {
        UserDialogs.Instance.Alert("You didn't pick a photo.", null, "OK");
        return;
    }

    // Copy the picked photo into a byte array (streams disposed; the original leaked them).
    byte[] result;
    using (Stream s = file.GetStream())
    using (MemoryStream ms = new MemoryStream())
    {
        var buffer = new byte[16 * 1024];
        int read;
        while ((read = s.Read(buffer, 0, buffer.Length)) > 0)
        {
            ms.Write(buffer, 0, read);
        }
        result = ms.ToArray();
    }

    // Switch to the "analyzing" UI.
    SEEFOOD.Hidden = true;
    stat.Hidden = false;
    selectCamRollButton.Hidden = true;
    takePhotoButton.Hidden = true;
    image.Hidden = false;
    spinnyboy.Hidden = false;
    this.hotdogLbl.Hidden = true;
    this.ramenLbl.Hidden = true;
    this.ramenOrHotdog.Hidden = true;
    spinnyboy.StartAnimating();
    returnToMenu.Hidden = false;
    this.stat.Text = "Analyzing...";
    this.stat.TextColor = UIKit.UIColor.Black;
    showDebugInfo.Hidden = false;

    var data = NSData.FromArray(result);
    image.Image = UIImage.LoadFromData(data);
    await Task.Delay(1000);

    // Dispatch to the model selected on the previous screen.
    Console.WriteLine("Selected: " + ViewController.Type.ToString());
    if (ViewController.Type.Equals("Hotdog"))
    {
        var assetPath = NSBundle.MainBundle.GetUrlForResource("model", "mlmodel");
        var thing = RunClassifier(assetPath);
        if (thing == null)
        {
            // BUG FIX: the original indexed results[0] unconditionally and
            // crashed when classification produced no observations.
            return;
        }
        Console.WriteLine("Hotdog OUT " + thing.Identifier);
        switch (thing.Identifier)
        {
            case "hotdog":
                if (thing.Confidence > 0.85f)
                {
                    ShowVerdict("✅ Hotdog", UIKit.UIColor.Green);
                }
                else
                {
                    ShowVerdict("❌ Not Hotdog", UIKit.UIColor.Red);
                }
                break;
            case "nothotdog":
                ShowVerdict("❌ Not Hotdog", UIKit.UIColor.Red);
                break;
        }
        this.confidence = thing.Confidence;
        Vibration.Vibrate(500);
    }
    else
    {
        NSUrl modelPath = NSBundle.MainBundle.GetUrlForResource("Ramen", "mlmodel");
        if (modelPath == null)
        {
            Console.WriteLine("peeepee");
        }
        var thing = RunClassifier(modelPath);
        if (thing == null)
        {
            return;
        }
        Console.WriteLine("Ramen OUT " + thing.Identifier);
        switch (thing.Identifier)
        {
            case "ramen":
                if (thing.Confidence > 0.85f)
                {
                    ShowVerdict("✅ Ramen", UIKit.UIColor.Green);
                }
                else
                {
                    ShowVerdict("❌ Not Ramen", UIKit.UIColor.Red);
                }
                break;
            case "notramen":
                ShowVerdict("❌ Not Ramen", UIKit.UIColor.Red);
                break;
        }
        this.confidence = thing.Confidence;
        Vibration.Vibrate(500);
    }
}

/// <summary>
/// Compiles the given raw .mlmodel on-device, runs it against the currently
/// displayed image, and returns the top classification (or null when there is none).
/// NOTE(review): compile/load errors are still discarded as in the original;
/// a bad model shows up here as a null result rather than an exception.
/// </summary>
private VNClassificationObservation RunClassifier(NSUrl rawModelUrl)
{
    var compiledUrl = MLModel.CompileModel(rawModelUrl, out NSError compileErr);
    var mlModel = MLModel.Create(compiledUrl, out NSError createErr);
    var vnModel = VNCoreMLModel.FromMLModel(mlModel, out NSError vnErr);
    var ciImage = new CIImage(image.Image);
    var classificationRequest = new VNCoreMLRequest(vnModel);
    var handler = new VNImageRequestHandler(ciImage, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
    handler.Perform(new[] { classificationRequest }, out NSError perfError);
    var results = classificationRequest.GetResults<VNClassificationObservation>();
    return (results != null && results.Length > 0) ? results[0] : null;
}

/// <summary>Shows a classification verdict in the status label and stops the spinner.</summary>
private void ShowVerdict(string text, UIKit.UIColor color)
{
    this.stat.Text = text;
    this.stat.TextColor = color;
    this.stat.TextAlignment = UITextAlignment.Center;
    spinnyboy.Hidden = true;
    spinnyboy.StopAnimating();
}
/// <summary>
/// Wraps an already-loaded Vision Core ML model.
/// </summary>
/// <param name="model">The Vision model this instance delegates to.</param>
private CoreMlModel(VNCoreMLModel model) => _model = model;