Example #1
        void HandleARTapped()
        {
            if (classifying)
            {
                return;
            }

            var image = arView.Session?.CurrentFrame?.CapturedImage;

            if (image == null)
            {
                Console.WriteLine("NO IMAGE");
                return;
            }

            classifying = true;

            var handler = new VNImageRequestHandler(image, CGImagePropertyOrientation.Up, new VNImageOptions());

            Task.Run(() => {
                handler.Perform(new[] { classificationRequest }, out var error);
                if (error != null)
                {
                    Console.WriteLine($"ERROR PERFORMING REQUEST: {error}");
                }
            });
        }
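The classificationRequest field used by this handler is created elsewhere; a minimal sketch of how it might be built, assuming a compiled Core ML model ships in the app bundle (the resource name "MyClassifier" and the logging are placeholders):

        // Sketch only: builds the classificationRequest referenced above; assumes a
        // compiled model resource named "MyClassifier.mlmodelc" in the app bundle.
        VNCoreMLRequest classificationRequest;

        void SetUpClassificationRequest()
        {
            var modelUrl = NSBundle.MainBundle.GetUrlForResource("MyClassifier", "mlmodelc");
            var mlModel  = MLModel.Create(modelUrl, out NSError modelError);
            var vnModel  = VNCoreMLModel.FromMLModel(mlModel, out NSError visionError);

            classificationRequest = new VNCoreMLRequest(vnModel, (request, error) =>
            {
                classifying = false; // re-arm the tap handler once a request finishes
                var best = request.GetResults <VNClassificationObservation>()?.FirstOrDefault();
                Console.WriteLine($"{best?.Identifier} ({best?.Confidence})");
            });
        }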
        public Task <MachineLearningResult> AnalyzeImageAsync(string mlModel, Stream imageStream)
        {
            if (_machineLearningTask != null)
            {
                _machineLearningTask.TrySetCanceled();
                _classifierRequestHandler.Dispose();
                _classifierRequestHandler = null;
            }
            _machineLearningTask = new TaskCompletionSource <MachineLearningResult>();

            try
            {
                VNImageOptions options = new VNImageOptions();
                _classifierRequestHandler = new VNImageRequestHandler(NSData.FromStream(imageStream), options);
                _classifierRequestHandler.Perform(new VNRequest[] { GetClassificationRequest(mlModel) }, out var err);
                if (err != null)
                {
                    Debug.WriteLine(err);
                    _machineLearningTask.TrySetResult(null);
                }
            }
            catch (Exception error)
            {
                Debug.WriteLine(error);
                _machineLearningTask.TrySetResult(null);
            }

            return(_machineLearningTask.Task);
        }
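GetClassificationRequest(mlModel) is not shown in this example; one plausible shape, assuming its completion handler resolves _machineLearningTask (the MachineLearningResult constructor used here is hypothetical):

        // Sketch only: a request whose callback completes _machineLearningTask.
        // The MachineLearningResult(label, confidence) constructor is assumed.
        VNCoreMLRequest GetClassificationRequest(string mlModel)
        {
            var modelUrl = NSBundle.MainBundle.GetUrlForResource(mlModel, "mlmodelc");
            var vnModel  = VNCoreMLModel.FromMLModel(MLModel.Create(modelUrl, out _), out _);

            return new VNCoreMLRequest(vnModel, (request, error) =>
            {
                var best = request.GetResults <VNClassificationObservation>()?.FirstOrDefault();
                _machineLearningTask.TrySetResult(
                    best == null ? null : new MachineLearningResult(best.Identifier, best.Confidence));
            });
        }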
        /// <summary>
        /// Set Initial Condition
        /// </summary>
        public void ReadAndDisplayFirstFrame(bool performRectanglesDetection, out NSError error)
        {
            var videoReader = VideoReader.Create(this.videoAsset);

            if (videoReader != null)
            {
                var firstFrame = videoReader.NextFrame();
                if (firstFrame != null)
                {
                    List <TrackedPolyRect> firstFrameRects = null;
                    if (performRectanglesDetection)
                    {
                        // Vision Rectangle Detection
                        var imageRequestHandler = new VNImageRequestHandler(firstFrame, videoReader.Orientation, new NSMutableDictionary());

                        var rectangleDetectionRequest = new VNDetectRectanglesRequest(null)
                        {
                            MinimumAspectRatio  = 0.2f,
                            MaximumAspectRatio  = 1f,
                            MinimumSize         = 0.1f,
                            MaximumObservations = 10
                        };

                        imageRequestHandler.Perform(new VNRequest[] { rectangleDetectionRequest }, out NSError performError);
                        if (performError != null)
                        {
                            error = new VisionTrackerProcessorError(VisionTrackerProcessorErrorType.FirstFrameReadFailed);
                            return;
                        }

                        var rectObservations = rectangleDetectionRequest.GetResults <VNRectangleObservation>();
                        if (rectObservations != null && rectObservations.Any())
                        {
                            this.initialRectObservations = rectObservations.ToList();
                            var detectedRects = new List <TrackedPolyRect>();
                            for (var index = 0; index < this.initialRectObservations.Count; index++)
                            {
                                var rectangleObservation = this.initialRectObservations[index];
                                var rectColor            = TrackedObjectsPalette.Color(index);

                                detectedRects.Add(new TrackedPolyRect(rectangleObservation, rectColor));
                            }

                            firstFrameRects = detectedRects;
                        }
                    }

                    error = null;
                    this.Delegate?.DisplayFrame(firstFrame, videoReader.AffineTransform, firstFrameRects);
                }
                else
                {
                    error = new VisionTrackerProcessorError(VisionTrackerProcessorErrorType.FirstFrameReadFailed);
                }
            }
            else
            {
                error = new VisionTrackerProcessorError(VisionTrackerProcessorErrorType.ReaderInitializationFailed);
            }
        }
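A typical call site checks the out-parameter before continuing ("processor" stands for an instance of this class):

        // Illustrative usage of the out-parameter error convention used above.
        processor.ReadAndDisplayFirstFrame(performRectanglesDetection: true, out NSError error);
        if (error != null)
        {
            Console.WriteLine($"First frame setup failed: {error.LocalizedDescription}");
        }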
Example #4
        void HandleImageTapped()
        {
            if (classifying)
            {
                return;
            }

            var picker = new UIImagePickerController {
                AllowsEditing = false,
                SourceType    = UIImagePickerControllerSourceType.Camera
            };

            picker.ModalPresentationStyle = UIModalPresentationStyle.FullScreen;

            picker.FinishedPickingMedia += (s, e) => {
                base.DismissViewController(true, () => {
                    var image     = e.OriginalImage;
                    imgView.Image = image;

                    var ciImage = new CIImage(image);

                    Task.Run(() => {
                        var handler = new VNImageRequestHandler(ciImage, new VNImageOptions());
                        handler.Perform(new[] { classificationRequest }, out var error);
                        if (error != null)
                        {
                            Console.WriteLine($"ERROR PERFORMING REQUEST: {error}");
                        }
                    });
                });
            };

            PresentViewController(picker, true, null);
        }
        private async Task <IReadOnlyList <ImageClassification> > Classify(UIImage source)
        {
            var tcs = new TaskCompletionSource <IEnumerable <ImageClassification> >();

            var request = new VNCoreMLRequest(_model, (response, e) =>
            {
                if (e != null)
                {
                    tcs.SetException(new NSErrorException(e));
                }
                else
                {
                    var results = response.GetResults <VNClassificationObservation>();
                    tcs.SetResult(results.Select(r => new ImageClassification(r.Identifier, r.Confidence)).ToList());
                }
            });

            var buffer         = source.ToCVPixelBuffer(_targetImageSize);
            var requestHandler = new VNImageRequestHandler(buffer, new NSDictionary());

            requestHandler.Perform(new[] { request }, out NSError error);

            // Check the Perform error before awaiting: if the request never ran,
            // the completion handler never fires and the await would hang.
            if (error != null)
            {
                throw new NSErrorException(error);
            }

            var classifications = await tcs.Task;

            return(classifications.OrderByDescending(p => p.Probability)
                   .ToList()
                   .AsReadOnly());
        }
        public async Task <IEnumerable <ImageClassification> > Evaluate(CoreMlInput source)
        {
            var tcs = new TaskCompletionSource <IEnumerable <ImageClassification> >();

            var request = new VNCoreMLRequest(_model, (response, e) =>
            {
                if (e != null)
                {
                    tcs.SetException(new NSErrorException(e));
                }
                else
                {
                    var results = response.GetResults <VNClassificationObservation>();
                    tcs.SetResult(results.Select(r => new ImageClassification(r.Identifier, r.Confidence)).ToList());
                }
            });

            // Pre-process image (scale down)
            var buffer = source.Image.ToCVPixelBuffer(_targetImageSize);

            var requestHandler = new VNImageRequestHandler(buffer, new NSDictionary());

            requestHandler.Perform(new[] { request }, out NSError error);

            // As in Classify above, surface the Perform error before awaiting
            // so a failed request cannot leave the task incomplete.
            if (error != null)
            {
                throw new NSErrorException(error);
            }

            var classifications = await tcs.Task;

            return(classifications);
        }
Example #7
        partial void Analyze_Clicked(UIButton sender)
        {
            //load image
            var handler = new VNImageRequestHandler(pickedImage.CGImage, new VNImageOptions());

            DispatchQueue.DefaultGlobalQueue.DispatchAsync(() =>
            {
                handler.Perform(new VNRequest[] { ClassificationRequest }, out NSError err);
            });
        }
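The dispatched block above discards the Perform error; a variant that logs it instead:

            // Variant of the dispatch above that surfaces the Perform error.
            DispatchQueue.DefaultGlobalQueue.DispatchAsync(() =>
            {
                handler.Perform(new VNRequest[] { ClassificationRequest }, out NSError err);
                if (err != null)
                {
                    Console.WriteLine($"Classification failed: {err.LocalizedDescription}");
                }
            });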
        public void DidFinish(VNDocumentCameraViewController controller, VNDocumentCameraScan scan)
        {
            var pageCount = (int)scan.PageCount;
            var allItems  = new List <List <string> >();

            for (int i = 0; i < pageCount; i++)
            {
                var image = scan.GetImage((nuint)i);
                var imageRequestHandler = new VNImageRequestHandler(image.CGImage, options: new NSDictionary());

                var textRequest = new VNRecognizeTextRequest(new VNRequestCompletionHandler((request, error) =>
                {
                    var results = request.GetResults <VNRecognizedTextObservation>();

                    foreach (var result in results)
                    {
                        var items = new List <string>();

                        foreach (var candidate in result.TopCandidates(100))
                        {
                            items.Add(candidate.String);
                        }

                        allItems.Add(items);
                    }

                    // Note: this runs once per page, so the controller is dismissed as
                    // soon as the first page's request finishes; multi-page scans should
                    // defer dismissal until the last page has been processed.
                    Device.BeginInvokeOnMainThread(() =>
                    {
                        _page.LoadRecognizedTextItems(allItems);
                        DismissViewController(true, null);
                    });
                }));

                switch (_page.TextRecognitionLevel)
                {
                case TextRecognitionLevelEnum.Accurate:
                    textRequest.RecognitionLevel = VNRequestTextRecognitionLevel.Accurate;
                    break;

                case TextRecognitionLevelEnum.Fast:
                    textRequest.RecognitionLevel = VNRequestTextRecognitionLevel.Fast;
                    break;

                default:
                    break;
                }

                textRequest.UsesLanguageCorrection = true;

                DispatchQueue.DefaultGlobalQueue.DispatchAsync(() =>
                {
                    imageRequestHandler.Perform(new VNRequest[] { textRequest }, out NSError error);
                });
            }
        }
Example #9
        public override IScanResult Decode(CVPixelBuffer pixelBuffer)
        {
            var decoder = PerformanceCounter.Start();

            _barcodeResult = new TaskCompletionSource <IScanResult>();
            var handler = new VNImageRequestHandler(pixelBuffer, new VNImageOptions());

            handler.Perform(new VNRequest[] { barcodesRequest }, out NSError error);
            if (error != null)
            {
                // Complete the pending task so the Wait() below cannot block forever.
                _barcodeResult.TrySetException(new NSErrorException(error));
            }
            _barcodeResult.Task.Wait();
            PerformanceCounter.Stop(decoder, "Vision framework Decoder take {0} ms.");
            return(_barcodeResult.Task.Result);
        }
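The barcodesRequest field is defined elsewhere; a minimal sketch, assuming its completion handler resolves _barcodeResult (VisionScanResult is a hypothetical IScanResult implementation):

        // Sketch only: a barcode request whose callback completes _barcodeResult.
        VNDetectBarcodesRequest barcodesRequest => new VNDetectBarcodesRequest((request, error) =>
        {
            var payload = request.GetResults <VNBarcodeObservation>()?.FirstOrDefault()?.PayloadStringValue;
            _barcodeResult.TrySetResult(new VisionScanResult(payload)); // VisionScanResult is hypothetical
        });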
        public void CollectImageData(MediaFile file)
        {
            var imagedata      = NSData.FromStream(file.GetStream());
            var requestHandler = new VNImageRequestHandler(imagedata, new VNImageOptions());

            requestHandler.Perform(ClassificationRequest, out NSError error);


            if (error != null)
            {
                Debug.WriteLine($"Error identifying {error}");
            }
        }
Example #11
        public void CollectImageData(MediaFile file)
        {
            var imagedata = NSData.FromStream(file.GetStream());
            //VNImageOptions is a dictionary container used to hold options for Vision requests
            var requestHandler = new VNImageRequestHandler(imagedata, new VNImageOptions());

            requestHandler.Perform(ClassificationRequest, out NSError error);

            if (error != null)
            {
                Debug.WriteLine($"Error identifying... {error}");
            }
        }
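Perform receives ClassificationRequest directly, so it must already be a VNRequest[]; a minimal sketch (_vnModel is an assumed VNCoreMLModel field):

        // Sketch only: ClassificationRequest as a VNRequest[] property, matching the
        // Perform call above; _vnModel is an assumed VNCoreMLModel field.
        VNRequest[] ClassificationRequest => new VNRequest[]
        {
            new VNCoreMLRequest(_vnModel, (request, error) =>
            {
                var top = request.GetResults <VNClassificationObservation>()?.FirstOrDefault();
                Debug.WriteLine($"{top?.Identifier}: {top?.Confidence}");
            })
        };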
        public void Classify(byte[] bytes)
        {
            // NOTE: compiling the model on every call is expensive; compile once and
            // cache the VNCoreMLModel in production (see the sketch below).
            var modelUrl = NSBundle.MainBundle.GetUrlForResource("people-or-not", "mlmodel");
            var compiledUrl = MLModel.CompileModel(modelUrl, out var error);
            var compiledModel = MLModel.Create(compiledUrl, out error);

            var vnCoreModel = VNCoreMLModel.FromMLModel(compiledModel, out error);

            var classificationRequest = new VNCoreMLRequest(vnCoreModel, HandleVNRequest);

            var data = NSData.FromArray(bytes);
            var handler = new VNImageRequestHandler(data, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
            handler.Perform(new[] { classificationRequest }, out error);
        }
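Since compilation is the expensive step, a common refactoring is to compile once and reuse the wrapped model; a sketch using Lazy<T>:

        // Sketch: compile and wrap the model once, then reuse it across Classify calls.
        static readonly Lazy <VNCoreMLModel> CachedModel = new Lazy <VNCoreMLModel>(() =>
        {
            var modelUrl      = NSBundle.MainBundle.GetUrlForResource("people-or-not", "mlmodel");
            var compiledUrl   = MLModel.CompileModel(modelUrl, out var error);
            var compiledModel = MLModel.Create(compiledUrl, out error);
            return VNCoreMLModel.FromMLModel(compiledModel, out error);
        });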
Example #13
        /// <summary>
        /// Called by `ViewController.OnFrameCaptured` once per frame with the buffer processed by the image-processing pipeline in
        /// `VideoCaptureDelegate.DidOutputSampleBuffer`
        /// </summary>
        /// <param name="buffer">The captured video frame.</param>
        public void OnFrameCaptured(CVPixelBuffer buffer)
        {
            BeginInvokeOnMainThread(() => overlay.Message = "Scanning...");

            // Run the rectangle detector
            var     handler = new VNImageRequestHandler(buffer, new NSDictionary());
            NSError error;

            handler.Perform(new VNRequest[] { rectangleRequest }, out error);
            if (error != null)
            {
                Console.Error.WriteLine(error);
                BeginInvokeOnMainThread(() => overlay.Message = error.ToString());
            }
        }
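rectangleRequest is created elsewhere; a plausible construction, assuming a HandleRectangles(VNRequest, NSError) completion method exists:

        // Sketch only: the rectangle request referenced above, wired to an assumed
        // HandleRectangles completion method.
        VNDetectRectanglesRequest rectangleRequest;

        void SetUpRectangleRequest()
        {
            rectangleRequest = new VNDetectRectanglesRequest(HandleRectangles)
            {
                MaximumObservations = 1 // report only the most prominent rectangle
            };
        }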
Example #14
        public static async Task <MLMultiArray> PlayMN(VNCoreMLModel _VNMLModel, CVPixelBuffer image)
        {
            var tcs = new TaskCompletionSource <MLMultiArray>();

            if (_VNMLModel == null)
            {
                tcs.TrySetCanceled();
                return await tcs.Task; // propagate the cancellation instead of using a null model below
            }

            var request = new VNCoreMLRequest(_VNMLModel, (response, e) =>
            {
                if (e != null)
                {
                    tcs.SetException(new NSErrorException(e));
                }
                else
                {
                    var results = response.GetResults <VNCoreMLFeatureValueObservation>();
                    var r       = results.FirstOrDefault();
                    if (r != null)
                    {
                        var fv = r.FeatureValue.MultiArrayValue;
                        tcs.SetResult(fv);
                        r.FeatureValue.Dispose();
                    }
                    else
                    {
                        tcs.SetCanceled();
                    }
                }
            })
            {
                ImageCropAndScaleOption = VNImageCropAndScaleOption.ScaleFill
            };

            var requestHandler = new VNImageRequestHandler(image, new NSDictionary());

            requestHandler.Perform(new[] { request }, out NSError error);

            // Surface the Perform error before awaiting; otherwise a failed request
            // would leave tcs incomplete and the await would never return.
            if (error != null)
            {
                throw new NSErrorException(error);
            }

            var classifications = await tcs.Task;

            return(classifications);
        }
Example #15
        public Task <ScanResult> ProcessImage(Stream bitmapStream)
        {
            var imageData = NSData.FromStream(bitmapStream);
            var image     = UIImage.LoadFromData(imageData);
            var options   = new VNImageOptions();

            if (image.CGImage == null)
            {
                throw new Exception("No image");
            }

            // TODO: Find a way to make orientation foolproof
            // (Probably convert stream to UIImage which has orientation encoded...)
            var requestHandler =
                new VNImageRequestHandler(image.CGImage, options);

            var completionSource = new TaskCompletionSource <ScanResult>();

            var request = new VNRecognizeTextRequest((vnRequest, error) =>
            {
                var results = vnRequest.GetResults <VNRecognizedTextObservation>();

                var scanResult = new ScanResult();
                foreach (var textObservation in results)
                {
                    var candidate = textObservation.TopCandidates(1).FirstOrDefault();
                    if (candidate != null)
                    {
                        scanResult.Add(GetBlock(candidate, textObservation));
                    }
                }
                completionSource.TrySetResult(scanResult);
            });

            requestHandler.Perform(new VNRequest[] { request }, out var nsError);
            // ReSharper disable once ConstantConditionalAccessQualifier
            if (!string.IsNullOrEmpty(nsError?.Description))
            {
                throw new Exception(nsError.Description);
            }

            return(completionSource.Task);
        }
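Illustrative usage from shared code ("receipt.jpg" and ocrService are placeholders):

            using (var stream = File.OpenRead("receipt.jpg"))
            {
                ScanResult result = await ocrService.ProcessImage(stream);
            }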
Example #16
        public Task <IList <JudgeResult> > DetectAsync(CIImage ciImage)
        {
            var taskSource = new TaskCompletionSource <IList <JudgeResult> >();

            void handleClassification(VNRequest request, NSError error)
            {
                var observations = request.GetResults <VNClassificationObservation>();

                if (observations == null)
                {
                    taskSource.SetException(new Exception("Unexpected result type from VNCoreMLRequest"));
                    return;
                }

                if (observations.Length == 0)
                {
                    taskSource.SetResult(null);
                    return;
                }

                var result = new List <JudgeResult>();

                foreach (var o in observations)
                {
                    result.Add(new JudgeResult()
                    {
                        Label      = o.Identifier,
                        Confidence = o.Confidence
                    });
                }
                taskSource.SetResult(result);
                _callback?.Invoke(result);
            }

            var handler = new VNImageRequestHandler(ciImage, new VNImageOptions());

            DispatchQueue.DefaultGlobalQueue.DispatchAsync(() =>
            {
                handler.Perform(new VNRequest[] { new VNCoreMLRequest(_vnmodel, handleClassification) }, out _);
            });

            return(taskSource.Task);
        }
        private void ClassifyCurrentImage()
        {
            // Most computer-vision tasks are not rotation-agnostic, so it is important to pass in the orientation of the image with respect to the device.
            var orientation = CGImagePropertyOrientationExtensions.ConvertFrom(UIDevice.CurrentDevice.Orientation);

            var requestHandler = new VNImageRequestHandler(this.currentBuffer, orientation, new VNImageOptions());

            visionQueue.DispatchAsync(() =>
            {
                requestHandler.Perform(new VNRequest[] { this.ClassificationRequest() }, out NSError error);
                if (error != null)
                {
                    Console.WriteLine($"Error: Vision request failed with error \"{error}\"");
                }

                // Release the pixel buffer when done, allowing the next buffer to be processed.
                this.currentBuffer.Dispose();
                this.currentBuffer = null;
            });
        }
Example #18
        async void BtnPicture_TouchUpInside(object sender, EventArgs e)
        {
            await CrossMedia.Current.Initialize();

            activityIndication.StartAnimating();

            if (!CrossMedia.Current.IsCameraAvailable || !CrossMedia.Current.IsTakePhotoSupported)
            {
                activityIndication.StopAnimating();
                Debug.WriteLine(":( No camera available.");
                return;
            }

            var file = await CrossMedia.Current.TakePhotoAsync(new Plugin.Media.Abstractions.StoreCameraMediaOptions
            {
                Directory = "Sample",
                Name      = "test.jpg"
            });

            if (file == null)
            {
                activityIndication.StopAnimating();
                return;
            }
            Debug.WriteLine($"File location: {file.Path}");

            var imagedata = NSData.FromStream(file.GetStream());

            imageViewFoodPhoto.Image =
                UIImage.LoadFromData(imagedata);

            CommonClass.imageViewFoodPhotoSave = UIImage.LoadFromData(imagedata);

            var requestHandler = new VNImageRequestHandler(imagedata, new VNImageOptions());

            requestHandler.Perform(ClassificationRequest, out NSError error);

            if (error != null)
            {
                Debug.WriteLine($"Error identifying {error}");
            }
        }
Example #19
        public Task <string> DetectAsync(byte[] image)
        {
            var taskSource = new TaskCompletionSource <string>();

            void handleClassification(VNRequest request, NSError error)
            {
                var observations = request.GetResults <VNClassificationObservation>();

                if (observations == null)
                {
                    taskSource.SetException(exception: new Exception(message: "Unexpected result type from VNCoreMLRequest"));
                    return;
                }

                if (observations.Length == 0)
                {
                    taskSource.SetResult(result: null);
                    return;
                }

                var bestObservation = observations.First();

                taskSource.SetResult(result: bestObservation.Identifier);
            }

            using (var data = NSData.FromArray(buffer: image))
            {
                var ciImage = new CIImage(data: data);
                var handler = new VNImageRequestHandler(image: ciImage, imageOptions: new VNImageOptions());

                DispatchQueue.DefaultGlobalQueue.DispatchAsync(() =>
                {
                    handler.Perform(requests: new VNRequest[] {
                        new VNCoreMLRequest(model: VModel, completionHandler: handleClassification)
                    }, error: out _);
                });
            }
            return(taskSource.Task);
        }
        public void FinishedPickingMedia(UIImagePickerController picker, NSDictionary info)
        {
            // Close the picker
            picker.DismissViewController(true, null);

            // Update UI
            ClassificationLabel.Text = "Analyizing Image...";
            CorrectedImageView.Image = null;

            // Read Image from returned data
            var uiImage = info[UIImagePickerController.OriginalImage] as UIImage;

            if (uiImage == null)
            {
                ShowAlert("Processing Error", "Unable to read image from picker.");
                return;
            }

            // Convert to CIImage
            var ciImage = new CIImage(uiImage);

            if (ciImage == null)
            {
                ShowAlert("Processing Error", "Unable to create required CIImage from UIImage.");
                return;
            }
            InputImage = ciImage.CreateWithOrientation(uiImage.Orientation.ToCIImageOrientation());

            // Show source image
            ImageView.Image = uiImage;

            // Run the rectangle detector, which upon completion runs the ML classifier.
            var handler = new VNImageRequestHandler(ciImage, uiImage.Orientation.ToCGImagePropertyOrientation(), new VNImageOptions());

            DispatchQueue.DefaultGlobalQueue.DispatchAsync(() => {
                NSError error;
                handler.Perform(new VNRequest[] { RectangleRequest }, out error);
            });
        }
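The ToCGImagePropertyOrientation extension maps UIKit orientations to the Exif-style values Vision expects; a sketch of the usual mapping:

        // Sketch: the UIImageOrientation -> CGImagePropertyOrientation mapping
        // assumed by the extension method used above.
        public static CGImagePropertyOrientation ToCGImagePropertyOrientation(this UIImageOrientation orientation)
        {
            switch (orientation)
            {
            case UIImageOrientation.Up:            return CGImagePropertyOrientation.Up;
            case UIImageOrientation.Down:          return CGImagePropertyOrientation.Down;
            case UIImageOrientation.Left:          return CGImagePropertyOrientation.Left;
            case UIImageOrientation.Right:         return CGImagePropertyOrientation.Right;
            case UIImageOrientation.UpMirrored:    return CGImagePropertyOrientation.UpMirrored;
            case UIImageOrientation.DownMirrored:  return CGImagePropertyOrientation.DownMirrored;
            case UIImageOrientation.LeftMirrored:  return CGImagePropertyOrientation.LeftMirrored;
            case UIImageOrientation.RightMirrored: return CGImagePropertyOrientation.RightMirrored;
            default:                               return CGImagePropertyOrientation.Up;
            }
        }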
Example #21
        public virtual void DidOutputSampleBuffer(AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
        {
            CVPixelBuffer pixelBuffer = null;

            try
            {
                pixelBuffer = sampleBuffer.GetImageBuffer() as CVPixelBuffer;
                if (pixelBuffer == null)
                {
                    return;
                }

                // TODO See if this causes issues disposing directly after
                bufferOutputhandler.Invoke(pixelBuffer);
            }
            catch (Exception x)
            {
                Console.WriteLine(x.Message);
            }
            finally
            {
                if (sampleBuffer != null)
                {
                    sampleBuffer.Dispose();
                }

                if (pixelBuffer != null)
                {
                    pixelBuffer.Dispose();
                }
            }
        }
Example #22
        public Task <FriesOrNotFriesTag> DetectAsync(Stream photo)
        {
            var taskCompletionSource = new TaskCompletionSource <FriesOrNotFriesTag>();

            void handleClassification(VNRequest request, NSError error)
            {
                var observations = request.GetResults <VNClassificationObservation>();

                if (observations == null)
                {
                    taskCompletionSource.SetException(new Exception("Unexpected result type from VNCoreMLRequest"));
                    return;
                }

                if (!observations.Any())
                {
                    taskCompletionSource.SetResult(FriesOrNotFriesTag.None);
                    return;
                }

                var best = observations.First();

                taskCompletionSource.SetResult((FriesOrNotFriesTag)Enum.Parse(typeof(FriesOrNotFriesTag), best.Identifier));
            }

            using (var data = NSData.FromStream(photo))
            {
                var ciImage = new CIImage(data);
                var handler = new VNImageRequestHandler(ciImage, new VNImageOptions());
                DispatchQueue.DefaultGlobalQueue.DispatchAsync(() =>
                {
                    handler.Perform(new VNRequest[] { new VNCoreMLRequest(_model, handleClassification) }, out var _);
                });
            }

            return(taskCompletionSource.Task);
        }
Example #23
        public void DidOutputSampleBuffer(AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
        {
            try
            {
                var currentDate = DateTime.Now;
                //Console.WriteLine("DidOutputSampleBuffer: " + currentDate + " " + lastAnalysis + " " + currentDate.Subtract(lastAnalysis).Milliseconds);
                // control the pace of the machine vision to protect battery life
                if (currentDate - lastAnalysis >= pace)
                {
                    lastAnalysis = currentDate;
                }
                else
                {
                    //Console.WriteLine("-- skip --");
                    return;                     // don't run the classifier more often than we need
                }
                // keep track of performance and log the frame rate
                if (trackPerformance)
                {
                    frameCount = frameCount + 1;
                    if (frameCount % framesPerSample == 0)
                    {
                        var diff = currentDate.Subtract(startDate);
                        if (diff.Seconds > 0)
                        {
                            if (pace > TimeSpan.Zero)
                            {
                                Console.WriteLine("WARNING: Frame rate of image classification is being limited by \"pace\" setting. Set to 0.0 for fastest possible rate.");
                            }
                        }
                        Console.WriteLine($"{diff.Seconds / framesPerSample}s per frame (average");
                    }
                    startDate = currentDate;
                }

                // Crop and resize the image data.
                // Note, this uses a Core Image pipeline that could be appended with other pre-processing.
                // If we don't want to do anything custom, we can remove this step and let the Vision framework handle
                // crop and resize as long as we are careful to pass the orientation properly.
                using (var croppedBuffer = CroppedSampleBuffer(sampleBuffer, targetImageSize))
                {
                    if (croppedBuffer == null)
                    {
                        return;
                    }
                    try
                    {
                        VNImageOptions options = new VNImageOptions();
                        classifierRequestHandler?.Dispose(); // release the handler created for the previous frame
                        classifierRequestHandler = new VNImageRequestHandler(croppedBuffer, options);
                        NSError err;
                        classifierRequestHandler.Perform(ClassificationRequest, out err);
                        if (err != null)
                        {
                            Console.WriteLine(err);
                        }
                    }
                    catch (Exception error)
                    {
                        Console.WriteLine(error);
                    }
                }
            }
            finally
            {
                sampleBuffer.Dispose();
            }
        }
        private void HandleRectangles(VNRequest request, NSError error)
        {
            var observations = request.GetResults <VNRectangleObservation>();

            if (observations == null)
            {
                ShowAlert("Processing Error", "Unexpected result type from VNDetectRectanglesRequest.");
                return;
            }
            if (observations.Length < 1)
            {
                DispatchQueue.MainQueue.DispatchAsync(() => {
                    ClassificationLabel.Text = "No rectangles detected.";
                });
                return;
            }
            var detectedRectangle = observations[0];
            var imageSize         = InputImage.Extent.Size;

            // Verify detected rectangle is valid.
            var boundingBox = detectedRectangle.BoundingBox.Scaled(imageSize);

            if (!InputImage.Extent.Contains(boundingBox))
            {
                DispatchQueue.MainQueue.DispatchAsync(() => {
                    ClassificationLabel.Text = "Invalid rectangle detected.";
                });
                return;
            }

            // Rectify the detected image and reduce it to inverted grayscale for applying model.
            var topLeft     = detectedRectangle.TopLeft.Scaled(imageSize);
            var topRight    = detectedRectangle.TopRight.Scaled(imageSize);
            var bottomLeft  = detectedRectangle.BottomLeft.Scaled(imageSize);
            var bottomRight = detectedRectangle.BottomRight.Scaled(imageSize);

            var correctedImage = InputImage.ImageByCroppingToRect(boundingBox);

            var fp1 = new Dictionary <string, CGPoint>()
            {
                { "inputTopLeft", topLeft },
                { "inputTopRight", topRight },
                { "inputBottomLeft", bottomLeft },
                { "inputBottomRight", bottomRight }
            };

            correctedImage = correctedImage.CreateByFiltering("CIPerspectiveCorrection", fp1.ToNSDictionary());

            var fp2 = new Dictionary <NSString, NSNumber>()
            {
                { CIFilterInputKey.Saturation, new NSNumber(0) },
                { CIFilterInputKey.Contrast, new NSNumber(32) }
            };

            correctedImage = correctedImage.CreateByFiltering("CIColorControls", fp2.ToNSDictionary());

            var fp3 = new Dictionary <NSString, NSNumber>();

            correctedImage = correctedImage.CreateByFiltering("CIColorInvert", fp3.ToNSDictionary());

            // Show the pre-processed image
            DispatchQueue.MainQueue.DispatchAsync(() =>
            {
                ClassificationLabel.Text = "Selected First Rectangle";
                CorrectedImageView.Image = new UIImage(correctedImage);
            });

            // Run the Core ML MNIST classifier -- results in handleClassification method
            var handler = new VNImageRequestHandler(correctedImage, new VNImageOptions());

            DispatchQueue.DefaultGlobalQueue.DispatchAsync(() => {
                NSError err;
                handler.Perform(new VNRequest[] { ClassificationRequest }, out err);
            });
        }
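Vision reports rectangle corners in normalized (0-1) coordinates; the Scaled helpers above convert them to image space. A sketch of what those extensions typically look like:

        // Sketch: the Scaled extensions assumed above, mapping Vision's normalized
        // (0..1) coordinates into pixel space.
        public static CGPoint Scaled(this CGPoint point, CGSize size) =>
            new CGPoint(point.X * size.Width, point.Y * size.Height);

        public static CGRect Scaled(this CGRect rect, CGSize size) =>
            new CGRect(rect.X * size.Width,
                       rect.Y * size.Height,
                       rect.Width * size.Width,
                       rect.Height * size.Height);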
Example #25
        public async Task <Tuple <float[], float[]> > PlayMNSSD(object pixelBuffer, int labelsCount, int bbCount)
        {
            var image = pixelBuffer as CVPixelBuffer;

            PreprocessTime = TimeSpan.FromTicks(0);

            var startInferTime = DateTimeOffset.UtcNow;

            var tcs = new TaskCompletionSource <MLMultiArray[]>();

            if (_VNMLModel == null)
            {
                tcs.TrySetCanceled();
                await tcs.Task; // throws TaskCanceledException instead of using a null model below
            }

            var request = new VNCoreMLRequest(_VNMLModel, (response, e) =>
            {
                if (e != null)
                {
                    tcs.SetException(new NSErrorException(e));
                }
                else
                {
                    var results = response.GetResults <VNCoreMLFeatureValueObservation>();
                    var probs   = results[0].FeatureValue;
                    var bboxes  = results[1].FeatureValue;
                    if (bboxes != null && probs != null)
                    {
                        tcs.SetResult(new MLMultiArray[] { probs.MultiArrayValue, bboxes.MultiArrayValue });
                        bboxes.Dispose();
                        probs.Dispose();
                    }
                    else
                    {
                        tcs.SetCanceled();
                    }
                }
            });

            request.ImageCropAndScaleOption = VNImageCropAndScaleOption.ScaleFill;

            var requestHandler = new VNImageRequestHandler(image, new NSDictionary());

            requestHandler.Perform(new[] { request }, out NSError error);

            // Check the Perform error before awaiting so a failed request
            // cannot leave tcs incomplete.
            if (error != null)
            {
                throw new NSErrorException(error);
            }

            var outs = await tcs.Task;

            InferenceTime = DateTimeOffset.UtcNow - startInferTime;

            // Debug.WriteLine($"MNSSD infer: {InferenceTime.TotalMilliseconds}");

            // ------------------------------------------- //

            var startPostprocTime = DateTimeOffset.UtcNow;

            var outProbs  = new double[bbCount * labelsCount];
            var outBBoxes = new double[bbCount * 4];

            Marshal.Copy(outs[0].DataPointer, outProbs, 0, outProbs.Length);
            Marshal.Copy(outs[1].DataPointer, outBBoxes, 0, outBBoxes.Length);
            outs[0].Dispose();
            outs[1].Dispose();

            float[] probsAll  = Array.ConvertAll(outProbs, x => (float)x);
            float[] bboxesAll = Array.ConvertAll(outBBoxes, x => (float)x);

            PostprocessTime = DateTimeOffset.UtcNow - startPostprocTime;

            // Debug.WriteLine($"MNSSD postproc: {InferenceTime.TotalMilliseconds}");

            // ------------------------------------------- //

            return(Tuple.Create(probsAll, bboxesAll));
        }
        private async void PickCamera()
        {
            await CrossMedia.Current.Initialize();

            if (!CrossMedia.Current.IsCameraAvailable ||
                !CrossMedia.Current.IsTakePhotoSupported || !CrossMedia.Current.IsPickPhotoSupported)
            {
                UserDialogs.Instance.Alert("Device options not supported.", null, "OK");
                return;
            }

            Console.WriteLine("Pcking Photo..");

            var file = await CrossMedia.Current.PickPhotoAsync();

            if (file == null)
            {
                UserDialogs.Instance.Alert("You didn't pick a photo.", null, "OK");
                return;
            }

            Stream s = file.GetStream();

            byte[] result = null;
            var    buffer = new byte[16 * 1024];

            using (MemoryStream ms = new MemoryStream())
            {
                int read;
                while ((read = s.Read(buffer, 0, buffer.Length)) > 0)
                {
                    ms.Write(buffer, 0, read);
                }
                result = ms.ToArray();
            }

            // Update the UI, then run the ML model
            SEEFOOD.Hidden             = true;
            stat.Hidden                = false;
            selectCamRollButton.Hidden = true;
            takePhotoButton.Hidden     = true;
            image.Hidden               = false;
            spinnyboy.Hidden           = false;
            this.hotdogLbl.Hidden      = true;
            this.ramenLbl.Hidden       = true;
            this.ramenOrHotdog.Hidden  = true;
            spinnyboy.StartAnimating();
            returnToMenu.Hidden  = false;
            this.stat.Text       = "Analyzing...";
            this.stat.TextColor  = UIKit.UIColor.Black;
            showDebugInfo.Hidden = false;
            var data = NSData.FromArray(result);

            image.Image = UIImage.LoadFromData(data);
            await Task.Delay(1000);

            //ML

            Console.WriteLine("Selected: " + ViewController.Type.ToString());

            //First we check what type of thing we have here
            if (ViewController.Type.Equals("Hotdog"))
            {
                var     assetPath             = NSBundle.MainBundle.GetUrlForResource("model", "mlmodel");
                var     transform             = MLModel.CompileModel(assetPath, out NSError compileErr);
                MLModel model                 = MLModel.Create(transform, out NSError createErr);
                var     vnModel               = VNCoreMLModel.FromMLModel(model, out NSError modelErr);
                var     ciImage               = new CIImage(image.Image);
                var     classificationRequest = new VNCoreMLRequest(vnModel);

                //just do it
                var handler = new VNImageRequestHandler(ciImage, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
                handler.Perform(new[] { classificationRequest }, out NSError perfError);
                var results = classificationRequest.GetResults <VNClassificationObservation>();
                var thing   = results[0];
                Console.WriteLine("Hotdog OUT " + thing.Identifier);
                switch (thing.Identifier)
                {
                case "hotdog":
                    if (thing.Confidence > 0.85f)
                    {
                        this.stat.Text          = "✅ Hotdog";
                        this.stat.TextColor     = UIKit.UIColor.Green;
                        this.stat.TextAlignment = UITextAlignment.Center;
                        spinnyboy.Hidden        = true;
                        spinnyboy.StopAnimating();
                    }
                    else
                    {
                        this.stat.Text          = "❌ Not Hotdog";
                        this.stat.TextColor     = UIKit.UIColor.Red;
                        this.stat.TextAlignment = UITextAlignment.Center;
                        spinnyboy.Hidden        = true;
                        spinnyboy.StopAnimating();
                    }

                    break;

                case "nothotdog":
                    this.stat.Text          = "❌ Not Hotdog";
                    this.stat.TextColor     = UIKit.UIColor.Red;
                    this.stat.TextAlignment = UITextAlignment.Center;
                    spinnyboy.Hidden        = true;
                    spinnyboy.StopAnimating();
                    break;
                }
                this.confidence = thing.Confidence;
                Vibration.Vibrate(500);
            }
            else
            {
                NSUrl modelPath = NSBundle.MainBundle.GetUrlForResource("Ramen", "mlmodel");
                if (modelPath == null)
                {
                    Console.WriteLine("Ramen model not found in the bundle.");
                }
                var     transform             = MLModel.CompileModel(modelPath, out NSError compileErr);
                MLModel model                 = MLModel.Create(transform, out NSError createErr);
                var     vnModel               = VNCoreMLModel.FromMLModel(model, out NSError modelErr);
                var     ciImage               = new CIImage(image.Image);
                var     classificationRequest = new VNCoreMLRequest(vnModel);

                //just do it
                var handler = new VNImageRequestHandler(ciImage, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
                handler.Perform(new[] { classificationRequest }, out NSError perfError);
                var results = classificationRequest.GetResults <VNClassificationObservation>();
                var thing   = results[0];
                Console.WriteLine("Ramen OUT " + thing.Identifier);
                switch (thing.Identifier)
                {
                case "ramen":
                    if (thing.Confidence > 0.85f)
                    {
                        this.stat.Text          = "✅ Ramen";
                        this.stat.TextColor     = UIKit.UIColor.Green;
                        this.stat.TextAlignment = UITextAlignment.Center;
                        spinnyboy.Hidden        = true;
                        spinnyboy.StopAnimating();
                    }
                    else
                    {
                        this.stat.Text          = "❌ Not Ramen";
                        this.stat.TextColor     = UIKit.UIColor.Red;
                        this.stat.TextAlignment = UITextAlignment.Center;
                        spinnyboy.Hidden        = true;
                        spinnyboy.StopAnimating();
                    }

                    break;

                case "notramen":
                    this.stat.Text          = "❌ Not Ramen";
                    this.stat.TextColor     = UIKit.UIColor.Red;
                    this.stat.TextAlignment = UITextAlignment.Center;
                    spinnyboy.Hidden        = true;
                    spinnyboy.StopAnimating();
                    break;
                }
                this.confidence = thing.Confidence;
                Vibration.Vibrate(500);
            }
        }
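The two branches above duplicate the compile-and-classify pipeline; a sketch of a shared helper (the name and shape are illustrative):

        // Sketch: factor the duplicated compile-and-classify pipeline into one helper.
        static VNClassificationObservation ClassifyTopResult(string resource, CIImage ciImage)
        {
            var modelUrl    = NSBundle.MainBundle.GetUrlForResource(resource, "mlmodel");
            var compiledUrl = MLModel.CompileModel(modelUrl, out NSError compileErr);
            var model       = MLModel.Create(compiledUrl, out NSError createErr);
            var vnModel     = VNCoreMLModel.FromMLModel(model, out NSError modelErr);

            var request = new VNCoreMLRequest(vnModel);
            var handler = new VNImageRequestHandler(ciImage, ImageIO.CGImagePropertyOrientation.Up, new VNImageOptions());
            handler.Perform(new[] { request }, out NSError perfErr);

            return request.GetResults <VNClassificationObservation>()?.FirstOrDefault();
        }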