private async void Analyze()
        {
            // Obtain the default scene detection analyzer.
            this.analyzer = MLSceneDetectionAnalyzerFactory.Instance.SceneDetectionAnalyzer;

            // Create an MLFrame by using android.graphics.Bitmap.
            // Recommended image size: larger than 224*224.
            Bitmap  originBitmap = BitmapFactory.DecodeResource(this.Resources, Resource.Drawable.superresolution_image);
            MLFrame frame        = new MLFrame.Creator()
                                   .SetBitmap(originBitmap)
                                   .Create();

            try
            {
                // Await the result directly rather than re-reading Task.Result afterwards.
                IList<MLSceneDetection> sceneInfos = await this.analyzer.AnalyseFrameAsync(frame);

                if (sceneInfos != null && sceneInfos.Count != 0)
                {
                    // Analyze success
                    DisplaySuccess(sceneInfos);
                }
                else
                {
                    // Analyze failed: no scene information was returned.
                    Log.Debug(Tag, "Analyze Failed");
                    DisplayFailure();
                }
            }
            catch (Exception ex)
            {
                // Operation failed. Log the full exception (logging only ex.Message
                // loses the stack trace) and surface the failure to the user so the
                // exception path behaves like the ordinary failure path above.
                Log.Error(Tag, ex.ToString());
                DisplayFailure();
            }
        }
// Example #2
        private async void Analyze()
        {
            /**
             * Configure the image segmentation analyzer with a custom MLImageSegmentationSetting.
             *
             * SetExact(): Set the segmentation fine mode; true is the fine segmentation mode,
             *     and false is the speed-priority segmentation mode.
             * SetAnalyzerType(): Set the segmentation mode. When segmenting a static image, this supports
             *     MLImageSegmentationSetting.BodySeg (only segment human body and background)
             *     and MLImageSegmentationSetting.ImageSeg (segment 10 categories of scenes, including human bodies).
             * SetScene(): Set the type of the returned results. This configuration takes effect only in
             *     MLImageSegmentationSetting.BodySeg mode. In MLImageSegmentationSetting.ImageSeg mode,
             *     only pixel-level tagging information is returned.
             *     Supports MLImageSegmentationScene.All (returns all segmentation results:
             *     pixel-level tag information, portrait image with transparent background,
             *     and grayscale image with white portrait and black background),
             *     MLImageSegmentationScene.MaskOnly (returns only pixel-level tag information),
             *     MLImageSegmentationScene.ForegroundOnly (returns only the portrait image with transparent background),
             *     MLImageSegmentationScene.GrayscaleOnly (returns only the grayscale image with white portrait and black background).
             */
            MLImageSegmentationSetting setting = new MLImageSegmentationSetting.Factory()
                                                 .SetExact(false)
                                                 .SetAnalyzerType(MLImageSegmentationSetting.BodySeg)
                                                 .SetScene(MLImageSegmentationScene.All)
                                                 .Create();

            this.analyzer = MLAnalyzerFactory.Instance.GetImageSegmentationAnalyzer(setting);

            // Create an MLFrame by using android.graphics.Bitmap.
            // Recommended image size: larger than 224*224.
            MLFrame mlFrame = new MLFrame.Creator().SetBitmap(this.mBitmapTarget).Create();

            try
            {
                // Await the result directly rather than re-reading Task.Result afterwards.
                MLImageSegmentation imageSegmentationResult = await this.analyzer.AnalyseFrameAsync(mlFrame);

                if (imageSegmentationResult != null)
                {
                    // Analyze success
                    this.DisplaySuccess(imageSegmentationResult);
                }
                else
                {
                    // Analyze failure: no segmentation result was returned.
                    this.DisplayFailure();
                }
            }
            catch (Exception e)
            {
                // Operation failure. Previously the exception was silently swallowed;
                // log it in full so diagnostics are not lost, then show the failure UI.
                Log.Error(Tag, e.ToString());
                this.DisplayFailure();
            }
        }
        private async void RemoteAnalyzer()
        {
            // Create a landmark analyzer.
            //
            // To use the default parameter settings:
            //     analyzer = MLAnalyzerFactory.Instance.RemoteLandmarkAnalyzer;
            //
            // To use customized parameter settings:
            //     SetLargestNumOfReturns: maximum number of recognition results.
            //     SetPatternType: analyzer mode —
            //         MLRemoteLandmarkAnalyzerSetting.SteadyPattern: value 1, the stable mode.
            //         MLRemoteLandmarkAnalyzerSetting.NewestPattern: value 2, the latest mode.
            MLRemoteLandmarkAnalyzerSetting setting = new MLRemoteLandmarkAnalyzerSetting.Factory()
                                                      .SetLargestNumOfReturns(1)
                                                      .SetPatternType(MLRemoteLandmarkAnalyzerSetting.SteadyPattern)
                                                      .Create();

            this.analyzer = MLAnalyzerFactory.Instance.GetRemoteLandmarkAnalyzer(setting);

            // Build an MLFrame from android.graphics.Bitmap.
            // Recommended image size: larger than 640*640.
            MLFrame frame = new MLFrame.Creator().SetBitmap(this.mBitmap).Create();
            Task<IList<MLRemoteLandmark>> analyseTask = this.analyzer.AnalyseFrameAsync(frame);

            try
            {
                IList<MLRemoteLandmark> landmarks = await analyseTask;

                if (landmarks != null)
                {
                    // Analyze success.
                    DisplaySuccess(landmarks);
                }
                else
                {
                    // Analyze failure.
                    Log.Info(Tag, " Analyze failure ");
                }
            }
            catch (Exception e)
            {
                // Operation failure.
                DisplayFailure(e);
            }
        }
// Example #4
        private async void DetectImage(int type)
        {
            // The "original" tab just shows the unprocessed source bitmap.
            if (type == IndexOriginal)
            {
                SetImage(srcBitmap);
                return;
            }

            // Nothing to do until the analyzer has been initialized.
            if (analyzer == null)
            {
                return;
            }

            // Create an MLFrame by using the bitmap.
            MLFrame frame = new MLFrame.Creator().SetBitmap(srcBitmap).Create();

            try
            {
                // Await the result directly rather than re-reading Task.Result afterwards.
                MLTextImageSuperResolution result = await analyzer.AnalyseFrameAsync(frame);

                if (result != null)
                {
                    // Detection success: show the super-resolved bitmap.
                    Toast.MakeText(ApplicationContext, "Success", ToastLength.Short).Show();
                    SetImage(result.Bitmap);
                }
                else
                {
                    // Detection failed: the analyzer produced no result.
                    Log.Debug(Tag, "Detection Failed");
                }
            }
            catch (Exception e)
            {
                // Operation failed. Log the full exception — logging only e.Message
                // discards the stack trace needed to diagnose the failure.
                Log.Error(Tag, e.ToString());
            }
        }