Example #1
        private async void Analyzer()
        {
            // Use default parameter settings.
            this.analyzer = MLAnalyzerFactory.Instance.FaceAnalyzer;
            // Create an MLFrame by using the bitmap. Recommended image size: larger than 320 x 320 pixels and smaller than 1920 x 1920 pixels.
            MLFrame frame = MLFrame.FromBitmap(this.mBitmap);

            // Call the AnalyseFrameAsync method to perform face detection
            System.Threading.Tasks.Task <IList <MLFace> > faceAnalyseTask = this.analyzer.AnalyseFrameAsync(frame);
            try
            {
                await faceAnalyseTask;

                if (faceAnalyseTask.IsCompleted && faceAnalyseTask.Result != null)
                {
                    IList <MLFace> faces = faceAnalyseTask.Result;
                    if (faces.Count > 0)
                    {
                        DisplaySuccess(faces.ElementAt(0));
                    }
                }
                else
                {
                    DisplayFailure();
                }
            }
            catch (Exception e)
            {
                //Operation failed.
                DisplayFailure();
            }
        }
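The face example does not show how the analyzer is released once detection is finished. Below is a minimal sketch of that step; it assumes the binding exposes a Stop() method on the analyzer (mirroring stop() in the underlying SDK), and the ReleaseAnalyzer name is not taken from the original sample.
        private void ReleaseAnalyzer()
        {
            if (this.analyzer == null)
            {
                return;
            }
            try
            {
                // Assumption: Stop() releases the detection resources, as stop() does in the underlying SDK.
                this.analyzer.Stop();
            }
            catch (Exception e)
            {
                Log.Error(Tag, "Failed to stop the analyzer: " + e.Message);
            }
        }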
Example #2
        /// <summary>
        /// Image classification on the device
        /// </summary>
        private async void LocalAnalyzer()
        {
            // Use customized parameter settings for device-based recognition.
            MLLocalClassificationAnalyzerSetting deviceSetting =
                new MLLocalClassificationAnalyzerSetting.Factory().SetMinAcceptablePossibility(0.8f).Create();

            this.analyzer = MLAnalyzerFactory.Instance.GetLocalImageClassificationAnalyzer(deviceSetting);
            // Create an MLFrame by using Android.Graphics.Bitmap.
            Bitmap  bitmap = BitmapFactory.DecodeResource(Resources, Resource.Drawable.classification_image);
            MLFrame frame  = MLFrame.FromBitmap(bitmap);

            Task <IList <MLImageClassification> > taskLocalAnalyzer = this.analyzer.AnalyseFrameAsync(frame);

            try
            {
                await taskLocalAnalyzer;

                if (taskLocalAnalyzer.IsCompleted && taskLocalAnalyzer.Result != null)
                {
                    //Analyze success
                    var classifications = taskLocalAnalyzer.Result;
                    DisplaySuccess(classifications);
                }
                else
                {
                    //Analyze failure
                    Log.Debug(Tag, " Local analyze failed");
                }
            }
            catch (Exception e)
            {
                //Operation failed
                DisplayFailure(e);
            }
        }
Example #3
        private void CreateAnalyzer()
        {
            // Create an MLFrame by using the bitmap.
            Bitmap originBitmap = BitmapFactory.DecodeResource(this.Resources, Resource.Drawable.skeleton_image);

            // Get the target width and height (portrait only).
            int maxHeight   = ((View)previewView.Parent).Height;
            int targetWidth = ((View)previewView.Parent).Width;

            // Determine how much to scale down the image
            float scaleFactor =
                Math.Max(
                    (float)originBitmap.Width / (float)targetWidth,
                    (float)originBitmap.Height / (float)maxHeight);

            Bitmap resizedBitmap =
                Bitmap.CreateScaledBitmap(
                    originBitmap,
                    (int)(originBitmap.Width / scaleFactor),
                    (int)(originBitmap.Height / scaleFactor),
                    true);

            mFrame   = new MLFrame.Creator().SetBitmap(resizedBitmap).Create();
            analyzer = MLSkeletonAnalyzerFactory.Instance.SkeletonAnalyzer;
        }
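CreateAnalyzer() only prepares the frame and the skeleton analyzer; the analysis step itself is not shown. The sketch below mirrors the AnalyseFrameAsync pattern used in the other examples. The result type IList<MLSkeleton> and the DisplaySuccess/DisplayFailure helpers are assumptions, not taken from the original sample.
        private async void Analyze()
        {
            // Assumption: the skeleton analyzer follows the same async pattern as the other analyzers.
            Task<IList<MLSkeleton>> skeletonTask = this.analyzer.AnalyseFrameAsync(this.mFrame);
            try
            {
                await skeletonTask;

                if (skeletonTask.IsCompleted && skeletonTask.Result != null && skeletonTask.Result.Count > 0)
                {
                    // Analyze success: one MLSkeleton per detected person.
                    this.DisplaySuccess(skeletonTask.Result);
                }
                else
                {
                    // Analyze failure.
                    this.DisplayFailure();
                }
            }
            catch (Exception e)
            {
                // Operation failure.
                Log.Error(Tag, e.Message);
            }
        }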
Example #4
        /// <summary>
        /// Create analyzer
        /// </summary>
        private void CreateAnalyzer()
        {
            // Create an MLFrame by using the bitmap.
            Bitmap originBitmap = BitmapFactory.DecodeResource(this.Resources, Resource.Drawable.hand);

            // Get the target width and height (portrait only).
            int maxHeight   = ((View)mPreviewView.Parent).Height;
            int targetWidth = ((View)mPreviewView.Parent).Width;
            // Determine how much to scale down the image.
            float scaleFactor = Math.Max(
                (float)originBitmap.Width / (float)targetWidth,
                (float)originBitmap.Height / (float)maxHeight);

            Bitmap resizedBitmap = Bitmap.CreateScaledBitmap(
                originBitmap,
                (int)(originBitmap.Width / scaleFactor),
                (int)(originBitmap.Height / scaleFactor),
                true);

            mlFrame = new MLFrame.Creator().SetBitmap(resizedBitmap).Create();
            //Create analyzer setting.
            MLHandKeypointAnalyzerSetting setting =
                new MLHandKeypointAnalyzerSetting.Factory()
                .SetMaxHandResults(2)
                .SetSceneType(MLHandKeypointAnalyzerSetting.TypeAll)
                .Create();

            //Obtain analyzer instance with custom configuration.
            this.mAnalyzer = MLHandKeypointAnalyzerFactory.Instance.GetHandKeypointAnalyzer(setting);
        }
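As with the skeleton example, only the setup is shown here. A minimal sketch of the corresponding analysis call follows, again assuming the common AnalyseFrameAsync pattern; the result type IList<MLHandKeypoints> and the display helpers are assumptions rather than code from the original sample.
        private async void Analyze()
        {
            Task<IList<MLHandKeypoints>> handTask = this.mAnalyzer.AnalyseFrameAsync(this.mlFrame);
            try
            {
                await handTask;

                if (handTask.IsCompleted && handTask.Result != null && handTask.Result.Count > 0)
                {
                    // Analyze success: one MLHandKeypoints entry per detected hand (at most two, per SetMaxHandResults(2)).
                    this.DisplaySuccess(handTask.Result);
                }
                else
                {
                    // Analyze failure.
                    this.DisplayFailure();
                }
            }
            catch (Exception e)
            {
                // Operation failure.
                Log.Error(Tag, e.Message);
            }
        }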
Example #5
        /// <summary>
        /// Perform Document Skew Correction operation
        /// </summary>
        private async void Analyze()
        {
            // Create the setting.
            MLDocumentSkewCorrectionAnalyzerSetting setting = new MLDocumentSkewCorrectionAnalyzerSetting
                                                              .Factory()
                                                              .Create();

            // Get the analyzer.
            this.analyzer = MLDocumentSkewCorrectionAnalyzerFactory.Instance.GetDocumentSkewCorrectionAnalyzer(setting);

            // Create the bitmap.
            this.bitmap = BitmapFactory.DecodeResource(this.Resources, Resource.Drawable.document_correct_image);

            // Create an MLFrame by using the bitmap.
            this.mlFrame = new MLFrame.Creator().SetBitmap(this.bitmap).Create();

            Task <MLDocumentSkewDetectResult> detectTask = this.analyzer.DocumentSkewDetectAsync(this.mlFrame);

            try
            {
                await detectTask;

                if (detectTask.IsCompleted && detectTask.Result != null)
                {
                    // Analyze success.
                    var   detectResult = detectTask.Result;
                    Point leftTop      = detectResult.LeftTopPosition;
                    Point rightTop     = detectResult.RightTopPosition;
                    Point leftBottom   = detectResult.LeftBottomPosition;
                    Point rightBottom  = detectResult.RightBottomPosition;

                    IList <Point> coordinates = new List <Point>();
                    coordinates.Add(leftTop);
                    coordinates.Add(rightTop);
                    coordinates.Add(leftBottom);
                    coordinates.Add(rightBottom);

                    this.SetDetectData(new MLDocumentSkewCorrectionCoordinateInput(coordinates));
                    this.RefineImg();
                }
                else
                {
                    // Analyze failure.
                    this.DisplayFailure();
                }
            }
            catch (Exception e)
            {
                // Operation failure.
                Log.Info(Tag, " Operation failure: " + e.Message);
                this.DisplayFailure();
            }
        }
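RefineImg() is referenced above but not shown. Below is a hedged sketch of the correction step, by analogy with DocumentSkewDetectAsync; the DocumentSkewCorrectAsync method, the Corrected bitmap property, and the imageView and input fields are assumptions, so check the actual binding before relying on these names.
        private async void RefineImg()
        {
            // Assumption: 'input' is the MLDocumentSkewCorrectionCoordinateInput stored by SetDetectData().
            Task<MLDocumentSkewCorrectionResult> correctTask =
                this.analyzer.DocumentSkewCorrectAsync(this.mlFrame, this.input);
            try
            {
                await correctTask;

                if (correctTask.IsCompleted && correctTask.Result != null)
                {
                    // Assumption: Corrected holds the deskewed bitmap.
                    this.imageView.SetImageBitmap(correctTask.Result.Corrected);
                }
                else
                {
                    this.DisplayFailure();
                }
            }
            catch (Exception e)
            {
                Log.Info(Tag, " Operation failure: " + e.Message);
                this.DisplayFailure();
            }
        }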
Example #6
        /// <summary>
        /// Performs remote analyze action
        /// </summary>
        private async void RemoteAnalyzer()
        {
            // Set the list of languages to be recognized.
            IList <string> languageList = new List <string>();

            languageList.Add("zh");
            languageList.Add("en");
            // Create a document analyzer. You can customize the analyzer by using the
            // MLDocumentSetting parameter shown below.
            MLDocumentSetting setting = new MLDocumentSetting.Factory()
                                        .SetBorderType(MLRemoteTextSetting.Arc)
                                        .SetLanguageList(languageList)
                                        .Create();

            this.analyzer = MLAnalyzerFactory.Instance.GetRemoteDocumentAnalyzer(setting);
            // Create a document analyzer that uses the default configuration.
            // analyzer = MLAnalyzerFactory.Instance.RemoteDocumentAnalyzer;

            Bitmap bitmap = BitmapFactory.DecodeResource(Resources, Resource.Drawable.document_image);
            // Create an MLFrame by using Android.Graphics.Bitmap.
            MLFrame frame = MLFrame.FromBitmap(bitmap);

            // Pass the MLFrame object to the AnalyseFrameAsync method for document recognition.
            Task <MLDocument> task = this.analyzer.AnalyseFrameAsync(frame);

            try
            {
                await task;

                if (task.IsCompleted && task.Result != null)
                {
                    // Analyze success.
                    var document = task.Result;
                    this.DisplaySuccess(document);
                }
                else
                {
                    // Analyze failure.
                    Log.Info(Tag, " Analyze failure ");
                }
            }
            catch (Exception e)
            {
                // Operation failure.
                Log.Info(Tag, " Operation failure: " + e.Message);
                this.DisplayFailure(e);
            }
        }
Example #7
        /// <summary>
        /// Text recognition on the cloud. To use the cloud text analyzer,
        /// you need to apply for an agconnect-services.json file
        /// on the HUAWEI Developer Alliance website (https://developer.huawei.com/consumer/en/doc/development/HMS-Guides/ml-add-agc)
        /// and add agconnect-services.json to the Assets folder of the project.
        /// </summary>
        private async void RemoteAnalyzer()
        {
            // Set the list of languages to be recognized.
            IList <string> languageList = new List <string>();

            languageList.Add("zh");
            languageList.Add("en");
            // Create an analyzer. You can customize the analyzer by creating an MLRemoteTextSetting.
            MLRemoteTextSetting setting =
                new MLRemoteTextSetting.Factory()
                .SetTextDensityScene(MLRemoteTextSetting.OcrCompactScene)
                .SetLanguageList(languageList)
                .SetBorderType(MLRemoteTextSetting.Arc)
                .Create();

            this.analyzer = MLAnalyzerFactory.Instance.GetRemoteTextAnalyzer(setting);
            // Use default parameter settings.
            //analyzer = MLAnalyzerFactory.Instance.RemoteTextAnalyzer;

            // Create an MLFrame by using Android.Graphics.Bitmap.
            Bitmap  bitmap = BitmapFactory.DecodeResource(this.Resources, Resource.Drawable.text_image);
            MLFrame frame  = MLFrame.FromBitmap(bitmap);

            Task <MLText> task = this.analyzer.AnalyseFrameAsync(frame);

            try
            {
                await task;

                if (task.IsCompleted && task.Result != null)
                {
                    // Analyze success.
                    var result = task.Result;
                    this.RemoteDisplaySuccess(result);
                }
                else
                {
                    // Analyze failure.
                    Log.Info(Tag, " Analyze failure ");
                }
            }
            catch (Exception e)
            {
                // Operation failure.
                Log.Info(Tag, " Operation failure: " + e.Message);
                this.DisplayFailure(e);
            }
        }
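Before any of the cloud analyzers (document, text, product search) can be used, the app must be authenticated with the key from agconnect-services.json. A hedged sketch of that setup is below; it assumes the binding exposes MLApplication.Instance.SetApiKey, mirroring MLApplication.getInstance().setApiKey(...) in the Java SDK, and the key string is a placeholder.
        private void SetApiKey()
        {
            // Assumption: the binding mirrors MLApplication.getInstance().setApiKey(...) from the Java SDK.
            // Replace the placeholder with the api_key value from agconnect-services.json.
            MLApplication.Instance.SetApiKey("your-api-key");
        }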
Example #8
        private async void RemoteAnalyze()
        {
            // Use customized parameter settings for cloud-based recognition.
            MLRemoteProductVisionSearchAnalyzerSetting setting =
                new MLRemoteProductVisionSearchAnalyzerSetting.Factory()
                // Set the maximum number of products that can be returned.
                .SetLargestNumOfReturns(MaxResults)
                .SetProductSetId("vmall")
                .SetRegion(MLRemoteProductVisionSearchAnalyzerSetting.RegionDrChina)
                .Create();

            this.analyzer = MLAnalyzerFactory.Instance.GetRemoteProductVisionSearchAnalyzer(setting);
            // Create an MLFrame by using the bitmap.
            MLFrame frame = MLFrame.FromBitmap(bitmap);
            Task <IList <MLProductVisionSearch> > task = this.analyzer.AnalyseFrameAsync(frame);

            try
            {
                await task;

                if (task.IsCompleted && task.Result != null)
                {
                    // Analyze success.
                    var productVisionSearchList = task.Result;
                    if (productVisionSearchList.Count != 0)
                    {
                        Toast.MakeText(this, "Product detected successfully", ToastLength.Long).Show();
                        this.DisplaySuccess(productVisionSearchList);
                    }
                    else
                    {
                        Toast.MakeText(this, "Product not found", ToastLength.Long);
                    }
                }
                else
                {
                    // Analyze failure.
                    Log.Debug(Tag, " remote analyze failed");
                }
            }
            catch (System.Exception e)
            {
                // Operation failure.
                this.DisplayFailure(e);
            }
        }
Example #9
        private async void Analyze()
        {
            // Create a 3D face analyzer. You can customize the analyzer by using the ML3DFaceAnalyzerSetting parameter.
            ML3DFaceAnalyzerSetting setting = new ML3DFaceAnalyzerSetting.Factory()
                                              // Performance preference:
                                              // MLFaceAnalyzerSetting.TypePrecision: precision preference mode.
                                              // Detects more faces and is more precise for key points and contours, but runs slower.
                                              // MLFaceAnalyzerSetting.TypeSpeed: speed preference mode.
                                              // Detects fewer faces and is less precise for key points and contours, but runs faster.
                                              .SetPerformanceType(MLFaceAnalyzerSetting.TypePrecision)
                                              .SetTracingAllowed(false)
                                              .Create();

            this.analyzer = MLAnalyzerFactory.Instance.Get3DFaceAnalyzer(setting);
            // Create an MLFrame by using the bitmap. Recommended image size: larger than 320 x 320 pixels and smaller than 1920 x 1920 pixels.
            Bitmap  bitmap = BitmapFactory.DecodeResource(this.Resources, Resource.Drawable.face_image);
            MLFrame frame  = MLFrame.FromBitmap(bitmap);
            // Call the AnalyseFrameAsync method to perform face detection
            Task <IList <ML3DFace> > task = this.analyzer.AnalyseFrameAsync(frame);

            try
            {
                await task;

                if (task.IsCompleted && task.Result != null)
                {
                    //Analyze success
                    var faces = task.Result;
                    if (faces.Count > 0)
                    {
                        this.DisplaySuccess(faces.ElementAt(0));
                    }
                }
                else
                {
                    //Analyze failed
                    this.DisplayFailure();
                }
            }
            catch (Exception e)
            {
                //Operation failed
                Log.Error(Tag, e.Message);
            }
        }
Example #10
        /// <summary>
        /// Text recognition on the device
        /// </summary>
        private async void LocalAnalyzer()
        {
            // Create the text analyzer MLTextAnalyzer to recognize characters in images. You can set MLLocalTextSetting to
            // specify languages that can be recognized.
            // If you do not set the languages, only Romance languages can be recognized by default.
            // Use default parameter settings to configure the on-device text analyzer. Only Romance languages can be
            // recognized.
            // analyzer = MLAnalyzerFactory.Instance.LocalTextAnalyzer;
            // Use the customized parameter MLLocalTextSetting to configure the text analyzer on the device.
            MLLocalTextSetting setting = new MLLocalTextSetting.Factory()
                                         .SetOCRMode(MLLocalTextSetting.OcrDetectMode)
                                         .SetLanguage("en")
                                         .Create();

            this.analyzer = MLAnalyzerFactory.Instance.GetLocalTextAnalyzer(setting);
            // Create an MLFrame by using Android.Graphics.Bitmap.
            Bitmap  bitmap = BitmapFactory.DecodeResource(Resources, Resource.Drawable.text_image);
            MLFrame frame  = MLFrame.FromBitmap(bitmap);

            Task <MLText> task = this.analyzer.AnalyseFrameAsync(frame);

            try
            {
                await task;

                if (task.IsCompleted && task.Result != null)
                {
                    // Analyze success.
                    var result = task.Result;
                    this.DisplaySuccess(result);
                }
                else
                {
                    // Analyze failure.
                    Log.Info(Tag, " Analyze failure ");
                }
            }
            catch (Exception e)
            {
                // Operation failure.
                Log.Info(Tag, " Operation failure: " + e.Message);
                this.DisplayFailure(e);
            }
        }
Example #11
        /// <summary>
        /// Run image super-resolution on the source image, or show the original image
        /// </summary>
        private async void DetectImage(int type)
        {
            if (type == IndexOriginal)
            {
                imageView.SetImageBitmap(srcBitmap);
                return;
            }

            if (analyzer == null)
            {
                return;
            }

            // Create an MLFrame by using the bitmap.
            MLFrame frame = MLFrame.FromBitmap(srcBitmap);
            Task <MLImageSuperResolutionResult> task = this.analyzer.AnalyseFrameAsync(frame);

            try
            {
                await task;

                if (task.IsCompleted && task.Result != null)
                {
                    // Analyze success.
                    Toast.MakeText(ApplicationContext, "Success", ToastLength.Short).Show();
                    var result = task.Result;
                    SetImage(result.Bitmap);
                }
                else
                {
                    // Analyze failure.
                    Toast.MakeText(ApplicationContext, "Failed", ToastLength.Short).Show();
                }
            }
            catch (Exception e)
            {
                // Operation failure.
                Log.Info(Tag, " Operation failure: " + e.Message);
                Toast.MakeText(ApplicationContext, "Failed:" + e.Message, ToastLength.Short).Show();
            }
        }
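DetectImage() relies on an analyzer field that is created elsewhere. Below is a hedged sketch of that creation step, following the Factory/Setting pattern used throughout these examples; the class and constant names (MLImageSuperResolutionAnalyzerSetting, IsrScale3x, GetImageSuperResolutionAnalyzer) are assumptions based on the underlying SDK, not code from the original sample.
        private void CreateAnalyzer()
        {
            // Assumption: 3x upscaling is selected through the setting factory, as in the underlying SDK.
            MLImageSuperResolutionAnalyzerSetting setting =
                new MLImageSuperResolutionAnalyzerSetting.Factory()
                .SetScale(MLImageSuperResolutionAnalyzerSetting.IsrScale3x)
                .Create();

            this.analyzer = MLImageSuperResolutionAnalyzerFactory.Instance.GetImageSuperResolutionAnalyzer(setting);
        }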
Example #12
        private async void Analyze()
        {
            // Get bitmap.
            Bitmap bitmap = BitmapFactory.DecodeResource(Resources, Resource.Drawable.form_recognition);
            // Convert bitmap to MLFrame.
            MLFrame frame = MLFrame.FromBitmap(bitmap);
            // Create analyzer.
            MLFormRecognitionAnalyzer analyzer = MLFormRecognitionAnalyzerFactory.Instance.FormRecognitionAnalyzer;

            Task <JsonObject> task = analyzer.AnalyseFrameAsync(frame);

            try
            {
                await task;

                if (task.IsCompleted && task.Result != null)
                {
                    //Recognition success
                    JsonObject jsonObject = task.Result;
                    if (jsonObject.Get("retCode").AsInt == MLFormRecognitionConstant.Success)
                    {
                        string str = jsonObject.ToString();
                        form_result.Text = str;
                        try
                        {
                            Gson gson = new Gson();
                            MLFormRecognitionTablesAttribute attribute = (MLFormRecognitionTablesAttribute)gson.FromJson(str, Java.Lang.Class.FromType(typeof(MLFormRecognitionTablesAttribute)));
                            Log.Debug(Tag, "RetCode: " + attribute.RetCode);
                            MLFormRecognitionTablesAttribute.TablesContent tablesContent = attribute.GetTablesContent();
                            Log.Debug(Tag, "tableCount: " + tablesContent.TableCount);
                            IList <MLFormRecognitionTablesAttribute.TablesContent.TableAttribute> tableAttributeArrayList = tablesContent.TableAttributes;
                            Log.Debug(Tag, "tableID: " + tableAttributeArrayList.ElementAt(0).Id);
                            IList <MLFormRecognitionTablesAttribute.TablesContent.TableAttribute.TableCellAttribute> tableCellAttributes = tableAttributeArrayList.ElementAt(0).TableCellAttributes;
                            for (int i = 0; i < tableCellAttributes.Count; i++)
                            {
                                Log.Debug(Tag, "startRow: " + tableCellAttributes.ElementAt(i).StartRow);
                                Log.Debug(Tag, "endRow: " + tableCellAttributes.ElementAt(i).EndRow);
                                Log.Debug(Tag, "startCol: " + tableCellAttributes.ElementAt(i).StartCol);
                                Log.Debug(Tag, "endCol: " + tableCellAttributes.ElementAt(i).EndCol);
                                Log.Debug(Tag, "textInfo: " + tableCellAttributes.ElementAt(i).TextInfo);
                                Log.Debug(Tag, "cellCoordinate: ");
                                MLFormRecognitionTablesAttribute.TablesContent.TableAttribute.TableCellAttribute.TableCellCoordinateAttribute coordinateAttribute = tableCellAttributes.ElementAt(i).GetTableCellCoordinateAttribute();
                                Log.Debug(Tag, "topLeft_x: " + coordinateAttribute.TopLeftX);
                                Log.Debug(Tag, "topLeft_y: " + coordinateAttribute.TopLeftY);
                                Log.Debug(Tag, "topRight_x: " + coordinateAttribute.TopRightX);
                                Log.Debug(Tag, "topRight_y: " + coordinateAttribute.TopRightY);
                                Log.Debug(Tag, "bottomLeft_x: " + coordinateAttribute.BottomLeftX);
                                Log.Debug(Tag, "bottomLeft_y: " + coordinateAttribute.BottomLeftY);
                                Log.Debug(Tag, "bottomRight_x: " + coordinateAttribute.BottomRightX);
                                Log.Debug(Tag, "bottomRight_y: " + coordinateAttribute.BottomRightY);
                            }
                        }
                        catch (Exception e)
                        {
                            Log.Error(Tag, e.Message);
                        }
                    }
                }
                else
                {
                    //Recognition failed
                    Log.Debug(Tag, "Recognition Failed");
                }
            }
            catch (Exception ex)
            {
                //Operation failed
                Log.Error(Tag, ex.Message);
            }
        }