        private Rect GetRoi(Mat mat)
        {
            // Scan box: two thirds of the frame width by one sixth of its height.
            var boxWidth  = mat.Width() - (mat.Width() / 3);
            var boxHeight = mat.Height() / 6;
            var scanBox   = new Size(boxWidth, boxHeight);

            // Anchor the box one sixth of the width from the left, slightly above the vertical centre.
            var xStartingPoint       = mat.Width() / 6;
            var yStartingPoint       = (mat.Height() / 2) - (mat.Height() / 10);
            var scanBoxStartingPoint = new Point(xStartingPoint, yStartingPoint);

            return(new Rect(scanBoxStartingPoint, scanBox));
        }
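A minimal usage sketch (not from the original source): CropScanRegion is a hypothetical caller showing how the returned Rect might be applied with Mat.Submat, which Example No. 5 below uses the same way.

        private Mat CropScanRegion(Mat frame)
        {
            // Crop the scan-box region out of the frame; Submat returns a view, not a copy.
            var roi = GetRoi(frame);
            return(frame.Submat(roi));
        }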
Example No. 2
 private void CreateSrcDstArrays(double maxWidth, double maxHeight, OpenCV.Core.Point pTopLeft, OpenCV.Core.Point pBottomLeft, OpenCV.Core.Point pBottomRight, OpenCV.Core.Point pTopRight, out MatOfPoint2f src, out MatOfPoint2f dst)
 {
     // Source corners as detected; their order must correspond one-to-one with the
     // destination corners below when the pair is fed to a perspective transform.
     src = new MatOfPoint2f(new OpenCV.Core.Point[4] {
         pBottomLeft, pTopLeft, pTopRight, pBottomRight
     });
     // Destination corners of the flattened image:
     // top-left, top-right, bottom-right, bottom-left.
     dst = new MatOfPoint2f(new OpenCV.Core.Point[4]
     {
         new OpenCV.Core.Point(0, 0),
         new OpenCV.Core.Point(maxWidth - 1, 0),
         new OpenCV.Core.Point(maxWidth - 1, maxHeight - 1),
         new OpenCV.Core.Point(0, maxHeight - 1)
     });
 }
Example No. 3
        private void GetImageMaxSize(out double maxWidth, out double maxHeight, out OpenCV.Core.Point pTopLeft, out OpenCV.Core.Point pBottomLeft, out OpenCV.Core.Point pBottomRight, out OpenCV.Core.Point pTopRight)
        {
            var pointArray = detectedBorder.ToArray();

            pTopLeft     = new OpenCV.Core.Point(pointArray[0].X, pointArray[0].Y);
            pBottomLeft  = new OpenCV.Core.Point(pointArray[1].X, pointArray[1].Y);
            pBottomRight = new OpenCV.Core.Point(pointArray[2].X, pointArray[2].Y);
            pTopRight    = new OpenCV.Core.Point(pointArray[3].X, pointArray[3].Y);
            // Edge lengths of the detected quad: bottom/top widths and left/right heights.
            var w1 = Math.Abs(pBottomRight.X - pBottomLeft.X);
            var w2 = Math.Abs(pTopRight.X - pTopLeft.X);
            var h1 = Math.Abs(pBottomLeft.Y - pTopLeft.Y);
            var h2 = Math.Abs(pBottomRight.Y - pTopRight.Y);

            // Use the larger of each pair so the warped output is large enough.
            maxWidth  = w1 > w2 ? w1 : w2;
            maxHeight = h1 > h2 ? h1 : h2;
        }
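Examples No. 2 and No. 3 together prepare a perspective ("flattening") warp. The sketch below is a hypothetical wiring of the two helpers through OpenCV's GetPerspectiveTransform/WarpPerspective; FlattenDetectedRegion and inputMat are placeholder names, not part of the original source.

        private Mat FlattenDetectedRegion(Mat inputMat)
        {
            GetImageMaxSize(out var maxWidth, out var maxHeight,
                            out var pTopLeft, out var pBottomLeft, out var pBottomRight, out var pTopRight);
            CreateSrcDstArrays(maxWidth, maxHeight, pTopLeft, pBottomLeft, pBottomRight, pTopRight,
                               out MatOfPoint2f src, out MatOfPoint2f dst);

            // src and dst must list corresponding corners in the same order.
            var transform = Imgproc.GetPerspectiveTransform(src, dst);
            var warped    = new Mat();

            Imgproc.WarpPerspective(inputMat, warped, transform, new OpenCV.Core.Size(maxWidth, maxHeight));
            return(warped);
        }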
Example No. 4
        /// <summary>
        /// Converts a bitmap to a blurred, binary-thresholded grayscale bitmap.
        /// </summary>
        /// <param name="img">Source bitmap.</param>
        /// <param name="threshValue1">Threshold applied after blurring (0-255).</param>
        /// <param name="blurValue">Width and height of the box-blur kernel.</param>
        /// <returns>The processed bitmap, or null if OpenCV throws.</returns>
        public static Bitmap greyImg(Bitmap img, double threshValue1, double blurValue)
        {
            //Matrix for the image
            Mat imgMat = new Mat();

            //Convert the bitmap to a matrix
            Utils.BitmapToMat(img, imgMat);

            //-----------------Process the image---------------------

            //Variables
            //Size s = new Size(10.0, 10.0);
            Size s = new Size(blurValue, blurValue);

            OpenCV.Core.Point p = new OpenCV.Core.Point(0, 0);

            //TODO: take the matrix size into account?
            Bitmap bmp       = null;
            Mat    tmpgrey   = new Mat(10, 10, CvType.Cv8uc1, new Scalar(4));
            Mat    tmpblur   = new Mat(10, 10, CvType.Cv8uc1, new Scalar(4));
            Mat    tmpthresh = new Mat(10, 10, CvType.Cv8uc1, new Scalar(4));
            Mat    imgresult = new Mat(10, 10, CvType.Cv8uc1, new Scalar(4));

            try
            {
                //Grayscale
                Imgproc.CvtColor(imgMat, tmpgrey, Imgproc.ColorBgr2gray, 4);

                //Blur
                Imgproc.Blur(tmpgrey, tmpblur, s, p);

                //Thresh
                //Original
                //Imgproc.Threshold(tmpblur, tmpthresh, 90, 255, Imgproc.ThreshBinary);
                Imgproc.Threshold(tmpblur, tmpthresh, threshValue1, 255, Imgproc.ThreshBinary);

                //Contrast
                //tmpthresh.ConvertTo(imgresult, -1, 9.0, 10);

                bmp = Bitmap.CreateBitmap(tmpthresh.Cols(), tmpthresh.Rows(), Bitmap.Config.Argb8888);
                Utils.MatToBitmap(tmpthresh, bmp);
            }
            catch (CvException e) { System.Console.WriteLine(e.Message); }


            return(bmp);
        }
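A short hypothetical call site (BinarizeForOcr and the argument values are illustrative; 90 and a 10x10 blur mirror the values in the commented-out lines inside greyImg):

        private Bitmap BinarizeForOcr(Bitmap captured)
        {
            // Returns null if greyImg hits a CvException.
            return(greyImg(captured, threshValue1: 90, blurValue: 10));
        }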
Example No. 5
        public static async Task <string> detectAndExtractText(Bitmap img)
        {
            //Matrices for the images
            Mat large = new Mat();
            Mat small = new Mat();
            Mat rgb   = new Mat();

            //Convert the bitmap to a matrix
            Utils.BitmapToMat(img, large);

            // downsample and use it for processing
            Imgproc.PyrDown(large, rgb);

            //Grey
            Imgproc.CvtColor(rgb, small, Imgproc.ColorBgr2gray);

            //Gradient
            Mat  grad        = new Mat();
            Size morphsize   = new Size(3.0, 3.0);
            Mat  morphKernel = Imgproc.GetStructuringElement(Imgproc.MorphEllipse, morphsize);

            Imgproc.MorphologyEx(small, grad, Imgproc.MorphGradient, morphKernel);

            //Binarize
            Mat bw = new Mat();

            Imgproc.Threshold(grad, bw, 0.0, 255.0, Imgproc.ThreshBinary | Imgproc.ThreshOtsu);

            // connect horizontally oriented regions
            Mat  connected   = new Mat();
            Size connectsize = new Size(9.0, 1.0);

            morphKernel = Imgproc.GetStructuringElement(Imgproc.MorphRect, connectsize);
            Imgproc.MorphologyEx(bw, connected, Imgproc.MorphClose, morphKernel);

            // find contours
            Mat mask = Mat.Zeros(bw.Size(), CvType.Cv8uc1);

            JavaList <MatOfPoint> contours = new JavaList <MatOfPoint>();
            Mat hierarchy = new Mat();

            OpenCV.Core.Point contourPoint = new OpenCV.Core.Point(0, 0);

            Imgproc.FindContours(connected, contours, hierarchy, Imgproc.RetrCcomp, Imgproc.ChainApproxSimple, contourPoint);

            Scalar zero        = new Scalar(0, 0, 0);
            Scalar contourscal = new Scalar(255, 255, 255);

            Scalar rectScalar = new Scalar(0, 255, 0);


            OpenCV.Core.Rect rect;
            Mat    maskROI;
            double r;

            double[] contourInfo;

            string resulttext = "";
            string part;

            Bitmap bmpOcr;
            Mat    croppedPart;


            // Walk the top-level contours via the hierarchy's next-sibling links;
            // i becomes -1 (ending the loop) when there is no next contour.
            for (int i = 0; i >= 0 && i < contours.Count;)
            {
                rect = Imgproc.BoundingRect(contours[i]);

                maskROI = new Mat(mask, rect);
                maskROI.SetTo(zero);

                //fill the contour
                Imgproc.DrawContours(mask, contours, i, contourscal, Core.Filled);

                // ratio of non-zero pixels in the filled region
                r = (double)Core.CountNonZero(maskROI) / (rect.Width * rect.Height);

                /* assume at least 45% of the area is filled if it contains text */
                /* constraints on region size */

                /* these two conditions alone are not very robust. better to use something
                 * like the number of significant peaks in a horizontal projection as a third condition */
                if (r > .45 && (rect.Height > 8 && rect.Width > 8))
                {
                    //Imgproc.Rectangle(rgb, rect.Br(), rect.Tl(), rectScalar, 2);
                    try
                    {
                        croppedPart = rgb.Submat(rect);

                        bmpOcr = Bitmap.CreateBitmap(croppedPart.Width(), croppedPart.Height(), Bitmap.Config.Argb8888);
                        Utils.MatToBitmap(croppedPart, bmpOcr);

                        part = await OCR.getText(bmpOcr);

                        resulttext = resulttext + part;
                        Console.WriteLine("------------------Durchlauf-------------");
                    }
                    catch (Exception e)
                    {
                        Android.Util.Log.Debug("Fehler", "cropped part data error " + e.Message);
                    }
                }


                //Move to the next top-level contour: hierarchy[0] holds the next-sibling index
                contourInfo = hierarchy.Get(0, i);
                i           = (int)contourInfo[0];
            }


            return(resulttext);
        }
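A hypothetical async caller for Example No. 5 (ShowRecognizedTextAsync and the "OCR" log tag are placeholders):

        private async Task ShowRecognizedTextAsync(Bitmap photo)
        {
            // detectAndExtractText awaits OCR.getText internally, so it must be awaited here as well.
            string text = await detectAndExtractText(photo);

            Android.Util.Log.Debug("OCR", text);
        }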