Example #1
        public CornerDetect()
        {
            int cornerCount = 150;

            using (IplImage dstImg1 = new IplImage(FilePath.Image.Lenna, LoadMode.AnyColor | LoadMode.AnyDepth))
                using (IplImage dstImg2 = dstImg1.Clone())
                    using (IplImage srcImgGray = new IplImage(FilePath.Image.Lenna, LoadMode.GrayScale))
                        using (IplImage eigImg = new IplImage(srcImgGray.GetSize(), BitDepth.F32, 1))
                            using (IplImage tempImg = new IplImage(srcImgGray.GetSize(), BitDepth.F32, 1))
                            {
                                CvPoint2D32f[] corners;
                                Cv.GoodFeaturesToTrack(srcImgGray, eigImg, tempImg, out corners, ref cornerCount, 0.1, 15);
                                Cv.FindCornerSubPix(srcImgGray, corners, cornerCount, new CvSize(3, 3), new CvSize(-1, -1), new CvTermCriteria(20, 0.03));

                                for (int i = 0; i < cornerCount; i++)
                                {
                                    Cv.Circle(dstImg1, corners[i], 3, new CvColor(255, 0, 0), 2);
                                }

                                cornerCount = 150;
                                Cv.GoodFeaturesToTrack(srcImgGray, eigImg, tempImg, out corners, ref cornerCount, 0.1, 15, null, 3, true, 0.01);
                                Cv.FindCornerSubPix(srcImgGray, corners, cornerCount, new CvSize(3, 3), new CvSize(-1, -1), new CvTermCriteria(20, 0.03));

                                for (int i = 0; i < cornerCount; i++)
                                {
                                    Cv.Circle(dstImg2, corners[i], 3, new CvColor(0, 0, 255), 2);
                                }

                                using (new CvWindow("EigenVal", WindowMode.AutoSize, dstImg1))
                                    using (new CvWindow("Harris", WindowMode.AutoSize, dstImg2))
                                    {
                                        Cv.WaitKey(0);
                                    }
                            }
        }
Example #2
        public IplImage PreProcess(IplImage image)
        {
            double min, max;

            Cv.MinMaxLoc(image, out min, out max);

            CvSize size = image.GetSize();

            // Assumption to keep this truncated snippet compilable: allocate an
            // output of the same size and return a plain copy of the input.
            IplImage dst = new IplImage(size, image.Depth, image.NChannels);
            Cv.Copy(image, dst);
            return dst;
        }
Example #3
        // src_img: input and output (modified in place)
        public static void Median8(IplImage src_img)
        {
            IplImage dst_img = Cv.CreateImage(src_img.GetSize(), BitDepth.U8, 1);

            Median8(src_img, dst_img);
            Cv.Copy(dst_img, src_img);//dst_img->src_img
            Cv.ReleaseImage(dst_img);
        }
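
A minimal call-site sketch for the in-place overload above, assuming the two-argument Median8(src, dst) in the same class expects an 8-bit, single-channel image; the file names are hypothetical:

        // Hypothetical usage: filter a grayscale image in place and save the result.
        using (IplImage img = new IplImage("input.png", LoadMode.GrayScale))
        {
            Median8(img);                     // filtered result is written back into img
            Cv.SaveImage("median8.png", img);
        }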
Example #4
 /// <summary>
 /// Draws an image onto the specified PictureBox
 /// </summary>
 /// <param name="pb">Destination PictureBox</param>
 /// <param name="img">Image to draw</param>
 private void drawPicture(OpenCvSharp.UserInterface.PictureBoxIpl pb, IplImage img)
 {
     using (IplImage pictureImage = new IplImage(img.GetSize(), img.Depth, img.NChannels))
     {
         pictureImage.SetROI(0, 0, pb.Width, pb.Height);
         img.Resize(pictureImage);
         pb.ImageIpl = pictureImage;
     }
 }
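
A call-site sketch, assuming a WinForms form hosting an OpenCvSharp.UserInterface.PictureBoxIpl control named pictureBoxIpl1; the control name and the file name are hypothetical:

     // Hypothetical usage: fit a loaded image into the picture box.
     using (IplImage src = new IplImage("photo.jpg", LoadMode.Color))
     {
         drawPicture(pictureBoxIpl1, src);
     }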
Example #5
        public CornerDetect()
        {
            // cvGoodFeaturesToTrack, cvFindCornerSubPix
            // Detect corners (feature points) in an image

            int cornerCount = 150;

            using (IplImage dstImg1 = new IplImage(Const.ImageLenna, LoadMode.AnyColor | LoadMode.AnyDepth))
                using (IplImage dstImg2 = dstImg1.Clone())
                    using (IplImage srcImgGray = new IplImage(Const.ImageLenna, LoadMode.GrayScale))
                        using (IplImage eigImg = new IplImage(srcImgGray.GetSize(), BitDepth.F32, 1))
                            using (IplImage tempImg = new IplImage(srcImgGray.GetSize(), BitDepth.F32, 1))
                            {
                                CvPoint2D32f[] corners;
                                // (1) Corner detection using cvCornerMinEigenVal
                                Cv.GoodFeaturesToTrack(srcImgGray, eigImg, tempImg, out corners, ref cornerCount, 0.1, 15);
                                Cv.FindCornerSubPix(srcImgGray, corners, cornerCount, new CvSize(3, 3), new CvSize(-1, -1), new CvTermCriteria(20, 0.03));
                                // (2) Draw the detected corners
                                for (int i = 0; i < cornerCount; i++)
                                {
                                    Cv.Circle(dstImg1, corners[i], 3, new CvColor(255, 0, 0), 2);
                                }
                                // (3) Corner detection using cvCornerHarris
                                cornerCount = 150;
                                Cv.GoodFeaturesToTrack(srcImgGray, eigImg, tempImg, out corners, ref cornerCount, 0.1, 15, null, 3, true, 0.01);
                                Cv.FindCornerSubPix(srcImgGray, corners, cornerCount, new CvSize(3, 3), new CvSize(-1, -1), new CvTermCriteria(20, 0.03));
                                // (4) Draw the detected corners
                                for (int i = 0; i < cornerCount; i++)
                                {
                                    Cv.Circle(dstImg2, corners[i], 3, new CvColor(0, 0, 255), 2);
                                }
                                // (5) Display the results
                                using (new CvWindow("EigenVal", WindowMode.AutoSize, dstImg1))
                                    using (new CvWindow("Harris", WindowMode.AutoSize, dstImg2))
                                    {
                                        Cv.WaitKey(0);
                                    }
                            }
        }
Example #6
        /*
         * Convert OpenCvImage into 2D byte array
         */
        public static byte[,] IplImageToMatlabImage(IplImage image)
        {
            int h = image.GetSize().Height;
            int w = image.GetSize().Width;

            // Allocate the grayscale destination: unsigned 8-bit, single channel
            IplImage gray = new IplImage(new CvSize(w, h), BitDepth.U8, 1);

            Cv.CvtColor(image, gray, ColorConversion.BgrToGray);

            byte[,] array = new byte[h, w];
            for (int i = 0; i < h; i++)
            {
                for (int j = 0; j < w; j++)
                {
                    double value = gray.GetReal2D(i, j);
                    array[i, j] = Convert.ToByte(value);
                }
            }

            return(array);
        }
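
A short usage sketch for the converter above, assuming a BGR color image on disk (the file name is hypothetical); the returned array is indexed as [row, column]:

        // Hypothetical usage: load a color image and inspect the resulting matrix.
        using (IplImage img = new IplImage("photo.jpg", LoadMode.Color))
        {
            byte[,] pixels = IplImageToMatlabImage(img);
            Console.WriteLine(pixels.GetLength(0) + " rows, " + pixels.GetLength(1) + " columns");
        }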
Example #7
        // src_img: input and output (modified in place)
        public static bool ApplyMask(int[] Mask, IplImage src_img)
        {
            if ((Mask.Length != Const.Neighborhood8) && (Mask.Length != Const.Neighborhood4))
            {
                return(false);
            }
            IplImage dst_img = Cv.CreateImage(src_img.GetSize(), BitDepth.U8, 1);

            ApplyMask(Mask, src_img, dst_img);
            Cv.Copy(dst_img, src_img);//dst_img->src_img
            Cv.ReleaseImage(dst_img);
            return(true);
        }
Example #8

        // src_img: input and output (modified in place)
        public static bool FastestMedian(IplImage src_img, int n)
        {
            if ((n & 1) == 0)
            {
                return(false);       // reject even window sizes
            }
            IplImage dst_img = Cv.CreateImage(src_img.GetSize(), BitDepth.U8, 1);

            FastestMedian(src_img, dst_img, n);
            Cv.Copy(dst_img, src_img);//dst_img->src_img
            Cv.ReleaseImage(dst_img);
            return(true);
        }
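
A brief sketch of calling FastestMedian, assuming FastestMedian(src, dst, n) applies an n x n median window; n must be odd, and the file names are hypothetical:

        // Hypothetical usage: apply a 5x5 median filter in place.
        using (IplImage img = new IplImage("input.png", LoadMode.GrayScale))
        {
            if (FastestMedian(img, 5))        // returns false when n is even
            {
                Cv.SaveImage("median5.png", img);
            }
        }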
Example #9
 /// <summary>
 /// Converts the full-color (24-bit) image to a grayscale (8-bit) image
 /// mImageFull --> mImageGray
 /// </summary>
 private void convertFullToGray()
 {
     mImageGray = new IplImage(mImageFull.GetSize(), BitDepth.U8, 1);
     mImageFull.CvtColor(mImageGray, ColorConversion.BgrToGray);
 }
Example #10
        public static string[] Recognize(IplImage input, TextDetectionParams _params, Chain[] chains, List <Tuple <Point2d, Point2d> > compBB, List <Tuple <CvPoint, CvPoint> > chainBB, DigitsRecognitionMethod digitsRecognition)
        {
            List <string> variants = new List <string>();

            //convert to grayscale
            IplImage grayImage = Cv.CreateImage(input.GetSize(), BitDepth.U8, 1);

            Cv.CvtColor(input, grayImage, ColorConversion.RgbToGray);

            for (int i = 0; i < chainBB.Count; i++)
            {
                Rect    chainRect = new Rect(chainBB[i].Item1.X, chainBB[i].Item1.Y, chainBB[i].Item2.X - chainBB[i].Item1.X, chainBB[i].Item2.Y - chainBB[i].Item1.Y);
                CvPoint center    = new CvPoint((chainBB[i].Item1.X + chainBB[i].Item2.X) / 2, (chainBB[i].Item1.Y + chainBB[i].Item2.Y) / 2);

                //work out if total width of chain is large enough
                if (chainBB[i].Item2.X - chainBB[i].Item1.X < input.Width / _params.MaxImgWidthToTextRatio)
                {
                    continue;
                }

                //eliminate chains with components of lower height than required minimum
                int minHeight = chainBB[i].Item2.Y - chainBB[i].Item1.Y;
                for (int j = 0; j < chains[i].components.Count; j++)
                {
                    minHeight = Math.Min(minHeight, compBB[chains[i].components[j]].Item2.y - compBB[chains[i].components[j]].Item1.y);
                }

                if (minHeight < _params.MinCharacterHeight)
                {
                    continue;
                }

                //invert direction if angle is in 3rd/4th quadrants
                if (chains[i].direction.x < 0)
                {
                    chains[i].direction.x = -chains[i].direction.x;
                    chains[i].direction.y = -chains[i].direction.y;
                }

                //work out chain angle
                double theta_deg = 180 * Math.Atan2(chains[i].direction.y, chains[i].direction.x) / Math.PI;

                if (Math.Abs(theta_deg) > _params.MaxAngle)
                {
                    continue;
                }

                if ((chainBB.Count == 2) && (Math.Abs(theta_deg) > 5))
                {
                    continue;
                }

                //Console.WriteLine("Chain #" + i + " angle: " + theta_deg + " degrees");

                //create copy of input image including only the selected components
                Mat inputMat      = new Mat(input);
                Mat grayMat       = new Mat(grayImage);
                Mat componentsImg = Mat.Zeros(new Size(grayMat.Cols, grayMat.Rows), grayMat.Type());
                //CvMat componentsImg = _componentsImg.ToCvMat();
                Mat            componentsImgRoi = null;
                List <CvPoint> compCoords       = new List <CvPoint>();

                chains[i].components = chains[i].components.Distinct().ToList();

                int order = 0;
                //ordering components bounding boxes by x coord
                var ordCompBB = compBB.OrderBy(x => x.Item1.x).ToList();

                List <string> digits = new List <string>();
                for (int j = 0; j < ordCompBB.Count; j++)
                {
                    Rect roi = new Rect(ordCompBB[j].Item1.x, ordCompBB[j].Item1.y, ordCompBB[j].Item2.x - ordCompBB[j].Item1.x, ordCompBB[j].Item2.y - ordCompBB[j].Item1.y);
                    if (!chainRect.Contains(roi))
                    {
                        continue;
                    }

                    Mat componentRoi = new Mat(grayMat, roi);
                    compCoords.Add(new CvPoint(ordCompBB[j].Item1.x, ordCompBB[j].Item1.y));
                    compCoords.Add(new CvPoint(ordCompBB[j].Item2.x, ordCompBB[j].Item2.y));
                    compCoords.Add(new CvPoint(ordCompBB[j].Item1.x, ordCompBB[j].Item2.y));
                    compCoords.Add(new CvPoint(ordCompBB[j].Item2.x, ordCompBB[j].Item1.y));

                    Mat thresholded = new Mat(grayMat, roi);

                    Cv2.Threshold(componentRoi, thresholded, 0, 255, ThresholdType.Otsu | ThresholdType.BinaryInv);

                    componentsImgRoi = new Mat(componentsImg, roi);

                    Cv2.Threshold(componentRoi, componentsImgRoi, 0, 255, ThresholdType.Otsu | ThresholdType.BinaryInv);

                    //var size = thresholded.Size();
                    //digits.Add(new Bitmap(size.Width, size.Height, (int)thresholded.Step1(), System.Drawing.Imaging.PixelFormat.Format24bppRgb, thresholded.Data));

                    if (digitsRecognition == DigitsRecognitionMethod.Neural || digitsRecognition == DigitsRecognitionMethod.Both)
                    {
                        string file = FileManager.TempBitmap;
                        Cv2.ImWrite(file, thresholded);
                        try
                        {
                            digits.Add(file);
                        }
                        catch
                        {
                            GC.Collect();
                            GC.WaitForFullGCComplete();
                        }
                        //digits.Last().Save("test" + order + ".bmp");
                        order++;
                    }
                    //else if (digitsRecognition == DigitsRecognitionMethod.Tesseract || digitsRecognition == DigitsRecognitionMethod.Both)
                    //{
                    // DO NOTHING
                    //}
                }

                if (digitsRecognition == DigitsRecognitionMethod.Neural || digitsRecognition == DigitsRecognitionMethod.Both)
                {
                    //TODO: neural recognition
                    var result = OCRParser.ParseNeural(digits.ToArray());
                    variants.Add(result.Value);
                    //variants.AddRange(OCRParser.ParseNeural(digits.ToArray()));
                    //variants.Add(BibOCR.OCRParser.ParseBib(digits.ToArray()));
                }
                if (digitsRecognition == DigitsRecognitionMethod.Tesseract || digitsRecognition == DigitsRecognitionMethod.Both)
                {
                    CvRect _roi = GetBoundingBox(compCoords, new CvSize(input.Width, input.Height));
                    //ROI area can be null if outside of clipping area
                    if ((_roi.Width == 0) || (_roi.Height == 0))
                    {
                        continue;
                    }

                    //rotate each component coordinates
                    const int border = 3;

                    Mat _mat = new Mat(_roi.Height + 2 * border, _roi.Width + 2 * border, grayMat.Type());

                    Mat tmp = new Mat(grayMat, _roi);
                    //copy bounded box from rotated mat to new mat with borders - borders are needed to improve OCR success rate
                    Mat mat = new Mat(_mat, new Rect(border, border, _roi.Width, _roi.Height));
                    tmp.CopyTo(mat);

                    //resize image to improve OCR success rate
                    float upscale = 5.0f;
                    Cv2.Resize(mat, mat, new Size(0, 0), upscale, upscale);

                    //erode text to get rid of thin joints
                    int s    = (int)(0.05 * mat.Rows); // 5% of up-scaled size
                    Mat elem = Cv2.GetStructuringElement(StructuringElementShape.Ellipse, new Size(2 * s + 1, 2 * s + 1), new Point(s, s));
                    //Cv2.Erode(mat, mat, elem);

                    //Cv2.Threshold(mat, mat, 0, 255, ThresholdType.Otsu | ThresholdType.BinaryInv);

                    string file = FileManager.TempPng;
                    Cv2.ImWrite(file, mat);

                    // TODO: Pass it to Tesseract API
                    variants.Add(OCRParser.ParseTesseract(file));
                }

                //for (int j = 0; j < digits.Count; j++)
                //    digits[j].Dispose();
                digits.Clear();

                GC.Collect();
                GC.WaitForFullGCComplete(5000);
            }

            Cv.ReleaseImage(grayImage);

            return(variants.Distinct().ToArray());
        }