Example #1
      /*
      public void TestCodeBookBGModel()
      {
         using (Capture capture = new Capture())
         using (BGCodeBookModel<Bgr> model = new BGCodeBookModel<Bgr>())
         {
            ImageViewer viewer = new ImageViewer();
            Image<Gray, byte> fgMask = capture.QueryFrame().Convert<Gray, Byte>();

            Application.Idle += delegate(Object sender, EventArgs args)
            {
               Mat frame = capture.QueryFrame();
               model.Apply(frame);
               viewer.Image = model.ForegroundMask; 
            };
            viewer.ShowDialog();
         }
      }

      public void TestBlobTracking()
      {
         MCvFGDStatModelParams fgparam = new MCvFGDStatModelParams();
         fgparam.alpha1 = 0.1f;
         fgparam.alpha2 = 0.005f;
         fgparam.alpha3 = 0.1f;
         fgparam.delta = 2;
         fgparam.is_obj_without_holes = 1;
         fgparam.Lc = 32;
         fgparam.Lcc = 16;
         fgparam.minArea = 15;
         fgparam.N1c = 15;
         fgparam.N1cc = 25;
         fgparam.N2c = 25;
         fgparam.N2cc = 35;
         fgparam.perform_morphing = 0;
         fgparam.T = 0.9f;

         BlobTrackerAutoParam<Bgr> param = new BlobTrackerAutoParam<Bgr>();
         param.BlobDetector = new BlobDetector(Emgu.CV.CvEnum.BlobDetectorType.CC);
         param.FGDetector = new FGDetector<Bgr>(Emgu.CV.CvEnum.ForgroundDetectorType.Fgd, fgparam);
         param.BlobTracker = new BlobTracker(Emgu.CV.CvEnum.BLOBTRACKER_TYPE.MSFG);
         param.FGTrainFrames = 10;
         BlobTrackerAuto<Bgr> tracker = new BlobTrackerAuto<Bgr>(param);

         //MCvFont font = new MCvFont(Emgu.CV.CvEnum.FontFace.HersheySimplex, 1.0, 1.0);

         using(ImageViewer viewer = new ImageViewer())
         using (Capture capture = new Capture())
         {
            capture.ImageGrabbed += delegate(object sender, EventArgs e)
            {
               tracker.Process(capture.RetrieveBgrFrame());
               
               //Image<Bgr, Byte> img = capture.RetrieveBgrFrame();

               Image<Bgr, Byte> img = tracker.ForegroundMask.Convert<Bgr, Byte>();
               foreach (MCvBlob blob in tracker)
               {
                  img.Draw((Rectangle)blob, new Bgr(255.0, 255.0, 255.0), 2);
                  img.Draw(blob.ID.ToString(), Point.Round(blob.Center), CvEnum.FontFace.HersheySimplex, 1.0, new Bgr(255.0, 255.0, 255.0));
               }
               viewer.Image = img;
            };
            capture.Start();
            viewer.ShowDialog();
         }
      }*/
      
      public void TestCvBlob()
      {
         //MCvFont font = new MCvFont(Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, 0.5);
         using (CvTracks tracks = new CvTracks())
         using (ImageViewer viewer = new ImageViewer())
         using (Capture capture = new Capture())
         using (Mat fgMask = new Mat())
         {
            //BGStatModel<Bgr> bgModel = new BGStatModel<Bgr>(capture.QueryFrame(), Emgu.CV.CvEnum.BG_STAT_TYPE.GAUSSIAN_BG_MODEL);
            BackgroundSubtractorMOG2 bgModel = new BackgroundSubtractorMOG2(0, 0, true);
            //BackgroundSubstractorMOG bgModel = new BackgroundSubstractorMOG(0, 0, 0, 0);

            capture.ImageGrabbed += delegate(object sender, EventArgs e)
            {
               Mat frame = new Mat();
               capture.Retrieve(frame);
               bgModel.Apply(frame, fgMask);

               using (CvBlobDetector detector = new CvBlobDetector())
               using (CvBlobs blobs = new CvBlobs())
               {
                  detector.Detect(fgMask.ToImage<Gray, Byte>(), blobs);
                  blobs.FilterByArea(100, int.MaxValue);

                  tracks.Update(blobs, 20.0, 10, 0);

                  Image<Bgr, Byte> result = new Image<Bgr, byte>(frame.Size);

                  using (Image<Gray, Byte> blobMask = detector.DrawBlobsMask(blobs))
                  {
                     frame.CopyTo(result, blobMask);
                  }
                  //CvInvoke.cvCopy(frame, result, blobMask);

                  foreach (KeyValuePair<uint, CvTrack> pair in tracks)
                  {
                     if (pair.Value.Inactive == 0) //only draw the active tracks.
                     {
                        CvBlob b = blobs[pair.Value.BlobLabel];
                        Bgr color = detector.MeanColor(b, frame.ToImage<Bgr, Byte>());
                        result.Draw(pair.Key.ToString(), pair.Value.BoundingBox.Location, CvEnum.FontFace.HersheySimplex, 0.5, color);
                        result.Draw(pair.Value.BoundingBox, color, 2);
                        Point[] contour = b.GetContour();
                        result.Draw(contour, new Bgr(0, 0, 255), 1);
                     }
                  }

                  viewer.Image = frame.ToImage<Bgr, Byte>().ConcateVertical(fgMask.ToImage<Bgr, Byte>().ConcateHorizontal(result));
               }
            };
            capture.Start();
            viewer.ShowDialog();
         }
      }
Example #2
      public void TestMatToArr()
      {
         
         Mat mat = new Mat(new Size(320, 240), DepthType.Cv32F, 1);

         Matrix<float> m = new Matrix<float>(mat.Rows, mat.Cols, mat.NumberOfChannels);
         mat.CopyTo(m);

         EmguAssert.IsTrue(m.Mat.Depth == DepthType.Cv32F);
         EmguAssert.IsTrue(mat.Depth == DepthType.Cv32F);
      }
Example #3
      /// <summary>
      /// Finds line segments in a binary image using the probabilistic Hough transform.
      /// </summary>
      /// <param name="image">8-bit, single-channel binary source image. The image may be modified by the function.</param>
      /// <param name="rho">Distance resolution of the accumulator in pixels</param>
      /// <param name="theta">Angle resolution of the accumulator in radians</param>
      /// <param name="threshold">Accumulator threshold parameter. Only those lines are returned that get enough votes</param>
      /// <param name="minLineLength">Minimum line length. Line segments shorter than that are rejected.</param>
      /// <param name="maxGap">Maximum allowed gap between points on the same line to link them.</param>
      /// <returns>The found line segments</returns>
      public static LineSegment2D[] HoughLinesP(IInputArray image, double rho, double theta, int threshold, double minLineLength = 0, double maxGap = 0)
      {
         using (Mat lines = new Mat())
         {
            HoughLinesP(image, lines, rho, theta, threshold, minLineLength, maxGap);
            Size s = lines.Size;

            LineSegment2D[] segments = new LineSegment2D[s.Height];
            GCHandle handle = GCHandle.Alloc(segments, GCHandleType.Pinned);
            using (Mat tmp = new Mat(s.Height, s.Width, CV.CvEnum.DepthType.Cv32S, 4, handle.AddrOfPinnedObject(), sizeof(int) * 4))
            {
               lines.CopyTo(tmp);
            }
            handle.Free();

            return segments;
         }
      }
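
A minimal usage sketch for the wrapper above, assuming it is exposed as CvInvoke.HoughLinesP and that CvInvoke.Canny and CvInvoke.Line are available; the file name is a placeholder:

      // Hypothetical usage: run the probabilistic Hough transform on an edge map.
      Mat src = CvInvoke.Imread("building.jpg", ImreadModes.Grayscale); // placeholder file name
      using (Mat edges = new Mat())
      {
         CvInvoke.Canny(src, edges, 100, 200);
         LineSegment2D[] segments = CvInvoke.HoughLinesP(edges, 1, Math.PI / 180, 50, 30, 10);
         foreach (LineSegment2D segment in segments)
            CvInvoke.Line(src, segment.P1, segment.P2, new MCvScalar(255), 2);
      }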
Example #4
      /// <summary>
      /// Finds circles in a grayscale image using the Hough transform
      /// </summary>
      /// <param name="image">8-bit, single-channel, grayscale input image.</param>
      /// <param name="method">Detection method to use. Currently, the only implemented method is CV_HOUGH_GRADIENT , which is basically 21HT</param>
      /// <param name="dp">Inverse ratio of the accumulator resolution to the image resolution. For example, if dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has half as big width and height.</param>
      /// <param name="minDist">Minimum distance between the centers of the detected circles. If the parameter is too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed.</param>
      /// <param name="param1">First method-specific parameter. In case of CV_HOUGH_GRADIENT , it is the higher threshold of the two passed to the Canny() edge detector (the lower one is twice smaller).</param>
      /// <param name="param2">Second method-specific parameter. In case of CV_HOUGH_GRADIENT , it is the accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected. Circles, corresponding to the larger accumulator values, will be returned first.</param>
      /// <param name="minRadius"> Minimum circle radius.</param>
      /// <param name="maxRadius">Maximum circle radius.</param>
      /// <returns>The circles detected</returns>
      public static CircleF[] HoughCircles(
         IInputArray image,
         CvEnum.HoughType method,
         double dp,
         double minDist,
         double param1 = 100,
         double param2 = 100,
         int minRadius = 0,
         int maxRadius = 0)
      {
         using (Mat circles = new Mat())
         {
            HoughCircles(image, circles, method, dp, minDist, param1, param2, minRadius, maxRadius);
            Size s = circles.Size;
            CircleF[] results = new CircleF[s.Width];
            GCHandle handle = GCHandle.Alloc(results, GCHandleType.Pinned);
            using (Mat tmp = new Mat(s.Height, s.Width, CV.CvEnum.DepthType.Cv32F, 3, handle.AddrOfPinnedObject(), sizeof(float) * 3))
            {
               circles.CopyTo(tmp);
            }
            handle.Free();

            return results;
         }
      }
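
A similar sketch for HoughCircles, assuming the Gradient detection method and a pre-smoothed grayscale input; the threshold and radius values are illustrative only:

      // Hypothetical usage: detect circles in a blurred grayscale image.
      Mat gray = CvInvoke.Imread("coins.jpg", ImreadModes.Grayscale); // placeholder file name
      CvInvoke.GaussianBlur(gray, gray, new Size(5, 5), 1.5);
      CircleF[] circles = CvInvoke.HoughCircles(gray, HoughType.Gradient, 2.0, 20.0, 180, 120, 5, 0);
      foreach (CircleF circle in circles)
         CvInvoke.Circle(gray, Point.Round(circle.Center), (int)circle.Radius, new MCvScalar(255), 2);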
Example #5
      /*
      /// <summary>
      /// Initializes and returns a pointer to the contour scanner. The scanner is used in
      /// cvFindNextContour to retrieve the rest of the contours.
      /// </summary>
      /// <param name="image">The 8-bit, single channel, binary source image</param>
      /// <param name="storage">Container of the retrieved contours</param>
      /// <param name="headerSize">Size of the sequence header, &gt;=sizeof(CvChain) if method=CHAIN_CODE, and &gt;=sizeof(CvContour) otherwise</param>
      /// <param name="mode">Retrieval mode</param>
      /// <param name="method">Approximation method (for all the modes, except CV_RETR_RUNS, which uses built-in approximation). </param>
      /// <param name="offset">Offset, by which every contour point is shifted. This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context</param>
      /// <returns>Pointer to the contour scaner</returns>
#if ANDROID
      public static IntPtr cvStartFindContours(
         IntPtr image,
         IntPtr storage,
         int headerSize,
         CvEnum.RETR_TYPE mode,
         CvEnum.CHAIN_APPROX_METHOD method,
         Point offset)
      {
         return cvStartFindContours(image, storage, headerSize, mode, method, offset.X, offset.Y);
      }
      [DllImport(OpencvImgprocLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      private static extern IntPtr cvStartFindContours(
         IntPtr image,
         IntPtr storage,
         int headerSize,
         CvEnum.RETR_TYPE mode,
         CvEnum.CHAIN_APPROX_METHOD method,
         int offsetX, int offsetY);
#else
      [DllImport(OpencvImgprocLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvStartFindContours(
         IntPtr image,
         IntPtr storage,
         int headerSize,
         CvEnum.RetrType mode,
         CvEnum.ChainApproxMethod method,
         Point offset);
#endif

      /// <summary>
      /// Finds the next contour in the image
      /// </summary>
      /// <param name="scanner">Pointer to the contour scaner</param>
      /// <returns>The next contour in the image</returns>
      [DllImport(OpencvImgprocLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern IntPtr cvFindNextContour(IntPtr scanner);

      /// <summary>
      /// The function replaces the retrieved contour, that was returned from the preceding call of
      /// cvFindNextContour and stored inside the contour scanner state, with the user-specified contour.
      /// The contour is inserted into the resulting structure, list, two-level hierarchy, or tree, depending on
      /// the retrieval mode. If the parameter new contour is IntPtr.Zero, the retrieved contour is not included
      /// in the resulting structure, nor are any of its children that might be added to this structure later.
      /// </summary>
      /// <param name="scanner">Contour scanner initialized by cvStartFindContours</param>
      /// <param name="newContour">Substituting contour</param>
      [DllImport(OpencvImgprocLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern void cvSubstituteContour(
         IntPtr scanner,
         IntPtr newContour);*/

#if !( IOS || ANDROID || NETFX_CORE || UNITY_ANDROID || UNITY_IPHONE || UNITY_STANDALONE || UNITY_METRO )
      /// <summary>
      /// Convert raw data to bitmap
      /// </summary>
      /// <param name="scan0">The pointer to the raw data</param>
      /// <param name="step">The step</param>
      /// <param name="size">The size of the image</param>
      /// <param name="srcColorType">The source image color type</param>
      /// <param name="numberOfChannels">The number of channels</param>
      /// <param name="srcDepthType">The source image depth type</param>
      /// <param name="tryDataSharing">Try to create Bitmap that shares the data with the image</param>
      /// <returns>The Bitmap</returns>
      public static Bitmap RawDataToBitmap(IntPtr scan0, int step, Size size, Type srcColorType, int numberOfChannels, Type srcDepthType, bool tryDataSharing = false)
      {
         if (tryDataSharing)
         {
            if (srcColorType == typeof(Gray) && srcDepthType == typeof(Byte))
            {   //Grayscale of Bytes
               Bitmap bmpGray = new Bitmap(
                   size.Width,
                   size.Height,
                   step,
                   System.Drawing.Imaging.PixelFormat.Format8bppIndexed,
                   scan0
                   );

               bmpGray.Palette = CvToolbox.GrayscalePalette;

               return bmpGray;
            }
            // Mono on Linux doesn't support the scan0 constructor with Format24bppRgb, use ToBitmap instead
            // See https://bugzilla.novell.com/show_bug.cgi?id=363431
            // TODO: check Mono Bugzilla bug 363431 to see when it will be fixed
            else if (
               Emgu.Util.Platform.OperationSystem == Emgu.Util.TypeEnum.OS.Windows &&
               Emgu.Util.Platform.ClrType == Emgu.Util.TypeEnum.ClrType.DotNet &&
               srcColorType == typeof(Bgr) && srcDepthType == typeof(Byte)
               && (step & 3) == 0)
            {   //Bgr byte    
               return new Bitmap(
                   size.Width,
                   size.Height,
                   step,
                   System.Drawing.Imaging.PixelFormat.Format24bppRgb,
                   scan0);
            }
            else if (srcColorType == typeof(Bgra) && srcDepthType == typeof(Byte))
            {   //Bgra byte
               return new Bitmap(
                   size.Width,
                   size.Height,
                   step,
                   System.Drawing.Imaging.PixelFormat.Format32bppArgb,
                   scan0);
            }

            //PixelFormat.Format16bppGrayScale is not supported in .NET
            //else if (typeof(TColor) == typeof(Gray) && typeof(TDepth) == typeof(UInt16))
            //{
            //   return new Bitmap(
            //      size.width,
            //      size.height,
            //      step,
            //      PixelFormat.Format16bppGrayScale;
            //      scan0);
            //}
         }

         System.Drawing.Imaging.PixelFormat format = System.Drawing.Imaging.PixelFormat.Undefined;

         if (srcColorType == typeof(Gray)) // if this is a gray scale image
         {
            format = System.Drawing.Imaging.PixelFormat.Format8bppIndexed;
         }
         else if (srcColorType == typeof(Bgra)) //if this is Bgra image
         {
            format = System.Drawing.Imaging.PixelFormat.Format32bppArgb;
         }
         else if (srcColorType == typeof(Bgr))  //if this is a Bgr Byte image
         {
            format = System.Drawing.Imaging.PixelFormat.Format24bppRgb;
         }
         else
         {
            using (Mat m = new Mat(size.Height, size.Width, CvInvoke.GetDepthType(srcDepthType), numberOfChannels, scan0, step))
            using (Mat m2 = new Mat())
            {
               CvInvoke.CvtColor(m, m2, srcColorType, typeof(Bgr));
               return RawDataToBitmap(m2.DataPointer, m2.Step, m2.Size, typeof(Bgr), 3, srcDepthType, false);
            }
         }

         Bitmap bmp = new Bitmap(size.Width, size.Height, format);
         System.Drawing.Imaging.BitmapData data = bmp.LockBits(
             new Rectangle(Point.Empty, size),
              System.Drawing.Imaging.ImageLockMode.WriteOnly,
             format);
         using (Mat bmpMat = new Mat(size.Height, size.Width, CvEnum.DepthType.Cv8U, numberOfChannels, data.Scan0, data.Stride))
         using (Mat dataMat = new Mat(size.Height, size.Width, CvInvoke.GetDepthType(srcDepthType), numberOfChannels, scan0, step))
         {
            if (srcDepthType == typeof(Byte))
               dataMat.CopyTo(bmpMat);
            else
            {

               double scale = 1.0, shift = 0.0;
               RangeF range = dataMat.GetValueRange();
               if (range.Max > 255.0 || range.Min < 0)
               {
                  scale = (range.Max == range.Min) ? 0.0 : 255.0 / (range.Max - range.Min);
                  shift = (scale == 0) ? range.Min : -range.Min * scale;
               }
               CvInvoke.ConvertScaleAbs(dataMat, bmpMat, scale, shift);
            }
         }
         bmp.UnlockBits(data);

         if (format == System.Drawing.Imaging.PixelFormat.Format8bppIndexed)
            bmp.Palette = CvToolbox.GrayscalePalette;
         return bmp;
      }
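
A short usage sketch for RawDataToBitmap, assuming the caller already holds a Bgr byte Mat; DataPointer and Step are used the same way as in the fallback branch above, and the file name is a placeholder:

      Mat frame = CvInvoke.Imread("frame.png", ImreadModes.Color); // placeholder file name
      Bitmap bmp = RawDataToBitmap(
         frame.DataPointer, frame.Step, frame.Size,
         typeof(Bgr), frame.NumberOfChannels, typeof(byte),
         tryDataSharing: false);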
Example #6
      /// <summary>
      /// Retrieves contours from the binary image as a contour tree and returns the contour hierarchy as an int[,].
      /// The function modifies the source image content.
      /// </summary>
      /// <param name="image">The source 8-bit single channel image. Non-zero pixels are treated as 1s, zero pixels remain 0s - that is image treated as binary. To get such a binary image from grayscale, one may use cvThreshold, cvAdaptiveThreshold or cvCanny. The function modifies the source image content</param>
      /// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
      /// <param name="method">Approximation method (for all the modes, except CV_RETR_RUNS, which uses built-in approximation). </param>
      /// <param name="offset">Offset, by which every contour point is shifted. This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context</param>
      /// <returns>The contour hierarchy</returns>
      public static int[,] FindContourTree(
         IInputOutputArray image, IOutputArray contours,
         CvEnum.ChainApproxMethod method,
         Point offset = new Point())
      {
         using (Mat hierachyMat = new Mat())
         {
            FindContours(image, contours, hierachyMat, RetrType.Tree, method, offset);
            int[,] hierachy = new int[hierachyMat.Cols, 4];
            GCHandle handle = GCHandle.Alloc(hierachy, GCHandleType.Pinned);
            using (Mat tmp = new Mat(hierachyMat.Rows, hierachyMat.Cols, hierachyMat.Depth, 4, handle.AddrOfPinnedObject(), hierachyMat.Step))
            {
               hierachyMat.CopyTo(tmp);
            }
            handle.Free();
            return hierachy;
         }

      }
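
A minimal sketch of calling the wrapper above on a thresholded image; each hierarchy row follows the usual OpenCV convention of [next, previous, first child, parent] indices:

      Mat gray = CvInvoke.Imread("shapes.png", ImreadModes.Grayscale); // placeholder file name
      using (Mat binary = new Mat())
      using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
      {
         CvInvoke.Threshold(gray, binary, 128, 255, ThresholdType.Binary);
         int[,] hierarchy = CvInvoke.FindContourTree(binary, contours, ChainApproxMethod.ChainApproxSimple);
         for (int i = 0; i < contours.Size; i++)
         {
            int parent = hierarchy[i, 3]; // -1 means contour i is a top-level contour
         }
      }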
Example #7
        private void ProcessFrame(object sender, EventArgs e)
        {
            Mat image = new Mat();
            Mat diffImage = new Mat();
            capture.Retrieve(image);
            if (lastImage != null)
            {
                CvInvoke.AbsDiff(image, lastImage, diffImage);
            }
            Image<Gray, byte> mask = new Image<Gray, byte>(image.Width, image.Height);
            if (lastImage != null)
            {
                VectorOfPoint vp = new VectorOfPoint(RegionOfInterestPoints.ToArray());
                CvInvoke.Polylines(image, vp, true, new Bgr(0, 0, 255).MCvScalar, 2);
                if (vp.Size >= 3)
                {
                    CvInvoke.FillConvexPoly(mask, vp, new MCvScalar(255));

                    overlayImage = new Mat((int)lastImage.Height, (int)lastImage.Width, DepthType.Cv8U, 3);
                    diffImage.CopyTo(overlayImage, mask);

                    byte[] data = new byte[overlayImage.Width * overlayImage.Height * 3];

                    GCHandle handle = GCHandle.Alloc(data, GCHandleType.Pinned);
                    using (Mat m2 = new Mat(overlayImage.Size, DepthType.Cv8U, 3, handle.AddrOfPinnedObject(), overlayImage.Width * 3))
                        CvInvoke.BitwiseNot(overlayImage, m2);
                    handle.Free();

                    CheckTrigger(data, overlayImage.Width, overlayImage.Height);
                }
            }
            if (FrameCallback != null)
            {
                FrameCallback.FrameUpdate(
                        image,
                        overlayImage
                    );
            }

            lastImage = image;
        }
Example #8
        private void ProcessFrame()
        {
            try
            {
                #region Background/Foreground
                Image<Bgr, byte> difference = BackgroundSubstractionOptions.Substract(_currentFrame, _frameHistoryBuffer);

                Rectangle? handArea = ForegoundExtractionOptions.HighlightForeground(difference);
                Image<Bgr, byte> skinDetectionFrame = _currentFrame.Copy();

                if (handArea.HasValue)
                    ForegoundExtractionOptions.CutBackground(skinDetectionFrame, handArea.Value);
                #endregion

                #region Skin filtering / Morphological / Smooth filtering
                Image<Gray, byte> skinDetectionFrameGray = SkinFilteringOptions.ActiveItem.FilterFrame(skinDetectionFrame);

                MorphologicalFilteringOptions.StackSync.EnterReadLock();
                foreach (var operation in MorphologicalFilteringOptions.OperationStack)
                {
                    if (operation.FilterType == Model.Enums.MorphologicalFilterType.Dilatation)
                    {
                        CvInvoke.Dilate(skinDetectionFrameGray, skinDetectionFrameGray, operation.GetKernel(),
                            new Point(operation.KernelAnchorX, operation.KernelAnchorY), operation.Intensity, operation.KernelBorderType,
                            new MCvScalar(operation.KernelBorderThickness));
                    }
                    else
                    {
                        CvInvoke.Erode(skinDetectionFrameGray, skinDetectionFrameGray, operation.GetKernel(),
                            new Point(operation.KernelAnchorX, operation.KernelAnchorY), operation.Intensity, operation.KernelBorderType,
                            new MCvScalar(operation.KernelBorderThickness));
                    }
                }
                MorphologicalFilteringOptions.StackSync.ExitReadLock();

                skinDetectionFrameGray = SmoothFilteringOptions.FilterFrame(skinDetectionFrameGray);
                #endregion

                #region Contours / ConvexHull / ConvexityDefects
                Image<Bgr, byte> fingerTrackerFrame = _currentFrame.Copy();

                List<Point> fingers = new List<Point>();

                using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                {
                    CvInvoke.FindContours(skinDetectionFrameGray.Copy(), contours, null, RetrType.List, FingerTrackingOptions.ApproxMethod);

                    if (contours.Size > 0)
                    {
                        VectorOfPoint biggestContour = contours[0];

                        if (contours.Size > 1)
                        {
                            for (int i = 1; i < contours.Size; i++)
                            {
                                if (CvInvoke.ContourArea(contours[i], false) > CvInvoke.ContourArea(biggestContour, false))
                                    biggestContour = contours[i];
                            }
                        }

                        if (CvInvoke.ContourArea(biggestContour, false) > FingerTrackingOptions.MinContourArea)
                        {
                            using (VectorOfPoint contour = biggestContour)
                            {
                                using (VectorOfPoint approxContour = new VectorOfPoint())
                                {
                                    CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * FingerTrackingOptions.PerimeterScalingFactor.Value, true);

                                    fingerTrackerFrame.Draw(approxContour.ToArray(), new Bgr(FingerTrackingOptions.ContourHighlightColor), 2);
                                    VectorOfPoint convexHull = new VectorOfPoint();
                                    VectorOfInt intHull = new VectorOfInt();
                                    CvInvoke.ConvexHull(approxContour, convexHull, FingerTrackingOptions.ConvexHullCW);
                                    CvInvoke.ConvexHull(approxContour, intHull, FingerTrackingOptions.ConvexHullCW);
                                    fingerTrackerFrame.DrawPolyline(convexHull.ToArray(), true, new Bgr(FingerTrackingOptions.ConvexHullColor), 2);

                                    var countourRect = CvInvoke.MinAreaRect(approxContour);
                                    fingerTrackerFrame.Draw(new CircleF(new PointF(countourRect.Center.X, countourRect.Center.Y), 3), new Bgr(FingerTrackingOptions.DefectLinesColor), 2);

                                    Mat defects = new Mat();
                                    CvInvoke.ConvexityDefects(approxContour, intHull, defects);

                                    if (!defects.IsEmpty)
                                    {
                                        var contourPoints = approxContour.ToArray();

                                        Matrix<int> m = new Matrix<int>(defects.Rows, defects.Cols, defects.NumberOfChannels);
                                        defects.CopyTo(m);

                                        for (int i = 0; i < m.Rows; i++)
                                        {
                                            int startIdx = m.Data[i, 0];
                                            int endIdx = m.Data[i, 1];
                                            int depthIdx = m.Data[i, 2];

                                            Point startPoint = contourPoints[startIdx];
                                            Point endPoint = contourPoints[endIdx];
                                            Point depthPoint = contourPoints[depthIdx];

                                            LineSegment2D startDepthLine = new LineSegment2D(startPoint, depthPoint);
                                            LineSegment2D depthEndLine = new LineSegment2D(depthPoint, endPoint);

                                            LineSegment2D startCenterLine = new LineSegment2D(startPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y));
                                            LineSegment2D depthCenterLine = new LineSegment2D(depthPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y));
                                            LineSegment2D endCenterLine = new LineSegment2D(endPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y));

                                            CircleF startCircle = new CircleF(startPoint, 5);
                                            CircleF depthCircle = new CircleF(depthPoint, 5);
                                            CircleF endCircle = new CircleF(endPoint, 5);

                                            if (startPoint.Y < countourRect.Center.Y)
                                                fingers.Add(startPoint);

                                            if (!FingerTrackingOptions.TrackOnlyControlPoint)
                                            {
                                                fingerTrackerFrame.Draw(startCircle, new Bgr(FingerTrackingOptions.DefectStartPointHighlightColor), 2);
                                                fingerTrackerFrame.Draw(depthCircle, new Bgr(FingerTrackingOptions.DefectDepthPointHighlightColor), 2);
                                                fingerTrackerFrame.Draw(endCircle, new Bgr(FingerTrackingOptions.DefectEndPointHighlightColor), 2);

                                                fingerTrackerFrame.Draw(startDepthLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                                //fingerTrackerFrame.Draw(depthEndLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);

                                                fingerTrackerFrame.Draw(startCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                                //fingerTrackerFrame.Draw(depthCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                               // fingerTrackerFrame.Draw(endCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                            }
                                        }

                                        _lastControlPoint = _currentControlPoint;
                                        _currentControlPoint = MouseControlOptions.UseHandCenter ? new Point((int)countourRect.Center.X, (int)countourRect.Center.Y)
                                                    : fingers.FirstOrDefault(f => f.Y == fingers.Min(line => line.Y));
                                        fingers.Clear();

                                        if (FingerTrackingOptions.TrackOnlyControlPoint)
                                        {
                                            fingerTrackerFrame = new Image<Bgr, byte>(fingerTrackerFrame.Width, fingerTrackerFrame.Height, new Bgr(Color.Black));
                                            fingerTrackerFrame.Draw(new CircleF(_currentControlPoint, 5), new Bgr(Color.Red), 2);
                                        }

                                    }
                                }
                            }
                        }
                    }
                }
                #endregion

                #region Mouse control
                if (_currentControlPoint.X != -1 && _currentControlPoint.Y != -1 && _lastControlPoint.X != -1 && _lastControlPoint.Y != -1
                         && _currentControlPoint.X != _lastControlPoint.X && _currentControlPoint.Y != _lastControlPoint.Y
                            && Math.Abs(_currentControlPoint.X - _lastControlPoint.X) < (MouseControlOptions.FrameWidth / 10)
                                 && Math.Abs(_currentControlPoint.Y - _lastControlPoint.Y) < (MouseControlOptions.FrameHeight / 10))
                {
                    int frameX = _currentControlPoint.X;
                    int frameY = _currentControlPoint.Y;

                    int moveX = _currentControlPoint.X - _lastControlPoint.X;
                    int moveY = _currentControlPoint.Y - _lastControlPoint.Y;

                    int sensitiveX = 1;
                    int sensitiveY = 1;

                    if (MouseControlOptions.MouseSensitive.Value > 0)
                    {
                        sensitiveX = (int)(((double)MouseControlOptions.ScreenWidth / MouseControlOptions.FrameWidth) * MouseControlOptions.MouseSensitive.Value);
                        sensitiveY = (int)(((double)MouseControlOptions.ScreenHeight / MouseControlOptions.FrameHeight) * MouseControlOptions.MouseSensitive.Value);
                    }
                    else if (MouseControlOptions.MouseSensitive.Value < 0)
                    {
                        sensitiveX = (int)(((double)MouseControlOptions.FrameWidth / MouseControlOptions.ScreenWidth) * MouseControlOptions.MouseSensitive.Value * -1);
                        sensitiveY = (int)(((double)MouseControlOptions.FrameHeight / MouseControlOptions.ScreenHeight) * MouseControlOptions.MouseSensitive.Value * -1);
                    }

                    moveX *= sensitiveX * -1;
                    moveY *= sensitiveY;

                    Point currentMousePosition = GetMousePosition();

                    int destinationX = currentMousePosition.X + moveX;
                    int destinationY = currentMousePosition.Y + moveY;

                    Messanger.PublishOnCurrentThread(new FingerMovedMessage(MouseControlOptions.ControlMouse, frameX, frameY, destinationX, destinationY));

                    if (MouseControlOptions.ControlMouse && MouseControlOptions.MouseSensitive.Value != 0 && destinationX >= 0 && destinationY >= 0)
                        SetCursorPos(destinationX, destinationY);
                }
                #endregion

                Messanger.PublishOnCurrentThread(new FrameProcessedMessage(_currentFrame, difference, skinDetectionFrameGray, fingerTrackerFrame));
            }
            catch { }
        }
Example #9
        public LaserTrackerResult UpdateFromFrame(Mat frame)
        {
            _timer.Reset();
            _timer.Start();
            Bitmap camBitmap, threshBitmap;

            var rects = new List<Rectangle>();
            using (var threshFrame = new Mat())
            {
                using (var hsvFrame = new Mat())
                {
                    using (var resizeFrame = new Mat())
                    {
                        var size = new Size(_width, _height);
                        CvInvoke.Resize(frame, resizeFrame, size);
                        if (_warp)
                        {
                            using (var warpedFrame = new Mat())
                            {
                                CvInvoke.WarpPerspective(resizeFrame, warpedFrame, _homographyMat, size);
                                warpedFrame.CopyTo(resizeFrame);
                            }
                        }
                        CvInvoke.CvtColor(resizeFrame, hsvFrame, ColorConversion.Bgr2Hsv);
                        camBitmap = resizeFrame.Bitmap.Clone(new Rectangle(0, 0, _width, _height), PixelFormat.Format32bppArgb);
                    }
                    float hueMin = _hueCenter - _hueWidth;
                    float hueMax = _hueCenter + _hueWidth;
                    HueThreshold(hueMin, hueMax, hsvFrame, threshFrame);
                    if (_dilate > 0)
                    {
                        CvInvoke.Dilate(threshFrame, threshFrame, null, new Point(-1, -1), _dilate, BorderType.Default, new MCvScalar());
                    }

                }
                threshBitmap = threshFrame.Bitmap.Clone(new Rectangle(0, 0, _width, _height), PixelFormat.Format32bppArgb);

                using (var dummyFrame = threshFrame.Clone())
                {
                    using (var contours = new VectorOfVectorOfPoint())
                    {
                        CvInvoke.FindContours(dummyFrame, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
                        for (var i = 0; i < contours.Size; i++)
                        {
                            var rect = CvInvoke.BoundingRectangle(contours[i]);
                            if (rect.Width*rect.Height < _minPixels) continue;
                            rects.Add(rect);
                        }
                    }
                }
            }
            rects.Sort((r1, r2) =>
            {
                var s1 = r1.Width * r1.Height;
                var s2 = r2.Width * r2.Height;
                return s1.CompareTo(s2);
            });
            return new LaserTrackerResult(camBitmap, threshBitmap, rects, _timer.Elapsed);
        }
Example #10
      public void TestConvexityDefacts()
      {
         Image<Bgr, Byte> image = new Image<Bgr, byte>(300, 300);
         Point[] polyline = new Point[] {
            new Point(10, 10),
            new Point(10, 250),
            new Point(100, 100),
            new Point(250, 250),
            new Point(250, 10)};
         using (VectorOfPoint vp = new VectorOfPoint(polyline))
         using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint(vp))
         using (VectorOfInt convexHull = new VectorOfInt())
         using (Mat convexityDefect = new Mat())
         {
            //Draw the contour in white thick line
            CvInvoke.DrawContours(image, contours, -1, new MCvScalar(255, 255, 255), 3);
            CvInvoke.ConvexHull(vp, convexHull);
            CvInvoke.ConvexityDefects(vp, convexHull, convexityDefect);

            //convexity defect is a four-channel mat with k rows and 1 column, where k = the number of convexity defects.
            if (!convexityDefect.IsEmpty)
            {
               //Data from Mat are not directly readable so we convert it to Matrix<>
               Matrix<int> m = new Matrix<int>(convexityDefect.Rows, convexityDefect.Cols,
                  convexityDefect.NumberOfChannels);
               convexityDefect.CopyTo(m);

               for (int i = 0; i < m.Rows; i++)
               {
                  int startIdx = m.Data[i, 0];
                  int endIdx = m.Data[i, 1];
                  Point startPoint = polyline[startIdx];
                  Point endPoint = polyline[endIdx];
                  //draw  a line connecting the convexity defect start point and end point in thin red line
                  CvInvoke.Line(image, startPoint, endPoint, new MCvScalar(0, 0, 255));
               }
            }

            //Emgu.CV.UI.ImageViewer.Show(image);
         }
      }
Example #11
      public void TestDenseHistogram2()
      {
         Mat img = new Mat(400, 400, DepthType.Cv8U, 3);
         CvInvoke.Randu(img, new MCvScalar(), new MCvScalar(255,255,255));
         Mat hist = new Mat();
         using (VectorOfMat vms = new VectorOfMat(img))
         {
            CvInvoke.CalcHist(vms, new int[] {0, 1, 2}, null, hist, new int[] {20, 20, 20},
               new float[] {0, 255, 0, 255, 0, 255}, true);
            byte[] bytes = hist.GetData();
            hist.SetTo(bytes);

            float[] bins = new float[20*20*20];
            hist.CopyTo(bins);
         }
      }
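
Once the bins array above has been filled, the flattened data can be indexed directly; a small fragment locating the most populated bin, assuming the dimensions follow the channel order (0, 1, 2) passed to CalcHist:

         int peak = 0;
         for (int i = 1; i < bins.Length; i++)
            if (bins[i] > bins[peak]) peak = i;
         int bin0 = peak / (20 * 20);   // channel 0 bin (blue for a Bgr image)
         int bin1 = (peak / 20) % 20;   // channel 1 bin
         int bin2 = peak % 20;          // channel 2 bin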
Example #12
        void detectHair(PersonFace personFace, Mat hairImage)
        {
            Rect faceRect = personFace.GetFace();

            double adjWidth = faceRect.width * 0.85;
            double adjHeight = faceRect.height * 1.2;
            double adjX = faceRect.x + (faceRect.width - adjWidth) / 2;
            double adjY = faceRect.y + (faceRect.height - adjHeight) / 2;

            Rect adjFaceRect = new Rect((int)adjX, (int)adjY, (int)adjWidth, (int)adjHeight);

            double[] faceLineData = personFace.GetFaceLineData();
            PointGenerator faceLine = new PointGenerator(faceLineData[0], faceLineData[1]);
            Point faceTopPoint = faceLine.GetFromY(adjFaceRect.y);
            Point faceBottomPoint = faceLine.GetFromY(adjFaceRect.y + adjFaceRect.height);

            //Draw face line
            //Imgproc.line(hairImage, faceTopPoint, faceBottomPoint, new Scalar(255, 0, 0), 2);

            //Get face feature angle
            double faceFeatureAngle = Math.Atan(faceLineData[0]);
            faceFeatureAngle = RadianToDegree(faceFeatureAngle);
            faceFeatureAngle += faceFeatureAngle > 0 ? -90 : 90;

            //Imgproc.rectangle(hairImage, adjFaceRect, new Scalar(0, 255, 255), 2);

            /*Imgproc.ellipse(hairImage,
                new Point(adjFaceRect.x + adjFaceRect.width / 2, adjFaceRect.y + adjFaceRect.height / 2),
                new Size(adjFaceRect.width/2, adjFaceRect.height/2), faceFeatureAngle, 0, 360, new Scalar(255, 0, 0), 2);

            Imgproc.ellipse(hairImage,
                new Point(adjFaceRect.x + adjFaceRect.width / 2, adjFaceRect.y + adjFaceRect.height / 2),
                new Size((int)(adjFaceRect.width / 1.8), (int)(adjFaceRect.height / 1.8)), faceFeatureAngle, 0, 360, new Scalar(255, 0, 0), 2);*/

            Mat[] imgComponents = hairImage.Split();

            for (int i = 0; i < 5; i++)
            {
                double factor = 1.8 - i * 0.2;

                Mat maskImage = new Image<Gray, byte>(hairImage.width(), hairImage.height(), new Gray(0)).Mat;

                Imgproc.ellipse(maskImage,
                    new Point(adjFaceRect.x + adjFaceRect.width / 2, adjFaceRect.y + adjFaceRect.height / 2),
                    new Size((int)(adjFaceRect.width / factor), (int)(adjFaceRect.height / factor)), faceFeatureAngle + 180, 0, 180, new Scalar(255), -1);

                Imgproc.ellipse(maskImage,
                    new Point(adjFaceRect.x + adjFaceRect.width / 2, adjFaceRect.y + adjFaceRect.height / 2),
                    new Size(adjFaceRect.width / 2, adjFaceRect.height / 2), faceFeatureAngle, 0, 360, new Scalar(0), -1);

                //imageBox13.Image = maskImage;

                Mat testImg = new Mat();

                hairImage.CopyTo(testImg, maskImage);

                imageBox13.Image = testImg;

                Stack<string> titleStack = new Stack<string>();
                titleStack.Push("Red");
                titleStack.Push("Green");
                titleStack.Push("Blue");

                HistogramForm histForm = new HistogramForm();

                foreach (Mat img in imgComponents)
                {
                    //try computing the histogram of only the upper half to detect the most probable hair color range

                    Mat hist = new Mat();
                    CvInvoke.CalcHist(new VectorOfMat(img), new int[] { 0 }, maskImage, hist, new int[] { 256 }, new float[] { 0, 255 }, false);

                    string color = titleStack.Pop();

                    histForm.AddHist(hist, color, System.Drawing.Color.FromName(color));

                    /*byte[] testBuffer = new byte[256];
                    hist.CopyTo(testBuffer);

                    string msg = "";

                    foreach (byte value in testBuffer)
                        msg += value + " ";

                    msg += Environment.NewLine;
                    msg += Environment.NewLine;

                    textBox1.AppendText(msg);*/

                }

                histForm.Show(i.ToString());

            }

            Image<Bgr, byte> testImg2 = new Image<Bgr, byte>(hairImage.Bitmap);

            imageBox13.Image = testImg2.InRange(new Bgr(25, 25, 25), new Bgr(100, 85, 100));

            //createHistogram(new Image<Bgr, byte>(maskImage.Bitmap), 256, "teste");

            /*Imgproc.ellipse(hairImage,
                new Point(adjFaceRect.x + adjFaceRect.width / 2, adjFaceRect.y + adjFaceRect.height / 2),
                new Size((int)(adjFaceRect.width / 1.6), (int)(adjFaceRect.height / 1.6)), faceFeatureAngle, 0, 360, new Scalar(255, 0, 0), 2);

            Imgproc.ellipse(hairImage,
                new Point(adjFaceRect.x + adjFaceRect.width / 2, adjFaceRect.y + adjFaceRect.height / 2),
                new Size((int)(adjFaceRect.width / 1.4), (int)(adjFaceRect.height / 1.4)), faceFeatureAngle, 0, 360, new Scalar(255, 0, 0), 2);

            Imgproc.ellipse(hairImage,
                new Point(adjFaceRect.x + adjFaceRect.width / 2, adjFaceRect.y + adjFaceRect.height / 2),
                new Size((int)(adjFaceRect.width / 1.2), (int)(adjFaceRect.height / 1.2)), faceFeatureAngle, 0, 360, new Scalar(255, 0, 0), 2);

            Imgproc.ellipse(hairImage,
                new Point(adjFaceRect.x + adjFaceRect.width / 2, adjFaceRect.y + adjFaceRect.height / 2),
                new Size((int)(adjFaceRect.width / 1), (int)(adjFaceRect.height / 1)), faceFeatureAngle, 0, 360, new Scalar(255, 0, 0), 2);*/
        }
Example #13
         /// <summary>
         /// Filter the matched features such that a match that is not unique is rejected.
         /// </summary>
         /// <param name="uniquenessThreshold">The distance ratio below which a match is considered unique; a good value is 0.8</param>
         /// <param name="mask">This is both input and output. This matrix indicates which rows are valid for the matches.</param>
         /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
         public static void VoteForUniqueness(VectorOfVectorOfDMatch matches, double uniquenessThreshold, Mat mask)
         {
            MDMatch[][] mArr = matches.ToArrayOfArray();
            byte[] maskData = new byte[mArr.Length];
            GCHandle maskHandle = GCHandle.Alloc(maskData, GCHandleType.Pinned);
            using (Mat m = new Mat(mArr.Length, 1, DepthType.Cv8U, 1, maskHandle.AddrOfPinnedObject(), 1))
            {
               mask.CopyTo(m);
               for (int i = 0; i < mArr.Length; i++)
               {
                  if (maskData[i] != 0 && (mArr[i][0].Distance / mArr[i][1].Distance) <= uniquenessThreshold)
                  {
                     maskData[i] = (byte)255;
                  }
               }

               m.CopyTo(mask);
            }
            maskHandle.Free();

         }
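
A sketch of how this helper is typically invoked, assuming a brute-force k-nearest-neighbour matcher has already produced two candidates per query descriptor (k = 2), that modelDescriptors and observedDescriptors exist, and that the distance type matches the descriptor (Hamming for binary descriptors such as ORB); the mask is initialised to 255 and updated in place:

         using (BFMatcher matcher = new BFMatcher(DistanceType.Hamming))
         using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
         {
            matcher.Add(modelDescriptors);                  // modelDescriptors / observedDescriptors are assumed to exist
            matcher.KnnMatch(observedDescriptors, matches, 2, null);

            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            VoteForUniqueness(matches, 0.8, mask);          // mask rows indicate which matches remain valid
         }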
Example #14
      public void TestConvecityDefect()
      {
         Mat frame = EmguAssert.LoadMat("lena.jpg");
         using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
         using (Image<Gray, byte> canny = frame.ToImage<Gray, byte>())
         {
            IOutputArray hierarchy = null;
            CvInvoke.FindContours(canny, contours, hierarchy, RetrType.List, ChainApproxMethod.ChainApproxSimple);

            for (int i = 0; i < contours.Size; i++)
            {
               CvInvoke.ApproxPolyDP(contours[i], contours[i], 5, false);
               using (VectorOfInt hull = new VectorOfInt())
               using (Mat defects = new Mat())
               using (VectorOfPoint c = contours[i])
               {
                  CvInvoke.ConvexHull(c, hull, false, false);
                  CvInvoke.ConvexityDefects(c, hull, defects);
                  if (!defects.IsEmpty)
                  {
                     using (Matrix<int> value = new Matrix<int>(defects.Rows, defects.Cols, defects.NumberOfChannels))
                     {
                        defects.CopyTo(value);
                        //you can iterate through the defect here:
                        for (int j = 0; j < value.Rows; j++)
                        {
                           int startIdx = value.Data[j, 0];
                           int endIdx = value.Data[j, 1];
                           int farthestPtIdx = value.Data[j, 2];
                           double fixPtDepth = value.Data[j, 3]/256.0;
                           
                        }
                     }
                  }
               }
            }
         }
      }
Example #15
      private void loadImageButton_Click(object sender, EventArgs e)
      {
         if (openImageFileDialog.ShowDialog() == System.Windows.Forms.DialogResult.OK)
         {
            fileNameTextBox.Text = openImageFileDialog.FileName;
            imageBox1.Image = null;
            ocrTextBox.Text = String.Empty;
            hocrTextBox.Text = String.Empty;

            Bgr drawCharColor = new Bgr(Color.Blue);
            try
            {
               Mat image = new Mat(openImageFileDialog.FileName, ImreadModes.AnyColor);

               Mat imageColor = new Mat();
               if (image.NumberOfChannels == 1)
                  CvInvoke.CvtColor(image, imageColor, ColorConversion.Gray2Bgr);
               else
                  image.CopyTo(imageColor);

               if (Mode == OCRMode.FullPage)
               {
                  _ocr.Recognize(image);
                  Tesseract.Character[] characters = _ocr.GetCharacters();
                  if (characters.Length == 0)
                  {
                     Mat imgGrey = new Mat();
                     CvInvoke.CvtColor(image, imgGrey, ColorConversion.Bgr2Gray);
                     Mat imgThresholded = new Mat();
                     CvInvoke.Threshold(imgGrey, imgThresholded,65, 255, ThresholdType.Binary);
                     _ocr.Recognize(imgThresholded);
                     characters = _ocr.GetCharacters();
                     imageColor = imgThresholded;
                     if (characters.Length == 0)
                     {
                        CvInvoke.Threshold(image, imgThresholded, 190, 255, ThresholdType.Binary);
                        _ocr.Recognize(imgThresholded);
                        characters = _ocr.GetCharacters();
                        imageColor = imgThresholded;
                     }
                  }
                  foreach (Tesseract.Character c in characters)
                  {
                     CvInvoke.Rectangle(imageColor, c.Region, drawCharColor.MCvScalar);
                  }

                  imageBox1.Image = imageColor;

                  String text = _ocr.GetText();
                  ocrTextBox.Text = text;
                  String hocrText = _ocr.GetHOCRText();
                  hocrTextBox.Text = hocrText;
               }
               else
               {
                  bool checkInvert = true;

                  Rectangle[] regions;

                  using (ERFilterNM1 er1 = new ERFilterNM1("trained_classifierNM1.xml", 8, 0.00025f, 0.13f, 0.4f, true, 0.1f))
                  using (ERFilterNM2 er2 = new ERFilterNM2("trained_classifierNM2.xml", 0.3f))
                  {
                     int channelCount = image.NumberOfChannels;
                     UMat[] channels = new UMat[checkInvert ? channelCount * 2 : channelCount];

                     for (int i = 0; i < channelCount; i++)
                     {
                        UMat c = new UMat();
                        CvInvoke.ExtractChannel(image, c, i);
                        channels[i] = c;
                     }

                     if (checkInvert)
                     {
                        for (int i = 0; i < channelCount; i++)
                        {
                           UMat c = new UMat();
                           CvInvoke.BitwiseNot(channels[i], c);
                           channels[i + channelCount] = c;
                        }
                     }

                     VectorOfERStat[] regionVecs = new VectorOfERStat[channels.Length];
                     for (int i = 0; i < regionVecs.Length; i++)
                        regionVecs[i] = new VectorOfERStat();

                     try
                     {
                        for (int i = 0; i < channels.Length; i++)
                        {
                           er1.Run(channels[i], regionVecs[i]);
                           er2.Run(channels[i], regionVecs[i]);
                        }
                        using (VectorOfUMat vm = new VectorOfUMat(channels))
                        {
                           regions = ERFilter.ERGrouping(image, vm, regionVecs, ERFilter.GroupingMethod.OrientationHoriz, "trained_classifier_erGrouping.xml", 0.5f);
                        }
                     }
                     finally
                     {
                        foreach (UMat tmp in channels)
                           if (tmp != null)
                              tmp.Dispose();
                        foreach (VectorOfERStat tmp in regionVecs)
                           if (tmp != null)
                              tmp.Dispose();
                     }

                     Rectangle imageRegion = new Rectangle(Point.Empty, imageColor.Size);
                     for (int i = 0; i < regions.Length; i++)
                     {
                        Rectangle r = ScaleRectangle( regions[i], 1.1);
                        
                        r.Intersect(imageRegion);
                        regions[i] = r;
                     }
                     
                  }

                  
                  List<Tesseract.Character> allChars = new List<Tesseract.Character>();
                  String allText = String.Empty;
                  foreach (Rectangle rect in regions)
                  {  
                     using (Mat region = new Mat(image, rect))
                     {
                        _ocr.Recognize(region);
                        Tesseract.Character[] characters = _ocr.GetCharacters();
                        
                        //convert the coordinates from the local region to global
                        for (int i = 0; i < characters.Length; i++)
                        {
                           Rectangle charRegion = characters[i].Region;
                           charRegion.Offset(rect.Location);
                           characters[i].Region = charRegion;
                           
                        }
                        allChars.AddRange(characters);
       
                        allText += _ocr.GetText() + Environment.NewLine;

                     }
                  }

                  Bgr drawRegionColor = new Bgr(Color.Red);
                  foreach (Rectangle rect in regions)
                  {
                     CvInvoke.Rectangle(imageColor, rect, drawRegionColor.MCvScalar);
                  }
                  foreach (Tesseract.Character c in allChars)
                  {
                     CvInvoke.Rectangle(imageColor, c.Region, drawCharColor.MCvScalar);
                  }
                  imageBox1.Image = imageColor;
                  ocrTextBox.Text = allText;

               }
            }
            catch (Exception exception)
            {
               MessageBox.Show(exception.Message);
            }
         }
      }