Example #1
1
        /// <summary>
        /// Find the contour with the largest area in a canny edge image and draw it
        /// on <paramref name="result"/> (red, thickness 3).
        /// </summary>
        /// <param name="cannyEdges">The canny edge image to extract contours from.</param>
        /// <param name="result">The image the largest contour is drawn on.</param>
        /// <returns>A standalone copy of the largest contour's points, or null if no contour was found.</returns>
        public static VectorOfPoint FindLargestContour(IInputOutputArray cannyEdges, IInputOutputArray result)
        {
            using (Mat hierarchy = new Mat())
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                CvInvoke.FindContours(cannyEdges, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxNone);

                // The original indexed contours[0] even when no contour was found, which throws.
                if (contours.Size == 0)
                    return null;

                // Single pass to locate the contour with the largest area.
                int largestContourIndex = 0;
                double largestArea = 0;
                for (int i = 0; i < contours.Size; i++)
                {
                    double area = CvInvoke.ContourArea(contours[i], false);
                    if (area > largestArea)
                    {
                        largestArea = area;
                        largestContourIndex = i;
                    }
                }

                // Draw once, after the winner is known (the original redundantly drew the
                // interim "largest so far" contour on every loop iteration).
                CvInvoke.DrawContours(result, contours, largestContourIndex, new MCvScalar(0, 0, 255), 3, LineType.EightConnected, hierarchy);

                // Copy the points out so the returned vector outlives the disposal of 'contours'.
                return new VectorOfPoint(contours[largestContourIndex].ToArray());
            }
        }
      /// <summary>
      /// Run license plate detection on the given image, show the recognition time and the
      /// recognized plates in the UI, and outline each detected plate on the image in red.
      /// </summary>
      /// <param name="image">The image to detect license plates in; detected plate regions are drawn onto it.</param>
      private void ProcessImage(IInputOutputArray image)
      {
         Stopwatch watch = Stopwatch.StartNew(); // time the detection process

         // Output collections populated by the detector: the plate sub-images, their
         // filtered versions, and the rotated rectangles locating each plate.
         List<IInputOutputArray> licensePlateImagesList = new List<IInputOutputArray>();
         List<IInputOutputArray> filteredLicensePlateImagesList = new List<IInputOutputArray>();
         List<RotatedRect> licenseBoxList = new List<RotatedRect>();
         List<string> words = _licensePlateDetector.DetectLicensePlate(
            image,
            licensePlateImagesList,
            filteredLicensePlateImagesList,
            licenseBoxList);

         watch.Stop(); //stop the timer
         processTimeLabel.Text = String.Format("License Plate Recognition time: {0} milli-seconds", watch.Elapsed.TotalMilliseconds);

         // Rebuild the results panel from scratch for this run.
         panel1.Controls.Clear();
         Point startPoint = new Point(10, 10);
         for (int i = 0; i < words.Count; i++)
         {
            // Stack the plate image on top of its filtered version into a single image.
            // NOTE(review): 'dest' is never disposed here; presumably AddLabelAndImage keeps
            // using it after this method returns — confirm ownership before adding a 'using'.
            Mat dest = new Mat();
            CvInvoke.VConcat(licensePlateImagesList[i], filteredLicensePlateImagesList[i], dest);
            AddLabelAndImage(
               ref startPoint,
               String.Format("License: {0}", words[i]),
               dest);
            // Outline the plate location on the source image.
            PointF[] verticesF = licenseBoxList[i].GetVertices();
            Point[] vertices = Array.ConvertAll(verticesF, Point.Round);
            using(VectorOfPoint pts = new VectorOfPoint(vertices))
               CvInvoke.Polylines(image, pts, true, new Bgr(Color.Red).MCvScalar,2  );
            
         }

      }
Example #3
0
      /// <summary>
      /// Compute the red pixel mask for the given image.
      /// A red pixel is a pixel where: hue &lt; 20 OR hue &gt; 160, AND saturation &gt;= 10.
      /// (The previous doc stated the inverse hue range; the code selects [20, 160] and inverts it.)
      /// </summary>
      /// <param name="image">The color (BGR) image to find the red mask from</param>
      /// <param name="mask">The red pixel mask</param>
      private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
      {
         // Match the buffer type of the destination so the work stays on the same backend.
         bool useUMat;
         using (InputOutputArray ia = mask.GetInputOutputArray())
            useUMat = ia.IsUMat;

         using (IImage hsv = useUMat ? (IImage)new UMat() : (IImage)new Mat())
         using (IImage s = useUMat ? (IImage)new UMat() : (IImage)new Mat())
         {
            CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
            CvInvoke.ExtractChannel(hsv, mask, 0); // hue channel, reusing 'mask' as scratch
            CvInvoke.ExtractChannel(hsv, s, 1);    // saturation channel

            // Select hue in [20, 160] (the NON-red hues), then invert to get hue < 20 or hue > 160.
            using (ScalarArray lower = new ScalarArray(20))
            using (ScalarArray upper = new ScalarArray(160))
               CvInvoke.InRange(mask, lower, upper, mask);
            CvInvoke.BitwiseNot(mask, mask);

            // The saturation threshold filters out white/grey pixels whose hue is arbitrary.
            CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
            CvInvoke.BitwiseAnd(mask, s, mask, null);
         }
      }
Example #4
0
 /// <summary>
 /// Calculates a dense optical flow.
 /// </summary>
 /// <param name="opticalFlow">The dense optical flow object</param>
 /// <param name="i0">First 8-bit single-channel input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as <paramref name="i0"/>.</param>
 /// <param name="flow">Computed flow image that has the same size as the input and type CV_32FC2</param>
 public static void Calc(this IDenseOpticalFlow opticalFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow)
 {
    using (InputArray first = i0.GetInputArray())
    {
       using (InputArray second = i1.GetInputArray())
       using (InputOutputArray flowArr = flow.GetInputOutputArray())
       {
          CvInvoke.cveDenseOpticalFlowCalc(opticalFlow.DenseOpticalFlowPtr, first, second, flowArr);
       }
    }
 }
 /// <summary>
 /// Calculates a dense optical flow using a CUDA dense optical flow object.
 /// </summary>
 /// <param name="denseFlow">The CUDA dense optical flow object</param>
 /// <param name="i0">First input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as <paramref name="i0"/>.</param>
 /// <param name="flow">Computed flow image; same size as the input — presumably type CV_32FC2 as in the CPU variant, confirm against the native API.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaDenseOpticalFlow denseFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow, Stream stream = null)
 {
    using (InputArray iaI0 = i0.GetInputArray())
    using (InputArray iaI1 = i1.GetInputArray())
    using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
       cudaDenseOpticalFlowCalc(denseFlow.DenseOpticalFlowPtr, iaI0, iaI1, ioaFlow, (stream == null) ?  IntPtr.Zero : stream.Ptr);
 }
 /// <summary>
 /// Calculates a sparse optical flow.
 /// </summary>
 /// <param name="sparseFlow">The sparse optical flow</param>
 /// <param name="prevImg">First input image.</param>
 /// <param name="nextImg">Second input image of the same size and the same type as <paramref name="prevImg"/>.</param>
 /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
 /// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">Output status vector: 1 where the flow for the corresponding feature was found, 0 otherwise.</param>
 /// <param name="err">Optional output vector that contains error response for each point (inverse confidence).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaSparseOpticalFlow sparseFlow, IInputArray prevImg, IInputArray nextImg, IInputArray prevPts, IInputOutputArray nextPts, IOutputArray status = null, IOutputArray err = null, Stream stream = null)
 {
    using (InputArray prevImgArr = prevImg.GetInputArray())
    using (InputArray nextImgArr = nextImg.GetInputArray())
    using (InputArray prevPtsArr = prevPts.GetInputArray())
    using (InputOutputArray nextPtsArr = nextPts.GetInputOutputArray())
    using (OutputArray statusArr = status == null ? OutputArray.GetEmpty() : status.GetOutputArray())
    using (OutputArray errArr = err == null ? OutputArray.GetEmpty() : err.GetOutputArray())
    {
       IntPtr streamPtr = stream == null ? IntPtr.Zero : stream.Ptr;
       cudaSparseOpticalFlowCalc(sparseFlow.SparseOpticalFlowPtr, prevImgArr, nextImgArr, prevPtsArr, nextPtsArr, statusArr, errArr, streamPtr);
    }
 }
 /// <summary>
 /// Draw the keypoints found on the image.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="keypoints">The keypoints to be drawn</param>
 /// <param name="outImage">The image with the keypoints drawn</param>
 /// <param name="color">The color used to draw the keypoints</param>
 /// <param name="type">The drawing type</param>
 public static void DrawKeypoints(
    IInputArray image,
    VectorOfKeyPoint keypoints,
    IInputOutputArray outImage,
    Bgr color,
    Features2DToolbox.KeypointDrawType type)
 {
    // The native call takes the color by reference, so it needs a local.
    MCvScalar scalar = color.MCvScalar;
    using (InputArray src = image.GetInputArray())
    {
       using (InputOutputArray dst = outImage.GetInputOutputArray())
       {
          CvInvoke.drawKeypoints(src, keypoints, dst, ref scalar, type);
       }
    }
 }
Example #8
0
 /// <summary>
 /// Draws a line segment connecting two points on the image.
 /// </summary>
 /// <param name="image">The image on which to draw</param>
 /// <param name="start">First point of the line segment</param>
 /// <param name="end">Second point of the line segment</param>
 /// <param name="color">The color of the line</param>
 /// <param name="thickness">The thickness of the line</param>
 /// <param name="lineType">The type of the line</param>
 /// <param name="shift">Number of fractional bits in the point coordinates</param>
 public static void DrawLine(IInputOutputArray image,
     Point start,
     Point end,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     int shift = 0)
 {
     using (InputOutputArray target = image.GetInputOutputArray())
         cveLine(target, ref start, ref end, ref color, thickness, lineType, shift);
 }
Example #9
0
 /// <summary>
 /// Draws a circle with the given center and radius on the image.
 /// </summary>
 /// <param name="image">The image on which to draw</param>
 /// <param name="center">The center of the circle</param>
 /// <param name="radius">The radius of the circle</param>
 /// <param name="color">The color of the circle</param>
 /// <param name="thickness">The thickness of the circle outline</param>
 /// <param name="lineType">The type of the circle boundary</param>
 /// <param name="shift">Number of fractional bits in the center coordinates and radius value</param>
 public static void DrawCircle(IInputOutputArray image,
     Point center,
     int radius,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     int shift = 0)
 {
     using (InputOutputArray target = image.GetInputOutputArray())
         cveCircle(target, ref center, radius, ref color, thickness, lineType, shift);
 }
Example #10
0
 /// <summary>
 /// Given the pose estimation of a marker or board, draws the axis of the world coordinate
 /// system, i.e. the system centered on the marker/board. Useful for debugging purposes.
 /// </summary>
 /// <param name="image">input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="cameraMatrix">input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvec">rotation vector of the coordinate system that will be drawn.</param>
 /// <param name="tvec">translation vector of the coordinate system that will be drawn.</param>
 /// <param name="length">length of the painted axis in the same unit than tvec (usually in meters)</param>
 public static void DrawAxis(
    IInputOutputArray image, IInputArray cameraMatrix, IInputArray distCoeffs,
    IInputArray rvec, IInputArray tvec, float length)
 {
    using (InputOutputArray img = image.GetInputOutputArray())
    using (InputArray camMat = cameraMatrix.GetInputArray())
    using (InputArray dist = distCoeffs.GetInputArray())
    using (InputArray rotation = rvec.GetInputArray())
    using (InputArray translation = tvec.GetInputArray())
       cveArucoDrawAxis(img, camMat, dist, rotation, translation, length);
 }
Example #11
0
        /// <summary>
        /// Find the contours in a canny edge image, draw each contour's minimum-area bounding
        /// rectangle on <paramref name="result"/> (red, thickness 3), and return all contours.
        /// </summary>
        /// <param name="cannyEdges">The canny edge image to extract contours from.</param>
        /// <param name="result">The image the bounding rectangles are drawn on.</param>
        /// <param name="areaSize">Minimum contour area; kept for interface compatibility (the
        /// rectangle-angle filtering that used it was dead code and has been removed).</param>
        /// <returns>The detected contours. The caller owns the returned vector and should dispose it.</returns>
        public static VectorOfVectorOfPoint FindRectangle(IInputOutputArray cannyEdges, IInputOutputArray result, int areaSize = 250)
        {
            // BUG FIX: the original allocated 'contours' in a using block and returned it,
            // handing the caller an already-disposed object. The caller owns it now.
            VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
            CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);

            int count = contours.Size;
            for (int i = 0; i < count; i++)
            {
                // Draw the minimum-area bounding rectangle of every contour.
                var rect = CvInvoke.MinAreaRect(contours[i]).MinAreaRect();
                CvInvoke.Rectangle(result, rect, new MCvScalar(0, 0, 255), 3);
            }

            // The original also approximated each contour and computed an 'isRectangle'
            // flag whose only consumer was commented out; that dead code is removed.
            return contours;
        }
Example #12
0
        /// <summary>
        /// Draws the full ellipse described by a <see cref="RotatedRect"/> on the image.
        /// </summary>
        /// <param name="image">The image on which to draw</param>
        /// <param name="box">The rotated rectangle bounding the ellipse</param>
        /// <param name="color">The color of the ellipse</param>
        /// <param name="thickness">Line thickness</param>
        /// <param name="lineType">The type of the line</param>
        /// <param name="shift">Number of fractional bits in the coordinates</param>
        public static void DrawEllipse(IInputOutputArray image,
            RotatedRect box,
            MCvScalar color,
            int thickness = 1,
            LineType lineType = LineType.EightConnected,
            int shift = 0)
        {
            // Half-axes are deliberately swapped (width from Size.Height, height from Size.Width),
            // preserved from the original — presumably to match the RotatedRect angle convention.
            Size halfAxes = new Size(
                (int)Math.Round(box.Size.Height * 0.5F),
                (int)Math.Round(box.Size.Width * 0.5F));
            Point midPoint = Point.Round(box.Center);

            // Delegate to the angle-range overload, sweeping the full 0..360 degrees.
            DrawEllipse(image, midPoint, halfAxes, box.Angle, 0.0D, 360.0D, color, thickness, lineType, shift);
        }
Example #13
0
 /// <summary>
 /// The grab cut algorithm for segmentation
 /// </summary>
 /// <param name="img">The 8-bit 3-channel image to be segmented</param>
 /// <param name="mask">Input/output 8-bit single-channel mask. The mask is initialized by the function
 /// when mode is set to GC_INIT_WITH_RECT. Its elements may have one of following values:
 /// 0 (GC_BGD) defines an obvious background pixels.
 /// 1 (GC_FGD) defines an obvious foreground (object) pixel.
 /// 2 (GC_PR_BGR) defines a possible background pixel.
 /// 3 (GC_PR_FGD) defines a possible foreground pixel.
 /// </param>
 /// <param name="rect">The rectangle to initialize the segmentation</param>
 /// <param name="bgdModel">Temporary array for the background model. Do not modify it while you are processing the same image.</param>
 /// <param name="fgdModel">Temporary arrays for the foreground model. Do not modify it while you are processing the same image.</param>
 /// <param name="iterCount">The number of iterations</param>
 /// <param name="type">The initialization type</param>
 public static void GrabCut(
    IInputArray img,
    IInputOutputArray mask,
    Rectangle rect,
    IInputOutputArray bgdModel,
    IInputOutputArray fgdModel,
    int iterCount,
    CvEnum.GrabcutInitType type)
 {
    using (InputArray image = img.GetInputArray())
    {
       // A null mask is passed down as the native "empty" placeholder.
       using (InputOutputArray maskArr = mask == null ? InputOutputArray.GetEmpty() : mask.GetInputOutputArray())
       using (InputOutputArray bgd = bgdModel.GetInputOutputArray())
       using (InputOutputArray fgd = fgdModel.GetInputOutputArray())
       {
          cveGrabCut(image, maskArr, ref rect, bgd, fgd, iterCount, type);
       }
    }
 }
Example #14
0
 /// <summary>
 /// Draws an elliptic arc on the image.
 /// </summary>
 /// <param name="image">The image on which to draw</param>
 /// <param name="center">The center of the ellipse</param>
 /// <param name="axes">Half of the size of the ellipse main axes</param>
 /// <param name="angle">The rotation angle of the ellipse in degrees</param>
 /// <param name="startAngle">Starting angle of the elliptic arc in degrees</param>
 /// <param name="endAngle">Ending angle of the elliptic arc in degrees</param>
 /// <param name="color">The color of the ellipse</param>
 /// <param name="thickness">Line thickness</param>
 /// <param name="lineType">The type of the line</param>
 /// <param name="shift">Number of fractional bits in the coordinates</param>
 public static void DrawEllipse(IInputOutputArray image,
     Point center,
     Size axes,
     double angle,
     double startAngle,
     double endAngle,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     int shift = 0)
 {
     using (InputOutputArray target = image.GetInputOutputArray())
         cveEllipse(target, ref center, ref axes, angle, startAngle, endAngle, ref color, thickness, lineType, shift);
 }
Example #15
0
 /// <summary>
 /// Draw the matched keypoints between the model image and the observed image.
 /// </summary>
 /// <param name="modelImage">The model image</param>
 /// <param name="modelKeypoints">The keypoints in the model image</param>
 /// <param name="observerdImage">The observed image</param>
 /// <param name="observedKeyPoints">The keypoints in the observed image</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 /// <param name="result">The image where model and observed image is displayed side by side. Matches are drawn as indicated by the flag</param>
 /// <param name="matchColor">The color for the match correspondence lines</param>
 /// <param name="singlePointColor">The color for highlighting the keypoints</param>
 /// <param name="mask">The mask for the matches. Use null for all matches.</param>
 /// <param name="flags">The drawing type</param>
 public static void DrawMatches(
    IInputArray modelImage, VectorOfKeyPoint modelKeypoints,
    IInputArray observerdImage, VectorOfKeyPoint observedKeyPoints,
    VectorOfVectorOfDMatch matches,
    IInputOutputArray result,
    MCvScalar matchColor, MCvScalar singlePointColor,
    IInputArray mask = null,
    KeypointDrawType flags = KeypointDrawType.Default)
 {
    using (InputArray model = modelImage.GetInputArray())
    using (InputArray observed = observerdImage.GetInputArray())
    using (InputOutputArray output = result.GetInputOutputArray())
    using (InputArray maskArr = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    {
       // Note the argument order: the native call takes the observed image/keypoints first.
       CvInvoke.drawMatchedFeatures(observed, observedKeyPoints, model,
          modelKeypoints, matches, output, ref matchColor, ref singlePointColor, maskArr, flags);
    }
 }
Example #16
0
        /// <summary>
        /// Process the input image and render into the output image
        /// </summary>
        /// <param name="imageIn">The input image</param>
        /// <param name="imageOut">
        /// The output image, can be the same as <paramref name="imageIn"/>, in which case we will render directly into the input image.
        /// Note that if no bar codes are detected, <paramref name="imageOut"/> will remain unchanged.
        /// If bar codes are detected, we will draw the code and (rectangle) regions on top of the existing pixels of <paramref name="imageOut"/>.
        /// If the <paramref name="imageOut"/> is not the same object as <paramref name="imageIn"/>, it is a good idea to copy the pixels over from the input image before passing it to this function.
        /// </param>
        /// <returns>The messages that we want to display.</returns>
        public String ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            // The original allocated an unused 'VectorOfMat points' wrapping the whole body;
            // it was never read or written and has been removed.

            // Time only the detection step, not the rendering below.
            Stopwatch watch = Stopwatch.StartNew();
            var barcodesFound = _barcodeDetector.DetectAndDecode(imageIn);
            watch.Stop();

            for (int i = 0; i < barcodesFound.Length; i++)
            {
                // Outline the detected barcode region.
                Point[] contour = Array.ConvertAll(barcodesFound[i].Points, Point.Round);
                using (VectorOfVectorOfPoint vpp = new VectorOfVectorOfPoint(new Point[][] { contour }))
                {
                    CvInvoke.DrawContours(imageOut, vpp, -1, RenderColor);
                }

                // Label the region with its decoded text, anchored at the first corner point.
                CvInvoke.PutText(
                    imageOut,
                    barcodesFound[i].DecodedInfo,
                    Point.Round(barcodesFound[i].Points[0]),
                    FontFace.HersheySimplex,
                    1.0,
                    RenderColor
                    );
            }

            if (barcodesFound.Length == 0)
            {
                return String.Format("No barcodes found (in {0} milliseconds)", watch.ElapsedMilliseconds);
            }

            String[] barcodesTexts = Array.ConvertAll(barcodesFound,
                                                      delegate(BarcodeDetector.Barcode input) { return(input.DecodedInfo); });
            String allBarcodeText = String.Join(";", barcodesTexts);
            return String.Format(
                       "Barcodes found (in {1} milliseconds): {0}",
                       allBarcodeText,
                       watch.ElapsedMilliseconds);
        }
Example #17
0
 /// <summary>
 /// Draws the line segments on a given image.
 /// </summary>
 /// <param name="image">The image, where the lines will be drawn. Should be bigger or equal to the image, where the lines were found.</param>
 /// <param name="lines">A vector of the lines that needed to be drawn.</param>
 /// <param name="drawArrows">If true, arrow heads will be drawn.</param>
 public void DrawSegments(IInputOutputArray image, LineSegment2DF[] lines, bool drawArrows = false)
 {
     using (InputOutputArray target = image.GetInputOutputArray())
     using (Mat segments = new Mat(lines.Length, 1, DepthType.Cv32F, 4))
     {
         // Pack each segment as four consecutive floats: x1, y1, x2, y2.
         float[] coords = new float[lines.Length * 4];
         int offset = 0;
         foreach (LineSegment2DF line in lines)
         {
             coords[offset++] = line.P1.X;
             coords[offset++] = line.P1.Y;
             coords[offset++] = line.P2.X;
             coords[offset++] = line.P2.Y;
         }
         segments.SetTo(coords);

         using (InputArray segmentsArr = segments.GetInputArray())
         {
             XImgprocInvoke.cveFastLineDetectorDrawSegments(_ptr, target, segmentsArr, drawArrows);
         }
     }
 }
Example #18
0
        /// <summary>
        /// Visualize a motion region: a circle centered on the region plus a radius line
        /// pointing in the direction of the motion.
        /// </summary>
        /// <param name="image">The image to draw on</param>
        /// <param name="motionRegion">The rectangle enclosing the detected motion</param>
        /// <param name="angle">The motion direction in degrees</param>
        /// <param name="color">The color used for the circle and the direction line</param>
        private static void DrawMotion(IInputOutputArray image, Rectangle motionRegion, double angle, Bgr color)
        {
            // Radius = quarter of (width + height), i.e. the average half-dimension.
            float radius = (motionRegion.Width + motionRegion.Height) >> 2;
            Point middle = new Point(
                motionRegion.X + (motionRegion.Width >> 1),
                motionRegion.Y + (motionRegion.Height >> 1));
            CircleF circle = new CircleF(middle, radius);

            double radians = angle * (Math.PI / 180.0);
            // Screen Y grows downward, hence the subtraction for the Y component.
            Point tip = new Point(
                middle.X + (int)(Math.Cos(radians) * radius),
                middle.Y - (int)(Math.Sin(radians) * radius));
            LineSegment2D direction = new LineSegment2D(middle, tip);

            CvInvoke.Circle(image, Point.Round(circle.Center), (int)circle.Radius, color.MCvScalar);
            CvInvoke.Line(image, direction.P1, direction.P2, color.MCvScalar);
        }
Example #19
0
 /// <summary>
 /// Draws contour outlines on the image.
 /// </summary>
 /// <param name="image">Destination image</param>
 /// <param name="contours">The input contours</param>
 /// <param name="contourIdx">The index of the contour to draw; presumably a negative value draws all contours, as in OpenCV — confirm against the native API</param>
 /// <param name="color">Color of the contours</param>
 /// <param name="thickness">Thickness of the lines the contours are drawn with</param>
 /// <param name="lineType">Type of the contour segments</param>
 /// <param name="hierarchy">Optional contour hierarchy information; null passes an empty placeholder</param>
 /// <param name="maxLevel">Maximal level for drawn contours</param>
 /// <param name="offset">Shift applied to every contour point</param>
 public static void DrawContours(IInputOutputArray image,
                                 IInputArray contours,
                                 int contourIdx,
                                 MCvScalar color,
                                 int thickness         = 1,
                                 LineType lineType     = LineType.EightConnected,
                                 IInputArray hierarchy = null,
                                 int maxLevel          = int.MaxValue,
                                 Point offset          = default(Point))
 {
     using (InputOutputArray target = image.GetInputOutputArray())
     using (InputArray contourData = contours.GetInputArray())
     using (InputArray hierarchyData = hierarchy == null ? EmptyArray <InputArray> .Value : hierarchy.GetInputArray())
     {
         cveDrawContours(target, contourData, contourIdx, ref color, thickness, lineType, hierarchyData, maxLevel, ref offset);
     }
 }
Example #20
0
 /// <summary>
 /// Draws contour outlines on the destination image.
 /// </summary>
 /// <param name="image">Destination image</param>
 /// <param name="contours">The input contours</param>
 /// <param name="contourIdx">The index of the contour to draw</param>
 /// <param name="color">Color of the contours</param>
 /// <param name="thickness">Thickness of the lines the contours are drawn with</param>
 /// <param name="lineType">Type of the contour segments</param>
 /// <param name="hierarchy">Optional contour hierarchy information; null passes an empty placeholder</param>
 /// <param name="maxLevel">Maximal level for drawn contours</param>
 /// <param name="offset">Shift applied to every contour point</param>
 public static void DrawContours(IInputOutputArray image,
     IInputArray contours,
     int contourIdx,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     IInputArray hierarchy = null,
     int maxLevel = int.MaxValue,
     Point offset = default(Point))
 {
     using (InputOutputArray dst = image.GetInputOutputArray())
     {
         using (InputArray src = contours.GetInputArray())
         using (InputArray hier = (hierarchy != null) ? hierarchy.GetInputArray() : EmptyArray<InputArray>.Value)
             cveDrawContours(dst, src, contourIdx, ref color, thickness, lineType, hier, maxLevel, ref offset);
     }
 }
Example #21
0
 /// <summary>
 /// Implements sparse iterative version of Lucas-Kanade optical flow in pyramids ([Bouguet00]). It calculates coordinates of the feature points on the current video frame given their coordinates on the previous frame. The function finds the coordinates with sub-pixel accuracy.
 /// </summary>
 /// <remarks>Both parameters prev_pyr and curr_pyr comply with the following rules: if the image pointer is 0, the function allocates the buffer internally, calculates the pyramid, and releases the buffer after processing. Otherwise, the function calculates the pyramid and stores it in the buffer unless the flag CV_LKFLOW_PYR_A[B]_READY is set. The image should be large enough to fit the Gaussian pyramid data. After the function call both pyramids are calculated and the readiness flag for the corresponding image can be set in the next call (i.e., typically, for all the image pairs except the very first one CV_LKFLOW_PYR_A_READY is set). </remarks>
 /// <param name="prevImg">First frame, at time t. </param>
 /// <param name="nextImg">Second frame, at time t + dt .</param>
 /// <param name="prevPts">Array of points for which the flow needs to be found. </param>
 /// <param name="nextPts">Array of 2D points containing calculated new positions of input </param>
 /// <param name="status">Array. Every element of the array is set to 1 if the flow for the corresponding feature has been found, 0 otherwise.</param>
 /// <param name="err">Array of double numbers containing difference between patches around the original and moved points. Optional parameter; can be NULL </param>
 /// <param name="winSize">Size of the search window of each pyramid level.</param>
 /// <param name="maxLevel">Maximal pyramid level number. If 0 , pyramids are not used (single level), if 1 , two levels are used, etc. </param>
 /// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped.</param>
 /// <param name="flags">Miscellaneous flags</param>
 /// <param name="minEigThreshold">the algorithm calculates the minimum eigen value of a 2x2 normal matrix of optical flow equations (this matrix is called a spatial gradient matrix in [Bouguet00]), divided by number of pixels in a window; if this value is less than minEigThreshold, then a corresponding feature is filtered out and its flow is not processed, so it allows to remove bad points and get a performance boost.</param>
 public static void CalcOpticalFlowPyrLK(
     IInputArray prevImg,
     IInputArray nextImg,
     IInputArray prevPts,
     IInputOutputArray nextPts,
     IOutputArray status,
     IOutputArray err,
     Size winSize,
     int maxLevel,
     MCvTermCriteria criteria,
     CvEnum.LKFlowFlag flags = CvEnum.LKFlowFlag.Default,
     double minEigThreshold  = 1.0e-4)
 {
     // Flat using-chain instead of the original deeply nested staircase.
     using (InputArray prevArr = prevImg.GetInputArray())
     using (InputArray nextArr = nextImg.GetInputArray())
     using (InputArray prevPtsArr = prevPts.GetInputArray())
     using (InputOutputArray nextPtsArr = nextPts.GetInputOutputArray())
     using (OutputArray statusArr = status.GetOutputArray())
     using (OutputArray errArr = err.GetOutputArray())
     {
         cveCalcOpticalFlowPyrLK(prevArr, nextArr, prevPtsArr, nextPtsArr, statusArr, errArr, ref winSize, maxLevel, ref criteria, flags, minEigThreshold);
     }
 }
Example #22
0
 /// <summary>
 /// Pose estimation for a board of markers.
 /// </summary>
 /// <param name="corners">Vector of already detected markers corners. For each marker, its four corners are provided. For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="ids">List of identifiers for each marker in corners</param>
 /// <param name="board">Layout of markers in the board. The layout is composed by the marker identifiers and the positions of each marker corner in the board reference system.</param>
 /// <param name="cameraMatrix">Input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">Vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvec">Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board (see cv::Rodrigues). Used as initial guess if not empty.</param>
 /// <param name="tvec">Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.</param>
 /// <param name="useExtrinsicGuess">Defines whether initial guess for rvec and tvec will be used or not.</param>
 /// <returns>The function returns the number of markers from the input employed for the board pose estimation. Note that returning a 0 means the pose has not been estimated.</returns>
 public static int EstimatePoseBoard(
     IInputArrayOfArrays corners,
     IInputArray ids,
     IBoard board,
     IInputArray cameraMatrix,
     IInputArray distCoeffs,
     IInputOutputArray rvec,
     IInputOutputArray tvec,
     bool useExtrinsicGuess = false)
 {
     // Flat using-chain instead of the original deeply nested staircase.
     using (InputArray cornerArr = corners.GetInputArray())
     using (InputArray idArr = ids.GetInputArray())
     using (InputArray camArr = cameraMatrix.GetInputArray())
     using (InputArray distArr = distCoeffs.GetInputArray())
     using (InputOutputArray rvecArr = rvec.GetInputOutputArray())
     using (InputOutputArray tvecArr = tvec.GetInputOutputArray())
     {
         return cveArucoEstimatePoseBoard(cornerArr, idArr, board.BoardPtr, camArr, distArr, rvecArr, tvecArr, useExtrinsicGuess);
     }
 }
Example #23
0
 /// <summary>
 /// High level function to execute a single rapid iteration
 /// </summary>
 /// <param name="img">The video frame</param>
 /// <param name="num">Number of search lines</param>
 /// <param name="len">Search line radius</param>
 /// <param name="pts3d">The 3D points of the mesh</param>
 /// <param name="tris">Triangle face connectivity</param>
 /// <param name="K">Camera matrix</param>
 /// <param name="rvec">Rotation between mesh and camera. Input values are used as an initial solution.</param>
 /// <param name="tvec">Translation between mesh and camera. Input values are used as an initial solution.</param>
 /// <param name="rmsd">The 2d reprojection difference</param>
 /// <returns>Ratio of search lines that could be extracted and matched</returns>
 public static float Rapid(
     IInputArray img,
     int num,
     int len,
     IInputArray pts3d,
     IInputArray tris,
     IInputArray K,
     IInputOutputArray rvec,
     IInputOutputArray tvec,
     ref double rmsd)
 {
     // Flat using-chain instead of the original deeply nested staircase.
     using (InputArray frame = img.GetInputArray())
     using (InputArray meshPts = pts3d.GetInputArray())
     using (InputArray faces = tris.GetInputArray())
     using (InputArray camMat = K.GetInputArray())
     using (InputOutputArray rotation = rvec.GetInputOutputArray())
     using (InputOutputArray translation = tvec.GetInputOutputArray())
     {
         return cveRapid(frame, num, len, meshPts, faces, camMat, rotation, translation, ref rmsd);
     }
 }
Example #24
0
 /// <summary>
 /// Calculates a sparse optical flow.
 /// </summary>
 /// <param name="opticalFlow">The sparse optical flow</param>
 /// <param name="prevImg">First input image.</param>
 /// <param name="nextImg">Second input image of the same size and the same type as prevImg.</param>
 /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
 /// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">Output status vector; each element is 1 when the flow for the corresponding feature was found, 0 otherwise.</param>
 /// <param name="error">Optional output vector with an error response (inverse confidence) per point.</param>
 public static void Calc(
     this ISparseOpticalFlow opticalFlow,
     IInputArray prevImg, IInputArray nextImg,
     IInputArray prevPts, IInputOutputArray nextPts,
     IOutputArray status,
     IOutputArray error = null
     )
 {
     // A null error argument maps to an empty native output array.
     using (InputArray iaPrevImg = prevImg.GetInputArray())
     using (InputArray iaNextImg = nextImg.GetInputArray())
     using (InputArray iaPrevPts = prevPts.GetInputArray())
     using (InputOutputArray ioaNextPts = nextPts.GetInputOutputArray())
     using (OutputArray oaStatus = status.GetOutputArray())
     using (OutputArray oaError = (error != null) ? error.GetOutputArray() : OutputArray.GetEmpty())
     {
         CvInvoke.cveSparseOpticalFlowCalc(
             opticalFlow.SparseOpticalFlowPtr,
             iaPrevImg, iaNextImg,
             iaPrevPts, ioaNextPts,
             oaStatus, oaError);
     }
 }
Example #25
0
        /// <summary>
        /// Runs license plate detection on the given frame and forwards the
        /// recognized plate numbers to the gate business logic.
        /// </summary>
        /// <param name="image">The frame to scan for license plates.</param>
        /// <param name="path">Path used to initialize the recognition engine.</param>
        public void Process(IInputOutputArray image, string path)
        {
            Stopwatch watch = Stopwatch.StartNew();

            // NOTE(review): a fresh engine is built on every call — presumably cheap,
            // but verify whether it could be cached on the instance instead.
            _licensePlateRecognitionEngine = new LicensePlateRecognitionEngine(path);

            var licensePlateImagesList = new List<IInputOutputArray>();
            var filteredLicensePlateImagesList = new List<IInputOutputArray>();
            var licenseBoxList = new List<RotatedRect>();
            var words = _licensePlateRecognitionEngine.DetectLicensePlate(image, licensePlateImagesList, filteredLicensePlateImagesList, licenseBoxList);

            watch.Stop();

            // Look the recognized plates up and command the gate accordingly.
            businessLogic.SendComandToGate(businessLogic.FindLicensePlateInDB(words));

            // TODO:
            // - compare license plate with DB
            // - send message to client if we can open the gate
            // - CRUD for: Node / Dot / Car
        }
Example #26
0
        /// <summary>
        /// Process the input image and render into the output image.
        /// </summary>
        /// <param name="imageIn">The input image</param>
        /// <param name="imageOut">The output image; may be the same object as imageIn, in which case rendering happens directly on the input image</param>
        /// <returns>The messages that we want to display.</returns>
        public String ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            Stopwatch timer = Stopwatch.StartNew();
            var results = Detect(imageIn);
            timer.Stop();

            // When rendering into a different buffer, seed it with the input pixels first.
            if (imageIn != imageOut)
            {
                using (InputArray src = imageIn.GetInputArray())
                    src.CopyTo(imageOut);
            }

            foreach (var obj in results)
                obj.Render(imageOut, new MCvScalar(0, 0, 255));

            return String.Format("Detected in {0} milliseconds.", timer.ElapsedMilliseconds);
        }
        /// <summary>
        /// Process the input image and render into the output image.
        /// </summary>
        /// <param name="imageIn">The input image</param>
        /// <param name="imageOut">
        /// The output image, can be the same as <paramref name="imageIn"/>, in which case we will render directly into the input image.
        /// Note that if no faces are detected, <paramref name="imageOut"/> will remain unchanged.
        /// If faces/landmarks are detected, the regions and markers are drawn on top of the existing pixels of <paramref name="imageOut"/>.
        /// If <paramref name="imageOut"/> is not the same object as <paramref name="imageIn"/>, it is a good idea to copy the pixels over from the input image before calling this function.
        /// </param>
        /// <returns>The messages that we want to display.</returns>
        public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            Stopwatch timer = Stopwatch.StartNew();

            var fullFaceRegions = new List<DetectedObject>();
            var partialFaceRegions = new List<DetectedObject>();
            _faceDetector.Detect(imageIn, fullFaceRegions, partialFaceRegions);

            // Partially visible faces: rectangle only, no landmark detection.
            foreach (DetectedObject face in partialFaceRegions)
                CvInvoke.Rectangle(imageOut, face.Region, RenderColorRectangle);

            if (fullFaceRegions.Count > 0)
            {
                foreach (DetectedObject face in fullFaceRegions)
                    CvInvoke.Rectangle(imageOut, face.Region, RenderColorRectangle);

                // Landmark detection runs only on fully visible face rectangles.
                Rectangle[] faceRects = fullFaceRegions.ConvertAll(r => r.Region).ToArray();

                using (VectorOfVectorOfPointF landmarks = _facemarkDetector.Detect(imageIn, faceRects))
                {
                    for (int i = 0; i < landmarks.Size; i++)
                    {
                        using (VectorOfPointF facePoints = landmarks[i])
                            FaceInvoke.DrawFacemarks(imageOut, facePoints, RenderColorLandmark);
                    }
                }
            }

            timer.Stop();
            return String.Format("Detected in {0} milliseconds.", timer.ElapsedMilliseconds);
        }
Example #28
0
        /// <summary>
        /// Extracts line segments from the given image by running Canny edge
        /// detection followed by a probabilistic Hough transform.
        /// </summary>
        /// <param name="img">The image to analyze.</param>
        /// <returns>The detected line segments.</returns>
        private static LineSegment2D[] GetContours(IInputOutputArray img)
        {
            #region Canny and edge detection

            var cannyThreshold        = 180;
            var cannyThresholdLinking = 120;

            // UMat is IDisposable; the original leaked it. Dispose it as soon as
            // HoughLinesP has consumed the edge map.
            using (var cannyEdges = new UMat())
            {
                CvInvoke.Canny(img, cannyEdges, cannyThreshold, cannyThresholdLinking);

                var lines = CvInvoke.HoughLinesP(
                    cannyEdges,
                    1,              //Distance resolution in pixel-related units
                    Math.PI / 45.0, //Angle resolution measured in radians.
                    22,             //threshold
                    12,             //min Line width
                    10);            //gap between lines

                return lines;
            }

            #endregion
        }
Example #29
0
        /// <summary>
        /// Process the input image and render into the output image.
        /// </summary>
        /// <param name="imageIn">The input image</param>
        /// <param name="imageOut">The output image; may be the same object as imageIn, in which case rendering happens directly on the input image</param>
        /// <returns>The messages that we want to display.</returns>
        public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            Stopwatch timer = Stopwatch.StartNew();
            Rectangle[] pedestrians = Find(imageIn);
            timer.Stop();

            // When rendering into a different buffer, seed it with the input pixels first.
            if (imageIn != imageOut)
            {
                using (InputArray src = imageIn.GetInputArray())
                    src.CopyTo(imageOut);
            }

            // Outline every detected pedestrian in red.
            foreach (Rectangle region in pedestrians)
                CvInvoke.Rectangle(imageOut, region, new MCvScalar(0, 0, 255), 2);

            return String.Format("Detected in {0} milliseconds.", timer.ElapsedMilliseconds);
        }
Example #30
0
        /// <summary>
        /// Detects license plates in the given frame, outlines them on the frame,
        /// and packages the filtered plate crops plus the annotated frame as
        /// base64-encoded JPEG data URIs.
        /// </summary>
        /// <param name="image">The frame to scan; plate outlines are drawn onto it in red. NOTE(review): the frame is cast to Image&lt;Bgr,byte&gt; and disposed at the end (original behavior, preserved) — confirm callers do not reuse it.</param>
        /// <returns>An UploadFile with one RawData entry per detected plate and the annotated frame in OutputFile.</returns>
        public UploadFile ProcessImage(IInputOutputArray image)
        {
            string                   path                           = AppContext.BaseDirectory;
            UploadFile               uploadFile                     = new UploadFile();
            LicensePlateDetector     _licensePlateDetector          = new LicensePlateDetector(path);
            List <IInputOutputArray> licensePlateImagesList         = new List <IInputOutputArray>();
            List <IInputOutputArray> filteredLicensePlateImagesList = new List <IInputOutputArray>();
            List <RotatedRect>       licenseBoxList                 = new List <RotatedRect>();
            List <string>            words                          = _licensePlateDetector.DetectLicensePlate(
                image,
                licensePlateImagesList,
                filteredLicensePlateImagesList,
                licenseBoxList);

            // The original also built an unused (and leaked) VConcat Mat per plate
            // and an unused startPoint; both were dead work and have been removed.
            for (int i = 0; i < words.Count; i++)
            {
                // Outline the detected plate region on the source frame.
                PointF[] verticesF = licenseBoxList[i].GetVertices();
                Point[]  vertices  = Array.ConvertAll(verticesF, Point.Round);
                using (VectorOfPoint pts = new VectorOfPoint(vertices))
                    CvInvoke.Polylines(image, pts, true, new Bgr(Color.Red).MCvScalar, 2);

                // Encode the filtered plate crop as a JPEG data URI; dispose the
                // intermediate Image once the bytes are extracted (original leaked it).
                UMat mat = (UMat)filteredLicensePlateImagesList[i];
                using (Image <Bgr, byte> LpImage = mat.ToImage <Bgr, byte>())
                {
                    byte[] LpBytes = LpImage.ToJpegData();
                    uploadFile.RawData.Add(new RawData {
                        RawImage = "data:image/jpg;base64," + Convert.ToBase64String(LpBytes, 0, LpBytes.Length), Number = words[i]
                    });
                }
            }

            // Encode the annotated frame itself. Disposing img disposes the caller's
            // image object (preserved from the original implementation).
            using (Image <Bgr, byte> img = (Image <Bgr, byte>)image)
            {
                Byte[] bytes = img.ToJpegData();
                uploadFile.OutputFile = "data:image/jpg;base64," + Convert.ToBase64String(bytes, 0, bytes.Length);
            }
            return uploadFile;
        }
Example #31
0
        /// <summary>
        /// Detects license plates in the given frame, pushes each recognized plate
        /// (label + stacked plate/filtered-plate image) to the UI thread, and
        /// outlines the plates in yellow on the frame itself.
        /// </summary>
        /// <param name="image">The frame to scan; plate outlines are drawn onto it.</param>
        private void ProcessImage(IInputOutputArray image)
        {
            Stopwatch watch = Stopwatch.StartNew(); // time the detection process

            // Detection fills these lists in parallel: one entry per detected plate.
            List <IInputOutputArray> licensePlateImagesList         = new List <IInputOutputArray>();
            List <IInputOutputArray> filteredLicensePlateImagesList = new List <IInputOutputArray>();
            List <RotatedRect>       licenseBoxList = new List <RotatedRect>();
            List <string>            words          = _recognator.DetectLicensePlate(
                image,
                licensePlateImagesList,
                filteredLicensePlateImagesList,
                licenseBoxList);

            watch.Stop(); //stop the timer


            Point startPoint = new Point(10, 10);

            if (words.Count != 0)
            {
                // Results exist: clear the previous UI panel contents first.
                // This runs on another (UI) thread via Invoke.
                this.Controls.Owner.Invoke(new _ClearPanel(clearPanel));
                for (int i = 0; i < words.Count; i++)
                {
                    // Stack the raw plate crop on top of its filtered version.
                    // NOTE(review): dest is handed to the UI delegate below and is
                    // not disposed here — presumably the delegate takes ownership;
                    // confirm against _addLabelAndImage.
                    Mat dest = new Mat();
                    CvInvoke.VConcat(licensePlateImagesList[i], filteredLicensePlateImagesList[i], dest);

                    this.Controls.Owner.Invoke(_addLabelAndImage,
                                               startPoint,
                                               String.Format("{0}", words[i]),
                                               dest, watch);

                    // Outline the plate's rotated bounding box on the frame.
                    PointF[] verticesF = licenseBoxList[i].GetVertices();
                    Point[]  vertices  = Array.ConvertAll(verticesF, Point.Round);
                    using (VectorOfPoint pts = new VectorOfPoint(vertices))
                        CvInvoke.Polylines(image, pts, true, new Bgr(Color.Yellow).MCvScalar, 2);
                }
                // Pause so the operator can read the results before the next frame.
                Thread.Sleep(3000);
            }
        }
        /// <summary>
        /// Detects and decodes WeChat QR codes in the input image, outlines each
        /// code on the output image, and reports the decoded payloads.
        /// </summary>
        /// <param name="imageIn">The input image</param>
        /// <param name="imageOut">The output image, can be the same as imageIn, in which case we will render directly into the input image</param>
        /// <returns>The messages that we want to display.</returns>
        public String ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            using (VectorOfMat points = new VectorOfMat())
            {
                Stopwatch watch        = Stopwatch.StartNew();
                String[]  qrCodesFound = _weChatQRCodeDetectionModel.DetectAndDecode(imageIn, points);
                watch.Stop();

                // Seed the output with the input pixels BEFORE drawing. The original
                // copied afterwards, which overwrote the contours it had just drawn.
                if (imageOut != imageIn)
                {
                    using (InputArray iaImageIn = imageIn.GetInputArray())
                    {
                        iaImageIn.CopyTo(imageOut);
                    }
                }

                // Outline each detected QR code in blue.
                for (int i = 0; i < qrCodesFound.Length; i++)
                {
                    using (Mat p = points[i])
                    {
                        Point[] contour = MatToPoints(p);

                        using (VectorOfVectorOfPoint vpp = new VectorOfVectorOfPoint(new Point[][] { contour }))
                        {
                            CvInvoke.DrawContours(imageOut, vpp, -1, new MCvScalar(255, 0, 0));
                        }
                    }
                }

                // Quote each decoded payload individually. The original passed the
                // whole array to String.Format, which formats only the first element
                // and throws FormatException when the array is empty.
                return String.Format(
                           "QR codes found (in {1} milliseconds): {0}",
                           String.Join(";", Array.ConvertAll(qrCodesFound, code => String.Format("\"{0}\"", code))),
                           watch.ElapsedMilliseconds);
            }
        }
Example #33
0
        /// <summary>
        /// Detects license plates in the given frame, adds a label plus the stacked
        /// plate/filtered-plate image to the display, and outlines each plate in
        /// red on the frame itself.
        /// </summary>
        /// <param name="image">The frame to scan; plate outlines are drawn onto it.</param>
        private void ProcessImage(IInputOutputArray image)
        {
            try
            {
                Stopwatch watch = Stopwatch.StartNew(); // time the detection process

                // Detection fills these lists in parallel: one entry per detected plate.
                List <IInputOutputArray> licensePlateImagesList         = new List <IInputOutputArray>();
                List <IInputOutputArray> filteredLicensePlateImagesList = new List <IInputOutputArray>();
                List <RotatedRect>       licenseBoxList = new List <RotatedRect>();
                List <string>            words          = _licensePlateDetector.DetectLicensePlate(
                    image,
                    licensePlateImagesList,
                    filteredLicensePlateImagesList,
                    licenseBoxList);

                watch.Stop(); //stop the timer



                Point startPoint = new Point(10, 10);
                for (int i = 0; i < words.Count; i++)
                {
                    // Stack the raw plate crop on top of its filtered version.
                    // NOTE(review): dest is handed to AddLabelAndImage and not disposed
                    // here — presumably the callee takes ownership; confirm.
                    Mat dest = new Mat();
                    CvInvoke.VConcat(licensePlateImagesList[i], filteredLicensePlateImagesList[i], dest);
                    // NOTE(review): the last argument is a raw Stopwatch timestamp, not
                    // the elapsed detection time from `watch` — verify this is intended.
                    AddLabelAndImage(
                        ref startPoint,
                        String.Format("License: {0}", words[i]),
                        dest, Stopwatch.GetTimestamp());
                    // Outline the plate's rotated bounding box on the frame.
                    PointF[] verticesF = licenseBoxList[i].GetVertices();
                    Point[]  vertices  = Array.ConvertAll(verticesF, Point.Round);
                    using (VectorOfPoint pts = new VectorOfPoint(vertices))
                        CvInvoke.Polylines(image, pts, true, new Bgr(Color.Red).MCvScalar, 2);
                }
            }
            finally
            {
                //_timer.Start();
            }
        }
Example #34
0
            /// <summary>
            /// Draws this contour by connecting consecutive points with line segments.
            /// </summary>
            /// <param name="target">The image/matrix to draw on</param>
            /// <param name="Color">Line color</param>
            /// <param name="Closed">Whether the contour is closed (the last point is connected back to the first)</param>
            /// <param name="Thickness">Line thickness</param>
            public static void DrawTo(this VectorOfPoint contour, IInputOutputArray target, MCvScalar Color, bool Closed = true, int Thickness = 1)
            {
                int count = contour.Size;

                // A segment needs at least two points.
                if (count < 2)
                    return;

                // Connect each point to its predecessor.
                for (int i = 1; i < count; i++)
                    CvInvoke.Line(target, contour[i - 1], contour[i], Color, Thickness);

                // Close the loop when requested.
                if (Closed)
                    CvInvoke.Line(target, contour[count - 1], contour[0], Color, Thickness);
            }
Example #35
0
 /// <summary>
 /// Performs fisheye camera calibration.
 /// </summary>
 /// <param name="objectPoints">vector of vectors of calibration pattern points in the calibration pattern coordinate space.</param>
 /// <param name="imagePoints">vector of vectors of the projections of calibration pattern points. imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to objectPoints[i].size() for each i.</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="K">Output 3x3 floating-point camera matrix. If UseIntrisicGuess is specified, some or all of fx, fy, cx, cy must be initialized before calling the function.</param>
 /// <param name="D">Output vector of distortion coefficients (k1,k2,k3,k4).</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues) estimated for each pattern view; together with the matching translation vector it maps the calibration pattern from model space to world space for that view.</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="flags">Different flags</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 public static double Calibrate(IInputArray objectPoints, IInputArray imagePoints, Size imageSize,
                                IInputOutputArray K, IInputOutputArray D, IOutputArray rvecs, IOutputArray tvecs, CalibrationFlag flags,
                                MCvTermCriteria criteria)
 {
     // Marshal every managed wrapper to its native handle for the duration of the call.
     using (InputArray iaObjectPts = objectPoints.GetInputArray())
     using (InputArray iaImagePts = imagePoints.GetInputArray())
     using (InputOutputArray ioaK = K.GetInputOutputArray())
     using (InputOutputArray ioaD = D.GetInputOutputArray())
     using (OutputArray oaRvecs = rvecs.GetOutputArray())
     using (OutputArray oaTvecs = tvecs.GetOutputArray())
     {
         return CvInvoke.cveFisheyeCalibrate(
             iaObjectPts,
             iaImagePts,
             ref imageSize,
             ioaK,
             ioaD,
             oaRvecs,
             oaTvecs,
             (int)flags,
             ref criteria);
     }
 }
Example #36
0
 /// <summary>
 /// Renders this marker onto the given image and labels it with its index.
 /// A picked marker additionally gets a brightened outline.
 /// </summary>
 /// <param name="img">The image to draw on</param>
 public void drawOn(IInputOutputArray img)
 {
     // Brightened variant of the fill color, used to highlight a picked marker.
     MCvScalar highlight = new MCvScalar(Color.V0 + 50, Color.V1 + 50, Color.V2 + 50);

     if (Shape == Type.circle)
     {
         CvInvoke.Circle(img, Position, Radius, Color, -1);
         if (isPick)
         {
             CvInvoke.Circle(img, Position, Radius, highlight, 3);
         }
     }
     else if (Shape == Type.square)
     {
         int half = Radius / 2;
         CvInvoke.Rectangle(img, new System.Drawing.Rectangle(Position.X - half, Position.Y - half, Radius, Radius), Color, -1);
         if (isPick)
         {
             // Trace the four edges of the square, same segment order as before.
             Point tl = new Point(Position.X - half, Position.Y - half);
             Point tr = new Point(Position.X + half, Position.Y - half);
             Point bl = new Point(Position.X - half, Position.Y + half);
             Point br = new Point(Position.X + half, Position.Y + half);
             CvInvoke.Line(img, tl, bl, highlight, 3);
             CvInvoke.Line(img, br, bl, highlight, 3);
             CvInvoke.Line(img, br, tr, highlight, 3);
             CvInvoke.Line(img, tl, tr, highlight, 3);
         }
     }
     CvInvoke.PutText(img, index.ToString(), Position, FontFace.HersheySimplex, 0.5, new MCvScalar(0, 0, 0), 2);
 }
Example #37
0
 /// <summary>
 /// Calculates Optical Flow using NVIDIA Optical Flow SDK.
 /// NVIDIA GPUs starting with Turing contain a dedicated hardware accelerator for computing optical flow vectors between pairs of images.
 /// The optical flow hardware accelerator generates block-based optical flow vectors.
 /// The size of the block depends on hardware in use, and can be queried using the function getGridSize().
 /// The block-based flow vectors generated by the hardware can be converted to dense representation (i.e. per-pixel flow vectors) using the upSampler() helper function, if needed.
 /// The flow vectors are stored in CV_16SC2 format with x and y components of each flow vector in 16-bit signed fixed point representation S10.5.
 /// </summary>
 /// <param name="nvidiaOpticalFlow">The nvidia optical flow object</param>
 /// <param name="inputImage">Input image</param>
 /// <param name="referenceImage">Reference image of the same size and the same type as input image.</param>
 /// <param name="flow">A buffer consisting of inputImage.Size() / getGridSize() flow vectors in CV_16SC2 format.</param>
 /// <param name="stream">Stream for the asynchronous version.</param>
 /// <param name="hint">Hint buffer if client provides external hints. Must have same size as flow buffer. Caller can provide flow vectors as hints for optical flow calculation.</param>
 /// <param name="cost">Cost buffer contains numbers indicating the confidence associated with each of the generated flow vectors. Higher the cost, lower the confidence. Cost buffer is of type CV_32SC1.</param>
 public static void Calc(
     this INvidiaOpticalFlow nvidiaOpticalFlow,
     IInputArray inputImage,
     IInputArray referenceImage,
     IInputOutputArray flow,
     Stream stream     = null,
     IInputArray hint  = null,
     IOutputArray cost = null)
 {
     // Optional arguments map to empty native arrays / a null stream pointer.
     using (InputArray iaInput = inputImage.GetInputArray())
     using (InputArray iaReference = referenceImage.GetInputArray())
     using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
     using (InputArray iaHint = (hint != null) ? hint.GetInputArray() : InputArray.GetEmpty())
     using (OutputArray oaCost = (cost != null) ? cost.GetOutputArray() : OutputArray.GetEmpty())
     {
         cudaNvidiaOpticalFlowCalc(
             nvidiaOpticalFlow.NvidiaOpticalFlowPtr,
             iaInput,
             iaReference,
             ioaFlow,
             (stream != null) ? stream.Ptr : IntPtr.Zero,
             iaHint,
             oaCost);
     }
 }
Example #38
0
 /// <summary>
 /// Refine not detected markers based on the already detected and the board layout.
 /// </summary>
 /// <param name="image">Input image</param>
 /// <param name="board">Layout of markers in the board.</param>
 /// <param name="detectedCorners">Vector of already detected marker corners.</param>
 /// <param name="detectedIds">Vector of already detected marker identifiers.</param>
 /// <param name="rejectedCorners">Vector of rejected candidates during the marker detection process</param>
 /// <param name="cameraMatrix">Optional input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">Optional vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="minRepDistance">Minimum distance between the corners of the rejected candidate and the reprojected marker in order to consider it as a correspondence. (default 10)</param>
 /// <param name="errorCorrectionRate">Rate of allowed erroneous bits respect to the error correction capability of the used dictionary. -1 ignores the error correction step. (default 3)</param>
 /// <param name="checkAllOrders">Consider the four posible corner orders in the rejectedCorners array. If it set to false, only the provided corner order is considered (default true).</param>
 /// <param name="recoveredIdxs">Optional array to returns the indexes of the recovered candidates in the original rejectedCorners array.</param>
 /// <param name="parameters">marker detection parameters</param>
 public static void RefineDetectedMarkers(
     IInputArray image, IBoard board, IInputOutputArray detectedCorners,
     IInputOutputArray detectedIds, IInputOutputArray rejectedCorners,
     IInputArray cameraMatrix, IInputArray distCoeffs,
     float minRepDistance, float errorCorrectionRate,
     bool checkAllOrders,
     IOutputArray recoveredIdxs, DetectorParameters parameters)
 {
     // Optional camera parameters and the recovered-index output map to empty
     // native arrays when the caller passes null.
     using (InputArray iaImage = image.GetInputArray())
     using (InputOutputArray ioaCorners = detectedCorners.GetInputOutputArray())
     using (InputOutputArray ioaIds = detectedIds.GetInputOutputArray())
     using (InputOutputArray ioaRejected = rejectedCorners.GetInputOutputArray())
     using (InputArray iaCamMatrix = (cameraMatrix != null) ? cameraMatrix.GetInputArray() : InputArray.GetEmpty())
     using (InputArray iaDist = (distCoeffs != null) ? distCoeffs.GetInputArray() : InputArray.GetEmpty())
     using (OutputArray oaRecovered = (recoveredIdxs != null) ? recoveredIdxs.GetOutputArray() : OutputArray.GetEmpty())
     {
         cveArucoRefineDetectedMarkers(iaImage, board.BoardPtr, ioaCorners, ioaIds, ioaRejected,
                                       iaCamMatrix, iaDist, minRepDistance, errorCorrectionRate, checkAllOrders, oaRecovered, ref parameters);
     }
 }
        /// <summary>
        /// Process the input image and render into the output image.
        /// </summary>
        /// <param name="imageIn">The input image</param>
        /// <param name="imageOut">
        /// The output image, can be the same as <paramref name="imageIn"/>, in which case we will render directly into the input image.
        /// Note that if no faces are detected, <paramref name="imageOut"/> will remain unchanged.
        /// If faces/eyes are detected, the (rectangle) regions are drawn on top of the existing pixels of <paramref name="imageOut"/>.
        /// If <paramref name="imageOut"/> is not the same object as <paramref name="imageIn"/>, it is a good idea to copy the pixels over from the input image before calling this function.
        /// </param>
        /// <returns>The messages that we want to display.</returns>
        public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            var faces = new List<Rectangle>();
            var eyes = new List<Rectangle>();

            Stopwatch timer = Stopwatch.StartNew();
            Detect(imageIn, faces, eyes);
            timer.Stop();

            // Face rectangles first, then eye rectangles — same order as before.
            faces.ForEach(region => CvInvoke.Rectangle(imageOut, region, RenderColorFace, 2));
            eyes.ForEach(region => CvInvoke.Rectangle(imageOut, region, RenderColorEye, 2));

            return String.Format("Detected in {0} milliseconds.", timer.ElapsedMilliseconds);
        }
Example #40
0
 /// <summary>
 /// Draws a single or multiple polygonal curves.
 /// </summary>
 /// <param name="img">Image</param>
 /// <param name="pts">Array points</param>
 /// <param name="isClosed">
 /// Indicates whether the polylines must be drawn closed.
 /// If !=0, the function draws the line from the last vertex of every contour to the first vertex.
 /// </param>
 /// <param name="color">Polyline color</param>
 /// <param name="thickness">Thickness of the polyline edges</param>
 /// <param name="lineType">Type of the line segments, see cvLine description</param>
 /// <param name="shift">Number of fractional bits in the vertex coordinates</param>
 public static void Polylines(IInputOutputArray img, Point[] pts, bool isClosed, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
 {
     // Wrap the managed point array in a native vector and delegate to the
     // vector-based overload.
     using (VectorOfPoint vertices = new VectorOfPoint(pts))
     {
         Polylines(img, vertices, isClosed, color, thickness, lineType, shift);
     }
 }
Example #41
0
 /// <summary>
 /// Draws an arrow segment pointing from the first point to the second one.
 /// </summary>
 /// <param name="img">Image</param>
 /// <param name="pt1">The point the arrow starts from.</param>
 /// <param name="pt2">The point the arrow points to.</param>
 /// <param name="color">Line color.</param>
 /// <param name="thickness">Line thickness.</param>
 /// <param name="lineType">Type of the line.</param>
 /// <param name="shift">Number of fractional bits in the point coordinates.</param>
 /// <param name="tipLength">The length of the arrow tip in relation to the arrow length</param>
 public static void ArrowedLine(IInputOutputArray img, Point pt1, Point pt2, MCvScalar color, int thickness = 1,
    CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0, double tipLength = 0.1)
 {
     // Marshal the image wrapper to its native handle for the duration of the call.
     using (InputOutputArray target = img.GetInputOutputArray())
         cveArrowedLine(target, ref pt1, ref pt2, ref color, thickness, lineType, shift, tipLength);
 }
Example #42
0
 /// <summary>
 /// Draws the line segment between pt1 and pt2 points in the image. The line is clipped by the image or ROI rectangle. For non-antialiased lines with integer coordinates the 8-connected or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased lines are drawn using Gaussian filtering.
 /// </summary>
 /// <param name="img">The image</param>
 /// <param name="pt1">First point of the line segment</param>
 /// <param name="pt2">Second point of the line segment</param>
 /// <param name="color">Line color</param>
 /// <param name="thickness">Line thickness.</param>
 /// <param name="lineType">Type of the line:
 /// 8 (or 0) - 8-connected line.
 /// 4 - 4-connected line.
 /// CV_AA - antialiased line.
 /// </param>
 /// <param name="shift">Number of fractional bits in the point coordinates</param>
 public static void Line(IInputOutputArray img, Point pt1, Point pt2, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
 {
     // Marshal the image wrapper to its native handle for the duration of the call.
     using (InputOutputArray target = img.GetInputOutputArray())
     {
         cveLine(target, ref pt1, ref pt2, ref color, thickness, lineType, shift);
     }
 }
Example #43
0
 /// <summary>
 /// Calculates an optical flow with the given dense optical flow algorithm.
 /// </summary>
 /// <param name="i0">First 8-bit single-channel input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as prev.</param>
 /// <param name="flow">Computed flow image that has the same size as prev and type CV_32FC2</param>
 /// <param name="opticalFlow">The dense optical flow object</param>
 public static void Calc(this IDenseOpticalFlow opticalFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow)
 {
     // Unwrap all three arrays, then invoke the native dense optical flow computation.
     using (InputArray firstFrame = i0.GetInputArray())
     using (InputArray secondFrame = i1.GetInputArray())
     using (InputOutputArray flowResult = flow.GetInputOutputArray())
     {
         CvInvoke.cveDenseOpticalFlowCalc(opticalFlow.DenseOpticalFlowPtr, firstFrame, secondFrame, flowResult);
     }
 }
Example #44
0
 /// <summary>
 /// Replaces every NaN element in the array with the supplied value.
 /// </summary>
 /// <param name="a">The array where NaN needs to be converted</param>
 /// <param name="val">The value to convert to</param>
 public static void PatchNaNs(IInputOutputArray a, double val = 0)
 {
     // The array is patched in place through its native wrapper.
     using (InputOutputArray nativeArr = a.GetInputOutputArray())
     {
         cvePatchNaNs(nativeArr, val);
     }
 }
Example #45
0
 /// <summary>
 /// Implements one of the variants of watershed, non-parametric marker-based segmentation algorithm, described in [Meyer92].
 /// Before passing the image to the function, user has to outline roughly the desired regions in the image markers with
 /// positive (&gt;0) indices, i.e. every region is represented as one or more connected components with the pixel values
 /// 1, 2, 3 etc. Those components will be "seeds" of the future image regions. All the other pixels in markers, which
 /// relation to the outlined regions is not known and should be defined by the algorithm, should be set to 0's. On the
 /// output of the function, each pixel in markers is set to one of values of the "seed" components, or to -1 at
 /// boundaries between the regions.
 /// </summary>
 /// <remarks>Note, that it is not necessary that every two neighbor connected components are separated by a watershed boundary (-1's pixels), for example, in case when such tangent components exist in the initial marker image. </remarks>
 /// <param name="image">The input 8-bit 3-channel image</param>
 /// <param name="markers">The input/output Int32 depth single-channel image (map) of markers. </param>
 public static void Watershed(IInputArray image, IInputOutputArray markers)
 {
    // The marker map is both input (seeds) and output (segmentation labels).
    using (InputArray nativeImage = image.GetInputArray())
    using (InputOutputArray nativeMarkers = markers.GetInputOutputArray())
    {
       cveWatershed(nativeImage, nativeMarkers);
    }
 }
Example #46
0
 /// <summary>
 /// Adds the input <paramref name="src"/> or its selected region, raised to power 2, to the accumulator sqsum
 /// </summary>
 /// <param name="src">Input image, 1- or 3-channel, 8-bit or 32-bit floating point (each channel of multi-channel image is processed independently)</param>
 /// <param name="dst">Accumulator of the same number of channels as input image, 32-bit or 64-bit floating-point</param>
 /// <param name="mask">Optional operation mask</param>
 public static void AccumulateSquare(IInputArray src, IInputOutputArray dst, IInputArray mask = null)
 {
    // A null mask is translated to an empty native array (i.e. no masking).
    using (InputArray nativeSrc = src.GetInputArray())
    using (InputOutputArray nativeDst = dst.GetInputOutputArray())
    using (InputArray nativeMask = (mask != null) ? mask.GetInputArray() : InputArray.GetEmpty())
    {
       cveAccumulateSquare(nativeSrc, nativeDst, nativeMask);
    }
 }
Example #47
0
      /// <summary>
      /// Retrieves contours from the binary image as a contour tree. The pointer firstContour is filled by the function. It is provided as a convenient way to obtain the hierarchy value as int[,].
      /// The function modifies the source image content
      /// </summary>
      /// <param name="image">The source 8-bit single channel image. Non-zero pixels are treated as 1s, zero pixels remain 0s - that is image treated as binary. To get such a binary image from grayscale, one may use cvThreshold, cvAdaptiveThreshold or cvCanny. The function modifies the source image content</param>
      /// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
      /// <param name="method">Approximation method (for all the modes, except CV_RETR_RUNS, which uses built-in approximation). </param>
      /// <param name="offset">Offset, by which every contour point is shifted. This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context</param>
      /// <returns>The contour hierarchy</returns>
      public static int[,] FindContourTree(
         IInputOutputArray image, IOutputArray contours,
         CvEnum.ChainApproxMethod method,
         Point offset = new Point())
      {
         using (Mat hierachyMat = new Mat())
         {
            FindContours(image, contours, hierachyMat, RetrType.Tree, method, offset);
            int[,] hierachy = new int[hierachyMat.Cols, 4];
            GCHandle handle = GCHandle.Alloc(hierachy, GCHandleType.Pinned);
            using (Mat tmp = new Mat(hierachyMat.Rows, hierachyMat.Cols, hierachyMat.Depth, 4, handle.AddrOfPinnedObject(), hierachyMat.Step))
            {
               hierachyMat.CopyTo(tmp);
            }
            handle.Free();
            return hierachy;
         }

      }
Example #48
0
 /// <summary>
 /// Retrieves contours from the binary image. Detected contours are written to <paramref name="contours"/>
 /// and, optionally, topology information is written to <paramref name="hierarchy"/>. Contours can be used
 /// for shape analysis and object recognition - see squares.c in OpenCV sample directory.
 /// The function modifies the source image content.
 /// </summary>
 /// <param name="image">The source 8-bit single channel image. Non-zero pixels are treated as 1s, zero pixels remain 0s - that is image treated as binary. To get such a binary image from grayscale, one may use cvThreshold, cvAdaptiveThreshold or cvCanny. The function modifies the source image content</param>
 /// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
 /// <param name="hierarchy">Optional output vector, containing information about the image topology. May be null if the topology is not needed.</param>
 /// <param name="mode">Retrieval mode</param>
 /// <param name="method">Approximation method (for all the modes, except CV_RETR_RUNS, which uses built-in approximation). </param>
 /// <param name="offset">Offset, by which every contour point is shifted. This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context</param>
 public static void FindContours(
    IInputOutputArray image, IOutputArray contours, IOutputArray hierarchy,
    CvEnum.RetrType mode,
    CvEnum.ChainApproxMethod method,
    Point offset = new Point())
 {
    using (InputOutputArray ioaImage = image.GetInputOutputArray())
    using (OutputArray oaContours = contours.GetOutputArray())
    // A null hierarchy is mapped to an empty output array, so the native call can skip it.
    using (OutputArray oaHierarchy = hierarchy == null ? OutputArray.GetEmpty() : hierarchy.GetOutputArray())
       cveFindContours(ioaImage, oaContours, oaHierarchy, mode, method, ref offset);
 }
Example #49
0
 /// <summary>
 /// Calculates a dense optical flow on the GPU.
 /// </summary>
 /// <param name="denseFlow">The dense optical flow object</param>
 /// <param name="i0">first input image.</param>
 /// <param name="i1">second input image of the same size and the same type as <paramref name="i0"/>.</param>
 /// <param name="flow">computed flow image that has the same size as I0 and type CV_32FC2.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaDenseOpticalFlow denseFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow, Stream stream = null)
 {
     // A null stream means "run synchronously": pass IntPtr.Zero to the native call.
     IntPtr streamPtr = (stream != null) ? stream.Ptr : IntPtr.Zero;
     using (InputArray firstFrame = i0.GetInputArray())
     using (InputArray secondFrame = i1.GetInputArray())
     using (InputOutputArray flowResult = flow.GetInputOutputArray())
     {
         cudaDenseOpticalFlowCalc(denseFlow.DenseOpticalFlowPtr, firstFrame, secondFrame, flowResult, streamPtr);
     }
 }
Example #50
0
        /// <summary>
        /// Detects circles, lines, triangles and rectangles in <paramref name="imageIn"/>, draws each
        /// category on its own canvas, and vertically concatenates the input image with the three
        /// annotated canvases into <paramref name="imageOut"/>.
        /// </summary>
        /// <param name="imageIn">The BGR input image to process.</param>
        /// <param name="imageOut">Receives the input image stacked on top of the three annotated canvases.</param>
        /// <returns>A message reporting the detection time in milliseconds.</returns>
        public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            Stopwatch watch = Stopwatch.StartNew();

            #region Pre-processing
            //Convert the image to grayscale and filter out the noise
            CvInvoke.CvtColor(imageIn, _gray, ColorConversion.Bgr2Gray);
            //Remove noise
            CvInvoke.GaussianBlur(_gray, _gray, new Size(3, 3), 1);
            double cannyThreshold        = 180.0;
            double cannyThresholdLinking = 120.0;
            CvInvoke.Canny(_gray, _cannyEdges, cannyThreshold, cannyThresholdLinking);
            #endregion

            #region circle detection
            // NOTE: circle detection runs on the blurred gray image (not on _cannyEdges);
            // cannyThreshold is reused here as the Hough gradient high threshold.
            double    circleAccumulatorThreshold = 120;
            CircleF[] circles = CvInvoke.HoughCircles(_gray, HoughModes.Gradient, 2.0, 20.0, cannyThreshold,
                                                      circleAccumulatorThreshold, 5);
            #endregion

            #region Edge detection
            LineSegment2D[] lines = CvInvoke.HoughLinesP(
                _cannyEdges,
                1,              //Distance resolution in pixel-related units
                Math.PI / 45.0, //Angle resolution measured in radians.
                20,             //threshold
                30,             //min Line width
                10);            //gap between lines
            #endregion

            #region Find triangles and rectangles
            List <Triangle2DF> triangleList = new List <Triangle2DF>();
            List <RotatedRect> boxList      = new List <RotatedRect>(); //a box is a rotated rectangle
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                CvInvoke.FindContours(_cannyEdges, contours, null, RetrType.List,
                                      ChainApproxMethod.ChainApproxSimple);
                int count = contours.Size;
                for (int i = 0; i < count; i++)
                {
                    using (VectorOfPoint contour = contours[i])
                        using (VectorOfPoint approxContour = new VectorOfPoint())
                        {
                            // Simplify the contour with a tolerance of 5% of its perimeter.
                            CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05,
                                                  true);
                            if (CvInvoke.ContourArea(approxContour, false) > 250
                                )                            //only consider contours with area greater than 250
                            {
                                if (approxContour.Size == 3) //The contour has 3 vertices, it is a triangle
                                {
                                    Point[] pts = approxContour.ToArray();
                                    triangleList.Add(new Triangle2DF(
                                                         pts[0],
                                                         pts[1],
                                                         pts[2]
                                                         ));
                                }
                                else if (approxContour.Size == 4) //The contour has 4 vertices.
                                {
                                    #region determine if all the angles in the contour are within [80, 100] degree

                                    bool            isRectangle = true;
                                    Point[]         pts         = approxContour.ToArray();
                                    LineSegment2D[] edges       = PointCollection.PolyLine(pts, true);

                                    for (int j = 0; j < edges.Length; j++)
                                    {
                                        double angle = Math.Abs(
                                            edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
                                        if (angle < 80 || angle > 100)
                                        {
                                            isRectangle = false;
                                            break;
                                        }
                                    }

                                    #endregion

                                    if (isRectangle)
                                    {
                                        boxList.Add(CvInvoke.MinAreaRect(approxContour));
                                    }
                                }
                            }
                        }
                }
            }
            #endregion

            watch.Stop();

            using (Mat triangleRectangleImage = new Mat(_gray.Size, DepthType.Cv8U, 3)) //image to draw triangles and rectangles on
                using (Mat circleImage = new Mat(_gray.Size, DepthType.Cv8U, 3))        //image to draw circles on
                    using (Mat lineImage = new Mat(_gray.Size, DepthType.Cv8U, 3))      //image to draw lines on
                    {
                        #region draw triangles and rectangles

                        triangleRectangleImage.SetTo(new MCvScalar(0));
                        foreach (Triangle2DF triangle in triangleList)
                        {
                            CvInvoke.Polylines(triangleRectangleImage,
                                               Array.ConvertAll(triangle.GetVertices(), Point.Round),
                                               true, new Bgr(Color.DarkBlue).MCvScalar, 2);
                        }

                        foreach (RotatedRect box in boxList)
                        {
                            CvInvoke.Polylines(triangleRectangleImage, Array.ConvertAll(box.GetVertices(), Point.Round),
                                               true,
                                               new Bgr(Color.DarkOrange).MCvScalar, 2);
                        }

                        //Drawing a light gray frame around the image
                        CvInvoke.Rectangle(triangleRectangleImage,
                                           new Rectangle(Point.Empty,
                                                         new Size(triangleRectangleImage.Width - 1, triangleRectangleImage.Height - 1)),
                                           new MCvScalar(120, 120, 120));
                        //Draw the labels
                        CvInvoke.PutText(triangleRectangleImage, "Triangles and Rectangles", new Point(20, 20),
                                         FontFace.HersheyDuplex, 0.5, new MCvScalar(120, 120, 120));

                        #endregion

                        #region draw circles

                        circleImage.SetTo(new MCvScalar(0));
                        foreach (CircleF circle in circles)
                        {
                            CvInvoke.Circle(circleImage, Point.Round(circle.Center), (int)circle.Radius,
                                            new Bgr(Color.Brown).MCvScalar, 2);
                        }

                        //Drawing a light gray frame around the image
                        CvInvoke.Rectangle(circleImage,
                                           new Rectangle(Point.Empty, new Size(circleImage.Width - 1, circleImage.Height - 1)),
                                           new MCvScalar(120, 120, 120));
                        //Draw the labels
                        CvInvoke.PutText(circleImage, "Circles", new Point(20, 20), FontFace.HersheyDuplex, 0.5,
                                         new MCvScalar(120, 120, 120));

                        #endregion

                        #region draw lines

                        lineImage.SetTo(new MCvScalar(0));
                        foreach (LineSegment2D line in lines)
                        {
                            CvInvoke.Line(lineImage, line.P1, line.P2, new Bgr(Color.Green).MCvScalar, 2);
                        }
                        //Drawing a light gray frame around the image
                        CvInvoke.Rectangle(lineImage,
                                           new Rectangle(Point.Empty, new Size(lineImage.Width - 1, lineImage.Height - 1)),
                                           new MCvScalar(120, 120, 120));
                        //Draw the labels
                        CvInvoke.PutText(lineImage, "Lines", new Point(20, 20), FontFace.HersheyDuplex, 0.5,
                                         new MCvScalar(120, 120, 120));

                        #endregion


                        // Stack the original input on top of the three result canvases.
                        using (InputArray iaImageIn = imageIn.GetInputArray())
                            using (Mat imageInMat = iaImageIn.GetMat())
                                CvInvoke.VConcat(new Mat[] { imageInMat, triangleRectangleImage, circleImage, lineImage }, imageOut);
                    }
            return(String.Format("Detected in {0} milliseconds.", watch.ElapsedMilliseconds));
        }
Example #51
0
        /// <summary>
        /// Runs license plate detection on the given image, optionally replacing the detector's
        /// output with cloud OCR text, filters the candidate plates, and shows the results.
        /// </summary>
        /// <param name="image">The image to search for license plates; also used for rendering the results.</param>
        /// <param name="ocr_mode">OCR mode selector. Mode 3 replaces the detector's words with
        /// text recognized by the Computer Vision OCR service.</param>
        /// <returns>Always true once processing has completed.</returns>
        private bool ProcessImage(IInputOutputArray image, int ocr_mode)
        {
            Stopwatch watch = Stopwatch.StartNew(); // time the detection process
            List<IInputOutputArray> licensePlateImagesList = new List<IInputOutputArray>();
            List<IInputOutputArray> filteredLicensePlateImagesList = new List<IInputOutputArray>();
            List<RotatedRect> licenseBoxList = new List<RotatedRect>();
            List<string> words;
            bool validValue = false;

            // The original leaked this UMat; dispose it once OCR is done with it.
            using (UMat filteredPlate = new UMat())
            {
                // NOTE(review): this converts the 'img' member, not the 'image' parameter -- confirm intended.
                CvInvoke.CvtColor(img, filteredPlate, ColorConversion.Bgr2Gray);

                words = _licensePlateDetector.DetectLicensePlate(
                    image,
                    licensePlateImagesList,
                    filteredLicensePlateImagesList,
                    licenseBoxList,
                    ocr_mode);

                if (ocr_mode == 3)
                {
                    // Cloud OCR on the whole gray image replaces the detector's words.
                    StringBuilder strBuilder = ComputerVisionOCR.GetText(filteredPlate);
                    if (strBuilder != null)
                    {
                        words.Clear();
                        words.Add(strBuilder.ToString());
                    }
                }
            }

            var validWords         = new List<string>();
            var validLicencePlates = new List<IInputOutputArray>();

            for (int w = 0; w < words.Count; w++)
            {
                // Strip whitespace control characters, then everything non-alphanumeric.
                string stripped    = Regex.Replace(words[w], @"\t|\n|\r", "");
                string replacement = Regex.Replace(stripped, "[^0-9a-zA-Z]+", "");
                // Null check must precede the Length access (the original tested Length first,
                // making the null check dead code).
                if (replacement != null && replacement.Length >= 6)
                {
                    var filteredLicence = FilterLicenceSpain(replacement);
                    if (!string.IsNullOrWhiteSpace(filteredLicence))
                    {
                        validValue = true;
                        // De-duplicate on the value actually stored; the original compared against
                        // 'replacement', which is never added to validWords, so duplicates slipped in.
                        if (!validWords.Contains(filteredLicence))
                        {
                            validWords.Add(filteredLicence);
                            validLicencePlates.Add(licensePlateImagesList[w]);
                        }
                    }
                }
            }

            if (validValue)
            {
                ShowResults(image, watch, validLicencePlates, filteredLicensePlateImagesList, licenseBoxList, validWords);
            }
            else
            {
                ShowResults(image, watch, licensePlateImagesList, filteredLicensePlateImagesList, licenseBoxList, words);
            }

            return true;
        }
Example #52
0
 /// <summary>
 /// Draws a single or multiple polygonal curves
 /// </summary>
 /// <param name="img">Image</param>
 /// <param name="pts">Array of pointers to polylines</param>
 /// <param name="isClosed">
 /// Indicates whether the polylines must be drawn closed.
 /// If !=0, the function draws the line from the last vertex of every contour to the first vertex.
 /// </param>
 /// <param name="color">Polyline color</param>
 /// <param name="thickness">Thickness of the polyline edges</param>
 /// <param name="lineType">Type of the line segments, see cvLine description</param>
 /// <param name="shift">Number of fractional bits in the vertex coordinates</param>
 public static void Polylines(IInputOutputArray img, IInputArray pts, bool isClosed, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
 {
    // Unwrap both arrays and delegate the drawing to the native routine.
    using (InputOutputArray nativeImg = img.GetInputOutputArray())
    using (InputArray nativePts = pts.GetInputArray())
    {
       cvePolylines(nativeImg, nativePts, isClosed, ref color, thickness, lineType, shift);
    }
 }
Example #53
0
 /// <summary>
 /// Draws a rectangle specified by a CvRect structure
 /// </summary>
 /// <param name="img">Image</param>
 /// <param name="rect">The rectangle to be drawn</param>
 /// <param name="color">Line color </param>
 /// <param name="thickness">Thickness of lines that make up the rectangle. Negative values make the function to draw a filled rectangle.</param>
 /// <param name="lineType">Type of the line</param>
 /// <param name="shift">Number of fractional bits in the point coordinates</param>
 public static void Rectangle(IInputOutputArray img, Rectangle rect, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
 {
    using (InputOutputArray ioaImg = img.GetInputOutputArray())
       cveRectangle(ioaImg, ref rect, ref color, thickness, lineType, shift);
 }
Example #54
0
 /// <summary>
 /// Adds product of 2 images or their selected regions to accumulator acc
 /// </summary>
 /// <param name="src1">First input image, 1- or 3-channel, 8-bit or 32-bit floating point (each channel of multi-channel image is processed independently)</param>
 /// <param name="src2">Second input image, the same format as the first one</param>
 /// <param name="dst">Accumulator of the same number of channels as input images, 32-bit or 64-bit floating-point</param>
 /// <param name="mask">Optional operation mask</param>
 public static void AccumulateProduct(IInputArray src1, IInputArray src2, IInputOutputArray dst, IInputArray mask = null)
 {
    using (InputArray iaSrc1 = src1.GetInputArray())
    using (InputArray iaSrc2 = src2.GetInputArray())
    using (InputOutputArray ioaDst = dst.GetInputOutputArray())
    // A null mask becomes an empty native array (no masking).
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
       cveAccumulateProduct(iaSrc1, iaSrc2, ioaDst, iaMask);
 }
Example #55
0
 /// <summary>
 /// Calculates weighted sum of input <paramref name="src"/> and the accumulator acc so that acc becomes a running average of frame sequence:
 /// acc(x,y)=(1-<paramref name="alpha"/>) * acc(x,y) + <paramref name="alpha"/> * image(x,y) if mask(x,y)!=0
 /// where <paramref name="alpha"/> regulates update speed (how fast accumulator forgets about previous frames).
 /// </summary>
 /// <param name="src">Input image, 1- or 3-channel, 8-bit or 32-bit floating point (each channel of multi-channel image is processed independently). </param>
 /// <param name="dst">Accumulator of the same number of channels as input image, 32-bit or 64-bit floating-point. </param>
 /// <param name="alpha">Weight of input image</param>
 /// <param name="mask">Optional operation mask</param>
 public static void AccumulateWeighted(IInputArray src, IInputOutputArray dst, double alpha, IInputArray mask = null)
 {
    // A null mask is translated to an empty native array, i.e. update every pixel.
    using (InputArray nativeSrc = src.GetInputArray())
    using (InputOutputArray nativeDst = dst.GetInputOutputArray())
    using (InputArray nativeMask = (mask != null) ? mask.GetInputArray() : InputArray.GetEmpty())
    {
       cveAccumulateWeighted(nativeSrc, nativeDst, alpha, nativeMask);
    }
 }
Example #56
0
 /// <summary>
 /// Iterates to find the sub-pixel accurate location of corners, or radial saddle points
 /// </summary>
 /// <param name="image">Input image</param>
 /// <param name="corners">Initial coordinates of the input corners and refined coordinates on output</param>
 /// <param name="win">Half sizes of the search window. For example, if win=(5,5) then 5*2+1 x 5*2+1 = 11 x 11 search window is used</param>
 /// <param name="zeroZone">Half size of the dead region in the middle of the search zone over which the summation in formulae below is not done. It is used sometimes to avoid possible singularities of the autocorrelation matrix. The value of (-1,-1) indicates that there is no such size</param>
 /// <param name="criteria">Criteria for termination of the iterative process of corner refinement. That is, the process of corner position refinement stops either after certain number of iteration or when a required accuracy is achieved. The criteria may specify either of or both the maximum number of iteration and the required accuracy</param>
 public static void CornerSubPix(
    IInputArray image,
    IInputOutputArray corners,
    Size win,
    Size zeroZone,
    MCvTermCriteria criteria)
 {
    // The corner coordinates are refined in place through the input/output wrapper.
    using (InputArray nativeImage = image.GetInputArray())
    using (InputOutputArray nativeCorners = corners.GetInputOutputArray())
    {
       cveCornerSubPix(nativeImage, nativeCorners, ref win, ref zeroZone, ref criteria);
    }
 }
Example #57
0
 /// <summary>
 /// Updates the motion history image as following:
 /// mhi(x,y)=timestamp  if silhouette(x,y)!=0
 ///         0          if silhouette(x,y)=0 and mhi(x,y)&lt;timestamp-duration
 ///         mhi(x,y)   otherwise
 /// That is, MHI pixels where motion occurs are set to the current timestamp, while the pixels where motion happened far ago are cleared.
 /// </summary>
 /// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs. </param>
 /// <param name="mhi">Motion history image, that is updated by the function (single-channel, 32-bit floating-point) </param>
 /// <param name="timestamp">Current time in milliseconds or other units. </param>
 /// <param name="duration">Maximal duration of motion track in the same units as timestamp. </param>
 public static void UpdateMotionHistory(IInputArray silhouette, IInputOutputArray mhi, double timestamp, double duration)
 {
     // The motion-history image is updated in place via its input/output wrapper.
     using (InputArray nativeSilhouette = silhouette.GetInputArray())
     using (InputOutputArray nativeMhi = mhi.GetInputOutputArray())
     {
         cveUpdateMotionHistory(nativeSilhouette, nativeMhi, timestamp, duration);
     }
 }
Example #58
0
 /// <summary>
 /// Fills a connected component with given color.
 /// </summary>
 /// <param name="src">Input 1- or 3-channel, 8-bit or floating-point image. It is modified by the function unless CV_FLOODFILL_MASK_ONLY flag is set.</param>
 /// <param name="mask">Operation mask,
 /// should be singe-channel 8-bit image, 2 pixels wider and 2 pixels taller than image.
 /// If not IntPtr.Zero, the function uses and updates the mask, so user takes responsibility of initializing mask content.
 /// Floodfilling can't go across non-zero pixels in the mask, for example, an edge detector output can be used as a mask to stop filling at edges.
 /// Or it is possible to use the same mask in multiple calls to the function to make sure the filled area do not overlap.
 /// Note: because mask is larger than the filled image, pixel in mask that corresponds to (x,y) pixel in image will have coordinates (x+1,y+1).
 /// May be null, in which case an empty native array is passed.</param>
 /// <param name="seedPoint">The starting point.</param>
 /// <param name="newVal">New value of repainted domain pixels.</param>
 /// <param name="rect">Output parameter set by the function to the minimum bounding rectangle of the repainted domain.</param>
 /// <param name="loDiff">Maximal lower brightness/color difference
 /// between the currently observed pixel and one of its neighbor belong to the component
 /// or seed pixel to add the pixel to component.
 /// In case of 8-bit color images it is packed value.</param>
 /// <param name="upDiff">Maximal upper brightness/color difference
 /// between the currently observed pixel and one of its neighbor belong to the component
 /// or seed pixel to add the pixel to component.
 /// In case of 8-bit color images it is packed value.</param>
 /// <param name="connectivity">Flood fill connectivity</param>
 /// <param name="flags">The operation flags.
 /// Upper bits can be 0 or combination of the following flags:
 /// CV_FLOODFILL_FIXED_RANGE - if set the difference between the current pixel and seed pixel is considered,
 /// otherwise difference between neighbor pixels is considered (the range is floating).
 /// CV_FLOODFILL_MASK_ONLY - if set, the function does not fill the image (new_val is ignored),
 /// but the fills mask (that must be non-NULL in this case). </param>
 /// <returns>The value returned by the native floodFill call (per OpenCV documentation, the number of repainted pixels).</returns>
 public static int FloodFill(
    IInputOutputArray src,
    IInputOutputArray mask,
    Point seedPoint,
    MCvScalar newVal,
    out Rectangle rect,
    MCvScalar loDiff,
    MCvScalar upDiff,
    CvEnum.Connectivity connectivity = CvEnum.Connectivity.FourConnected,
    CvEnum.FloodFillType flags = CvEnum.FloodFillType.Default)
 {
    rect = new Rectangle();
    using (InputOutputArray ioaSrc = src.GetInputOutputArray())
    // A null mask becomes an empty native array; connectivity and flags are OR-ed into one int.
    using (InputOutputArray ioaMask = mask == null ? InputOutputArray.GetEmpty() : mask.GetInputOutputArray())
       return cveFloodFill(
          ioaSrc,
          ioaMask,
          ref seedPoint, ref newVal,
          ref rect,
          ref loDiff, ref upDiff, (int)connectivity | (int)flags);
 }
Example #59
0
        /// <summary>
        /// Sweeps the license-plate detector over a grid of plate width/height ratio
        /// parameters ([1, 12) in 0.2 steps for each axis), logging every combination
        /// that yields a recognized plate. The drawing loop at the end runs over
        /// <c>words</c>, which is deliberately left empty here, so nothing is drawn.
        /// </summary>
        /// <param name="image">The image to scan; plate outlines would be drawn onto it.</param>
        private void ProcessImage(IInputOutputArray image)
        {
            Stopwatch watch = Stopwatch.StartNew(); // time the detection process

            List<IInputOutputArray> licensePlateImagesList = new List<IInputOutputArray>();
            List<IInputOutputArray> filteredLicensePlateImagesList = new List<IInputOutputArray>();
            List<RotatedRect> licenseBoxList = new List<RotatedRect>();

            // Human-readable log of every (width, height) combination that matched.
            var found = new List<string>();

            // Parameter sweep over the detector's plate-size ratios.
            // NOTE(review): the three lists above accumulate entries across all
            // ~3000 iterations and are never cleared or disposed — confirm whether
            // that accumulation (and the native memory it may hold) is intended.
            for (double rWidth = 1; rWidth < 12; rWidth += 0.2)
            {
                for (double rHeight = 1; rHeight < 12; rHeight += 0.2)
                {
                    List<string> words1 = _licensePlateDetector.DetectLicensePlate(
                        image,
                        licensePlateImagesList,
                        filteredLicensePlateImagesList,
                        licenseBoxList, rWidth, rHeight);

                    if (words1.Any())
                    {
                        var f = $"FOUND: {rWidth}-{rHeight} = {string.Concat(words1)}";

                        found.Add(f);

                        Console.WriteLine(f);
                    }
                }
            }

            // Intentionally empty: the sweep above only logs to the console, so the
            // display loop below is skipped. Populate this to re-enable drawing.
            List<string> words = new List<string>();

            watch.Stop(); // stop the timer

            System.Drawing.Point startPoint = new System.Drawing.Point(10, 10);
            for (int i = 0; i < words.Count; i++)
            {
                // Dispose the concatenated preview Mat to release its native buffer
                // (the original leaked one Mat per recognized plate).
                // NOTE(review): assumes AddLabelAndImage copies the image rather than
                // retaining the Mat — confirm against its implementation.
                using (Mat dest = new Mat())
                {
                    CvInvoke.VConcat(licensePlateImagesList[i], filteredLicensePlateImagesList[i], dest);
                    AddLabelAndImage(
                        ref startPoint,
                        String.Format("License: {0}", words[i]),
                        dest);
                }

                // Outline the detected plate on the source image in red.
                PointF[] verticesF = licenseBoxList[i].GetVertices();
                System.Drawing.Point[] vertices = Array.ConvertAll(verticesF, System.Drawing.Point.Round);
                using (VectorOfPoint pts = new VectorOfPoint(vertices))
                    CvInvoke.Polylines(image, pts, true, new Bgr(System.Drawing.Color.Red).MCvScalar, 2);
            }
        }
Example #60
0
      /// <summary>
      /// Swap channels.
      /// </summary>
      /// <param name="src">The image where the channels will be swapped</param>
      /// <param name="dstOrder">
      /// Integer array describing how channel values are permutated. The n-th entry
      /// of the array contains the number of the channel that is stored in the n-th channel of
      /// the output image. E.g. Given an RGBA image, aDstOrder = [3,2,1,0] converts this to ABGR
      /// channel order.
      /// </param>
      /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
      /// <exception cref="ArgumentException">Thrown when <paramref name="dstOrder"/> is null or has fewer than 4 entries.</exception>
      public static void SwapChannels(IInputOutputArray src, int[] dstOrder, Stream stream)
      {
         if (dstOrder == null || dstOrder.Length < 4)
            throw new ArgumentException("dstOrder must be an int array of size 4");

         // Pin the managed array so the native call can read it via a raw pointer.
         GCHandle handle = GCHandle.Alloc(dstOrder, GCHandleType.Pinned);
         try
         {
            using (InputOutputArray ioaSrc = src.GetInputOutputArray())
               cudaSwapChannels(ioaSrc, handle.AddrOfPinnedObject(), stream);
         }
         finally
         {
            // Always unpin, even if the native call throws (the original leaked
            // the pinned handle on exception).
            handle.Free();
         }
      }