public static Mat Draw(Mat image, VectorOfInt markerIds, VectorOfVectorOfPointF markerCorners, VectorOfInt charucoIds, VectorOfPointF charucoCorners)
        {
            Mat result = image.ToImage<Rgb, byte>().Mat;

            ArucoInvoke.DrawDetectedMarkers(result, markerCorners, markerIds, new MCvScalar(255, 0, 0));
            ArucoInvoke.DrawDetectedCornersCharuco(result, charucoCorners, charucoIds, new MCvScalar(255, 255, 0));

            return(result);
        }
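
For context, here is a hedged sketch of how the inputs to Draw could be produced with Emgu CV's ArUco API. The board geometry and the input file "charuco.png" are hypothetical, and ArucoInvoke.DetectMarkers / ArucoInvoke.InterpolateCornersCharuco are assumed to be available as in recent Emgu CV releases:

        using (Mat image = CvInvoke.Imread("charuco.png")) // hypothetical input
        using (Dictionary dict = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_50))
        using (CharucoBoard board = new CharucoBoard(5, 7, 0.04f, 0.02f, dict)) // hypothetical layout
        using (VectorOfInt markerIds = new VectorOfInt())
        using (VectorOfVectorOfPointF markerCorners = new VectorOfVectorOfPointF())
        using (VectorOfInt charucoIds = new VectorOfInt())
        using (VectorOfPointF charucoCorners = new VectorOfPointF())
        {
            DetectorParameters p = DetectorParameters.GetDefault();
            ArucoInvoke.DetectMarkers(image, dict, markerCorners, markerIds, p);
            ArucoInvoke.InterpolateCornersCharuco(markerCorners, markerIds, image, board, charucoCorners, charucoIds);
            Mat annotated = Draw(image, markerIds, markerCorners, charucoIds, charucoCorners);
        }
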
Example #2
 private static void PushParameters(VectorOfInt vec, KeyValuePair<CvEnum.ImwriteFlags, int>[] parameters)
 {
    if (parameters == null || parameters.Length == 0)
       return;
    foreach (KeyValuePair<CvEnum.ImwriteFlags, int> p in parameters)
    {
       vec.Push(new int[] {(int) p.Key, p.Value});
    }
 }
Example #3
 /// <summary>
 /// Train the face recognizer with the specified images and labels
 /// </summary>
 /// <param name="images">The images used in the training.</param>
 /// <param name="labels">The labels of the images.</param>
 public void Train(Mat[] images, int[] labels)
 {
     using (VectorOfMat imgVec = new VectorOfMat())
         using (VectorOfInt labelVec = new VectorOfInt(labels))
         {
             imgVec.Push(images);
             Train(imgVec, labelVec);
         }
 }
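
A minimal usage sketch for Train, assuming Emgu CV's EigenFaceRecognizer (from Emgu.CV.Face) and two hypothetical grayscale training files:

     Mat[] images =
     {
         CvInvoke.Imread("face0.png", ImreadModes.Grayscale), // hypothetical files
         CvInvoke.Imread("face1.png", ImreadModes.Grayscale)
     };
     int[] labels = { 0, 1 }; // one label per image
     using (EigenFaceRecognizer recognizer = new EigenFaceRecognizer())
     {
         recognizer.Train(images, labels);
     }
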
Example #4
 /// <summary>
 /// Encode the image and store the result as a byte vector.
 /// </summary>
 /// <param name="ext">The file extension that defines the image format</param>
 /// <param name="image">The image</param>
 /// <param name="buf">Output buffer resized to fit the compressed image.</param>
 /// <param name="parameters">Optional encoding parameters, given as flag/value pairs; omit to use the defaults</param>
 public static void Imencode(String ext, IInputArray image, VectorOfByte buf, params KeyValuePair <CvEnum.ImwriteFlags, int>[] parameters)
 {
     using (CvString extStr = new CvString(ext))
         using (VectorOfInt p = new VectorOfInt())
         {
             PushParameters(p, parameters);
             using (InputArray iaImage = image.GetInputArray())
                 cveImencode(extStr, iaImage, buf, p);
         }
 }
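
A usage sketch, assuming this overload is exposed as CvInvoke.Imencode and "lena.jpg" is a placeholder input; JpegQuality is one of the standard ImwriteFlags:

     using (Mat img = CvInvoke.Imread("lena.jpg")) // placeholder input
     using (VectorOfByte buf = new VectorOfByte())
     {
         CvInvoke.Imencode(".jpg", img, buf,
             new KeyValuePair<CvEnum.ImwriteFlags, int>(CvEnum.ImwriteFlags.JpegQuality, 90));
         byte[] jpegBytes = buf.ToArray(); // compressed image bytes
     }
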
Example #5
 /// <summary>
 /// Update the face recognizer with the specified images and labels
 /// </summary>
 /// <param name="images">The images used for updating the face recognizer</param>
 /// <param name="labels">The labels of the images</param>
 public void Update(Mat[] images, int[] labels)
 {
      Debug.Assert(images.Length == labels.Length, "The number of labels must equal the number of images");
     using (VectorOfMat imgVec = new VectorOfMat())
         using (VectorOfInt labelVec = new VectorOfInt(labels))
         {
             imgVec.Push(images);
             Update(imgVec, labelVec);
         }
 }
Example #6
 /// <summary>
 /// Performs non maximum suppression given boxes and corresponding scores.
 /// </summary>
 /// <param name="bboxes">A set of bounding boxes to apply NMS.</param>
 /// <param name="scores">A set of corresponding confidences.</param>
 /// <param name="scoreThreshold">A threshold used to filter boxes by score.</param>
 /// <param name="nmsThreshold">A threshold used in non maximum suppression.</param>
 /// <param name="eta">A coefficient in adaptive threshold</param>
 /// <param name="topK">If &gt;0, keep at most top_k picked indices.</param>
 /// <returns>The indices of the boxes to keep after NMS</returns>
 public static int[] NMSBoxes(RotatedRect[] bboxes, float[] scores, float scoreThreshold, float nmsThreshold, float eta = 1.0f, int topK = 0)
 {
     using (VectorOfRotatedRect vBoxes = new VectorOfRotatedRect(bboxes))
         using (VectorOfFloat vScores = new VectorOfFloat(scores))
             using (VectorOfInt indices = new VectorOfInt())
             {
                 NMSBoxes(vBoxes, vScores, scoreThreshold, nmsThreshold, indices, eta, topK);
                 return(indices.ToArray());
             }
 }
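
A small sketch of suppressing two overlapping rotated boxes; the call is written against DnnInvoke, where this overload lives in Emgu CV's dnn module (an assumption worth verifying for your version):

     RotatedRect[] boxes =
     {
         new RotatedRect(new PointF(50, 50), new SizeF(40, 20), 0),
         new RotatedRect(new PointF(52, 51), new SizeF(40, 20), 0) // near-duplicate of the first
     };
     float[] scores = { 0.9f, 0.6f };
     int[] keep = DnnInvoke.NMSBoxes(boxes, scores, 0.5f, 0.4f);
     // keep should contain only index 0, the higher-scoring of the two overlapping boxes
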
Example #7
 /// <summary>
 /// Train the face recognizer with the specified images and labels
 /// </summary>
 /// <param name="images">The images used in the training.</param>
 /// <param name="labels">The labels of the images.</param>
 public void Train <TColor, TDepth>(Image <TColor, TDepth>[] images, int[] labels)
     where TColor : struct, IColor
     where TDepth : new()
 {
     using (VectorOfMat imgVec = new VectorOfMat())
         using (VectorOfInt labelVec = new VectorOfInt(labels))
         {
             imgVec.Push <TDepth>(images);
             Train(imgVec, labelVec);
         }
 }
Example #8
        public static VectorOfInt <D> operator +(VectorOfInt <D> lhs, VectorOfInt <D> rhs)
        {
            var result = new VectorOfInt <D>();
            var dim    = new D().Value;

            for (var i = 0; i < dim; i++)
            {
                result[i] = lhs[i] + rhs[i];
            }
            return(result);
        }
Example #9
 private static void PushParameters(VectorOfInt vec, KeyValuePair <CvEnum.ImwriteFlags, int>[] parameters)
 {
     if (parameters == null || parameters.Length == 0)
     {
         return;
     }
     foreach (KeyValuePair <CvEnum.ImwriteFlags, int> p in parameters)
     {
         vec.Push(new int[] { (int)p.Key, p.Value });
     }
 }
Example #10
        /// <summary>
        /// Create a video writer using the specified information
        /// </summary>
        /// <param name="fileName">The name of the video file to be written to</param>
        /// <param name="compressionCode">Compression code, usually computed with CvInvoke.CV_FOURCC.
        /// On Windows, use -1 to open a codec selection dialog.
        /// On Linux, use VideoWriter.Fourcc('I', 'Y', 'U', 'V') for the default codec for the given file name.
        /// </param>
        /// <param name="fps">The frame rate (frames per second)</param>
        /// <param name="size">The size of the frame</param>
        /// <param name="apiPreference">Allows specifying the API backend to use.</param>
        /// <param name="writerProperties">Optional writer properties. e.g. new Tuple&lt;VideoWriter.WriterProperty&gt;(VideoWriter.WriterProperty.HwAcceleration, (int) VideoAccelerationType.Any)</param>
        public VideoWriter(String fileName, int apiPreference, int compressionCode, double fps, System.Drawing.Size size, params Tuple <WriterProperty, int>[] writerProperties)
        {
            using (CvString s = new CvString(fileName))
                using (VectorOfInt vectInt = ConvertWriterProperties(writerProperties))
                    _ptr = CvInvoke.cveVideoWriterCreate3(s, apiPreference, compressionCode, fps, ref size, vectInt);

            if (_ptr == IntPtr.Zero || IsOpened == false)
            {
                throw new NullReferenceException("Unable to create VideoWriter. Make sure you have the specified codec installed");
            }
        }
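
A hedged sketch of calling this constructor: 0 is assumed to select any available backend (CAP_ANY), "out.avi" is a placeholder path, and the optional writer properties are omitted:

            int fourcc = VideoWriter.Fourcc('M', 'J', 'P', 'G');
            using (VideoWriter writer = new VideoWriter("out.avi", 0 /* any backend */, fourcc, 30,
                       new System.Drawing.Size(640, 480)))
            using (Mat frame = new Mat(480, 640, DepthType.Cv8U, 3))
            {
                writer.Write(frame); // write one (blank) frame
            }
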
Example #11
File: Form1.cs Project: RimasR/TTISR
        public Image <Bgr, byte> GetPalm(Mat mask)
        {
            int width  = mask.Width;
            int height = mask.Height;
            var temp   = new Mat();
            var result = mask.ToImage <Bgr, byte>();
            VectorOfVectorOfPoint contours       = new VectorOfVectorOfPoint();
            VectorOfPoint         biggestContour = new VectorOfPoint();

            CvInvoke.FindContours(mask, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
            if (contours.Size > 0)
            {
                biggestContour = contours[0];
                for (int i = 0; i < contours.Size; i++)
                {
                    if (contours[i].Size > biggestContour.Size)
                    {
                        biggestContour = contours[i];
                    }
                }
            }
            if (biggestContour.Size != 0)
            {
                //Get the hand contours
                CvInvoke.ApproxPolyDP(biggestContour, biggestContour, 0.00000001, false);
                var         points = biggestContour.ToArray();
                VectorOfInt hull   = new VectorOfInt();
                //find the palm area using convexity defects
                CvInvoke.ConvexHull(biggestContour, hull, true);
                var box     = CvInvoke.MinAreaRect(biggestContour);
                Mat defects = new Mat();
                CvInvoke.ConvexityDefects(biggestContour, hull, defects);

                if (!defects.IsEmpty)
                {
                    //Data from Mat are not directly readable so we convert it to Matrix<>
                    Matrix <int> m = new Matrix <int>(defects.Rows, defects.Cols, defects.NumberOfChannels);
                    defects.CopyTo(m);

                    for (int i = 0; i < m.Rows; i++)
                    {
                        int   startIdx   = m.Data[i, 0];
                        int   endIdx     = m.Data[i, 1];
                        Point startPoint = points[startIdx];
                        Point endPoint   = points[endIdx];
                        //draw  a line connecting the convexity defect start point and end point in thin red line
                        CvInvoke.Line(result, startPoint, endPoint, new MCvScalar(0, 0, 255));
                    }
                }
            }

            return(result);
        }
Example #12
 /// <summary>
 /// Update the face recognizer with the specified images and labels
 /// </summary>
 /// <param name="images">The images used for updating the face recognizer</param>
 /// <param name="labels">The labels of the images</param>
 public void Update <TColor, TDepth>(Image <TColor, TDepth>[] images, int[] labels)
     where TColor : struct, IColor
     where TDepth : new()
 {
      Debug.Assert(images.Length == labels.Length, "The number of labels must equal the number of images");
     using (VectorOfMat imgVec = new VectorOfMat())
         using (VectorOfInt labelVec = new VectorOfInt(labels))
         {
             imgVec.Push(images);
             Update(imgVec, labelVec);
         }
 }
Example #13
 /// <summary>
 /// Performs soft non maximum suppression given boxes and corresponding scores. Reference: https://arxiv.org/abs/1704.04503
 /// </summary>
 /// <param name="bboxes">A set of bounding boxes to apply Soft NMS.</param>
 /// <param name="scores">A set of corresponding confidences.</param>
 /// <param name="updatedScores">A set of corresponding updated confidences.</param>
 /// <param name="scoreThreshold">A threshold used to filter boxes by score.</param>
 /// <param name="nmsThreshold">A threshold used in non maximum suppression.</param>
 /// <param name="indices">The kept indices of bboxes after NMS.</param>
 /// <param name="topK">Keep at most <paramref name="topK"/> picked indices.</param>
 /// <param name="sigma">Parameter of Gaussian weighting.</param>
 /// <param name="method">Gaussian or linear.</param>
 public static void SoftNMSBoxes(
     VectorOfRect bboxes,
     VectorOfFloat scores,
     VectorOfFloat updatedScores,
     float scoreThreshold,
     float nmsThreshold,
     VectorOfInt indices,
     int topK             = 0,
     float sigma          = 0.5f,
     SoftNMSMethod method = SoftNMSMethod.Gaussian)
 {
     cveDnnSoftNMSBoxes(bboxes, scores, updatedScores, scoreThreshold, nmsThreshold, indices, topK, sigma, method);
 }
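
A usage sketch with two overlapping boxes, assuming the method is exposed on DnnInvoke:

     using (VectorOfRect boxes = new VectorOfRect(new Rectangle[]
            { new Rectangle(10, 10, 40, 40), new Rectangle(12, 12, 40, 40) }))
     using (VectorOfFloat scores = new VectorOfFloat(new float[] { 0.9f, 0.8f }))
     using (VectorOfFloat updatedScores = new VectorOfFloat())
     using (VectorOfInt keptIndices = new VectorOfInt())
     {
         DnnInvoke.SoftNMSBoxes(boxes, scores, updatedScores, 0.3f, 0.5f, keptIndices);
         // unlike hard NMS, overlapping boxes are kept with decayed confidences
         // in updatedScores rather than being discarded outright
     }
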
Example #14
            protected override TOperatorReturn OperatorMultiply(VectorOfInt <TSelf, TDim, TOperatorReturn> dist)
            {
                var result = new TOperatorReturn()
                {
                    Vec = (int[])Vec.Clone(), Dim = Dim
                };

                for (int i = 0; i < result.Dim; i++)
                {
                    result[i] *= dist[i];
                }
                return(result);
            }
Example #15
 /// <summary>
 /// Encode the image and store the result as a byte vector.
 /// </summary>
 /// <param name="ext">The file extension that defines the image format</param>
 /// <param name="image">The image</param>
 /// <param name="buf">Output buffer resized to fit the compressed image.</param>
 /// <param name="parameters">Optional integer encoding parameters; omit to use the defaults</param>
 public static void Imencode(String ext, IInputArray image, VectorOfByte buf, params int[] parameters)
 {
     using (CvString extStr = new CvString(ext))
         using (VectorOfInt p = new VectorOfInt())
         {
             if (parameters.Length > 0)
             {
                 p.Push(parameters);
             }
             using (InputArray iaImage = image.GetInputArray())
                 cveImencode(extStr, iaImage, buf, p);
         }
 }
Example #16
        private static VectorOfInt ConvertWriterProperties(Tuple <WriterProperty, int>[] captureProperties)
        {
            VectorOfInt vectInt = new VectorOfInt();

            if (captureProperties != null)
            {
                foreach (Tuple <WriterProperty, int> cp in captureProperties)
                {
                    vectInt.Push(new int[] { (int)cp.Item1, cp.Item2 });
                }
            }

            return(vectInt);
        }
Example #17
        private void button2_Click(object sender, EventArgs e)
        {
            textBox1.Text = "";
            double             threshold1 = Convert.ToDouble(numericUpDown8.Value);
            double             threshold2 = Convert.ToDouble(numericUpDown9.Value);
            Image <Gray, byte> imagedst   = new Image <Gray, byte>(src.Size);

            //CvInvoke.Threshold(imagesrc, imagedst, threshold1, 255, ThresholdType.Binary);
            CvInvoke.Canny(src, imagedst, threshold1, threshold2);
            //CvInvoke.CvtColor(imagedst, imagedst, ColorConversion.BayerBg2Gray); // convert to grayscale
            pictureBox2.Image = imagedst.Bitmap;
            VectorOfInt vp  = new VectorOfInt();
            double      rho = Convert.ToDouble(numericUpDown7.Value);
            int         houghLinesThreshold = Convert.ToInt32(numericUpDown12.Value); // only intersection counts above this threshold are accepted as a line
            double      minLineLength       = Convert.ToDouble(numericUpDown11.Value);
            double      maxGap              = Convert.ToDouble(numericUpDown10.Value);

            try
            {
                // CvInvoke.HoughLines(imagedst, vp, rho, theta, houghLinesThreshold, minLineLength, maxGap);
                // LineSegment2D[][] _lines = imagedst.HoughLines(100, 100, rho, theta, houghLinesThreshold, minLineLength, maxGap);
                //for (int i = 0; i < _lines[0].Length; i++)
                //{
                //    //for (int j = 0; j < _lines[0].Length; j++)
                //    CvInvoke.Line(src, _lines[0][i].P1, _lines[0][i].P2, new Bgr(0, 0, 255).MCvScalar, 2); // draw the line on the original image
                //}

                LineSegment2D[] lines = CvInvoke.HoughLinesP(imagedst,            // 8-bit single-channel image
                                                             rho,                 // distance resolution
                                                             Math.PI / 180,       // angle resolution
                                                             houghLinesThreshold, // intersection count threshold
                                                             minLineLength,       // minimum line length
                                                             maxGap               // maximum gap on a line; a larger gap splits it into two segments
                                                             );
                textBox1.Text = "Found " + lines.Length + " lines in total." + "\r\n";
                src2          = src.Copy();
                for (int i = 0; i < lines.Length; ++i)
                {
                    CvInvoke.Line(src2, lines[i].P1, lines[i].P2, new Bgr(0, 0, 255).MCvScalar, 3); // draw the line on the original image
                    double angle = Math.Atan2(lines[i].P1.Y - lines[i].P2.Y, lines[i].P1.X - lines[i].P2.X);
                    textBox1.Text += "Angle of line " + i + ": " + angle + "\r\n";
                }
            }
            catch
            {
                throw;
            }
            pictureBox1.Image = src2.Bitmap;
        }
Example #18
 /// <summary>
 /// Both detects and decodes barcode
 /// </summary>
 /// <param name="image">grayscale or color (BGR) image containing barcode.</param>
 /// <param name="decodedInfo">UTF8-encoded output vector of string(s) or empty vector of string if the codes cannot be decoded.</param>
 /// <param name="decodedType">vector of BarcodeType, specifies the type of these barcodes</param>
 /// <param name="points">Optional output vector of vertices of the found  barcode rectangle. Will be empty if not found.</param>
 /// <returns>True if barcode is detected and decoded.</returns>
 public bool DetectAndDecode(
     IInputArray image,
     VectorOfCvString decodedInfo,
     VectorOfInt decodedType,
     IOutputArray points = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaPoints = points == null ? OutputArray.GetEmpty() : points.GetOutputArray())
             return(BarcodeInvoke.cveBarcodeDetectorDetectAndDecode(
                        _ptr,
                        iaImage,
                        decodedInfo,
                        decodedType,
                        oaPoints));
 }
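
A usage sketch, assuming Emgu CV's BarcodeDetector can be constructed with its default arguments and "barcode.png" is a placeholder input:

     using (BarcodeDetector detector = new BarcodeDetector())
     using (Mat image = CvInvoke.Imread("barcode.png")) // placeholder input
     using (VectorOfCvString decodedInfo = new VectorOfCvString())
     using (VectorOfInt decodedType = new VectorOfInt())
     {
         if (detector.DetectAndDecode(image, decodedInfo, decodedType))
         {
             string[] texts = decodedInfo.ToArray(); // decoded payload per barcode
         }
     }
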
Example #19
 /// <summary>
 /// LOGOS (Local geometric support for high-outlier spatial verification) feature matching strategy
 /// </summary>
 /// <param name="keypoints1">Input keypoints of image1.</param>
 /// <param name="keypoints2">Input keypoints of image2.</param>
 /// <param name="nn1">Index to the closest BoW centroid for each descriptors of image1.</param>
 /// <param name="nn2">Index to the closest BoW centroid for each descriptors of image2.</param>
 /// <param name="matches1to2">Matches returned by the LOGOS matching strategy.</param>
 public static void MatchLOGOS(
     VectorOfKeyPoint keypoints1,
     VectorOfKeyPoint keypoints2,
     VectorOfInt nn1,
     VectorOfInt nn2,
     VectorOfDMatch matches1to2)
 {
     cveMatchLOGOS(
         keypoints1,
         keypoints2,
         nn1,
         nn2,
         matches1to2
         );
 }
Example #20
        /// <summary>
        /// Create a capture from file or a video stream
        /// </summary>
        /// <param name="fileName">The name of a file, or an url pointed to a stream.</param>
        /// <param name="captureApi">The preferred Capture API backends to use. Can be used to enforce a specific reader implementation if multiple are available.</param>
        /// <param name="captureProperties">Optional capture properties. e.g. new Tuple&lt;CvEnum.CapProp&gt;(CvEnum.CapProp.HwAcceleration, (int) VideoAccelerationType.Any)</param>
        public VideoCapture(String fileName, API captureApi = API.Any, params Tuple <CvEnum.CapProp, int>[] captureProperties)
        {
            using (CvString s = new CvString(fileName))
                using (VectorOfInt vectInt = ConvertCaptureProperties(captureProperties))
                {
                    _captureModuleType = CaptureModuleType.Highgui;
                    _ptr = CvInvoke.cveVideoCaptureCreateFromFile(s, captureApi, vectInt);

                    if (_ptr == IntPtr.Zero)
                    {
                        throw new NullReferenceException(String.Format("Unable to create capture from {0}", fileName));
                    }
                }
            _needDispose = true;
        }
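
A minimal read loop against this constructor; "video.mp4" is a placeholder file name:

            using (VideoCapture capture = new VideoCapture("video.mp4"))
            using (Mat frame = new Mat())
            {
                while (capture.Read(frame) && !frame.IsEmpty)
                {
                    // process the frame here
                }
            }
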
Example #21
            protected override TOperatorReturn OperatorAdd(VectorOfInt <TSelf, TDim, TOperatorReturn> dist)
            {
                // assigning {Vec = Vec} would copy the array reference, not its contents;
                // clone the array so the result holds its own copy of the values
                var result = new TOperatorReturn()
                {
                    Vec = (int[])Vec.Clone(), Dim = Dim
                };

                for (int i = 0; i < result.Dim; i++)
                {
                    result[i] += dist[i];
                }
                return(result);
            }
Example #22
 private static void Imencode(string extension, IInputArray image, VectorOfByte buffer, params int[] parameters)
 {
     using (CvString cvString = new CvString(extension))
     {
         using (VectorOfInt intVector = new VectorOfInt())
         {
             if (parameters != null && parameters.Length > 0)
             {
                 intVector.Push(parameters);
             }
             using (InputArray array = image.GetInputArray())
             {
                 cveImencode(cvString, array, buffer, intVector);
             }
         }
     }
 }
Example #23
 public Result(int trainValue, VectorOfPoint trainContour, MCvScalar trainContourColor, float bestROIMatch,
               Mat referenceTrainImage, VectorOfKeyPoint referenceTrainKeyPoints, VectorOfKeyPoint keypointsEvalImage, ref
               VectorOfDMatch matches, ref VectorOfDMatch inliers, ref VectorOfInt inliersMatcheMask, ref Mat homography)
 {
     this._trainValue              = trainValue;
     this._trainContour            = trainContour;
     this._trainContourColor       = trainContourColor;
     this._bestROIMatch            = bestROIMatch;
     this._referenceTrainImage     = referenceTrainImage;
     this._referenceTrainKeyPoints = referenceTrainKeyPoints;
     this._keypointsEvalImag       = keypointsEvalImage;
     this._matches           = matches;
     this._inliers           = inliers;
     this._inliersMatcheMask = inliersMatcheMask;
     this._homography        = homography;
     this._inliersKeyPoints  = new VectorOfKeyPoint();
 }
Example #24
    Image <Bgr, byte> ProcessFrame(Mat colorPicture, MCvScalar skinHsv) //, Mat binPicture
    {
        Mat picture = colorPicture.Clone();

        picture = BackgroundSubtraction(picture, skinHsv);
        //picture = binPicture;
        //return new Image<Bgr, byte>(picture.Bitmap);

        //contour stuff
        VectorOfVectorOfPoint contoursss = new VectorOfVectorOfPoint();

        CvInvoke.FindContours(picture, contoursss, null, RetrType.List, ChainApproxMethod.ChainApproxNone);
        VectorOfPoint handContour = FindLargestContour(contoursss);

        if ((handContour == null || CvInvoke.ContourArea(handContour) < 100 || CvInvoke.ContourArea(handContour) > 200000))
        {
            return(new Image <Bgr, byte>(colorPicture.Bitmap));
        }

        VectorOfVectorOfPoint hulls = new VectorOfVectorOfPoint(1);
        //VectorOfVectorOfPoint hullDefects = new VectorOfVectorOfPoint(1);
        VectorOfInt hullI = new VectorOfInt();

        CvInvoke.ConvexHull(handContour, hullI, false, false);
        CvInvoke.ConvexHull(handContour, hulls[0], false, true);

        //convexity defects
        Mat defects = new Mat();

        CvInvoke.ConvexityDefects(handContour, hullI, defects);
        try
        {
            Matrix <int> m = new Matrix <int>(defects.Rows, defects.Cols, defects.NumberOfChannels); // copy Mat to a matrix...
            defects.CopyTo(m);
            CvInvoke.DrawContours(colorPicture, hulls, -1, new MCvScalar(0, 0, 255), 1);
            Image <Bgr, byte> image = new Image <Bgr, byte>(colorPicture.Bitmap);
            return(DrawPoints(image, m, handContour));
        }
        catch (Exception)
        {
            return(new Image <Bgr, byte>(colorPicture.Bitmap));
        }

        //CvInvoke.Imshow("picture", colorPicture);
        //CvInvoke.WaitKey(); // Render image and keep window opened until any key is pressed
    }
Example #25
        /// <summary> Create a capture using the specified camera</summary>
        /// <param name="camIndex"> The index of the camera to create capture from, starting from 0</param>
        /// <param name="captureApi">The preferred Capture API backends to use. Can be used to enforce a specific reader implementation if multiple are available.</param>
        /// <param name="captureProperties">Optional capture properties. e.g. new Tuple&lt;CvEnum.CapProp&gt;(CvEnum.CapProp.HwAcceleration, (int) VideoAccelerationType.Any)</param>
        public VideoCapture(int camIndex = 0, API captureApi = API.Any, params Tuple <CvEnum.CapProp, int>[] captureProperties)
        {
            _captureModuleType = CaptureModuleType.Camera;

#if TEST_CAPTURE
#else
            using (VectorOfInt vectInt = ConvertCaptureProperties(captureProperties))
            {
                _ptr = CvInvoke.cveVideoCaptureCreateFromDevice(camIndex, captureApi, vectInt);
            }

            if (_ptr == IntPtr.Zero)
            {
                throw new NullReferenceException(String.Format("Error: Unable to create capture from camera {0}", camIndex));
            }
#endif
        }
Example #26
        public static double ValidateCharuco(int squaresX, int squaresY, float squareLength, float markerLength, PredefinedDictionaryName dictionary, Size imageSize, VectorOfInt charucoIds, VectorOfPointF charucoCorners, VectorOfInt markerCounterPerFrame, bool fisheye, Func <byte[], byte[]> GetRemoteChessboardCorner, Mat cameraMatrix, Mat distCoeffs)
        {
            VectorOfVectorOfPoint3D32F processedObjectPoints = new VectorOfVectorOfPoint3D32F();
            VectorOfVectorOfPointF     processedImagePoints  = new VectorOfVectorOfPointF();
            VectorOfPoint3D32F         rvecs = new VectorOfPoint3D32F();
            VectorOfPoint3D32F         tvecs = new VectorOfPoint3D32F();

            int k = 0;

            for (int i = 0; i < markerCounterPerFrame.Size; i++)
            {
                int                nMarkersInThisFrame       = markerCounterPerFrame[i];
                VectorOfPointF     currentImgPoints          = new VectorOfPointF();
                VectorOfPointF     currentImgPointsUndistort = new VectorOfPointF();
                VectorOfInt        currentIds       = new VectorOfInt();
                VectorOfPoint3D32F currentObjPoints = new VectorOfPoint3D32F();
                Mat                tvec             = new Mat();
                Mat                rvec             = new Mat();

                for (int j = 0; j < nMarkersInThisFrame; j++)
                {
                    currentImgPoints.Push(new PointF[] { charucoCorners[k] });
                    currentIds.Push(new int[] { charucoIds[k] });
                    currentObjPoints.Push(new MCvPoint3D32f[] { GetChessboardCorner(squaresX, squaresY, squareLength, markerLength, charucoIds[k], dictionary, GetRemoteChessboardCorner) });
                    k++;
                }

                Mat distCoeffsNew = new Mat(1, 4, DepthType.Cv64F, 1);
                distCoeffsNew.SetValue(0, 0, 0);
                distCoeffsNew.SetValue(0, 1, 0);
                distCoeffsNew.SetValue(0, 2, 0);
                distCoeffsNew.SetValue(0, 3, 0);

                Fisheye.UndistorPoints(currentImgPoints, currentImgPointsUndistort, cameraMatrix, distCoeffs, Mat.Eye(3, 3, DepthType.Cv64F, 1), Mat.Eye(3, 3, DepthType.Cv64F, 1));
                if (ArucoInvoke.EstimatePoseCharucoBoard(currentImgPointsUndistort, currentIds, CreateBoard(squaresX, squaresY, squareLength, markerLength, new Dictionary(dictionary)), Mat.Eye(3, 3, DepthType.Cv64F, 1), distCoeffsNew, rvec, tvec))
                {
                    rvecs.Push(new MCvPoint3D32f[] { new MCvPoint3D32f((float)rvec.GetValue(0, 0), (float)rvec.GetValue(1, 0), (float)rvec.GetValue(2, 0)) });
                    tvecs.Push(new MCvPoint3D32f[] { new MCvPoint3D32f((float)tvec.GetValue(0, 0), (float)tvec.GetValue(1, 0), (float)tvec.GetValue(2, 0)) });

                    processedImagePoints.Push(currentImgPoints);
                    processedObjectPoints.Push(currentObjPoints);
                }
            }

            return(Validate(processedObjectPoints, processedImagePoints, cameraMatrix, distCoeffs, rvecs, tvecs, fisheye));
        }
Example #27
 private static void Imencode(string extension, IInputArray image, VectorOfByte buffer, params int[] parameters)
 {
     using (CvString cvString = new CvString(extension))
     {
         using (VectorOfInt intVector = new VectorOfInt())
         {
             if (parameters != null && parameters.Length > 0)
             {
                 intVector.Push(parameters);
             }
             using (InputArray array = image.GetInputArray())
             {
                 cveImencode(cvString, array, buffer, intVector);
             }
         }
     }
 }
Example #28
        /// <summary>
        /// Calculate convex hull and convexity defects for accurate finger calculation
        /// </summary>
        private Matrix <int> CalculateConvexityDefects(Image <Gray, Byte> img, VectorOfPoint biggestContour, VectorOfVectorOfPoint contours)
        {
            VectorOfPoint currentContour = new VectorOfPoint();
            VectorOfInt   hullIndices    = new VectorOfInt();

            CvInvoke.ApproxPolyDP(biggestContour, currentContour, CvInvoke.ArcLength(biggestContour, true) * .005, true);
            biggestContour = currentContour;
            CvInvoke.ConvexHull(biggestContour, hullIndices, false, false);

            /// Calculate convexity defects
            /// Defects is a 4-element integer vector
            /// (start_index, end_index, farthest_pt_index, fixpt_depth)
            /// stored in a matrix where each row is a defect
            Matrix <int> defects = null;
            Mat          mat     = new Mat();

            CvInvoke.ConvexityDefects(biggestContour, hullIndices, mat);
            if (mat.Rows > 0)
            {
                defects = new Matrix <int>(mat.Rows, mat.Cols, mat.NumberOfChannels);
                mat.CopyTo(defects);

                /// For debugging and training purposes
                /// Draws finger points using convexity defects
                Matrix <int>[] channels = defects.Split();
                /// channel[0] = start_point, channel[1] = end_point, channel[2] = fixpt_depth

                for (int j = 0; j < defects.Rows; ++j)
                {
                    if (j < 5)
                    {
                        CvInvoke.Circle(img, System.Drawing.Point.Round(new System.Drawing.PointF(biggestContour[channels[0][j, 0]].X, biggestContour[channels[0][j, 0]].Y)), 10, new MCvScalar(255, 255, 255), 10);
                    }
                }
            }

            /// For debugging and training purposes
            /// Draws convex hull of biggest contour
            VectorOfPoint hullPoints = new VectorOfPoint();

            CvInvoke.ConvexHull(biggestContour, hullPoints, false);
            CvInvoke.Polylines(img, hullPoints.ToArray(), true, new MCvScalar(255, 255, 255), 10);

            return(defects);
        }
Example #29
        public void TestKMeans2()
        {
            int clustersCount = 2;
            int sampleCount = 300;
            int maxVal = 500;

            Random r = new Random();
            float[] values = new float[sampleCount];
            for (int i = 0; i < sampleCount; i++)
            {
                values[i] = (float) (r.NextDouble() * maxVal);
            }
            using (VectorOfInt labels = new VectorOfInt())
            using (VectorOfFloat vd = new VectorOfFloat(values))
            {
                CvInvoke.Kmeans(vd, 2, labels, new MCvTermCriteria(1000, 0.00001), 2, KMeansInitType.RandomCenters);
            }
        }
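
The test above discards the clustering output; here is a sketch that also retrieves the per-sample labels and the cluster centers, assuming CvInvoke.Kmeans accepts an optional centers output as its last argument:

            float[] values = { 1f, 2f, 3f, 100f, 101f, 102f };
            using (VectorOfFloat data = new VectorOfFloat(values))
            using (VectorOfInt labels = new VectorOfInt())
            using (Mat centers = new Mat())
            {
                double compactness = CvInvoke.Kmeans(data, 2, labels,
                    new MCvTermCriteria(1000, 0.00001), 2, KMeansInitType.RandomCenters, centers);
                int[] assignment = labels.ToArray(); // cluster index (0 or 1) per sample
            }
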
Example #30
        public static double Convexity(VectorOfPoint contour, VectorOfInt convexHull)
        {
            Point[] ptArray = new Point[convexHull.Size];

            for (int i = 0; i < convexHull.Size; i++)
            {
                Point pt = contour[convexHull[i]];
                ptArray[i] = pt;
            }

            VectorOfPoint vpHull = new VectorOfPoint(ptArray);

            double arcLengthContour = CvInvoke.ArcLength(contour, true);
            double arcLengthHull    = CvInvoke.ArcLength(vpHull, true);
            double convexity        = arcLengthHull / arcLengthContour;

            return(convexity);
        }
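
A quick check of Convexity on a convex contour: the hull perimeter equals the contour perimeter, so the ratio is 1.0; concave contours yield values below 1.0:

        Point[] square = { new Point(0, 0), new Point(100, 0), new Point(100, 100), new Point(0, 100) };
        using (VectorOfPoint contour = new VectorOfPoint(square))
        using (VectorOfInt hullIndices = new VectorOfInt())
        {
            CvInvoke.ConvexHull(contour, hullIndices, false, false); // return indices, not points
            double c = Convexity(contour, hullIndices); // 1.0 for this square
        }
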
Example #31
        public static BackendTargetPair[] GetAvailableBackends()
        {
            using (VectorOfInt viBackends = new VectorOfInt())
                using (VectorOfInt viTargets = new VectorOfInt())
                {
                    cveDNNGetAvailableBackends(viBackends, viTargets);
                    int[] backendArr = viBackends.ToArray();
                    int[] targetArr  = viTargets.ToArray();

                    BackendTargetPair[] availableBackends = new BackendTargetPair[backendArr.Length];
                    for (int i = 0; i < backendArr.Length; i++)
                    {
                        availableBackends[i] = new BackendTargetPair((Backend)backendArr[i], (Target)targetArr[i]);
                    }

                    return(availableBackends);
                }
        }
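
A sketch that enumerates the result; BackendTargetPair is assumed to expose Backend and Target properties matching the constructor used above:

        foreach (BackendTargetPair pair in GetAvailableBackends())
        {
            Console.WriteLine("Backend: {0}, Target: {1}", pair.Backend, pair.Target);
        }
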
Example #32
        /// <summary>
        /// Both detects and decodes barcode
        /// </summary>
        /// <returns>The barcode found. If nothing is found, an empty array is returned.</returns>
        public Barcode[] DetectAndDecode(IInputArray image)
        {
            using (VectorOfCvString decodedInfoVec = new VectorOfCvString())
                using (VectorOfInt decodedTypeVec = new VectorOfInt())
                    //using (VectorOfMat pointsVec = new VectorOfMat())
                    //using (VectorOfPointF pointsVec = new VectorOfPointF())
                    using (Mat pointsVec = new Mat())
                    {
                        if (!DetectAndDecode(image, decodedInfoVec, decodedTypeVec, pointsVec))
                        {
                            return(new Barcode[0]);
                        }

                        PointF[] points = new PointF[4 * pointsVec.Rows];

                        if (points.Length > 0)
                        {
                            GCHandle handle = GCHandle.Alloc(points, GCHandleType.Pinned);
                            CvInvoke.cveMemcpy(handle.AddrOfPinnedObject(), pointsVec.DataPointer,
                                               points.Length * Marshal.SizeOf <PointF>());
                            handle.Free();
                        }


                        string[] decodedInfo = decodedInfoVec.ToArray();
                        int[]    decodedType = decodedTypeVec.ToArray();
                        //Point[][] points = WeChatQRCode.VectorOfMatToPoints(pointsVec);
                        //points = pointsVec.ToArray();
                        Barcode[] barcodes = new Barcode[decodedInfo.Length];
                        for (int i = 0; i < barcodes.Length; i++)
                        {
                            Barcode barcode = new Barcode();
                            barcode.DecodedInfo = decodedInfo[i];
                            barcode.Type        = (BarcodeType)decodedType[i];
                            PointF[] region = new PointF[4];
                            Array.Copy(points, 4 * i, region, 0, 4);
                            barcode.Points = region;

                            barcodes[i] = barcode;
                        }

                        return(barcodes);
                    }
        }
Example #33
 /// <summary>
 /// Calculates the back projection of a histogram.
 /// </summary>
 /// <param name="images">Source arrays. They all should have the same depth, CV_8U or CV_32F , and the same size. Each of them can have an arbitrary number of channels.</param>
 /// <param name="channels">Number of source images.</param>
 /// <param name="hist">Input histogram that can be dense or sparse.</param>
 /// <param name="backProject">Destination back projection array that is a single-channel array of the same size and depth as images[0] .</param>
 /// <param name="ranges">Array of arrays of the histogram bin boundaries in each dimension.</param>
 /// <param name="scale"> Optional scale factor for the output back projection.</param>
 public static void CalcBackProject(IInputArray images, int[] channels, IInputArray hist, IOutputArray backProject, float[] ranges, double scale = 1.0)
 {
    using (VectorOfInt channelsVec = new VectorOfInt(channels))
    using (VectorOfFloat rangeVec = new VectorOfFloat(ranges))
    using (InputArray iaImages = images.GetInputArray())
    using (InputArray iaHist = hist.GetInputArray())
    using (OutputArray oaBackProject = backProject.GetOutputArray())
    {
       cveCalcBackProject(iaImages, channelsVec, iaHist, oaBackProject, rangeVec, scale);
    }
 }
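
A self-contained sketch: build a hue histogram of an HSV image with CalcHist, then back-project it onto the same image; "scene.jpg" is a placeholder input:

    using (Mat hsv = CvInvoke.Imread("scene.jpg")) // placeholder input
    using (Mat hist = new Mat())
    using (Mat backProject = new Mat())
    using (VectorOfMat images = new VectorOfMat())
    {
        CvInvoke.CvtColor(hsv, hsv, ColorConversion.Bgr2Hsv);
        images.Push(hsv);
        CvInvoke.CalcHist(images, new int[] { 0 }, null, hist, new int[] { 30 }, new float[] { 0, 180 }, false);
        CvInvoke.CalcBackProject(images, new int[] { 0 }, hist, backProject, new float[] { 0, 180 }, 1.0);
        // backProject is a single-channel map: brighter pixels match the histogram better
    }
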
Example #34
 /// <summary>
 /// Calculates a histogram of a set of arrays.
 /// </summary>
 /// <param name="images">Source arrays. They all should have the same depth, CV_8U or CV_32F , and the same size. Each of them can have an arbitrary number of channels.</param>
 /// <param name="channels">List of the channels used to compute the histogram. </param>
 /// <param name="mask">Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as images[i] . The non-zero mask elements mark the array elements counted in the histogram.</param>
 /// <param name="hist">Output histogram</param>
 /// <param name="histSize">Array of histogram sizes in each dimension.</param>
 /// <param name="ranges">Array of the dims arrays of the histogram bin boundaries in each dimension.</param>
 /// <param name="accumulate">Accumulation flag. If it is set, the histogram is not cleared in the beginning when it is allocated. This feature enables you to compute a single histogram from several sets of arrays, or to update the histogram in time.</param>
 public static void CalcHist(IInputArray images, int[] channels, IInputArray mask, IOutputArray hist, int[] histSize, float[] ranges, bool accumulate)
 {
    using (VectorOfInt channelsVec = new VectorOfInt(channels))
    using (VectorOfInt histSizeVec = new VectorOfInt(histSize))
    using (VectorOfFloat rangesVec = new VectorOfFloat(ranges))
    using (InputArray iaImages = images.GetInputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    using (OutputArray oaHist = hist.GetOutputArray())
    {
       cveCalcHist(iaImages, channelsVec, iaMask, oaHist, histSizeVec, rangesVec, accumulate);
    }
 }
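
A minimal grayscale example computing a 256-bin histogram; "lena.jpg" is a placeholder input:

    using (Mat gray = CvInvoke.Imread("lena.jpg", ImreadModes.Grayscale)) // placeholder input
    using (VectorOfMat images = new VectorOfMat())
    using (Mat hist = new Mat())
    {
        images.Push(gray);
        CvInvoke.CalcHist(images, new int[] { 0 }, null, hist,
            new int[] { 256 }, new float[] { 0, 256 }, false);
        // hist is a 256 x 1 single-channel float Mat of bin counts
    }
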
Example #35
      public void TestConvecityDefect()
      {
         Mat frame = EmguAssert.LoadMat("lena.jpg");
         using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
         using (Image<Gray, byte> canny = frame.ToImage<Gray, byte>())
         {
            IOutputArray hierarchy = null;
            CvInvoke.FindContours(canny, contours, hierarchy, RetrType.List, ChainApproxMethod.ChainApproxSimple);

            for (int i = 0; i < contours.Size; i++)
            {
               CvInvoke.ApproxPolyDP(contours[i], contours[i], 5, false);
               using (VectorOfInt hull = new VectorOfInt())
               using (Mat defects = new Mat())
               using (VectorOfPoint c = contours[i])
               {
                  CvInvoke.ConvexHull(c, hull, false, false);
                  CvInvoke.ConvexityDefects(c, hull, defects);
                  if (!defects.IsEmpty)
                  {
                     using (Matrix<int> value = new Matrix<int>(defects.Rows, defects.Cols, defects.NumberOfChannels))
                     {
                        defects.CopyTo(value);
                        //you can iterate through the defect here:
                        for (int j = 0; j < value.Rows; j++)
                        {
                           int startIdx = value.Data[j, 0];
                           int endIdx = value.Data[j, 1];
                           int farthestPtIdx = value.Data[j, 2];
                           double fixPtDepth = value.Data[j, 3]/256.0;
                           
                        }
                     }
                  }
               }
            }
         }
      }
Example #36
      public void TestConvexityDefacts()
      {
         Image<Bgr, Byte> image = new Image<Bgr, byte>(300, 300);
         Point[] polyline = new Point[] {
            new Point(10, 10),
            new Point(10, 250),
            new Point(100, 100),
            new Point(250, 250),
            new Point(250, 10)};
         using (VectorOfPoint vp = new VectorOfPoint(polyline))
         using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint(vp))
         using (VectorOfInt convexHull = new VectorOfInt())
         using (Mat convexityDefect = new Mat())
         {
            //Draw the contour in white thick line
            CvInvoke.DrawContours(image, contours, -1, new MCvScalar(255, 255, 255), 3);
            CvInvoke.ConvexHull(vp, convexHull);
            CvInvoke.ConvexityDefects(vp, convexHull, convexityDefect);

             //convexity defect is a four channel Mat with k rows and 1 col, where k = the number of convexity defects. 
            if (!convexityDefect.IsEmpty)
            {
               //Data from Mat are not directly readable so we convert it to Matrix<>
               Matrix<int> m = new Matrix<int>(convexityDefect.Rows, convexityDefect.Cols,
                  convexityDefect.NumberOfChannels);
               convexityDefect.CopyTo(m);

               for (int i = 0; i < m.Rows; i++)
               {
                  int startIdx = m.Data[i, 0];
                  int endIdx = m.Data[i, 1];
                  Point startPoint = polyline[startIdx];
                  Point endPoint = polyline[endIdx];
                  //draw  a line connecting the convexity defect start point and end point in thin red line
                  CvInvoke.Line(image, startPoint, endPoint, new MCvScalar(0, 0, 255));
               }
            }

            //Emgu.CV.UI.ImageViewer.Show(image);
         }
      }
Example #37
        private void ProcessFrame()
        {
            try
            {
                #region Background/Foreground
                Image<Bgr, byte> difference = BackgroundSubstractionOptions.Substract(_currentFrame, _frameHistoryBuffer);

                Rectangle? handArea = ForegoundExtractionOptions.HighlightForeground(difference);
                Image<Bgr, byte> skinDetectionFrame = _currentFrame.Copy();

                if (handArea.HasValue)
                    ForegoundExtractionOptions.CutBackground(skinDetectionFrame, handArea.Value);
                #endregion

                #region Skin filtering / Morphological / Smooth filtering
                Image<Gray, byte> skinDetectionFrameGray = SkinFilteringOptions.ActiveItem.FilterFrame(skinDetectionFrame);

                MorphologicalFilteringOptions.StackSync.EnterReadLock();
                foreach (var operation in MorphologicalFilteringOptions.OperationStack)
                {
                    if (operation.FilterType == Model.Enums.MorphologicalFilterType.Dilatation)
                    {
                        CvInvoke.Dilate(skinDetectionFrameGray, skinDetectionFrameGray, operation.GetKernel(),
                            new Point(operation.KernelAnchorX, operation.KernelAnchorY), operation.Intensity, operation.KernelBorderType,
                            new MCvScalar(operation.KernelBorderThickness));
                    }
                    else
                    {
                        CvInvoke.Erode(skinDetectionFrameGray, skinDetectionFrameGray, operation.GetKernel(),
                            new Point(operation.KernelAnchorX, operation.KernelAnchorY), operation.Intensity, operation.KernelBorderType,
                            new MCvScalar(operation.KernelBorderThickness));
                    }
                }
                MorphologicalFilteringOptions.StackSync.ExitReadLock();

                skinDetectionFrameGray = SmoothFilteringOptions.FilterFrame(skinDetectionFrameGray);
                #endregion

                #region Contours / ConvexHull / ConvexityDefects
                Image<Bgr, byte> fingerTrackerFrame = _currentFrame.Copy();

                List<Point> fingers = new List<Point>();

                using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                {
                    CvInvoke.FindContours(skinDetectionFrameGray.Copy(), contours, null, RetrType.List, FingerTrackingOptions.ApproxMethod);

                    if (contours.Size > 0)
                    {
                        VectorOfPoint biggestContour = contours[0];

                        if (contours.Size > 1)
                        {
                            for (int i = 1; i < contours.Size; i++)
                            {
                                if (CvInvoke.ContourArea(contours[i], false) > CvInvoke.ContourArea(biggestContour, false))
                                    biggestContour = contours[i];
                            }
                        }

                        if (CvInvoke.ContourArea(biggestContour, false) > FingerTrackingOptions.MinContourArea)
                        {
                            using (VectorOfPoint contour = biggestContour)
                            {
                                using (VectorOfPoint approxContour = new VectorOfPoint())
                                {
                                    CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * FingerTrackingOptions.PerimeterScalingFactor.Value, true);

                                    fingerTrackerFrame.Draw(approxContour.ToArray(), new Bgr(FingerTrackingOptions.ContourHighlightColor), 2);
                                    VectorOfPoint convexHull = new VectorOfPoint();
                                    VectorOfInt intHull = new VectorOfInt();
                                    CvInvoke.ConvexHull(approxContour, convexHull, FingerTrackingOptions.ConvexHullCW);
                                    CvInvoke.ConvexHull(approxContour, intHull, FingerTrackingOptions.ConvexHullCW);
                                    fingerTrackerFrame.DrawPolyline(convexHull.ToArray(), true, new Bgr(FingerTrackingOptions.ConvexHullColor), 2);

                                    var countourRect = CvInvoke.MinAreaRect(approxContour);
                                    fingerTrackerFrame.Draw(new CircleF(new PointF(countourRect.Center.X, countourRect.Center.Y), 3), new Bgr(FingerTrackingOptions.DefectLinesColor), 2);

                                    Mat defects = new Mat();
                                    CvInvoke.ConvexityDefects(approxContour, intHull, defects);

                                    if (!defects.IsEmpty)
                                    {
                                        var contourPoints = approxContour.ToArray();

                                        Matrix<int> m = new Matrix<int>(defects.Rows, defects.Cols, defects.NumberOfChannels);
                                        defects.CopyTo(m);

                                        for (int i = 0; i < m.Rows; i++)
                                        {
                                            int startIdx = m.Data[i, 0];
                                            int endIdx = m.Data[i, 1];
                                            int depthIdx = m.Data[i, 2];

                                            Point startPoint = contourPoints[startIdx];
                                            Point endPoint = contourPoints[endIdx];
                                            Point depthPoint = contourPoints[depthIdx];

                                            LineSegment2D startDepthLine = new LineSegment2D(startPoint, depthPoint);
                                            LineSegment2D depthEndLine = new LineSegment2D(depthPoint, endPoint);

                                            LineSegment2D startCenterLine = new LineSegment2D(startPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y));
                                            LineSegment2D depthCenterLine = new LineSegment2D(depthPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y));
                                            LineSegment2D endCenterLine = new LineSegment2D(endPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y));

                                            CircleF startCircle = new CircleF(startPoint, 5);
                                            CircleF depthCircle = new CircleF(depthPoint, 5);
                                            CircleF endCircle = new CircleF(endPoint, 5);

                                            if (startPoint.Y < countourRect.Center.Y)
                                                fingers.Add(startPoint);

                                            if (!FingerTrackingOptions.TrackOnlyControlPoint)
                                            {
                                                fingerTrackerFrame.Draw(startCircle, new Bgr(FingerTrackingOptions.DefectStartPointHighlightColor), 2);
                                                fingerTrackerFrame.Draw(depthCircle, new Bgr(FingerTrackingOptions.DefectDepthPointHighlightColor), 2);
                                                fingerTrackerFrame.Draw(endCircle, new Bgr(FingerTrackingOptions.DefectEndPointHighlightColor), 2);

                                                fingerTrackerFrame.Draw(startDepthLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                                //fingerTrackerFrame.Draw(depthEndLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);

                                                fingerTrackerFrame.Draw(startCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                                //fingerTrackerFrame.Draw(depthCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                               // fingerTrackerFrame.Draw(endCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                            }
                                        }

                                        _lastControlPoint = _currentControlPoint;
                                        _currentControlPoint = MouseControlOptions.UseHandCenter ? new Point((int)countourRect.Center.X, (int)countourRect.Center.Y)
                                                    : fingers.FirstOrDefault(f => f.Y == fingers.Min(line => line.Y));
                                        fingers.Clear();

                                        if (FingerTrackingOptions.TrackOnlyControlPoint)
                                        {
                                            fingerTrackerFrame = new Image<Bgr, byte>(fingerTrackerFrame.Width, fingerTrackerFrame.Height, new Bgr(Color.Black));
                                            fingerTrackerFrame.Draw(new CircleF(_currentControlPoint, 5), new Bgr(Color.Red), 2);
                                        }

                                    }
                                }
                            }
                        }
                    }
                }
                #endregion

                #region Mouse control
                if (_currentControlPoint.X != -1 && _currentControlPoint.Y != -1 && _lastControlPoint.X != -1 && _lastControlPoint.Y != -1
                         && _currentControlPoint.X != _lastControlPoint.X && _currentControlPoint.Y != _lastControlPoint.Y
                            && Math.Abs(_currentControlPoint.X - _lastControlPoint.X) < (MouseControlOptions.FrameWidth / 10)
                                 && Math.Abs(_currentControlPoint.Y - _lastControlPoint.Y) < (MouseControlOptions.FrameHeight / 10))
                {
                    int frameX = _currentControlPoint.X;
                    int frameY = _currentControlPoint.Y;

                    int moveX = _currentControlPoint.X - _lastControlPoint.X;
                    int moveY = _currentControlPoint.Y - _lastControlPoint.Y;

                    int sensitiveX = 1;
                    int sensitiveY = 1;

                    if (MouseControlOptions.MouseSensitive.Value > 0)
                    {
                        sensitiveX = (int)(((double)MouseControlOptions.ScreenWidth / MouseControlOptions.FrameWidth) * MouseControlOptions.MouseSensitive.Value);
                        sensitiveY = (int)(((double)MouseControlOptions.ScreenHeight / MouseControlOptions.FrameHeight) * MouseControlOptions.MouseSensitive.Value);
                    }
                    else if (MouseControlOptions.MouseSensitive.Value < 0)
                    {
                        sensitiveX = (int)(((double)MouseControlOptions.FrameWidth / MouseControlOptions.ScreenWidth) * MouseControlOptions.MouseSensitive.Value * -1);
                        sensitiveY = (int)(((double)MouseControlOptions.FrameHeight / MouseControlOptions.ScreenHeight) * MouseControlOptions.MouseSensitive.Value * -1);
                    }

                    moveX *= sensitiveX * -1;
                    moveY *= sensitiveY;

                    Point currentMousePosition = GetMousePosition();

                    int destinationX = currentMousePosition.X + moveX;
                    int destinationY = currentMousePosition.Y + moveY;

                    Messanger.PublishOnCurrentThread(new FingerMovedMessage(MouseControlOptions.ControlMouse, frameX, frameY, destinationX, destinationY));

                    if (MouseControlOptions.ControlMouse && MouseControlOptions.MouseSensitive.Value != 0 && destinationX >= 0 && destinationY >= 0)
                        SetCursorPos(destinationX, destinationY);
                }
                #endregion

                Messanger.PublishOnCurrentThread(new FrameProcessedMessage(_currentFrame, difference, skinDetectionFrameGray, fingerTrackerFrame));
            }
            catch { }
        }
Example #38
 /// <summary>
 /// Encode the image and store the result as a byte vector.
 /// </summary>
 /// <param name="ext">The file extension that defines the image format</param>
 /// <param name="image">The image</param>
 /// <param name="buf">Output buffer resized to fit the compressed image.</param>
 /// <param name="parameters">Optional encoding parameters, given as flag/value pairs; omit to use the defaults</param>
 public static void Imencode(String ext, IInputArray image, VectorOfByte buf, params KeyValuePair<CvEnum.ImwriteFlags, int>[] parameters)
 {
    using (CvString extStr = new CvString(ext))
    using (VectorOfInt p = new VectorOfInt())
    {
       PushParameters(p, parameters);
       using (InputArray iaImage = image.GetInputArray())
          cveImencode(extStr, iaImage, buf, p);
    }
 }
Example #39
      /// <summary>
      /// Obtains the list of Voronoi Facets
      /// </summary>
      /// <param name="idx">Vector of vertex IDs to obtain facets for; pass null for all vertices</param>
      /// <returns>The list of Voronoi Facets</returns>
      public VoronoiFacet[] GetVoronoiFacets(int[] idx = null)
      {
         using (VectorOfInt vi = new VectorOfInt())
         using (VectorOfVectorOfPointF facetVec = new VectorOfVectorOfPointF())
         using (VectorOfPointF centerVec = new VectorOfPointF())
         {
            if (idx != null)
               vi.Push(idx);
         
            CvInvoke.cveSubdiv2DGetVoronoiFacetList(_ptr, vi, facetVec, centerVec);
            PointF[][] vertices = facetVec.ToArrayOfArray();
            PointF[] centers = centerVec.ToArray();

            VoronoiFacet[] facets = new VoronoiFacet[centers.Length];
            for (int i = 0; i < facets.Length; i++)
            {
               facets[i] = new VoronoiFacet(centers[i], vertices[i]);
            }
            return facets;
         }
         
      }
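
A usage sketch, assuming Subdiv2D can be constructed directly from a point set as in Emgu CV:

      PointF[] points = { new PointF(50, 50), new PointF(150, 60), new PointF(100, 150) };
      using (Subdiv2D subdiv = new Subdiv2D(points))
      {
          VoronoiFacet[] facets = subdiv.GetVoronoiFacets(); // one facet per inserted point
      }
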
Example #40
 /// <summary>
 /// Encode the image and store the result as a byte vector.
 /// </summary>
 /// <param name="ext">The file extension that defines the image format</param>
 /// <param name="image">The image</param>
 /// <param name="buf">Output buffer resized to fit the compressed image.</param>
 /// <param name="parameters">Optional integer encoding parameters; omit to use the defaults</param>
 public static void Imencode(String ext, IInputArray image, VectorOfByte buf, params int[] parameters)
 {
    using (CvString extStr = new CvString(ext))
    using (VectorOfInt p = new VectorOfInt())
    {
       if (parameters.Length > 0)
          p.Push(parameters);
       using (InputArray iaImage = image.GetInputArray())
          cveImencode(extStr, iaImage, buf, p);
    }
 }
Example #41
File: Form1.cs Project: neutmute/emgucv
      private void ProcessFrame(object sender, EventArgs arg)
      {
         if (_capture != null && _capture.Ptr != IntPtr.Zero)
         {
            _capture.Retrieve(_frame, 0);

            //cameraImageBox.Image = _frame;

            using (VectorOfInt ids = new VectorOfInt())
            using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
            using (VectorOfVectorOfPointF rejected = new VectorOfVectorOfPointF())
            {
               DetectorParameters p = DetectorParameters.GetDefault();
               ArucoInvoke.DetectMarkers(_frame, ArucoDictionary, corners, ids, p, rejected);
               ArucoInvoke.RefineDetectedMarkers(_frame, ArucoBoard, corners, ids, rejected, null, null, 10, 3, true, null, p);
               _frame.CopyTo(_frameCopy);
               if (ids.Size > 0)
               {
                  //cameraButton.Text = "Calibrate camera";
                  this.Invoke((Action) delegate
                  {
                     useThisFrameButton.Enabled = true;
                  });
                  ArucoInvoke.DrawDetectedMarkers(_frameCopy, corners, ids, new MCvScalar(0, 255, 0));

                  if (!_cameraMatrix.IsEmpty && !_distCoeffs.IsEmpty)
                  {
                     ArucoInvoke.EstimatePoseSingleMarkers(corners, markersLength, _cameraMatrix, _distCoeffs, rvecs, tvecs);
                     for (int i = 0; i < ids.Size; i++)
                     {
                        using (Mat rvecMat = rvecs.Row(i))
                        using (Mat tvecMat = tvecs.Row(i))
                        using (VectorOfDouble rvec = new VectorOfDouble())
                        using (VectorOfDouble tvec = new VectorOfDouble())
                        {
                           double[] values = new double[3];
                           rvecMat.CopyTo(values);
                           rvec.Push(values);
                           tvecMat.CopyTo(values);
                           tvec.Push(values);

                           
                              ArucoInvoke.DrawAxis(_frameCopy, _cameraMatrix, _distCoeffs, rvec, tvec,
                                 markersLength*0.5f);
                           
                        }
                     }
                  }

                  if (_useThisFrame)
                  {
                     _allCorners.Push(corners);
                     _allIds.Push(ids);
                     _markerCounterPerFrame.Push(new int[] { corners.Size });
                     _imageSize = _frame.Size;
                     UpdateMessage(String.Format("Using {0} points", _markerCounterPerFrame.ToArray().Sum()));
                     _useThisFrame = false;
                  }
               }
               else
               {
                  this.Invoke((Action) delegate
                  {
                     useThisFrameButton.Enabled = false;
                  });

                  //cameraButton.Text = "Stop Capture";
               }
               cameraImageBox.Image = _frameCopy;
            }
         }
      }