Example #1
public PointF[][] Fit(IImage image)
        {
            lock (syncLock)
            {
                var faces          = new VectorOfRect(faceDetector.DetectBoxFaces(image));
                var facesLandmarks = new VectorOfVectorOfPointF();
                if (!facemark.Fit(image, faces, facesLandmarks))
                {
                    throw new ArgumentException("No landmark points detected for the input image");
                }

                var face      = faces.ToArray().First();
                var landmarks = facesLandmarks.ToArrayOfArray().First();
                var componentLandmarkPoints = new PointF[6][];

                // extract landmark points for each component
                componentLandmarkPoints[0] = GetComponentPoints(landmarks, EYEBROWS_POINT_RANGE);
                componentLandmarkPoints[1] = GetComponentPoints(landmarks, EYES_POINT_RANGE);
                componentLandmarkPoints[2] = GetComponentPoints(landmarks, NOSE_POINT_RANGE);
                componentLandmarkPoints[3] = GetComponentPoints(landmarks, MOUTH_POINT_RANGE);
                componentLandmarkPoints[4] = landmarks;
                // face bounding box
                componentLandmarkPoints[5] = new PointF[] {
                    new PointF(face.Left, face.Top),
                    new PointF(face.Right, face.Bottom)
                };
                return(componentLandmarkPoints);
            }
        }
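The GetComponentPoints helper and the *_POINT_RANGE constants are not part of this snippet; below is a minimal sketch of what they could look like, assuming the standard 68-point landmark layout used by the LBF model (the index ranges are an assumption, not taken from the original).

        // Hypothetical index ranges for the 68-point landmark model (assumed, not from the original):
        private static readonly int[] EYEBROWS_POINT_RANGE = { 17, 26 };
        private static readonly int[] EYES_POINT_RANGE     = { 36, 47 };
        private static readonly int[] NOSE_POINT_RANGE     = { 27, 35 };
        private static readonly int[] MOUTH_POINT_RANGE    = { 48, 67 };

        private static PointF[] GetComponentPoints(PointF[] landmarks, int[] range)
        {
            // copy the inclusive [start, end] slice of the landmark array
            int count  = range[1] - range[0] + 1;
            var points = new PointF[count];
            Array.Copy(landmarks, range[0], points, 0, count);
            return points;
        }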
Example #2
        public MarkerResult[] FindMarkers(Bitmap image)
        {
            Image <Bgr, byte> openCVImage = new Image <Bgr, byte>(image);

            // new PredefinedDictionaryName() only yields the enum's default value;
            // naming the intended dictionary explicitly is clearer
            Dictionary             Dict       = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_50);
            VectorOfVectorOfPointF Corners    = new VectorOfVectorOfPointF();
            VectorOfInt            Ids        = new VectorOfInt();
            DetectorParameters     Parameters = DetectorParameters.GetDefault();

            VectorOfVectorOfPointF Rejected = new VectorOfVectorOfPointF();

            ArucoInvoke.DetectMarkers(openCVImage, Dict, Corners, Ids, Parameters, Rejected);

            var markers = new MarkerResult[Corners.Size];

            for (int i = 0; i < Corners.Size; i++)
            {
                var markerCorners = new Vector2[4];

                for (int y = 0; y < 4; y++)
                {
                    markerCorners[y] = new Vector2(Corners[i][y].X, Corners[i][y].Y);
                }

                markers[i] = new MarkerResult(Ids[i], markerCorners);
            }

            return(markers);
        }
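MarkerResult is a caller-defined type that does not appear on this page; a minimal sketch of the shape this code implies, assuming System.Numerics.Vector2 and with property names chosen for illustration:

    public class MarkerResult
    {
        public int       Id      { get; }
        public Vector2[] Corners { get; } // the four marker corners in image coordinates

        public MarkerResult(int id, Vector2[] corners)
        {
            Id      = id;
            Corners = corners;
        }
    }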
Example #3
    private void HandleGrab(object sender, EventArgs e)
    {
        Mat image = new Mat();

        if (capture.IsOpened)
        {
            capture.Retrieve(image);
        }
        if (image.IsEmpty)
        {
            return;
        }
        Mat grayImg = image.Clone();

        CvInvoke.CvtColor(image, grayImg, ColorConversion.Bgr2Gray);
        CvInvoke.AdaptiveThreshold(grayImg, grayImg, 255, AdaptiveThresholdType.MeanC, ThresholdType.BinaryInv, 21, 11);

        VectorOfInt            ids      = new VectorOfInt();
        VectorOfVectorOfPointF corners  = new VectorOfVectorOfPointF();
        VectorOfVectorOfPointF rejected = new VectorOfVectorOfPointF();

        ArucoInvoke.DetectMarkers(image, dico, corners, ids, arucoParam, rejected);

        if (ids.Size > 0)
        {
            ArucoInvoke.DrawDetectedMarkers(image, corners, ids, new MCvScalar(0, 0, 255));
        }

        CvInvoke.Imshow("Original", image);
        CvInvoke.Imshow("Gray", grayImg);
    }
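A sketch of how a grab handler like this is usually wired up with Emgu's VideoCapture event loop; the dictionary and detector-parameter choices here are assumptions, not taken from the original:

    // e.g. in the constructor of the owning class:
    capture    = new VideoCapture(0);              // open the default camera
    dico       = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_50);
    arucoParam = DetectorParameters.GetDefault();
    capture.ImageGrabbed += HandleGrab;            // HandleGrab runs once per grabbed frame
    capture.Start();                               // start the background grab loop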
Example #4
        // EMGU's calibrate camera method has a bug.
        // Refer this case: https://stackoverflow.com/questions/33127581/how-do-i-access-the-rotation-and-translation-vectors-after-camera-calibration-in
        public static double CalibrateCamera(MCvPoint3D32f[][] objectPoints, PointF[][] imagePoints, Size imageSize, IInputOutputArray cameraMatrix, IInputOutputArray distortionCoeffs, CalibType calibrationType, MCvTermCriteria termCriteria, out Mat[] rotationVectors, out Mat[] translationVectors)
        {
            System.Diagnostics.Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for objects points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints)) {
                    double reprojectionError;
                    using (VectorOfMat rVecs = new VectorOfMat())
                        using (VectorOfMat tVecs = new VectorOfMat()) {
                            reprojectionError  = CvInvoke.CalibrateCamera(vvObjPts, vvImgPts, imageSize, cameraMatrix, distortionCoeffs, rVecs, tVecs, calibrationType, termCriteria);
                            rotationVectors    = new Mat[imageCount];
                            translationVectors = new Mat[imageCount];
                            for (int i = 0; i < imageCount; i++)
                            {
                                rotationVectors[i] = new Mat();
                                using (Mat matR = rVecs[i])
                                    matR.CopyTo(rotationVectors[i]);
                                translationVectors[i] = new Mat();
                                using (Mat matT = tVecs[i])
                                    matT.CopyTo(translationVectors[i]);
                            }
                        }
                    return(reprojectionError);
                }
        }
Example #5
    public static VectorOfVectorOfPointF FindContour(this UMat img, Mat hierarchy = null, Emgu.CV.CvEnum.RetrType type = Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod method = Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple)
    {
        VectorOfVectorOfPointF contours = new VectorOfVectorOfPointF();

        CvInvoke.FindContours(img, contours, hierarchy, type, method);
        return(contours);
    }
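Usage of the extension is then a one-liner on any binary image; for example (the input names here are assumptions):

    UMat edges = new UMat();
    CvInvoke.Canny(grayInput, edges, 50, 150);     // grayInput: an assumed single-channel UMat
    using (VectorOfVectorOfPointF contours = edges.FindContour())
    {
        for (int i = 0; i < contours.Size; i++)
        {
            PointF[] pts = contours[i].ToArray();  // inspect or approximate each contour
        }
    }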
Example #6
        public Image <Bgr, Byte> GetFacePoints()
        {
            //facemark.SetFaceDetector(MyDetector);

            Image <Gray, byte> grayImage = imgInput.Convert <Gray, byte>();

            grayImage._EqualizeHist();

            VectorOfRect           faces     = new VectorOfRect(faceDetector.DetectMultiScale(grayImage));
            VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();


            bool success = facemark.Fit(grayImage, faces, landmarks);

            if (success)
            {
                Rectangle[] facesRect = faces.ToArray();
                for (int i = 0; i < facesRect.Length; i++)
                {
                    imgInput.Draw(facesRect[i], new Bgr(Color.Blue), 2);
                    FaceInvoke.DrawFacemarks(imgInput, landmarks[i], new Bgr(Color.Blue).MCvScalar);
                }
                return(imgInput);
            }
            return(null);
        }
Example #7
        private void AutoCrop(string path)
        {
            using var image = new Image <Bgr, byte>(path);
            // Grayscale
            var grayScaleImage = image.Convert <Gray, byte>();

            // Applying GaussianBlur
            var blurredImage = grayScaleImage.SmoothGaussian(5, 5, 0, 0);

            // OR
            CvInvoke.GaussianBlur(grayScaleImage, blurredImage, new Size(5, 5), 0);

            // Applying Canny algorithm
            var cannyImage = new UMat();

            CvInvoke.Canny(blurredImage, cannyImage, 50, 150);

            // Finding contours (the loop below then looks for a convex quadrilateral)
            var contours = new VectorOfVectorOfPointF();

            CvInvoke.FindContours(cannyImage, contours, null, RetrType.Tree, ChainApproxMethod.ChainApproxSimple);

            for (int i = 0; i < contours.Size; i++)
            {
                var contourVector = contours[i];
                using var contour = new VectorOfPoint();
                var peri = CvInvoke.ArcLength(contourVector, true);
                CvInvoke.ApproxPolyDP(contourVector, contour, 0.1 * peri, true);
                if (contour.ToArray().Length == 4 && CvInvoke.IsContourConvex(contour))
                {
                    Debug.WriteLine(contour.ToString());
                    // return contour;
                }
            }
        }
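The commented-out return suggests the detected quadrilateral is meant to drive the actual crop. A hedged sketch of finishing the job, assuming the four corners are supplied in the same order as the destination rectangle (a real implementation would sort the corners first):

        private static Mat CropToQuad(Mat image, PointF[] quadCorners, Size outSize)
        {
            // destination corners, matching the assumed order of quadCorners
            PointF[] dst =
            {
                new PointF(0, 0),
                new PointF(outSize.Width - 1, 0),
                new PointF(outSize.Width - 1, outSize.Height - 1),
                new PointF(0, outSize.Height - 1)
            };
            using (Mat warp = CvInvoke.GetPerspectiveTransform(quadCorners, dst))
            {
                Mat result = new Mat();
                CvInvoke.WarpPerspective(image, result, warp, outSize);
                return result;
            }
        }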
Example #8
        private List <VectorOfVectorOfPointF> findCorners(float squareEdge, Size patternSize, string[] imagesLeft, string[] imagesRight)
        {
            VectorOfVectorOfPointF allCornersLeft  = new VectorOfVectorOfPointF();
            VectorOfVectorOfPointF allCornersRight = new VectorOfVectorOfPointF();
            VectorOfPointF         cornersLeft     = new VectorOfPointF();
            VectorOfPointF         cornersRight    = new VectorOfPointF();

            Image <Gray, Byte> imageLeft;
            Image <Gray, Byte> imageRight;
            bool findLeft, findRight;

            for (int i = 0; i < imagesLeft.Length; i++)
            {
                imageLeft  = new Image <Gray, Byte>(imagesLeft[i]);
                imageRight = new Image <Gray, Byte>(imagesRight[i]);

                findLeft = CvInvoke.FindChessboardCorners(
                    imageLeft,
                    patternSize,
                    cornersLeft);

                findRight = CvInvoke.FindChessboardCorners(
                    imageRight,
                    patternSize,
                    cornersRight);

                if (!findLeft || !findRight)
                {
                    continue;
                }

                CvInvoke.CornerSubPix(
                    imageLeft,
                    cornersLeft,
                    new Size(11, 11),
                    new Size(-1, -1),
                    new MCvTermCriteria(30, 0.1));

                CvInvoke.CornerSubPix(
                    imageRight,
                    cornersRight,
                    new Size(11, 11),
                    new Size(-1, -1),
                    new MCvTermCriteria(30, 0.1));

                allCornersLeft.Push(cornersLeft);
                allCornersRight.Push(cornersRight);

                imageLeft.Dispose();
                imageRight.Dispose();
                GC.Collect();
            }

            return(new List <VectorOfVectorOfPointF>()
            {
                allCornersLeft, allCornersRight
            });
        }
Example #9
 /// <summary>
 /// Detect the facial landmarks from the face regions
 /// </summary>
 /// <param name="image">The image to detect facial landmarks from</param>
 /// <param name="fullFaceRegions">The face regions to detect landmarks from</param>
 /// <returns>Vector of facial landmarks</returns>
 public VectorOfVectorOfPointF Detect(IInputArray image, Rectangle[] fullFaceRegions)
 {
     using (VectorOfRect vr = new VectorOfRect(fullFaceRegions))
     {
         VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();
         _facemark.Fit(image, vr, landmarks);
         return(landmarks);
     }
 }
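A typical call site for the detector above; facemarkDetector, the Mat named image, and faceRects are assumptions for illustration:

     // faceRects comes from any face detector, e.g. CascadeClassifier.DetectMultiScale
     using (VectorOfVectorOfPointF landmarks = facemarkDetector.Detect(image, faceRects))
     {
         for (int i = 0; i < landmarks.Size; i++)
         {
             using (VectorOfPointF facePoints = landmarks[i])
                 FaceInvoke.DrawFacemarks(image, facePoints, new MCvScalar(255, 0, 0));
         }
     }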
Example #10
        public static Mat Draw(Mat image, VectorOfInt markerIds, VectorOfVectorOfPointF markerCorners, VectorOfInt charucoIds, VectorOfPointF charucoCorners)
        {
            Mat result = image.ToImage <Rgb, byte>().Mat;

            ArucoInvoke.DrawDetectedMarkers(result, markerCorners, markerIds, new MCvScalar(255, 0, 0));
            ArucoInvoke.DrawDetectedCornersCharuco(result, charucoCorners, charucoIds, new MCvScalar(255, 255, 0));

            return(result);
        }
Example #11
        public void TestChessboardCalibration()
        {
            Size patternSize = new Size(9, 6);

            Image <Gray, Byte> chessboardImage = EmguAssert.LoadImage <Gray, byte>("left01.jpg");

            Util.VectorOfPointF corners = new Util.VectorOfPointF();
            bool patternWasFound        = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);

            chessboardImage.FindCornerSubPix(
                new PointF[][] { corners.ToArray() },
                new Size(10, 10),
                new Size(-1, -1),
                new MCvTermCriteria(0.05));

            MCvPoint3D32f[] objectPts = CalcChessboardCorners(patternSize, 1.0f);

            using (VectorOfVectorOfPoint3D32F ptsVec = new VectorOfVectorOfPoint3D32F(new MCvPoint3D32f[][] { objectPts }))
                using (VectorOfVectorOfPointF imgPtsVec = new VectorOfVectorOfPointF(corners))
                    using (Mat cameraMatrix = new Mat())
                        using (Mat distortionCoeff = new Mat())
                            using (VectorOfMat rotations = new VectorOfMat())
                                using (VectorOfMat translations = new VectorOfMat())
                                {
                                    Mat             calMat  = CvInvoke.InitCameraMatrix2D(ptsVec, imgPtsVec, chessboardImage.Size, 0);
                                    Matrix <double> calMatF = new Matrix <double>(calMat.Rows, calMat.Cols, calMat.NumberOfChannels);
                                    calMat.CopyTo(calMatF);
                                    double error = CvInvoke.CalibrateCamera(ptsVec, imgPtsVec, chessboardImage.Size, cameraMatrix,
                                                                            distortionCoeff,
                                                                            rotations, translations, CalibType.Default, new MCvTermCriteria(30, 1.0e-10));
                                    using (Mat rotation = new Mat())
                                        using (Mat translation = new Mat())
                                            using (VectorOfPoint3D32F vpObject = new VectorOfPoint3D32F(objectPts))
                                            {
                                                CvInvoke.SolvePnPRansac(
                                                    vpObject,
                                                    corners,
                                                    cameraMatrix,
                                                    distortionCoeff,
                                                    rotation,
                                                    translation,
                                                    true);
                                            }

                                    CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);
                                    using (Mat undistorted = new Mat())
                                    {
                                        CvInvoke.Undistort(chessboardImage, undistorted, cameraMatrix, distortionCoeff);
                                        String title = String.Format("Reprojection error: {0}", error);
                                        //CvInvoke.NamedWindow(title);
                                        //CvInvoke.Imshow(title, undistorted);
                                        //CvInvoke.WaitKey();
                                        //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));
                                    }
                                }
        }
Example #12
        static private VectorOfPointF MarkFacialPoints(FacemarkLBF facemark, Image <Gray, byte> image, Rectangle faceRect, out bool isSuccess)
        {
            VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();
            VectorOfRect           faces     = new VectorOfRect(new Rectangle[] { faceRect });

            isSuccess = facemark.Fit(image, faces, landmarks);
            if (isSuccess)
            {
                return(landmarks[0]);     // return the landmarks for the first (and only) face rectangle
            }
            return(new VectorOfPointF()); // return an empty vector
        }
Example #13
        public static void Calibrate(string[] imgFiles, out LensParams lensParams)
        {
            Size patternSize = new Size(CHESS_PATTERN_WIDTH, CHESS_PATTERN_HEIGHT);

            VectorOfVectorOfPoint3D32F objPoints   = new VectorOfVectorOfPoint3D32F();
            VectorOfVectorOfPointF     imagePoints = new VectorOfVectorOfPointF();

            Size imageSize = Size.Empty;

            foreach (string file in imgFiles)
            {
                Mat img = CvInvoke.Imread(file, ImreadModes.Grayscale);
                if (imageSize == Size.Empty)
                {
                    imageSize = new Size(img.Width, img.Height);
                }
                //CvInvoke.Imshow("input", img);
                VectorOfPointF corners = new VectorOfPointF(patternSize.Width * patternSize.Height);
                bool           find    = CvInvoke.FindChessboardCorners(img, patternSize, corners);
                if (find)
                {
                    MCvPoint3D32f[] points    = new MCvPoint3D32f[patternSize.Width * patternSize.Height];
                    int             loopIndex = 0;
                    for (int i = 0; i < patternSize.Height; i++)
                    {
                        for (int j = 0; j < patternSize.Width; j++)
                        {
                            points[loopIndex++] = new MCvPoint3D32f(j, i, 0);
                        }
                    }
                    objPoints.Push(new VectorOfPoint3D32F(points));
                    imagePoints.Push(corners);
                }
            }

            Matrix <double> K           = new Matrix <double>(3, 3);
            Matrix <double> D           = new Matrix <double>(4, 1);
            Mat             rotation    = new Mat();
            Mat             translation = new Mat();

            Fisheye.Calibrate(objPoints,
                              imagePoints,
                              imageSize,
                              K,
                              D,
                              rotation,
                              translation,
                              Fisheye.CalibrationFlag.CheckCond,
                              new MCvTermCriteria(30, 0.1)
                              );
            lensParams = new LensParams(K, D);
        }
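A hedged sketch of putting the resulting LensParams to use, assuming LensParams exposes the K and D matrices, and assuming Emgu's fisheye image-undistortion wrapper follows the same truncated spelling as Fisheye.UndistorPoints used elsewhere on this page:

        Mat distorted   = CvInvoke.Imread("sample.jpg", ImreadModes.Color);
        Mat undistorted = new Mat();
        // lensParams.K / lensParams.D are assumed accessors for the calibration output above;
        // passing K again as Knew keeps the original field of view
        Fisheye.UndistorImage(distorted, undistorted, lensParams.K, lensParams.D, lensParams.K);
        CvInvoke.Imshow("undistorted", undistorted);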
Example #14
        private void FindFacialFeaturePoints()
        {
            string facePath;

            try
            {
                // get face detect dataset
                facePath = Path.GetFileName(@"data/haarcascade_frontalface_default.xml");

                // get FFP dataset
                facemarkParam = new FacemarkLBFParams();
                facemark      = new FacemarkLBF(facemarkParam);
                facemark.LoadModel(@"data/lbfmodel.yaml");
            }

            catch (Exception)
            {
                // rethrow, preserving the original exception type and stack trace
                throw;
            }

            // initialize imageMat
            currImageMat = CurrImageI.Mat;
            nextImageMat = NextImageI.Mat;

            // Current Face
            FacesListCurr = facesArrCurr.OfType <Rectangle>().ToList();

            // Find facial feature points
            VectorOfRect vrLeft = new VectorOfRect(facesArrCurr);

            landmarksCurr = new VectorOfVectorOfPointF();

            facemark.Fit(currImageMat, vrLeft, landmarksCurr);
            ffpCurr = landmarksCurr[curr.SelectedFace];


            // Next Face
            FacesListNext = facesArrNext.OfType <Rectangle>().ToList();

            // Find facial feature points
            VectorOfRect vrRight = new VectorOfRect(facesArrNext);

            landmarksNext = new VectorOfVectorOfPointF();

            facemark.Fit(nextImageMat, vrRight, landmarksNext);
            ffpNext = landmarksNext[next.SelectedFace];

            // Add Corner points
            ffpCurr = AddCornerPoints(ffpCurr, this.curr.ResizedImage.Mat);
            ffpNext = AddCornerPoints(ffpNext, this.next.ResizedImage.Mat);
        }
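AddCornerPoints is not shown on this page; in face-morphing code it typically appends the image corners and edge midpoints so that a later triangulation covers the whole frame. A minimal sketch under that assumption:

        private static VectorOfPointF AddCornerPoints(VectorOfPointF points, Mat image)
        {
            int w = image.Width, h = image.Height;
            points.Push(new PointF[]
            {
                new PointF(0, 0),      new PointF(w - 1, 0),
                new PointF(0, h - 1),  new PointF(w - 1, h - 1),      // corners
                new PointF(w / 2f, 0), new PointF(w / 2f, h - 1),
                new PointF(0, h / 2f), new PointF(w - 1, h / 2f)      // edge midpoints
            });
            return points;
        }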
Example #15
        /// <summary>
        /// Process the input image and render into the output image
        /// </summary>
        /// <param name="imageIn">The input image</param>
        /// <param name="imageOut">The output image, can be the same as imageIn, in which case we will render directly into the input image</param>
        /// <returns>The messages that we want to display.</returns>
        public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
        {
            if (imageOut != imageIn)
            {
                using (InputArray iaImageIn = imageIn.GetInputArray())
                {
                    iaImageIn.CopyTo(imageOut);
                }
            }

            Stopwatch watch = Stopwatch.StartNew();

            List <DetectedObject> fullFaceRegions    = new List <DetectedObject>();
            List <DetectedObject> partialFaceRegions = new List <DetectedObject>();

            _faceDetector.Detect(imageIn, fullFaceRegions, partialFaceRegions);

            if (partialFaceRegions.Count > 0)
            {
                foreach (DetectedObject face in partialFaceRegions)
                {
                    CvInvoke.Rectangle(imageOut, face.Region, new MCvScalar(0, 255, 0));
                }
            }

            if (fullFaceRegions.Count > 0)
            {
                foreach (DetectedObject face in fullFaceRegions)
                {
                    CvInvoke.Rectangle(imageOut, face.Region, new MCvScalar(0, 255, 0));
                }

                var fullFaceRegionsArr = fullFaceRegions.ToArray();
                var rectRegionArr      = Array.ConvertAll(fullFaceRegionsArr, r => r.Region);

                using (VectorOfVectorOfPointF landmarks = _facemarkDetector.Detect(imageIn, rectRegionArr))
                {
                    int len = landmarks.Size;
                    for (int i = 0; i < len; i++)
                    {
                        using (VectorOfPointF vpf = landmarks[i])
                            FaceInvoke.DrawFacemarks(imageOut, vpf, new MCvScalar(255, 0, 0));
                    }
                }
            }
            watch.Stop();
            return(String.Format("Detected in {0} milliseconds.", watch.ElapsedMilliseconds));
        }
Example #16
        private void button2_Click(object sender, EventArgs e)
        {
            CvInvoke.Threshold(image, image, 100, 255, Emgu.CV.CvEnum.ThresholdType.Binary);
            Mat b = new Mat();
            VectorOfVectorOfPointF contours   = new VectorOfVectorOfPointF();
            Image <Gray, byte>     cannyImage = new Image <Gray, byte>(image.Width, image.Height);

            // CvInvoke.Canny(image,cannyImage,100,3);
            //CvInvoke.FindContours(cannyImage,contours,b,Emgu.CV.CvEnum.RetrType.Ccomp,Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
            CvInvoke.FindContours(cannyImage, contours, null, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
            for (int i = 0; i < contours.Size; i++)
            {
                CvInvoke.DrawContours(image, contours, i, new MCvScalar(130));
            }
            imageBox.Image = image;
        }
Example #17
        private FaceModel GetFaceModel(Image <Bgr, Byte> image, Image <Gray, byte> grayImage)
        {
            grayImage._EqualizeHist();
            VectorOfRect faces = new VectorOfRect(faceDetector.DetectMultiScale(grayImage));

            Rectangle[]            rects     = faces.ToArray();
            VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();
            bool success = facemark.Fit(grayImage, faces, landmarks);

            if (!success)
            {
                return(null);
            }
            // only index into the landmarks after a successful fit
            PointF[] points = landmarks.ToArrayOfArray()[0];
            return(new FaceModel(points, rects[0]));
        }
Example #18
        /// <summary>
        /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="intrinsicParam">The intrisinc parameters, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="calibrationType">cCalibration type</param>
        /// <param name="termCriteria">The termination criteria</param>
        /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
        /// <returns>The final reprojection error</returns>
        public static double CalibrateCamera(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints,
            Size imageSize,
            IntrinsicCameraParameters intrinsicParam,
            CvEnum.CalibType calibrationType,
            MCvTermCriteria termCriteria,
            out ExtrinsicCameraParameters[] extrinsicParams)
        {
            Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for objects points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints))
                {
                    double reprojectionError = -1;
                    using (VectorOfMat rotationVectors = new VectorOfMat())
                        using (VectorOfMat translationVectors = new VectorOfMat())
                        {
                            Mat cameraMat   = new Mat();
                            Mat distorCoeff = new Mat();
                            reprojectionError = CvInvoke.CalibrateCamera(
                                vvObjPts,
                                vvImgPts,
                                imageSize,
                                intrinsicParam.IntrinsicMatrix,
                                intrinsicParam.DistortionCoeffs,
                                rotationVectors,
                                translationVectors,
                                calibrationType,
                                termCriteria);

                            extrinsicParams = new ExtrinsicCameraParameters[imageCount];
                            for (int i = 0; i < imageCount; i++)
                            {
                                ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
                                using (Mat matR = rotationVectors[i])
                                    matR.CopyTo(p.RotationVector);
                                using (Mat matT = translationVectors[i])
                                    matT.CopyTo(p.TranslationVector);
                                extrinsicParams[i] = p;
                            }
                        }
                    return(reprojectionError);
                }
        }
Example #19
        public static double ValidateCharuco(int squaresX, int squaresY, float squareLength, float markerLength, PredefinedDictionaryName dictionary, Size imageSize, VectorOfInt charucoIds, VectorOfPointF charucoCorners, VectorOfInt markerCounterPerFrame, bool fisheye, Func <byte[], byte[]> GetRemoteChessboardCorner, Mat cameraMatrix, Mat distCoeffs)
        {
            VectorOfVectorOfPoint3D32F processedObjectPoints = new VectorOfVectorOfPoint3D32F();
            VectorOfVectorOfPointF     processedImagePoints  = new VectorOfVectorOfPointF();
            VectorOfPoint3D32F         rvecs = new VectorOfPoint3D32F();
            VectorOfPoint3D32F         tvecs = new VectorOfPoint3D32F();

            int k = 0;

            for (int i = 0; i < markerCounterPerFrame.Size; i++)
            {
                int                nMarkersInThisFrame       = markerCounterPerFrame[i];
                VectorOfPointF     currentImgPoints          = new VectorOfPointF();
                VectorOfPointF     currentImgPointsUndistort = new VectorOfPointF();
                VectorOfInt        currentIds       = new VectorOfInt();
                VectorOfPoint3D32F currentObjPoints = new VectorOfPoint3D32F();
                Mat                tvec             = new Mat();
                Mat                rvec             = new Mat();

                for (int j = 0; j < nMarkersInThisFrame; j++)
                {
                    currentImgPoints.Push(new PointF[] { charucoCorners[k] });
                    currentIds.Push(new int[] { charucoIds[k] });
                    currentObjPoints.Push(new MCvPoint3D32f[] { GetChessboardCorner(squaresX, squaresY, squareLength, markerLength, charucoIds[k], dictionary, GetRemoteChessboardCorner) });
                    k++;
                }

                Mat distCoeffsNew = new Mat(1, 4, DepthType.Cv64F, 1);
                distCoeffsNew.SetValue(0, 0, 0);
                distCoeffsNew.SetValue(0, 1, 0);
                distCoeffsNew.SetValue(0, 2, 0);
                distCoeffsNew.SetValue(0, 3, 0);

                Fisheye.UndistorPoints(currentImgPoints, currentImgPointsUndistort, cameraMatrix, distCoeffs, Mat.Eye(3, 3, DepthType.Cv64F, 1), Mat.Eye(3, 3, DepthType.Cv64F, 1));
                if (ArucoInvoke.EstimatePoseCharucoBoard(currentImgPointsUndistort, currentIds, CreateBoard(squaresX, squaresY, squareLength, markerLength, new Dictionary(dictionary)), Mat.Eye(3, 3, DepthType.Cv64F, 1), distCoeffsNew, rvec, tvec))
                {
                    rvecs.Push(new MCvPoint3D32f[] { new MCvPoint3D32f((float)rvec.GetValue(0, 0), (float)rvec.GetValue(1, 0), (float)rvec.GetValue(2, 0)) });
                    tvecs.Push(new MCvPoint3D32f[] { new MCvPoint3D32f((float)tvec.GetValue(0, 0), (float)tvec.GetValue(1, 0), (float)tvec.GetValue(2, 0)) });

                    processedImagePoints.Push(currentImgPoints);
                    processedObjectPoints.Push(currentObjPoints);
                }
            }

            return(Validate(processedObjectPoints, processedImagePoints, cameraMatrix, distCoeffs, rvecs, tvecs, fisheye));
        }
Example #20
        public static (Mat cameraMatrix, Mat distCoeffs, double rms) CalibrateCharuco(int squaresX, int squaresY, float squareLength, float markerLength, PredefinedDictionaryName dictionary, Size imageSize, VectorOfInt charucoIds, VectorOfPointF charucoCorners, VectorOfInt markerCounterPerFrame, bool fisheye, Func <byte[], byte[]> GetRemoteChessboardCorner)
        {
            Mat    cameraMatrix = new Mat(3, 3, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
            Mat    distCoeffs   = new Mat(1, 4, Emgu.CV.CvEnum.DepthType.Cv64F, 1);
            double rms          = 0.0;

            VectorOfVectorOfPoint3D32F processedObjectPoints = new VectorOfVectorOfPoint3D32F();
            VectorOfVectorOfPointF     processedImagePoints  = new VectorOfVectorOfPointF();

            int k = 0;

            for (int i = 0; i < markerCounterPerFrame.Size; i++)
            {
                int                nMarkersInThisFrame = markerCounterPerFrame[i];
                VectorOfPointF     currentImgPoints    = new VectorOfPointF();
                VectorOfPoint3D32F currentObjPoints    = new VectorOfPoint3D32F();

                for (int j = 0; j < nMarkersInThisFrame; j++)
                {
                    currentImgPoints.Push(new PointF[] { charucoCorners[k] });
                    currentObjPoints.Push(new MCvPoint3D32f[] { GetChessboardCorner(squaresX, squaresY, squareLength, markerLength, charucoIds[k], dictionary, GetRemoteChessboardCorner) });
                    k++;
                }

                processedImagePoints.Push(currentImgPoints);
                processedObjectPoints.Push(currentObjPoints);
            }

            VectorOfPoint3D32F rvecs = new VectorOfPoint3D32F();
            VectorOfPoint3D32F tvecs = new VectorOfPoint3D32F();

            if (fisheye)
            {
                Fisheye.Calibrate(processedObjectPoints, processedImagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, Fisheye.CalibrationFlag.FixSkew | Fisheye.CalibrationFlag.RecomputeExtrinsic, new MCvTermCriteria(400, double.Epsilon));
            }
            else
            {
                CvInvoke.CalibrateCamera(processedObjectPoints, processedImagePoints, imageSize, cameraMatrix, distCoeffs, new Mat(), new Mat(), CalibType.FixK3, new MCvTermCriteria(30, 1e-4));
            }

            rms = Validate(processedObjectPoints, processedImagePoints, cameraMatrix, distCoeffs, rvecs, tvecs, fisheye);

            return(cameraMatrix, distCoeffs, rms);
        }
Example #21
        public void TestChessboardCalibration()
        {
            Size patternSize = new Size(9, 6);

            Image <Gray, Byte> chessboardImage = EmguAssert.LoadImage <Gray, byte>("left01.jpg");

            Util.VectorOfPointF corners = new Util.VectorOfPointF();
            bool patternWasFound        = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);

            chessboardImage.FindCornerSubPix(
                new PointF[][] { corners.ToArray() },
                new Size(10, 10),
                new Size(-1, -1),
                new MCvTermCriteria(0.05));

            MCvPoint3D32f[]           objectPts = CalcChessboardCorners(patternSize, 1.0f);
            IntrinsicCameraParameters intrinsic = new IntrinsicCameraParameters(8);

            ExtrinsicCameraParameters[] extrinsic;

            using (VectorOfVectorOfPoint3D32F ptsVec = new VectorOfVectorOfPoint3D32F(new MCvPoint3D32f[][] { objectPts }))
                using (VectorOfVectorOfPointF imgPtsVec = new VectorOfVectorOfPointF(corners))
                {
                    Mat             calMat  = CvInvoke.InitCameraMatrix2D(ptsVec, imgPtsVec, chessboardImage.Size, 0);
                    Matrix <double> calMatF = new Matrix <double>(calMat.Rows, calMat.Cols, calMat.NumberOfChannels);
                    calMat.CopyTo(calMatF);
                }

            double error = CameraCalibration.CalibrateCamera(new MCvPoint3D32f[][] { objectPts }, new PointF[][] { corners.ToArray() },
                                                             chessboardImage.Size, intrinsic, CvEnum.CalibType.Default, new MCvTermCriteria(30, 1.0e-10), out extrinsic);

            CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);
            //CameraCalibration.DrawChessboardCorners(chessboardImage, patternSize, corners);
            Image <Gray, Byte> undistorted = intrinsic.Undistort(chessboardImage);

            //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));

            Mat[] rotationVectors, translationVectors;
            CvInvoke.CalibrateCamera(new MCvPoint3D32f[][] { objectPts }, new PointF[][] { corners.ToArray() },
                                     chessboardImage.Size, intrinsic.IntrinsicMatrix, intrinsic.DistortionCoeffs, CalibType.Default,
                                     new MCvTermCriteria(30, 1.0e-10),
                                     out rotationVectors, out translationVectors);
        }
Example #22
        public Mat DrawAllCellContourBoundingBoxes()
        {
            var matToReturn         = ContourImage.CreateNewMatLikeThis();
            var boxVecOfVectorPoint = new VectorOfVectorOfPointF();

            foreach (var contour in Contours.ToArrayOfArray())
            {
                var tempVector = new VectorOfPoint(contour);
                var tempRect   = CvInvoke.MinAreaRect(tempVector);
                var box        = CvInvoke.BoxPoints(tempRect);
                var boxVec     = new VectorOfPointF(box);
                boxVecOfVectorPoint.Push(boxVec);
            }

            var convertedVectorOfVectorPoint = boxVecOfVectorPoint.ConvertToVectorOfPoint();

            CvInvoke.DrawContours(matToReturn, convertedVectorOfVectorPoint, -1, new MCvScalar(0, 255, 0, 255), 2);
            return(matToReturn);
        }
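ConvertToVectorOfPoint is a caller-defined extension that does not appear on this page. DrawContours needs integer pixel coordinates while BoxPoints yields PointF, so presumably it just rounds, along these lines (a sketch, e.g. placed in the same static extensions class):

        public static VectorOfVectorOfPoint ConvertToVectorOfPoint(this VectorOfVectorOfPointF source)
        {
            var result = new VectorOfVectorOfPoint();
            foreach (PointF[] contour in source.ToArrayOfArray())
            {
                // round each float point to the nearest integer pixel
                result.Push(new VectorOfPoint(Array.ConvertAll(contour, Point.Round)));
            }
            return result;
        }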
Example #23
        public bool calibrate(float squareEdge, Size patternSize, string[] images)
        {
            VectorOfVectorOfPointF corners = findCorners(squareEdge, patternSize, images);

            if (corners.Size == 0)
            {
                Console.WriteLine("Cannot find chessboard!");
                return(false);
            }

            VectorOfPoint3D32F         chessboard   = getChessboardCorners(squareEdge, patternSize);
            VectorOfVectorOfPoint3D32F objectPoints = new VectorOfVectorOfPoint3D32F();

            for (int i = corners.Size; i > 0; i--)
            {
                objectPoints.Push(chessboard);
            }

            CameraParam param = new CameraParam();

            // set mats
            Mat rotationMat    = new Mat();
            Mat translationMat = new Mat();

            Image <Gray, Byte> image = new Image <Gray, Byte>(images[0]);

            imgSize = image.Size;

            CvInvoke.CalibrateCamera(
                objectPoints,
                corners,
                image.Size,
                param.cameraMatrix.Mat,
                param.distortionCoeffs.Mat,
                rotationMat,
                translationMat,
                CalibType.Default,
                new MCvTermCriteria(30, 0.1));

            cameraParam.Clear();
            cameraParam.Add(param);
            return(_isCalibrated = true);
        }
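getChessboardCorners is not shown here; it presumably builds the flat board model scaled by squareEdge, analogous to the inline loop in Example #13. A sketch under that assumption:

        private VectorOfPoint3D32F getChessboardCorners(float squareEdge, Size patternSize)
        {
            var corners = new MCvPoint3D32f[patternSize.Width * patternSize.Height];
            int k = 0;
            for (int i = 0; i < patternSize.Height; i++)
            {
                for (int j = 0; j < patternSize.Width; j++)
                {
                    // all board points lie on the Z = 0 plane, one square edge apart
                    corners[k++] = new MCvPoint3D32f(j * squareEdge, i * squareEdge, 0);
                }
            }
            return new VectorOfPoint3D32F(corners);
        }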
Example #24
        public Image <Bgr, Byte> GetFacePoints()
        {
            String facePath = Path.GetFullPath(@"../../data/haarcascade_frontalface_default.xml");

            //CascadeClassifier faceDetector = new CascadeClassifier(@"..\..\Resource\EMGUCV\haarcascade_frontalface_default.xml");
            CascadeClassifier faceDetector = new CascadeClassifier(facePath);
            FacemarkLBFParams fParams      = new FacemarkLBFParams();

            //fParams.ModelFile = @"..\..\Resource\EMGUCV\lbfmodel.yaml";
            fParams.ModelFile  = @"lbfmodel.yaml";
            fParams.NLandmarks = 68; // number of landmark points
            fParams.InitShapeN = 10; // data-augmentation multiplier for the initial shapes
            fParams.StagesN    = 5;  // number of refinement stages
            fParams.TreeN      = 6;  // number of trees in the model for each landmark point
            fParams.TreeDepth  = 5;  // the depth of each decision tree
            FacemarkLBF facemark = new FacemarkLBF(fParams);
            //facemark.SetFaceDetector(MyDetector);

            Image <Bgr, Byte>  image     = new Image <Bgr, byte>("test.png");
            Image <Gray, byte> grayImage = image.Convert <Gray, byte>();

            grayImage._EqualizeHist();

            VectorOfRect           faces     = new VectorOfRect(faceDetector.DetectMultiScale(grayImage));
            VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();

            facemark.LoadModel(fParams.ModelFile);

            bool success = facemark.Fit(grayImage, faces, landmarks);

            if (success)
            {
                Rectangle[] facesRect = faces.ToArray();
                for (int i = 0; i < facesRect.Length; i++)
                {
                    image.Draw(facesRect[i], new Bgr(Color.Blue), 2);
                    FaceInvoke.DrawFacemarks(image, landmarks[i], new Bgr(Color.Blue).MCvScalar);
                }
                return(image);
            }
            return(null);
        }
Example #25
        private void DrawSelectedCellContourBoxToMat(Mat imgToMod, VectorOfPoint tempVector)
        {
            var matToReturn = imgToMod.CreateNewHardCopyFromMat();

            if (matToReturn.NumberOfChannels < 3)
            {
                CvInvoke.CvtColor(matToReturn, matToReturn, ColorConversion.Gray2Bgr);
            }


            var boxVecOfVectorPoint = new VectorOfVectorOfPointF();
            var tempRect            = CvInvoke.MinAreaRect(tempVector);
            var box    = CvInvoke.BoxPoints(tempRect);
            var boxVec = new VectorOfPointF(box);

            boxVecOfVectorPoint.Push(boxVec);
            var convertedVectorOfVectorPoint = boxVecOfVectorPoint.ConvertToVectorOfPoint();

            CvInvoke.DrawContours(matToReturn, convertedVectorOfVectorPoint, -1, new MCvScalar(0, 114, 251, 0), 3);
        }
Example #26
        public void Calibrate(VectorOfVectorOfPointF cornersPoints, Size imageSize, int innerCornersPerChessboardCols,
            int innerCornersPerChessboardRows)
        {
            modelPoints = CreateModelPoints(cornersPoints.Size, innerCornersPerChessboardCols,
                innerCornersPerChessboardRows);

            var rotationVectors = new VectorOfMat();
            var translationVectors = new VectorOfMat();

            CvInvoke.CalibrateCamera(modelPoints, cornersPoints, imageSize, cameraMatrix, cameraDistortionCoeffs,
                rotationVectors, translationVectors, CalibType.Default, new MCvTermCriteria(10));

            translation = new Matrix<double>(translationVectors[0].Rows, translationVectors[0].Cols,
                translationVectors[0].DataPointer);

            var rotationMatrix = new Matrix<double>(rotationVectors[0].Rows, rotationVectors[0].Cols,
                rotationVectors[0].DataPointer);

            rotation = new RotationVector3D(new[] {rotationMatrix[0, 0], rotationMatrix[1, 0], rotationMatrix[2, 0]});
        }
Example #27
        /// <summary>
        /// Estimates transformation between the 2 cameras making a stereo pair. If we have a stereo camera, where the relative position and orientation of the 2 cameras is fixed, and if we computed poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2), respectively (that can be done with cvFindExtrinsicCameraParams2), obviously, those poses will relate to each other, i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need to know the position and orientation of the 2nd camera relative to the 1st camera. That's what the described function does. It computes (R, T) such that:
        /// R2=R*R1,
        /// T2=R*T1 + T
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="intrinsicParam1">The intrisinc parameters for camera 1, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="intrinsicParam2">The intrisinc parameters for camera 2, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="flags">Different flags</param>
        /// <param name="extrinsicParams">The extrinsic parameters which contains:
        /// R - The rotation matrix between the 1st and the 2nd cameras' coordinate systems;
        /// T - The translation vector between the cameras' coordinate systems. </param>
        /// <param name="essentialMatrix">The essential matrix</param>
        /// <param name="termCrit">Termination criteria for the iterative optimiziation algorithm </param>
        /// <param name="foundamentalMatrix">The fundamental matrix</param>
        public static void StereoCalibrate(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints1,
            PointF[][] imagePoints2,
            IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            CvEnum.CalibType flags,
            MCvTermCriteria termCrit,
            out ExtrinsicCameraParameters extrinsicParams,
            out Matrix <double> foundamentalMatrix,
            out Matrix <double> essentialMatrix)
        {
            Debug.Assert(objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length, "The number of images for objects points should be equal to the number of images for image points");

            using (VectorOfVectorOfPoint3D32F objectPointVec = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF imagePoints1Vec = new VectorOfVectorOfPointF(imagePoints1))
                    using (VectorOfVectorOfPointF imagePoints2Vec = new VectorOfVectorOfPointF(imagePoints2))
                    {
                        extrinsicParams    = new ExtrinsicCameraParameters();
                        essentialMatrix    = new Matrix <double>(3, 3);
                        foundamentalMatrix = new Matrix <double>(3, 3);

                        CvInvoke.StereoCalibrate(
                            objectPointVec,
                            imagePoints1Vec,
                            imagePoints2Vec,

                            intrinsicParam1.IntrinsicMatrix,
                            intrinsicParam1.DistortionCoeffs,
                            intrinsicParam2.IntrinsicMatrix,
                            intrinsicParam2.DistortionCoeffs,
                            imageSize,
                            extrinsicParams.RotationVector,
                            extrinsicParams.TranslationVector,
                            essentialMatrix,
                            foundamentalMatrix,
                            flags,
                            termCrit);
                    }
        }
Example #28
        public static (VectorOfInt markerIds, VectorOfVectorOfPointF markerCorners, VectorOfInt charucoIds, VectorOfPointF charucoCorners) Detect(Mat image, int squaresX, int squaresY, float squareLength, float markerLength, PredefinedDictionaryName dictionary)
        {
            VectorOfInt            markerIds             = new VectorOfInt();
            VectorOfVectorOfPointF markerCorners         = new VectorOfVectorOfPointF();
            VectorOfInt            charucoIds            = new VectorOfInt();
            VectorOfPointF         charucoCorners        = new VectorOfPointF();
            VectorOfVectorOfPointF rejectedMarkerCorners = new VectorOfVectorOfPointF();

            DetectorParameters decParameters = DetectorParameters.GetDefault();

            ArucoInvoke.DetectMarkers(image, new Dictionary(dictionary), markerCorners, markerIds, decParameters, rejectedMarkerCorners);

            ArucoInvoke.RefineDetectedMarkers(image, CreateBoard(squaresX, squaresY, squareLength, markerLength, new Dictionary(dictionary)), markerCorners, markerIds, rejectedMarkerCorners, null, null, 10, 3, true, null, decParameters);

            if (markerIds.Size > 0)
            {
                ArucoInvoke.InterpolateCornersCharuco(markerCorners, markerIds, image, CreateBoard(squaresX, squaresY, squareLength, markerLength, new Dictionary(dictionary)), charucoCorners, charucoIds, null, null, 2);
            }

            return(markerIds, markerCorners, charucoIds, charucoCorners);
        }
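A sketch of feeding these per-frame detections into the CalibrateCharuco method from Example #20; the board geometry and dictionary are example values, and frames, imageSize and getCorner are assumed inputs:

            VectorOfInt    allCharucoIds     = new VectorOfInt();
            VectorOfPointF allCharucoCorners = new VectorOfPointF();
            VectorOfInt    cornersPerFrame   = new VectorOfInt();

            foreach (Mat frame in frames)   // frames: an assumed set of calibration images
            {
                var (ids, corners, charucoIds, charucoCorners) =
                    Detect(frame, 5, 7, 0.04f, 0.02f, PredefinedDictionaryName.Dict4X4_50);
                allCharucoIds.Push(charucoIds.ToArray());
                allCharucoCorners.Push(charucoCorners.ToArray());
                cornersPerFrame.Push(new int[] { charucoCorners.Size });
            }

            var (cameraMatrix, distCoeffs, rms) = CalibrateCharuco(
                5, 7, 0.04f, 0.02f, PredefinedDictionaryName.Dict4X4_50,
                imageSize, allCharucoIds, allCharucoCorners, cornersPerFrame,
                false, getCorner);          // getCorner: the same callback the methods above take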
Example #29
        public Mat DrawAllCellContourBoundingBoxes(Mat imgToMod)
        {
            var matToReturn = imgToMod.CreateNewHardCopyFromMat();

            CvInvoke.CvtColor(matToReturn, matToReturn, ColorConversion.Gray2Bgr);
            var boxVecOfVectorPoint = new VectorOfVectorOfPointF();

            foreach (var contour in Contours.ToArrayOfArray())
            {
                var tempVector = new VectorOfPoint(contour);
                var tempRect   = CvInvoke.MinAreaRect(tempVector);
                var box        = CvInvoke.BoxPoints(tempRect);
                var boxVec     = new VectorOfPointF(box);
                boxVecOfVectorPoint.Push(boxVec);
            }

            var convertedVectorOfVectorPoint = boxVecOfVectorPoint.ConvertToVectorOfPoint();

            CvInvoke.DrawContours(matToReturn, convertedVectorOfVectorPoint, -1, new MCvScalar(0, 255, 0, 255), 2);
            return(matToReturn);
        }
Example #30
        /// <summary>
        /// Obtains the list of Voronoi Facets
        /// </summary>
        /// <returns>The list of Voronoi Facets</returns>
        public VoronoiFacet[] GetVoronoiFacets(int[] idx = null)
        {
            using (VectorOfInt vi = new VectorOfInt())
                using (VectorOfVectorOfPointF facetVec = new VectorOfVectorOfPointF())
                    using (VectorOfPointF centerVec = new VectorOfPointF())
                    {
                        if (idx != null)
                        {
                            vi.Push(idx);
                        }

                        CvInvoke.cveSubdiv2DGetVoronoiFacetList(_ptr, vi, facetVec, centerVec);
                        PointF[][] vertices = facetVec.ToArrayOfArray();
                        PointF[]   centers  = centerVec.ToArray();

                        VoronoiFacet[] facets = new VoronoiFacet[centers.Length];
                        for (int i = 0; i < facets.Length; i++)
                        {
                            facets[i] = new VoronoiFacet(centers[i], vertices[i]);
                        }
                        return(facets);
                    }
        }
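For example (assuming Emgu's Subdiv2D constructor that takes the bounding rectangle, and the VoronoiFacet accessors implied by the constructor call above):

             Rectangle bounds = new Rectangle(0, 0, 640, 480);
             using (Subdiv2D subdivision = new Subdiv2D(bounds))
             {
                 subdivision.Insert(new PointF(100, 100));
                 subdivision.Insert(new PointF(320, 240));
                 subdivision.Insert(new PointF(500, 400));

                 VoronoiFacet[] facets = subdivision.GetVoronoiFacets();
                 foreach (VoronoiFacet facet in facets)
                 {
                     PointF[] polygon = facet.Vertices;  // facet boundary surrounding facet.Point
                 }
             }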
Example #31
        private VectorOfVectorOfPointF findCorners(float squareEdge, Size patternSize, string[] imagePaths)
        {
            VectorOfVectorOfPointF allCorners = new VectorOfVectorOfPointF();
            VectorOfPointF         corners    = new VectorOfPointF();

            Image <Gray, Byte> image;
            bool find;

            for (int i = 0; i < imagePaths.Length; i++)
            {
                image = new Image <Gray, Byte>(imagePaths[i]);

                find = CvInvoke.FindChessboardCorners(
                    image,
                    patternSize,
                    corners);

                if (!find)
                {
                    continue;
                }

                CvInvoke.CornerSubPix(
                    image,
                    corners,
                    new Size(11, 11),
                    new Size(-1, -1),
                    new MCvTermCriteria(30, 0.1));

                allCorners.Push(corners);
                image.Dispose();
                GC.Collect();
            }

            return(allCorners);
        }
Example #32
      /// <summary>
      /// Estimates transformation between the 2 cameras making a stereo pair. If we have a stereo camera, where the relative position and orientation of the 2 cameras is fixed, and if we computed poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2), respectively (that can be done with cvFindExtrinsicCameraParams2), obviously, those poses will relate to each other, i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need to know the position and orientation of the 2nd camera relative to the 1st camera. That's what the described function does. It computes (R, T) such that:
      /// R2=R*R1,
      /// T2=R*T1 + T
      /// </summary>
      /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
      /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
      /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
      /// <param name="cameraMatrix1">The input/output camera matrices [fxk 0 cxk; 0 fyk cyk; 0 0 1]. If CV_CALIB_USE_INTRINSIC_GUESS or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of the elements of the matrices must be initialized</param>
      /// <param name="distCoeffs1">The input/output vectors of distortion coefficients for each camera, 4x1, 1x4, 5x1 or 1x5</param>
      /// <param name="cameraMatrix2">The input/output camera matrices [fxk 0 cxk; 0 fyk cyk; 0 0 1]. If CV_CALIB_USE_INTRINSIC_GUESS or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of the elements of the matrices must be initialized</param>
      /// <param name="distCoeffs2">The input/output vectors of distortion coefficients for each camera, 4x1, 1x4, 5x1 or 1x5</param>
      /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
      /// <param name="r">The rotation matrix between the 1st and the 2nd cameras' coordinate systems </param>
      /// <param name="t">The translation vector between the cameras' coordinate systems</param>
      /// <param name="e">The optional output essential matrix</param>
      /// <param name="f">The optional output fundamental matrix </param>
      /// <param name="termCrit">Termination criteria for the iterative optimization algorithm</param>
      /// <param name="flags">The calibration flags</param>
      public static void StereoCalibrate(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePoints1,
         PointF[][] imagePoints2,
         IInputOutputArray cameraMatrix1,
         IInputOutputArray distCoeffs1,
         IInputOutputArray cameraMatrix2,
         IInputOutputArray distCoeffs2,
         Size imageSize,
         IOutputArray r,
         IOutputArray t,
         IOutputArray e,
         IOutputArray f,
         CvEnum.CalibType flags,
         MCvTermCriteria termCrit)
      {
         System.Diagnostics.Debug.Assert(
            objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length,
            "The number of images for objects points should be equal to the number of images for image points");

         using (VectorOfVectorOfPoint3D32F objectPointVec = new VectorOfVectorOfPoint3D32F(objectPoints))
         using (VectorOfVectorOfPointF imagePoints1Vec = new VectorOfVectorOfPointF(imagePoints1))
         using (VectorOfVectorOfPointF imagePoints2Vec = new VectorOfVectorOfPointF(imagePoints2))
         {
            CvInvoke.StereoCalibrate(
               objectPointVec,
               imagePoints1Vec,
               imagePoints2Vec,
               cameraMatrix1,
               distCoeffs1,
               cameraMatrix2,
               distCoeffs2,
               imageSize,
               r,
               t,
               e,
               f,
               flags,
               termCrit);
         }
      }
Example #33
      /// <summary>
      /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
      /// </summary>
      /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
      /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
      /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
      /// <param name="rotationVectors">The output 3xM or Mx3 array of rotation vectors (compact representation of rotation matrices, see cvRodrigues2). </param>
      /// <param name="translationVectors">The output 3xM or Mx3 array of translation vectors</param>/// <param name="calibrationType">cCalibration type</param>
      /// <param name="termCriteria">The termination criteria</param>
      /// <param name="cameraMatrix">The output camera matrix (A) [fx 0 cx; 0 fy cy; 0 0 1]. If CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATION are specified, some or all of fx, fy, cx, cy must be initialized</param>
      /// <param name="distortionCoeffs">The output 4x1 or 1x4 vector of distortion coefficients [k1, k2, p1, p2]</param>
      /// <returns>The final reprojection error</returns>
      public static double CalibrateCamera(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePoints,
         Size imageSize,
         IInputOutputArray cameraMatrix,
         IInputOutputArray distortionCoeffs,
         CvEnum.CalibType calibrationType,
         MCvTermCriteria termCriteria,
         out Mat[] rotationVectors,
         out Mat[] translationVectors)
      {
         System.Diagnostics.Debug.Assert(objectPoints.Length == imagePoints.Length,
            "The number of images for objects points should be equal to the number of images for image points");
         int imageCount = objectPoints.Length;

         using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
         using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints))
         {
            double reprojectionError;
            using (VectorOfMat rVecs = new VectorOfMat())
            using (VectorOfMat tVecs = new VectorOfMat())
            {
               reprojectionError = CvInvoke.CalibrateCamera(
                  vvObjPts,
                  vvImgPts,
                  imageSize,
                  cameraMatrix,
                  distortionCoeffs,
                  rVecs,
                  tVecs,
                  calibrationType,
                  termCriteria);

               rotationVectors = new Mat[imageCount];
               translationVectors = new Mat[imageCount];
               for (int i = 0; i < imageCount; i++)
               {
                  // Copy each per-view result out of the temporary vectors so the
                  // returned Mats remain valid after rVecs and tVecs are disposed.
                  rotationVectors[i] = new Mat();
                  using (Mat matR = rVecs[i])
                     matR.CopyTo(rotationVectors[i]);
                  translationVectors[i] = new Mat();
                  using (Mat matT = tVecs[i])
                     matT.CopyTo(translationVectors[i]);
               }
            }
            return reprojectionError;
         }
      }
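
A hedged usage sketch; objectPoints and imagePoints would typically come from CvInvoke.FindChessboardCorners over several views, and the names below are illustrative:

         Mat cameraMatrix = new Mat(3, 3, DepthType.Cv64F, 1);
         Mat distCoeffs = new Mat(5, 1, DepthType.Cv64F, 1);
         Mat[] rotationVectors, translationVectors;
         double reprojError = CalibrateCamera(
            objectPoints, imagePoints, imageSize,
            cameraMatrix, distCoeffs,
            CvEnum.CalibType.Default, new MCvTermCriteria(30, 1e-6),
            out rotationVectors, out translationVectors);
         // A mean reprojection error well below one pixel usually indicates a good fit.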
Example #34
File: Form1.cs Project: neutmute/emgucv
      private void ProcessFrame(object sender, EventArgs arg)
      {
         if (_capture != null && _capture.Ptr != IntPtr.Zero)
         {
            _capture.Retrieve(_frame, 0);

            //cameraImageBox.Image = _frame;

            using (VectorOfInt ids = new VectorOfInt())
            using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
            using (VectorOfVectorOfPointF rejected = new VectorOfVectorOfPointF())
            {
               DetectorParameters p = DetectorParameters.GetDefault();
               ArucoInvoke.DetectMarkers(_frame, ArucoDictionary, corners, ids, p, rejected);
               ArucoInvoke.RefineDetectedMarkers(_frame, ArucoBoard, corners, ids, rejected, null, null, 10, 3, true, null, p);
               _frame.CopyTo(_frameCopy);
               if (ids.Size > 0)
               {
                  //cameraButton.Text = "Calibrate camera";
                  this.Invoke((Action) delegate
                  {
                     useThisFrameButton.Enabled = true;
                  });
                  ArucoInvoke.DrawDetectedMarkers(_frameCopy, corners, ids, new MCvScalar(0, 255, 0));

                  if (!_cameraMatrix.IsEmpty && !_distCoeffs.IsEmpty)
                  {
                     ArucoInvoke.EstimatePoseSingleMarkers(corners, markersLength, _cameraMatrix, _distCoeffs, rvecs, tvecs);
                     for (int i = 0; i < ids.Size; i++)
                     {
                        using (Mat rvecMat = rvecs.Row(i))
                        using (Mat tvecMat = tvecs.Row(i))
                        using (VectorOfDouble rvec = new VectorOfDouble())
                        using (VectorOfDouble tvec = new VectorOfDouble())
                        {
                           double[] values = new double[3];
                           rvecMat.CopyTo(values);
                           rvec.Push(values);
                           tvecMat.CopyTo(values);
                           tvec.Push(values);

                           
                           ArucoInvoke.DrawAxis(_frameCopy, _cameraMatrix, _distCoeffs, rvec, tvec,
                              markersLength * 0.5f);
                           
                        }
                     }
                  }

                  if (_useThisFrame)
                  {
                     _allCorners.Push(corners);
                     _allIds.Push(ids);
                     _markerCounterPerFrame.Push(new int[] { corners.Size });
                     _imageSize = _frame.Size;
                     UpdateMessage(String.Format("Using {0} points", _markerCounterPerFrame.ToArray().Sum()));
                     _useThisFrame = false;
                  }
               }
               else
               {
                  this.Invoke((Action) delegate
                  {
                     useThisFrameButton.Enabled = false;
                  });

                  //cameraButton.Text = "Stop Capture";
               }
               cameraImageBox.Image = _frameCopy;
            }
         }
      }
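
The frames accumulated in _allCorners/_allIds are presumably consumed by a companion calibration step elsewhere in the form; a sketch of that step, reusing the example's field names and assuming the Emgu wrapper ArucoInvoke.CalibrateCameraAruco:

      private void CalibrateFromCollectedFrames()
      {
         if (_markerCounterPerFrame.Size == 0)
            return;
         // Solve for intrinsics from every marker observation collected so far.
         double repError = ArucoInvoke.CalibrateCameraAruco(
            _allCorners, _allIds, _markerCounterPerFrame, ArucoBoard,
            _imageSize, _cameraMatrix, _distCoeffs, null, null,
            CalibType.Default, new MCvTermCriteria(30, double.Epsilon));
         UpdateMessage(String.Format("Calibrated, reprojection error: {0:G4}", repError));
      }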
Example #35
      /// <summary>
      /// Obtains the list of Voronoi Facets 
      /// </summary>
      /// <returns>The list of Voronoi Facets</returns>
      public VoronoiFacet[] GetVoronoiFacets(int[] idx = null)
      {
         using (VectorOfInt vi = new VectorOfInt())
         using (VectorOfVectorOfPointF facetVec = new VectorOfVectorOfPointF())
         using (VectorOfPointF centerVec = new VectorOfPointF())
         {
            if (idx != null)
               vi.Push(idx);
         
            CvInvoke.cveSubdiv2DGetVoronoiFacetList(_ptr, vi, facetVec, centerVec);
            PointF[][] vertices = facetVec.ToArrayOfArray();
            PointF[] centers = centerVec.ToArray();

            VoronoiFacet[] facets = new VoronoiFacet[centers.Length];
            for (int i = 0; i < facets.Length; i++)
            {
               facets[i] = new VoronoiFacet(centers[i], vertices[i]);
            }
            return facets;
         }
         
      }
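
A usage sketch: build a Subdiv2D from scattered points (an assumed PointF[] variable) and outline each facet on an image img:

         using (Subdiv2D subdivision = new Subdiv2D(points))
         {
            VoronoiFacet[] facets = subdivision.GetVoronoiFacets();
            foreach (VoronoiFacet facet in facets)
            {
               // Round the facet polygon to integer pixel coordinates before drawing.
               Point[] polygon = Array.ConvertAll(facet.Vertices, Point.Round);
               using (VectorOfPoint contour = new VectorOfPoint(polygon))
                  CvInvoke.Polylines(img, contour, true, new MCvScalar(255, 0, 0));
            }
         }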
Example #36
        public void Calibrate(VectorOfVectorOfPointF cornersPointsLeft, VectorOfVectorOfPointF cornersPointsRight,
            int innerCornersPerChessboardCols, int innerCornersPerChessboardRows, Size imageSize)
        {
            Initialize(cornersPointsLeft.Size, cornersPointsRight.Size, innerCornersPerChessboardCols,
                innerCornersPerChessboardRows, imageSize);

            CvInvoke.StereoCalibrate(modelPoints, cornersPointsLeft.ToArrayOfArray(),
                cornersPointsRight.ToArrayOfArray(), LeftCameraMatrix,
                LeftCameraDistortionCoeffs, RightCameraMatrix, RightCameraDistortionCoeffs, imageSize, Rotation,
                Translation,
                Essential, Fundamental, CalibType.Default, new MCvTermCriteria(0.1e5));

            CvInvoke.StereoRectify(LeftCameraMatrix, LeftCameraDistortionCoeffs,
                RightCameraMatrix, RightCameraDistortionCoeffs, imageSize,
                Rotation, Translation, LeftCameraRectification, RightCameraRectification,
                LeftCameraProjection, RightCameraProjection, DisparityToDepth, StereoRectifyType.Default, 0, imageSize,
                ref rightImageRoi,   // note: both valid-ROI outputs are written into the same field
                ref rightImageRoi);

            // Use the rectification transforms and projection matrices computed by
            // StereoRectify; passing null and the original camera matrix here would
            // only undistort the images without rectifying them.
            CvInvoke.InitUndistortRectifyMap(LeftCameraMatrix, LeftCameraDistortionCoeffs,
                LeftCameraRectification, LeftCameraProjection, imageSize, DepthType.Cv32F, LeftMapX, LeftMapY);

            CvInvoke.InitUndistortRectifyMap(RightCameraMatrix, RightCameraDistortionCoeffs,
                RightCameraRectification, RightCameraProjection, imageSize, DepthType.Cv32F, RightMapX, RightMapY);
        }
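
Once the maps are computed, each incoming frame pair can be rectified with Remap; leftRaw and rightRaw below stand in for the captured frames:

            Mat leftRectified = new Mat(), rightRectified = new Mat();
            CvInvoke.Remap(leftRaw, leftRectified, LeftMapX, LeftMapY, Inter.Linear);
            CvInvoke.Remap(rightRaw, rightRectified, RightMapX, RightMapY, Inter.Linear);
            // Corresponding features now share the same scanline in both images,
            // the precondition for block-matching stereo disparity.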