Example #1
        /// <summary>
        /// Triangulates matched keypoints from two views and validates the result
        /// by reprojecting the 3D points back into the image.
        /// </summary>
        /// <param name="calibrationInfo">Camera intrinsics and distortion coefficients.</param>
        /// <param name="trackedFeaturesKp">Keypoints tracked into the current frame.</param>
        /// <param name="bootstrapKp">Matching keypoints from the bootstrap (reference) frame.</param>
        /// <param name="p">3x4 projection matrix of the bootstrap view.</param>
        /// <param name="p1">3x4 projection matrix of the current view.</param>
        /// <returns>The reprojection error, filtered keypoints and triangulated 3D points; Result is true on success.</returns>
        public static TriangulateAndCheckReprojResult TriangulateAndCheckReproj(CameraCalibrationInfo calibrationInfo, VectorOfKeyPoint trackedFeaturesKp, VectorOfKeyPoint bootstrapKp, Matrix<double> p, Matrix<double> p1)
        {
            var result = new TriangulateAndCheckReprojResult();

            var trackedFeaturesPoints = Utils.GetPointsVector(trackedFeaturesKp);
            var bootstrapPoints       = Utils.GetPointsVector(bootstrapKp);

            // Undistort and normalize the points; without a projection argument,
            // UndistortPoints returns ideal (normalized) image coordinates.
            var normalizedTrackedPts   = new VectorOfPointF();
            var normalizedBootstrapPts = new VectorOfPointF();

            CvInvoke.UndistortPoints(trackedFeaturesPoints, normalizedTrackedPts, calibrationInfo.Intrinsic, calibrationInfo.Distortion);
            CvInvoke.UndistortPoints(bootstrapPoints, normalizedBootstrapPts, calibrationInfo.Intrinsic, calibrationInfo.Distortion);

            // Triangulate into 4xN homogeneous coordinates using both projection matrices
            var pt3Dh = new Mat();

            CvInvoke.TriangulatePoints(p, p1, normalizedBootstrapPts, normalizedTrackedPts, pt3Dh);
            var pt3DhMatrix = new Matrix<float>(pt3Dh.Rows, pt3Dh.Cols, pt3Dh.DataPointer);

            var pt3DMat = new Mat();

            // Convert from homogeneous to Euclidean coordinates: pt3D becomes an N x 3 matrix
            CvInvoke.ConvertPointsFromHomogeneous(pt3DhMatrix.Transpose(), pt3DMat);
            var pt3D = new Matrix<float>(pt3DMat.Rows, pt3DMat.Cols * pt3DMat.NumberOfChannels, pt3DMat.DataPointer);

            // Flag the points that lie in front of the camera (positive Z)
            var statusArray = new byte[pt3D.Rows];

            for (int i = 0; i < pt3D.Rows; i++)
            {
                statusArray[i] = (pt3D[i, 2] > 0) ? (byte)1 : (byte)0;
            }

            var status = new VectorOfByte(statusArray);
            int count  = CvInvoke.CountNonZero(status);

            double percentage = count / (double)pt3D.Rows;

            // Continue only if at least 75% of the triangulated points are in front of the camera
            if (percentage > 0.75)
            {
                // Reproject the 3D points with an identity pose and compare against the bootstrap points
                var rvec = new VectorOfFloat(new float[] { 0, 0, 0 });
                var tvec = new VectorOfFloat(new float[] { 0, 0, 0 });
                var reprojectedPtSet1 = new VectorOfPointF();
                CvInvoke.ProjectPoints(pt3D, rvec, tvec, calibrationInfo.Intrinsic, calibrationInfo.Distortion,
                                       reprojectedPtSet1);

                double reprojErr = CvInvoke.Norm(reprojectedPtSet1, bootstrapPoints) / bootstrapPoints.Size;

                // Accept only if the mean reprojection error is below 5 px
                if (reprojErr < 5)
                {
                    // Keep only the points whose individual reprojection error is under 20 px
                    statusArray = new byte[bootstrapPoints.Size];
                    for (int i = 0; i < bootstrapPoints.Size; ++i)
                    {
                        var pointsDiff    = Utils.SubstarctPoints(bootstrapPoints[i], reprojectedPtSet1[i]);
                        var vectorOfPoint = new VectorOfPointF(new[] { pointsDiff });
                        statusArray[i] = CvInvoke.Norm(vectorOfPoint) < 20.0 ? (byte)1 : (byte)0;
                    }

                    status = new VectorOfByte(statusArray);

                    var trackedFeatures3D = new VectorOfPoint3D32F(Utils.Get3dPointsArray(pt3D));

                    Utils.KeepVectorsByStatus(ref trackedFeaturesKp, ref trackedFeatures3D, status);

                    result.Error                     = reprojErr;
                    result.TrackedFeatures3D         = new VectorOfPoint3D32F(trackedFeatures3D.ToArray());
                    result.FilteredTrackedFeaturesKp = new VectorOfKeyPoint(trackedFeaturesKp.ToArray());
                    result.FilteredBootstrapKp       = new VectorOfKeyPoint(bootstrapKp.ToArray());
                    result.Result                    = true;
                }

                rvec.Dispose();
                tvec.Dispose();
                reprojectedPtSet1.Dispose();
            }

            normalizedTrackedPts.Dispose();
            normalizedBootstrapPts.Dispose();
            pt3Dh.Dispose();
            pt3DhMatrix.Dispose();
            pt3DMat.Dispose();
            pt3D.Dispose();
            status.Dispose();

            return result;
        }
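
A minimal usage sketch (not part of the original example): the caller below assumes P = [I | 0] for the bootstrap view and P1 = [R | t] recovered earlier, e.g. from an essential-matrix decomposition. The names rotation, translation, calibrationInfo, trackedKp and bootstrapKp are hypothetical stand-ins for state produced elsewhere.

        // Hypothetical caller; rotation (3x3) and translation (3x1) are assumed
        // to come from a prior essential-matrix decomposition.
        var p = new Matrix<double>(3, 4);
        p.SetIdentity(); // first view: P = [I | 0]

        var p1 = new Matrix<double>(3, 4); // second view: P1 = [R | t]
        for (int r = 0; r < 3; r++)
        {
            for (int c = 0; c < 3; c++)
            {
                p1[r, c] = rotation[r, c];
            }
            p1[r, 3] = translation[r, 0];
        }

        var res = TriangulateAndCheckReproj(calibrationInfo, trackedKp, bootstrapKp, p, p1);
        if (res.Result)
        {
            // >75% of points in front of the camera and mean reprojection error < 5 px:
            // the filtered keypoints and 3D points are safe to keep.
            LogUtil.Write("Bootstrap OK, error: " + res.Error);
        }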
Example #2
        public static void ProcessFrames()
        {
            var   cornersObjectList = new List<MCvPoint3D32f[]>();
            var   cornersPointsList = new List<PointF[]>();
            var   width             = 8;                       // inner corners per row (squares in width - 1)
            var   height            = 6;                       // inner corners per column (squares in height - 1)
            float squareSize        = width * height;          // arbitrary square scale; a uniform scale only affects extrinsics, not the intrinsics saved below
            var   patternSize       = new Size(width, height); // size of the chessboard pattern to detect
            var   corners           = new VectorOfPointF();    // corners found on the chessboard

            Mat[] rvecs, tvecs;

            var frameArrayBuffer = new List<Mat>();

            // Output containers for the intrinsic matrix (3x3) and distortion coefficients (8x1)
            var cameraMatrix = new Mat(3, 3, DepthType.Cv64F, 1);
            var distCoeffs   = new Mat(8, 1, DepthType.Cv64F, 1);

            // Load every captured frame (*.jpg) from the static directory
            string[] filePaths = Directory.GetFiles(@"/home/dietpi/", "*.jpg");
            var      frames    = filePaths.Select(path => CvInvoke.Imread(path)).ToList();

            LogUtil.Write("We have " + frames.Count + " frames.");
            var fc = 0;

            foreach (var frame in frames)
            {
                var grayFrame = new Mat();

                // Convert to grayscale for corner detection
                CvInvoke.CvtColor(frame, grayFrame, ColorConversion.Bgr2Gray);

                // Detect the chessboard pattern
                var boardFound = CvInvoke.FindChessboardCorners(grayFrame, patternSize, corners,
                                                                CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);
                if (boardFound)
                {
                    LogUtil.Write("Found board in frame " + fc);
                    // Refine the corner positions to sub-pixel accuracy
                    CvInvoke.CornerSubPix(grayFrame, corners, new Size(11, 11), new Size(-1, -1),
                                          new MCvTermCriteria(30, 0.1));
                    frameArrayBuffer.Add(grayFrame);
                }
                else
                {
                    grayFrame.Dispose();
                }

                fc++;
                corners = new VectorOfPointF();
            }

            LogUtil.Write("We have " + frameArrayBuffer.Count + " frames to use for mapping.");
            // Loop through the frames where a board was detected
            foreach (var frame in frameArrayBuffer)
            {
                var frameVect = new VectorOfPointF();
                CvInvoke.FindChessboardCorners(frame, patternSize, frameVect,
                                               CalibCbType.AdaptiveThresh
                                               | CalibCbType.FastCheck | CalibCbType.NormalizeImage);
                // Refine to sub-pixel accuracy
                CvInvoke.CornerSubPix(frame, frameVect, new Size(11, 11), new Size(-1, -1),
                                      new MCvTermCriteria(30, 0.1));

                // Build the matching object-space coordinates of the corners;
                // the pattern is planar, so Z is always 0.
                var objectList = new List<MCvPoint3D32f>();
                for (int i = 0; i < height; i++)
                {
                    for (int j = 0; j < width; j++)
                    {
                        objectList.Add(new MCvPoint3D32f(j * squareSize, i * squareSize, 0.0F));
                    }
                }

                cornersObjectList.Add(objectList.ToArray());
                cornersPointsList.Add(frameVect.ToArray());
                frameVect.Dispose();
            }

            // Run the calibration; the returned RMS reprojection error should be as close to 0 as possible
            double error = CvInvoke.CalibrateCamera(cornersObjectList.ToArray(), cornersPointsList.ToArray(),
                                                    frames[0].Size,
                                                    cameraMatrix, distCoeffs, CalibType.RationalModel, new MCvTermCriteria(30, 0.1), out rvecs,
                                                    out tvecs);

            LogUtil.Write("Correction error: " + error);
            var sk = JsonConvert.SerializeObject(cameraMatrix);
            var sd = JsonConvert.SerializeObject(distCoeffs);

            LogUtil.Write("Camera matrix: " + sk);
            LogUtil.Write("Dist coefficient: " + sd);
            DataUtil.SetItem("K", sk);
            DataUtil.SetItem("D", sd);
        }
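
As a follow-on sketch (not in the original), the calibration output can be applied directly to undistort a frame; the file paths below are illustrative only.

        // Hypothetical consumer: undistort a frame using the Mats filled in by
        // CalibrateCamera above (cameraMatrix = K, distCoeffs = D).
        var raw       = CvInvoke.Imread("/home/dietpi/sample.jpg"); // illustrative path
        var corrected = new Mat();
        CvInvoke.Undistort(raw, corrected, cameraMatrix, distCoeffs);
        CvInvoke.Imwrite("/home/dietpi/sample_undistorted.jpg", corrected);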