        /// <summary>
        /// Encodes the specified image into the target format and returns the result as a byte array
        /// </summary>
        /// <param name="image">The image to encode</param>
        /// <param name="encoding">An <see cref="ImageEncoding"/> value that defines the target format</param>
        /// <param name="parameters">Optional encoding parameters</param>
        /// <returns>A <see cref="byte"/> array containing the encoded image</returns>
        public static byte[] Imencode(IInputArray image, ImageEncoding encoding = ImageEncoding.Jpeg, params int[] parameters)
        {
            byte[] result = null;

            string destEncoding = GetEncoding(ref encoding);

            using (VectorOfByte buffer = new VectorOfByte())
            {
                Imencode(destEncoding, image, buffer, parameters);

                result = buffer.ToArray();
            }

            return result;
        }
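A minimal usage sketch (hedged: the static wrapper class hosting the overload above is assumed and called ImageCodecs here; Imread, ImreadModes and Mat are standard Emgu CV):

        // Read an image, encode it to JPEG bytes with the overload above and save the buffer.
        // `ImageCodecs` is a placeholder name for whatever class exposes Imencode.
        using (Mat image = CvInvoke.Imread("input.png", ImreadModes.Color))
        {
            byte[] jpegBytes = ImageCodecs.Imencode(image, ImageEncoding.Jpeg);
            File.WriteAllBytes("output.jpg", jpegBytes);
        }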
 private static void Imencode(string extension, IInputArray image, VectorOfByte buffer, params int[] parameters)
 {
     using (CvString cvString = new CvString(extension))
     {
         using (VectorOfInt intVector = new VectorOfInt())
         {
             if (parameters != null && parameters.Length > 0)
             {
                 intVector.Push(parameters);
             }
             using (InputArray array = image.GetInputArray())
             {
                 cveImencode(cvString, array, buffer, intVector);
             }
         }
     }
 }
Example #3
        /// <summary>
        /// Performs images matching.
        /// </summary>
        /// <param name="features1">First image features</param>
        /// <param name="features2">Second image features</param>
        /// <returns>Found matches</returns>
        public virtual MatchesInfo Apply(
            ImageFeatures features1, ImageFeatures features2)
        {
            ThrowIfDisposed();

            if (features1 == null)
            {
                throw new ArgumentNullException(nameof(features1));
            }
            if (features2 == null)
            {
                throw new ArgumentNullException(nameof(features2));
            }
            if (features1.Descriptors == null)
            {
                throw new ArgumentException($"{nameof(features1)}.Descriptors == null", nameof(features1));
            }
            if (features2.Descriptors == null)
            {
                throw new ArgumentException($"{nameof(features2)}.Descriptors == null", nameof(features2));
            }
            features1.Descriptors.ThrowIfDisposed();
            features2.Descriptors.ThrowIfDisposed();

            using var keypointsVec1 = new VectorOfKeyPoint(features1.Keypoints);
            using var keypointsVec2 = new VectorOfKeyPoint(features2.Keypoints);
            var features1Cpp = new WImageFeatures
            {
                ImgIdx      = features1.ImgIdx,
                ImgSize     = features1.ImgSize,
                Keypoints   = keypointsVec1.CvPtr,
                Descriptors = features1.Descriptors.CvPtr,
            };
            var features2Cpp = new WImageFeatures
            {
                ImgIdx      = features2.ImgIdx,
                ImgSize     = features2.ImgSize,
                Keypoints   = keypointsVec2.CvPtr,
                Descriptors = features2.Descriptors.CvPtr,
            };

            using var matchesVec     = new VectorOfDMatch();
            using var inliersMaskVec = new VectorOfByte();
            var h = new Mat();

            NativeMethods.HandleException(
                NativeMethods.stitching_FeaturesMatcher_apply(
                    ptr,
                    ref features1Cpp,
                    ref features2Cpp,
                    out var srcImgIdx,
                    out var dstImgIdx,
                    matchesVec.CvPtr,
                    inliersMaskVec.CvPtr,
                    out var numInliers,
                    h.CvPtr,
                    out var confidence));

            GC.KeepAlive(this);

            return(new MatchesInfo(
                       srcImgIdx, dstImgIdx, matchesVec.ToArray(), inliersMaskVec.ToArray(),
                       numInliers, h, confidence));
        }
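A hedged usage sketch of the matcher (the concrete FeaturesMatcher subclass and its factory are assumptions; only Apply and the MatchesInfo fields set in the constructor call above come from the source):

            // Match the features of two stitching candidates and inspect the result.
            // BestOf2NearestMatcher.Create() is assumed to be available in this OpenCvSharp build.
            using var matcher = BestOf2NearestMatcher.Create();
            MatchesInfo matches = matcher.Apply(features1, features2);
            Console.WriteLine($"inliers: {matches.NumInliers}, confidence: {matches.Confidence}");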
Example #4
        /// <summary>
        /// Computes sparse optical flow using the multi-scale (pyramidal) Lucas-Kanade algorithm.
        /// </summary>
        /// <param name="prevImg">First 8-bit input image</param>
        /// <param name="nextImg">Second input image of the same size and type as prevImg</param>
        /// <param name="prevPts">Points for which the flow needs to be found</param>
        /// <param name="nextPts">Output: calculated new positions of the input points in the second image</param>
        /// <param name="status">Output status vector; 1 if the flow for the corresponding point was found, 0 otherwise</param>
        /// <param name="err">Output vector of errors for each point</param>
        /// <param name="winSize">Size of the search window at each pyramid level (default 21x21)</param>
        /// <param name="maxLevel">0-based maximal pyramid level number (default 3)</param>
        /// <param name="criteria">Termination criteria of the iterative search algorithm (default: 30 iterations or epsilon 0.01)</param>
        /// <param name="flags">Operation flags</param>
        /// <param name="minEigThreshold">Threshold used to filter out points with a too-small minimal eigenvalue of the spatial gradient matrix</param>
        public static void CalcOpticalFlowPyrLK(
            InputArray prevImg, InputArray nextImg,
            Point2f[] prevPts, ref Point2f[] nextPts,
            out byte[] status, out float[] err,
            Size? winSize = null,
            int maxLevel = 3,
            TermCriteria? criteria = null,
            OpticalFlowFlags flags = OpticalFlowFlags.None,
            double minEigThreshold = 1e-4)
        {
            if (prevImg == null)
                throw new ArgumentNullException("prevImg");
            if (nextImg == null)
                throw new ArgumentNullException("nextImg");
            if (prevPts == null)
                throw new ArgumentNullException("prevPts");
            if (nextPts == null)
                throw new ArgumentNullException("nextPts");
            prevImg.ThrowIfDisposed();
            nextImg.ThrowIfDisposed();

            Size winSize0 = winSize.GetValueOrDefault(new Size(21, 21));
            TermCriteria criteria0 = criteria.GetValueOrDefault(
                TermCriteria.Both(30, 0.01));

            using (var nextPtsVec = new VectorOfPoint2f())
            using (var statusVec = new VectorOfByte())
            using (var errVec = new VectorOfFloat())
            {
                NativeMethods.video_calcOpticalFlowPyrLK_vector(
                    prevImg.CvPtr, nextImg.CvPtr, prevPts, prevPts.Length,
                    nextPtsVec.CvPtr, statusVec.CvPtr, errVec.CvPtr, 
                    winSize0, maxLevel, criteria0, (int)flags, minEigThreshold);
                nextPts = nextPtsVec.ToArray();
                status = statusVec.ToArray();
                err = errVec.ToArray();
            }
        }
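A hedged usage sketch (assuming the method is exposed as Cv2.CalcOpticalFlowPyrLK and that ImRead/GoodFeaturesToTrack are available under these names; exact enum and overload names vary between OpenCvSharp versions):

            // Track good features between two grayscale frames.
            using var prev = Cv2.ImRead("frame0.png", ImreadModes.Grayscale);
            using var next = Cv2.ImRead("frame1.png", ImreadModes.Grayscale);

            Point2f[] prevPts = Cv2.GoodFeaturesToTrack(prev, 200, 0.01, 10, null, 3, false, 0.04);
            Point2f[] nextPts = new Point2f[prevPts.Length]; // must be non-null (see the argument check above)

            Cv2.CalcOpticalFlowPyrLK(prev, next, prevPts, ref nextPts, out var status, out var err);

            int tracked = 0;
            foreach (byte s in status)
                if (s == 1) tracked++;   // status[i] == 1 means the flow was found for point i
            Console.WriteLine($"{tracked}/{prevPts.Length} points tracked");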
Example #5
File: Tesseract.cs Project: v5chn/emgucv
 private String UtfByteVectorToString(VectorOfByte bytes)
 {
     byte[] bArr = bytes.ToArray();
     return(_utf8.GetString(bArr, 0, bArr.Length).Replace("\n", Environment.NewLine));
 }
Example #6
 /// <summary>
 /// Compresses the image and stores it in the memory buffer
 /// </summary>
 /// <param name="ext">The file extension that defines the output format</param>
 /// <param name="img">The image to be written</param>
 /// <param name="buf"></param>
 /// <param name="prms"></param>
 public static bool ImEncode(string ext, InputArray img, out byte[] buf, int[] prms = null)
 {
     if (string.IsNullOrEmpty(ext))
         throw new ArgumentNullException("ext");
     if (img == null)
         throw new ArgumentNullException("img");
     if (prms == null)
         prms = new int[0];
     img.ThrowIfDisposed();
     using (VectorOfByte bufVec = new VectorOfByte())
     {
         int ret = NativeMethods.highgui_imencode_vector(ext, img.CvPtr, bufVec.CvPtr, prms, prms.Length);
         buf = bufVec.ToArray();
         return ret != 0;
     }
 }
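A hedged usage sketch (assuming this wrapper is exposed as Cv2.ImEncode; the value 1 is OpenCV's IMWRITE_JPEG_QUALITY flag):

     // Encode to JPEG at quality 90 and write the resulting buffer to disk.
     using (var img = Cv2.ImRead("photo.png"))
     {
         int[] prms = { 1, 90 }; // IMWRITE_JPEG_QUALITY = 1, followed by its value
         if (Cv2.ImEncode(".jpg", img, out byte[] jpeg, prms))
             File.WriteAllBytes("photo.jpg", jpeg);
     }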
        public void Bootstrap_Track_Logic_Test()
        {
            var capture = new Capture($@"{TestCaseProjectPath}\Videos\cube2.avi");

            //var capture = new Capture(@"C:\Users\zakharov\Documents\Repos\Mine\Rc\src\RubiksCube.OpenCV.TestCase\Videos\cube2.avi");
            for (int i = 0; i < 40; i++)
            {
                capture.QueryFrame();
            }

            var prevGray = capture.QueryFrame();

            CvInvoke.CvtColor(prevGray, prevGray, ColorConversion.Bgr2Gray);

            var currentGray = capture.QueryFrame();

            CvInvoke.CvtColor(currentGray, currentGray, ColorConversion.Bgr2Gray);

            var bootstrapKp = new VectorOfKeyPoint();

            new ORBDetector().DetectRaw(prevGray, bootstrapKp);

            var trackedFeatures = new VectorOfKeyPoint(bootstrapKp.ToArray());

            //-------------------------------------------------------------------------

            var pointComparer = Comparer<PointF>.Create((p1, p2) => Math.Abs(p1.X - p2.X) < 0.0001f && Math.Abs(p1.Y - p2.Y) < 0.0001f ? 0 : 1);

            var point3DComparer = Comparer<MCvPoint3D32f>.Create((p1, p2) => Math.Abs(p1.X - p2.X) < 0.0001f && Math.Abs(p1.Y - p2.Y) < 0.0001f && Math.Abs(p1.Z - p2.Z) < 0.0001f ? 0 : 1);

            var matrixComparer = Comparer<double>.Create((x, y) => Math.Abs(x - y) < 0.0001f ? 0 : 1);

            for (int i = 41; i <= 95; i++)
            {
                var bootstrapPointsBeforeOpticalFlowCplusPlus = GetPoints($"I = {i}txt - Bootstrap points before optical flow.txt");
                var trackedPointsBeforeOpticalFlowCplusPlus   = GetPoints($"I = {i}txt - Tracked points before optical flow.txt");
                var bootstrapPointsAfterOpticalFlowCplusPlus  = GetPoints($"I = {i}txt - Bootstrap points after optical flow.txt");
                var trackedPointsAfterOpticalFlowCplusPlus    = GetPoints($"I = {i}txt - Tracked points after optical flow.txt");
                var bootstrapPointsAfterHomographyCplusPlus   = GetPoints($"I = {i}txt - Bootstrap points after homography.txt");
                var trackedPointsAfterHomographyCplusPlus     = GetPoints($"I = {i}txt - Tracked points after homography.txt");

                var homographyCplusPlus     = Getmatrix3X3($"I = {i}txt - Homography.txt");
                var homographyMaskCplusPlus = GetByteVector($"I = {i}txt - Homography mask.txt");

                var corners = new VectorOfPointF();
                var status  = new VectorOfByte();
                var errors  = new VectorOfFloat();

                CollectionAssert.AreEqual(Utils.GetPointsVector(bootstrapKp).ToArray(), bootstrapPointsBeforeOpticalFlowCplusPlus.ToArray(), pointComparer);
                CollectionAssert.AreEqual(Utils.GetPointsVector(trackedFeatures).ToArray(), trackedPointsBeforeOpticalFlowCplusPlus.ToArray(), pointComparer);

                CvInvoke.CalcOpticalFlowPyrLK(prevGray, currentGray, Utils.GetPointsVector(trackedFeatures), corners,
                                              status, errors, new Size(11, 11), 3, new MCvTermCriteria(30, 0.01));
                currentGray.CopyTo(prevGray);

                if (CvInvoke.CountNonZero(status) < status.Size * 0.8)
                {
                    throw new Exception("Tracking failed.");
                }

                trackedFeatures = Utils.GetKeyPointsVector(corners);
                Utils.KeepVectorsByStatus(ref trackedFeatures, ref bootstrapKp, status);

                CollectionAssert.AreEqual(Utils.GetPointsVector(bootstrapKp).ToArray(), bootstrapPointsAfterOpticalFlowCplusPlus.ToArray(), pointComparer);
                CollectionAssert.AreEqual(Utils.GetPointsVector(trackedFeatures).ToArray(), trackedPointsAfterOpticalFlowCplusPlus.ToArray(), pointComparer);

                if (trackedFeatures.Size != bootstrapKp.Size)
                {
                    const string error = "Tracked features vector size is not equal to bootstrapped one.";
                    throw new Exception(error);
                }

                //verify features with a homography
                var inlierMask = new VectorOfByte();
                var homography = new Mat();
                if (trackedFeatures.Size > 4)
                {
                    CvInvoke.FindHomography(Utils.GetPointsVector(trackedFeatures), Utils.GetPointsVector(bootstrapKp), homography, HomographyMethod.Ransac, 0.99,
                                            inlierMask);
                }

                var homographyMatrix = new Matrix <double>(homography.Rows, homography.Cols, homography.DataPointer);
                CollectionAssert.AreEqual(homographyMatrix.Data, homographyCplusPlus.Data, matrixComparer);

                int inliersNum = CvInvoke.CountNonZero(inlierMask);
                CollectionAssert.AreEqual(inlierMask.ToArray(), homographyMaskCplusPlus.ToArray());

                if (inliersNum != trackedFeatures.Size && inliersNum >= 4 && !homography.IsEmpty)
                {
                    Utils.KeepVectorsByStatus(ref trackedFeatures, ref bootstrapKp, inlierMask);
                }
                else if (inliersNum < 10)
                {
                    throw new Exception("Not enough features survived homography.");
                }

                CollectionAssert.AreEqual(Utils.GetPointsVector(bootstrapKp).ToArray(), bootstrapPointsAfterHomographyCplusPlus.ToArray(), pointComparer);
                CollectionAssert.AreEqual(Utils.GetPointsVector(trackedFeatures).ToArray(), trackedPointsAfterHomographyCplusPlus.ToArray(), pointComparer);

                var bootstrapKpOrig     = new VectorOfKeyPoint(bootstrapKp.ToArray());
                var trackedFeaturesOrig = new VectorOfKeyPoint(trackedFeatures.ToArray());

                //TODO: Compare all these to c++ version
                //Attempt at 3D reconstruction (triangulation) if conditions are right
                var rigidT = CvInvoke.EstimateRigidTransform(Utils.GetPointsVector(trackedFeatures).ToArray(), Utils.GetPointsVector(bootstrapKp).ToArray(), false);
                var matrix = new Matrix <double>(rigidT.Rows, rigidT.Cols, rigidT.DataPointer);

                if (CvInvoke.Norm(matrix.GetCol(2)) > 100)
                {
                    var points3DCplusPlus      = GetPoints3d($"I = {i}txt - 3d points.txt");
                    var eigenvectorsCplusPlus  = Getmatrix3X3($"I = {i}txt - eigenvectors.txt");
                    var normalOfPlaneCplusPlus = GetDoubleArray($"I = {i}txt - normal of plane.txt");

                    //camera motion is sufficient
                    var p1Init = new Matrix <double>(3, 4);
                    p1Init.SetIdentity();
                    var result = OpenCvUtilities.CameraPoseAndTriangulationFromFundamental(_calibration, trackedFeatures, bootstrapKp, p1Init);

                    trackedFeatures = result.FilteredTrackedFeaturesKp;
                    bootstrapKp     = result.FilteredBootstrapKp;

                    if (result.Result)
                    {
                        double pToPlaneTrashCplusPlus = GetDouble($"I = {i}txt - p_to_plane_thresh.txt");
                        int    numInliersCplusPlus    = GetInt($"I = {i}txt - num inliers.txt");
                        var    statusArrCplusPlus     = GetByteVector($"I = {i}txt - status arr.txt");

                        var trackedFeatures3D = result.TrackedFeatures3D;

                        CollectionAssert.AreEqual(trackedFeatures3D.ToArray(), points3DCplusPlus.ToArray(), point3DComparer);

                        //var trackedFeatures3Dm = Utils.Get3dPointsMat(trackedFeatures3D);
                        var tf3D = new double[trackedFeatures3D.Size, 3];
                        var trackedFeatures3Dm = new Matrix <double>(trackedFeatures3D.Size, 3);
                        for (int k = 0; k < trackedFeatures3D.Size; k++)
                        {
                            trackedFeatures3Dm[k, 0] = trackedFeatures3D[k].X;
                            trackedFeatures3Dm[k, 1] = trackedFeatures3D[k].Y;
                            trackedFeatures3Dm[k, 2] = trackedFeatures3D[k].Z;

                            tf3D[k, 0] = trackedFeatures3D[k].X;
                            tf3D[k, 1] = trackedFeatures3D[k].Y;
                            tf3D[k, 2] = trackedFeatures3D[k].Z;
                        }

                        var eigenvectors = new Mat();
                        var mean         = new Mat();
                        CvInvoke.PCACompute(trackedFeatures3Dm, mean, eigenvectors);
                        var eigenvectorsMatrix = new Matrix <double>(eigenvectors.Rows, eigenvectors.Cols, eigenvectors.DataPointer);

                        CollectionAssert.AreEqual(eigenvectorsMatrix.Data, eigenvectorsCplusPlus.Data, matrixComparer);

                        var method = PrincipalComponentMethod.Center;
                        var pca    = new PrincipalComponentAnalysis(method);
                        pca.Learn(tf3D.ToJagged());

                        var meanMatrix = new Matrix <double>(mean.Rows, mean.Cols, mean.DataPointer);
                        CollectionAssert.AreEqual(meanMatrix.Data.ToJagged()[0], pca.Means, matrixComparer);

                        int numInliers    = 0;
                        var normalOfPlane = eigenvectorsMatrix.GetRow(2).ToUMat().ToMat(AccessType.Fast);
                        CvInvoke.Normalize(normalOfPlane, normalOfPlane);

                        var normalOfPlaneMatrix = new Matrix <double>(normalOfPlane.Rows, normalOfPlane.Cols, normalOfPlane.DataPointer);
                        var normalOfPlaneArray  = new[] { normalOfPlaneMatrix[0, 0], normalOfPlaneMatrix[0, 1], normalOfPlaneMatrix[0, 2] };

                        CollectionAssert.AreEqual(normalOfPlaneArray, normalOfPlaneCplusPlus, matrixComparer);

                        double pToPlaneThresh = Math.Sqrt(pca.Eigenvalues.ElementAt(2));

                        Assert.AreEqual(pToPlaneTrashCplusPlus, pToPlaneThresh, 0.0001);

                        var statusArray = new byte[trackedFeatures3D.Size];
                        for (int k = 0; k < trackedFeatures3D.Size; k++)
                        {
                            var t1 = new double[] { trackedFeatures3D[k].X, trackedFeatures3D[k].Y, trackedFeatures3D[k].Z };
                            var t2 = t1.Subtract(pca.Means);
                            var w  = new Matrix <double>(new[, ] {
                                { t2[0], t2[1], t2[2] }
                            });
                            double d = Math.Abs(normalOfPlane.Dot(w));
                            if (d < pToPlaneThresh)
                            {
                                numInliers++;
                                statusArray[k] = 1;
                            }
                        }

                        Assert.AreEqual(numInliersCplusPlus, numInliers);

                        var statusVector = new VectorOfByte(statusArray);
                        CollectionAssert.AreEqual(statusArrCplusPlus.ToArray(), statusVector.ToArray());

                        bool bootstrapping = numInliers / (double)trackedFeatures3D.Size < 0.75;
                        if (!bootstrapping)
                        {
                            //enough features are coplanar, keep them and flatten them on the XY plane
                            Utils.KeepVectorsByStatus(ref trackedFeatures, ref trackedFeatures3D, statusVector);

                            //the PCA has the major axes of the plane
                            var projected = new Mat();
                            CvInvoke.PCAProject(trackedFeatures3Dm, mean, eigenvectors, projected);
                            var projectedMatrix = new Matrix <double>(projected.Rows, projected.Cols, projected.DataPointer);
                            projectedMatrix.GetCol(2).SetValue(0);
                            projectedMatrix.CopyTo(trackedFeatures3Dm);
                        }
                        else
                        {
                            bootstrapKp     = bootstrapKpOrig;
                            trackedFeatures = trackedFeaturesOrig;
                        }
                    }
                }

                currentGray = capture.QueryFrame();
                CvInvoke.CvtColor(currentGray, currentGray, ColorConversion.Bgr2Gray);
            }
        }
Example #8
        public static void KeepVectorsByStatus(ref VectorOfKeyPoint f1, ref VectorOfKeyPoint f2, ref VectorOfKeyPoint f3, VectorOfByte status)
        {
            var newF1 = new VectorOfKeyPoint();
            var newF2 = new VectorOfKeyPoint();
            var newF3 = new VectorOfKeyPoint();

            for (int i = 0; i < status.Size; i++)
            {
                if (status[i] > 0)
                {
                    newF1.Push(new[] { f1[i] });
                    newF2.Push(new[] { f2[i] });
                    newF3.Push(new[] { f3[i] });
                }
            }

            f1 = newF1;
            f2 = newF2;
            f3 = newF3;
        }
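A hedged usage sketch of the filtering pattern this helper implements (kp1, kp2 and kp3 stand for index-aligned VectorOfKeyPoint instances produced by earlier steps):

            // A status byte of 0 drops the element at that index from all three vectors,
            // so they stay index-aligned after optical-flow or homography filtering.
            var status = new VectorOfByte(new byte[] { 1, 0, 1 });
            KeepVectorsByStatus(ref kp1, ref kp2, ref kp3, status);
            // kp1.Size == kp2.Size == kp3.Size == 2 afterwards (assuming each vector held three keypoints)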
        /// <summary>
        /// Recovers the relative camera pose from the fundamental matrix between the tracked and bootstrap
        /// keypoints and triangulates the matched points.
        /// </summary>
        /// <param name="calibrationInfo">Camera calibration (intrinsic and distortion parameters)</param>
        /// <param name="trackedFeaturesKp">Keypoints tracked in the current frame</param>
        /// <param name="bootstrapKp">Corresponding keypoints from the bootstrap frame</param>
        /// <param name="initialP1">Initial 3x4 camera matrix of the first camera (typically identity)</param>
        /// <param name="minInliers">Minimum number of fundamental-matrix inliers required to attempt pose recovery</param>
        /// <returns>Result containing the camera matrices, filtered keypoints and triangulated 3D points</returns>
        public static CameraPoseAndTriangulationFromFundamentalResult CameraPoseAndTriangulationFromFundamental(CameraCalibrationInfo calibrationInfo, VectorOfKeyPoint trackedFeaturesKp,
                                                                                                                VectorOfKeyPoint bootstrapKp, Matrix <double> initialP1, int minInliers = 10)
        {
            var result = new CameraPoseAndTriangulationFromFundamentalResult();

            //find fundamental matrix
            double minVal, maxVal;
            var    minIdx             = new int[2];
            var    maxIdx             = new int[2];
            var    trackedFeaturesPts = Utils.GetPointsVector(trackedFeaturesKp);
            var    bootstrapPts       = Utils.GetPointsVector(bootstrapKp);

            CvInvoke.MinMaxIdx(Utils.GetPointsMatrix(trackedFeaturesPts), out minVal, out maxVal, minIdx, maxIdx);
            result.Min = (float)minVal;
            result.Max = (float)maxVal;

            var f      = new Mat();
            var status = new VectorOfByte();

            CvInvoke.FindFundamentalMat(trackedFeaturesPts, bootstrapPts, f, FmType.Ransac, 0.006 * maxVal, 0.99, status);
            var fMat = new Matrix <double>(f.Rows, f.Cols, f.DataPointer);

            int inliersNum = CvInvoke.CountNonZero(status);

            Trace.WriteLine($"Fundamental keeping {inliersNum} / {status.Size}");

            Utils.KeepVectorsByStatus(ref trackedFeaturesKp, ref bootstrapKp, status);

            if (inliersNum > minInliers)
            {
                //Essential matrix: compute then extract cameras [R|t]
                var e = calibrationInfo.Intrinsic.Transpose() * fMat * calibrationInfo.Intrinsic; //according to HZ (9.12)
                result.Esential = e;

                //according to http://en.wikipedia.org/wiki/Essential_matrix#Properties_of_the_essential_matrix
                double determinant = Math.Abs(CvInvoke.Determinant(e));
                if (determinant > 1e-07)
                {
                    Console.WriteLine($"det(E) != 0 : {determinant}");
                    return(result);
                }

                Matrix <double> r1;
                Matrix <double> r2;
                Matrix <double> t1;
                Matrix <double> t2;
                if (!DecomposeEtoRandT(e, out r1, out r2, out t1, out t2))
                {
                    return(result);
                }

                determinant = CvInvoke.Determinant(r1);
                if (determinant + 1.0 < 1e-09)
                {
                    //according to http://en.wikipedia.org/wiki/Essential_matrix#Showing_that_it_is_valid
                    Trace.WriteLine($"det(R) == -1 [{determinant}]: flip E's sign");
                    Utils.Negotiate(ref e);
                    if (!DecomposeEtoRandT(e, out r1, out r2, out t1, out t2))
                    {
                        return(result);
                    }
                }
                if (Math.Abs(determinant) - 1.0 > 1e-07)
                {
                    Trace.WriteLine($"det(R) != +-1.0, this is not a rotation matrix");
                    return(result);
                }

                var p = initialP1.Clone();

                //TODO: there are 4 different combinations for P1...
                var pMat1 = new Matrix <double>(new[, ] {
                    { r1[0, 0], r1[0, 1], r1[0, 2], t1[0, 0] },
                    { r1[1, 0], r1[1, 1], r1[1, 2], t1[1, 0] },
                    { r1[2, 0], r1[2, 1], r1[2, 2], t1[2, 0] }
                });

                var triangulationResult = TriangulateAndCheckReproj(calibrationInfo, trackedFeaturesKp, bootstrapKp, p, pMat1);
                if (!triangulationResult.Result)
                {
                    pMat1 = new Matrix <double>(new[, ] {
                        { r1[0, 0], r1[0, 1], r1[0, 2], t2[0, 0] },
                        { r1[1, 0], r1[1, 1], r1[1, 2], t2[1, 0] },
                        { r1[2, 0], r1[2, 1], r1[2, 2], t2[2, 0] }
                    });

                    triangulationResult = TriangulateAndCheckReproj(calibrationInfo, trackedFeaturesKp, bootstrapKp, p, pMat1);
                    if (!triangulationResult.Result)
                    {
                        pMat1 = new Matrix <double>(new[, ] {
                            { r2[0, 0], r2[0, 1], r2[0, 2], t2[0, 0] },
                            { r2[1, 0], r2[1, 1], r2[1, 2], t2[1, 0] },
                            { r2[2, 0], r2[2, 1], r2[2, 2], t2[2, 0] }
                        });

                        triangulationResult = TriangulateAndCheckReproj(calibrationInfo, trackedFeaturesKp, bootstrapKp, p, pMat1);
                        if (!triangulationResult.Result)
                        {
                            pMat1 = new Matrix <double>(new[, ] {
                                { r2[0, 0], r2[0, 1], r2[0, 2], t1[0, 0] },
                                { r2[1, 0], r2[1, 1], r2[1, 2], t1[1, 0] },
                                { r2[2, 0], r2[2, 1], r2[2, 2], t1[2, 0] }
                            });

                            triangulationResult = TriangulateAndCheckReproj(calibrationInfo, trackedFeaturesKp, bootstrapKp, p, pMat1);
                            if (!triangulationResult.Result)
                            {
                                Trace.WriteLine("Can't find the right P matrix.");
                            }
                        }
                    }
                }

                result.P1 = p;
                result.P2 = pMat1;
                result.FilteredTrackedFeaturesKp = triangulationResult.FilteredTrackedFeaturesKp;
                result.FilteredBootstrapKp       = triangulationResult.FilteredBootstrapKp;
                result.TrackedFeatures3D         = triangulationResult.TrackedFeatures3D;
                result.Result = triangulationResult.Result;
                return(result);
            }

            return(result);
        }
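A hedged usage sketch mirroring the call sites above (BootstrapTrack and the bootstrap test): attempt pose recovery and triangulation once enough camera motion has accumulated.

            var p1Init = new Matrix<double>(3, 4);
            p1Init.SetIdentity();
            var pose = OpenCvUtilities.CameraPoseAndTriangulationFromFundamental(calibrationInfo, trackedFeaturesKp, bootstrapKp, p1Init);
            if (pose.Result)
            {
                var cloud = pose.TrackedFeatures3D; // triangulated points that passed the reprojection check
            }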
        /// <summary>
        /// Triangulates the matched points with the camera matrices p and p1 and validates the result
        /// by checking the reprojection error against the bootstrap points.
        /// </summary>
        /// <param name="calibrationInfo">Camera calibration (intrinsic and distortion parameters)</param>
        /// <param name="trackedFeaturesKp">Keypoints tracked in the current frame</param>
        /// <param name="bootstrapKp">Corresponding keypoints from the bootstrap frame</param>
        /// <param name="p">Camera matrix of the first camera</param>
        /// <param name="p1">Candidate camera matrix of the second camera</param>
        /// <returns>Result containing the reprojection error, filtered keypoints and triangulated 3D points</returns>
        public static TriangulateAndCheckReprojResult TriangulateAndCheckReproj(CameraCalibrationInfo calibrationInfo, VectorOfKeyPoint trackedFeaturesKp, VectorOfKeyPoint bootstrapKp, Matrix <double> p, Matrix <double> p1)
        {
            var result = new TriangulateAndCheckReprojResult();

            var trackedFeaturesPoints = Utils.GetPointsVector(trackedFeaturesKp);
            var bootstrapPoints       = Utils.GetPointsVector(bootstrapKp);

            //undistort
            var normalizedTrackedPts   = new VectorOfPointF();
            var normalizedBootstrapPts = new VectorOfPointF();

            CvInvoke.UndistortPoints(trackedFeaturesPoints, normalizedTrackedPts, calibrationInfo.Intrinsic, calibrationInfo.Distortion);
            CvInvoke.UndistortPoints(bootstrapPoints, normalizedBootstrapPts, calibrationInfo.Intrinsic, calibrationInfo.Distortion);

            //triangulate
            var pt3Dh = new Mat();

            CvInvoke.TriangulatePoints(p, p1, normalizedBootstrapPts, normalizedTrackedPts, pt3Dh);
            var pt3DhMatrix = new Matrix <float>(pt3Dh.Rows, pt3Dh.Cols, pt3Dh.DataPointer);

            var pt3DMat = new Mat();

            CvInvoke.ConvertPointsFromHomogeneous(pt3DhMatrix.Transpose(), pt3DMat);
            var pt3D = new Matrix <float>(pt3DMat.Rows, pt3DMat.Cols * pt3DMat.NumberOfChannels, pt3DMat.DataPointer);

            var statusArray = new byte[pt3D.Rows];

            for (int i = 0; i < pt3D.Rows; i++)
            {
                statusArray[i] = (pt3D[i, 2] > 0) ? (byte)1 : (byte)0;
            }

            var status = new VectorOfByte(statusArray);
            int count  = CvInvoke.CountNonZero(status);

            double percentage = count / (double)pt3D.Rows;

            if (percentage > 0.75)
            {
                //calculate reprojection
                var rvec = new VectorOfFloat(new float[] { 0, 0, 0 });
                var tvec = new VectorOfFloat(new float[] { 0, 0, 0 });
                var reprojectedPtSet1 = new VectorOfPointF();
                CvInvoke.ProjectPoints(pt3D, rvec, tvec, calibrationInfo.Intrinsic, calibrationInfo.Distortion,
                                       reprojectedPtSet1);

                double reprojErr = CvInvoke.Norm(reprojectedPtSet1, bootstrapPoints) / bootstrapPoints.Size;
                if (reprojErr < 5)
                {
                    statusArray = new byte[bootstrapPoints.Size];
                    for (int i = 0; i < bootstrapPoints.Size; ++i)
                    {
                        var pointsDiff    = Utils.SubstarctPoints(bootstrapPoints[i], reprojectedPtSet1[i]);
                        var vectorOfPoint = new VectorOfPointF(new[] { pointsDiff });
                        statusArray[i] = CvInvoke.Norm(vectorOfPoint) < 20.0 ? (byte)1 : (byte)0;
                    }

                    status = new VectorOfByte(statusArray);

                    var trackedFeatures3D = new VectorOfPoint3D32F(Utils.Get3dPointsArray(pt3D));

                    Utils.KeepVectorsByStatus(ref trackedFeaturesKp, ref trackedFeatures3D, status);

                    result.Error                     = reprojErr;
                    result.TrackedFeatures3D         = new VectorOfPoint3D32F(trackedFeatures3D.ToArray());
                    result.FilteredTrackedFeaturesKp = new VectorOfKeyPoint(trackedFeaturesKp.ToArray());
                    result.FilteredBootstrapKp       = new VectorOfKeyPoint(bootstrapKp.ToArray());
                    result.Result                    = true;
                }

                rvec.Dispose();
                tvec.Dispose();
                reprojectedPtSet1.Dispose();
            }

            normalizedTrackedPts.Dispose();
            normalizedBootstrapPts.Dispose();
            pt3Dh.Dispose();
            pt3DhMatrix.Dispose();
            pt3DMat.Dispose();
            pt3D.Dispose();
            status.Dispose();

            return(result);
        }
        public bool BootstrapTrack(Mat img)
        {
            #region Trace

            Trace.WriteLine($"BootstrapTrack iteration ({ _trackedFeatures.Size}).");
            Trace.WriteLine("--------------------------");
            Trace.Indent();

            #endregion

            //Track detected features
            if (_prevGray.IsEmpty)
            {
                const string error = "Previous frame is empty. Bootstrap first.";
                Trace.TraceError(error);
                throw new Exception(error);
            }

            if (img.IsEmpty || img.NumberOfChannels != 3)
            {
                const string error = "Image is not appropriate (Empty or wrong number of channels).";
                Trace.TraceError(error);
                throw new Exception(error);
            }

            var corners = new VectorOfPointF();
            var status  = new VectorOfByte();
            var errors  = new VectorOfFloat();

            var currGray = new Mat();
            CvInvoke.CvtColor(img, currGray, ColorConversion.Bgr2Gray);

            CvInvoke.CalcOpticalFlowPyrLK(_prevGray, currGray, Utils.GetPointsVector(_trackedFeatures), corners,
                                          status, errors, new Size(11, 11), 3, new MCvTermCriteria(20, 0.03));
            currGray.CopyTo(_prevGray);

            #region Trace

            Trace.WriteLine($"Tracked first point: ({_trackedFeatures[0].Point.X}, {_trackedFeatures[0].Point.Y}) / Found first corner = ({corners[0].X}, {corners[0].Y})");
            Trace.WriteLine($"Tracked second point: ({_trackedFeatures[1].Point.X}, {_trackedFeatures[1].Point.Y}) / Found second corner = ({corners[1].X}, {corners[1].Y})");
            Trace.WriteLine($"Tracked third point: ({_trackedFeatures[2].Point.X}, {_trackedFeatures[2].Point.Y}) / Found third corner = ({corners[2].X}, {corners[2].Y})");

            #endregion

            for (int j = 0; j < corners.Size; j++)
            {
                if (status[j] == 1)
                {
                    var p1 = new Point((int)_trackedFeatures[j].Point.X, (int)_trackedFeatures[j].Point.Y);
                    var p2 = new Point((int)corners[j].X, (int)corners[j].Y);

                    CvInvoke.Line(img, p1, p2, new MCvScalar(120, 10, 20));
                }
            }

            if (CvInvoke.CountNonZero(status) < status.Size * 0.8)
            {
                Trace.TraceError("Tracking failed.");
                throw new Exception("Tracking failed.");
            }

            _trackedFeatures = Utils.GetKeyPointsVector(corners);

            Utils.KeepVectorsByStatus(ref _trackedFeatures, ref _bootstrapKp, status);

            Trace.WriteLine($"{_trackedFeatures.Size} features survived optical flow.");

            if (_trackedFeatures.Size != _bootstrapKp.Size)
            {
                const string error = "Tracked features vector size is not equal to bootstrapped one.";
                Trace.TraceError(error);
                throw new Exception(error);
            }

            #region Trace

            Trace.WriteLine($"Bootstrap first point: ({_bootstrapKp[0].Point.X}, {_bootstrapKp[0].Point.Y}) / Found first corner = ({corners[0].X}, {corners[0].Y})");
            Trace.WriteLine($"Bootstrap second point: ({_bootstrapKp[1].Point.X}, {_bootstrapKp[1].Point.Y}) / Found second corner = ({corners[1].X}, {corners[1].Y})");
            Trace.WriteLine($"Bootstrap third point: ({_bootstrapKp[2].Point.X}, {_bootstrapKp[2].Point.Y}) / Found third corner = ({corners[2].X}, {corners[2].Y})");

            #endregion

            //verify features with a homography
            var inlierMask = new VectorOfByte();
            var homography = new Mat();
            if (_trackedFeatures.Size > 4)
            {
                CvInvoke.FindHomography(Utils.GetPointsVector(_trackedFeatures), Utils.GetPointsVector(_bootstrapKp), homography, HomographyMethod.Ransac, RansacThreshold, inlierMask);
            }

            int inliersNum = CvInvoke.CountNonZero(inlierMask);

            var m = new Matrix <double>(homography.Rows, homography.Cols, homography.DataPointer);

            m.Dispose();

            Trace.WriteLine($"{inliersNum} features survived homography.");

            if (inliersNum != _trackedFeatures.Size && inliersNum >= 4 && !homography.IsEmpty)
            {
                Utils.KeepVectorsByStatus(ref _trackedFeatures, ref _bootstrapKp, inlierMask);
            }
            else if (inliersNum < MinInliers)
            {
                Trace.TraceError("Not enough features survived homography.");
                return(false);
            }

            var bootstrapKpOrig     = new VectorOfKeyPoint(_bootstrapKp.ToArray());
            var trackedFeaturesOrig = new VectorOfKeyPoint(_trackedFeatures.ToArray());

            //Attempt at 3D reconstruction (triangulation) if conditions are right
            var rigidT = CvInvoke.EstimateRigidTransform(Utils.GetPointsVector(_trackedFeatures).ToArray(), Utils.GetPointsVector(_bootstrapKp).ToArray(), false);
            var matrix = new Matrix <double>(rigidT.Rows, rigidT.Cols, rigidT.DataPointer);

            #region Trace

            Trace.WriteLine($"Track first point: ({_trackedFeatures[0].Point.X}, {_trackedFeatures[0].Point.Y}) / Bootstrap first point = ({_bootstrapKp[0].Point.X}, {_bootstrapKp[0].Point.Y})");
            Trace.WriteLine($"Track 10th point: ({_trackedFeatures[10].Point.X}, {_trackedFeatures[10].Point.Y}) / Bootstrap 10th point = ({_bootstrapKp[10].Point.X}, {_bootstrapKp[10].Point.Y})");
            Trace.WriteLine($"Track last point: ({_trackedFeatures[_trackedFeatures.Size - 1].Point.X}, {_trackedFeatures[_trackedFeatures.Size - 1].Point.Y}" +
                            $") / Bootstrap third point = ({_bootstrapKp[_bootstrapKp.Size - 1].Point.X}, {_bootstrapKp[_bootstrapKp.Size - 1].Point.Y})");

            Trace.WriteLine($"Rigid matrix: [ [ {matrix[0, 0]}, {matrix[0, 1]}, {matrix[0, 2]} ] [ {matrix[1, 0]}, {matrix[1, 1]}, {matrix[1, 2]} ] ].");
            Trace.WriteLine($"Rigid: {CvInvoke.Norm(matrix.GetCol(2))}");

            #endregion

            if (CvInvoke.Norm(matrix.GetCol(2)) > 100)
            {
                //camera motion is sufficient
                var p1 = new Matrix <double>(3, 4);
                p1.SetIdentity();
                var result = OpenCvUtilities.CameraPoseAndTriangulationFromFundamental(_calibrationInfo, _trackedFeatures, _bootstrapKp, p1);

                _trackedFeatures = result.FilteredTrackedFeaturesKp;
                _bootstrapKp     = result.FilteredBootstrapKp;

                if (result.Result)
                {
                    _trackedFeatures3D = result.TrackedFeatures3D;
                    var trackedFeatures3Dm = Utils.Get3dPointsMat(_trackedFeatures3D);

                    var eigenvectors = new Mat();
                    var mean         = new Mat();
                    CvInvoke.PCACompute(trackedFeatures3Dm, mean, eigenvectors);
                    var eigenvectorsMatrix = new Matrix <double>(eigenvectors.Rows, eigenvectors.Cols, eigenvectors.DataPointer);

                    int numInliers    = 0;
                    var normalOfPlane = eigenvectorsMatrix.GetRow(2).ToUMat().ToMat(AccessType.Fast);
                    //eigenvectors.GetRow(2).CopyTo(normalOfPlane);
                    CvInvoke.Normalize(normalOfPlane, normalOfPlane);

                    var normalOfPlaneMatrix = new Matrix <double>(normalOfPlane.Rows, normalOfPlane.Cols, normalOfPlane.DataPointer);
                    Trace.WriteLine($"normal of plane: {normalOfPlaneMatrix[0, 0]}");
                    //cv::Vec3d x0 = pca.mean;
                    //double p_to_plane_thresh = sqrt(pca.eigenvalues.at<double>(2));
                }

                return(true);
            }

            #region Trace

            Trace.Unindent();
            Trace.WriteLine("--------------------------");

            #endregion

            return(false);
        }
        public bool Track(Mat img)
        {
            //Track detected features
            if (_prevGray.IsEmpty)
            {
                Trace.WriteLine("Can't track: empty prev frame."); return(false);
            }

            var corners = new VectorOfPointF();
            var status  = new VectorOfByte();
            var errors  = new VectorOfFloat();

            CvInvoke.CvtColor(img, _currGray, ColorConversion.Bgr2Gray);

            CvInvoke.CalcOpticalFlowPyrLK(_prevGray, _currGray, Utils.GetPointsVector(_trackedFeatures), corners, status, errors, new Size(11, 11), 0, new MCvTermCriteria(100));
            _currGray.CopyTo(_prevGray);

            if (CvInvoke.CountNonZero(status) < status.Size * 0.8)
            {
                Trace.WriteLine("Tracking failed.");
                _bootstrapping = false;
                _canCalcMvm    = false;
                return(false);
            }

            _trackedFeatures = Utils.GetKeyPointsVector(corners);

            Utils.KeepVectorsByStatus(ref _trackedFeatures, ref _trackedFeatures3D, status);

            Console.WriteLine("tracking.");

            _canCalcMvm = (_trackedFeatures.Size >= MinInliers);

            if (_canCalcMvm)
            {
                //Perform camera pose estimation for AR
                var rvec = new Mat();
                var tvec = new Mat();

                CvInvoke.SolvePnP(_trackedFeatures3D, Utils.GetPointsVector(_trackedFeatures), _calibrationInfo.Intrinsic, _calibrationInfo.Distortion, _raux, _taux, !_raux.IsEmpty);

                _raux.ConvertTo(rvec, DepthType.Cv32F);
                _taux.ConvertTo(tvec, DepthType.Cv64F);

                var pts = new MCvPoint3D32f[] {
                    new MCvPoint3D32f(0.01f, 0, 0),
                    new MCvPoint3D32f(0, 0.01f, 0),
                    new MCvPoint3D32f(0, 0, 0.01f)
                };
                var axis = new VectorOfPoint3D32F(pts);

                var imgPoints = new VectorOfPointF();
                CvInvoke.ProjectPoints(axis, _raux, _taux, _calibrationInfo.Intrinsic, _calibrationInfo.Distortion, imgPoints);

                var centerPoint = new Point((int)_trackedFeatures[0].Point.X, (int)_trackedFeatures[0].Point.Y);

                var xPoint = new Point((int)imgPoints[0].X, (int)imgPoints[0].Y);
                var yPoint = new Point((int)imgPoints[1].X, (int)imgPoints[1].Y);
                var zPoint = new Point((int)imgPoints[2].X, (int)imgPoints[2].Y);

                CvInvoke.Line(img, centerPoint, xPoint, new MCvScalar(255, 0, 0), 5); //blue x-ax
                CvInvoke.Line(img, centerPoint, yPoint, new MCvScalar(0, 255, 0), 5); //green y-ax
                CvInvoke.Line(img, centerPoint, zPoint, new MCvScalar(0, 0, 255), 5); //red z-ax

                var rot = new Mat(3, 3, DepthType.Cv32F, 3);

                CvInvoke.Rodrigues(rvec, rot);
            }

            return(true);
        }
Example #13
        /// <summary>
        /// Encodes the specified image into the target format
        /// </summary>
        /// <param name="image">The image to encode</param>
        /// <param name="buffer">Buffer that receives the encoded output</param>
        /// <param name="encoding">An <see cref="ImageEncoding"/> value that defines the target format</param>
        /// <param name="parameters">Optional encoding parameters</param>
        public static void Imencode(IInputArray image, VectorOfByte buffer, ImageEncoding encoding = ImageEncoding.Jpeg, params int[] parameters)
        {
            string destEncoding = GetEncoding(ref encoding);

            Imencode(destEncoding, image, buffer, parameters);
        }
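A hedged usage sketch (the wrapper class name ImageCodecs and the Mat variable frame are assumptions): encoding into a caller-owned buffer avoids allocating a fresh byte array per call.

        using (var buffer = new VectorOfByte())
        {
            ImageCodecs.Imencode(frame, buffer, ImageEncoding.Jpeg);
            byte[] jpeg = buffer.ToArray(); // only this call allocates a managed array
        }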