/// <summary>
 /// Create a new HOGDescriptor
 /// </summary>
 public HOGDescriptor()
 {
     _ptr = CvHOGDescriptorCreateDefault();
     _rectStorage = new MemStorage();
     _rectSeq = new Seq<Rectangle>(_rectStorage);
     _vector = new VectorOfFloat();
 }
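A minimal usage sketch for the default constructor above (not part of the original example), assuming the Emgu CV 2.x API in which SetSVMDetector accepts the float[] returned by GetDefaultPeopleDetector and DetectMultiScale returns Rectangle[]; the image path is a placeholder:

 // requires: using System.Drawing; using Emgu.CV; using Emgu.CV.Structure;
 using (HOGDescriptor hog = new HOGDescriptor())
 using (Image<Bgr, Byte> frame = new Image<Bgr, Byte>("pedestrians.jpg")) // placeholder path
 {
     // Load the built-in people detector (assumed Emgu CV 2.x API).
     hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

     // Newer Emgu CV versions return MCvObjectDetection[] here instead of Rectangle[].
     Rectangle[] found = hog.DetectMultiScale(frame);
     foreach (Rectangle r in found)
         frame.Draw(r, new Bgr(0, 0, 255), 2);
 }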
Example #2
        /// <summary>
        /// Performs chamfer matching of a template edge map against an edge image.
        /// </summary>
        /// <param name="img">Edge image in which the template is searched.</param>
        /// <param name="templ">Edge template.</param>
        /// <param name="results">Output point sequences, one per match.</param>
        /// <param name="cost">Output matching cost, one per match.</param>
        /// <param name="templScale">Scale applied to the template.</param>
        /// <param name="maxMatches">Maximum number of matches to return.</param>
        /// <param name="minMatchDistance">Minimum distance between returned matches.</param>
        /// <param name="padX">Horizontal padding.</param>
        /// <param name="padY">Vertical padding.</param>
        /// <param name="scales">Number of scales at which the template is evaluated.</param>
        /// <param name="minScale">Minimum template scale.</param>
        /// <param name="maxScale">Maximum template scale.</param>
        /// <param name="orientationWeight">Weight of the edge-orientation term in the matching cost.</param>
        /// <param name="truncate">Truncation value for the distance transform.</param>
        /// <returns>Index of the best match in results, or a negative value if no match was found.</returns>
        public static int ChamferMatching(
            Mat img, Mat templ,
            out Point[][] results, out float[] cost,
            double templScale = 1, int maxMatches = 20,
            double minMatchDistance = 1.0, int padX = 3,
            int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
            double orientationWeight = 0.5, double truncate = 20)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            if (templ == null)
                throw new ArgumentNullException("templ");
            img.ThrowIfDisposed();
            templ.ThrowIfDisposed();
            
            using (var resultsVec = new VectorOfVectorPoint())
            using (var costVec = new VectorOfFloat())
            {
                int ret = NativeMethods.contrib_chamerMatching(
                    img.CvPtr, templ.CvPtr, resultsVec.CvPtr, costVec.CvPtr, 
                    templScale, maxMatches, minMatchDistance,
                    padX, padY, scales, minScale, maxScale, orientationWeight, truncate);
                GC.KeepAlive(img);
                GC.KeepAlive(templ);

                results = resultsVec.ToArray();
                cost = costVec.ToArray();

                return ret;
            }
        }
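A hedged usage sketch for the method above. The inputs are assumed to be 8-bit single-channel edge maps (e.g. produced by Canny), and since the containing class is not visible in the snippet the call is written unqualified:

        // requires: using System; using OpenCvSharp;
        static void RunChamferExample(Mat img, Mat templ)
        {
            // img and templ are assumed to be edge images of the scene and the template.
            int best = ChamferMatching(img, templ, out Point[][] results, out float[] cost,
                                       templScale: 1, maxMatches: 20);
            if (best < 0)
            {
                Console.WriteLine("No match found.");
                return;
            }
            Console.WriteLine($"Best match #{best}: {results[best].Length} contour points, cost = {cost[best]}");
        }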
Example #3
 /// <summary>
 /// Compute the self-similarity descriptor at the given locations of the image.
 /// </summary>
 /// <param name="image">The image from which the descriptor is computed.</param>
 /// <param name="winStride">The window stride.</param>
 /// <param name="locations">The points at which the descriptor is computed.</param>
 /// <returns>The computed descriptor values, concatenated for all locations.</returns>
 public float[] Compute(Image<Gray, Byte> image, Size winStride, Point[] locations)
 {
    using (VectorOfFloat vof = new VectorOfFloat())
    {
       GCHandle handle = GCHandle.Alloc(locations, GCHandleType.Pinned);
       CvSelfSimDescriptorCompute(_ptr, image, vof, ref winStride, handle.AddrOfPinnedObject(), locations.Length);
       handle.Free();
       return vof.ToArray();
    }
 }
Example #4
        /// <summary>
        /// Apply cascade to an input frame and return the vector of Detection objects.
        /// </summary>
        /// <param name="image">A frame on which detector will be applied.</param>
        /// <param name="rois">A vector of regions of interest. Only the objects that fall into one of the regions will be returned.</param>
        /// <returns>An output array of Detections.</returns>
        public Detection[] Detect(IInputArray image, Rectangle[] rois = null)
        {
            using (VectorOfRect roiRects = new VectorOfRect())
                using (VectorOfRect regions = new VectorOfRect())
                    using (VectorOfFloat confidents = new VectorOfFloat())
                    {
                        IntPtr roisPtr;
                        if (rois == null || rois.Length == 0)
                        {
                            roisPtr = IntPtr.Zero;
                        }
                        else
                        {
                            roiRects.Push(rois);
                            roisPtr = roiRects.Ptr;
                        }
                        using (InputArray iaImage = image.GetInputArray())
                            SoftCascadeInvoke.cveSoftCascadeDetectorDetect(_ptr, iaImage, roisPtr, regions, confidents);

                        if (regions.Size == 0)
                        {
                            return(new Detection[0]);
                        }
                        else
                        {
                            Rectangle[] regionArr    = regions.ToArray();
                            float[]     confidentArr = confidents.ToArray();
                            Detection[] results      = new Detection[regionArr.Length];
                            for (int i = 0; i < results.Length; i++)
                            {
                                results[i] = new Detection(regionArr[i], confidentArr[i]);
                            }
                            return(results);
                        }
                    }
        }
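A hedged usage sketch for Detect above. The surrounding class is assumed to be Emgu CV's SoftCascadeDetector; construction is omitted because the constructor signature differs between Emgu CV versions, and no Detection members are accessed beyond the returned array:

        // requires: using System; using Emgu.CV; using Emgu.CV.Softcascade;
        static void CountDetections(SoftCascadeDetector detector, IInputArray frame)
        {
            // Passing no ROIs (null) searches the whole frame.
            Detection[] detections = detector.Detect(frame);
            Console.WriteLine($"{detections.Length} objects detected.");
        }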
Example #5
        /// <summary>
        /// Performs non maximum suppression given boxes and corresponding scores.
        /// </summary>
        /// <param name="bboxes">a set of bounding boxes to apply NMS.</param>
        /// <param name="scores">a set of corresponding confidences.</param>
        /// <param name="scoreThreshold">a threshold used to filter boxes by score.</param>
        /// <param name="nmsThreshold">a threshold used in non maximum suppression.</param>
        /// <param name="indices">the kept indices of bboxes after NMS.</param>
        /// <param name="eta">a coefficient in adaptive threshold formula</param>
        /// <param name="topK">if `&gt;0`, keep at most @p top_k picked indices.</param>
        public static void NMSBoxes(IEnumerable <RotatedRect> bboxes, IEnumerable <float> scores,
                                    float scoreThreshold, float nmsThreshold,
                                    out int[] indices,
                                    float eta = 1.0f, int topK = 0)
        {
            if (bboxes == null)
            {
                throw new ArgumentNullException(nameof(bboxes));
            }
            if (scores == null)
            {
                throw new ArgumentNullException(nameof(scores));
            }

            using (var bboxesVec = new VectorOfRotatedRect(bboxes))
                using (var scoresVec = new VectorOfFloat(scores))
                    using (var indicesVec = new VectorOfInt32())
                    {
                        NativeMethods.dnn_NMSBoxes_RotatedRect(
                            bboxesVec.CvPtr, scoresVec.CvPtr, scoreThreshold, nmsThreshold,
                            indicesVec.CvPtr, eta, topK);
                        indices = indicesVec.ToArray();
                    }
        }
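A short usage sketch for the rotated-rect overload above (exposed as CvDnn.NMSBoxes in OpenCvSharp, per the file name in the next example); box coordinates and thresholds are illustrative:

        // requires: using System.Collections.Generic; using OpenCvSharp; using OpenCvSharp.Dnn;
        var boxes = new List<RotatedRect>
        {
            new RotatedRect(new Point2f(110, 110), new Size2f(100, 50), 0f),
            new RotatedRect(new Point2f(112, 108), new Size2f(100, 50), 2f),  // near-duplicate of the first box
            new RotatedRect(new Point2f(300, 200), new Size2f(80, 40), 15f),
        };
        var scores = new List<float> { 0.9f, 0.8f, 0.7f };

        CvDnn.NMSBoxes(boxes, scores, scoreThreshold: 0.5f, nmsThreshold: 0.4f, out int[] indices);
        // indices now holds the boxes that survive suppression; the overlapping,
        // lower-scored duplicate should be removed with these thresholds.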
Example #6
File: CvDnn.cs Project: bsmehj/opencvsharp
        /// <summary>
        /// Performs non maximum suppression given boxes and corresponding scores.
        /// </summary>
        /// <param name="bboxes">a set of bounding boxes to apply NMS.</param>
        /// <param name="scores">a set of corresponding confidences.</param>
        /// <param name="scoreThreshold">a threshold used to filter boxes by score.</param>
        /// <param name="nmsThreshold">a threshold used in non maximum suppression.</param>
        /// <param name="indices">the kept indices of bboxes after NMS.</param>
        /// <param name="eta">a coefficient in adaptive threshold formula</param>
        /// <param name="topK">if `&gt;0`, keep at most @p top_k picked indices.</param>
        // ReSharper disable once IdentifierTypo
        public static void NMSBoxes(IEnumerable <RotatedRect> bboxes, IEnumerable <float> scores,
                                    float scoreThreshold, float nmsThreshold,
                                    out int[] indices,
                                    float eta = 1.0f, int topK = 0)
        {
            if (bboxes == null)
            {
                throw new ArgumentNullException(nameof(bboxes));
            }
            if (scores == null)
            {
                throw new ArgumentNullException(nameof(scores));
            }

            // ReSharper disable once IdentifierTypo
            using var bboxesVec  = new VectorOfRotatedRect(bboxes);
            using var scoresVec  = new VectorOfFloat(scores);
            using var indicesVec = new VectorOfInt32();
            NativeMethods.HandleException(
                NativeMethods.dnn_NMSBoxes_RotatedRect(
                    bboxesVec.CvPtr, scoresVec.CvPtr, scoreThreshold, nmsThreshold,
                    indicesVec.CvPtr, eta, topK));
            indices = indicesVec.ToArray();
        }
        /// <summary>
        /// Recognize text using the tesseract-ocr API.
        /// Takes image on input and returns recognized text in the output_text parameter.
        /// Optionally also provides the Rects for individual text elements found (e.g. words),
        /// and the list of those text elements with their confidence values.
        /// </summary>
        /// <param name="image">Input image CV_8UC1 or CV_8UC3</param>
        /// <param name="outputText">Output text of the tesseract-ocr.</param>
        /// <param name="componentRects">If provided the method will output a list of Rects for the individual
        /// text elements found(e.g.words or text lines).</param>
        /// <param name="componentTexts">If provided the method will output a list of text strings for the
        /// recognition of individual text elements found(e.g.words or text lines).</param>
        /// <param name="componentConfidences">If provided the method will output a list of confidence values
        /// for the recognition of individual text elements found(e.g.words or text lines).</param>
        /// <param name="componentLevel">OCR_LEVEL_WORD (by default), or OCR_LEVEL_TEXT_LINE.</param>
        public override void Run(
            Mat image,
            out string outputText,
            out Rect[] componentRects,
            out string[] componentTexts,
            out float[] componentConfidences,
            ComponentLevels componentLevel = ComponentLevels.Word)
        {
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            image.ThrowIfDisposed();

            using (var outputTextString = new StdString())
                using (var componentRectsVector = new VectorOfRect())
                    using (var componentTextsVector = new VectorOfString())
                        using (var componentConfidencesVector = new VectorOfFloat())
                        {
                            NativeMethods.text_OCRTesseract_run1(
                                ptr,
                                image.CvPtr,
                                outputTextString.CvPtr,
                                componentRectsVector.CvPtr,
                                componentTextsVector.CvPtr,
                                componentConfidencesVector.CvPtr,
                                (int)componentLevel);

                            outputText           = outputTextString.ToString();
                            componentRects       = componentRectsVector.ToArray();
                            componentTexts       = componentTextsVector.ToArray();
                            componentConfidences = componentConfidencesVector.ToArray();
                        }

            GC.KeepAlive(image);
        }
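A hedged usage sketch for Run above, assuming OpenCvSharp's OCRTesseract.Create factory (a null datapath picks up the default tessdata location) and a placeholder image path; a working Tesseract installation is required:

        // requires: using System; using OpenCvSharp; using OpenCvSharp.Text;
        using (var ocr = OCRTesseract.Create(null, "eng"))
        using (var image = new Mat("sign.png"))   // placeholder path, loaded as 8-bit BGR
        {
            ocr.Run(image,
                out string text,
                out Rect[] wordRects,
                out string[] wordTexts,
                out float[] confidences,
                ComponentLevels.Word);

            Console.WriteLine(text);
            for (int i = 0; i < wordRects.Length; i++)
                Console.WriteLine($"{wordTexts[i]} ({confidences[i]:0.0}) at {wordRects[i]}");
        }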
Example #8
        // Calculate Optical Flow Using PyrLk Algorithm
        public void PyrLkOpticalFlow(Image <Gray, byte> prevFrame, Image <Gray, byte> nextFrame)
        {
            //Get the Optical flow of L-K feature
            Image <Gray, Byte> mask     = prevFrame.Clone();
            GFTTDetector       detector = new GFTTDetector(30, 0.01, 10, 3, false, 0.04);

            MKeyPoint[]     fp1      = detector.Detect(prevFrame, null);
            VectorOfPointF  vp1      = new VectorOfPointF(fp1.Select(x => x.Point).ToArray());
            VectorOfPointF  vp2      = new VectorOfPointF(vp1.Size);
            VectorOfByte    vstatus  = new VectorOfByte(vp1.Size);
            VectorOfFloat   verr     = new VectorOfFloat(vp1.Size);
            Size            winsize  = new Size(prevFrame.Width, prevFrame.Height);
            int             maxLevel = 1; // if 0, winsize is not used
            MCvTermCriteria criteria = new MCvTermCriteria(10, 1);

            try
            {
                CvInvoke.CalcOpticalFlowPyrLK(prevFrame, nextFrame, vp1, vp2, vstatus, verr, winsize, maxLevel, criteria);
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
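The example above computes the flow but never inspects it; a sketch (not part of the original) of how the outputs are typically filtered, keeping only pairs whose status byte is 1 and whose tracking error is below an illustrative threshold:

        // requires: using System; using System.Collections.Generic; using System.Drawing; using Emgu.CV.Util;
        static List<Tuple<PointF, PointF>> KeepTrackedPairs(
            VectorOfPointF prevPts, VectorOfPointF nextPts, VectorOfByte status, VectorOfFloat err, float maxErr = 50f)
        {
            var pairs = new List<Tuple<PointF, PointF>>();
            for (int i = 0; i < status.Size; i++)
            {
                if (status[i] == 1 && err[i] < maxErr)   // maxErr is an illustrative threshold
                    pairs.Add(Tuple.Create(prevPts[i], nextPts[i]));
            }
            return pairs;
        }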
Example #9
      /// <summary>
      /// Detect image features from the given image
      /// </summary>
      /// <param name="image">The image to detect features from</param>
      /// <param name="mask">The optional mask, can be null if not needed</param>
      /// <returns>The Image features detected from the given image</returns>
      public ImageFeature[] DetectFeatures(Image<Gray, Byte> image, Image<Gray, byte> mask)
      {
         using (MemStorage stor = new MemStorage())
         using (VectorOfFloat descs = new VectorOfFloat())
         {
            Seq<MKeyPoint> pts = new Seq<MKeyPoint>(stor);
            CvSURFDetectorDetectFeature(ref this, image, mask, pts, descs);
            MKeyPoint[] kpts = pts.ToArray();
            int n = kpts.Length;
            long add = descs.StartAddress.ToInt64();

            ImageFeature[] features = new ImageFeature[n];
            int sizeOfdescriptor = extended == 0 ? 64 : 128;
            for (int i = 0; i < n; i++, add += sizeOfdescriptor * sizeof(float))
            {
               features[i].KeyPoint = kpts[i];
               float[] desc = new float[sizeOfdescriptor];
               Marshal.Copy(new IntPtr(add), desc, 0, sizeOfdescriptor);
               features[i].Descriptor = desc;
            }
            return features;
         }
      }
Example #10
File: LaserTracker.cs Project: akx/ltag
 private void HueThreshold(float hueMin, float hueMax, Mat hsvFrame, Mat threshFrame)
 {
     using (var minThresh1 = new VectorOfFloat(new[] {hueMin, _satMin, _valMin}))
     {
         using (var maxThresh1 = new VectorOfFloat(new[] {hueMax, _satMax, _valMax}))
         {
             CvInvoke.InRange(hsvFrame, minThresh1, maxThresh1, threshFrame);
         }
     }
 }
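Because red wraps around both ends of the OpenCV hue range (0-180 for 8-bit HSV), a tracker built on the method above would typically threshold two hue bands and OR the masks; a hypothetical companion method, assuming the same instance fields as the snippet:

 // Hypothetical companion to HueThreshold above (not in the original source).
 private void RedThreshold(Mat hsvFrame, Mat threshFrame)
 {
     using (var low = new Mat())
     using (var high = new Mat())
     {
         HueThreshold(0f, 10f, hsvFrame, low);      // red near hue = 0
         HueThreshold(170f, 180f, hsvFrame, high);  // red near hue = 180
         CvInvoke.BitwiseOr(low, high, threshFrame);
     }
 }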
Example #11
 public void Test_VectorOfFloat()
 {
    VectorOfFloat vf = new VectorOfFloat();
    float[] values = new float[20];
    for (int i = 0; i < values.Length; i++)
       values[i] = i;
    vf.Push(values);
    float[] valuesCopy = vf.ToArray();
    for (int i = 0; i < values.Length; i++)
       EmguAssert.AreEqual(values[i], valuesCopy[i]);
 }
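The same round trip can be written with the float[] constructor used in Example #10 and the indexer instead of Push; a small companion sketch:

    using (VectorOfFloat vf2 = new VectorOfFloat(new float[] { 1f, 2f, 3f }))
    {
       EmguAssert.AreEqual(3, vf2.Size);
       EmguAssert.AreEqual(2f, vf2[1]);
       EmguAssert.AreEqual(3, vf2.ToArray().Length);
    }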
Example #12
File: SURF.cs Project: MJunak/opencvsharp
#if LANG_JP
        /// <summary>
        /// keypoint を検出し,その SURF ディスクリプタを計算します.[useProvidedKeypoints = true]
        /// </summary>
        /// <param name="img"></param>
        /// <param name="mask"></param>
        /// <param name="keypoints"></param>
        /// <param name="descriptors"></param>
        /// <param name="useProvidedKeypoints"></param>
#else
        /// <summary>
        /// detects keypoints and computes the SURF descriptors for them. [useProvidedKeypoints = true]
        /// </summary>
        /// <param name="img"></param>
        /// <param name="mask"></param>
        /// <param name="keypoints"></param>
        /// <param name="descriptors"></param>
        /// <param name="useProvidedKeypoints"></param>
#endif
        public void Run(InputArray img, InputArray mask, out KeyPoint[] keypoints, out float[] descriptors,
            bool useProvidedKeypoints = false)
        {
            ThrowIfDisposed();
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
            using (VectorOfFloat descriptorsVec = new VectorOfFloat())
            {
                NativeMethods.nonfree_SURF_run2_vector(ptr, img.CvPtr, Cv2.ToPtr(mask), keypointsVec.CvPtr,
                    descriptorsVec.CvPtr, useProvidedKeypoints ? 1 : 0);

                keypoints = keypointsVec.ToArray();
                descriptors = descriptorsVec.ToArray();
            }
        }
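Run above returns the descriptors as one flat float[]; a small helper sketch (not from the original) that slices it into one row per keypoint, 64 or 128 floats each depending on the extended flag:

        // requires: using System; using OpenCvSharp;
        static float[][] SplitDescriptors(float[] flat, KeyPoint[] keypoints)
        {
            int descriptorSize = keypoints.Length == 0 ? 0 : flat.Length / keypoints.Length;
            var rows = new float[keypoints.Length][];
            for (int i = 0; i < keypoints.Length; i++)
            {
                rows[i] = new float[descriptorSize];
                Array.Copy(flat, i * descriptorSize, rows[i], 0, descriptorSize);
            }
            return rows;
        }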
Example #13
        private static double ComputeAngleBetweenCameraNormAndPlaneNorm(VectorOfPoint3D32F trackedFeatures3D, Matrix <double> normal, VectorOfFloat raux, VectorOfFloat taux)
        {
            var tvec = taux.ToArray().Select(i => (double)i).ToArray();

            var rotationMat = new Mat();

            CvInvoke.Rodrigues(raux, rotationMat);
            var rotationMatrix = new Matrix <double>(rotationMat.Rows, rotationMat.Cols, rotationMat.DataPointer);

            // ???
            Utils.Negotiate(ref rotationMatrix);

            var cameraPosition      = rotationMatrix * new Matrix <double>(tvec);
            var cameraPositionPoint = new MCvPoint3D32f((float)cameraPosition[0, 0], (float)cameraPosition[1, 0], (float)cameraPosition[2, 0]);

            var cameraVector = trackedFeatures3D[0] - cameraPositionPoint;

            Func <double, double> radianToDegree = angle => angle * (180.0 / Math.PI);

            double dotProduct = new double[] { cameraVector.X, cameraVector.Y, cameraVector.Z }.Dot(new[] { normal[0, 0], normal[0, 1], normal[0, 2] });
            double acos       = Math.Acos(dotProduct);
            double anglResult = radianToDegree(acos);

            Console.WriteLine($"Normal: [{normal.Data[0, 0]}, {normal.Data[0, 1]}, {normal.Data[0, 2]}]");
            Console.WriteLine($"Angle: {anglResult}");
            Console.WriteLine($"Dot product: {dotProduct}");

            return(anglResult);
        }
Example #14
        private static void ComputeRotationAndTranslation(VectorOfPoint3D32F trackedFeatures3D, VectorOfKeyPoint trackedFeatures, CameraCalibrationInfo calibrationInfo, out VectorOfFloat raux, out VectorOfFloat taux)
        {
            var rotationVector32F    = new VectorOfFloat();
            var translationVector32F = new VectorOfFloat();
            var rotationVector       = new Mat();
            var translationVector    = new Mat();

            CvInvoke.SolvePnP(trackedFeatures3D, Utils.GetPointsVector(trackedFeatures), calibrationInfo.Intrinsic, calibrationInfo.Distortion, rotationVector, translationVector);

            rotationVector.ConvertTo(rotationVector32F, DepthType.Cv32F);
            translationVector.ConvertTo(translationVector32F, DepthType.Cv32F);

            raux = rotationVector32F;
            taux = translationVector32F;
        }
Example #15
      /// <summary>
      /// Compute the descriptor given the image and the point location
      /// </summary>
      /// <param name="image">The image where the descriptor will be computed from</param>
      /// <param name="mask">The optional mask, can be null if not needed</param>
      /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
      /// <returns>The image features founded on the keypoint location</returns>
      public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
      {
         using (VectorOfFloat descs = new VectorOfFloat())
         {
            GCHandle handle = GCHandle.Alloc(keyPoints, GCHandleType.Pinned);
            CvSURFDetectorComputeDescriptors(ref this, image, mask, handle.AddrOfPinnedObject(), keyPoints.Length, descs);
            handle.Free();

            int n = keyPoints.Length;
            long address = descs.StartAddress.ToInt64();

            ImageFeature[] features = new ImageFeature[n];
            int sizeOfdescriptor = extended == 0 ? 64 : 128;
            for (int i = 0; i < n; i++, address += sizeOfdescriptor * sizeof(float))
            {
               features[i].KeyPoint = keyPoints[i];
               float[] desc = new float[sizeOfdescriptor];
               Marshal.Copy(new IntPtr(address), desc, 0, sizeOfdescriptor);
               features[i].Descriptor = desc;
            }
            return features;
         }
      }
Example #16
        public static void FitEdge(Image <Gray, byte> inputImage, int startRow, int endRow, int startCol, int endCol, bool isTopBottom, out WaferEdgeFit edge)
        {
            edge = new WaferEdgeFit();

            Rectangle origRoi = inputImage.ROI;
            Rectangle sideRoi = new Rectangle(startCol, startRow, endCol - startCol, endRow - startRow);

            bool startFromRight = !isTopBottom && startCol > origRoi.Width / 2 || isTopBottom && startRow > origRoi.Height / 2;

            inputImage.ROI = sideRoi;

            int workingWidth  = isTopBottom ? sideRoi.Height : sideRoi.Width;
            int workingHeight = isTopBottom ? sideRoi.Width : sideRoi.Height;

            using (Image <Gray, float> workImage = new Image <Gray, float>(workingWidth, workingHeight))
            {
                double gradientLimit;

                using (Image <Gray, float> sobelImage =
                           isTopBottom ? inputImage.Sobel(0, 1, 3) : inputImage.Sobel(1, 0, 3))
                {
                    using (Image <Gray, float> nullImage = new Image <Gray, float>(sobelImage.Size))
                    {
                        CvInvoke.AbsDiff(sobelImage, nullImage, sobelImage);
                    }

                    using (Image <Gray, byte> mask = new Image <Gray, byte>(sideRoi.Width, sideRoi.Height))
                    {
                        CvInvoke.Threshold(inputImage, mask, 0, 1, ThresholdType.Binary);
                        MCvScalar gradientMean = new MCvScalar();
                        MCvScalar gradientStd  = new MCvScalar();
                        CvInvoke.MeanStdDev(sobelImage, ref gradientMean, ref gradientStd, mask);
                        double nSigma = 5;
                        gradientLimit = gradientMean.V0 + nSigma * gradientStd.V0;
                    }

                    if (isTopBottom)
                    {
                        CvInvoke.Transpose(sobelImage, workImage);
                    }
                    else
                    {
                        sobelImage.CopyTo(workImage);
                    }
                }

                inputImage.ROI = origRoi;

                List <PointF> edgePoints = new List <PointF>();
                List <float>  fullWidthHalfMaximumVals = new List <float>();
                var           sobelData = workImage.Data;
                int           stride    = 1;

                for (int r = 0; r < workingHeight; r += stride)
                {
                    int approxEdgeCol = 0;
                    if (!startFromRight)
                    {
                        for (int c = 0; c < workingWidth; c++)
                        {
                            var currentValue = sobelData[r, c, 0];
                            if (currentValue > gradientLimit)
                            {
                                approxEdgeCol = c;
                                break;
                            }
                        }
                    }
                    else
                    {
                        for (int c = workingWidth - 1; c > 0; c--)
                        {
                            var currentValue = sobelData[r, c, 0];
                            if (currentValue > gradientLimit)
                            {
                                approxEdgeCol = c;
                                break;
                            }
                        }
                    }

                    int   meanEdgeCol     = 0;
                    float maxValue        = 0;
                    var   currentStartCol = Math.Max(approxEdgeCol - 5, 1);
                    var   currentEndCol   = Math.Min(approxEdgeCol + 5 + 1, workingWidth - 1);
                    for (int c = currentStartCol; c < currentEndCol; c++)
                    {
                        if (sobelData[r, c, 0] > maxValue)
                        {
                            maxValue    = sobelData[r, c, 0];
                            meanEdgeCol = c;
                        }
                    }

                    if (!(maxValue > 0))
                    {
                        continue;
                    }

                    float halfMaxLeftValue  = maxValue;
                    float halfMaxRightValue = maxValue;
                    int   halfMaxLeftCol    = meanEdgeCol;
                    int   halfMaxRightCol   = meanEdgeCol;

                    while (halfMaxLeftValue > maxValue / 2 && halfMaxLeftCol >= 0)
                    {
                        halfMaxLeftCol--;
                        halfMaxLeftValue = sobelData[r, halfMaxLeftCol, 0];
                    }
                    while (halfMaxRightValue > maxValue / 2 && halfMaxRightCol < workingWidth)
                    {
                        halfMaxRightCol++;
                        halfMaxRightValue = sobelData[r, halfMaxRightCol, 0];
                    }

                    float fwhm = halfMaxRightCol - halfMaxLeftCol;

                    //Interpolation
                    float dPixel = (maxValue / 2 - halfMaxLeftValue) /
                                   (sobelData[r, halfMaxLeftCol + 1, 0] - halfMaxLeftValue);
                    fwhm  -= dPixel;
                    dPixel = (maxValue / 2 - halfMaxRightValue) /
                             (sobelData[r, halfMaxRightCol - 1, 0] - halfMaxRightValue);
                    fwhm -= dPixel;

                    fullWidthHalfMaximumVals.Add(fwhm);


                    edgePoints.Add(isTopBottom
                        ? new PointF(r + sideRoi.X, meanEdgeCol + sideRoi.Y)
                        : new PointF(r + sideRoi.Y, meanEdgeCol + sideRoi.X));
                }

                VectorOfPointF yvector    = new VectorOfPointF();
                VectorOfFloat  parameters = new VectorOfFloat();
                yvector.Push(edgePoints.ToArray());
                CvInvoke.FitLine(yvector, parameters, DistType.L12, 0, 0.01, 0.01);

                float vx = parameters[0];
                float vy = parameters[1];
                float x0 = parameters[2];
                float y0 = parameters[3];

                edge.FitParams = parameters;
                edge.Slope     = vy / vx;
                edge.Intercept = y0 - edge.Slope * x0;

                fullWidthHalfMaximumVals.Sort();
                int length = fullWidthHalfMaximumVals.Count;

                if (length % 2 == 0)
                {
                    edge.LineSpread = (fullWidthHalfMaximumVals[length / 2 - 1] + fullWidthHalfMaximumVals[length / 2]) / 2;
                }
                else
                {
                    edge.LineSpread = fullWidthHalfMaximumVals[length / 2];
                }

                if (!isTopBottom)
                {
                    edge.InvertedRepresentation = true;
                }
            }
        }
Example #17
        /// <summary>
        /// Triangulates the tracked/bootstrap keypoint correspondences with the given projection
        /// matrices and validates the reconstruction by reprojecting the 3D points.
        /// </summary>
        /// <param name="calibrationInfo">The camera calibration (intrinsic and distortion parameters).</param>
        /// <param name="trackedFeaturesKp">Keypoints tracked in the current frame.</param>
        /// <param name="bootstrapKp">The corresponding keypoints from the bootstrap frame.</param>
        /// <param name="p">The projection matrix of the first camera.</param>
        /// <param name="p1">The projection matrix of the second camera.</param>
        /// <returns>The triangulation result, including the reprojection error and the filtered keypoint sets.</returns>
        public static TriangulateAndCheckReprojResult TriangulateAndCheckReproj(CameraCalibrationInfo calibrationInfo, VectorOfKeyPoint trackedFeaturesKp, VectorOfKeyPoint bootstrapKp, Matrix <double> p, Matrix <double> p1)
        {
            var result = new TriangulateAndCheckReprojResult();

            var trackedFeaturesPoints = Utils.GetPointsVector(trackedFeaturesKp);
            var bootstrapPoints       = Utils.GetPointsVector(bootstrapKp);

            //undistort
            var normalizedTrackedPts   = new VectorOfPointF();
            var normalizedBootstrapPts = new VectorOfPointF();

            CvInvoke.UndistortPoints(trackedFeaturesPoints, normalizedTrackedPts, calibrationInfo.Intrinsic, calibrationInfo.Distortion);
            CvInvoke.UndistortPoints(bootstrapPoints, normalizedBootstrapPts, calibrationInfo.Intrinsic, calibrationInfo.Distortion);

            //triangulate
            var pt3Dh = new Mat();

            CvInvoke.TriangulatePoints(p, p1, normalizedBootstrapPts, normalizedTrackedPts, pt3Dh);
            var pt3DhMatrix = new Matrix <float>(pt3Dh.Rows, pt3Dh.Cols, pt3Dh.DataPointer);

            var pt3DMat = new Mat();

            CvInvoke.ConvertPointsFromHomogeneous(pt3DhMatrix.Transpose(), pt3DMat);
            var pt3D = new Matrix <float>(pt3DMat.Rows, pt3DMat.Cols * pt3DMat.NumberOfChannels, pt3DMat.DataPointer);

            var statusArray = new byte[pt3D.Rows];

            for (int i = 0; i < pt3D.Rows; i++)
            {
                statusArray[i] = (pt3D[i, 2] > 0) ? (byte)1 : (byte)0;
            }

            var status = new VectorOfByte(statusArray);
            int count  = CvInvoke.CountNonZero(status);

            double percentage = count / (double)pt3D.Rows;

            if (percentage > 0.75)
            {
                //calculate reprojection
                var rvec = new VectorOfFloat(new float[] { 0, 0, 0 });
                var tvec = new VectorOfFloat(new float[] { 0, 0, 0 });
                var reprojectedPtSet1 = new VectorOfPointF();
                CvInvoke.ProjectPoints(pt3D, rvec, tvec, calibrationInfo.Intrinsic, calibrationInfo.Distortion,
                                       reprojectedPtSet1);

                double reprojErr = CvInvoke.Norm(reprojectedPtSet1, bootstrapPoints) / bootstrapPoints.Size;
                if (reprojErr < 5)
                {
                    statusArray = new byte[bootstrapPoints.Size];
                    for (int i = 0; i < bootstrapPoints.Size; ++i)
                    {
                        var pointsDiff    = Utils.SubstarctPoints(bootstrapPoints[i], reprojectedPtSet1[i]);
                        var vectorOfPoint = new VectorOfPointF(new[] { pointsDiff });
                        statusArray[i] = CvInvoke.Norm(vectorOfPoint) < 20.0 ? (byte)1 : (byte)0;
                    }

                    status = new VectorOfByte(statusArray);

                    var trackedFeatures3D = new VectorOfPoint3D32F(Utils.Get3dPointsArray(pt3D));

                    Utils.KeepVectorsByStatus(ref trackedFeaturesKp, ref trackedFeatures3D, status);

                    result.Error                     = reprojErr;
                    result.TrackedFeatures3D         = new VectorOfPoint3D32F(trackedFeatures3D.ToArray());
                    result.FilteredTrackedFeaturesKp = new VectorOfKeyPoint(trackedFeaturesKp.ToArray());
                    result.FilteredBootstrapKp       = new VectorOfKeyPoint(bootstrapKp.ToArray());
                    result.Result                    = true;
                }

                rvec.Dispose();
                tvec.Dispose();
                reprojectedPtSet1.Dispose();
            }

            normalizedTrackedPts.Dispose();
            normalizedBootstrapPts.Dispose();
            pt3Dh.Dispose();
            pt3DhMatrix.Dispose();
            pt3DMat.Dispose();
            pt3D.Dispose();
            status.Dispose();

            return(result);
        }
        public bool BootstrapTrack(Mat img)
        {
            #region Trace

            Trace.WriteLine($"BootstrapTrack iteration ({ _trackedFeatures.Size}).");
            Trace.WriteLine("--------------------------");
            Trace.Indent();

            #endregion

            //Track detected features
            if (_prevGray.IsEmpty)
            {
                const string error = "Previous frame is empty. Bootstrap first.";
                Trace.TraceError(error);
                throw new Exception(error);
            }

            if (img.IsEmpty || !img.IsEmpty && img.NumberOfChannels != 3)
            {
                const string error = "Image is not appropriate (Empty or wrong number of channels).";
                Trace.TraceError(error);
                throw new Exception(error);
            }

            var corners = new VectorOfPointF();
            var status  = new VectorOfByte();
            var errors  = new VectorOfFloat();

            var currGray = new Mat();
            CvInvoke.CvtColor(img, currGray, ColorConversion.Bgr2Gray);

            CvInvoke.CalcOpticalFlowPyrLK(_prevGray, currGray, Utils.GetPointsVector(_trackedFeatures), corners,
                                          status, errors, new Size(11, 11), 3, new MCvTermCriteria(20, 0.03));
            currGray.CopyTo(_prevGray);

            #region Trace

            Trace.WriteLine($"Tracked first point: ({_trackedFeatures[0].Point.X}, {_trackedFeatures[0].Point.Y}) / Found first corner = ({corners[0].X}, {corners[0].Y})");
            Trace.WriteLine($"Tracked second point: ({_trackedFeatures[1].Point.X}, {_trackedFeatures[1].Point.Y}) / Found second corner = ({corners[1].X}, {corners[1].Y})");
            Trace.WriteLine($"Tracked third point: ({_trackedFeatures[2].Point.X}, {_trackedFeatures[2].Point.Y}) / Found third corner = ({corners[2].X}, {corners[2].Y})");

            #endregion

            for (int j = 0; j < corners.Size; j++)
            {
                if (status[j] == 1)
                {
                    var p1 = new Point((int)_trackedFeatures[j].Point.X, (int)_trackedFeatures[j].Point.Y);
                    var p2 = new Point((int)corners[j].X, (int)corners[j].Y);

                    CvInvoke.Line(img, p1, p2, new MCvScalar(120, 10, 20));
                }
            }

            if (CvInvoke.CountNonZero(status) < status.Size * 0.8)
            {
                Trace.TraceError("Tracking failed.");
                throw new Exception("Tracking failed.");
            }

            _trackedFeatures = Utils.GetKeyPointsVector(corners);

            Utils.KeepVectorsByStatus(ref _trackedFeatures, ref _bootstrapKp, status);

            Trace.WriteLine($"{_trackedFeatures.Size} features survived optical flow.");

            if (_trackedFeatures.Size != _bootstrapKp.Size)
            {
                const string error = "Tracked features vector size is not equal to bootstrapped one.";
                Trace.TraceError(error);
                throw new Exception(error);
            }

            #region Trace

            Trace.WriteLine($"Bootstrap first point: ({_bootstrapKp[0].Point.X}, {_bootstrapKp[0].Point.Y}) / Found first corner = ({corners[0].X}, {corners[0].Y})");
            Trace.WriteLine($"Bootstrap second point: ({_bootstrapKp[1].Point.X}, {_bootstrapKp[1].Point.Y}) / Found second corner = ({corners[1].X}, {corners[1].Y})");
            Trace.WriteLine($"Bootstrap third point: ({_bootstrapKp[2].Point.X}, {_bootstrapKp[2].Point.Y}) / Found third corner = ({corners[2].X}, {corners[2].Y})");

            #endregion

            //verify features with a homography
            var inlierMask = new VectorOfByte();
            var homography = new Mat();
            if (_trackedFeatures.Size > 4)
            {
                CvInvoke.FindHomography(Utils.GetPointsVector(_trackedFeatures), Utils.GetPointsVector(_bootstrapKp), homography, HomographyMethod.Ransac, RansacThreshold, inlierMask);
            }

            int inliersNum = CvInvoke.CountNonZero(inlierMask);

            var m = new Matrix <double>(homography.Rows, homography.Cols, homography.DataPointer);

            m.Dispose();

            Trace.WriteLine($"{inliersNum} features survived homography.");

            if (inliersNum != _trackedFeatures.Size && inliersNum >= 4 && !homography.IsEmpty)
            {
                Utils.KeepVectorsByStatus(ref _trackedFeatures, ref _bootstrapKp, inlierMask);
            }
            else if (inliersNum < MinInliers)
            {
                Trace.TraceError("Not enough features survived homography.");
                return(false);
            }

            var bootstrapKpOrig     = new VectorOfKeyPoint(_bootstrapKp.ToArray());
            var trackedFeaturesOrig = new VectorOfKeyPoint(_trackedFeatures.ToArray());

            //Attempt at 3D reconstruction (triangulation) if conditions are right
            var rigidT = CvInvoke.EstimateRigidTransform(Utils.GetPointsVector(_trackedFeatures).ToArray(), Utils.GetPointsVector(_bootstrapKp).ToArray(), false);
            var matrix = new Matrix <double>(rigidT.Rows, rigidT.Cols, rigidT.DataPointer);

            #region Trace

            Trace.WriteLine($"Track first point: ({_trackedFeatures[0].Point.X}, {_trackedFeatures[0].Point.Y}) / Bootstrap first point = ({_bootstrapKp[0].Point.X}, {_bootstrapKp[0].Point.Y})");
            Trace.WriteLine($"Track 10th point: ({_trackedFeatures[10].Point.X}, {_trackedFeatures[10].Point.Y}) / Bootstrap 10th point = ({_bootstrapKp[10].Point.X}, {_bootstrapKp[10].Point.Y})");
            Trace.WriteLine($"Track last point: ({_trackedFeatures[_trackedFeatures.Size - 1].Point.X}, {_trackedFeatures[_trackedFeatures.Size - 1].Point.Y}" +
                            $") / Bootstrap third point = ({_bootstrapKp[_bootstrapKp.Size - 1].Point.X}, {_bootstrapKp[_bootstrapKp.Size - 1].Point.Y})");

            Trace.WriteLine($"Rigid matrix: [ [ {matrix[0, 0]}, {matrix[0, 1]}, {matrix[0, 2]} ] [ {matrix[1, 0]}, {matrix[1, 1]}, {matrix[1, 2]} ] ].");
            Trace.WriteLine($"Rigid: {CvInvoke.Norm(matrix.GetCol(2))}");

            #endregion

            if (CvInvoke.Norm(matrix.GetCol(2)) > 100)
            {
                //camera motion is sufficient
                var p1 = new Matrix <double>(3, 4);
                p1.SetIdentity();
                var result = OpenCvUtilities.CameraPoseAndTriangulationFromFundamental(_calibrationInfo, _trackedFeatures, _bootstrapKp, p1);

                _trackedFeatures = result.FilteredTrackedFeaturesKp;
                _bootstrapKp     = result.FilteredBootstrapKp;

                if (result.Result)
                {
                    _trackedFeatures3D = result.TrackedFeatures3D;
                    var trackedFeatures3Dm = Utils.Get3dPointsMat(_trackedFeatures3D);

                    var eigenvectors = new Mat();
                    var mean         = new Mat();
                    CvInvoke.PCACompute(trackedFeatures3Dm, mean, eigenvectors);
                    var eigenvectorsMatrix = new Matrix <double>(eigenvectors.Rows, eigenvectors.Cols, eigenvectors.DataPointer);

                    int numInliers    = 0;
                    var normalOfPlane = eigenvectorsMatrix.GetRow(2).ToUMat().ToMat(AccessType.Fast);
                    //eigenvectors.GetRow(2).CopyTo(normalOfPlane);
                    CvInvoke.Normalize(normalOfPlane, normalOfPlane);

                    var normalOfPlaneMatrix = new Matrix <double>(normalOfPlane.Rows, normalOfPlane.Cols, normalOfPlane.DataPointer);
                    Trace.WriteLine($"normal of plane: {normalOfPlaneMatrix[0, 0]}");
                    //cv::Vec3d x0 = pca.mean;
                    //double p_to_plane_thresh = sqrt(pca.eigenvalues.at<double>(2));
                }

                return(true);
            }

            #region Trace

            Trace.Unindent();
            Trace.WriteLine("--------------------------");

            #endregion

            return(false);
        }
        public bool Track(Mat img)
        {
            //Track detected features
            if (_prevGray.IsEmpty)
            {
                Trace.WriteLine("Can't track: empty prev frame."); return(false);
            }

            var corners = new VectorOfPointF();
            var status  = new VectorOfByte();
            var errors  = new VectorOfFloat();

            CvInvoke.CvtColor(img, _currGray, ColorConversion.Bgr2Gray);

            CvInvoke.CalcOpticalFlowPyrLK(_prevGray, _currGray, Utils.GetPointsVector(_trackedFeatures), corners, status, errors, new Size(11, 11), 0, new MCvTermCriteria(100));
            _currGray.CopyTo(_prevGray);

            if (CvInvoke.CountNonZero(status) < status.Size * 0.8)
            {
                Trace.WriteLine("Tracking failed.");
                _bootstrapping = false;
                _canCalcMvm    = false;
                return(false);
            }

            _trackedFeatures = Utils.GetKeyPointsVector(corners);

            Utils.KeepVectorsByStatus(ref _trackedFeatures, ref _trackedFeatures3D, status);

            Console.WriteLine("tracking.");

            _canCalcMvm = (_trackedFeatures.Size >= MinInliers);

            if (_canCalcMvm)
            {
                //Perform camera pose estimation for AR
                var rvec = new Mat();
                var tvec = new Mat();

                CvInvoke.SolvePnP(_trackedFeatures3D, Utils.GetPointsVector(_trackedFeatures), _calibrationInfo.Intrinsic, _calibrationInfo.Distortion, _raux, _taux, !_raux.IsEmpty);

                _raux.ConvertTo(rvec, DepthType.Cv32F);
                _taux.ConvertTo(tvec, DepthType.Cv64F);

                var pts = new MCvPoint3D32f[] {
                    new MCvPoint3D32f(0.01f, 0, 0),
                    new MCvPoint3D32f(0, 0.01f, 0),
                    new MCvPoint3D32f(0, 0, 0.01f)
                };
                var axis = new VectorOfPoint3D32F(pts);

                var imgPoints = new VectorOfPointF();
                CvInvoke.ProjectPoints(axis, _raux, _taux, _calibrationInfo.Intrinsic, _calibrationInfo.Distortion, imgPoints);

                var centerPoint = new Point((int)_trackedFeatures[0].Point.X, (int)_trackedFeatures[0].Point.Y);

                var xPoint = new Point((int)imgPoints[0].X, (int)imgPoints[0].Y);
                var yPoint = new Point((int)imgPoints[1].X, (int)imgPoints[1].Y);
                var zPoint = new Point((int)imgPoints[2].X, (int)imgPoints[2].Y);

                CvInvoke.Line(img, centerPoint, xPoint, new MCvScalar(255, 0, 0), 5); //blue x-ax
                CvInvoke.Line(img, centerPoint, yPoint, new MCvScalar(0, 255, 0), 5); //green y-ax
                CvInvoke.Line(img, centerPoint, zPoint, new MCvScalar(0, 0, 255), 5); //red z-ax

                var rot = new Mat(3, 3, DepthType.Cv32F, 3);

                CvInvoke.Rodrigues(rvec, rot);
            }

            return(true);
        }
Example #20
        /// <summary>
        /// Compute pattern pose using PnP algorithm
        /// </summary>
        /// <param name="points3D">The 3D points of the pattern.</param>
        /// <param name="points">The corresponding 2D image points.</param>
        /// <param name="calibration">The camera calibration (intrinsic and distortion parameters).</param>
        /// <param name="raux">Output rotation vector (32-bit float).</param>
        /// <param name="taux">Output translation vector (32-bit float).</param>
        /// <returns>The pattern pose, inverted so that it describes the marker pose w.r.t. the camera.</returns>
        public Transformation ComputePose(VectorOfPoint3D32F points3D, VectorOfPointF points, CameraCalibrationInfo calibration, out VectorOfFloat raux, out VectorOfFloat taux)
        {
            var pose3D = new Transformation();

            var rotationVector32F    = new VectorOfFloat();
            var translationVector32F = new VectorOfFloat();
            var rotationVector       = new Mat();
            var translationVector    = new Mat();

            CvInvoke.SolvePnP(points3D, points, calibration.Intrinsic, calibration.Distortion, rotationVector, translationVector);

            rotationVector.ConvertTo(rotationVector32F, DepthType.Cv32F);
            translationVector.ConvertTo(translationVector32F, DepthType.Cv32F);

            raux = rotationVector32F;
            taux = translationVector32F;

            var rotationMat = new Mat();

            CvInvoke.Rodrigues(rotationVector32F, rotationMat);
            var rotationMatrix = new Matrix <double>(rotationMat.Rows, rotationMat.Cols, rotationMat.DataPointer);

            // Copy to transformation matrix
            for (int col = 0; col < 3; col++)
            {
                for (int row = 0; row < 3; row++)
                {
                    pose3D.SetRotationMatrixValue(row, col, (float)rotationMatrix[row, col]); // Copy rotation component
                }
                pose3D.SetTranslationVectorValue(col, translationVector32F[col]);             // Copy translation component
            }

            // Since solvePnP finds camera location, w.r.t to marker pose, to get marker pose w.r.t to the camera we invert it.
            return(pose3D.GetInverted());
        }
Example #21
 public static extern int search_organizedNeighbor_xyz_nearestKSearch(IntPtr ptr, PointXYZ point, int k, VectorOfInt k_indices, VectorOfFloat k_sqr_distances);
Example #22
        public void ParseTestVideo(string testFile)
        {
            //Capture Image
            if(string.IsNullOrWhiteSpace(OutputPath))
                OutputPath = _defaultTestVideoPath;

            List<string> grayImgList = CatchImages(testFile, 0, OutputPath);

            //Get the Optical flow of L-K feature
            Image<Gray, Byte> mask = new Image<Gray, Byte>(grayImgList.First());
            Image<Gray, Byte> grayImage1 = new Image<Gray, Byte>(grayImgList.First());

            Image<Gray, Byte> grayImage2 = new Image<Gray, Byte>(grayImgList.Last());
            EmguType features1 = SURFFeatureDetect(grayImage1, mask);

            VectorOfPointF vp1 = new VectorOfPointF(features1.KeyPoints.ToArray().Select(x => x.Point).ToArray());
            VectorOfPointF vp2 = new VectorOfPointF(vp1.Size);
            VectorOfByte vstatus = new VectorOfByte(vp1.Size);
            VectorOfFloat verr = new VectorOfFloat(vp1.Size);
            Size winsize = new Size(grayImage1.Width, grayImage1.Height);
            int maxLevel = 1; // if 0, winsize is not used
            MCvTermCriteria criteria = new MCvTermCriteria(10, 1);

            try
            {
                //GFTTDetector gd = new GFTTDetector();
                //MKeyPoint[] gdkp = gd.Detect(grayImage1);
                //VectorOfPointF gdvp1 = new VectorOfPointF(gdkp.Select(x => x.Point).ToArray());

                CvInvoke.CalcOpticalFlowPyrLK(grayImage1, grayImage2, vp1, vp2, vstatus, verr, winsize, maxLevel, criteria);

                Utils.WriteJsonFile(vp1, grayImgList.First() + "p.dat");
                Utils.WriteJsonFile(vp2, grayImgList.Last() + "p.dat");
            }
            catch (Exception e)
            {
                _log.Debug("error: " + e.Message);
            }

            //List<string> grayImgList = CatchImages(testFile, 0, OutputPath);
            /*
            //Get SIFT Feature
            foreach (string grayImgPath in grayImgList)
            {

                //Image<Gray, float> grayImage = new Image<Gray, float>(grayImgPath);
                //List<Feature> features = SiftFeatureDetect(grayImage);
                Image<Gray, Byte> grayImage = new Image<Gray, Byte>(grayImgPath);
                //List<SiftFeature> features = SiftFeatureDetect(image: grayImage, showDetail: true);
                //Write features To File
                EmguType features = SURFFeatureDetect(grayImage);

                Utils.WriteJsonFile(features, grayImgPath + ".dat");

            }
            */
            _parseSuccess = true;
        }
Example #23
 public override int NearestKSearch(PointXYZ point, int k, VectorOfInt k_indices, VectorOfFloat k_sqr_distances)
 {
     return(Invoke.search_organizedNeighbor_xyz_nearestKSearch(_ptr, point, k, k_indices, k_sqr_distances));
 }
Example #24
 public abstract int NearestKSearch(PointT point, int k, VectorOfInt k_indices, VectorOfFloat k_sqr_distances);
        public void Bootstrap_Track_Logic_Test()
        {
            var capture = new Capture($@"{TestCaseProjectPath}\Videos\cube2.avi");

            //var capture = new Capture(@"C:\Users\zakharov\Documents\Repos\Mine\Rc\src\RubiksCube.OpenCV.TestCase\Videos\cube2.avi");
            for (int i = 0; i < 40; i++)
            {
                capture.QueryFrame();
            }

            var prevGray = capture.QueryFrame();

            CvInvoke.CvtColor(prevGray, prevGray, ColorConversion.Bgr2Gray);

            var currentGray = capture.QueryFrame();

            CvInvoke.CvtColor(currentGray, currentGray, ColorConversion.Bgr2Gray);

            var bootstrapKp = new VectorOfKeyPoint();

            new ORBDetector().DetectRaw(prevGray, bootstrapKp);

            var trackedFeatures = new VectorOfKeyPoint(bootstrapKp.ToArray());

            //-------------------------------------------------------------------------

            var pointComparer = Comparer <PointF> .Create((p1, p2) => Math.Abs(p1.X - p2.X) < 0.0001f && Math.Abs(p1.Y - p2.Y) < 0.0001f? 0 : 1);

            var point3DComparer = Comparer <MCvPoint3D32f> .Create((p1, p2) => Math.Abs(p1.X - p2.X) < 0.0001f && Math.Abs(p1.Y - p2.Y) < 0.0001f && Math.Abs(p1.Z - p2.Z) < 0.0001f? 0 : 1);

            var matrixComparer = Comparer <double> .Create((x, y) => Math.Abs(x - y) < 0.0001f? 0 : 1);

            for (int i = 41; i <= 95; i++)
            {
                var bootstrapPointsBeforeOpticalFlowCplusPlus = GetPoints($"I = {i}txt - Bootstrap points before optical flow.txt");
                var trackedPointsBeforeOpticalFlowCplusPlus   = GetPoints($"I = {i}txt - Tracked points before optical flow.txt");
                var bootstrapPointsAfterOpticalFlowCplusPlus  = GetPoints($"I = {i}txt - Bootstrap points after optical flow.txt");
                var trackedPointsAfterOpticalFlowCplusPlus    = GetPoints($"I = {i}txt - Tracked points after optical flow.txt");
                var bootstrapPointsAfterHomographyCplusPlus   = GetPoints($"I = {i}txt - Bootstrap points after homography.txt");
                var trackedPointsAfterHomographyCplusPlus     = GetPoints($"I = {i}txt - Tracked points after homography.txt");

                var homographyCplusPlus     = Getmatrix3X3($"I = {i}txt - Homography.txt");
                var homographyMaskCplusPlus = GetByteVector($"I = {i}txt - Homography mask.txt");

                var corners = new VectorOfPointF();
                var status  = new VectorOfByte();
                var errors  = new VectorOfFloat();

                CollectionAssert.AreEqual(Utils.GetPointsVector(bootstrapKp).ToArray(), bootstrapPointsBeforeOpticalFlowCplusPlus.ToArray(), pointComparer);
                CollectionAssert.AreEqual(Utils.GetPointsVector(trackedFeatures).ToArray(), trackedPointsBeforeOpticalFlowCplusPlus.ToArray(), pointComparer);

                CvInvoke.CalcOpticalFlowPyrLK(prevGray, currentGray, Utils.GetPointsVector(trackedFeatures), corners,
                                              status, errors, new Size(11, 11), 3, new MCvTermCriteria(30, 0.01));
                currentGray.CopyTo(prevGray);

                if (CvInvoke.CountNonZero(status) < status.Size * 0.8)
                {
                    throw new Exception("Tracking failed.");
                }

                trackedFeatures = Utils.GetKeyPointsVector(corners);
                Utils.KeepVectorsByStatus(ref trackedFeatures, ref bootstrapKp, status);

                CollectionAssert.AreEqual(Utils.GetPointsVector(bootstrapKp).ToArray(), bootstrapPointsAfterOpticalFlowCplusPlus.ToArray(), pointComparer);
                CollectionAssert.AreEqual(Utils.GetPointsVector(trackedFeatures).ToArray(), trackedPointsAfterOpticalFlowCplusPlus.ToArray(), pointComparer);

                if (trackedFeatures.Size != bootstrapKp.Size)
                {
                    const string error = "Tracked features vector size is not equal to bootstrapped one.";
                    throw new Exception(error);
                }

                //verify features with a homography
                var inlierMask = new VectorOfByte();
                var homography = new Mat();
                if (trackedFeatures.Size > 4)
                {
                    CvInvoke.FindHomography(Utils.GetPointsVector(trackedFeatures), Utils.GetPointsVector(bootstrapKp), homography, HomographyMethod.Ransac, 0.99,
                                            inlierMask);
                }

                var homographyMatrix = new Matrix <double>(homography.Rows, homography.Cols, homography.DataPointer);
                CollectionAssert.AreEqual(homographyMatrix.Data, homographyCplusPlus.Data, matrixComparer);

                int inliersNum = CvInvoke.CountNonZero(inlierMask);
                CollectionAssert.AreEqual(inlierMask.ToArray(), homographyMaskCplusPlus.ToArray());

                if (inliersNum != trackedFeatures.Size && inliersNum >= 4 && !homography.IsEmpty)
                {
                    Utils.KeepVectorsByStatus(ref trackedFeatures, ref bootstrapKp, inlierMask);
                }
                else if (inliersNum < 10)
                {
                    throw new Exception("Not enough features survived homography.");
                }

                CollectionAssert.AreEqual(Utils.GetPointsVector(bootstrapKp).ToArray(), bootstrapPointsAfterHomographyCplusPlus.ToArray(), pointComparer);
                CollectionAssert.AreEqual(Utils.GetPointsVector(trackedFeatures).ToArray(), trackedPointsAfterHomographyCplusPlus.ToArray(), pointComparer);

                var bootstrapKpOrig     = new VectorOfKeyPoint(bootstrapKp.ToArray());
                var trackedFeaturesOrig = new VectorOfKeyPoint(trackedFeatures.ToArray());

                //TODO: Compare all these to c++ version
                //Attempt at 3D reconstruction (triangulation) if conditions are right
                var rigidT = CvInvoke.EstimateRigidTransform(Utils.GetPointsVector(trackedFeatures).ToArray(), Utils.GetPointsVector(bootstrapKp).ToArray(), false);
                var matrix = new Matrix <double>(rigidT.Rows, rigidT.Cols, rigidT.DataPointer);

                if (CvInvoke.Norm(matrix.GetCol(2)) > 100)
                {
                    var points3DCplusPlus      = GetPoints3d($"I = {i}txt - 3d points.txt");
                    var eigenvectorsCplusPlus  = Getmatrix3X3($"I = {i}txt - eigenvectors.txt");
                    var normalOfPlaneCplusPlus = GetDoubleArray($"I = {i}txt - normal of plane.txt");

                    //camera motion is sufficient
                    var p1Init = new Matrix <double>(3, 4);
                    p1Init.SetIdentity();
                    var result = OpenCvUtilities.CameraPoseAndTriangulationFromFundamental(_calibration, trackedFeatures, bootstrapKp, p1Init);

                    trackedFeatures = result.FilteredTrackedFeaturesKp;
                    bootstrapKp     = result.FilteredBootstrapKp;

                    if (result.Result)
                    {
                        double pToPlaneTrashCplusPlus = GetDouble($"I = {i}txt - p_to_plane_thresh.txt");
                        int    numInliersCplusPlus    = GetInt($"I = {i}txt - num inliers.txt");
                        var    statusArrCplusPlus     = GetByteVector($"I = {i}txt - status arr.txt");

                        var trackedFeatures3D = result.TrackedFeatures3D;

                        CollectionAssert.AreEqual(trackedFeatures3D.ToArray(), points3DCplusPlus.ToArray(), point3DComparer);

                        //var trackedFeatures3Dm = Utils.Get3dPointsMat(trackedFeatures3D);
                        var tf3D = new double[trackedFeatures3D.Size, 3];
                        var trackedFeatures3Dm = new Matrix <double>(trackedFeatures3D.Size, 3);
                        for (int k = 0; k < trackedFeatures3D.Size; k++)
                        {
                            trackedFeatures3Dm[k, 0] = trackedFeatures3D[k].X;
                            trackedFeatures3Dm[k, 1] = trackedFeatures3D[k].Y;
                            trackedFeatures3Dm[k, 2] = trackedFeatures3D[k].Z;

                            tf3D[k, 0] = trackedFeatures3D[k].X;
                            tf3D[k, 1] = trackedFeatures3D[k].Y;
                            tf3D[k, 2] = trackedFeatures3D[k].Z;
                        }

                        var eigenvectors = new Mat();
                        var mean         = new Mat();
                        CvInvoke.PCACompute(trackedFeatures3Dm, mean, eigenvectors);
                        var eigenvectorsMatrix = new Matrix <double>(eigenvectors.Rows, eigenvectors.Cols, eigenvectors.DataPointer);

                        CollectionAssert.AreEqual(eigenvectorsMatrix.Data, eigenvectorsCplusPlus.Data, matrixComparer);

                        var method = PrincipalComponentMethod.Center;
                        var pca    = new PrincipalComponentAnalysis(method);
                        pca.Learn(tf3D.ToJagged());

                        var meanMatrix = new Matrix <double>(mean.Rows, mean.Cols, mean.DataPointer);
                        CollectionAssert.AreEqual(meanMatrix.Data.ToJagged()[0], pca.Means, matrixComparer);

                        int numInliers    = 0;
                        var normalOfPlane = eigenvectorsMatrix.GetRow(2).ToUMat().ToMat(AccessType.Fast);
                        CvInvoke.Normalize(normalOfPlane, normalOfPlane);

                        var normalOfPlaneMatrix = new Matrix <double>(normalOfPlane.Rows, normalOfPlane.Cols, normalOfPlane.DataPointer);
                        var normalOfPlaneArray  = new[] { normalOfPlaneMatrix[0, 0], normalOfPlaneMatrix[0, 1], normalOfPlaneMatrix[0, 2] };

                        CollectionAssert.AreEqual(normalOfPlaneArray, normalOfPlaneCplusPlus, matrixComparer);

                        double pToPlaneThresh = Math.Sqrt(pca.Eigenvalues.ElementAt(2));

                        Assert.AreEqual(pToPlaneThreshCplusPlus, pToPlaneThresh, 0.0001);

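                        // Count the points whose distance to the plane (projection of the mean-centred
                        // point onto the normal) is below the threshold; these are the coplanar inliers.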
                        var statusArray = new byte[trackedFeatures3D.Size];
                        for (int k = 0; k < trackedFeatures3D.Size; k++)
                        {
                            var t1 = new double[] { trackedFeatures3D[k].X, trackedFeatures3D[k].Y, trackedFeatures3D[k].Z };
                            var t2 = t1.Subtract(pca.Means);
                            var w  = new Matrix <double>(new[, ] {
                                { t2[0], t2[1], t2[2] }
                            });
                            double d = Math.Abs(normalOfPlane.Dot(w));
                            if (d < pToPlaneThresh)
                            {
                                numInliers++;
                                statusArray[k] = 1;
                            }
                        }

                        Assert.AreEqual(numInliersCplusPlus, numInliers);

                        var statusVector = new VectorOfByte(statusArray);
                        CollectionAssert.AreEqual(statusArrCplusPlus.ToArray(), statusVector.ToArray());

                        bool bootstrapping = numInliers / (double)trackedFeatures3D.Size < 0.75;
                        if (!bootstrapping)
                        {
                            //enough features are coplanar, keep them and flatten them on the XY plane
                            Utils.KeepVectorsByStatus(ref trackedFeatures, ref trackedFeatures3D, statusVector);

                            //the PCA has the major axes of the plane
                            var projected = new Mat();
                            CvInvoke.PCAProject(trackedFeatures3Dm, mean, eigenvectors, projected);
                            var projectedMatrix = new Matrix <double>(projected.Rows, projected.Cols, projected.DataPointer);
                            projectedMatrix.GetCol(2).SetValue(0);
                            projectedMatrix.CopyTo(trackedFeatures3Dm);
                        }
                        else
                        {
                            bootstrapKp     = bootstrapKpOrig;
                            trackedFeatures = trackedFeaturesOrig;
                        }
                    }
                }

                currentGray = capture.QueryFrame();
                CvInvoke.CvtColor(currentGray, currentGray, ColorConversion.Bgr2Gray);
            }
        }
예제 #26
0
 /// <summary>
 /// Computes the HOG descriptors for the given image
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="winStride">Window stride. Must be a multiple of block stride. Use Size.Empty for default</param>
 /// <param name="padding">Padding. Use Size.Empty for default</param>
 /// <param name="locations">Locations for the computation. Can be null if not needed</param>
 /// <returns>The descriptor vector</returns>
 public float[] Compute(IInputArray image, Size winStride = new Size(), Size padding = new Size(),
    Point[] locations = null)
 {
    using (VectorOfFloat desc = new VectorOfFloat())
    using (InputArray iaImage = image.GetInputArray())
    {
       if (locations == null)
       {
          CvInvoke.cveHOGDescriptorCompute(_ptr, iaImage, desc, ref winStride, ref padding, IntPtr.Zero);
       }
       else
       {
          using (VectorOfPoint vp = new VectorOfPoint(locations))
          {
             CvInvoke.cveHOGDescriptorCompute(_ptr, iaImage, desc, ref winStride, ref padding, vp);
          }
       }
       return desc.ToArray();
    }
 }
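A minimal usage sketch for the overload above; the file name and the 8x8 window stride are illustrative assumptions, not taken from the original source:

 using (Mat img = CvInvoke.Imread("pedestrian.jpg", ImreadModes.Color))
 using (HOGDescriptor hog = new HOGDescriptor())
 {
    // Compute one descriptor vector over the whole image with an 8x8 window stride.
    float[] descriptor = hog.Compute(img, new Size(8, 8), Size.Empty, null);
    Console.WriteLine("Descriptor length: " + descriptor.Length);
 }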
예제 #27
0
 /// <summary>
 /// Set the SVM detector 
 /// </summary>
 /// <param name="detector">The SVM detector</param>
 public void SetSVMDetector(float[] detector)
 {
    using (VectorOfFloat vec = new VectorOfFloat(detector))
    {
       CvInvoke.cveHOGSetSVMDetector(_ptr, vec);
    }
 }
예제 #28
0
 public static extern int search_kdtree_xyz_nearestKSearch(IntPtr ptr, PointXYZ point, int k, VectorOfInt k_indices, VectorOfFloat k_sqr_distances);
예제 #29
0
 /// <summary>
 /// Performs non maximum suppression given boxes and corresponding scores.
 /// </summary>
 /// <param name="bboxes">A set of bounding boxes to apply NMS.</param>
 /// <param name="scores">A set of corresponding confidences.</param>
 /// <param name="scoreThreshold">A threshold used to filter boxes by score.</param>
 /// <param name="nmsThreshold">A threshold used in non maximum suppression.</param>
 /// <param name="indices">The kept indices of bboxes after NMS.</param>
 /// <param name="eta">A coefficient in adaptive threshold</param>
 /// <param name="topK">If &gt;0, keep at most top_k picked indices.</param>
 public static void NMSBoxes(VectorOfRect bboxes, VectorOfFloat scores, float scoreThreshold, float nmsThreshold, VectorOfInt indices, float eta = 1.0f, int topK = 0)
 {
     cveDnnNMSBoxes(bboxes, scores, scoreThreshold, nmsThreshold, indices, eta, topK);
 }
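A hedged usage sketch of the wrapper above; the box coordinates, scores and thresholds are made-up values used only to illustrate the call (in Emgu CV this method is exposed on DnnInvoke):

 using (VectorOfRect boxes = new VectorOfRect(new[]
 {
    new Rectangle(10, 10, 50, 50),    // overlaps the next box
    new Rectangle(12, 12, 50, 50),
    new Rectangle(200, 200, 40, 40)   // well separated
 }))
 using (VectorOfFloat scores = new VectorOfFloat(new[] { 0.9f, 0.6f, 0.8f }))
 using (VectorOfInt keptIndices = new VectorOfInt())
 {
    NMSBoxes(boxes, scores, 0.5f, 0.4f, keptIndices);
    // keptIndices should now contain indices 0 and 2: the lower-scoring overlapping box is suppressed.
 }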
예제 #30
0
 /// <summary>
 /// Return the default people detector
 /// </summary>
 /// <returns>The default people detector</returns>
 public static float[] GetDefaultPeopleDetector()
 {
    using (VectorOfFloat desc = new VectorOfFloat())
    {
       CvInvoke.cveHOGDescriptorPeopleDetectorCreate(desc);
       return desc.ToArray();
    }
 }
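As a sketch of how the default detector is typically wired into a HOGDescriptor; the input file name is an assumption, and the DetectMultiScale return type shown matches recent Emgu CV versions:

 using (HOGDescriptor hog = new HOGDescriptor())
 using (Mat frame = CvInvoke.Imread("street.jpg", ImreadModes.Color))
 {
    hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
    MCvObjectDetection[] people = hog.DetectMultiScale(frame);
    foreach (MCvObjectDetection person in people)
       CvInvoke.Rectangle(frame, person.Rect, new MCvScalar(0, 0, 255), 2);
 }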
예제 #31
0
 /// <summary>
 /// Calculates the back projection of a histogram.
 /// </summary>
 /// <param name="images">Source arrays. They all should have the same depth, CV_8U or CV_32F , and the same size. Each of them can have an arbitrary number of channels.</param>
 /// <param name="channels">Number of source images.</param>
 /// <param name="hist">Input histogram that can be dense or sparse.</param>
 /// <param name="backProject">Destination back projection array that is a single-channel array of the same size and depth as images[0] .</param>
 /// <param name="ranges">Array of arrays of the histogram bin boundaries in each dimension.</param>
 /// <param name="scale"> Optional scale factor for the output back projection.</param>
 public static void CalcBackProject(IInputArray images, int[] channels, IInputArray hist, IOutputArray backProject, float[] ranges, double scale = 1.0)
 {
    using (VectorOfInt channelsVec = new VectorOfInt(channels))
    using (VectorOfFloat rangeVec = new VectorOfFloat(ranges))
    using (InputArray iaImages = images.GetInputArray())
    using (InputArray iaHist = hist.GetInputArray())
    using (OutputArray oaBackProject = backProject.GetOutputArray())
    {
       cveCalcBackProject(iaImages, channelsVec, iaHist, oaBackProject, rangeVec, scale);
    }
 }
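A hedged usage sketch for the wrapper above: back-project a hue histogram onto a new frame to highlight regions with a similar colour distribution. The file name is illustrative, and hueHistogram is assumed to have been produced earlier by CalcHist on the hue channel:

 using (Mat bgrFrame = CvInvoke.Imread("frame.jpg", ImreadModes.Color))
 using (Mat hsvFrame = new Mat())
 using (Mat backProjection = new Mat())
 {
    CvInvoke.CvtColor(bgrFrame, hsvFrame, ColorConversion.Bgr2Hsv);
    using (VectorOfMat sources = new VectorOfMat(hsvFrame))
    {
       // Channel 0 is hue; the range 0..180 matches OpenCV's 8-bit hue representation.
       CalcBackProject(sources, new[] { 0 }, hueHistogram, backProjection, new float[] { 0, 180 });
    }
 }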
예제 #32
0
      /// <summary>
      /// Fits a line to a 2D point set
      /// </summary>
      /// <param name="points">Input vector of 2D points.</param>
      /// <param name="distType">The distance used for fitting </param>
      /// <param name="param">Numerical parameter (C) for some types of distances, if 0 then some optimal value is chosen</param>
      /// <param name="reps">Sufficient accuracy for radius (distance between the coordinate origin and the line),  0.01 would be a good default</param>
      /// <param name="aeps">Sufficient accuracy for angle, 0.01 would be a good default</param>
      /// <param name="direction">A normalized vector collinear to the line </param>
      /// <param name="pointOnLine">A point on the line.</param>
      public static void FitLine(
          PointF[] points,
          out PointF direction,
          out PointF pointOnLine,
          CvEnum.DistType distType,
          double param,
          double reps,
          double aeps)
      {
         using (VectorOfPointF pv = new VectorOfPointF(points))
         using (VectorOfFloat line = new VectorOfFloat())
         using (InputArray iaPv = pv.GetInputArray())
         using (OutputArray oaLine = line.GetOutputArray())
         {
            cveFitLine(iaPv, oaLine, distType, param, reps, aeps);
            float[] values = line.ToArray();
            direction = new PointF(values[0], values[1]);
            pointOnLine = new PointF(values[2], values[3]);

         }
      }
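A short usage sketch for this overload (the sample points are made up): fit a line through a few roughly collinear points using the L2 distance.

 PointF[] noisyPoints =
 {
    new PointF(0f, 0.1f), new PointF(1f, 1.0f), new PointF(2f, 2.1f),
    new PointF(3f, 2.9f), new PointF(4f, 4.0f)
 };
 PointF direction, pointOnLine;
 FitLine(noisyPoints, out direction, out pointOnLine, CvEnum.DistType.L2, 0, 0.01, 0.01);
 // direction is a unit vector along the fitted line and pointOnLine is a point the line passes through,
 // so the expected result is roughly direction = (0.71, 0.71) with pointOnLine near (2, 2).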
예제 #33
0
        /// <summary>
        /// Computes HOG descriptors of the given image
        /// </summary>
        /// <param name="img">Source image to compute the descriptors for</param>
        /// <param name="winStride">Window stride; must be a multiple of block stride. If null, the default is used</param>
        /// <param name="padding">Padding added to the image border. If null, the default is used</param>
        /// <param name="locations">Locations for the computation. Can be null if not needed</param>
        /// <returns>The descriptor vector</returns>
        public virtual float[] Compute(Mat img, Size? winStride = null, Size? padding = null, Point[] locations = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfFloat())
            {
                int length = (locations != null) ? locations.Length : 0;
                NativeMethods.objdetect_HOGDescriptor_compute(ptr, img.CvPtr, flVec.CvPtr, winStride0, padding0, locations, length);
                return flVec.ToArray();
            }
        }
예제 #34
0
 /// <summary>
 /// Calculates a histogram of a set of arrays.
 /// </summary>
 /// <param name="images">Source arrays. They all should have the same depth, CV_8U or CV_32F , and the same size. Each of them can have an arbitrary number of channels.</param>
 /// <param name="channels">List of the channels used to compute the histogram. </param>
 /// <param name="mask">Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as images[i] . The non-zero mask elements mark the array elements counted in the histogram.</param>
 /// <param name="hist">Output histogram</param>
 /// <param name="histSize">Array of histogram sizes in each dimension.</param>
 /// <param name="ranges">Array of the dims arrays of the histogram bin boundaries in each dimension.</param>
 /// <param name="accumulate">Accumulation flag. If it is set, the histogram is not cleared in the beginning when it is allocated. This feature enables you to compute a single histogram from several sets of arrays, or to update the histogram in time.</param>
 public static void CalcHist(IInputArray images, int[] channels, IInputArray mask, IOutputArray hist, int[] histSize, float[] ranges, bool accumulate)
 {
    using (VectorOfInt channelsVec = new VectorOfInt(channels))
    using (VectorOfInt histSizeVec = new VectorOfInt(histSize))
    using (VectorOfFloat rangesVec = new VectorOfFloat(ranges))
    using (InputArray iaImages = images.GetInputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    using (OutputArray oaHist = hist.GetOutputArray())
    {
       cveCalcHist(iaImages, channelsVec, iaMask, oaHist, histSizeVec, rangesVec, accumulate);
    }
 }
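A minimal usage sketch for the wrapper above: a 256-bin histogram of a single grayscale image (the file name is an assumption).

 using (Mat gray = CvInvoke.Imread("input.png", ImreadModes.Grayscale))
 using (VectorOfMat sources = new VectorOfMat(gray))
 using (Mat hist = new Mat())
 {
    CalcHist(sources, new[] { 0 }, null, hist, new[] { 256 }, new float[] { 0, 256 }, false);
    // hist is now a 256 x 1 single-channel matrix of bin counts for pixel values 0..255.
 }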
예제 #35
0
 /// <summary>
 /// Set the SVM detector 
 /// </summary>
 /// <param name="detector">The SVM detector</param>
 public void SetSVMDetector(float[] detector)
 {
     using (VectorOfFloat vec = new VectorOfFloat())
     {
        vec.Push(detector);
        CvInvoke.CvHOGSetSVMDetector(_ptr, vec);
     }
 }
예제 #36
0
        /// <summary>
        /// Sets coefficients for the linear SVM classifier.
        /// </summary>
        /// <param name="svmDetector">Coefficients for the linear SVM classifier</param>
        public virtual void SetSVMDetector(float[] svmDetector)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");

            using (var svmDetectorVec = new VectorOfFloat(svmDetector))
            {
                NativeMethods.objdetect_HOGDescriptor_setSVMDetector(ptr, svmDetectorVec.CvPtr);
            }
        }
예제 #37
0
 /// <summary>
 /// Computes the HOG descriptors for the given image
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="winStride">Window stride. Must be a multiple of block stride. Use Size.Empty for default</param>
 /// <param name="padding">Padding. Use Size.Empty for default</param>
 /// <param name="locations">Locations for the computation. Can be null if not needed</param>
 /// <returns>The descriptor vector</returns>
 public float[] Compute(Image<Bgr, Byte> image, Size winStride, Size padding, Point[] locations)
 {
     using (VectorOfFloat desc = new VectorOfFloat())
     {
        if (locations == null)
           CvInvoke.CvHOGDescriptorCompute(_ptr, image, desc, winStride, padding, IntPtr.Zero);
        else
        {
           using (MemStorage stor = new MemStorage())
           {
              Seq<Point> locationSeq = new Seq<Point>(stor);
              CvInvoke.CvHOGDescriptorCompute(_ptr, image, desc, winStride, padding, locationSeq);
           }
        }
        return desc.ToArray();
     }
 }
예제 #38
0
        /// <summary>
        /// Computes sparse optical flow using the iterative Lucas-Kanade method with pyramids
        /// </summary>
        /// <param name="prevImg">First 8-bit input image</param>
        /// <param name="nextImg">Second input image of the same size and type as prevImg</param>
        /// <param name="prevPts">Points for which the flow needs to be found</param>
        /// <param name="nextPts">Computed new positions of the input features in the second image</param>
        /// <param name="status">Each element is set to 1 if the flow for the corresponding feature was found, otherwise 0</param>
        /// <param name="err">Each element is set to an error measure for the corresponding feature</param>
        /// <param name="winSize">Size of the search window at each pyramid level. If null, 21x21 is used</param>
        /// <param name="maxLevel">0-based maximal pyramid level number</param>
        /// <param name="criteria">Termination criteria of the iterative search algorithm. If null, 30 iterations or epsilon 0.01 is used</param>
        /// <param name="flags">Operation flags</param>
        /// <param name="minEigThreshold">Minimum eigenvalue threshold used to filter out bad points</param>
        public static void CalcOpticalFlowPyrLK(
            InputArray prevImg, InputArray nextImg,
            Point2f[] prevPts, ref Point2f[] nextPts,
            out byte[] status, out float[] err,
            Size? winSize = null,
            int maxLevel = 3,
            TermCriteria? criteria = null,
            OpticalFlowFlags flags = OpticalFlowFlags.None,
            double minEigThreshold = 1e-4)
        {
            if (prevImg == null)
                throw new ArgumentNullException("prevImg");
            if (nextImg == null)
                throw new ArgumentNullException("nextImg");
            if (prevPts == null)
                throw new ArgumentNullException("prevPts");
            if (nextPts == null)
                throw new ArgumentNullException("nextPts");
            prevImg.ThrowIfDisposed();
            nextImg.ThrowIfDisposed();

            Size winSize0 = winSize.GetValueOrDefault(new Size(21, 21));
            TermCriteria criteria0 = criteria.GetValueOrDefault(
                TermCriteria.Both(30, 0.01));

            using (var nextPtsVec = new VectorOfPoint2f())
            using (var statusVec = new VectorOfByte())
            using (var errVec = new VectorOfFloat())
            {
                NativeMethods.video_calcOpticalFlowPyrLK_vector(
                    prevImg.CvPtr, nextImg.CvPtr, prevPts, prevPts.Length,
                    nextPtsVec.CvPtr, statusVec.CvPtr, errVec.CvPtr, 
                    winSize0, maxLevel, criteria0, (int)flags, minEigThreshold);
                nextPts = nextPtsVec.ToArray();
                status = statusVec.ToArray();
                err = errVec.ToArray();
            }
        }
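A hedged usage sketch for this wrapper; prevGray and nextGray are assumed to be two consecutive grayscale frames already loaded as Mat (implicitly convertible to InputArray), and the feature coordinates are made up:

 Point2f[] prevPts = { new Point2f(100, 80), new Point2f(240, 160), new Point2f(320, 200) };
 Point2f[] nextPts = new Point2f[0];
 byte[] status;
 float[] err;
 CalcOpticalFlowPyrLK(prevGray, nextGray, prevPts, ref nextPts, out status, out err);
 // status[k] == 1 where the flow for prevPts[k] was found; nextPts[k] is its new position.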
예제 #39
0
 /// <summary>
 /// Given the input frame, prepare network input, run network inference, post-process network output and return result detections.
 /// </summary>
 /// <param name="frame">The input image</param>
 /// <param name="detections">Array with detections' RotationRect results</param>
 /// <param name="confidences">Array with detection confidences</param>
 public void DetectTextRectangles(IInputArray frame, VectorOfRotatedRect detections, VectorOfFloat confidences)
 {
     using (InputArray iaFrame = frame.GetInputArray())
     {
         DnnInvoke.cveDnnTextDetectionModelDetectTextRectangles(_textDetectionModel, iaFrame, detections, confidences);
     }
 }
예제 #40
0
        /// <summary>
        /// Sets coefficients for the linear SVM classifier.
        /// </summary>
        /// <param name="svmdetector">Coefficients for the linear SVM classifier</param>
        public virtual void SetSVMDetector(float[] svmdetector)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");

            if (svmDetector != null)
                svmDetector.Dispose();
            svmDetector = new VectorOfFloat(svmdetector);
            NativeMethods.HOGDescriptor_setSVMDetector(ptr, svmDetector.CvPtr);
        }
예제 #41
-2
        public void InitOriginalVideo(string initFile)
        {
            //Capture Image
            OutputPath = _defaultInitVideoPath;

            List<string> grayImgList = CatchImages(initFile, 0, OutputPath);

            if (grayImgList.Count < 3)
            {
                return;
            }

            //Compute Lucas-Kanade optical flow for the detected features
            Image<Gray, Byte> mask = new Image<Gray, Byte>(grayImgList.First());

            Image<Gray, Byte> grayImage1 = null;//new Image<Gray, Byte>(grayImgList[1]);
            Image<Gray, Byte> grayImage2 = null;//new Image<Gray, Byte>(grayImgList.Last());

            for (int i = 1; i < grayImgList.Count - 1; i++)
            {
                grayImage1 = new Image<Gray, Byte>(grayImgList[i]);
                grayImage2 = new Image<Gray, Byte>(grayImgList[i + 1]);
                EmguType features1 = SURFFeatureDetect(grayImage1, mask);

                Utils.WriteJsonFile(features1, grayImgList[i] + ".dat");

                //VectorOfPointF vp1 = new VectorOfPointF(features1.KeyPoints.ToArray().Select(x => x.Point).ToArray());
                //VectorOfPointF vp2 = new VectorOfPointF(vp1.Size);
                //VectorOfByte vstatus = new VectorOfByte(vp1.Size);
                //VectorOfFloat verr = new VectorOfFloat(vp1.Size);
                Size winsize = new Size(grayImage1.Width, grayImage1.Height);
                int maxLevel = 1; // if 0, winsize is not used
                MCvTermCriteria criteria = new MCvTermCriteria(10, 1);

                try
                {
                    if (i % Constants.DETECTIVE_GROUP_COUNT == 1)
                    {
                        GFTTDetector gd = new GFTTDetector();
                        MKeyPoint[] gdkp = gd.Detect(grayImage1, mask);
                        VectorOfPointF gdvp1 = new VectorOfPointF(gdkp.Select(x => x.Point).ToArray());
                        VectorOfPointF gdvp2 = new VectorOfPointF(gdvp1.Size);
                        VectorOfByte vstatus = new VectorOfByte(gdvp1.Size);
                        VectorOfFloat verr = new VectorOfFloat(gdvp1.Size);

                        CvInvoke.CalcOpticalFlowPyrLK(grayImage1, grayImage2, gdvp1, gdvp2, vstatus, verr, winsize, maxLevel, criteria);
                        Utils.WriteJsonFile(gdvp2, grayImgList[i] + "pp.dat");
                    }
                    else
                    {
                        VectorOfPointF gdvp1 = Utils.ReadJsonFile<VectorOfPointF>(grayImgList[i - 1] + "pp.dat");
                        VectorOfPointF gdvp2 = new VectorOfPointF(gdvp1.Size);
                        VectorOfByte vstatus = new VectorOfByte(gdvp1.Size);
                        VectorOfFloat verr = new VectorOfFloat(gdvp1.Size);

                        CvInvoke.CalcOpticalFlowPyrLK(grayImage1, grayImage2, gdvp1, gdvp2, vstatus, verr, winsize, maxLevel, criteria);
                        Utils.WriteJsonFile(gdvp2, grayImgList[i] + "pp.dat");
                    }

                }
                catch (Exception e)
                {
                    _log.Debug("error: " + e.Message);
                }
            }

            /*
            //Get SIFT Feature
            foreach (string grayImgPath in grayImgList)
            {
                Image<Gray, Byte> grayImage = new Image<Gray, Byte>(grayImgPath);
                //List<SiftFeature> features = SiftFeatureDetect(image: grayImage, showDetail: true);

                //Image<Gray, float> grayImage = new Image<Gray, float>(grayImgPath);
                //List<Feature> features = SiftFeatureDetect(grayImage);

                EmguType features = SURFFeatureDetect(grayImage);

                Utils.WriteJsonFile(features, grayImgPath + ".dat");
            }
            */

            _initSuccess = true;
            OutputPath = string.Empty;
        }