        /// <summary>
        /// Convert the image features to keypoint vector and descriptor matrix
        /// </summary>
        public static void ConvertToRaw(ImageFeature <TDescriptor>[] features, out VectorOfKeyPoint keyPoints, out Matrix <TDescriptor> descriptors)
        {
            if (features.Length == 0)
            {
                keyPoints   = null;
                descriptors = null;
                return;
            }
            keyPoints = new VectorOfKeyPoint();
            keyPoints.Push(
#if NETFX_CORE
                Extensions.
#else
                Array.
#endif
                ConvertAll <ImageFeature <TDescriptor>, MKeyPoint>(features, delegate(ImageFeature <TDescriptor> feature) { return(feature.KeyPoint); }));

            descriptors = new Matrix <TDescriptor>(features.Length, features[0].Descriptor.Length);
            int    descriptorLength = features[0].Descriptor.Length;
            int    rowSizeInByte    = descriptorLength * Marshal.SizeOf(typeof(TDescriptor));
            MCvMat header           = descriptors.MCvMat;
            long   address          = header.data.ToInt64();
            for (int i = 0; i < features.Length; i++, address += header.step)
            {
                GCHandle handler = GCHandle.Alloc(features[i].Descriptor, GCHandleType.Pinned);
                Toolbox.memcpy(new IntPtr(address), handler.AddrOfPinnedObject(), rowSizeInByte);
                handler.Free();
            }
        }
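A minimal usage sketch (the feature source is a stand-in; any detector producing an ImageFeature<float>[] works):

ImageFeature<float>[] features = GetFeatures();   // assumed helper
VectorOfKeyPoint keyPoints;
Matrix<float> descriptors;
ConvertToRaw(features, out keyPoints, out descriptors);
if (keyPoints != null)
{
    // One descriptor row per keypoint.
    Console.WriteLine("{0} keypoints, {1}-dim descriptors", keyPoints.Size, descriptors.Cols);
}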
Example #2
        public void RemoveInliersFromKeypointsAndDescriptors(VectorOfDMatch inliers, ref VectorOfKeyPoint keypointsQueryImageInOut, ref Mat descriptorsQueryImageInOut)
        {
            List <int> inliersKeypointsPositions = new List <int>();

            for (int inliersIndex = 0; inliersIndex < inliers.Size; ++inliersIndex)
            {
                MDMatch match = inliers[inliersIndex];
                inliersKeypointsPositions.Add(match.QueryIdx);
            }

            inliersKeypointsPositions.Sort();

            // Keep the original keypoints so the survivors can be copied from them.
            VectorOfKeyPoint keypointsQueryImageBackup = keypointsQueryImageInOut;
            keypointsQueryImageInOut = new VectorOfKeyPoint();
            Mat filteredDescriptors = new Mat();

            // Convert the descriptor matrix to 32-bit float once, outside the row loop.
            Matrix <float> matrix = new Matrix <float>(descriptorsQueryImageInOut.Rows, descriptorsQueryImageInOut.Cols);
            descriptorsQueryImageInOut.ConvertTo(matrix, Emgu.CV.CvEnum.DepthType.Cv32F);

            for (int rowIndex = 0; rowIndex < descriptorsQueryImageInOut.Rows; ++rowIndex)
            {
                // The positions list is sorted above, so a binary search suffices.
                if (inliersKeypointsPositions.BinarySearch(rowIndex) < 0)
                {
                    keypointsQueryImageInOut.Push(new MKeyPoint[] { keypointsQueryImageBackup[rowIndex] });
                    filteredDescriptors.PushBack(matrix.GetRow(rowIndex).Mat);
                }
            }
            filteredDescriptors.CopyTo(descriptorsQueryImageInOut);
            keypointsQueryImageBackup.Dispose();
        }
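A usage sketch (all inputs are assumed to come from an earlier match step):

VectorOfDMatch ransacInliers = GetInlierMatches();   // assumed helper
VectorOfKeyPoint queryKps = GetQueryKeypoints();     // assumed helper
Mat queryDescs = GetQueryDescriptors();              // assumed helper
RemoveInliersFromKeypointsAndDescriptors(ransacInliers, ref queryKps, ref queryDescs);
// queryKps/queryDescs now hold only the features not consumed by this detection.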
        public UMat ORBDescriptor()
        {
            //ORB Feature Descriptor
            ORBDetector      orbDetector       = null;
            VectorOfKeyPoint modelKeyPointsOrb = null;

            try
            {
                orbDetector       = new ORBDetector(500, 1, 8, 30, 0, 3, ORBDetector.ScoreType.Harris, 31, 20);
                modelKeyPointsOrb = new VectorOfKeyPoint();
                MKeyPoint[] mKeyPointsOrb = orbDetector.Detect(preProcessedImageInGrayScale);
                modelKeyPointsOrb.Push(mKeyPointsOrb);
                UMat orbDescriptor = new UMat();
                // Pass the vector that actually holds the detected keypoints;
                // 'true' makes DetectAndCompute reuse them instead of re-detecting.
                orbDetector.DetectAndCompute(preProcessedImageInGrayScale, null, modelKeyPointsOrb, orbDescriptor, true);
                return(orbDescriptor);
            }
            finally
            {
                // Null-conditional: the constructor may have thrown before assignment.
                orbDetector?.Dispose();
                modelKeyPointsOrb?.Dispose();
            }
        }
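Detecting into one keypoint vector and describing with another is easy to trip over; a one-pass sketch of the same flow (the method name is illustrative, preProcessedImageInGrayScale as above):

        public UMat ORBDescriptorOnePass()
        {
            using (ORBDetector orb = new ORBDetector(500))
            using (VectorOfKeyPoint kps = new VectorOfKeyPoint())
            {
                UMat descriptors = new UMat();
                // 'false' lets ORB detect its own keypoints and describe them in one call.
                orb.DetectAndCompute(preProcessedImageInGrayScale, null, kps, descriptors, false);
                return descriptors;
            }
        }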
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image <Gray, Byte> image, Image <Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            if (keyPoints.Length == 0)
            {
                return(new ImageFeature[0]);
            }
            using (VectorOfFloat descVec = new VectorOfFloat())
                using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
                {
                    kpts.Push(keyPoints);
                    CvSIFTDetectorComputeDescriptors(_ptr, image, mask, kpts, descVec);

                    int     n     = keyPoints.Length;
                    float[] descs = descVec.ToArray();
                    //long address = descVec.StartAddress.ToInt64();

                    ImageFeature[] features         = new ImageFeature[n];
                    int            sizeOfdescriptor = DescriptorSize;
                    for (int i = 0; i < n; i++)
                    {
                        features[i].KeyPoint = keyPoints[i];
                        float[] d = new float[sizeOfdescriptor];
                        Array.Copy(descs, i * sizeOfdescriptor, d, 0, sizeOfdescriptor);
                        features[i].Descriptor = d;
                    }
                    return(features);
                }
        }
Example #5
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image <Gray, Byte> image, Image <Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            int sizeOfdescriptor = _surfParams.Extended ? 128 : 64;

            using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
            {
                pts.Push(keyPoints);
                using (Matrix <float> descriptors = ComputeDescriptorsRaw(image, mask, pts))
                    return(Features2DTracker.ConvertToImageFeature(pts, descriptors));
            }
        }
Example #6
        /*
         * /// <summary>
         * /// Compute the descriptor given the bgr image and the point location, using opponent color (CGIV 2008 "Color Descriptors for Object Category Recognition").
         * /// </summary>
         * /// <param name="image">The image where the descriptor will be computed from</param>
         * /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
         * /// <returns>The descriptors found at the keypoint locations</returns>
         * public Matrix<float> ComputeDescriptorsRaw(Image<Bgr, Byte> image, VectorOfKeyPoint keyPoints)
         * {
         * int count = keyPoints.Size;
         * if (count == 0) return null;
         * Matrix<float> descriptors = new Matrix<float>(count, DescriptorSize * 3, 1);
         * CvSIFTDetectorComputeDescriptorsBGR(_ptr, image, keyPoints, descriptors);
         * return descriptors;
         * }*/

        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
        /// <returns>The descriptors found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image <Gray, Byte> image, Image <Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            if (keyPoints.Length == 0)
            {
                return(new ImageFeature[0]);
            }
            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
            {
                kpts.Push(keyPoints);
                using (Matrix <float> descriptor = ComputeDescriptorsRaw(image, mask, kpts))
                {
                    return(Features2DTracker.ConvertToImageFeature(kpts, descriptor));
                }
            }
        }
        public KeyPoints SIFTDescriptor()
        {
            KeyPoints result = new KeyPoints();
            //SiFT Descriptor
            SIFT             siftAlgo           = null;
            VectorOfKeyPoint modelKeyPointsSift = null;

            try
            {
                siftAlgo           = new SIFT();
                modelKeyPointsSift = new VectorOfKeyPoint();

                MKeyPoint[] siftPoints = siftAlgo.Detect(preProcessedImageInGrayScale);
                modelKeyPointsSift.Push(siftPoints);
                UMat siftDescriptors = new UMat();
                siftAlgo.DetectAndCompute(preProcessedImageInGrayScale, null, modelKeyPointsSift, siftDescriptors, true);
                Image <Gray, Byte> outputImage = new Image <Gray, byte>(
                    preProcessedImageInGrayScale.Width,
                    preProcessedImageInGrayScale.Height);
                Features2DToolbox.DrawKeypoints(
                    preProcessedImageInGrayScale,
                    modelKeyPointsSift,
                    outputImage,
                    new Bgr(255, 255, 255),
                    Features2DToolbox.KeypointDrawType.Default);

                string folderName = @"C:\Projects\LeafService\SiftImage";
                string pathString = System.IO.Path.Combine(folderName, "Sift" + DateTime.UtcNow.Ticks);
                System.IO.Directory.CreateDirectory(pathString);
                if (Directory.Exists(pathString))
                {
                    // Save into the directory created above.
                    string newFilePath = Path.Combine(pathString, "SiftImage" + DateTime.UtcNow.Ticks + ".jpg");
                    outputImage.Save(newFilePath);
                }


                //outputImage.Save("sift.jpg");
                result.Descriptor = siftDescriptors;
                result.Points     = siftPoints;
                return(result);
            }
            finally
            {
                // Null-conditional: the constructor may have thrown before assignment.
                siftAlgo?.Dispose();
                modelKeyPointsSift?.Dispose();
            }
        }
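A usage sketch of the returned container (KeyPoints is the author's type pairing the UMat descriptor matrix with the MKeyPoint array):

KeyPoints kp = SIFTDescriptor();
// One 128-float row per keypoint in kp.Descriptor.
Console.WriteLine("SIFT found {0} keypoints", kp.Points.Length);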
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="extractor">The descriptor extractor</param>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
        /// <returns>The descriptors found at the keypoint locations</returns>
        public static ImageFeature <TDepth>[] ComputeDescriptors <TDepth>(this IDescriptorExtractor <TDepth> extractor, Image <Gray, Byte> image, Image <Gray, byte> mask, MKeyPoint[] keyPoints)
            where TDepth : struct
        {
            if (keyPoints.Length == 0)
            {
                return(new ImageFeature <TDepth> [0]);
            }
            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
            {
                kpts.Push(keyPoints);
                using (Matrix <TDepth> descriptor = extractor.ComputeDescriptorsRaw(image, mask, kpts))
                {
                    return(ImageFeature <TDepth> .ConvertFromRaw(kpts, descriptor));
                }
            }
        }
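A call-site sketch of the extension (the SURFDetector construction and DetectKeyPoints signature follow the Emgu CV 2.x-era API this code targets; treat both as assumptions):

SURFDetector surf = new SURFDetector(500, false);
Image<Gray, Byte> img = new Image<Gray, Byte>("scene.jpg");   // example path
MKeyPoint[] kps = surf.DetectKeyPoints(img, null);            // assumed 2.x signature
ImageFeature<float>[] feats = surf.ComputeDescriptors(img, null, kps);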
        public VectorOfKeyPoint GetInliersKeypoints()
        {
            if (_inliersKeyPoints.Size == 0)
            {
                for (int i = 0; i < _inliers.Size; ++i)
                {
                    MDMatch match = _inliers[i];

                    if (match.QueryIdx < _keypointsEvalImag.Size)
                    {
                        _inliersKeyPoints.Push(new MKeyPoint[] { _keypointsEvalImag[match.QueryIdx] });
                    }
                }
            }
            return(_inliersKeyPoints);
        }
        public UMat SURFDescriptor()
        {
            double hessianThresh = 800;
            // public SURF(double hessianThresh, int nOctaves = 4, int nOctaveLayers = 2, bool extended = true, bool upright = false)
            SURF             surfAlgo       = new SURF(hessianThresh, 4, 2, true, false);
            VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint();

            MKeyPoint[] mKeyPoints = surfAlgo.Detect(preProcessedImageInGrayScale);
            modelKeyPoints.Push(mKeyPoints);
            UMat SurfDescriptors = new UMat();

            surfAlgo.DetectAndCompute(preProcessedImageInGrayScale, null, modelKeyPoints, SurfDescriptors, true);
            //image2.Source = BitmapSourceConvert.ToBitmapSource(modelDescriptors);
            // Note: this writes the raw descriptor matrix to disk as an image.
            SurfDescriptors.Save("SURFDetection.jpg");
            return(SurfDescriptors);
        }
        /// <summary>
        /// Convert the image features to keypoint vector and descriptor matrix
        /// </summary>
        private static void ConvertFromImageFeature(ImageFeature[] features, out VectorOfKeyPoint keyPoints, out Matrix <float> descriptors)
        {
            if (features.Length == 0)
            {
                keyPoints   = null;
                descriptors = null;
                return;
            }
            keyPoints = new VectorOfKeyPoint();
            keyPoints.Push(Array.ConvertAll <ImageFeature, MKeyPoint>(features, delegate(ImageFeature feature) { return(feature.KeyPoint); }));
            descriptors = new Matrix <float>(features.Length, features[0].Descriptor.Length);

            int descriptorLength = features[0].Descriptor.Length;

            float[,] data = descriptors.Data;
            for (int i = 0; i < features.Length; i++)
            {
                for (int j = 0; j < descriptorLength; j++)
                {
                    data[i, j] = features[i].Descriptor[j];
                }
            }
        }
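The nested loops copy one float at a time; a sketch of the same copy with Buffer.BlockCopy over the locals of the method above (offsets are in bytes, hence sizeof(float)):

for (int i = 0; i < features.Length; i++)
{
    // One block copy per feature row into the row-major float[,] backing store.
    Buffer.BlockCopy(features[i].Descriptor, 0, data,
                     i * descriptorLength * sizeof(float),
                     descriptorLength * sizeof(float));
}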
Example #12
        public static void KeepVectorsByStatus(ref VectorOfKeyPoint f1, ref VectorOfPoint3D32F f2, VectorOfByte status)
        {
            var newF1 = new VectorOfKeyPoint();
            var newF2 = new VectorOfPoint3D32F();

            for (int i = 0; i < status.Size; i++)
            {
                if (status[i] > 0)
                {
                    newF1.Push(new[] { f1[i] });
                    newF2.Push(new[] { f2[i] });
                }
            }

            // Release the native memory of the vectors being replaced.
            f1.Dispose();
            f2.Dispose();

            f1 = newF1;
            f2 = newF2;
        }
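A toy usage sketch: three tracked points where the middle one was lost (status byte 0), so only two survive:

VectorOfKeyPoint pts2D = new VectorOfKeyPoint();
pts2D.Push(new[] { new MKeyPoint { Point = new PointF(1, 1) },
                   new MKeyPoint { Point = new PointF(2, 2) },
                   new MKeyPoint { Point = new PointF(3, 3) } });
VectorOfPoint3D32F pts3D = new VectorOfPoint3D32F();
pts3D.Push(new[] { new MCvPoint3D32f(1, 1, 0),
                   new MCvPoint3D32f(2, 2, 0),
                   new MCvPoint3D32f(3, 3, 0) });
using (VectorOfByte status = new VectorOfByte(new byte[] { 1, 0, 1 }))
{
    KeepVectorsByStatus(ref pts2D, ref pts3D, status);
    // pts2D.Size == 2 && pts3D.Size == 2
}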
Example #13
        public Mat FingerprintDescriptor(Mat input)
        {
            var harris_normalised = PrepareImage(input);

            float            threshold  = 125.0f;
            List <MKeyPoint> mKeyPoints = new List <MKeyPoint>();
            Mat rescaled = new Mat();
            VectorOfKeyPoint keypoints = new VectorOfKeyPoint();
            double           scale = 1.0, shift = 0.0;

            CvInvoke.ConvertScaleAbs(harris_normalised, rescaled, scale, shift);
            Mat[]       mat         = new Mat[] { rescaled, rescaled, rescaled };
            VectorOfMat vectorOfMat = new VectorOfMat(mat);

            int[] from_to  = { 0, 0, 1, 1, 2, 2 };
            Mat   harris_c = new Mat(rescaled.Size, DepthType.Cv8U, 3);

            CvInvoke.MixChannels(vectorOfMat, harris_c, from_to);
            for (int x = 0; x < harris_c.Width; x++)
            {
                for (int y = 0; y < harris_c.Height; y++)
                {
                    if (GetFloatValue(harris_c, x, y) > threshold)
                    {
                        MKeyPoint m = new MKeyPoint
                        {
                            Size  = 1,
                            Point = new PointF(x, y)
                        };
                        mKeyPoints.Add(m);
                    }
                }
            }

            keypoints.Push(mKeyPoints.ToArray());
            Mat         descriptors = new Mat();
            ORBDetector ORBCPU      = new ORBDetector();

            ORBCPU.Compute(_input_thinned, keypoints, descriptors);

            return(descriptors);
        }
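The MixChannels call above only replicates the single Harris-response channel into all three output channels; an equivalent one-call sketch (reusing rescaled from the method, enum name per Emgu.CV.CvEnum):

// Gray -> 3-channel BGR by replication; same effect as the MixChannels above.
Mat harris_c = new Mat();
CvInvoke.CvtColor(rescaled, harris_c, Emgu.CV.CvEnum.ColorConversion.Gray2Bgr);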
Example #14
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image <Gray, Byte> image, Image <Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            using (VectorOfFloat descs = new VectorOfFloat())
                using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
                {
                    kpts.Push(keyPoints);
                    CvSURFDetectorComputeDescriptors(ref this, image, mask, kpts, descs);

                    int  n       = keyPoints.Length;
                    long address = descs.StartAddress.ToInt64();

                    ImageFeature[] features         = new ImageFeature[n];
                    int            sizeOfdescriptor = extended == 0 ? 64 : 128;
                    for (int i = 0; i < n; i++, address += sizeOfdescriptor * sizeof(float))
                    {
                        features[i].KeyPoint = keyPoints[i];
                        float[] desc = new float[sizeOfdescriptor];
                        Marshal.Copy(new IntPtr(address), desc, 0, sizeOfdescriptor);
                        features[i].Descriptor = desc;
                    }
                    return(features);
                }
        }
        public void DrawSIFTDescriptor(string inputFile, string outputFile)
        {
            //SiFT Descriptor
            SIFT             siftAlgo           = null;
            VectorOfKeyPoint modelKeyPointsSift = null;

            try
            {
                siftAlgo           = new SIFT();
                modelKeyPointsSift = new VectorOfKeyPoint();

                using (Image <Bgr, byte> inputImage = new Image <Bgr, byte>(inputFile))
                {
                    MKeyPoint[] siftPoints = siftAlgo.Detect(inputImage);
                    modelKeyPointsSift.Push(siftPoints);
                    UMat siftDescriptors = new UMat();
                    siftAlgo.DetectAndCompute(inputImage, null, modelKeyPointsSift, siftDescriptors, true);
                    using (Image <Gray, Byte> outputImage = new Image <Gray, byte>(
                               inputImage.Width,
                               inputImage.Height))
                    {
                        Features2DToolbox.DrawKeypoints(
                            inputImage,
                            modelKeyPointsSift,
                            outputImage,
                            new Bgr(255, 255, 255),
                            Features2DToolbox.KeypointDrawType.Default);
                        outputImage.Save(outputFile);
                    }
                }
            }
            finally
            {
                // Null-conditional: the constructor may have thrown before assignment.
                siftAlgo?.Dispose();
                modelKeyPointsSift?.Dispose();
            }
        }
Example #16
        public float[] ComputeDescriptor(Image <Bgr, byte> image, int stepX = 9, int stepY = 9)
        {
            SIFT sift = new SIFT();

            VectorOfKeyPoint keypoints = new VectorOfKeyPoint();
            Mat descriptors            = new Mat();

            for (int y = stepY; y < image.Rows - stepY; y += stepY)
            {
                for (int x = stepX; x < image.Cols - stepX; x += stepX)
                {
                    MKeyPoint[] point = { new MKeyPoint() };
                    point[0].Size  = stepX;
                    point[0].Point = new Point(x, y);
                    keypoints.Push(point);
                }
            }

            sift.Compute(image, keypoints, descriptors);

            float[] returnArray = new float[descriptors.Rows * descriptors.Cols];
            descriptors.CopyTo(returnArray);

            // Release the native resources before returning the managed copy.
            sift.Dispose();
            keypoints.Dispose();
            descriptors.Dispose();
            return(returnArray);
        }
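A quick usage sketch, assuming the method above is in scope: each grid point contributes one 128-float SIFT descriptor, flattened row-major into the returned array (the image size here is just an example):

using (Image<Bgr, byte> img = new Image<Bgr, byte>(90, 90))
{
    float[] vec = ComputeDescriptor(img, 9, 9);
    int gridPoints = vec.Length / 128; // 128 floats per SIFT descriptor
    Console.WriteLine("{0} grid points sampled", gridPoints);
}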
        private List <Result> DetectBanknotesTrain(Mat image, float minimumMatchAllowed = 0.07f, float minimuTargetAreaPercentage = 0.05f, float maxDistanceRatio = 0.75f, float reprojectionThresholPercentage = 0.01f,
                                                   double confidence = 0.99, int maxIters = 5000, int minimumNumerInliers = 8)
        {
            object locker = new object();

            List <Result> detectorResults = new List <Result>();

            MKeyPoint[] mKeyPoints;
            SIFT        sift = new SIFT();

            mKeyPoints = sift.Detect(image);
            VectorOfKeyPoint keypointsEvalImage = new VectorOfKeyPoint();

            keypointsEvalImage.Push(mKeyPoints);

            if (keypointsEvalImage.Size < 4)
            {
                return(detectorResults);
            }

            Mat descriptorsEvalImage = new Mat();

            sift.Compute(image, keypointsEvalImage, descriptorsEvalImage);

            Features2DToolbox.DrawKeypoints(image, keypointsEvalImage, image, new Bgr(0, 0, 255), Features2DToolbox.KeypointDrawType.Default);

            float  bestMatch          = 0;
            Result bestDetectorResult = new Result();

            int   trainDetectorsSize    = DetectedBanknotes.Count;
            bool  validDetection        = true;
            float reprojectionThreshold = image.Cols * reprojectionThresholPercentage;

            do
            {
                bestMatch = 0;

                Parallel.For(0, trainDetectorsSize, i =>
                {
                    DetectedBanknotes[i].UpdateCurrentLODIndex(ref image, 0.6999999881F);
                    Result detectorResult = DetectedBanknotes[i].AnalyzeImageEval(ref keypointsEvalImage, ref descriptorsEvalImage, maxDistanceRatio, reprojectionThreshold, confidence, maxIters, minimumNumerInliers);
                    if (detectorResult.GetBestROIMatch() > minimumMatchAllowed)
                    {
                        float contourArea           = (float)CvInvoke.ContourArea(detectorResult.GetTrainContour());
                        float imageArea             = (float)(image.Cols * image.Rows);
                        float contourAreaPercentage = contourArea / imageArea;

                        if (contourAreaPercentage > minimuTargetAreaPercentage)
                        {
                            double contourAspectRatio = _util.ComputeContourAspectRatio(detectorResult.GetTrainContour());
                            if (contourAspectRatio > _contourAspectRatioRange.X && contourAspectRatio < _contourAspectRatioRange.Y)
                            {
                                double contourCircularity = _util.ComputeContourCircularity(detectorResult.GetTrainContour());
                                if (contourCircularity > _contourCircularityRange.X && contourCircularity < _contourCircularityRange.Y)
                                {
                                    if (CvInvoke.IsContourConvex(detectorResult.GetTrainContour()))
                                    {
                                        lock (locker)
                                        {
                                            if (detectorResult.GetBestROIMatch() > bestMatch)
                                            {
                                                bestMatch          = detectorResult.GetBestROIMatch();
                                                bestDetectorResult = detectorResult;
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                });

                validDetection = bestMatch > minimumMatchAllowed && bestDetectorResult.GetInliers().Size > minimumNumerInliers;

                if (bestDetectorResult != null && validDetection)
                {
                    detectorResults.Add(bestDetectorResult);
                    _util.RemoveInliersFromKeypointsAndDescriptors(bestDetectorResult.GetInliers(), ref keypointsEvalImage, ref descriptorsEvalImage);
                }
            } while (validDetection);

            return(detectorResults);
        }
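Stripped of the geometric filtering, the do/while above is a greedy multi-instance loop; a sketch of the control flow (the helper names are illustrative stand-ins for the Parallel.For body and the RemoveInliers call):

// Greedy multi-detection: take the best remaining match, remove its
// inlier support so it cannot be found again, then search once more.
List<Result> results = new List<Result>();
while (true)
{
    Result best = FindBestBanknoteMatch();   // assumed helper
    if (best == null || best.GetBestROIMatch() <= minimumMatchAllowed)
        break;
    results.Add(best);
    RemoveInlierSupport(best);               // assumed helper
}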
Example #18
        /// <summary>
        /// Identify good matches using RANSAC
        /// </summary>
        /// <param name="matches">The symmetrical matches</param>
        /// <param name="keyPoints1">The keypoints of the first image</param>
        /// <param name="keyPoints2">The keypoints of the second image</param>
        /// <param name="matchesNumber">The number of symmetrical matches</param>
        /// <returns>The fundamental matrix</returns>
        Matrix <double> ApplyRANSAC(Matrix <float> matches, VectorOfKeyPoint keyPoints1, VectorOfKeyPoint keyPoints2, int matchesNumber)
        {
            selPoints1 = new Matrix <float>(matchesNumber, 2);
            selPoints2 = new Matrix <float>(matchesNumber, 2);

            int selPointsIndex = 0;

            for (int i = 0; i < matches.Rows; i++)
            {
                if (matches[i, 0] == 0 && matches[i, 1] == 0)
                {
                    continue;
                }

                //Get the position of left keypoints
                float x = keyPoints1[(int)matches[i, 0]].Point.X;
                float y = keyPoints1[(int)matches[i, 0]].Point.Y;
                selPoints1[selPointsIndex, 0] = x;
                selPoints1[selPointsIndex, 1] = y;
                //Get the position of right keypoints
                x = keyPoints2[(int)matches[i, 1]].Point.X;
                y = keyPoints2[(int)matches[i, 1]].Point.Y;
                selPoints2[selPointsIndex, 0] = x;
                selPoints2[selPointsIndex, 1] = y;
                selPointsIndex++;
            }

            Matrix <double> fundamentalMatrix = new Matrix <double>(3, 3);
            //IntPtr status = CvInvoke.cvCreateMat(1, matchesNumber, MAT_DEPTH.CV_8U);
            //Matrix<double> status = new Matrix<double>(1, matchesNumber);
            IntPtr statusp = CvInvoke.cvCreateMat(1, matchesNumber, MAT_DEPTH.CV_8U);
            IntPtr points1 = CreatePointListPointer(selPoints1);
            IntPtr points2 = CreatePointListPointer(selPoints2);

            //IntPtr fundamentalMatrixp = CvInvoke.cvCreateMat(3, 3, MAT_DEPTH.CV_32F);

            //Compute F matrix from RANSAC matches
            CvInvoke.cvFindFundamentalMat(
                points1,            //selPoints1   points in first image
                points2,            //selPoints2   points in second image
                fundamentalMatrix,  //fundamental matrix
                CV_FM.CV_FM_RANSAC, //RANSAC method
                this._Distance,     //Use 3.0 for default. The parameter is used for RANSAC method only.
                this._Confidence,   //Use 0.99 for default. The parameter is used for RANSAC or LMedS methods only.
                statusp);           //The array is computed only in RANSAC and LMedS methods.

            // statusp was created as CV_8U, so wrap the data as bytes, not ints.
            Matrix <byte> status = new Matrix <byte>(1, matchesNumber, statusp);

            //Matrix<double> fundamentalMatrix = new Matrix<double>(3, 3, fundamentalMatrixp);
            if (this._RefineF)
            {
                matchesNumber = 0;
                for (int i = 0; i < status.Cols; i++)
                {
                    if (status[0, i] >= 1)  // ==1
                    {
                        matchesNumber++;
                    }
                }
                selPoints1 = new Matrix <float>(matchesNumber, 2);
                selPoints2 = new Matrix <float>(matchesNumber, 2);

                modelKeyPoints    = new VectorOfKeyPoint();
                observedKeyPoints = new VectorOfKeyPoint();

                int statusIndex = -1;
                selPointsIndex = 0;
                for (int i = 0; i < matches.Rows; i++)
                {
                    if (matches[i, 0] == 0 && matches[i, 1] == 0)
                    {
                        continue;
                    }

                    statusIndex++;
                    if (status[0, statusIndex] >= 1)  // == 1
                    {
                        //Get the position of left keypoints
                        float x = keyPoints1[(int)matches[i, 0]].Point.X;
                        float y = keyPoints1[(int)matches[i, 0]].Point.Y;
                        selPoints1[selPointsIndex, 0] = x;
                        selPoints1[selPointsIndex, 1] = y;

                        MKeyPoint[] kpt = new MKeyPoint[1];
                        kpt[0]         = new MKeyPoint();
                        kpt[0].Point.X = x; kpt[0].Point.Y = y;
                        modelKeyPoints.Push(kpt);

                        //Get the position of right keypoints
                        x = keyPoints2[(int)matches[i, 1]].Point.X;
                        y = keyPoints2[(int)matches[i, 1]].Point.Y;
                        selPoints2[selPointsIndex, 0] = x;
                        selPoints2[selPointsIndex, 1] = y;

                        MKeyPoint[] kpt2 = new MKeyPoint[1];
                        kpt2[0]         = new MKeyPoint();
                        kpt2[0].Point.X = x; kpt2[0].Point.Y = y;
                        observedKeyPoints.Push(kpt2);
                        selPointsIndex++;
                    }
                }

                status = new Matrix <byte>(1, matchesNumber);

                mask = new Matrix <byte>(matchesNumber, 1);
                for (int i = 0; i < mask.Rows; i++)
                {
                    // 0 = don't draw this match; set to 1 to let
                    // Features2DToolbox.DrawMatches draw the correspondences instead.
                    mask[i, 0] = 0;
                }

                // Not actually used: we draw the correspondences ourselves because
                // DrawMatches has a problem drawing the lines.
                indices = new Matrix <int>(matchesNumber, 2);
                for (int i = 0; i < indices.Rows; i++)
                {
                    indices[i, 0] = i;
                    indices[i, 1] = i;
                }

                //Compute F matrix from RANSAC matches   // we can do additional RANSAC filtering
                //CvInvoke.cvFindFundamentalMat(         // but first RANSAC gives good results so not used
                //    selPoints1, //points in first image
                //    selPoints2, //points in second image
                //    fundamentalMatrix,  //fundamental matrix
                //    CV_FM.CV_FM_RANSAC, //RANSAC method
                //    this._Distance,  //Use 3.0 for default. The parameter is used for RANSAC method only.
                //    this._Confidence, //Use 0.99 for default. The parameter is used for RANSAC or LMedS methods only.
                //    status);//The array is computed only in RANSAC and LMedS methods.
                // we will need to copy points from selPoints1 and 2 based on status if above was uncommented
            }
            return(fundamentalMatrix);
        }
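The cvCreateMat/cvFindFundamentalMat calls above are OpenCV 1.x C API; under the Emgu CV 3+ API the same RANSAC step would look roughly like this (treat the exact overload as an assumption; selPoints1/selPoints2 as above):

Mat fundamental = new Mat();
Mat inlierMask = new Mat();   // one status byte per point pair
CvInvoke.FindFundamentalMat(selPoints1, selPoints2, fundamental,
                            Emgu.CV.CvEnum.FmType.Ransac, 3.0, 0.99, inlierMask);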