/// <summary>
/// Removes the keypoints and descriptor rows referenced by the given inlier matches,
/// leaving only the outlier keypoints/descriptors in the in/out parameters.
/// </summary>
/// <param name="inliers">Matches whose QueryIdx values identify the keypoints/rows to remove.</param>
/// <param name="keypointsQueryImageInOut">Query keypoints; replaced by a new vector containing only non-inlier keypoints.</param>
/// <param name="descriptorsQueryImageInOut">Query descriptors; overwritten with the non-inlier rows (converted to CV_32F).</param>
public void RemoveInliersFromKeypointsAndDescriptors(VectorOfDMatch inliers, ref VectorOfKeyPoint keypointsQueryImageInOut, ref Mat descriptorsQueryImageInOut)
{
    // HashSet gives O(1) membership checks; the original used a sorted List with a
    // linear Exists scan per descriptor row (O(n^2) overall).
    HashSet<int> inliersKeypointsPositions = new HashSet<int>();
    for (int inliersIndex = 0; inliersIndex < inliers.Size; ++inliersIndex)
    {
        inliersKeypointsPositions.Add(inliers[inliersIndex].QueryIdx);
    }

    VectorOfKeyPoint keypointsQueryImageBackup = keypointsQueryImageInOut;
    keypointsQueryImageInOut = new VectorOfKeyPoint();

    // Convert the descriptor matrix to float ONCE. The original re-ran this
    // conversion inside the row loop, which was loop-invariant work.
    Matrix<float> matrix = new Matrix<float>(descriptorsQueryImageInOut.Size);
    descriptorsQueryImageInOut.ConvertTo(matrix, Emgu.CV.CvEnum.DepthType.Cv32F);

    Mat filteredDescriptors = new Mat();
    for (int rowIndex = 0; rowIndex < descriptorsQueryImageInOut.Rows; ++rowIndex)
    {
        if (!inliersKeypointsPositions.Contains(rowIndex))
        {
            keypointsQueryImageInOut.Push(new MKeyPoint[] { keypointsQueryImageBackup[rowIndex] });
            filteredDescriptors.PushBack(matrix.GetRow(rowIndex).Mat);
        }
    }

    filteredDescriptors.CopyTo(descriptorsQueryImageInOut);
}
/// <summary>
/// Matches the evaluation image's descriptors against the current LOD train image
/// (brute-force L2 + ratio test) and validates the matches with a RANSAC homography.
/// </summary>
/// <param name="keypointsEvalImage">Keypoints detected in the evaluation image.</param>
/// <param name="descriptorsEvalImage">Descriptors of the evaluation image.</param>
/// <param name="maxDistanceRatio">Lowe's ratio-test threshold forwarded to the matcher.</param>
/// <param name="reprojectionThreshold">RANSAC reprojection threshold for homography refinement.</param>
/// <param name="confidence">Currently unused; kept for interface compatibility.</param>
/// <param name="maxIters">Currently unused; kept for interface compatibility.</param>
/// <param name="minimumNumbersInliers">Minimum matches/inliers required to accept the result.</param>
/// <returns>A populated Result on success, or an empty Result when matching fails.</returns>
public Result AnalyzeImageEval(ref VectorOfKeyPoint keypointsEvalImage, ref Mat descriptorsEvalImage, float maxDistanceRatio, float reprojectionThreshold, double confidence, int maxIters, int minimumNumbersInliers)
{
    var matches = new VectorOfDMatch();

    // BFMatcher wraps a native handle; dispose it once matching is done
    // (the original leaked it). A FLANN KD-tree index could replace it for
    // large descriptor sets.
    using (BFMatcher bfmatcher = new BFMatcher(DistanceType.L2))
    {
        _util.MatchDescriptorsWithRatioTest(bfmatcher, ref descriptorsEvalImage, _descriptorsImageTrain[_LODIndex], ref matches, maxDistanceRatio);
    }

    if (matches.Size < minimumNumbersInliers)
    {
        return new Result();
    }

    Mat homography = new Mat();
    VectorOfDMatch inliers = new VectorOfDMatch();
    VectorOfInt inliersMaskOut = new VectorOfInt();
    _util.RefineMatchesWithHomography(keypointsEvalImage, _keypointsImageTrain[_LODIndex], matches, ref homography, inliers, inliersMaskOut, reprojectionThreshold, minimumNumbersInliers);

    if (inliers.Size < minimumNumbersInliers)
    {
        return new Result();
    }

    // Fraction of ratio-test matches that survived the homography check.
    float bestROIMatch = (float)inliers.Size / matches.Size;

    return new Result(ValueBanknote, new VectorOfPoint(), ColorContour, bestROIMatch, _trainsImage[_LODIndex], _keypointsImageTrain[_LODIndex], keypointsEvalImage, ref matches, ref inliers, ref inliersMaskOut, ref homography);
}
/// <summary>
/// Finds the best match for each descriptor from a query set (blocking version).
/// </summary>
/// <param name="queryDescriptors">Query set of descriptors.</param>
/// <param name="matches">Output matches. A query descriptor masked out in <paramref name="mask"/> gets no entry, so the result may be smaller than the query descriptor count.</param>
/// <param name="mask">Optional mask restricting which query/train descriptor pairs may match.</param>
public void Match(
    IInputArray queryDescriptors,
    VectorOfDMatch matches,
    VectorOfGpuMat mask = null)
{
    IntPtr maskPtr = mask == null ? IntPtr.Zero : mask.Ptr;
    using (InputArray queryInput = queryDescriptors.GetInputArray())
    {
        CudaInvoke.cveCudaDescriptorMatcherMatch2(_ptr, queryInput, matches, maskPtr);
    }
}
/// <summary>
/// GMS (Grid-based Motion Statistics) feature matching strategy.
/// </summary>
/// <param name="size1">Size of image1.</param>
/// <param name="size2">Size of image2.</param>
/// <param name="keypoints1">Keypoints of image1.</param>
/// <param name="keypoints2">Keypoints of image2.</param>
/// <param name="matches1to2">Input 1-nearest-neighbor matches.</param>
/// <param name="matchesGMS">Output matches selected by the GMS strategy.</param>
/// <param name="withRotation">Whether to account for rotation between the images.</param>
/// <param name="withScale">Whether to account for scale change between the images.</param>
/// <param name="thresholdFactor">Selectivity knob: higher values yield fewer matches.</param>
public static void MatchGMS(
    Size size1,
    Size size2,
    VectorOfKeyPoint keypoints1,
    VectorOfKeyPoint keypoints2,
    VectorOfDMatch matches1to2,
    VectorOfDMatch matchesGMS,
    bool withRotation = false,
    bool withScale = false,
    double thresholdFactor = 6.0)
{
    cveMatchGMS(
        ref size1,
        ref size2,
        keypoints1,
        keypoints2,
        matches1to2,
        matchesGMS,
        withRotation,
        withScale,
        thresholdFactor);
}
/// <summary>
/// Determines the most frequent class label among all k-NN matches, then records
/// (in lastMatchFeatureData) the train image within that label that contributed
/// the most matches.
/// </summary>
/// <param name="vDMatch">k-NN matches to vote over.</param>
/// <param name="uniqueMask">Unused; several train features map to one class, so masking is unnecessary.</param>
/// <returns>The winning class label.</returns>
public String GetLabelFromMatches(VectorOfVectorOfDMatch vDMatch, Mat uniqueMask)
{
    // First pass: tally votes per class label.
    Dictionary<String, int> labelCount = new Dictionary<string, int>();
    for (int i = 0; i < vDMatch.Size; i++)
    {
        VectorOfDMatch vMatch = vDMatch[i];
        for (int j = 0; j < vMatch.Size; j++)
        {
            String label = GetLabel(vMatch[j].ImgIdx);
            labelCount[label] = labelCount.TryGetValue(label, out int votes) ? votes + 1 : 1;
        }
    }

    String ret = labelCount.Keys.Aggregate((i, j) => labelCount[i] >= labelCount[j] ? i : j);

    // Second pass: among matches belonging to the winning label, count votes per
    // train image index to find the single best reference image.
    Dictionary<int, int> imgIndexCount = new Dictionary<int, int>();
    for (int i = 0; i < vDMatch.Size; i++)
    {
        VectorOfDMatch vMatch = vDMatch[i];
        for (int j = 0; j < vMatch.Size; j++)
        {
            MDMatch dmatch = vMatch[j];
            if (GetLabel(dmatch.ImgIdx) != ret)
            {
                continue;
            }
            imgIndexCount[dmatch.ImgIdx] = imgIndexCount.TryGetValue(dmatch.ImgIdx, out int votes) ? votes + 1 : 1;
        }
    }

    int imgIndex = imgIndexCount.Keys.Aggregate((i, j) => imgIndexCount[i] >= imgIndexCount[j] ? i : j);
    Console.WriteLine("imgIndex=" + imgIndex);
    lastMatchFeatureData = this[imgIndex];
    return ret;
}
/// <summary>
/// Converts matches from the internal GPU representation to a standard matches vector.
/// </summary>
/// <param name="gpuMatches">Matches as returned from MatchAsync.</param>
/// <param name="matches">Output vector of DMatch objects.</param>
public void MatchConvert(
    IInputArray gpuMatches,
    VectorOfDMatch matches)
{
    using (InputArray gpuMatchesInput = gpuMatches.GetInputArray())
    {
        CudaInvoke.cveCudaDescriptorMatcherMatchConvert(_ptr, gpuMatchesInput, matches);
    }
}
/// <summary>
/// Renders a VectorOfDMatch as a multi-line, indented debug string, delegating
/// each element to the per-match ToString overload.
/// </summary>
/// <param name="vDMatch">The match vector to format.</param>
/// <param name="indent">Leading indentation applied to the enclosing brackets.</param>
/// <returns>The formatted string.</returns>
public static String ToString(VectorOfDMatch vDMatch, String indent = "")
{
    StringBuilder sb = new StringBuilder();
    sb.Append(indent).Append("[VectorOfDMatch Size=").Append(vDMatch.Size);
    for (int idx = 0; idx < vDMatch.Size; idx++)
    {
        sb.Append("\n").Append(ToString(vDMatch[idx], indent + "\t"));
    }
    sb.Append("\n").Append(indent).Append("]");
    return sb.ToString();
}
/// <summary>
/// Finds the best match for each descriptor from a query set against the train
/// descriptor collection previously registered via Add.
/// </summary>
/// <param name="queryDescriptors">Query set of descriptors.</param>
/// <param name="matches">Output matches. Masked-out query descriptors get no entry, so the result may be smaller than the query descriptor count.</param>
/// <param name="masks">Optional masks restricting which query/train descriptor pairs may match.</param>
public void Match(
    IInputArray queryDescriptors,
    VectorOfDMatch matches,
    IInputArrayOfArrays masks = null)
{
    using (InputArray queryInput = queryDescriptors.GetInputArray())
    {
        using (InputArray maskInput = masks == null ? InputArray.GetEmpty() : masks.GetInputArray())
        {
            CvInvoke.cveDescriptorMatcherMatch2(_descriptorMatcherPtr, queryInput, matches, maskInput);
        }
    }
}
/// <summary>
/// Finds the best match for each descriptor from a query set against an explicit
/// train set (the train set is not added to the matcher's stored collection).
/// </summary>
/// <param name="queryDescriptors">Query set of descriptors.</param>
/// <param name="trainDescriptors">Train set of descriptors used only for this call.</param>
/// <param name="matches">Output matches. Masked-out query descriptors get no entry, so the result may be smaller than the query descriptor count.</param>
/// <param name="mask">Optional mask restricting which query/train descriptor pairs may match.</param>
public void Match(
    IInputArray queryDescriptors,
    IInputArray trainDescriptors,
    VectorOfDMatch matches,
    IInputArray mask = null)
{
    using (InputArray queryInput = queryDescriptors.GetInputArray())
    {
        using (InputArray trainInput = trainDescriptors.GetInputArray())
        {
            using (InputArray maskInput = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
            {
                CvInvoke.cveDescriptorMatcherMatch1(_descriptorMatcherPtr, queryInput, trainInput, matches, maskInput);
            }
        }
    }
}
/// <summary>
/// LOGOS (Local geometric support for high-outlier spatial verification) feature
/// matching strategy.
/// </summary>
/// <param name="keypoints1">Keypoints of image1.</param>
/// <param name="keypoints2">Keypoints of image2.</param>
/// <param name="nn1">Index of the closest BoW centroid for each descriptor of image1.</param>
/// <param name="nn2">Index of the closest BoW centroid for each descriptor of image2.</param>
/// <param name="matches1to2">Output matches selected by the LOGOS strategy.</param>
public static void MatchLOGOS(
    VectorOfKeyPoint keypoints1,
    VectorOfKeyPoint keypoints2,
    VectorOfInt nn1,
    VectorOfInt nn2,
    VectorOfDMatch matches1to2)
{
    cveMatchLOGOS(keypoints1, keypoints2, nn1, nn2, matches1to2);
}
/// <summary>
/// Estimates the transformation parameters of the given shape transformer from
/// point correspondences between two contours.
/// </summary>
/// <param name="transformer">The shape transformer to fit.</param>
/// <param name="transformingShape">Contour defining the first (source) shape.</param>
/// <param name="targetShape">Contour defining the second (target) shape.</param>
/// <param name="matches">Point correspondences between the two shapes.</param>
public static void EstimateTransformation(
    this IShapeTransformer transformer,
    IInputArray transformingShape,
    IInputArray targetShape,
    VectorOfDMatch matches)
{
    using (InputArray sourceShape = transformingShape.GetInputArray())
    using (InputArray destinationShape = targetShape.GetInputArray())
    {
        cveShapeTransformerEstimateTransformation(
            transformer.ShapeTransformerPtr,
            sourceShape,
            destinationShape,
            matches);
    }
}
/// <summary>
/// Builds a populated match result, storing all references as-is (no copies are
/// made) and initializing an empty inlier-keypoint vector.
/// </summary>
/// <param name="trainValue">Value associated with the matched train image.</param>
/// <param name="trainContour">Contour of the train object.</param>
/// <param name="trainContourColor">Color used to draw the train contour.</param>
/// <param name="bestROIMatch">Inlier/match ratio of the best ROI.</param>
/// <param name="referenceTrainImage">The matched reference train image.</param>
/// <param name="referenceTrainKeyPoints">Keypoints of the reference train image.</param>
/// <param name="keypointsEvalImage">Keypoints of the evaluated image.</param>
/// <param name="matches">Ratio-test-filtered matches.</param>
/// <param name="inliers">Homography-validated inlier matches.</param>
/// <param name="inliersMatcheMask">RANSAC inlier mask aligned with the matches.</param>
/// <param name="homography">Estimated homography between train and eval images.</param>
public Result(int trainValue, VectorOfPoint trainContour, MCvScalar trainContourColor, float bestROIMatch, Mat referenceTrainImage, VectorOfKeyPoint referenceTrainKeyPoints, VectorOfKeyPoint keypointsEvalImage, ref VectorOfDMatch matches, ref VectorOfDMatch inliers, ref VectorOfInt inliersMatcheMask, ref Mat homography)
{
    _trainValue = trainValue;
    _trainContour = trainContour;
    _trainContourColor = trainContourColor;
    _bestROIMatch = bestROIMatch;
    _referenceTrainImage = referenceTrainImage;
    _referenceTrainKeyPoints = referenceTrainKeyPoints;
    _keypointsEvalImag = keypointsEvalImage;
    _matches = matches;
    _inliers = inliers;
    _inliersMatcheMask = inliersMatcheMask;
    _homography = homography;
    _inliersKeyPoints = new VectorOfKeyPoint();
}
/// <summary>
/// Renders a VectorOfVectorOfDMatch as a multi-line debug string: each inner match
/// is JSON-serialized and followed by its class label.
/// </summary>
/// <param name="vDMatch">The nested match vector to format.</param>
/// <param name="indent">Leading indentation applied to the enclosing brackets.</param>
/// <returns>The formatted string.</returns>
public String MatchesToString(VectorOfVectorOfDMatch vDMatch, String indent = "")
{
    StringBuilder sb = new StringBuilder();
    sb.Append(indent + "[VectorOfVectorOfDMatch Size=" + vDMatch.Size);
    for (int outer = 0; outer < vDMatch.Size; outer++)
    {
        VectorOfDMatch inner = vDMatch[outer];
        for (int pos = 0; pos < inner.Size; pos++)
        {
            MDMatch dmatch = inner[pos];
            sb.Append("\n\t" + JsonConvert.SerializeObject(dmatch) + " " + GetLabel(dmatch.ImgIdx));
        }
    }
    sb.Append("\n" + indent + "]");
    return sb.ToString();
}
/// <summary>
/// Draws the matched keypoints between the model image and the observed image
/// side by side into the result image.
/// </summary>
/// <param name="modelImage">The model image.</param>
/// <param name="modelKeypoints">The keypoints in the model image.</param>
/// <param name="observedImage">The observed image.</param>
/// <param name="observedKeyPoints">The keypoints in the observed image.</param>
/// <param name="matches">Matches; each matches[i] holds k or fewer matches for the same query descriptor.</param>
/// <param name="result">Output image where both images are shown side by side with matches drawn per the flag.</param>
/// <param name="matchColor">Color of the match correspondence lines.</param>
/// <param name="singlePointColor">Color used to highlight the keypoints.</param>
/// <param name="mask">Mask selecting which matches to draw; null draws all matches.</param>
/// <param name="flags">The keypoint drawing type.</param>
public static void DrawMatches(
    IInputArray modelImage,
    VectorOfKeyPoint modelKeypoints,
    IInputArray observedImage,
    VectorOfKeyPoint observedKeyPoints,
    VectorOfDMatch matches,
    IInputOutputArray result,
    MCvScalar matchColor,
    MCvScalar singlePointColor,
    VectorOfByte mask = null,
    KeypointDrawType flags = KeypointDrawType.Default)
{
    using (InputArray modelInput = modelImage.GetInputArray())
    using (InputArray observedInput = observedImage.GetInputArray())
    using (InputOutputArray resultArray = result.GetInputOutputArray())
    {
        Features2DInvoke.drawMatchedFeatures1(observedInput, observedKeyPoints, modelInput, modelKeypoints, matches, resultArray, ref matchColor, ref singlePointColor, mask, flags);
    }
}
/// <summary>
/// Compares the input fingerprint image against every stored fingerprint and returns
/// the first one whose average descriptor (Hamming) distance falls below the
/// acceptance threshold.
/// </summary>
/// <param name="input">Fingerprint image to identify.</param>
/// <returns>The matching stored fingerprint, or null when none matches.</returns>
public async Task<FingerprintModel> CompareImages(Image input)
{
    // Acceptance threshold on the mean match distance; lower means stricter.
    const float scoreThreshold = 33;

    BFMatcher bF = new BFMatcher(DistanceType.Hamming);
    VectorOfDMatch matches = new VectorOfDMatch();
    var descriptorToCompare = FingerprintDescriptor(BitConvert.GetMatFromImage(input));
    var allFingerPrints = await _fingerPrintData.GetAll();

    foreach (FingerprintModel fingerprintDatabase in allFingerPrints)
    {
        var descriptorDatabase = FingerprintDescriptor(
            BitConvert.GetMatFromImage(fingerprintDatabase.GetFingerPrintImage()));

        // Match the candidate's descriptors against the input's.
        bF.Match(descriptorToCompare, descriptorDatabase, matches);

        // Materialize once; the original called ToArray() twice per candidate.
        MDMatch[] matchArray = matches.ToArray();

        // Explicitly skip empty match sets instead of relying on 0f/0 producing
        // NaN (NaN < threshold is false, but the intent was invisible).
        if (matchArray.Length == 0)
        {
            continue;
        }

        float score = 0;
        foreach (MDMatch match in matchArray)
        {
            score += match.Distance;
        }

        // Accept the first candidate whose mean distance is under the threshold.
        if (score / matchArray.Length < scoreThreshold)
        {
            return fingerprintDatabase;
        }
    }

    return null;
}
/// <summary>
/// Refines ratio-test matches with a RANSAC homography: estimates the homography
/// between train and eval keypoints and keeps only the matches flagged as inliers.
/// </summary>
/// <param name="evalKeypoints">Keypoints of the evaluated (query) image.</param>
/// <param name="trainKeypoints">Keypoints of the train image.</param>
/// <param name="matches">Candidate matches (QueryIdx into eval, TrainIdx into train).</param>
/// <param name="homographyOut">Receives the estimated homography.</param>
/// <param name="inliersOut">Receives the matches classified as inliers.</param>
/// <param name="inliersMaskOut">Receives the per-match RANSAC inlier mask.</param>
/// <param name="reprojectionThreshold">RANSAC reprojection threshold.</param>
/// <param name="minNumberMatchesAllowed">Minimum matches required before and inliers required after refinement.</param>
/// <returns>True when at least minNumberMatchesAllowed inliers were found.</returns>
public bool RefineMatchesWithHomography(VectorOfKeyPoint evalKeypoints, VectorOfKeyPoint trainKeypoints, VectorOfDMatch matches, ref Mat homographyOut, VectorOfDMatch inliersOut, VectorOfInt inliersMaskOut, float reprojectionThreshold, int minNumberMatchesAllowed)
{
    if (matches.Size < minNumberMatchesAllowed)
    {
        return false;
    }

    // Gather the corresponding point pairs for the homography estimation.
    PointF[] srcPoints = new PointF[matches.Size];
    PointF[] dstPoints = new PointF[matches.Size];
    for (int i = 0; i < matches.Size; ++i)
    {
        srcPoints[i] = trainKeypoints[matches[i].TrainIdx].Point;
        dstPoints[i] = evalKeypoints[matches[i].QueryIdx].Point;
    }

    // BUG FIX: the original reassigned the inliersMaskOut parameter to a brand-new
    // vector, so the caller's vector was left empty and never received the RANSAC
    // mask. Clear and reuse the caller's instance instead.
    inliersMaskOut.Clear();
    CvInvoke.FindHomography(srcPoints, dstPoints, homographyOut, Emgu.CV.CvEnum.HomographyMethod.Ransac, reprojectionThreshold, inliersMaskOut);

    // Keep only the matches whose mask entry marks them as inliers.
    for (int i = 0; i < inliersMaskOut.Size; ++i)
    {
        if (inliersMaskOut[i] > 0)
        {
            inliersOut.Push(new MDMatch[] { matches[i] });
        }
    }

    return inliersOut.Size >= minNumberMatchesAllowed;
}
/// <summary>
/// Writes a sequence of DMatch values into the file storage under the given node name.
/// </summary>
/// <param name="name">Name of the node to write.</param>
/// <param name="value">The matches to serialize.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="name"/> or <paramref name="value"/> is null.</exception>
public void Write(string name, IEnumerable<DMatch> value)
{
    if (name == null)
        throw new ArgumentNullException(nameof(name));
    if (value == null)
        throw new ArgumentNullException(nameof(value));

    using (var matchVector = new VectorOfDMatch(value))
    {
        NativeMethods.core_FileStorage_write_vectorOfDMatch(ptr, name, matchVector.CvPtr);
    }
}
/// <summary>
/// Performs 2-NN descriptor matching and keeps only matches that pass Lowe's ratio
/// test (best distance &lt;= ratio * second-best distance).
/// </summary>
/// <param name="descriptorMatcher">Matcher to use; the train descriptors are added to it.</param>
/// <param name="descriptorsEvalImage">Query descriptors of the evaluated image.</param>
/// <param name="trainDescriptors">Train descriptors to match against.</param>
/// <param name="matchesFilteredOut">Cleared, then filled with the matches that pass the ratio test.</param>
/// <param name="maxDistanceRatio">Lowe's ratio threshold (typically ~0.7-0.8).</param>
/// <returns>True when at least one match survived the ratio test.</returns>
public bool MatchDescriptorsWithRatioTest(BFMatcher descriptorMatcher, ref Mat descriptorsEvalImage, Mat trainDescriptors, ref VectorOfDMatch matchesFilteredOut, float maxDistanceRatio)
{
    // Too few train descriptors for a meaningful 2-NN ratio test.
    if (trainDescriptors.Rows < 4)
    {
        return false;
    }

    matchesFilteredOut.Clear();
    descriptorMatcher.Add(trainDescriptors);

    // Dispose the temporary k-NN result vector when done (the original leaked
    // the native handle).
    using (VectorOfVectorOfDMatch matchesKNN = new VectorOfVectorOfDMatch())
    {
        descriptorMatcher.KnnMatch(descriptorsEvalImage, matchesKNN, 2, null);

        for (int matchPos = 0; matchPos < matchesKNN.Size; ++matchPos)
        {
            VectorOfDMatch candidates = matchesKNN[matchPos];
            // Keep the best match only when it is clearly better than the runner-up.
            if (candidates.Size >= 2 && candidates[0].Distance <= maxDistanceRatio * candidates[1].Distance)
            {
                matchesFilteredOut.Push(new MDMatch[] { candidates[0] });
            }
        }
    }

    return matchesFilteredOut.Size > 0;
}
/// <summary>
/// Performs images matching.
/// </summary>
/// <param name="features1">First image features.</param>
/// <param name="features2">Second image features.</param>
/// <returns>Found matches.</returns>
/// <exception cref="ArgumentNullException">Thrown when either features argument is null.</exception>
/// <exception cref="ArgumentException">Thrown when either features argument has null Descriptors.</exception>
public virtual MatchesInfo Apply(
    ImageFeatures features1, ImageFeatures features2)
{
    ThrowIfDisposed();
    if (features1 == null)
    {
        throw new ArgumentNullException(nameof(features1));
    }
    if (features2 == null)
    {
        throw new ArgumentNullException(nameof(features2));
    }
    if (features1.Descriptors == null)
    {
        throw new ArgumentException($"{nameof(features1)}.Descriptors == null", nameof(features1));
    }
    if (features2.Descriptors == null)
    {
        // BUG FIX: the original reported nameof(features1) as the offending
        // parameter for a features2 failure.
        throw new ArgumentException($"{nameof(features2)}.Descriptors == null", nameof(features2));
    }
    features1.Descriptors.ThrowIfDisposed();
    features2.Descriptors.ThrowIfDisposed();

    // Marshal both feature sets into the native WImageFeatures layout.
    using var keypointsVec1 = new VectorOfKeyPoint(features1.Keypoints);
    using var keypointsVec2 = new VectorOfKeyPoint(features2.Keypoints);
    var features1Cpp = new WImageFeatures
    {
        ImgIdx = features1.ImgIdx,
        ImgSize = features1.ImgSize,
        Keypoints = keypointsVec1.CvPtr,
        Descriptors = features1.Descriptors.CvPtr,
    };
    var features2Cpp = new WImageFeatures
    {
        ImgIdx = features2.ImgIdx,
        ImgSize = features2.ImgSize,
        Keypoints = keypointsVec2.CvPtr,
        Descriptors = features2.Descriptors.CvPtr,
    };

    using var matchesVec = new VectorOfDMatch();
    using var inliersMaskVec = new VectorOfByte();
    // The homography Mat is intentionally not disposed here: ownership transfers
    // to the returned MatchesInfo.
    var h = new Mat();
    NativeMethods.HandleException(
        NativeMethods.stitching_FeaturesMatcher_apply(
            ptr,
            ref features1Cpp, ref features2Cpp,
            out var srcImgIdx,
            out var dstImgIdx,
            matchesVec.CvPtr,
            inliersMaskVec.CvPtr,
            out var numInliers,
            h.CvPtr,
            out var confidence));
    GC.KeepAlive(this);

    return new MatchesInfo(
        srcImgIdx, dstImgIdx, matchesVec.ToArray(), inliersMaskVec.ToArray(),
        numInliers, h, confidence);
}
/// <summary>
/// Reads a vector of DMatch values from this file node.
/// </summary>
/// <returns>The deserialized matches as an array.</returns>
public DMatch[] ReadDMatches()
{
    using (var matchVector = new VectorOfDMatch())
    {
        NativeMethods.core_FileNode_read_vectorOfDMatch(ptr, matchVector.CvPtr);
        return matchVector.ToArray();
    }
}
/// <summary>
/// Finds one best match for each query descriptor (if the masks are empty) against
/// the matcher's stored train collection.
/// </summary>
/// <param name="queryDescriptors">Query set of descriptors.</param>
/// <param name="masks">Optional masks, one per stored train descriptor set.</param>
/// <returns>The best match per query descriptor.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="queryDescriptors"/> is null.</exception>
public DMatch[] Match(Mat queryDescriptors, Mat[] masks = null)
{
    ThrowIfDisposed();
    if (queryDescriptors == null)
        throw new ArgumentNullException(nameof(queryDescriptors)); // nameof is refactor-safe, same message

    var masksPtrs = new IntPtr[0];
    if (masks != null)
    {
        masksPtrs = EnumerableEx.SelectPtrs(masks);
    }

    using (var matchesVec = new VectorOfDMatch())
    {
        NativeMethods.features2d_DescriptorMatcher_match2(
            ptr, queryDescriptors.CvPtr, matchesVec.CvPtr,
            masksPtrs, masksPtrs.Length);
        return matchesVec.ToArray();
    }
}
/// <summary>
/// Finds one best match for each query descriptor (if the mask is empty) against an
/// explicit train descriptor set.
/// </summary>
/// <param name="queryDescriptors">Query set of descriptors.</param>
/// <param name="trainDescriptors">Train set of descriptors for this call only.</param>
/// <param name="mask">Optional mask restricting permissible query/train pairs.</param>
/// <returns>The best match per query descriptor.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="queryDescriptors"/> or <paramref name="trainDescriptors"/> is null.</exception>
public DMatch[] Match(Mat queryDescriptors, Mat trainDescriptors, Mat mask = null)
{
    ThrowIfDisposed();
    if (queryDescriptors == null)
        throw new ArgumentNullException(nameof(queryDescriptors)); // nameof is refactor-safe, same message
    if (trainDescriptors == null)
        throw new ArgumentNullException(nameof(trainDescriptors));

    using (var matchesVec = new VectorOfDMatch())
    {
        NativeMethods.features2d_DescriptorMatcher_match1(
            ptr, queryDescriptors.CvPtr, trainDescriptors.CvPtr,
            matchesVec.CvPtr, Cv2.ToPtr(mask));
        return matchesVec.ToArray();
    }
}