// Use this for initialization void Start () { Texture2D imgTexture = Resources.Load ("lena") as Texture2D; Mat img1Mat = new Mat (imgTexture.height, imgTexture.width, CvType.CV_8UC3); Utils.texture2DToMat (imgTexture, img1Mat); Debug.Log ("img1Mat dst ToString " + img1Mat.ToString ()); Mat img2Mat = new Mat (imgTexture.height, imgTexture.width, CvType.CV_8UC3); Utils.texture2DToMat (imgTexture, img2Mat); Debug.Log ("img2Mat dst ToString " + img2Mat.ToString ()); float angle = UnityEngine.Random.Range (0, 360), scale = 1.0f; Point center = new Point (img2Mat.cols () * 0.5f, img2Mat.rows () * 0.5f); Mat affine_matrix = Imgproc.getRotationMatrix2D (center, angle, scale); Imgproc.warpAffine (img1Mat, img2Mat, affine_matrix, img2Mat.size ()); FeatureDetector detector = FeatureDetector.create (FeatureDetector.ORB); DescriptorExtractor extractor = DescriptorExtractor.create (DescriptorExtractor.ORB); MatOfKeyPoint keypoints1 = new MatOfKeyPoint (); Mat descriptors1 = new Mat (); detector.detect (img1Mat, keypoints1); extractor.compute (img1Mat, keypoints1, descriptors1); MatOfKeyPoint keypoints2 = new MatOfKeyPoint (); Mat descriptors2 = new Mat (); detector.detect (img2Mat, keypoints2); extractor.compute (img2Mat, keypoints2, descriptors2); DescriptorMatcher matcher = DescriptorMatcher.create (DescriptorMatcher.BRUTEFORCE_HAMMINGLUT); MatOfDMatch matches = new MatOfDMatch (); matcher.match (descriptors1, descriptors2, matches); Mat resultImg = new Mat (); Features2d.drawMatches (img1Mat, keypoints1, img2Mat, keypoints2, matches, resultImg); Texture2D texture = new Texture2D (resultImg.cols (), resultImg.rows (), TextureFormat.RGBA32, false); Utils.matToTexture2D (resultImg, texture); gameObject.GetComponent<Renderer> ().material.mainTexture = texture; }
// Use this for initialization void Start() { Texture2D imgTexture = Resources.Load ("detect_blob") as Texture2D; Mat imgMat = new Mat (imgTexture.height, imgTexture.width, CvType.CV_8UC1); Utils.texture2DToMat (imgTexture, imgMat); Debug.Log ("imgMat dst ToString " + imgMat.ToString ()); Mat outImgMat = new Mat (); FeatureDetector blobDetector = FeatureDetector.create (FeatureDetector.SIMPLEBLOB); blobDetector.read (Utils.getFilePath ("blobparams.yml")); MatOfKeyPoint keypoints = new MatOfKeyPoint (); blobDetector.detect (imgMat, keypoints); Features2d.drawKeypoints (imgMat, keypoints, outImgMat); Texture2D texture = new Texture2D (outImgMat.cols (), outImgMat.rows (), TextureFormat.RGBA32, false); Utils.matToTexture2D (outImgMat, texture); gameObject.GetComponent<Renderer> ().material.mainTexture = texture; }
// Called once per frame with the current camera image: matches the frame's
// keypoints against the previous frame and draws colored point trails.
public void UpdateScreen(Mat mat) {
    using (MatOfKeyPoint keypoints = new MatOfKeyPoint())
    using (Mat descriptors = new Mat()) {
        detector.detect(mat, keypoints);
        extractor.compute(mat, keypoints, descriptors);

        int matchCount = 0;
        var trainPoints = keypoints.toArray();
        var newBuffPointL = new List<List<Point>>();
        var newBuffColorL = new List<Scalar>();

        // Give every new keypoint a trail list and a random trail color.
        foreach (var keyPoint in trainPoints) {
            var points = new List<Point>();
            points.Add(keyPoint.pt);
            newBuffPointL.Add(points);

            Scalar color = new Scalar(255, 225, 225, 100);
            var x = Random.Range(0, 256);
            switch (Random.Range(0, 6)) {
            case 0: color = new Scalar(255, 0, x, 100); break;
            case 1: color = new Scalar(0, 255, x, 100); break;
            case 2: color = new Scalar(0, x, 255, 100); break;
            case 3: color = new Scalar(225, x, 0, 100); break;
            case 4: color = new Scalar(x, 0, 255, 100); break;
            case 5: color = new Scalar(x, 255, 0, 100); break;
            }
            newBuffColorL.Add(color);
        }

        if (buffPointL.Count > 0) {
            using (MatOfDMatch matches = new MatOfDMatch())
            using (MatOfDMatch crossMatches = new MatOfDMatch()) {
                // Cross-check: match in both directions and keep only symmetric pairs.
                matcher.match(buffDescriptors, descriptors, matches);
                matcher.match(descriptors, buffDescriptors, crossMatches);
                var matchL = matches.toArray();
                var crossMatchL = crossMatches.toArray();

                foreach (DMatch match in matchL) {
                    bool flag = false;
                    foreach (DMatch crossMatch in crossMatchL) {
                        if (match.trainIdx == crossMatch.queryIdx && match.queryIdx == crossMatch.trainIdx) {
                            flag = true;
                        }
                    }
                    if (match.distance > filter) {
                        flag = false;
                    }

                    if (flag) {
                        var trainPoint = trainPoints[match.trainIdx];
                        var queryPoints = buffPointL[match.queryIdx];
                        Scalar color = buffColorL[match.queryIdx];

                        // Draw the current point and connect it to its past positions.
                        Imgproc.circle(mat, trainPoint.pt, 4, color, -1);
                        Point startPoint = trainPoint.pt;
                        foreach (Point queryPoint in queryPoints) {
                            Imgproc.line(mat, startPoint, queryPoint, color, 2);
                            Imgproc.circle(mat, queryPoint, 4, color, -1);
                            startPoint = queryPoint;
                            newBuffPointL[match.trainIdx].Add(queryPoint);
                        }
                        newBuffColorL[match.trainIdx] = buffColorL[match.queryIdx];
                        matchCount++;
                    }
                }
            }
        }

        // Carry the trails, colors and descriptors over to the next frame.
        buffDescriptors.Dispose();
        buffPointL = newBuffPointL;
        buffColorL = newBuffColorL;
        buffDescriptors = descriptors.clone();

        text = string.Format("Matching Count : {0}.", matchCount);
        DestroyImmediate(tex);
        tex = new Texture2D(ARCameraManager.Instance.Width, ARCameraManager.Instance.Height);
        OpenCVForUnity.Utils.matToTexture2D(mat, tex);
        ARCameraManager.Instance.UpdateScreenTexture(tex);
    }
}
public static void drawMatchesKnn(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, List<MatOfDMatch> matches1to2, Mat outImg) {
    if (img1 != null) { img1.ThrowIfDisposed(); }
    if (keypoints1 != null) { keypoints1.ThrowIfDisposed(); }
    if (img2 != null) { img2.ThrowIfDisposed(); }
    if (keypoints2 != null) { keypoints2.ThrowIfDisposed(); }
    if (outImg != null) { outImg.ThrowIfDisposed(); }

    Mat keypoints1_mat = keypoints1;
    Mat keypoints2_mat = keypoints2;
    List<Mat> matches1to2_tmplm = new List<Mat>((matches1to2 != null) ? matches1to2.Count : 0);
    Mat matches1to2_mat = Converters.vector_vector_DMatch_to_Mat(matches1to2, matches1to2_tmplm);

    features2d_Features2d_drawMatchesKnn_14(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj);
}
public static void drawMatchesKnn(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, List<MatOfDMatch> matches1to2, Mat outImg, Scalar matchColor, Scalar singlePointColor, List<MatOfByte> matchesMask) {
    if (img1 != null) { img1.ThrowIfDisposed(); }
    if (keypoints1 != null) { keypoints1.ThrowIfDisposed(); }
    if (img2 != null) { img2.ThrowIfDisposed(); }
    if (keypoints2 != null) { keypoints2.ThrowIfDisposed(); }
    if (outImg != null) { outImg.ThrowIfDisposed(); }

    Mat keypoints1_mat = keypoints1;
    Mat keypoints2_mat = keypoints2;
    List<Mat> matches1to2_tmplm = new List<Mat>((matches1to2 != null) ? matches1to2.Count : 0);
    Mat matches1to2_mat = Converters.vector_vector_DMatch_to_Mat(matches1to2, matches1to2_tmplm);
    List<Mat> matchesMask_tmplm = new List<Mat>((matchesMask != null) ? matchesMask.Count : 0);
    Mat matchesMask_mat = Converters.vector_vector_char_to_Mat(matchesMask, matchesMask_tmplm);

    features2d_Features2d_drawMatchesKnn_11(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj, matchColor.val[0], matchColor.val[1], matchColor.val[2], matchColor.val[3], singlePointColor.val[0], singlePointColor.val[1], singlePointColor.val[2], singlePointColor.val[3], matchesMask_mat.nativeObj);
}
/// <summary>
/// Refines the matches with homography.
/// </summary>
/// <returns><c>true</c>, if the matches were refined with a homography, <c>false</c> otherwise.</returns>
/// <param name="queryKeypoints">Query keypoints.</param>
/// <param name="trainKeypoints">Train keypoints.</param>
/// <param name="reprojectionThreshold">Reprojection threshold.</param>
/// <param name="matches">Matches.</param>
/// <param name="homography">Homography.</param>
static bool refineMatchesWithHomography (MatOfKeyPoint queryKeypoints, MatOfKeyPoint trainKeypoints, float reprojectionThreshold, MatOfDMatch matches, Mat homography) {
    int minNumberMatchesAllowed = 8;

    List<KeyPoint> queryKeypointsList = queryKeypoints.toList();
    List<KeyPoint> trainKeypointsList = trainKeypoints.toList();
    List<DMatch> matchesList = matches.toList();

    if (matchesList.Count < minNumberMatchesAllowed) {
        return false;
    }

    // Prepare data for cv::findHomography
    List<Point> srcPointsList = new List<Point>(matchesList.Count);
    List<Point> dstPointsList = new List<Point>(matchesList.Count);
    for (int i = 0; i < matchesList.Count; i++) {
        srcPointsList.Add(trainKeypointsList[matchesList[i].trainIdx].pt);
        dstPointsList.Add(queryKeypointsList[matchesList[i].queryIdx].pt);
    }

    // Find homography matrix and get inliers mask
    int inliersCount = 0;
    using (MatOfPoint2f srcPoints = new MatOfPoint2f())
    using (MatOfPoint2f dstPoints = new MatOfPoint2f())
    using (MatOfByte inliersMask = new MatOfByte(new byte[srcPointsList.Count])) {
        srcPoints.fromList(srcPointsList);
        dstPoints.fromList(dstPointsList);

        // Calib3d.RANSAC is the robust-estimation flag for findHomography
        // (FM_RANSAC is the findFundamentalMat constant).
        Calib3d.findHomography(srcPoints, dstPoints, Calib3d.RANSAC, reprojectionThreshold, inliersMask, 2000, 0.955).copyTo(homography);

        if (homography.rows() != 3 || homography.cols() != 3) {
            return false;
        }

        // Keep only the matches flagged as inliers by RANSAC.
        List<byte> inliersMaskList = inliersMask.toList();
        List<DMatch> inliers = new List<DMatch>();
        for (int i = 0; i < inliersMaskList.Count; i++) {
            if (inliersMaskList[i] == 1) {
                inliers.Add(matchesList[i]);
            }
        }
        matches.fromList(inliers);
        inliersCount = inliers.Count;
    }

    // Succeed only if enough inlier matches survive the refinement.
    return inliersCount > minNumberMatchesAllowed;
}
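// A hypothetical call site for refineMatchesWithHomography. All variables here
// are assumptions: queryKeypoints/trainKeypoints/matches would come from the
// usual detect/compute/match pipeline shown in the other snippets, and
// patternCorners/sceneCorners are MatOfPoint2f containers for the pattern's
// corner points.
Mat homography = new Mat ();
bool refined = refineMatchesWithHomography (queryKeypoints, trainKeypoints, 3.0f, matches, homography);
if (refined) {
    // 'matches' now holds only the RANSAC inliers, and 'homography' maps
    // train (pattern) coordinates into the query image, so the pattern
    // outline can be projected into the scene:
    Core.perspectiveTransform (patternCorners, sceneCorners, homography);
}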
public void Tracking(Mat mat) {
    using (MatOfKeyPoint keypoints = new MatOfKeyPoint())
    using (Mat descriptors = new Mat()) {
        detector.detect(mat, keypoints);
        extractor.compute(mat, keypoints, descriptors);
        var trainPoints = keypoints.toArray();

        // Start a fresh landmark/descriptor list from this frame's keypoints.
        List<List<Vector3>> newLandmarks = new List<List<Vector3>>();
        List<List<Mat>> newDescriptors = new List<List<Mat>>();
        for (int i = 0; i < trainPoints.Length; i++) {
            var keyVectorL = new List<Vector3>();
            keyVectorL.Add(ARCameraManager.Instance.ToVector(trainPoints[i]));
            var descriptorL = new List<Mat>();
            // Clone so the row data outlives the per-frame 'descriptors' Mat.
            descriptorL.Add(descriptors.clone().row(i));
            newLandmarks.Add(keyVectorL);
            newDescriptors.Add(descriptorL);
        }

        List<Vector3> FromVectorL = new List<Vector3>();
        List<Vector3> ToVectorL = new List<Vector3>();
        List<int> FinalizingL = new List<int>();
        bool finLMedS = false;

        if (FinalizingLandmarks.Count > 0) {
            using (MatOfDMatch matchesFinal = new MatOfDMatch())
            using (MatOfDMatch crossMatchesFinal = new MatOfDMatch()) {
                // Cross-check the finalized landmarks against this frame.
                matcher.match(FinalizingLandmarkDescriptors, descriptors, matchesFinal);
                matcher.match(descriptors, FinalizingLandmarkDescriptors, crossMatchesFinal);
                var matchLFinal = matchesFinal.toArray();
                var crossMatchLFinal = crossMatchesFinal.toArray();

                foreach (DMatch match in matchLFinal) {
                    bool flag = false;
                    foreach (DMatch crossMatch in crossMatchLFinal) {
                        if (match.trainIdx == crossMatch.queryIdx && match.queryIdx == crossMatch.trainIdx) {
                            flag = true;
                        }
                    }
                    if (match.distance > MatchFilter) {
                        flag = false;
                    }
                    if (flag) {
                        FromVectorL.Add(newLandmarks[match.trainIdx][0]);
                        ToVectorL.Add(FinalizingLandmarks[match.queryIdx]);
                        FinalizingL.Add(match.trainIdx);
                        newLandmarks[match.trainIdx][0] = FinalizingLandmarks[match.queryIdx];
                        newDescriptors[match.trainIdx][0] = FinalizingLandmarkDescriptors.row(match.queryIdx);
                    }
                }

                Quaternion newAttitude;
                float error = ARCameraManager.Instance.LMedS(FromVectorL, ToVectorL, out newAttitude);
                if (error > 0 && LMedSFilter > error) {
                    Attitude = newAttitude;
                    _trackingEvent.Invoke(Attitude);
                    _matchingEvent.Invoke(FromVectorL.Count);
                    //ARCameraManager.Instance.UpdateCameraPosture(Attitude);
                    Debug.Log(string.Format("Attitude = {0}\nError = {1}\nFinalizeMatch = {2}\nAccuracy = {3}", Attitude, error, FinalizingL.Count, 100 * FinalizingL.Count / FromVectorL.Count));
                    finLMedS = true;
                }
            }
        }

        if (ProvisioningLandmarks.Count > 0) {
            using (MatOfDMatch matches = new MatOfDMatch())
            using (MatOfDMatch crossMatches = new MatOfDMatch()) {
                // Cross-check the provisional landmarks against this frame.
                Mat optimisationDescriptors = OptimisationDescriptors;
                matcher.match(optimisationDescriptors, descriptors, matches);
                matcher.match(descriptors, optimisationDescriptors, crossMatches);
                var matchL = matches.toArray();
                var crossMatchL = crossMatches.toArray();

                foreach (DMatch match in matchL) {
                    bool flag = false;
                    foreach (DMatch crossMatch in crossMatchL) {
                        if (match.trainIdx == crossMatch.queryIdx && match.queryIdx == crossMatch.trainIdx) {
                            flag = true;
                        }
                    }
                    if (match.distance > MatchFilter) {
                        flag = false;
                    }
                    if (flag) {
                        if (FinalizingL.IndexOf(match.trainIdx) < 0) {
                            var trainVectors = newLandmarks[match.trainIdx];
                            var queryVectors = ProvisioningLandmarks[match.queryIdx];
                            Vector3 queryVector;
                            int filter = OptimisationLandmark(queryVectors, Attitude, out queryVector);
                            if (filter > 0) {
                                if ((filter > SufficientCount) && (matchL.Length * FinalizedPercentage < FinalizingL.Count || matchL.Length * FinalizedPercentage > FinalizingLandmarks.Count)) {
                                    // Promote a sufficiently observed provisional landmark to finalized.
                                    FinalizingLandmarks.Add(queryVector);
                                    if (FinalizingLandmarkDescriptors != null) {
                                        FinalizingLandmarkDescriptors.push_back(optimisationDescriptors.row(match.queryIdx));
                                    } else {
                                        FinalizingLandmarkDescriptors = optimisationDescriptors.row(match.queryIdx);
                                    }
                                    Debug.Log(string.Format("Finalizing :Landmark = {0}\nDescriptors = {1}\nCount ALL = {2}", queryVector, optimisationDescriptors.row(match.queryIdx).ToStringMat(), FinalizingLandmarks.Count));
                                } else {
                                    FromVectorL.Add(trainVectors[0]);
                                    ToVectorL.Add(queryVector);
                                    newLandmarks[match.trainIdx].AddRange(queryVectors.ToArray());
                                    newDescriptors[match.trainIdx].AddRange(ProvisioningLandmarkDescriptors[match.queryIdx].ToArray());
                                }
                            }
                        }
                    }
                }
            }
        }

        if (FromVectorL.Count == ToVectorL.Count && ToVectorL.Count > 0) {
            Quaternion newAttitude;
            float error = ARCameraManager.Instance.LMedS(FromVectorL, ToVectorL, out newAttitude);
            if ((error > 0 && LMedSFilter > error) && (!finLMedS)) {
                Attitude = newAttitude;
                _trackingEvent.Invoke(Attitude);
                //ARCameraManager.Instance.UpdateCameraPosture(Attitude);
                Debug.Log(string.Format("Attitude = {0}\nError = {1}\nFinalizeMatch = {2}\nAccuracy = {3}", Attitude, error, FinalizingL.Count, 100 * FinalizingL.Count / FromVectorL.Count));
            }
            for (int i = 0; i < newLandmarks.Count; i++) {
                if (FinalizingL.IndexOf(i) < 0) {
                    newLandmarks[i][0] = Attitude * newLandmarks[i][0];
                }
            }
        }

        _matchingEvent.Invoke(FromVectorL.Count);
        FromVectorL.Clear();
        ToVectorL.Clear();
        ProvisioningLandmarks.Clear();
        ProvisioningLandmarks = newLandmarks;
        ProvisioningLandmarkDescriptors = newDescriptors;
    }
}
/**
 * GMS (Grid-based Motion Statistics) feature matching strategy described in CITE: Bian2017gms .
 * param size1 Input size of image1.
 * param size2 Input size of image2.
 * param keypoints1 Input keypoints of image1.
 * param keypoints2 Input keypoints of image2.
 * param matches1to2 Input 1-nearest neighbor matches.
 * param matchesGMS Matches returned by the GMS matching strategy.
 * <b>Note:</b>
 * Since GMS works well when the number of features is large, we recommend using the ORB feature and setting FastThreshold to 0 to get as many features as possible quickly.
 * If the matching results are not satisfying, please add more features. (We use 10000 for images of 640 x 480.)
 * If your images have big rotation and scale changes, please set withRotation or withScale to true.
 */
public static void matchGMS(Size size1, Size size2, MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, MatOfDMatch matchesGMS) {
    if (keypoints1 != null) { keypoints1.ThrowIfDisposed(); }
    if (keypoints2 != null) { keypoints2.ThrowIfDisposed(); }
    if (matches1to2 != null) { matches1to2.ThrowIfDisposed(); }
    if (matchesGMS != null) { matchesGMS.ThrowIfDisposed(); }

    Mat keypoints1_mat = keypoints1;
    Mat keypoints2_mat = keypoints2;
    Mat matches1to2_mat = matches1to2;
    Mat matchesGMS_mat = matchesGMS;
    xfeatures2d_Xfeatures2d_matchGMS_13(size1.width, size1.height, size2.width, size2.height, keypoints1_mat.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, matchesGMS_mat.nativeObj);
}
//javadoc: matchGMS(size1, size2, keypoints1, keypoints2, matches1to2, matchesGMS, withRotation)
public static void matchGMS(Size size1, Size size2, MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, MatOfDMatch matchesGMS, bool withRotation) {
    if (keypoints1 != null) { keypoints1.ThrowIfDisposed(); }
    if (keypoints2 != null) { keypoints2.ThrowIfDisposed(); }
    if (matches1to2 != null) { matches1to2.ThrowIfDisposed(); }
    if (matchesGMS != null) { matchesGMS.ThrowIfDisposed(); }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    Mat keypoints1_mat = keypoints1;
    Mat keypoints2_mat = keypoints2;
    Mat matches1to2_mat = matches1to2;
    Mat matchesGMS_mat = matchesGMS;
    xfeatures2d_Xfeatures2d_matchGMS_12(size1.width, size1.height, size2.width, size2.height, keypoints1_mat.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, matchesGMS_mat.nativeObj, withRotation);
    return;
#else
    return;
#endif
}
public ImageString MatchFeatures(string base64image, List<string> base64imageList) {
    List<MatOfDMatch> winnerMatches = new List<MatOfDMatch>();
    MatOfKeyPoint winnerKeyPoints = new MatOfKeyPoint();
    Mat winnerImage = new Mat();
    int winnerIndex = -1;
    int winnerValue = 0;

    Texture2D imgTexture = base64ImageToTexture(base64image);
    List<Texture2D> imgTextures = new List<Texture2D>();
    for (int i = 0; i < base64imageList.Count; i++) {
        imgTextures.Add(base64ImageToTexture(base64imageList[i]));
    }

    //Create Mat from texture
    Mat img1Mat = new Mat(imgTexture.height, imgTexture.width, CvType.CV_8UC3);
    Utils.texture2DToMat(imgTexture, img1Mat);

    MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
    Mat descriptors1 = new Mat();
    FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB);
    DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);

    //Detect keypoints and compute descriptors from photo.
    detector.detect(img1Mat, keypoints1);
    extractor.compute(img1Mat, keypoints1, descriptors1);
    Debug.Log("Photo features: " + descriptors1.rows());
    if (descriptors1.rows() < 10) {
        Debug.Log("Too few descriptors in the original image");
        return new ImageString(base64image, winnerIndex);
    }

    //Run through each image in list
    for (int i = 0; i < imgTextures.Count; i++) {
        Texture2D imgTexture2 = imgTextures[i];

        //Create Mat from texture
        Mat img2Mat = new Mat(imgTexture2.height, imgTexture2.width, CvType.CV_8UC3);
        Utils.texture2DToMat(imgTexture2, img2Mat);

        //Find keypoints and descriptors from image in list
        MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
        Mat descriptors2 = new Mat();
        detector.detect(img2Mat, keypoints2);
        extractor.compute(img2Mat, keypoints2, descriptors2);

        //Match photo with image from list
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT);
        Debug.Log("Image " + i + " features: " + descriptors2.rows());
        if (descriptors2.rows() < 10) {
            Debug.Log("Too few descriptors in test image: " + i);
            continue;
        }

        List<MatOfDMatch> matchList = new List<MatOfDMatch>();
        matcher.knnMatch(descriptors1, descriptors2, matchList, 2);

        //Find the good matches (Lowe's ratio test) and put them in a list
        List<MatOfDMatch> good = new List<MatOfDMatch>();
        foreach (MatOfDMatch match in matchList) {
            DMatch[] arrayDmatch = match.toArray();
            if (arrayDmatch[0].distance < 0.7f * arrayDmatch[1].distance) {
                good.Add(match);
            }
        }

        //Keep the best matching image based on the good lists
        if (good.Count > winnerThreshold && good.Count > winnerValue) {
            winnerImage = img2Mat;
            winnerMatches = good;
            winnerKeyPoints = keypoints2;
            winnerIndex = i;
            winnerValue = good.Count;
        }
    }

    Debug.Log("The winner is image: " + winnerIndex + " with a value of: " + winnerValue);

    //If no winner, just return the original image
    if (winnerIndex == -1) {
        Debug.Log("No winner");
        return new ImageString(base64image, winnerIndex);
    }

    //Find the matching keypoints from the winner list.
    MatOfPoint2f queryPoints = new MatOfPoint2f();
    MatOfPoint2f matchPoints = new MatOfPoint2f();
    List<Point> queryPointsList = new List<Point>();
    List<Point> matchPointsList = new List<Point>();
    List<KeyPoint> keypoints1List = keypoints1.toList();
    List<KeyPoint> winnerKeyPointsList = winnerKeyPoints.toList();
    foreach (MatOfDMatch match in winnerMatches) {
        DMatch[] arrayDmatch = match.toArray();
        queryPointsList.Add(keypoints1List[arrayDmatch[0].queryIdx].pt);
        matchPointsList.Add(winnerKeyPointsList[arrayDmatch[0].trainIdx].pt);
    }
    queryPoints.fromList(queryPointsList);
    matchPoints.fromList(matchPointsList);

    //Calculate the homography of the best matching image
    Mat homography = Calib3d.findHomography(queryPoints, matchPoints, Calib3d.RANSAC, 5.0);
    Mat resultImg = new Mat();
    Imgproc.warpPerspective(img1Mat, resultImg, homography, winnerImage.size());

    //Show image
    Texture2D texture = new Texture2D(winnerImage.cols(), winnerImage.rows(), TextureFormat.RGBA32, false);
    Utils.matToTexture2D(resultImg, texture);
    return new ImageString(Convert.ToBase64String(texture.EncodeToPNG()), winnerIndex);
}
// Draw matches between two images
public static Mat getMatchesImage(Mat query, Mat pattern, MatOfKeyPoint queryKp, MatOfKeyPoint trainKp, MatOfDMatch matches, int maxMatchesDrawn) {
    Mat outImg = new Mat();

    List<DMatch> matchesList = matches.toList();
    if (matchesList.Count > maxMatchesDrawn) {
        // matches.resize (maxMatchesDrawn);
        matchesList.RemoveRange(maxMatchesDrawn, matchesList.Count - maxMatchesDrawn);
    }

    MatOfDMatch tmpMatches = new MatOfDMatch();
    tmpMatches.fromList(matchesList);

    Features2d.drawMatches(
        query, queryKp,
        pattern, trainKp,
        tmpMatches, outImg,
        new Scalar(0, 200, 0, 255),
        Scalar.all(-1),
        new MatOfByte(),
        Features2d.NOT_DRAW_SINGLE_POINTS);

    return outImg;
}
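// A hypothetical debugging usage of getMatchesImage: draw the top 50 matches
// and show them on a quad. grayMat, patternMat, the keypoint mats and
// debugQuad are assumptions, not defined in the snippet above.
Mat debugImg = getMatchesImage(grayMat, patternMat, queryKeypoints, patternKeypoints, matches, 50);
Texture2D debugTexture = new Texture2D(debugImg.cols(), debugImg.rows(), TextureFormat.RGBA32, false);
Utils.matToTexture2D(debugImg, debugTexture);
debugQuad.GetComponent<Renderer>().material.mainTexture = debugTexture;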
public void UpdateAttitude(Mat mat) {
    int landmarksCount = 0;
    int matchesCount = 0;
    using (MatOfKeyPoint keypoints = new MatOfKeyPoint())
    using (Mat descriptors = new Mat()) {
        detector.detect(mat, keypoints);
        extractor.compute(mat, keypoints, descriptors);
        var trainPoints = keypoints.toArray();

        List<List<Vector3>> newLandmarks = new List<List<Vector3>>();
        foreach (var keyPoint in trainPoints) {
            var keyVectorL = new List<Vector3>();
            keyVectorL.Add(ARCameraManager.Instance.ToVector(keyPoint));
            newLandmarks.Add(keyVectorL);
            landmarksCount++;
        }

        if (Landmarks.Count > 0) {
            List<Vector3> FromVectorL = new List<Vector3>();
            List<Vector3> ToVectorL = new List<Vector3>();
            using (MatOfDMatch matches = new MatOfDMatch())
            using (MatOfDMatch crossMatches = new MatOfDMatch()) {
                // Cross-check matching between the stored map and the current frame.
                matcher.match(MapDescriptors, descriptors, matches);
                matcher.match(descriptors, MapDescriptors, crossMatches);
                var matchL = matches.toArray();
                var crossMatchL = crossMatches.toArray();

                foreach (DMatch match in matchL) {
                    bool flag = false;
                    foreach (DMatch crossMatch in crossMatchL) {
                        if (match.trainIdx == crossMatch.queryIdx && match.queryIdx == crossMatch.trainIdx) {
                            flag = true;
                            matchesCount++;
                        }
                    }
                    if (match.distance > MatchFilter) {
                        flag = false;
                    }
                    if (flag) {
                        var trainVectors = newLandmarks[match.trainIdx];
                        var queryVectors = Landmarks[match.queryIdx];
                        FromVectorL.Add(trainVectors[0]);
                        //ToVectorL.Add(queryVectors.ToArray().Median()); START
                        double[] queryPointsX = new double[queryVectors.Count];
                        double[] queryPointsY = new double[queryVectors.Count];
                        for (int j = 0; j < queryVectors.Count; j++) {
                            var queryPoint = ARCameraManager.Instance.toPoint(queryVectors[j], Attitude);
                            queryPointsX[j] = queryPoint.x;
                            queryPointsY[j] = queryPoint.y;
                        }
                        ToVectorL.Add(Attitude * ARCameraManager.Instance.ToVector(new Point(queryPointsX.Median(), queryPointsY.Median())));
                        //ToVectorL.Add(queryVectors.ToArray().Median()); END
                        newLandmarks[match.trainIdx].AddRange(queryVectors.ToArray());
                    }
                }

                // Estimate the new camera attitude with LMedS; accept it only when the error is small enough.
                Quaternion newAttitude;
                float error = ARCameraManager.Instance.LMedS(FromVectorL, ToVectorL, out newAttitude);
                _matchTestEvent.Invoke(FromVectorL.Count);
                FromVectorL.Clear();
                ToVectorL.Clear();
                if (error > 0 && LMedSFilter > error) {
                    Attitude = newAttitude;
                    _trackingTestEvent.Invoke(Attitude);
                    //ARCameraManager.Instance.UpdateCameraPosture(Attitude);
                    if (debugMode) {
                        Debug.Log(string.Format("Attitude = {0}\nError = {1}", Attitude, error));
                    }
                }
                foreach (var newLandmark in newLandmarks) {
                    newLandmark[0] = Attitude * newLandmark[0];
                }
            }
        }

        // Replace the map with the landmarks and descriptors of the current frame.
        MapDescriptors.Dispose();
        Landmarks.Clear();
        Landmarks = newLandmarks;
        MapDescriptors = descriptors.clone();
    }

    if (debugMode) {
        Debug.Log(string.Format("time : {0} Landmarks : {1}, Matches : {2}.", 1 / (Time.time - startime), landmarksCount, matchesCount));
    }
    startime = Time.time;
}
public List<ImageObject> MatchFeatures(string base64image, List<string> base64imageList) {
    ImageObject myImage = new ImageObject();
    ImageObject winnerImage = new ImageObject();
    List<ImageObject> returnImageList = new List<ImageObject>();

    Texture2D imgTexture = base64ImageToTexture(base64image);
    List<Texture2D> imgTextures = new List<Texture2D>();
    for (int i = 0; i < base64imageList.Count; i++) {
        imgTextures.Add(base64ImageToTexture(base64imageList[i]));
    }

    //Create Mat from texture
    Mat img1Mat = new Mat(imgTexture.height, imgTexture.width, CvType.CV_8UC3);
    Utils.texture2DToMat(imgTexture, img1Mat);

    MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
    Mat descriptors1 = new Mat();
    FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB);
    DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);

    //Detect keypoints and compute descriptors from photo.
    detector.detect(img1Mat, keypoints1);
    extractor.compute(img1Mat, keypoints1, descriptors1);
    //Debug.Log("Photo features: " + descriptors1.rows());

    myImage.image = base64image;
    myImage.keyPoints = keypoints1;
    myImage.imageMat = img1Mat;

    if (descriptors1.rows() < 10) {
        Debug.Log("Too few descriptors in the original image");
        //No winner as there are too few descriptors.
        return returnImageList;
    }

    //Run through each image in list
    //-------------------------------------------------------------
    for (int i = 0; i < imgTextures.Count; i++) {
        Texture2D imgTexture2 = imgTextures[i];

        //Create Mat from texture
        Mat img2Mat = new Mat(imgTexture2.height, imgTexture2.width, CvType.CV_8UC3);
        Utils.texture2DToMat(imgTexture2, img2Mat);

        //Find keypoints and descriptors from image in list
        MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
        Mat descriptors2 = new Mat();
        detector.detect(img2Mat, keypoints2);
        extractor.compute(img2Mat, keypoints2, descriptors2);

        //Match photo with image from list
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT);
        //Debug.Log("Image " + i + " features: " + descriptors2.rows());
        if (descriptors2.rows() < 10) {
            Debug.Log("Too few descriptors in test image: " + i);
            continue;
        }

        List<MatOfDMatch> matchList = new List<MatOfDMatch>();
        matcher.knnMatch(descriptors1, descriptors2, matchList, 2);

        //Find the good matches (Lowe's ratio test) and put them in a list
        List<MatOfDMatch> good = new List<MatOfDMatch>();
        foreach (MatOfDMatch match in matchList) {
            DMatch[] arrayDmatch = match.toArray();
            if (arrayDmatch[0].distance < 0.7f * arrayDmatch[1].distance) {
                good.Add(match);
            }
        }

        //Keep the best matching image based on the good lists
        if (good.Count > winnerThreshold && good.Count > winnerImage.value) {
            winnerImage.index = i;
            winnerImage.imageMat = img2Mat;
            winnerImage.keyPoints = keypoints2;
            winnerImage.value = good.Count;
            winnerImage.matches = good;
        }
    }
    // Run through done
    //-------------------------------------------------------------

    Debug.Log("The winner is image: " + winnerImage.index + " with a value of: " + winnerImage.value);

    //If no winner, just return the original image
    if (winnerImage.index == -1) {
        Debug.Log("No winner");
        return returnImageList;
    }

    //Copy the winning Mat into a texture before encoding it.
    Texture2D imageTexture = new Texture2D(winnerImage.imageMat.cols(), winnerImage.imageMat.rows(), TextureFormat.RGBA32, false);
    Utils.matToTexture2D(winnerImage.imageMat, imageTexture);
    winnerImage.image = Convert.ToBase64String(imageTexture.EncodeToPNG());

    returnImageList.Add(myImage);
    returnImageList.Add(winnerImage);
    return returnImageList;
}
/// <summary>
/// Extracts ORB point features.
/// (Get feature points with ORB.)
/// </summary>
/// <param name="srcMat">Source mat.</param>
/// <param name="dstKeyPoints">Dst keypoints.</param>
/// <param name="dstDescriptors">Dst descriptors.</param>
public static void ORBPointFeature(this Mat srcMat, MatOfKeyPoint dstKeyPoints, Mat dstDescriptors) {
    ORBDetector.detect(srcMat, dstKeyPoints);
    ORBExtractor.compute(srcMat, dstKeyPoints, dstDescriptors);
}
public static int ORBMatcher(Mat queryMat, Mat trainMat, MatOfKeyPoint queryKeypoints, MatOfKeyPoint trainKeypoints, out IList<DMatch> matches) {
    using (MatOfFloat queryDescriptors = new MatOfFloat())
    using (MatOfFloat trainDescriptors = new MatOfFloat()) {
        queryMat.ORBPointFeature(queryKeypoints, queryDescriptors);
        trainMat.ORBPointFeature(trainKeypoints, trainDescriptors);

        // ORB produces binary CV_8U descriptors; compute() re-types the
        // underlying native Mats, so this check passes for ORB despite the
        // MatOfFloat declarations.
        if (queryDescriptors.type() == CvType.CV_8U && trainDescriptors.type() == CvType.CV_8U) {
            matches = Utils.CrossMatcher(queryDescriptors, trainDescriptors);
            return (matches.Count > 0) ? 0 : -1;
        } else {
            matches = null;
            return -1;
        }
    }
}
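// A minimal usage sketch for ORBMatcher; queryMat and trainMat are assumed to
// be 8-bit grayscale Mats of the two frames to compare.
IList<DMatch> frameMatches;
MatOfKeyPoint queryKp = new MatOfKeyPoint();
MatOfKeyPoint trainKp = new MatOfKeyPoint();
if (ORBMatcher(queryMat, trainMat, queryKp, trainKp, out frameMatches) == 0) {
    Debug.Log("cross-checked matches: " + frameMatches.Count);
}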
private void Update() {
    inputMat = webCamTextureToMatHelper.GetMat();

    MatOfKeyPoint camKeyPoints = new MatOfKeyPoint();
    Mat camDescriptors = new Mat();
    Imgproc.cvtColor(inputMat, grayMat, Imgproc.COLOR_BGR2GRAY);
    detector.detect(grayMat, camKeyPoints);
    extractor.compute(grayMat, camKeyPoints, camDescriptors);
    if (camKeyPoints.toList().Count < 1) {
        return;
    }

    List<MatOfDMatch> matches = new List<MatOfDMatch>();
    matcher.knnMatch(makerDescriptors, camDescriptors, matches, 2);

    //-- Filter matches using the Lowe's ratio test
    float ratioThresh = 0.75f;
    List<DMatch> listOfGoodMatches = new List<DMatch>();
    for (int i = 0; i < matches.Count; i++) {
        if (matches[i].rows() > 1) {
            DMatch[] dMatches = matches[i].toArray();
            if (dMatches[0].distance < ratioThresh * dMatches[1].distance) {
                listOfGoodMatches.Add(dMatches[0]);
            }
        }
    }
    MatOfDMatch goodMatches = new MatOfDMatch();
    goodMatches.fromList(listOfGoodMatches);

    //-- Draw matches
    Mat resultImg = new Mat();
    Features2d.drawMatches(makerMat, makerKeyPoints, grayMat, camKeyPoints, goodMatches, resultImg);

    //listOfGoodMatches = goodMatches.toList();
    ////-- Localize the object
    //List<Point> obj = new List<Point>();
    //List<Point> scene = new List<Point>();
    //List<KeyPoint> listOfKeypointsObject = makerKeyPoints.toList();
    //List<KeyPoint> listOfKeypointsScene = camKeyPoints.toList();
    //for (int i = 0; i < listOfGoodMatches.Count(); i++)
    //{
    //    //-- Get the keypoints from the good matches
    //    obj.Add(listOfKeypointsObject[listOfGoodMatches[i].queryIdx].pt);
    //    scene.Add(listOfKeypointsScene[listOfGoodMatches[i].trainIdx].pt);
    //}
    //MatOfPoint2f objMat = new MatOfPoint2f();
    //MatOfPoint2f sceneMat = new MatOfPoint2f();
    //objMat.fromList(obj);
    //sceneMat.fromList(scene);
    //double ransacReprojThreshold = 3.0;
    //Mat H = Calib3d.findHomography(objMat, sceneMat, Calib3d.RANSAC, ransacReprojThreshold);
    ////-- Get the corners from the image_1 ( the object to be "detected" )
    //Mat objCorners = new Mat(4, 1, CvType.CV_32FC2);
    //Mat sceneCorners = new Mat();
    //float[] objCornersData = new float[(int)(objCorners.total() * objCorners.channels())];
    //objCorners.get(0, 0, objCornersData);
    //objCornersData[0] = 0;
    //objCornersData[1] = 0;
    //objCornersData[2] = makerMat.cols();
    //objCornersData[3] = 0;
    //objCornersData[4] = makerMat.cols();
    //objCornersData[5] = makerMat.rows();
    //objCornersData[6] = 0;
    //objCornersData[7] = makerMat.rows();
    //objCorners.put(0, 0, objCornersData);
    //Core.perspectiveTransform(objCorners, sceneCorners, H);
    //byte[] sceneCornersData = new byte[(int)(sceneCorners.total() * sceneCorners.channels())];
    //sceneCorners.get(0, 0, sceneCornersData);
    ////-- Draw lines between the corners (the mapped object in the scene - image_2 )
    //Imgproc.line(resultImg, new Point(sceneCornersData[0] + makerMat.cols(), sceneCornersData[1]),
    //    new Point(sceneCornersData[2] + makerMat.cols(), sceneCornersData[3]), new Scalar(0, 255, 0), 4);
    //Imgproc.line(resultImg, new Point(sceneCornersData[2] + makerMat.cols(), sceneCornersData[3]),
    //    new Point(sceneCornersData[4] + makerMat.cols(), sceneCornersData[5]), new Scalar(0, 255, 0), 4);
    //Imgproc.line(resultImg, new Point(sceneCornersData[4] + makerMat.cols(), sceneCornersData[5]),
    //    new Point(sceneCornersData[6] + makerMat.cols(), sceneCornersData[7]), new Scalar(0, 255, 0), 4);
    //Imgproc.line(resultImg, new Point(sceneCornersData[6] + makerMat.cols(), sceneCornersData[7]),
    //    new Point(sceneCornersData[0] + makerMat.cols(), sceneCornersData[1]), new Scalar(0, 255, 0), 4);

    // Create the display texture once and reuse it on later frames.
    if (!first) {
        texture = new Texture2D(resultImg.cols(), resultImg.rows(), TextureFormat.RGBA32, false);
        dstQuad.GetComponent<Renderer>().material.mainTexture = texture;
        first = true;
    }
    Utils.matToTexture2D(resultImg, texture);
}
//javadoc: drawMatchesKnn(img1, keypoints1, img2, keypoints2, matches1to2, outImg)
public static void drawMatchesKnn(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, List<MatOfDMatch> matches1to2, Mat outImg) {
    if (img1 != null) { img1.ThrowIfDisposed(); }
    if (keypoints1 != null) { keypoints1.ThrowIfDisposed(); }
    if (img2 != null) { img2.ThrowIfDisposed(); }
    if (keypoints2 != null) { keypoints2.ThrowIfDisposed(); }
    if (outImg != null) { outImg.ThrowIfDisposed(); }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    Mat keypoints1_mat = keypoints1;
    Mat keypoints2_mat = keypoints2;
    List<Mat> matches1to2_tmplm = new List<Mat>((matches1to2 != null) ? matches1to2.Count : 0);
    Mat matches1to2_mat = Converters.vector_vector_DMatch_to_Mat(matches1to2, matches1to2_tmplm);
    features2d_Features2d_drawMatchesKnn_14(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj);
    return;
#else
    return;
#endif
}
// Use this for initialization void Start() { Texture2D imgTexture = Resources.Load("lena") as Texture2D; Mat img1Mat = new Mat(imgTexture.height, imgTexture.width, CvType.CV_8UC3); Utils.texture2DToMat(imgTexture, img1Mat); Debug.Log("img1Mat.ToString() " + img1Mat.ToString()); Mat img2Mat = new Mat(imgTexture.height, imgTexture.width, CvType.CV_8UC3); Utils.texture2DToMat(imgTexture, img2Mat); Debug.Log("img2Mat.ToString() " + img2Mat.ToString()); float angle = UnityEngine.Random.Range(0, 360), scale = 1.0f; Point center = new Point(img2Mat.cols() * 0.5f, img2Mat.rows() * 0.5f); Mat affine_matrix = Imgproc.getRotationMatrix2D(center, angle, scale); Imgproc.warpAffine(img1Mat, img2Mat, affine_matrix, img2Mat.size()); ORB detector = ORB.create(); DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB); MatOfKeyPoint keypoints1 = new MatOfKeyPoint(); Mat descriptors1 = new Mat(); detector.detect(img1Mat, keypoints1); extractor.compute(img1Mat, keypoints1, descriptors1); MatOfKeyPoint keypoints2 = new MatOfKeyPoint(); Mat descriptors2 = new Mat(); detector.detect(img2Mat, keypoints2); extractor.compute(img2Mat, keypoints2, descriptors2); DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMINGLUT); MatOfDMatch matches = new MatOfDMatch(); matcher.match(descriptors1, descriptors2, matches); Mat resultImg = new Mat(); Features2d.drawMatches(img1Mat, keypoints1, img2Mat, keypoints2, matches, resultImg); Texture2D texture = new Texture2D(resultImg.cols(), resultImg.rows(), TextureFormat.RGBA32, false); Utils.matToTexture2D(resultImg, texture); gameObject.GetComponent <Renderer> ().material.mainTexture = texture; }
/// <summary>
/// Finds the pattern.
/// </summary>
/// <returns><c>true</c>, if the pattern was found, <c>false</c> otherwise.</returns>
/// <param name="image">Image.</param>
/// <param name="info">Info.</param>
public bool findPattern(Mat image, PatternTrackingInfo info) {
    // Convert input image to gray
    getGray(image, m_grayImg);

    // Extract feature points from input gray image
    extractFeatures(m_grayImg, m_queryKeypoints, m_queryDescriptors);

    // Get matches with current pattern
    getMatches(m_queryDescriptors, m_matches);

    // Find homography transformation and detect good matches
    bool homographyFound = refineMatchesWithHomography(
        m_queryKeypoints,
        m_pattern.keypoints,
        homographyReprojectionThreshold,
        m_matches,
        m_roughHomography);

    if (homographyFound) {
        // If homography refinement is enabled, improve the found transformation
        if (enableHomographyRefinement) {
            // Warp image using found homography
            Imgproc.warpPerspective(m_grayImg, m_warpedImg, m_roughHomography, m_pattern.size, Imgproc.WARP_INVERSE_MAP | Imgproc.INTER_CUBIC);

            // Get refined matches:
            using (MatOfKeyPoint warpedKeypoints = new MatOfKeyPoint())
            using (MatOfDMatch refinedMatches = new MatOfDMatch()) {
                // Detect features on warped image
                extractFeatures(m_warpedImg, warpedKeypoints, m_queryDescriptors);

                // Match with pattern
                getMatches(m_queryDescriptors, refinedMatches);

                // Estimate new refinement homography
                homographyFound = refineMatchesWithHomography(
                    warpedKeypoints,
                    m_pattern.keypoints,
                    homographyReprojectionThreshold,
                    refinedMatches,
                    m_refinedHomography);
            }

            // Get the result homography as the matrix product of the refined and rough homographies:
            // info.homography = m_roughHomography * m_refinedHomography;
            Core.gemm(m_roughHomography, m_refinedHomography, 1, new Mat(), 0, info.homography);

            // Transform contour with precise homography
            Core.perspectiveTransform(m_pattern.points2d, info.points2d, info.homography);
        } else {
            info.homography = m_roughHomography;

            // Transform contour with rough homography
            Core.perspectiveTransform(m_pattern.points2d, info.points2d, m_roughHomography);
        }
    }

    return homographyFound;
}
//
// C++: void cv::xfeatures2d::matchGMS(Size size1, Size size2, vector_KeyPoint keypoints1, vector_KeyPoint keypoints2, vector_DMatch matches1to2, vector_DMatch& matchesGMS, bool withRotation = false, bool withScale = false, double thresholdFactor = 6.0)
//
/**
 * GMS (Grid-based Motion Statistics) feature matching strategy described in CITE: Bian2017gms .
 * param size1 Input size of image1.
 * param size2 Input size of image2.
 * param keypoints1 Input keypoints of image1.
 * param keypoints2 Input keypoints of image2.
 * param matches1to2 Input 1-nearest neighbor matches.
 * param matchesGMS Matches returned by the GMS matching strategy.
 * param withRotation Take rotation transformation into account.
 * param withScale Take scale transformation into account.
 * param thresholdFactor The higher, the fewer matches.
 * <b>Note:</b>
 * Since GMS works well when the number of features is large, we recommend using the ORB feature and setting FastThreshold to 0 to get as many features as possible quickly.
 * If the matching results are not satisfying, please add more features. (We use 10000 for images of 640 x 480.)
 * If your images have big rotation and scale changes, please set withRotation or withScale to true.
 */
public static void matchGMS(Size size1, Size size2, MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, MatOfDMatch matchesGMS, bool withRotation, bool withScale, double thresholdFactor) {
    if (keypoints1 != null) { keypoints1.ThrowIfDisposed(); }
    if (keypoints2 != null) { keypoints2.ThrowIfDisposed(); }
    if (matches1to2 != null) { matches1to2.ThrowIfDisposed(); }
    if (matchesGMS != null) { matchesGMS.ThrowIfDisposed(); }

    Mat keypoints1_mat = keypoints1;
    Mat keypoints2_mat = keypoints2;
    Mat matches1to2_mat = matches1to2;
    Mat matchesGMS_mat = matchesGMS;
    xfeatures2d_Xfeatures2d_matchGMS_10(size1.width, size1.height, size2.width, size2.height, keypoints1_mat.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, matchesGMS_mat.nativeObj, withRotation, withScale, thresholdFactor);
}
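// Following the note above: a sketch of a full GMS pipeline using ORB with
// FastThreshold set to 0 and brute-force Hamming 1-NN matches as input.
// img1/img2 are assumed 8-bit grayscale Mats; verify the Xfeatures2d class
// name and ORB API against your OpenCVForUnity version.
ORB orb = ORB.create(10000);
orb.setFastThreshold(0);
MatOfKeyPoint kp1 = new MatOfKeyPoint(), kp2 = new MatOfKeyPoint();
Mat desc1 = new Mat(), desc2 = new Mat();
orb.detectAndCompute(img1, new Mat(), kp1, desc1);
orb.detectAndCompute(img2, new Mat(), kp2, desc2);

DescriptorMatcher bf = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
MatOfDMatch nnMatches = new MatOfDMatch();
bf.match(desc1, desc2, nnMatches);

// Reject motion-inconsistent matches with GMS (withRotation = true here).
MatOfDMatch gmsMatches = new MatOfDMatch();
Xfeatures2d.matchGMS(img1.size(), img2.size(), kp1, kp2, nnMatches, gmsMatches, true, false, 6.0);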
private void TryProcessImage(int index) {
    if (UseWebCam == false) {
        CurrentTexture = Sources[index];
    } else {
        CurrentTexture = webCamTexture;
    }

    using (Mat imgMat = new Mat(CurrentTexture.height, CurrentTexture.width, CvType.CV_8UC1))
    using (FeatureDetector blobDetector = FeatureDetector.create(FeatureDetector.SIMPLEBLOB))
    using (Mat outImgMat = new Mat())
    using (MatOfKeyPoint keypoints = new MatOfKeyPoint()) {
        if (CurrentTexture is Texture2D) {
            Utils.texture2DToMat(CurrentTexture as Texture2D, imgMat);
        } else if (CurrentTexture is WebCamTexture) {
            Utils.webCamTextureToMat(CurrentTexture as WebCamTexture, imgMat);
        } else {
            Utils.textureToMat(CurrentTexture, imgMat);
        }
        Debug.Log("imgMat dst ToString " + imgMat.ToString());

        // Binarize (Otsu) and erode before blob detection.
        Imgproc.threshold(imgMat, imgMat, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
        Imgproc.erode(imgMat, imgMat, erodeMat, new Point(1, 1), 5);

        blobDetector.read(Utils.getFilePath("blobparams.yml"));
        blobDetector.detect(imgMat, keypoints);
        Features2d.drawKeypoints(imgMat, keypoints, outImgMat);

        KeyPoint[] points = keypoints.toArray();
        ProcessKeyPoints(points, outImgMat);

        Mat finalMat = outImgMat;
        // Recreate the display texture only when its size changes.
        if (texture != null && (texture.width != finalMat.cols() || texture.height != finalMat.rows())) {
            DestroyImmediate(texture);
            texture = null;
        }
        if (texture == null) {
            texture = new Texture2D(finalMat.cols(), finalMat.rows(), TextureFormat.RGBA32, false);
        }
        Utils.matToTexture2D(finalMat, texture);
        gameObject.GetComponent<Renderer>().material.mainTexture = texture;
    }
}
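// 'erodeMat' above is a field defined elsewhere in the class; a plausible
// definition (an assumption, not part of the original snippet) is a small
// rectangular structuring element:
Mat erodeMat = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));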