/// <summary>
/// Compute the descriptors given the image and the keypoint locations
/// </summary>
/// <param name="image">The image from which the descriptors will be computed</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <param name="keyPoints">The keypoints where the descriptors will be computed</param>
/// <returns>The image features found at the keypoint locations</returns>
public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
{
   if (keyPoints.Length == 0) return new ImageFeature[0];

   using (VectorOfFloat descs = new VectorOfFloat())
   {
      GCHandle handle = GCHandle.Alloc(keyPoints, GCHandleType.Pinned);
      try
      {
         CvSIFTDetectorComputeDescriptors(_ptr, image, mask, handle.AddrOfPinnedObject(), keyPoints.Length, descs);
      }
      finally
      {
         handle.Free(); // always release the pinned handle, even if the native call throws
      }

      int n = keyPoints.Length;
      long address = descs.StartAddress.ToInt64();
      ImageFeature[] features = new ImageFeature[n];
      int sizeOfdescriptor = DescriptorSize;
      for (int i = 0; i < n; i++, address += sizeOfdescriptor * sizeof(float))
      {
         features[i].KeyPoint = keyPoints[i];
         float[] desc = new float[sizeOfdescriptor];
         Marshal.Copy(new IntPtr(address), desc, 0, sizeOfdescriptor);
         features[i].Descriptor = desc;
      }
      return features;
   }
}
/// <summary>
/// Match the image features from the observed image to the features from the model image, using a brute force matcher
/// </summary>
/// <param name="observedFeatures">The image features from the observed image</param>
/// <param name="k">The number of neighbors to find</param>
/// <returns>The matched features</returns>
public MatchedImageFeature[] MatchFeature(ImageFeature<TDescriptor>[] observedFeatures, int k)
{
   VectorOfKeyPoint obsKpts;
   Matrix<TDescriptor> observedDescriptors;
   ImageFeature<TDescriptor>.ConvertToRaw(observedFeatures, out obsKpts, out observedDescriptors);

   try
   {
      // Byte descriptors (e.g. BRIEF) are compared with Hamming distance; float descriptors with L2
      DistanceType dt = typeof(TDescriptor) == typeof(Byte) ? DistanceType.Hamming : DistanceType.L2;
      using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
      using (Matrix<float> dists = new Matrix<float>(observedDescriptors.Rows, k))
      using (BruteForceMatcher<TDescriptor> matcher = new BruteForceMatcher<TDescriptor>(dt))
      {
         matcher.Add(_modelDescriptors);
         matcher.KnnMatch(observedDescriptors, indices, dists, k, null);
         return ConvertToMatchedImageFeature(_modelKeyPoints, _modelDescriptors, obsKpts, observedDescriptors, indices, dists, null);
      }
   }
   finally
   {
      obsKpts.Dispose();
      observedDescriptors.Dispose();
   }
}
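A minimal usage sketch for the generic tracker's MatchFeature, assuming Emgu CV 2.x, a SURFDetector producing float descriptors, and placeholder image paths; VoteForUniqueness applies the usual nearest-neighbor distance ratio test:

// Sketch (assumptions noted above): build a tracker from a model image,
// then k-nn match the observed image's features with k = 2 for a ratio test.
SURFDetector surf = new SURFDetector(400, false); // hessian threshold 400, 64-float descriptors
using (Image<Gray, Byte> model = new Image<Gray, Byte>("model.png"))
using (Image<Gray, Byte> observed = new Image<Gray, Byte>("observed.png"))
{
   ImageFeature<float>[] modelFeatures = surf.DetectFeatures(model, null);
   Features2DTracker<float> tracker = new Features2DTracker<float>(modelFeatures);

   ImageFeature<float>[] observedFeatures = surf.DetectFeatures(observed, null);
   Features2DTracker<float>.MatchedImageFeature[] matches = tracker.MatchFeature(observedFeatures, 2);
   matches = Features2DTracker<float>.VoteForUniqueness(matches, 0.8);
}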
/// <summary>
/// Compute the descriptors given the image and the keypoint locations
/// </summary>
/// <param name="image">The image from which the descriptors will be computed</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <param name="keyPoints">The keypoints where the descriptors will be computed</param>
/// <returns>The image features found at the keypoint locations</returns>
public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
{
   if (keyPoints.Length == 0) return new ImageFeature[0];

   using (VectorOfFloat descVec = new VectorOfFloat())
   using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
   {
      kpts.Push(keyPoints);
      CvSIFTDetectorComputeDescriptors(_ptr, image, mask, kpts, descVec);

      int n = keyPoints.Length;
      float[] descs = descVec.ToArray();
      ImageFeature[] features = new ImageFeature[n];
      int sizeOfdescriptor = DescriptorSize;
      for (int i = 0; i < n; i++)
      {
         features[i].KeyPoint = keyPoints[i];
         float[] d = new float[sizeOfdescriptor];
         Array.Copy(descs, i * sizeOfdescriptor, d, 0, sizeOfdescriptor);
         features[i].Descriptor = d;
      }
      return features;
   }
}
/// <summary>
/// Convert the raw keypoints and descriptors to ImageFeature
/// </summary>
/// <param name="keyPointsVec">The raw keypoints vector</param>
/// <param name="descriptors">The raw descriptor matrix</param>
/// <returns>An array of image features</returns>
public static ImageFeature<TDescriptor>[] ConvertFromRaw(VectorOfKeyPoint keyPointsVec, Matrix<TDescriptor> descriptors)
{
   if (keyPointsVec.Size == 0) return new ImageFeature<TDescriptor>[0];
   Debug.Assert(keyPointsVec.Size == descriptors.Rows, "Size of keypoints vector does not match the number of rows of the descriptors matrix.");

   int sizeOfdescriptor = descriptors.Cols;
   MKeyPoint[] keyPoints = keyPointsVec.ToArray();
   ImageFeature<TDescriptor>[] features = new ImageFeature<TDescriptor>[keyPoints.Length];
   MCvMat header = descriptors.MCvMat;
   long address = header.data.ToInt64();
   int rowSizeInByte = sizeOfdescriptor * Marshal.SizeOf(typeof(TDescriptor));
   for (int i = 0; i < keyPoints.Length; i++, address += header.step)
   {
      features[i].KeyPoint = keyPoints[i];
      TDescriptor[] desc = new TDescriptor[sizeOfdescriptor];
      GCHandle handler = GCHandle.Alloc(desc, GCHandleType.Pinned);
      try
      {
         Toolbox.memcpy(handler.AddrOfPinnedObject(), new IntPtr(address), rowSizeInByte);
      }
      finally
      {
         handler.Free();
      }
      features[i].Descriptor = desc;
   }
   return features;
}
/// <summary>
/// Detect image features from the given image
/// </summary>
/// <param name="image">The image to detect features from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>The image features detected from the given image</returns>
public ImageFeature<TDescriptor>[] DetectFeatures(Image<Gray, Byte> image, Image<Gray, byte> mask)
{
   using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
   using (Matrix<TDescriptor> descVec = DetectAndCompute(image, mask, pts))
   {
      return ImageFeature<TDescriptor>.ConvertFromRaw(pts, descVec);
   }
}
/// <summary>
/// Detect image features from the given image
/// </summary>
/// <param name="image">The image to detect features from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>The image features detected from the given image</returns>
public ImageFeature<byte>[] DetectFeatures(Image<Gray, Byte> image, Image<Gray, byte> mask)
{
   using (VectorOfKeyPoint pts = this.DetectKeyPointsRaw(image, mask))
   using (Matrix<byte> descVec = ComputeDescriptorsRaw(image, mask, pts))
   {
      return ImageFeature<byte>.ConvertFromRaw(pts, descVec);
   }
}
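The mask parameter in the detectors above restricts detection to the mask's non-zero region. A short sketch, assuming Emgu CV 2.x drawing APIs; `detector` stands for any of the detectors in this section:

// Sketch: detect features only inside a rectangular region of interest.
using (Image<Gray, Byte> image = new Image<Gray, Byte>("scene.png"))
using (Image<Gray, Byte> mask = new Image<Gray, Byte>(image.Size)) // all zeros initially
{
   mask.Draw(new Rectangle(50, 50, 200, 150), new Gray(255), -1); // -1 thickness = filled
   ImageFeature<byte>[] features = detector.DetectFeatures(image, mask);
}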
/// <summary>
/// Create a matched feature structure.
/// </summary>
/// <param name="observedFeature">The feature from the observed image</param>
/// <param name="modelFeatures">The matched features from the model image</param>
/// <param name="dist">The distances between the feature from the observed image and the matched features from the model image</param>
public MatchedImageFeature(ImageFeature observedFeature, ImageFeature[] modelFeatures, double[] dist)
{
   ObservedFeature = observedFeature;
   _similarFeatures = new SimilarFeature[modelFeatures.Length];
   for (int i = 0; i < modelFeatures.Length; i++)
   {
      _similarFeatures[i] = new SimilarFeature(dist[i], modelFeatures[i]);
   }
}
/// <summary>
/// Use camshift to track the feature
/// </summary>
/// <param name="observedFeatures">The features found in the observed image</param>
/// <param name="initRegion">The predicted location of the model in the observed image. If not known, use MCvBox2D.Empty as default</param>
/// <param name="priorMask">The mask that should be the same size as the observed image. Contains a priori values of the probability that a match can be found. If you are not sure, pass an image filled with 1.0s</param>
/// <returns>If a match is found, the homography projection matrix is returned. Otherwise null is returned</returns>
public HomographyMatrix CamShiftTrack(ImageFeature[] observedFeatures, MCvBox2D initRegion, Image<Gray, Single> priorMask)
{
   using (Image<Gray, Single> matchMask = new Image<Gray, Single>(priorMask.Size))
   {
      #region get the list of matched points on the observed image
      Single[,,] matchMaskData = matchMask.Data;

      //Compute the matched features
      MatchedImageFeature[] matchedFeature = _matcher.MatchFeature(observedFeatures, 2, 20);
      matchedFeature = VoteForUniqueness(matchedFeature, 0.8);

      foreach (MatchedImageFeature f in matchedFeature)
      {
         PointF p = f.ObservedFeature.KeyPoint.Point;
         matchMaskData[(int)p.Y, (int)p.X, 0] = 1.0f / (float)f.SimilarFeatures[0].Distance;
      }
      #endregion

      Rectangle startRegion;
      if (initRegion.Equals(MCvBox2D.Empty))
         startRegion = matchMask.ROI;
      else
      {
         startRegion = PointCollection.BoundingRectangle(initRegion.GetVertices());
         if (startRegion.IntersectsWith(matchMask.ROI))
            startRegion.Intersect(matchMask.ROI);
      }

      CvInvoke.cvMul(matchMask.Ptr, priorMask.Ptr, matchMask.Ptr, 1.0);

      MCvConnectedComp comp;
      MCvBox2D currentRegion;
      //Update the current location
      CvInvoke.cvCamShift(matchMask.Ptr, startRegion, new MCvTermCriteria(10, 1.0e-8), out comp, out currentRegion);

      #region find the image features that belong to the current region
      MatchedImageFeature[] featuresInCurrentRegion;
      using (MemStorage stor = new MemStorage())
      {
         Contour<PointF> contour = new Contour<PointF>(stor);
         contour.PushMulti(currentRegion.GetVertices(), Emgu.CV.CvEnum.BACK_OR_FRONT.BACK);
         CvInvoke.cvBoundingRect(contour.Ptr, 1); //this is required before calling the InContour function

         featuresInCurrentRegion = Array.FindAll(matchedFeature,
            delegate(MatchedImageFeature f) { return contour.InContour(f.ObservedFeature.KeyPoint.Point) >= 0; });
      }
      #endregion

      return GetHomographyMatrixFromMatchedFeatures(VoteForSizeAndOrientation(featuresInCurrentRegion, 1.5, 20));
   }
}
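A sketch of driving CamShiftTrack frame by frame, assuming an Emgu CV 2.x Capture source and a detector/tracker pair set up as in the other snippets here (`detector` and `tracker` are placeholder names); with a uniform prior, only the match votes steer camshift:

// Sketch: track the model across video frames with a uniform prior;
// MCvBox2D.Empty is passed so camshift starts from the whole match mask.
using (Capture capture = new Capture("video.avi"))
{
   for (Image<Bgr, Byte> frame = capture.QueryFrame(); frame != null; frame = capture.QueryFrame())
   {
      using (Image<Gray, Byte> gray = frame.Convert<Gray, Byte>())
      using (Image<Gray, Single> prior = new Image<Gray, Single>(gray.Size))
      {
         prior.SetValue(new Gray(1.0)); // uniform prior: no location is favored
         ImageFeature[] observed = detector.DetectFeatures(gray, null);
         HomographyMatrix homography = tracker.CamShiftTrack(observed, MCvBox2D.Empty, prior);
      }
   }
}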
/// <summary>
/// Compute the descriptors given the image and the keypoint locations
/// </summary>
/// <param name="extractor">The descriptor extractor</param>
/// <param name="image">The image from which the descriptors will be computed</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <param name="keyPoints">The keypoints where the descriptors will be computed</param>
/// <returns>The descriptors found at the keypoint locations</returns>
public static ImageFeature<TDepth>[] ComputeDescriptors<TDepth>(this IDescriptorExtractor<TDepth> extractor, Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
   where TDepth : struct
{
   if (keyPoints.Length == 0) return new ImageFeature<TDepth>[0];

   using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
   {
      kpts.Push(keyPoints);
      using (Matrix<TDepth> descriptor = extractor.ComputeDescriptorsRaw(image, mask, kpts))
      {
         return ImageFeature<TDepth>.ConvertFromRaw(kpts, descriptor);
      }
   }
}
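A sketch of mixing a keypoint detector with a separate descriptor extractor through this extension method, assuming an Emgu CV 2.x build where FastDetector implements IKeyPointDetector and SURFDetector implements IDescriptorExtractor&lt;float&gt;:

// Sketch: FAST finds the keypoints, SURF describes them.
FastDetector fast = new FastDetector(10, true);   // threshold 10, non-max suppression on
SURFDetector surf = new SURFDetector(400, false);
using (Image<Gray, Byte> image = new Image<Gray, Byte>("scene.png"))
{
   MKeyPoint[] keyPoints = fast.DetectKeyPoints(image, null);
   ImageFeature<float>[] features = surf.ComputeDescriptors(image, null, keyPoints);
}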
/// <summary>
/// Detect image features from the given image
/// </summary>
/// <param name="image">The image to detect features from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>The image features detected from the given image</returns>
public ImageFeature[] DetectFeatures(Image<Gray, Byte> image, Image<Gray, byte> mask)
{
   using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
   using (VectorOfFloat descVec = new VectorOfFloat())
   {
      CvSIFTDetectorDetectFeature(_ptr, image, mask, pts, descVec);
      MKeyPoint[] kpts = pts.ToArray();
      float[] desc = descVec.ToArray();
      int n = kpts.Length;
      int sizeOfdescriptor = DescriptorSize;

      ImageFeature[] features = new ImageFeature[n];
      for (int i = 0; i < n; i++)
      {
         features[i].KeyPoint = kpts[i];
         float[] d = new float[sizeOfdescriptor];
         Array.Copy(desc, i * sizeOfdescriptor, d, 0, sizeOfdescriptor);
         features[i].Descriptor = d;
      }
      return features;
   }
}
/// <summary>
/// Detect image features from the given image
/// </summary>
/// <param name="image">The image to detect features from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>The image features detected from the given image</returns>
public ImageFeature[] DetectFeatures(Image<Gray, Byte> image, Image<Gray, byte> mask)
{
   using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
   using (VectorOfFloat descs = new VectorOfFloat())
   {
      CvSURFDetectorDetectFeature(ref this, image, mask, pts, descs);
      MKeyPoint[] kpts = pts.ToArray();
      int n = kpts.Length;
      long add = descs.StartAddress.ToInt64();

      ImageFeature[] features = new ImageFeature[n];
      int sizeOfdescriptor = extended == 0 ? 64 : 128; // SURF descriptors have 64 floats, or 128 in extended mode
      for (int i = 0; i < n; i++, add += sizeOfdescriptor * sizeof(float))
      {
         features[i].KeyPoint = kpts[i];
         float[] desc = new float[sizeOfdescriptor];
         Marshal.Copy(new IntPtr(add), desc, 0, sizeOfdescriptor);
         features[i].Descriptor = desc;
      }
      return features;
   }
}
/// <summary>
/// Detect image features from the given image
/// </summary>
/// <param name="image">The image to detect features from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>The image features detected from the given image</returns>
public ImageFeature[] DetectFeatures(Image<Gray, Byte> image, Image<Gray, byte> mask)
{
   using (MemStorage stor = new MemStorage())
   using (VectorOfFloat descs = new VectorOfFloat())
   {
      Seq<MKeyPoint> pts = new Seq<MKeyPoint>(stor);
      CvSIFTDetectorDetectFeature(_ptr, image, mask, pts, descs);
      MKeyPoint[] kpts = pts.ToArray();
      int n = kpts.Length;
      long add = descs.StartAddress.ToInt64();

      ImageFeature[] features = new ImageFeature[n];
      int sizeOfdescriptor = DescriptorSize;
      for (int i = 0; i < n; i++, add += sizeOfdescriptor * sizeof(float))
      {
         features[i].KeyPoint = kpts[i];
         float[] desc = new float[sizeOfdescriptor];
         Marshal.Copy(new IntPtr(add), desc, 0, sizeOfdescriptor);
         features[i].Descriptor = desc;
      }
      return features;
   }
}
/// <summary>
/// Compute the descriptors given the image and the keypoint locations
/// </summary>
/// <param name="image">The image from which the descriptors will be computed</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <param name="keyPoints">The keypoints where the descriptors will be computed</param>
/// <returns>The image features found at the keypoint locations</returns>
public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
{
   if (keyPoints.Length == 0) return new ImageFeature[0]; // guard against empty input, matching the other overloads

   using (VectorOfFloat descs = new VectorOfFloat())
   using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
   {
      kpts.Push(keyPoints);
      CvSURFDetectorComputeDescriptors(ref this, image, mask, kpts, descs);

      int n = keyPoints.Length;
      long address = descs.StartAddress.ToInt64();
      ImageFeature[] features = new ImageFeature[n];
      int sizeOfdescriptor = extended == 0 ? 64 : 128;
      for (int i = 0; i < n; i++, address += sizeOfdescriptor * sizeof(float))
      {
         features[i].KeyPoint = keyPoints[i];
         float[] desc = new float[sizeOfdescriptor];
         Marshal.Copy(new IntPtr(address), desc, 0, sizeOfdescriptor);
         features[i].Descriptor = desc;
      }
      return features;
   }
}
/// <summary>
/// Convert the raw keypoints and descriptors to ImageFeature
/// </summary>
/// <param name="keyPointsVec">The raw keypoints vector</param>
/// <param name="descriptors">The raw descriptor matrix</param>
/// <returns>An array of image features</returns>
public static ImageFeature[] ConvertToImageFeature(VectorOfKeyPoint keyPointsVec, Matrix<float> descriptors)
{
   if (keyPointsVec.Size == 0) return new ImageFeature[0];
   Debug.Assert(keyPointsVec.Size == descriptors.Rows, "Size of keypoints vector does not match the number of rows of the descriptors matrix.");

   int sizeOfdescriptor = descriptors.Cols;
   MKeyPoint[] keyPoints = keyPointsVec.ToArray();
   ImageFeature[] features = new ImageFeature[keyPoints.Length];
   MCvMat header = descriptors.MCvMat;
   long address = header.data.ToInt64();
   for (int i = 0; i < keyPoints.Length; i++, address += header.step)
   {
      features[i].KeyPoint = keyPoints[i];
      float[] desc = new float[sizeOfdescriptor];
      Marshal.Copy(new IntPtr(address), desc, 0, sizeOfdescriptor);
      features[i].Descriptor = desc;
   }
   return features;
}
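A sketch of where ConvertToImageFeature fits, assuming a detector exposing the usual *Raw methods (DetectKeyPointsRaw / ComputeDescriptorsRaw); `detector` and `image` are placeholders:

// Sketch: convert a raw keypoint vector and descriptor matrix into managed features.
using (VectorOfKeyPoint keyPoints = detector.DetectKeyPointsRaw(image, null))
using (Matrix<float> descriptors = detector.ComputeDescriptorsRaw(image, null, keyPoints))
{
   ImageFeature[] features = ConvertToImageFeature(keyPoints, descriptors);
}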
/// <summary>
/// Create an image tracker from the features extracted from the model image
/// </summary>
/// <param name="modelFeatures">The image features from the model image</param>
public Features2DTracker(ImageFeature<TDescriptor>[] modelFeatures)
{
   ImageFeature<TDescriptor>.ConvertToRaw(modelFeatures, out _modelKeyPoints, out _modelDescriptors);
}
void TestDataProcessing(ImageFeature<float>[] TrainD)
{
   // Convert each image feature's descriptor into a flat sample for the classifier
   for (int imgeindx = 0; imgeindx < TrainD.Length; imgeindx++)
   {
      sample temp = new sample();
      for (int i = 0; i < TrainD[imgeindx].Descriptor.Length; i++)
      {
         temp.features[i] = TrainD[imgeindx].Descriptor[i];
      }
      Testing_data.Add(temp);
   }
}
/// <summary>
/// Match the image features from the observed image to the features from the model image
/// </summary>
/// <param name="observedFeatures">The image features from the observed image</param>
/// <param name="k">The number of neighbors to find</param>
/// <param name="emax">For k-d tree only: the maximum number of leaves to visit</param>
/// <returns>The matched features</returns>
public MatchedImageFeature[] MatchFeature(ImageFeature[] observedFeatures, int k, int emax)
{
   if (observedFeatures.Length == 0) return new MatchedImageFeature[0];

   float[][] descriptors = new float[observedFeatures.Length][];
   for (int i = 0; i < observedFeatures.Length; i++)
      descriptors[i] = observedFeatures[i].Descriptor;

   using (Matrix<int> result1 = new Matrix<int>(descriptors.Length, k))
   using (Matrix<float> dist1 = new Matrix<float>(descriptors.Length, k))
   {
      _modelIndex.KnnSearch(CvToolbox.GetMatrixFromDescriptors(descriptors), result1, dist1, k, emax);

      int[,] indexes = result1.Data;
      float[,] distances = dist1.Data;
      MatchedImageFeature[] res = new MatchedImageFeature[observedFeatures.Length];
      List<SimilarFeature> matchedFeatures = new List<SimilarFeature>();
      for (int i = 0; i < res.Length; i++)
      {
         matchedFeatures.Clear();
         for (int j = 0; j < k; j++)
         {
            int index = indexes[i, j];
            if (index >= 0) // flann returns -1 when fewer than k neighbors are found
            {
               matchedFeatures.Add(new SimilarFeature(distances[i, j], _modelFeatures[index]));
            }
         }
         res[i].ObservedFeature = observedFeatures[i];
         res[i].SimilarFeatures = matchedFeatures.ToArray();
      }
      return res;
   }
}
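A sketch of consuming the k = 2 result with a distance ratio test, assuming a matcher and observed features set up as in the surrounding snippets:

// Sketch: keep only matches whose best distance is well below the second best.
MatchedImageFeature[] matches = matcher.MatchFeature(observedFeatures, 2, 20);
foreach (MatchedImageFeature m in matches)
{
   if (m.SimilarFeatures.Length == 2 &&
       m.SimilarFeatures[0].Distance < 0.8 * m.SimilarFeatures[1].Distance)
   {
      // m.SimilarFeatures[0].Feature is a sufficiently unique model match
   }
}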
/// <summary>
/// Create a similar image feature
/// </summary>
/// <param name="distance">The distance to the comparing image feature</param>
/// <param name="feature">A similar image feature</param>
public SimilarFeature(double distance, ImageFeature feature)
{
   _distance = distance;
   _feature = feature;
}
/// <summary>
/// Create k-d feature trees using the image features extracted from the model image.
/// </summary>
/// <param name="modelFeatures">The image features extracted from the model image</param>
public ImageFeatureMatcher(ImageFeature[] modelFeatures)
{
   Debug.Assert(modelFeatures.Length > 0, "Model features should have size > 0");

   _modelIndex = new Flann.Index(
      CvToolbox.GetMatrixFromDescriptors(
         Array.ConvertAll<ImageFeature, float[]>(
            modelFeatures,
            delegate(ImageFeature f) { return f.Descriptor; })),
      1);
   _modelFeatures = modelFeatures;
}
/// <summary>
/// Create an image tracker, where features are matched with flann
/// </summary>
/// <param name="modelFeatures">The image features from the model image</param>
public Features2DTracker(ImageFeature[] modelFeatures)
{
   _matcher = new ImageFeatureMatcher(modelFeatures);
}
/// <summary>
/// Convert the raw keypoints and descriptors to an array of managed structures.
/// </summary>
/// <param name="modelKeyPointVec">The model keypoint vector</param>
/// <param name="modelDescriptorMat">The model descriptor matrix</param>
/// <param name="observedKeyPointVec">The observed keypoint vector</param>
/// <param name="observedDescriptorMat">The observed descriptor matrix</param>
/// <param name="indices">The indices matrix</param>
/// <param name="dists">The distances matrix</param>
/// <param name="mask">The mask</param>
/// <returns>The managed MatchedImageFeature array</returns>
public static MatchedImageFeature[] ConvertToMatchedImageFeature(
   VectorOfKeyPoint modelKeyPointVec, Matrix<TDescriptor> modelDescriptorMat,
   VectorOfKeyPoint observedKeyPointVec, Matrix<TDescriptor> observedDescriptorMat,
   Matrix<int> indices, Matrix<float> dists, Matrix<Byte> mask)
{
   MKeyPoint[] modelKeyPoints = modelKeyPointVec.ToArray();
   MKeyPoint[] observedKeyPoints = observedKeyPointVec.ToArray();

   int resultLength = (mask == null) ? observedKeyPoints.Length : CvInvoke.cvCountNonZero(mask);
   MatchedImageFeature[] result = new MatchedImageFeature[resultLength];

   MCvMat modelMat = (MCvMat)Marshal.PtrToStructure(modelDescriptorMat.Ptr, typeof(MCvMat));
   long modelPtr = modelMat.data.ToInt64();
   int modelStep = modelMat.step;

   MCvMat observedMat = (MCvMat)Marshal.PtrToStructure(observedDescriptorMat.Ptr, typeof(MCvMat));
   long observedPtr = observedMat.data.ToInt64();
   int observedStep = observedMat.step;

   int descriptorLength = modelMat.cols;
   int descriptorSizeInByte = descriptorLength * Marshal.SizeOf(typeof(TDescriptor));
   int k = dists.Cols;

   TDescriptor[] tmp = new TDescriptor[descriptorLength];
   GCHandle handle = GCHandle.Alloc(tmp, GCHandleType.Pinned);
   try
   {
      IntPtr address = handle.AddrOfPinnedObject();

      int resultIdx = 0;
      for (int i = 0; i < observedKeyPoints.Length; i++)
      {
         if (mask != null && mask.Data[i, 0] == 0) continue;

         SimilarFeature[] features = new SimilarFeature[k];
         for (int j = 0; j < k; j++)
         {
            features[j].Distance = dists.Data[i, j];
            ImageFeature<TDescriptor> imgFeature = new ImageFeature<TDescriptor>();
            int idx = indices.Data[i, j];
            if (idx == -1) // fewer than k neighbors were found for this feature
            {
               Array.Resize(ref features, j);
               break;
            }
            imgFeature.KeyPoint = modelKeyPoints[idx];
            imgFeature.Descriptor = new TDescriptor[descriptorLength];
            Emgu.Util.Toolbox.memcpy(address, new IntPtr(modelPtr + modelStep * idx), descriptorSizeInByte);
            tmp.CopyTo(imgFeature.Descriptor, 0);
            features[j].Feature = imgFeature;
         }
         result[resultIdx].SimilarFeatures = features;

         ImageFeature<TDescriptor> observedFeature = new ImageFeature<TDescriptor>();
         observedFeature.KeyPoint = observedKeyPoints[i];
         observedFeature.Descriptor = new TDescriptor[descriptorLength];
         Emgu.Util.Toolbox.memcpy(address, new IntPtr(observedPtr + observedStep * i), descriptorSizeInByte);
         tmp.CopyTo(observedFeature.Descriptor, 0);
         result[resultIdx].ObservedFeature = observedFeature;
         resultIdx++;
      }
   }
   finally
   {
      handle.Free(); // release the pinned scratch buffer even if conversion throws
   }
   return result;
}
/// <summary>
/// Match the image features from the observed image to the features from the model image
/// </summary>
/// <param name="observedFeatures">The image features from the observed image</param>
/// <param name="k">The number of neighbors to find</param>
/// <param name="emax">For k-d tree only: the maximum number of leaves to visit</param>
/// <returns>The matched features</returns>
public MatchedImageFeature[] MatchFeature(ImageFeature[] observedFeatures, int k, int emax)
{
   return _matcher.MatchFeature(observedFeatures, k, emax);
}
public void Testing(ImageFeature<float>[] TestImage, List<int> desired)
{
   Testing_data.Clear();
   int[] res = new int[OutPut.Count];
   TestDataProcessing(TestImage);

   for (int sampleindex = 0; sampleindex < Testing_data.Count; sampleindex++)
   {
      #region Getting Hidden layer Output
      // RBF hidden layer: a Gaussian of the distance to each cluster center
      List<double> HidOut = new List<double>();
      for (int clusterindx = 0; clusterindx < Clusters.Count(); clusterindx++)
      {
         double seg = segma[clusterindx];
         HidOut.Add(distance(Testing_data[sampleindex].features, Clusters[clusterindx].center.features));
         HidOut[clusterindx] = -1 * Math.Pow(HidOut[clusterindx], 2);
         seg = 2 * Math.Pow(seg, 2);
         HidOut[clusterindx] = Math.Exp(HidOut[clusterindx] / seg);
      }
      #endregion

      #region Calculate Output Layer
      // Linear output layer: weighted sum of the hidden activations
      List<double> _outp = new List<double>();
      for (int Outindx = 0; Outindx < OutPut.Count; Outindx++)
      {
         List<double> weights = OutPut[Outindx].get_weights();
         double sumv = 0;
         for (int weightindx = 0; weightindx < weights.Count; weightindx++)
         {
            sumv += weights[weightindx] * HidOut[weightindx];
         }
         _outp.Add(sumv);
      }

      List<int> __outp = get_out(_outp);
      for (int cc = 0; cc < OutPut.Count; cc++)
      {
         res[cc] += __outp[cc];
      }
      #endregion
   }

   #region Find Detected Objects
   // Simple vote: a class is reported present if at least 3 samples voted for it
   string str = "";
   for (int ee = 0; ee < OutPut.Count; ee++)
   {
      if (res[ee] >= 3)
      {
         str += objects[ee];
         str += "\t";
      }
   }
   MessageBox.Show(str + " is present\n");
   #endregion
}
/// <summary>
/// Detect image features from the given image
/// </summary>
/// <param name="image">The image to detect features from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>The image features detected from the given image</returns>
public ImageFeature[] DetectFeatures(Image<Gray, Byte> image, Image<Gray, byte> mask)
{
   using (MemStorage stor = new MemStorage())
   using (VectorOfFloat descs = new VectorOfFloat())
   {
      Seq<MKeyPoint> pts = new Seq<MKeyPoint>(stor);
      CvSURFDetectorDetectFeature(ref this, image, mask, pts, descs);
      MKeyPoint[] kpts = pts.ToArray();
      int n = kpts.Length;
      long add = descs.StartAddress.ToInt64();

      ImageFeature[] features = new ImageFeature[n];
      int sizeOfdescriptor = extended == 0 ? 64 : 128;
      for (int i = 0; i < n; i++, add += sizeOfdescriptor * sizeof(float))
      {
         features[i].KeyPoint = kpts[i];
         float[] desc = new float[sizeOfdescriptor];
         Marshal.Copy(new IntPtr(add), desc, 0, sizeOfdescriptor);
         features[i].Descriptor = desc;
      }
      return features;
   }
}
/// <summary>
/// Match the image features from the observed image to the features from the model image
/// </summary>
/// <param name="observedFeatures">The image features from the observed image</param>
/// <param name="k">The number of neighbors to find</param>
/// <returns>The matched features</returns>
public MatchedImageFeature[] MatchFeature(ImageFeature[] observedFeatures, int k)
{
   VectorOfKeyPoint obsKpts;
   Matrix<float> obsDscpts;
   ConvertFromImageFeature(observedFeatures, out obsKpts, out obsDscpts);

   try
   {
      using (BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32))
      using (Matrix<int> indices = new Matrix<int>(obsKpts.Size, k))
      using (Matrix<float> dists = new Matrix<float>(indices.Size))
      {
         matcher.Add(_modelDescriptors);
         matcher.KnnMatch(obsDscpts, indices, dists, k, null);

         MatchedImageFeature[] result = new MatchedImageFeature[observedFeatures.Length];
         for (int i = 0; i < observedFeatures.Length; i++)
         {
            result[i].SimilarFeatures = new SimilarFeature[k];
            for (int j = 0; j < k; j++)
            {
               result[i].SimilarFeatures[j].Distance = dists.Data[i, j];
               result[i].SimilarFeatures[j].Feature = _modelFeatures[indices.Data[i, j]];
            }
            result[i].ObservedFeature = observedFeatures[i];
         }
         return result;
      }
   }
   finally
   {
      // dispose the temporary keypoint vector and descriptor matrix even if matching throws
      obsKpts.Dispose();
      obsDscpts.Dispose();
   }
}
/// <summary>
/// Create an image tracker from the features extracted from the model image
/// </summary>
/// <param name="modelFeatures">The image features from the model image</param>
public Features2DTracker(ImageFeature[] modelFeatures)
{
   ConvertFromImageFeature(modelFeatures, out _modelKeyPoints, out _modelDescriptors);
   _modelFeatures = modelFeatures;
}
/// <summary>
/// Detect if the model features exist in the observed features. If true, a homography matrix is returned, otherwise null is returned.
/// </summary>
/// <param name="observedFeatures">The observed features</param>
/// <param name="uniquenessThreshold">The distance ratio below which a match is considered unique; a good value is 0.8</param>
/// <returns>If the model features exist in the observed features, a homography matrix is returned, otherwise null is returned.</returns>
public HomographyMatrix Detect(ImageFeature[] observedFeatures, double uniquenessThreshold)
{
   MatchedImageFeature[] matchedGoodFeatures = MatchFeature(observedFeatures, 2, 20);

   // Filter for unique matches using the nearest-neighbor distance ratio
   matchedGoodFeatures = VoteForUniqueness(matchedGoodFeatures, uniquenessThreshold);
   if (matchedGoodFeatures.Length < 4) return null; // a homography needs at least 4 point pairs

   // Filter for matches that agree on scale and rotation
   matchedGoodFeatures = VoteForSizeAndOrientation(matchedGoodFeatures, 1.5, 20);
   if (matchedGoodFeatures.Length < 4) return null;

   return GetHomographyMatrixFromMatchedFeatures(matchedGoodFeatures);
}
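A sketch of acting on the returned homography, assuming Emgu CV 2.x; `tracker`, `observedFeatures`, `model` and `observedImage` are placeholders from the surrounding snippets:

// Sketch: project the model's corners into the observed image and outline the match.
HomographyMatrix homography = tracker.Detect(observedFeatures, 0.8);
if (homography != null)
{
   Rectangle rect = model.ROI;
   PointF[] corners = new PointF[]
   {
      new PointF(rect.Left, rect.Top),
      new PointF(rect.Right, rect.Top),
      new PointF(rect.Right, rect.Bottom),
      new PointF(rect.Left, rect.Bottom)
   };
   homography.ProjectPoints(corners); // projects the points in place
   observedImage.DrawPolyline(Array.ConvertAll(corners, Point.Round), true, new Bgr(Color.Red), 2);
}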
/// <summary>
/// Convert the image features to a keypoint vector and a descriptor matrix
/// </summary>
private static void ConvertFromImageFeature(ImageFeature[] features, out VectorOfKeyPoint keyPoints, out Matrix<float> descriptors)
{
   keyPoints = new VectorOfKeyPoint();
   keyPoints.Push(Array.ConvertAll<ImageFeature, MKeyPoint>(features, delegate(ImageFeature feature) { return feature.KeyPoint; }));

   int descriptorLength = features[0].Descriptor.Length;
   descriptors = new Matrix<float>(features.Length, descriptorLength);
   float[,] data = descriptors.Data;
   for (int i = 0; i < features.Length; i++)
   {
      for (int j = 0; j < descriptorLength; j++)
         data[i, j] = features[i].Descriptor[j];
   }
}
/// <summary>
/// Create a similar image feature
/// </summary>
/// <param name="distance">The distance to the comparing image feature</param>
/// <param name="feature">A similar image feature</param>
public SimilarFeature(double distance, ImageFeature<TDescriptor> feature)
{
   _distance = distance;
   _feature = feature;
}