public void TestQuaternionsMultiplicationPerformance()
{
    // Benchmark quaternion multiplication: build one quaternion from random
    // Euler angles, then time one million accumulated multiplications.
    Quaternions q = new Quaternions();
    Random rng = new Random();
    q.SetEuler(rng.NextDouble(), rng.NextDouble(), rng.NextDouble());

    Stopwatch timer = Stopwatch.StartNew();
    Quaternions product = Quaternions.Empty;
    int remaining = 1000000;
    while (remaining-- > 0)
    {
        product *= q;
    }
    timer.Stop();

    EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds", timer.ElapsedMilliseconds));
}
public void TestRTreesLetterRecognition()
{
   // Trains a random-trees classifier on the letter-recognition data set and
   // reports prediction accuracy on the training split vs. the hold-out split.
   Matrix <float> data, response;
   ReadLetterRecognitionData(out data, out response);

   // First 80% of the rows are used for training, the rest for testing.
   int trainingSampleCount = (int)(data.Rows * 0.8);

   // One var-type entry per feature column plus one for the response column.
   Matrix <Byte> varType = new Matrix <byte>(data.Cols + 1, 1);
   varType.SetValue((byte)MlEnum.VarType.Numerical);              //the data is numerical
   varType[data.Cols, 0] = (byte)MlEnum.VarType.Categorical;      //the response is catagorical

   // Sample mask: 255 marks a row as part of the training subset.
   Matrix <byte> sampleIdx = new Matrix <byte>(data.Rows, 1);
   using (Matrix <byte> sampleRows = sampleIdx.GetRows(0, trainingSampleCount, 1))
      sampleRows.SetValue(255);

   using (RTrees forest = new RTrees())
   using (
      TrainData td = new TrainData(data, MlEnum.DataLayoutType.RowSample, response,
         null, sampleIdx, null, varType))
   {
      // Forest hyper-parameters.
      forest.MaxDepth = 10;
      forest.MinSampleCount = 10;
      forest.RegressionAccuracy = 0.0f;
      forest.UseSurrogates = false;
      forest.MaxCategories = 15;
      forest.CalculateVarImportance = true;
      forest.ActiveVarCount = 4;
      forest.TermCriteria = new MCvTermCriteria(100, 0.01f);

      bool success = forest.Train(td);
      if (!success)
      {
         // NOTE(review): training failure is silently ignored here rather than
         // asserted — presumably intentional for environments without the data.
         return;
      }

      double trainDataCorrectRatio = 0;
      double testDataCorrectRatio = 0;
      for (int i = 0; i < data.Rows; i++)
      {
         using (Matrix <float> sample = data.GetRow(i))
         {
            // A prediction within 1e-5 of the recorded label counts as correct.
            double r = forest.Predict(sample, null);
            r = Math.Abs(r - response[i, 0]);
            if (r < 1.0e-5)
            {
               if (i < trainingSampleCount)
               {
                  trainDataCorrectRatio++;
               }
               else
               {
                  testDataCorrectRatio++;
               }
            }
         }
      }

      // Convert raw hit counts into ratios for each split.
      trainDataCorrectRatio /= trainingSampleCount;
      testDataCorrectRatio /= (data.Rows - trainingSampleCount);

      // Variable-importance reporting is currently disabled (see the block
      // below); only the literal header string gets printed.
      StringBuilder builder = new StringBuilder("Variable Importance: ");
      /*
       * using (Matrix<float> varImportance = forest.VarImportance)
       * {
       *    for (int i = 0; i < varImportance.Cols; i++)
       *    {
       *       builder.AppendFormat("{0} ", varImportance[0, i]);
       *    }
       * }*/

      EmguAssert.WriteLine(String.Format("Prediction accuracy for training data :{0}%", trainDataCorrectRatio * 100));
      EmguAssert.WriteLine(String.Format("Prediction accuracy for test data :{0}%", testDataCorrectRatio * 100));

      EmguAssert.WriteLine(builder.ToString());
   }
}
public void TestSURFDetector2()
{
    // Exercises the SURF detector two ways — combined detect+compute versus
    // detect-only — reporting the timing of each, then draws the keypoints.
    //Trace.WriteLine("Size of MCvSURFParams: " + Marshal.SizeOf(typeof(MCvSURFParams)));

    // FIX: SURF, VectorOfKeyPoint, Mat and the loaded Image are IDisposable
    // and were previously never disposed; wrap them in using statements.
    using (Image <Gray, byte> box = EmguAssert.LoadImage <Gray, byte>("box.png"))
    using (SURF detector = new SURF(400))
    using (VectorOfKeyPoint vp1 = new VectorOfKeyPoint())
    using (Mat descriptors1 = new Mat())
    {
        // Pass 1: joint keypoint detection + descriptor computation.
        Stopwatch watch = Stopwatch.StartNew();
        detector.DetectAndCompute(box, null, vp1, descriptors1, false);
        watch.Stop();
        EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

        // Pass 2: keypoint detection only. Restart() replaces the previous
        // Reset()+Start() pair.
        watch.Restart();
        MKeyPoint[] keypoints = detector.Detect(box, null);
        //ImageFeature<float>[] features2 = detector.Compute(box, keypoints);
        watch.Stop();
        EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

        // FIX: removed a dead trailing watch.Reset()/watch.Start() pair — the
        // code that consumed that third measurement is fully commented out.
        //MCvSURFParams p = detector.SURFParams;
        //SURFFeature[] features3 = box.ExtractSURF(ref p);
        //watch.Stop();
        //EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

        // EmguAssert.IsTrue(features1.Length == features2.Length);
        //EmguAssert.IsTrue(features2.Length == features3.Length);

        // Keypoint -> PointF conversion (retained for the legacy checks above;
        // the result itself is not asserted on).
        PointF[] pts =
#if NETFX_CORE
            Extensions.
#else
            Array.
#endif
            ConvertAll <MKeyPoint, PointF>(keypoints, delegate(MKeyPoint mkp)
            {
                return(mkp.Point);
            });
        //SURFFeature[] features = box.ExtractSURF(pts, null, ref detector);
        //int count = features.Length;

        // Visual sanity check: draw every detected keypoint on the image.
        foreach (MKeyPoint kp in keypoints)
        {
            box.Draw(new CircleF(kp.Point, kp.Size), new Gray(255), 1);
        }
    }
}
public static bool TestFeature2DTracker(Feature2D keyPointDetector, Feature2D descriptorGenerator)
{
   // Matches features between a model image ("box.png") and an observed scene
   // ("box_in_scene.png"), estimates a homography from the matches, and draws
   // the projected model outline onto a combined image.
   // Returns true when a homography could be recovered, false otherwise.
   //for (int k = 0; k < 1; k++)
   {
      // When the same object both detects and describes, use its combined
      // DetectAndCompute path; otherwise run the two stages separately.
      Feature2D feature2D = null;
      if (keyPointDetector == descriptorGenerator)
      {
         feature2D = keyPointDetector as Feature2D;
      }

      Mat modelImage = EmguAssert.LoadMat("box.png");
      //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
      //modelImage = modelImage.Resize(400, 400, true);
      //modelImage._EqualizeHist();

      #region extract features from the object image
      Stopwatch stopwatch = Stopwatch.StartNew();
      VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
      Mat modelDescriptors = new Mat();
      if (feature2D != null)
      {
         feature2D.DetectAndCompute(modelImage, null, modelKeypoints, modelDescriptors, false);
      }
      else
      {
         keyPointDetector.DetectRaw(modelImage, modelKeypoints);
         descriptorGenerator.Compute(modelImage, modelKeypoints, modelDescriptors);
      }
      stopwatch.Stop();
      EmguAssert.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
      #endregion

      //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
      Image <Gray, Byte> observedImage = EmguAssert.LoadImage <Gray, byte>("box_in_scene.png");
      //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
      //image = image.Resize(400, 400, true);
      //observedImage._EqualizeHist();

      #region extract features from the observed image
      stopwatch.Reset();
      stopwatch.Start();
      VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
      using (Mat observedDescriptors = new Mat())
      {
         if (feature2D != null)
         {
            feature2D.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);
         }
         else
         {
            keyPointDetector.DetectRaw(observedImage, observedKeypoints);
            descriptorGenerator.Compute(observedImage, observedKeypoints, observedDescriptors);
         }
         stopwatch.Stop();
         EmguAssert.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
         #endregion

         // Merge the object image and the observed image into one big image
         // for display (model on top, scene below).
         Image <Gray, Byte> res = modelImage.ToImage <Gray, Byte>().ConcateVertical(observedImage);

         // Corners of the model image; projected through the homography later
         // to outline the detected object in the scene.
         Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
         PointF[] pts = new PointF[] {
            new PointF(rect.Left, rect.Bottom),
            new PointF(rect.Right, rect.Bottom),
            new PointF(rect.Right, rect.Top),
            new PointF(rect.Left, rect.Top)
         };

         Mat homography = null;

         stopwatch.Reset();
         stopwatch.Start();

         // k-nearest-neighbour matching (k = 2, for the ratio test inside
         // VoteForUniqueness). Hamming distance for binary descriptors,
         // L2 otherwise.
         int k = 2;
         DistanceType dt = modelDescriptors.Depth == CvEnum.DepthType.Cv8U ? DistanceType.Hamming : DistanceType.L2;
         //using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
         //using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
         using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
         using (BFMatcher matcher = new BFMatcher(dt))
         {
            //ParamDef[] parameterDefs = matcher.GetParams();
            matcher.Add(modelDescriptors);

            matcher.KnnMatch(observedDescriptors, matches, k, null);

            // Match mask starts fully enabled (255); the voting passes below
            // progressively zero out rejected matches.
            // NOTE(review): this Mat is not disposed — confirm whether the
            // test framework tolerates the leak or a using block is wanted.
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            //mask.SetValue(255);
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

            // A homography needs at least 4 surviving correspondences.
            int nonZeroCount = CvInvoke.CountNonZero(mask);
            if (nonZeroCount >= 4)
            {
               nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints, matches, mask, 1.5, 20);
               if (nonZeroCount >= 4)
               {
                  homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints, observedKeypoints, matches, mask, 2);
               }
            }
         }
         stopwatch.Stop();
         EmguAssert.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));

         bool success = false;
         if (homography != null)
         {
            // Project the model corners into scene coordinates, shift them
            // below the model image in the stacked display, and draw the
            // resulting quadrilateral.
            PointF[] points = pts.Clone() as PointF[];
            points = CvInvoke.PerspectiveTransform(points, homography);
            //homography.ProjectPoints(points);

            for (int i = 0; i < points.Length; i++)
            {
               points[i].Y += modelImage.Height;
            }
            res.DrawPolyline(
#if NETFX_CORE
               Extensions.
#else
               Array.
#endif
               ConvertAll <PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
            success = true;
         }
         //Emgu.CV.UI.ImageViewer.Show(res);
         return(success);
      }

      /*
       * stopwatch.Reset(); stopwatch.Start();
       * //set the initial region to be the whole image
       * using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
       * {
       *    priorMask.SetValue(1.0);
       *    homography = tracker.CamShiftTrack(
       *       observedFeatures,
       *       (RectangleF)observedImage.ROI,
       *       priorMask);
       * }
       * Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));
       *
       * if (homography != null) //set the initial tracking window to be the whole image
       * {
       *    PointF[] points = pts.Clone() as PointF[];
       *    homography.ProjectPoints(points);
       *
       *    for (int i = 0; i < points.Length; i++)
       *       points[i].Y += modelImage.Height;
       *    res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
       *    return true;
       * }
       * else
       * {
       *    return false;
       * }*/
   }
}
public void TestERTreesLetterRecognition()
{
    // Trains an extremely-randomized-trees classifier on the letter
    // recognition data and reports the hit rate on the 80% training split
    // versus the 20% hold-out.
    Matrix <float> data, response;
    ReadLetterRecognitionData(out data, out response);

    int trainingSampleCount = (int)(data.Rows * 0.8);

    // Variable types: every feature column is numerical, the appended
    // response column is categorical.
    Matrix <Byte> varType = new Matrix <byte>(data.Cols + 1, 1);
    varType.SetValue((byte)MlEnum.VarType.Numerical); //the data is numerical
    varType[data.Cols, 0] = (byte)MlEnum.VarType.Categorical; //the response is catagorical

    // Forest training parameters (legacy MCvRTParams API).
    MCvRTParams param = new MCvRTParams();
    param.maxDepth = 10;
    param.minSampleCount = 10;
    param.regressionAccuracy = 0.0f;
    param.useSurrogates = false;
    param.maxCategories = 15;
    param.priors = IntPtr.Zero;
    param.calcVarImportance = true;
    param.nactiveVars = 4;
    param.termCrit = new MCvTermCriteria(100, 0.01f);
    param.termCrit.Type = Emgu.CV.CvEnum.TermCritType.Iter;

    using (ERTrees forest = new ERTrees())
    {
        bool trained = forest.Train(
            data.GetRows(0, trainingSampleCount, 1),
            Emgu.CV.ML.MlEnum.DataLayoutType.RowSample,
            response.GetRows(0, trainingSampleCount, 1),
            null, null, varType, null, param);
        if (!trained)
            return;

#if !NETFX_CORE
        // Round-trip the trained model through a temp file, then clean up.
        String fileName = Path.Combine(Path.GetTempPath(), "ERTree.xml");
        forest.Save(fileName);
        if (File.Exists(fileName))
            File.Delete(fileName);
#endif

        double trainHits = 0;
        double testHits = 0;
        for (int row = 0; row < data.Rows; row++)
        {
            double delta;
            using (Matrix <float> sample = data.GetRow(row))
                delta = Math.Abs(forest.Predict(sample, null) - response[row, 0]);

            // A prediction within 1e-5 of the label counts as correct.
            if (delta >= 1.0e-5)
                continue;
            if (row < trainingSampleCount)
                trainHits++;
            else
                testHits++;
        }

        double trainDataCorrectRatio = trainHits / trainingSampleCount;
        double testDataCorrectRatio = testHits / (data.Rows - trainingSampleCount);

        EmguAssert.WriteLine(String.Format("Prediction accuracy for training data :{0}%", trainDataCorrectRatio * 100));
        EmguAssert.WriteLine(String.Format("Prediction accuracy for test data :{0}%", testDataCorrectRatio * 100));
    }
}
public void TestDTreesMushroom()
{
    // Trains a decision tree on the mushroom data set and reports the
    // classification accuracy on the training split and the 20% hold-out.
    Matrix <float> data, response;
    ReadMushroomData(out data, out response);

    //Use the first 80% of data as training sample
    int trainingSampleCount = (int)(data.Rows * 0.8);

    // One variable-type entry per feature column plus one for the response.
    Matrix <Byte> varType = new Matrix <byte>(data.Cols + 1, 1);
    varType.SetValue((byte)MlEnum.VarType.Categorical); //the data is categorical

    // Sample mask: 255 marks a row as part of the training subset.
    Matrix <byte> sampleIdx = new Matrix <byte>(data.Rows, 1);
    using (Matrix <byte> sampleRows = sampleIdx.GetRows(0, trainingSampleCount, 1))
        sampleRows.SetValue(255);

    // Class priors must stay pinned while native code may read the pointer.
    float[] priors = new float[] { 1, 0.5f };
    GCHandle priorsHandle = GCHandle.Alloc(priors, GCHandleType.Pinned);
    try
    {
        MCvDTreeParams param = new MCvDTreeParams();
        param.maxDepth = 8;
        param.minSampleCount = 10;
        param.regressionAccuracy = 0;
        param.useSurrogates = true;
        param.maxCategories = 15;
        param.cvFolds = 10;
        param.use1seRule = true;
        param.truncatePrunedTree = true;
        param.priors = priorsHandle.AddrOfPinnedObject();

        using (DTree dtree = new DTree())
        {
            bool success = dtree.Train(
                data,
                Emgu.CV.ML.MlEnum.DataLayoutType.RowSample,
                response,
                null,
                sampleIdx,
                varType,
                null,
                param);

            if (!success)
            {
                return; // the finally block still releases the pinned handle
            }

            double trainDataCorrectRatio = 0;
            double testDataCorrectRatio = 0;
            for (int i = 0; i < data.Rows; i++)
            {
                using (Matrix <float> sample = data.GetRow(i))
                {
                    // A prediction within 1e-5 of the label counts as correct.
                    double r = dtree.Predict(sample, null, false).value;
                    r = Math.Abs(r - response[i, 0]);
                    if (r < 1.0e-5)
                    {
                        if (i < trainingSampleCount)
                        {
                            trainDataCorrectRatio++;
                        }
                        else
                        {
                            testDataCorrectRatio++;
                        }
                    }
                }
            }

            trainDataCorrectRatio /= trainingSampleCount;
            testDataCorrectRatio /= (data.Rows - trainingSampleCount);

            EmguAssert.WriteLine(String.Format("Prediction accuracy for training data :{0}%", trainDataCorrectRatio * 100));
            EmguAssert.WriteLine(String.Format("Prediction accuracy for test data :{0}%", testDataCorrectRatio * 100));
        }
    }
    finally
    {
        // BUG FIX: previously the pinned handle leaked when Train failed and
        // the method returned early, or when any call above threw; freeing it
        // in a finally block guarantees release on every path.
        priorsHandle.Free();
    }
}