/// <summary>
/// Copies an Emgu.CV.Mat of doubles into a MathNet dense matrix.
/// </summary>
/// <param name="mat">Source OpenCV matrix.</param>
/// <param name="isAppend">When true, one extra row [0 ... 0 1] is appended
/// (homogeneous-coordinates style).</param>
/// <returns>A MathNet DenseMatrix with the copied values.</returns>
public static Matrix<double> ToMathNetMatrix(this Emgu.CV.Mat mat, bool isAppend = false)
{
    // Stage the Mat in an Emgu matrix so elements can be read by index.
    Emgu.CV.Matrix<double> staged = new Emgu.CV.Matrix<double>(mat.Rows, mat.Cols);
    mat.CopyTo(staged);

    int rows = mat.Rows;
    int cols = mat.Cols;
    // Reserve one extra row when the homogeneous row is requested.
    double[,] buffer = isAppend ? new double[rows + 1, cols] : new double[rows, cols];

    for (int r = 0; r < rows; r++)
    {
        for (int c = 0; c < cols; c++)
        {
            buffer[r, c] = staged[r, c];
        }
    }

    if (isAppend)
    {
        // Fill the appended row with zeros and a trailing 1.
        for (int c = 0; c < cols - 1; c++)
        {
            buffer[rows, c] = 0;
        }
        buffer[rows, cols - 1] = 1;
    }

    return DenseMatrix.OfArray(buffer);
}
/// <summary>
/// Converts raw temperature grids into Emgu integer matrices and reports the
/// global min and a compressed max across all of them.
/// </summary>
/// <param name="temperatureDatas">One int[,] temperature grid per frame.</param>
/// <param name="min">Smallest temperature found in any frame.</param>
/// <param name="max">Largest temperature, pulled 75% of the way down toward
/// <paramref name="min"/> (intensity compression).</param>
/// <returns>One Emgu.CV.Matrix&lt;int&gt; per input grid, in order.</returns>
public static List<Emgu.CV.Matrix<int>> ScaleIntensity(List<int[,]> temperatureDatas, out double min, out double max)
{
    int count = temperatureDatas.Count;

    // Plain arrays instead of pre-seeded lists: per-index writes from
    // Parallel.For are safe on distinct indices, and we no longer allocate
    // throw-away 160x120 placeholder matrices that were immediately replaced.
    Emgu.CV.Matrix<int>[] matrices = new Emgu.CV.Matrix<int>[count];
    double[] minTemps = new double[count];
    double[] maxTemps = new double[count];

    Parallel.For(0, count, i =>
    {
        matrices[i] = new Emgu.CV.Matrix<int>(temperatureDatas[i]);
        double minT;
        double maxT;
        System.Drawing.Point minTPoint;
        System.Drawing.Point maxTPoint;
        matrices[i].MinMax(out minT, out maxT, out minTPoint, out maxTPoint);
        minTemps[i] = minT;
        maxTemps[i] = maxT;
    });

    min = minTemps.Min();
    max = maxTemps.Max();
    // Compress the dynamic range: report a max 75% closer to min.
    max = max - ((max - min) * 0.75);

    return new List<Emgu.CV.Matrix<int>>(matrices);
}
/// <summary>
/// Verifies MathNet -> Emgu conversions for vectors and matrices.
/// </summary>
public void TestConvertParsleyToEmgu()
{
    // Vector -> single-precision Emgu point.
    MathNet.Numerics.LinearAlgebra.Vector v =
        new MathNet.Numerics.LinearAlgebra.Vector(new double[] { 1.0f, 2.0f, 3.0f });
    Emgu.CV.Structure.MCvPoint3D32f f = v.ToEmguF();
    Assert.AreEqual(1.0, f.x);
    Assert.AreEqual(2.0, f.y);
    Assert.AreEqual(3.0, f.z);

    // Vector -> double-precision Emgu point.
    Emgu.CV.Structure.MCvPoint3D64f d = v.ToEmgu();
    Assert.AreEqual(1.0, d.x);
    Assert.AreEqual(2.0, d.y);
    Assert.AreEqual(3.0, d.z);

    // 2x3 matrix round-trips element for element.
    double[,] data = new double[2, 3] { { 1.0, 2.0, 3.0 }, { 4.0, 5.0, 6.0 } };
    MathNet.Numerics.LinearAlgebra.Matrix m = MathNet.Numerics.LinearAlgebra.Matrix.Create(data);
    Emgu.CV.Matrix<double> m2 = m.ToEmgu();
    for (int row = 0; row < 2; row++)
    {
        for (int col = 0; col < 3; col++)
        {
            Assert.AreEqual(data[row, col], m2[row, col]);
        }
    }
}
/// <summary>
/// Derives the ground-plane and perspective transforms that map skeleton
/// space onto the projection screen, using four captured calibration points.
/// </summary>
private void Calibrate()
{
    // Need one screen point per captured skeleton point.
    if (m_skeletonCalibPoints.Count != m_calibPoints.Count)
    {
        return;
    }

    // Skeleton 3D positions -> 3D positions in the depth camera frame.
    Point3D d0 = convertSkeletonPointToDepthPoint(m_skeletonCalibPoints[0]);
    Point3D d1 = convertSkeletonPointToDepthPoint(m_skeletonCalibPoints[1]);
    Point3D d2 = convertSkeletonPointToDepthPoint(m_skeletonCalibPoints[2]);
    Point3D d3 = convertSkeletonPointToDepthPoint(m_skeletonCalibPoints[3]);

    // Build the calibration plane's normal from two in-plane directions.
    Vector3D dirA = d1 - d0;
    dirA.Normalize();
    Vector3D dirB = d2 - d0;
    dirB.Normalize();
    Vector3D planeNormal = Vector3D.CrossProduct(dirA, dirB);
    planeNormal.Normalize();

    // Rotate so the plane normal aligns with z, flattening points to 2D.
    Vector3D targetNormal = new Vector3D(0, 0, 1);
    m_groundPlaneTransform = Util.make_align_axis_matrix(targetNormal, planeNormal);

    Point3D flat0 = m_groundPlaneTransform.Transform(d0);
    Point3D flat1 = m_groundPlaneTransform.Transform(d1);
    Point3D flat2 = m_groundPlaneTransform.Transform(d2);
    Point3D flat3 = m_groundPlaneTransform.Transform(d3);

    // 2D plane positions -> exact 2D rectangle on screen (perspective transform).
    System.Drawing.PointF[] src =
    {
        new System.Drawing.PointF((float)flat0.X, (float)flat0.Y),
        new System.Drawing.PointF((float)flat1.X, (float)flat1.Y),
        new System.Drawing.PointF((float)flat2.X, (float)flat2.Y),
        new System.Drawing.PointF((float)flat3.X, (float)flat3.Y)
    };
    System.Drawing.PointF[] dest =
    {
        new System.Drawing.PointF((float)m_calibPoints[0].X, (float)m_calibPoints[0].Y),
        new System.Drawing.PointF((float)m_calibPoints[1].X, (float)m_calibPoints[1].Y),
        new System.Drawing.PointF((float)m_calibPoints[2].X, (float)m_calibPoints[2].Y),
        new System.Drawing.PointF((float)m_calibPoints[3].X, (float)m_calibPoints[3].Y)
    };

    Emgu.CV.Mat transform = Emgu.CV.CvInvoke.GetPerspectiveTransform(src, dest);
    m_transform = new Emgu.CV.Matrix<double>(transform.Rows, transform.Cols, transform.NumberOfChannels);
    transform.CopyTo(m_transform);

    // Sanity check: re-projecting the skeleton points should reproduce
    // m_calibPoints; show the results for visual confirmation.
    Point tResult0 = kinectToProjectionPoint(m_skeletonCalibPoints[0]);
    Point tResult1 = kinectToProjectionPoint(m_skeletonCalibPoints[1]);
    Point tResult2 = kinectToProjectionPoint(m_skeletonCalibPoints[2]);
    Point tResult3 = kinectToProjectionPoint(m_skeletonCalibPoints[3]);
    txtCalib.Text = tResult0.ToString(CultureInfo.InvariantCulture) + ";\n" +
                    tResult1.ToString(CultureInfo.InvariantCulture) + ";\n" +
                    tResult2.ToString(CultureInfo.InvariantCulture) + ";\n" +
                    tResult3.ToString(CultureInfo.InvariantCulture);
}
/// <summary>
/// Converts a MathNet matrix into an Emgu.CV matrix of the same shape.
/// </summary>
/// <param name="m">Source MathNet.Numerics.LinearAlgebra.Matrix.</param>
/// <returns>An Emgu.CV.Matrix&lt;double&gt; holding the same element values.</returns>
public static Emgu.CV.Matrix<double> ToEmgu(this MathNet.Numerics.LinearAlgebra.Matrix m)
{
    int rows = m.RowCount;
    int cols = m.ColumnCount;
    Emgu.CV.Matrix<double> converted = new Emgu.CV.Matrix<double>(rows, cols);
    // Element-by-element copy; the two libraries do not share storage.
    for (int row = 0; row < rows; ++row)
    {
        for (int col = 0; col < cols; ++col)
        {
            converted[row, col] = m[row, col];
        }
    }
    return converted;
}
/// <summary>
/// Converts a MathNet matrix into an Emgu.CV matrix with identical contents.
/// </summary>
/// <param name="m">Source MathNet.Numerics.LinearAlgebra.Matrix.</param>
/// <returns>An Emgu.CV.Matrix&lt;double&gt; of the same dimensions and values.</returns>
public static Emgu.CV.Matrix<double> ToEmgu(this MathNet.Numerics.LinearAlgebra.Matrix m)
{
    Emgu.CV.Matrix<double> target = new Emgu.CV.Matrix<double>(m.RowCount, m.ColumnCount);
    // Copy each cell; no shared backing store exists between the libraries.
    for (int i = 0; i < m.RowCount; ++i)
    {
        for (int j = 0; j < m.ColumnCount; ++j)
        {
            target[i, j] = m[i, j];
        }
    }
    return target;
}
/// <summary>
/// Verifies Emgu -> MathNet conversions for points and matrices.
/// </summary>
public void TestConvertEmguToParsley()
{
    // Single-precision Emgu point -> MathNet vector.
    Emgu.CV.Structure.MCvPoint3D32f p = new Emgu.CV.Structure.MCvPoint3D32f(1.0f, 2.0f, 3.0f);
    MathNet.Numerics.LinearAlgebra.Vector v = p.ToParsley();
    Assert.AreEqual(1.0, v[0]);
    Assert.AreEqual(2.0, v[1]);
    Assert.AreEqual(3.0, v[2]);

    // 2x3 Emgu matrix -> MathNet matrix, element for element.
    double[,] data = new double[2, 3] { { 1.0, 2.0, 3.0 }, { 4.0, 5.0, 6.0 } };
    Emgu.CV.Matrix<double> m = new Emgu.CV.Matrix<double>(data);
    MathNet.Numerics.LinearAlgebra.Matrix m2 = m.ToParsley();
    for (int row = 0; row < 2; row++)
    {
        for (int col = 0; col < 3; col++)
        {
            Assert.AreEqual(data[row, col], m2[row, col]);
        }
    }
}
/// <summary>
/// Application entry point: runs the LAMP self-test, then starts the UI.
/// </summary>
static void Main()
{
    // Exercise the LAMP implementation before starting the UI.
    AlgLAMP.Test();

    // NOTE(review): the sample data array, index array, projection array, and
    // the Emgu matrix previously built here were never read by anything, so
    // those dead locals have been removed.

    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);
    Application.Run(new Form1());
}
/// <summary>
/// Checks that Emgu points and matrices convert to MathNet equivalents.
/// </summary>
public void TestConvertEmguToParsley()
{
    // Point conversion keeps x/y/z ordering.
    Emgu.CV.Structure.MCvPoint3D32f point = new Emgu.CV.Structure.MCvPoint3D32f(1.0f, 2.0f, 3.0f);
    MathNet.Numerics.LinearAlgebra.Vector vec = point.ToParsley();
    Assert.AreEqual(1.0, vec[0]);
    Assert.AreEqual(2.0, vec[1]);
    Assert.AreEqual(3.0, vec[2]);

    // Matrix conversion preserves every element.
    double[,] expected = new double[2, 3] { { 1.0, 2.0, 3.0 }, { 4.0, 5.0, 6.0 } };
    Emgu.CV.Matrix<double> source = new Emgu.CV.Matrix<double>(expected);
    MathNet.Numerics.LinearAlgebra.Matrix actual = source.ToParsley();
    for (int i = 0; i < 2; i++)
    {
        for (int j = 0; j < 3; j++)
        {
            Assert.AreEqual(expected[i, j], actual[i, j]);
        }
    }
}
/// <summary>
/// Trains (or loads) the character-recognition neural network from the
/// knowledge files. One training row per sample: the adjusted pixel values
/// as input and a one-hot vector for the expected character as output.
/// </summary>
/// <param name="samples">Hint for the saved-net fast path; the knowledge
/// files define the real sample count.</param>
public void Train(int samples)
{
    // Fast path: reuse a previously saved network when available.
    if (samples > 0 && TryLoadTrainingSet(samples))
    {
        return;
    }

    // Gather every training line from all knowledge files.
    List<string> knowledge = new List<string>();
    foreach (string file in knowledgeFiles)
    {
        knowledge.AddRange(File.ReadAllLines(file));
    }
    string[] lines = knowledge.ToArray();
    samples = lines.Length;

    int pixels = DimensionY * DimensionX;
    int classes = netKeys.Count;

    // One row per sample: pixel inputs and one-hot expected class.
    Emgu.CV.Matrix<Single> training = new Emgu.CV.Matrix<Single>(samples, pixels);
    Emgu.CV.Matrix<Single> class_training = new Emgu.CV.Matrix<Single>(samples, classes);

    // Network topology: input layer, one hidden layer, output layer.
    Emgu.CV.Matrix<int> layers = new Emgu.CV.Matrix<int>(3, 1);
    layers[0, 0] = pixels;
    layers[1, 0] = (int)(factor * netKeys.Count);
    layers[2, 0] = classes;

    for (int i = 0; i < samples; i++)
    {
        LetterInfo info = LetterInfo.ReadLetterInfoLine(lines[i]);
        byte[] bytes = Convert.FromBase64String(info.Base64);
        float[] input = AdjustInput(bytes);
        for (int j = 0; j < pixels; j++)
        {
            training[i, j] = input[j];
        }
        // One-hot encode the expected character.
        int d = netKeys.IndexOf(info.Char);
        class_training[i, d] = 1;
    }

    nnet = new Emgu.CV.ML.ANN_MLP(layers, Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION.SIGMOID_SYM, 0.6, 1);

    // Backpropagation: stop after 1000 iterations or epsilon convergence.
    Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams p = new Emgu.CV.ML.Structure.MCvANN_MLP_TrainParams();
    p.term_crit.type = Emgu.CV.CvEnum.TERMCRIT.CV_TERMCRIT_EPS | Emgu.CV.CvEnum.TERMCRIT.CV_TERMCRIT_ITER;
    p.term_crit.max_iter = 1000;
    p.term_crit.epsilon = 0.000001;
    p.train_method = Emgu.CV.ML.MlEnum.ANN_MLP_TRAIN_METHOD.BACKPROP;
    p.bp_dw_scale = 0.1;
    p.bp_moment_scale = 0.1;

    bool success = false;
    try
    {
        if (File.Exists(saveFile))
        {
            nnet.Load(saveFile);
            success = true;
        }
    }
    catch
    {
        // Deliberately swallowed: a corrupt or incompatible save file just
        // means we fall through and train from scratch.
    }

    if (!success)
    {
        nnet.Train(training, class_training, null, p, Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG.DEFAULT);
        if (saveFile != null)
        {
            Directory.CreateDirectory(Path.GetDirectoryName(saveFile));
            nnet.Save(saveFile);
        }
    }
}
/// <summary>
/// Classifies one character image with the trained network.
/// </summary>
/// <param name="bytes">Raw pixel bytes, DimensionX * DimensionY long.</param>
/// <param name="margin">When true, ambiguous results (runner-up within 0.1
/// of the winner) are rejected as '*'.</param>
/// <param name="quality">Confidence in [0, 1], penalized when the runner-up
/// activation is close to the winner.</param>
/// <returns>The recognized character, or '*' when unreliable.</returns>
internal char Predict(byte[] bytes, bool margin, out double quality)
{
    int pixels = DimensionX * DimensionY;
    int classes = netKeys.Count;

    // Single-row sample built from the adjusted input pixels.
    // (Removed the dead class_test matrix that was allocated but never used.)
    Emgu.CV.Matrix<Single> test = new Emgu.CV.Matrix<Single>(1, pixels);
    Emgu.CV.Matrix<Single> result = new Emgu.CV.Matrix<Single>(1, classes);
    float[] input = AdjustInput(bytes);
    for (int j = 0; j < pixels; j++)
    {
        test[0, j] = input[j];
    }

    Emgu.CV.Matrix<Single> sample = test.GetRow(0);
    nnet.Predict(sample, result);

    // Strongest class activation.
    int max_idx = 0;
    float max = result[0, 0];
    for (int j = 0; j < classes; j++)
    {
        if (result[0, j] > max)
        {
            max_idx = j;
            max = result[0, j];
        }
    }
    // Runner-up activation, floored at 0. NOTE(review): SIGMOID_SYM can emit
    // negative activations, so this is really max(second-best, 0); kept as-is
    // for compatibility with the existing quality mapping.
    float max2 = 0;
    for (int j = 0; j < classes; j++)
    {
        if (j != max_idx && result[0, j] > max2)
        {
            max2 = result[0, j];
        }
    }

    // Confidence: winner activation, penalized when the runner-up is close.
    quality = max;
    quality = quality - Math.Max(0.25 - Math.Abs(max - max2), 0);
    // Map the range [0.5, 1] to [0, 1] and clamp.
    quality = (quality * 1.5) - 0.5;
    quality = Math.Max(0.0, Math.Min(quality, 1.0));

    // '*' marks an unreliable prediction.
    if (margin && Math.Abs(max - max2) < 0.1 && max > 0.5) return '*';
    if (max > 0.5) return netKeys[max_idx];
    return '*';
}
/// <summary>
/// Runs one sample through the network and returns the raw per-class
/// activations instead of a single classification.
/// </summary>
/// <param name="bytes">Raw pixel bytes, DimensionX * DimensionY long. Unlike
/// Predict, the bytes are fed directly without AdjustInput —
/// NOTE(review): confirm this asymmetry is intentional.</param>
/// <returns>One activation per entry of netKeys, in order.</returns>
internal float[] PredictDetailed(byte[] bytes)
{
    int pixels = DimensionX * DimensionY;
    int classes = netKeys.Count;

    // Single-row sample matrix.
    // (Removed the dead class_test matrix that was allocated but never used.)
    Emgu.CV.Matrix<Single> test = new Emgu.CV.Matrix<Single>(1, pixels);
    for (int j = 0; j < pixels; j++)
    {
        test[0, j] = bytes[j];
    }

    Emgu.CV.Matrix<Single> result = new Emgu.CV.Matrix<Single>(1, classes);
    Emgu.CV.Matrix<Single> sample = test.GetRow(0);
    nnet.Predict(sample, result);

    // Copy the activations out of the Emgu result row.
    float[] floats = new float[classes];
    for (int j = 0; j < result.Cols; j++)
    {
        floats[j] = result[0, j];
    }
    return floats;
}
/// <summary>
/// Builds the network with the same topology as Train() and attempts to load
/// previously saved weights from saveFile.
/// </summary>
/// <param name="samples">Unused; kept for signature compatibility.</param>
/// <returns>True when a saved network was loaded; false when the caller
/// must train from scratch.</returns>
private bool TryLoadTrainingSet(int samples)
{
    int pixels = DimensionY * DimensionX;
    int classes = netKeys.Count;

    // Topology must match what Train() builds so saved weights can load.
    Emgu.CV.Matrix<int> layers = new Emgu.CV.Matrix<int>(3, 1);
    layers[0, 0] = pixels;
    layers[1, 0] = (int)(factor * netKeys.Count);
    layers[2, 0] = classes;

    nnet = new Emgu.CV.ML.ANN_MLP(layers, Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION.SIGMOID_SYM, 0.6, 1);

    // NOTE(review): removed dead locals — the old training/class_training
    // matrices and the MCvANN_MLP_TrainParams instance were allocated and
    // populated here but never passed to anything.
    try
    {
        if (File.Exists(saveFile))
        {
            nnet.Load(saveFile);
            return true;
        }
    }
    catch
    {
        // Best effort: an unreadable save file simply means retraining.
    }
    return false;
}
/// <summary>
/// Computes the ground-plane and perspective transforms mapping the four
/// captured skeleton calibration points onto the projection rectangle, then
/// marks calibration as complete.
/// </summary>
private void Calibrate()
{
    // We need exactly four matched point pairs to map a rectangular region.
    if (m_skeletonCalibPoints.Count != m_calibPoints.Count || m_skeletonCalibPoints.Count != 4)
    {
        return;
    }

    // Skeleton 3D positions -> 3D positions in the depth camera frame.
    Point3D d0 = ConvertSkeletonPointToDepthPoint(m_kinectSensor, m_skeletonCalibPoints[0]);
    Point3D d1 = ConvertSkeletonPointToDepthPoint(m_kinectSensor, m_skeletonCalibPoints[1]);
    Point3D d2 = ConvertSkeletonPointToDepthPoint(m_kinectSensor, m_skeletonCalibPoints[2]);
    Point3D d3 = ConvertSkeletonPointToDepthPoint(m_kinectSensor, m_skeletonCalibPoints[3]);

    // Plane normal from two in-plane direction vectors.
    Vector3D dirA = d1 - d0;
    dirA.Normalize();
    Vector3D dirB = d2 - d0;
    dirB.Normalize();
    Vector3D planeNormal = Vector3D.CrossProduct(dirA, dirB);
    planeNormal.Normalize();

    // Align the plane normal with the z axis to flatten points onto 2D.
    Vector3D targetNormal = new Vector3D(0, 0, 1);
    m_groundPlaneTransform = Util.Make_align_axis_matrix(targetNormal, planeNormal);

    Point3D flat0 = m_groundPlaneTransform.Transform(d0);
    Point3D flat1 = m_groundPlaneTransform.Transform(d1);
    Point3D flat2 = m_groundPlaneTransform.Transform(d2);
    Point3D flat3 = m_groundPlaneTransform.Transform(d3);

    // 2D plane positions -> exact 2D rectangle on screen (perspective transform).
    System.Drawing.PointF[] src =
    {
        new System.Drawing.PointF((float)flat0.X, (float)flat0.Y),
        new System.Drawing.PointF((float)flat1.X, (float)flat1.Y),
        new System.Drawing.PointF((float)flat2.X, (float)flat2.Y),
        new System.Drawing.PointF((float)flat3.X, (float)flat3.Y)
    };
    System.Drawing.PointF[] dest =
    {
        new System.Drawing.PointF((float)m_calibPoints[0].X, (float)m_calibPoints[0].Y),
        new System.Drawing.PointF((float)m_calibPoints[1].X, (float)m_calibPoints[1].Y),
        new System.Drawing.PointF((float)m_calibPoints[2].X, (float)m_calibPoints[2].Y),
        new System.Drawing.PointF((float)m_calibPoints[3].X, (float)m_calibPoints[3].Y)
    };

    Emgu.CV.Mat transform = Emgu.CV.CvInvoke.GetPerspectiveTransform(src, dest);
    m_transform = new Emgu.CV.Matrix<double>(transform.Rows, transform.Cols, transform.NumberOfChannels);
    transform.CopyTo(m_transform);

    m_calibrationStatus = CalibrationStep.Calibrated;

    // To verify manually: KinectToProjectionPoint(m_skeletonCalibPoints[i])
    // should reproduce m_calibPoints[i] for each of the four points.
}
/// <summary>
/// Converts an Emgu.CV matrix of doubles into a MathNet matrix.
/// </summary>
/// <param name="m">Source Emgu.CV.Matrix.</param>
/// <returns>A MathNet.Numerics.LinearAlgebra.Matrix built from the same data.</returns>
public static MathNet.Numerics.LinearAlgebra.Matrix ToParsley(this Emgu.CV.Matrix<double> m)
{
    // Emgu exposes its storage as double[,], which MathNet consumes directly.
    return MathNet.Numerics.LinearAlgebra.Matrix.Create(m.Data);
}