// Renders a 512x512 decision-boundary visualization of the trained network `bp`:
// every pixel position is fed to the network as a normalized (x, y) sample and
// colored green (response >= 0.5) or blue otherwise.
private void Form2_Load(object sender, EventArgs e)
{
    CheckData();
    int width = 512, height = 512;
    Image<Bgr, Byte> img = new Image<Bgr, byte>(width, height);
    for (int i = 0; i < width; i++)
    {
        for (int j = 0; j < height; j++)
        {
            // One sample = the pixel coordinates scaled into [0, 1).
            Matrix<float> sampleMat = new Matrix<float>(1, 2);
            sampleMat[0, 0] = i / (float)width;
            sampleMat[0, 1] = j / (float)height;
            //sampleMat[0, 0] = i;
            //sampleMat[0, 1] = j;
            //Mat sampleMat = new Mat(1, 2, Emgu.CV.CvEnum.DepthType.Cv32S, 1);
            //sampleMat.SetTo<float>(new float[] { i / (float)width, j / (float)height });
            Matrix<float> responseMat = new Matrix<float>(1, 1);
            bp.Predict(sampleMat, responseMat);
            // NOTE(review): the Image indexer is [row, col] = [y, x] but `i` is the
            // x-coordinate here, so the rendering is transposed. Invisible only
            // because width == height — confirm the intended orientation.
            if (responseMat[0, 0] >= 0.5)
                img[i, j] = new Bgr(Color.Green.B, Color.Green.G, Color.Green.R);
            else
                img[i, j] = new Bgr(Color.Blue.B, Color.Blue.G, Color.Blue.R);
        }
    }
    pictureBox1.Image = img.Bitmap;
}
// Predict on loaded Dataset: runs the trained network `nnet` over every row of
// the test set, records the per-sample predicted class and computes overall
// accuracy. Per-sample results and the final accuracy go to the console.
public void Predict()
{
    Matrix<float> prediction = new Matrix<float>(1, 1);
    Matrix<float> sample = new Matrix<float>(1, InputLayers);
    int tp = 0; // count of correctly classified samples
    try
    {
        Console.WriteLine("______________ Testing Begins Here_____________");
        for (int i = 0; i < testData.Rows; i++)
        {
            // Copy the i-th test row into the single-row sample matrix.
            for (int j = 0; j < testData.Cols; j++)
            {
                sample[0, j] = testData[i, j];
            }
            nnet.Predict(sample, prediction);
            // Snap the raw network output to the nearest known class value.
            predictedClasses[i, 0] = GetCloseValue(prediction.Data[0, 0]);
            if (predictedClasses[i, 0] == testClasses[i, 0])
            {
                tp++;
            }
            Console.WriteLine("Actual : " + testClasses[i, 0] + " Predicted: " + predictedClasses[i, 0]);
        }
        // Robustness: an empty test set previously produced NaN (0 / 0).
        accuracy = testClasses.Rows == 0 ? 0f : (float)tp / (float)testClasses.Rows;
        Console.WriteLine("Accuracy: " + accuracy);
    }
    catch (Exception ex)
    {
        // Typo fix in the message ("occured" -> "occurred").
        Console.WriteLine("Error occurred Predicting..." + ex.Message);
    }
}
// Trains an ANN_MLP (Emgu 2.x API) on the grayscale images loaded by
// LoadImageGray(): one input per pixel, one regression-style output holding the
// sample Id. After training, the network is re-evaluated on the training set
// and per-sample results are written to the console.
public void TrainANNModel()
{
    LoadImageGray();
    // the output layer must know the number of classes.
    //var numberOfClasses = imageBindingModel.ConvertAll(x => x.Label).Distinct().Count();
    // NOTE(review): hard-coded to one output (Id regressed directly); restore the
    // Distinct() count above for a true multi-class output layer.
    var numberOfClasses = 1;
    var numberInput = resolutionImage.Width * resolutionImage.Height;
    int trainSampleCount = imageBindingModel.Count;
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, numberInput);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, numberOfClasses);
    for (int i = 0; i < imageBindingModel.Count; i++)
    {
        // Fix: the class label was re-assigned numberInput times inside the
        // pixel loop; assign it once per sample.
        trainClasses[i, 0] = (float)imageBindingModel[i].Id;
        for (int j = 0; j < numberInput; j++)
        {
            trainData[i, j] = imageBindingModel[i].Image.Bytes[j];
        }
    }
    // Topology: inputs, one hidden layer of 2 * numberInput neurons, outputs.
    Matrix<int> layerSize = new Matrix<int>(new int[] { numberInput, numberInput + numberInput, numberOfClasses });
    MCvANN_MLP_TrainParams parameters = new MCvANN_MLP_TrainParams();
    parameters.term_crit = new MCvTermCriteria(100, 1.0e-8);
    parameters.train_method = Emgu.CV.ML.MlEnum.ANN_MLP_TRAIN_METHOD.BACKPROP;
    parameters.bp_dw_scale = 0.1;
    parameters.bp_moment_scale = 0.1;
    using (ANN_MLP network = new ANN_MLP(layerSize, Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION.SIGMOID_SYM, 1.0, 1.0))
    {
        network.Train(trainData, trainClasses, null, null, parameters, Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG.DEFAULT);
        // Re-predict the training set to report training accuracy per sample.
        Matrix<float> sample = new Matrix<float>(1, numberInput);
        Matrix<float> prediction = new Matrix<float>(1, numberOfClasses);
        int recog_true = 0;
        for (int i = 0; i < imageBindingModel.Count; i++)
        {
            for (int j = 0; j < numberInput; j++)
            {
                sample[0, j] = trainData[i, j];
            }
            network.Predict(sample, prediction);
            var response = prediction.Data[0, 0];
            // A prediction within 0.5 of the target Id counts as recognized.
            if (Math.Abs(response - imageBindingModel[i].Id) < 0.5)
            {
                recog_true++;
            }
            Console.WriteLine($"recoge : {response} -- target: {imageBindingModel[i].Id} -- result: {Math.Abs(response - imageBindingModel[i].Id) < 0.5}");
        }
    }
}
// Loads the trained ANN weights from "<modelName>.xml" and predicts a response
// for the given sample matrix. Returns the 1x1 result matrix, or null when
// loading/prediction fails (failure is swallowed by design).
public Matrix<float> testingMLP(Matrix<float> testData, string modelName, int hiddenLayers = 2, ANN_MLP.AnnMlpActivationFunction activationType = ANN_MLP.AnnMlpActivationFunction.SigmoidSym)
{
    Matrix<float> finalResult = null;
    // Layer sizes must match the trained model being loaded.
    layerSize = new Matrix<int>(new int[] { testData.Cols, hiddenLayers, 1 });
    try
    {
        using (ANN_MLP network1 = new ANN_MLP()) // Testing trained data
        {
            network1.SetActivationFunction(activationType);
            network1.SetLayerSizes(layerSize);
            // Fix: the FileStorage was never disposed (native handle leak);
            // scope it so it is released once the weights are read.
            using (FileStorage fs = new FileStorage(modelName + ".xml", FileStorage.Mode.Read))
            {
                network1.Read(fs.GetFirstTopLevelNode()); // Load trained ANN weights
            }
            IInputArray Sample_test = testData;
            IOutputArray Result = new Matrix<float>(1, 1);
            network1.Predict(Sample_test, Result); // Start network prediction
            finalResult = (Matrix<float>)Result;
            return (finalResult);
        }
    }
    catch (Exception ee)
    {
        // Best-effort contract: signal failure with a null result.
        return (finalResult);
    }
}
// Runs the back-propagation network `bp` over the image's packed pixel features
// and decodes up to bpRectangleCount predicted rectangles from the output,
// dropping any rectangle with a negative position or non-positive size.
private Rectangle[] PredictBP(Bitmap img)
{
    Rectangle[] tmpR = new Rectangle[] { };
    Bitmap tmpImg = ZoomImg(img, bpWidth, bpHeight, ref tmpR);
    List<Rectangle> retRects = new List<Rectangle>();
    // NOTE(review): features are sampled from the original `img`, not the zoomed
    // `tmpImg` — confirm this matches how the network was trained.
    Image<Bgr, float> trainingData = new Image<Bgr, float>(img);
    Matrix<float> trainingDataMats = new Matrix<float>(1, bpWidth * bpHeight);
    for (int i = 0; i < bpWidth * bpHeight; i++)
    {
        // Fix: the column index was `i % bpHeight`, which reads the wrong
        // pixels whenever bpWidth != bpHeight; the column is `i % bpWidth`.
        int row = i / bpWidth;
        int col = i % bpWidth;
        // Pack the BGR pixel into a single scaled scalar feature.
        trainingDataMats[0, i] = Color.FromArgb(
            (int)trainingData.Data[row, col, 2],
            (int)trainingData.Data[row, col, 1],
            (int)trainingData.Data[row, col, 0]
            ).ToArgb() / (float)0xFFFFFF;
    }
    Matrix<float> labelsMats = new Matrix<float>(1, bpRectangleCount * 4);
    bp.Predict(trainingDataMats, labelsMats);
    // Each group of 4 outputs encodes (x, y, w, h) normalized to [0, 1].
    for (int i = 0; i < bpRectangleCount * 4; i += 4)
    {
        Rectangle r = new Rectangle();
        r.X = (int)(labelsMats[0, i] * (float)bpWidth);
        r.Y = (int)(labelsMats[0, i + 1] * (float)bpHeight);
        r.Width = (int)(labelsMats[0, i + 2] * (float)bpWidth);
        r.Height = (int)(labelsMats[0, i + 3] * (float)bpHeight);
        if (r.X >= 0 && r.Y >= 0 && r.Width > 0 && r.Height > 0)
        {
            retRects.Add(r);
        }
    }
    return (retRects.ToArray());
}
// (Re)trains the character-recognition ANN on allFeatureOfSample/annAllResponse,
// persists the model to ANN_Model.xml, then smoke-tests it on training sample
// #12, displaying and speaking the predicted label.
private void annTraining()
{
    string finalOutput = "";
    int features = 16;
    int classes = 26;
    // Topology: 16 input features, four shrinking hidden layers, 26 outputs.
    Matrix<int> layers = new Matrix<int>(6, 1);
    layers[0, 0] = features;
    layers[1, 0] = classes * 16;
    layers[2, 0] = classes * 8;
    layers[3, 0] = classes * 4;
    layers[4, 0] = classes * 2;
    layers[5, 0] = classes;
    // Fix: both FileStorage instances were never disposed (native handle leak).
    using (FileStorage fileStorageRead = new FileStorage(@"ANN_Model.xml", FileStorage.Mode.Read))
    {
        ann.Read(fileStorageRead.GetRoot(0));
    }
    ann.SetLayerSizes(layers);
    ann.SetActivationFunction(ANN_MLP.AnnMlpActivationFunction.SigmoidSym, 0, 0);
    ann.SetTrainMethod(ANN_MLP.AnnMlpTrainMethod.Backprop, 0, 0);
    ann.Train(allFeatureOfSample, DataLayoutType.RowSample, annAllResponse);
    using (FileStorage fileStorageWrite = new FileStorage(@"ANN_Model.xml", FileStorage.Mode.Write))
    {
        ann.Write(fileStorageWrite);
    }
    // Smoke test: predict on sample #12 of the training data.
    Matrix<float> testSample = new Matrix<float>(1, 16);
    for (int q = 0; q < 16; q++)
    {
        testSample[0, q] = allFeatureOfSample[12, q];
    }
    float real = ann.Predict(testSample);
    // NOTE(review): assumes the prediction rounds to a valid labelArray index —
    // an out-of-range value throws; consider clamping.
    finalOutput += labelArray[(int)real];
    label5.Text = finalOutput.ToString();
    if (label5.Text != " ")
    {
        // Fix: the original created a synthesizer, immediately disposed it and
        // created a second one; a single instance suffices.
        SpeechSynthesizer reader1 = new SpeechSynthesizer();
        reader1.SpeakAsync(finalOutput.ToString());
    }
    else
    {
        MessageBox.Show("No Text Present!");
    }
    System.IO.File.WriteAllText(@"ANNResult.txt", real.ToString());
}
// Classifies a grayscale cell image with the shared ANN: returns true when the
// network response exceeds 0.5, false when it does not or no network is loaded.
public static bool IsAnswer(Image<Gray, byte> img)
{
    if (network == null)
    {
        return (false);
    }
    var features = getImgData(img);
    Matrix<float> input = new Matrix<float>(1, width * height);
    int col = 0;
    // Copy the extracted features into the single-row input matrix.
    foreach (var value in features)
    {
        input[0, col++] = value;
    }
    Matrix<float> output = new Matrix<float>(1, 1);
    network.Predict(input, output);
    return (output.Data[0, 0] > 0.5);
}
// Loads the saved ANN model and classifies the (percent, avg) pair entered in
// the two text boxes, showing the raw network response in a message box.
private void Btn_reg_Click(object sender, EventArgs e)
{
    // Robustness: validate the user input instead of letting float.Parse throw.
    float percent, avg;
    if (!float.TryParse(this.txb_percent.Text, out percent) ||
        !float.TryParse(this.txb_avg.Text, out avg))
    {
        MessageBox.Show("输入格式不正确");
        return;
    }
    // Note: the unused layerSize/layerSizeMat usings were removed — they were
    // referenced only by the commented-out configuration below.
    using (ANN_MLP network = new ANN_MLP())
    {
        network.Load(annFileName);
        //network.SetLayerSizes(layerSizeMat);
        //network.SetActivationFunction(ANN_MLP.AnnMlpActivationFunction.SigmoidSym, 0, 0);
        //network.TermCriteria = new MCvTermCriteria(10, 1.0e-8);
        //network.SetTrainMethod(ANN_MLP.AnnMlpTrainMethod.Backprop, 0.1, 0.1);
        float[,] testData = new float[1, 2] { { percent, avg } };
        Matrix<float> sample = new Matrix<float>(testData);
        Matrix<float> prediction = new Matrix<float>(1, 1);
        network.Predict(sample, prediction);
        float response = prediction.Data[0, 0];
        MessageBox.Show($"判断结果:{response}");
    }
}
// Demo (Emgu 2.x API): trains an ANN_MLP on two synthetic Gaussian point
// clusters labelled 1 and 2, then paints every pixel of a 500x500 image by the
// network's predicted class and overlays the training samples.
public void ANN()
{
    int trainSampleCount = 100;

    #region Generate the traning data and classes
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);

    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

    Matrix<float> sample = new Matrix<float>(1, 2);
    Matrix<float> prediction = new Matrix<float>(1, 1);

    // First half of the samples ~ N(200, 50), second half ~ N(300, 50).
    Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount >> 1, 1);
    trainData1.SetRandNormal(new MCvScalar(200), new MCvScalar(50));
    Matrix<float> trainData2 = trainData.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
    trainData2.SetRandNormal(new MCvScalar(300), new MCvScalar(50));

    // Class labels: 1 for the first cluster, 2 for the second.
    Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount >> 1, 1);
    trainClasses1.SetValue(1);
    Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
    trainClasses2.SetValue(2);
    #endregion

    // Topology: 2 inputs, one 5-neuron hidden layer, 1 output.
    Matrix<int> layerSize = new Matrix<int>(new int[] { 2, 5, 1 });

    MCvANN_MLP_TrainParams parameters = new MCvANN_MLP_TrainParams();
    parameters.term_crit = new MCvTermCriteria(10, 1.0e-8);
    parameters.train_method = Emgu.CV.ML.MlEnum.ANN_MLP_TRAIN_METHOD.BACKPROP;
    parameters.bp_dw_scale = 0.1;
    parameters.bp_moment_scale = 0.1;

    using (ANN_MLP network = new ANN_MLP(layerSize, Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION.SIGMOID_SYM, 1.0, 1.0))
    {
        network.Train(trainData, trainClasses, null, null, parameters, Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG.DEFAULT);

        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                // The pixel position (x = j, y = i) is the 2-feature sample.
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                network.Predict(sample, prediction); // estimates the response and get the neighbors' labels

                float response = prediction.Data[0, 0];
                // highlight the pixel depending on the accuracy (or confidence):
                // a response below the 1.5 midpoint means class 1 (dark blue).
                img[i, j] = response < 1.5 ? new Bgr(90, 0, 0) : new Bgr(0, 90, 0);
            }
        }
    }

    // display the original training samples
    for (int i = 0; i < (trainSampleCount >> 1); i++)
    {
        PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
        img.Draw(new CircleF(p1, 2), new Bgr(255, 100, 100), -1);
        PointF p2 = new PointF((int)trainData2[i, 0], (int)trainData2[i, 1]);
        img.Draw(new CircleF(p2, 2), new Bgr(100, 255, 100), -1);
    }
    Emgu.CV.UI.ImageViewer.Show(img);
}
// Unit-test style demo (Emgu 3.x API): trains an ANN_MLP on two synthetic
// Gaussian point clusters, exercises a Save/Delete round trip of the model
// file, then paints every pixel of a 500x500 image by the predicted class and
// overlays the training samples.
public void TestANN_MLP()
{
    int trainSampleCount = 100;

    #region Generate the traning data and classes
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);

    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

    Matrix<float> sample = new Matrix<float>(1, 2);
    Matrix<float> prediction = new Matrix<float>(1, 1);

    // First half of the samples ~ N(200, 50), second half ~ N(300, 50).
    Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount >> 1, 1);
    trainData1.SetRandNormal(new MCvScalar(200), new MCvScalar(50));
    Matrix<float> trainData2 = trainData.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
    trainData2.SetRandNormal(new MCvScalar(300), new MCvScalar(50));

    // Class labels: 1 for the first cluster, 2 for the second.
    Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount >> 1, 1);
    trainClasses1.SetValue(1);
    Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
    trainClasses2.SetValue(2);
    #endregion

    // Topology: 2 inputs, one 5-neuron hidden layer, 1 output.
    using (Matrix<int> layerSize = new Matrix<int>(new int[] { 2, 5, 1 }))
    using (Mat layerSizeMat = layerSize.Mat)
    using (TrainData td = new TrainData(trainData, MlEnum.DataLayoutType.RowSample, trainClasses))
    using (ANN_MLP network = new ANN_MLP())
    {
        network.SetLayerSizes(layerSizeMat);
        network.SetActivationFunction(ANN_MLP.AnnMlpActivationFunction.SigmoidSym, 0, 0);
        network.TermCriteria = new MCvTermCriteria(10, 1.0e-8);
        network.SetTrainMethod(ANN_MLP.AnnMlpTrainMethod.Backprop, 0.1, 0.1);
        network.Train(td, (int)Emgu.CV.ML.MlEnum.AnnMlpTrainingFlag.Default);

#if !NETFX_CORE
        // Round-trip check: the model can be written to disk; clean up after.
        String fileName = Path.Combine(Path.GetTempPath(), "ann_mlp_model.xml");
        network.Save(fileName);
        if (File.Exists(fileName))
        {
            File.Delete(fileName);
        }
#endif

        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                // The pixel position (x = j, y = i) is the 2-feature sample.
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                network.Predict(sample, prediction); // estimates the response and get the neighbors' labels

                float response = prediction.Data[0, 0];
                // highlight the pixel depending on the accuracy (or confidence):
                // a response below the 1.5 midpoint means class 1 (dark blue).
                img[i, j] = response < 1.5 ? new Bgr(90, 0, 0) : new Bgr(0, 90, 0);
            }
        }
    }

    // display the original training samples
    for (int i = 0; i < (trainSampleCount >> 1); i++)
    {
        PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
        img.Draw(new CircleF(p1, 2), new Bgr(255, 100, 100), -1);
        PointF p2 = new PointF((int)trainData2[i, 0], (int)trainData2[i, 1]);
        img.Draw(new CircleF(p2, 2), new Bgr(100, 255, 100), -1);
    }
    //Emgu.CV.UI.ImageViewer.Show(img);
}
// Extracts descriptors for the 16 frames centred on [first, last], reduces them
// to 16 dimensions with PCA, (re)trains the ANN on the projected data for the
// current class index `adasas`, then speaks and stores the prediction for one
// sample. async void is acceptable here only because this is event-style UI code.
private async void moduleFeatureExtraction(int first, int last)
{
    string fghfh = "";
    double[,] RawData = new double[16, 3780]; // 16 frames x descriptor length
    int mid = (first + last) / 2;
    int low = mid - 8;
    int high = mid + 8;
    // Build the one-hot response rows for the current class index `adasas`.
    for (int i = 0; i < 16; i++)
    {
        for (int j = 0; j < 26; j++)
        {
            response[i, j] = (j == adasas) ? 1 : 0;
        }
    }
    adasas++;
    // Clamp the 16-frame window into [first, last].
    if (low < first)
    {
        low++;
    }
    // Fix: the original incremented `low` again here, sliding the window past
    // `last` instead of shrinking it; the upper bound must be pulled in.
    if (high > last)
    {
        high--;
    }
    for (int k = low; k < high; k++)
    {
        string frameName = "gesture//" + k + ".jpeg";
        Image<Bgr, byte> featurExtractionInput = new Image<Bgr, byte>(frameName);
        //pictureBox3.Image = featurExtractionInput.Bitmap;
        //label4.Text = k.ToString();
        await Task.Delay(1000 / Convert.ToInt32(2));
        float[] desc = GetVector(featurExtractionInput);
        int i = k - low; // row index within the 16-frame window
        for (int j = 0; j < 3780; j++)
        {
            RawData[i, j] = Convert.ToDouble(desc[j]);
        }
        if (k == high - 1)
        {
            // PCA: project the 16x3780 raw descriptors down to 16 components.
            Matrix<Double> DataMatrix = new Matrix<Double>(RawData);
            Matrix<Double> Mean = new Matrix<Double>(1, 3780);
            Matrix<Double> EigenVectors = new Matrix<Double>(3780, 3780);
            CvInvoke.PCACompute(DataMatrix, Mean, EigenVectors, 16);
            Matrix<Double> result = new Matrix<Double>(16, 16);
            CvInvoke.PCAProject(DataMatrix, Mean, EigenVectors, result);
            // Round-trip the projection through XML (persisted to test.xml).
            String filePath = @"test.xml";
            StringBuilder sb = new StringBuilder();
            (new XmlSerializer(typeof(Matrix<double>))).Serialize(new StringWriter(sb), result);
            XmlDocument xDoc = new XmlDocument();
            xDoc.LoadXml(sb.ToString());
            System.IO.File.WriteAllText(filePath, sb.ToString());
            Matrix<double> matrix = (Matrix<double>)(new XmlSerializer(typeof(Matrix<double>))).Deserialize(new XmlNodeReader(xDoc));
            // Debug dump of the projected matrix (the string is built but never
            // written back — kept for parity with the original behavior).
            string djf = System.IO.File.ReadAllText(@"g.txt");
            djf += Environment.NewLine;
            djf += Environment.NewLine;
            for (int p = 0; p < 16; p++)
            {
                for (int q = 0; q < 16; q++)
                {
                    djf += p + " , " + q + " " + matrix[p, q].ToString() + " ";
                }
                djf += Environment.NewLine;
            }
            Matrix<float> masjhdb = result.Convert<float>();
            // Topology: 16 input features, four shrinking hidden layers, 26 outputs.
            int features = 16;
            int classes = 26;
            Matrix<int> layers = new Matrix<int>(6, 1);
            layers[0, 0] = features;
            layers[1, 0] = classes * 16;
            layers[2, 0] = classes * 8;
            layers[3, 0] = classes * 4;
            layers[4, 0] = classes * 2;
            layers[5, 0] = classes;
            ANN_MLP ann = new ANN_MLP();
            // Fix: both FileStorage handles were leaked; dispose them.
            using (FileStorage fileStorageRead = new FileStorage(@"abc.xml", FileStorage.Mode.Read))
            {
                ann.Read(fileStorageRead.GetRoot(0));
            }
            ann.SetLayerSizes(layers);
            ann.SetActivationFunction(ANN_MLP.AnnMlpActivationFunction.SigmoidSym, 0, 0);
            ann.SetTrainMethod(ANN_MLP.AnnMlpTrainMethod.Backprop, 0, 0);
            ann.Train(masjhdb, DataLayoutType.RowSample, response);
            using (FileStorage fileStorageWrite = new FileStorage(@"abc.xml", FileStorage.Mode.Write))
            {
                ann.Write(fileStorageWrite);
            }
            // Smoke test: predict on projected sample #11.
            Matrix<float> hehe = new Matrix<float>(1, 16);
            for (int q = 0; q < 16; q++)
            {
                hehe[0, q] = masjhdb[11, q];
            }
            float real = ann.Predict(hehe);
            fghfh += array[(int)real];
            if (richTextBox1.Text != " ")
            {
                // Fix: the original disposed a fresh synthesizer then built a
                // second one; a single instance is enough.
                SpeechSynthesizer reader = new SpeechSynthesizer();
                reader.SpeakAsync(fghfh.ToString());
            }
            else
            {
                MessageBox.Show("No Text Present!");
            }
            richTextBox1.Text = fghfh.ToString();
            System.IO.File.WriteAllText(@"g.txt", real.ToString());
        }
    }
}
// Demo (Emgu 2.x API): trains an ANN_MLP on two synthetic Gaussian point
// clusters, saves the model to "ann_mlp_model.xml", then paints every pixel of
// a 500x500 image by the predicted class and overlays the training samples.
public void TestANN_MLP()
{
    int trainSampleCount = 100;

    #region Generate the traning data and classes
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);

    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

    Matrix<float> sample = new Matrix<float>(1, 2);
    Matrix<float> prediction = new Matrix<float>(1, 1);

    // First half of the samples ~ N(200, 50), second half ~ N(300, 50).
    Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount >> 1, 1);
    trainData1.SetRandNormal(new MCvScalar(200), new MCvScalar(50));
    Matrix<float> trainData2 = trainData.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
    trainData2.SetRandNormal(new MCvScalar(300), new MCvScalar(50));

    // Class labels: 1 for the first cluster, 2 for the second.
    Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount >> 1, 1);
    trainClasses1.SetValue(1);
    Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
    trainClasses2.SetValue(2);
    #endregion

    // Topology: 2 inputs, one 5-neuron hidden layer, 1 output.
    Matrix<int> layerSize = new Matrix<int>(new int[] { 2, 5, 1 });

    MCvANN_MLP_TrainParams parameters = new MCvANN_MLP_TrainParams();
    parameters.term_crit = new MCvTermCriteria(10, 1.0e-8);
    parameters.train_method = Emgu.CV.ML.MlEnum.ANN_MLP_TRAIN_METHOD.BACKPROP;
    parameters.bp_dw_scale = 0.1;
    parameters.bp_moment_scale = 0.1;

    using (ANN_MLP network = new ANN_MLP(layerSize, Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION.SIGMOID_SYM, 1.0, 1.0))
    {
        network.Train(trainData, trainClasses, null, null, parameters, Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG.DEFAULT);
        network.Save("ann_mlp_model.xml");

        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                // The pixel position (x = j, y = i) is the 2-feature sample.
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                network.Predict(sample, prediction); // estimates the response and get the neighbors' labels

                float response = prediction.Data[0, 0];
                // highlight the pixel depending on the accuracy (or confidence):
                // a response below the 1.5 midpoint means class 1 (dark blue).
                img[i, j] = response < 1.5 ? new Bgr(90, 0, 0) : new Bgr(0, 90, 0);
            }
        }
    }

    // display the original training samples
    for (int i = 0; i < (trainSampleCount >> 1); i++)
    {
        PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
        img.Draw(new CircleF(p1, 2), new Bgr(255, 100, 100), -1);
        PointF p2 = new PointF((int)trainData2[i, 0], (int)trainData2[i, 1]);
        img.Draw(new CircleF(p2, 2), new Bgr(100, 255, 100), -1);
    }
}
// Loads the trained ANN from disk and classifies every image in the selected
// folder, counting correct recognitions (ground truth given by the checkbox)
// and copying misclassified files into the matching error folder.
private void btn_ANNReg_Click(object sender, EventArgs e)
{
    var regPath = txbregPath.Text;
    if (string.IsNullOrEmpty(regPath))
    {
        MessageBox.Show("待识别文件夹不能空");
        return;
    }
    var isAct = ckbIsAct.Checked; // true: the folder holds answered samples
    var files = Directory.GetFiles(regPath);
    var testData = new List<List<float>>();
    for (int i = 0; i < files.Length; i++)
    {
        var path = files[i];
        Image<Gray, byte> img = new Image<Gray, byte>(path);
        testData.Add(getImgData(img));
    }
    // Robustness: an empty folder previously divided by zero in the rate below.
    if (testData.Count == 0)
    {
        MessageBox.Show("待识别文件夹中没有图片");
        return;
    }
    using (ANN_MLP network = new ANN_MLP())
    {
        network.Load(annFileName);
        int colCount = width * height;
        Matrix<float> sample = new Matrix<float>(1, colCount);
        Matrix<float> prediction = new Matrix<float>(1, 1);
        //1测试数据
        var testCount = testData.Count;
        var rightCount = 0; //正确act识别数量
        for (int i = 0; i < testCount; i++)
        {
            var testColData = testData[i];
            for (int j = 0; j < testColData.Count; j++)
            {
                sample[0, j] = testColData[j];
            }
            network.Predict(sample, prediction);
            float response = prediction.Data[0, 0];
            if (isAct && response > 0.5)
            {
                rightCount++;
                Console.WriteLine($"该数据是涂答的,正确识别{response}");
            }
            else if (isAct && response <= 0.5)
            {
                Console.WriteLine($"该数据是涂答的,错误识别{response}");
                File.Copy(files[i], Path.Combine(actRegErrorDir, Path.GetFileName(files[i])), true);
            }
            else if (!isAct && response <= 0.5)
            {
                rightCount++;
                Console.WriteLine($"该数据是未涂答的,正确识别{response}");
            }
            else if (!isAct && response > 0.5)
            {
                Console.WriteLine($"该数据是未涂答的,错误识别{response}");
                File.Copy(files[i], Path.Combine(negRegErrorDir, Path.GetFileName(files[i])), true);
            }
            else
            {
                // Reachable only when response is NaN (all comparisons false).
                Console.WriteLine("未知识别结果");
            }
        }
        var result = $"测试数量:{testCount},正确数量:{rightCount},正确率:{rightCount * 1.0 / testCount}";
        Console.WriteLine(result);
        MessageBox.Show(result);
    }
}
// Trains a binary (answered / not-answered) ANN classifier from the prepared
// positive and negative pixel data, saves the model, then evaluates it on the
// held-out test sets and reports per-sample results to the console.
private void Btn_ann2_Click(object sender, EventArgs e)
{
    this.prepareData();
    // Fix: `x?.Count == 0` is false when x is null, so a null list slipped past
    // this guard and crashed below; treat null as empty.
    if ((trainNegData?.Count ?? 0) == 0 || (trainActData?.Count ?? 0) == 0)
    {
        MessageBox.Show("训练数据不能为空");
        return;
    }
    int trainSampleCount = trainActData.Count + trainNegData.Count;
    int colCount = width * height;
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, colCount);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);
    Matrix<float> sample = new Matrix<float>(1, colCount);
    Matrix<float> prediction = new Matrix<float>(1, 1);
    //准备正面数据
    var actCount = trainActData.Count;
    for (int i = 0; i < actCount; i++)
    {
        var colData = trainActData[i];
        var colCount1 = colData.Count;
        for (int j = 0; j < colCount1; j++)
        {
            trainData.Data[i, j] = trainActData[i][j];
        }
        trainClasses.Data[i, 0] = 1; // answered
    }
    //准备未涂答数据
    var negCount = trainNegData.Count;
    for (int i = 0; i < negCount; i++)
    {
        var colData = trainNegData[i];
        var colCount1 = colData.Count;
        for (int j = 0; j < colCount1; j++)
        {
            trainData.Data[i + actCount, j] = trainNegData[i][j];
        }
        trainClasses.Data[i + actCount, 0] = 0; // not answered
    }
    //训练
    // Fix: the input layer size was hard-coded to 286; derive it from the actual
    // feature length so a changed width/height cannot desync the topology.
    using (Matrix<int> layerSize = new Matrix<int>(new int[] { colCount, 10, 10, 1 }))
    using (Mat layerSizeMat = layerSize.Mat)
    using (TrainData td = new TrainData(trainData, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, trainClasses))
    using (ANN_MLP network = new ANN_MLP())
    {
        network.SetLayerSizes(layerSizeMat);
        network.SetActivationFunction(ANN_MLP.AnnMlpActivationFunction.SigmoidSym, 0, 0);
        network.TermCriteria = new MCvTermCriteria(10, 1.0e-8);
        network.SetTrainMethod(ANN_MLP.AnnMlpTrainMethod.Backprop, 0.01, 0.01);
        network.Train(td, (int)Emgu.CV.ML.MlEnum.AnnMlpTrainingFlag.Default);
        network.Save(annFileName);
        //测试
        //1测试正面数据
        var testActCount = testActData.Count;
        var rightActCount = 0; //正确act识别数量
        for (int i = 0; i < testActCount; i++)
        {
            var actSample = testActData[i];
            for (int j = 0; j < actSample.Count; j++)
            {
                sample[0, j] = actSample[j];
            }
            network.Predict(sample, prediction);
            float response = prediction.Data[0, 0];
            if (response > 0.5)
            {
                rightActCount++;
                Console.WriteLine($"该数据是涂答的,正确识别{response}");
            }
            else
            {
                Console.WriteLine($"该数据是涂答的,错误识别{response}");
            }
        }
        //2测试负面数据
        var testNegCount = testNegData.Count;
        var rightNegCount = 0; //正确neg识别数量
        for (int i = 0; i < testNegCount; i++)
        {
            var negSample = testNegData[i];
            for (int j = 0; j < negSample.Count; j++)
            {
                sample[0, j] = negSample[j];
            }
            network.Predict(sample, prediction);
            float response = prediction.Data[0, 0];
            if (response <= 0.5)
            {
                rightNegCount++;
                Console.WriteLine($"该数据是未涂答的,正确识别{response}");
            }
            else
            {
                Console.WriteLine($"该数据是未涂答的,错误识别{response}");
            }
        }
        MessageBox.Show("训练完毕,并测试");
    }
}
// Trains a 2-feature (fill percent, mean gray) ANN classifier on the positive
// and negative samples, saves it, and renders the decision regions with the
// original training points overlaid.
private void Btn_CNN2_Click(object sender, EventArgs e)
{
    var positiveData = GetPositiveData();
    var negativeData = GetNegativeData();
    // Fix: `x?.Count == 0` is false for a null list, so a null result bypassed
    // this guard and crashed below; treat null as empty.
    if ((positiveData?.Count ?? 0) == 0 || (negativeData?.Count ?? 0) == 0)
    {
        MessageBox.Show("训练数据不能为空");
        return;
    }
    int trainSampleCount = positiveData.Count + negativeData.Count;
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);
    Matrix<float> sample = new Matrix<float>(1, 2);
    Matrix<float> prediction = new Matrix<float>(1, 1);
    // Positive samples get class 1, negative samples class 0.
    for (int i = 0; i < positiveData.Count; i++)
    {
        var item = positiveData[i];
        trainData.Data[i, 0] = item.Percent;
        trainData.Data[i, 1] = item.Avg;
        trainClasses.Data[i, 0] = 1;
    }
    for (int i = 0; i < negativeData.Count; i++)
    {
        var item = negativeData[i];
        int row = positiveData.Count + i;
        trainData.Data[row, 0] = item.Percent;
        trainData.Data[row, 1] = item.Avg;
        trainClasses.Data[row, 0] = 0;
    }
    Image<Bgr, Byte> img = new Image<Bgr, byte>(765, 300);
    using (Matrix<int> layerSize = new Matrix<int>(new int[] { 2, 5, 1 }))
    using (Mat layerSizeMat = layerSize.Mat)
    using (TrainData td = new TrainData(trainData, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, trainClasses))
    using (ANN_MLP network = new ANN_MLP())
    {
        network.SetLayerSizes(layerSizeMat);
        network.SetActivationFunction(ANN_MLP.AnnMlpActivationFunction.SigmoidSym, 0, 0);
        network.TermCriteria = new MCvTermCriteria(10, 1.0e-8);
        network.SetTrainMethod(ANN_MLP.AnnMlpTrainMethod.Backprop, 0.1, 0.1);
        network.Train(td, (int)Emgu.CV.ML.MlEnum.AnnMlpTrainingFlag.Default);
        network.Save(annFileName);
        //画图: paint each pixel by the class predicted for the (percent, avg)
        // value that pixel represents.
        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                // NOTE(review): this maps row i to percent and column j to avg,
                // while the sample points below draw percent on y and avg on x —
                // confirm the axes line up.
                sample.Data[0, 0] = i * 1.0f / (100 * 3);
                sample.Data[0, 1] = 255 - j * 1.0f / 3.0f;
                network.Predict(sample, prediction);
                float response = prediction.Data[0, 0];
                img[i, j] = response < 0.5 ? new Bgr(90, 0, 0) : new Bgr(0, 90, 0);
            }
        }
    }
    // display the original training samples
    for (int i = 0; i < positiveData.Count; i++)
    {
        var d = positiveData[i];
        PointF p1 = new PointF((255 - d.Avg) * 3, d.Percent * 300);
        img.Draw(new CircleF(p1, 2), new Bgr(255, 100, 100), -1);
    }
    for (int i = 0; i < negativeData.Count; i++)
    {
        var d = negativeData[i];
        PointF p1 = new PointF((255 - d.Avg) * 3, d.Percent * 300);
        img.Draw(new CircleF(p1, 2), new Bgr(100, 255, 100), -1);
    }
    this.ib_result.Image = img;
    MessageBox.Show("训练完毕");
}
// Trains an ANN on two synthetic Gaussian clusters (classes 1 and 2), saves the
// model, paints the learned decision regions and overlays the training points.
private void button1_Click(object sender, EventArgs e)
{
    int trainSampleCount = 100;

    #region Generate the traning data and classes
    Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
    Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);

    Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

    Matrix<float> sample = new Matrix<float>(1, 2);
    Matrix<float> prediction = new Matrix<float>(1, 1);

    // First half of the samples ~ N(200, 50), second half ~ N(300, 50).
    Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount >> 1, 1);
    trainData1.SetRandNormal(new MCvScalar(200), new MCvScalar(50));
    Matrix<float> trainData2 = trainData.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
    trainData2.SetRandNormal(new MCvScalar(300), new MCvScalar(50));

    // Class labels: 1 for the first cluster, 2 for the second.
    Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount >> 1, 1);
    trainClasses1.SetValue(1);
    Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
    trainClasses2.SetValue(2);
    #endregion

    // Fix: the output layer was sized 2 while trainClasses has one column;
    // OpenCV requires the response column count to equal the last layer size,
    // so training failed. One output neuron matches the 1-column labels (and
    // the `response < 1.5` threshold below, mirroring the {2, 5, 1} examples).
    using (Matrix<int> layerSize = new Matrix<int>(new int[] { 2, 10, 1 }))
    using (Mat layerSizeMat = layerSize.Mat)
    using (TrainData td = new TrainData(trainData, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, trainClasses))
    using (ANN_MLP network = new ANN_MLP())
    {
        network.SetLayerSizes(layerSizeMat);
        network.SetActivationFunction(ANN_MLP.AnnMlpActivationFunction.SigmoidSym);
        network.TermCriteria = new MCvTermCriteria(10000, 1.0e-8);
        network.SetTrainMethod(ANN_MLP.AnnMlpTrainMethod.Backprop, 0.1, 0.1);
        network.Train(td);
        // NOTE(review): FileStorage infers the format from the extension; confirm
        // a ".txt" target is accepted — ".xml"/".yml"/".json" are the documented ones.
        network.Save("temp.txt");

        for (int i = 0; i < img.Height; i++)
        {
            for (int j = 0; j < img.Width; j++)
            {
                // The pixel position (x = j, y = i) is the 2-feature sample.
                sample.Data[0, 0] = j;
                sample.Data[0, 1] = i;
                network.Predict(sample, prediction); // estimates the response and get the neighbors' labels

                float response = prediction.Data[0, 0];
                // highlight the pixel depending on the accuracy (or confidence):
                // a response below the 1.5 midpoint means class 1 (dark blue).
                if (response < 1.5)
                {
                    img[i, j] = new Bgr(90, 0, 0);
                }
                else
                {
                    img[i, j] = new Bgr(0, 90, 0);
                }
            }
        }
    }

    // display the original training samples
    for (int i = 0; i < (trainSampleCount >> 1); i++)
    {
        PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
        img.Draw(new CircleF(p1, 2), new Bgr(255, 100, 100), -1);
        PointF p2 = new PointF((int)trainData2[i, 0], (int)trainData2[i, 1]);
        img.Draw(new CircleF(p2, 2), new Bgr(100, 255, 100), -1);
    }
    pictureBox1.Image = img.ToBitmap();
    // Emgu.CV.UI.ImageViewer.Show(img);
}