public void TestKNearest()
{
   int K = 10;
   int trainSampleCount = 100;

   #region Generate the training data and classes
   Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
   Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);

   Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

   Matrix<float> sample = new Matrix<float>(1, 2);

   Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount >> 1, 1);
   trainData1.SetRandNormal(new MCvScalar(200), new MCvScalar(50));
   Matrix<float> trainData2 = trainData.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
   trainData2.SetRandNormal(new MCvScalar(300), new MCvScalar(50));

   Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount >> 1, 1);
   trainClasses1.SetValue(1);
   Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
   trainClasses2.SetValue(2);
   #endregion

   Matrix<float> results, neighborResponses;
   results = new Matrix<float>(sample.Rows, 1);
   neighborResponses = new Matrix<float>(sample.Rows, K);
   //dist = new Matrix<float>(sample.Rows, K);

   using (KNearest knn = new KNearest(trainData, trainClasses, null, false, K))
   {
      //TODO: find out when knn.save will be implemented
      //knn.Save("knn.xml");

      for (int i = 0; i < img.Height; i++)
      {
         for (int j = 0; j < img.Width; j++)
         {
            sample.Data[0, 0] = j;
            sample.Data[0, 1] = i;

            // estimate the response and get the neighbors' labels
            float response = knn.FindNearest(sample, K, results, null, neighborResponses, null);

            int accuracy = 0;
            // compute the number of neighbors representing the majority
            for (int k = 0; k < K; k++)
            {
               if (neighborResponses.Data[0, k] == response)
                  accuracy++;
            }
            // highlight the pixel depending on the accuracy (or confidence)
            img[i, j] =
               response == 1
                  ? (accuracy > 5 ? new Bgr(90, 0, 0) : new Bgr(90, 40, 0))
                  : (accuracy > 5 ? new Bgr(0, 90, 0) : new Bgr(40, 90, 0));
         }
      }
   }

   // display the original training samples
   for (int i = 0; i < (trainSampleCount >> 1); i++)
   {
      PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
      img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
      PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
      img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 255, 100), -1);
   }
}
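This version uses the constructor-based KNearest API from Emgu CV 2.x, where the training data, classes, and K are passed to the constructor and FindNearest both classifies a sample and returns the neighbors' labels. The next example is the same test written against the Emgu CV 3.x StatModel-style API (Train/Predict), and additionally demonstrates serializing the trained model.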
public void TestKNearest()
{
   int K = 10;
   int trainSampleCount = 100;

   #region Generate the training data and classes
   Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
   Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);

   Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

   Matrix<float> sample = new Matrix<float>(1, 2);

   Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount >> 1, 1);
   trainData1.SetRandNormal(new MCvScalar(200), new MCvScalar(50));
   Matrix<float> trainData2 = trainData.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
   trainData2.SetRandNormal(new MCvScalar(300), new MCvScalar(50));

   Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount >> 1, 1);
   trainClasses1.SetValue(1);
   Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
   trainClasses2.SetValue(2);
   #endregion

   Matrix<float> results, neighborResponses;
   results = new Matrix<float>(sample.Rows, 1);
   neighborResponses = new Matrix<float>(sample.Rows, K);
   //dist = new Matrix<float>(sample.Rows, K);

   using (KNearest knn = new KNearest())
   {
      knn.DefaultK = K;
      knn.IsClassifier = true;
      knn.Train(trainData, MlEnum.DataLayoutType.RowSample, trainClasses);
      //ParamDef[] defs = knn.GetParams();

      for (int i = 0; i < img.Height; i++)
      {
         for (int j = 0; j < img.Width; j++)
         {
            sample.Data[0, 0] = j;
            sample.Data[0, 1] = i;

            // estimate the response and get the neighbors' labels;
            // FindNearest fills neighborResponses (Predict does not),
            // so the majority count below has real data to work with
            float response = knn.FindNearest(sample, K, results, neighborResponses, null);

            int accuracy = 0;
            // compute the number of neighbors representing the majority
            for (int k = 0; k < K; k++)
            {
               if (neighborResponses.Data[0, k] == response)
               {
                  accuracy++;
               }
            }
            // highlight the pixel depending on the accuracy (or confidence)
            img[i, j] =
               response == 1
                  ? (accuracy > 5 ? new Bgr(90, 0, 0) : new Bgr(90, 40, 0))
                  : (accuracy > 5 ? new Bgr(0, 90, 0) : new Bgr(40, 90, 0));
         }
      }

      String knnModelStr;
      //save the stat model to a string
      using (FileStorage fs = new FileStorage(".yml", FileStorage.Mode.Write | FileStorage.Mode.Memory))
      {
         knn.Write(fs, "knn");
         knnModelStr = fs.ReleaseAndGetString();
      }
      KNearest knn2 = new KNearest();
      knn2.LoadFromString(knnModelStr, "knn");

      String knnModelStr2 = knn.SaveToString();
      KNearest knn3 = new KNearest();
      knn3.LoadFromString(knnModelStr2);

#if !NETFX_CORE
      String fileName = "knnModel.xml";
      knn.Save(fileName);
      String text = File.ReadAllText(fileName);
#endif
   }

   // display the original training samples
   for (int i = 0; i < (trainSampleCount >> 1); i++)
   {
      PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
      img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
      PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
      img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 255, 100), -1);
   }

   //Emgu.CV.UI.ImageViewer.Show(img);
}
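The snippet above serializes the trained model three different ways; a quick sanity check is to confirm that a reloaded model agrees with the original. A minimal sketch, reusing only the SaveToString/LoadFromString/Predict calls that already appear above:

   // Round-trip sanity check: `knn` is the trained model from above and
   // `sample` is any 1x2 feature row; both models should agree.
   String modelStr = knn.SaveToString();
   using (KNearest copy = new KNearest())
   {
      copy.LoadFromString(modelStr);
      System.Diagnostics.Debug.Assert(copy.Predict(sample) == knn.Predict(sample));
   }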
static void check_device_color(System.Collections.Specialized.StringDictionary args)
{
   System.Console.WriteLine($"Open COM port: {args["port"]}.");
   //1.open serial port
   try
   {
      _port = new System.IO.Ports.SerialPort(args["port"]);
      //_port = new SerialPort(args["port"], 9600);
      _port.BaudRate = 9600;
      _port.Parity = Parity.None;
      _port.StopBits = StopBits.One;
      _port.DataBits = 8;
      _port.Handshake = Handshake.None;
      _port.RtsEnable = true;
      _port.DtrEnable = true;
      _port.ReadTimeout = 1000;
      _port.WriteTimeout = 1000;
      _port.DataReceived += _port_DataReceived;
      _port.Open();
   }
   catch (Exception)
   {
      _port = null;
      System.Console.WriteLine($"Failed to open COM port: {args["port"]}.");
      goto exit;
   }

   DateTime _start = DateTime.Now;
   bool done = false;
   System.Console.WriteLine("Waiting for the sensor to become ready.");
   //2.wait for sensor ready
   while (!done)
   {
      string s = get_data();
      Match m = Regex.Match(s, "Found sensor", RegexOptions.None, Regex.InfiniteMatchTimeout);
      if (m.Success)
      {
         System.Console.WriteLine("Sensor is ready.");
         done = true;
      }
      if ((DateTime.Now - _start).TotalSeconds > 10)
      {
         break;
      }
   }
   if (!done)
   {
      System.Console.WriteLine("Sensor is not ready.");
      goto exit;
   }

   Regex r = new Regex(@"^Color Temp: (\d+) K - Lux: (\d+) - R: (\d+) G: (\d+) B: (\d+) C: (\d+)\s*$");

   //3.turn off led
   System.Console.WriteLine("Turn off LED.");
   _port.Write(new byte[] { 0x00 }, 0, 1);
   System.Console.WriteLine("Read data for white noise.");
   System.Console.WriteLine("Please remove the device from the sensor, then press any key to continue or q to quit.");
   ConsoleKeyInfo k = System.Console.ReadKey();
   if (k.KeyChar == 'q' || k.KeyChar == 'Q')   // accept upper- or lower-case q
   {
      goto exit;
   }

   //4.read data for white noise
   int samples = 10;
   int[,] white_noise = new int[samples, 6];
   System.Console.WriteLine($"Reading {samples} samples for white noise.");
   done = false;
   int i = 0;
   // double[] so MathNet's ArrayStatistics.MeanStandardDeviation accepts them
   double[] white_noise_lux = new double[samples];
   double[] white_noise_c = new double[samples];
   while (!done && i < samples)
   {
      System.Threading.Thread.Sleep(1000);
      string s = get_data();
      System.Console.WriteLine($"White noise data: {s}");
      Match m = r.Match(s);
      if (m.Success && m.Groups.Count > 6)
      {
         white_noise[i, 0] = Int32.Parse(m.Groups[1].Value);
         white_noise[i, 1] = Int32.Parse(m.Groups[2].Value);
         white_noise[i, 2] = Int32.Parse(m.Groups[3].Value);
         white_noise[i, 3] = Int32.Parse(m.Groups[4].Value);
         white_noise[i, 4] = Int32.Parse(m.Groups[5].Value);
         white_noise[i, 5] = Int32.Parse(m.Groups[6].Value);
         white_noise_lux[i] = white_noise[i, 1];
         white_noise_c[i] = white_noise[i, 5];
         i++;
      }
   }
   System.Console.WriteLine("Finished sampling data for white noise.");

   // MeanStandardDeviation
   Tuple<double, double> wn_lux = MathNet.Numerics.Statistics.ArrayStatistics.MeanStandardDeviation(white_noise_lux);
   Tuple<double, double> wn_c = MathNet.Numerics.Statistics.ArrayStatistics.MeanStandardDeviation(white_noise_c);
   System.Console.WriteLine($"White noise: mean of lux={wn_lux.Item1}, stddev={wn_lux.Item2}");
   System.Console.WriteLine($"White noise: mean of C={wn_c.Item1}, stddev={wn_c.Item2}");

   // load existing data for knn
   System.Console.WriteLine("Load training data.");
   KNearest knn = new KNearest();
   done = false;
   {
      Matrix<float> trained_data;
      Matrix<int> response;
      ReadColorData(out trained_data, out response);
      //using (KNearest knn = new KNearest())
      {
         knn.DefaultK = 3;
         knn.IsClassifier = true;
         bool ok = knn.Train(trained_data, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, response);
         if (ok)
         {
            System.Console.WriteLine("Training data loaded successfully.");
            done = true;
            //knn.Save("knn.xml");
            //int cols = data.Cols;
            //Matrix<float> sample = new Matrix<float>(1, cols);
            //Matrix<float> sample;
            //test_data(out sample);
            //float r = knn.Predict(sample);
         }
      }
   }
   if (!done)
   {
      System.Console.WriteLine("Failed to load training data.");
      goto exit;
   }

   string data = "";
   done = false;
   System.Console.WriteLine("Check device color. Please place a device; press q to quit.");
   //List<int[]> color_data = new List<int[]>();
   int device_stage = 0;
   while (!done)
   {
      System.Threading.Thread.Sleep(1000);
      if (System.Console.KeyAvailable)
      {
         k = System.Console.ReadKey();
         if (k.KeyChar == 'q' || k.KeyChar == 'Q')
         {
            done = true;
            continue;
         }
      }
      //string data;
      data = get_data();
      Match m = r.Match(data);
      if (m.Success)
      {
         //System.Console.WriteLine($"Data: {data}");
         if (m.Groups.Count > 6)
         {
            if (device_stage == 0)
            {
               // wait for the device to be in place:
               // compare lux and C against the white-noise baseline
               int lux = Int32.Parse(m.Groups[2].Value);
               int c = Int32.Parse(m.Groups[6].Value);
               double r1 = (wn_lux.Item1 - lux) / wn_lux.Item1;
               double r2 = (wn_c.Item1 - c) / wn_c.Item1;
               if (r1 > 0.5 && r2 > 0.5)
               {
                  // device in place
                  System.Console.WriteLine("Device in place.");
                  device_stage = 1;
               }
            }
            else if (device_stage == 1)
            {
               // device in place: turn the LED on
               System.Console.WriteLine("Turn on LED.");
               _port.Write(new byte[] { 0xff }, 0, 1);
               device_stage = 2;
               System.Threading.Thread.Sleep(2000);
            }
            else if (device_stage == 2)
            {
               System.Console.WriteLine($"Color Data: {data}");
               // save color data
               //int[] c = new int[6];
               //c[0] = Int32.Parse(m.Groups[1].Value);
               //c[1] = Int32.Parse(m.Groups[2].Value);
               //c[2] = Int32.Parse(m.Groups[3].Value);
               //c[3] = Int32.Parse(m.Groups[4].Value);
               //c[4] = Int32.Parse(m.Groups[5].Value);
               //c[5] = Int32.Parse(m.Groups[6].Value);
               //color_data.Add(c);

               // predict device color
               string s = parse_color_data(data);
               Matrix<float> sample;
               test_data(out sample, s);
               float idx = knn.Predict(sample);
               System.Console.WriteLine($"Predict: device color idx is {idx}");
               device_stage = 3;
            }
            else if (device_stage == 3)
            {
               // color read: turn the LED off
               System.Console.WriteLine("Turn off LED.");
               _port.Write(new byte[] { 0x00 }, 0, 1);
               device_stage = 4;
               System.Threading.Thread.Sleep(1000);
            }
            else if (device_stage == 4)
            {
               System.Console.WriteLine("Please remove the device and place another one.");
               // wait for the device to be removed:
               // compare lux and C against the white-noise baseline
               int lux = Int32.Parse(m.Groups[2].Value);
               int c = Int32.Parse(m.Groups[6].Value);
               double r1 = (wn_lux.Item1 - lux) / wn_lux.Item1;
               double r2 = (wn_c.Item1 - c) / wn_c.Item1;
               if (r1 < 0.2 && r2 < 0.2)
               {
                  // device removed
                  System.Console.WriteLine("Device removed.");
                  device_stage = 0;
               }
            }
         }
      }
   }

   //5.press any key to continue to read device color
   //6.place device
   //7.wait for device in-place
   //8.read data for device color
   //9.wait for device removal
   //10.press 'q' to quit or go to 7.
   //11.done.
exit:
   if (_port != null)
   {
      if (_port.IsOpen)
      {
         _port.Write(new byte[] { 0x00 }, 0, 1);
         _port.Close();
      }
   }
}
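ReadColorData, get_data, parse_color_data, and test_data are helpers from the surrounding project and are not shown here. Purely as an illustration, a hypothetical ReadColorData might look like the sketch below, assuming a CSV training file whose rows hold the six sensor values captured by the regex above (temp, lux, R, G, B, C) followed by an integer color label; the file name and layout are assumptions:

   // Hypothetical sketch only: the project's real data format is not shown.
   // Assumes "color_data.csv" rows of the form "temp,lux,r,g,b,c,label".
   static void ReadColorData(out Matrix<float> data, out Matrix<int> response)
   {
      string[] lines = System.IO.File.ReadAllLines("color_data.csv");
      data = new Matrix<float>(lines.Length, 6);
      response = new Matrix<int>(lines.Length, 1);
      for (int row = 0; row < lines.Length; row++)
      {
         string[] fields = lines[row].Split(',');
         for (int col = 0; col < 6; col++)
         {
            data[row, col] = float.Parse(fields[col]);
         }
         response[row, 0] = int.Parse(fields[6]);
      }
   }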
public String DoOCR_ReturnString(KNearest kNearest, Mat src)
{
   String RtnString = null;
   var gray = new Mat();
   Cv2.CvtColor(src, gray, ColorConversionCodes.BGRA2GRAY);

   var threshImage = new Mat();
   Cv2.Threshold(gray, threshImage, Thresh, ThresholdMaxVal, ThresholdTypes.BinaryInv); // Threshold to find contours

   OpenCvSharp.Point[][] contours;
   HierarchyIndex[] hierarchyIndexes;
   Cv2.FindContours(
      threshImage,
      out contours,
      out hierarchyIndexes,
      mode: RetrievalModes.CComp,
      method: ContourApproximationModes.ApproxSimple);
   if (contours.Length == 0)
   {
      //throw new NotSupportedException("Couldn't find any object in the image.");
      return null;
   }

   // create an input sample by contour finding and cropping
   var dst = new Mat(src.Rows, src.Cols, MatType.CV_8UC3, Scalar.All(0));
   var contourIndex = 0;
   while (contourIndex >= 0)
   {
      var contour = contours[contourIndex];

      var boundingRect = Cv2.BoundingRect(contour); // find the bounding rect for each contour
      Cv2.Rectangle(src,
         new OpenCvSharp.Point(boundingRect.X, boundingRect.Y),
         new OpenCvSharp.Point(boundingRect.X + boundingRect.Width, boundingRect.Y + boundingRect.Height),
         new Scalar(0, 0, 255), 2);

      var roi = new Mat(threshImage, boundingRect); // crop the image

      var resizedImage = new Mat();
      var resizedImageFloat = new Mat();
      Cv2.Resize(roi, resizedImage, new OpenCvSharp.Size(10, 10)); // resize to 10x10
      resizedImage.ConvertTo(resizedImageFloat, MatType.CV_32FC1); // convert to float
      var result = resizedImageFloat.Reshape(1, 1);

      var results = new Mat();
      var neighborResponses = new Mat();
      var dists = new Mat();
      var detectedClass = (int)kNearest.FindNearest(result, 1, results, neighborResponses, dists);

      RtnString = RtnString + detectedClass.ToString(CultureInfo.InvariantCulture);
      contourIndex = hierarchyIndexes[contourIndex].Next;
   }
   return RtnString;
}
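A hedged usage sketch: the image path is hypothetical, and the model is assumed to have been trained on 10x10 crops flattened to 1x100 CV_32FC1 rows, matching the Resize/Reshape above.

   // Hypothetical usage: "digits.png" and `trainedKNearest` are assumptions.
   Mat src = Cv2.ImRead("digits.png");
   string digits = DoOCR_ReturnString(trainedKNearest, src);
   Console.WriteLine(digits ?? "no contours found");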
public KNearestClassifier() { kNearest = new KNearest(); }
public void DoOCR(KNearest kNearest, string path)
{
   var src = Cv2.ImRead(path);
   Cv2.ImShow("Source", src);

   var gray = new Mat();
   Cv2.CvtColor(src, gray, ColorConversionCodes.BGRA2GRAY);

   var threshImage = new Mat();
   Cv2.Threshold(gray, threshImage, Thresh, ThresholdMaxVal, ThresholdTypes.BinaryInv); // Threshold to find contours

   Point[][] contours;
   HierarchyIndex[] hierarchyIndexes;
   Cv2.FindContours(
      threshImage,
      out contours,
      out hierarchyIndexes,
      mode: RetrievalModes.CComp,
      method: ContourApproximationModes.ApproxSimple);
   if (contours.Length == 0)
   {
      throw new NotSupportedException("Couldn't find any object in the image.");
   }

   // create an input sample by contour finding and cropping
   var dst = new Mat(src.Rows, src.Cols, MatType.CV_8UC3, Scalar.All(0));
   var contourIndex = 0;
   while (contourIndex >= 0)
   {
      var contour = contours[contourIndex];

      var boundingRect = Cv2.BoundingRect(contour); // find the bounding rect for each contour
      Cv2.Rectangle(src,
         new Point(boundingRect.X, boundingRect.Y),
         new Point(boundingRect.X + boundingRect.Width, boundingRect.Y + boundingRect.Height),
         new Scalar(0, 0, 255), 2);

      var roi = new Mat(threshImage, boundingRect); // crop the image

      var resizedImage = new Mat();
      var resizedImageFloat = new Mat();
      Cv2.Resize(roi, resizedImage, new Size(10, 10)); // resize to 10x10
      resizedImage.ConvertTo(resizedImageFloat, MatType.CV_32FC1); // convert to float
      var result = resizedImageFloat.Reshape(1, 1);

      var results = new Mat();
      var neighborResponses = new Mat();
      var dists = new Mat();
      var detectedClass = (int)kNearest.FindNearest(result, 1, results, neighborResponses, dists);

      //Console.WriteLine("DetectedClass: {0}", detectedClass);
      //Cv2.ImShow("roi", roi);
      //Cv.WaitKey(0);
      //Cv2.ImWrite(string.Format("det_{0}_{1}.png", detectedClass, contourIndex), roi);

      Cv2.PutText(
         dst,
         detectedClass.ToString(CultureInfo.InvariantCulture),
         new Point(boundingRect.X, boundingRect.Y + boundingRect.Height),
         0, 1, new Scalar(0, 255, 0), 2);

      contourIndex = hierarchyIndexes[contourIndex].Next;
   }

   Cv2.ImShow("Segmented Source", src);
   Cv2.ImShow("Detected", dst);
   Cv2.ImWrite("dest.jpg", dst);
   Cv2.WaitKey();
}
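Both OCR examples assume a KNearest trained on the same 1x100 float rows they build from each contour. A minimal training sketch under that assumption (how the samples and labels matrices are produced is project-specific and not shown here):

   // Sketch: train an OpenCvSharp KNearest on N flattened 10x10 digit images.
   // Assumes `samples` is N x 100 CV_32FC1 and `labels` is N x 1 CV_32FC1.
   using OpenCvSharp;
   using OpenCvSharp.ML;

   KNearest TrainDigitModel(Mat samples, Mat labels)
   {
      var knn = KNearest.Create();
      knn.Train(samples, SampleTypes.RowSample, labels);
      return knn;
   }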
//Initialize the model.
void InitModel()
{
   _pModel = new KNearest();
   //new KNearest(_samples, _responses);
}
//Recognize a single digit.
public int Recognize(Mat img)
{
   const int RESIZED_IMAGE_WIDTH = 10;
   const int RESIZED_IMAGE_HEIGHT = 10;

   int cres = '?';

   var mtxClassifications = _responses;
   int intNumberOfTrainingSamples = mtxClassifications.Rows;
   mtxClassifications = new Matrix<float>(447, 1);
   var mtxTrainingImages = new Matrix<float>(447, 100);
   //TODO: mtxTrainingImages = _samples;

   // train
   KNearest kNearest = new KNearest();
   kNearest.DefaultK = 1;
   kNearest.Train(mtxTrainingImages, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, mtxClassifications);

   Mat imgTestingNumbers = img;

   //declare various images
   Mat imgGrayscale = new Mat();
   Mat imgBlurred = new Mat();
   Mat imgThresh = new Mat();
   Mat imgThreshCopy = new Mat();

   //convert to grayscale
   CvInvoke.CvtColor(imgTestingNumbers, imgGrayscale, ColorConversion.Bgr2Gray);
   //blur
   CvInvoke.GaussianBlur(imgGrayscale, imgBlurred, new Size(5, 5), 0);
   //threshold the image from grayscale to black and white
   CvInvoke.AdaptiveThreshold(imgBlurred, imgThresh, 255.0, AdaptiveThresholdType.GaussianC, ThresholdType.BinaryInv, 11, 2.0);

   //make a copy of the thresh image; this is necessary because FindContours modifies the image
   imgThreshCopy = imgThresh.Clone();

   var contours = new VectorOfVectorOfPoint();
   //get external contours only
   CvInvoke.FindContours(imgThreshCopy, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);

   //declare a list of contours with data
   var listOfContoursWithData = new List<ContourWithData>();

   //populate the list of contours with data
   //for each contour
   for (int i = 0; i <= contours.Size - 1; i++)
   {
      //declare a new contour with data
      ContourWithData contourWithData = new ContourWithData();
      //populate the contour member variable
      contourWithData.contour = contours[i];
      //calculate the bounding rectangle
      contourWithData.boundingRect = CvInvoke.BoundingRectangle(contourWithData.contour);
      //calculate the area
      contourWithData.dblArea = CvInvoke.ContourArea(contourWithData.contour);
      //if the contour with data is valid
      if (contourWithData.CheckIfContourIsValid())
      {
         //add it to the list of contours with data
         listOfContoursWithData.Add(contourWithData);
      }
   }

   //sort contours with data from left to right
   listOfContoursWithData.Sort((oneContourWithData, otherContourWithData) =>
      oneContourWithData.boundingRect.X.CompareTo(otherContourWithData.boundingRect.X));

   //declare the final string; this will hold the final number sequence by the end of the program
   string strFinalString = "";

   //for each contour in the list of valid contours
   foreach (ContourWithData contourWithData in listOfContoursWithData)
   {
      //draw a green rect around the current char
      CvInvoke.Rectangle(imgTestingNumbers, contourWithData.boundingRect, new MCvScalar(0.0, 255.0, 0.0), 2);
      //get the ROI image of the bounding rect
      Mat imgROItoBeCloned = new Mat(imgThresh, contourWithData.boundingRect);
      //clone the ROI image so we don't change the original when we resize
      Mat imgROI = imgROItoBeCloned.Clone();
      Mat imgROIResized = new Mat();
      //resize the image; this is necessary for char recognition
      CvInvoke.Resize(imgROI, imgROIResized, new Size(RESIZED_IMAGE_WIDTH, RESIZED_IMAGE_HEIGHT));

      //declare a Matrix of the same dimensions as the Image we are adding to the data structure of training images
      Matrix<float> mtxTemp = new Matrix<float>(imgROIResized.Size);
      //declare a flattened (only 1 row) matrix of the same total size
      Matrix<float> mtxTempReshaped = new Matrix<float>(1, RESIZED_IMAGE_WIDTH * RESIZED_IMAGE_HEIGHT);
      //convert the Image to a Matrix of Singles with the same dimensions
      imgROIResized.ConvertTo(mtxTemp, DepthType.Cv32F);

      //flatten the Matrix into one row by RESIZED_IMAGE_WIDTH * RESIZED_IMAGE_HEIGHT columns
      for (int intRow = 0; intRow <= RESIZED_IMAGE_HEIGHT - 1; intRow++)
      {
         for (int intCol = 0; intCol <= RESIZED_IMAGE_WIDTH - 1; intCol++)
         {
            mtxTempReshaped[0, (intRow * RESIZED_IMAGE_WIDTH) + intCol] = mtxTemp[intRow, intCol];
         }
      }

      float sngCurrentChar = 0;
      //finally we can call Predict !!!
      sngCurrentChar = kNearest.Predict(mtxTempReshaped);

      //append the current char to the full string of chars
      strFinalString = strFinalString + (char)sngCurrentChar;
      //keep the last prediction so the method returns the recognized
      //character code instead of always returning '?'
      cres = (int)sngCurrentChar;
   }

   return cres;
}
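Note that Recognize trains a fresh KNearest on every call. A hedged refactor, assuming the same _samples and _responses fields used above, trains once and caches the model:

   // Sketch: train once and reuse across Recognize() calls.
   // Assumes _samples (N x 100) and _responses (N x 1) as in the code above.
   private KNearest _knn;

   private KNearest GetTrainedKnn()
   {
      if (_knn == null)
      {
         _knn = new KNearest();
         _knn.DefaultK = 1;
         _knn.Train(_samples, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, _responses);
      }
      return _knn;
   }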
private void Button2_Click(object sender, EventArgs e)
{
   int K = 10;
   int trainSampleCount = 100;

   #region Generate the training data and classes
   Matrix<float> trainData = new Matrix<float>(trainSampleCount, 2);
   Matrix<float> trainClasses = new Matrix<float>(trainSampleCount, 1);

   Image<Bgr, Byte> img = new Image<Bgr, byte>(500, 500);

   Matrix<float> sample = new Matrix<float>(1, 2);

   Matrix<float> trainData1 = trainData.GetRows(0, trainSampleCount >> 1, 1);
   trainData1.SetRandNormal(new MCvScalar(200), new MCvScalar(50));
   Matrix<float> trainData2 = trainData.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
   trainData2.SetRandNormal(new MCvScalar(300), new MCvScalar(50));

   Matrix<float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount >> 1, 1);
   trainClasses1.SetValue(1);
   Matrix<float> trainClasses2 = trainClasses.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
   trainClasses2.SetValue(2);
   #endregion

   Matrix<float> results, neighborResponses;
   results = new Matrix<float>(sample.Rows, 1);
   neighborResponses = new Matrix<float>(sample.Rows, K);
   //dist = new Matrix<float>(sample.Rows, K);

   using (KNearest knn = new KNearest())
   {
      knn.DefaultK = K;
      knn.IsClassifier = true;
      knn.Train(trainData, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, trainClasses);

      for (int i = 0; i < img.Height; i++)
      {
         for (int j = 0; j < img.Width; j++)
         {
            sample.Data[0, 0] = j;
            sample.Data[0, 1] = i;

            // estimate the response and get the neighbors' labels;
            // FindNearest fills neighborResponses (Predict does not),
            // so the majority count below has real data to work with
            float response = knn.FindNearest(sample, K, results, neighborResponses, null);

            int accuracy = 0;
            // compute the number of neighbors representing the majority
            for (int k = 0; k < K; k++)
            {
               if (neighborResponses.Data[0, k] == response)
               {
                  accuracy++;
               }
            }
            // highlight the pixel depending on the accuracy (or confidence)
            img[i, j] =
               response == 1
                  ? (accuracy > 5 ? new Bgr(90, 0, 0) : new Bgr(90, 40, 0))
                  : (accuracy > 5 ? new Bgr(0, 90, 0) : new Bgr(40, 90, 0));
         }
      }
   }

   // display the original training samples
   for (int i = 0; i < (trainSampleCount >> 1); i++)
   {
      PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
      img.Draw(new CircleF(p1, 2.0f), new Bgr(255, 100, 100), -1);
      PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
      img.Draw(new CircleF(p2, 2.0f), new Bgr(100, 255, 100), -1);
   }

   CvInvoke.Imshow("KNN", img); // the demo is k-nearest neighbors, not k-means
   CvInvoke.WaitKey();
}