public void PerformOcr(List<Tuple<int, int>> textRowLocations)
{
    int DarkPixels;
    var engine = new TesseractEngine(Program.GetDataPath(@"tessdata"),
                                     Program.DBCon.getIniValue<String>(IBE.MTSettings.tabSettings.DB_GROUPNAME, "TraineddataFile"),
                                     EngineMode.Default);
    engine.DefaultPageSegMode = PageSegMode.SingleLine;

    string Stationname_OCR;
    string StationameAnalysisBase;

    // delete the old Brainerous images - otherwise Brainerous would also process older, no longer relevant images
    if (Directory.Exists(Program.GetDataPath(@"Brainerous\images")))
        foreach (string file in Directory.GetFiles(Program.GetDataPath(@"Brainerous\images"), "*.*"))
            File.Delete(file);
    else
        Directory.CreateDirectory(Program.GetDataPath(@"Brainerous\images"));

    float level;
    var text = AnalyseFrameUsingTesseract(_bTrimmedHeader, engine, out level);
    Stationname_OCR = StripPunctuationFromScannedText(text); // (text + " {" + page.GetMeanConfidence() + "}\r\n");

    string[] StationsInSystem = Program.Data.getStations(SystemAtTimeOfScreenshot);

    if (Program.actualCondition.Location.Equals("", StringComparison.InvariantCultureIgnoreCase))
        StationameAnalysisBase = Stationname_OCR;
    else
        StationameAnalysisBase = Program.actualCondition.Location;

    string headerResult_temp = StationsInSystem.FirstOrDefault(x => x.Equals(StationameAnalysisBase, StringComparison.InvariantCultureIgnoreCase));

    if (headerResult_temp == null)
    {
        // station not found in database
        var matchesInStationReferenceList = StationsInSystem.OrderBy(x => _levenshtein.LD2(Stationname_OCR, x)).ToList();

        if (matchesInStationReferenceList.Count > 0)
        {
            var ld = _levenshtein.LD2(Stationname_OCR, matchesInStationReferenceList[0].ToUpper());

            // the limit depends on the length of the word - this factor works really well
            double LevenshteinLimit = Math.Round((matchesInStationReferenceList[0].Length * 1.0), 0);

            if (ld <= LevenshteinLimit)
                Stationname_OCR = matchesInStationReferenceList[0];
        }
    }
    else
    {
        Stationname_OCR = headerResult_temp;
    }

    // show station on GUI
    _callingForm.cOcrCaptureAndCorrect.DisplayResults(Stationname_OCR);

    var commodityColumnText = new string[textRowLocations.Count(), 8];
    var originalBitmaps = new Bitmap[textRowLocations.Count(), 8];
    var originalBitmapConfidences = new float[textRowLocations.Count(), 8];
    var rowIds = new string[textRowLocations.Count()];
    var rowCtr = 0;
    var bitmapCtr = 0;

    foreach (var row in textRowLocations)
    {
        int startRow = row.Item1 - 3;
        int heightRow = row.Item2 - row.Item1 + 6;

        if (startRow < 0)
            startRow = 0;

        if (heightRow + startRow > _bTrimmed_4_OCR.Height)
            heightRow = _bTrimmed_4_OCR.Height - startRow;

        // We'll use this later to identify the right correction image
        rowIds[rowCtr] = Guid.NewGuid().ToString();

        using (Bitmap b = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(0, startRow, _bTrimmed_4_OCR.Width, heightRow)))
        {
            b.Save(Program.GetDataPath(@"OCR Correction Images\" + rowIds[rowCtr] + ".png"));
        }

        int columnCounter = 0;

        while (columnCounter < 8)
        {
            int left, width;

            switch (columnCounter)
            {
                case 0: // commodity
                    left  = 0;
                    width = _calibrationPoints[3].X - _calibrationPoints[2].X;
                    break;
                case 1: // sell
                    left  = _calibrationPoints[3].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[4].X - _calibrationPoints[3].X;
                    break;
                case 2: // buy
                    left  = _calibrationPoints[4].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[5].X - _calibrationPoints[4].X;
                    break;
                case 3: // freight
                    left  = _calibrationPoints[5].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[6].X - _calibrationPoints[5].X;
                    break;
                case 4: // demand
                    left  = _calibrationPoints[6].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[7].X - _calibrationPoints[6].X;
                    break;
                case 5: // demand level
                    left  = _calibrationPoints[7].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[8].X - _calibrationPoints[7].X;
                    break;
                case 6: // supply
                    left  = _calibrationPoints[8].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[9].X - _calibrationPoints[8].X;
                    break;
                case 7: // supply level
                    left  = _calibrationPoints[9].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[10].X - _calibrationPoints[9].X;
                    break;
                default:
                    left  = 0;
                    width = _calibrationPoints[3].X - _calibrationPoints[2].X;
                    break;
            }

            var fudgeFactor = 0; // _bOriginal.Height * 6 / 1440;
            left  = left + fudgeFactor;
            width = width - fudgeFactor;

            DarkPixels = 0;

            if (Program.DBCon.getIniValue<Boolean>(MTSettings.tabSettings.DB_GROUPNAME, "CheckNextScreenshotForOne", false.ToString(), false, true))
            {
                if (PixelTest == null)
                    PixelTest = new EBPixeltest();

                if (columnCounter == 3)
                {
                    var brainerousOut = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left, startRow, width, heightRow));

                    // count how many dark pixels are on the bitmap
                    for (int i = 0; i < brainerousOut.Height; i++)
                        for (int j = 0; j < brainerousOut.Width; j++)
                            if (brainerousOut.GetPixel(j, i).GetBrightness() < Program.DBCon.getIniValue<Int32>(IBE.MTSettings.tabSettings.DB_GROUPNAME, "EBPixelThreshold"))
                                DarkPixels++;

                    PixelTest.addPicture(brainerousOut, DarkPixels);
                }
            }
            else
            {
                // crop a little bit more from the left border because sometimes, if the vertical
                // line of the table is included, it is recognized as a "1" or "7"
                left  += 10;
                width -= 10;

                if (columnCounter != 0 && columnCounter != 5 && columnCounter != 7)
                {
                    // If it's a numeric column, write it out for Brainerous to process later
                    var brainerousOut = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left, startRow, width, heightRow));

                    if (Program.DBCon.getIniValue<Int32>(IBE.MTSettings.tabSettings.DB_GROUPNAME, "EBPixelAmount") > 0)
                    {
                        // count how many dark pixels are on the bitmap -> we only process bitmaps with something
                        // on them (at least one digit assumed; a "1" has about 25 pixels at the default 1920x1200)
                        for (int i = 0; i < brainerousOut.Height; i++)
                            for (int j = 0; j < brainerousOut.Width; j++)
                                if (brainerousOut.GetPixel(j, i).GetBrightness() < Program.DBCon.getIniValue<Int32>(IBE.MTSettings.tabSettings.DB_GROUPNAME, "EBPixelThreshold"))
                                    DarkPixels++;
                    }

                    if (DarkPixels >= Program.DBCon.getIniValue<Int32>(IBE.MTSettings.tabSettings.DB_GROUPNAME, "EBPixelAmount"))
                        brainerousOut.Save(Program.GetDataPath(@"Brainerous\images\" + bitmapCtr + ".png"));

                    bitmapCtr++;
                }
                else
                {
                    // It's a text column, so we'll use Tesseract.
                    // Prepare several slightly shifted versions of the bitmap; we will take the best result.
                    var c = new Bitmap[7];
                    c[0] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left,     startRow,     width, heightRow));
                    c[1] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left + 1, startRow,     width, heightRow));
                    c[2] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left - 1, startRow,     width, heightRow));
                    c[3] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left,     startRow - 1, width, heightRow));
                    c[4] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left + 1, startRow - 1, width, heightRow));
                    c[5] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left - 1, startRow - 1, width, heightRow));
                    c[6] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left,     startRow + 2, width, heightRow - 2));

                    var t  = new string[c.Length];
                    var cf = new float[c.Length];

                    for (int i = 0; i < c.Length; i++)
                    {
                        t[i] = AnalyseFrameUsingTesseract((Bitmap)(c[i].Clone()), engine, out cf[i]);
                    }

                    int result = 0;
                    float confidence = cf[0];

                    for (int i = 1; i < c.Length; i++)
                    {
                        if (confidence < cf[i])
                        {
                            result = i;
                            confidence = cf[i];
                        }
                    }

                    originalBitmaps[rowCtr, columnCounter] = (Bitmap)(c[result].Clone());

                    switch (columnCounter)
                    {
                        // bodges for number columns
                        case 1:
                        case 2:
                        case 3:
                            t[result] = t[result].Replace(" ", "").Replace("O", "0").Replace("I", "1").Replace("'", "");
                            t[result] = System.Text.RegularExpressions.Regex.Replace(t[result], @"[a-zA-Z\s]+", string.Empty); // remove any alphas that remain
                            break;
                        case 5:
                        case 7:
                            t[result] = t[result].Replace(" ", "").Replace("-", "");
                            if (t[result] == "HIGH" || t[result] == "MED" || t[result] == "LOW")
                            {
                                cf[result] = 1;
                            }
                            break;
                    }

                    if ((columnCounter == 5 && t[result].Contains("ENTER")) ||
                        (columnCounter == 6 && (t[result].Contains("NGAR") || t[result].Contains("SURFACE"))))
                    {
                        t[result] = "";
                        cf[result] = 1;
                    }

                    commodityColumnText[rowCtr, columnCounter] += t[result];
                    originalBitmapConfidences[rowCtr, columnCounter] = cf[result];
                }
            }

            columnCounter++;
        }

        rowCtr++;
    }

    if (Program.DBCon.getIniValue<Boolean>(MTSettings.tabSettings.DB_GROUPNAME, "CheckNextScreenshotForOne", false.ToString(), false, true))
    {
        PixelTest.StartModal(_callingForm);
    }
    else
    {
        if (textRowLocations.Count > 0)
        {
            // Call out to Brainerous to process the numeric bitmaps we saved earlier
            var outputFromBrainerous = "";
            var pr = new Process();
            pr.StartInfo.UseShellExecute        = false;
            pr.StartInfo.CreateNoWindow         = true;
            pr.StartInfo.RedirectStandardOutput = true;
            pr.StartInfo.FileName               = Program.GetDataPath(@"Brainerous\nn_training.exe");
            pr.StartInfo.WorkingDirectory       = Program.GetDataPath("Brainerous");
            pr.Start();

            outputFromBrainerous = pr.StandardOutput.ReadToEnd();

            while (outputFromBrainerous.Contains("Failed to pad successfully"))
            {
                var o2 = outputFromBrainerous.IndexOf("Failed to ");
                var o3 = outputFromBrainerous.Substring(0, o2);
                var o4 = outputFromBrainerous.Substring(o2).IndexOf(Program.GetDataPath("images"), StringComparison.InvariantCultureIgnoreCase);

                // there was a case with "Failed to pad successfully" followed only by some trash and no "./images",
                // so "o4" was -1, which resulted in strange behaviour
                if (o4 > 0)
                {
                    var o5 = outputFromBrainerous.Substring(o2 + o4);
                    outputFromBrainerous = o3 + "\r\n" + o5;
                }
                else
                {
                    outputFromBrainerous = o3;
                }
            }

            pr.WaitForExit();

            List<string> splitOutput = ((string[])outputFromBrainerous.Replace("\r", "").Split('\n')).ToList();

            // Brainerous prints two lines per image (file name, then the recognised digits);
            // insert placeholder pairs for any image it skipped so the indexing below stays aligned
            for (var i = 0; i < (textRowLocations.Count * 10); i += 2)
            {
                string Filename = (i / 2).ToString() + ".png";

                if ((splitOutput.Count <= i) || (splitOutput[i].Length < 14) || (splitOutput[i].Substring(9) != Filename))
                {
                    splitOutput.Insert(i, Program.GetDataPath(@"images\" + Filename));
                    splitOutput.Insert(i + 1, "");
                }
            }

            // Load the result from Brainerous into the OCR output
            for (var i = 0; i < textRowLocations.Count; i++)
            {
                commodityColumnText[i, 1] = splitOutput[i * 10 + 1]; originalBitmaps[i, 1] = null; originalBitmapConfidences[i, 1] = 1;
                commodityColumnText[i, 2] = splitOutput[i * 10 + 3]; originalBitmaps[i, 2] = null; originalBitmapConfidences[i, 2] = 1;
                commodityColumnText[i, 3] = splitOutput[i * 10 + 5]; originalBitmaps[i, 3] = null; originalBitmapConfidences[i, 3] = 1;
                commodityColumnText[i, 4] = splitOutput[i * 10 + 7]; originalBitmaps[i, 4] = null; originalBitmapConfidences[i, 4] = 1;
                commodityColumnText[i, 6] = splitOutput[i * 10 + 9]; originalBitmaps[i, 6] = null; originalBitmapConfidences[i, 6] = 1;
            }
        }
    }

    _bOriginal.Dispose();
    _bOriginalClone.Dispose();
    engine.Dispose();

    if (Program.DBCon.getIniValue<Boolean>(MTSettings.tabSettings.DB_GROUPNAME, "CheckNextScreenshotForOne", false.ToString(), false, true))
    {
        Program.DBCon.setIniValue(MTSettings.tabSettings.DB_GROUPNAME, "CheckNextScreenshotForOne", false.ToString());
        Form1.InstanceObject.cOcrCaptureAndCorrect.clearOcrOutput();
    }
    else
    {
        // Send the results for this screenshot back to the Form
        _callingForm.cOcrCaptureAndCorrect.DisplayCommodityResults(commodityColumnText, originalBitmaps, originalBitmapConfidences, rowIds, CurrentScreenshot);
    }

    // ...and if we've got any buffered screenshots waiting to be processed, process the next one
    if (ScreenshotBuffer.Count > 0)
    {
        var screenshot = ScreenshotBuffer[0];
        ScreenshotBuffer.Remove(screenshot);
        ProcessNewScreenshot(screenshot);
    }

    Working = false;
    Debug.WriteLine("set to " + Working);
}
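// ---------------------------------------------------------------------------------------------
// Illustrative sketch only (not part of the original source): the station-name handling above
// falls back to a Levenshtein match against the stations known for the current system and only
// accepts the closest candidate when its distance does not exceed the candidate's length.
// The helper name "CorrectStationName" and the injected distance delegate are hypothetical;
// the real code uses _levenshtein.LD2 and the members of this class directly.
// ---------------------------------------------------------------------------------------------
private static string CorrectStationName(string ocrName, string[] stationsInSystem, Func<string, string, int> distance)
{
    // an exact, case-insensitive hit wins immediately
    var exact = stationsInSystem.FirstOrDefault(x => x.Equals(ocrName, StringComparison.InvariantCultureIgnoreCase));
    if (exact != null)
        return exact;

    // otherwise take the closest candidate, gated by a length-dependent distance limit
    var best = stationsInSystem.OrderBy(x => distance(ocrName, x)).FirstOrDefault();
    if (best != null && distance(ocrName, best.ToUpper()) <= best.Length)
        return best;

    // no acceptable match - keep the raw OCR result
    return ocrName;
}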
public void PerformOcr(List<Tuple<int, int>> textRowLocations)
{
    int DarkPixels;
    var engine = new TesseractEngine(Program.GetDataPath(@"tessdata"),
                                     Program.DBCon.getIniValue<String>(IBE.IBESettingsView.DB_GROUPNAME, "TraineddataFile"),
                                     EngineMode.Default);
    engine.DefaultPageSegMode = PageSegMode.SingleLine;

    string Stationname_OCR;
    string StationameAnalysisBase;

    // delete the old Brainerous images - otherwise Brainerous would also process older, no longer relevant images
    if (Directory.Exists(Program.GetDataPath(@"Brainerous\images")))
    {
        foreach (string file in Directory.GetFiles(Program.GetDataPath(@"Brainerous\images"), "*.*"))
        {
            File.Delete(file);
        }
    }
    else
    {
        Directory.CreateDirectory(Program.GetDataPath(@"Brainerous\images"));
    }

    float level;
    var text = AnalyseFrameUsingTesseract(_bTrimmedHeader, engine, out level);
    Stationname_OCR = StripPunctuationFromScannedText(text); // (text + " {" + page.GetMeanConfidence() + "}\r\n");

    string[] StationsInSystem = Program.Data.getStations(SystemAtTimeOfScreenshot);

    if (Program.actualCondition.Station.Equals("", StringComparison.InvariantCultureIgnoreCase))
    {
        StationameAnalysisBase = Stationname_OCR;
    }
    else
    {
        StationameAnalysisBase = Program.actualCondition.Station;
    }

    string headerResult_temp = StationsInSystem.FirstOrDefault(x => x.Equals(StationameAnalysisBase, StringComparison.InvariantCultureIgnoreCase));

    if (headerResult_temp == null)
    {
        // station not found in database
        var matchesInStationReferenceList = StationsInSystem.OrderBy(x => _levenshtein.LD2(Stationname_OCR, x)).ToList();

        if (matchesInStationReferenceList.Count > 0)
        {
            var ld = _levenshtein.LD2(Stationname_OCR, matchesInStationReferenceList[0].ToUpper());

            // the limit depends on the length of the word - this factor works really well
            double LevenshteinLimit = Math.Round((matchesInStationReferenceList[0].Length * 1.0), 0);

            if (ld <= LevenshteinLimit)
            {
                Stationname_OCR = matchesInStationReferenceList[0];
            }
        }
    }
    else
    {
        Stationname_OCR = headerResult_temp;
    }

    // show station on GUI
    _callingForm.cOcrCaptureAndCorrect.DisplayResults(Stationname_OCR);

    var commodityColumnText = new string[textRowLocations.Count(), 8];
    var originalBitmaps = new Bitmap[textRowLocations.Count(), 8];
    var originalBitmapConfidences = new float[textRowLocations.Count(), 8];
    var rowIds = new string[textRowLocations.Count()];
    var rowCtr = 0;
    var bitmapCtr = 0;

    foreach (var row in textRowLocations)
    {
        int startRow = row.Item1 - 3;
        int heightRow = row.Item2 - row.Item1 + 6;

        if (startRow < 0)
        {
            startRow = 0;
        }

        if (heightRow + startRow > _bTrimmed_4_OCR.Height)
        {
            heightRow = _bTrimmed_4_OCR.Height - startRow;
        }

        // We'll use this later to identify the right correction image
        rowIds[rowCtr] = Guid.NewGuid().ToString();

        using (Bitmap b = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(0, startRow, _bTrimmed_4_OCR.Width, heightRow)))
        {
            b.Save(Program.GetDataPath(@"OCR Correction Images\" + rowIds[rowCtr] + ".png"));
        }

        int columnCounter = 0;

        while (columnCounter < 8)
        {
            int left, width;

            switch (columnCounter)
            {
                case 0: // commodity
                    left  = 0;
                    width = _calibrationPoints[3].X - _calibrationPoints[2].X;
                    break;
                case 1: // sell
                    left  = _calibrationPoints[3].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[4].X - _calibrationPoints[3].X;
                    break;
                case 2: // buy
                    left  = _calibrationPoints[4].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[5].X - _calibrationPoints[4].X;
                    break;
                case 3: // freight
                    left  = _calibrationPoints[5].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[6].X - _calibrationPoints[5].X;
                    break;
                case 4: // demand
                    left  = _calibrationPoints[6].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[7].X - _calibrationPoints[6].X;
                    break;
                case 5: // demand level
                    left  = _calibrationPoints[7].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[8].X - _calibrationPoints[7].X;
                    break;
                case 6: // supply
                    left  = _calibrationPoints[8].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[9].X - _calibrationPoints[8].X;
                    break;
                case 7: // supply level
                    left  = _calibrationPoints[9].X - _calibrationPoints[2].X;
                    width = _calibrationPoints[10].X - _calibrationPoints[9].X;
                    break;
                default:
                    left  = 0;
                    width = _calibrationPoints[3].X - _calibrationPoints[2].X;
                    break;
            }

            var fudgeFactor = 0; // _bOriginal.Height * 6 / 1440;
            left  = left + fudgeFactor;
            width = width - fudgeFactor;

            DarkPixels = 0;

            if (Program.DBCon.getIniValue<Boolean>(IBESettingsView.DB_GROUPNAME, "CheckNextScreenshotForOne", false.ToString(), false, true))
            {
                if (PixelTest == null)
                {
                    PixelTest = new EBPixeltest();
                }

                if (columnCounter == 3)
                {
                    var brainerousOut = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left, startRow, width, heightRow));

                    // count how many dark pixels are on the bitmap
                    for (int i = 0; i < brainerousOut.Height; i++)
                    {
                        for (int j = 0; j < brainerousOut.Width; j++)
                        {
                            if (brainerousOut.GetPixel(j, i).GetBrightness() < Program.DBCon.getIniValue<Int32>(IBE.IBESettingsView.DB_GROUPNAME, "EBPixelThreshold"))
                            {
                                DarkPixels++;
                            }
                        }
                    }

                    PixelTest.addPicture(brainerousOut, DarkPixels);
                }
            }
            else
            {
                // crop a little bit more from the left border because sometimes, if the vertical
                // line of the table is included, it is recognized as a "1" or "7"
                left  += 10;
                width -= 10;

                if (columnCounter != 0 && columnCounter != 5 && columnCounter != 7)
                {
                    // If it's a numeric column, write it out for Brainerous to process later
                    var brainerousOut = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left, startRow, width, heightRow));

                    if (Program.DBCon.getIniValue<Int32>(IBE.IBESettingsView.DB_GROUPNAME, "EBPixelAmount") > 0)
                    {
                        // count how many dark pixels are on the bitmap -> we only process bitmaps with something
                        // on them (at least one digit assumed; a "1" has about 25 pixels at the default 1920x1200)
                        for (int i = 0; i < brainerousOut.Height; i++)
                        {
                            for (int j = 0; j < brainerousOut.Width; j++)
                            {
                                if (brainerousOut.GetPixel(j, i).GetBrightness() < Program.DBCon.getIniValue<Int32>(IBE.IBESettingsView.DB_GROUPNAME, "EBPixelThreshold"))
                                {
                                    DarkPixels++;
                                }
                            }
                        }
                    }

                    if (DarkPixels >= Program.DBCon.getIniValue<Int32>(IBE.IBESettingsView.DB_GROUPNAME, "EBPixelAmount"))
                    {
                        brainerousOut.Save(Program.GetDataPath(@"Brainerous\images\" + bitmapCtr + ".png"));
                    }

                    bitmapCtr++;
                }
                else
                {
                    // It's a text column, so we'll use Tesseract.
                    // Prepare several slightly shifted versions of the bitmap; we will take the best result.
                    var c = new Bitmap[7];
                    c[0] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left,     startRow,     width, heightRow));
                    c[1] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left + 1, startRow,     width, heightRow));
                    c[2] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left - 1, startRow,     width, heightRow));
                    c[3] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left,     startRow - 1, width, heightRow));
                    c[4] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left + 1, startRow - 1, width, heightRow));
                    c[5] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left - 1, startRow - 1, width, heightRow));
                    c[6] = RNGraphics.Crop(_bTrimmed_4_OCR, new Rectangle(left,     startRow + 2, width, heightRow - 2));

                    var t  = new string[c.Length];
                    var cf = new float[c.Length];

                    for (int i = 0; i < c.Length; i++)
                    {
                        t[i] = AnalyseFrameUsingTesseract((Bitmap)(c[i].Clone()), engine, out cf[i]);
                    }

                    int result = 0;
                    float confidence = cf[0];

                    for (int i = 1; i < c.Length; i++)
                    {
                        if (confidence < cf[i])
                        {
                            result = i;
                            confidence = cf[i];
                        }
                    }

                    originalBitmaps[rowCtr, columnCounter] = (Bitmap)(c[result].Clone());

                    switch (columnCounter)
                    {
                        // bodges for number columns
                        case 1:
                        case 2:
                        case 3:
                            t[result] = t[result].Replace(" ", "").Replace("O", "0").Replace("I", "1").Replace("'", "");
                            t[result] = System.Text.RegularExpressions.Regex.Replace(t[result], @"[a-zA-Z\s]+", string.Empty); // remove any alphas that remain
                            break;
                        case 5:
                        case 7:
                            t[result] = t[result].Replace(" ", "").Replace("-", "");
                            if (t[result] == "HIGH" || t[result] == "MED" || t[result] == "LOW")
                            {
                                cf[result] = 1;
                            }
                            break;
                    }

                    if ((columnCounter == 5 && t[result].Contains("ENTER")) ||
                        (columnCounter == 6 && (t[result].Contains("NGAR") || t[result].Contains("SURFACE"))))
                    {
                        t[result] = "";
                        cf[result] = 1;
                    }

                    commodityColumnText[rowCtr, columnCounter] += t[result];
                    originalBitmapConfidences[rowCtr, columnCounter] = cf[result];
                }
            }

            columnCounter++;
        }

        rowCtr++;
    }

    if (Program.DBCon.getIniValue<Boolean>(IBESettingsView.DB_GROUPNAME, "CheckNextScreenshotForOne", false.ToString(), false, true))
    {
        PixelTest.StartModal(_callingForm);
    }
    else
    {
        if (textRowLocations.Count > 0)
        {
            // Call out to Brainerous to process the numeric bitmaps we saved earlier
            var outputFromBrainerous = "";
            var pr = new Process();
            pr.StartInfo.UseShellExecute        = false;
            pr.StartInfo.CreateNoWindow         = true;
            pr.StartInfo.RedirectStandardOutput = true;
            pr.StartInfo.FileName               = Program.GetDataPath(@"Brainerous\nn_training.exe");
            pr.StartInfo.WorkingDirectory       = Program.GetDataPath("Brainerous");
            pr.Start();

            outputFromBrainerous = pr.StandardOutput.ReadToEnd();

            while (outputFromBrainerous.Contains("Failed to pad successfully"))
            {
                var o2 = outputFromBrainerous.IndexOf("Failed to ");
                var o3 = outputFromBrainerous.Substring(0, o2);
                var o4 = outputFromBrainerous.Substring(o2).IndexOf(Program.GetDataPath("images"), StringComparison.InvariantCultureIgnoreCase);

                // there was a case with "Failed to pad successfully" followed only by some trash and no "./images",
                // so "o4" was -1, which resulted in strange behaviour
                if (o4 > 0)
                {
                    var o5 = outputFromBrainerous.Substring(o2 + o4);
                    outputFromBrainerous = o3 + "\r\n" + o5;
                }
                else
                {
                    outputFromBrainerous = o3;
                }
            }

            pr.WaitForExit();

            List<string> splitOutput = ((string[])outputFromBrainerous.Replace("\r", "").Split('\n')).ToList();

            // Brainerous prints two lines per image (file name, then the recognised digits);
            // insert placeholder pairs for any image it skipped so the indexing below stays aligned
            for (var i = 0; i < (textRowLocations.Count * 10); i += 2)
            {
                string Filename = (i / 2).ToString() + ".png";

                if ((splitOutput.Count <= i) || (splitOutput[i].Length < 14) || (splitOutput[i].Substring(9) != Filename))
                {
                    splitOutput.Insert(i, Program.GetDataPath(@"images\" + Filename));
                    splitOutput.Insert(i + 1, "");
                }
            }

            // Load the result from Brainerous into the OCR output
            for (var i = 0; i < textRowLocations.Count; i++)
            {
                commodityColumnText[i, 1] = splitOutput[i * 10 + 1]; originalBitmaps[i, 1] = null; originalBitmapConfidences[i, 1] = 1;
                commodityColumnText[i, 2] = splitOutput[i * 10 + 3]; originalBitmaps[i, 2] = null; originalBitmapConfidences[i, 2] = 1;
                commodityColumnText[i, 3] = splitOutput[i * 10 + 5]; originalBitmaps[i, 3] = null; originalBitmapConfidences[i, 3] = 1;
                commodityColumnText[i, 4] = splitOutput[i * 10 + 7]; originalBitmaps[i, 4] = null; originalBitmapConfidences[i, 4] = 1;
                commodityColumnText[i, 6] = splitOutput[i * 10 + 9]; originalBitmaps[i, 6] = null; originalBitmapConfidences[i, 6] = 1;
            }
        }
    }

    _bOriginal.Dispose();
    _bOriginalClone.Dispose();
    engine.Dispose();

    if (Program.DBCon.getIniValue<Boolean>(IBESettingsView.DB_GROUPNAME, "CheckNextScreenshotForOne", false.ToString(), false, true))
    {
        Program.DBCon.setIniValue(IBESettingsView.DB_GROUPNAME, "CheckNextScreenshotForOne", false.ToString());
        Form1.InstanceObject.cOcrCaptureAndCorrect.clearOcrOutput();
    }
    else
    {
        // Send the results for this screenshot back to the Form
        _callingForm.cOcrCaptureAndCorrect.DisplayCommodityResults(commodityColumnText, originalBitmaps, originalBitmapConfidences, rowIds, CurrentScreenshot);
    }

    // ...and if we've got any buffered screenshots waiting to be processed, process the next one
    if (ScreenshotBuffer.Count > 0)
    {
        var screenshot = ScreenshotBuffer[0];
        ScreenshotBuffer.Remove(screenshot);
        ProcessNewScreenshot(screenshot);
    }

    Working = false;
    Debug.WriteLine("set to " + Working);
}
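// ---------------------------------------------------------------------------------------------
// Illustrative sketch only (not part of the original source): both versions above repeat the
// same nested loop that counts pixels darker than a configured brightness threshold before
// deciding whether a cropped cell is worth saving for Brainerous. The helper name
// "CountDarkPixels" is hypothetical; the threshold parameter corresponds to the integer
// "EBPixelThreshold" setting read via Program.DBCon.getIniValue<Int32>(...), which the original
// code compares directly against Color.GetBrightness() (a float in the range 0..1).
// ---------------------------------------------------------------------------------------------
private static int CountDarkPixels(Bitmap bitmap, float brightnessThreshold)
{
    int darkPixels = 0;

    for (int y = 0; y < bitmap.Height; y++)
        for (int x = 0; x < bitmap.Width; x++)
            if (bitmap.GetPixel(x, y).GetBrightness() < brightnessThreshold)
                darkPixels++;

    return darkPixels;
}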