public static int average_on_border(Bytearray a)
{
    int sum = 0;
    int right = a.Dim(0) - 1;
    int top = a.Dim(1) - 1;
    for (int x = 0; x < a.Dim(0); x++) sum += a[x, 0];
    for (int x = 0; x < a.Dim(0); x++) sum += a[x, top];
    for (int y = 1; y < top; y++) sum += a[0, y];
    for (int y = 1; y < top; y++) sum += a[right, y];
    // If average border intensity is between 127-128, inverting the
    // image does not work correctly
    float average_border_intensity = sum / ((right + top) * 2.0f);
    if (!(average_border_intensity <= 127 || average_border_intensity >= 128))
        Console.WriteLine("average border intensity is between 127-128, inverting the image does not work correctly");
    return sum / ((right + top) * 2);
}
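// A minimal usage sketch (not part of the library): the border average is
// typically compared against the 127-128 midpoint to decide whether a line or
// page image needs inverting before further processing. The loop-based
// inversion below is an assumption; elsewhere in these examples the library's
// own helpers (OcrRoutine.Invert, NarrayUtil.Sub) are used for this.
public static void invert_if_needed(Bytearray image)
{
    if (average_on_border(image) > 127)
        for (int i = 0; i < image.Length1d(); i++)
            image.Put1d(i, (byte)(255 - image.At1d(i)));
}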
// Binary AND (pixel-wise minimum) of image with image2 shifted by (dx, dy);
// reads outside image2 go through NarrayUtil.Ext.
public static void binary_and(Bytearray image, Bytearray image2, int dx, int dy)
{
    int w = image.Dim(0);
    int h = image.Dim(1);
    for (int i = 0; i < w; i++)
        for (int j = 0; j < h; j++)
            image[i, j] = Math.Min(image[i, j], NarrayUtil.Ext(image2, i - dx, j - dy));
}
public List<List<float>> SpaceCosts(List<Candidate> candidates, Bytearray image)
{
    /*
        Given a list of character recognition candidates and their
        classifications, and an image of the corresponding text line,
        compute a list of pairs of costs for putting/not putting a space
        after each of the candidate characters.

        The basic idea behind this simple algorithm is to try larger
        and larger horizontal closing operations until most of the components
        start having a "wide" aspect ratio; that's when characters have merged
        into words.  The remaining whitespace should be spaces.

        This is just a simple stopgap measure; it will be replaced with
        trainable space modeling.
    */
    int w = image.Dim(0);
    int h = image.Dim(1);
    Bytearray closed = new Bytearray();
    int r;
    for (r = 0; r < maxrange; r++)
    {
        if (r > 0)
        {
            closed.Copy(image);
            Morph.binary_close_circle(closed, r);
        }
        else
            closed.Copy(image);
        Intarray labeled = new Intarray();
        labeled.Copy(closed);
        ImgLabels.label_components(ref labeled);
        Narray<Rect> rects = new Narray<Rect>();
        ImgLabels.bounding_boxes(ref rects, labeled);
        Floatarray aspects = new Floatarray();
        for (int i = 0; i < rects.Length(); i++)
        {
            Rect rect = rects[i];
            float aspect = rect.Aspect();
            aspects.Push(aspect);
        }
        float maspect = NarrayUtil.Median(aspects);
        if (maspect >= this.aspect_threshold) break;
    }
    // close with a little bit of extra space
    closed.Copy(image);
    Morph.binary_close_circle(closed, r + 1);
    // compute the remaining gaps
    //Morph.binary_dilate_circle();
    // every character box that ends near a gap gets a space appended
    return null;
}
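// Hedged illustration of the stopping rule described in the comment above,
// treating a box's aspect as width over height (an assumption about
// Rect.Aspect()); the widths/heights are arbitrary example values. The search
// stops once the median aspect reaches aspect_threshold, i.e. most connected
// components have merged into word-shaped blobs.
Floatarray aspects = new Floatarray();
float[] widths = { 12, 15, 60, 55 }, heights = { 20, 18, 16, 15 };
for (int i = 0; i < widths.Length; i++)
    aspects.Push(widths[i] / heights[i]);
bool mergedIntoWords = NarrayUtil.Median(aspects) >= aspect_threshold;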
public static void Thin(ref Bytearray uci)
{
    int w = uci.Dim(0) - 1;
    int h = uci.Dim(1) - 1;
    for (int i = 0, n = uci.Length1d(); i < n; i++)
    {
        if (uci.At1d(i) > 0) uci.Put1d(i, ON);
        else uci.Put1d(i, OFF);
    }
    bool flag;
    do
    {
        flag = false;
        for (int j = 0; j < 8; j += 2)
        {
            for (int x = 1; x < w; x++)
                for (int y = 1; y < h; y++)
                {
                    if (uci[x, y] != ON) continue;
                    if (uci[x + nx[j], y + ny[j]] != OFF) continue;
                    int b = 0;
                    for (int i = 7; i >= 0; i--)
                    {
                        b <<= 1;
                        b |= (uci[x + nx[i], y + ny[i]] != OFF ? 1 : 0);
                    }
                    if (ttable[b] > 0) uci[x, y] = SKEL;
                    else
                    {
                        uci[x, y] = DEL;
                        flag = true;
                    }
                }
            if (!flag) continue;
            for (int x = 1; x < w; x++)
                for (int y = 1; y < h; y++)
                    if (uci[x, y] == DEL) uci[x, y] = OFF;
        }
    } while (flag);
    for (int i = 0, n = uci.Length1d(); i < n; i++)
    {
        if (uci.At1d(i) == SKEL) uci.Put1d(i, 255);
        else uci.Put1d(i, 0);
    }
}
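// Illustration only (not part of the library): how Thin() indexes its lookup
// table. The 8 neighbors addressed by nx[]/ny[] are packed into one byte, bit i
// being set when neighbor i is foreground; ttable[b] then says whether the
// center pixel is a skeleton point to keep or may be deleted in this pass.
// The neighbor pattern below is an arbitrary example.
bool[] neighborOn = { true, true, false, false, false, false, false, true };
int b = 0;
for (int i = 7; i >= 0; i--)
{
    b <<= 1;
    b |= neighborOn[i] ? 1 : 0;
}
// b == 131 (binary 10000011): neighbors 0, 1 and 7 are set,
// and ttable[131] decides the fate of the center pixel.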
public void SetImage(Bytearray image_)
{
    Bytearray image = new Bytearray();
    //image = image_;
    image.Copy(image_);
    dimage.Copy(image);
    if (PGeti("fill_holes") > 0)
    {
        Bytearray holes = new Bytearray();
        SegmRoutine.extract_holes(ref holes, image);
        for (int i = 0; i < image.Length(); i++)
            if (holes.At1d(i) > 0) image.Put1d(i, 255);
    }
    int w = image.Dim(0), h = image.Dim(1);
    wimage.Resize(w, h);
    wimage.Fill(0);
    float s1 = 0.0f, sy = 0.0f;
    for (int i = 1; i < w; i++)
        for (int j = 0; j < h; j++)
        {
            if (image[i, j] > 0) { s1++; sy += j; }
            if (image[i, j] > 0) wimage[i, j] = inside_weight;
            else wimage[i, j] = outside_weight;
        }
    if (s1 == 0) where = image.Dim(1) / 2;
    else where = (int)(sy / s1);
    for (int i = 0; i < dimage.Dim(0); i++)
        dimage[i, where] = 0x008000;
}
// Binary OR (pixel-wise maximum) of image with image2 shifted by (dx, dy);
// the counterpart of binary_and above.
public static void binary_or(Bytearray image, Bytearray image2, int dx, int dy)
{
    int w = image.Dim(0);
    int h = image.Dim(1);
    for (int i = 0; i < w; i++)
        for (int j = 0; j < h; j++)
            image[i, j] = Math.Max(image[i, j], NarrayUtil.Ext(image2, i - dx, j - dy));
}
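// Hypothetical example of how these shifted helpers compose (mask, dilated and
// r are placeholders): OR-ing a mask with copies of itself shifted by -r..r
// pixels performs a one-dimensional horizontal dilation by r, a typical
// building block for morphological operations on binary images.
Bytearray dilated = new Bytearray();
dilated.Copy(mask);
for (int dx = -r; dx <= r; dx++)
    binary_or(dilated, mask, dx, 0);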
public void SetLine(Bytearray image)
{
    CHECK_ARG(image.Dim(1) < PGeti("maxheight"), "image.Dim(1) < PGeti(\"maxheight\")");
    // run the segmenter
    /*Narray<Rect> bboxes = new Narray<Rect>();
    Intarray iar = new Intarray();
    iar.Copy(image);
    ImgLabels.bounding_boxes(ref bboxes, iar);*/
    //Console.WriteLine("IMG SETLINE: imin:{0} imax:{1}", NarrayUtil.ArgMin(iar), NarrayUtil.ArgMax(iar));
    //Console.WriteLine("INDEX_BLACK:{0} {1} {2} {3}", bboxes[0].x0, bboxes[0].y0, bboxes[0].x1, bboxes[0].y1);
    //ImgIo.write_image_gray("image.png", image);
    OcrRoutine.binarize_simple(binarized, image);
    segmenter.Object.Charseg(ref segmentation, binarized);
    /*Intarray segm = new Intarray();
    segm.Copy(segmentation);
    ImgLabels.simple_recolor(segm);
    ImgIo.write_image_packed("segm_image.png", segm);*/
    //NarrayUtil.Sub(255, binarized);
    SegmRoutine.make_line_segmentation_black(segmentation);
    SegmRoutine.remove_small_components(segmentation, 3, 3);    // added: drop tiny components before renumbering
    ImgLabels.renumber_labels(segmentation, 1);
    // set up the grouper
    grouper.Object.SetSegmentation(segmentation);
}
public void RunTest()
{
    IBookStore bstore = new SmartBookStore();
    bstore.SetPrefix(@"data2");
    Console.WriteLine("Pages in bookstore: {0}", bstore.NumberOfPages());
    Console.WriteLine("List pages..");
    for (int i = 0; i < bstore.NumberOfPages(); i++)
        Console.WriteLine("page {0:0000}\t->\t{1,6} lines", i, bstore.LinesOnPage(i));
    Bytearray line = new Bytearray();
    bstore.GetLine(line, 1, 5);
    Console.WriteLine("line{0} [{1},{2}]", 5, line.Dim(0), line.Dim(1));
    Intarray cline = new Intarray();
    bstore.GetCharSegmentation(cline, 1, 5);
    Console.WriteLine("line{0}.cseg [{1},{2}]", 5, cline.Dim(0), cline.Dim(1));
}
/// <summary>
/// Remove singular points from the image.
/// Used by the skeleton segmenter.
/// </summary>
public static void remove_singular_points(ref Bytearray image, int d)
{
    for (int i = d; i < image.Dim(0) - d - 1; i++)
        for (int j = d; j < image.Dim(1) - d - 1; j++)
            if (is_singular(image, i, j))
            {
                for (int k = -d; k <= d; k++)
                    for (int l = -d; l <= d; l++)
                        image[i + k, j + l] = 0;
            }
}
public override void Charseg(ref Intarray result_segmentation, Bytearray orig_image)
{
    Logger.Default.Image("segmenting", orig_image);
    int PADDING = 3;
    OcrRoutine.optional_check_background_is_lighter(orig_image);
    Bytearray image = new Bytearray();
    Narray<byte> bimage = image;
    image.Copy(orig_image);
    OcrRoutine.binarize_simple(image);
    OcrRoutine.Invert(image);
    ImgOps.pad_by(ref bimage, PADDING, PADDING);
    // pass image to segmenter
    segmenter.SetImage(image);
    // find all cuts in the image
    segmenter.FindAllCuts();
    // choose the best of all cuts
    segmenter.FindBestCuts();
    Intarray segmentation = new Intarray();
    segmentation.Resize(image.Dim(0), image.Dim(1));
    for (int i = 0; i < image.Dim(0); i++)
        for (int j = 0; j < image.Dim(1); j++)
            segmentation[i, j] = image[i, j] > 0 ? 1 : 0;
    for (int r = 0; r < segmenter.bestcuts.Length(); r++)
    {
        int c = segmenter.bestcuts[r];
        Narray<Point> cut = segmenter.cuts[c];
        for (int y = 0; y < image.Dim(1); y++)
        {
            for (int x = cut[y].X; x < image.Dim(0); x++)
                if (segmentation[x, y] > 0) segmentation[x, y]++;
        }
    }
    ImgOps.extract_subimage(result_segmentation, segmentation, PADDING, PADDING,
                            segmentation.Dim(0) - PADDING, segmentation.Dim(1) - PADDING);
    if (small_merge_threshold > 0)
    {
        SegmRoutine.line_segmentation_merge_small_components(ref result_segmentation, small_merge_threshold);
        SegmRoutine.line_segmentation_sort_x(result_segmentation);
    }
    SegmRoutine.make_line_segmentation_white(result_segmentation);
    // set_line_number(segmentation, 1);
    Logger.Default.Image("resulting segmentation", result_segmentation);
}
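// Hedged illustration of the labeling scheme above, treating each cut as a
// straight vertical line for simplicity (example values only, not library
// code): after the cut loop, a foreground pixel's label is 1 plus the number
// of best cuts at or to the left of its column, so each slice between
// consecutive cuts becomes its own segment.
static int LabelForColumn(int x, int[] bestCutColumns)
{
    int label = 1;
    foreach (int c in bestCutColumns)
        if (x >= c) label++;
    return label;
}
// LabelForColumn(2, new[] { 4, 9 }) == 1
// LabelForColumn(6, new[] { 4, 9 }) == 2
// LabelForColumn(11, new[] { 4, 9 }) == 3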
public override void SetImage(Bytearray image)
{
    dimage.Copy(image);
    int w = image.Dim(0), h = image.Dim(1);
    wimage.Resize(w, h);
    wimage.Fill(0);
    float s1 = 0.0f, sy = 0.0f;
    for (int i = 1; i < w; i++)
        for (int j = 0; j < h; j++)
        {
            if (image[i, j] > 0) { s1++; sy += j; }
            if (image[i - 1, j] == 0 && image[i, j] > 0) wimage[i, j] = boundary_weight;
            else if (image[i, j] > 0) wimage[i, j] = inside_weight;
            else wimage[i, j] = outside_weight;
        }
    // guard against an empty line, as in the fill_holes variant of SetImage above
    if (s1 == 0) where = image.Dim(1) / 2;
    else where = (int)(sy / s1);
    for (int i = 0; i < dimage.Dim(0); i++)
        dimage[i, where] = 0x008000;
}
public override void Charseg(ref Intarray segmentation, Bytearray inraw)
{
    Logger.Default.Image("segmenting", inraw);
    OcrRoutine.optional_check_background_is_lighter(inraw);
    Bytearray image = new Bytearray();
    image.Copy(inraw);
    OcrRoutine.binarize_simple(image);
    OcrRoutine.Invert(image);
    segmenter.SetImage(image);
    segmenter.FindAllCuts();
    segmenter.FindBestCuts();
    Intarray seg = new Intarray();
    seg.Copy(image);
    for (int r = 0; r < segmenter.bestcuts.Length(); r++)
    {
        int w = seg.Dim(0);
        int c = segmenter.bestcuts[r];
        Narray<Point> cut = segmenter.cuts[c];
        for (int y = 0; y < image.Dim(1); y++)
        {
            for (int i = -1; i <= 1; i++)
            {
                int x = cut[y].X;
                if (x < 1 || x >= w - 1) continue;
                seg[x + i, y] = 0;
            }
        }
    }
    ImgLabels.label_components(ref seg);
    // dshowr(seg,"YY"); dwait();
    segmentation.Copy(image);
    ImgLabels.propagate_labels_to(ref segmentation, seg);
    SegmRoutine.line_segmentation_merge_small_components(ref segmentation, small_merge_threshold);
    SegmRoutine.line_segmentation_sort_x(segmentation);
    SegmRoutine.make_line_segmentation_white(segmentation);
    // set_line_number(segmentation, 1);
    Logger.Default.Image("resulting segmentation", segmentation);
}
public static int neighbors(Bytearray image, int i, int j)
{
    // note: the upper bound on j must exclude Dim(1) - 1 as well,
    // otherwise the 3x3 window below reads past the last row
    if (i < 1 || i >= image.Dim(0) - 1 || j < 1 || j >= image.Dim(1) - 1) return 0;
    if (image[i, j] == 0) return 0;
    int count = -1;
    for (int k = -1; k <= 1; k++)
        for (int l = -1; l <= 1; l++)
            if (image[i + k, j + l] > 0) count++;
    return count;
}
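// A hedged usage sketch (not library code): after Thin(), neighbors() can
// classify skeleton pixels; an endpoint has exactly one 8-neighbor, a junction
// three or more. The variable skel is a placeholder for a thinned Bytearray.
for (int i = 1; i < skel.Dim(0) - 1; i++)
    for (int j = 1; j < skel.Dim(1) - 1; j++)
    {
        if (skel[i, j] == 0) continue;
        int n = neighbors(skel, i, j);
        if (n == 1) { /* endpoint */ }
        else if (n >= 3) { /* junction, a candidate singular point */ }
    }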
/// <summary>
/// Train on a text line, given a segmentation.
/// <remarks>This is analogous to addTrainingLine(bytearray,nustring) except that
/// it takes the "ground truth" line segmentation.</remarks>
/// </summary>
public override bool AddTrainingLine(Intarray cseg, Bytearray image_grayscale, string tr)
{
    Bytearray image = new Bytearray();
    image.Copy(image_grayscale);
    if (String.IsNullOrEmpty(tr))
    {
        Global.Debugf("error", "input transcript is empty");
        return false;
    }
    if (image.Dim(0) < PGeti("minheight"))
    {
        Global.Debugf("error", "input line too small ({0} x {1})", image.Dim(0), image.Dim(1));
        return false;
    }
    if (image.Dim(1) > PGeti("maxheight"))
    {
        Global.Debugf("error", "input line too high ({0} x {1})", image.Dim(0), image.Dim(1));
        return false;
    }
    if (image.Dim(1) * 1.0 / image.Dim(0) > PGetf("maxaspect"))
    {
        Global.Debugf("warn", "input line has bad aspect ratio ({0} x {1})", image.Dim(0), image.Dim(1));
        return false;
    }
    CHECK_ARG(image.Dim(0) == cseg.Dim(0) && image.Dim(1) == cseg.Dim(1),
              "image.Dim(0) == cseg.Dim(0) && image.Dim(1) == cseg.Dim(1)");
    bool use_reject = PGetb("use_reject") && !DisableJunk;
    // check and set the transcript
    transcript = tr;
    SetLine(image_grayscale);
    if (PGeti("invert") > 0) NarrayUtil.Sub(NarrayUtil.Max(image), image);
    for (int i = 0; i < transcript.Length; i++)
        CHECK_ARG((int)transcript[i] >= 32, "(int)transcript[i] >= 32");
    // compute correspondences between actual segmentation and
    // ground truth segmentation
    Narray<Intarray> segments = new Narray<Intarray>();
    GrouperRoutine.segmentation_correspondences(segments, segmentation, cseg);
    // now iterate through all the hypothesis segments and
    // train the classifier with them
    int total = 0;
    int junk = 0;
    for (int i = 0; i < grouper.Object.Length(); i++)
    {
        Intarray segs = new Intarray();
        grouper.Object.GetSegments(segs, i);
        // see whether this is a ground truth segment
        int match = -1;
        for (int j = 0; j < segments.Length(); j++)
        {
            if (GrouperRoutine.Equals(segments[j], segs))
            {
                match = j;
                break;
            }
        }
        match -= 1;    // segments are numbered starting at 1
        int c = reject_class;
        if (match >= 0)
        {
            if (match >= transcript.Length)
            {
                Global.Debugf("error", "mismatch between transcript and cseg: {0}", transcript);
                continue;
            }
            else
            {
                c = (int)transcript[match];
                Global.Debugf("debugmismatch", "index {0} position {1} char {2} [{3}]", i, match, (char)c, c);
            }
        }
        if (c == reject_class) junk++;
        // extract the character and add it to the classifier
        Rect b;
        Bytearray mask = new Bytearray();
        grouper.Object.GetMask(out b, ref mask, i, 0);
        Bytearray cv = new Bytearray();
        grouper.Object.ExtractWithMask(cv, mask, image, i, 0);
        Floatarray v = new Floatarray();
        v.Copy(cv);
        v /= 255.0;
        Global.Debugf("cdim", "character dimensions ({0},{1})", v.Dim(0), v.Dim(1));
        total++;
        if (use_reject)
        {
            classifier.Object.XAdd(v, c);
        }
        else
        {
            if (c != reject_class) classifier.Object.XAdd(v, c);
        }
        if (c != reject_class) IncClass(c);
        ntrained++;
    }
    Global.Debugf("detail", "AddTrainingLine trained {0} chars, {1} junk", total - junk, junk);
    return true;
}
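// Hypothetical call pattern for AddTrainingLine, stitched together from the
// book-store API shown in RunTest() above; the recognizer instance and the way
// the ground-truth transcript is obtained are assumptions, not shown in these
// examples.
IBookStore bstore = new SmartBookStore();
bstore.SetPrefix(@"data2");
Bytearray line = new Bytearray();
Intarray cseg = new Intarray();
bstore.GetLine(line, 1, 5);
bstore.GetCharSegmentation(cseg, 1, 5);
string transcript = "...";   // placeholder for the ground-truth text of this line
if (!recognizer.AddTrainingLine(cseg, line, transcript))
    Console.WriteLine("line was rejected for training");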
public override void Binarize(Bytearray bin_image, Bytearray gray_image)
{
    w = PGeti("w");
    k = (float)PGetf("k");
    whalf = w >> 1;
    // fprintf(stderr,"[sauvola %g %d]\n",k,w);
    if (k < 0.001 || k > 0.999)
        throw new Exception("Binarize: CHECK_ARG(k>=0.001 && k<=0.999)");
    if (w == 0 || k >= 1000)
        throw new Exception("Binarize: CHECK_ARG(w>0 && k<1000)");
    if (bin_image.Length1d() != gray_image.Length1d())
        bin_image.MakeLike(gray_image);
    if (NarrayUtil.contains_only(gray_image, (byte)0, (byte)255))
    {
        bin_image.Copy(gray_image);
        return;
    }
    int image_width = gray_image.Dim(0);
    int image_height = gray_image.Dim(1);
    whalf = w >> 1;
    // Calculate the integral image, and integral of the squared image
    Narray<long> integral_image = new Narray<long>(), rowsum_image = new Narray<long>();
    Narray<long> integral_sqimg = new Narray<long>(), rowsum_sqimg = new Narray<long>();
    integral_image.MakeLike(gray_image);
    rowsum_image.MakeLike(gray_image);
    integral_sqimg.MakeLike(gray_image);
    rowsum_sqimg.MakeLike(gray_image);
    int xmin, ymin, xmax, ymax;
    double diagsum, idiagsum, diff, sqdiagsum, sqidiagsum, sqdiff, area;
    double mean, std, threshold;
    for (int j = 0; j < image_height; j++)
    {
        rowsum_image[0, j] = gray_image[0, j];
        rowsum_sqimg[0, j] = gray_image[0, j] * gray_image[0, j];
    }
    for (int i = 1; i < image_width; i++)
    {
        for (int j = 0; j < image_height; j++)
        {
            rowsum_image[i, j] = rowsum_image[i - 1, j] + gray_image[i, j];
            rowsum_sqimg[i, j] = rowsum_sqimg[i - 1, j] + gray_image[i, j] * gray_image[i, j];
        }
    }
    for (int i = 0; i < image_width; i++)
    {
        integral_image[i, 0] = rowsum_image[i, 0];
        integral_sqimg[i, 0] = rowsum_sqimg[i, 0];
    }
    for (int i = 0; i < image_width; i++)
    {
        for (int j = 1; j < image_height; j++)
        {
            integral_image[i, j] = integral_image[i, j - 1] + rowsum_image[i, j];
            integral_sqimg[i, j] = integral_sqimg[i, j - 1] + rowsum_sqimg[i, j];
        }
    }
    // Calculate the mean and standard deviation using the integral image
    for (int i = 0; i < image_width; i++)
    {
        for (int j = 0; j < image_height; j++)
        {
            xmin = Math.Max(0, i - whalf);
            ymin = Math.Max(0, j - whalf);
            xmax = Math.Min(image_width - 1, i + whalf);
            ymax = Math.Min(image_height - 1, j + whalf);
            area = (xmax - xmin + 1) * (ymax - ymin + 1);
            // area can't be 0 here
            // proof (assuming whalf >= 0):
            // we'll prove that (xmax-xmin+1) > 0,
            // (ymax-ymin+1) is analogous
            // It's the same as to prove: xmax >= xmin
            // image_width - 1 >= 0 since image_width > i >= 0
            // i + whalf >= 0 since i >= 0, whalf >= 0
            // i + whalf >= i - whalf since whalf >= 0
            // image_width - 1 >= i - whalf since image_width > i
            // --IM
            if (area <= 0)
                throw new Exception("Binarize: area can't be 0 here");
            if (xmin == 0 && ymin == 0)
            {
                // Point at origin
                diff = integral_image[xmax, ymax];
                sqdiff = integral_sqimg[xmax, ymax];
            }
            else if (xmin == 0 && ymin > 0)
            {
                // first column
                diff = integral_image[xmax, ymax] - integral_image[xmax, ymin - 1];
                sqdiff = integral_sqimg[xmax, ymax] - integral_sqimg[xmax, ymin - 1];
            }
            else if (xmin > 0 && ymin == 0)
            {
                // first row
                diff = integral_image[xmax, ymax] - integral_image[xmin - 1, ymax];
                sqdiff = integral_sqimg[xmax, ymax] - integral_sqimg[xmin - 1, ymax];
            }
            else
            {
                // rest of the image
                diagsum = integral_image[xmax, ymax] + integral_image[xmin - 1, ymin - 1];
                idiagsum = integral_image[xmax, ymin - 1] + integral_image[xmin - 1, ymax];
                diff = diagsum - idiagsum;
                sqdiagsum = integral_sqimg[xmax, ymax] + integral_sqimg[xmin - 1, ymin - 1];
                sqidiagsum = integral_sqimg[xmax, ymin - 1] + integral_sqimg[xmin - 1, ymax];
                sqdiff = sqdiagsum - sqidiagsum;
            }
            mean = diff / area;
            std = Math.Sqrt((sqdiff - diff * diff / area) / (area - 1));
            threshold = mean * (1 + k * ((std / 128) - 1));
            if (gray_image[i, j] < threshold) bin_image[i, j] = 0;
            else bin_image[i, j] = (byte)(MAXVAL - 1);
        }
    }
    if (PGeti("debug_binarize") > 0)
    {
        ImgIo.write_image_gray("debug_binarize.png", bin_image);
    }
}
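// The four branches above are instances of one integral-image identity; a
// hedged restatement as a hypothetical helper (not part of the class): the sum
// of gray values over the window [xmin..xmax] x [ymin..ymax] is
// I[xmax,ymax] - I[xmin-1,ymax] - I[xmax,ymin-1] + I[xmin-1,ymin-1],
// with the subtracted terms dropped when the window touches x = 0 or y = 0.
// The same window sum over the squared image gives sqdiff, and the Sauvola
// threshold is then mean * (1 + k * (std / R - 1)) with R = 128.
static long WindowSum(Narray<long> integral, int xmin, int ymin, int xmax, int ymax)
{
    long sum = integral[xmax, ymax];
    if (xmin > 0) sum -= integral[xmin - 1, ymax];
    if (ymin > 0) sum -= integral[xmax, ymin - 1];
    if (xmin > 0 && ymin > 0) sum += integral[xmin - 1, ymin - 1];
    return sum;
}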
public override void Charseg(ref Intarray segmentation, Bytearray inraw)
{
    setParams();
    //Logger.Default.Image("segmenting", inraw);
    int PADDING = 3;
    OcrRoutine.optional_check_background_is_lighter(inraw);
    Bytearray image = new Bytearray();
    image.Copy(inraw);
    OcrRoutine.binarize_simple(image);
    OcrRoutine.Invert(image);
    SetImage(image);
    FindAllCuts();
    FindBestCuts();
    Intarray seg = new Intarray();
    seg.MakeLike(image);
    seg.Fill(255);
    for (int r = 0; r < bestcuts.Length(); r++)
    {
        int w = seg.Dim(0);
        int c = bestcuts[r];
        Narray<Point> cut = cuts[c];
        for (int y = 0; y < image.Dim(1); y++)
        {
            for (int i = -1; i <= 1; i++)
            {
                int x = cut[y].X;
                if (x < 1 || x >= w - 1) continue;
                seg[x + i, y] = 0;
            }
        }
    }
    ImgLabels.label_components(ref seg);
    // dshowr(seg,"YY"); dwait();
    segmentation.Copy(image);
    for (int i = 0; i < seg.Length1d(); i++)
        if (segmentation.At1d(i) == 0) seg.Put1d(i, 0);
    ImgLabels.propagate_labels_to(ref segmentation, seg);
    if (PGeti("component_segmentation") > 0)
    {
        Intarray ccseg = new Intarray();
        ccseg.Copy(image);
        ImgLabels.label_components(ref ccseg);
        SegmRoutine.combine_segmentations(ref segmentation, ccseg);
        if (PGeti("fix_diacritics") > 0)
        {
            SegmRoutine.fix_diacritics(segmentation);
        }
    }
#if false
    SegmRoutine.line_segmentation_merge_small_components(ref segmentation, small_merge_threshold);
    SegmRoutine.line_segmentation_sort_x(segmentation);
#endif
    SegmRoutine.make_line_segmentation_white(segmentation);
    // set_line_number(segmentation, 1);
    //Logger.Default.Image("resulting segmentation", segmentation);
}
public override void Binarize(Bytearray bin_image, Bytearray gray_image)
{
    if (bin_image.Length1d() != gray_image.Length1d())
        bin_image.MakeLike(gray_image);
    if (NarrayUtil.contains_only(gray_image, (byte)0, (byte)255))
    {
        bin_image.Copy(gray_image);
        return;
    }
    int image_width = gray_image.Dim(0);
    int image_height = gray_image.Dim(1);
    int[] hist = new int[MAXVAL];
    double[] pdf = new double[MAXVAL];     // probability distribution
    double[] cdf = new double[MAXVAL];     // cumulative probability distribution
    double[] myu = new double[MAXVAL];     // mean value for separation
    double max_sigma;
    double[] sigma = new double[MAXVAL];   // inter-class variance
    /* Histogram generation */
    for (int i = 0; i < MAXVAL; i++) hist[i] = 0;
    for (int x = 0; x < image_width; x++)
        for (int y = 0; y < image_height; y++)
            hist[gray_image[x, y]]++;
    /* calculation of probability density */
    for (int i = 0; i < MAXVAL; i++)
        pdf[i] = (double)hist[i] / (image_width * image_height);
    /* cdf & myu generation */
    cdf[0] = pdf[0];
    myu[0] = 0.0;    /* 0.0 times prob[0] equals zero */
    for (int i = 1; i < MAXVAL; i++)
    {
        cdf[i] = cdf[i - 1] + pdf[i];
        myu[i] = myu[i - 1] + i * pdf[i];
    }
    /* sigma maximization:
       sigma stands for inter-class variance
       and determines the optimal threshold value */
    int threshold = 0;
    max_sigma = 0.0;
    for (int i = 0; i < MAXVAL - 1; i++)
    {
        if (cdf[i] != 0.0 && cdf[i] != 1.0)
        {
            double p1p2 = cdf[i] * (1.0 - cdf[i]);
            double mu1mu2diff = myu[MAXVAL - 1] * cdf[i] - myu[i];
            sigma[i] = mu1mu2diff * mu1mu2diff / p1p2;
        }
        else
            sigma[i] = 0.0;
        if (sigma[i] > max_sigma)
        {
            max_sigma = sigma[i];
            threshold = i;
        }
    }
    for (int x = 0; x < image_width; x++)
        for (int y = 0; y < image_height; y++)
        {
            if (gray_image[x, y] > threshold) bin_image[x, y] = (byte)(MAXVAL - 1);
            else bin_image[x, y] = 0;
        }
    if (PGeti("debug_otsu") > 0)
    {
        Logger.Default.Format("Otsu threshold value = {0}\n", threshold);
        //ImgIo.write_image_gray("debug_otsu.png", bin_image);
    }
}
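// Hedged restatement of the quantity maximized above as a hypothetical helper
// (not part of the class): for a candidate threshold t with w = cdf[t]
// (probability of the dark class), mu = myu[t] (partial mean) and
// muT = myu[MAXVAL-1] (global mean), Otsu's between-class variance is
// (muT * w - mu)^2 / (w * (1 - w)), which is exactly what the sigma[] loop computes.
static double BetweenClassVariance(double w, double mu, double muT)
{
    double p1p2 = w * (1.0 - w);
    double diff = muT * w - mu;
    return diff * diff / p1p2;
}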
public void Image(string description, Bytearray a, float zoom = 100f)
{
    if (verbose)
        writer.WriteLine(String.Format("image {0} w:{1}, h:{2}", description, a.Dim(0), a.Dim(1)));
}
/// <summary>
/// This is a weird, optional method that exposes character segmentation
/// for those line recognizers that have it. The segmentation contains colored pixels,
/// and a transition in the transducer of the form * --- 1/eps --> * --- 2/a --> *
/// means that pixels with color 1 and 2 together form the letter "a".
/// </summary>
public override double RecognizeLine(Intarray segmentation_, IGenericFst result, Bytearray image_)
{
    double rate = 0.0;
    CHECK_ARG(image_.Dim(1) < PGeti("maxheight"),
              String.Format("input line too high ({0} x {1})", image_.Dim(0), image_.Dim(1)));
    CHECK_ARG(image_.Dim(1) * 1.0 / image_.Dim(0) < PGetf("maxaspect"),
              String.Format("input line has bad aspect ratio ({0} x {1})", image_.Dim(0), image_.Dim(1)));
    bool use_reject = PGetb("use_reject") && !DisableJunk;
    //Console.WriteLine("IMG: imin:{0} imax:{1}", NarrayUtil.ArgMin(image_), NarrayUtil.ArgMax(image_));
    Bytearray image = new Bytearray();
    image.Copy(image_);
    SetLine(image_);
    if (PGeti("invert") > 0) NarrayUtil.Sub(NarrayUtil.Max(image), image);
    segmentation_.Copy(segmentation);
    Bytearray available = new Bytearray();
    Floatarray cp = new Floatarray();
    Floatarray ccosts = new Floatarray();
    Floatarray props = new Floatarray();
    OutputVector p = new OutputVector();
    int ncomponents = grouper.Object.Length();
    int minclass = PGeti("minclass");
    float minprob = PGetf("minprob");
    float space_yes = PGetf("space_yes");
    float space_no = PGetf("space_no");
    float maxcost = PGetf("maxcost");
    // compute priors if possible; fall back on
    // using no priors if no counts are available
    Floatarray priors = new Floatarray();
    bool use_priors = PGeti("use_priors") > 0;
    if (use_priors)
    {
        if (counts.Length() > 0)
        {
            priors.Copy(counts);
            priors /= NarrayUtil.Sum(priors);
        }
        else
        {
            if (!counts_warned)
                Global.Debugf("warn", "use_priors specified but priors unavailable (old model)");
            use_priors = false;
            counts_warned = true;
        }
    }
    EstimateSpaceSize();
    for (int i = 0; i < ncomponents; i++)
    {
        Rect b;
        Bytearray mask = new Bytearray();
        grouper.Object.GetMask(out b, ref mask, i, 0);
        Bytearray cv = new Bytearray();
        grouper.Object.ExtractWithMask(cv, mask, image, i, 0);
        //ImgIo.write_image_gray("extrmask_image.png", cv);
        Floatarray v = new Floatarray();
        v.Copy(cv);
        v /= 255.0f;
        float ccost = classifier.Object.XOutputs(p, v);
        if (use_reject && classifier.Object.HigherOutputIsBetter)
        {
            ccost = 0;
            float total = p.Sum();
            if (total > 1e-11f)
            {
                //p /= total;
            }
            else
                p.Values.Fill(0.0f);
        }
        int count = 0;
        Global.Debugf("dcost", "output {0}", p.Keys.Length());
        for (int index = 0; index < p.Keys.Length(); index++)
        {
            int j = p.Keys[index];
            if (j < minclass) continue;
            if (j == reject_class) continue;
            float value = p.Values[index];
            if (value <= 0.0f) continue;
            if (value < minprob) continue;
            float pcost = classifier.Object.HigherOutputIsBetter ? (float)-Math.Log(value) : value;
            Global.Debugf("dcost", "{0} {1} {2}", j, pcost + ccost, (j > 32 ? (char)j : '_'));
            float total_cost = pcost + ccost;
            if (total_cost < maxcost)
            {
                if (use_priors)
                {
                    total_cost -= (float)-Math.Log(priors[j]);
                }
                grouper.Object.SetClass(i, j, total_cost);
                count++;
            }
        }
        Global.Debugf("dcost", "");
        if (count == 0)
        {
            float xheight = 10.0f;
            if (b.Height() < xheight / 2 && b.Width() < xheight / 2)
                grouper.Object.SetClass(i, (int)'~', high_cost / 2);
            else
                grouper.Object.SetClass(i, (int)'#', (b.Width() / xheight) * high_cost);
        }
        if (grouper.Object.PixelSpace(i) > space_threshold)
        {
            Global.Debugf("spaces", "space {0}", grouper.Object.PixelSpace(i));
            grouper.Object.SetSpaceCost(i, space_yes, space_no);
        }
    }
    grouper.Object.GetLattice(result);
    return rate;
}
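// Hedged restatement of the cost arithmetic in the loop above as a hypothetical
// helper (not part of the class): classifier outputs are turned into costs as
// -log(p) when higher outputs are better, the per-component cost ccost is added,
// and, when use_priors is set, the class prior is divided out in log space.
static float CombinedCost(float p, float ccost, float prior, bool usePriors)
{
    float cost = (float)-Math.Log(p) + ccost;
    if (usePriors)
        cost -= (float)-Math.Log(prior);    // equivalent to adding log(prior)
    return cost;
}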