public IplImage ScannerContour(IplImage src)
{
    con = new IplImage(src.Size, BitDepth.U8, 3);
    Cv.Copy(src, con);
    bin = this.Binary(src, 150);

    // The native-style loop (Cv.FindNextContour / Cv.EndFindContours) is replaced by
    // iterating the scanner directly; both enumerate the same contour sequences.
    using (CvMemStorage storage = new CvMemStorage())
    using (CvContourScanner scanner = Cv.StartFindContours(bin, storage,
        CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxNone))
    {
        foreach (CvSeq<CvPoint> contours in scanner)
        {
            // Skip the contour that starts at the artificial (1, 1) border point.
            if (contours[0].Value == new CvPoint(1, 1))
            {
                continue;
            }
            con.DrawContours(contours, CvColor.Yellow, CvColor.Red, 1, 4, LineType.AntiAlias);
        }
    }
    return con;
}
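A minimal calling sketch for the method above; it assumes the containing class with its con/bin fields and Binary() helper, and "input.bmp" is a placeholder path:

// Hypothetical usage: draw the contours of an image and show the overlay.
using (IplImage src = new IplImage("input.bmp", LoadMode.Color))
using (IplImage overlay = ScannerContour(src))
using (new CvWindow("contours", overlay))
{
    Cv.WaitKey();
}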
public ContourScanner()
{
    // create IplImages
    using (var src = new IplImage(FilePath.Image.Lenna, LoadMode.Color))
    using (var gray = new IplImage(src.Size, BitDepth.U8, 1))
    using (var canny = new IplImage(src.Size, BitDepth.U8, 1))
    using (var result = src.Clone())
    {
        // detect edges
        Cv.CvtColor(src, gray, ColorConversion.BgrToGray);
        Cv.Canny(gray, canny, 50, 200);

        // find all contours
        using (CvMemStorage storage = new CvMemStorage())
        {
            // native style:
            /*
            CvContourScanner scanner = Cv.StartFindContours(
                canny, storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple);
            while (true)
            {
                CvSeq<CvPoint> c = Cv.FindNextContour(scanner);
                if (c == null)
                    break;
                Cv.DrawContours(result, c, CvColor.Red, CvColor.Green, 0, 3, LineType.AntiAlias);
            }
            Cv.EndFindContours(scanner);
            */

            // wrapper style:
            using (CvContourScanner scanner = new CvContourScanner(
                canny, storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple))
            {
                foreach (CvSeq<CvPoint> c in scanner)
                {
                    result.DrawContours(c, CvColor.Red, CvColor.Green, 0, 3, LineType.AntiAlias);
                }
            }
        }

        // show canny and result
        using (new CvWindow("ContourScanner canny", canny))
        using (new CvWindow("ContourScanner result", result))
        {
            Cv.WaitKey();
        }
    }
}
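Both styles above enumerate the same contours. The wrapper style is less error-prone: the foreach drives the scan through the scanner's enumerator, and Dispose ends the scan when the using block exits, so no explicit Cv.EndFindContours call is needed even if drawing throws.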
// OpenCvSharp's contour API is for some reason complicated and needs to be abstracted.
// So here is the abstraction layer...
// TODO: the OpenCV docs specifically state that the input image should be binary. Check that it is.
// See usage notes here: http://stackoverflow.com/questions/35418714/opencvsharps-findcontour-returns-wrong-data
public static ContourData FindContours(CvMat input, ContourRetrieval retrievalMode,
    ContourChain chainMode, CvPoint offset, double minArea = 0)
{
    List<CvPoint[]> pointsArrays = new List<CvPoint[]>();
    List<double> areas = new List<double>();

    // Note: do not pre-call Cv.FindContours on `input` here; it modifies the input
    // image in place, and the scanner below would then see the modified image.
    using (CvMemStorage storage = new CvMemStorage())
    using (CvContourScanner scanner = new CvContourScanner(
        input, storage, CvContour.SizeOf, retrievalMode, chainMode, offset))
    {
        foreach (CvSeq<CvPoint> c in scanner)
        {
            // Some contours have negative area (the sign depends on orientation).
            double area = c.ContourArea();
            if (Math.Abs(area) >= minArea)
            {
                areas.Add(area);
                pointsArrays.Add(c.ToArray());
            }
        }
    }

    ContourData data = new ContourData();
    data.contours = pointsArrays.ToArray();
    data.areas = areas.ToArray();
    return data;
}
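A hypothetical usage of this wrapper (assuming ContourData exposes contours as CvPoint[][] and areas as double[], as the assignments above suggest):

// Collect contours with |area| >= 50 px² from a binarized image.
CvMat bin = new CvMat(440, 512, MatrixType.U8C1);
// ... fill `bin` with a binarized image ...
ContourData data = FindContours(bin, ContourRetrieval.External, ContourChain.ApproxSimple, new CvPoint(0, 0), 50);
for (int i = 0; i < data.contours.Length; i++)
{
    Console.WriteLine("contour {0}: {1} points, area {2:F1}", i, data.contours[i].Length, data.areas[i]);
}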
#if LANG_JP
/// <summary>
/// 輪郭走査処理を終了する
/// </summary>
/// <param name="scanner">輪郭スキャナへのポインタ</param>
/// <returns></returns>
#else
/// <summary>
/// Finishes the scanning process
/// </summary>
/// <param name="scanner">Contour scanner.</param>
/// <returns>The first contour on the highest level extracted during the scan.</returns>
#endif
public static CvSeq<CvPoint> EndFindContours(CvContourScanner scanner)
{
    if (scanner == null)
    {
        throw new ArgumentNullException("scanner");
    }
    return scanner.EndFindContours();
}
private void task()
{
    Camera camera = Camera.GetInstance();
    MotorControler mc = MotorControler.GetInstance(parameterManager);
    Vector3 CurrentPoint = mc.GetPoint();
    Vector3 p = new Vector3();
    int BinarizeThreshold = 10;
    int BrightnessThreshold = 7;
    Mat sum = Mat.Zeros(440, 512, MatType.CV_8UC1);

    string datfileName = string.Format(@"c:\img\{0}.dat",
        System.DateTime.Now.ToString("yyyyMMdd_HHmmss_fff"));
    BinaryWriter writer = new BinaryWriter(File.Open(datfileName, FileMode.Create));

    for (int i = 0; i < 10; i++)
    {
        byte[] b = camera.ArrayImage;
        writer.Write(b);
        p = mc.GetPoint();
        Mat mat = new Mat(440, 512, MatType.CV_8U, b);
        mat.ImWrite(String.Format(@"c:\img\{0}_{1}_{2}_{3}.bmp",
            System.DateTime.Now.ToString("yyyyMMdd_HHmmss_fff"),
            (int)(p.X * 1000), (int)(p.Y * 1000), (int)(p.Z * 1000)));

        // Difference of Gaussians, binarize, and accumulate into `sum`.
        Cv2.GaussianBlur(mat, mat, Cv.Size(3, 3), -1);
        Mat gau = mat.Clone();
        Cv2.GaussianBlur(gau, gau, Cv.Size(31, 31), -1);
        Cv2.Subtract(gau, mat, mat);
        Cv2.Threshold(mat, mat, BinarizeThreshold, 1, ThresholdType.Binary);
        Cv2.Add(sum, mat, sum);

        mc.MoveDistance(-0.003, VectorId.Z);
        mc.Join();
    }
    writer.Close();

    Cv2.Threshold(sum, sum, BrightnessThreshold, 1, ThresholdType.Binary);

    // Cv2.FindContours throws an AccessViolationException here (both Release and Debug),
    // so fall back to the C-style API.
    using (CvMemStorage storage = new CvMemStorage())
    using (CvContourScanner scanner = new CvContourScanner(sum.ToIplImage(),
        storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple))
    {
        //string fileName = string.Format(@"c:\img\{0}.txt", System.DateTime.Now.ToString("yyyyMMdd_HHmmss_fff"));
        string fileName = string.Format(@"c:\img\u.txt");
        foreach (CvSeq<CvPoint> c in scanner)
        {
            CvMoments mom = new CvMoments(c, false);
            if (c.ElemSize < 2) continue;
            if (mom.M00 == 0.0) continue;
            double mx = mom.M10 / mom.M00; // centroid x
            double my = mom.M01 / mom.M00; // centroid y
            File.AppendAllText(fileName, string.Format("{0:F} {1:F}\n", mx, my));
        }
    }

    sum *= 255;
    sum.ImWrite(String.Format(@"c:\img\{0}_{1}_{2}.bmp",
        System.DateTime.Now.ToString("yyyyMMdd_HHmmss_fff"),
        (int)(p.X * 1000), (int)(p.Y * 1000)));

    Vector2 encoderPoint = new Vector2(-1, -1);
    encoderPoint.X = mc.GetPoint().X;
    encoderPoint.Y = mc.GetPoint().Y; // got scolded for the other way, so reluctantly doing it like this. Yoshida 2015-04-27
    Vector2 viewerPoint = new Vector2(-1, -1);
    if (TigerPatternMatch.PatternMatch(ref viewerPoint))
    {
        encoderPoint = coordManager.TransToEmulsionCoord(viewerPoint);
        mc.MovePointXY(encoderPoint);
        mc.Join();
    }
}
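The centroid-from-moments pattern above (mx = M10/M00, my = M01/M00, guarded by the ElemSize and M00 checks) recurs in several methods below. A small helper along these lines, sketched here and not part of the original code, would factor it out:

// Centroid of a contour from its spatial moments; null for degenerate contours.
static CvPoint2D64f? ContourCentroid(CvSeq<CvPoint> c)
{
    if (c.ElemSize < 2)
        return null;
    CvMoments mom = new CvMoments(c, false);
    if (mom.M00 == 0.0)
        return null;
    return new CvPoint2D64f(mom.M10 / mom.M00, mom.M01 / mom.M00);
}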
#if LANG_JP
/// <summary>
/// 画像中の次の輪郭を検索する
/// </summary>
/// <param name="scanner">関数cvStartFindContoursで初期化された輪郭スキャナ</param>
/// <returns></returns>
#else
/// <summary>
/// Finds the next contour in the image
/// </summary>
/// <param name="scanner">Contour scanner initialized by the function cvStartFindContours.</param>
/// <returns>The next contour, or null when no contours remain.</returns>
#endif
public static CvSeq<CvPoint> FindNextContour(CvContourScanner scanner)
{
    if (scanner == null)
    {
        throw new ArgumentNullException("scanner");
    }
    IntPtr result = NativeMethods.cvFindNextContour(scanner.CvPtr);
    return result == IntPtr.Zero ? null : new CvSeq<CvPoint>(result);
}
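Together with StartFindContours, the two wrappers above support the classic C-style scan. A minimal sketch, assuming bin is a binarized single-channel IplImage:

using (CvMemStorage storage = new CvMemStorage())
{
    CvContourScanner scanner = Cv.StartFindContours(bin, storage,
        CvContour.SizeOf, ContourRetrieval.List, ContourChain.ApproxSimple);
    CvSeq<CvPoint> c;
    while ((c = Cv.FindNextContour(scanner)) != null)
    {
        Console.WriteLine("contour with {0} points", c.Total);
    }
    Cv.EndFindContours(scanner); // releases the scanner state
}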
static OpenCvSharp.CPlusPlus.Point TrackDetection(List<Mat> mats, int px, int py,
    int shiftx = 2, int shifty = 2, int shiftpitch = 4, int windowsize = 40,
    int phthresh = 5, bool debugflag = false)
{
    // Example call: Point2d pixel_cen = TrackDetection(binimages, 256, 220, 3, 3, 4, 90, 3);
    int x0 = px - 256;
    int y0 = py - 220;
    List<rawmicrotrack> rms = new List<rawmicrotrack>();
    int counter = 0;

    for (int ax = -shiftx; ax <= shiftx; ax++)
    {
        for (int ay = -shifty; ay <= shifty; ay++)
        {
            using (Mat big = Mat.Zeros(600, 600, MatType.CV_8UC1))
            using (Mat imgMask = Mat.Zeros(big.Height, big.Width, MatType.CV_8UC1))
            {
                // Build a filled rectangular mask around the expected track position.
                int ystart = big.Height / 2 + y0 - windowsize / 2;
                int xstart = big.Width / 2 + x0 - windowsize / 2;
                OpenCvSharp.CPlusPlus.Rect recMask =
                    new OpenCvSharp.CPlusPlus.Rect(xstart, ystart, windowsize, windowsize);
                Cv2.Rectangle(imgMask, recMask, 255, -1);

                // Stack the image set, shifting each layer by the trial slope (ax, ay).
                for (int p = 0; p < mats.Count; p++)
                {
                    int startx = big.Width / 2 - mats[p].Width / 2 + (int)(p * ax * shiftpitch / 8.0);
                    int starty = big.Height / 2 - mats[p].Height / 2 + (int)(p * ay * shiftpitch / 8.0);
                    Cv2.Add(
                        big[starty, starty + mats[p].Height, startx, startx + mats[p].Width],
                        mats[p],
                        big[starty, starty + mats[p].Height, startx, startx + mats[p].Width]);
                }

                using (Mat big_c = big.Clone())
                {
                    Cv2.Threshold(big, big, phthresh, 255, ThresholdType.ToZero);
                    Cv2.BitwiseAnd(big, imgMask, big);
                    // Note: an ROI such as big[ystart, yend, xstart, xend] is not contiguous
                    // in memory, so contour extraction on it throws an exception.
                    if (debugflag == true)
                    {
                        Cv2.Rectangle(big_c, recMask, 255, 1);
                        Cv2.ImShow("big_cx30", big_c * 30);
                        Cv2.ImShow("bigx30", big * 30);
                        Cv2.WaitKey(0);
                    }
                }

                using (CvMemStorage storage = new CvMemStorage())
                using (CvContourScanner scanner = new CvContourScanner(big.ToIplImage(),
                    storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple))
                {
                    foreach (CvSeq<CvPoint> c in scanner)
                    {
                        CvMoments mom = new CvMoments(c, false);
                        if (c.ElemSize < 2) continue;
                        if (mom.M00 < 1.0) continue;
                        double mx = mom.M10 / mom.M00;
                        double my = mom.M01 / mom.M00;
                        rawmicrotrack rm = new rawmicrotrack();
                        rm.ax = ax;
                        rm.ay = ay;
                        rm.cx = (int)(mx - big.Width / 2);
                        rm.cy = (int)(my - big.Height / 2);
                        rm.pv = (int)(mom.M00);
                        rms.Add(rm);
                    }
                }
                counter++;
            }
        } // ay
    } // ax

    OpenCvSharp.CPlusPlus.Point trackpos;
    if (rms.Count > 0)
    {
        double meancx = 0, meancy = 0, meanax = 0, meanay = 0, sumpv = 0;
        for (int i = 0; i < rms.Count; i++)
        {
            meancx += rms[i].cx * rms[i].pv;
            meancy += rms[i].cy * rms[i].pv;
            meanax += rms[i].ax * rms[i].pv;
            meanay += rms[i].ay * rms[i].pv;
            sumpv += rms[i].pv;
        }
        // Brightness-weighted averages of centroid and slope.
        meancx /= sumpv;
        meancy /= sumpv;
        meanax /= sumpv;
        meanay /= sumpv;

        trackpos = new OpenCvSharp.CPlusPlus.Point(
            (int)(meancx) + 256 - meanax * shiftpitch,
            (int)(meancy) + 220 - meanay * shiftpitch);

        double anglex = (meanax * shiftpitch * 0.267) / (3.0 * 7.0 * 2.2);
        double angley = (meanay * shiftpitch * 0.267) / (3.0 * 7.0 * 2.2);
        Console.WriteLine(string.Format("{0:f4} {1:f4}", anglex, angley));
    }
    else
    {
        trackpos = new OpenCvSharp.CPlusPlus.Point(-1, -1);
    }
    return trackpos;
} // TrackDetection
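A note on the constants in the angle conversion above, read against the rest of this collection (an interpretation, not something the source states): 0.267 appears to be the pixel pitch in µm, 3.0 the z step in µm, 7.0 the number of layer gaps, and 2.2 the emulsion shrinkage factor, so the slope is roughly (pixel shift × pixel pitch) / (z extent × shrinkage).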
private void task()
{
    TracksManager tm = parameterManager.TracksManager;
    Track myTrack = tm.GetTrack(tm.TrackingIndex);
    MotorControler mc = MotorControler.GetInstance(parameterManager);
    Camera camera = Camera.GetInstance();

    List<Mat> image_set = new List<Mat>();
    List<Mat> image_set_reverse = new List<Mat>();

    Surface surface = Surface.GetInstance(parameterManager); // boundary values from surface recognition
    double uptop = surface.UpTop;
    double upbottom = surface.UpBottom;
    double lowtop = surface.LowTop;
    double lowbottom = surface.LowBottom;

    double now_x = mc.GetPoint().X;
    double now_y = mc.GetPoint().Y;
    double now_z = mc.GetPoint().Z;

    common_dx = myTrack.MsDX + ((0.265625 * over_dx * 3) / (0.024 * 2.2 * 1000));
    common_dy = myTrack.MsDY - ((0.265625 * over_dy * 3) / (0.024 * 2.2 * 1000));

    for (int i = 0; i < 8; i++)
    {
        // myTrack.MsDX/MsDY are the x/y displacement (mm) per 1 mm of dz.
        double next_x = now_x - i * common_dx * 0.003 * 2.2; // capture at 3 um intervals
        double next_y = now_y - i * common_dy * 0.003 * 2.2; // shrinkage factor provisionally taken as 2.2
        mc.MovePoint(next_x, next_y, now_z - 0.003 * i);
        mc.Join();
        byte[] b = camera.ArrayImage;
        Mat image = new Mat(440, 512, MatType.CV_8U, b);
        Mat imagec = image.Clone();
        image_set.Add(imagec);
    }
    for (int i = 7; i >= 0; i--)
    {
        image_set_reverse.Add(image_set[i]);
    }

    int n = image_set.Count(); // number of images acquired per pass
    Mat cont = new Mat(440, 512, MatType.CV_8U);
    Mat gau_1 = new Mat(440, 512, MatType.CV_8U);
    Mat gau_2 = new Mat(440, 512, MatType.CV_8U);
    Mat sub = new Mat(440, 512, MatType.CV_8U);
    Mat bin = new Mat(440, 512, MatType.CV_8U);
    double Max_kido;
    double Min_kido;
    OpenCvSharp.CPlusPlus.Point maxloc;
    OpenCvSharp.CPlusPlus.Point minloc;
    List<Mat> two_set = new List<Mat>();
    List<Mat> Part_img = new List<Mat>();

    for (int i = 0; i < image_set.Count(); i++)
    {
        // Difference of Gaussians, contrast stretch, then binarize. Parameters need tuning.
        Cv2.GaussianBlur(image_set_reverse[i], gau_1, Cv.Size(3, 3), -1);
        Cv2.GaussianBlur(gau_1, gau_2, Cv.Size(51, 51), -1);
        Cv2.Subtract(gau_2, gau_1, sub);
        Cv2.MinMaxLoc(sub, out Min_kido, out Max_kido, out minloc, out maxloc);
        cont = (sub - Min_kido) * 255 / (Max_kido - Min_kido);
        cont.ImWrite(string.Format(@"C:\set\cont_{0}.bmp", i));
        Cv2.Threshold(cont, bin, 115, 1, ThresholdType.Binary);
        two_set.Add(bin.Clone()); // clone: otherwise every list entry aliases the same Mat
    }

    List<mm> white_area = new List<mm>();
    int x0 = 256;
    int y0 = 220; // center of the field of view

    for (int delta_xx = -1; delta_xx <= 1; delta_xx++) // pixel shift relative to the bottom image
    {
        for (int delta_yy = -1; delta_yy <= 1; delta_yy++)
        {
            // Allocate the stacked image (zeros). A generalized version would size these with
            // (n - 1) * Math.Abs(delta) instead of the hard-coded 3.
            Mat superimposed = Mat.Zeros(440 + 3 * Math.Abs(delta_yy),
                512 + 3 * Math.Abs(delta_xx), MatType.CV_8UC1);

            // Allocate a frame for each image.
            for (int i = 0; i < two_set.Count; i++)
            {
                Mat Part = Mat.Zeros(440 + 3 * Math.Abs(delta_yy),
                    512 + 3 * Math.Abs(delta_xx), MatType.CV_8UC1);
                Part_img.Add(Part);
            }

            // Images are shifted two per set. The four sign combinations of (delta_xx, delta_yy),
            // written out as separate branches in the original, reduce to the offset formulas
            // below because the deltas are limited to [-1, 1].
            for (int i = 0; i < two_set.Count; i++)
            {
                int k = i / 2; // pair index
                int yOff = (delta_yy >= 0) ? k * Math.Abs(delta_yy) : (3 - k) * Math.Abs(delta_yy);
                int xOff = (delta_xx >= 0) ? k * Math.Abs(delta_xx) : (3 - k) * Math.Abs(delta_xx);
                // Copy each processed image into its region of Part.
                Part_img[i][yOff, 440 + yOff, xOff, 512 + xOff] = two_set[i];
            }
            for (int i = 0; i < Part_img.Count(); i++)
            {
                superimposed += Part_img[i];
            }
            Cv2.Threshold(superimposed, superimposed, 5, 255, ThresholdType.ToZero); // parameter needs tuning

            // Crop back to the size and position of the first image.
            int yCrop = (delta_yy >= 0) ? 0 : 3;
            int xCrop = (delta_xx >= 0) ? 0 : 3;
            superimposed.SubMat(yCrop, 440 + yCrop, xCrop, 512 + xCrop).CopyTo(superimposed);

            // Black out everything outside the square of ○ um around the field-of-view center.
            Mat one1 = Mat.Ones(y0 - 20, 512, MatType.CV_8UC1);
            Mat one2 = Mat.Ones(41, x0 - 20, MatType.CV_8UC1);
            Mat one3 = Mat.Ones(41, 491 - x0, MatType.CV_8UC1);
            Mat one4 = Mat.Ones(419 - y0, 512, MatType.CV_8UC1);
            superimposed[0, y0 - 20, 0, 512] = one1 * 0;
            superimposed[y0 - 20, y0 + 21, 0, x0 - 20] = one2 * 0;
            superimposed[y0 - 20, y0 + 21, x0 + 21, 512] = one3 * 0;
            superimposed[y0 + 21, 440, 0, 512] = one4 * 0;
            superimposed.ImWrite("C:\\set\\superimposed25_1.bmp");

            using (CvMemStorage storage = new CvMemStorage())
            using (CvContourScanner scanner = new CvContourScanner(superimposed.ToIplImage(),
                storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple))
            {
                foreach (CvSeq<CvPoint> c in scanner)
                {
                    CvMoments mom = new CvMoments(c, false);
                    if (c.ElemSize < 2) continue;
                    if (mom.M00 == 0.0) continue;
                    double mx = mom.M10 / mom.M00;
                    double my = mom.M01 / mom.M00;
                    mm koko = new mm();
                    koko.white_x = mx;
                    koko.white_y = my;
                    koko.white_kido = mom.M00;
                    koko.white_dx = delta_xx;
                    koko.white_dy = delta_yy;
                    white_area.Add(koko);
                    stage.WriteLine(String.Format("mx={0:f2} , my={1:f2} , dx={2:f2} , dy={3:f2} , M={4:f2}",
                        mx, my, delta_xx, delta_yy, mom.M00));
                }
            }
            Part_img.Clear();
        } // pixel shift y
    } // pixel shift x

    if (white_area.Count > 0)
    {
        // Brightness-weighted center of gravity over all shifts.
        double center_x = 0, center_y = 0, center_dx = 0, center_dy = 0, kido_sum = 0;
        for (int i = 0; i < white_area.Count; i++)
        {
            kido_sum += white_area[i].white_kido;
            center_x += white_area[i].white_x * white_area[i].white_kido;
            center_y += white_area[i].white_y * white_area[i].white_kido;
            center_dx += white_area[i].white_dx * white_area[i].white_kido;
            center_dy += white_area[i].white_dy * white_area[i].white_kido;
        }
        center_x /= kido_sum;
        center_y /= kido_sum;
        center_dx /= kido_sum;
        center_dy /= kido_sum;

        // Round to the nearest pixel.
        int c_o_g_x = (center_x >= 0) ? (int)(center_x + 0.5) : (int)(center_x - 0.5);
        int c_o_g_y = (center_y >= 0) ? (int)(center_y + 0.5) : (int)(center_y - 0.5);

        int dx_pixel = c_o_g_x - x0;
        int dy_pixel = c_o_g_y - y0;
        double dx_micron = dx_pixel * 0.265625 / 1000;
        double dy_micron = dy_pixel * 0.265625 / 1000;

        double now_x2 = mc.GetPoint().X;
        double now_y2 = mc.GetPoint().Y;
        mc.MovePointXY(now_x2 - dx_micron, now_y2 + dy_micron); // signs follow the pixel-axis vs. stage-axis relation
        mc.Join();
        over_dx = center_dx;
        over_dy = center_dy;
    }
}
private void BeamDetection(string outputfilename, bool isup)
{ // beam detection
    int BinarizeThreshold = 60;
    int BrightnessThreshold = 4;
    int nop = 7;
    double dz = isup ? -0.003 : 0.003;

    Camera camera = Camera.GetInstance();
    MotorControler mc = MotorControler.GetInstance(parameterManager);
    Vector3 InitPoint = mc.GetPoint();
    Vector3 p = new Vector3();

    TracksManager tm = parameterManager.TracksManager;
    int mod = parameterManager.ModuleNo;
    int pl = parameterManager.PlateNo;
    Track myTrack = tm.GetTrack(tm.TrackingIndex);
    string[] sp = myTrack.IdString.Split('-');

    //string datfileName = string.Format("{0}.dat", System.DateTime.Now.ToString("yyyyMMdd_HHmmss"));
    string datfileName = string.Format(@"c:\test\bpm\{0}\{1}-{2}-{3}-{4}-{5}.dat",
        mod, mod, pl, sp[0], sp[1], System.DateTime.Now.ToString("ddHHmmss"));
    BinaryWriter writer = new BinaryWriter(File.Open(datfileName, FileMode.Create));
    byte[] bb = new byte[440 * 512 * nop];

    string fileName = string.Format("{0}", outputfilename);
    StreamWriter twriter = File.CreateText(fileName);
    string stlog = "";

    List<ImageTaking> LiIT = TakeSequentialImage(0.0, 0.0, dz, nop);

    Mat sum = Mat.Zeros(440, 512, MatType.CV_8UC1);
    for (int i = 0; i < LiIT.Count; i++)
    {
        Mat bin = (Mat)DogContrastBinalize(LiIT[i].img, 31, BinarizeThreshold);
        Cv2.Add(sum, bin, sum);
        // Note: LiIT[i].img.ToBytes() would return PNG-encoded bytes; MatOfByte gives raw pixels.
        MatOfByte mob = new MatOfByte(LiIT[i].img);
        byte[] b = mob.ToArray();
        b.CopyTo(bb, 440 * 512 * i);
    }
    mc.MovePointZ(InitPoint.Z);
    mc.Join();

    Cv2.Threshold(sum, sum, BrightnessThreshold, 1, ThresholdType.Binary);

    // Cv2.FindContours throws an AccessViolationException here (both Release and Debug),
    // so fall back to the C-style API.
    using (CvMemStorage storage = new CvMemStorage())
    using (CvContourScanner scanner = new CvContourScanner(sum.ToIplImage(),
        storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple))
    {
        foreach (CvSeq<CvPoint> c in scanner)
        {
            CvMoments mom = new CvMoments(c, false);
            if (c.ElemSize < 2) continue;
            if (mom.M00 == 0.0) continue;
            double mx = mom.M10 / mom.M00;
            double my = mom.M01 / mom.M00;
            stlog += string.Format("{0:F} {1:F}\n", mx, my);
        }
    }

    twriter.Write(stlog);
    twriter.Close();
    writer.Write(bb);
    writer.Flush();
    writer.Close();

    sum *= 255;
    sum.ImWrite(String.Format(@"c:\img\{0}_{1}_{2}.bmp",
        System.DateTime.Now.ToString("yyyyMMdd_HHmmss"),
        (int)(p.X * 1000), (int)(p.Y * 1000)));
} // BeamDetection
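DogContrastBinalize is not included in this collection. Purely as an assumption, its shape can be guessed from the DoG pipeline in task() above (blur, difference, contrast stretch, 0/1 binarize); the sketch below is that guess, not the project's actual implementation:

static Mat DogContrastBinalize(Mat src, int kernelSize, int threshold)
{
    Mat g1 = new Mat(440, 512, MatType.CV_8U);
    Mat g2 = new Mat(440, 512, MatType.CV_8U);
    Mat sub = new Mat(440, 512, MatType.CV_8U);
    Cv2.GaussianBlur(src, g1, Cv.Size(3, 3), -1);
    Cv2.GaussianBlur(g1, g2, Cv.Size(kernelSize, kernelSize), -1);
    Cv2.Subtract(g2, g1, sub);
    double minVal, maxVal;
    OpenCvSharp.CPlusPlus.Point minLoc, maxLoc;
    Cv2.MinMaxLoc(sub, out minVal, out maxVal, out minLoc, out maxLoc);
    Mat cont = (sub - minVal) * 255 / (maxVal - minVal); // contrast stretch to 0..255
    Mat bin = new Mat(440, 512, MatType.CV_8U);
    Cv2.Threshold(cont, bin, threshold, 1, ThresholdType.Binary);
    return bin;
}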
public static List<microtrack> Select(List<ImageTaking> ITs, tsparams param)
{
    List<microtrack> rms = new List<microtrack>();

    for (int ax = -param.ax; ax <= param.ax; ax++)
    {
        for (int ay = -param.ay; ay <= param.ay; ay++)
        {
            // TODO: size the Mat according to the angle range instead of hard-coding 800x800.
            using (Mat big = Mat.Zeros(800, 800, MatType.CV_8UC1))
            {
                // Stack the images, shifting each layer by the trial slope (ax, ay).
                for (int p = 0; p < ITs.Count; p++)
                {
                    int startx = big.Width / 2 - ITs[p].img.Width / 2 + (int)(p * ax * param.pitchx / 16.0);
                    int starty = big.Height / 2 - ITs[p].img.Height / 2 + (int)(p * ay * param.pitchy / 16.0);
                    Cv2.Add(
                        big[starty, starty + ITs[p].img.Height, startx, startx + ITs[p].img.Width],
                        ITs[p].img,
                        big[starty, starty + ITs[p].img.Height, startx, startx + ITs[p].img.Width]);
                }
                Cv2.Threshold(big, big, param.phthre, 255, ThresholdType.ToZero);
                //Cv2.ImWrite(string.Format("{0}_{1}.png", ax, ay), big * 15);
                //Cv2.ImShow("big_thre", big * 20); Cv2.WaitKey(0);

                using (IplImage big_copy = big.Clone().ToIplImage())
                using (CvMemStorage storage = new CvMemStorage())
                using (CvContourScanner scanner = new CvContourScanner(big_copy, storage,
                    CvContour.SizeOf, ContourRetrieval.External, ContourChain.ApproxNone))
                {
                    CvScalar white = Cv.RGB(255, 255, 255);
                    foreach (CvSeq<CvPoint> c in scanner)
                    {
                        // Mask the current contour and take its peak brightness as ph.
                        using (IplImage mask = new IplImage(big.Size(), BitDepth.U8, 1))
                        {
                            Cv.DrawContours(mask, c, white, white, 0, -1);
                            Cv.And(big.ToIplImage(), mask, mask);
                            double minval, maxval;
                            CvPoint minLoc, maxLoc;
                            Cv.MinMaxLoc(mask, out minval, out maxval, out minLoc, out maxLoc);

                            microtrack rm = new microtrack();
                            rm.ph = maxval;
                            rm.ax = ax;
                            rm.ay = ay;
                            rm.cx = maxLoc.X - (big.Width / 2 - ITs[0].img.Width / 2);
                            rm.cy = maxLoc.Y - (big.Height / 2 - ITs[0].img.Height / 2);
                            rm.pv = mask.Sum();
                            rms.Add(rm);
                        }
                    }
                }
            }
        } // for ay
    } // for ax

    // Cluster the candidates, brightest (largest pv) first.
    rms.Sort((x, y) => y.pv.CompareTo(x.pv));
    List<microtrack> ms = new List<microtrack>();
    for (int i = 0; i < rms.Count; i++)
    {
        microtrack a = rms[i];
        if (a.flag == true) continue; // already clustered
        rms[i].flag = true;

        // pv-weighted accumulation of the cluster members.
        microtrack mym = new microtrack();
        mym.ph = a.ph * a.pv;
        mym.pv = a.pv * a.pv;
        mym.ax = a.ax * a.pv;
        mym.ay = a.ay * a.pv;
        mym.cx = a.cx * a.pv;
        mym.cy = a.cy * a.pv;
        double pvsum = a.pv;

        for (int j = i + 1; j < rms.Count; j++)
        {
            microtrack b = rms[j];
            if (b.flag == true) continue; // already clustered
            if (Math.Abs(a.cx - b.cx) > param.clpix || Math.Abs(a.cy - b.cy) > param.clpix) continue;
            if (Math.Abs(a.ax - b.ax) > param.clang || Math.Abs(a.ay - b.ay) > param.clang) continue;
            mym.ph += b.ph * b.pv;
            mym.pv += b.pv * b.pv;
            mym.ax += b.ax * b.pv;
            mym.ay += b.ay * b.pv;
            mym.cx += b.cx * b.pv;
            mym.cy += b.cy * b.pv;
            mym.n += 1;
            pvsum += b.pv;
            rms[j].flag = true;
        }
        mym.ph /= pvsum;
        mym.pv /= pvsum;
        mym.ax /= pvsum;
        mym.ay /= pvsum;
        mym.cx /= pvsum;
        mym.cy /= pvsum;
        ms.Add(mym);
    }
    return ms;
}
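A hypothetical call site for Select, with illustrative parameter values only (the tsparams field meanings are inferred from their use above; imageTakings stands for a List<ImageTaking> captured elsewhere):

tsparams param = new tsparams();
param.ax = 3; param.ay = 3;         // slope search range
param.pitchx = 4; param.pitchy = 4; // shift pitch per layer
param.phthre = 5;                   // brightness threshold after stacking
param.clpix = 4; param.clang = 1;   // clustering tolerances (pixels, slope steps)
List<microtrack> tracks = Select(imageTakings, param);
foreach (microtrack m in tracks)
{
    Console.WriteLine("pos=({0:f1},{1:f1}) slope=({2:f2},{3:f2}) pv={4:f0}", m.cx, m.cy, m.ax, m.ay, m.pv);
}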
private void button1_Click(object sender, EventArgs e)
{
    getContour();
    IplImage imgContour = new IplImage("dataset-dinosaur\\contour\\00.jpg", LoadMode.GrayScale);
    IplImage imgContourDraw = new IplImage(new CvSize(imgContour.Width, imgContour.Height), BitDepth.U8, 3);

    // Note: do not pre-call Cv.FindContours on imgContour here; it modifies the image
    // in place before the scanner reads it.
    CvMemStorage memStore = new CvMemStorage(0);
    CvContourScanner cs = Cv.StartFindContours(imgContour, memStore);
    CvSeq<CvPoint> contourPointSeq;
    CvPoint[] contourPointArray = null;
    for (int i = 0; i < 10; i++)
    {
        contourPointSeq = cs.FindNextContour();
        if (contourPointSeq == null)
            break; // no contours left
        Cv.CvtSeqToArray<CvPoint>(contourPointSeq, out contourPointArray);
        if (contourPointArray.Length > 100)
        {
            for (int j = 0; j < contourPointArray.Length; j++)
            {
                Cv.DrawCircle(imgContourDraw,
                    new CvPoint(contourPointArray[j].X, contourPointArray[j].Y),
                    10, Cv.RGB(255, 0, 0), 10);
            }
            using (StreamWriter sw = new StreamWriter("file.txt", false))
            {
                for (int k = 0; k < contourPointArray.Length; k++)
                {
                    sw.Write(contourPointArray[k].X);
                    sw.Write(" ");
                    sw.Write(contourPointArray[k].Y);
                    sw.Write("\n");
                }
            }
        }
        Cv.SaveImage("save.jpg", imgContourDraw);
    }

    int target_img_contour_num;
    int ref_img_contour_num;
    double[,] camPara1 = Read_camerpar_txt("Camera parameters.txt", "00.jpg");
    double[,] camPara2 = Read_camerpar_txt("Camera parameters.txt", "01.jpg");
    double[,] target_img_contour = Read_Contour_txt("目标图轮廓点.txt", "00.jpg", out target_img_contour_num); // target-image contour points
    double[,] ref_img_contour = Read_Contour_txt("参考图轮廓点.txt", "01.jpg", out ref_img_contour_num);        // reference-image contour points

    CvMat right_epiline_point = Computecorrespondepilines(camPara2, camPara1, target_img_contour);

    int n = 7; // split into n bins, each spanning (max_radius - min_radius) / n radians
    List<double[]> intersection_point_list = Compute_epiline_contour_intersection(right_epiline_point, ref_img_contour, n);

    double[][] vertex_3d = new double[intersection_point_list.Count][];
    for (int i = 0; i < intersection_point_list.Count; i++)
    {
        int temp = (int)intersection_point_list[i][0];
        // (alternatives tried: Cross_intersection_3D with the camera parameters in either order)
        vertex_3d[i] = Cross_intersection_3D_Direct_Solve(
            intersection_point_list[i],
            camPara1,
            camPara2,
            new CvPoint((int)(target_img_contour[0, temp]), (int)(target_img_contour[1, temp])),
            i, 0, 0, 0, 0, 1, 0);
    }
    double[] face = new double[0];
    save_ply("ply0.ply", vertex_3d, face);

    IplImage img1 = Cv.LoadImage("01.jpg");
    for (int i = 1; i < ref_img_contour.GetLength(1); i++)
    {
        Cv.DrawCircle(img1, new CvPoint((int)(ref_img_contour[0, i]), (int)(ref_img_contour[1, i])), 1, Cv.RGB(0, 255, 255));
    }
    //for (int i = 0; i < right_epiline_point.GetDimSize(1); i++)
    //{
    //    Cv.DrawLine(img1,
    //        new CvPoint((int)(right_epiline_point[0, i]), (int)(right_epiline_point[1, i])),
    //        new CvPoint((int)(right_epiline_point[2, i]), (int)(right_epiline_point[3, i])),
    //        Cv.RGB(0, 255, 0), 1);
    //}
    for (int i = 0; i < intersection_point_list.Count; i++)
    {
        Cv.DrawCircle(img1, new CvPoint((int)(intersection_point_list[i][1]), (int)(intersection_point_list[i][2])), 2, Cv.RGB(255, 0, 0));
    }
    Cv.SaveImage("01_save.jpg", img1);

    IplImage img2 = Cv.LoadImage("00.jpg");
    for (int i = 1; i < target_img_contour.GetLength(1); i++)
    {
        Cv.DrawCircle(img2, new CvPoint((int)(target_img_contour[0, i]), (int)(target_img_contour[1, i])), 2, Cv.RGB(255, 0, 0));
    }
    Cv.SaveImage("00_save.jpg", img2);

    // Unfinished sketch kept from the original (epiline/contour-segment intersection):
    //for (int i = 0; i < right_epiline_point.GetDimSize(1); i++)
    //{
    //    CvPoint epiline_start_point = new CvPoint(right_epiline_point[0, i], right_epiline_point[1, i]);
    //    CvPoint epiline_end_point = new CvPoint(right_epiline_point[2, i], right_epiline_point[3, i]);
    //    // SL_Cross_Intersection(...) was left incomplete in the original.
    //}
    //CvMat Epilines_point_mat = new CvMat(4, second_image_contour_point_number, MatrixType.F64C1);
    //// Compute, in one pass, the epilines in the first image for every contour point of the second image:
    //Epilines_point_mat = Computecorrespondepilines(first_projcet, second_projcet, second_Contour_point_temp);
}
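Computecorrespondepilines is a project-local helper, but the relation it implements is the standard one: given a fundamental matrix F between the two views, a point x in one image maps to the epiline l = F·x in the other. A minimal sketch under that assumption (F is a 3x3 CvMat assumed to be estimated elsewhere, e.g. with Cv.FindFundamentalMat):

CvMat F = new CvMat(3, 3, MatrixType.F64C1); // assumption: filled in by a prior calibration step
CvMat x = new CvMat(3, 1, MatrixType.F64C1);
Cv.Set2D(x, 0, 0, new CvScalar(128)); // illustrative pixel u
Cv.Set2D(x, 1, 0, new CvScalar(200)); // illustrative pixel v
Cv.Set2D(x, 2, 0, new CvScalar(1));   // homogeneous coordinate
CvMat l = new CvMat(3, 1, MatrixType.F64C1);
Cv.MatMul(F, x, l); // l = (a, b, c): points (u, v) on the epiline satisfy a*u + b*v + c = 0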