public MatchyBackend.Cv mapToService(Cv cv)
{
    var eduMapper = new EducationMapper();
    var sourceMapper = new SourceMapper();
    MatchyBackend.Cv result = new MatchyBackend.Cv();

    // A null source CV maps to an empty service-layer CV.
    if (cv == null)
    {
        return result;
    }

    // Copy every scalar field one-to-one; the nested education level and
    // source values go through their own dedicated mappers.
    return new MatchyBackend.Cv()
    {
        Name = cv.Name,
        CvID = cv.CvID,
        Age = cv.Age,
        Sex = cv.Sex,
        Interests = cv.Interests,
        Personal = cv.Personal,
        City = cv.City,
        Date = cv.Date,
        Discipline = cv.Discipline,
        EducationHistory = cv.EducationHistory,
        EducationLevel = eduMapper.MapToService(cv.EducationLevel),
        Hours = cv.Hours,
        Profession = cv.Profession,
        Province = cv.Province,
        Email = cv.Email,
        JobRequirements = cv.JobRequirements,
        WorkExperience = cv.WorkExperience,
        Source = sourceMapper.MapToService(cv.Source)
    };
}
public void BeforeEachTest()
{
    // Fresh repository and CV under test for every test case.
    _repository = new CvRepository();
    _cv = new Cv();

    // Seed the CV under test from the known fixture record.
    var fixture = _repository.GetElizabethsCv();
    _cv.Id = fixture.Id;
    _cv.Educations = fixture.Educations;
    _cv.Achievements = fixture.Achievements;
}
public void Index()
{
    // The view was precompiled, so it can be constructed directly
    // instead of being resolved through the MVC pipeline.
    var view = new Cv();

    // Provide the data the view reads, exactly as a controller action
    // would before rendering. A model could be passed at render time
    // if the view required one.
    view.ViewBag.Message = "Testing";

    // Render the view into an HTML string (HtmlAgilityPack-backed).
    var doc = view.RenderAsHtml().ToString();

    // Verification of the rendered markup is currently disabled:
    // it would check that the first <h2> carries ViewBag.Message.
    //HtmlNode node = doc.DocumentNode.Element("h2");
    //Assert.AreEqual("Testing", node.InnerHtml.Trim());
}
// Mask image loading: read the mask from disk in colour and write a
// verification copy straight back out.
public void LoadMaskImage()
{
    MaskImage = Cv.LoadImage("MaskImage.png", LoadMode.Color);
    Cv.SaveImage("MaskOutputImage.png", MaskImage);
}
// Bitwise OR: combine the source image with the mask and save the result.
public void OROperator()
{
    // Destination must be allocated up front with matching size (8-bit, 3 channels).
    OROperation = Cv.CreateImage(srcImage.Size, BitDepth.U8, 3);
    Cv.Or(srcImage, MaskImage, OROperation);
    Cv.SaveImage("OR.png", OROperation);
}
/// <summary>
/// sample of C style wrapper.
/// Runs Canny edge detection on the Goryokaku sample image, then applies
/// cvHoughLines2 twice — once with the Standard (rho/theta) transform and
/// once with the Probabilistic (segment endpoint) transform — drawing each
/// result on its own colour copy and showing both in windows.
/// </summary>
private void SampleC()
{
    // cvHoughLines2
    // Grayscale copy feeds the edge detector; the two colour copies serve
    // as drawing canvases for the two Hough variants.
    using (IplImage srcImgGray = new IplImage(FilePath.Image.Goryokaku, LoadMode.GrayScale))
    using (IplImage srcImgStd = new IplImage(FilePath.Image.Goryokaku, LoadMode.Color))
    using (IplImage srcImgProb = srcImgStd.Clone())
    {
        Cv.Canny(srcImgGray, srcImgGray, 50, 200, ApertureSize.Size3);
        using (CvMemStorage storage = new CvMemStorage())
        {
            // Standard algorithm
            CvSeq lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Standard, 1, Math.PI / 180, 50, 0, 0);
            // wrapper style
            //CvLineSegmentPolar[] lines = src_img_gray.HoughLinesStandard(1, Math.PI / 180, 50, 0, 0);

            // Draw at most the first 10 detected lines.
            int limit = Math.Min(lines.Total, 10);
            for (int i = 0; i < limit; i++)
            {
                // native code style
                /*
                 * unsafe
                 * {
                 *     float* line = (float*)lines.GetElem<IntPtr>(i).Value.ToPointer();
                 *     float rho = line[0];
                 *     float theta = line[1];
                 * }
                 * //*/

                // wrapper style
                CvLineSegmentPolar elem = lines.GetSeqElem <CvLineSegmentPolar>(i).Value;
                float rho = elem.Rho;
                float theta = elem.Theta;

                // Convert the (rho, theta) polar line into two points far
                // apart (±1000 px along the line) so it spans the image.
                double a = Math.Cos(theta);
                double b = Math.Sin(theta);
                double x0 = a * rho;
                double y0 = b * rho;
                CvPoint pt1 = new CvPoint { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
                CvPoint pt2 = new CvPoint { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
                srcImgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
            }

            // Probabilistic algorithm — yields finite segments directly,
            // so each result can be drawn without the polar conversion.
            lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 50, 50, 10);
            // wrapper style
            //CvLineSegmentPoint[] lines = src_img_gray.HoughLinesProbabilistic(1, Math.PI / 180, 50, 0, 0);
            for (int i = 0; i < lines.Total; i++)
            {
                // native code style
                /*
                 * unsafe
                 * {
                 *     CvPoint* point = (CvPoint*)lines.GetElem<IntPtr>(i).Value.ToPointer();
                 *     src_img_prob.Line(point[0], point[1], CvColor.Red, 3, LineType.AntiAlias, 0);
                 * }
                 * //*/

                // wrapper style
                CvLineSegmentPoint elem = lines.GetSeqElem <CvLineSegmentPoint>(i).Value;
                srcImgProb.Line(elem.P1, elem.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
            }
        }

        // Display both results until any key is pressed.
        using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, srcImgStd))
        using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, srcImgProb))
        {
            CvWindow.WaitKey(0);
        }
    }
}
// Bitwise OR of the third source image with its mask, saved as "LOGO.png".
public void OROperator3()
{
    // Allocate the destination to match the source (8-bit, 3 channels).
    OROperation3 = Cv.CreateImage(srcImage3.Size, BitDepth.U8, 3);
    Cv.Or(srcImage3, MaskImage3, OROperation3);
    Cv.SaveImage("LOGO.png", OROperation3);
}
// Downscale the source to half size in each dimension using a Gaussian
// pyramid step, and return the (field-cached) result image.
public IplImage ZoomOut(IplImage src)
{
    var halfSize = new CvSize(src.Width / 2, src.Height / 2);
    zoomout = new IplImage(halfSize, BitDepth.U8, 3);
    Cv.PyrDown(src, zoomout, CvFilter.Gaussian5x5);
    return(zoomout);
}
/// <summary>
/// Tracking task: steps the stage down through 8 focal planes (3 µm apart,
/// following the track's predicted dx/dy slope), grabs a camera frame at
/// each plane, enhances and binarizes the frames, stacks them under nine
/// candidate pixel shifts (delta_xx/delta_yy in -1..1), extracts white
/// blobs near the field-of-view centre, and finally re-centres the stage
/// on the brightness-weighted centroid of those blobs.
/// Side effects: moves the motor stage, writes debug bitmaps under C:\set\,
/// logs blob info via stage.WriteLine, and updates common_dx/common_dy and
/// over_dx/over_dy fields.
/// Fix vs. previous revision: the rounding of c_o_g_y tested the sign of
/// center_x instead of center_y, so a negative-y centroid was rounded the
/// wrong way; it now tests center_y.
/// </summary>
private void task()
{
    TracksManager tm = parameterManager.TracksManager;
    Track myTrack = tm.GetTrack(tm.TrackingIndex);
    MotorControler mc = MotorControler.GetInstance(parameterManager);
    Camera camera = Camera.GetInstance();
    List<Mat> image_set = new List<Mat>();
    List<Mat> image_set_reverse = new List<Mat>();
    Surface surface = Surface.GetInstance(parameterManager); // boundary values from surface recognition
    // NOTE(review): the four boundary values below are read but never used
    // in this method — kept in case the property getters matter; confirm.
    double uptop = surface.UpTop;
    double upbottom = surface.UpBottom;
    double lowtop = surface.LowTop;
    double lowbottom = surface.LowBottom;
    double now_x = mc.GetPoint().X;
    double now_y = mc.GetPoint().Y;
    double now_z = mc.GetPoint().Z;

    // Predicted per-mm slope corrected by the overshoot measured last run
    // (0.265625 µm/pixel, 0.024 mm emulsion step, shrinkage factor 2.2).
    common_dx = myTrack.MsDX + ((0.265625 * over_dx * 3) / (0.024 * 2.2 * 1000));
    common_dy = myTrack.MsDY - ((0.265625 * over_dy * 3) / (0.024 * 2.2 * 1000));

    for (int i = 0; i < 8; i++)
    {
        // myTrack.MsD* is the displacement (mm) of dx/dy per 1 mm of dz.
        double next_x = now_x - i * common_dx * 0.003 * 2.2; // capture at 3 µm intervals
        double next_y = now_y - i * common_dy * 0.003 * 2.2; // shrinkage factor assumed 2.2 (tentative)
        mc.MovePoint(next_x, next_y, now_z - 0.003 * i);
        mc.Join();
        byte[] b = camera.ArrayImage;
        Mat image = new Mat(440, 512, MatType.CV_8U, b);
        // Clone because the Mat above wraps the camera's reusable buffer.
        Mat imagec = image.Clone();
        image_set.Add(imagec);
    }

    // Deepest frame first.
    for (int i = 7; i >= 0; i--)
    {
        image_set_reverse.Add(image_set[i]);
    }
    int n = image_set.Count(); // number of images acquired per run

    Mat cont = new Mat(440, 512, MatType.CV_8U);
    Mat gau_1 = new Mat(440, 512, MatType.CV_8U);
    Mat gau_2 = new Mat(440, 512, MatType.CV_8U);
    Mat sub = new Mat(440, 512, MatType.CV_8U);
    Mat bin = new Mat(440, 512, MatType.CV_8U);
    double Max_kido;
    double Min_kido;
    OpenCvSharp.CPlusPlus.Point maxloc;
    OpenCvSharp.CPlusPlus.Point minloc;
    List<Mat> two_set = new List<Mat>();
    List<Mat> Part_img = new List<Mat>();

    // Band-pass (difference of Gaussians), contrast-stretch, then binarize
    // each frame. Threshold/kernel parameters still need review.
    for (int i = 0; i < image_set.Count(); i++)
    {
        Cv2.GaussianBlur((Mat)image_set_reverse[i], gau_1, Cv.Size(3, 3), -1);  // parameter needs review
        Cv2.GaussianBlur(gau_1, gau_2, Cv.Size(51, 51), -1);                    // parameter needs review
        Cv2.Subtract(gau_2, gau_1, sub);
        Cv2.MinMaxLoc(sub, out Min_kido, out Max_kido, out minloc, out maxloc);
        cont = (sub - Min_kido) * 255 / (Max_kido - Min_kido);
        cont.ImWrite(string.Format(@"C:\set\cont_{0}.bmp", i));
        Cv2.Threshold(cont, bin, 115, 1, ThresholdType.Binary);                 // parameter needs review
        two_set.Add(bin);
    }

    List<mm> white_area = new List<mm>();
    int x0 = 256;
    int y0 = 220; // centre of the field of view

    for (int delta_xx = -1; delta_xx <= 1; delta_xx++) // shift (in pixels) relative to the bottom image
    {
        for (int delta_yy = -1; delta_yy <= 1; delta_yy++)
        {
            // Template for the stacked image (zero-initialized), padded by
            // up to 3 px of shift in each direction.
            Mat superimposed = Mat.Zeros(440 + 3 * Math.Abs(delta_yy), 512 + 3 * Math.Abs(delta_xx), MatType.CV_8UC1);
            // One padded template per frame.
            for (int i = 0; i < two_set.Count; i++)
            {
                Mat Part = Mat.Zeros(440 + 3 * Math.Abs(delta_yy), 512 + 3 * Math.Abs(delta_xx), MatType.CV_8UC1);
                Part_img.Add(Part);
            }

            // Frames are shifted in pairs (two frames per shift step); the
            // four branches place them toward the corresponding corner.
            if (delta_xx >= 0 && delta_yy >= 0) // shift toward lower-right
            {
                for (int i = 0; i < two_set.Count; i++)
                {
                    if (i == 0 || i == 1)
                    {
                        Part_img[i][0, 440, 0, 512] = two_set[i]; // copy processed frame into its region of Part
                    }
                    else if (i == 2 || i == 3)
                    {
                        Part_img[i][0 + Math.Abs(delta_yy)          // y start
                                    , 440 + Math.Abs(delta_yy)      // y end
                                    , 0 + Math.Abs(delta_xx)        // x start
                                    , 512 + Math.Abs(delta_xx)      // x end
                                    ] = two_set[i];
                    }
                    else if (i == 4 || i == 5)
                    {
                        Part_img[i][0 + 2 * Math.Abs(delta_yy)
                                    , 440 + 2 * Math.Abs(delta_yy)
                                    , 0 + 2 * Math.Abs(delta_xx)
                                    , 512 + 2 * Math.Abs(delta_xx)
                                    ] = two_set[i];
                    }
                    else if (i == 6 || i == 7)
                    {
                        Part_img[i][0 + 3 * Math.Abs(delta_yy)
                                    , 440 + 3 * Math.Abs(delta_yy)
                                    , 0 + 3 * Math.Abs(delta_xx)
                                    , 512 + 3 * Math.Abs(delta_xx)
                                    ] = two_set[i];
                    }
                }
                for (int i = 0; i < Part_img.Count(); i++)
                {
                    superimposed += Part_img[i];
                }
                Cv2.Threshold(superimposed, superimposed, 5, 255, ThresholdType.ToZero); // parameter needs review
                // Crop back to the size/position of the first frame.
                superimposed.SubMat(0, 440, 0, 512).CopyTo(superimposed);
            }
            if (delta_xx >= 0 && delta_yy < 0) // shift toward upper-right
            {
                for (int i = 0; i < two_set.Count; i++)
                {
                    if (i == 0 || i == 1)
                    {
                        Part_img[i][0 + 3, 440 + 3, 0, 512] = two_set[i];
                    }
                    else if (i == 2 || i == 3)
                    {
                        Part_img[i][0 + 3 - 1, 440 + 3 - 1
                                    , 0 + Math.Abs(delta_xx), 512 + Math.Abs(delta_xx)
                                    ] = two_set[i];
                    }
                    else if (i == 4 || i == 5)
                    {
                        Part_img[i][0 + 3 - 2, 440 + 3 - 2
                                    , 0 + 2 * Math.Abs(delta_xx), 512 + 2 * Math.Abs(delta_xx)
                                    ] = two_set[i];
                    }
                    else if (i == 6 || i == 7)
                    {
                        Part_img[i][0 + 3 - 3, 440 + 3 - 3
                                    , 0 + 3 * Math.Abs(delta_xx), 512 + 3 * Math.Abs(delta_xx)
                                    ] = two_set[i];
                    }
                }
                for (int i = 0; i < Part_img.Count(); i++)
                {
                    superimposed += Part_img[i];
                }
                Cv2.Threshold(superimposed, superimposed, 5, 255, ThresholdType.ToZero); // parameter needs review
                superimposed.SubMat(0 + 3, 440 + 3, 0, 512).CopyTo(superimposed);
            }
            if (delta_xx < 0 && delta_yy < 0) // shift toward upper-left
            {
                for (int i = 0; i < two_set.Count; i++)
                {
                    if (i == 0 || i == 1)
                    {
                        Part_img[i][0 + 3, 440 + 3, 0 + 3, 512 + 3] = two_set[i];
                    }
                    else if (i == 2 || i == 3)
                    {
                        Part_img[i][0 + 3 - 1, 440 + 3 - 1, 0 + 3 - 1, 512 + 3 - 1] = two_set[i];
                    }
                    else if (i == 4 || i == 5)
                    {
                        Part_img[i][0 + 3 - 2, 440 + 3 - 2, 0 + 3 - 2, 512 + 3 - 2] = two_set[i];
                    }
                    else if (i == 6 || i == 7)
                    {
                        Part_img[i][0 + 3 - 3, 440 + 3 - 3, 0 + 3 - 3, 512 + 3 - 3] = two_set[i];
                    }
                }
                for (int i = 0; i < Part_img.Count(); i++)
                {
                    superimposed += Part_img[i];
                }
                Cv2.Threshold(superimposed, superimposed, 5, 255, ThresholdType.ToZero); // parameter needs review
                superimposed.SubMat(0 + 3, 440 + 3, 0 + 3, 512 + 3).CopyTo(superimposed);
            }
            if (delta_xx < 0 && delta_yy >= 0) // shift toward lower-left
            {
                for (int i = 0; i < two_set.Count; i++)
                {
                    if (i == 0 || i == 1)
                    {
                        Part_img[i][0, 440, 0 + 3, 512 + 3] = two_set[i];
                    }
                    else if (i == 2 || i == 3)
                    {
                        Part_img[i][0 + Math.Abs(delta_yy), 440 + Math.Abs(delta_yy)
                                    , 0 + 3 - 1, 512 + 3 - 1
                                    ] = two_set[i];
                    }
                    else if (i == 4 || i == 5)
                    {
                        Part_img[i][0 + 2 * Math.Abs(delta_yy), 440 + 2 * Math.Abs(delta_yy)
                                    , 0 + 3 - 2, 512 + 3 - 2
                                    ] = two_set[i];
                    }
                    else if (i == 6 || i == 7)
                    {
                        Part_img[i][0 + 3 * Math.Abs(delta_yy), 440 + 3 * Math.Abs(delta_yy)
                                    , 0 + 3 - 3, 512 + 3 - 3
                                    ] = two_set[i];
                    }
                }
                for (int i = 0; i < Part_img.Count(); i++)
                {
                    superimposed += Part_img[i];
                }
                Cv2.Threshold(superimposed, superimposed, 5, 255, ThresholdType.ToZero); // parameter needs review
                superimposed.SubMat(0, 440, 0 + 3, 512 + 3).CopyTo(superimposed);
            }

            // Black out everything outside a 41x41 window centred on (x0, y0).
            Mat one1 = Mat.Ones(y0 - 20, 512, MatType.CV_8UC1);
            Mat one2 = Mat.Ones(41, x0 - 20, MatType.CV_8UC1);
            Mat one3 = Mat.Ones(41, 491 - x0, MatType.CV_8UC1);
            Mat one4 = Mat.Ones(419 - y0, 512, MatType.CV_8UC1);
            superimposed[0, y0 - 20, 0, 512] = one1 * 0;
            superimposed[y0 - 20, y0 + 21, 0, x0 - 20] = one2 * 0;
            superimposed[y0 - 20, y0 + 21, x0 + 21, 512] = one3 * 0;
            superimposed[y0 + 21, 440, 0, 512] = one4 * 0;
            superimposed.ImWrite("C:\\set\\superimposed25_1.bmp");

            // Collect white blobs: centroid and mass (M00) of each contour,
            // tagged with the pixel shift that produced them.
            using (CvMemStorage storage = new CvMemStorage())
            {
                using (CvContourScanner scanner = new CvContourScanner(superimposed.ToIplImage(), storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple))
                {
                    foreach (CvSeq<CvPoint> c in scanner)
                    {
                        CvMoments mom = new CvMoments(c, false);
                        if (c.ElemSize < 2)
                        {
                            continue;
                        }
                        if (mom.M00 == 0.0)
                        {
                            continue;
                        }
                        double mx = mom.M10 / mom.M00;
                        double my = mom.M01 / mom.M00;
                        mm koko = new mm();
                        koko.white_x = mx;
                        koko.white_y = my;
                        koko.white_kido = mom.M00;
                        koko.white_dx = delta_xx;
                        koko.white_dy = delta_yy;
                        white_area.Add(koko);
                        stage.WriteLine(String.Format("mx={0:f2} , my={1:f2} , dx={2:f2} , dy={3:f2} , M={4:f2}", mx, my, delta_xx, delta_yy, mom.M00));
                    }
                }
            }
            Part_img.Clear();
        } // pixel shift y
    } // pixel shift x

    if (white_area.Count > 0)
    {
        // Brightness-weighted centroid across all blobs and shifts.
        double center_x = 0;
        double center_y = 0;
        double center_dx = 0;
        double center_dy = 0;
        double kido_sum = 0;
        for (int i = 0; i < white_area.Count; i++)
        {
            kido_sum += white_area[i].white_kido;
            center_x += white_area[i].white_x * white_area[i].white_kido;
            center_y += white_area[i].white_y * white_area[i].white_kido;
            center_dx += white_area[i].white_dx * white_area[i].white_kido;
            center_dy += white_area[i].white_dy * white_area[i].white_kido;
        }
        center_x = center_x / kido_sum;
        center_y = center_y / kido_sum;
        center_dx = center_dx / kido_sum;
        center_dy = center_dy / kido_sum;

        // Round away from zero to the nearest pixel.
        int c_o_g_x;
        int c_o_g_y;
        if (center_x >= 0)
        {
            c_o_g_x = (int)(center_x + 0.5);
        }
        else
        {
            c_o_g_x = (int)(center_x - 0.5);
        }
        // FIX: previously tested center_x here, rounding y the wrong way
        // whenever center_y's sign differed from center_x's.
        if (center_y >= 0)
        {
            c_o_g_y = (int)(center_y + 0.5);
        }
        else
        {
            c_o_g_y = (int)(center_y - 0.5);
        }

        // Convert the pixel offset from the FOV centre into microns and
        // re-centre the stage (pixel axes vs. stage axes differ in sign).
        int dx_pixel = c_o_g_x - x0;
        int dy_pixel = c_o_g_y - y0;
        double dx_micron = dx_pixel * 0.265625 / 1000;
        double dy_micron = dy_pixel * 0.265625 / 1000;
        double now_x2 = mc.GetPoint().X;
        double now_y2 = mc.GetPoint().Y;
        mc.MovePointXY(now_x2 - dx_micron, now_y2 + dy_micron);
        mc.Join();

        // Remember the fractional shift for the next run's slope correction.
        over_dx = center_dx;
        over_dy = center_dy;
    }
}
/// <summary>
/// From a set of Hough line segments, estimates the four corner points of
/// the tracked quadrilateral: prunes near-parallel/nearby duplicate lines,
/// intersects the survivors, then either (first ~20 frames) seeds oldPt
/// from the detected corners in final4P plus the best intersection
/// candidates, or (steady state) snaps each existing oldPt to its nearest
/// intersection within thresholdDist. Draws debug circles on imageDest3
/// and sets the 'reset' flag when no intersections are found.
/// Reads/writes fields: temp, width, height, imageDest3, oldPt, final4P,
/// fstln/secln bookkeeping is local; numFrames, minMaskY, thresholdDist, reset.
/// </summary>
void fourPoints(CvLineSegmentPoint[] linesArray)
{
    List<CvLineSegmentPoint> lines = new List<CvLineSegmentPoint>(linesArray);
    int i, j, k;

    // Angle of each segment (filled in reverse order; only used for
    // pairwise angle differences below).
    List<double> angleV = new List<double>(lines.Count);
    for (i = lines.Count - 1; i >= 0; --i)
    {
        CvLineSegmentPoint lineSegm = lines[i];
        angleV.Add(Math.Atan2(lineSegm.P1.Y - lineSegm.P2.Y, lineSegm.P1.X - lineSegm.P2.X));
    }

    CvPoint p1, p2, p0, p0_;

    //Discard almost parallel lines and keep the largest
    // FIX : everything about this sucks
    for (i = 0; i < lines.Count; ++i)
    {
        CvLineSegmentPoint segi = lines[i];
        p0 = segi.P1;
        p0_ = segi.P2;
        double e2 = p0.DistanceTo(p0_);
        for (j = 0; j < lines.Count; ++j)
        {
            if (i == j) // ugly?
            {
                continue;
            }
            // Skip pairs that are clearly not duplicates: angle differs by
            // more than 0.1 rad, or they are roughly perpendicular.
            if (Math.Abs(angleV[i] - angleV[j]) > 0.1 || Math.Abs(angleV[i] - angleV[j]) > Cv.PI / 2.0 - 0.1 && Math.Abs(angleV[i] - angleV[j]) < Cv.PI / 2.0 + 0.1)
            {
                continue;
            }
            CvLineSegmentPoint segj = lines[j];
            p1 = segj.P1;
            p2 = segj.P2;
            // Mutually close (< 15 px line-to-point distance): keep the longer.
            if (PointOps.LineDistance(p1, p2, p0) < 15 && PointOps.LineDistance(p0, p0_, p1) < 15)
            {
                if (p1.DistanceTo(p2) > e2)
                {
                    lines.RemoveAt(i);
                    angleV.RemoveAt(i);
                    --i;
                    --j;
                    break;
                }
                else
                {
                    lines.RemoveAt(j);
                    angleV.RemoveAt(j);
                    --j;
                }
            }
        }
    }

    // instead of 3 lists, we could have one with custom struct containing all 3 required values
    List<CvPoint> allPointsV = new List<CvPoint>();
    List<int> fstln = new List<int>();   // index of the first line of each intersection
    List<int> secln = new List<int>();   // index of the second line of each intersection
    const int bound = 50;                // allow intersections up to 50 px outside the frame

    // All pairwise intersections within the (padded) frame, de-duplicated.
    for (i = 0; i < lines.Count; ++i)
    {
        CvLineSegmentPoint segmI = lines[i];
        for (j = 0; j < lines.Count; ++j)
        {
            if (i == j)
            {
                continue; // ugly?
            }
            CvLineSegmentPoint segmJ = lines[j];
            if (PointOps.LineIntersection(segmI.P1, segmI.P2, segmJ.P1, segmJ.P2, out p1))
            {
                if (p1.X > -bound && p1.X < temp.Cols + bound && p1.Y > -bound && p1.Y < temp.Rows + bound)
                {
                    bool foundSamePt = false;
                    for (k = 0; k < allPointsV.Count; ++k)
                    {
                        if (p1 == allPointsV[k])
                        {
                            foundSamePt = true;
                            break;
                        }
                    }
                    if (!foundSamePt)
                    {
                        allPointsV.Add(p1);
                        fstln.Add(i);
                        secln.Add(j);
                    }
                }
            }
        }
    }

    if (allPointsV.Count == 0)
    {
        reset = true;
        return;
    }
    reset = false;

    // time to start doing our drawings
    if (imageDest3 == null)
    {
        imageDest3 = new CvMat(height, width, MatrixType.U8C3);
    }

    // are we at start or just not found any points yet?
    if (oldPt.Count == 0 || numFrames < 20)
    {
        //************************draw intersections************************//
        for (i = 0; i < allPointsV.Count; ++i)
        {
            CvScalar circleColor;
            if (allPointsV[i].Y < height - 10)
            {
                circleColor = Const.ScalarGreen;
            }
            else
            {
                circleColor = Const.ScalarWhite;
            }
            Cv.Circle(imageDest3, allPointsV[i], 7, circleColor);
        }

        //mapping the detected corners with lines intersections
        // tracker[j] = index of the intersection nearest to corner final4P[j].
        List<int> tracker = new List<int>(final4P.Count);
        for (j = 0; j < final4P.Count; ++j)
        {
            double dist = PointOps.Norm(final4P[j] - allPointsV[0]);
            tracker.Add(0); // tracker[j] = 0;
            for (i = 0; i < allPointsV.Count; ++i)
            {
                double distA = PointOps.Norm(final4P[j] - allPointsV[i]);
                if (distA < dist)
                {
                    dist = distA;
                    tracker[j] = i;
                }
            }
        }

        //******* draw mapped corners *****************//
        for (j = 0; j < final4P.Count; ++j)
        {
            Cv.Circle(imageDest3, allPointsV[tracker[j]], 8, Const.ScalarMagenta);
        }

        //*******************************************************************************************//
        // For each mapped corner, pick whichever of its two generating lines
        // is shared with fewer of the other corners.
        List<int> linesIds = new List<int>(final4P.Count);
        for (i = 0; i < final4P.Count; ++i)
        {
            int counterfstln = 0;
            for (j = 0; j < final4P.Count; ++j)
            {
                if (i == j || fstln[tracker[i]] == fstln[tracker[j]] /*this might be redundant after 1st check*/ || fstln[tracker[i]] == secln[tracker[j]])
                {
                    ++counterfstln;
                }
            }
            int countersecln = 0;
            for (j = 0; j < final4P.Count; ++j)
            {
                if (i == j || secln[tracker[i]] == fstln[tracker[j]] || secln[tracker[i]] == secln[tracker[j]])
                {
                    ++countersecln;
                }
            }
            if (counterfstln < countersecln)
            {
                linesIds.Add(fstln[tracker[i]]); // linesIds[i] = fstln[tracker[i]];
            }
            else
            {
                linesIds.Add(secln[tracker[i]]); // linesIds[i] = secln[tracker[i]];
            }
        }

        // For each corner's chosen line, find the intersection on that line
        // farthest from the corner — the opposite-end candidate.
        List<int> maxdistpos1 = new List<int>(tracker.Count);
        // TODO : check if Count is always less than 3-4...
        for (j = 0; j < tracker.Count; j++)
        {
            maxdistpos1.Add(0);
            // maxdistpos1.Add( -1 ); // "initialize" maxdistpos1[j], so that it can be re-assigned below
            // TODO : logic is wrong!!!! Not all [j]s are assigned. Proof : if "initialized" with "-1", it just crashes later!
            double dist = 0;
            for (i = 0; i < fstln.Count; i++)
            {
                if (linesIds[j] == fstln[i] || linesIds[j] == secln[i] && allPointsV[i].Y < height - 15)
                {
                    double distA = PointOps.Norm(allPointsV[tracker[j]] - allPointsV[i]);
                    if (distA > dist)
                    {
                        dist = distA;
                        maxdistpos1[j] = i;
                    }
                }
            }
        }

        // Seed oldPt from the detected corners...
        oldPt.Clear();
        for (i = 0; i < final4P.Count; i++)
        {
            oldPt.Add(final4P[i]);
        }

        // ...then top it up (to 4 points) with the far-end candidates that
        // fall inside the valid vertical band, preferring the ones farthest
        // from the points already kept.
        List<CvPoint> candidatePts = new List<CvPoint>();
        List<double> candist = new List<double>();
        for (i = 0; i < maxdistpos1.Count; i++)
        {
            Cv.Circle(imageDest3, allPointsV[maxdistpos1[i]], 7, Const.ScalarBlue);
            if (allPointsV[maxdistpos1[i]].Y > minMaskY && allPointsV[maxdistpos1[i]].Y < height - 10)
            {
                candidatePts.Add(allPointsV[maxdistpos1[i]]);
            }
        }
        for (i = 0; i < candidatePts.Count; i++)
        {
            double dist = 0;
            for (j = 0; j < oldPt.Count; j++)
            {
                dist += PointOps.Norm(candidatePts[i] - oldPt[j]);
            }
            candist.Add(dist);
        }
        while (oldPt.Count < 4)
        {
            if (candidatePts.Count != 0)
            {
                int p = candist.FindMaxIndex(); // Utils.FindMaxIndex( candist ); // p = max_element(candist.begin(),candist.end()) - candist.begin();
                oldPt.Add(candidatePts[p]);
                candist.RemoveAt(p);
                candidatePts.RemoveAt(p);
            }
            else
            {
                break;
            }
        }
        for (j = 0; j < oldPt.Count; j++)
        {
            Cv.Circle(imageDest3, oldPt[j], 7, Const.ScalarBlue);
        }
        //***************************************************** end of estimation **************************************************************//
    }
    else
    {
        // Steady-state tracking: snap each previous point to its nearest
        // current intersection.
        for (i = 0; i < allPointsV.Count; ++i)
        {
            if (allPointsV[i].Y < height - 10)
            {
                Cv.Circle(imageDest3, allPointsV[i], 7, Const.ScalarGreen);
            }
            else
            {
                Cv.Circle(imageDest3, allPointsV[i], 7, Const.ScalarWhite);
            }
        }
        //mapping the detected corners with lines intersections //
        List<int> tracker = new List<int>(oldPt.Count);
        for (j = 0; j < oldPt.Count; ++j)
        {
            tracker.Add(-1);
            double dist = 1000000;
            for (i = 0; i < allPointsV.Count; ++i)
            {
                double distA = PointOps.Norm(oldPt[j] - allPointsV[i]);
                if (distA < dist && allPointsV[i].Y < height - 10)
                {
                    dist = distA;
                    tracker[j] = i;
                }
            }
        }
        for (j = 0; j < oldPt.Count; ++j)
        {
            double distA = PointOps.Norm(oldPt[j] - allPointsV[tracker[j]]);
            // threshold that bounds the search region: move only if the
            // nearest intersection is close enough, else keep the old point
            if (distA < thresholdDist)
            {
                oldPt[j] = allPointsV[tracker[j]];
            }
        }
    }
}
// Apply a 9x9 Gaussian blur to the (single-channel) source and return the
// field-cached result image.
public IplImage BlurImage(IplImage src)
{
    blur = new IplImage(src.Size, BitDepth.U8, 1);
    Cv.Smooth(src, blur, SmoothType.Gaussian, 9);
    return(blur);
}
//---------------------------------------------------------
// Name    : convertBgrToHsv
// Purpose : convert an image from BGR to HSV colour space
// Args    : i_img / BGR input image, h_img / HSV output image
// Returns : h_img / the HSV image
//---------------------------------------------------------
public IplImage convertBgrToHsv(IplImage i_img, IplImage h_img)
{
    Cv.CvtColor(i_img, h_img, ColorConversion.BgrToHsv);
    return(h_img);
}
//---------------------------------------------------------
// Name    : convertSmooothing  (sic — name kept for existing callers)
// Purpose : smooth the image in place with a 5x5 median filter
// Args    : img / image before smoothing
// Returns : img / image after smoothing (same instance)
//---------------------------------------------------------
public IplImage convertSmooothing(IplImage img)
{
    Cv.Smooth(img, img, SmoothType.Median, 5, 0, 0, 0);
    return(img);
}
// Canny edge detection: value1/value2 are the hysteresis thresholds;
// the result is written into the pre-allocated 'canny' image.
public static void CannyFilter(IplImage gray, ref IplImage canny, int value1, int value2)
{
    Cv.Canny(gray, canny, value1, value2);
}
// Histogram equalization of a grayscale image into the pre-allocated output.
public static void HistogramEqualize(IplImage gray, ref IplImage equalized)
{
    Cv.EqualizeHist(gray, equalized);
}
// Morphological dilation with the default 3x3 kernel, one iteration.
public static void DilateImage(IplImage gray, ref IplImage dilated)
{
    Cv.Dilate(gray, dilated);
}
// Morphological erosion with the default 3x3 kernel, one iteration.
public static void ErodeImage(IplImage gray, ref IplImage eroded)
{
    Cv.Erode(gray, eroded);
}
// => inputMat MUST be 24/32 bit
//
// Full per-frame pipeline: grayscale + threshold + morphology to isolate
// the target region, contour extraction, Hough line detection, corner
// estimation via fourPoints(), then warps the optional 'overlay' image
// onto the detected quadrilateral and blends it into inputMat.
// Reads/writes fields: width, height, temp, oldPt, final4P, numFrames,
// reset, thresholdDist, overlay, inputQuad, outputQuad.
private CvMat processFrame(CvMat inputMat)
{
    // return "inputMat" after lots. LOTS. Of processing
    width = inputMat.Cols;
    height = inputMat.Rows;

    // taking out 4% of the input's edges: sounds wrong
#if false
    // I have no idea what on earth is the purpose of this:
    //CvMat temp2 = inputMat( new CvRect( inputMat.Cols / 25, inputMat.Cols / 25, inputMat.Cols - 2 * (inputMat.Cols / 25), inputMat.Rows - 2 * (inputMat.Rows / 25) ) );
    //resize( temp2, temp2, inputMat.size() );
    //temp2.copyTo( inputMat );
    int borderX = inputMat.Cols / 25; // 4% of original
    int borderY = inputMat.Rows / 25;
    CvRect roi = new CvRect(borderX, borderY, inputMat.Cols - 2 * borderX, inputMat.Rows - 2 * borderY);
    CvMat temp2 = inputMat.GetSubRect(out temp2, roi); // stupid to pass "out temp2"?
    inputMat = temp2; // =TODO : What? temp2.Copy( inputMat ); // is it really required to remove 4% of the input image's edges?
#endif

    CvMat inputMat_grey;
    {
        // TODO : looks like a waste to make two conversions from inputMat to _grey, instead of 1
        // since OpenCV doesn't support it, it could be made manually
        CvMat inputMat_grey8 = MatOps.ConvertChannels(inputMat);
        inputMat_grey = MatOps.ConvertElements(inputMat_grey8, MatrixType.F32C1, 1.0 / 255.0);
    }

    // NOTE : IBO seems to give good contrast with certain images, but with bbox7, it is just disastrous.
    //MatOps.NewWindowShow( inputMat_grey );
    //inputMat_grey = Filters.IBO( inputMat_grey ); // inputMat_grey = 32f
    //MatOps.NewWindowShow( inputMat_grey );
    inputMat_grey = MatOps.ConvertElements(inputMat_grey, MatrixType.U8C1, 255); // inputMat_grey = 8u
    // was: SLOW : Filters.ContrastEnhancement( inputMat_grey ); // NOTE : not needed AFTER IBO
    // NOTE : Contrast Enhancement2 may NOT be needed AT ALL, at this point at least, ANYWAY!!!
    Filters.ContrastEnhancement2(inputMat_grey); // NOTE : certainly NOT needed AFTER IBO
    MatOps.NewWindowShow(inputMat_grey);

    // mask passed originally in method below was all white, so I optimized it out. Passing the number of pixels was also dumb-o.
    // Adaptive threshold, inverted, then open (erode + dilate) with a cross
    // kernel to clean up speckle noise.
    double thresh = Filters.NeighborhoodValleyEmphasis(inputMat_grey);
    Cv.Threshold(inputMat_grey, inputMat_grey, thresh, 255, ThresholdType.BinaryInv);
    IplConvKernel element = new IplConvKernel(3, 3, 1, 1, ElementShape.Cross);
    Cv.Erode(inputMat_grey, inputMat_grey, element);
    Cv.Dilate(inputMat_grey, inputMat_grey, element);
    MatOps.NewWindowShow(inputMat_grey);

    // TODO : check if check is required
    if (inputMat_grey.ElemType != MatrixType.U8C1)
    {
        inputMat_grey = MatOps.ConvertElements(inputMat_grey, MatrixType.U8C1, 255.0);
    }

    // =======
    // is this just a test?
    CvPoint[] newPtV = Filters.DistillContours(inputMat_grey, 5, Const.PointZero);
    CvMat imageDest;
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq<CvPoint> updateContours = CvSeq<CvPoint>.FromArray(newPtV, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(imageDest, updateContours, Const.ScalarWhite, 0, 100, 16);
    }
    // =======

    kawane(newPtV); // updates thresholdDist, minMaskY, final4P

    //*******************************************set a greater contour for estimation of the missing points*******************************//
    // =======
    newPtV = Filters.DistillContours(inputMat_grey, 100, Const.PointZero);
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq<CvPoint> updateContours = CvSeq<CvPoint>.FromArray(newPtV, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(imageDest, updateContours, Const.ScalarWhite, 0, 100, 1, LineType.AntiAlias);
    }
    // =======

    // Mask the original (grayscale) frame to the convex hull of the larger
    // contour, then run Canny + close (dilate/erode) to get clean edges.
    CvMat mask1 = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1, 0);
    Cv.FillConvexPoly(mask1, newPtV, Const.ScalarWhite, 0, 0);
    temp = MatOps.ConvertChannels(inputMat);
    temp.Copy(imageDest, mask1);
    Cv.Canny(imageDest, imageDest, 150, 300, ApertureSize.Size3);
    IplConvKernel element2 = new IplConvKernel(3, 3, 1, 1, ElementShape.Rect);
    Cv.Dilate(imageDest, imageDest, element2);
    Cv.Erode(imageDest, imageDest, element2);

    CvLineSegmentPoint[] lines = Cv2.HoughLinesP(new Mat(imageDest), 1, Cv.PI / 180 /*NOTE : 1 degree angle*/, 50, 50, 50); // TODO : those 50s..?
    extendLines(lines, 350); // TODO : This idea sounds arbitary? And why 350? At least some percentage?

    // draw extended lines
    for (int i = 0; i < lines.Length; ++i)
    {
        CvLineSegmentPoint l = lines[i];
        Cv.Line(imageDest, l.P1, l.P2, Const.ScalarWhite, 1, LineType.AntiAlias);
    }
    Cv.Dilate(imageDest, imageDest, element2); // TODO : FIX : Dilate again?!

    // another huge function here...
    fourPoints(lines);

    ////////////

    //********************************************************************* replace estimate points with mask corners ********//
    if (oldPt.Count != 0)
    {
        //**
        // BEWARE : great use of the English language following right below:
        // test for each and every one of the last slice delete each one of all the revisited of the above and estimate for only the best the off topic adapt
        //**
        // Match every detected corner (final4P) to its nearest tracked point.
        List<int> positions = new List<int>(final4P.Count);
        for (int i = 0; i < final4P.Count; ++i)
        {
            positions.Add(-1); // "initialize" positions[i]
            double distmin = 10000;
            for (int j = 0; j < oldPt.Count; ++j)
            {
                double distAB = PointOps.Norm(oldPt[j] - final4P[i]);
                if (distAB < distmin)
                {
                    distmin = distAB;
                    positions[i] = j;
                }
            }
        }
        int flagFrCounter = 0;
        for (int i = 0; i < final4P.Count; ++i)
        {
            double distA = PointOps.Norm(oldPt[positions[i]] - final4P[i]);
            // threshold bounding the search region: replace the tracked
            // point only when the detected corner is close enough
            if (distA < thresholdDist) //if(distA<80)
            {
                oldPt[positions[i]] = final4P[i];
                --flagFrCounter;
            }
            ++flagFrCounter;
        }
        if (reset)
        {
            numFrames = 0;
            oldPt.Clear();
            final4P.Clear();
        }
    }
    //pointsb[0]=thresholdDist;
    //****************************************************************************//

    // Debug-draw the tracked points and push the (8-bit) working image back
    // into the 24-bit output frame.
    for (int i = 0; i < oldPt.Count; ++i)
    {
        Cv.Circle(temp, oldPt[i], 2, Const.ScalarRed, 3);
    }
    MatOps.Convert8To24(temp).Copy(inputMat);
    //MatOps.ConvertChannels( temp, ColorConversion.GrayToBgr ).Copy( inputMat );
    //temp.Copy( inputMat );

    //******************************************************OVERLAY IMAGE***********************************************//////
    if (oldPt.Count == 0)
    {
        return(inputMat); // end of line
    }
    CvMat black2;
    if (overlay != null)
    {
        black2 = overlay.Clone(); //=imread("cubes.jpg");
        Cv.Resize(black2, inputMat, Interpolation.NearestNeighbor); // TODO : check if interpolation type is appropriate
    }
    else
    {
        black2 = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C3);
    }

    // Order the tracked points: two topmost first, then the point nearest
    // in X to the second one, then whatever remains.
    List<CvPoint> tempPoint = new List<CvPoint>(4); //vector<Point> tempPoint;
    int pp = 0;
    // BEWARE : the guy is copy/pasting needlessly?
    int mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        if (oldPt[i].Y < mini)
        {
            mini = oldPt[i].Y;
            pp = i;
        }
    }
    tempPoint.Add(oldPt[pp]);
    mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        if (oldPt[i].Y < mini && oldPt[i] != tempPoint[0])
        {
            mini = oldPt[i].Y;
            pp = i;
        }
    }
    tempPoint.Add(oldPt[pp]);
    mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        int tempmini = Math.Abs(oldPt[i].X - tempPoint[1].X);
        if (tempmini < mini && oldPt[i] != tempPoint[0] && oldPt[i] != tempPoint[1])
        {
            mini = tempmini;
            pp = i;
        }
    }
    tempPoint.Add(oldPt[pp]);
    for (int i = 0; i < oldPt.Count; ++i)
    {
        CvPoint pt = oldPt[i];
        bool found = false;
        for (int j = 0; j < tempPoint.Count; ++j)
        {
            if (tempPoint[j] == pt)
            {
                found = true;
                break;
            }
        }
        if (!found)
        {
            tempPoint.Add(pt);
        }
    }

    // only keep up to 4 points
    List<CvPoint> co_ordinates = new List<CvPoint>(4);
    {
        int maxIndex = Math.Min(4, tempPoint.Count);
        for (int i = 0; i < maxIndex; ++i)
        {
            co_ordinates.Add(tempPoint[i]);
        }
    }

    // lost me...
    // Seed outputQuad on the first run (degenerate quad), otherwise nudge
    // each of its corners to the nearest ordered point.
    if (outputQuad[0] == outputQuad[2])
    {
        {
            int maxIndex = Math.Min(4, tempPoint.Count);
            for (int i = 0; i < maxIndex; ++i)
            {
                outputQuad[i] = tempPoint[i];
            }
        }
    }
    else
    {
        CvPoint2D32f rr;
        for (int i = 0; i < 4; ++i)
        {
            List<double> dist = new List<double>(tempPoint.Count);
            for (int j = 0; j < tempPoint.Count; ++j)
            {
                rr = tempPoint[j];
                dist.Add(PointOps.Norm(outputQuad[i] - rr));
            }
            double minimumDist = dist.Min();
            int min_pos = Utils.FindIndex(dist, minimumDist);
            if (tempPoint.Count > 0)
            {
                outputQuad[i] = tempPoint[min_pos];
                tempPoint.RemoveAt(min_pos);
            }
        }
    }

    // The 4 points where the mapping is to be done , from top-left in clockwise order
    inputQuad[0] = new CvPoint2D32f(0, 0);
    inputQuad[1] = new CvPoint2D32f(inputMat.Cols - 1, 0);
    inputQuad[2] = new CvPoint2D32f(inputMat.Cols - 1, inputMat.Rows - 1);
    inputQuad[3] = new CvPoint2D32f(0, inputMat.Rows - 1);
    //Input and Output Image;

    // Get the Perspective Transform Matrix i.e. lambda (2D warp transform)
    // Lambda Matrix
    CvMat lambda = Cv.GetPerspectiveTransform(inputQuad, outputQuad);
    // Apply this Perspective Transform to the src image
    // - get a "top-down" view of the supposedly box-y area
    Cv.WarpPerspective(black2, black2, lambda, Interpolation.Cubic, Const.ScalarBlack);
    // see nice explanation : http://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/

    // Build a mask over the quad and alpha-blend the warped overlay into it.
    CvMat maskOV = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1, Const.ScalarBlack);
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq<CvPoint> updateContours = CvSeq<CvPoint>.FromArray(co_ordinates, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(maskOV, updateContours, Const.ScalarWhite, 0, 100, 16);
        //drawContours( maskOV, co_ordinates, 0, Scalar( 255 ), CV_FILLED, 8 );
    }
    double alpha = 0.8;
    double beta = (1.0 - alpha);
    Cv.AddWeighted(black2, alpha, inputMat, beta, 0.0, black2);
    black2.Copy(inputMat, maskOV);
    return(inputMat);
}
/// <summary>
/// Locally-adaptive binarization: splits the image into 200x200 tiles, takes each
/// tile's dominant histogram level as a per-tile threshold hint, normalizes those
/// hints to 0..255, then Otsu-thresholds each tile and stitches the result into
/// one binary image. Also opens several debug windows showing the first tiles.
/// </summary>
/// <param name="src">Input BGR image (converted to gray internally).</param>
/// <returns>Single-channel binarized image (stored in the 'bina' field).</returns>
public IplImage BinarizerMethod_Hist(IplImage src)
{
    bina = new IplImage(src.Size, BitDepth.U8, 1);
    gray = this.GrayScale(src);
    int area = 200; // tile side length in pixels
    int num = 0;    // running tile index
    // Number of tiles per axis, rounding up for partial edge tiles.
    // NOTE(review): 'row' is derived from Width and 'col' from Height — the
    // names look swapped relative to matrix convention; verify against CvMat below.
    int row = (src.Width % area == 0) ? (int)(src.Width / area) : (int)(src.Width / area + 1);
    int col = (src.Height % area == 0) ? (int)(src.Height / area) : (int)(src.Height / area + 1);
    int count = row * col;
    float[] data = new float[count];          // per-tile dominant histogram level
    IplImage[] piece = new IplImage[count];   // per-tile gray crops
    CvRect[] piece_roi = new CvRect[count];   // per-tile source rectangles
    for (int x = 0; x < src.Width; x = x + area)
    {
        for (int y = 0; y < src.Height; y = y + area)
        {
            CvRect roi = new CvRect { X = x, Y = y, Width = area, Height = area };
            // Clip edge tiles so the ROI stays inside the image.
            if (roi.X + roi.Width > src.Width)
            {
                roi.Width = area - ((roi.X + roi.Width) - src.Width);
            }
            if (roi.Y + roi.Height > src.Height)
            {
                roi.Height = area - ((roi.Y + roi.Height) - src.Height);
            }
            gray.SetROI(roi);
            piece[num] = new IplImage(gray.ROI.Size, BitDepth.U8, 1);
            Cv.Copy(gray, piece[num]);
            gray.ResetROI();
            // Histogram calculation: find the intensity bin with the highest count.
            int[] size = { area };
            CvHistogram hist = new CvHistogram(size, HistogramFormat.Array);
            Cv.CalcHist(piece[num], hist);
            float minValue, maxValue;
            hist.GetMinMaxValue(out minValue, out maxValue);
            int highlevel = 0;
            for (int i = 0; i < area; i++)
            {
                if (maxValue == hist.Bins[i].Val0)
                {
                    highlevel = i; // last bin matching the max wins
                }
            }
            piece_roi[num] = roi;
            data[num] = highlevel;
            num++;
        }
    }
    // Normalize the per-tile levels to 0..255 so they can seed Threshold below.
    CvMat kernel = new CvMat(row, col, MatrixType.F32C1, data);
    Cv.Normalize(kernel, kernel, 255, 0, NormType.C);
    for (int r = 0; r < count; r++)
    {
        // Otsu recomputes the threshold per tile; kernel[r] is the initial value.
        Cv.Threshold(piece[r], piece[r], kernel[r], 255, ThresholdType.Otsu);
        Cv.SetImageROI(bina, piece_roi[r]);
        Cv.Copy(piece[r], bina);
        bina.ResetROI();
    }
    // Lesson 37 - debug windows.
    // NOTE(review): indexing piece[1] and piece[2] assumes at least 3 tiles —
    // small inputs would throw; confirm intended input sizes.
    CvWindow win = new CvWindow("window", WindowMode.StretchImage, src);
    win.Resize(640, 480);
    win.Move(100, 0);
    win.ShowImage(piece[0]);
    win.Close();
    new CvWindow(piece[0]).Move(0, 0);
    new CvWindow(piece[1]).Move(0, 200);
    new CvWindow(piece[2]).Move(0, 400);
    // Lesson 37 - debug windows.
    return(bina);
}
/// <summary>
/// Runs the cascade detector on a downscaled, histogram-equalized grayscale
/// copy of the bitmap, then maps the detected rectangles back to the
/// original resolution (clamped to the image bounds).
/// </summary>
/// <param name="b">Source bitmap to scan.</param>
/// <returns>Detected features with coordinates in original-image space.</returns>
public List <T> DetectFeatures(Bitmap b)
{
    LoadFiles();
    List<T> features;
    //Type Intializer Exception occurs if you reuse an appdomain. Always restart the server.
    using (IplImage orig = OpenCvSharp.BitmapConverter.ToIplImage(b))
    using (IplImage gray = GetOriginalImageInGrayScale(orig))
    {
        int w = orig.Width;
        int h = orig.Height;

        // Scale so the longer side fits within scaledBounds, but never upscale.
        double ratio = (double)w / (double)h;
        double scale = 1;
        if (ratio > 1)
        {
            scale = (double)w / (double)scaledBounds;
        }
        if (ratio <= 1)
        {
            scale = (double)h / (double)scaledBounds;
        }
        scale = Math.Min(1, 1 / scale);

        using (IplImage small = new IplImage(new CvSize(Cv.Round(w * scale), Cv.Round(h * scale)), BitDepth.U8, 1))
        {
            // Resize to the smaller working image the detector will scan.
            Cv.Resize(gray, small, Interpolation.Area);
            // BUG FIX: equalize the image the detector actually sees.
            // The original called Cv.EqualizeHist(gray, gray) AFTER the
            // resize, which had no effect on 'small', so detection ran
            // on an unequalized image.
            Cv.EqualizeHist(small, small);
            using (CvMemStorage storage = new CvMemStorage())
            {
                storage.Clear();
                features = DetectFeatures(small, storage);
            }
        }

        // Scale all rectangles by the inverse factor to restore original
        // resolution, clamping to the image dimensions.
        for (int i = 0; i < features.Count; i++)
        {
            IFeature e = features[i];
            e.Y = (float)Math.Min(h, e.Y / scale);
            e.X = (float)Math.Min(w, e.X / scale);
            e.Y2 = (float)Math.Min(h, e.Y2 / scale);
            e.X2 = (float)Math.Min(w, e.X2 / scale);
        }
    }
    return features;
}
/// <summary>
/// Doubles both dimensions of the image via Gaussian pyramid expansion.
/// </summary>
/// <param name="src">Input 3-channel image.</param>
/// <returns>The upscaled image (also stored in the 'zoomin' field).</returns>
public IplImage ZoomIn(IplImage src)
{
    CvSize doubled = new CvSize(src.Width * 2, src.Height * 2);
    zoomin = new IplImage(doubled, BitDepth.U8, 3);
    Cv.PyrUp(src, zoomin, CvFilter.Gaussian5x5);
    return zoomin;
}
/// <summary>
/// Resizes the image to a quarter of its width and its height minus 1200 px,
/// using bilinear interpolation.
/// </summary>
/// <param name="src">Input 3-channel image.</param>
/// <returns>The resized image (also stored in the 'resize' field).</returns>
public IplImage ResizeImage(IplImage src)
{
    // NOTE(review): assumes src.Height > 1200 — smaller inputs would produce
    // a non-positive height; confirm expected input sizes with callers.
    int targetWidth = src.Width / 4;
    int targetHeight = src.Height - 1200;
    resize = new IplImage(new CvSize(targetWidth, targetHeight), BitDepth.U8, 3);
    Cv.Resize(src, resize, Interpolation.Linear);
    return resize;
}
/// <summary>
/// Nickの手法による二値化処理を行う。
/// </summary>
/// <param name="imgSrc">入力画像</param>
/// <param name="imgDst">出力画像</param>
/// <param name="kernelSize">局所領域のサイズ</param>
/// <param name="k">係数</param>
#else
/// <summary>
/// Binarizes by Nick's method
/// </summary>
/// <param name="src">Input image</param>
/// <param name="dst">Output image</param>
/// <param name="kernelSize">Window size</param>
/// <param name="k">Adequate coefficient</param>
#endif
public static void Nick(IplImage src, IplImage dst, int kernelSize, double k)
{
    if (src == null)
    {
        throw new ArgumentNullException(nameof(src));
    }
    if (dst == null)
    {
        throw new ArgumentNullException(nameof(dst));
    }
    // Grayscale images only
    if (src.NChannels != 1)
    {
        throw new ArgumentException("src must be gray scale image");
    }
    if (dst.NChannels != 1)
    {
        throw new ArgumentException("dst must be gray scale image");
    }
    // Window size check: must be odd and at least 3
    if (kernelSize < 3)
    {
        throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be 3 and above");
    }
    if (kernelSize % 2 == 0)
    {
        throw new ArgumentOutOfRangeException(nameof(kernelSize), "size must be odd number");
    }

    int borderSize = kernelSize / 2;
    CvRect roi = src.ROI;
    int width = roi.Width;
    int height = roi.Height;
    if (width != dst.Width || height != dst.Height)
    {
        throw new ArgumentException("src.Size == dst.Size");
    }

    // imgTemp: source padded by borderSize on each side (replicated edges) so
    // every pixel has a full window. imgSum/imgSqSum: integral images of the
    // padded source, allowing O(1) window sum / squared-sum lookups.
    using (IplImage imgTemp = new IplImage(width + (borderSize * 2), height + (borderSize * 2), src.Depth, src.NChannels))
    using (IplImage imgSum = new IplImage(imgTemp.Width + 1, imgTemp.Height + 1, BitDepth.F64, 1))
    using (IplImage imgSqSum = new IplImage(imgTemp.Width + 1, imgTemp.Height + 1, BitDepth.F64, 1))
    {
        Cv.CopyMakeBorder(src, imgTemp, new CvPoint(borderSize, borderSize), BorderType.Replicate, CvScalar.ScalarAll(0));
        Cv.Integral(imgTemp, imgSum, imgSqSum);

        unsafe
        {
            byte * pSrc = src.ImageDataPtr;
            byte * pDst = dst.ImageDataPtr;
            double *pSum = (double *)imgSum.ImageDataPtr;
            double *pSqSum = (double *)imgSqSum.ImageDataPtr;
            int stepSrc = src.WidthStep;
            int stepDst = dst.WidthStep;
            // Integral image row stride in double elements (not bytes).
            int stepSum = imgSum.WidthStep / sizeof(double);
            int ylim = height + borderSize;
            int xlim = width + borderSize;
            int kernelPixels = kernelSize * kernelSize;
            for (int y = borderSize; y < ylim; y++)
            {
                for (int x = borderSize; x < xlim; x++)
                {
                    // Window corners in padded-image coordinates.
                    int x1 = x - borderSize;
                    int y1 = y - borderSize;
                    int x2 = x + borderSize + 1;
                    int y2 = y + borderSize + 1;
                    // Window sum and squared-sum via the 4-corner integral trick.
                    double sum = pSum[stepSum * y2 + x2] - pSum[stepSum * y2 + x1]
                                 - pSum[stepSum * y1 + x2] + pSum[stepSum * y1 + x1];
                    double sqsum = pSqSum[stepSum * y2 + x2] - pSqSum[stepSum * y2 + x1]
                                   - pSqSum[stepSum * y1 + x2] + pSqSum[stepSum * y1 + x1];
                    double mean = sum / kernelPixels;
                    // NICK threshold: T = m + k * sqrt((sum(p^2) - m^2) / NP);
                    // clamp the radicand at 0 to guard against rounding error.
                    double term = (sqsum - mean * mean) / kernelPixels;
                    if (term < 0.0)
                    {
                        term = 0.0;
                    }
                    term = Math.Sqrt(term);
                    double threshold = mean + k * term;
                    // Map padded coordinates back to src ROI / dst coordinates.
                    int offsetSrc = stepSrc * (y + roi.Y - borderSize) + (x + roi.X - borderSize);
                    int offsetDst = stepDst * (y - borderSize) + (x - borderSize);
                    if (pSrc[offsetSrc] < threshold)
                    {
                        pDst[offsetDst] = 0;
                    }
                    else
                    {
                        pDst[offsetDst] = 255;
                    }
                }
            }
        }
    }
}
/// <summary>
/// Releases every cached native image. Each field is nulled after release so
/// calling Dispose() more than once cannot double-free a native buffer
/// (the original released without nulling, making repeated disposal unsafe).
/// </summary>
public void Dispose()
{
    ReleaseField(ref gray);
    ReleaseField(ref inversion);
    ReleaseField(ref bin);
    ReleaseField(ref blur);
    ReleaseField(ref zoomin);
    ReleaseField(ref zoomout);
    ReleaseField(ref resize);
    ReleaseField(ref slice);
    ReleaseField(ref symm);
    ReleaseField(ref rotate);
    ReleaseField(ref affine);
    ReleaseField(ref perspective);
    ReleaseField(ref draw);
    ReleaseField(ref hsv);
    ReleaseField(ref morp);
    ReleaseField(ref canny);
    ReleaseField(ref sobel);
    ReleaseField(ref laplace);
    ReleaseField(ref con);
    ReleaseField(ref corner);
    ReleaseField(ref apcon);
    ReleaseField(ref convex);
    ReleaseField(ref mom);
    ReleaseField(ref houline);
    ReleaseField(ref houcircle);
    ReleaseField(ref skin);
    ReleaseField(ref haarface);
    ReleaseField(ref bound);
    ReleaseField(ref ipl);
    ReleaseField(ref hdcgrahics);
    ReleaseField(ref blob);
    ReleaseField(ref blobcontour);
    ReleaseField(ref filter);
    ReleaseField(ref bina);
    ReleaseField(ref gamma);
    ReleaseField(ref calc);
    ReleaseField(ref inpaint);
    ReleaseField(ref dist);
    ReleaseField(ref pyrseg);
    ReleaseField(ref pyrmean);
    ReleaseField(ref match);
    ReleaseField(ref templit);
    ReleaseField(ref optical);
    ReleaseField(ref snake);
}

/// <summary>
/// Releases a single IplImage field and clears it so it cannot be released twice.
/// </summary>
private static void ReleaseField(ref IplImage image)
{
    if (image != null)
    {
        Cv.ReleaseImage(image);
        image = null;
    }
}
/// <summary>
/// Block-matching optical flow demo (cvCalcOpticalFlowBM): estimates a
/// per-block displacement between two penguin frames and draws the non-zero
/// motion vectors in red on top of the second frame.
/// </summary>
public OpticalFlowBM()
{
    const int BlockSize = 16; // side of each compared block, in pixels
    const int ShiftSize = 8;  // step between neighbouring blocks
    const int Range = 32;     // maximum search distance for a match

    CvSize blockSize = new CvSize(BlockSize, BlockSize);
    CvSize shiftSize = new CvSize(ShiftSize, ShiftSize);
    CvSize maxRange = new CvSize(Range, Range);

    using (IplImage srcPrev = Cv.LoadImage(FilePath.Image.Penguin1, LoadMode.GrayScale))
    using (IplImage srcCurr = Cv.LoadImage(FilePath.Image.Penguin2, LoadMode.GrayScale))
    using (IplImage dst = Cv.LoadImage(FilePath.Image.Penguin2, LoadMode.Color))
    {
        // One velocity cell per block position.
        CvSize velSize = new CvSize
        {
            Width = (srcPrev.Width - blockSize.Width + shiftSize.Width) / shiftSize.Width,
            Height = (srcPrev.Height - blockSize.Height + shiftSize.Height) / shiftSize.Height
        };
        using (CvMat velx = Cv.CreateMat(velSize.Height, velSize.Width, MatrixType.F32C1))
        using (CvMat vely = Cv.CreateMat(velSize.Height, velSize.Width, MatrixType.F32C1))
        {
            // Mirror the size checks the native implementation performs,
            // with specific exception types and messages instead of bare
            // 'throw new Exception()'.
            if (srcPrev.Size != srcCurr.Size)
            {
                throw new InvalidOperationException("source images must have the same size");
            }
            if (velx.Width != vely.Width || velx.Height != vely.Height)
            {
                throw new InvalidOperationException("velocity matrices must have the same size");
            }
            if (velx.Cols != velSize.Width || vely.Rows != velSize.Height)
            {
                throw new InvalidOperationException("velocity matrices do not match the expected block grid");
            }

            Cv.SetZero(velx);
            Cv.SetZero(vely);
            Cv.CalcOpticalFlowBM(srcPrev, srcCurr, blockSize, shiftSize, maxRange, false, velx, vely);

            // Draw one line per block that actually moved.
            for (int r = 0; r < velx.Rows; r++)
            {
                for (int c = 0; c < vely.Cols; c++)
                {
                    int dx = (int)Cv.GetReal2D(velx, r, c);
                    int dy = (int)Cv.GetReal2D(vely, r, c);
                    if (dx != 0 || dy != 0)
                    {
                        CvPoint p1 = new CvPoint(c * ShiftSize, r * ShiftSize);
                        CvPoint p2 = new CvPoint(c * ShiftSize + dx, r * ShiftSize + dy);
                        Cv.Line(dst, p1, p2, CvColor.Red, 1, LineType.AntiAlias, 0);
                    }
                }
            }

            using (new CvWindow("prev", srcPrev))
            using (new CvWindow("curr", srcCurr))
            using (new CvWindow("dst", dst))
            {
                Cv.WaitKey(0);
            }
        }
    }
}
/// <summary>
/// Mirrors the image horizontally (flip around the vertical axis).
/// </summary>
/// <param name="src">Input 3-channel image.</param>
/// <returns>The mirrored image (also stored in the 'symm' field).</returns>
public IplImage Symmetry(IplImage src)
{
    IplImage mirrored = new IplImage(src.Size, BitDepth.U8, 3);
    Cv.Flip(src, mirrored, FlipMode.Y);
    symm = mirrored;
    return symm;
}
/// <summary>
/// Camera loop: detects a 5x4 chessboard in each frame, then either warps
/// "rump.jpg" onto the board (option == 1) or outlines the detected corner
/// grid. Frames without a complete detection are shown in grayscale.
/// Fixes over the original: guards against a null frame from QueryFrame,
/// releases the per-frame scratch images each iteration (they leaked every
/// frame), and drops the never-used 'disp' image.
/// </summary>
public void Run()
{
    CvCapture cap = Cv.CreateCameraCapture(1);
    IplImage pic = new IplImage("rump.jpg");
    Cv.Flip(pic, pic, FlipMode.Y);

    int width = 5;
    int height = 4;
    int sqares = 20; // width * height = number of inner corners expected
    CvSize size = new CvSize(width, height);
    CvMat wMatrix = Cv.CreateMat(3, 3, MatrixType.F32C1);
    CvPoint2D32f[] corners = new CvPoint2D32f[sqares];
    int cornerCount;

    while (thread != null)
    {
        IplImage img = Cv.QueryFrame(cap);
        if (img == null)
        {
            // No frame available (camera unplugged or stream ended).
            break;
        }
        Cv.Flip(img, img, FlipMode.Y);

        // Per-frame scratch images, released in the finally block below.
        IplImage cimg = Cv.CreateImage(Cv.GetSize(img), BitDepth.U8, 3);
        IplImage nimg = Cv.CreateImage(Cv.GetSize(img), BitDepth.U8, 3);
        IplImage gray = Cv.CreateImage(Cv.GetSize(img), img.Depth, 1);
        try
        {
            bool found = Cv.FindChessboardCorners(img, size, out corners, out cornerCount,
                                                  ChessboardFlag.AdaptiveThresh | ChessboardFlag.FilterQuads);
            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
            CvTermCriteria criteria = new CvTermCriteria(CriteriaType.Epsilon, 30, 0.1);
            Cv.FindCornerSubPix(gray, corners, cornerCount, new CvSize(11, 11), new CvSize(-1, -1), criteria);

            if (cornerCount == sqares)
            {
                if (option == 1)
                {
                    // Map the four corners of the picture onto the board's
                    // outer corners (indices 0, 4, 19, 15).
                    CvPoint2D32f[] p = new CvPoint2D32f[4];
                    CvPoint2D32f[] q = new CvPoint2D32f[4];
                    IplImage blank = Cv.CreateImage(Cv.GetSize(pic), BitDepth.U8, 3);
                    try
                    {
                        q[0].X = (float)pic.Width * 0;
                        q[0].Y = (float)pic.Height * 0;
                        q[1].X = (float)pic.Width;
                        q[1].Y = (float)pic.Height * 0;
                        q[2].X = (float)pic.Width;
                        q[2].Y = (float)pic.Height;
                        q[3].X = (float)pic.Width * 0;
                        q[3].Y = (float)pic.Height;

                        p[0].X = corners[0].X;
                        p[0].Y = corners[0].Y;
                        p[1].X = corners[4].X;
                        p[1].Y = corners[4].Y;
                        p[2].X = corners[19].X;
                        p[2].Y = corners[19].Y;
                        p[3].X = corners[15].X;
                        p[3].Y = corners[15].Y;

                        Cv.GetPerspectiveTransform(q, p, out wMatrix);
                        Cv.Zero(nimg);
                        Cv.Zero(cimg);
                        // Warp the picture and the mask into the board quad,
                        // then composite over the camera frame.
                        // NOTE(review): 'blank' is never filled, so the warped
                        // mask stays black and Not() turns cimg all white —
                        // confirm this produces the intended masking.
                        Cv.WarpPerspective(pic, nimg, wMatrix);
                        Cv.WarpPerspective(blank, cimg, wMatrix);
                        Cv.Not(cimg, cimg);
                        Cv.And(cimg, img, cimg);
                        Cv.Or(cimg, nimg, img);
                        Cv.Flip(img, img, FlipMode.Y);
                        Bitmap bm = BitmapConverter.ToBitmap(img);
                        bm.SetResolution(pictureBox1.Width, pictureBox1.Height);
                        pictureBox1.Image = bm;
                    }
                    finally
                    {
                        Cv.ReleaseImage(blank);
                    }
                }
                else
                {
                    // Outline the detected board and draw all corners.
                    CvPoint[] p = new CvPoint[4];
                    p[0].X = (int)corners[0].X;
                    p[0].Y = (int)corners[0].Y;
                    p[1].X = (int)corners[4].X;
                    p[1].Y = (int)corners[4].Y;
                    p[2].X = (int)corners[19].X;
                    p[2].Y = (int)corners[19].Y;
                    p[3].X = (int)corners[15].X;
                    p[3].Y = (int)corners[15].Y;
                    Cv.Line(img, p[0], p[1], CvColor.Red, 2);
                    Cv.Line(img, p[1], p[2], CvColor.Green, 2);
                    Cv.Line(img, p[2], p[3], CvColor.Blue, 2);
                    Cv.Line(img, p[3], p[0], CvColor.Yellow, 2);
                    Cv.DrawChessboardCorners(img, size, corners, found);
                    Cv.Flip(img, img, FlipMode.Y);
                    Bitmap bm = BitmapConverter.ToBitmap(img);
                    bm.SetResolution(pictureBox1.Width, pictureBox1.Height);
                    pictureBox1.Image = bm;
                }
            }
            else
            {
                // Board not fully detected: show the grayscale frame.
                Cv.Flip(gray, gray, FlipMode.Y);
                Bitmap bm = BitmapConverter.ToBitmap(gray);
                bm.SetResolution(pictureBox1.Width, pictureBox1.Height);
                pictureBox1.Image = bm;
            }
        }
        finally
        {
            // ToBitmap copies pixel data, so releasing here is safe even for
            // the image currently shown in pictureBox1.
            Cv.ReleaseImage(gray);
            Cv.ReleaseImage(nimg);
            Cv.ReleaseImage(cimg);
        }
    }
}
/// <summary>
/// Produces a single-channel edge map using the Canny detector
/// (thresholds 100/255, 3x3 aperture).
/// </summary>
/// <param name="src">Input image.</param>
/// <returns>The edge map (also stored in the 'canny' field).</returns>
public IplImage CannyEdge(IplImage src)
{
    IplImage edges = new IplImage(src.Size, BitDepth.U8, 1);
    Cv.Canny(src, edges, 100, 255, ApertureSize.Size3);
    canny = edges;
    return canny;
}
/// <summary>
/// Loads the source image into 'srcImage' and writes a copy to
/// "BinaryImageSave.png".
/// BUG FIX: the 'fname' parameter was completely ignored — the method always
/// loaded the hard-coded "BinaryImage.png". It is now honored, keeping the
/// hard-coded file as the fallback so existing parameterless-style callers
/// (passing null/empty) keep working.
/// </summary>
/// <param name="fname">Path of the image to load; null/empty falls back to "BinaryImage.png".</param>
public void LoadOriginalImage(string fname)
{
    string path = string.IsNullOrEmpty(fname) ? "BinaryImage.png" : fname;
    srcImage = Cv.LoadImage(path, LoadMode.Color);
    Cv.SaveImage("BinaryImageSave.png", srcImage);
}
// Test setup: creates a fresh repository and loads Elizabeth's CV fixture
// before every test.
public void BeforeEachTest()
{
    var repository = new CvRepository();
    _repository = repository;
    _cv = repository.GetElizabethsCv();
}
//AND Operator
// Combines the source image with the mask via bitwise AND and saves the
// result to "AND.png" (also stored in the ANDOperation field).
public void ANDOperator()
{
    IplImage combined = Cv.CreateImage(srcImage.Size, BitDepth.U8, 3);
    Cv.And(srcImage, MaskImage, combined);
    ANDOperation = combined;
    Cv.SaveImage("AND.png", ANDOperation);
}
/// <summary>
/// Produces the bitwise negative of the (single-channel) image.
/// </summary>
/// <param name="src">Input image.</param>
/// <returns>The inverted image (also stored in the 'inversion' field).</returns>
public IplImage InversionImage(IplImage src)
{
    IplImage negative = new IplImage(src.Size, BitDepth.U8, 1);
    Cv.Not(src, negative);
    inversion = negative;
    return inversion;
}
/// <summary>
/// Converts a BGR image to a single-channel grayscale image.
/// </summary>
/// <param name="src">Input 3-channel BGR image.</param>
/// <returns>The grayscale image (also stored in the 'gray' field).</returns>
public IplImage GrayScale(IplImage src)
{
    IplImage grayscale = new IplImage(src.Size, BitDepth.U8, 1);
    Cv.CvtColor(src, grayscale, ColorConversion.BgrToGray);
    gray = grayscale;
    return gray;
}
/// <summary>
/// Returns the partial view for a CV theme. If the current user already has a
/// CV for this theme, that CV is rendered; otherwise a new CV is created with
/// a set of sample ItemCv rows (education, work experience, activities,
/// certificates, awards, skills) and then rendered.
/// </summary>
/// <param name="id">Theme id (nullable).</param>
/// <param name="cvs">CV entity to initialize and persist for a first-time view.</param>
/// <returns>Partial view bound to the existing or newly created CV.</returns>
public PartialViewResult Theme(int?id, Cv cvs)
{
    var cookie = new FunctionController();
    // Current user resolved from the cookie.
    var idus = cookie.CookieID();
    // Pseudo-unique key linking the CV and its sample items: GUID + theme id.
    var code = Guid.NewGuid().ToString();
    var fakeid = code + id;
    // NOTE(review): SingleOrDefault throws if more than one CV matches
    // (user, theme) — presumably that pair is unique; verify in the schema.
    Cv checkcv = db.Cvs.SingleOrDefault(n => n.theme_id == id && n.user_id == idus.user_id);
    if (checkcv != null)
    {
        // User already has a CV for this theme — just render it.
        return(PartialView(checkcv));
    }
    else
    {
        // First visit for this theme: initialize the new CV's defaults.
        // NOTE(review): DateTime.Now is server-local time — confirm whether
        // UTC was intended for cv_datecreate / cv_deadline.
        cvs.cv_fakeid = fakeid;
        cvs.cv_datecreate = DateTime.Now;
        cvs.user_id = idus.user_id;
        cvs.theme_id = id;
        cvs.cv_active = Common.Common.ACTIVE_HD;
        cvs.cv_option = false;
        cvs.cv_bin = false;
        cvs.cv_view = 1;
        cvs.cv_deadline = DateTime.Now.AddMonths(1);
        // Sample items below are placeholder content shown to the user until
        // they edit the CV. NOTE(review): each Add is followed by its own
        // SaveChanges — a single SaveChanges at the end would batch these;
        // kept as-is to preserve existing persistence ordering.
        // Sample education entry.
        ItemCv itemCv1 = new ItemCv { ic_name = "Đại học QuickJob", ic_content = "Tốt nghiệp loại Giỏi, điểm trung bình 8.0", ic_mid = "Chuyên ngành: Quản trị Doanh nghiệp", ic_fake = fakeid, ic_theme_id = id, ic_enddate = "05/2014", ic_todate = "10/2010", ic_stt = Common.Common.CV_HOCTAP, ic_usid = idus.user_id };
        db.ItemCvs.Add(itemCv1);
        db.SaveChanges();
        // Sample work-experience entry.
        ItemCv itemCv2 = new ItemCv { ic_name = "Công ty QuickJob", ic_content = "- Hỗ trợ viết bài quảng cáo sản phẩm qua kênh facebook, các forum,... \n - Giới thiệu, tư vấn sản phẩm, giải đáp các vấn đề thắc mắc của khách hàng qua điện thoại và email.", ic_mid = "Nhân viên bán hàng", ic_fake = fakeid, ic_theme_id = id, ic_enddate = "05/2014", ic_todate = "10/2010", ic_stt = Common.Common.CV_KINHNGHIEMVIECLAM, ic_usid = idus.user_id };
        db.ItemCvs.Add(itemCv2);
        db.SaveChanges();
        // Sample volunteering/activities entry.
        ItemCv itemCv3 = new ItemCv { ic_name = "Nhóm tình nguyện QuickJob", ic_content = "Tập hợp các món quà và phân phát tới người vô gia cư. \n - Chia sẻ, động viên họ vượt qua giai đoạn khó khăn, giúp họ có những suy nghĩ lạc quan.", ic_mid = "Tình nguyện viên", ic_fake = fakeid, ic_theme_id = id, ic_enddate = "05/2014", ic_todate = "10/2010", ic_stt = Common.Common.CV_HOATDONG, ic_usid = idus.user_id };
        db.ItemCvs.Add(itemCv3);
        db.SaveChanges();
        // Sample certificate entry.
        ItemCv itemCv4 = new ItemCv { ic_name = "Giải nhất tài năng QuickJob 2017", ic_fake = fakeid, ic_theme_id = id, ic_enddate = "2017", ic_stt = Common.Common.CV_CHUNGCHI, ic_usid = idus.user_id };
        db.ItemCvs.Add(itemCv4);
        db.SaveChanges();
        // Sample award entry.
        ItemCv itemCv5 = new ItemCv { ic_name = "Nhân viên xuất sắc năm công ty QuickJob", ic_fake = fakeid, ic_theme_id = id, ic_enddate = "2017", ic_stt = Common.Common.CV_GIAITHUONG, ic_usid = idus.user_id };
        db.ItemCvs.Add(itemCv5);
        db.SaveChanges();
        // Two identical sample skill entries.
        // NOTE(review): the duplicate insert looks intentional (two skill
        // slots in the template) — confirm.
        for (var i = 0; i < 2; i++)
        {
            ItemCv itemCv6 = new ItemCv { ic_name = "Tin học văn phòng QuickJob", ic_fake = fakeid, ic_theme_id = id, ic_enddate = "", ic_mid = "Sử dụng thành thạo các công cụ Word, Excel, Power Point", ic_stt = Common.Common.CV_CACKYNANG, ic_usid = idus.user_id };
            db.ItemCvs.Add(itemCv6);
            db.SaveChanges();
        }
        // Persist the CV itself; re-query by fakeid to render the stored row.
        var dao = new CvsDao();
        if (dao.Create(cvs))
        {
            Cv cV = db.Cvs.SingleOrDefault(n => n.cv_fakeid == fakeid);
            return(PartialView(cV));
        }
        else
        {
            // NOTE(review): creation failure is silently ignored and falls
            // through to an unbound partial view.
        }
    }
    return(PartialView());
}