public ConvexityDefect()
{
    using (IplImage imgSrc = new IplImage(@"img\hand_p.jpg", LoadMode.Color))
    using (IplImage imgHSV = new IplImage(imgSrc.Size, BitDepth.U8, 3))
    using (IplImage imgH = new IplImage(imgSrc.Size, BitDepth.U8, 1))
    using (IplImage imgS = new IplImage(imgSrc.Size, BitDepth.U8, 1))
    using (IplImage imgV = new IplImage(imgSrc.Size, BitDepth.U8, 1))
    using (IplImage imgBackProjection = new IplImage(imgSrc.Size, BitDepth.U8, 1))
    using (IplImage imgFlesh = new IplImage(imgSrc.Size, BitDepth.U8, 1))
    using (IplImage imgHull = new IplImage(imgSrc.Size, BitDepth.U8, 1))
    using (IplImage imgDefect = new IplImage(imgSrc.Size, BitDepth.U8, 3))
    using (IplImage imgContour = new IplImage(imgSrc.Size, BitDepth.U8, 3))
    using (CvMemStorage storage = new CvMemStorage())
    {
        // BGR -> HSV, then split into separate planes
        Cv.CvtColor(imgSrc, imgHSV, ColorConversion.BgrToHsv);
        Cv.CvtPixToPlane(imgHSV, imgH, imgS, imgV, null);
        IplImage[] hsvPlanes = { imgH, imgS, imgV };

        // Find the flesh-colored region
        RetrieveFleshRegion(imgSrc, hsvPlanes, imgBackProjection);

        // Keep only the blob with the largest area
        FilterByMaximalBlob(imgBackProjection, imgFlesh);
        Interpolate(imgFlesh);

        // Find the contour of the region
        CvSeq<CvPoint> contours = FindContours(imgFlesh, storage);
        if (contours != null)
        {
            Cv.DrawContours(imgContour, contours, CvColor.Red, CvColor.Green, 0, 3, LineType.AntiAlias);

            // Find the convex hull
            int[] hull;
            Cv.ConvexHull2(contours, out hull, ConvexHullOrientation.Clockwise);
            Cv.Copy(imgFlesh, imgHull);
            DrawConvexHull(contours, hull, imgHull);

            // Find the convexity defects
            Cv.Copy(imgContour, imgDefect);
            CvSeq<CvConvexityDefect> defect = Cv.ConvexityDefects(contours, hull);
            DrawDefects(imgDefect, defect);
        }

        using (new CvWindow("src", imgSrc))
        using (new CvWindow("back projection", imgBackProjection))
        using (new CvWindow("hull", imgHull))
        using (new CvWindow("defect", imgDefect))
        {
            Cv.WaitKey();
        }
    }
}
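// The snippet above assumes a DrawConvexHull helper that is not shown. A minimal
// sketch of what it plausibly does (an assumption, not the author's original):
// connect successive hull vertices, given as indices into the contour sequence,
// with line segments.
private void DrawConvexHull(CvSeq<CvPoint> contour, int[] hull, IplImage img)
{
    // Start from the last hull vertex so the polygon closes.
    CvPoint pt0 = contour[hull[hull.Length - 1]].Value;
    foreach (int idx in hull)
    {
        CvPoint pt = contour[idx].Value;
        Cv.Line(img, pt0, pt, CvColor.White, 2);
        pt0 = pt;
    }
}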
public void update_mhi(IplImage imgMain, ref IplImage imgDst, int diff_threshold)
{
    // NB: DateTime.Now.Second wraps every 60 seconds; a monotonic clock in
    // seconds (as in OpenCV's motempl.c sample) would be safer here.
    double timestamp = (double)DateTime.Now.Second;
    CvSize size = new CvSize(imgMain.width, imgMain.height);
    int i, idx1 = last, idx2;
    IplImage silh;
    CvSeq seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // Allocate images at the beginning, or reallocate them if the frame size changed
    if (mhi.ptr == null || mhi.width != size.width || mhi.height != size.height)
    {
        for (i = 0; i < N; i++)
        {
            buf[i] = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_8U, 1);
            cxcore.CvZero(ref buf[i]);
        }
        cxcore.CvReleaseImage(ref mhi);
        cxcore.CvReleaseImage(ref orient);
        cxcore.CvReleaseImage(ref segmask);
        cxcore.CvReleaseImage(ref mask);

        mhi = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
        cxcore.CvZero(ref mhi);
        orient = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
        segmask = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
        mask = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
    }

    // Convert the new frame to grayscale and diff it against the previous one
    cv.CvCvtColor(ref imgMain, ref buf[last], cvtypes.CV_BGR2GRAY);
    idx2 = (last + 1) % N;
    last = idx2;
    silh = buf[idx2];
    cxcore.CvAbsDiff(ref buf[idx1], ref buf[idx2], ref silh);

    // Threshold the diff and layer it onto the motion history image
    cv.CvThreshold(ref silh, ref silh, diff_threshold, 1, cv.CV_THRESH_BINARY);
    cv.CvUpdateMotionHistory(ref silh, ref mhi, timestamp, MHI_DURATION);

    // Convert the MHI to a displayable 8-bit mask
    cxcore.CvConvertScale(ref mhi, ref mask, 255 / MHI_DURATION, (MHI_DURATION - timestamp) * 255 / MHI_DURATION);
    cxcore.CvZero(ref imgDst);
    cxcore.CvMerge(ref mask, ref imgDst);

    // Calculate the motion gradient orientation and the valid orientation mask
    cv.CvCalcMotionGradient(ref mhi, ref mask, ref orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);

    if (storage.ptr == null)
        storage = cxcore.CvCreateMemStorage();
    else
        cxcore.CvClearMemStorage(ref storage);

    // Segment the motion into separately moving components
    seq = cv.CvSegmentMotion(ref mhi, ref segmask, ref storage, timestamp, MAX_TIME_DELTA);

    // Iterate through the components; i == -1 means the whole image
    for (i = -1; i < seq.total; i++)
    {
        if (i < 0)
        {
            // Whole-image case
            comp_rect = new CvRect(0, 0, size.width, size.height);
            color = cxcore.CV_RGB(255, 255, 255);
            magnitude = 100;
        }
        else
        {
            // The i-th motion component
            IntPtr ptr = cxcore.CvGetSeqElem(ref seq, i);
            CvConnectedComp c = (CvConnectedComp)cvconvert.PtrToType(ptr, typeof(CvConnectedComp));
            comp_rect = c.rect;
            if (comp_rect.width + comp_rect.height < 100)   // reject small components
                continue;
            color = cxcore.CV_RGB(255, 0, 0);
            magnitude = 30;
        }

        // Select component ROI
        cxcore.CvSetImageROI(ref silh, comp_rect);
        cxcore.CvSetImageROI(ref mhi, comp_rect);
        cxcore.CvSetImageROI(ref orient, comp_rect);
        cxcore.CvSetImageROI(ref mask, comp_rect);

        // Calculate orientation
        angle = cv.CvCalcGlobalOrientation(ref orient, ref mask, ref mhi, timestamp, MHI_DURATION);
        angle = 360 - angle;    // adjust for images with top-left origin

        // Count points within the silhouette ROI. Note the original "recheck"
        // marker: motempl.c uses the L1 norm here, while CvNorm defaults to L2.
        count = cxcore.CvNorm(ref silh);

        cxcore.CvResetImageROI(ref mhi);
        cxcore.CvResetImageROI(ref orient);
        cxcore.CvResetImageROI(ref mask);
        cxcore.CvResetImageROI(ref silh);

        // Check for the case of little motion
        if (count < comp_rect.width * comp_rect.height * 0.05)
            continue;

        // Draw a clock with an arrow indicating the direction
        center = new CvPoint(comp_rect.x + comp_rect.width / 2, comp_rect.y + comp_rect.height / 2);
        cxcore.CvCircle(ref imgDst, center, cxcore.CvRound(magnitude * 1.2), color, 3, cxcore.CV_AA, 0);
        cxcore.CvLine(ref imgDst, center,
            new CvPoint(cxcore.CvRound(center.x + magnitude * Math.Cos(angle * Math.PI / 180)),
                        cxcore.CvRound(center.y - magnitude * Math.Sin(angle * Math.PI / 180))),
            color, 3, cxcore.CV_AA, 0);
    }
}
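// update_mhi relies on fields declared elsewhere in its class. A sketch of
// plausible declarations, with the constant values taken from OpenCV's classic
// motempl.c sample (the author's actual values may differ):
const int N = 4;                        // size of the frame ring buffer
const double MHI_DURATION = 1.0;        // max duration of a motion track, in seconds
const double MAX_TIME_DELTA = 0.5;      // max gradient time delta, in seconds
const double MIN_TIME_DELTA = 0.05;     // min gradient time delta, in seconds
IplImage[] buf = new IplImage[N];       // ring buffer of grayscale frames
int last = 0;                           // index of the most recent frame in buf
IplImage mhi, orient, segmask, mask;    // MHI, orientation, segmentation, and mask images
CvMemStorage storage;                   // storage for CvSegmentMotion results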
/// <summary>
/// Extracts the contours of Maximally Stable Extremal Regions
/// </summary>
/// <param name="image">The 8-bit input image</param>
/// <param name="mask">Optional mask restricting the search region, or null</param>
/// <returns>The detected regions, each as an array of contour points</returns>
public CvPoint[][] Extract(Mat image, Mat mask)
{
    if (image == null)
        throw new ArgumentNullException("image");

    CvMat _image = image.ToCvMat();
    IntPtr pmask = (mask == null) ? IntPtr.Zero : mask.ToCvMat().CvPtr;
    IntPtr pcontours = IntPtr.Zero;

    using (CvMemStorage storage = new CvMemStorage(0))
    {
        CvInvoke.cvExtractMSER(_image.CvPtr, pmask, ref pcontours, storage.CvPtr, Struct);
        if (pcontours == IntPtr.Zero)
        {
            return new CvPoint[0][];
        }

        // The result is a sequence of pointers, each referencing one CvContour
        CvSeq<IntPtr> seq = new CvSeq<IntPtr>(pcontours);
        CvContour[] contours = Array.ConvertAll<IntPtr, CvContour>(
            seq.ToArray(), delegate(IntPtr p) { return new CvContour(p); });

        CvPoint[][] result = new CvPoint[contours.Length][];
        for (int i = 0; i < contours.Length; i++)
        {
            result[i] = contours[i].ToArray();
        }
        return result;
    }
}
/// <summary>
/// Finds the contours of a binary image
/// </summary>
/// <param name="img">The binary source image</param>
/// <param name="storage">Storage that the contour sequence is allocated from</param>
/// <returns>The contour with the most points, or null if none was found</returns>
private CvSeq<CvPoint> FindContours(IplImage img, CvMemStorage storage)
{
    // Extract contours (work on a clone, since FindContours modifies its input)
    CvSeq<CvPoint> contours;
    using (IplImage imgClone = img.Clone())
    {
        Cv.FindContours(imgClone, storage, out contours);
        if (contours == null)
        {
            return null;
        }
        contours = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, 3, true);
    }

    // Keep only the contour that looks longest (the one with the most points)
    CvSeq<CvPoint> max = contours;
    for (CvSeq<CvPoint> c = contours; c != null; c = c.HNext)
    {
        if (max.Total < c.Total)
        {
            max = c;
        }
    }
    return max;
}
private ShapeClip DetectClip(CvSeq<CvPoint> contour, IplImage image)
{
    // Approximate the contour with a polygon and take its bounding rectangle.
    CvMemStorage cstorage = new CvMemStorage();
    CvSeq<CvPoint> verts = contour.ApproxPoly(CvContour.SizeOf, cstorage, ApproxPolyMethod.DP, contour.ContourPerimeter() * 0.05);
    CvRect rect = Cv.BoundingRect(verts);

    // Scale the bounding box up by 50%, keeping it centered.
    CvSize originalSize = rect.Size;
    CvSize size = new CvSize((int)(rect.Width * 1.5), (int)(rect.Height * 1.5));
    CvSize sizeDist = new CvSize(rect.Width - size.Width, rect.Height - size.Height);
    rect = new CvRect(
        Math.Max(rect.Location.X + sizeDist.Width / 2, 0),
        Math.Max(rect.Location.Y + sizeDist.Height / 2, 0),
        size.Width,
        size.Height);

    // If the polygon looks like a rectangle, convert it to a region of interest
    // and refine the orientation estimate.
    if (verts.Total >= 4 && new CvRect(0, 0, image.Width, image.Height).Contains(rect))
    {
        DetectionState detectionState = verts.Total == 4 ? DetectionState.SemiOriented : DetectionState.Candidate;
        double angle = (180.0 / Math.PI) * ComputeOrientationFromVerts(verts.ToArray());

        using (IplImage region = image.Clone(rect))
        using (IplImage finalRegion = image.Clone(rect))
        using (IplImage colorRegion = new IplImage(region.Size.Width, region.Size.Height, BitDepth.U8, 3))
        using (IplImage debug = new IplImage(region.Size.Width + 20, region.Size.Height + 20, BitDepth.U8, 3))
        {
            // Rotate into position based on the line angle estimate
            Cv.WarpAffine(region, region, Cv.GetRotationMatrix2D(new CvPoint2D32f(rect.Width / 2, rect.Height / 2), angle, 1));
            Cv.FloodFill(region, new CvPoint(0, 0), 255, 0, 150);

            // Project the image onto each axis and find clusters
            region.Not(region);
            double[] horizontalProjection, verticalProjection;
            int[] horizontalPrjClusters = ComputeClusters(region, true, out horizontalProjection);
            int horizontalClusters = horizontalPrjClusters[0], lastHorizontalCluster = horizontalPrjClusters[1];
            int[] verticalPrjClusters = ComputeClusters(region, false, out verticalProjection);
            int verticalClusters = verticalPrjClusters[0], lastVerticalCluster = verticalPrjClusters[1];

            // Correct the orientation (0/90/180/270 degrees) based on the clusters found
            bool foundLDRs = false;
            if (verticalClusters > horizontalClusters)
            {
                if (lastHorizontalCluster < region.Width / 2)
                {
                    angle += 90;    // 90 deg
                    foundLDRs = true;
                }
                else
                {
                    angle += 270;   // 270 deg
                    foundLDRs = true;
                }
            }
            else if (verticalClusters < horizontalClusters)
            {
                if (lastVerticalCluster < region.Height / 2)
                {
                    foundLDRs = true;   // already at 0 deg
                }
                else
                {
                    angle += 180;   // 180 deg
                    foundLDRs = true;
                }
            }
            else
            {
                // Something went wrong with the initial alignment:
                // no proper orientation found - could not identify the LDRs.
            }

            #region DEBUG
            //debug.Zero();
            //Cv.CvtColor(finalRegion, colorRegion, ColorConversion.GrayToRgb);
            //debug.DrawImage(20, 0, region.Width, region.Height, colorRegion);
            //for (int i = 0; i < region.Width / 2; i++)
            //    debug.DrawRect(20 + i, debug.Height - (int)(horizontalProjection[i] * 100), 20 + i, debug.Height, CvColor.Red, 1);
            //for (int i = 0; i < region.Height / 2; i++)
            //    debug.DrawRect(0, i, (int)(verticalProjection[i] * 100), i, CvColor.Red, 1);
            //debugWindow.ShowImage(debug);
            #endregion

            if (foundLDRs)
            {
                detectionState = DetectionState.FullyOriented;
            }
        }

        // Map from image pixels to screen space
        Vec2F scale = new Vec2F(screenResolution.X / image.Width, screenResolution.Y / image.Height);
        return new ShapeClip(
            detectionState,
            new Vec2F(rect.Location.X + 0.5f * rect.Width, rect.Location.Y + 0.5f * rect.Height).Scale(scale),
            new Vec2F(originalSize).Scale(scale),
            angle);
    }
    else
    {
        return null;
    }
}
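// DetectClip assumes a ComputeOrientationFromVerts helper that is not shown.
// One plausible sketch (hypothetical, not the author's implementation): return
// the angle, in radians, of the longest edge of the approximated polygon.
private double ComputeOrientationFromVerts(CvPoint[] verts)
{
    double bestLengthSq = -1;
    double angle = 0;
    for (int i = 0; i < verts.Length; i++)
    {
        // Walk each edge, including the closing edge back to vertex 0
        CvPoint a = verts[i];
        CvPoint b = verts[(i + 1) % verts.Length];
        double dx = b.X - a.X, dy = b.Y - a.Y;
        double lengthSq = dx * dx + dy * dy;
        if (lengthSq > bestLengthSq)
        {
            bestLengthSq = lengthSq;
            angle = Math.Atan2(dy, dx);
        }
    }
    return angle;
}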
public IplImage cariHaar(IplImage image)
{
    // Mirror the input, then run skin detection in HSV space
    cxcore.CvFlip(ref image, 1);
    imgSkin = new IplImage();
    imgSkin = cxcore.CvCreateImage(cxcore.CvGetSize(ref image), 8, 3);
    imgSkin = skinDet.skin_hsv(image);

    IplImage gray = cxcore.CvCreateImage(new CvSize(imgSkin.width, imgSkin.height), (int)cxtypes.IPL_DEPTH_8U, 1);
    cv.CvCvtColor(ref imgSkin, ref gray, cvtypes.CV_BGR2GRAY);

    IplImage small_image = imgSkin;
    CvMemStorage storage = cxcore.CvCreateMemStorage(0);
    CvSeq handOpen, handClose;
    int i, scale = 1;
    bool do_pyramids = true;

    #region speed up processing
    // Downsample the image by half so the Haar detector has less work to do
    if (do_pyramids)
    {
        small_image = cxcore.CvCreateImage(new CvSize(imgSkin.width / 2, imgSkin.height / 2), (int)cxtypes.IPL_DEPTH_8U, 3);
        cv.CvPyrDown(ref imgSkin, ref small_image, (int)CvFilter.CV_GAUSSIAN_5x5);
        scale = 2;
    }
    #endregion

    #region open hand
    IntPtr ptrO = cxcore.CvLoad("..\\..\\Training\\handOpen.xml");
    cascadeO = (CvHaarClassifierCascade)cvconvert.PtrToType(ptrO, typeof(CvHaarClassifierCascade));
    cascadeO.ptr = ptrO;
    handOpen = cv.CvHaarDetectObjects(ref small_image, ref cascadeO, ref storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, new CvSize(0, 0));
    if (handOpen.total != 0)
    {
        // Draw a red box around each detected open hand
        for (i = 0; i < handOpen.total; i++)
        {
            ho_rect = (CvRect)cvconvert.PtrToType(cxcore.CvGetSeqElem(ref handOpen, i), typeof(CvRect));
            cxcore.CvRectangle(ref image,
                new CvPoint(ho_rect.x * scale - 10, ho_rect.y * scale - 10),
                new CvPoint((ho_rect.x + ho_rect.width) * scale + 10, (ho_rect.y + ho_rect.height) * scale + 10),
                cxcore.CV_RGB(255, 0, 0), 1, 8, 0);
        }
        // Report the center of the last detection (mirrored back to the
        // original orientation) and its region of interest to the form
        form.closex = 0;
        form.closey = 0;
        form.openx = image.width - ((ho_rect.x * scale) + ((ho_rect.width * scale) / 2));
        form.openy = ho_rect.y * scale + ((ho_rect.height * scale) / 2);
        form.roiX = 640 - (ho_rect.x * scale - 10) - (ho_rect.width * scale + 10);
        form.roiY = ho_rect.y * scale - 10;
        form.roiW = ho_rect.width * scale + 10;
        form.roiH = ho_rect.height * scale + 10;
    }
    #endregion

    #region closed hand
    // Only look for a closed hand if no open hand was found
    if (handOpen.total == 0)
    {
        IntPtr ptrC = cxcore.CvLoad("..\\..\\Training\\handClose.xml");
        cascadeC = (CvHaarClassifierCascade)cvconvert.PtrToType(ptrC, typeof(CvHaarClassifierCascade));
        cascadeC.ptr = ptrC;
        handClose = cv.CvHaarDetectObjects(ref small_image, ref cascadeC, ref storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING, new CvSize(0, 0));
        if (handClose.total != 0)
        {
            // Draw a blue box around each detected closed hand
            for (i = 0; i < handClose.total; i++)
            {
                hc_rect = (CvRect)cvconvert.PtrToType(cxcore.CvGetSeqElem(ref handClose, i), typeof(CvRect));
                cxcore.CvRectangle(ref image,
                    new CvPoint(hc_rect.x * scale, hc_rect.y * scale),
                    new CvPoint((hc_rect.x + hc_rect.width) * scale, (hc_rect.y + hc_rect.height) * scale),
                    cxcore.CV_RGB(0, 0, 255), 1, 8, 0);
            }
            form.closex = image.width - ((hc_rect.x * scale) + ((hc_rect.width * scale) / 2));
            form.closey = hc_rect.y * scale + ((hc_rect.height * scale) / 2);
        }
    }
    #endregion

    // Release everything and un-mirror the image before returning it
    cxcore.CvReleaseMemStorage(ref storage);
    cv.CvReleaseHaarClassifierCascade(ref cascadeO);
    if (handOpen.total == 0)
    {
        cv.CvReleaseHaarClassifierCascade(ref cascadeC);
    }
    cxcore.CvReleaseImage(ref gray);
    cxcore.CvReleaseImage(ref small_image);
    cxcore.CvReleaseImage(ref imgSkin);
    cxcore.CvFlip(ref image, 1);
    return image;
}
private void Form1_Load(object sender, EventArgs e)
{
    Boolean access = check_lcns();
    if (access)
    {
        pinfo.MdiParent = this;
        pinfo.Show();
        panel1.Controls.Add(pinfo);
    }
    else
    {
        DoctorInfoForm di_Form = new DoctorInfoForm();  // hospital information entry form
        di_Form.Owner = this;
        di_Form.ShowDialog();
        if (di_Form.DialogResult == DialogResult.OK)
        {
            pinfo.MdiParent = this;
            pinfo.Show();
            panel1.Controls.Add(pinfo);
        }
        else
        {
            this.DialogResult = di_Form.DialogResult;
        }
    }

    // Load the test image, resize it to 600x600, and binarize it
    IplImage src = Cv.LoadImage("test4.jpg");
    IplImage dst = new IplImage(600, 600, BitDepth.U8, 3);
    IplImage dst2 = new IplImage(600, 600, BitDepth.U8, 1);
    Cv.Resize(src, dst);
    IplImage result = new IplImage(600, 600, BitDepth.U8, 3);
    IplImage result2 = new IplImage(600, 600, BitDepth.U8, 3);
    Cv.CvtColor(dst, dst2, ColorConversion.BgrToGray);
    Cv.Threshold(dst2, dst2, 20, 255, ThresholdType.Binary);
    Cv.Smooth(dst2, dst2, SmoothType.Gaussian);
    Cv.NamedWindow("nonthresh");
    Cv.ShowImage("nonthresh", dst2);

    // Find and draw the external contours
    CvSeq<CvPoint> contours, contours2;
    CvMemStorage storage = new CvMemStorage();
    Cv.FindContours(dst2, storage, out contours, CvContour.SizeOf, ContourRetrieval.External, ContourChain.ApproxSimple);
    contours = Cv.ApproxPoly(contours, CvContour.SizeOf, storage, ApproxPolyMethod.DP, 3, true);
    contours2 = contours;
    Cv.DrawContours(result, contours, CvColor.Green, CvColor.Red, 3);
    Cv.NamedWindow("img12");
    Cv.ShowImage("img12", result);

    // First pass: find the point count of the largest contour
    CvSeq<CvPoint> first_contour;
    int i;
    int contour_max = 0;
    for (first_contour = contours; contours != null; contours = contours.HNext)
    {
        if (contour_max < contours.Total)
        {
            contour_max = contours.Total;
        }
    }

    // Second pass: copy that contour's points into an array
    CvPoint[] ptseq = new CvPoint[contour_max];
    for (first_contour = contours2; contours2 != null; contours2 = contours2.HNext)
    {
        if (contours2.Total == contour_max)
        {
            for (i = 0; i < contours2.Total; i++)
            {
                CvPoint? pt = Cv.GetSeqElem<CvPoint>(contours2, i);
                ptseq[i] = new CvPoint { X = pt.Value.X, Y = pt.Value.Y };
            }
        }
    }

    // Compute the convex hull of the largest contour and draw it
    CvPoint[] hull;
    Cv.ConvexHull2(ptseq, out hull, ConvexHullOrientation.Counterclockwise);
    CvPoint pt0 = hull[hull.Length - 1];
    foreach (CvPoint pt in hull)
    {
        Cv.Line(dst, pt0, pt, CvColor.Green);
        pt0 = pt;
    }
    Cv.NamedWindow("img");
    Cv.ShowImage("img", dst);
    Cv.WaitKey();
}
/// <summary>
/// Detects features on a grayscale image.
/// </summary>
/// <param name="img"></param>
/// <param name="storage"></param>
/// <returns></returns>
protected override List<Face> DetectFeatures(IplImage img, CvMemStorage storage)
{
    // Determine the minimum face size
    var minSize = Math.Max(12, (int)Math.Round((double)MinSizePercent / 100.0 * Math.Min(img.Width, img.Height)));

    // Detect faces (frontal)
    Stopwatch watch = Stopwatch.StartNew();
    CvAvgComp[] faces = BorrowCascade("FaceCascadeAlt",
        c => Cv.HaarDetectObjects(img, c, storage, 1.0850, MinConfidenceLevel, HaarDetectionType.DoCannyPruning,
            new CvSize(minSize, minSize), new CvSize(0, 0)).ToArrayAndDispose());

    // Sort by accuracy
    Array.Sort<CvAvgComp>(faces, CompareByNeighbors);

    // Convert into a list of feature objects
    List<Face> features = new List<Face>(faces.Length);
    foreach (CvAvgComp face in faces)
    {
        features.Add(new Face(PolygonMath.ScaleRect(face.Rect.ToRectangleF(), ExpandX, ExpandY), face.Neighbors));
    }

    // Profile detection doesn't add much, and the results would have to be deduplicated.
    //CvAvgComp[] profiles = BorrowCascade("FaceProfile", c => Cv.HaarDetectObjects(img, c, storage, 1.2, MinConfidenceLevel + 2, HaarDetectionType.FindBiggestObject | HaarDetectionType.DoRoughSearch | HaarDetectionType.DoCannyPruning, new CvSize(img.Width / 8, img.Height / 8), new CvSize(0, 0)).ToArrayAndDispose());
    //foreach (CvAvgComp face in profiles) features.Add(new Face(PolygonMath.ScaleRect(face.Rect.ToRectangleF(), ExpandX, ExpandY), face.Neighbors));

    // Test for eyes in the upper 60% of each face wider than 20 pixels
    foreach (var face in features)
    {
        var w = (int)(face.X2 - face.X);
        var h = (int)((face.Y2 - face.Y) * 0.6);
        if (w > 20)
        {
            img.SetROI((int)face.X, (int)face.Y, w, h);
            storage.Clear();
            CvAvgComp[] eyes = BorrowCascade("Eye",
                c => Cv.HaarDetectObjects(img, c, storage, 1.0850, 4,
                    HaarDetectionType.FindBiggestObject | HaarDetectionType.DoRoughSearch,
                    new CvSize(4, 4), new CvSize(img.Width / 2, img.Height / 2)).ToArrayAndDispose());
            if (eyes.Length == 0)
            {
                // Halve the estimated accuracy if no eyes are detected.
                // We never want to boost accuracy, because the walls have eyes
                face.Accuracy = face.Accuracy / 2;
            }
        }
    }
    img.ResetROI();  // restore the full image; the last SetROI would otherwise leak to callers

    // Unless we're below MinFaces, filter out the low-confidence matches
    while (features.Count > MinFaces && features[features.Count - 1].Accuracy < ConfidenceLevelThreshold)
    {
        features.RemoveAt(features.Count - 1);
    }

    watch.Stop();
    totalTime += watch.ElapsedMilliseconds;
    count++;
    Debug.WriteLine($"Face detection time: {watch.ElapsedMilliseconds}ms (avg {totalTime / count}ms)");

    // Never return more than [MaxFaces]
    return (features.Count > MaxFaces) ? features.GetRange(0, MaxFaces) : features;
}
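// DetectFeatures assumes a BorrowCascade helper that is not shown. A minimal
// sketch of the idea (the pool type and its Borrow/Return methods are
// hypothetical): check a named cascade out of a shared pool, run the callback,
// and always return the cascade, since Haar cascades are not safe to share
// across concurrent detections.
private T BorrowCascade<T>(string name, Func<CvHaarClassifierCascade, T> operation)
{
    CvHaarClassifierCascade cascade = cascadePool.Borrow(name);  // hypothetical pool field
    try
    {
        return operation(cascade);
    }
    finally
    {
        cascadePool.Return(name, cascade);
    }
}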