/// <summary>
/// Create a new HOGDescriptor using the specific parameters
/// </summary>
/// <param name="winSize">Detection window size. Must be aligned to block size and block stride.</param>
/// <param name="blockSize">Block size in cells. Only (2,2) is supported for now.</param>
/// <param name="blockStride">Block stride. Must be a multiple of cell size.</param>
/// <param name="cellSize">Cell size. Only (8, 8) is supported for now.</param>
/// <param name="nbins">Number of bins. Only 9 bins per cell is supported for now.</param>
/// <param name="winSigma">Gaussian smoothing window parameter.</param>
/// <param name="L2HysThreshold">L2-Hys normalization method shrinkage.</param>
/// <param name="gammaCorrection">Do gamma correction preprocessing or not.</param>
/// <param name="nLevels">Maximum number of detection window increases.</param>
public GpuHOGDescriptor(
   Size winSize,
   Size blockSize,
   Size blockStride,
   Size cellSize,
   int nbins,
   double winSigma,
   double L2HysThreshold,
   bool gammaCorrection,
   int nLevels)
{
   _ptr = gpuHOGDescriptorCreate(
      ref winSize,
      ref blockSize,
      ref blockStride,
      ref cellSize,
      nbins,
      winSigma,
      L2HysThreshold,
      gammaCorrection,
      nLevels);
   _rectStorage = new MemStorage();
   _rectSeq = new Seq<Rectangle>(_rectStorage);
   // FIX: the default constructor initializes _vector but this overload did not,
   // leaving _vector null for descriptors created with explicit parameters.
   _vector = new VectorOfFloat();
}
/// <summary>
/// Create a new HOGDescriptor with the default parameters
/// </summary>
public GpuHOGDescriptor()
{
   // Managed helpers used to receive detection results.
   _rectStorage = new MemStorage();
   _rectSeq = new Seq<Rectangle>(_rectStorage);
   _vector = new VectorOfFloat();
   // Native GPU HOG descriptor with OpenCV's default settings.
   _ptr = gpuHOGDescriptorCreateDefault();
}
/// <summary>
/// Detect the Fast keypoints from the image
/// </summary>
/// <param name="image">The image to extract keypoints from</param>
/// <returns>The array of fast keypoints</returns>
public MKeyPoint[] DetectKeyPoints(Image<Gray, byte> image)
{
   using (MemStorage buffer = new MemStorage())
   {
      // Collect the detected keypoints into a temporary sequence, then
      // copy them out before the native storage is released.
      Seq<MKeyPoint> detected = new Seq<MKeyPoint>(buffer);
      CvInvoke.CvFASTKeyPoints(image, detected, Threshold, NonmaxSupression);
      return detected.ToArray();
   }
}
/// <summary>
/// Get the model points stored in this detector
/// </summary>
/// <returns>The model points stored in this detector</returns>
public MKeyPoint[] GetModelPoints()
{
   using (MemStorage buffer = new MemStorage())
   {
      // The native call fills the sequence; materialize it before the
      // backing storage goes out of scope.
      Seq<MKeyPoint> points = new Seq<MKeyPoint>(buffer);
      CvInvoke.CvPlanarObjectDetectorGetModelPoints(_ptr, points);
      return points.ToArray();
   }
}
/// <summary>
/// Detect planar object from the specific image
/// </summary>
/// <param name="image">The image where the planar object will be detected</param>
/// <param name="h">The homography matrix which will be updated</param>
/// <returns>The four corners of the detected region</returns>
public PointF[] Detect(Image<Gray, Byte> image, HomographyMatrix h)
{
   using (MemStorage buffer = new MemStorage())
   {
      // The detector writes the region corners into this sequence and
      // updates the caller-supplied homography in place.
      Seq<PointF> regionCorners = new Seq<PointF>(buffer);
      CvInvoke.CvPlanarObjectDetectorDetect(_ptr, image, h, regionCorners);
      return regionCorners.ToArray();
   }
}
/// <summary>
/// Detect the Lepetit keypoints from the image
/// </summary>
/// <param name="image">The image to extract Lepetit keypoints</param>
/// <param name="maxCount">The maximum number of keypoints to be extracted, use 0 to ignore the max count</param>
/// <param name="scaleCoords">Indicates if the coordinates should be scaled</param>
/// <returns>The array of Lepetit keypoints</returns>
public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image, int maxCount, bool scaleCoords)
{
   using (MemStorage keypointStorage = new MemStorage())
   {
      Seq<MKeyPoint> keypoints = new Seq<MKeyPoint>(keypointStorage);
      // 'ref this' passes the detector struct itself to the native call.
      CvLDetectorDetectKeyPoints(ref this, image, keypoints.Ptr, maxCount, scaleCoords);
      return keypoints.ToArray();
   }
}
/// <summary>
/// Use camshift to track the feature
/// </summary>
/// <param name="observedFeatures">The feature found from the observed image</param>
/// <param name="initRegion">The predicted location of the model in the observed image. If not known, use MCvBox2D.Empty as default</param>
/// <param name="priorMask">The mask that should be the same size as the observed image. Contains a priori value of the probability a match can be found. If you are not sure, pass an image fills with 1.0s</param>
/// <returns>If a match is found, the homography projection matrix is returned. Otherwise null is returned</returns>
public HomographyMatrix CamShiftTrack(SURFFeature[] observedFeatures, MCvBox2D initRegion, Image<Gray, Single> priorMask)
{
   using (Image<Gray, Single> matchMask = new Image<Gray, Single>(priorMask.Size))
   {
      #region get the list of matched point on the observed image
      Single[, ,] matchMaskData = matchMask.Data;

      //Compute the matched features
      MatchedSURFFeature[] matchedFeature = _matcher.MatchFeature(observedFeatures, 2, 20);
      matchedFeature = VoteForUniqueness(matchedFeature, 0.8);

      // Each matched observed point contributes a confidence value that is the
      // reciprocal of its best match distance (closer match => higher weight).
      foreach (MatchedSURFFeature f in matchedFeature)
      {
         PointF p = f.ObservedFeature.Point.pt;
         matchMaskData[(int)p.Y, (int)p.X, 0] = 1.0f / (float) f.SimilarFeatures[0].Distance;
      }
      #endregion

      // Choose the camshift start window: the whole ROI when no prediction is
      // given, otherwise the prediction's bounding box clipped to the image.
      Rectangle startRegion;
      if (initRegion.Equals(MCvBox2D.Empty))
         startRegion = matchMask.ROI;
      else
      {
         startRegion = PointCollection.BoundingRectangle(initRegion.GetVertices());
         if (startRegion.IntersectsWith(matchMask.ROI))
            startRegion.Intersect(matchMask.ROI);
      }

      // Combine the match confidences with the caller-supplied prior (element-wise).
      CvInvoke.cvMul(matchMask.Ptr, priorMask.Ptr, matchMask.Ptr, 1.0);

      MCvConnectedComp comp;
      MCvBox2D currentRegion;
      //Updates the current location
      CvInvoke.cvCamShift(matchMask.Ptr, startRegion, new MCvTermCriteria(10, 1.0e-8), out comp, out currentRegion);

      #region find the SURF features that belongs to the current Region
      MatchedSURFFeature[] featuesInCurrentRegion;
      using (MemStorage stor = new MemStorage())
      {
         Contour<System.Drawing.PointF> contour = new Contour<PointF>(stor);
         contour.PushMulti(currentRegion.GetVertices(), Emgu.CV.CvEnum.BACK_OR_FRONT.BACK);
         CvInvoke.cvBoundingRect(contour.Ptr, 1); //this is required before calling the InContour function
         featuesInCurrentRegion = Array.FindAll(matchedFeature, delegate(MatchedSURFFeature f)
         {
            return contour.InContour(f.ObservedFeature.Point.pt) >= 0;
         });
      }
      #endregion

      // Filter by consistent scale/rotation, then fit the homography.
      return GetHomographyMatrixFromMatchedFeatures(VoteForSizeAndOrientation(featuesInCurrentRegion, 1.5, 20 ));
   }
}
/// <summary>
/// Extracts the contours of Maximally Stable Extremal Regions
/// </summary>
/// <param name="image">The image where mser will be extracted from</param>
/// <param name="mask">Can be null if not needed. Optional parameter for the region of interest</param>
/// <param name="param">MSER parameter</param>
/// <param name="storage">The storage where the contour will be saved</param>
/// <returns>The MSER regions</returns>
public Seq<Point>[] ExtractContours(IImage image, Image<Gray, Byte> mask, ref MSERDetector param, MemStorage storage)
{
   // The native call writes a pointer to a sequence-of-sequences here.
   IntPtr mserPtr = IntPtr.Zero;
   CvInvoke.cvExtractMSER(image.Ptr, mask, ref mserPtr, storage, param);

   // Unwrap the outer sequence of CvSeq pointers, then wrap each inner
   // pointer as a point sequence backed by the caller's storage.
   IntPtr[] innerSeqPtrs = new Seq<IntPtr>(mserPtr, storage).ToArray();
   Seq<Point>[] regions = new Seq<Point>[innerSeqPtrs.Length];
   for (int i = 0; i < innerSeqPtrs.Length; i++)
   {
      regions[i] = new Seq<Point>(innerSeqPtrs[i], storage);
   }
   return regions;
}
/// <summary>
/// Build an RGB image in depth-camera resolution by looking up, for every depth
/// pixel, the corresponding RGB region via the UV map and averaging its color.
/// Optionally also computes the bounding rectangle of the valid RGB area inside
/// the depth frame (written to <paramref name="rgbInDepthRect"/>).
/// </summary>
/// <param name="depth">Depth image; only its dimensions are used here.</param>
/// <param name="rgb">Full-resolution RGB image to sample from.</param>
/// <param name="uvmap">Per-depth-pixel UV coordinates into the RGB image.
/// NOTE(review): channels are scaled by width/255 and height/255, implying UV
/// values in [0,255] — confirm against the producer of this map.</param>
/// <param name="getRgbContour">When true, also compute rgbInDepthRect.</param>
/// <param name="rgbInDepthRect">Updated with the bounding rectangle of the
/// largest-enough valid region when getRgbContour is true.</param>
public static Image<Rgb, byte> GetRgbOfDepthPixels(Image<Gray, float> depth, Image<Rgb, byte> rgb, Image<Rgb, float> uvmap, bool getRgbContour, ref Rectangle rgbInDepthRect)
{
   var resImg = new Image<Rgb, byte>(depth.Width, depth.Height);
   // number of rgb pixels per depth pixel
   var regWidth = rgb.Width / depth.Width;
   var regHeight = rgb.Height / depth.Height;
   var rgbWidth = rgb.Width;
   var rgbHeight = rgb.Height;
   var xfactor = 1.0f / 255.0f * rgbWidth;
   var yfactor = 1.0f / 255.0f * rgbHeight;
   var uvmapData = uvmap.Data;
   var rgbData = rgb.Data;
   var resImgData = resImg.Data;
   Image<Gray, byte> contourImg = null;
   byte[, ,] contourImgData = null;
   if (getRgbContour)
   {
      // dummy image to extract contour of RGB image in depth image
      contourImg = new Image<Gray, byte>(depth.Width, depth.Height);
      contourImgData = contourImg.Data;
   }
   // Rows are processed in parallel; each iteration writes only its own row.
   Parallel.For(0, depth.Height, y =>
   {
      for (int x = 0; x < depth.Width; x++)
      {
         // Map the depth pixel into RGB coordinates (rounded to nearest).
         int xindex = (int)(uvmapData[y, x, 0] * xfactor + 0.5);
         int yindex = (int)(uvmapData[y, x, 1] * yfactor + 0.5);
         double rsum = 0, gsum = 0, bsum = 0;
         int pixelcount = 0;
         // Average the RGB block centered on the mapped position, clipped to bounds.
         for (int rx = xindex - regWidth / 2; rx < xindex + regWidth / 2; rx++)
         {
            for (int ry = yindex - regHeight / 2; ry < yindex + regHeight / 2; ry++)
            {
               if (rx > 0 && ry > 0 && rx < rgbWidth && ry < rgbHeight)
               {
                  rsum += rgbData[ry, rx, 0];
                  gsum += rgbData[ry, rx, 1];
                  bsum += rgbData[ry, rx, 2];
                  pixelcount++;
               }
            }
         }
         // NOTE(review): when the mapped block lies entirely out of bounds,
         // pixelcount is 0 and these divisions are 0.0/0 = NaN; the byte cast
         // then yields 0 in practice — confirm this fallback is intended.
         resImgData[y, x, 0] = (byte)(rsum / pixelcount);
         resImgData[y, x, 1] = (byte)(gsum / pixelcount);
         resImgData[y, x, 2] = (byte)(bsum / pixelcount);
         // The sum is an int, so "> 0.01" is effectively "any channel non-zero".
         if ((resImgData[y, x, 0] + resImgData[y, x, 1] + resImgData[y, x, 2]) > 0.01)
         {
            if (getRgbContour && contourImgData != null)
            {
               contourImgData[y, x, 0] = 255;
            }
         }
      }
   });
   if (getRgbContour)
   {
      using (var storage = new MemStorage())
      {
         // Find the (last) sufficiently large external contour of the valid-pixel
         // mask and report its bounding rectangle.
         for (var contours = contourImg.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_EXTERNAL, storage); contours != null; contours = contours.HNext)
         {
            var currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
            // Threshold: a quarter-QVGA area (160*120) in depth pixels.
            if (currentContour.Area > 160 * 120)
            {
               rgbInDepthRect = currentContour.BoundingRectangle;
            }
         }
      }
   }
   return (resImg);
}
// P/Invoke binding for OpenCV's cvLatentSvmDetectObjects (latent SVM /
// deformable part model object detector). The detections are allocated in the
// supplied MemStorage and returned as a sequence.
// NOTE(review): the [DllImport] attribute for this declaration is not visible
// in this chunk; overlap_threshold is presumably the non-maximum-suppression
// overlap and numThreads the native parallelism — confirm against the OpenCV
// cvLatentSvmDetectObjects documentation.
internal static extern Seq cvLatentSvmDetectObjects(
   IplImage image,
   LatentSvmDetector detector,
   MemStorage storage,
   float overlap_threshold,
   int numThreads);
/// <summary>
/// Sweep the binarization threshold (thr/value for value in [min, max]) looking
/// for the threshold that yields the most (up to 8) character-sized contours.
/// Annotates the color/binary working images and returns the best thresholded
/// source image, the surviving bounding rectangles and their count.
/// </summary>
/// <param name="thr">Base threshold; each candidate threshold is thr/value.</param>
/// <param name="grayImage">Grayscale source to threshold.</param>
/// <param name="min">First divisor of the sweep.</param>
/// <param name="max">Last divisor of the sweep.</param>
/// <param name="list_out">Bounding rectangles of the best iteration (null if none found).</param>
/// <param name="count">Number of rectangles in list_out.</param>
/// <param name="color">Color image that contour outlines are drawn onto.</param>
/// <param name="color_out">The annotated color image.</param>
/// <param name="bi">Binary mask image that accepted contours are filled into.</param>
/// <param name="bi_out">The annotated binary mask.</param>
/// <returns>The thresholded image of the last (or winning) iteration.</returns>
private Image<Gray, byte> search(double thr, Image<Gray, byte> grayImage, double min, double max, out List<Rectangle> list_out, out int count, Image<Bgr, byte> color, out Image<Bgr, byte> color_out, Image<Gray, byte> bi, out Image<Gray, byte> bi_out)
{
   List<Rectangle> listR = null, list_best = null;
   Image<Bgr, byte> color2 = color;
   Image<Gray, byte> src = grayImage;
   Image<Gray, byte> bi2 = bi;
   int c = 0, c_best = 0;
   for (double value = min; value <= max; value += 0.1)
   {
      // BUG FIX: listR was declared null and never allocated, so the first
      // accepted contour crashed with a NullReferenceException; the counter and
      // list were also never reset between threshold iterations (the analogous
      // sweep in IdentifyContours resets both). Start each iteration fresh.
      listR = new List<Rectangle>();
      c = 0;
      double t = thr / value;
      src = grayImage.ThresholdBinary(new Gray(t), new Gray(255));
      using (MemStorage storage = new MemStorage())
      {
         Contour<Point> contours = src.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);
         while (contours != null)
         {
            Rectangle rect = contours.BoundingRectangle;
            CvInvoke.cvDrawContours(color2, contours, new MCvScalar(255, 255, 0), new MCvScalar(0), -1, 1, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
            // Keep only roughly character-sized contours.
            if (rect.Width > 20 && rect.Width < 150 && rect.Height > 80 && rect.Height < 150)
            {
               c++;
               CvInvoke.cvDrawContours(color2, contours, new MCvScalar(0, 255, 255), new MCvScalar(255), -1, 3, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
               color2.Draw(contours.BoundingRectangle, new Bgr(0, 255, 0), 2);
               bi2.Draw(contours, new Gray(255), -1);
               listR.Add(contours.BoundingRectangle);
            }
            contours = contours.HNext;
         }
         // Remove rectangles whose top-left corner falls inside another rectangle
         // (nested/duplicate detections).
         // NOTE(review): the Y comparisons use Width, faithfully kept from the
         // original code — confirm whether Height was intended.
         for (int i = 0; i < c; i++)
         {
            for (int j = i + 1; j < c; j++)
            {
               if ((listR[j].X < (listR[i].X + listR[i].Width) && listR[j].X > listR[i].X) && (listR[j].Y < (listR[i].Y + listR[i].Width) && listR[j].Y > listR[i].Y))
               {
                  listR.RemoveAt(j);
                  c--;
                  j--;
               }
               else if ((listR[i].X < (listR[j].X + listR[j].Width) && listR[i].X > listR[j].X) && (listR[i].Y < (listR[j].Y + listR[j].Width) && listR[i].Y > listR[j].Y))
               {
                  listR.RemoveAt(i);
                  c--;
                  i--;
                  break;
               }
            }
         }
      }
      // Track the best iteration; 8 contours is a perfect result, stop early.
      if (c <= 8 && c > c_best)
      {
         list_best = listR;
         c_best = c;
         if (c == 8)
         {
            color_out = color2;
            bi_out = bi2;
            list_out = list_best;
            count = c_best;
            return (src);
         }
      }
   }
   color_out = color2;
   bi_out = bi2;
   list_out = list_best;
   count = c_best;
   return (src);
}
/// <summary>
/// Scan the chart data points from right to left and extract "signatures":
/// contiguous runs that rise above ~0.01 and fall back below 0.05, at least 5
/// points wide. Each signature is returned as { start X, end X, enclosed area }.
/// The scanned points are recolored (red = run start, yellow = run end,
/// silver = interior) as a visual side effect.
/// </summary>
/// <param name="dataPoints">Chart points whose YValues[0] holds the profile value.</param>
/// <returns>One double[3] per detected signature: { X(r1), X(r2), area }.</returns>
public List<double[]> getSignatures(System.Windows.Forms.DataVisualization.Charting.DataPointCollection dataPoints)
{
   int r1 = -1, r2 = 0;
   double weight = 0, totalWeight = 0;
   List<double[]> sigList = new List<double[]>();
   int length = dataPoints.Count;
   for (int i = length - 1; i > 0; i--)
   {
      double angCurrent = dataPoints[i].YValues[0];
      // Open a run at the first point that rises above the noise floor.
      if ((angCurrent > 0.01) && (r1 == -1))
      {
         r1 = i;
      }
      // Keep moving the run's end while the value is low.
      if (r1 != -1 && angCurrent < 0.05)
      {
         r2 = i;
      }
      // A run at least 5 points wide is a signature.
      if ((r1 != -1 && r2 != 0) && (Math.Abs(r2 - r1) > 5))
      {
         dataPoints[r2].Color = System.Drawing.Color.Yellow;
         dataPoints[r1].Color = System.Drawing.Color.Red;
         // BUG FIX: the MemStorage was allocated for every signature and never
         // disposed, leaking native memory on each call; scope it with 'using'
         // so the contour's backing storage is released deterministically.
         using (MemStorage myMem = new MemStorage())
         {
            Contour<System.Drawing.PointF> myCont = new Contour<System.Drawing.PointF>(myMem);
            System.Drawing.PointF myPoint = new System.Drawing.PointF();
            // Push the run's interior points as a polygon; its area is the weight.
            for (int t = r1 - 1; t > r2 + 1; t--)
            {
               myPoint.X = ((float)dataPoints[t].XValue);
               myPoint.Y = (float)dataPoints[t].YValues[0];
               dataPoints[t].Color = System.Drawing.Color.Silver;
               myCont.Push(myPoint);
            }
            weight = myCont.Area;
         }
         // Track the largest signature weight seen (currently informational only).
         if (weight > totalWeight)
         {
            totalWeight = weight;
         }
         double[] sigArr = { 0, 0, 0 };
         sigArr[0] = (float)dataPoints[r1].XValue;
         sigArr[1] = (float)dataPoints[r2].XValue;
         sigArr[2] = weight;
         sigList.Add(sigArr);
         // Reset the run trackers for the next signature.
         r2 = 0;
         weight = 0;
         r1 = -1;
      }
   }
   return (sigList);
}
//motion detection processing
// Updates the foreground/motion models with the new frame, annotates the frame
// with grid lines, position labels, red/green zone flashes and motion rectangles,
// and records the zone of each significant motion in motionQueue.
// NOTE(review): this method reads/writes many fields of the enclosing class
// (_forgroundDetector, _motionHistory, dsPos, font, motionQueue, the red*/green*
// flags and counters, l5, show* toggles) — their semantics are inferred from use.
private Image<Bgr, Byte> ProcessFrame(Image<Bgr, Byte> image)
{
   // using (Image<Bgr, Byte> image = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC))
   using (MemStorage storage = new MemStorage()) //create storage for motion components
   {
      // Lazily create the background/foreground model on the first frame.
      if (_forgroundDetector == null)
      {
         //_forgroundDetector = new BGCodeBookModel<Bgr>();
         // _forgroundDetector = new FGDetector<Bgr>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
         _forgroundDetector = new BGStatModel<Bgr>(image, Emgu.CV.CvEnum.BG_STAT_TYPE.FGD_STAT_MODEL);
      }
      _forgroundDetector.Update(image);
      // imageBoxFrameGrabber.Image = image;
      //update the motion history
      _motionHistory.Update(_forgroundDetector.ForgroundMask);
      #region get a copy of the motion mask and enhance its color
      double[] minValues, maxValues;
      Point[] minLoc, maxLoc;
      _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
      Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
      #endregion
      //create the motion image
      Image<Bgr, Byte> motionImage = new Image<Bgr, byte>(motionMask.Size);
      //display the motion pixels in blue (first channel)
      motionImage[0] = motionMask;
      //Threshold to define a motion area, reduce the value to detect smaller motion
      double minArea = 100;
      storage.Clear(); //clear the storage
      Seq<MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);
      // Optional 2x2 grid overlay splitting the frame into four zones.
      if (showGridLines)
      {
         LineSegment2D line = new LineSegment2D(new Point(0, 169), new Point(520, 169));
         LineSegment2D line2 = new LineSegment2D(new Point(259, 0), new Point(259, 340));
         image.Draw(line, new Bgr(Color.White), 2);
         image.Draw(line2, new Bgr(Color.White), 2);
      }
      // Draw the configured position numbers/names from the dsPos data set.
      // NOTE(review): row layout assumed to be [0]=number, [1]=x, [2]=y, [3]=name.
      if (displayPosNum)
      {
         for (int i = 0; i < dsPos.Tables[0].Rows.Count; i++)
         {
            if (showPos)
            {
               image.Draw("# " + dsPos.Tables[0].Rows[i][0].ToString(), ref font, new Point(int.Parse(dsPos.Tables[0].Rows[i][1].ToString()) - 120, int.Parse(dsPos.Tables[0].Rows[i][2].ToString()) - 50), new Bgr(Color.Yellow));
            }
            if (showNames)
            {
               image.Draw(dsPos.Tables[0].Rows[i][3].ToString(), ref font, new Point(int.Parse(dsPos.Tables[0].Rows[i][1].ToString()) - 120, int.Parse(dsPos.Tables[0].Rows[i][2].ToString()) - 70), new Bgr(Color.Yellow));
            }
         }
      }
      // Flash a red border around each of the four zones for ~100 frames when
      // its redN flag is set; green borders last ~200 frames.
      if (red1 && red1cnt < 100)
      {
         red1cnt++;
         image.Draw(new Rectangle(0, 0, 255, 165), new Bgr(Color.Red), 3);
         if (red1cnt == 99) { red1 = false; red1cnt = 0; }
      }
      if (red2 && red2cnt < 100)
      {
         red2cnt++;
         image.Draw(new Rectangle(262, 0, 257, 167), new Bgr(Color.Red), 3);
         if (red2cnt == 99) { red2 = false; red2cnt = 0; }
      }
      if (red3 && red3cnt < 100)
      {
         red3cnt++;
         image.Draw(new Rectangle(0, 170, 260, 170), new Bgr(Color.Red), 3);
         if (red3cnt == 99) { red3 = false; red3cnt = 0; }
      }
      if (red4 && red4cnt < 100)
      {
         red4cnt++;
         image.Draw(new Rectangle(260, 170, 260, 170), new Bgr(Color.Red), 3);
         if (red4cnt == 99) { red4 = false; red4cnt = 0; }
      }
      if (green1 && green1cnt < 200)
      {
         green1cnt++;
         image.Draw(new Rectangle(0, 0, 255, 165), new Bgr(Color.Green), 3);
         if (green1cnt == 199) { green1 = false; green1cnt = 0; }
      }
      if (green2 && green2cnt < 200)
      {
         green2cnt++;
         image.Draw(new Rectangle(262, 0, 257, 167), new Bgr(Color.Green), 3);
         if (green2cnt == 199) { green2 = false; green2cnt = 0; }
      }
      if (green3 && green3cnt < 200)
      {
         green3cnt++;
         image.Draw(new Rectangle(0, 170, 260, 170), new Bgr(Color.Green), 3);
         if (green3cnt == 199) { green3 = false; green3cnt = 0; }
      }
      if (green4 && green4cnt < 200)
      {
         green4cnt++;
         image.Draw(new Rectangle(260, 170, 260, 170), new Bgr(Color.Green), 3);
         if (green4cnt == 199) { green4 = false; green4cnt = 0; }
      }
      //iterate through each of the motion component
      foreach (MCvConnectedComp comp in motionComponents)
      {
         //reject the components that have small area;
         if (comp.area < minArea)
         {
            continue;
         }
         // find the angle and motion pixel count of the specific area
         double angle, motionPixelCount;
         _motionHistory.MotionInfo(comp.rect, out angle, out motionPixelCount);
         //if (motionPixelCount > 100000) { image.Draw(l5 , new Bgr(Color.Red), 10); } else
         // NOTE(review): the guard above is commented out, so this block runs for
         // every component; l5 is not declared in this method and is presumably a
         // field of the class — confirm.
         { image.Draw(l5 , new Bgr(Color.Green), 10); }
         //reject the area that contains too few motion
         // if (motionPixelCount < comp.area * 0.8) continue;
         if (motionPixelCount < comp.area * 0.05)
         {
            continue;
         }
         int nearpos = nearestPosition(comp.rect.X, comp.rect.Y);
         //if (1000 > comp.area) continue;
         //Draw each individual motion in red
         // DrawMotion(motionImage, comp.rect, angle, new Bgr(Color.Red));
         // Zones 3 and 4 require a larger area before the motion is counted.
         if (nearpos == 3 && comp.area < 500)
         {
            continue;
         }
         if (nearpos == 4 && comp.area < 500)
         {
            continue;
         }
         if (comp.rect.X > 60 && comp.rect.Y > 60)
         {
            // Keep a sliding window of the last 100 motion zone hits.
            if (motionQueue.Count == 100)
            {
               motionQueue.Dequeue();
               motionQueue.Enqueue(nearpos);
            }
            else
            {
               motionQueue.Enqueue(nearpos);
            }
            // LineSegment2D l5 = new LineSegment2D(new Point(comp.rect.X, comp.rect.Y), new Point(comp.rect.X, comp.rect.Y));
            // image.Draw(l5, new Bgr(Color.Red), 10);
            // image.Draw(comp.area.ToString(), ref font, new Point(comp.rect.X, comp.rect.Y), new Bgr(Color.LightGreen));
            if (showMotion)
            {
               image.Draw(comp.rect, new Bgr(Color.Yellow), 2);
            }
         }
      }
      // find and draw the overall motion angle
      double overallAngle, overallMotionPixelCount;
      _motionHistory.MotionInfo(motionMask.ROI, out overallAngle, out overallMotionPixelCount);
      // DrawMotion(motionImage, motionMask.ROI, overallAngle, new Bgr(Color.Green));
      //Display the amount of motions found on the current image
      // UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", motionComponents.Total, overallMotionPixelCount));
      //Display the image of the motion
      // imageBoxFrameGrabber.Image = motionImage; ///motion image
      return (image);
   }
}
/// <summary>
/// Method used to process the image and set the output result images.
/// Sweeps the binarization threshold around 127 looking for the setting that
/// yields the best set (up to 8) of character-sized contours with similar heights.
/// </summary>
/// <param name="colorImage">Source color image.</param>
/// <param name="thresholdValue">Value used for thresholding.
/// NOTE(review): this parameter is not referenced by the active code below.</param>
/// <param name="invert">Whether to invert the grayscale image (currently only
/// used by commented-out code).</param>
/// <param name="processedGray">Resulting gray image.</param>
/// <param name="processedColor">Resulting color image.</param>
/// <param name="list">Bounding rectangles of the detected character contours.</param>
/// <returns>The number of detected character contours.</returns>
public int IdentifyContours(Bitmap colorImage, int thresholdValue, bool invert, out Bitmap processedGray, out Bitmap processedColor, out List<Rectangle> list)
{
   List<Rectangle> listR = new List<Rectangle>();
   #region Conversion To grayscale
   Image<Gray, byte> grayImage = new Image<Gray, byte>(colorImage);
   //grayImage = grayImage.Resize(400, 400, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
   Image<Gray, byte> bi = new Image<Gray, byte>(grayImage.Width, grayImage.Height);
   Image<Bgr, byte> color = new Image<Bgr, byte>(colorImage);
   #endregion
   #region Image normalization and inversion (if required)
   ////CvInvoke.cvAdaptiveThreshold(grayImage, grayImage, 255,
   //// Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, 21, 2);
   ////string ff = grayImage.GetAverage().Intensity;
   ////grayImage = grayImage.ThresholdBinary(new Gray(grayImage.GetAverage().Intensity / 2.5), new Gray(255));
   ////double thr = cout_avg(grayImage) / 1.5;
   //double thr = cout_avg_new(grayImage)/1.5;
   //grayImage = grayImage.ThresholdBinary(new Gray(thr), new Gray(255));
   ////grayImage = grayImage.Dilate(3);
   //if (invert)
   //{
   //   grayImage._Not();
   //}
   //#endregion
   //#region Extracting the Contours
   //using (MemStorage storage = new MemStorage())
   //{
   //   Contour<Point> contours = grayImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);
   //   while (contours != null)
   //   {
   //      Rectangle rect = contours.BoundingRectangle;
   //      //Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
   //      //color.Draw(currentContour.BoundingRectangle, new Bgr(0, 255, 0), 1);
   //      CvInvoke.cvDrawContours(color, contours, new MCvScalar(255, 255, 0), new MCvScalar(0), -1, 1, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
   //      if (rect.Width > 20 && rect.Width < 150
   //          && rect.Height > 80 && rect.Height < 150)
   //      {
   //         count++;
   //         Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
   //         CvInvoke.cvDrawContours(color, contours, new MCvScalar(0,255,255), new MCvScalar(255), -1, 3, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
   //         color.Draw(contours.BoundingRectangle, new Bgr(0, 255, 0), 2);
   //         bi.Draw(contours, new Gray(255), -1);
   //         listR.Add(contours.BoundingRectangle);
   //      }
   //      contours = contours.HNext;
   //   }
   //   for (int i = 0; i < count; i++)
   //   {
   //      for (int j = i + 1; j < count; j++)
   //      {
   //         if( (listR[j].X < (listR[i].X + listR[i].Width) && listR[j].X > listR[i].X)
   //             && (listR[j].Y < (listR[i].Y + listR[i].Width) && listR[j].Y > listR[i].Y) )
   //         {
   //            listR.RemoveAt(j);
   //            count--;
   //            j --;
   //         }
   //         else if( (listR[i].X < (listR[j].X + listR[j].Width) && listR[i].X > listR[j].X)
   //             && (listR[i].Y < (listR[j].Y + listR[j].Width) && listR[i].Y > listR[j].Y))
   //         {
   //            listR.RemoveAt(i);
   //            count--;
   //            i--;
   //            break;
   //         }
   //      }
   //   }
   //}
   #endregion
   #region tim gia tri thresh de co so ky tu lon nhat
   //Image<Gray, byte> bi2 = new Image<Gray, byte>(grayImage.Width, grayImage.Height);
   //Image<Bgr, byte> color2 = new Image<Bgr, byte>(colorImage);
   //double thr = cout_avg_new(grayImage);
   // Seed the sweep with the image's average intensity as a reference threshold.
   double thr = 0;
   if (thr == 0)
   {
      thr = grayImage.GetAverage().Intensity;
   }
   //double thr = 50;
   //double min = 0, max = 255;
   //if (thr - 80 > 0)
   //{
   //   min = thr - 80;
   //}
   //if (thr + 80 < 255)
   //{
   //   max = thr + 80;
   //}
   //List<Rectangle> list_best = null;
   // Best-so-far results: up to 9 rectangles plus the annotated images.
   Rectangle[] li = new Rectangle[9];
   Image<Bgr, byte> color_b = new Image<Bgr, byte>(colorImage);;
   Image<Gray, byte> src_b = grayImage.Clone();
   Image<Gray, byte> bi_b = bi.Clone();
   Image<Bgr, byte> color2;
   Image<Gray, byte> src;
   Image<Gray, byte> bi2;
   int c = 0, c_best = 0;
   //IntPtr a = color_b.Ptr;
   //CvInvoke.cvReleaseImage(ref a);
   // Try thresholds 127 +/- value, stepping outward by 3.
   for (double value = 0; value <= 127; value += 3)
   {
      for (int s = -1; s <= 1 && s + value != 1; s += 2)
      {
         color2 = new Image<Bgr, byte>(colorImage);
         //src = grayImage.Clone();
         bi2 = bi.Clone();
         listR.Clear();
         //list_best.Clear();
         c = 0;
         double t = 127 + value * s;
         src = grayImage.ThresholdBinary(new Gray(t), new Gray(255));
         using (MemStorage storage = new MemStorage())
         {
            Contour<Point> contours = src.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);
            while (contours != null)
            {
               Rectangle rect = contours.BoundingRectangle;
               CvInvoke.cvDrawContours(color2, contours, new MCvScalar(255, 255, 0), new MCvScalar(0), -1, 1, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
               double ratio = (double)rect.Width / rect.Height;
               // Accept only character-shaped contours (size, aspect ratio, margin).
               if (rect.Width > 20 && rect.Width < 150 && rect.Height > 80 && rect.Height < 180 && ratio > 0.1 && ratio < 1.1 && rect.X > 20)
               {
                  c++;
                  CvInvoke.cvDrawContours(color2, contours, new MCvScalar(0, 255, 255), new MCvScalar(255), -1, 3, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
                  color2.Draw(contours.BoundingRectangle, new Bgr(0, 255, 0), 2);
                  bi2.Draw(contours, new Gray(255), -1);
                  listR.Add(contours.BoundingRectangle);
               }
               contours = contours.HNext;
            }
         }
         //IntPtr a = color_b.Ptr;
         //CvInvoke.cvReleaseImage(ref a);
         // Remove nested/duplicate rectangles while accumulating the height sum.
         // NOTE(review): the Y comparisons use Width, not Height — kept as found.
         double avg_h = 0;
         double dis = 0;
         for (int i = 0; i < c; i++)
         {
            avg_h += listR[i].Height;
            for (int j = i + 1; j < c; j++)
            {
               if ((listR[j].X < (listR[i].X + listR[i].Width) && listR[j].X > listR[i].X) && (listR[j].Y < (listR[i].Y + listR[i].Width) && listR[j].Y > listR[i].Y))
               {
                  //avg_h -= listR[j].Height;
                  listR.RemoveAt(j);
                  c--;
                  j--;
               }
               else if ((listR[i].X < (listR[j].X + listR[j].Width) && listR[i].X > listR[j].X) && (listR[i].Y < (listR[j].Y + listR[j].Width) && listR[i].Y > listR[j].Y))
               {
                  avg_h -= listR[i].Height;
                  listR.RemoveAt(i);
                  c--;
                  i--;
                  break;
               }
            }
         }
         // dis = total deviation of contour heights from their mean; a small dis
         // means the surviving contours are uniformly sized (likely characters).
         avg_h = avg_h / c;
         for (int i = 0; i < c; i++)
         {
            dis += Math.Abs(avg_h - listR[i].Height);
         }
         if (c <= 8 && c > 1 && c > c_best && dis <= c * 8)
         {
            listR.CopyTo(li);
            c_best = c;
            color_b = color2;
            bi_b = bi2;
            src_b = src;
            //dis_b = dis;
            //if (c == 8)
            //{
            //   break;
            //}
         }
      }
      // 8 contours is a perfect result; stop the sweep.
      if (c_best == 8)
      {
         break;
      }
   }
   // NOTE(review): 'count' is not declared in this method — presumably a field
   // of the enclosing class; confirm.
   count = c_best;
   grayImage = src_b;
   color = color_b;
   bi = bi_b;
   // Rebuild the output list from the fixed-size best array, skipping empty slots.
   listR.Clear();
   for (int i = 0; i < li.Length; i++)
   {
      if (li[i].Height != 0)
      {
         listR.Add(li[i]);
      }
   }
   #endregion
   #region Asigning output
   processedColor = color.ToBitmap();
   processedGray = grayImage.ToBitmap();
   list = listR;
   #endregion
   return (count);
}
// get all of the valid contour maps, valid means circumfence > 200 px
// this was not in their code, I added this feature, but I used their logic
/// <summary>
/// Binarize the input (mode 0: light background via inverse threshold, otherwise
/// dark background via direct threshold), then build a ColorfulContourMap — the
/// contour's points and an approximated polygon, each tagged with the source
/// pixel color — for every external contour with |area| >= Constants.MIN_AREA.
/// </summary>
/// <param name="input">Color image to extract contours from.</param>
/// <param name="index">Index stored into every produced ColorfulContourMap.</param>
/// <param name="mode">0 for a light/white background, anything else for dark.</param>
/// <returns>One map per sufficiently large external contour (possibly empty).</returns>
public static List<ColorfulContourMap> getAllContourMap(Image<Bgr, byte> input, int index, int mode = 0)
{
   List<ColorfulContourMap> result = new List<ColorfulContourMap>();
   Image<Gray, byte> gray = input.Convert<Gray, byte>();
   // use for black background
   if (mode == 0)
   {
      gray = gray.SmoothGaussian(3).ThresholdBinaryInv(new Gray(245), new Gray(255)).MorphologyEx(null, CV_MORPH_OP.CV_MOP_CLOSE, 2);
   }
   // use for white background
   else
   {
      gray = gray.SmoothGaussian(3).ThresholdBinary(new Gray(100), new Gray(255)).MorphologyEx(null, CV_MORPH_OP.CV_MOP_CLOSE, 2);
   }
   using (MemStorage storage1 = new MemStorage())
   using (Image<Gray, Byte> temp = gray.Clone())   // FIX: the clone was never disposed
   {
      // FIX: the original dereferenced FindContours' result (contour.Area) before
      // any null check, crashing when no contour was found; the loop condition now
      // guards that case. The dead 'area'/'maxArea' pre-computation is removed.
      for (Contour<Point> contour = temp.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, RETR_TYPE.CV_RETR_EXTERNAL);
           contour != null;
           contour = contour.HNext)
      {
         if (Math.Abs(contour.Area) < Constants.MIN_AREA)
         {
            continue;
         }
         Contour<Point> poly = contour.ApproxPoly(1.0, storage1);
         // Tag every contour point with the color of the input pixel beneath it.
         List<ColorfulPoint> cps = new List<ColorfulPoint>();
         foreach (Point p in contour.ToList())
         {
            cps.Add(new ColorfulPoint { X = p.X, Y = p.Y, color = extractPointColor(p, input) });
         }
         // Same for the simplified polygon's points.
         List<ColorfulPoint> pcps = new List<ColorfulPoint>();
         foreach (Point p in poly.ToList())
         {
            pcps.Add(new ColorfulPoint { X = p.X, Y = p.Y, color = extractPointColor(p, input) });
         }
         result.Add(new ColorfulContourMap(cps, pcps, index));
      }
   }
   return (result);
}
// Handles the upload button: saves the posted image, computes its gray-level
// histogram, then counts "red cell" contours (adaptive threshold) and "malaria"
// contours (fixed binary threshold), annotates the image, saves the result and
// fills the page labels/charts with the statistics.
protected void Button1_Click(object sender, EventArgs e)
{
   if (FileUploader.HasFile)
   {
      try
      {
         // Persist the upload and reload it as an Emgu image.
         FileUploader.SaveAs(Server.MapPath(DefaultFileName) + FileUploader.FileName);
         Image<Bgr, Byte> originalImage = new Image<Bgr, byte>(Server.MapPath(DefaultFileName) + FileUploader.FileName);
         int width, height, channels = 0;
         width = originalImage.Width;
         height = originalImage.Height;
         channels = originalImage.NumberOfChannels;
         Image<Bgr, byte> colorImage = new Image<Bgr, byte>(originalImage.ToBitmap());
         Image<Gray, byte> grayImage = colorImage.Convert<Gray, Byte>();
         // 255-bin intensity histogram; track its peak (mode) and location.
         // NOTE(review): the histogram has 255 bins but GrayHist is sized 256 and
         // the scan stops at 254 — confirm the intended bin count.
         float[] GrayHist;
         DenseHistogram Histo = new DenseHistogram(255, new RangeF(0, 255));
         Histo.Calculate(new Image<Gray, Byte>[] { grayImage }, true, null);
         GrayHist = new float[256];
         Histo.MatND.ManagedArray.CopyTo(GrayHist, 0);
         float largestHist = GrayHist[0];
         int thresholdHist = 0;
         for (int i = 0; i < 255; i++)
         {
            if (GrayHist[i] > largestHist)
            {
               largestHist = GrayHist[i];
               thresholdHist = i;
            }
         }
         // Pass 1: adaptive threshold, count wide contours as red cells (drawn red).
         grayImage = grayImage.ThresholdAdaptive(new Gray(255), ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, THRESH.CV_THRESH_BINARY, 85, new Gray(4));
         colorImage = colorImage.Copy();
         int countRedCells = 0;
         using (MemStorage storage = new MemStorage())
         {
            for (Contour<Point> contours = grayImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage); contours != null; contours = contours.HNext)
            {
               Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
               if (currentContour.BoundingRectangle.Width > 20)
               {
                  CvInvoke.cvDrawContours(colorImage, contours, new MCvScalar(0, 0, 255), new MCvScalar(0, 0, 255), -1, 2, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
                  colorImage.Draw(currentContour.BoundingRectangle, new Bgr(0, 255, 0), 1);
                  countRedCells++;
               }
            }
         }
         // Pass 2: fixed threshold on the original, count contours as malaria (drawn blue).
         Image<Gray, byte> grayImageCopy2 = originalImage.Convert<Gray, Byte>();
         grayImageCopy2 = grayImageCopy2.ThresholdBinary(new Gray(100), new Gray(255));
         colorImage = colorImage.Copy();
         int countMalaria = 0;
         using (MemStorage storage = new MemStorage())
         {
            for (Contour<Point> contours = grayImageCopy2.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, storage); contours != null; contours = contours.HNext)
            {
               Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
               if (currentContour.BoundingRectangle.Width > 20)
               {
                  CvInvoke.cvDrawContours(colorImage, contours, new MCvScalar(255, 0, 0), new MCvScalar(255, 0, 0), -1, 2, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
                  colorImage.Draw(currentContour.BoundingRectangle, new Bgr(0, 255, 0), 1);
                  countMalaria++;
               }
            }
         }
         // Save the annotated output and populate the page controls.
         colorImage.Save(Server.MapPath(DefaultFileName2) + FileUploader.FileName);
         inputDiv.Attributes["style"] = "display: block; margin-left: auto; margin-right: auto";
         outputDiv.Attributes["style"] = "display: block; margin-left: auto; margin-right: auto";
         Image1.ImageUrl = this.ResolveUrl(DefaultFileName + FileUploader.FileName);
         Image2.ImageUrl = this.ResolveUrl(DefaultFileName2 + FileUploader.FileName);
         Chart1.DataBindTable(GrayHist);
         Label1.Text = "Uploaded Successfully";
         Label2.Text = "File name: " + FileUploader.PostedFile.FileName + "<br>" + "File Size: " + FileUploader.PostedFile.ContentLength + " kb<br>" + "Content type: " + FileUploader.PostedFile.ContentType + "<br>" + "Resolution: " + width.ToString() + "x" + height.ToString() + "<br>" + "Number of channels: " + channels.ToString() + "<br>" + "Histogram (maximum value): " + largestHist + " @ " + thresholdHist;
         LabelRed.Text = countRedCells.ToString();
         LabelMalaria.Text = countMalaria.ToString();
      }
      catch (Exception ex)
      {
         Label1.Text = "ERROR: " + ex.Message.ToString();
         Label2.Text = "";
      }
   }
   else
   {
      Label1.Text = "You have not specified a file.";
      Label2.Text = "";
   }
}
/// <summary>
/// Grabs one camera frame, detects rectangle-like contours on its Canny edges,
/// draws the detected boxes plus center cross-hairs, and pushes the results
/// into the two picture boxes (raw frame -> picturebox1, detections -> picturebox2).
/// </summary>
/// <param name="picturebox1">Receives the raw frame with cross-hairs.</param>
/// <param name="picturebox2">Receives the detection overlay image.</param>
private void GetCameraXY(System.Windows.Forms.PictureBox picturebox1, System.Windows.Forms.PictureBox picturebox2)
{
    Image<Bgr, Byte> frame = capture.QueryFrame();
    //Image<Bgr, Byte> frame = new Image<Bgr, Byte>("Capture.jpg");
    if (frame != null)
    {
        Image<Gray, Byte> gray = frame.Convert<Gray, Byte>();
        double cannyThreshold = 180.0;
        double cannyThresholdLinking = 120.0;
        Image<Gray, Byte> cannyEdges = gray.Canny(cannyThreshold, cannyThresholdLinking);
        // NOTE(review): triangleList is never populated anywhere in this method —
        // the triangle-detection branch (currentContour.Total == 3) from the original
        // Emgu sample appears to have been removed, so the DarkBlue draw loop below
        // is dead code. Confirm whether triangle support should be restored or deleted.
        List<Triangle2DF> triangleList = new List<Triangle2DF>();
        List<MCvBox2D> boxList = new List<MCvBox2D>(); //a box is a rotated rectangle
        using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
            for (Contour<Point> contours = cannyEdges.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage); contours != null; contours = contours.HNext)
            {
                Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
                if (currentContour.Area > 400 && currentContour.Area < 20000) // area gate: between 400 and 20000 px^2
                {
                    if (currentContour.Total == 4) //The contour has 4 vertices.
                    {
                        // determine if all the angles in the contour are within [80, 100] degree
                        bool isRectangle = true;
                        Point[] pts = currentContour.ToArray();
                        LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
                        for (int i = 0; i < edges.Length; i++)
                        {
                            double angle = Math.Abs(edges[(i + 1) % edges.Length].GetExteriorAngleDegree(edges[i]));
                            if (angle < 80 || angle > 100)
                            {
                                isRectangle = false;
                                break;
                            }
                        }
                        if (isRectangle)
                        {
                            boxList.Add(currentContour.GetMinAreaRect());
                        }
                    }
                }
            }
        Image<Bgr, Byte> triangleRectangleImage = frame.CopyBlank();
        foreach (Triangle2DF triangle in triangleList) // dead loop: triangleList is always empty (see note above)
        {
            triangleRectangleImage.Draw(triangle, new Bgr(Color.DarkBlue), 2);
        }
        foreach (MCvBox2D box in boxList)
        {
            /*
             * frm.SetText(frm.Controls["textBoxImageY"], box.center.Y.ToString());
             * frm.SetText(frm.Controls["textBoxDeg"], box.angle.ToString());
             * frm.SetText(frm.Controls["textBoxImageX"], box.center.X.ToString());
             * */
            // Flag that at least one rectangle was seen this frame.
            CameraHasData = true;
            triangleRectangleImage.Draw(box, new Bgr(Color.DarkOrange), 2);
        }
        // add cross hairs to image (integer division intentionally snaps to pixel grid)
        int totalwidth = frame.Width;
        int totalheight = frame.Height;
        PointF[] linepointshor = new PointF[] { new PointF(0, totalheight / 2), new PointF(totalwidth, totalheight / 2) };
        PointF[] linepointsver = new PointF[] { new PointF(totalwidth / 2, 0), new PointF(totalwidth / 2, totalheight) };
        triangleRectangleImage.DrawPolyline(Array.ConvertAll<PointF, Point>(linepointshor, Point.Round), false, new Bgr(Color.AntiqueWhite), 1);
        triangleRectangleImage.DrawPolyline(Array.ConvertAll<PointF, Point>(linepointsver, Point.Round), false, new Bgr(Color.AntiqueWhite), 1);
        picturebox2.Image = triangleRectangleImage.ToBitmap();
        frame.DrawPolyline(Array.ConvertAll<PointF, Point>(linepointshor, Point.Round), false, new Bgr(Color.AntiqueWhite), 1);
        frame.DrawPolyline(Array.ConvertAll<PointF, Point>(linepointsver, Point.Round), false, new Bgr(Color.AntiqueWhite), 1);
        picturebox1.Image = frame.ToBitmap();
    }
}
/// <summary>
/// Extracts a hand-gesture feature vector from a skin-segmented image: finds the
/// biggest contour, its convex hull / min-area box / convexity defects (stored in
/// static fields), estimates the palm center, then builds fingertip positions and
/// finger angles into a FeatureVector. Draws diagnostics onto <paramref name="imagen"/>.
/// </summary>
/// <param name="skin">Binary skin mask to extract the hand contour from.</param>
/// <param name="imagen">Color image that receives the diagnostic drawings.</param>
/// <returns>The assembled feature vector.</returns>
public static FeatureVector ExtractFeatures(Image<Gray, byte> skin, Image<Bgr, Byte> imagen)
{
    Contour<Point> currentContour = null;
    Contour<Point> biggestContour = null;
    using (MemStorage storage = new MemStorage())
    {
        #region extractContourAndHull
        Contour<Point> contours = skin.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);
        Double Result1 = 0;
        Double Result2 = 0;
        // Walk the contour list keeping the one with the largest area.
        while (contours != null)
        {
            Result1 = contours.Area;
            if (Result1 > Result2)
            {
                Result2 = Result1;
                biggestContour = contours;
            }
            contours = contours.HNext;
        }
        if (biggestContour != null)
        {
            // Simplify the contour before computing hull/box/defects.
            currentContour = biggestContour.ApproxPoly(biggestContour.Perimeter * 0.0025, storage);
            imagen.Draw(currentContour, new Bgr(Color.LimeGreen), 2);
            biggestContour = currentContour;
            hull = biggestContour.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
            box = biggestContour.GetMinAreaRect();
            PointF[] points = box.GetVertices();
            Point[] ps = new Point[points.Length]; // NOTE(review): ps is computed but never used afterwards
            for (int i = 0; i < points.Length; i++)
            {
                ps[i] = new Point((int)points[i].X, (int)points[i].Y);
            }
            imagen.DrawPolyline(hull.ToArray(), true, new Bgr(200, 125, 75), 2);
            // imagen.Draw(new CircleF(new PointF(box.center.X, box.center.Y), 3), new Bgr(200, 125, 75), 2);
            // PointF center;
            // float radius;
            // Keep only hull points that are far enough apart (1/10 of the box width).
            filteredHull = new Seq<Point>(storage);
            for (int i = 0; i < hull.Total; i++)
            {
                // NOTE(review): hull[i + 1] is read when i == hull.Total - 1; whether the
                // Seq indexer wraps or reads out of range depends on the Emgu version — verify.
                if (Math.Sqrt(Math.Pow(hull[i].X - hull[i + 1].X, 2) + Math.Pow(hull[i].Y - hull[i + 1].Y, 2)) > box.size.Width / 10)
                {
                    filteredHull.Push(hull[i]);
                }
            }
            defects = biggestContour.GetConvexityDefacts(storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
            defectArray = defects.ToArray();
        }
    }
    #endregion
    #region find palm center(needs change)
    searchRadius = 6;
    contourReduction = 3;
    //this.result = null;
    // NOTE(review): biggestContour (and the box computed above) are backed by the
    // MemStorage that was just disposed; also throws NullReferenceException if no
    // contour was found. Both hazards predate this review — confirm and fix upstream.
    DetectarCentroPalma(biggestContour.ToList<Point>(), obtenerListaCandidatos(box));
    PointF punto = new PointF(405, 380); // hard-coded fallback palm-center coordinates
    Point punt = new Point(405, 380);
    CircleF centerCircle = new CircleF(punto, 5f);
    //CircleF
    // Overwrite with the detected palm center ('result' is presumably set by DetectarCentroPalma).
    centerCircle = new CircleF(result.Location, 5f);
    imagen.Draw(centerCircle, new Bgr(Color.Brown), 3);
    /*
     * for (int i = 0; i < defects.Total; i++)
     * {
     * LineSegment2D lineaDedoCentro = new LineSegment2D(defectArray[i].StartPoint, punt);
     * imagen.Draw(lineaDedoCentro, new Bgr(Color.Green), 2);
     *
     * }
     * */
    #endregion
    List<PointF> fingertips = defectsDrawing(imagen, ref punt);
    #region create feature vector
    List<PointF> newFingertips = ordenarFingertips(fingertips);
    // NOTE(review): angles are computed from the unordered 'fingertips' while the vector
    // gets the ordered 'newFingertips' — confirm this mismatch is intentional.
    List<float> angles = calculateFingerAngles(fingertips, punto);
    FeatureVector vector = new FeatureVector(newFingertips, angles, punto, 5);
    //MessageBox.Show("Done");
    // frmPruebaDatos datos = new frmPruebaDatos(vector);
    // datos.Show();
    #endregion
    return (vector);
}
/// <summary>
/// Runs bib-number OCR on an input frame: thresholds the grayscale image, detects
/// faces via DF(), derives a bib region below each face, OCRs each region with
/// Tesseract (digits only) and appends the text to the form title; finally shows
/// the annotated threshold image and the elapsed time.
/// </summary>
/// <param name="image">The color frame to process.</param>
private void ProcessImage(Image<Bgr, byte> image)
{
    Stopwatch watch = Stopwatch.StartNew(); // time the detection process
    List<Image<Gray, Byte>> licensePlateImagesList = new List<Image<Gray, byte>>();
    List<Image<Gray, Byte>> filteredLicensePlateImagesList = new List<Image<Gray, byte>>();
    List<MCvBox2D> licenseBoxList = new List<MCvBox2D>();
    //List<string> words = _licensePlateDetector.DetectLicensePlate(
    //   image,
    //   licensePlateImagesList,
    //   filteredLicensePlateImagesList,
    //   licenseBoxList);
    // NOTE(review): Tesseract instance is created per call and never disposed —
    // consider reusing one instance or wrapping in using. Whitelist restricts OCR to digits.
    var _ocr = new Tesseract(@".\tessdata\", "eng", Tesseract.OcrEngineMode.OEM_DEFAULT, "1234567890");
    List<String> licenses = new List<String>(); // NOTE(review): never populated (leftover from plate-recognition sample)
    using (Image<Gray, byte> gray = image.Convert<Gray, Byte>())
    //using (Image<Gray, byte> gray = GetWhitePixelMask(img))
    using (Image<Gray, Byte> canny = new Image<Gray, byte>(gray.Size))
    using (MemStorage stor = new MemStorage())
    {
        //CvInvoke.cvCanny(gray, canny, 100, 100, 3);
        // Despite its name, 'canny' holds a fixed binary threshold of the gray image.
        CvInvoke.cvThreshold(gray, canny, 75, 255, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY);
        var bibs = new List<Image<Gray, Byte>>(); // NOTE(review): unused
        var bibsreg = new List<Rectangle>();
        //imageBox1.Image = gray;
        var faces = DF(image);
        Point startPoint = new Point(10, 10);
        this.Text = "";
        foreach (Rectangle face in faces)
        {
            canny.Draw(face, new Gray(1), 2);
            // Bib region heuristic: twice the face width, starting two face-heights below.
            // NOTE(review): r can extend past the image bounds for faces near the edge —
            // verify cvSetImageROI behavior in that case.
            var r = new Rectangle(face.X - face.Width / 2, face.Y + face.Height * 2, face.Width * 2, face.Height * 3);
            bibsreg.Add(r);
            CvInvoke.cvSetImageROI(canny, r);
            var ni = canny.Copy(); // NOTE(review): never disposed
            _ocr.Recognize(ni);
            var words = _ocr.GetCharactors();
            CvInvoke.cvResetImageROI(canny);
            this.Text += " bib:" + _ocr.GetText().Replace(" ", string.Empty);
            //AddLabelAndImage(ref startPoint, _ocr.GetText(), ni);
        }
        foreach (Rectangle r in bibsreg)
        {
            canny.Draw(r, new Gray(1), 10);
        }
        // NOTE(review): canny is disposed when the using block exits, but the ImageBox
        // is handed this reference — confirm ImageBox copies the data, otherwise this
        // displays a disposed image.
        imageBox1.Image = canny;
        //_ocr.Recognize(canny);
        //var words = _ocr.GetCharactors();
        //this.Text = _ocr.GetText();
        //Contour<Point> contours = canny.FindContours(
        //   Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
        //   Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
        //   stor);
        //FindLicensePlate(contours, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
    }
    watch.Stop(); //stop the timer
    processTimeLabel.Text = String.Format("License Plate Recognition time: {0} milli-seconds", watch.Elapsed.TotalMilliseconds);
    //panel1.Controls.Clear();
    //Point startPoint = new Point(10, 10);
    //for (int i = 0; i < words.Count; i++)
    //{
    //   AddLabelAndImage(
    //      ref startPoint,
    //      String.Format("License: {0}", words[i]),
    //      licensePlateImagesList[i].ConcateVertical(filteredLicensePlateImagesList[i]));
    //   image.Draw(licenseBoxList[i], new Bgr(Color.Red), 2);
    //}
    //imageBox1.Image = image;
}
/// <summary>
/// Detect the STAR keypoints in the given image.
/// </summary>
/// <param name="image">The image from which the key points will be detected</param>
/// <returns>The key points found in the image</returns>
public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image)
{
   using (MemStorage storage = new MemStorage())
   {
      // Collect the detected points into a native sequence, then marshal to a managed array.
      Seq<MKeyPoint> keyPoints = new Seq<MKeyPoint>(storage);
      CvStarDetectorDetectKeyPoints(ref this, image, keyPoints.Ptr);
      return keyPoints.ToArray();
   }
}
/// <summary>
/// Detects skin-colored gesture regions in a frame: mirrors the bitmap, runs YCrCb
/// skin segmentation, and collects every contour whose bounding box is wider than
/// 20 px and whose perimeter falls within [minPerimeter, maxPerimeter].
/// Fix: the skin image was converted to a Bitmap twice per detection and both
/// bitmaps were leaked; now one Bitmap is created per detection and disposed.
/// Also removed the unused locals 'gestureRectangle' and 'grayImage'.
/// </summary>
/// <param name="colorImage">Input frame; mirrored in place (RotateNoneFlipX).</param>
/// <param name="thresholdValue">Unused; kept for interface compatibility.</param>
/// <param name="invert">When true, inverts the skin mask before contour search.</param>
/// <param name="minPerimeter">Minimum accepted contour perimeter (exclusive).</param>
/// <param name="maxPerimeter">Maximum accepted contour perimeter (exclusive).</param>
/// <param name="Rl">Lower YCrCb bound, first channel.</param>
/// <param name="Gl">Lower YCrCb bound, second channel.</param>
/// <param name="Bl">Lower YCrCb bound, third channel.</param>
/// <param name="Rh">Upper YCrCb bound, first channel.</param>
/// <param name="Gh">Upper YCrCb bound, second channel.</param>
/// <param name="Bh">Upper YCrCb bound, third channel.</param>
/// <param name="detectedObj">Receives one RecognitionType per accepted contour.</param>
public void IdentifyContours(Bitmap colorImage, int thresholdValue, bool invert, int minPerimeter, int maxPerimeter, int Rl, int Gl, int Bl, int Rh, int Gh, int Bh, out List<RecognitionType> detectedObj)
{
    detectedObj = new List<RecognitionType>();

    #region Conversion To grayscale
    // Mirror the frame so on-screen movement matches the user's movement.
    colorImage.RotateFlip(RotateFlipType.RotateNoneFlipX);
    Image<Bgr, byte> color = new Image<Bgr, byte>(colorImage);
    IColorSkinDetector skinDetection;
    Ycc YCrCb_min = new Ycc(Rl, Gl, Bl);
    Ycc YCrCb_max = new Ycc(Rh, Gh, Bh);
    #endregion

    #region Extracting the Contours
    skinDetection = new YCrCbSkinDetector();
    Image<Gray, byte> skin = skinDetection.DetectSkin(color, YCrCb_min, YCrCb_max);
    if (invert)
    {
        skin._Not();
    }
    using (MemStorage storage = new MemStorage())
    {
        for (Contour<Point> contours = skin.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, storage); contours != null; contours = contours.HNext)
        {
            Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
            if (currentContour.BoundingRectangle.Width > 20)
            {
                if (contours.Perimeter > minPerimeter && contours.Perimeter < maxPerimeter)
                {
                    // Fill the accepted contour into the mask and outline it on the color frame.
                    CvInvoke.cvDrawContours(skin, contours, new MCvScalar(255), new MCvScalar(255), -1, 2, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
                    color.Draw(currentContour.BoundingRectangle, new Bgr(0, 255, 0), 1);
                    // Snapshot the (just updated) mask once; Clone produces an independent
                    // Bitmap, so the temporary can be disposed immediately.
                    using (Bitmap skinBitmap = skin.ToBitmap())
                    {
                        detectedObj.Add(new RecognitionType()
                        {
                            GesturePosition = currentContour.BoundingRectangle,
                            GestureImage = skinBitmap.Clone(currentContour.BoundingRectangle, skinBitmap.PixelFormat)
                        });
                    }
                }
            }
        }
    }
    #endregion
}
/// <summary>
/// Detect image features from the given image
/// </summary>
/// <param name="image">The image to detect features from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>The Image features detected from the given image</returns>
public ImageFeature[] DetectFeatures(Image<Gray, Byte> image, Image<Gray, byte> mask)
{
    using (MemStorage stor = new MemStorage())
    using (VectorOfFloat descs = new VectorOfFloat())
    {
        // Native call fills 'pts' with keypoints and 'descs' with a flat descriptor buffer.
        Seq<MKeyPoint> pts = new Seq<MKeyPoint>(stor);
        CvSURFDetectorDetectFeature(ref this, image, mask, pts, descs);
        MKeyPoint[] kpts = pts.ToArray();
        int n = kpts.Length;
        // Raw address of the first float in the descriptor vector; advanced manually below.
        long add = descs.StartAddress.ToInt64();
        ImageFeature[] features = new ImageFeature[n];
        // SURF descriptor length: 64 floats normally, 128 when the 'extended' flag is set.
        int sizeOfdescriptor = extended == 0 ? 64 : 128;
        // Copy each descriptor out of native memory before 'descs' is disposed;
        // the pointer is stepped one descriptor (sizeOfdescriptor floats) per keypoint.
        for (int i = 0; i < n; i++, add += sizeOfdescriptor * sizeof(float))
        {
            features[i].KeyPoint = kpts[i];
            float[] desc = new float[sizeOfdescriptor];
            Marshal.Copy(new IntPtr(add), desc, 0, sizeOfdescriptor);
            features[i].Descriptor = desc;
        }
        return features;
    }
}
/// <summary>
/// Detects LEDs in a frame: finds bright "light" blobs, merges blobs closer than
/// MergeDist, then finds larger "halo" contours and associates the merged light
/// boxes that lie inside or touch each halo. A LED is kept only when it ends up
/// with a main light box.
/// </summary>
/// <param name="src">Source frame.</param>
/// <param name="lightCount">Receives the number of merged light rectangles found.</param>
/// <returns>The detected LEDs, possibly empty.</returns>
public List<LED> GetAll(Image<Bgr, Byte> src, out int lightCount)
{
    List<LED> leds = new List<LED>();
    var range = RangeImage(src);       // halo-color mask
    var rangeLight = LightImage(src);  // light-color mask
    List<Rectangle> _lightRects = new List<Rectangle>();
    using (MemStorage lightStorage = new MemStorage())
    {
        // Pass 1: collect bounding boxes of light blobs within the configured area window.
        for (Contour<Point> contours = rangeLight.FindContours(approximationMethod, retrieveType, lightStorage); contours != null; contours = contours.HNext)
        {
            Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * LEDApproximation, lightStorage);
            if (GeometryExt.Area(currentContour.BoundingRectangle) >= MinLightArea && GeometryExt.Area(currentContour.BoundingRectangle) <= MaxLightArea)
            {
                _lightRects.Add(currentContour.BoundingRectangle);
            }
        }
        // Merge any two rectangles closer than MergeDist; k = -1 restarts the outer
        // scan from the beginning after each merge (quadratic worst case, fine for
        // the small rectangle counts expected here).
        for (int k = 0; k < _lightRects.Count; k++)
        {
            for (int k2 = 0; k2 < _lightRects.Count; k2++)
            {
                if (k != k2)
                {
                    if (GeometryExt.Distance(_lightRects[k], _lightRects[k2]) < MergeDist)
                    {
                        _lightRects[k] = GeometryExt.Join(_lightRects[k], _lightRects[k2]);
                        _lightRects.RemoveAt(k2);
                        k = -1;
                        break;
                    }
                }
            }
        }
        lightCount = _lightRects.Count;
        using (MemStorage storage = new MemStorage())
        {
            // Pass 2: halo contours; attach light boxes that fall inside/collide with each halo.
            for (Contour<Point> contours = range.FindContours(approximationMethod, retrieveType, storage); contours != null; contours = contours.HNext)
            {
                Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * LEDApproximation, storage);
                if (GeometryExt.Area(currentContour.BoundingRectangle) >= MinArea && GeometryExt.Area(currentContour.BoundingRectangle) <= MaxArea)
                {
                    LED led = new LED();
                    led.HaloBox = currentContour.BoundingRectangle;
                    led.HaloColor = HaloColor;
                    led.LightColor = LightColor;
                    for (int k = 0; k < _lightRects.Count; k++) //check inside ones with priority
                    {
                        Rectangle r = _lightRects[k];
                        // Only consider lights sufficiently smaller than the halo (areaMul factor).
                        if (GeometryExt.Area(currentContour.BoundingRectangle) >= GeometryExt.Area(r) * areaMul)
                        {
                            if (Collision.Inside(currentContour.BoundingRectangle, r))
                            {
                                led.InsideLightBoxes.Add(r);
                            }
                            else if (Collision.Check(currentContour.BoundingRectangle, r))
                            {
                                led.CollidingLightBoxes.Add(r);
                            }
                        }
                    }
                    // NOTE(review): MainLightBox is never assigned here — presumably a
                    // LED property derived from Inside/CollidingLightBoxes; this check is
                    // then "did any light box attach". Verify against the LED class.
                    if (led.MainLightBox != new Rectangle()) //good led found, add it to results
                    {
                        led.InsideLightBoxes = led.InsideLightBoxes.OrderByDescending(x => GeometryExt.Area(x)).ToList();
                        led.CollidingLightBoxes = led.CollidingLightBoxes.OrderByDescending(x => GeometryExt.Area(x)).ToList();
                        leds.Add(led);
                    }
                }
            }
        }
    }
    range.Dispose();
    rangeLight.Dispose();
    return (leds);
}
/// <summary>
/// License-plate pipeline driver: projects the Sobel image horizontally to find
/// candidate plate bands (stored in Horizont), then for each band finds the plate
/// region, normalizes/thresholds it, segments character rectangles, matches them
/// against template images, and composes the plate text (letter/digit layout
/// "xOOOxxOOO"). Works mainly through instance fields (u, Horizont, Plate, Original, ...).
/// </summary>
public void Horizontal()
{
    System.Diagnostics.Stopwatch stop = new System.Diagnostics.Stopwatch();
    stop.Start();
    u = Horiz(SobelGray); // horizontal projection profile of the Sobel image
    stop.Stop();
    // Smooth the profile with two derivative passes, then locate maxima bands.
    u = Proizvodnij1(u);
    u = Proizvodnij2(u);
    float max = MAX(u);
    DrawLine(u, max, Sobel);
    float sum = DrawMiddleLineHoriz(u, max, Sobel);
    List<int> maximum = SearchMaximums(u, sum, max, Sobel, false);
    Greenze(maximum, u);
    for (int i = 0; i < Horizont.Count; i++)
    {
        //Sobel.Draw(new CircleF(new System.Drawing.PointF(Convert.ToSingle(Sobel.Width * u[Horizont[i].k1] / max), Horizont[i].k1), 3), new Bgr(System.Drawing.Color.Red), 3);
        //Sobel.Draw(new CircleF(new System.Drawing.PointF(Convert.ToSingle(Sobel.Width * u[Horizont[i].k2] / max), Horizont[i].k2), 3), new Bgr(System.Drawing.Color.Red), 3);
        // Crop the horizontal band [k1, k2) out of the original frame.
        Horizont[i].original = Original.Copy(new System.Drawing.Rectangle(0, Horizont[i].k1, Original.Width, Horizont[i].k2 - Horizont[i].k1));
        Horizont[i].sobel = Horizont[i].original.Convert<Gray, byte>();
        Horizont[i].sobel1 = Horizont[i].original.Convert<Bgr, byte>();
        Vertical(i); // fills Horizont[i].p with the band's vertical projection
        Horizont[i].p = Proizvodnij1(Horizont[i].p);
        Horizont[i].p = Proizvodnij2(Horizont[i].p);
        Horizont[i].p = Proizvodnij2(Horizont[i].p);
        float max1 = MAX(Horizont[i].p);
        float sum1 = DrawMiddleLineVertical(i, max1);
        float h = Horizont[i].sobel1.Height * sum1 / max1;
        Horizont[i].sobel1.Draw(new LineSegment2DF(new System.Drawing.PointF(0, h), new System.Drawing.PointF(Horizont[i].sobel1.Width, h)), new Bgr(System.Drawing.Color.Blue), 2);
        GetPlate(i, sum1, max1); // appends plate candidates to Plate
    }
    stop.Stop(); // NOTE(review): watch was already stopped above; second Stop is a no-op
    System.Diagnostics.Stopwatch st = new System.Diagnostics.Stopwatch();
    st.Start();
    Int32 ret = 0; // running count of segmented character cells (diagnostic only)
    for (int i = 0; i < Plate.Count; i++)
    {
        Ugol(Plate[i]); // deskew the plate candidate
        // Normalize to a fixed 260x56 plate, enhance contrast, then binarize (inverted).
        Plate[i].rotate2 = Plate[i].rotate.Resize(260, 56, Emgu.CV.CvEnum.INTER.CV_INTER_AREA); //
        Plate[i].rotate1 = Plate[i].rotate2.Convert<Gray, byte>();
        CvInvoke.cvCLAHE(Plate[i].rotate1, 5, new System.Drawing.Size(8, 8), Plate[i].rotate1);
        //Plate[i].rotate1 = Threshold.GrayHCH(Plate[i].rotate1);
        CvInvoke.cvNormalize(Plate[i].rotate1, Plate[i].rotate1, 0, 255, Emgu.CV.CvEnum.NORM_TYPE.CV_MINMAX, Plate[i].rotate1);
        CvInvoke.cvAdaptiveThreshold(Plate[i].rotate1, Plate[i].rotate1, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_GAUSSIAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY_INV, 15, 13);
        List<System.Drawing.Rectangle> rec = new List<System.Drawing.Rectangle>();
        // Plate[i].rotate1 = Plate[i].rotate1.MorphologyEx(new StructuringElementEx(5, 5, 3, 3, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT), Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, 1);
        using (Emgu.CV.MemStorage storage = new MemStorage())
        {
            int il = 0;
            // Character candidates: size/aspect-filtered contour bounding boxes.
            for (Emgu.CV.Contour<System.Drawing.Point> contours = Plate[i].rotate1.Convert<Gray, byte>().FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_CCOMP); contours != null; contours = contours.HNext)
            {
                Emgu.CV.Contour<System.Drawing.Point> currentContour = contours;
                // NOTE(review): Height / Width is integer division, so the < 1.5 aspect
                // check effectively means Height < 2 * Width — confirm intent.
                if (currentContour.BoundingRectangle.X > 3 && currentContour.BoundingRectangle.Width > 10 && currentContour.BoundingRectangle.Height > 10 && currentContour.BoundingRectangle.Height < 200 && currentContour.BoundingRectangle.Width < 50 && currentContour.BoundingRectangle.Height / currentContour.BoundingRectangle.Width < 1.5)
                {
                    il++;
                    Plate[i].rotate2.Draw(currentContour.BoundingRectangle, new Bgr(System.Drawing.Color.Red), 2);
                    rec.Add(currentContour.BoundingRectangle);
                }
            }
            /*if (il < 1)
             * {
             * Plate.Remove(Plate[i]);
             * i--;
             * continue;
             * }*/
        }
        // Bubble-sort character boxes left-to-right by X.
        for (int io = rec.Count - 1; io >= 0; io--)
        {
            for (int ip = 0; ip < io; ip++)
            {
                if (rec[ip].X > rec[ip + 1].X)
                {
                    System.Drawing.Rectangle r = rec[ip];
                    rec[ip] = rec[ip + 1];
                    rec[ip + 1] = r;
                }
            }
        }
        GetRect(i, rec);
        // Reject candidates that do not look like a plate; compact the list and redo index.
        if (!IsPlate(i, rec))
        {
            Plate.Remove(Plate[i]);
            i--;
            continue;
        }
        // Crop each character cell out of the binarized plate.
        for (int ih = 0; ih < rec.Count; ih++)
        {
            LetterDigit l = new LetterDigit();
            l.LD = Plate[i].rotate1.Copy(rec[ih]);
            ret++;
            Plate[i].digit.Add(l);
        }
        // Column projection of white pixels across the plate (per-pixel access: slow but simple).
        Plate[i].px = new float[Plate[i].rotate1.Width];
        for (int kl = 0; kl < Plate[i].rotate1.Width; kl++)
        {
            for (int kp = 0; kp < Plate[i].rotate1.Height; kp++)
            {
                if (Convert.ToInt32(Plate[i].rotate1[kp, kl].Intensity) == 255)
                {
                    Plate[i].px[kl]++;
                }
            }
        }
        Plate[i].px = Proizvodnij1(Plate[i].px);
        Plate[i].px = Proizvodnij2(Plate[i].px);
        float maxpx = MAX(Plate[i].px);
        // Draw the projection profile onto the color plate for diagnostics.
        for (int h1 = 0, x = 0; h1 < Plate[i].px.Length - 1; h1++, x++)
        {
            float j = Plate[i].rotate2.Height * Plate[i].px[h1] / (maxpx);
            float j1 = Plate[i].rotate2.Height * Plate[i].px[h1 + 1] / (maxpx);
            Plate[i].rotate2.Draw(new LineSegment2DF(new System.Drawing.PointF(x, j), new System.Drawing.PointF(x, j1)), new Bgr(System.Drawing.Color.Red), 2);
        }
        List<int> min1 = SearchMininum(Plate[i].px, i, maxpx);
        /* double sumraz = Plate[i].rotate1.Width / min1.Count - 10;
         * for (int il = 0; il < min1.Count-1; il++)
         * {
         * double ggg= Math.Abs( min1[il+1] - min1[il]);
         * if (ggg < sumraz)
         * {
         * min1.Remove(min1[il + 1]);
         * il--;
         * continue;
         * }
         * /* if (il % 2 == 0)
         * Plate[i].rotate2.Draw(new LineSegment2DF(new System.Drawing.PointF(min1[il], 10), new System.Drawing.PointF(Convert.ToSingle(min1[il+1]), 10)), new Bgr(System.Drawing.Color.Aqua), 2);
         * if (il % 2 == 1)
         * Plate[i].rotate2.Draw(new LineSegment2DF(new System.Drawing.PointF(min1[il], 15), new System.Drawing.PointF(Convert.ToSingle(min1[il+1]), 15)), new Bgr(System.Drawing.Color.Orange), 2);
         * }*/
        // Template matching: score every cell against every reference glyph image.
        for (int il = 0; il < Plate[i].digit.Count; il++)
        {
            for (int gh = 0; gh < images.Length; gh++)
            {
                Image<Gray, byte> res = Plate[i].digit[il].LD.Resize(10, 18, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC).Sub(images[gh]);
                Plate[i].digit[il]._list.Add(Value2(images[gh], res));
            }
        }
        //xOOOxxOOO
        Plate[i].text = TextLetter(i, 0) + TextDigit(i, 1) + TextDigit(i, 2) + TextDigit(i, 3) + TextLetter(i, 4) + TextLetter(i, 5) + TextDigit(i, 6) + TextDigit(i, 7) + TextDigit(i, 8);
        Original.Draw(new System.Drawing.Rectangle(Plate[i].x1, Plate[i].y1, Plate[i].x2 - Plate[i].x1, Plate[i].y2 - Plate[i].y1), new Bgr(System.Drawing.Color.Red), 2);
    }
    st.Stop();
}
/// <summary>
/// Recognize gesture: compares the hand contour against each stored gesture with
/// the same finger count using a pairwise-geometrical-histogram (PGH) Bhattacharyya
/// distance and cvMatchShapes; the gesture minimizing the product of both scores wins,
/// subject to strict acceptance thresholds.
/// </summary>
/// <param name="contour">Hand contour image</param>
/// <param name="fingersCount">Number of fingers</param>
/// <returns>The best-matching gesture, or null when no match clears the thresholds</returns>
public Gesture RecognizeGesture(Image<Gray, byte> contour, int fingersCount)
{
    List<Gesture> recognizedGestures = new List<Gesture>(Gestures);
    // Sentinel "worst possible" scores so any real comparison beats the initial bestFit.
    Gesture bestFit = new Gesture();
    bestFit.RecognizedData.ContourMatch = 999;
    bestFit.RecognizedData.HistogramMatch = 999;
    foreach (var g in recognizedGestures)
    {
        // Finger count is a hard pre-filter.
        if (g.FingersCount != fingersCount)
        {
            continue;
        }
        using (MemStorage storage = new MemStorage())
        {
            Contour<Point> c1 = contour.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_LIST, storage);
            Contour<Point> c2 = g.Image.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_LIST, storage);
            if (c1 != null && c2 != null)
            {
                // 8x8 PGH bins over angle [-180,180] and distance.
                // NOTE(review): RangeF(100, 100) is a degenerate (empty) distance range —
                // likely meant RangeF(0, 100); verify before changing, it affects all scores.
                // NOTE(review): the two DenseHistograms are IDisposable but never disposed.
                DenseHistogram hist1 = new DenseHistogram(new int[2] { 8, 8 }, new RangeF[2] { new RangeF(-180, 180), new RangeF(100, 100) });
                DenseHistogram hist2 = new DenseHistogram(new int[2] { 8, 8 }, new RangeF[2] { new RangeF(-180, 180), new RangeF(100, 100) });
                CvInvoke.cvCalcPGH(c1, hist1.Ptr);
                CvInvoke.cvCalcPGH(c2, hist2.Ptr);
                CvInvoke.cvNormalizeHist(hist1.Ptr, 100.0);
                CvInvoke.cvNormalizeHist(hist2.Ptr, 100.0);
                g.RecognizedData.Hand = Hand;
                g.RecognizedData.HistogramMatch = CvInvoke.cvCompareHist(hist1, hist2, HISTOGRAM_COMP_METHOD.CV_COMP_BHATTACHARYYA);
                g.RecognizedData.ContourMatch = CvInvoke.cvMatchShapes(c1, c2, CONTOURS_MATCH_TYPE.CV_CONTOURS_MATCH_I3, 0);
                // Lower product of the two distances = better match.
                double rating = g.RecognizedData.ContourMatch * g.RecognizedData.HistogramMatch;
                double bestSoFar = bestFit.RecognizedData.ContourMatch * bestFit.RecognizedData.HistogramMatch;
                if (rating < bestSoFar)
                {
                    bestFit = g;
                }
            }
        }
    }
    // Reliable, but strict: 0.01, 0.80, 0.20
    if (bestFit.RecognizedData.ContourMatch * bestFit.RecognizedData.HistogramMatch <= 0.0125 && bestFit.RecognizedData.ContourMatch <= 0.80 && bestFit.RecognizedData.HistogramMatch <= 0.20)
    {
        return (bestFit);
    }
    else
    {
        return (null);
    }
}
/// <summary>
/// Finds the biggest contour in the skin mask, computes its convex hull,
/// min-area box and convexity defects into instance fields (hull, box,
/// filteredHull, defects, defectArray), draws diagnostics onto currentFrame,
/// and triggers finger counting via DrawAndComputeFingersNum().
/// </summary>
/// <param name="skin">Binary skin mask for the current frame.</param>
private void ExtractContourAndHull(Image<Gray, byte> skin)
{
    try
    {
        using (MemStorage storage = new MemStorage())
        {
            Contour<Point> contours = skin.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);
            Contour<Point> biggestContour = null;
            Double Result1 = 0;
            Double Result2 = 0;
            // Keep the contour with the largest area (assumed to be the hand).
            while (contours != null)
            {
                Result1 = contours.Area;
                if (Result1 > Result2)
                {
                    Result2 = Result1;
                    biggestContour = contours;
                }
                contours = contours.HNext;
            }
            if (biggestContour != null)
            {
                //currentFrame.Draw(biggestContour, new Bgr(Color.DarkViolet), 2);
                Contour<Point> currentContour = biggestContour.ApproxPoly(biggestContour.Perimeter * 0.0025, storage);
                currentFrame.Draw(currentContour, new Bgr(Color.LimeGreen), 2);
                biggestContour = currentContour;
                hull = biggestContour.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                box = biggestContour.GetMinAreaRect();
                PointF[] points = box.GetVertices();
                //handRect = box.MinAreaRect();
                //currentFrame.Draw(handRect, new Bgr(200, 0, 0), 1);
                Point[] ps = new Point[points.Length]; // NOTE(review): ps is filled but never used
                for (int i = 0; i < points.Length; i++)
                {
                    ps[i] = new Point((int)points[i].X, (int)points[i].Y);
                }
                currentFrame.DrawPolyline(hull.ToArray(), true, new Bgr(200, 125, 75), 2);
                currentFrame.Draw(new CircleF(new PointF(box.center.X, box.center.Y), 3), new Bgr(200, 125, 75), 2);
                //ellip.MCvBox2D= CvInvoke.cvFitEllipse2(biggestContour.Ptr);
                //currentFrame.Draw(new Ellipse(ellip.MCvBox2D), new Bgr(Color.LavenderBlush), 3);
                // center/radius are only referenced by the commented-out experiments below.
                PointF center;
                float radius;
                //CvInvoke.cvMinEnclosingCircle(biggestContour.Ptr, out center, out radius);
                //currentFrame.Draw(new CircleF(center, radius), new Bgr(Color.Gold), 2);
                //currentFrame.Draw(new CircleF(new PointF(ellip.MCvBox2D.center.X, ellip.MCvBox2D.center.Y), 3), new Bgr(100, 25, 55), 2);
                //currentFrame.Draw(ellip, new Bgr(Color.DeepPink), 2);
                //CvInvoke.cvEllipse(currentFrame, new Point((int)ellip.MCvBox2D.center.X, (int)ellip.MCvBox2D.center.Y), new System.Drawing.Size((int)ellip.MCvBox2D.size.Width, (int)ellip.MCvBox2D.size.Height), ellip.MCvBox2D.angle, 0, 360, new MCvScalar(120, 233, 88), 1, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, 0);
                //currentFrame.Draw(new Ellipse(new PointF(box.center.X, box.center.Y), new SizeF(box.size.Height, box.size.Width), box.angle), new Bgr(0, 0, 0), 2);
                // Keep only hull points spaced more than 1/10 of the box width apart.
                filteredHull = new Seq<Point>(storage);
                for (int i = 0; i < hull.Total; i++)
                {
                    // NOTE(review): hull[i + 1] is read at i == hull.Total - 1; whether the
                    // Seq indexer wraps or reads past the end depends on the Emgu version — verify.
                    if (Math.Sqrt(Math.Pow(hull[i].X - hull[i + 1].X, 2) + Math.Pow(hull[i].Y - hull[i + 1].Y, 2)) > box.size.Width / 10)
                    {
                        filteredHull.Push(hull[i]);
                    }
                }
                defects = biggestContour.GetConvexityDefacts(storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                defectArray = defects.ToArray();
                DrawAndComputeFingersNum();
            }
        }
    }
    catch (Exception ex)
    {
        // NOTE(review): deliberately(?) swallows every exception so a bad frame does not
        // kill the capture loop — but it hides real failures too. At minimum this should
        // log 'ex'; confirm intent before changing behavior.
    }
}
/// <summary>
/// Processes one answer-sheet scan: grabs the first frame of <paramref name="filename"/>,
/// crops a fixed ROI, adaptive-thresholds + erodes/dilates it, collects contour
/// bounding boxes within the configured size window, and feeds them to the Anchor
/// for grading; diagnostics are drawn onto the crop and shown in the ImageBox.
/// Fix: the original switch re-assigned every tuning parameter in all six cases with
/// values identical to the defaults — only minRectSide actually varies, so the switch
/// is collapsed to exactly that (behavior unchanged for every AssessmentType).
/// </summary>
/// <param name="type">Assessment layout; selects the minimum mark size.</param>
/// <param name="frameGrabber">ImageBox that receives the annotated crop.</param>
/// <param name="filename">Path of the scanned file to open with Capture.</param>
public static void ProcessFile(AssessmentType type, ImageBox frameGrabber, string filename)
{
    // Fixed scan geometry of the answer sheet.
    Rectangle roi = new Rectangle(165, 175, 810, 1125);

    // Default tuning parameters (shared by all assessment types).
    int blockSize = 51;     // adaptive-threshold neighborhood
    double param1 = 15;     // adaptive-threshold offset
    int erodeI = 3;         // erosion iterations
    int dilateI = 3;        // dilation iterations
    int minRectSide = 10;   // smallest accepted mark side, px
    int maxRectSide = 50;   // largest accepted mark side, px

    // Denser layouts use smaller marks; everything else keeps the defaults.
    switch (type)
    {
        case AssessmentType.Item70:
        case AssessmentType.Item80:
            minRectSide = 7;
            break;
        case AssessmentType.Item90:
        case AssessmentType.Item100:
            minRectSide = 5;
            break;
    }

    anchor = new Anchor(roi.Width, roi.Height, PartialDB.GetAssessment(type));
    anchorDiag = new AnchorDiagnostics(anchor);
    imageBoxFrameGrabber = frameGrabber;
    grabber = new Capture(filename);
    currentFrame = grabber.QueryFrame();
    crop = currentFrame.Copy(roi);
    gray = crop.Convert<Gray, byte>();
    thres = new Image<Gray, byte>(new Size(gray.Width, gray.Height));
    CvInvoke.cvAdaptiveThreshold(gray.Ptr, thres.Ptr, 255, ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, THRESH.CV_THRESH_BINARY_INV, blockSize, param1);
    erode = thres.Erode(erodeI);
    dilate = erode.Dilate(dilateI);

    // Collect candidate mark bounding boxes within the size window.
    List<Geometry.Shade> filtered1 = new List<Geometry.Shade>();
    using (MemStorage storage = new MemStorage())
    {
        Contour<Point> contours = dilate.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_TREE, storage);
        while (contours != null)
        {
            Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
            if (currentContour.BoundingRectangle.Width > minRectSide && currentContour.BoundingRectangle.Width < maxRectSide && currentContour.BoundingRectangle.Height > minRectSide && currentContour.BoundingRectangle.Height < maxRectSide)
            {
                filtered1.Add(new Geometry.Shade(
                    currentContour.BoundingRectangle.X,
                    currentContour.BoundingRectangle.Y,
                    currentContour.BoundingRectangle.Width,
                    currentContour.BoundingRectangle.Height));
            }
            contours = contours.HNext;
        }
    }
    anchor.Process(filtered1);
    anchorDiag.DrawDiagnostics(crop);
    imageBoxFrameGrabber.Image = crop;
}
/// <summary>
/// Detects spherical balls in a Kinect depth frame: thresholds the (half-resolution)
/// depth map, finds filled contours on its Canny edges, fits a min-area box to each,
/// unprojects the box to metric space and keeps blobs whose metric radius matches
/// the expected ball radius and whose area is close to an ellipse's.
/// Fixes: the MemStorage was disposed manually and leaked if any call in the loop
/// threw — now wrapped in a using block; removed the unused local 'zproj'.
/// </summary>
/// <param name="kinect">Kinect interface supplying the depth frame and unprojection.</param>
/// <returns>The metric positions/radii of the detected balls.</returns>
public List<Ball3D> Detect(KinectInterface kinect)
{
    float threshDepth = 8.9f;        // max depth of interest, meters
    float expectedRadius = 0.0251f;  // expected ball radius, meters
    float radThres = 0.0025f;        // accepted radius tolerance, meters
    var balls = new List<Ball3D>();
    int w = KinectInterface.w;
    int h = KinectInterface.h;
    int sw = w / 2; // PyrDown halves each dimension
    int sh = h / 2;
    // Depth in mm is stored >>4; NOTE(review): (int)8.9f*1000>>4 = 500 truncates to 244
    // in the byte cast — confirm this overflow is the intended threshold.
    byte depthByte = (byte)((int)threshDepth * 1000 >> 4);
    var dsmall = kinect.FullDepth.PyrDown();
    var depthMask = dsmall.CopyBlank();
    CvInvoke.cvThreshold(dsmall.Ptr, depthMask.Ptr, depthByte, 255, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY_INV);
    var depthMaskBlock = depthMask.Erode(1).Dilate(1); // remove speckle noise
    var depthMaskOverlay = depthMaskBlock.Convert<Bgr, Byte>();
    var edges = depthMaskBlock.Canny(new Gray(180), new Gray(120));
    debugOut = edges.Convert<Bgr, Byte>();
    using (MemStorage storage = new MemStorage()) //storage for min-area-rect computation; disposed even on exception
    {
        for (Contour<System.Drawing.Point> contours = edges.FindContours(); contours != null; contours = contours.HNext)
        {
            //var ptsRaw = contours.Select(pt => new System.Drawing.PointF(pt.X, pt.Y)).ToArray();
            //var centroid = new System.Drawing.PointF(
            //   ptsRaw.Sum(p => p.X) / ptsRaw.Length,
            //   ptsRaw.Sum(p => p.Y) / ptsRaw.Length);
            //TODO: fix this method to be actually correct
            //var cPts = ptsRaw.Select(p => new System.Drawing.PointF(
            //   p.X - centroid.X,
            //   p.Y - centroid.Y)).ToArray();
            // Sample the mask at the bounding-box center to reject hollow contours.
            int bbxcent = contours.BoundingRectangle.X + contours.BoundingRectangle.Width / 2;
            int bbycent = contours.BoundingRectangle.Y + contours.BoundingRectangle.Height / 2;
            byte bbcentVal = depthMaskBlock.Data[bbycent, bbxcent, 0];
            int minDim = Math.Min(contours.BoundingRectangle.Width, contours.BoundingRectangle.Height);
            if (bbcentVal == 255 && minDim > 5) //contour is filled in & greater than some pixel size
            {
                //var defects = approxContour.GetConvexityDefacts(storage, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
                MCvBox2D box = contours.GetMinAreaRect(storage);
                // Scale box center/extent back up to full resolution.
                float xc = (box.center.X * (w / sw));
                float yc = (box.center.Y * (h / sh));
                float rMin = (w / sw) * (float)(Math.Min(box.size.Width, box.size.Height) / 2);
                double dAvg = avgDepth(kinect.depthMM, (int)xc, (int)yc, (int)(rMin * 2 / 3));
                //project into metric space; compare center and edge points to get the metric radius
                var projectedPosV3 = kinect.UnprojectDepth((float)dAvg, xc, yc);
                var projectedBound = kinect.UnprojectDepth((float)dAvg, xc + rMin, yc);
                float actualRadius = (projectedPosV3 - projectedBound).Length();
                if (actualRadius < expectedRadius + radThres && actualRadius > expectedRadius - radThres)
                {
                    //RotationMatrix2D<float> rot = new RotationMatrix2D<float>(new System.Drawing.PointF(0, 0), box.angle, 1); //not -box.angle, because stupidly matrix rotations are counter-clockwise but box.angle is measured clockwise...
                    //rot.RotatePoints(cPts);
                    //var cnormPts = cPts.Select(p => new System.Drawing.PointF(
                    //   p.X / (box.size.Width / 2),
                    //   p.Y / (box.size.Height / 2)));
                    //var variance = cnormPts.Sum(p =>
                    //{
                    //   var d = Math.Sqrt(p.X * p.X + p.Y * p.Y);
                    //   return (d - 1) * (d - 1);
                    //}) / cnormPts.Count();
                    //if (variance * rApprox * rApprox < 2.5f)
                    // Circularity check: contour area must be >= 90% of the inscribed ellipse's area.
                    if (contours.Area >= box.size.Width * box.size.Height * Math.PI / 4 * 0.9)
                    {
                        Emgu.CV.Structure.Ellipse ellipse = new Emgu.CV.Structure.Ellipse(box.center, new System.Drawing.SizeF(box.size.Height, box.size.Width), box.angle);
                        depthMaskOverlay.Draw(ellipse, new Bgr(0, 255, 0), 3);
                        depthMaskOverlay.Draw(new Cross2DF(box.center, 10, 10), new Bgr(0, 0, 255), 1);
                        Ball3D ball = new Ball3D()
                        {
                            Position = projectedPosV3.ToLinV(),
                            Radius = actualRadius
                        };
                        balls.Add(ball);
                    }
                    else
                    {
                        //depthMaskOverlay.Draw(box, new Bgr(0, 0, 255), 2);
                    }
                }
            }
        }
    }
    DetectorOverlay = depthMaskOverlay;
    return (balls);
}
/// <summary>
/// Per-frame handler: grabs a camera frame, segments skin, extracts the biggest hand
/// contour, detects fingertips via k-curvature, filters them against the palm region
/// (distance transform), and draws the surviving tips plus a counter onto the frame.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event arguments (unused).</param>
private void processImage(object sender, EventArgs e)
{
    skin = camera.QueryFrame();
    if (skin == null)
    {
        return;
    }
    pictureBox1.Image = skin.Bitmap;
    skin._SmoothGaussian(3);
    // NOTE(review): sleeping on this handler blocks the thread that pumps frames —
    // confirm the delay is intentional throttling.
    Thread.Sleep(100);
    BinaryHandImage = skinDetector.DetectSkin(skin, YCrCb_min, YCrCb_max);
    // Morphological open to remove skin-detection speckle.
    BinaryHandImage._Erode(3);
    BinaryHandImage._Dilate(3);
    imageBoxSkin.Image = BinaryHandImage;
    Contour <Point> handContour = null;
    count++;
    using (MemStorage m = new MemStorage())
    {
        Contour <System.Drawing.Point> Contours = BinaryHandImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE);
        handContour = fingerTipDetection.ExtractBiggestContour(Contours);
        if (handContour != null)
        {
            // Convex hull of the hand (currently unused downstream; kept for parity).
            Seq <Point> hull = handContour.GetConvexHull(Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
            // NOTE(review): this pauses everything for 10 s every 30 frames — confirm intended.
            if (count == 30)
            {
                Thread.Sleep(10000);
                count = 0;
            }
            // Convexity defects (currently unused downstream; kept for parity).
            Seq <MCvConvexityDefect> defects = handContour.GetConvexityDefacts(m, Emgu.CV.CvEnum.ORIENTATION.CV_CLOCKWISE);
            List <Point> tips = fingerTipDetection.findFingerTipsUsingK_Curvature(handContour);
            // Robust Fingertip Tracking with Improved Kalman Filter paper:
            // keep only tips that lie well outside the palm blob (distance transform).
            Image <Gray, float> distTransform = new Image <Gray, float>(BinaryHandImage.Size);
            CvInvoke.cvDistTransform(BinaryHandImage, distTransform, Emgu.CV.CvEnum.DIST_TYPE.CV_DIST_L2, 3, null, IntPtr.Zero);
            distTransform = distTransform.ThresholdBinary(new Gray(30), new Gray(255));
            distTransform._Erode(3);
            Image <Gray, byte> disTransImage = distTransform.Convert <Gray, byte>();
            Contour <Point> contour = disTransImage.FindContours();
            contour = fingerTipDetection.ExtractBiggestContour(contour);
            if (contour != null)
            {
                disTransImage.Draw(contour, new Gray(100), 8);
                distanceTransformImage.Image = disTransImage;
                for (int i = 0; i < tips.Count; i++)
                {
                    // Negative distance = point outside the palm contour; accept when far enough out.
                    if (CvInvoke.cvPointPolygonTest(contour, tips[i], true) < -30)
                    {
                        skin.Draw(new Emgu.CV.Structure.CircleF(tips[i], 5), new Bgr(Color.Yellow), 15);
                    }
                    else
                    {
                        // FIX: removing at index i shifts the next element into slot i;
                        // without the decrement the loop skipped it, so palm points survived.
                        tips.RemoveAt(i);
                        i--;
                    }
                }
            }
            skin.Draw("FingerTips : " + tips.Count, ref f, new Point(10, 40), new Bgr(0, 255, 0));
        }
    }
}
/// <summary>
/// Per-frame motion-detection handler: grabs a frame, updates the foreground detector and
/// motion history, then extracts and draws motion components (red) and the overall motion
/// direction (green). The ordering is significant: the detector must be updated before the
/// motion history, and components must be read before the storage goes out of scope.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event arguments (unused).</param>
private void ProcessFrame(object sender, EventArgs e)
{
    using (Image <Bgr, Byte> image = _capture.QueryFrame())
    using (MemStorage storage = new MemStorage()) //create storage for motion components
    {
        // Lazily create the foreground detector from the first frame seen.
        if (_forgroundDetector == null)
        {
            //_forgroundDetector = new BGCodeBookModel<Bgr>();
            //_forgroundDetector = new FGDetector<Bgr>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
            _forgroundDetector = new BGStatModel <Bgr>(image, Emgu.CV.CvEnum.BG_STAT_TYPE.FGD_STAT_MODEL);
        }
        _forgroundDetector.Update(image);
        capturedImageBox.Image = image;
        //update the motion history with the current foreground mask
        _motionHistory.Update(_forgroundDetector.ForgroundMask);
        #region get a copy of the motion mask and enhance its color
        double[] minValues, maxValues;
        Point[] minLoc, maxLoc;
        _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
        // Rescale so the most recent motion maps to full intensity (255).
        Image <Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
        #endregion
        //create the motion image
        Image <Bgr, Byte> motionImage = new Image <Bgr, byte>(motionMask.Size);
        //display the motion pixels in blue (first channel)
        motionImage[0] = motionMask;
        //Threshold to define a motion area, reduce the value to detect smaller motion
        double minArea = 100;
        storage.Clear(); //clear the storage
        Seq <MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);
        //iterate through each of the motion components
        foreach (MCvConnectedComp comp in motionComponents)
        {
            //reject the components that have small area
            if (comp.area < minArea)
            {
                continue;
            }
            // find the angle and motion pixel count of the specific area
            double angle, motionPixelCount;
            _motionHistory.MotionInfo(comp.rect, out angle, out motionPixelCount);
            //reject the area that contains too little motion (less than 5% of its pixels moving)
            if (motionPixelCount < comp.area * 0.05)
            {
                continue;
            }
            //Draw each individual motion in red
            DrawMotion(motionImage, comp.rect, angle, new Bgr(Color.Red));
        }
        // find and draw the overall motion angle over the whole frame
        double overallAngle, overallMotionPixelCount;
        _motionHistory.MotionInfo(motionMask.ROI, out overallAngle, out overallMotionPixelCount);
        DrawMotion(motionImage, motionMask.ROI, overallAngle, new Bgr(Color.Green));
        //Display the amount of motions found on the current image
        UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", motionComponents.Total, overallMotionPixelCount));
        //Display the image of the motion
        motionImageBox.Image = motionImage;
    }
}
/// <summary>
/// Estimates Z (depth) coordinates for eye and brow model points from the profile image:
/// locates the eye region via a Haar cascade ("ojoI.xml"), scans edge contours for the
/// leftmost eye X, then assigns Z positions to model points listed in the pointconf\*.ptZ
/// files (direct positions plus coefficient-interpolated ones). Mutates <c>Zpos</c>.
/// </summary>
/// <returns>The Canny edge image of the detected eye region.</returns>
public Image <Gray, byte> CalcProfileEyes()
{
    var Rect = eo.getRectFromImage("ojoI.xml", My_Image_prof);
    var img3 = My_Image_prof.Copy(Rect);
    var imgeye = eo.CannyImage(img3, Value1, Value2);
    var min = Rect.Width;
    var max = 0;
    // FIX: MemStorage was never disposed; scope it with a using block.
    using (var storage = new MemStorage())
    {
        // Find the leftmost/rightmost contour X inside the eye ROI.
        for (var contours = imgeye.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_LIST, storage); contours != null; contours = contours.HNext)
        {
            var pts = contours.ToArray();
            // NOTE(review): the scan starts at index 1 and skips pts[0] — confirm intended.
            for (var i = 1; i < pts.Length; i++)
            {
                if (min > pts[i].X)
                {
                    min = pts[i].X;
                }
                if (max < pts[i].X)
                {
                    max = pts[i].X;
                }
            }
        }
    }
    min += Rect.Left;
    max += Rect.Left; // NOTE(review): max is not used below — kept for parity with the original.
    // left-eye Z
    ApplyZPositions("pointconf\\EyeZl.ptZ", min);
    //right side Z
    var reyePos = Rect.Left + ((Rect.Width) / 3 * 2);
    ApplyZPositions("pointconf\\EyeZr.ptZ", reyePos);
    //left brows Z
    ApplyZPositions("pointconf\\brZl.ptZ", Rect.Left);
    // Coefficient file: each record is a coefficient, a count, then that many point indices;
    // Z is interpolated between the eye corners as min + cof * (reyePos - min).
    float koefeye = reyePos - min;
    using (var r = new StreamReader("pointconf\\EyecoefZ.ptZ"))
    {
        var cou = int.Parse(r.ReadLine());
        for (var i = 0; i < cou; i++)
        {
            var cof = float.Parse(r.ReadLine());
            var ptZ = (int)(cof * koefeye) + min;
            var cu = int.Parse(r.ReadLine());
            for (var j = 0; j < cu; j++)
            {
                var s = int.Parse(r.ReadLine());
                Zpos[s] = ptZ;
            }
        }
    }
    return imgeye;
}

/// <summary>
/// Reads a .ptZ file (first line: count, then one point index per line) and assigns
/// <paramref name="value"/> to <c>Zpos</c> for each listed index.
/// FIX: replaces three copy-pasted reader loops; the reader is now disposed even if
/// parsing throws (the originals leaked the stream on exception).
/// </summary>
/// <param name="path">Path of the .ptZ index file.</param>
/// <param name="value">Z value to assign to every listed point.</param>
private void ApplyZPositions(string path, int value)
{
    using (var r = new StreamReader(path))
    {
        var cou = int.Parse(r.ReadLine());
        for (var i = 0; i < cou; i++)
        {
            var s = int.Parse(r.ReadLine());
            Zpos[s] = value;
        }
    }
}
/// <summary>
/// Adaptive-thresholds <paramref name="src"/>, collects contours whose bounding boxes look
/// character-sized (50&lt;w&lt;150, 80&lt;h&lt;150), computes an isodata (iterative-mean)
/// threshold for each cropped region, and returns the average of those thresholds.
/// </summary>
/// <param name="src">Grayscale source image.</param>
/// <returns>Average isodata threshold over all matching regions; 0 when none match.</returns>
private double cout_avg_new(Image <Gray, byte> src)
{
    double d = 0;
    List <Rectangle> lsR = new List <Rectangle>();
    Image <Gray, byte> grayImage = new Image <Gray, byte>(src.Width, src.Height);
    CvInvoke.cvAdaptiveThreshold(src, grayImage, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, 21, 2);
    // Morphological close to join fragmented strokes before contour extraction.
    grayImage = grayImage.Dilate(3);
    grayImage = grayImage.Erode(3);
    using (MemStorage storage = new MemStorage())
    {
        Contour <Point> contours = grayImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);
        while (contours != null)
        {
            Rectangle rect = contours.BoundingRectangle;
            if (rect.Width > 50 && rect.Width < 150 && rect.Height > 80 && rect.Height < 150)
            {
                lsR.Add(rect);
            }
            contours = contours.HNext;
        }
    }
    for (int i = 0; i < lsR.Count; i++)
    {
        // FIX: the Bitmaps and the temporary image were never disposed, leaking a GDI
        // handle per region; wrap all three in using blocks.
        using (Bitmap tmp = src.ToBitmap())
        using (Bitmap tmp2 = tmp.Clone(lsR[i], tmp.PixelFormat))
        using (Image <Gray, byte> tmp3 = new Image <Gray, byte>(tmp2))
        {
            // Isodata thresholding: iterate T0 = (mean below T + mean above T) / 2 until stable.
            int T = 0;
            int T0 = 128;
            do
            {
                T = T0;
                int m = 0, M = 0;
                int min = 0, max = 0;
                for (int y = 0; y < tmp3.Rows; y++)
                {
                    for (int x = 0; x < tmp3.Cols; x++)
                    {
                        int value = (int)tmp3.Data[y, x, 0];
                        if (value <= T)
                        {
                            m++;
                            min += value;
                        }
                        else
                        {
                            M++;
                            max += value;
                        }
                    }
                }
                // FIX: a uniform crop puts every pixel in one class, and the original
                // then threw DivideByZeroException; keep the current T0 and stop.
                if (m == 0 || M == 0)
                {
                    break;
                }
                T0 = (min / m + max / M) / 2;
            } while (T - T0 > 1 || T0 - T > 1); // loop until |T - T0| <= 1
            d += (double)T0 / (double)lsR.Count;
        }
    }
    return d;
}
/// <summary>
/// Searches the scene image for the largest 4-vertex contour above a configurable minimum
/// area (derived from <c>rectangleMinSize</c> as a percentage of the scene dimensions).
/// On success, stores its corners in <c>RectangleCorners</c> (sorted afterwards) and its
/// area in <c>RectangleArea</c>. Also drives the METState debug overlays and timers.
/// </summary>
/// <param name="inputimg">Input scene frame (BGR).</param>
/// <returns>True when a qualifying rectangle was found.</returns>
public bool DetectRectangle(Image <Bgr, Byte> inputimg)
{
    bool rectangleIsFound = false;
    #region Canny
    Image <Bgr, Byte> img = new Image <Bgr, byte>(inputimg.Bitmap); // TODO(orig): could this be merged with the line above?
    //Convert the image to grayscale and filter out the noise
    Image <Gray, Byte> gray = img.Convert <Gray, Byte>();
    Gray cannyThreshold = new Gray(BThreshold); //Gray(180);
    Gray cannyThresholdLinking = new Gray(GThreshold); //Gray(120);
    METState.Current.ProcessTimeSceneBranch.Timer("cannyEdges", "Start");
    // PyrDown halves resolution for speed; PyrUp restores it; Dilate(1) thickens edges
    // so FindContours links broken segments.
    Image <Gray, Byte> cannyEdges = gray.PyrDown().Canny(cannyThreshold.Intensity, cannyThresholdLinking.Intensity).PyrUp().Dilate(1);
    METState.Current.ProcessTimeSceneBranch.Timer("cannyEdges", "Stop");
    #endregion Canny
    #region Contour
    // NOTE(review): boxList is never populated or read — appears to be dead code.
    List <MCvBox2D> boxList = new List <MCvBox2D>(); //a box is a rotated rectangle
    // Minimum acceptable area: (rectangleMinSize% of width) * (rectangleMinSize% of height).
    double MinArea = ((double)rectangleMinSize / 100.0) * METState.Current.SceneImageOrginal.Width * ((double)rectangleMinSize / 100.0) * METState.Current.SceneImageOrginal.Height;
    METState.Current.ProcessTimeSceneBranch.Timer("Contours", "Start");
    using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
        for (Contour <Point> contours = cannyEdges.FindContours(
                 Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                 Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
                 storage);
             contours != null;
             contours = contours.HNext)
        {
            // 0.08 has been tested and works well with the PyrDown/Canny/PyrUp/Dilate(1) pipeline above.
            Contour <Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.08, storage);
            // Raising MinArea to each accepted contour's area makes the loop keep only
            // the biggest qualifying quadrilateral.
            if (currentContour.Total == 4 && currentContour.Area > MinArea) //selecting the biggest contour
            {
                rectangleIsFound = true;
                MinArea = currentContour.Area;
                RectangleArea = currentContour.Area;
                #region determine if all the angles in the contour are within [80, 100] degree (disabled)
                // bool isRectangle = true;
                //Point[] pts = currentContour.ToArray();
                //LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
                //for (int i = 0; i < edges.Length; i++)
                //{
                //   double angle = Math.Abs(
                //      edges[(i + 1) % edges.Length].GetExteriorAngleDegree(edges[i]));
                //   if (angle < 80 || angle > 100)
                //   {
                //      isRectangle = false;
                //      break;
                //   }
                //}
                #endregion
                // Record the winner's four corners.
                for (int i = 0; i < 4; i++)
                {
                    RectangleCorners[i].X = currentContour[i].X;
                    RectangleCorners[i].Y = currentContour[i].Y;
                }
            }
        }
    // NOTE(review): called even when nothing was found — presumably sorts stale corners; confirm.
    SortRectangleCorners();
    METState.Current.ProcessTimeSceneBranch.Timer("Contours", "Stop");
    #endregion Contour
    #region draw rectangles (disabled)
    //Image<Bgr, Byte> RectangleImage = img.CopyBlank();
    //foreach (MCvBox2D box in boxList)
    //{
    //   RectangleImage.Draw(box, new Bgr(Color.DarkOrange), 2);
    //}
    //METState.Current.SceneImageForShow = METState.Current.SceneImageForShow.Add(RectangleImage);
    #endregion draw rectangles
    #region draw screen
    if (METState.Current.showEdges)
    {
        // Overlay the edge map onto the displayed scene image.
        // METState.Current.SceneImageProcessed = new Image<Bgr, byte>(cannyEdges.Bitmap);
        // EmgImgProcssing.DrawRectangle(METState.Current.SceneImageProcessed, ScreenCorners, 0, true, "Area = " + RectangleArea);
        METState.Current.SceneImageForShow = METState.Current.SceneImageForShow.Or(new Image <Bgr, byte>(cannyEdges.Bitmap));
    }
    if (METState.Current.showScreen)
    {
        //show the minimum-size rectangle centered on the scene for calibration
        if (METState.Current.showScreenSize)
        {
            AForge.Point RECTcenter = new AForge.Point(METState.Current.SceneImageForShow.Width / 2, METState.Current.SceneImageForShow.Height / 2);
            SizeF RECTsize = new SizeF(((float)(rectangleMinSize) / 100.0f) * METState.Current.SceneImageForShow.Width, ((float)(rectangleMinSize) / 100.0f) * METState.Current.SceneImageForShow.Height);
            EmgImgProcssing.DrawRectangle(METState.Current.SceneImageForShow, RECTcenter, RECTsize);
        }
        //Show found screen
    }
    #endregion draw Screen
    return rectangleIsFound;
}
/// <summary>
/// Per-frame object-detection handler: retrieves a camera frame, prints the frame rate
/// every 30 frames, HSV-thresholds the frame, detects contour candidates within an area
/// band, classifies each via <c>IdentifyObject</c>, and optionally shows the live feed.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="arg">Event arguments (unused).</param>
private void ProcessFrame(object sender, EventArgs arg)
{
    Image <Bgr, Byte> frame = null; // the frame retrieved from camera
    bool showWindow = true;         // toggle whether or not to show the live feed (performance hit!)

    // Parameters you'll want to calibrate.
    int hue = 0;
    int sat = 0;
    int val = 0;
    int maxHue = 255;
    int maxSat = 255;
    int maxVal = 255;
    double ContourAccuracy = 0.0005;
    int minAreaSize = 2000;
    int maxAreaSize = 20000;

    try
    {
        frame = _capture.RetrieveBgrFrame();
    }
    catch (Exception ax)
    {
        Console.WriteLine("Image retrieval failed! quiting: " + ax.ToString());
        return;
    }

    framecounter++; // counter for framerate
    // 30 frames have passed, time to print framerate!
    if (framecounter == 30)
    {
        double framerate = 30.0 / (Convert.ToDouble(watch.ElapsedMilliseconds) / 1000);
        Console.WriteLine(framerate);
        // FIX: framecounter was reset twice in this block; once is enough.
        framecounter = 0;
        watch.Stop();
        watch.Reset();
        watch.Start();
    }

    // FIX: the HSV and threshold images are IDisposable and were leaked on every frame;
    // scope them with using blocks.
    using (Image <Hsv, Byte> frame_HSV = frame.Convert <Hsv, Byte>())
    using (Image <Gray, Byte> frame_thresh = frame_HSV.InRange(new Hsv(hue, sat, val), new Hsv(maxHue, maxSat, maxVal)))
    using (MemStorage storage = new MemStorage())
    {
        List <Contour <Point> > unidentifiedObjects = this.DetectObjects(storage, frame_thresh, ContourAccuracy, minAreaSize, maxAreaSize);
        // NOTE(review): identifiedObjects is built but never consumed here — confirm whether
        // IdentifyObject has side effects or this is work-in-progress.
        List <IdentifiedObject> identifiedObjects = new List <IdentifiedObject> ();
        foreach (Contour <Point> contour in unidentifiedObjects)
        {
            identifiedObjects.Add(this.IdentifyObject(contour, ContourAccuracy));
        }
    }

    if (showWindow)
    {
        CvInvoke.cvShowImage("Capture", frame);
        CvInvoke.cvWaitKey(1);
    }
}
// Handles the left-mouse-button release: crops the rubber-band selection from the source
// image box, then runs circle (Hough), line (Hough) and triangle/rectangle (contour)
// detection on it and shows the annotated result in detimgBox.
private void srcImgBox_MouseUp(object sender, MouseEventArgs e)
{
    int step = 30; // Minimum selectable width and height of the selection region
    pic.Width = 0;
    pic.Height = 0;
    pic.Visible = false; // Hide the selection rubber band
    if (resize == true)
    {
        if ((e.X >= begin_x + step) && (e.Y >= begin_y + step)) // Selection must be at least the minimum size
        {
            Rectangle rec = new Rectangle(begin_x, begin_y, e.X - begin_x, e.Y - begin_y); // The selected rectangular region
            // Crop the selected region and rescale it.
            // NOTE(review): Resize(step, step, ...) targets 30x30 (with preserve-scale flag) —
            // confirm this tiny target size is intended given the Hough radius limits below.
            trimg = new Image <Bgr, byte>(Copy(srcImgBox.Image, rec)).Resize(step, step, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, true);
            // Convert the selection to grayscale; PyrDown/PyrUp suppresses noise.
            Image <Gray, Byte> gray = trimg.Convert <Gray, Byte>().PyrDown().PyrUp();
            #region Circle detection
            double cannyThreshold = 100.0;
            double circleAccumulatorThreshold = 300.0;
            CircleF[] circles = gray.HoughCircles(
                new Gray(cannyThreshold),
                new Gray(circleAccumulatorThreshold),
                2.0,   // Accumulator resolution for circle-center search
                300.0, // Minimum distance between centers
                30,    // Minimum radius
                200    // Maximum radius
                )[0];  // Take circles from the first channel
            #endregion
            #region Canny and edge detection
            double cannyThresholdLinking = 200.0;
            Image <Gray, Byte> cannyEdges = gray.Canny(cannyThreshold, cannyThresholdLinking);
            LineSegment2D[] lines = cannyEdges.HoughLinesBinary(
                20,             // Distance resolution between pixel-connected regions
                Math.PI / 45.0, // Angle resolution, in radians
                100,            // Accumulator threshold
                50,             // Minimum line width
                40              // Maximum allowed gap between line segments
                )[0];           // Take lines from the first channel
            #endregion
            #region Find triangles and rectangles
            List <Triangle2DF> triangleList = new List <Triangle2DF>();
            List <MCvBox2D> boxList = new List <MCvBox2D>();
            using (MemStorage storage = new MemStorage()) // Storage for the contour data
                for (Contour <Point> contours = cannyEdges.FindContours(
                         Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                         Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
                         storage);
                     contours != null;
                     contours = contours.HNext)
                {
                    Contour <Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
                    if (currentContour.Area > 250) // Only consider contours with area above 250
                    {
                        if (currentContour.Total == 3) // The contour has 3 vertices: a triangle
                        {
                            Point[] pts = currentContour.ToArray();
                            triangleList.Add(new Triangle2DF(pts[0], pts[1], pts[2]));
                        }
                        else if (currentContour.Total == 4) // The contour has 4 vertices: a rectangle candidate
                        {
                            #region determine if all the angles in the contour are within [80, 100] degree
                            bool isRectangle = true;
                            Point[] pts = currentContour.ToArray();
                            LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
                            for (int i = 0; i < edges.Length; i++)
                            {
                                double angle = Math.Abs(edges[(i + 1) % edges.Length].GetExteriorAngleDegree(edges[i]));
                                if (angle < 80 || angle > 100)
                                {
                                    isRectangle = false;
                                    break;
                                }
                            }
                            #endregion
                            if (isRectangle)
                            {
                                boxList.Add(currentContour.GetMinAreaRect());
                            }
                        }
                    }
                }
            #endregion
            #region Draw objects
            foreach (Triangle2DF triangle in triangleList)
            {
                trimg.Draw(triangle, new Bgr(Color.Aqua), 1); // Highlight triangles
            }
            foreach (MCvBox2D box in boxList)
            {
                trimg.Draw(box, new Bgr(Color.DarkOrange), 1); // Highlight rectangles
            }
            foreach (CircleF circle in circles)
            {
                trimg.Draw(circle, new Bgr(Color.Green), 1); // Highlight circles
            }
            /* foreach (LineSegment2D line in lines)
             * trimg.Draw(line, new Bgr(Color.Green), 1); */
            detimgBox.Image = trimg;
            #endregion
            // detimgBox.Image = Copy(srcImgBox.Image, rec);
            Comp_button.Enabled = true;
        }
    }
    resize = false;
}
/// <summary>
/// Event handler for the Kinect sensor's AllFramesReady event. Reads the skeleton, depth
/// and color frames (each inside its own using block so the frame is returned to the
/// runtime immediately), builds an opacity mask of pixels near the tracked left wrist,
/// masks the color image to the hand, then extracts the biggest hand contour, derives a
/// time-series signature of its boundary around the palm center, and matches it against
/// known gesture references. Heavy processing is deliberately done OUTSIDE the frame
/// using blocks.
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    // in the middle of shutting down, so nothing to do
    if (null == this.sensor)
    {
        return;
    }
    bool depthReceived = false;
    bool colorReceived = false;
    Skeleton[] skeletons = new Skeleton[0];
    using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
    {
        if (skeletonFrame != null)
        {
            skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
            skeletonFrame.CopySkeletonDataTo(skeletons);
            if (skeletons.Length != 0)
            {
                // Track the left wrist; keyVal becomes its depth in millimeters.
                // NOTE(review): with several tracked skeletons the LAST one with a valid
                // wrist wins — confirm that is intended.
                foreach (Skeleton skel in skeletons)
                {
                    Joint joint0 = skel.Joints[JointType.WristLeft];
                    if (joint0.Position.Z != 0)
                    {
                        myJoint = joint0;
                        // keyVal = joint0.Position.Z;
                        depthPoint = this.sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(joint0.Position, DepthImageFormat.Resolution640x480Fps30);
                        keyVal = depthPoint.Depth;
                    }
                }
            }
        }
    }
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (null != depthFrame)
        {
            // Copy the pixel data from the image to a temporary array
            depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);
            depthReceived = true;
        }
    }
    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        if (null != colorFrame)
        {
            // Copy the pixel data from the image to a temporary array
            colorFrame.CopyPixelDataTo(this.colorPixels);
            colorReceived = true;
        }
    }
    // do our processing outside of the using block
    // so that we return resources to the kinect as soon as possible
    if (true == depthReceived)
    {
        this.sensor.CoordinateMapper.MapDepthFrameToColorFrame(
            DepthFormat,
            this.depthPixels,
            ColorFormat,
            this.colorCoordinates);
        Array.Clear(this.playerPixelData, 0, this.playerPixelData.Length);
        // loop over each row and column of the depth image
        for (int y = 0; y < this.depthHeight; ++y)
        {
            for (int x = 0; x < this.depthWidth; ++x)
            {
                // calculate index into depth array
                int depthIndex = x + (y * this.depthWidth);
                DepthImagePixel depthPixel = this.depthPixels[depthIndex];
                int player = depthPixel.PlayerIndex;
                // keep pixels in a depth band around the tracked wrist:
                // up to 30 mm behind it and up to 1000 mm in front of it
                //if (player > 0)
                if (keyVal != 0 && depthPixel.Depth < (keyVal + 30) && depthPixel.Depth > (keyVal - 1000))
                {
                    // retrieve the depth to color mapping for the current depth pixel
                    ColorImagePoint colorImagePoint = this.colorCoordinates[depthIndex];
                    // scale color coordinates to depth resolution
                    int colorInDepthX = colorImagePoint.X / this.colorToDepthDivisor;
                    int colorInDepthY = colorImagePoint.Y / this.colorToDepthDivisor;
                    // make sure the depth pixel maps to a valid point in color space
                    // check y >= 0 and y < depthHeight to make sure we don't write outside of the array
                    // check x > 0 instead of >= 0 since to fill gaps we set opaque current pixel plus the one to the left
                    // because of how the sensor works it is more correct to do it this way than to set to the right
                    if (colorInDepthX > 0 && colorInDepthX < this.depthWidth && colorInDepthY >= 0 && colorInDepthY < this.depthHeight)
                    {
                        // calculate index into the player mask pixel array
                        int playerPixelIndex = colorInDepthX + (colorInDepthY * this.depthWidth);
                        // set opaque
                        this.playerPixelData[playerPixelIndex] = opaquePixelValue;
                        // compensate for depth/color not corresponding exactly by setting the pixel
                        // to the left to opaque as well
                        this.playerPixelData[playerPixelIndex - 1] = opaquePixelValue;
                    }
                }
            }
        }
    }
    // do our processing outside of the using block
    // so that we return resources to the kinect as soon as possible
    if (true == colorReceived)
    {
        // Write the pixel data into our bitmap
        this.colorBitmap.WritePixels(
            new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
            this.colorPixels,
            this.colorBitmap.PixelWidth * sizeof(int),
            0);
        if (this.playerOpacityMaskImage == null)
        {
            this.playerOpacityMaskImage = new WriteableBitmap(
                this.depthWidth,
                this.depthHeight,
                96,
                96,
                PixelFormats.Bgra32,
                null);
            MaskedColor.OpacityMask = new ImageBrush { ImageSource = this.playerOpacityMaskImage };
        }
        // NOTE(review): the destination rect starts at row 1 and covers depthHeight - 1 rows —
        // presumably skipping the first row on purpose; confirm.
        this.playerOpacityMaskImage.WritePixels(
            new Int32Rect(0, 1, this.depthWidth, this.depthHeight - 1),
            this.playerPixelData,
            this.depthWidth * ((this.playerOpacityMaskImage.Format.BitsPerPixel + 7) / 8),
            0);
        // Convert color frame and player mask into Emgu images for hand extraction.
        Image <Gray, Byte> My_Image = new Image <Gray, byte>(BitmapFromSource(this.colorBitmap));
        Image <Gray, Byte> My_Mask = new Image <Gray, byte>(BitmapFromSource(this.playerOpacityMaskImage));
        Image <Gray, byte> armMask = My_Mask.PyrUp();
        armMask = armMask.Erode(2);
        armMask = armMask.Dilate(1);
        //////////////////////////////////
        // HandG: the grayscale image restricted to the arm mask.
        Image <Gray, Byte> HandG = My_Image.Copy(armMask);
        Gray gray = new Gray(255);
        // iLine: dark pixels inside the (further-eroded) mask — used to find the wrist line.
        Image <Gray, Byte> iLine = HandG.ThresholdBinaryInv(new Gray(50), gray);
        armMask = armMask.Erode(4);
        // CvInvoke.cvNamedWindow("gray");
        //CvInvoke.cvShowImage("gray", armMask);
        iLine = iLine.Copy(armMask);
        // Focus a 140x140 ROI around the tracked wrist position.
        System.Windows.Point myPoint = this.SkeletonPointToScreen(myJoint.Position);
        HandG.ROI = new Rectangle((int)(myPoint.X - 70), (int)(myPoint.Y - 90), 140, 140);
        iLine.ROI = new Rectangle((int)(myPoint.X - 70), (int)(myPoint.Y - 90), 140, 140);
        iLine = iLine.Erode(1);
        //iLine = iLine.Dilate(2);
        //Create the window using the specific name
        // CvInvoke.cvNamedWindow("line");
        //CvInvoke.cvShowImage("line", iLine);
        Image <Gray, Byte> resultImage = HandG.CopyBlank();
        Image <Gray, Single> resultImageIN = resultImage.Convert <Gray, Single>();
        Image <Gray, Byte> maskC = HandG.CopyBlank();
        Image <Bgr, Byte> iAffiche = new Image <Bgr, byte>(resultImage.Width, resultImage.Height);
        Double Result1 = 0;
        Double Result2 = 0;
        HandG = HandG.ThresholdBinary(new Gray(50), gray);
        HandG = HandG.Erode(2);
        // Wrist line candidates from the dark-line image.
        LineSegment2D[] lines = iLine.HoughLinesBinary(1, Math.PI / 45, 15, 5, 15)[0];
        // (a large commented-out experiment that blanked HandG pixels on one side of the
        // detected wrist line was removed here; see version history)
        using (var mem = new MemStorage())
        {
            Contour <System.Drawing.Point> contour = HandG.FindContours(
                CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE,
                RETR_TYPE.CV_RETR_LIST,
                mem);
            // Select the largest contour (assumed to be the hand).
            Contour <System.Drawing.Point> biggestContour = null;
            while (contour != null)
            {
                Result1 = contour.Area;
                if (Result1 > Result2)
                {
                    Result2 = Result1;
                    biggestContour = contour;
                }
                contour = contour.HNext;
            }
            if (biggestContour != null)
            {
                // biggestContour = biggestContour.ApproxPoly(1.5);
                resultImage.Draw(biggestContour, gray, 2);
                maskC.Draw(biggestContour, gray, -1);
                // Palm center = max of the distance transform inside the hand mask.
                Emgu.CV.Image <Gray, byte> binaryIM = resultImage.ThresholdBinaryInv(new Gray(100), new Gray(255));
                CvInvoke.cvDistTransform(binaryIM, resultImageIN, DIST_TYPE.CV_DIST_L1, 3, null, IntPtr.Zero);
                CvInvoke.cvNormalize(resultImageIN, resultImageIN, 0, 1, Emgu.CV.CvEnum.NORM_TYPE.CV_MINMAX, IntPtr.Zero);
                double minD = 0, maxD = 0;
                System.Drawing.Point maxP = System.Drawing.Point.Empty, minP = System.Drawing.Point.Empty;
                CvInvoke.cvMinMaxLoc(resultImageIN, ref minD, ref maxD, ref minP, ref maxP, maskC.Ptr);
                // Gray contour image replicated onto all three display channels.
                iAffiche[0] = resultImage;
                iAffiche[1] = resultImage;
                iAffiche[2] = resultImage;
                if (lines.Length != 0 && lines.Length < 3)
                {
                    System.Drawing.Point lineP1 = lines[0].P1;
                    iAffiche.Draw(lines[0], new Bgr(0, 255, 0), 2);
                    int sPx = lineP1.X;
                    int sPy = lineP1.Y;
                    System.Drawing.Point startP = lineP1;
                    double minDist = 999999.0;
                    System.Drawing.Point[] bContArr = biggestContour.ToArray();
                    firstPoint = bContArr[0];
                    int nbPoints = bContArr.Length;
                    double startAngle = 0;
                    double startDist = 0;
                    double rInsCircle = 99999.0;
                    // startP: contour point nearest the wrist-line endpoint (squared distance);
                    // rInsCircle: radius of the inscribed circle around the palm center.
                    for (int i = 0; i < nbPoints; i++)
                    {
                        System.Drawing.Point v = bContArr[i];
                        double tempDist = ((v.X - sPx) * (v.X - sPx) + (v.Y - sPy) * (v.Y - sPy));
                        double tempCirc = Math.Sqrt((v.X - maxP.X) * (v.X - maxP.X) + (v.Y - maxP.Y) * (v.Y - maxP.Y));
                        if (tempDist < minDist)
                        {
                            minDist = tempDist;
                            startP = v;
                        }
                        if (tempCirc < rInsCircle)
                        {
                            rInsCircle = tempCirc;
                        }
                    }
                    chart1.Series.SuspendUpdates();
                    foreach (var series in chart1.Series)
                    {
                        series.Points.Clear();
                    }
                    List <double[]> pointList = new List <double[]>();
                    pointList.Clear();
                    // Build a (normalized angle, normalized distance) time series of the
                    // contour boundary around the palm center, starting at the wrist point.
                    startAngle = -1 * getTSAngle(maxP, startP);
                    startDist = getTSDist(maxP, startP);
                    for (int i = 0; i < (nbPoints); i++)
                    {
                        System.Drawing.Point v = bContArr[i];
                        tsAngle = -1 * getTSAngle(maxP, v);
                        tsAngle = tsAngle - startAngle;
                        if (tsAngle < 0)
                        {
                            tsAngle = tsAngle + 360;
                        }
                        tsAngle = tsAngle / 360;
                        tsDist = getTSDist(maxP, v);
                        // Distances are measured relative to the inscribed circle; values
                        // inside ~1.9 radii are clamped to zero (palm region).
                        tsDist = (tsDist / (rInsCircle)) - (1.9);
                        if (tsDist < 0)
                        {
                            tsDist = 0.0;
                        }
                        // Rotate the contour array until it starts on a zero-distance (palm)
                        // point, restarting the loop via i = -1.
                        if (tsDist != 0 && i == 0)
                        {
                            System.Drawing.Point a = bContArr[i];
                            Array.Copy(bContArr, 1, bContArr, 0, bContArr.Length - 1);
                            bContArr[bContArr.Length - 1] = a;
                            i = -1;
                        }
                        if (i != -1)
                        {
                            chart1.Series["Series1"].Points.AddXY(tsAngle, tsDist);
                            double[] XY = { tsAngle, tsDist };
                            pointList.Add(XY);
                        }
                    }
                    CircleF startPoint = new CircleF(startP, 3);
                    iAffiche.Draw(startPoint, new Bgr(0, 0, 255), 2);
                    CircleF palmCir = new CircleF(maxP, 1);
                    iAffiche.Draw(palmCir, new Bgr(255, 255, 0), 3);
                    //chart1.Series.Invalidate();
                    ///////////////////SIGNATURES
                    // Match the finger signature against the reference gestures; a gesture
                    // is reported only when recognized on two consecutive frames.
                    List <double[]> inGesteSignatures = new List <double[]>();
                    inGesteSignatures.Clear();
                    inGesteSignatures = getSignatures(chart1.Series["Series1"].Points);
                    int gNo = -1;
                    if ((inGesteSignatures.Count != 0) && (inGesteSignatures.Count < 7))
                    {
                        gNo = findGesture(inGesteSignatures, references);
                        // textBox1.Text = gNo.ToString();
                        if (old_gNo != -1 && gNo != -1 && gNo == old_gNo)
                        {
                            textBox1.Text = gNo.ToString();
                        }
                        old_gNo = gNo;
                    }
                    //chart1.Series.Invalidate();
                    // chart1.Series.ResumeUpdates();
                    CvInvoke.cvNamedWindow("affiche");
                    CvInvoke.cvShowImage("affiche", iAffiche);
                }
            }
        }
    }
}
/// <summary>
/// Detects triangles and rectangles in the picture shown in pictureBox1 and renders
/// them (triangles in light steel blue, rectangles in lime green) into pictureBox2.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event arguments (unused).</param>
private void button2_Click(object sender, EventArgs e)
{
    // Load the source picture and suppress noise with a down/up pyramid pass.
    Image <Bgr, Byte> source = new Image <Bgr, byte>(new Bitmap(pictureBox1.Image));
    Image <Gray, Byte> smoothedGray = source.Convert <Gray, Byte>().PyrDown().PyrUp();

    // Edge map for contour extraction.
    Image <Gray, Byte> edgeMap = smoothedGray.Canny(120, 180);

    // Accumulators for the detected shapes.
    List <Triangle2DF> triangles = new List <Triangle2DF>();
    List <MCvBox2D> rectangles = new List <MCvBox2D>();

    using (MemStorage contourStorage = new MemStorage())
    {
        for (Contour <Point> raw = edgeMap.FindContours(); raw != null; raw = raw.HNext)
        {
            // Polygonal approximation of the raw contour.
            Contour <Point> poly = raw.ApproxPoly(raw.Perimeter * 0.05, contourStorage);

            // Skip contours too small to be meaningful shapes.
            if (raw.Area < 200)
            {
                continue;
            }

            if (poly.Total == 3)
            {
                // Three vertices: a triangle.
                Point[] vertices = poly.ToArray();
                triangles.Add(new Triangle2DF(vertices[0], vertices[1], vertices[2]));
            }
            else if (poly.Total == 4)
            {
                // Four vertices: rectangle candidate — every corner must fall in [75, 105] degrees.
                Point[] vertices = poly.ToArray();
                LineSegment2D[] sides = PointCollection.PolyLine(vertices, true);
                bool allCornersSquare = true;
                for (int i = 0; i < sides.Length; i++)
                {
                    double corner = Math.Abs(sides[(i + 1) % sides.Length].GetExteriorAngleDegree(sides[i]));
                    if (corner < 75 || corner > 105)
                    {
                        allCornersSquare = false;
                        break;
                    }
                }
                if (allCornersSquare)
                {
                    rectangles.Add(poly.GetMinAreaRect());
                }
            }
        }
    }

    // Render the detected shapes onto a blank canvas the size of the source.
    Image <Bgr, Byte> canvas = source.CopyBlank();
    foreach (Triangle2DF triangle in triangles)
    {
        canvas.Draw(triangle, new Bgr(Color.LightSteelBlue), 5);
    }
    foreach (MCvBox2D box in rectangles)
    {
        canvas.Draw(box, new Bgr(Color.LimeGreen), 5);
    }
    pictureBox2.Image = canvas.ToBitmap();
}
/// <summary>
/// Extracts rectangular contours from the image in pictureBox2 (Canny edge
/// detection + polygon approximation), reports how many were found, draws
/// them in blue, and shows a rectangle-sized bitmap in pictureBox3.
/// </summary>
/// <param name="sender">Standard event source (unused)</param>
/// <param name="e">Standard event args (unused)</param>
private void button8_Click(object sender, EventArgs e)
{
    // Grab the source image from the picture box
    Image <Bgr, Byte> temp = new Image <Bgr, byte>(new Bitmap(this.pictureBox2.Image));
    // Image processing: up/down pyramid sampling removes noise from the image
    Image <Gray, Byte> tempGray = temp.Convert <Gray, Byte>().PyrDown().PyrUp();
    // Canny edge detection, thresholds taken from the track bars
    Image <Gray, Byte> tempCanny = tempGray.Canny(trkMin.Value, trkMax.Value);

    // Rectangles detected so far
    List <MCvBox2D> rectangles = new List <MCvBox2D>();
    // Temporary Rectangle marking the region of the most recent detection
    Rectangle r = new Rectangle();

    // Allocate the contour memory storage
    using (MemStorage storage = new MemStorage())
    {
        // Extract contour points from the Canny edge image
        Contour <Point> contours = tempCanny.FindContours(
            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_CCOMP, storage);

        // Walk the contour chain
        for (; contours != null; contours = contours.HNext)
        {
            // Approximate the current contour with a polygon
            Contour <Point> contour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);

            // Keep only regions above the minimum area from the track bar
            if (contour.Area > trkArea.Value)
            {
                // Four vertices means a rectangle candidate
                if (contour.Total == 4)
                {
                    // Analyse the corner angles: reject as soon as any angle
                    // is below 80 or above 100 degrees.
                    bool isRectecgle = true;
                    Point[] points = contour.ToArray();
                    LineSegment2D[] vertices = PointCollection.PolyLine(points, true);
                    for (int i = 0; i < vertices.Length; i++)
                    {
                        double angle = Math.Abs(vertices[(i + 1) % vertices.Length].GetExteriorAngleDegree(vertices[i]));
                        if (angle < 80 || angle > 100)
                        {
                            isRectecgle = false;
                            break;
                        }
                    }

                    if (isRectecgle)
                    {
                        MCvBox2D box = contour.GetMinAreaRect();
                        rectangles.Add(box);
                        r.Location = points[0];
                        // BUG FIX: the original read rectangles[0].size here, so
                        // 'r' always kept the size of the FIRST rectangle found.
                        // Use the rectangle that was just detected instead.
                        r.Size = box.size.ToSize();
                    }
                }
            }
        }
    }

    MessageBox.Show("Rectecgle: " + rectangles.Count.ToString());

    // Draw each rectangle; a bitmap the size of the rectangle is created and
    // shown in pictureBox3 (only the last one survives the loop).
    foreach (MCvBox2D rectacgle in rectangles)
    {
        temp.Draw(rectacgle, new Bgr(Color.Blue), 3);
        Bitmap bitmap = new Bitmap((int)rectacgle.size.Width, (int)rectacgle.size.Height);
        using (Graphics g = Graphics.FromImage(bitmap))
        {
            //g.DrawImage(temp.Bitmap, r.Location.X, r.Location.Y, r, GraphicsUnit.Pixel);
            //g.DrawImage()
        }
        // NOTE(review): earlier bitmaps assigned here are replaced without
        // being disposed — consider moving this assignment outside the loop.
        pictureBox3.Image = bitmap;
    }
    pictureBox2.Image = temp.Bitmap;
}
/// <summary>
/// Get the contour that defines the blob.
/// </summary>
/// <param name="storage">The memory storage in which the resulting contour will be allocated</param>
/// <returns>The contour of the blob</returns>
public Contour<Point> GetContour(MemStorage storage)
{
    Contour<Point> result = new Contour<Point>(storage);
    CvInvoke.cvbCvBlobGetContour(_ptr, result);
    return result;
}
/// <summary>
/// Sets the Camera Source to a depth map showing contours.
/// Directly accesses the underlying image buffer of the DepthFrame to
/// create a displayable bitmap.
/// This function requires the /unsafe compiler option as we make use of direct
/// access to the native memory pointed to by the depthFrameData pointer.
/// </summary>
/// <param name="depthFrame">The depth frame whose FrameDescription supplies the output width/height</param>
/// <param name="depthFrameData">Pointer to the DepthFrame image data</param>
/// <param name="depthFrameDataSize">Size of the DepthFrame image data</param>
/// <param name="minDepth">The minimum reliable depth value for the frame (only referenced by the disabled code path below)</param>
/// <param name="maxDepth">The maximum reliable depth value for the frame (only referenced by the disabled code path below)</param>
private unsafe void ProcessContourFrameData(DepthFrame depthFrame, IntPtr depthFrameData, uint depthFrameDataSize, ushort minDepth, ushort maxDepth)
{
    // depth frame data is a 16 bit value
    ushort *frameData = (ushort *)depthFrameData;

    // Earlier (disabled) conversion that mapped depth directly to a byte range:
    /*for (int i = 0; i < (int)(depthFrameDataSize / this.depthFrameDescription.BytesPerPixel); ++i)
     * {
     *  // Get the depth for this pixel
     *  ushort depth = frameData[i];
     *
     *  // To convert to a byte, we're mapping the depth value to the byte range.
     *  // Values outside the reliable depth range are mapped to 0 (black).
     *  this.depthPixels[i] = (byte)(depth >= minDepth && depth <= maxDepth ? (depth / MapDepthToByte) : 0);
     * }*/

    int depth;
    int gray;
    int loThreshold = 25;      // lower depth cutoff (applies after the >> 1 shift below)
    int hiThreshold = 1600;    // upper depth cutoff
    int bytesPerPixel = 4;     // output buffer is Bgr32: 4 bytes per pixel
    byte[] rgb = new byte[3];  // NOTE(review): never used — candidate for removal
    byte[] enhPixelData = new byte[depthFrame.FrameDescription.Width * depthFrame.FrameDescription.Height * bytesPerPixel];

    // Build a grayscale Bgr32 buffer: out-of-range depths render white (0xFF),
    // in-range depths are scaled into 0..255 via 255*depth/0xFFF. The 4th byte
    // of each pixel is left at 0 (ignored by Bgr32).
    for (int i = 0, j = 0; i < (int)(depthFrameDataSize / this.depthFrameDescription.BytesPerPixel); i++, j += bytesPerPixel)
    {
        depth = frameData[i] >> 1;   // NOTE(review): assumes the lowest bit is not depth data — confirm against the sensor's frame format
        if (depth < loThreshold || depth > hiThreshold)
        {
            gray = 0xFF;
            //gray = 0;
        }
        else
        {
            gray = (255 * depth / 0xFFF);
        }
        enhPixelData[j] = (byte)gray;
        enhPixelData[j + 1] = (byte)gray;
        enhPixelData[j + 2] = (byte)gray;
    }

    // Disabled direct BitmapSource path, superseded by the WriteableBitmap below:
    /*camera.Source = BitmapSource.Create(depthFrame.FrameDescription.Width, depthFrame.FrameDescription.Height,
     * 96, 96, PixelFormats.Bgr32, null,
     * enhPixelData,
     * depthFrame.FrameDescription.Width * bytesPerPixel);*/

    int width = depthFrame.FrameDescription.Width;
    int height = depthFrame.FrameDescription.Height;
    System.Windows.Media.PixelFormat format = PixelFormats.Bgr32;   // NOTE(review): unused local
    // Copy the enhanced pixel buffer into a WPF bitmap (stride = width * sizeof(int) bytes)
    WriteableBitmap colorBitmap = new WriteableBitmap(width, height, 96.0, 96.0, PixelFormats.Bgr32, null);
    colorBitmap.WritePixels(
        new Int32Rect(0, 0, colorBitmap.PixelWidth, colorBitmap.PixelHeight),
        enhPixelData,
        colorBitmap.PixelWidth * sizeof(int), 0);

    // Bridge WPF -> GDI -> Emgu so the OpenCV routines can process the depth image
    Bitmap bmp2 = BitmapFromWriteableBitmap(colorBitmap);
    Image <Bgr, Byte> img = new Image <Bgr, Byte>(bmp2);
    Image <Gray, Byte> grey = new Image <Gray, Byte>(bmp2.Size);
    // Keep everything below near-white (out-of-range pixels were written as 0xFF),
    // then clean up the mask with erode/dilate passes
    CvInvoke.cvInRangeS(img.Ptr, new MCvScalar(0.0, 0.0, 0.0), new MCvScalar(250.0, 250.0, 250.0), grey.Ptr);
    CvInvoke.cvErode(grey.Ptr, grey.Ptr, (IntPtr)null, 4);
    CvInvoke.cvDilate(grey.Ptr, grey.Ptr, (IntPtr)null, 3);
    Image <Gray, Byte> dst = new Image <Gray, Byte>(grey.Size);
    Image <Rgba, Byte> dst2 = new Image <Rgba, Byte>(grey.Size);
    // Edge image that feeds the Hough transform
    CvInvoke.cvCanny(grey.Ptr, dst.Ptr, 50, 200, 3);
    CvInvoke.cvCvtColor(dst, dst2, Emgu.CV.CvEnum.COLOR_CONVERSION.CV_GRAY2BGR);
    // byte a = grey.Data[200, 200, 0];

    using (MemStorage stor = new MemStorage())
    {
        // Standard Hough transform: each sequence element is a (rho, theta) pair
        IntPtr lines =
            CvInvoke.cvHoughLines2(dst.Ptr, stor.Ptr, Emgu.CV.CvEnum.HOUGH_TYPE.CV_HOUGH_STANDARD, 10, (10 * Math.PI) / 180, 50, 50, 10);
        int maxLines = 100;   // draw at most 100 detected lines
        for (int i = 0; i < maxLines; i++)
        {
            IntPtr line = CvInvoke.cvGetSeqElem(lines, i);
            if (line == IntPtr.Zero)
            {
                // No more lines
                break;
            }
            MCvScalar color = new MCvScalar(255, 0, 0);
            PolarCoordinates coords = (PolarCoordinates)System.Runtime.InteropServices.Marshal.PtrToStructure(line, typeof(PolarCoordinates));
            float rho = coords.Rho, theta = coords.Theta;
            System.Drawing.Point pt1 = new System.Drawing.Point();
            System.Drawing.Point pt2 = new System.Drawing.Point();
            // Convert the polar line (rho, theta) into two far-apart cartesian
            // endpoints so cvLine spans the whole image
            double a = Math.Cos(theta), b = Math.Sin(theta);
            double x0 = a * rho, y0 = b * rho;
            pt1.X = (int)(x0 * 10 + 100000 * (-b));
            pt1.Y = (int)(y0 * 10 + 100000 * (a));
            pt2.X = (int)(x0 * 10 - 100000 * (-b));
            pt2.Y = (int)(y0 * 10 - 100000 * (a));
            //PolarCoordinates coords2= (PolarCoordinates)System.Runtime.InteropServices.Marshal.PtrToStructure(line2, typeof(PolarCoordinates));
            // CvInvoke.cvLine(dst2, new System.Drawing.Point((int)coords1.Rho, (int)coords1.Theta), new System.Drawing.Point((int)coords2.Rho, (int)coords2.Theta), color, 3, LINE_TYPE.EIGHT_CONNECTED, 8);
            CvInvoke.cvLine(dst2, pt1, pt2, color, 3, LINE_TYPE.CV_AA, 8);
            //LineSegment2D line = new LineSegment2D( new System.Drawing.Point((int)coords1.Rho, (int)coords1.Theta), new System.Drawing.Point((int)coords2.Rho, (int)coords2.Theta));
            //dst2.Draw(line, new Rgba(255, 0, 0,0), 5);
        }
    }
    // Note: the edge image (dst), not the line overlay (dst2), is what gets displayed
    camera.Source = ToBitmapSource3(dst);
}
/// <summary>
/// Returns coefficients of the classifier trained for people detection
/// (for the default window size).
/// </summary>
/// <returns>The default people detector</returns>
public static float[] GetDefaultPeopleDetector()
{
    using (MemStorage storage = new MemStorage())
    {
        Seq<float> detector = new Seq<float>(storage);
        gpuHOGDescriptorPeopleDetectorCreate(detector);
        return detector.ToArray();
    }
}
/// <summary>
/// Detect the MSER keypoints from the image.
/// </summary>
/// <param name="image">The image to extract MSER keypoints from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <returns>An array of MSER key points</returns>
public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image, Image<Gray, byte> mask)
{
    using (MemStorage storage = new MemStorage())
    {
        Seq<MKeyPoint> keypoints = new Seq<MKeyPoint>(storage);
        CvMSERKeyPoints(image, mask, keypoints.Ptr, ref this);
        return keypoints.ToArray();
    }
}
// this is for object detection
/// <summary>
/// Per-frame camera callback: retrieves a frame, thresholds it in HSV space,
/// matches the resulting contours against already-identified objects,
/// registers new objects, and (when showWindow is set) renders the tracking
/// overlay and prints the frame rate every 30 frames.
/// </summary>
/// <param name="sender">Standard event source (unused)</param>
/// <param name="arg">Standard event args (unused)</param>
private void ProcessFrame(object sender, EventArgs arg)
{
    Image <Bgr, Byte> frame = null;  // the frame retrieved from the camera

    try
    {
        frame = _capture.RetrieveBgrFrame();
    }
    catch (Exception ax)
    {
        Console.WriteLine("Image retrieval failed! quiting: " + ax.ToString());
        return;
    }

    framecounter++;  // counter for framerate

    // 30 frames have passed, time to print framerate!
    if (framecounter == 30)
    {
        double framerate = 30.0 / (Convert.ToDouble(watch.ElapsedMilliseconds) / 1000);
        if (showWindow)
        {
            Console.WriteLine(framerate);
        }
        // FIX: the original reset framecounter twice and called Stop() before
        // Reset() (Reset already stops the stopwatch) — collapsed to one reset.
        framecounter = 0;
        watch.Reset();
        watch.Start();
    }

    // FIX: the HSV conversion and the threshold image wrap native memory and
    // leaked one pair on every frame in the original — dispose them here.
    using (Image <Hsv, Byte> frame_HSV = frame.Convert <Hsv, Byte> ())
    using (Image <Gray, Byte> frame_thresh = frame_HSV.InRange(new Hsv(hue, sat, val), new Hsv(maxHue, maxSat, maxVal)))
    using (MemStorage storage = new MemStorage())
    {
        List <Contour <Point> > unidentifiedObjects = this.DetectObjects(storage, frame_thresh, ContourAccuracy, minAreaSize, maxAreaSize);

        foreach (Contour <Point> contour in unidentifiedObjects)
        {
            long timeinms = DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond;
            bool found = false;

            // center of the contour's bounding rectangle
            double rectHeight = (double)contour.BoundingRectangle.Height;
            double rectWidth = (double)contour.BoundingRectangle.Width;
            double dy = 0.5 * rectHeight + contour.BoundingRectangle.Y;
            double dx = 0.5 * rectWidth + contour.BoundingRectangle.X;

            if (identifiedObjects.Count > 0)
            {
                foreach (IdentifiedObject io in identifiedObjects)
                {
                    // accuracy is in pixels, 10 for now
                    if (io.liesWithin((int)dx, (int)dy, recentlyChanged, maxPixTravel, timeinms))
                    {
                        found = true;
                        break;
                    }
                }
            }

            // if not found, add object!
            if (!found)
            {
                IdentifiedObject iox = this.IdentifyObject(contour, ContourAccuracy, timeinms);
                if (iox != null)
                {
                    if (showWindow)
                    {
                        Console.WriteLine("new object detected");
                    }
                    identifiedObjects.Add(iox);
                }
            }
        }

        if (recentlyChanged)
        {
            recentlyChanged = false;
        }
    }

    if (showWindow)
    {
        List <IdentifiedObject> toRemove = new List <IdentifiedObject>();
        foreach (IdentifiedObject io in identifiedObjects)
        {
            long timeinms = DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond;
            Point center = io.getPosition(timeinms);
            if (center.X == -1)
            {
                // object expired — collect it; removal inside this loop would
                // invalidate the enumeration
                toRemove.Add(io);
            }
            else
            {
                PointF centerf = new PointF((float)center.X, (float)center.Y);
                CircleF circlef = new CircleF(centerf, 5);
                // marker thickness encodes the detected shape
                if (io.getShape() == "circle")
                {
                    frame.Draw(circlef, io.getColor(), 5);
                }
                else if (io.getShape() == "square")
                {
                    frame.Draw(circlef, io.getColor(), 10);
                }
                else
                {
                    frame.Draw(circlef, io.getColor(), 1);
                }
            }
        }
        foreach (IdentifiedObject io in toRemove)
        {
            identifiedObjects.Remove(io);
        }
        CvInvoke.cvShowImage("Capture", frame);
        CvInvoke.cvWaitKey(1);
    }
}