/// <summary>
/// Detects circles, straight lines, and rectangles/triangles in <paramref name="frame"/>
/// (according to the flags in <paramref name="detectionVars"/>) and returns the frame with
/// the detected shapes drawn on top as a semi-transparent green overlay.
/// Uses CUDA-accelerated paths when a CUDA device is present, CPU fallbacks otherwise.
/// </summary>
/// <param name="frame">Source video frame.</param>
/// <param name="detectionVars">Thresholds, radii and calc* flags controlling which detectors run.</param>
/// <returns>The annotated frame, or the original <paramref name="frame"/> if no overlay was produced.</returns>
public static Bitmap PerformShapeDetection(Bitmap frame, ShapeDetectionVariables detectionVars)
{
    Image<Bgr, Byte> img = new Image<Bgr, byte>(frame);
    Mat MatImg = img.Mat;
    Mat outputImg = new Mat();

    // Noise reduction: Gaussian blur on the GPU, pyramid down/up smoothing on the CPU.
    if (CudaInvoke.HasCuda)
    {
        using (GpuMat gMatSrc = new GpuMat())
        using (GpuMat gMatDst = new GpuMat())
        {
            gMatSrc.Upload(MatImg);
            // NOTE(review): a 1x1 kernel makes this blur effectively a no-op — confirm intended.
            CudaGaussianFilter noiseReducetion = new CudaGaussianFilter(
                MatImg.Depth, img.NumberOfChannels,
                MatImg.Depth, img.NumberOfChannels,
                new Size(1, 1), 0);
            noiseReducetion.Apply(gMatSrc, gMatDst);
            gMatDst.Download(outputImg);
        }
    }
    else
    {
        Mat pyrDown = new Mat();
        CvInvoke.PyrDown(img, pyrDown);
        CvInvoke.PyrUp(pyrDown, img);
        outputImg = img.Mat;
    }

    // Greyscale image shared by the Hough circle transform and the Canny edge detector.
    UMat uimage = new UMat();
    CvInvoke.CvtColor(outputImg, uimage, ColorConversion.Bgr2Gray);

    CircleF[] circles = new CircleF[0];
    if (detectionVars.calcCircles)
    {
        circles = CvInvoke.HoughCircles(
            uimage,
            HoughType.Gradient,
            1.0,   // accumulator resolution (same as input image)
            20.0,  // minimum distance between detected centres
            detectionVars.circleCannyThreshold,
            // HoughCircles requires a positive accumulator threshold, so clamp 0 up to 1.
            detectionVars.circleAccumulatorThreshold == 0 ? 1 : detectionVars.circleAccumulatorThreshold,
            detectionVars.minradius,
            detectionVars.maxRadius);
    }

    #region Canny and edge detection
    UMat cannyEdges = new UMat();
    CvInvoke.Canny(uimage, cannyEdges, detectionVars.lineCannyThreshold, detectionVars.cannyThresholdLinking);
    LineSegment2D[] lines = new LineSegment2D[0];
    if (detectionVars.calcLines)
    {
        lines = CvInvoke.HoughLinesP(
            cannyEdges,
            1,               // distance resolution in pixel-related units
            Math.PI / 45.0,  // angle resolution measured in radians
            detectionVars.lineThreshold,
            detectionVars.minLineWidth, // min line width
            10);             // max allowed gap between collinear segments
    }
    #endregion

    #region Find triangles and rectangles
    List<RotatedRect> boxList = new List<RotatedRect>(); // a box is a rotated rectangle
    if (detectionVars.calcRectTri)
    {
        using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
        {
            CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
            int count = contours.Size;
            for (int i = 0; i < count; i++)
            {
                using (VectorOfPoint contour = contours[i])
                using (VectorOfPoint approxContour = new VectorOfPoint())
                {
                    // Simplify the contour; 5% of arc length as epsilon keeps only dominant vertices.
                    CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                    if (CvInvoke.ContourArea(approxContour, false) > 250) // only consider contours with area greater than 250
                    {
                        if (approxContour.Size == 4) // the contour has 4 vertices
                        {
                            #region determine if all the angles in the contour are within [80, 100] degree
                            bool isRectangle = true;
                            Point[] pts = approxContour.ToArray();
                            LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
                            for (int j = 0; j < edges.Length; j++)
                            {
                                double angle = Math.Abs(
                                    edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
                                if (angle < 80 || angle > 100)
                                {
                                    isRectangle = false;
                                    break;
                                }
                            }
                            #endregion
                            if (isRectangle)
                            {
                                boxList.Add(CvInvoke.MinAreaRect(approxContour));
                            }
                        }
                    }
                }
            }
        }
    }
    #endregion

    // Semi-transparent layer the shape drawings are blended onto, plus a BGRA copy of the
    // source frame that the CUDA path composites the layers into.
    Image<Bgra, Byte> alphaImgShape = new Image<Bgra, byte>(img.Size.Width, img.Size.Height, new Bgra(0, 0, 0, .5));
    Mat alphaimg = new Mat();
    CvInvoke.CvtColor(img, alphaimg, ColorConversion.Bgr2Bgra);

    #region draw rectangles and triangles
    if (detectionVars.calcRectTri)
    {
        Image<Bgr, Byte> triangleRectangleImage = new Image<Bgr, Byte>(img.Size);
        foreach (RotatedRect box in boxList)
        {
            CvInvoke.Polylines(triangleRectangleImage, Array.ConvertAll(box.GetVertices(), Point.Round), true,
                new Bgr(0, 255, 0).MCvScalar, 2);
        }
        CvInvoke.AddWeighted(alphaImgShape, .5, BlackTransparent(triangleRectangleImage), .5, 0, alphaImgShape);
        if (CudaInvoke.HasCuda)
        {
            using (GpuMat gMatSrc = new GpuMat())
            using (GpuMat gMatSrc2 = new GpuMat())
            using (GpuMat gMatDst = new GpuMat())
            {
                gMatSrc.Upload(alphaimg);
                gMatSrc2.Upload(alphaImgShape);
                CudaInvoke.AlphaComp(gMatSrc, gMatSrc2, gMatDst, AlphaCompTypes.Plus);
                gMatDst.Download(alphaimg);
            }
        }
        else
        {
            img = Overlay(img, alphaImgShape);
        }
    }
    #endregion

    #region draw circles
    if (detectionVars.calcCircles)
    {
        Image<Bgr, Byte> circleImage = new Image<Bgr, Byte>(img.Size);
        // Cap at 10 circles to keep the overlay readable.
        foreach (CircleF circle in circles.Take(10))
        {
            CvInvoke.Circle(circleImage, Point.Round(circle.Center), (int)circle.Radius,
                new Bgr(0, 255, 0).MCvScalar, 2);
        }
        alphaImgShape = new Image<Bgra, byte>(img.Size.Width, img.Size.Height, new Bgra(0, 0, 0, .5));
        CvInvoke.AddWeighted(alphaImgShape, .7, BlackTransparent(circleImage), .5, 0, alphaImgShape);
        if (CudaInvoke.HasCuda)
        {
            using (GpuMat gMatSrc = new GpuMat())
            using (GpuMat gMatSrc2 = new GpuMat())
            using (GpuMat gMatDst = new GpuMat())
            {
                gMatSrc.Upload(alphaimg);
                gMatSrc2.Upload(alphaImgShape);
                CudaInvoke.AlphaComp(gMatSrc, gMatSrc2, gMatDst, AlphaCompTypes.Plus);
                gMatDst.Download(alphaimg);
            }
        }
        else
        {
            img = Overlay(img, alphaImgShape);
        }
    }
    #endregion

    #region draw lines
    if (detectionVars.calcLines)
    {
        Image<Bgr, Byte> lineImage = new Image<Bgr, Byte>(img.Size);
        foreach (LineSegment2D line in lines)
        {
            CvInvoke.Line(lineImage, line.P1, line.P2, new Bgr(0, 255, 0).MCvScalar, 2);
        }
        alphaImgShape = new Image<Bgra, byte>(img.Size.Width, img.Size.Height, new Bgra(0, 0, 0, .5));
        CvInvoke.AddWeighted(alphaImgShape, .5, BlackTransparent(lineImage), .5, 0, alphaImgShape);
        if (CudaInvoke.HasCuda)
        {
            using (GpuMat gMatSrc = new GpuMat())
            using (GpuMat gMatSrc2 = new GpuMat())
            using (GpuMat gMatDst = new GpuMat())
            {
                gMatSrc.Upload(alphaimg);
                gMatSrc2.Upload(alphaImgShape);
                CudaInvoke.AlphaComp(gMatSrc, gMatSrc2, gMatDst, AlphaCompTypes.Plus);
                gMatDst.Download(alphaimg);
            }
        }
        else
        {
            img = Overlay(img, alphaImgShape);
        }
    }
    #endregion

    GC.Collect(); // author note: keeps memory growth in check during continuous frame processing

    // BUG FIX: without CUDA the overlays are composited into `img` (via Overlay), not into
    // `alphaimg` — `alphaimg` was converted from `img` before any drawing, so returning it
    // unconditionally discarded every non-CUDA overlay. Return the image that was drawn on.
    if (CudaInvoke.HasCuda)
    {
        return alphaimg.Bitmap ?? frame;
    }
    return img.ToBitmap() ?? frame;
}
/// <summary>
/// Computes dense optical flow between the previous and the current frame using the
/// Farneback algorithm, and returns the current frame with the flow field drawn as a
/// semi-transparent overlay. Stores the current frame in <c>matframe</c> for the next call.
/// </summary>
/// <param name="bmp">Current video frame.</param>
/// <param name="optiVariables">Flow-visualisation settings, including the frame reduction factor.</param>
/// <param name="cam">Camera the frame came from (currently unused here).</param>
/// <returns>The annotated frame, or <paramref name="bmp"/> unchanged when no comparison is possible.</returns>
public Bitmap Dense_Optical_Flow(Bitmap bmp, OpticalFlowVariable optiVariables, Camera cam)
{
    // Guard against a zero/negative reduction factor coming from the UI.
    frameReduction = optiVariables.frameReduction < 1 ? 1 : optiVariables.frameReduction;

    // The frame stored on the last call becomes the "previous" frame for this comparison.
    prev_frame = matframe;
    Image<Bgr, Byte> imageCV = new Image<Bgr, byte>(bmp); // Image class from Emgu.CV
    matframe = imageCV.Mat;                               // current frame as a Mat
    if (prev_frame == null)
    {
        return bmp; // very first frame: nothing to compare against yet
    }

    // Count processed frame pairs.
    frame_nr += 1;

    // Initialise this image before the frames are resized below, so it keeps the original size.
    img_average_vectors = new Image<Bgr, byte>(matframe.Width, matframe.Height);
    orig_height = matframe.Height;

    // Shrink the current frame by the reduction factor to lower the processing load.
    Mat matFramDst = new Mat();
    if (CudaInvoke.HasCuda)
    {
        using (GpuMat gMatSrc = new GpuMat())
        using (GpuMat gMatDst = new GpuMat())
        {
            gMatSrc.Upload(matframe);
            Emgu.CV.Cuda.CudaInvoke.Resize(gMatSrc, gMatDst, new Size(0, 0),
                (double)1 / frameReduction, (double)1 / frameReduction);
            gMatDst.Download(matFramDst);
        }
    }
    else
    {
        // BUG FIX: this resize previously went through CUDA unconditionally and would throw
        // on machines without a CUDA device; mirror the HasCuda guard used everywhere else
        // in this file with a CPU fallback.
        CvInvoke.Resize(matframe, matFramDst, new Size(0, 0),
            (double)1 / frameReduction, (double)1 / frameReduction);
    }
    matframe = matFramDst;

    // If the reduction factor changed between calls the two frames no longer match in size.
    if (prev_frame.Height != matframe.Height)
    {
        return bmp;
    }

    // Greyscale copies of both frames — CalcOpticalFlowFarneback requires single-channel input.
    Image<Gray, Byte> prev_grey_img = new Image<Gray, byte>(prev_frame.Width, prev_frame.Height);
    Image<Gray, Byte> curr_grey_img = new Image<Gray, byte>(matframe.Width, matframe.Height);

    // Per-pixel flow components, one image for each direction (x and y).
    Image<Gray, float> flow_x = new Image<Gray, float>(matframe.Width, matframe.Height);
    Image<Gray, float> flow_y = new Image<Gray, float>(matframe.Width, matframe.Height);

    CvInvoke.CvtColor(matframe, curr_grey_img, ColorConversion.Bgr2Gray);
    CvInvoke.CvtColor(prev_frame, prev_grey_img, ColorConversion.Bgr2Gray);

    // Farneback parameters:
    //   pyrScale  0.5 — each pyramid layer is half the size of the previous one
    //   levels    3   — number of pyramid levels
    //   winSize   10  — averaging window; larger = more noise-robust but blurrier
    //   iterations 3  — iterations per pyramid level
    //   polyN     6   — pixel neighbourhood size; higher = more precision, more blur
    //   polySigma 1.3, flags 0
    CvInvoke.CalcOpticalFlowFarneback(prev_grey_img, curr_grey_img, flow_x, flow_y,
        0.5, 3, 10, 3, 6, 1.3, 0);

    // Render the flow field on top of the (reduced-size) current frame.
    Image<Bgr, Byte> farnebackImg = Draw_Farneback_flow_map(
        matframe.ToImage<Bgr, Byte>(), flow_x, flow_y, optiVariables);

    // Release per-frame scratch images.
    prev_grey_img.Dispose();
    curr_grey_img.Dispose();
    flow_x.Dispose();
    flow_y.Dispose();

    // Blend the flow visualisation over the original-size input frame.
    Image<Bgra, Byte> alphaImgShape = new Image<Bgra, byte>(
        imageCV.Size.Width, imageCV.Size.Height, new Bgra(0, 0, 0, .5));
    CvInvoke.AddWeighted(alphaImgShape, .5, BlackTransparent(farnebackImg), .5, 0, alphaImgShape);
    Mat alphaimg = new Mat();
    CvInvoke.CvtColor(imageCV, alphaimg, ColorConversion.Bgr2Bgra);
    if (CudaInvoke.HasCuda)
    {
        using (GpuMat gMatSrc = new GpuMat())
        using (GpuMat gMatSrc2 = new GpuMat())
        using (GpuMat gMatDst = new GpuMat())
        {
            gMatSrc.Upload(alphaimg);
            gMatSrc2.Upload(alphaImgShape);
            CudaInvoke.AlphaComp(gMatSrc, gMatSrc2, gMatDst, AlphaCompTypes.Plus);
            gMatDst.Download(alphaimg);
        }
        return alphaimg.Bitmap;
    }
    else
    {
        return Overlay(imageCV, alphaImgShape).ToBitmap();
    }
}