public static byte[] GenerateDetectedImage(Image<Bgr, Byte> image, List<System.Drawing.Rectangle> faces, List<System.Drawing.Rectangle> eyes)
{
    // Outline every detected face in red and every detected eye in blue,
    // then encode the annotated image as JPEG.
    foreach (System.Drawing.Rectangle face in faces)
        image.Draw(face, new Bgr(System.Drawing.Color.Red), 10);

    foreach (System.Drawing.Rectangle eye in eyes)
        image.Draw(eye, new Bgr(System.Drawing.Color.Blue), 10);

    return image.ToJpegData();
}
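// GenerateDetectedImage expects the face and eye rectangles to be detected
// elsewhere. A minimal sketch of one way to produce them, assuming Emgu CV 3.x
// and that the stock OpenCV Haar cascade XML files sit next to the executable
// (file names and detector parameters below are assumptions, not part of the
// original snippet):
public static byte[] DetectAndRender(Image<Bgr, byte> image)
{
    using (var faceCascade = new CascadeClassifier("haarcascade_frontalface_default.xml"))
    using (var eyeCascade = new CascadeClassifier("haarcascade_eye.xml"))
    using (Image<Gray, byte> gray = image.Convert<Gray, byte>())
    {
        var faces = new List<System.Drawing.Rectangle>(faceCascade.DetectMultiScale(gray, 1.1, 10, new System.Drawing.Size(20, 20)));
        var eyes = new List<System.Drawing.Rectangle>(eyeCascade.DetectMultiScale(gray, 1.1, 10, new System.Drawing.Size(10, 10)));
        return GenerateDetectedImage(image, faces, eyes);
    }
}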
public Image<Bgr, Byte> GrabFrame()
{
    // Synthesize a 300x300 white frame stamped with the current tick count
    // (Emgu CV 3.x text-drawing overload, taking a FontFace directly).
    Image<Bgr, Byte> img = new Image<Bgr, byte>(300, 300, new Bgr(255, 255, 255));
    String str = DateTime.Now.Ticks.ToString();
    img.Draw(str, new System.Drawing.Point(50, 150), FontFace.HersheyPlain, 1.0, new Bgr(0, 0, 0));
    return img;
}
public Image<Bgr, Byte> GrabFrame()
{
    // The same frame grabber written against the Emgu CV 2.x text API
    // (an MCvFont passed by ref).
    Image<Bgr, Byte> img = new Image<Bgr, byte>(300, 300, new Bgr(255, 255, 255));
    MCvFont f = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 1.0, 1.0);
    String str = DateTime.Now.Ticks.ToString();
    img.Draw(str, ref f, new System.Drawing.Point(50, 150), new Bgr(0, 0, 0));
    return img;
}
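// A hypothetical caller for either GrabFrame variant; ImageViewer lives in the
// Emgu.CV.UI assembly and the window title is made up:
using (Image<Bgr, Byte> frame = GrabFrame())
{
    Emgu.CV.UI.ImageViewer.Show(frame, "Synthesized frame");
}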
static void Main(string[] args)
{
    String win1 = "Test Window"; //The name of the window
    CvInvoke.cvNamedWindow(win1); //Create the window using the specific name

    Image<Bgr, Byte> img = new Image<Bgr, byte>(400, 200, new Bgr(255, 0, 0)); //Create an image of 400x200 of Blue color
    MCvFont f = new MCvFont(FONT.CV_FONT_HERSHEY_COMPLEX, 1.0, 1.0); //Create the font

    img.Draw("Hello, world", ref f, new System.Drawing.Point(10, 80), new Bgr(0, 255, 0)); //Draw "Hello, world." on the image using the specific font

    CvInvoke.cvShowImage(win1, img); //Show the image
    CvInvoke.cvWaitKey(0); //Wait for the key pressing event
    CvInvoke.cvDestroyWindow(win1); //Destroy the window
}
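// The cv-prefixed CvInvoke calls and MCvFont above are the legacy Emgu CV 2.x
// API. A sketch of the same "Hello, world" against the 3.x API (assuming Emgu
// CV 3.x is referenced; Mat, PutText and Imshow replace the old calls):
static void Main(string[] args)
{
    String win1 = "Test Window";
    CvInvoke.NamedWindow(win1);

    Mat img = new Mat(200, 400, DepthType.Cv8U, 3); //200 rows x 400 columns
    img.SetTo(new MCvScalar(255, 0, 0)); //blue, in BGR channel order

    CvInvoke.PutText(img, "Hello, world", new System.Drawing.Point(10, 80),
        FontFace.HersheyComplex, 1.0, new MCvScalar(0, 255, 0));

    CvInvoke.Imshow(win1, img);
    CvInvoke.WaitKey(0);
    CvInvoke.DestroyWindow(win1);
}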
private void QueryFrame()
{
    while (IsRunning)
    {
        // Check for null before flipping: QueryFrame() returns null when no
        // frame is available, so calling Flip on the result directly would throw.
        nextFrame = capture.QueryFrame();
        if (nextFrame != null)
        {
            nextFrame = nextFrame.Flip(FLIP.HORIZONTAL);
            gray = nextFrame.Convert<Gray, Byte>();

            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                haarCascade, 1.2, 2,
                HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new System.Drawing.Size(40, 40));

            foreach (MCvAvgComp face in facesDetected[0])
                nextFrame.Draw(face.rect, new Bgr(System.Drawing.Color.White), 2);

            // Repack the interleaved BGR bytes as RGB colors for display.
            byte[] bgrData = nextFrame.Bytes;
            for (int i = 0; i < colorData.Length; i++)
                colorData[i] = new Color(bgrData[3 * i + 2], bgrData[3 * i + 1], bgrData[3 * i]);
        }
    }
}
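// QueryFrame above spins until IsRunning is cleared, so it is presumably run on
// its own thread. A sketch of the start/stop plumbing, assuming capture,
// colorData (one XNA Color per pixel) and IsRunning are fields of the same
// class (all assumptions):
capture = new Capture(0); //first camera
colorData = new Color[capture.Width * capture.Height];
IsRunning = true;
new Thread(QueryFrame) { IsBackground = true }.Start();

// ... and later, to end the loop:
IsRunning = false;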
void videoCapture_NewFrame(object sender, EventArgs e)
{
    frame = videoCapture.ReadAs<Bgr, byte>();
    if (frame == null)
        return;

    long preprocessTime, matchTime;
    var bestRepresentatives = findObjects(frame, out preprocessTime, out matchTime);

    /************************************ drawing ****************************************/
    foreach (var m in bestRepresentatives)
    {
        frame.Draw(m.BoundingRect, new Bgr(0, 0, 255), 1);

        if (m.Template is ImageTemplateWithMask)
        {
            var mask = ((ImageTemplateWithMask)m.Template).BinaryMask;
            if (mask == null) continue; //just draw bounding boxes

            var area = new Rectangle(m.X, m.Y, mask.Width, mask.Height);
            if (area.X < 0 || area.Y < 0 || area.Right >= frame.Width || area.Bottom >= frame.Height)
                continue; //must be fully inside

            using (var someImage = new Image<Bgr, byte>(mask.Width, mask.Height, Bgr8.Red))
            {
                someImage.CopyTo(frame.GetSubRect(area), mask);
            }
        }
        else
        {
            frame.Draw(m, Bgr8.Blue, 3, true, Bgr8.Red);
        }

        Console.WriteLine("Best template: " + m.Template.ClassLabel + " score: " + m.Score);
    }

    frame.Draw(String.Format("Matching {0} templates in: {1} ms", templPyrs.Count, matchTime),
               font, new PointF(5, 10), new Bgr(0, 255, 0));
    /************************************ drawing ****************************************/

    this.pictureBox.Image = frame.ToBitmap(); //it will just be cast (data is shared) 24bpp color

    //frame.Save(String.Format("C:/probaImages/imgMarked_{0}.jpg", i));
    //b.Save(String.Format("C:/probaImages/img_{0}.jpg", i)); //'b' is not defined in this snippet
    //i++;

    GC.Collect();
}
void videoCapture_NewFrame(object sender, EventArgs e)
{
    frame = videoCapture.ReadAs<Bgr, byte>()/*.SmoothGaussian(5)*/; //smoothing <<parallel operation>>
    if (frame == null)
        return;

    if (!isROISelected)
    {
        Application.Idle += videoCapture_InitFrame;
        Application.Idle -= videoCapture_NewFrame;
        return;
    }

    long start = DateTime.Now.Ticks;

    Image<Gray, byte> probabilityMap;
    Rectangle prevSearchArea;
    Box2D foundBox;
    processImage(frame, out probabilityMap, out prevSearchArea, out foundBox);

    long end = DateTime.Now.Ticks;
    long elapsedMs = (end - start) / TimeSpan.TicksPerMillisecond;

    frame.Draw("Processed: " + elapsedMs + " ms", font, new PointF(15, 10), new Bgr(0, 255, 0));
    frame.Draw(prevSearchArea, new Bgr(0, 0, 255), 3);
    frame.Draw(foundBox, new Bgr(0, 255, 0), 5);
    Console.WriteLine("angle: " + foundBox.Angle);

    this.pictureBox.Image = frame.ToBitmap(); //it will just be cast (data is shared) 24bpp color
    this.pbProbabilityImage.Image = probabilityMap.ToBitmap(); //it will just be cast (data is shared) 8bpp gray

    GC.Collect();
}
private void doImageProcessing()
{
    // Translate our most recent color coordinates - done before the bg worker
    // as we can't access the sensor inside another thread.

    // Clear the green screen
    Array.Clear(_greenScreenPixelData, 0, _greenScreenPixelData.Length);

    // Emgu CV image
    using (Image<Emgu.CV.Structure.Gray, byte> emguOriginal = new Image<Emgu.CV.Structure.Gray, byte>(640, 480))
    {
        byte[, ,] emguData = emguOriginal.Data;

        // We have to iterate the whole depth image
        for (int y = 0; y < _depthStreamFrameHeight; ++y)
        {
            for (int x = 0; x < _depthStreamFrameWidth; ++x)
            {
                // calculate index into depth array
                int depthIndex = x + (y * _sensorRef.DepthStream.FrameWidth);
                DepthImagePixel depthPixel = _depthPixels[depthIndex];

                // retrieve the depth-to-color mapping for the current depth pixel
                ColorImagePoint colorImagePoint = _colorCoordinates[depthIndex];

                // scale color coordinates to depth resolution
                int colorInDepthX = colorImagePoint.X;
                int colorInDepthY = colorImagePoint.Y;

                // make sure the depth pixel maps to a valid point in color space
                // check y >= 0 and y < depthHeight to make sure we don't write outside of the array
                // check x > 0 instead of >= 0 since to fill gaps we set opaque current pixel plus the one to the left,
                // because of how the sensor works it is more correct to do it this way than to set to the right
                if (colorInDepthX > 0 && colorInDepthX < _depthStreamFrameWidth &&
                    colorInDepthY >= 0 && colorInDepthY < _depthStreamFrameHeight)
                {
                    // calculate index into the green screen pixel array
                    int greenScreenIndex = colorInDepthX + (colorInDepthY * _depthStreamFrameWidth);

                    // Emgu needs a black-and-white-only image.
                    if (depthPixel.Depth < _depthThreshold && depthPixel.Depth != 0)
                    {
                        // set opaque
                        _greenScreenPixelData[greenScreenIndex] = opaquePixelValue;

                        // compensate for depth/color not corresponding exactly by setting the pixel
                        // to the left to opaque as well
                        _greenScreenPixelData[greenScreenIndex - 1] = opaquePixelValue;

                        // Emgu needs an all-black image with pure white where the depth data is
                        emguData[colorInDepthY, colorInDepthX, 0] = 255;

                        // Setting the pixel before this one white too is not needed for blob
                        // detection, as the blobs will fill in; it just ends up adding extra
                        // on all the left edges.
                        /*
                        if (colorInDepthX - 1 > -1)
                        {
                            emguData[colorInDepthY, colorInDepthX - 1, 0] = 255;
                        }
                        */
                    }
                }
            }
        }

        // Emgu CV work
        Emgu.CV.Cvb.CvBlobs resultingBlobs = new Emgu.CV.Cvb.CvBlobs();
        Emgu.CV.Cvb.CvBlobDetector bDetect = new Emgu.CV.Cvb.CvBlobDetector();
        uint numLabeledPixels = bDetect.Detect(emguOriginal, resultingBlobs);

        Image<Emgu.CV.Structure.Bgra, double> blobImg = new Image<Emgu.CV.Structure.Bgra, double>(
            emguOriginal.Width, emguOriginal.Height, new Emgu.CV.Structure.Bgra(0, 0, 0, 0));

        foreach (Emgu.CV.Cvb.CvBlob targetBlob in resultingBlobs.Values)
        {
            using (MemStorage mem_BlobContours = new MemStorage())
            {
                Contour<System.Drawing.Point> allContourPointsInBlob = targetBlob.GetContour(mem_BlobContours);

                // If there are more than five points, smooth them
                if (allContourPointsInBlob.Total > 5)
                {
                    System.Drawing.Point[] originalPoints = allContourPointsInBlob.ToArray();
                    System.Drawing.Point[] smoothedPoints = EmguUtilities.getSmoothedContour(
                        originalPoints, 6, (float)0.5, Properties.Settings.Default.kinectGreenScreenMaskXPixelShift);

                    //------------- FILL -----------------------------------
                    // Shove them back into a contour collection
                    MemStorage finalFillStorage = new MemStorage();
                    Contour<System.Drawing.Point> finalFillContours = new Contour<System.Drawing.Point>(finalFillStorage);
                    finalFillContours.PushMulti(smoothedPoints, Emgu.CV.CvEnum.BACK_OR_FRONT.BACK);
                    blobImg.Draw(finalFillContours, black, -1);
                    // ------------ END FILL ------------------------------
                }
            }
        }

        // Convert the Emgu CV image to a BitmapSource
        BitmapSource finalRef = EmguUtilities.ToBitmapSource(blobImg);
        finalRef.Freeze();

        // Ensure the green screen mask is locked before doing this.
        // Copy pixels - I get the feeling this isn't supposed to be used on
        // bigger areas, but it seems like the fastest way to do it?
        finalRef.CopyPixels(_copyArea, _pBackBuffer, _gsBufferSize, _gsStride);

        // Just in case, dispose of the image
        blobImg.Dispose();
        //emguEroded.Dispose();
    }

    // make a copy to be more thread-safe - we really don't need this anymore, but oh well
    /*
    EventHandler handler = frameReadyForDisplay;
    if (handler != null)
    {
        // invoke the subscribed event-handler(s)
        handler(this, EventArgs.Empty);
    }
    */
}
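// Stripped of the Kinect plumbing, the blob-detection step above reduces to a
// few Emgu.CV.Cvb calls. A minimal sketch, assuming Emgu CV 2.4.x, where
// CvBlobDetector.Detect fills a CvBlobs label map from a binary Image<Gray, byte>:
static void DetectBlobs(Image<Emgu.CV.Structure.Gray, byte> binaryMask)
{
    Emgu.CV.Cvb.CvBlobs blobs = new Emgu.CV.Cvb.CvBlobs();
    Emgu.CV.Cvb.CvBlobDetector detector = new Emgu.CV.Cvb.CvBlobDetector();
    uint labeledPixels = detector.Detect(binaryMask, blobs);

    foreach (KeyValuePair<uint, Emgu.CV.Cvb.CvBlob> pair in blobs)
        Console.WriteLine("Blob {0}: area={1}, bounds={2}", pair.Key, pair.Value.Area, pair.Value.BoundingBox);
}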
public static void WriteDebugText(Image<Bgr, Byte> img, int x, int y, string text, params object[] args)
{
    img.Draw(String.Format(text, args), ref WriteDebugTextFont, new Point(x, y), WriteDebugTextColor);
}
void videoCapture_InitFrame(object sender, EventArgs e)
{
    frame = videoCapture.ReadAs<Bgr, byte>();
    if (frame == null)
        return;

    if (isROISelected)
    {
        initTracking(frame);
        Application.Idle -= videoCapture_InitFrame;
        Application.Idle += videoCapture_NewFrame;
        return;
    }
    else
    {
        frame.Draw(roi, new Bgr(0, 0, 255), 3);
    }

    this.pictureBox.Image = frame.ToBitmap(); //it will just be cast (data is shared)
    GC.Collect();
}
void videoCapture_NewFrame(object sender, EventArgs e)
{
    frame = videoCapture.ReadAs<Bgr, byte>();
    if (frame == null)
        return;

    var im = frame.Convert<FlowColor, float>();//.SmoothGaussian(5); //smoothing <<parallel operation>>

    long start = DateTime.Now.Ticks;

    List<PointF> newPositions;
    processImage(prevIm, im, this.oldPositions, out newPositions);

    prevIm = im;
    oldPositions = newPositions;

    long end = DateTime.Now.Ticks;
    long elapsedMs = (end - start) / TimeSpan.TicksPerMillisecond;

    frame.Draw("Processed: " + elapsedMs + " ms", font, new PointF(15, 10), new Bgr(0, 255, 0));
    drawPoints(frame, newPositions);

    this.pictureBox.Image = frame.ToBitmap(); //it will just be cast (data is shared) 24bpp color
    GC.Collect();
}
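// DateTime.Now.Ticks, used for timing in several handlers here, is a coarse
// wall clock. System.Diagnostics.Stopwatch is a monotonic, higher-resolution
// alternative; a sketch of the same measurement inside this handler:
var sw = System.Diagnostics.Stopwatch.StartNew();
processImage(prevIm, im, this.oldPositions, out newPositions);
sw.Stop();
frame.Draw("Processed: " + sw.ElapsedMilliseconds + " ms", font, new PointF(15, 10), new Bgr(0, 255, 0));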
private void drawPoints(Image<Bgr, byte> im, List<PointF> points)
{
    foreach (var pt in points)
    {
        /*im[(int)pt.Y, (int)pt.X] = new Bgr(Color.Red); continue;*/

        var rect = new RectangleF(pt.X, pt.Y, 1, 1);
        rect.Inflate(winSize / 2, winSize / 2);

        im.Draw(rect, System.Drawing.Color.Red.ToBgr(), 3);
    }
}
private static void drawContour(IList<PointF> controlPoints, Image<Bgr, byte> image)
{
    const float CONTOUR_TENSION = 0;

    /******************** contour and control points *********************/
    var pointIndices = CardinalSpline.GetEqualyDistributedPoints(controlPoints, CONTOUR_TENSION, 500);
    var points = CardinalSpline.InterpolateAt(controlPoints, CONTOUR_TENSION, pointIndices);

    var normals = new List<LineSegment2DF>();
    var normalIndices = CardinalSpline.GetEqualyDistributedPoints(controlPoints, CONTOUR_TENSION, 100);
    foreach (var idx in normalIndices)
    {
        var pt = CardinalSpline.InterpolateAt(controlPoints, CONTOUR_TENSION, idx);
        var normalDirection = CardinalSpline.NormalAt(controlPoints, CONTOUR_TENSION, idx);
        var orientation = (int)Angle.ToDegrees(System.Math.Atan2(normalDirection.Y, normalDirection.X));
        var normal = getLine(orientation, pt, 20);

        normals.Add(normal);
    }
    /******************** contour and control points *********************/

    image.Draw(points.ToArray(), Bgr8.Blue, 3);
    image.Draw(controlPoints.Select(x => new CircleF(x, 3)), Bgr8.Red, 3);
    image.Draw(normals, Bgr8.Green, 3, false);
}
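// getLine is a private helper not shown in the snippet. A plausible sketch that
// builds a segment of the given length centered on the point, along the given
// orientation in degrees (purely an assumption about the original helper):
private static LineSegment2DF getLine(int orientationDeg, PointF center, float length)
{
    double rad = orientationDeg * System.Math.PI / 180;
    var half = new PointF((float)(System.Math.Cos(rad) * length / 2),
                          (float)(System.Math.Sin(rad) * length / 2));
    return new LineSegment2DF(new PointF(center.X - half.X, center.Y - half.Y),
                              new PointF(center.X + half.X, center.Y + half.Y));
}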
public static void DrawTrack(Image<Bgr, Byte> image, TrackedObject trackedObject)
{
    image.Draw(trackedObject.CurrentFrame.SourceRectangle, White, 1);

    if (string.IsNullOrEmpty(trackedObject.Identity))
        return;

    image.Draw(trackedObject.Identity, ref _font, trackedObject.CurrentFrame.SourceRectangle.Location, White);
}
private void drawParticles(IEnumerable<ColorParticle> particles, Image<Bgr, byte> img)
{
    var circles = particles.Select(x => new CircleF { X = x.Position.X, Y = x.Position.Y, Radius = 1.5f });
    img.Draw(circles, Bgr8.Blue, 5);
}
void videoCapture_ProcessFrame(object sender, EventArgs e)
{
    frame = videoCapture.ReadAs<Bgr, byte>();
    if (frame == null)
        return;

    long start = DateTime.Now.Ticks;

    predict();
    update();

    long end = DateTime.Now.Ticks;
    long elapsedMs = (end - start) / TimeSpan.TicksPerMillisecond;

    drawParticles(particleFilter, frame);
    frame.Draw("Processed: " + elapsedMs + " ms", font, new PointF(15, 10), new Bgr(0, 255, 0));

    this.pictureBox.Image = frame.ToBitmap(); //it will just be cast (data is shared)
    GC.Collect();
}
void videoCapture_NewFrame(object sender, EventArgs e)
{
    frame = videoCapture.ReadAs<Bgr, byte>();
    if (frame == null)
        return;

    if (!isROISelected)
    {
        Application.Idle += videoCapture_InitFrame;
        Application.Idle -= videoCapture_NewFrame;
        return;
    }

    long start = DateTime.Now.Ticks;

    Rectangle prevSearchArea = searchArea; //processImage overwrites searchArea
    bool isPredicted = nonVisibleCount > 0;

    Image<Gray, byte> probabilityMap;
    Box2D foundBox;
    trackOneStep(frame, out probabilityMap, out foundBox);

    long end = DateTime.Now.Ticks;
    long elapsedMs = (end - start) / TimeSpan.TicksPerMillisecond;

    frame.Draw("Processed: " + elapsedMs + " ms", font, new PointF(15, 10), new Bgr(0, 255, 0));
    frame.Draw(prevSearchArea, new Bgr(0, 0, 255), 3);
    frame.Draw(foundBox, new Bgr(0, 255, 0), 5);
    Console.WriteLine("angle: " + foundBox.Angle);

    this.pictureBox.Image = frame.ToBitmap(); //it will just be cast (data is shared) 24bpp color
    this.pbProbabilityImage.Image = probabilityMap.ToBitmap(); //it will just be cast (data is shared) 8bpp gray

    GC.Collect();
    //Thread.Sleep(100);
}