/// <summary>
/// Draws the interpolated cardinal-spline contour (blue) and its control points (red circles) onto the image.
/// </summary>
/// <param name="controlPoints">Cardinal-spline control points.</param>
/// <param name="image">Destination image.</param>
private static void drawContour(IList<PointF> controlPoints, Bgr<byte>[,] image)
{
    const float CONTOUR_TENSION = 0;

    /******************** contour and control points *********************/
    var contourIndices = CardinalSpline.GetEqualyDistributedPointIndices(controlPoints, CONTOUR_TENSION, 500);
    var contourPoints = CardinalSpline.InterpolateAt(controlPoints, CONTOUR_TENSION, contourIndices);

    //build a short normal segment at each of 100 equally spaced spline locations
    var normalIndices = CardinalSpline.GetEqualyDistributedPointIndices(controlPoints, CONTOUR_TENSION, 100);
    var normals = new List<LineSegment2DF>();
    foreach (var index in normalIndices)
    {
        var location = CardinalSpline.InterpolateAt(controlPoints, CONTOUR_TENSION, index);
        var direction = CardinalSpline.NormalAt(controlPoints, CONTOUR_TENSION, index);
        var degrees = (int)Angle.ToDegrees(System.Math.Atan2(direction.Y, direction.X));
        normals.Add(getLine(degrees, location, 20));
    }
    /******************** contour and control points *********************/

    image.Draw(contourPoints.Select(p => p.Round()).ToArray(), Bgr<byte>.Blue, 3);
    image.Draw(controlPoints.Select(p => new Circle(p.Round(), 3)), Bgr<byte>.Red, 3);
    //image.Draw(normals, Bgr<byte>.Green, 3, false);
}
/// <summary>
/// Draws every detection rectangle onto the debug image.
/// </summary>
/// <param name="detections">Detected regions.</param>
/// <param name="color">Rectangle color.</param>
/// <param name="thickness">Rectangle border thickness.</param>
private void drawDetections(IEnumerable<Rectangle> detections, Bgr color, int thickness)
{
    foreach (var rect in detections)
    {
        debugImage.Draw(rect, color, thickness);
    }
}
/// <summary>
/// Initializes tracking from the user-selected ROI: seeds the Kalman filter, builds the
/// object and background histograms, and derives the ratio histogram used for back-projection.
/// Consumes <c>roi</c> (reset to empty) and sets <c>searchArea</c> to it.
/// </summary>
/// <param name="frame">Current video frame.</param>
private void initTracking(Bgr<byte>[,] frame)
{
    initializeKalman(roi.Center());

    //get hue channel from search area
    var hsvImg = frame.ToHsv();

    //user constraints...
    //mask keeps only pixels whose V (brightness) channel lies in [minV, maxV]
    Gray<byte>[,] mask = hsvImg.InRange(new Hsv<byte>(0, 0, (byte)minV), new Hsv<byte>(0, 0, (byte)maxV), Byte.MaxValue, 2);

    //object histogram over H and S channels of the ROI, normalized by ROI area
    originalObjHist.Calculate(hsvImg.SplitChannels<Hsv<byte>, byte>(roi, 0, 1), false, mask, roi.Location);
    originalObjHist.Scale((float)1 / roi.Area());
    //originalObjHist.Normalize(Byte.MaxValue);

    //background = ROI inflated by 1.5x in each direction, clipped to the frame
    var backgroundArea = roi.Inflate(1.5, 1.5, frame.Size());

    //backgroundMask: the V-mask cropped to the background area with the ROI region zeroed,
    //so the object pixels would be excluded from the background statistics
    var backgroundMask = mask.Clone(backgroundArea);
    backgroundMask.SetValue<Gray<byte>>(0, new Rectangle(roi.X - backgroundArea.X, roi.Y - backgroundArea.Y, roi.Width, roi.Height));

    //NOTE(review): 'backgroundMask' is built above but 'mask' is passed here, leaving
    //backgroundMask unused — the ROI pixels are therefore NOT excluded from the background
    //histogram, yet the scale below divides by (backgroundArea - roi) as if they were.
    //Confirm whether 'backgroundMask' (with matching coordinates) was intended.
    backgroundHist.Calculate(hsvImg.SplitChannels<Hsv<byte>, byte>(backgroundArea, 0, 1), false, mask, backgroundArea.Location);
    backgroundHist.Scale((float)1 / (backgroundArea.Area() - roi.Area()));
    //backgroundHist.Normalize(Byte.MaxValue);

    //how good originalObjHist and objHist match (suppresses possible selected background)
    ratioHist = originalObjHist.CreateRatioHistogram(backgroundHist, Byte.MaxValue, 3);

    searchArea = roi;
    roi = Rectangle.Empty;
}
/// <summary>
/// Rectification filter for projective transformation.
/// <para>Accord.NET internal call. Please see: <see cref="Accord.Imaging.Filters.Rectification"/> for details.</para>
/// </summary>
/// <param name="img">Image.</param>
/// <param name="homography">The homography matrix used to map a image passed to the filter to the overlay image.</param>
/// <param name="fillColor">The filling color used to fill blank spaces.</param>
/// <returns>Rectified image.</returns>
public static Bgr<byte>[,] Rectification(this Bgr<byte>[,] img, double[,] homography, Bgr<byte> fillColor)
{
    var filter = new Rectification(homography)
    {
        FillColor = fillColor.ToColor()
    };

    return img.ApplyBaseTransformationFilter(filter);
}
/// <summary>
/// Creates template from the input image by using provided parameters.
/// </summary>
/// <param name="sourceImage">Input image.</param>
/// <param name="minFeatureStrength">Minimum gradient value for the feature.</param>
/// <param name="maxNumberOfFeatures">Maximum number of features per template. The features will be extracted so that their locations are semi-uniformly spread.</param>
/// <param name="classLabel">Template class label.</param>
public virtual void Initialize(Bgr<byte>[,] sourceImage, int minFeatureStrength, int maxNumberOfFeatures, string classLabel)
{
    Gray<int>[,] squaredMagnitude;
    var orientations = GradientComputation.Compute(sourceImage, out squaredMagnitude, minFeatureStrength);

    //rank candidate features by their squared gradient magnitude
    Initialize(orientations, maxNumberOfFeatures, classLabel,
               feature => squaredMagnitude[feature.Y, feature.X].Intensity);
}
/// <summary>
/// Smoke test: writes one pixel into a color image and prints the corresponding
/// values read back from the converted array.
/// </summary>
public static void TestImageToArray()
{
    var image = new Image<Bgr, byte>(640, 480);
    image[5, 5] = new Bgr(128, 64, 32);

    var data = image.ToArray();

    //indexing appears to be [channel, row, column] — verified only by eye via console output
    Console.WriteLine("Color image to array: " + data[0, 5, 5] + " " + data[1, 5, 5] + " " + data[2, 5, 5]);
    /*Debug.Assert(arr[5, 5, 0] == 128 && arr[5, 5, 1] == 64 && arr[5, 5, 2] == 32);*/
}
/// <summary>
/// Draws circles.
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="circles">Circles</param>
/// <param name="color">Circle color.</param>
/// <param name="thickness">Contours thickness.</param>
public unsafe static void Draw(this Bgr<byte>[,] image, IEnumerable<Circle> circles, Bgr<byte> color, int thickness)
{
    using (var lockedImage = image.Lock())
    {
        var ipl = lockedImage.AsOpenCvImage();
        var scalar = color.ToCvScalar(); //color conversion hoisted out of the loop

        foreach (var c in circles)
        {
            CvCoreInvoke.cvCircle(&ipl, new Point(c.X, c.Y), c.Radius, scalar,
                                  thickness, LineTypes.EightConnected, 0);
        }
    }
}
/// <summary>
/// Draws contour.
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="contour">Contour points.</param>
/// <param name="color">Contour color.</param>
/// <param name="thickness">Contours thickness.</param>
/// <param name="opacity">Sets alpha channel where 0 is transparent and 255 is full opaque.</param>
public unsafe static void Draw(this Bgr<byte>[,] image, Point[] contour, Bgr<byte> color, int thickness, byte opacity = Byte.MaxValue)
{
    var contourHandle = GCHandle.Alloc(contour, GCHandleType.Pinned);
    try
    {
        using (var img = image.Lock())
        {
            var iplImage = img.AsOpenCvImage();

            //TODO - noncritical: implement with cvContour
            //BUGFIX: 'opacity' was documented but never forwarded; pass it to ToCvScalar
            //(default Byte.MaxValue preserves the previous fully-opaque behavior)
            CvCoreInvoke.cvPolyLine(&iplImage, new IntPtr[] { contourHandle.AddrOfPinnedObject() }, new int[] { contour.Length },
                                    1, true, color.ToCvScalar(opacity), thickness, LineTypes.EightConnected, 0);
        }
    }
    finally
    {
        //BUGFIX: free the pinned handle even if locking/drawing throws (was leaked on exception)
        contourHandle.Free();
    }
}
/// <summary>
/// The blending filter is able to blend two images using a homography matrix.
/// A linear alpha gradient is used to smooth out differences between the two
/// images, effectively blending them in two images. The gradient is computed
/// considering the distance between the centers of the two images.
/// </summary>
/// <param name="im">Image.</param>
/// <param name="overlayIm">The overlay image (also called the anchor).</param>
/// <param name="homography">Homography matrix used to map a image passed to
/// the filter to the overlay image specified at filter creation.</param>
/// <param name="fillColor">The filling color used to fill blank spaces. The filling color will only be visible after the image is converted
/// to 24bpp. The alpha channel will be used internally by the filter.</param>
/// <param name="gradient">A value indicating whether to blend using a linear
/// gradient or just superimpose the two images with equal weights.</param>
/// <param name="alphaOnly">A value indicating whether only the alpha channel
/// should be blended. This can be used together with a transparency
/// mask to selectively blend only portions of the image.</param>
/// <returns>Blended image.</returns>
public static Bgra<byte>[,] Blend(this Bgr<byte>[,] im, Bgr<byte>[,] overlayIm, MatrixH homography, Bgra<byte> fillColor, bool gradient = true, bool alphaOnly = false)
{
    using (var lockedOverlay = overlayIm.Lock())
    {
        var blendFilter = new Blend(homography, lockedOverlay.AsBitmap())
        {
            AlphaOnly = alphaOnly,
            Gradient = gradient,
            FillColor = fillColor.ToColor()
        };

        return im.ApplyBaseTransformationFilter<Bgr<byte>, Bgra<byte>>(blendFilter);
    }
}
/// <summary>
/// Converts an 8-bit HSV color to an 8-bit BGR color using integer arithmetic.
/// <para>The byte hue is stored halved ([0-180]); it is doubled here to the [0-360] range.</para>
/// </summary>
/// <param name="hsv">Source HSV color.</param>
/// <param name="bgr">Destination BGR color (written in place).</param>
public static void Convert(ref Hsv <byte> hsv, ref Bgr <byte> bgr)
{
    if (hsv.S == 0)
    {
        //zero saturation => achromatic (gray): every channel equals the value
        bgr.R = hsv.V;
        bgr.G = hsv.V;
        bgr.B = hsv.V;
        return;
    }

    int hue = hsv.H * 2; //move to [0-360 range] (only needed for byte!)

    int hQuadrant = hue / 60; // Hue quadrant 0 - 5 (60deg)
    int hOffset = hue % 60; // Hue position in quadrant
    int vs = hsv.V * hsv.S;

    //integer approximations of the classic HSV->RGB helper values:
    //p ~ V*(1-S), q ~ V*(1-S*f), t ~ V*(1-S*(1-f)) with f = hOffset/60
    //NOTE(review): the division order (vs / 255 before multiplying by hOffset) trades
    //precision for intermediate-overflow safety — confirm before reordering.
    byte p = (byte)(hsv.V - (vs / 255));
    byte q = (byte)(hsv.V - (vs / 255 * hOffset) / 60);
    byte t = (byte)(hsv.V - (vs / 255 * (60 - hOffset)) / 60);

    //assign channels according to the 60-degree hue sector
    switch (hQuadrant)
    {
        case 0:
            bgr.R = hsv.V; bgr.G = t; bgr.B = p;
            break;
        case 1:
            bgr.R = q; bgr.G = hsv.V; bgr.B = p;
            break;
        case 2:
            bgr.R = p; bgr.G = hsv.V; bgr.B = t;
            break;
        case 3:
            bgr.R = p; bgr.G = q; bgr.B = hsv.V;
            break;
        case 4:
            bgr.R = t; bgr.G = p; bgr.B = hsv.V;
            break;
        default: //sector 5
            bgr.R = hsv.V; bgr.G = p; bgr.B = q;
            break;
    }
}
/// <summary>
/// Converts an 8-bit BGR color to an 8-bit HSV color using integer arithmetic.
/// <para>The computed [0-360] hue is halved so it fits a byte ([0-180]).</para>
/// </summary>
/// <param name="bgr">Source BGR color.</param>
/// <param name="hsv">Destination HSV color (written in place).</param>
public static void Convert(ref Bgr <byte> bgr, ref Hsv <byte> hsv)
{
    //min/max of the three channels without branching into a helper
    byte rgbMin, rgbMax;
    rgbMin = bgr.R < bgr.G ? (bgr.R < bgr.B ? bgr.R : bgr.B) : (bgr.G < bgr.B ? bgr.G : bgr.B);
    rgbMax = bgr.R > bgr.G ? (bgr.R > bgr.B ? bgr.R : bgr.B) : (bgr.G > bgr.B ? bgr.G : bgr.B);

    //value = max channel; black => hue and saturation are undefined, use 0
    hsv.V = rgbMax;
    if (hsv.V == 0)
    {
        hsv.H = 0;
        hsv.S = 0;
        return;
    }

    //saturation = chroma relative to value; gray => hue undefined, use 0
    hsv.S = (byte)(255 * (rgbMax - rgbMin) / rgbMax);
    if (hsv.S == 0)
    {
        hsv.H = 0;
        return;
    }

    //hue sector depends on which channel is the maximum
    int hue = 0;
    if (rgbMax == bgr.R)
    {
        hue = 0 + 60 * (bgr.G - bgr.B) / (rgbMax - rgbMin);
        if (hue < 0)
        {
            hue += 360; //wrap negative hues into [0-360]
        }
    }
    else if (rgbMax == bgr.G)
    {
        hue = 120 + 60 * (bgr.B - bgr.R) / (rgbMax - rgbMin);
    }
    else //rgbMax == bgr.B
    {
        hue = 240 + 60 * (bgr.R - bgr.G) / (rgbMax - rgbMin);
    }

    hsv.H = (byte)(hue / 2); //scale [0-360] . [0-180] (only needed for byte!)

    Debug.Assert(hue >= 0 && hue <= 360);
}
/// <summary>
/// Draws rectangle annotation.
/// </summary>
/// <param name="image">Image.</param>
/// <param name="rect">User specified area to annotate.</param>
/// <param name="text">Label.</param>
/// <param name="annotationWidth">Width of annotation rectangle.</param>
/// <param name="color">Color for rectangle. Label area is filled. Default color is yellow-green.</param>
/// <param name="textColor">Label color. Default color is black.</param>
/// <param name="font">Font to use. Default is "Arial" of size 8, style: Bold.</param>
/// <param name="thickness">Rectangle thickness.</param>
public static void DrawAnnotation(this Image<Bgr, byte> image, Rectangle rect, string text,
                                  int annotationWidth = 100, Bgr color = default(Bgr), Bgr textColor = default(Bgr), Font font = null,
                                  int thickness = 1)
{
    color = color.Equals(default(Bgr)) ? Color.YellowGreen.ToBgr() : color;
    //BUGFIX: the fallback previously assigned 'color', discarding any user-supplied text color
    textColor = textColor.Equals(default(Bgr)) ? Color.Black.ToBgr() : textColor;
    font = font ?? new Font("Arial", 8, System.Drawing.FontStyle.Bold);

    //label box height: 3px padding above and below each text line
    var nLines = text.Count(x => x == '\n') + 1;
    var annotationHeight = (int)(3 + (font.SizeInPoints + 3) * nLines + 3);

    //center the label box horizontally over the annotated rectangle
    var xOffset = (annotationWidth - rect.Width) / 2;
    var annotationRect = new Rectangle(rect.X - xOffset, rect.Y - annotationHeight, annotationWidth, annotationHeight);

    image.Draw(annotationRect, color, thickness);
    image.Draw(rect, color, thickness);
    image.Draw(annotationRect, color, -1, 80); //semi-transparent fill of the label box
    image.Draw(text, font, annotationRect, textColor);
}
/// <summary>
/// Computes gradient orientations from the color image. Orientation from the channel which has the maximum gradient magnitude is taken as the orientation for a location.
/// </summary>
/// <param name="frame">Image.</param>
/// <param name="magnitudeSqrImage">Squared magnitude image.</param>
/// <param name="minValidMagnitude">Minimal valid magnitude.</param>
/// <returns>Orientation image (angles are in degrees).</returns>
public static unsafe Gray<int>[,] Compute(Bgr<byte>[,] frame, out Gray<int>[,] magnitudeSqrImage, int minValidMagnitude)
{
    //compare squared magnitudes so the kernel avoids a sqrt per pixel
    var minSqrMagnitude = minValidMagnitude * minValidMagnitude;

    var orientationImage = new Gray<int>[frame.Height(), frame.Width()];
    var _magnitudeSqrImage = orientationImage.CopyBlank();

    using (var uFrame = frame.Lock())
    {
        //launch workers over the interior only: a kernelRadius-wide border on each side
        //is never written and stays zero-initialized
        ParallelLauncher.Launch(thread =>
        {
            computeColor(thread, (byte*)uFrame.ImageData, uFrame.Stride, orientationImage, _magnitudeSqrImage, minSqrMagnitude);
        },
        frame.Width() - 2 * kernelRadius, frame.Height() - 2 * kernelRadius);
    }

    //the out parameter cannot be captured inside the lambda, hence the local copy
    magnitudeSqrImage = _magnitudeSqrImage;
    return orientationImage;
}
/// <summary>
/// Performs one Camshift tracking step: back-projects the ratio histogram into a
/// probability map, applies the user brightness constraints and updates the search area.
/// </summary>
/// <param name="frame">Current video frame.</param>
/// <param name="probabilityMap">Back-projected (and masked) probability map.</param>
/// <param name="prevSearchArea">Search area used for this step (before the update).</param>
/// <param name="foundBox">Object bounding box found by Camshift.</param>
private void processImage(Bgr<byte>[,] frame, out Gray<byte>[,] probabilityMap, out Rectangle prevSearchArea, out Box2D foundBox)
{
    prevSearchArea = searchArea;

    //convert to HSV
    var hsvFrame = frame.ToHsv();

    //back-project ratio hist => create probability map
    probabilityMap = ratioHist.BackProject(hsvFrame.SplitChannels<Hsv<byte>, byte>(0, 1));

    //user constraints...
    Gray<byte>[,] valueMask = hsvFrame.InRange(new Hsv<byte>(0, 0, (byte)minV), new Hsv<byte>(0, 0, (byte)maxV), Byte.MaxValue, 2);
    probabilityMap.AndByte(valueMask, inPlace: true);

    //run Camshift algorithm to find new object position, size and angle
    foundBox = Camshift.Process(probabilityMap, searchArea);

    //inflate found area for search (X factor)...
    var foundArea = Rectangle.Round(foundBox.GetMinArea());
    searchArea = foundArea.Inflate(0.05, 0.05, frame.Size());

    if (searchArea.IsEmpty) isROISelected = false; //reset tracking
}
/// <summary>
/// Draws Box2D.
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="box">Box 2D.</param>
/// <param name="color">Object's color.</param>
/// <param name="thickness">Border thickness.</param>
/// <param name="opacity">Sets alpha channel where 0 is transparent and 255 is full opaque.</param>
public unsafe static void Draw(this Bgr<byte>[,] image, Box2D box, Bgr<byte> color, int thickness, byte opacity = Byte.MaxValue)
{
    if (thickness < 1)
        throw new NotSupportedException("Only positive values are valid!");

    var corners = box.GetVertices();

    using (var lockedImage = image.Lock())
    {
        var ipl = lockedImage.AsOpenCvImage();
        var scalar = color.ToCvScalar(opacity);

        //connect each vertex with the next one, wrapping around to close the box
        for (int i = 0; i < corners.Length; i++)
        {
            var next = corners[(i + 1) % corners.Length];
            CvCoreInvoke.cvLine(&ipl, corners[i].Round(), next.Round(), scalar,
                                thickness, LineTypes.EightConnected, 0);
        }
    }
}
/// <summary>
/// Performs one Camshift tracking step: back-projects the ratio histogram into a
/// probability map, applies the user brightness constraints and updates the search area.
/// </summary>
/// <param name="frame">Current video frame.</param>
/// <param name="probabilityMap">Back-projected (and masked) probability map.</param>
/// <param name="prevSearchArea">Search area used for this step (before the update).</param>
/// <param name="foundBox">Object bounding box found by Camshift.</param>
private void processImage(Bgr <byte>[,] frame, out Gray <byte>[,] probabilityMap, out Rectangle prevSearchArea, out Box2D foundBox)
{
    prevSearchArea = searchArea;

    //convert to HSV
    var hsv = frame.ToHsv();

    //back-project ratio hist => create probability map
    probabilityMap = ratioHist.BackProject(hsv.SplitChannels <Hsv <byte>, byte>(0, 1));

    //user constraints...
    Gray <byte>[,] brightnessMask = hsv.InRange(new Hsv <byte>(0, 0, (byte)minV), new Hsv <byte>(0, 0, (byte)maxV), Byte.MaxValue, 2);
    probabilityMap.AndByte(brightnessMask, inPlace: true);

    //run Camshift algorithm to find new object position, size and angle
    foundBox = Camshift.Process(probabilityMap, searchArea);

    //inflate found area for search (X factor)...
    searchArea = Rectangle.Round(foundBox.GetMinArea()).Inflate(0.05, 0.05, frame.Size());

    if (searchArea.IsEmpty)
    {
        isROISelected = false; //reset tracking
    }
}
/// <summary>
/// Initializes Camshift tracking from the user-selected ROI: builds the object and
/// background histograms and derives the ratio histogram used for back-projection.
/// Consumes <c>roi</c> (reset to empty) and sets <c>searchArea</c> to it.
/// </summary>
/// <param name="frame">Current video frame.</param>
private void initTracking(Bgr <byte>[,] frame)
{
    //get hue channel from search area
    var hsvImg = frame.ToHsv();

    //user constraints...
    Gray <byte>[,] mask = hsvImg.InRange(new Hsv <byte>(0, 0, (byte)minV), new Hsv <byte>(0, 0, (byte)maxV), Byte.MaxValue, 2);

    //object histogram over H and S channels of the ROI
    //(readability fix: the accumulate flag was written as '!false')
    originalObjHist.Calculate(hsvImg.SplitChannels <Hsv <byte>, byte>(roi, 0, 1), true, mask, roi.Location);
    originalObjHist.Scale((float)1 / roi.Area());
    //originalObjHist.Normalize(Byte.MaxValue);

    //background = ROI inflated by 1.5x in each direction, clipped to the frame
    var backgroundArea = roi.Inflate(1.5, 1.5, frame.Size());
    backgroundHist.Calculate(hsvImg.SplitChannels <Hsv <byte>, byte>(backgroundArea, 0, 1), true, mask, backgroundArea.Location);
    backgroundHist.Scale((float)1 / backgroundArea.Area());
    //backgroundHist.Normalize(Byte.MaxValue);

    //how good originalObjHist and objHist match (suppresses possible selected background)
    ratioHist = originalObjHist.CreateRatioHistogram(backgroundHist, Byte.MaxValue, 10);

    searchArea = roi;
    roi = Rectangle.Empty;
}
/// <summary>
/// This kind of template can not be created from color images. This function will always throw <see cref="System.NotSupportedException"/>.
/// </summary>
/// <param name="sourceImage">Input image.</param>
/// <param name="minFeatureStrength">Minimum gradient value for the feature.</param>
/// <param name="maxNumberOfFeatures">Maximum number of features per template. The features will be extracted so that their locations are semi-uniformly spread.</param>
/// <param name="classLabel">Template class label.</param>
/// <exception cref="System.NotSupportedException">Always thrown: color sources are not supported.</exception>
public override void Initialize(Bgr<byte>[,] sourceImage, int minFeatureStrength, int maxNumberOfFeatures, string classLabel)
{
    //NotSupportedException (instead of a bare Exception) is the idiomatic type for an
    //unsupported operation; existing catch (Exception) handlers still catch it
    throw new NotSupportedException("Binary mask can not be saved from non black-white image!");
}
/// <summary>
/// Loads the black-and-white hand resource image, converts it to BGR and
/// runs the peak/valley detection once the form is shown.
/// </summary>
private void ContourDemoForm_Shown(object sender, EventArgs e)
{
    var grayHand = ResourceImages.bwHand.ToArray() as Gray<byte>[,];
    image = grayHand.ToBgr();

    findPeaksAndValleys();
}
/// <summary>
/// Converts the source color to the destination color.
/// </summary>
/// <param name="image">Source image.</param>
/// <returns>image with converted color.</returns>
public static Hsv<byte>[,] ToHsv(this Bgr<byte>[,] image)
    => image.Convert<Bgr<byte>, Hsv<byte>>(Bgr<byte>.Convert);
/// <summary>
/// The blending filter is able to blend two images using a homography matrix.
/// A linear alpha gradient is used to smooth out differences between the two
/// images, effectively blending them in two images. The gradient is computed
/// considering the distance between the centers of the two images.
/// </summary>
/// <param name="im">Image.</param>
/// <param name="overlayIm">The overlay image (also called the anchor).</param>
/// <param name="homography">Homography matrix used to map a image passed to
/// the filter to the overlay image specified at filter creation.</param>
/// <param name="fillColor">The filling color used to fill blank spaces. The filling color will only be visible after the image is converted
/// to 24bpp. The alpha channel will be used internally by the filter.</param>
/// <param name="gradient">A value indicating whether to blend using a linear
/// gradient or just superimpose the two images with equal weights.</param>
/// <param name="alphaOnly">A value indicating whether only the alpha channel
/// should be blended. This can be used together with a transparency
/// mask to selectively blend only portions of the image.</param>
/// <returns>Blended image.</returns>
public static Bgra<byte>[,] Blend(this Bgr<byte>[,] im, Bgr<byte>[,] overlayIm, MatrixH homography, Bgra<byte> fillColor, bool gradient = true, bool alphaOnly = false)
{
    using (var overlay = overlayIm.Lock())
    {
        var filter = new Blend(homography, overlay.AsBitmap())
        {
            AlphaOnly = alphaOnly,
            Gradient = gradient,
            FillColor = fillColor.ToColor()
        };

        return im.ApplyBaseTransformationFilter<Bgr<byte>, Bgra<byte>>(filter);
    }
}
/// <summary>
/// Runs template matching on the image and returns the best representative
/// of each match cluster, reporting preprocessing and matching times.
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="preprocessTime">Pyramid-construction time in milliseconds.</param>
/// <param name="matchTime">Template-matching time in milliseconds.</param>
/// <returns>Best match per detected cluster.</returns>
private List<Match> findObjects(Bgr<byte>[,] image, out long preprocessTime, out long matchTime)
{
    var grayImage = image.ToGray();

    var stopwatch = Stopwatch.StartNew();
    linPyr = LinearizedMapPyramid.CreatePyramid(grayImage); //prepare linear-pyramid maps
    preprocessTime = stopwatch.ElapsedMilliseconds;

    stopwatch.Restart();
    List<Match> matches = linPyr.MatchTemplates(templPyrs, threshold);
    stopwatch.Stop();
    matchTime = stopwatch.ElapsedMilliseconds;

    var matchGroups = new MatchClustering(minDetectionsPerGroup).Group(matches.ToArray());
    return matchGroups.Select(g => g.Representative).ToList();
}
/// <summary>
/// Converts the source color to the destination color.
/// </summary>
/// <param name="image">Source image.</param>
/// <returns>image with converted color.</returns>
public static Gray<byte>[,] ToGray(this Bgr<byte>[,] image)
    => image.Convert<Bgr<byte>, Gray<byte>>(Bgr<byte>.Convert);
/// <summary>
/// Draws text on the provided image.
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="text">User text.</param>
/// <param name="font">Font.</param>
/// <param name="botomLeftPoint">Bottom-left point.</param>
/// <param name="color">Text color.</param>
/// <param name="opacity">Sets alpha channel where 0 is transparent and 255 is full opaque.</param>
public unsafe static void Draw(this Bgr <byte>[,] image, string text, Font font, Point botomLeftPoint, Bgr <byte> color, byte opacity = Byte.MaxValue)
{
    using (var img = image.Lock())
    {
        var iplImage = img.AsOpenCvImage();

        //BUGFIX: 'opacity' was documented but never forwarded; pass it to ToCvScalar
        //(default Byte.MaxValue preserves the previous fully-opaque behavior)
        CvCoreInvoke.cvPutText(&iplImage, text, botomLeftPoint, ref font, color.ToCvScalar(opacity));
    }
}
/// <summary>
/// Gets System.Drawing.Color from Bgr8 color.
/// </summary>
/// <param name="color">Color.</param>
/// <param name="opacity">Opacity. If color has 4 channels opacity is discarded.</param>
/// <returns>System.Drawing.Color</returns>
public static System.Drawing.Color ToColor(this Bgr<byte> color, byte opacity = Byte.MaxValue)
    => Color.FromArgb(opacity, color.R, color.G, color.B);
/// <summary>
/// Converts the source color to the destination color.
/// </summary>
/// <param name="image">Source image.</param>
/// <param name="area">Working area.</param>
/// <returns>image with converted color.</returns>
public static Hsv<byte>[,] ToHsv(this Bgr<byte>[,] image, Rectangle area)
    => image.Convert<Bgr<byte>, Hsv<byte>>(Bgr<byte>.Convert, area);
/// <summary>
/// Performs one combined Kalman + Camshift tracking step: predicts the search area,
/// runs Camshift inside it, and either corrects the Kalman state (object found) or
/// falls back to the prediction (object lost). Resets tracking after 100 consecutive
/// frames without a detection.
/// </summary>
/// <param name="frame">Current video frame.</param>
/// <param name="probabilityMap">Back-projected probability map produced by Camshift.</param>
/// <param name="foundBox">Found object box; Box2D.Empty when the object was not found.</param>
private void trackOneStep(Bgr<byte>[,] frame, out Gray<byte>[,] probabilityMap, out Box2D foundBox)
{
    const float SEARCH_AREA_INFLATE_FACTOR = 0.05f;

    /**************************** KALMAN predict **************************/
    //center the search area on the predicted object position (size is kept)
    kalman.Predict();
    searchArea = createRect(kalman.State.Position, searchArea.Size, frame.Size());
    /**************************** KALMAN predict **************************/

    trackCamshift(frame, searchArea, out probabilityMap, out foundBox);

    if (!foundBox.IsEmpty)
    {
        /**************************** KALMAN correct **************************/
        kalman.Correct(new PointF(foundBox.Center.X, foundBox.Center.Y)); //correct predicted state by measurement
        /**************************** KALMAN correct **************************/

        //inflate found area for search (X factor)...
        var foundArea = Rectangle.Round(foundBox.GetMinArea());
        searchArea = foundArea.Inflate(SEARCH_AREA_INFLATE_FACTOR, SEARCH_AREA_INFLATE_FACTOR, frame.Size());

        nonVisibleCount = 0; //object seen => reset the lost-frames counter
    }
    else
    {
        nonVisibleCount++;
        if (nonVisibleCount == 1) //for the first time
        {
            //shrink (hysteresis): undo the inflation (x1.5) once so the search area does
            //not stay enlarged while the object is lost
            searchArea = searchArea.Inflate(-SEARCH_AREA_INFLATE_FACTOR * 1.5, -SEARCH_AREA_INFLATE_FACTOR * 1.5, frame.Size());
        }

        //keep following the Kalman prediction while the object is not visible
        searchArea = createRect(kalman.State.Position, searchArea.Size, frame.Size());
    }

    if (nonVisibleCount > 100) //if not visible for a longer time => reset tracking
    {
        nonVisibleCount = 0;
        isROISelected = false;
    }
}
/// <summary>
/// Converts a Bgr8 color (plus optional opacity) into an OpenCV scalar
/// laid out as (B, G, R, A).
/// </summary>
/// <param name="color">Source color.</param>
/// <param name="opacity">Alpha channel; 255 (full opaque) by default.</param>
/// <returns>Equivalent OpenCV scalar.</returns>
public static CvScalar ToCvScalar(this Bgr<byte> color, byte opacity = Byte.MaxValue) => new CvScalar
{
    V0 = color.B,
    V1 = color.G,
    V2 = color.R,
    V3 = opacity
};
/// <summary>
/// Runs one Camshift iteration inside the given search area and rejects weak
/// or degenerate detections (low mean probability, empty box, height &lt; 12).
/// </summary>
/// <param name="frame">Current video frame.</param>
/// <param name="searchArea">Area to search within.</param>
/// <param name="probabilityMap">Back-projected (and masked) probability map.</param>
/// <param name="foundBox">Found object box; Box2D.Empty when rejected.</param>
private void trackCamshift(Bgr<byte>[,] frame, Rectangle searchArea, out Gray<byte>[,] probabilityMap, out Box2D foundBox)
{
    const int PROBABILITY_MIN_VAL = (int)(0.3f * Byte.MaxValue);

    //convert to HSV
    var hsvFrame = frame.ToHsv();

    //back-project ratio hist => create probability map
    probabilityMap = ratioHist.BackProject(hsvFrame.SplitChannels<Hsv<byte>, byte>(0, 1));

    //user constraints...
    Gray<byte>[,] brightnessMask = hsvFrame.InRange(new Hsv<byte>(0, 0, (byte)minV), new Hsv<byte>(0, 0, (byte)maxV), Byte.MaxValue, 2);
    probabilityMap.AndByte(brightnessMask, inPlace: true);

    //run Camshift algorithm to find new object position, size and angle
    CentralMoments moments;
    foundBox = Camshift.Process(probabilityMap, searchArea, Meanshift.DEFAULT_TERM, out moments);

    //stopping conditions (Epsilon guards against division by a zero area)
    float meanIntensity = moments.Mu00 / (foundBox.Size.Area() + Single.Epsilon);
    bool tooWeak = meanIntensity < PROBABILITY_MIN_VAL;
    bool degenerate = foundBox.Size.IsEmpty || foundBox.Size.Height < 12;

    if (tooWeak || degenerate)
    {
        foundBox = Box2D.Empty; //invalid box
    }
}
/// <summary>
/// Marks each point on the image with a red rectangle sized to the processing window.
/// </summary>
/// <param name="im">Destination image.</param>
/// <param name="points">Points to mark.</param>
private void drawPoints(Bgr<byte>[,] im, List<PointF> points)
{
    foreach (var point in points)
    {
        //1x1 rectangle at the point, grown by half the window size on each side
        var marker = new RectangleF(point.X, point.Y, 1, 1);
        marker.Inflate(winSize / 2, winSize / 2);

        im.Draw(marker, Bgr<byte>.Red, 2);
    }
}
/// <summary>
/// The blending filter is able to blend two images using a homography matrix.
/// A linear alpha gradient is used to smooth out differences between the two
/// images, effectively blending them in two images. The gradient is computed
/// considering the distance between the centers of the two images.
/// <para>Homography matrix is set to identity.</para>
/// <para>Fill color is set to black with alpha set to 0 (all zeros).</para>
/// </summary>
/// <param name="im">Image.</param>
/// <param name="overlayIm">The overlay image (also called the anchor).</param>
/// <param name="gradient">A value indicating whether to blend using a linear
/// gradient or just superimpose the two images with equal weights.</param>
/// <param name="alphaOnly">A value indicating whether only the alpha channel
/// should be blended. This can be used together with a transparency
/// mask to selectively blend only portions of the image.</param>
/// <returns>Blended image.</returns>
public static Bgra<byte>[,] Blend(this Bgr<byte>[,] im, Bgr<byte>[,] overlayIm, bool gradient = true, bool alphaOnly = false)
    => Blend(im, overlayIm, new MatrixH(Matrix.Identity(3)), new Bgra<byte>(), gradient, alphaOnly);
/// <summary>
/// Saves the specified image.
/// </summary>
/// <param name="image">Image to save.</param>
/// <param name="fileName">Image filename.</param>
public static void Save(this Bgr<ushort>[,] image, string fileName)
    => image.Save<Bgr<ushort>>(fileName);
/// <summary>
/// Converts a gray intensity to a Bgr color by replicating the intensity
/// into all three channels.
/// </summary>
/// <param name="gray">Source color.</param>
/// <param name="bgr">Destination color (written in place).</param>
public static void Convert(ref Gray<T> gray, ref Bgr<T> bgr)
{
    var intensity = gray.Intensity;

    bgr.B = intensity;
    bgr.G = intensity;
    bgr.R = intensity;
}
/// <summary>
/// Saves the specified image.
/// </summary>
/// <param name="image">Image to save.</param>
/// <param name="fileName">Image filename.</param>
public static void Save(this Bgr<double>[,] image, string fileName)
    => image.Save<Bgr<double>>(fileName);
/// <summary>
/// The blending filter is able to blend two images using a homography matrix.
/// A linear alpha gradient is used to smooth out differences between the two
/// images, effectively blending them in two images. The gradient is computed
/// considering the distance between the centers of the two images.
/// <para>Homography matrix is set to identity.</para>
/// <para>Fill color is set to black with alpha set to 0 (all zeros).</para>
/// </summary>
/// <param name="im">Image.</param>
/// <param name="overlayIm">The overlay image (also called the anchor).</param>
/// <param name="gradient">A value indicating whether to blend using a linear
/// gradient or just superimpose the two images with equal weights.</param>
/// <param name="alphaOnly">A value indicating whether only the alpha channel
/// should be blended. This can be used together with a transparency
/// mask to selectively blend only portions of the image.</param>
/// <returns>Blended image.</returns>
public static Bgra<byte>[,] Blend(this Bgr<byte>[,] im, Bgr<byte>[,] overlayIm, bool gradient = true, bool alphaOnly = false)
{
    //delegate to the full overload with an identity homography and a transparent fill color
    var identity = new MatrixH(Matrix.Identity(3));
    return Blend(im, overlayIm, identity, new Bgra<byte>(), gradient, alphaOnly);
}
/// <summary>
/// Converts the source channel depth to the destination channel depth.
/// </summary>
/// <typeparam name="TDepth">Destination channel depth.</typeparam>
/// <param name="image">Image.</param>
/// <returns>Image with converted element depth.</returns>
public static Bgr<TDepth>[,] Cast<TDepth>(this Bgr<double>[,] image)
    where TDepth : struct
    => image.ConvertChannelDepth<Bgr<double>, Bgr<TDepth>>();
/// <summary>
/// Animation tick: bounces the scale between 100 and 300 and the angle between
/// 0 and 360, rebuilds the transformed model contour, and redraws it.
/// </summary>
private void timer_Tick(object sender, EventArgs e)
{
    const int BORDER_OFFSET = 20;

    //bounce the scale inside [100, 300]
    scale += 10 * dScale;
    if (scale > 300) dScale = -1;
    if (scale < 100) dScale = 1;

    //bounce the angle inside [0, 360]
    angle += 5 * dAngle;
    if (angle > 360) dAngle = -1;
    //BUGFIX: was 'if (dAngle < 0) dAngle = 1;' which immediately undid the direction
    //flip above, so the angle grew without bound instead of oscillating (mirrors the
    //scale bounce logic).
    if (angle < 0) dAngle = 1;

    var transformation = Transforms2D.Combine
                         (
                             Transforms2D.Rotation((float)Angle.ToRadians(angle)),
                             Transforms2D.Scale(scale, scale)
                         );

    IEnumerable<PointF> pts = modelPts.Transform(transformation);

    //shift the contour so its bounding box sits BORDER_OFFSET pixels inside the image
    var box = pts.BoundingRect(); //maybe apply it to bounding box instead of points (expensive)
    pts = pts.Transform(Transforms2D.Translation(-box.X + BORDER_OFFSET, -box.Y + BORDER_OFFSET));

    var image = new Bgr<byte>[scale + BORDER_OFFSET * 2, scale + BORDER_OFFSET * 2];
    drawContour(pts.ToList(), image);
    pictureBox.Image = image.ToBitmap();
}
/// <summary>
/// Converts the source color to the destination color.
/// </summary>
/// <param name="image">Source image.</param>
/// <param name="area">Working area.</param>
/// <returns>image with converted color.</returns>
public static Gray<byte>[,] ToGray(this Bgr<byte>[,] image, Rectangle area)
    => image.Convert<Bgr<byte>, Gray<byte>>(Bgr<byte>.Convert, area);