/// <summary>
/// Convert a CvArray to cv::Mat and push it into the vector.
/// </summary>
/// <typeparam name="TDepth">The type of depth of the cvArray</typeparam>
/// <param name="cvArray">The cvArray to be pushed into the vector</param>
public void Push<TDepth>(CvArray<TDepth> cvArray) where TDepth : new()
{
    // Convert through a temporary Mat; the vector copies the data on Push,
    // so the temporary can be disposed immediately afterwards.
    using (Mat converted = new Mat())
    {
        converted.From(cvArray);
        Push(converted);
    }
}
/// <summary>
/// Draw the segment point1-point2 and its normal (as computed by
/// <see cref="CalculateNormal"/>) onto the image, both in green (BGR 0,255,0).
/// </summary>
/// <param name="img">The image to draw on.</param>
/// <param name="point1">First endpoint of the segment.</param>
/// <param name="point2">Second endpoint of the segment.</param>
private static void DrawNormal(CvArray<byte> img, Point point1, Point point2)
{
    // out parameters need no pre-initialization; CalculateNormal assigns both.
    Point normalStart;
    Point normalEnd;
    CalculateNormal(img, point1, point2, out normalStart, out normalEnd);

    MCvScalar green = new MCvScalar(0, 255, 0);
    CvInvoke.Line(img, point1, point2, green);
    CvInvoke.Line(img, normalStart, normalEnd, green);
}
/// <summary>
/// Return the image unchanged when it is already large enough; otherwise
/// return a new Mat upscaled by a factor of 1.5 in both dimensions.
/// </summary>
/// <param name="image">The input image.</param>
/// <returns>The original image's Mat, or a newly allocated upscaled Mat.</returns>
private Mat ScaleImage(CvArray<byte> image)
{
    // Images at or above this pixel count are considered big enough as-is.
    const int PixelThreshold = 900_000;

    if (image.Width * image.Height >= PixelThreshold)
    {
        return image.Mat;
    }

    Mat enlarged = new Mat();
    // Size.Empty makes Resize derive the destination size from the scale factors.
    CvInvoke.Resize(image, enlarged, Size.Empty, 1.5, 1.5);
    return enlarged;
}
/// <summary>
/// Compute the ORB descriptors for the given key point locations on the image.
/// </summary>
/// <param name="image">The image where the descriptor will be computed from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <param name="keyPoints">The keypoint where the descriptor will be computed from. Keypoints for which a descriptor cannot be computed are removed.</param>
/// <returns>The descriptors found at the keypoint locations, or null if no keypoints remain after masking.</returns>
private Matrix<byte> ComputeDescriptorsRawHelper(CvArray<Byte> image, Image<Gray, byte> mask, VectorOfKeyPoint keyPoints)
{
    // Discard keypoints that fall outside the (optional) mask before computing.
    if (mask != null)
        keyPoints.FilterByPixelsMask(mask);

    int remaining = keyPoints.Size;
    if (remaining == 0)
        return null;

    // One row per keypoint; row width scales with the image's channel count.
    Matrix<byte> result = new Matrix<byte>(remaining, DescriptorSize * image.NumberOfChannels, 1);
    CvInvoke.CvOrbDetectorComputeDescriptors(_ptr, image, keyPoints, result);
    return result;
}
/// <summary>
/// Compute the BRIEF descriptors for the given key point locations on the image.
/// </summary>
/// <param name="image">The image where the descriptor will be computed from</param>
/// <param name="mask">The optional mask, can be null if not needed</param>
/// <param name="keyPoints">The keypoint where the descriptor will be computed from. Keypoints for which a descriptor cannot be computed are removed.</param>
/// <returns>The descriptors found at the keypoint locations, or null if no keypoints survive filtering.</returns>
private Matrix<Byte> ComputeDescriptorsRawHelper(CvArray<Byte> image, Image<Gray, byte> mask, VectorOfKeyPoint keyPoints)
{
    // Smallest float such that 1.0f + epsilon != 1.0f (FLT_EPSILON).
    const float epsilon = 1.192092896e-07f;

    // Border margin matches opencv's BriefDescriptorExtractor::computeImpl implementation.
    keyPoints.FilterByImageBorder(image.Size, 48 / 2 + 9 / 2);
    // Drop degenerate (near-zero-size) keypoints.
    keyPoints.FilterByKeypointSize(epsilon, float.MaxValue);
    if (mask != null)
        keyPoints.FilterByPixelsMask(mask);

    int remaining = keyPoints.Size;
    if (remaining == 0)
        return null;

    Matrix<Byte> result = new Matrix<Byte>(remaining, DescriptorSize * image.NumberOfChannels, 1);
    CvInvoke.CvBriefDescriptorComputeDescriptors(_ptr, image, keyPoints, result);
    // The native call must produce exactly one descriptor row per surviving keypoint.
    Debug.Assert(keyPoints.Size == result.Rows);
    return result;
}
/// <summary>
/// Compute a normal segment of (approximately) <paramref name="normalLength"/> pixels
/// starting at the midpoint of the segment point1-point2 and perpendicular to it.
/// </summary>
/// <param name="img">Unused here; kept for signature compatibility with callers.</param>
/// <param name="point1">First endpoint of the segment.</param>
/// <param name="point2">Second endpoint of the segment.</param>
/// <param name="startPoint">Receives the midpoint of point1-point2.</param>
/// <param name="endPoint">Receives the far end of the normal segment.</param>
/// <param name="normalLength">Desired length of the normal, in pixels.</param>
private static void CalculateNormal(CvArray<byte> img, Point point1, Point point2, out Point startPoint, out Point endPoint, int normalLength = 10)
{
    // The normal starts at the midpoint of the segment (integer division).
    int startX = (point1.X + point2.X) / 2;
    int startY = (point1.Y + point2.Y) / 2;

    // Slope of the segment. For a vertical segment this is +/-Infinity, which
    // the formulas below handle: 1/(coeff*coeff) becomes 0 and the normal is horizontal.
    double coeff = (double)(point2.Y - point1.Y) / (point2.X - point1.X);

    // Pick the X direction of the normal so it points to a consistent side of the segment.
    int endX;
    if (point2.Y < point1.Y)
    {
        endX = (int)Math.Round(normalLength / Math.Sqrt(1 + 1 / (coeff * coeff)) + startX);
    }
    else
    {
        endX = (int)Math.Round(-normalLength / Math.Sqrt(1 + 1 / (coeff * coeff)) + startX);
    }

    int endY;
    if (coeff == 0)
    {
        // Horizontal segment: the normal is vertical; choose direction from segment orientation.
        if (point1.X > point2.X)
        {
            endY = startY - normalLength;
        }
        else
        {
            endY = startY + normalLength;
        }
    }
    else
    {
        // Perpendicular slope is -1/coeff: y = startY - (endX - startX)/coeff.
        endY = (int)Math.Round(startY + startX / coeff - endX / coeff);
    }

    startPoint = new Point(startX, startY);
    endPoint = new Point(endX, endY);
}
/// <summary>
/// Convert a CvArray to cv::Mat and push it into the vector.
/// </summary>
/// <typeparam name="TDepth">The type of depth of the cvArray</typeparam>
/// <param name="cvArray">The cvArray to be pushed into the vector</param>
public void Push<TDepth>(CvArray<TDepth> cvArray) where TDepth : new()
    => Push(cvArray.Mat);
/// <summary>
/// Detect near-horizontal line segments (endpoint Y difference of at most 5 pixels)
/// via the probabilistic Hough transform and return their starting-point Y coordinates.
/// </summary>
/// <param name="image">The (binary/edge) image to run HoughLinesP on.</param>
/// <returns>Y coordinates of the first endpoint of each near-horizontal segment.</returns>
private static List<int> HistLine(CvArray<byte> image)
{
    // A segment must span at least 80% of the image width to be accepted.
    int accumulatorThreshold = (int)(0.8 * image.Cols);
    LineSegment2D[] segments = CvInvoke.HoughLinesP(image, 1, Math.PI / 90.0, accumulatorThreshold);

    return segments
        .Where(segment => Math.Abs(segment.P1.Y - segment.P2.Y) <= 5)
        .Select(segment => segment.P1.Y)
        .ToList();
}
/// <summary>
/// Download data from the device to host memory. This is a blocking call.
/// </summary>
/// <param name="arr">The destination CvArray where the GpuMat data will be downloaded to.</param>
public void Download(CvArray<TDepth> arr)
    => GpuInvoke.GpuMatDownload(_ptr, arr);
/// <summary>
/// Performs a blocking upload of data to the GpuMat.
/// </summary>
/// <param name="arr">The CvArray to be uploaded to GpuMat</param>
public void Upload(CvArray<TDepth> arr)
    => GpuInvoke.GpuMatUpload(_ptr, arr);
/// <summary>
/// Create a GpuMat from a CvArray of the same depth type.
/// </summary>
/// <param name="arr">The CvArray to be converted to GpuMat</param>
public GpuMat(CvArray<TDepth> arr)
    => _ptr = GpuInvoke.GpuMatCreateFromArr(arr);