public OpencvSource(string cam_or_url)
{
    MAssert.Check(cam_or_url != string.Empty);

    // check if cam_or_url is a number (any non-digit character means it is a stream URL)
    bool stream = false;
    for (int i = 0; i < cam_or_url.Length; ++i)
    {
        stream = stream || (cam_or_url[i] < '0') || (cam_or_url[i] > '9');
    }

    if (stream)
    {
        // open stream
        Console.WriteLine("opening stream '{0}'", cam_or_url);
        capturer = new OpenCvSharp.VideoCapture(cam_or_url);
    }
    else
    {
        // convert to integer
        int cam_id = Convert.ToInt32(cam_or_url, 10);
        MAssert.Check(cam_id >= 0, "wrong webcam id");

        // open webcam
        Console.WriteLine("opening webcam {0}", cam_id);
        capturer = new OpenCvSharp.VideoCapture(cam_id);
        MAssert.Check(capturer.IsOpened(), "webcam not opened");

        // set resolution
        capturer.Set(OpenCvSharp.CaptureProperty.FrameWidth, 1280);
        capturer.Set(OpenCvSharp.CaptureProperty.FrameHeight, 720);
        MAssert.Check(capturer.IsOpened(), "webcam not opened");
    }

    // sometimes the first few frames can be empty even if the camera is good,
    // so skip a few frames
    OpenCvSharp.Mat frame;
    for (int i = 0; i < 10; ++i)
    {
        frame = capturer.RetrieveMat();
    }

    // check the first two frames
    OpenCvSharp.Mat image1 = new OpenCvSharp.Mat(), image2 = new OpenCvSharp.Mat();
    capturer.Read(image1);
    capturer.Read(image2);
    Console.WriteLine("image1 size: {0}", image1.Size());
    Console.WriteLine("image2 size: {0}", image2.Size());

    MAssert.Check(
        !image1.Empty() &&
        !image2.Empty() &&
        image1.Size() == image2.Size() &&
        image1.Type() == OpenCvSharp.MatType.CV_8UC3 &&
        image2.Type() == OpenCvSharp.MatType.CV_8UC3,
        "error opening webcam or stream");
}
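A minimal usage sketch for the constructor above (assumptions: OpencvSource exposes its capturer field, and MAssert comes from the surrounding project):

// open webcam 0, or pass a URL such as "rtsp://host/stream" for a stream
var source = new OpencvSource("0");
using (var frame = new OpenCvSharp.Mat())
{
    while (source.capturer.Read(frame) && !frame.Empty())
    {
        OpenCvSharp.Cv2.ImShow("preview", frame);
        if (OpenCvSharp.Cv2.WaitKey(1) == 27) break; // Esc quits
    }
}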
public void RotateImage(OpenCvSharp.Mat src, ref OpenCvSharp.Mat dst, double angle, double scale)
{
    var imageCenter = new Point2f(src.Cols / 2f, src.Rows / 2f);
    var rotationMat = Cv2.GetRotationMatrix2D(imageCenter, angle, scale);
    Cv2.WarpAffine(src, dst, rotationMat, src.Size());
}
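A quick usage sketch for RotateImage ("input.png" and "rotated.png" are placeholder paths):

Mat src = Cv2.ImRead("input.png", ImreadModes.Color);
Mat dst = new Mat();
RotateImage(src, ref dst, angle: 15.0, scale: 1.0); // positive angle rotates counter-clockwise about the center
Cv2.ImWrite("rotated.png", dst);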
public void Run()
{
    Mat src = new Mat(FilePath.Image.Girl, ImreadModes.Color);
    Mat dst = new Mat(FilePath.Image.Lenna, ImreadModes.Color);
    Mat src0 = src.Resize(dst.Size(), 0, 0, InterpolationFlags.Lanczos4);

    Mat mask = Mat.Zeros(src0.Size(), MatType.CV_8UC3);
    mask.Circle(200, 200, 100, Scalar.White, -1);

    Mat blend1 = new Mat();
    Mat blend2 = new Mat();
    Mat blend3 = new Mat();
    Cv2.SeamlessClone(
        src0, dst, mask, new Point(260, 270), blend1,
        SeamlessCloneMethods.NormalClone);
    Cv2.SeamlessClone(
        src0, dst, mask, new Point(260, 270), blend2,
        SeamlessCloneMethods.MonochromeTransfer);
    Cv2.SeamlessClone(
        src0, dst, mask, new Point(260, 270), blend3,
        SeamlessCloneMethods.MixedClone);

    using (new Window("src", src0))
    using (new Window("dst", dst))
    using (new Window("mask", mask))
    using (new Window("blend NormalClone", blend1))
    using (new Window("blend MonochromeTransfer", blend2))
    using (new Window("blend MixedClone", blend3))
    {
        Cv2.WaitKey();
    }
}
private int OpenCVDeepLearningDetector(string path)
{
    // ported from an Emgu example to OpenCvSharp:
    // https://medium.com/@vinuvish/face-detection-with-opencv-and-deep-learning-90bff9028fa8
    string prototextPath = @"./Dnn/deploy.prototxt";
    string caffeModelPath = @"./Dnn/res10_300x300_ssd_iter_140000.caffemodel";

    // load the model
    using (var net = OpenCvSharp.Dnn.CvDnn.ReadNetFromCaffe(prototxt: prototextPath, caffeModel: caffeModelPath))
    using (OpenCvSharp.Mat image = Cv2.ImRead(path))
    {
        // get the original image size
        OpenCvSharp.Size imageSize = image.Size();

        // the dnn detector works on a 300x300 image;
        // resize the image for the Dnn detector
        OpenCvSharp.Size size = new OpenCvSharp.Size(300, 300);

        // the mean BGR values that are subtracted from the image for
        // normalization (the values the res10 model was trained with)
        OpenCvSharp.Scalar mcvScalar = new OpenCvSharp.Scalar(104.0, 177.0, 123.0);

        // note: the Caffe model expects BGR input, so swapRB stays false here
        using (var blob = OpenCvSharp.Dnn.CvDnn.BlobFromImage(image: image, scaleFactor: 1, size: size, mean: mcvScalar, swapRB: false))
        {
            net.SetInput(blob, "data");
            using (OpenCvSharp.Mat detections = net.Forward())
            {
                // convert the detected values to a faces object that we can use to
                // draw rectangles; the Emgu-style detections.GetData()/GetValue(...)
                // calls from the original article have no direct OpenCvSharp
                // equivalent (see the ParseSsdDetections sketch below)
                List<ConfidenceRect> Faces = new List<ConfidenceRect>();

                //// convert to a writeableBitmap
                //WriteableBitmap writeableBitmap = new WriteableBitmap(ImageSource);
                //ImageSource = ConvertWriteableBitmapToBitmapImage(writeableBitmap);
                //OnPropertyChanged("ImageSource");

                //DrawDnnOnImage?.Invoke(Faces, imageSize);
                //return Faces.Count.ToString();
            }
        }
    }
    return 0;
}
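A hedged sketch of what the Emgu-style parsing could look like in OpenCvSharp. ParseSsdDetections is a hypothetical helper, and the [1,1,N,7] output layout and the 0.5 confidence threshold are assumptions based on the usual SSD face-detector output, not taken from the original code (requires System.Collections.Generic):

private static List<OpenCvSharp.Rect> ParseSsdDetections(
    OpenCvSharp.Mat detections, OpenCvSharp.Size imageSize, float threshold = 0.5f)
{
    var faces = new List<OpenCvSharp.Rect>();
    // reinterpret the [1,1,N,7] blob as an N x 7 float matrix
    using (var detectionMat = new OpenCvSharp.Mat(
        detections.Size(2), detections.Size(3), OpenCvSharp.MatType.CV_32F, detections.Ptr(0)))
    {
        for (int n = 0; n < detectionMat.Rows; n++)
        {
            float confidence = detectionMat.At<float>(n, 2);
            if (confidence < threshold) continue;
            // columns 3..6 hold the box corners, normalized to [0, 1]
            int x1 = (int)(detectionMat.At<float>(n, 3) * imageSize.Width);
            int y1 = (int)(detectionMat.At<float>(n, 4) * imageSize.Height);
            int x2 = (int)(detectionMat.At<float>(n, 5) * imageSize.Width);
            int y2 = (int)(detectionMat.At<float>(n, 6) * imageSize.Height);
            faces.Add(new OpenCvSharp.Rect(x1, y1, x2 - x1, y2 - y1));
        }
    }
    return faces;
}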
public static void render_2D(ref OpenCvSharp.Mat left_display, sl.float2 img_scale, ref sl.Objects objects, bool render_mask, bool isTrackingON)
{
    OpenCvSharp.Mat overlay = left_display.Clone();
    int line_thickness = 2;

    for (int i = 0; i < objects.numObject; i++)
    {
        sl.ObjectData obj = objects.objectData[i];
        if (Utils.renderObject(obj, isTrackingON))
        {
            OpenCvSharp.Scalar base_color = Utils.generateColorID_u(obj.id);

            // display the image-scale 2D bounding box
            if (obj.boundingBox2D.Length < 4) continue;
            Point top_left_corner = Utils.cvt(obj.boundingBox2D[0], img_scale);
            Point top_right_corner = Utils.cvt(obj.boundingBox2D[1], img_scale);
            Point bottom_right_corner = Utils.cvt(obj.boundingBox2D[2], img_scale);
            Point bottom_left_corner = Utils.cvt(obj.boundingBox2D[3], img_scale);

            // creation of the two horizontal lines
            Cv2.Line(left_display, top_left_corner, top_right_corner, base_color, line_thickness);
            Cv2.Line(left_display, bottom_left_corner, bottom_right_corner, base_color, line_thickness);
            // creation of the two vertical lines
            Utils.drawVerticalLine(ref left_display, bottom_left_corner, top_left_corner, base_color, line_thickness);
            Utils.drawVerticalLine(ref left_display, bottom_right_corner, top_right_corner, base_color, line_thickness);

            // scaled ROI
            OpenCvSharp.Rect roi = new OpenCvSharp.Rect(top_left_corner.X, top_left_corner.Y,
                (int)top_right_corner.DistanceTo(top_left_corner),
                (int)bottom_right_corner.DistanceTo(top_right_corner));
            overlay.SubMat(roi).SetTo(base_color);

            sl.float2 position_image = getImagePosition(obj.boundingBox2D, img_scale);
            Cv2.PutText(left_display, obj.label.ToString(),
                new Point(position_image.x - 20, position_image.y - 12),
                HersheyFonts.HersheyComplexSmall, 0.5f, new Scalar(255, 255, 255, 255), 1);

            if (!float.IsInfinity(obj.position.Z))
            {
                string text = Math.Abs(obj.position.Z).ToString("0.##M");
                Cv2.PutText(left_display, text,
                    new Point(position_image.x - 20, position_image.y),
                    HersheyFonts.HersheyComplexSmall, 0.5, new Scalar(255, 255, 255, 255), 1);
            }
        }
    }
    // here, overlay is the same as the left image but with an opaque mask over each
    // detected object; blend it back for a translucent highlight
    Cv2.AddWeighted(left_display, 0.7, overlay, 0.3, 0.0, left_display);
}
/// <summary>
/// utility function: convert one image to another with optional vertical flip
/// </summary>
/// <param name="src"></param>
/// <param name="dst"></param>
/// <param name="flags"></param>
public static void ConvertImage(Mat src, Mat dst, ConvertImageModes flags = ConvertImageModes.None)
{
    if (src == null)
        throw new ArgumentNullException(nameof(src));
    if (dst == null)
        throw new ArgumentNullException(nameof(dst));
    src.ThrowIfDisposed();
    dst.ThrowIfDisposed();

    dst.Create(src.Size(), MatType.CV_8UC3);
    NativeMethods.imgcodecs_cvConvertImage_Mat(src.CvPtr, dst.CvPtr, (int)flags);

    GC.KeepAlive(src);
    GC.KeepAlive(dst);
}
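Usage sketch: normalize an arbitrary input Mat to the 8-bit 3-channel format the rest of a pipeline expects ("photo.png" is a placeholder path):

Mat input = Cv2.ImRead("photo.png", ImreadModes.Unchanged);
Mat converted = new Mat();
ConvertImage(input, converted); // converted is now CV_8UC3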
/// <summary>
/// Converts OpenCV Mat to Unity texture
/// </summary>
/// <returns>Unity texture</returns>
/// <param name="mat">OpenCV Mat</param>
/// <param name="outTexture">Unity texture to set pixels</param>
public static Texture2D MatToTexture(Mat mat, Texture2D outTexture = null)
{
    Size size = mat.Size();
    using (Mat unityMat = new Mat(utils_mat_to_texture_2(mat.CvPtr)))
    {
        // reuse the texture when it already has the right dimensions
        if (null == outTexture || outTexture.width != size.Width || outTexture.height != size.Height)
        {
            outTexture = new Texture2D(size.Width, size.Height);
        }

        int count = size.Width * size.Height;
        // Color32Bytes presumably overlays byteArray and colors at the same offset
        // (union-style struct), so copying the raw bytes also fills the Color32 array
        Color32Bytes data = new Color32Bytes();
        data.byteArray = new byte[count * 4];
        data.colors = new Color32[count];
        Marshal.Copy(unityMat.Data, data.byteArray, 0, data.byteArray.Length);
        outTexture.SetPixels32(data.colors);
        outTexture.Apply();

        return outTexture;
    }
}
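A usage sketch inside a Unity MonoBehaviour (the RawImage target and the per-frame webCamMat are assumptions; passing the cached texture back in avoids reallocating it every frame):

private Texture2D cachedTexture;

void UpdatePreview(Mat webCamMat)
{
    cachedTexture = MatToTexture(webCamMat, cachedTexture);
    GetComponent<UnityEngine.UI.RawImage>().texture = cachedTexture;
}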
// https://stackoverflow.com/questions/52016253/python-np-array-equivilent-in-opencv-opencvsharp
public static Mat zeros_like(Mat a)
{
    return new Mat(a.Size(), a.Type(), new Scalar(0));
}
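Usage sketch, mirroring numpy's np.zeros_like ("input.png" is a placeholder path):

Mat image = Cv2.ImRead("input.png");
Mat mask = zeros_like(image); // same size and type, all pixels zero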
/// <summary>
/// Detect tilt from the eyes and overlay an ellipse mask on the face
/// </summary>
/// <param name="srcMat"></param>
/// <param name="putMat"></param>
/// <returns></returns>
public Mat PutEllipseEyeMaskOnFace(Mat srcMat, Mat putMat)
{
    var grayMat = new Mat();
    Cv2.CvtColor(srcMat, grayMat, ColorConversionCodes.BGR2GRAY);
    Cv2.EqualizeHist(grayMat, grayMat);

    var faces = Cascade.DetectMultiScale(grayMat);
    if (faces == null || faces.Length == 0) return srcMat;

    var polygons = new List<List<Point>>();
    var faceCount = faces.Length; // O(n)
    for (int d = 0; d < faceCount; d++)
    {
        polygons = new List<List<Point>>();
        int x1 = faces[d].X;
        int y1 = faces[d].Y;
        int width = faces[d].Width;
        int height = faces[d].Height;
        int x2 = x1 + width;
        int y2 = y1 + height;
        int pwidth = putMat.Width;
        int pheight = putMat.Height;
        polygons.Add(new List<Point>()
        {
            new Point(x1, y1),
            new Point(x2, y1),
            new Point(x2, y2),
            new Point(x1, y2),
        });
        var faceSize = new Size(width, height);

        // it might be better to enlarge the overlay image slightly:
        // Mat put0 = putMat[(int)(pheight * 0.1), (int)(pheight * 0.9), (int)(pwidth * 0.1), (int)(pwidth * 0.9)]
        //     .Resize(new Size(width, height), 0, 0, InterpolationFlags.Lanczos4);
        Mat put0 = putMat.Resize(faceSize, 0, 0, InterpolationFlags.Lanczos4);

        // roughly sample the color around the center
        // (room for improvement: e.g. average over a skin-colored region)
        MatOfByte3 mat3 = new MatOfByte3(put0); // cv::Mat_<cv::Vec3b>
        var indexer = mat3.GetIndexer();
        Vec3b color = indexer[(int)(put0.Height * 0.5), (int)(put0.Width * 0.5)]; // indexer is [row, col]

        // fill with the sampled color
        Mat put1 = new Mat(srcMat.Size(), MatType.CV_8UC3, new Scalar(color.Item0, color.Item1, color.Item2));
        // copy into the overlay region
        put1[y1, y2, x1, x2] = put0;

        Mat put1gray = Mat.Zeros(srcMat.Size(), MatType.CV_8UC1);
        put1gray[y1, y2, x1, x2] = grayMat[y1, y2, x1, x2];
        var eyes = EyeCascade.DetectMultiScale(put1gray);
        /* debug: visualize the first detected eye
        Debug.WriteLine(eyes.Count());
        var cccc = new Point(eyes[0].X + eyes[0].Width * 0.5, eyes[0].Y + eyes[0].Height * 0.5);
        put1gray.Circle(cccc, (int)(eyes[0].Width * 0.5), new Scalar(0, 255, 255));
        return put1gray; */
        var eyeCount = eyes.Length;
        if (eyeCount >= 2)
        {
            var orderedEyes = eyes.OrderByDescending(x => x.Width * x.Height).ToArray();
            while (true)
            {
                // take the two largest remaining eye candidates
                var eyePoints = new List<Point>();
                for (int i = 0; i < 2; i++)
                {
                    eyePoints.Add(new Point(orderedEyes[i].X + orderedEyes[i].Width * 0.5,
                                            orderedEyes[i].Y + orderedEyes[i].Height * 0.5));
                }
                var wrapRect = Cv2.MinAreaRect(eyePoints);
                if (Math.Abs(wrapRect.Angle % 180) < 20)
                {
                    var scale = 1.0;
                    var angle = -wrapRect.Angle % 180;
                    var eyedx = (eyePoints[0].X + eyePoints[1].X) * 0.5 - wrapRect.Center.X;
                    var eyedy = (eyePoints[0].Y + eyePoints[1].Y) * 0.5 - wrapRect.Center.Y;
                    // the center is here
                    var center = new Point(
                        (faces[d].X + faces[d].Width * 0.5) + eyedx,
                        (faces[d].Y + faces[d].Height * 0.5) + eyedy);
                    Mat matrix = Cv2.GetRotationMatrix2D(center, angle, scale);
                    // rotate the image
                    Cv2.WarpAffine(put1, put1, matrix, put1.Size());

                    var faceAvgWidth = (int)((wrapRect.Size.Width + faceSize.Width) * 0.6);
                    var rotateRect = new RotatedRect(center, new Size2f(faceAvgWidth, faceSize.Height * 0.9), angle);
                    Mat mask = Mat.Zeros(srcMat.Size(), MatType.CV_8UC3);
                    Cv2.Ellipse(mask, rotateRect, new Scalar(255, 255, 255), -1, LineTypes.AntiAlias);
                    // Cv2.FillPoly(mask, polygons, new Scalar(255, 255, 255));
                    Cv2.SeamlessClone(put1, srcMat, mask, center, srcMat, SeamlessCloneMethods.NormalClone);
                    break;
                }
                else if (orderedEyes.Length > 2)
                {
                    // drop the largest candidate and try the next pair
                    orderedEyes = orderedEyes.Skip(1).ToArray();
                }
                else
                {
                    // no usable eye pair: fall back to an unrotated ellipse
                    var angle = 0;
                    // the center is here
                    var center = new Point(faces[d].X + faces[d].Width * 0.5, faces[d].Y + faces[d].Height * 0.5);
                    var rotateRect = new RotatedRect(center, new Size2f(faceSize.Width * 0.8, faceSize.Height * 0.9), angle);
                    Mat mask = Mat.Zeros(srcMat.Size(), MatType.CV_8UC3);
                    Cv2.Ellipse(mask, rotateRect, new Scalar(255, 255, 255), -1, LineTypes.AntiAlias);
                    // Cv2.FillPoly(mask, polygons, new Scalar(255, 255, 255));
                    Cv2.SeamlessClone(put1, srcMat, mask, center, srcMat, SeamlessCloneMethods.NormalClone);
                    break;
                }
            }
        }
        else
        {
            var angle = 0;
            // the center is here
            var center = new Point(faces[d].X + faces[d].Width * 0.5, faces[d].Y + faces[d].Height * 0.5);
            var rotateRect = new RotatedRect(center, new Size2f(faceSize.Width * 0.8, faceSize.Height * 0.9), angle);
            Mat mask = Mat.Zeros(srcMat.Size(), MatType.CV_8UC3);
            Cv2.Ellipse(mask, rotateRect, new Scalar(255, 255, 255), -1, LineTypes.AntiAlias);
            // Cv2.FillPoly(mask, polygons, new Scalar(255, 255, 255));
            Cv2.SeamlessClone(put1, srcMat, mask, center, srcMat, SeamlessCloneMethods.NormalClone);
        }
    }
    return srcMat;
}
/// <summary>
/// Poisson Image Editing
/// </summary>
/// <param name="srcMat">the image containing the face</param>
/// <param name="putMat">the face to overlay</param>
/// <returns></returns>
public Mat PutMaskOnFace(Mat srcMat, Mat putMat)
{
    var grayMat = new Mat();
    Cv2.CvtColor(srcMat, grayMat, ColorConversionCodes.BGR2GRAY);
    Cv2.EqualizeHist(grayMat, grayMat);

    var faces = Cascade.DetectMultiScale(grayMat);
    if (faces == null || faces.Length == 0) return srcMat;

    var binaryMat = new Mat();
    int blockSize = 7;
    double k = 0.15;
    double R = 32;
    Binarizer.Sauvola(grayMat, binaryMat, blockSize, k, R);
    Cv2.BitwiseNot(binaryMat, binaryMat);

    var polygons = new List<List<Point>>();
    var faceCount = faces.Length; // O(n)
    for (int d = 0; d < faceCount; d++)
    {
        polygons = new List<List<Point>>();
        int x1 = faces[d].X;
        int y1 = faces[d].Y;
        int width = faces[d].Width;
        int height = faces[d].Height;
        int x2 = x1 + width;
        int y2 = y1 + height;
        polygons.Add(new List<Point>()
        {
            new Point(x1, y1),
            new Point(x2, y1),
            new Point(x2, y2),
            new Point(x1, y2),
        });
        var pwidth = putMat.Width;
        var pheight = putMat.Height;

        // it might be better to enlarge the overlay image slightly:
        // Mat put0 = putMat[(int)(pheight * 0.1), (int)(pheight * 0.9), (int)(pwidth * 0.1), (int)(pwidth * 0.9)]
        //     .Resize(new Size(width, height), 0, 0, InterpolationFlags.Lanczos4);
        Mat put0 = putMat.Resize(new Size(width, height), 0, 0, InterpolationFlags.Lanczos4);

        // roughly sample the color around the center
        // (room for improvement: e.g. average over a skin-colored region)
        MatOfByte3 mat3 = new MatOfByte3(put0); // cv::Mat_<cv::Vec3b>
        var indexer = mat3.GetIndexer();
        Vec3b color = indexer[(int)(put0.Height * 0.5), (int)(put0.Width * 0.5)]; // indexer is [row, col]

        // fill with the sampled color
        Mat put1 = new Mat(srcMat.Size(), MatType.CV_8UC3, new Scalar(color.Item0, color.Item1, color.Item2));
        // copy into the overlay region
        put1[y1, y2, x1, x2] = put0;

        Mat mask = Mat.Zeros(srcMat.Size(), MatType.CV_8UC3);
        Cv2.FillPoly(mask, polygons, new Scalar(255, 255, 255));

        // the center is here
        var center = new Point(faces[d].X + faces[d].Width * 0.5, faces[d].Y + faces[d].Height * 0.5);
        Cv2.SeamlessClone(put1, srcMat, mask, center, srcMat, SeamlessCloneMethods.NormalClone);
    }
    return srcMat;
}
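A usage sketch for PutMaskOnFace (paths are placeholders; Cascade is assumed to be a CascadeClassifier loaded with a frontal-face model before the call):

Mat face = Cv2.ImRead("face.jpg");
Mat overlay = Cv2.ImRead("mask.png");
Mat result = PutMaskOnFace(face, overlay); // seamless-clones the overlay onto each detected face
Cv2.ImWrite("result.jpg", result);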
public Mat PutEllipseMaskOnFace2(Mat srcMat, Mat putMat)
{
    var grayMat = new Mat();
    Cv2.CvtColor(srcMat, grayMat, ColorConversionCodes.BGR2GRAY);
    Cv2.EqualizeHist(grayMat, grayMat);

    var faces = Cascade.DetectMultiScale(grayMat);
    if (faces == null || faces.Length == 0) return srcMat;

    var binaryMat = new Mat();
    // binaryMat = ColorExtractor.ExtractMask(srcMat, ColorConversionCodes.BGR2HSV, ColorVariation.Skin);
    // return binaryMat;
    int blockSize = 7;
    double k = 1.5;
    double R = 100;
    Binarizer.Sauvola(grayMat, binaryMat, blockSize, k, R);
    Cv2.BitwiseNot(binaryMat, binaryMat);
    // debug: return binaryMat;

    var polygons = new List<List<Point>>();
    var faceCount = faces.Length; // O(n)
    for (int d = 0; d < faceCount; d++)
    {
        polygons = new List<List<Point>>();
        int x1 = faces[d].X;
        int y1 = faces[d].Y;
        int width = faces[d].Width;
        int height = faces[d].Height;
        int x2 = x1 + width;
        int y2 = y1 + height;
        int pwidth = putMat.Width;
        int pheight = putMat.Height;
        int srcWidth = srcMat.Width;
        int srcHeight = srcMat.Height;
        polygons.Add(new List<Point>()
        {
            new Point(x1, y1),
            new Point(x2, y1),
            new Point(x2, y2),
            new Point(x1, y2),
        });

        // f = fixed
        /*
        int fx1 = (int)(x1 - width * 0.01); fx1 = fx1 > 0 ? fx1 : 0;
        int fx2 = (int)(x2 + width * 0.01); fx2 = fx2 < srcWidth ? fx2 : srcWidth;
        int fy1 = (int)(y1 - height * 0.01); fy1 = fy1 > 0 ? fy1 : 0;
        int fy2 = (int)(y2 + height * 0.01); fy2 = fy2 < srcHeight ? fy2 : srcHeight;
        */
        int fx1 = (int)(x1 + width * 0.1);
        int fx2 = (int)(x2 - width * 0.1);
        int fy1 = (int)(y1 + height * 0.1);
        int fy2 = (int)(y2 - height * 0.1);
        int fwidth = x2 - x1;
        int fheight = y2 - y1;
        var faceSize = new Size(fwidth, fheight);

        // it might be better to enlarge the overlay image slightly (see PutMaskOnFace)
        Mat put0 = putMat.Resize(faceSize, 0, 0, InterpolationFlags.Lanczos4);

        // roughly sample the color around the center
        // (room for improvement: e.g. average over a skin-colored region)
        MatOfByte3 mat3 = new MatOfByte3(put0); // cv::Mat_<cv::Vec3b>
        var indexer = mat3.GetIndexer();
        Vec3b color = indexer[(int)(put0.Height * 0.5), (int)(put0.Width * 0.5)]; // indexer is [row, col]

        // fill with the sampled color
        Mat put1 = new Mat(srcMat.Size(), MatType.CV_8UC3, new Scalar(color.Item0, color.Item1, color.Item2));
        // copy into the overlay region
        put1[y1, y2, x1, x2] = put0;

        Mat mask = Mat.Zeros(srcMat.Size(), MatType.CV_8UC3);
        // the center is here
        var center = new Point(faces[d].X + faces[d].Width * 0.5, faces[d].Y + faces[d].Height * 0.5);

        Mat faceAroundMat = Mat.Zeros(srcMat.Size(), MatType.CV_8UC1);
        faceAroundMat[fy1, fy2, fx1, fx2] = binaryMat[fy1, fy2, fx1, fx2];
        // faceAroundMat[y1, y2, x1, x2] = binaryMat[y1, y2, x1, x2];

        // plain contour extraction alone does not reveal the tilt;
        // the underlying API is destructive, hence the Clone()
        // http://opencv.jp/opencv-2svn/cpp/imgproc_structural_analysis_and_shape_descriptors.html#cv-findcontours
        var contours = faceAroundMat.Clone().FindContoursAsArray(RetrievalModes.List, ContourApproximationModes.ApproxNone);

        // filter the contours by area (the lower bound is commented out)
        var detectedContours = contours.Where(c =>
            /*Cv2.ContourArea(c) > Cv2.ContourArea(polygons[0]) * 0.05 &&*/
            Cv2.ContourArea(c) < Cv2.ContourArea(polygons[0]) * 0.1);

        // debug: draw the kept contours
        // Mat conMat = Mat.Zeros(srcMat.Size(), MatType.CV_8UC1);
        // Cv2.DrawContours(conMat, detectedContours, -1, new Scalar(255, 255, 255));
        // return conMat;

        var points = new List<Point>();
        foreach (var dc in detectedContours)
        {
            points.AddRange(dc); // the original Union() returned a new sequence that was discarded
        }
        var detectedRotateRect = Cv2.MinAreaRect(points);
        float angle = Math.Abs(detectedRotateRect.Angle) > 20
            ? detectedRotateRect.Angle % 20
            : detectedRotateRect.Angle;
        float scale = 1.0f;

        // rotate the overlay
        Mat matrix = Cv2.GetRotationMatrix2D(center, angle, scale);
        Debug.WriteLine(detectedRotateRect.Angle);
        Cv2.WarpAffine(put1, put1, matrix, put1.Size());

        var rotateRect = new RotatedRect(center, new Size2f(faceSize.Width, faceSize.Height), angle);

        Cv2.Ellipse(mask, rotateRect, new Scalar(255, 255, 255), -1, LineTypes.AntiAlias);
        // Cv2.FillPoly(mask, polygons, new Scalar(255, 255, 255));
        Cv2.SeamlessClone(put1, srcMat, mask, center, srcMat, SeamlessCloneMethods.NormalClone);
    }
    return srcMat;
}
public static void render_2D(ref OpenCvSharp.Mat left_display, sl.float2 img_scale, ref sl.Objects objects, bool showOnlyOK)
{
    OpenCvSharp.Mat overlay = left_display.Clone();
    OpenCvSharp.Rect roi_render = new OpenCvSharp.Rect(0, 0, left_display.Size().Width, left_display.Size().Height);

    for (int i = 0; i < objects.numObject; i++)
    {
        sl.ObjectData obj = objects.objectData[i];
        if (renderObject(obj, showOnlyOK))
        {
            // draw skeleton bones
            OpenCvSharp.Scalar base_color = generateColorID(obj.id);
            foreach (var part in SKELETON_BONES)
            {
                var kp_a = cvt(obj.keypoints2D[(int)part.Item1], img_scale);
                var kp_b = cvt(obj.keypoints2D[(int)part.Item2], img_scale);
                if (roi_render.Contains(kp_a) && roi_render.Contains(kp_b))
                {
                    Cv2.Line(left_display, kp_a, kp_b, base_color, 1, LineTypes.AntiAlias);
                }
            }
            var hip_left = obj.keypoints2D[(int)sl.BODY_PARTS.LEFT_HIP];
            var hip_right = obj.keypoints2D[(int)sl.BODY_PARTS.RIGHT_HIP];
            var spine = (hip_left + hip_right) / 2;
            var neck = obj.keypoints2D[(int)sl.BODY_PARTS.NECK];

            if (hip_left.X > 0 && hip_left.Y > 0 && hip_right.X > 0 && hip_right.Y > 0
                && neck.X > 0 && neck.Y > 0)
            {
                var spine_a = cvt(spine, img_scale);
                var spine_b = cvt(neck, img_scale);
                if (roi_render.Contains(spine_a) && roi_render.Contains(spine_b))
                {
                    Cv2.Line(left_display, spine_a, spine_b, base_color, 1, LineTypes.AntiAlias);
                }
            }

            // draw skeleton joints
            foreach (var kp in obj.keypoints2D)
            {
                Point cv_kp = cvt(kp, img_scale);
                if (roi_render.Contains(cv_kp))
                {
                    Cv2.Circle(left_display, cv_kp, 3, base_color, -1);
                }
            }
            if (hip_left.X > 0 && hip_left.Y > 0 && hip_right.X > 0 && hip_right.Y > 0)
            {
                Point cv_spine = cvt(spine, img_scale);
                if (roi_render.Contains(cv_spine))
                {
                    Cv2.Circle(left_display, cv_spine, 3, base_color, -1);
                }
            }
        }
    }
    // here, overlay is the same as the left image but with an opaque mask over each
    // detected object; blend it back for a translucent highlight
    Cv2.AddWeighted(left_display, 0.9, overlay, 0.1, 0.0, left_display);
}
public void Run()
{
    Mat img = Cv2.ImRead(FilePath.Image.Lenna, ImreadModes.GrayScale);

    // expand the input image to the optimal size
    Mat padded = new Mat();
    int m = Cv2.GetOptimalDFTSize(img.Rows);
    int n = Cv2.GetOptimalDFTSize(img.Cols);
    // on the border add zero values
    Cv2.CopyMakeBorder(img, padded, 0, m - img.Rows, 0, n - img.Cols, BorderTypes.Constant, Scalar.All(0));

    // add to the expanded image another plane with zeros
    Mat paddedF32 = new Mat();
    padded.ConvertTo(paddedF32, MatType.CV_32F);
    Mat[] planes = { paddedF32, Mat.Zeros(padded.Size(), MatType.CV_32F) };
    Mat complex = new Mat();
    Cv2.Merge(planes, complex);

    // this way the result may fit in the source matrix
    Mat dft = new Mat();
    Cv2.Dft(complex, dft);

    // compute the magnitude and switch to logarithmic scale
    // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
    Mat[] dftPlanes;
    Cv2.Split(dft, out dftPlanes); // dftPlanes[0] = Re(DFT(I)), dftPlanes[1] = Im(DFT(I))

    Mat magnitude = new Mat();
    Cv2.Magnitude(dftPlanes[0], dftPlanes[1], magnitude);

    magnitude += Scalar.All(1); // switch to logarithmic scale
    Cv2.Log(magnitude, magnitude);

    // crop the spectrum, if it has an odd number of rows or columns
    Mat spectrum = magnitude[
        new Rect(0, 0, magnitude.Cols & -2, magnitude.Rows & -2)];

    // rearrange the quadrants of the Fourier image so that the origin is at the image center
    int cx = spectrum.Cols / 2;
    int cy = spectrum.Rows / 2;

    Mat q0 = new Mat(spectrum, new Rect(0, 0, cx, cy));   // Top-Left - create a ROI per quadrant
    Mat q1 = new Mat(spectrum, new Rect(cx, 0, cx, cy));  // Top-Right
    Mat q2 = new Mat(spectrum, new Rect(0, cy, cx, cy));  // Bottom-Left
    Mat q3 = new Mat(spectrum, new Rect(cx, cy, cx, cy)); // Bottom-Right

    // swap quadrants (Top-Left with Bottom-Right)
    Mat tmp = new Mat();
    q0.CopyTo(tmp);
    q3.CopyTo(q0);
    tmp.CopyTo(q3);

    // swap quadrants (Top-Right with Bottom-Left)
    q1.CopyTo(tmp);
    q2.CopyTo(q1);
    tmp.CopyTo(q2);

    // transform the matrix with float values into a viewable image form (floats between 0 and 1)
    Cv2.Normalize(spectrum, spectrum, 0, 1, NormTypes.MinMax);

    // show the result
    Cv2.ImShow("Input Image", img);
    Cv2.ImShow("Spectrum Magnitude", spectrum);

    // calculate the idft
    Mat inverseTransform = new Mat();
    Cv2.Dft(dft, inverseTransform, DftFlags.Inverse | DftFlags.RealOutput);
    Cv2.Normalize(inverseTransform, inverseTransform, 0, 1, NormTypes.MinMax);
    Cv2.ImShow("Reconstructed by Inverse DFT", inverseTransform);
    Cv2.WaitKey();
}
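The quadrant swap above is the classic fftshift; a small helper sketch that performs the same in-place swap for any spectrum already cropped to even dimensions:

static void FftShift(Mat spectrum)
{
    int cx = spectrum.Cols / 2;
    int cy = spectrum.Rows / 2;
    using (Mat q0 = new Mat(spectrum, new Rect(0, 0, cx, cy)))
    using (Mat q1 = new Mat(spectrum, new Rect(cx, 0, cx, cy)))
    using (Mat q2 = new Mat(spectrum, new Rect(0, cy, cx, cy)))
    using (Mat q3 = new Mat(spectrum, new Rect(cx, cy, cx, cy)))
    using (Mat tmp = new Mat())
    {
        q0.CopyTo(tmp); q3.CopyTo(q0); tmp.CopyTo(q3); // swap top-left with bottom-right
        q1.CopyTo(tmp); q2.CopyTo(q1); tmp.CopyTo(q2); // swap top-right with bottom-left
    }
}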
static bool RunOn(string file, OpenCvSharp.Rect boundingBox)
{
    int areas = 0;
    int[] quadrants = new int[4];
    using (OpenCvSharp.Mat m = new OpenCvSharp.Mat(file, OpenCvSharp.ImreadModes.Grayscale))
    {
        // blur the image a little
        using (var blurred = m.GaussianBlur(new Size(3, 3), 0))
        {
            // make the image binary black or white and make black the background color
            using (var g = blurred.Threshold(200, 255, ThresholdTypes.BinaryInv | ThresholdTypes.Otsu))
            {
                var element = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(50, 1));
                // remove lines from the dark background image by creating a mask
                using (var mask = g.MorphologyEx(MorphTypes.Open, element, iterations: 2))
                using (Mat newMask = new Mat())
                {
                    // mask bits should be 0 to skip copying items
                    Cv2.BitwiseNot(mask, newMask);
                    using (Mat newImage = new Mat())
                    {
                        // make a new image and apply the mask so as to not copy the lines
                        g.CopyTo(newImage, newMask);
                        // create the box image
                        using (OpenCvSharp.Mat box = new OpenCvSharp.Mat(new Size(boundingBox.Width, boundingBox.Height), MatType.CV_8U))
                        {
                            // copy to the box
                            newImage[boundingBox].CopyTo(box);
                            using (Mat labels = new Mat())
                            using (var centroids = new Mat())
                            using (Mat stats = new Mat())
                            {
                                // find the white blobs,
                                // populate the quadrants the blobs appear in,
                                // and accumulate the total area of white stuff
                                int cnt = Cv2.ConnectedComponentsWithStats(box, labels, stats, centroids, PixelConnectivity.Connectivity8);
#if usequadrants
                                int qh = box.Size().Height / 2;
                                int qw = box.Size().Width / 2;
                                var tl = new Rect(0, 0, qw, qh);
                                var vl = new Rect(0, qh, qw, qh);
                                var tr = new Rect(qw, 0, qw, qh);
                                var br = new Rect(qw, qh, qw, qh);
#endif
                                for (var x = 1; x < stats.Size().Height; x++)
                                {
#if usequadrants
                                    var left = stats.Get<int>(x, (int)ConnectedComponentsTypes.Left);
                                    var top = stats.Get<int>(x, (int)ConnectedComponentsTypes.Top);
                                    var width = stats.Get<int>(x, (int)ConnectedComponentsTypes.Width);
                                    var height = stats.Get<int>(x, (int)ConnectedComponentsTypes.Height);
                                    var re = new Rect(left, top, width, height);
                                    if (re.IntersectsWith(tl)) { quadrants[0] = 1; }
                                    if (re.IntersectsWith(vl)) { quadrants[1] = 1; }
                                    if (re.IntersectsWith(tr)) { quadrants[2] = 1; }
                                    if (re.IntersectsWith(br)) { quadrants[3] = 1; }
#endif
                                    areas += stats.Get<int>(x, (int)ConnectedComponentsTypes.Area);
                                }
                            }
                            var boxarea = box.Size().Width * box.Size().Height;
                            double[] areasTest = new double[] { areas };
                            double[] boxAreas = new double[] { boxarea };
                            // use infer.net to determine if the mean is good or not
                            VariableArray<bool> ytest = Variable.Array<bool>(new Range(areasTest.Length));
                            BayesPointMachine(areasTest, boxAreas, Variable.Random(wPosterior), ytest);
                            var res = (DistributionStructArray<Bernoulli, bool>)engine.Infer(ytest);
                            var mean = res[0].GetMean();
                            Console.WriteLine(boxarea + " " + areas + " " + mean + " "
#if usequadrants
                                + quadrants.Sum());