/// <summary>
/// Runs the paper scanner over a single camera frame and returns the frame
/// with the detected paper contour outlined on top of it.
/// </summary>
/// <param name="inputTexture">Live camera feed frame to scan</param>
/// <returns>Texture showing the camera frame with the detected paper shape drawn (if any)</returns>
public Texture getScanFrame(WebCamTexture inputTexture) {
    Mat original = Unity.TextureToMat(inputTexture);
    Size inputSize = new Size(original.Width, original.Height);

    // Feed the scanner its own copy so that any in-place processing it performs
    // cannot alter the frame we are about to display. Cloning the already-converted
    // Mat replaces the original code's second, redundant TextureToMat() conversion.
    scanner.Input = original.Clone();

    // If detection failed, switch to hue-based grayscale for subsequent frames;
    // it tends to cope better with colored paper / tinted lighting.
    if (!scanner.Success) {
        scanner.Settings.GrayMode = PaperScanner.ScannerSettings.ColorMode.HueGrayscale;
    }

    Point[] detectedContour = scanner.PaperShape;

    // Compose the output: the original frame copied onto a gray canvas of the same size.
    var matCombinedFrame = new Mat(new Size(inputSize.Width, inputSize.Height), original.Type(), Scalar.FromRgb(64, 64, 64));
    original.CopyTo(matCombinedFrame.SubMat(0, inputSize.Height, 0, inputSize.Width));

    // A valid paper contour needs at least 3 points to form a closed shape.
    if (null != detectedContour && detectedContour.Length > 2) {
        matCombinedFrame.DrawContours(new Point[][] { detectedContour }, 0, Scalar.FromRgb(255, 255, 0), 3);
    }

    return (Unity.MatToTexture(matCombinedFrame));
}
/// <summary>
/// Combines original and processed images into a new twice wide image
/// </summary>
/// <param name="original">Source image</param>
/// <param name="processed">Processed image</param>
/// <param name="detectedContour">Contour to draw over original image to show detected shape</param>
/// <returns>OpenCV::Mat image with images combined</returns>
private Mat CombineMats(Mat original, Mat processed, Point[] detectedContour)
{
    int srcWidth = original.Width;
    int srcHeight = original.Height;

    // Output canvas: twice as wide as the source frame, pre-filled with dark gray.
    Mat canvas = new Mat(new Size(srcWidth * 2, srcHeight), original.Type(), Scalar.FromRgb(64, 64, 64));

    // Left half: the source frame as-is...
    original.CopyTo(canvas.SubMat(0, srcHeight, 0, srcWidth));

    // ...with the detected paper outline drawn over it (3+ points = closed shape).
    bool hasShape = detectedContour != null && detectedContour.Length > 2;
    if (hasShape)
    {
        canvas.DrawContours(new Point[][] { detectedContour }, 0, Scalar.FromRgb(255, 255, 0), 3);
    }

    // Right half: the scanned, un-warped paper copied without scaling,
    // centered within the right half of the canvas.
    if (processed != null)
    {
        double halfW = processed.Width * 0.5;
        double halfH = processed.Height * 0.5;
        double centerX = srcWidth + srcWidth * 0.5;
        double centerY = srcHeight * 0.5;
        processed.CopyTo(canvas.SubMat(
            (int)(centerY - halfH), (int)(centerY + halfH),
            (int)(centerX - halfW), (int)(centerX + halfW)));
    }

    return canvas;
}
/// <summary>
/// Marks detected objects on the texture
/// </summary>
/// <param name="drawSubItems">When true, also draws facial sub-feature landmarks (nose, eyes, lips, etc.)</param>
public void MarkDetected(bool drawSubItems = true)
{
    // Names of sub-features whose landmark poly-line forms a closed contour.
    // Hoisted out of the face loop: the original rebuilt this list once per face.
    List<string> closedItems = new List<string>(new string[] { "Nose", "Eye", "Lip" });

    // mark each found face
    foreach (OpenCvSharp.Demo.DetectedFace face in Faces)
    {
        // face bounding rectangle
        Cv2.Rectangle((InputOutputArray)Image, face.Region, Scalar.FromRgb(255, 0, 0), 2);

        // render face triangulation (should we have one)
        if (face.Info != null)
        {
            foreach (OpenCvSharp.Demo.DetectedFace.Triangle tr in face.Info.DelaunayTriangles)
            {
                Cv2.Polylines(Image, new IEnumerable<Point>[] { tr.ToArray() }, true, Scalar.FromRgb(0, 0, 255), 1);
            }
        }

        // sub-items: draw each landmark set, closed only for contour-like features
        if (drawSubItems)
        {
            foreach (OpenCvSharp.Demo.DetectedObject sub in face.Elements)
            {
                if (sub.Marks != null)
                {
                    Cv2.Polylines(Image, new IEnumerable<Point>[] { sub.Marks }, closedItems.Contains(sub.Name), Scalar.FromRgb(0, 255, 0), 1);
                }
            }
        }
    }
}