/// <summary>
/// Detects faces in the sample image with the given cascade classifier
/// and draws a magenta ellipse around each detected face.
/// </summary>
/// <param name="cascade">Haar cascade classifier used for detection.</param>
/// <returns>A clone of the source image with the detected faces marked.</returns>
private Mat DetectFace(CascadeClassifier cascade)
{
    Mat result;

    using (var src = new Mat(FilePath.Image.Yalta, ImreadModes.Color))
    using (var gray = new Mat())
    {
        result = src.Clone();
        Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY);

        // Detect faces
        Rect[] faces = cascade.DetectMultiScale(
            gray, 1.08, 2, HaarDetectionType.ScaleImage, new Size(30, 30));

        // Render all detected faces
        foreach (Rect face in faces)
        {
            var center = new Point
            {
                X = (int)(face.X + face.Width * 0.5),
                Y = (int)(face.Y + face.Height * 0.5)
            };
            var axes = new Size
            {
                Width = (int)(face.Width * 0.5),
                Height = (int)(face.Height * 0.5)
            };
            Cv2.Ellipse(result, center, axes, 0, 0, 360, new Scalar(255, 0, 255), 4);
        }
    }
    return result;
}
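A minimal usage sketch for DetectFace; the cascade file path here is an assumption, so point it at a local Haar cascade XML:

// Hypothetical usage of DetectFace; the cascade path is an assumption.
using (var cascade = new CascadeClassifier(@"./haarcascade/haarcascade_frontalface_default.xml"))
using (Mat marked = DetectFace(cascade))
using (new Window("Face detection", marked))
{
    Cv2.WaitKey();
}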
static cv.Mat MatInverse(cv.Mat m)
{
    // Assumes the determinant is not 0,
    // that is, the matrix does have an inverse.
    int n = m.Rows;
    cv.Mat result = m.Clone();

    cv.Mat lum;  // combined lower & upper
    int[] perm;  // out parameter
    MatDecompose(m, out lum, out perm);  // ignore return

    double[] b = new double[n];
    for (int i = 0; i < n; ++i)
    {
        for (int j = 0; j < n; ++j)
        {
            if (i == perm[j])
                b[j] = 1.0;
            else
                b[j] = 0.0;
        }

        double[] x = Reduce(lum, b);
        for (int j = 0; j < n; ++j)
            result.Set<double>(j, i, x[j]);
    }
    return result;
}
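MatInverse calls a Reduce helper that is not part of this listing. A minimal sketch, assuming lum holds the combined factors described with MatDecompose later in this listing (unit diagonal implied for the lower part): forward-substitute through L, then back-substitute through U.

// Sketch of the missing Reduce helper: solves L(Ux) = b for x,
// assuming lum stores L strictly below the diagonal (unit diagonal implied)
// and U on and above the diagonal, as produced by MatDecompose.
static double[] Reduce(cv.Mat lum, double[] b)
{
    int n = lum.Rows;
    double[] x = new double[n];
    b.CopyTo(x, 0);

    // Forward substitution: solve Ly = b (L has an implicit unit diagonal).
    for (int i = 1; i < n; ++i)
    {
        double sum = x[i];
        for (int j = 0; j < i; ++j)
            sum -= lum.At<double>(i, j) * x[j];
        x[i] = sum;
    }

    // Back substitution: solve Ux = y.
    x[n - 1] /= lum.At<double>(n - 1, n - 1);
    for (int i = n - 2; i >= 0; --i)
    {
        double sum = x[i];
        for (int j = i + 1; j < n; ++j)
            sum -= lum.At<double>(i, j) * x[j];
        x[i] = sum / lum.At<double>(i, i);
    }
    return x;
}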
private static Mat[] SelectStitchingImages(int width, int height, int count)
{
    Mat source = new Mat(@"Data\Image\lenna.png", ImreadModes.Color);
    Mat result = source.Clone();

    var rand = new Random();
    var mats = new List<Mat>();
    for (int i = 0; i < count; i++)
    {
        int x1 = rand.Next(source.Cols - width);
        int y1 = rand.Next(source.Rows - height);
        int x2 = x1 + width;
        int y2 = y1 + height;

        result.Line(new Point(x1, y1), new Point(x1, y2), new Scalar(0, 0, 255));
        result.Line(new Point(x1, y2), new Point(x2, y2), new Scalar(0, 0, 255));
        result.Line(new Point(x2, y2), new Point(x2, y1), new Scalar(0, 0, 255));
        result.Line(new Point(x2, y1), new Point(x1, y1), new Scalar(0, 0, 255));

        Mat m = source[new Rect(x1, y1, width, height)];
        mats.Add(m.Clone());
    }

    using (new Window(result))
    {
        Cv2.WaitKey();
    }

    return mats.ToArray();
}
private int OpenCVFaceDetector(string path)
{
    // Uses the OpenCV library.
    OpenCvSharp.CascadeClassifier faceClassifier =
        new OpenCvSharp.CascadeClassifier(@"./haarcascade/haarcascade_frontalface_alt.xml");
    OpenCvSharp.Mat result;
    Rect[] faces = new Rect[0];

    using (var src = new OpenCvSharp.Mat(path, OpenCvSharp.ImreadModes.Color))
    using (var gray = new OpenCvSharp.Mat())
    {
        result = src.Clone();
        Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY);

        // Detect faces
        faces = faceClassifier.DetectMultiScale(
            gray, 1.08, 2, OpenCvSharp.HaarDetectionType.ScaleImage);

        List<System.Drawing.Rectangle> rfaces = new List<System.Drawing.Rectangle>();
        foreach (Rect face in faces)
        {
            System.Drawing.Rectangle r =
                new System.Drawing.Rectangle(face.X, face.Y, face.Width, face.Height);
            this.GetLandmarks(gray, face, rfaces);
            rfaces.Add(r);
        }

        DrawOnImage?.Invoke(rfaces.ToArray(), new System.Drawing.Size(result.Width, result.Height));
    }
    result.Dispose();
    return faces.Length;
}
public void Run(cv.Mat src)
{
    // The IDs of the four markers in the image at FilePath.Image.Aruco.
    const int upperLeftMarkerId = 160;
    const int upperRightMarkerId = 268;
    const int lowerRightMarkerId = 176;
    const int lowerLeftMarkerId = 168;

    var detectorParameters = DetectorParameters.Create();
    detectorParameters.CornerRefinementMethod = CornerRefineMethod.Subpix;
    detectorParameters.CornerRefinementWinSize = 9;

    var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict4X4_1000);

    CvAruco.DetectMarkers(src, dictionary, out var corners, out var ids,
        detectorParameters, out var rejectedPoints);

    detectedMarkers = src.Clone();
    CvAruco.DrawDetectedMarkers(detectedMarkers, corners, ids, cv.Scalar.White);

    // Find the index of each of the four markers in the ids array. The same index
    // into the corners array gives the corners of that marker.
    var upperLeftCornerIndex = Array.FindIndex(ids, id => id == upperLeftMarkerId);
    var upperRightCornerIndex = Array.FindIndex(ids, id => id == upperRightMarkerId);
    var lowerRightCornerIndex = Array.FindIndex(ids, id => id == lowerRightMarkerId);
    var lowerLeftCornerIndex = Array.FindIndex(ids, id => id == lowerLeftMarkerId);

    // Make sure we found all four markers.
    if (upperLeftCornerIndex < 0 || upperRightCornerIndex < 0 ||
        lowerRightCornerIndex < 0 || lowerLeftCornerIndex < 0)
    {
        return;
    }

    // Marker corners are stored clockwise beginning with the upper-left corner.
    // Get the first (upper-left) corner of the upper-left marker.
    var upperLeftPixel = corners[upperLeftCornerIndex][0];
    // Get the second (upper-right) corner of the upper-right marker.
    var upperRightPixel = corners[upperRightCornerIndex][1];
    // Get the third (lower-right) corner of the lower-right marker.
    var lowerRightPixel = corners[lowerRightCornerIndex][2];
    // Get the fourth (lower-left) corner of the lower-left marker.
    var lowerLeftPixel = corners[lowerLeftCornerIndex][3];

    // Create coordinates for passing to GetPerspectiveTransform.
    var sourceCoordinates = new List<cv.Point2f>
    {
        upperLeftPixel, upperRightPixel, lowerRightPixel, lowerLeftPixel
    };
    var destinationCoordinates = new List<cv.Point2f>
    {
        new cv.Point2f(0, 0),
        new cv.Point2f(1024, 0),
        new cv.Point2f(1024, 1024),
        new cv.Point2f(0, 1024),
    };

    var transform = cv.Cv2.GetPerspectiveTransform(sourceCoordinates, destinationCoordinates);
    normalizedImage = new cv.Mat();
    cv.Cv2.WarpPerspective(src, normalizedImage, transform, new cv.Size(1024, 1024));
}
public static void render_2D(ref OpenCvSharp.Mat left_display, sl.float2 img_scale,
    ref sl.Objects objects, bool render_mask, bool isTrackingON)
{
    OpenCvSharp.Mat overlay = left_display.Clone();
    OpenCvSharp.Rect roi_render = new OpenCvSharp.Rect(0, 0, left_display.Size().Width, left_display.Size().Height);
    OpenCvSharp.Mat mask = new OpenCvSharp.Mat(left_display.Rows, left_display.Cols, OpenCvSharp.MatType.CV_8UC1);

    int line_thickness = 2;

    for (int i = 0; i < objects.numObject; i++)
    {
        sl.ObjectData obj = objects.objectData[i];
        if (Utils.renderObject(obj, isTrackingON))
        {
            OpenCvSharp.Scalar base_color = Utils.generateColorID_u(obj.id);

            // Display the image-scale 2D bounding box.
            if (obj.boundingBox2D.Length < 4) continue;

            Point top_left_corner = Utils.cvt(obj.boundingBox2D[0], img_scale);
            Point top_right_corner = Utils.cvt(obj.boundingBox2D[1], img_scale);
            Point bottom_right_corner = Utils.cvt(obj.boundingBox2D[2], img_scale);
            Point bottom_left_corner = Utils.cvt(obj.boundingBox2D[3], img_scale);

            // Draw the two horizontal lines.
            Cv2.Line(left_display, top_left_corner, top_right_corner, base_color, line_thickness);
            Cv2.Line(left_display, bottom_left_corner, bottom_right_corner, base_color, line_thickness);
            // Draw the two vertical lines.
            Utils.drawVerticalLine(ref left_display, bottom_left_corner, top_left_corner, base_color, line_thickness);
            Utils.drawVerticalLine(ref left_display, bottom_right_corner, top_right_corner, base_color, line_thickness);

            // Scaled ROI
            OpenCvSharp.Rect roi = new OpenCvSharp.Rect(top_left_corner.X, top_left_corner.Y,
                (int)top_right_corner.DistanceTo(top_left_corner),
                (int)bottom_right_corner.DistanceTo(top_right_corner));
            overlay.SubMat(roi).SetTo(base_color);

            sl.float2 position_image = getImagePosition(obj.boundingBox2D, img_scale);
            Cv2.PutText(left_display, obj.label.ToString(),
                new Point(position_image.x - 20, position_image.y - 12),
                HersheyFonts.HersheyComplexSmall, 0.5f, new Scalar(255, 255, 255, 255), 1);

            if (!float.IsInfinity(obj.position.Z))
            {
                string text = Math.Abs(obj.position.Z).ToString("0.##M");
                Cv2.PutText(left_display, text,
                    new Point(position_image.x - 20, position_image.y),
                    HersheyFonts.HersheyComplexSmall, 0.5, new Scalar(255, 255, 255, 255), 1);
            }
        }
    }

    // overlay is the same as the left image, but with an opaque mask over each detected object.
    Cv2.AddWeighted(left_display, 0.7, overlay, 0.3, 0.0, left_display);
}
protected override Mat ExecuteImpl(Mat inputImage)
{
    lock (m_sync)
    {
        if (m_videoCapture != null)
        {
            if (m_videoCapture.Read(m_image) && m_image.Width != 0 && m_image.Height != 0)
            {
                return m_image.Clone();
            }
        }
    }
    return null;
}
static cv.Mat ExtractUpper(cv.Mat lum)
{
    // Upper part of an LU decomposition
    // (lu values on the diagonal and above, 0.0s below).
    int n = lum.Rows;
    cv.Mat result = lum.Clone().SetTo(0);
    for (int i = 0; i < n; ++i)
    {
        for (int j = 0; j < n; ++j)
        {
            if (i <= j)
                result.Set<double>(i, j, lum.At<double>(i, j));
        }
    }
    return result;
}
public void Run()
{
    using (Mat src = new Mat(FilePath.Image.Distortion, ImreadModes.Color))
    using (Mat gray = new Mat())
    using (Mat dst = src.Clone())
    {
        Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY);

        CppStyleMSER(gray, dst);  // C++ style

        using (new Window("MSER src", src))
        using (new Window("MSER gray", gray))
        using (new Window("MSER dst", dst))
        {
            Cv2.WaitKey();
        }
    }
}
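CppStyleMSER is not shown in this listing. A minimal sketch of what it presumably does, using OpenCvSharp's MSER class to detect regions and mark their points on dst (the drawing style is an assumption):

// Sketch of the CppStyleMSER helper referenced above (an assumption):
// detect MSER regions in the grayscale image and mark each region point on dst.
private void CppStyleMSER(Mat gray, Mat dst)
{
    MSER mser = MSER.Create();
    Point[][] contours;
    Rect[] bboxes;
    mser.DetectRegions(gray, out contours, out bboxes);

    foreach (Point[] pts in contours)
    {
        foreach (Point p in pts)
            dst.Circle(p, 1, Scalar.Red);
    }
}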
/// <summary>
/// Sample of the new C++-style wrapper.
/// </summary>
private void SampleCpp()
{
    // (1) Load the images
    using (Mat imgGray = new Mat(FilePath.Image.Goryokaku, ImreadModes.GrayScale))
    using (Mat imgStd = new Mat(FilePath.Image.Goryokaku, ImreadModes.Color))
    using (Mat imgProb = imgStd.Clone())
    {
        // (2) Preprocess
        Cv2.Canny(imgGray, imgGray, 50, 200, 3, false);

        // (3) Run the Standard Hough Transform
        LineSegmentPolar[] segStd = Cv2.HoughLines(imgGray, 1, Math.PI / 180, 50, 0, 0);
        int limit = Math.Min(segStd.Length, 10);
        for (int i = 0; i < limit; i++)
        {
            // Draw the result lines: convert (rho, theta) to two far-apart points.
            float rho = segStd[i].Rho;
            float theta = segStd[i].Theta;
            double a = Math.Cos(theta);
            double b = Math.Sin(theta);
            double x0 = a * rho;
            double y0 = b * rho;
            Point pt1 = new Point { X = (int)Math.Round(x0 + 1000 * (-b)), Y = (int)Math.Round(y0 + 1000 * (a)) };
            Point pt2 = new Point { X = (int)Math.Round(x0 - 1000 * (-b)), Y = (int)Math.Round(y0 - 1000 * (a)) };
            imgStd.Line(pt1, pt2, Scalar.Red, 3, LineTypes.AntiAlias, 0);
        }

        // (4) Run the Probabilistic Hough Transform
        LineSegmentPoint[] segProb = Cv2.HoughLinesP(imgGray, 1, Math.PI / 180, 50, 50, 10);
        foreach (LineSegmentPoint s in segProb)
        {
            imgProb.Line(s.P1, s.P2, Scalar.Red, 3, LineTypes.AntiAlias, 0);
        }

        // (5) Show the results
        using (new Window("Hough_line_standard", WindowMode.AutoSize, imgStd))
        using (new Window("Hough_line_probabilistic", WindowMode.AutoSize, imgProb))
        {
            Window.WaitKey(0);
        }
    }
}
public void Run()
{
    using (Mat imgSrc = new Mat(FilePath.Image.Lenna, ImreadModes.Color))
    using (Mat imgGray = new Mat())
    using (Mat imgDst = imgSrc.Clone())
    {
        Cv2.CvtColor(imgSrc, imgGray, ColorConversionCodes.BGR2GRAY, 0);

        KeyPoint[] keypoints = Cv2.FAST(imgGray, 50, true);

        foreach (KeyPoint kp in keypoints)
        {
            imgDst.Circle(kp.Pt, 3, Scalar.Red, -1, LineTypes.AntiAlias, 0);
        }

        Cv2.ImShow("FAST", imgDst);
        Cv2.WaitKey(0);
        Cv2.DestroyAllWindows();
    }
}
static cv.Mat ExtractLower(cv.Mat lum)
{
    // Lower part of an LU (Crout's) decomposition
    // (dummy 1.0s on the diagonal, 0.0s above).
    int n = lum.Rows;
    cv.Mat result = lum.Clone().SetTo(0);
    for (int i = 0; i < n; ++i)
    {
        for (int j = 0; j < n; ++j)
        {
            if (i == j)
                result.Set<double>(i, j, 1.0);
            else if (i > j)
                result.Set<double>(i, j, lum.At<double>(i, j));
        }
    }
    return result;
}
public void infilterY(ref Mat DST, Mat SRC)
{
    int width = SRC.Width;
    int height = SRC.Height;
    DST = SRC.Clone();

    var indexer = new MatOfByte3(DST).GetIndexer();

    // Vertical forward difference: DST(y, x) = clamp(SRC(y+1, x) - SRC(y, x), 0, 255).
    for (int x = 0; x < width; x++)
        for (int y = 0; y < height - 1; y++)
        {
            Vec3b color = indexer[y, x];
            double val = indexer[y + 1, x].Item0 - indexer[y, x].Item0;
            if (val > 255) color.Item0 = 255;
            else if (val < 0) color.Item0 = 0;
            else color.Item0 = (byte)val;
            indexer[y, x] = color;
        }
    indexer = null;
}
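infilterX is called together with infilterY later in this listing (in OnClick実行) but is not shown. A sketch under the assumption that it is simply the horizontal counterpart of infilterY:

// Sketch of the missing infilterX: horizontal counterpart of infilterY.
// Assumption: the same clamped forward-difference scheme, along x instead of y.
public void infilterX(ref Mat DST, Mat SRC)
{
    int width = SRC.Width;
    int height = SRC.Height;
    DST = SRC.Clone();

    var indexer = new MatOfByte3(DST).GetIndexer();
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width - 1; x++)
        {
            Vec3b color = indexer[y, x];
            double val = indexer[y, x + 1].Item0 - indexer[y, x].Item0;
            if (val > 255) color.Item0 = 255;
            else if (val < 0) color.Item0 = 0;
            else color.Item0 = (byte)val;
            indexer[y, x] = color;
        }
    indexer = null;
}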
public Mat PutEllipseMaskOnFace(Mat srcMat, Mat putMat)
{
    var grayMat = new Mat();
    Cv2.CvtColor(srcMat, grayMat, ColorConversionCodes.BGR2GRAY);
    Cv2.EqualizeHist(grayMat, grayMat);

    var faces = Cascade.DetectMultiScale(grayMat);
    if (faces == null) return srcMat;

    var binaryMat = new Mat();
    int blockSize = 7;
    double k = 0.15;
    double R = 32;
    Binarizer.Sauvola(grayMat, binaryMat, blockSize, k, R);
    Cv2.BitwiseNot(binaryMat, binaryMat);
    // return binaryMat;

    var polygons = new List<List<Point>>();

    var faceCount = faces.Count(); // O(n)
    for (int d = 0; d < faceCount; d++)
    {
        polygons = new List<List<Point>>();
        int x1 = faces[d].X;
        int y1 = faces[d].Y;
        int width = faces[d].Width;
        int height = faces[d].Height;
        int x2 = x1 + width;
        int y2 = y1 + height;
        int pwidth = putMat.Width;
        int pheight = putMat.Height;
        int srcWidth = srcMat.Width;
        int srcHeight = srcMat.Height;

        polygons.Add(new List<Point>()
        {
            new Point(x1, y1),
            new Point(x2, y1),
            new Point(x2, y2),
            new Point(x1, y2),
        });

        // f = fixed: clamp a slightly enlarged face rectangle to the image bounds.
        int fx1 = (int)(x1 - width * 0.1);
        fx1 = fx1 > 0 ? fx1 : 0;
        int fx2 = (int)(x2 + width * 0.1);
        fx2 = fx2 < srcWidth ? fx2 : srcWidth;
        int fy1 = (int)(y1 - height * 0.1);
        fy1 = fy1 > 0 ? fy1 : 0;
        int fy2 = (int)(y2 + height * 0.1);
        fy2 = fy2 < srcHeight ? fy2 : srcHeight;
        int fwidth = x2 - x1;
        int fheight = y2 - y1;

        /*
        var detectedContours = contours.Where(c =>
        {
            var cc = c.Count();
            return cc > 150 && cc < 1000;
        });
        foreach (var con in detectedContours)
        {
            var rotateRect = Cv2.FitEllipse(con);
        }
        */

        var faceSize = new Size(fwidth, fheight);

        // Maybe the overlay image should be enlarged a little?
        /*
        Mat put0 = putMat[(int)(pwidth * 0.1), (int)(pwidth * 0.9), (int)(pheight * 0.1), (int)(pheight * 0.9)]
            .Resize(new Size(width, heigh), 0, 0, InterpolationFlags.Lanczos4);
        */
        Mat put0 = putMat.Resize(faceSize, 0, 0, InterpolationFlags.Lanczos4);

        // Roughly sample a color near the center.
        // Room for improvement (e.g. average over the skin-colored region?).
        MatOfByte3 mat3 = new MatOfByte3(put0); // cv::Mat_<cv::Vec3b>
        var indexer = mat3.GetIndexer();
        Vec3b color = indexer[(int)(put0.Width * 0.5), (int)(put0.Height * 0.5)];

        // Fill with the sampled color.
        Mat put1 = new Mat(srcMat.Size(), MatType.CV_8UC3, new Scalar(color.Item0, color.Item1, color.Item2));

        // Copy the resized overlay into the target region.
        put1[y1, y2, x1, x2] = put0;

        Mat mask = Mat.Zeros(srcMat.Size(), MatType.CV_8UC3);

        // The center is here.
        var center = new Point(faces[d].X + faces[d].Width * 0.5, faces[d].Y + faces[d].Height * 0.5);

        Mat faceAroundMat = Mat.Zeros(srcMat.Size(), MatType.CV_8UC1);
        faceAroundMat[fy1, fy2, fx1, fx2] = binaryMat[fy1, fy2, fx1, fx2];
        // faceAroundMat[y1, y2, x1, x2] = binaryMat[y1, y2, x1, x2];

        // Simple contour extraction alone does not reveal the tilt.
        // The underlying API is destructive, so work on a clone:
        // http://opencv.jp/opencv-2svn/cpp/imgproc_structural_analysis_and_shape_descriptors.html#cv-findcontours
        //var contours = faceAroundMat.Clone().FindContoursAsArray(RetrievalModes.List, ContourApproximationModes.ApproxNone);
        var contours = binaryMat.Clone().FindContoursAsArray(RetrievalModes.List, ContourApproximationModes.ApproxNone);
        /*
        Mat mat = Mat.Zeros(srcMat.Size(), MatType.CV_8UC3);
        Cv2.DrawContours(mat, contours.Where(c => Cv2.ContourArea(c) > 100), -1, new Scalar(0, 255, 0));
        */
        var detectedContour = contours.FindMax(c => Cv2.ContourArea(c));

        // var rotateRect = new RotatedRect();
        /*
        if (Cv2.ContourArea(detectedContour) > Cv2.ContourArea(polygons[0]) * 0.3)
        {
            rotateRect = Cv2.FitEllipse(detectedContour);
        }
        else
        {
            rotateRect = new RotatedRect(center, new Size2f(faceSize.Width, faceSize.Height), 0);
        }
        rotateRect = Cv2.FitEllipse(detectedContour);
        Debug.WriteLine(rotateRect.Angle);
        */
        var rotateRect = Cv2.FitEllipse(detectedContour);
        rotateRect.Size = new Size2f(faceSize.Width, faceSize.Height);
        float angle = Math.Abs(rotateRect.Angle) > 20 ? -rotateRect.Angle % 20 : -rotateRect.Angle;
        float scale = 1.0f;

        // Build the rotation matrix.
        Mat matrix = Cv2.GetRotationMatrix2D(center, angle, scale);
        // Rotate the overlay image.
        Cv2.WarpAffine(put1, put1, matrix, put1.Size());

        Cv2.Ellipse(mask, rotateRect, new Scalar(255, 255, 255), -1, LineTypes.AntiAlias);
        // Cv2.FillPoly(mask, polygons, new Scalar(255, 255, 255));

        Cv2.SeamlessClone(put1, srcMat, mask, center, srcMat, SeamlessCloneMethods.NormalClone);
    }
    return srcMat;
}
static int MatDecompose(cv.Mat m, out cv.Mat lum, out int[] perm)
{
    // Crout's LU decomposition for matrix determinant and inverse.
    // Stores the combined lower & upper in lum[][].
    // Stores the row permutations in perm[].
    // Returns +1 or -1 according to an even or odd number of row permutations.
    // Lower gets dummy 1.0s on the diagonal (0.0s above).
    // Upper gets lum values on the diagonal (0.0s below).

    int toggle = +1; // even (+1) or odd (-1) row permutations
    int n = m.Rows;

    // Make a copy of m[][] into the result lum[][].
    lum = m.Clone();

    // Make perm[].
    perm = new int[n];
    for (int i = 0; i < n; ++i)
        perm[i] = i;

    for (int j = 0; j < n - 1; ++j) // process by column; note n-1
    {
        double max = Math.Abs(lum.At<double>(j, j));
        int piv = j;

        for (int i = j + 1; i < n; ++i) // find pivot index
        {
            double xij = Math.Abs(lum.At<double>(i, j));
            if (xij > max)
            {
                max = xij;
                piv = i;
            }
        }

        if (piv != j)
        {
            cv.Mat tmp = lum.Row(piv).Clone(); // swap rows j, piv
            lum.Row(j).CopyTo(lum.Row(piv));
            tmp.CopyTo(lum.Row(j));

            int t = perm[piv]; // swap perm elements
            perm[piv] = perm[j];
            perm[j] = t;

            toggle = -toggle;
        }

        double xjj = lum.At<double>(j, j);
        if (xjj != 0.0)
        {
            for (int i = j + 1; i < n; ++i)
            {
                double xij = lum.At<double>(i, j) / xjj;
                lum.Set<double>(i, j, xij);
                for (int k = j + 1; k < n; ++k)
                    lum.Set<double>(i, k, lum.At<double>(i, k) - xij * lum.At<double>(j, k));
            }
        }
    }

    return toggle; // for the determinant
}
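A quick sanity check for the LU helpers and MatInverse above: multiplying a matrix by its computed inverse should give approximately the identity. A minimal sketch; the 3x3 test values are arbitrary (an assumption), chosen to be invertible:

// Sanity check (assumes MatDecompose, Reduce, and MatInverse are in scope):
// m * MatInverse(m) should be approximately the identity matrix.
static void TestMatInverse()
{
    cv.Mat m = new cv.Mat(3, 3, cv.MatType.CV_64FC1);
    double[] vals = { 4, 7, 2, 3, 6, 1, 2, 5, 3 }; // det = 9, so invertible
    for (int i = 0; i < 3; ++i)
        for (int j = 0; j < 3; ++j)
            m.Set<double>(i, j, vals[i * 3 + j]);

    cv.Mat inv = MatInverse(m);
    cv.Mat prod = (m * inv).ToMat(); // should be ~identity

    for (int i = 0; i < 3; ++i)
        for (int j = 0; j < 3; ++j)
        {
            double expected = (i == j) ? 1.0 : 0.0;
            if (Math.Abs(prod.At<double>(i, j) - expected) > 1e-8)
                Console.WriteLine("inverse check failed at ({0},{1})", i, j);
        }
}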
public void work(OpenCvSharp.Mat frame)
{
    // Send the frame to the capturer (_tracker).
    // pbio::CVRawImage cvri_frame;
    byte[] data = new byte[frame.Total() * frame.Type().Channels];
    Marshal.Copy(frame.DataStart, data, 0, (int)data.Length);
    RawImage ri_frame = new RawImage(frame.Width, frame.Height, RawImage.Format.FORMAT_BGR, data);
    List<RawSample> samples = _tracker.capture(ri_frame);

    // Clone the frame for drawing on it.
    OpenCvSharp.Mat draw_image = frame.Clone();

    // Handle each face on the frame separately.
    for (int i = 0; i < samples.Count; ++i)
    {
        RawSample sample = samples[i];

        // Get a face rectangle.
        RawSample.Rectangle rectangle = sample.getRectangle();

        // Set a point to place information for this face.
        OpenCvSharp.Point2f text_point = new OpenCvSharp.Point2f(
            rectangle.x + rectangle.width + 3,
            rectangle.y + 10);

        const float text_line_height = 22;

        // Draw facial points:
        // red for all points, green for the left eye, yellow for the right eye.
        // (Yes, there is a mess with left and right eyes in the face_sdk api,
        // but if we fix it now we will lose compatibility with previous versions.)
        if (_flag_points)
        {
            List<Point> points = sample.getLandmarks();

            for (int j = -2; j < points.Count; ++j)
            {
                Point p =
                    j == -2 ? sample.getLeftEye() :
                    j == -1 ? sample.getRightEye() :
                    points[j];

                OpenCvSharp.Scalar color =
                    j == -2 ? new OpenCvSharp.Scalar(50, 255, 50) :
                    j == -1 ? new OpenCvSharp.Scalar(50, 255, 255) :
                    new OpenCvSharp.Scalar(50, 50, 255);

                OpenCvSharp.Cv2.Circle(
                    draw_image,
                    new OpenCvSharp.Point2f(p.x, p.y),
                    j < 0 ? 4 : 2,
                    color,
                    -1,
                    OpenCvSharp.LineTypes.AntiAlias);
            }
        }

        // Draw the face rectangle.
        if (_flag_positions)
        {
            OpenCvSharp.Cv2.Rectangle(
                draw_image,
                new OpenCvSharp.Rect(rectangle.x, rectangle.y, rectangle.width, rectangle.height),
                new OpenCvSharp.Scalar(50, 50, 255),
                2,
                OpenCvSharp.LineTypes.AntiAlias);
        }

        // Draw age and gender.
        if (_flag_age_gender)
        {
            AgeGenderEstimator.AgeGender age_gender = _age_geder_estimator.estimateAgeGender(sample);

            string age_text = "age: ";
            switch (age_gender.age)
            {
                case AgeGenderEstimator.Age.AGE_KID: age_text += "kid "; break;
                case AgeGenderEstimator.Age.AGE_YOUNG: age_text += "young "; break;
                case AgeGenderEstimator.Age.AGE_ADULT: age_text += "adult "; break;
                case AgeGenderEstimator.Age.AGE_SENIOR: age_text += "senior "; break;
            }

            age_text += string.Format("years: {0:G3}", age_gender.age_years);

            puttext(draw_image, age_text, text_point);
            text_point.Y += text_line_height;

            puttext(
                draw_image,
                age_gender.gender == AgeGenderEstimator.Gender.GENDER_FEMALE ? "gender: female" :
                age_gender.gender == AgeGenderEstimator.Gender.GENDER_MALE ? "gender: male" : "?",
                text_point);
            text_point.Y += text_line_height;

            text_point.Y += text_line_height / 3;
        }

        // Draw emotions.
        if (_flag_emotions)
        {
            List<EmotionsEstimator.EmotionConfidence> emotions =
                _emotions_estimator.estimateEmotions(sample);

            for (int j = 0; j < emotions.Count; ++j)
            {
                EmotionsEstimator.Emotion emotion = emotions[j].emotion;
                float confidence = emotions[j].confidence;

                OpenCvSharp.Cv2.Rectangle(
                    draw_image,
                    new OpenCvSharp.Rect(
                        (int)text_point.X,
                        (int)text_point.Y - (int)text_line_height / 2,
                        (int)(100 * confidence),
                        (int)text_line_height),
                    emotion == EmotionsEstimator.Emotion.EMOTION_NEUTRAL ? new OpenCvSharp.Scalar(255, 0, 0) :
                    emotion == EmotionsEstimator.Emotion.EMOTION_HAPPY ? new OpenCvSharp.Scalar(0, 255, 0) :
                    emotion == EmotionsEstimator.Emotion.EMOTION_ANGRY ? new OpenCvSharp.Scalar(0, 0, 255) :
                    emotion == EmotionsEstimator.Emotion.EMOTION_SURPRISE ? new OpenCvSharp.Scalar(0, 255, 255) :
                    new OpenCvSharp.Scalar(0, 0, 0),
                    -1);

                puttext(
                    draw_image,
                    emotion == EmotionsEstimator.Emotion.EMOTION_NEUTRAL ? "neutral" :
                    emotion == EmotionsEstimator.Emotion.EMOTION_HAPPY ? "happy" :
                    emotion == EmotionsEstimator.Emotion.EMOTION_ANGRY ? "angry" :
                    emotion == EmotionsEstimator.Emotion.EMOTION_SURPRISE ? "surprise" : "?",
                    text_point + new OpenCvSharp.Point2f(100, 0));

                text_point.Y += text_line_height;
                text_point.Y += text_line_height / 3;
            }
        }

        // Draw angles text.
        if (_flag_angles)
        {
            string yaw, pitch, roll;
            // Note: this probably meant 0.1f * (int)(10 * angle + 0.5f) to round
            // to one decimal place; kept as in the original.
            yaw = string.Format("yaw: {0}", (0.1f * (int)10 * sample.getAngles().yaw + 0.5f));
            pitch = string.Format("pitch: {0}", (0.1f * (int)10 * sample.getAngles().pitch + 0.5f));
            roll = string.Format("roll: {0}", (0.1f * (int)10 * sample.getAngles().roll + 0.5f));

            puttext(draw_image, yaw, text_point);
            text_point.Y += text_line_height;

            puttext(draw_image, pitch, text_point);
            text_point.Y += text_line_height;

            puttext(draw_image, roll, text_point);
            text_point.Y += text_line_height;

            text_point.Y += text_line_height / 3;
        }

        // Draw angle vectors.
        if (_flag_angles_vectors)
        {
            RawSample.Angles angles = sample.getAngles();

            float cos_a = (float)Math.Cos(angles.yaw * OpenCvSharp.Cv2.PI / 180);
            float sin_a = (float)Math.Sin(angles.yaw * OpenCvSharp.Cv2.PI / 180);
            float cos_b = (float)Math.Cos(angles.pitch * OpenCvSharp.Cv2.PI / 180);
            float sin_b = (float)Math.Sin(angles.pitch * OpenCvSharp.Cv2.PI / 180);
            float cos_c = (float)Math.Cos(angles.roll * OpenCvSharp.Cv2.PI / 180);
            float sin_c = (float)Math.Sin(angles.roll * OpenCvSharp.Cv2.PI / 180);

            OpenCvSharp.Point3f[] xyz =
            {
                new OpenCvSharp.Point3f(cos_a * cos_c, -sin_c, -sin_a),
                new OpenCvSharp.Point3f(sin_c, cos_b * cos_c, -sin_b),
                new OpenCvSharp.Point3f(sin_a, sin_b, cos_a * cos_b)
            };

            OpenCvSharp.Point2f center = new OpenCvSharp.Point2f(
                (sample.getLeftEye().x + sample.getRightEye().x) * 0.5f,
                (sample.getLeftEye().y + sample.getRightEye().y) * 0.5f);

            float length = (rectangle.width + rectangle.height) * 0.3f;

            for (int c = 0; c < 3; ++c)
            {
                OpenCvSharp.Cv2.Line(
                    draw_image,
                    center,
                    center + new OpenCvSharp.Point2f(xyz[c].X, -xyz[c].Y) * length,
                    c == 0 ? new OpenCvSharp.Scalar(50, 255, 255) :
                    c == 1 ? new OpenCvSharp.Scalar(50, 255, 50) :
                    c == 2 ? new OpenCvSharp.Scalar(50, 50, 255) :
                    new OpenCvSharp.Scalar(),
                    2,
                    OpenCvSharp.LineTypes.AntiAlias);
            }
        }

        // Draw quality text.
        if (_flag_quality)
        {
            QualityEstimator.Quality q = _quality_estimator.estimateQuality(sample);

            string lighting, noise, sharpness, flare;

            lighting = "lighting: " + q.lighting.ToString();
            puttext(draw_image, lighting, text_point);
            text_point.Y += text_line_height;

            noise = "noise: " + q.noise.ToString();
            puttext(draw_image, noise, text_point);
            text_point.Y += text_line_height;

            sharpness = "sharpness: " + q.sharpness.ToString();
            puttext(draw_image, sharpness, text_point);
            text_point.Y += text_line_height;

            flare = "flare: " + q.flare.ToString();
            puttext(draw_image, flare, text_point);
            text_point.Y += text_line_height;

            text_point.Y += text_line_height / 3;
        }

        // Draw liveness text.
        if (_flag_liveness)
        {
            Liveness2DEstimator.Liveness liveness_2d_result =
                _liveness_2d_estimator.estimateLiveness(sample);

            puttext(
                draw_image,
                "liveness: " + (
                    liveness_2d_result == Liveness2DEstimator.Liveness.REAL ? "real" :
                    liveness_2d_result == Liveness2DEstimator.Liveness.FAKE ? "fake" :
                    liveness_2d_result == Liveness2DEstimator.Liveness.NOT_ENOUGH_DATA ? "not enough data" : "??"),
                text_point);

            text_point.Y += text_line_height;
            text_point.Y += text_line_height / 3;
        }

        // Draw face quality.
        if (_flag_face_quality)
        {
            float quality = _face_quality_estimator.estimateQuality(sample);
            string ss = "face quality: " + quality.ToString();
            puttext(draw_image, ss, text_point);
            text_point.Y += text_line_height;
            text_point.Y += text_line_height / 3;
        }

        // Draw face cuts.
        for (int cut_i = 0; cut_i < 3; ++cut_i)
        {
            if ((cut_i == 0 && !_flag_cutting_base) ||
                (cut_i == 1 && !_flag_cutting_full) ||
                (cut_i == 2 && !_flag_cutting_token))
            {
                continue;
            }

            puttext(
                draw_image,
                cut_i == 0 ? "base cut:" :
                cut_i == 1 ? "full cut:" :
                cut_i == 2 ? "token cut:" : "?? cut",
                text_point);
            text_point.Y += text_line_height / 2;

            MemoryStream obuf = new MemoryStream();

            sample.cutFaceImage(
                obuf,
                RawSample.ImageFormat.IMAGE_FORMAT_BMP,
                cut_i == 0 ? RawSample.FaceCutType.FACE_CUT_BASE :
                cut_i == 1 ? RawSample.FaceCutType.FACE_CUT_FULL_FRONTAL :
                cut_i == 2 ? RawSample.FaceCutType.FACE_CUT_TOKEN_FRONTAL :
                (RawSample.FaceCutType)999);

            byte[] sbuf = obuf.ToArray();

            // const OpenCvSharp.Mat_<uchar> cvbuf(1, sbuf.length(), (uchar*) sbuf.c_str());
            OpenCvSharp.Mat img = OpenCvSharp.Cv2.ImDecode(sbuf, OpenCvSharp.ImreadModes.Unchanged);

            OpenCvSharp.Cv2.Resize(img, img, OpenCvSharp.Size.Zero, 0.3, 0.3);

            // Clip the cut image to the drawable area.
            int img_rect_x = (int)Math.Max(0, -text_point.X);
            int img_rect_y = (int)Math.Max(0, -text_point.Y);
            int img_rect_width = (int)Math.Min(
                img.Cols - img_rect_x,
                draw_image.Cols - Math.Max(0, text_point.X));
            int img_rect_height = (int)Math.Min(
                img.Rows - img_rect_y,
                draw_image.Rows - Math.Max(0, text_point.Y));

            if (img_rect_width <= 0 || img_rect_height <= 0)
            {
                continue;
            }

            OpenCvSharp.Rect img_rect = new OpenCvSharp.Rect(img_rect_x, img_rect_y, img_rect_width, img_rect_height);

            img[img_rect].CopyTo(
                draw_image[new OpenCvSharp.Rect(
                    (int)Math.Max(0, text_point.X),
                    (int)Math.Max(0, text_point.Y),
                    img_rect.Width,
                    img_rect.Height)]);

            text_point.Y += text_line_height / 2;
            text_point.Y += img.Rows;
            text_point.Y += text_line_height / 3;
        }
    }

    // Draw the checkboxes.
    for (int i = 0; i < flags_count; ++i)
    {
        OpenCvSharp.Rect rect = flag_rect(i);
        OpenCvSharp.Rect rect2 = new OpenCvSharp.Rect(rect.X + 5, rect.Y + 5, rect.Width - 10, rect.Height - 10);

        OpenCvSharp.Cv2.Rectangle(draw_image, rect, OpenCvSharp.Scalar.All(255), -1);
        OpenCvSharp.Cv2.Rectangle(draw_image, rect, OpenCvSharp.Scalar.All(0), 2, OpenCvSharp.LineTypes.AntiAlias);

        if (get_flag(i))
        {
            OpenCvSharp.Cv2.Rectangle(draw_image, rect2, OpenCvSharp.Scalar.All(0), -1, OpenCvSharp.LineTypes.AntiAlias);
        }

        puttext(
            draw_image,
            flag_name(i),
            new OpenCvSharp.Point2f(rect.X + rect.Width + 3, rect.Y + rect.Height - 3));
    }

    // Show the image with the drawn information.
    OpenCvSharp.Cv2.ImShow("demo", draw_image);

    // Register a callback for mouse events.
    OpenCvSharp.Cv2.SetMouseCallback("demo", (OpenCvSharp.CvMouseCallback)onMouse);
}
public static void render_2D(ref OpenCvSharp.Mat left_display, sl.float2 img_scale,
    ref sl.Objects objects, bool showOnlyOK)
{
    OpenCvSharp.Mat overlay = left_display.Clone();
    OpenCvSharp.Rect roi_render = new OpenCvSharp.Rect(1, 1, left_display.Size().Width, left_display.Size().Height);

    for (int i = 0; i < objects.numObject; i++)
    {
        sl.ObjectData obj = objects.objectData[i];
        if (renderObject(obj, showOnlyOK))
        {
            // Draw skeleton bones.
            OpenCvSharp.Scalar base_color = generateColorID(obj.id);
            foreach (var part in SKELETON_BONES)
            {
                var kp_a = cvt(obj.keypoints2D[(int)part.Item1], img_scale);
                var kp_b = cvt(obj.keypoints2D[(int)part.Item2], img_scale);
                if (roi_render.Contains(kp_a) && roi_render.Contains(kp_b))
                {
                    Cv2.Line(left_display, kp_a, kp_b, base_color, 1, LineTypes.AntiAlias);
                }
            }

            // Add a spine segment between the hip midpoint and the neck.
            var hip_left = obj.keypoints2D[(int)sl.BODY_PARTS.LEFT_HIP];
            var hip_right = obj.keypoints2D[(int)sl.BODY_PARTS.RIGHT_HIP];
            var spine = (hip_left + hip_right) / 2;
            var neck = obj.keypoints2D[(int)sl.BODY_PARTS.NECK];

            if (hip_left.X > 0 && hip_left.Y > 0 && hip_right.X > 0 && hip_right.Y > 0 &&
                neck.X > 0 && neck.Y > 0)
            {
                var spine_a = cvt(spine, img_scale);
                var spine_b = cvt(neck, img_scale);
                if (roi_render.Contains(spine_a) && roi_render.Contains(spine_b))
                {
                    Cv2.Line(left_display, spine_a, spine_b, base_color, 1, LineTypes.AntiAlias);
                }
            }

            // Draw skeleton joints.
            foreach (var kp in obj.keypoints2D)
            {
                Point cv_kp = cvt(kp, img_scale);
                if (roi_render.Contains(cv_kp))
                {
                    Cv2.Circle(left_display, cv_kp, 3, base_color, -1);
                }
            }
            if (hip_left.X > 0 && hip_left.Y > 0 && hip_right.X > 0 && hip_right.Y > 0)
            {
                Point cv_spine = cvt(spine, img_scale);
                if (roi_render.Contains(cv_spine))
                {
                    Cv2.Circle(left_display, cv_spine, 3, base_color, -1);
                }
            }
        }
    }

    // overlay is the same as the left image, but with an opaque mask over each detected object.
    Cv2.AddWeighted(left_display, 0.9, overlay, 0.1, 0.0, left_display);
}
private OpenCvSharp.Point DetectCenterOfEye(OpenCvSharp.Mat frame, Rect boundingBox)
{
    // Gets the pupil location relative to the frame.
    // https://pysource.com/2019/01/04/eye-motion-tracking-opencv-with-python/

    // Extract the eye region by coordinates.
    OpenCvSharp.Mat Roi = frame.Clone(boundingBox);
    if (Roi.Rows == 0 && Roi.Cols == 0)
    {
        return new OpenCvSharp.Point();
    }

    // Convert to grayscale.
    OpenCvSharp.Mat grayRoi = new OpenCvSharp.Mat();
    Cv2.CvtColor(Roi, grayRoi, ColorConversionCodes.BGR2GRAY);

    // Get rid of surrounding noise to isolate the pupil.
    OpenCvSharp.Mat grayRoi2 = new OpenCvSharp.Mat();
    Cv2.GaussianBlur(grayRoi, grayRoi2, new OpenCvSharp.Size(7, 7), 0);

    // Try to get rid of more noise.
    OpenCvSharp.Mat threshold = new OpenCvSharp.Mat();
    Cv2.Threshold(grayRoi2, threshold, 5, 255, ThresholdTypes.BinaryInv); // Cv2.Thresh_Binary

    OpenCvSharp.Point[][] contours;
    HierarchyIndex[] hi;
    Cv2.FindContours(threshold, out contours, out hi, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple);

    // Get the contour with the largest area: the pupil of the eye.
    OpenCvSharp.Point CenterOfEye = new OpenCvSharp.Point();
    int EyeRadius = 0;
    for (int i = 0; i < contours.Length; i++)
    {
        OpenCvSharp.Point thisEye;
        int thisEyeRadius = 0;
        PupilCenter(contours, i, out thisEye, out thisEyeRadius);
        if (i == 0 || thisEyeRadius > EyeRadius)
        {
            CenterOfEye = thisEye;
            EyeRadius = thisEyeRadius;
        }
    }

    if (EyeRadius > 0)
    {
        Scalar color = new Scalar(0, 0, 255);

        // Locate the center relative to the frame.
        int FrameX = CenterOfEye.X + EyeRadius + boundingBox.X;
        int FrameY = CenterOfEye.Y + EyeRadius + boundingBox.Y;
        OpenCvSharp.Point ctr = new OpenCvSharp.Point(FrameX, FrameY);
        Cv2.Circle(frame, ctr, EyeRadius, color: color, thickness: 2);

        // Draw a vertical line through the center of the pupil.
        Cv2.Line(img: frame,
            pt1: new OpenCvSharp.Point(FrameX, boundingBox.Y),
            pt2: new OpenCvSharp.Point(FrameX, boundingBox.Y + boundingBox.Height),
            color, 1);

        // Draw a horizontal line through the center of the pupil.
        Cv2.Line(img: frame,
            pt1: new OpenCvSharp.Point(boundingBox.X, FrameY),
            pt2: new OpenCvSharp.Point(boundingBox.X + boundingBox.Width, FrameY),
            color, 1);
    }

    Roi.Dispose();
    grayRoi.Dispose();
    grayRoi2.Dispose();
    threshold.Dispose();

    return CenterOfEye;
}
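PupilCenter is not included in this listing. Judging from how DetectCenterOfEye offsets the result by EyeRadius, it appears to return the top-left corner of the contour's bounding box plus a radius; a sketch under that assumption:

// Sketch of the missing PupilCenter helper (an assumption based on how
// DetectCenterOfEye adds EyeRadius to the returned coordinates): returns the
// top-left corner of the contour's bounding box and half its width as the radius.
private void PupilCenter(OpenCvSharp.Point[][] contours, int i,
    out OpenCvSharp.Point center, out int radius)
{
    OpenCvSharp.Rect box = Cv2.BoundingRect(contours[i]);
    center = new OpenCvSharp.Point(box.X, box.Y);
    radius = box.Width / 2;
}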
public void CvDct(ref Mat DST, Mat SRC, int N)
{
    Mat dct, idct;
    Mat dct2, dct3;
    int width = SRC.Width;   // N;
    int height = SRC.Height; // N;
    DST = SRC.Clone();

    // Create matrices for the DCT / inverse DCT (double precision).
    dct = new Mat(height, width, MatType.CV_64FC1);
    idct = new Mat(height, width, MatType.CV_64FC1);
    dct2 = new Mat(height, width, MatType.CV_64FC1);
    dct3 = new Mat(height, width, MatType.CV_64FC1);

    var indexer_DST = new MatOfByte3(DST).GetIndexer();
    var indexer_dct = new MatOfDouble3(dct).GetIndexer();

    // Copy the image data into the dct matrix.
    for (int x = 0; x < width; x++)
        for (int y = 0; y < height; y++)
        {
            Vec3d color = indexer_dct[y, x];
            color.Item0 = indexer_DST[y, x].Item0 / 256.0;
            indexer_dct[y, x] = color;
        }

    // DCT: cosine-transform dct to produce dct2.
    Cv2.Dct(dct, dct2, DctFlags.None);

    // Divide dct2 by the denominator matrix to produce dct3.
    PerformDenom(ref dct3, dct2);

    // IDCT: inverse cosine-transform dct3.
    Cv2.Dct(dct3, idct, DctFlags.Inverse);

    var indexer_idct = new MatOfDouble3(idct).GetIndexer();

    // Copy the data back into the output image.
    for (int x = 0; x < width; x++)
        for (int y = 0; y < height; y++)
        {
            Vec3b color = indexer_DST[y, x];
            color.Item0 = (byte)(indexer_idct[y, x].Item0 * 256.0);
            indexer_DST[y, x] = color;
        }

    //// Normalization (disabled)
    //double min, max;
    //min = 4000000000000;
    //max = -4000000000000;
    //double offset = 0.0;
    //// Get the minimum and maximum luminance.
    //DST.MinMaxIdx(out min, out max);
    ////for (int x = 0; x < width; x++)
    ////    for (int y = 0; y < height; y++)
    ////    {
    ////        double data = indexer_DST[y, x].Item0;
    ////        if (data < min) min = data;
    ////        if (data > max) max = data;
    ////    }
    //for (int x = 0; x < width; x++)
    //    for (int y = 0; y < height; y++)
    //    {
    //        Vec3b color = indexer_DST[y, x];
    //        double data = indexer_DST[y, x].Item0;
    //        if (data < min + offset) data = min + offset;
    //        color.Item0 = (byte)((((data / (max - min + offset))) * 255.0) - (((min + offset) / (max - min + offset)) * 255.0));
    //        indexer_DST[y, x] = color;
    //    }
    ////DST = idct.Clone();

    // Release the matrices.
    dct.Dispose();
    dct2.Dispose();
    dct3.Dispose();
    idct.Dispose();
    indexer_dct = null;
    indexer_DST = null;
    indexer_idct = null;
}
private void PerformDenom(ref Mat DST, Mat SRC)
{
    double PI = 3.1416;
    int width = SRC.Width;
    int height = SRC.Height;

    // Prepare X and Y.
    double[,] meshX = new double[height, width];
    double[,] meshY = new double[height, width];
    double[,] denom = new double[height, width];
    DST = SRC.Clone();
    var indexer = new MatOfDouble3(DST).GetIndexer();

    // Create the mesh grid. (meshX and meshY are prepared but not used below.)
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
        {
            meshX[y, x] = x;
            meshY[y, x] = y;
        }

    // Compute the eigenvalues.
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
            denom[y, x] = (2.0 * Math.Cos(PI * (double)x / ((double)width)) - 2.0)
                        + (2.0 * Math.Cos(PI * (double)y / ((double)height)) - 2.0);

    // Divide each coefficient by its eigenvalue.
    for (int x = 0; x < width; x++)
        for (int y = 0; y < height; y++)
        {
            // data_d[j][i] = data_s[j][i] / denom[j][i];
            Vec3d color = indexer[y, x];
            color.Item0 = indexer[y, x].Item0;
            // Avoid division by zero (the DC term at x == 0, y == 0).
            if (!(x == 0 && y == 0))
                color.Item0 = color.Item0 / denom[y, x];
            indexer[y, x] = color;
        }
    indexer = null;
}
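For context: denom(x, y) = (2·cos(πx/W) − 2) + (2·cos(πy/H) − 2) is the eigenvalue of the discrete Laplacian for the (x, y) cosine basis function, so dividing the DCT coefficients by it (and skipping the zero-frequency term) solves a Poisson equation in the transform domain. That is presumably why CvDct is applied to the summed second derivatives Gxx + Gyy in OnClick実行 below: it reconstructs an image from its Laplacian.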
private void OnClick実行(object sender, EventArgs e)
{
    System.Diagnostics.Debug.WriteLine("OnClick実行 start");
    if (is4Image)
    {
        int width = 入力画像[0].Width;
        int height = 入力画像[0].Height;
        SGx = new OpenCvSharp.Mat(height, width, MatType.CV_8UC1);
        SGy = new OpenCvSharp.Mat(height, width, MatType.CV_8UC1);

        for (int num = 0; num < 4; num++)
        {
            Gx[num] = 入力画像[num].Clone();
            Gy[num] = 入力画像[num].Clone();
        }

        // infilterX, infilterY: gradient images for each input.
        for (int num = 0; num < 4; num++)
        {
            mCV.infilterX(ref Gx[num], 入力画像[num]);
            mCV.infilterY(ref Gy[num], 入力画像[num]);
        }
        mCV.Median(Gx, ref SGx);
        mCV.Median(Gy, ref SGy);

        // Build Gxx and Gyy; for now, skip the outer 1-pixel border.
        Gxx = new OpenCvSharp.Mat(height, width, MatType.CV_8UC1);
        Gyy = new OpenCvSharp.Mat(height, width, MatType.CV_8UC1);
        mCV.infilterX(ref Gxx, SGx);
        mCV.infilterY(ref Gyy, SGy);

        // Create SP (a temporary output image).
        Mat SP = new OpenCvSharp.Mat(height, width, MatType.CV_8UC1);
        var indexer_sp = new MatOfByte3(SP).GetIndexer();
        var indexer_Gxx = new MatOfByte3(Gxx).GetIndexer();
        var indexer_Gyy = new MatOfByte3(Gyy).GetIndexer();
        for (int x = 0; x < width; x++)
            for (int y = 0; y < height; y++)
            {
                Vec3b color = indexer_sp[y, x];
                double val = indexer_Gxx[y, x].Item0 + indexer_Gyy[y, x].Item0;
                if (val > 255) color.Item0 = 255;
                else if (val < 0) color.Item0 = 0;
                else color.Item0 = (byte)val;
                indexer_sp[y, x] = color;
            }
        indexer_sp = null;
        indexer_Gxx = null;
        indexer_Gyy = null;

        Mat DCT_dst = new OpenCvSharp.Mat(height, width, MatType.CV_8UC1);
        mCV.CvDct(ref DCT_dst, SP, 1024); // note: the third argument is unused

        //DCT_dst = SP.Clone();
        //mCV.吉岡反射光除去処理(入力画像, ref DCT_dst, int.Parse(textBox_Gaus.Text), int.Parse(textBox_Bright.Text));

        出力画像 = DCT_dst.Clone();
    }
    else
    {
        System.Diagnostics.Debug.WriteLine("no 4 images");
    }
    System.Diagnostics.Debug.WriteLine("OnClick実行 end");
}