/// <summary>
/// Grabs frames from the default camera and shows them next to a
/// super-resolution (BTV-L1) enhanced version until either stream ends.
/// </summary>
public void Run()
{
    // Bug fix: VideoCapture.Set() calls made before Open() are ignored by
    // most capture backends, so open the device first, then set resolution.
    // Also dispose the capture and frame Mats (previously leaked).
    using (var capture = new VideoCapture())
    {
        capture.Open(-1);
        if (!capture.IsOpened())
            throw new Exception("capture initialization failed");
        capture.Set(CaptureProperty.FrameWidth, 640);
        capture.Set(CaptureProperty.FrameHeight, 480);

        var fs = FrameSource.CreateCameraSource(-1);
        var sr = SuperResolution.CreateBTVL1();
        sr.SetInput(fs);

        using (var normalWindow = new Window("normal"))
        using (var srWindow = new Window("super resolution"))
        using (var normalFrame = new Mat())
        using (var srFrame = new Mat())
        {
            while (true)
            {
                capture.Read(normalFrame);
                sr.NextFrame(srFrame);
                if (normalFrame.Empty() || srFrame.Empty())
                    break;
                normalWindow.ShowImage(normalFrame);
                srWindow.ShowImage(srFrame);
                Cv2.WaitKey(100);
            }
        }
    }
}
/// <summary>
/// Demonstrates connected-component labeling: renders blobs, draws bounding
/// boxes for every blob except the background, and extracts the largest blob.
/// </summary>
public void Run()
{
    Mat src = new Mat("Data/Image/shapes.png", ImreadModes.Color);
    Mat gray = src.CvtColor(ColorConversionCodes.BGR2GRAY);
    // Otsu selects the threshold automatically; the 0 passed here is ignored.
    Mat binary = gray.Threshold(0, 255, ThresholdTypes.Otsu | ThresholdTypes.Binary);
    Mat labelView = src.EmptyClone();
    Mat rectView = binary.CvtColor(ColorConversionCodes.GRAY2BGR);

    ConnectedComponents cc = Cv2.ConnectedComponentsEx(binary);
    if (cc.LabelCount <= 1)
        return; // only the background label was found

    // draw labels
    cc.RenderBlobs(labelView);

    // draw bounding boxes, skipping blob 0 (the background)
    foreach (var blob in cc.Blobs.Skip(1))
    {
        rectView.Rectangle(blob.Rect, Scalar.Red);
    }

    // filter maximum blob
    var maxBlob = cc.GetLargestBlob();
    var filtered = new Mat();
    cc.FilterByBlob(src, filtered, maxBlob);

    using (new Window("src", src))
    using (new Window("binary", binary))
    using (new Window("labels", labelView))
    using (new Window("bonding boxes", rectView))
    using (new Window("maximum blob", filtered))
    {
        Cv2.WaitKey();
    }
}
/// <summary>
/// Detects faces in the sample image and draws a magenta ellipse around each.
/// </summary>
/// <param name="cascade">Pre-loaded Haar cascade used for face detection.</param>
/// <returns>A clone of the source image with all detected faces marked.</returns>
private Mat DetectFace(CascadeClassifier cascade)
{
    Mat result;
    using (var src = new Mat(FilePath.Image.Yalta, ImreadModes.Color))
    using (var gray = new Mat())
    {
        result = src.Clone();
        Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY);

        // Detect faces
        Rect[] faces = cascade.DetectMultiScale(
            gray, 1.08, 2, HaarDetectionType.ScaleImage, new Size(30, 30));

        // Render all detected faces as an ellipse centered on the face rect
        foreach (Rect face in faces)
        {
            var center = new Point
            {
                X = (int)(face.X + face.Width * 0.5),
                Y = (int)(face.Y + face.Height * 0.5)
            };
            var axes = new Size
            {
                Width = (int)(face.Width * 0.5),
                Height = (int)(face.Height * 0.5)
            };
            Cv2.Ellipse(result, center, axes, 0, 0, 360, new Scalar(255, 0, 255), 4);
        }
    }
    return result;
}
/// <summary>
/// Captures a single frame from the specified camera.
/// </summary>
/// <param name="cameraIndex">Zero-based index of the camera to open.</param>
/// <returns>The captured frame; may be empty if the read failed.</returns>
public static Mat GetCameraImage(int cameraIndex = 0)
{
    var frame = new Mat();
    // Bug fix: previously hard-coded camera 0, silently ignoring cameraIndex.
    using (var capture = new VideoCapture(cameraIndex))
        capture.Read(frame);
    return frame;
}
// Inverts square matrix m by LU decomposition (done by MatDecompose) and
// solving for each column of the permuted identity via Reduce.
static cv.Mat MatInverse(cv.Mat m)
{
    // assumes determinant is not 0
    // that is, the matrix does have an inverse
    int n = m.Rows;
    cv.Mat result = m.Clone();
    cv.Mat lum; // combined lower & upper
    int[] perm; // out parameter
    MatDecompose(m, out lum, out perm); // ignore return

    double[] b = new double[n];
    for (int i = 0; i < n; ++i)
    {
        // b is column i of the identity, reordered by the pivot permutation
        for (int j = 0; j < n; ++j)
        {
            if (i == perm[j])
            {
                b[j] = 1.0;
            }
            else
            {
                b[j] = 0.0;
            }
        }

        double[] x = Reduce(lum, b); // forward/back substitution
        // store the solution as column i of the inverse
        for (int j = 0; j < n; ++j)
        {
            result.Set<double>(j, i, x[j]);
        }
    }
    return (result);
}
// Classic triple-loop matrix multiply for CV_64F Mats: result = matA * matB.
// Throws if the inner dimensions do not agree.
static cv.Mat MatProduct(cv.Mat matA, cv.Mat matB)
{
    int aRows = matA.Rows, aCols = matA.Cols;
    int bRows = matB.Rows, bCols = matB.Cols;
    if (aCols != bRows)
    {
        throw new Exception("Non-conformable matrices");
    }

    cv.Mat result = new cv.Mat(aRows, bCols, cv.MatType.CV_64F, 0);
    for (int row = 0; row < aRows; ++row)
    {
        for (int col = 0; col < bCols; ++col)
        {
            // accumulate the dot product of row `row` of A with col `col` of B
            double sum = 0.0;
            for (int k = 0; k < aCols; ++k)
            {
                sum += matA.At<double>(row, k) * matB.At<double>(k, col);
            }
            result.Set<double>(row, col, sum);
        }
    }
    return (result);
}
// Solves (LU)x = b for x, where luMatrix holds a combined lower/upper
// decomposition (unit diagonal for L is implicit).
static double[] Reduce(cv.Mat luMatrix, double[] b) // helper
{
    int n = luMatrix.Rows;
    double[] x = new double[n];
    for (int i = 0; i < n; ++i)
    {
        x[i] = b[i];
    }

    // forward substitution with the lower-triangular part
    for (int i = 1; i < n; ++i)
    {
        double sum = x[i];
        for (int j = 0; j < i; ++j)
        {
            sum -= luMatrix.At<double>(i, j) * x[j];
        }
        x[i] = sum;
    }

    // back substitution with the upper-triangular part
    x[n - 1] /= luMatrix.At<double>(n - 1, n - 1);
    for (int i = n - 2; i >= 0; --i)
    {
        double sum = x[i];
        for (int j = i + 1; j < n; ++j)
        {
            sum -= luMatrix.At<double>(i, j) * x[j];
        }
        x[i] = sum / luMatrix.At<double>(i, i);
    }
    return (x);
}
/// <summary>
/// Converts the input Mat to 32-bit float, copies it into MeshDataSeries
/// (transposed: [z, x]), tracks the running maximum in yMax, and sets the
/// surface mesh color range from the Mat's min/max values.
/// </summary>
public void SetData(cv.Mat input)
{
    //if (xSize != -1 || zSize != -1)
    //{
    //    surfaceMeshRenderableSeries.DataSeries.Clear();
    //}
    zMap = new cv.Mat(new cv.Size(input.Width, input.Height), cv.MatType.CV_32FC1);
    input.ConvertTo(zMap, cv.MatType.CV_32FC1);
    //Init(zMap.Rows, zMap.Cols);
    Init(zMap.Rows, zMap.Cols);

    // NOTE(review): zMap.At<float>(x, z) indexes row = x, col = z — assumes
    // xSize == zMap.Rows and zSize == zMap.Cols; confirm against Init().
    Parallel.For(0, xSize, x =>
    {
        for (int z = 0; z < zSize; ++z)
        {
            MeshDataSeries[z, x] = zMap.At<float>(x, z);
            // NOTE(review): yMax is updated from parallel iterations without
            // synchronization — confirm a racy max is acceptable here.
            if (yMax < zMap.Get<float>(x, z))
            {
                yMax = zMap.Get<float>(x, z);
            }
        }
    });

    double min = 0.0f, max = 0.0f;
    zMap.MinMaxLoc(out min, out max);
    surfaceMeshRenderableSeries.Maximum = max;
    surfaceMeshRenderableSeries.Minimum = min;
    //backgroundSurfaceMesh.IsVisible = false;
}
/// <summary>
/// Fits a circle to the current annotation's points (x/z plane) by solving
/// the linear least-squares form of the circle equation with SVD.
/// </summary>
/// <returns>{ center a, center b, radius r }.</returns>
public double[] GetRadiusCenter3P()
{
    int dataSize = positionInfo[annotIdx].points.Count;
    double a, b, r;
    // Solve A*[p q c]^T = B for x^2 + z^2 + p*x + q*z + c = 0
    cv.Mat A = cv.Mat.Zeros(dataSize, 3, cv.MatType.CV_64FC1);
    cv.Mat B = cv.Mat.Zeros(dataSize, 1, cv.MatType.CV_64FC1);
    // NOTE(review): X is allocated dataSize x 1 although the solution is
    // 3 x 1; Cv2.Solve resizes the output, so this works but is misleading.
    cv.Mat X = cv.Mat.Zeros(dataSize, 1, cv.MatType.CV_64FC1);
    for (int i = 0; i < dataSize; i++)
    {
        A.Set<double>(i, 0, positionInfo[annotIdx].points[i].x);
        A.Set<double>(i, 1, positionInfo[annotIdx].points[i].z);
        A.Set<double>(i, 2, 1);
        B.Set<double>(i, -Math.Pow(positionInfo[annotIdx].points[i].x, 2) - Math.Pow(positionInfo[annotIdx].points[i].z, 2));
    }
    cv.Cv2.Solve(A, B, X, cv.DecompTypes.SVD);
    // recover circle center (a, b) and radius r from the fitted coefficients
    a = -X.At<double>(0, 0) / 2;
    b = -X.At<double>(1, 0) / 2;
    r = Math.Sqrt(Math.Pow(a, 2) + Math.Pow(b, 2) - X.At<double>(2, 0));
    double[] data = { a, b, r };
    return (data);
}
/// <summary>
/// Normalizes input in place to the range [0, 1000] and mirrors the values
/// into MeshDataSeries (transposed: [z, x]).
/// </summary>
public void ApplyMinMaxCalc(cv.Mat input)
{
    double zMax = double.MinValue;
    double zMin = double.MaxValue;
    // NOTE(review): this pass reads At<double> while the write pass below
    // uses At<float>/Set<float> on the same Mat. Both cannot match the Mat's
    // element type at once — confirm the type and fix one of the accessors.
    for (int x = 0; x < xSize; ++x)
    {
        for (int z = 0; z < zSize; ++z)
        {
            if (zMax < input.At<double>(x, z))
            {
                zMax = input.At<double>(x, z);
            }
            if (zMin > input.At<double>(x, z))
            {
                zMin = input.At<double>(x, z);
            }
        }
    }

    // shift to zero, then scale so the full range maps to [0, 1000]
    for (int x = 0; x < xSize; ++x)
    {
        for (int z = 0; z < zSize; ++z)
        {
            input.Set<float>(x, z, input.At<float>(x, z) - (float)zMin);
            input.Set<float>(x, z, input.At<float>(x, z) * 1000.0f / (float)(zMax - zMin));
            MeshDataSeries[z, x] = input.Get<float>(x, z);
        }
    }
}
/// <summary>
/// Cuts `count` random width x height patches out of lenna.png, outlines each
/// patch on a preview image, shows the preview, and returns the patch clones.
/// </summary>
/// <param name="width">Patch width in pixels.</param>
/// <param name="height">Patch height in pixels.</param>
/// <param name="count">Number of patches to extract.</param>
/// <returns>Cloned sub-images suitable for stitching tests.</returns>
private static Mat[] SelectStitchingImages(int width, int height, int count)
{
    Mat source = new Mat(@"Data\Image\lenna.png", ImreadModes.Color);
    Mat result = source.Clone();

    var rand = new Random();
    var mats = new List<Mat>();
    for (int i = 0; i < count; i++)
    {
        // pick a random top-left corner that keeps the patch inside the image
        int x1 = rand.Next(source.Cols - width);
        int y1 = rand.Next(source.Rows - height);
        int x2 = x1 + width;
        int y2 = y1 + height;

        // draw the patch outline (4 red edges) on the preview
        result.Line(new Point(x1, y1), new Point(x1, y2), new Scalar(0, 0, 255));
        result.Line(new Point(x1, y2), new Point(x2, y2), new Scalar(0, 0, 255));
        result.Line(new Point(x2, y2), new Point(x2, y1), new Scalar(0, 0, 255));
        result.Line(new Point(x2, y1), new Point(x1, y1), new Scalar(0, 0, 255));

        // clone so the returned Mat owns its data independent of `source`
        Mat m = source[new Rect(x1, y1, width, height)];
        mats.Add(m.Clone());
    }

    using (new Window(result))
    {
        Cv2.WaitKey();
    }

    return mats.ToArray();
}
/// <summary>
/// Compares KAZE and AKAZE feature detection on the same grayscale image,
/// timing each and showing the detected keypoints side by side.
/// </summary>
public void Run()
{
    var gray = new Mat(FilePath.Image.Lenna, ImreadModes.GrayScale);

    var kaze = KAZE.Create();
    var akaze = AKAZE.Create();

    var kazeDescriptors = new Mat();
    var akazeDescriptors = new Mat();
    KeyPoint[] kazeKeyPoints = null, akazeKeyPoints = null;

    // time detection + description for each detector
    var kazeTime = MeasureTime(() =>
        kaze.DetectAndCompute(gray, null, out kazeKeyPoints, kazeDescriptors));
    var akazeTime = MeasureTime(() =>
        akaze.DetectAndCompute(gray, null, out akazeKeyPoints, akazeDescriptors));

    var dstKaze = new Mat();
    var dstAkaze = new Mat();
    Cv2.DrawKeypoints(gray, kazeKeyPoints, dstKaze);
    Cv2.DrawKeypoints(gray, akazeKeyPoints, dstAkaze);

    using (new Window(String.Format("KAZE [{0:F2}ms]", kazeTime.TotalMilliseconds), dstKaze))
    using (new Window(String.Format("AKAZE [{0:F2}ms]", akazeTime.TotalMilliseconds), dstAkaze))
    {
        Cv2.WaitKey();
    }
}
/// <summary>
/// Adds descriptors to a training set.
/// </summary>
/// <param name="descriptors">Descriptors to add to a training set; each row of
/// the matrix is one descriptor. The training set is clustered using
/// clustermethod to construct the vocabulary.</param>
public void Add(Mat descriptors)
{
    if (descriptors is null)
        throw new ArgumentNullException(nameof(descriptors));

    NativeMethods.features2d_BOWTrainer_add(ptr, descriptors.CvPtr);
    GC.KeepAlive(descriptors);
}
/// <summary>
/// Detects the first face in srcImage and returns a cropped byte view of it.
/// Returns null when no face is found or on any error (errors are swallowed).
/// </summary>
public byte[] Mark(Mat srcImage)
{
    try
    {
        var grayImage = new Mat();
        // NOTE(review): BGRA2GRAY assumes a 4-channel input — confirm that
        // srcImage actually carries an alpha channel (plain ImRead gives BGR).
        Cv2.CvtColor(srcImage, grayImage, ColorConversionCodes.BGRA2GRAY);
        Cv2.EqualizeHist(grayImage, grayImage);
        var cascade = new CascadeClassifier($@"{AppDomain.CurrentDomain.BaseDirectory}/Services/Data/haarcascade_frontalface_alt.xml");
        var faces = cascade.DetectMultiScale(
            grayImage,
            1.1,
            3,
            HaarDetectionType.DoRoughSearch | HaarDetectionType.ScaleImage
            );
        if (faces.Length < 1)
        {
            return (null);
        }
        // only the first detected face is cropped and returned
        var face = faces.FirstOrDefault();
        var image = new Image();
        var file = image.ConvertToByte(srcImage);
        return (Crop(file, face.X, face.Y, face.Width, face.Height));
    }
    catch (Exception e)
    {
        // deliberate best-effort: any failure is reported as "no face"
        return (null);
    }
}
/// <summary>
/// Detects faces in the image at `path` with a Haar cascade, collects facial
/// landmarks for each, raises DrawOnImage with the face rectangles, and
/// returns the number of faces found.
/// </summary>
private int OpenCVFaceDetector(string path)
{
    // uses openCv Library
    OpenCvSharp.CascadeClassifier faceClassifier = new OpenCvSharp.CascadeClassifier(@"./haarcascade/haarcascade_frontalface_alt.xml");
    OpenCvSharp.Mat result;
    Rect[] faces = new Rect[0];
    using (var src = new OpenCvSharp.Mat(path, OpenCvSharp.ImreadModes.Color))
    using (var gray = new OpenCvSharp.Mat())
    {
        result = src.Clone();
        Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY);

        // Detect faces
        faces = faceClassifier.DetectMultiScale(gray, 1.08, 2, OpenCvSharp.HaarDetectionType.ScaleImage);

        // convert each OpenCV rect to System.Drawing and collect landmarks
        List<System.Drawing.Rectangle> rfaces = new List<System.Drawing.Rectangle>();
        foreach (Rect face in faces)
        {
            System.Drawing.Rectangle r = new System.Drawing.Rectangle(face.X, face.Y, face.Width, face.Height);
            this.GetLandmarks(gray, face, rfaces);
            rfaces.Add(r);
        }
        DrawOnImage?.Invoke(rfaces.ToArray(), new System.Drawing.Size(result.Width, result.Height));
    }
    result.Dispose();
    return (faces.Length);
}
/// <summary>
/// Rotates src about its center by `angle` degrees with the given scale,
/// writing the result into dst (same size as src).
/// </summary>
/// <param name="src">Source image.</param>
/// <param name="dst">Destination image, written in place.</param>
/// <param name="angle">Rotation angle in degrees.</param>
/// <param name="scale">Isotropic scale factor.</param>
public void RotateImage(OpenCvSharp.Mat src, ref OpenCvSharp.Mat dst, double angle, double scale)
{
    var imageCenter = new Point2f(src.Cols / 2f, src.Rows / 2f);
    // Fix: dispose the 2x3 affine matrix — previously it was leaked and its
    // native memory only reclaimed at finalization.
    using (var rotationMat = Cv2.GetRotationMatrix2D(imageCenter, angle, scale))
    {
        Cv2.WarpAffine(src, dst, rotationMat, src.Size());
    }
}
/// <summary>
/// Detects BRISK keypoints on the grayscale Lenna image and draws each as a
/// green circle with an X (both diagonals) sized by the keypoint's scale.
/// </summary>
public void Run()
{
    var gray = new Mat(FilePath.Image.Lenna, ImreadModes.GrayScale);
    var dst = new Mat(FilePath.Image.Lenna, ImreadModes.Color);

    BRISK brisk = BRISK.Create();
    KeyPoint[] keypoints = brisk.Detect(gray);

    if (keypoints != null)
    {
        var color = new Scalar(0, 255, 0);
        foreach (KeyPoint kpt in keypoints)
        {
            // radius derived from the keypoint's diameter (Size)
            float r = kpt.Size / 2;
            Cv2.Circle(dst, kpt.Pt, (int)r, color);
            // draw both diagonals of the bounding square
            Cv2.Line(dst,
                new Point2f(kpt.Pt.X + r, kpt.Pt.Y + r),
                new Point2f(kpt.Pt.X - r, kpt.Pt.Y - r),
                color);
            Cv2.Line(dst,
                new Point2f(kpt.Pt.X - r, kpt.Pt.Y + r),
                new Point2f(kpt.Pt.X + r, kpt.Pt.Y - r),
                color);
        }
    }

    using (new Window("BRISK features", dst))
    {
        Cv2.WaitKey();
    }
}
// Per-frame callback: swaps the shared SourceFrame under its mutex, runs the
// configured render script, updates the displayed bitmap and elapsed-time
// counter, and (when permitted) starts a background frame-handler thread.
private void App_Frame(OpenCvSharp.Mat frame)
{
    // replace the shared frame, disposing the previous one, under the mutex
    this.SourceFrameLock.WaitOne();
    if (this.SourceFrame != null)
    {
        this.SourceFrame.Dispose();
    }
    this.SourceFrame = frame;
    this.SourceFrameLock.ReleaseMutex();

    this.ExecPython(this.OptionViewModel.Model.RenderScriptName, frame);
    this.Frame = BitmapExtension.Parse(frame);

    // accumulate elapsed time since the previous frame, then restart timing
    this._elapsedStopwatch.Stop();
    this.ElapsedTime = this.ElapsedTime.Add(TimeSpan.FromMilliseconds(this._elapsedStopwatch.ElapsedMilliseconds));
    this._elapsedStopwatch.Restart();

    // only launch the handler thread if the flag allows it (checked under lock)
    this._handleFrameThreadExecutableLock.WaitOne();
    if (this._handleFrameThreadExecutable)
    {
        var thread = new Thread(new ThreadStart(this.FrameHandlerRoutine));
        thread.Start();
    }
    this._handleFrameThreadExecutableLock.ReleaseMutex();
}
// Renders the outImage pixel buffer into two bitmaps: `paper` (centered in
// pictureBox1 for display) and `paper2` (exact output size), then round-trips
// paper2 through a temp JPEG to rebuild inCvImage.
void displayImage()
{
    // size the display canvas and the output-sized canvas
    paper = new Bitmap(pictureBox1.Size.Width, pictureBox1.Size.Height); // display canvas
    paper2 = new Bitmap(outW, outH);
    Color pen; // pen used to plot individual pixels
    for (int i = 0; i < outH; i++)
    {
        for (int k = 0; k < outW; k++)
        {
            byte r = outImage[RR, i, k]; // channel value
            byte g = outImage[GG, i, k]; // channel value
            byte b = outImage[BB, i, k]; // channel value
            pen = Color.FromArgb(r, g, b); // build the pixel color
            try
            {
                // plot centered on the display canvas, and 1:1 on paper2
                paper.SetPixel(k + (pictureBox1.Size.Width - outW) / 2, i + (pictureBox1.Size.Height - outH) / 2, pen);
                paper2.SetPixel(k, i, pen);
            }
            catch
            {
                // image larger than the picture box — abort with a message
                MessageBox.Show("이미지 사이즈가 너무 큽니다.");
                return;
            }
        }
    }
    paper2.Save("temp.jpg");
    // reload as inCvImage (note: JPEG round-trip is lossy)
    inCvImage = Cv2.ImRead("temp.jpg");
    // delete the temporary file
    System.IO.File.Delete("temp.jpg");
    pictureBox1.Image = paper; // show the display canvas
}
// Custom composition: builds one composite image per inspection face from
// groups of 4 source images (reflection removal, then contrast and brightness
// adjustment), reporting progress in the title bar; finally switches the view
// to the composite display.
void 自作合成処理()
{
    // requires exactly 4 source images per inspection face
    if (合成用素材.Length == 検査面数 * 4)
    {
        for (int num = 0; num < 検査面数; num++)
        {
            合成画像[num] = new OpenCvSharp.Mat(合成用素材[0].Height, 合成用素材[0].Width, MatType.CV_8UC1);
            // gather the 4 source images belonging to this face
            Mat[] images = new Mat[4];
            for (int i = 0; i < 4; i++)
            {
                images[i] = 合成用素材[num * 4 + i];
            }
            MyCV.自作反射光除去(images, ref 合成画像[num]);
            // contrast slope and brightness offset come from the UI text boxes
            MyCV.コントラスト調整(ref 合成画像[num], double.Parse(textBox_傾き.Text));
            MyCV.明るさ調整(ref 合成画像[num], double.Parse(textBox_明るさ.Text));
            this.Text = num * 100.0 / 検査面数 + "%"; // progress in title bar
        }
    }
    if (radioButton_合成.Checked)
    {
        表示画像更新();
    }
    radioButton_合成.Checked = true;
}
/// <summary>
/// Converts a grayscale Mat whose width is not a multiple of 4 to a Bitmap
/// and displays it in a modal form; Enter closes the form.
/// </summary>
public void ToBitmapGrayScale()
{
    Mat img = new Mat(FilePath.Image.Lenna511, ImreadModes.GrayScale); // width % 4 != 0
    Bitmap bitmap = BitmapConverter.ToBitmap(img);
    // Bitmap bitmap = img.ToBitmap();

    using (var form = new Form())
    using (var pb = new PictureBox())
    {
        pb.Image = bitmap;
        var size = new System.Drawing.Size(bitmap.Width, bitmap.Height);
        pb.ClientSize = size;
        form.ClientSize = size;
        form.Controls.Add(pb);
        form.KeyPreview = true;
        form.KeyDown += (sender, args) =>
        {
            // Bug fix: Keys is not a [Flags] enum, so HasFlag(Keys.Enter)
            // also matched any key whose code contains Enter's bits
            // (e.g. 'M' = 77 & 13 == 13). Compare for equality instead.
            if (args.KeyCode == Keys.Enter)
                ((Form)sender).Close();
        };
        form.Text = "Grayscale Mat to Bitmap Test";
        form.ShowDialog();
    }
}
/// <summary>
/// Demonstrates Cv2.SeamlessClone: blends a circular region of the source
/// image into the destination with each of the three cloning methods.
/// </summary>
public void Run()
{
    Mat src = new Mat(FilePath.Image.Girl, ImreadModes.Color);
    Mat dst = new Mat(FilePath.Image.Lenna, ImreadModes.Color);
    // resize the source to match the destination before cloning
    Mat src0 = src.Resize(dst.Size(), 0, 0, InterpolationFlags.Lanczos4);
    // white filled circle marks the region to clone
    Mat mask = Mat.Zeros(src0.Size(), MatType.CV_8UC3);
    mask.Circle(200, 200, 100, Scalar.White, -1);

    Mat blend1 = new Mat();
    Mat blend2 = new Mat();
    Mat blend3 = new Mat();
    Cv2.SeamlessClone(
        src0, dst, mask, new Point(260, 270), blend1,
        SeamlessCloneMethods.NormalClone);
    Cv2.SeamlessClone(
        src0, dst, mask, new Point(260, 270), blend2,
        SeamlessCloneMethods.MonochromeTransfer);
    Cv2.SeamlessClone(
        src0, dst, mask, new Point(260, 270), blend3,
        SeamlessCloneMethods.MixedClone);

    using (new Window("src", src0))
    using (new Window("dst", dst))
    using (new Window("mask", mask))
    using (new Window("blend NormalClone", blend1))
    using (new Window("blend MonochromeTransfer", blend2))
    using (new Window("blend MixedClone", blend3))
    {
        Cv2.WaitKey();
    }
}
/// <summary>
/// Filter an image with the specified label values.
/// </summary>
/// <param name="src">Source image.</param>
/// <param name="dst">Destination image.</param>
/// <param name="labelValues">Label values to keep; each must be in [0, LabelCount).</param>
/// <returns>Filtered image (same instance as dst).</returns>
/// <exception cref="ArgumentNullException">Any argument is null.</exception>
/// <exception cref="ArgumentException">labelValues is empty or out of range.</exception>
public Mat FilterByLabels(Mat src, Mat dst, IEnumerable<int> labelValues)
{
    if (src == null)
        throw new ArgumentNullException("src");
    if (dst == null)
        throw new ArgumentNullException("dst");
    if (labelValues == null)
        throw new ArgumentNullException("labelValues");
    int[] labelArray = EnumerableEx.ToArray(labelValues);
    if (labelArray.Length == 0)
        throw new ArgumentException("empty labelValues");

    foreach (int labelValue in labelArray)
    {
        if (labelValue < 0 || labelValue >= LabelCount)
            throw new ArgumentException("0 <= x < LabelCount");
    }

    // OR the per-label masks into one combined mask, then copy through it
    using (Mat mask = GetLabelMask(labelArray[0]))
    {
        for (int i = 1; i < labelArray.Length; i++)
        {
            using (var maskI = GetLabelMask(labelArray[i]))
            {
                Cv2.BitwiseOr(mask, maskI, mask);
            }
        }
        src.CopyTo(dst, mask);
        return dst;
    }
}
/// <summary>
/// Draws each keypoint as a red circle plus an X (both diagonals) sized by
/// the keypoint scale, then shows the annotated image.
/// </summary>
/// <param name="filename">Path of the image to annotate.</param>
/// <param name="keypoints">Keypoints to draw.</param>
private void DrawFeatures(string filename, IEnumerable<KeyPoint> keypoints)
{
    using (var dst = new Mat(filename, ImreadModes.GrayScale))
    {
        int thickness = 2;
        foreach (KeyPoint kp in keypoints)
        {
            float r = kp.Size / 2;
            Cv2.Circle(dst, kp.Pt, (int)r, Scalar.Red, thickness);
            Cv2.Line(dst,
                new Point2f(kp.Pt.X + r, kp.Pt.Y + r),
                new Point2f(kp.Pt.X - r, kp.Pt.Y - r),
                Scalar.Red, thickness);
            // Bug fix: the second line previously retraced the same diagonal
            // ((X-r,Y-r) -> (X+r,Y+r)); draw the anti-diagonal to form an X.
            Cv2.Line(dst,
                new Point2f(kp.Pt.X - r, kp.Pt.Y + r),
                new Point2f(kp.Pt.X + r, kp.Pt.Y - r),
                Scalar.Red, thickness);
        }
        using (new Window("src w. keypoints", WindowMode.FreeRatio, dst))
        {
            Cv2.WaitKey();
        }
    }
}
// the function TestPerformance() measures the running time of
// the nearest neighbor search in an ordered dataset of points;
//
// the test emulates the computation of the distance transform;
// it calculates the minimum distance from each point in
// the given rectangle to a point in the input dataset;
//
// returns a CV_32F Mat of size rect_height x rect_width where
// dist(y, x) holds the minimum distance from (x, y) to the dataset.
static public cv.Mat TestPerformance(
    int rect_width,
    int rect_height,
    List<Point> test_points)
{
    cv.Mat dist = new cv.Mat(rect_height, rect_width, cv.MatType.CV_32F);
    PointComparer pnt_comparer = new PointComparer();
    // the search requires a sorted dataset (sorted in place)
    test_points.Sort(pnt_comparer);

    Stopwatch watch = new Stopwatch();
    watch.Start();
    for (int x = 0; x < rect_width; ++x)
    {
        for (int y = 0; y < rect_height; ++y)
        {
            float nextVal = (float)MinDistanceOrderedSet(new Point(x, y), pnt_comparer, test_points);
            dist.Set<float>(y, x, nextVal);
        }
    }
    watch.Stop();
    Console.WriteLine("execution time of ordered dataset algorithm = {0} ms ;", watch.ElapsedMilliseconds);
    return (dist);
}
/// <summary>
/// Opens either a webcam (purely numeric argument) or a video stream (URL),
/// requests 1280x720 for webcams, skips warm-up frames, and validates the
/// first two captured frames.
/// </summary>
/// <param name="cam_or_url">Webcam index (digits only) or stream URL.</param>
public OpencvSource(string cam_or_url)
{
    MAssert.Check(cam_or_url != string.Empty);

    // treat the argument as a stream URL if it contains any non-digit
    bool stream = false;
    for (int i = 0; i < cam_or_url.Length; ++i)
    {
        stream = stream || (cam_or_url[i] < '0') || (cam_or_url[i] > '9');
    }

    if (stream)
    {
        // open stream
        Console.WriteLine("opening stream '{0}'", cam_or_url);
        capturer = new OpenCvSharp.VideoCapture(cam_or_url);
    }
    else
    {
        // convert to integer
        int cam_id = Convert.ToInt32(cam_or_url, 10);
        MAssert.Check(cam_id >= 0, "wrong webcam id");

        // open webcam
        Console.WriteLine("opening webcam {0}", cam_id);
        capturer = new OpenCvSharp.VideoCapture(cam_id);
        MAssert.Check(capturer.IsOpened(), "webcam not opened");

        // set resolution
        capturer.Set(OpenCvSharp.CaptureProperty.FrameWidth, 1280);
        capturer.Set(OpenCvSharp.CaptureProperty.FrameHeight, 720);
        MAssert.Check(capturer.IsOpened(), "webcam not opened");
    }

    // sometimes the first few frames can be empty even if the camera is good,
    // so skip a few frames (disposing each — previously they were leaked)
    for (int i = 0; i < 10; ++i)
    {
        using (OpenCvSharp.Mat frame = capturer.RetrieveMat())
        {
        }
    }

    // check the first two frames
    OpenCvSharp.Mat image1 = new OpenCvSharp.Mat(), image2 = new OpenCvSharp.Mat();
    capturer.Read(image1);
    capturer.Read(image2);
    Console.WriteLine("image1 size: {0}", image1.Size());
    // Bug fix: this line previously printed the "image1 size" label for image2.
    Console.WriteLine("image2 size: {0}", image2.Size());

    MAssert.Check(
        !image1.Empty() && !image2.Empty() &&
        image1.Size() == image2.Size() &&
        image1.Type() == OpenCvSharp.MatType.CV_8UC3 &&
        image2.Type() == OpenCvSharp.MatType.CV_8UC3,
        "error opening webcam or stream");
}
/// <summary>
/// Demonstrates OpenCV's photo-module filters on one image: edge-preserving
/// smoothing (both variants), detail enhancement, pencil sketch, and
/// stylization.
/// </summary>
public void Run()
{
    Mat src = new Mat(FilePath.Image.Fruits, ImreadModes.Color);

    Mat normconv = new Mat(), recursFildered = new Mat();
    Cv2.EdgePreservingFilter(src, normconv, EdgePreservingMethods.NormconvFilter);
    Cv2.EdgePreservingFilter(src, recursFildered, EdgePreservingMethods.RecursFilter);

    Mat detailEnhance = new Mat();
    Cv2.DetailEnhance(src, detailEnhance);

    // pencil1 = grayscale sketch, pencil2 = color sketch
    Mat pencil1 = new Mat(), pencil2 = new Mat();
    Cv2.PencilSketch(src, pencil1, pencil2);

    Mat stylized = new Mat();
    Cv2.Stylization(src, stylized);

    using (new Window("src", src))
    using (new Window("edgePreservingFilter - NormconvFilter", normconv))
    using (new Window("edgePreservingFilter - RecursFilter", recursFildered))
    using (new Window("detailEnhance", detailEnhance))
    using (new Window("pencilSketch grayscale", pencil1))
    using (new Window("pencilSketch color", pencil2))
    using (new Window("stylized", stylized))
    {
        Cv2.WaitKey();
    }
}
// Draws text twice for better readability on a live webcam frame:
// a thick white pass as a background halo, then a thin black pass on top.
void puttext(OpenCvSharp.Mat image, string text, OpenCvSharp.Point2f position)
{
    const double fontScale = 0.7;
    var font = OpenCvSharp.HersheyFonts.HersheyDuplex;

    // white background
    OpenCvSharp.Cv2.PutText(
        image,
        text,
        position,
        font,
        fontScale,
        OpenCvSharp.Scalar.All(255),
        5,
        OpenCvSharp.LineTypes.AntiAlias);

    // black text
    OpenCvSharp.Cv2.PutText(
        image,
        text,
        position,
        font,
        fontScale,
        OpenCvSharp.Scalar.All(0),
        1,
        OpenCvSharp.LineTypes.AntiAlias);
}
/// <summary>
/// Extracts SURF features from both images, matches them with a brute-force
/// matcher and a FLANN-based matcher, and shows both match visualizations.
/// </summary>
private void MatchBySurf(Mat src1, Mat src2)
{
    var gray1 = new Mat();
    var gray2 = new Mat();

    Cv2.CvtColor(src1, gray1, ColorConversionCodes.BGR2GRAY);
    Cv2.CvtColor(src2, gray2, ColorConversionCodes.BGR2GRAY);

    var surf = SURF.Create(500, 4, 2, true);

    // Detect the keypoints and generate their descriptors using SURF
    // NOTE(review): this calls Compute with an out keypoints array — confirm
    // this overload detects as well as describes (vs. DetectAndCompute).
    KeyPoint[] keypoints1, keypoints2;
    var descriptors1 = new MatOfFloat();
    var descriptors2 = new MatOfFloat();
    surf.Compute(gray1, null, out keypoints1, descriptors1);
    surf.Compute(gray2, null, out keypoints2, descriptors2);

    // Match descriptor vectors with both matcher types
    var bfMatcher = new BFMatcher(NormTypes.L2, false);
    var flannMatcher = new FlannBasedMatcher();
    DMatch[] bfMatches = bfMatcher.Match(descriptors1, descriptors2);
    DMatch[] flannMatches = flannMatcher.Match(descriptors1, descriptors2);

    // Draw matches
    var bfView = new Mat();
    Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, bfMatches, bfView);
    var flannView = new Mat();
    Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, flannMatches, flannView);

    using (new Window("SURF matching (by BFMather)", WindowMode.AutoSize, bfView))
    using (new Window("SURF matching (by FlannBasedMatcher)", WindowMode.AutoSize, flannView))
    {
        Cv2.WaitKey();
    }
}
/// <summary>
/// Locates four known ArUco markers framing a region of interest in src,
/// draws the detections into detectedMarkers, and warps the quadrilateral
/// they bound to a 1024x1024 top-down view stored in normalizedImage.
/// Returns early (leaving normalizedImage unset) if any marker is missing.
/// </summary>
public void Run(cv.Mat src)
{
    // The locations of the markers in the image at FilePath.Image.Aruco.
    const int upperLeftMarkerId = 160;
    const int upperRightMarkerId = 268;
    const int lowerRightMarkerId = 176;
    const int lowerLeftMarkerId = 168;

    var detectorParameters = DetectorParameters.Create();
    detectorParameters.CornerRefinementMethod = CornerRefineMethod.Subpix;
    detectorParameters.CornerRefinementWinSize = 9;

    var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict4X4_1000);
    CvAruco.DetectMarkers(src, dictionary, out var corners, out var ids, detectorParameters, out var rejectedPoints);
    detectedMarkers = src.Clone();
    CvAruco.DrawDetectedMarkers(detectedMarkers, corners, ids, cv.Scalar.White);

    // Find the index of the four markers in the ids array. We'll use this same index into the
    // corners array to find the corners of each marker.
    var upperLeftCornerIndex = Array.FindIndex(ids, id => id == upperLeftMarkerId);
    var upperRightCornerIndex = Array.FindIndex(ids, id => id == upperRightMarkerId);
    var lowerRightCornerIndex = Array.FindIndex(ids, id => id == lowerRightMarkerId);
    var lowerLeftCornerIndex = Array.FindIndex(ids, id => id == lowerLeftMarkerId);

    // Make sure we found all four markers.
    if (upperLeftCornerIndex < 0 || upperRightCornerIndex < 0 ||
        lowerRightCornerIndex < 0 || lowerLeftCornerIndex < 0)
    {
        return;
    }

    // Marker corners are stored clockwise beginning with the upper-left corner.
    // Get the first (upper-left) corner of the upper-left marker.
    var upperLeftPixel = corners[upperLeftCornerIndex][0];
    // Get the second (upper-right) corner of the upper-right marker.
    var upperRightPixel = corners[upperRightCornerIndex][1];
    // Get the third (lower-right) corner of the lower-right marker.
    var lowerRightPixel = corners[lowerRightCornerIndex][2];
    // Get the fourth (lower-left) corner of the lower-left marker.
    var lowerLeftPixel = corners[lowerLeftCornerIndex][3];

    // Create coordinates for passing to GetPerspectiveTransform
    var sourceCoordinates = new List<cv.Point2f>
    {
        upperLeftPixel, upperRightPixel, lowerRightPixel, lowerLeftPixel
    };
    var destinationCoordinates = new List<cv.Point2f>
    {
        new cv.Point2f(0, 0),
        new cv.Point2f(1024, 0),
        new cv.Point2f(1024, 1024),
        new cv.Point2f(0, 1024),
    };

    var transform = cv.Cv2.GetPerspectiveTransform(sourceCoordinates, destinationCoordinates);
    normalizedImage = new cv.Mat();
    cv.Cv2.WarpPerspective(src, normalizedImage, transform, new cv.Size(1024, 1024));
}
/// <summary>
/// Runs the SSD detector over the given frame and draws a red rectangle for
/// every detection above the configured probability threshold.
/// </summary>
/// <param name="mat">Input frame (_width x _height); drawn on in place.</param>
/// <returns>The same Mat with detection rectangles drawn.</returns>
public Mat Detect(Mat mat)
{
    byte[] array = new byte[_width * _height * mat.ElemSize()];
    Marshal.Copy(mat.DataStart, array, 0, array.Length);
    using (Image<Bgr, byte> image1 = new Image<Bgr, byte>(_width, _height))
    {
        image1.Bytes = array;
        var frame = image1.Mat;

        // Bug fix: the network outputs normalized [0,1] coordinates, which
        // must be scaled by the size of the image we draw on. Previously this
        // was hard-coded to 640x480 regardless of the actual frame size.
        int cols = mat.Cols;
        int rows = mat.Rows;

        _net.SetInput(DnnInvoke.BlobFromImage(frame, 1, new System.Drawing.Size(300, 300), default(MCvScalar), false, false));
        using (Emgu.CV.Mat matt = _net.Forward())
        {
            // output layout: [1, 1, N, 7]; per detection:
            // [_, classId, confidence, x1, y1, x2, y2]
            float[,,,] flt = (float[,,,])matt.GetData();
            for (int x = 0; x < flt.GetLength(2); x++)
            {
                if (flt[0, 0, x, 2] > _probability)
                {
                    int X1 = Convert.ToInt32(flt[0, 0, x, 3] * cols);
                    int Y1 = Convert.ToInt32(flt[0, 0, x, 4] * rows);
                    int X2 = Convert.ToInt32(flt[0, 0, x, 5] * cols);
                    int Y2 = Convert.ToInt32(flt[0, 0, x, 6] * rows);
                    mat.Rectangle(new OpenCvSharp.Rect(X1, Y1, X2 - X1, Y2 - Y1), Scalar.Red);
                }
            }
        }
    }
    return (mat);
}
// Shifts the first channel (Item0) of every pixel so that the average of 9
// sampled pixels reaches the target value 目標, clamping results to [0, 255].
public void brightness(ref Mat img, double 目標)
{
    // brightness adjustment based on 9 sampled pixels
    int width = img.Width;
    int height = img.Height;
    // NOTE(review): despite the names, these are width/5 and height/5, not
    // the image center — confirm the intended sample location.
    int center_x = width / 5;
    int center_y = height / 5;
    double[] vals = new double[9];
    double average = 0;
    double diff = 0;

    // NOTE(review): assumes writes through this indexer propagate to img —
    // verify MatOfByte3(img) wraps rather than copies the data.
    var indexer = new MatOfByte3(img).GetIndexer();

    // sample a 3x3 grid spaced 10 px apart around (center_x, center_y)
    vals[0] = indexer[center_y - 10, center_x - 10].Item0;
    vals[3] = indexer[center_y - 10, center_x].Item0;
    vals[6] = indexer[center_y - 10, center_x + 10].Item0;
    vals[1] = indexer[center_y, center_x - 10].Item0;
    vals[4] = indexer[center_y, center_x].Item0;
    vals[7] = indexer[center_y, center_x + 10].Item0;
    vals[2] = indexer[center_y + 10, center_x - 10].Item0;
    vals[5] = indexer[center_y + 10, center_x].Item0;
    vals[8] = indexer[center_y + 10, center_x + 10].Item0;

    for (int num = 0; num < 9; num++)
        average += vals[num];
    average = average / 9.0;
    diff = 目標 - average;

    // apply the offset to channel 0 of every pixel, clamped to [0, 255]
    for (int x = 0; x < width; x++)
        for (int y = 0; y < height; y++)
        {
            Vec3b color = indexer[y, x];
            double val = color.Item0 + diff;
            if (val > 255)
                color.Item0 = 255;
            else if (val < 0)
                color.Item0 = 0;
            else
                color.Item0 = (byte)val;
            indexer[y, x] = color;
        }
    indexer = null;
}
// Capture callback: shows the raw and thresholded frames in the two picture
// boxes, logs all detected gems and walls, and snapshots them into the
// currentGems/currentWalls arrays.
public void myEventHandler(OpenCvSharp.Mat frame, OpenCvSharp.Mat thresholded, List<Wall> walls, List<Gem> gems)
{
    // NOTE(review): previously assigned PictureBox images are not disposed
    // before being replaced — confirm GC pressure is acceptable here.
    System.Drawing.Bitmap image1 = BitmapConverter.ToBitmap(frame);
    pictureBox1.Image = image1;
    System.Drawing.Bitmap image2 = BitmapConverter.ToBitmap(thresholded);
    pictureBox2.Image = image2;

    Debug.WriteLine("Capture:");
    Debug.WriteLine($"Found gems : {gems.Count}");
    foreach (var gem in gems)
    {
        Debug.Write($"X : {gem.Position.X} ");
        Debug.WriteLine($"Y : {gem.Position.Y}");
    }
    Debug.WriteLine($"");
    Debug.WriteLine($"Found walls : {walls.Count}");
    foreach (var wall in walls)
    {
        Debug.Write($"X : {wall.StartPosition.X} ");
        Debug.WriteLine($"Y : {wall.StartPosition.Y}");
    }
    Debug.WriteLine("");
    Debug.WriteLine("");

    // snapshot the lists so later mutations don't affect the stored state
    currentGems = new Gem[gems.Count];
    currentWalls = new Wall[walls.Count];
    gems.CopyTo(currentGems);
    walls.CopyTo(currentWalls);
}
/// <summary>
/// Show the inferred image.
/// </summary>
/// <param name="imageData">Raw BGR pixel data (imageHeight x imageWidth x 3,
/// row stride 3 * imageWidth) of the inferred image.</param>
static void ShowImage(byte[] imageData)
{
    // wrap the raw buffer without copying; blocks until a key is pressed
    var mat = new OpenCvSharp.Mat(imageHeight, imageWidth, OpenCvSharp.MatType.CV_8UC3, imageData, 3 * imageWidth);
    Cv2.ImShow("Image With Style Transfer", mat);
    Cv2.WaitKey(0);
}
/// <summary>
/// Runs the OpenCV DNN (Caffe res10 SSD) face detector on the image at path.
/// The detection post-processing is still commented out, so this currently
/// always returns 0.
/// </summary>
/// <param name="path">Path of the image to analyze.</param>
/// <returns>Number of detected faces (currently always 0).</returns>
private int OpenCVDeepLearningDetector(string path)
{
    // uses OpenCV's dnn module
    //https://medium.com/@vinuvish/face-detection-with-opencv-and-deep-learning-90bff9028fa8
    string prototextPath = @"./Dnn/deploy.prototxt";
    string caffeModelPath = @"./Dnn/res10_300x300_ssd_iter_140000.caffemodel";
    //// load the model;
    using (var net = OpenCvSharp.Dnn.CvDnn.ReadNetFromCaffe(prototxt: prototextPath, caffeModel: caffeModelPath))
    using (OpenCvSharp.Mat image = Cv2.ImRead(path))
    {
        // get the original image size
        OpenCvSharp.Size imageSize = image.Size();
        // Bug fix: the res10 SSD model expects a 300x300 input (as the file
        // name says); this was 299x299, degrading detection accuracy.
        OpenCvSharp.Size size = new OpenCvSharp.Size(300, 300);
        // mean BGR values subtracted before inference (per the reference
        // implementation for this model)
        OpenCvSharp.Scalar mcvScalar = new OpenCvSharp.Scalar(104.0, 177.0, 123.0);
        using (var blob = OpenCvSharp.Dnn.CvDnn.BlobFromImage(image: image, scaleFactor: 1, size: size, mean: mcvScalar, swapRB: true))
        {
            net.SetInput(blob, "data");
            using (OpenCvSharp.Mat detections = net.Forward())
            {
                // convert the detected values to a faces object that we can use to
                // draw rectangles.
                List<ConfidenceRect> Faces = new List<ConfidenceRect>();
                //var rows = detections.SizeOfDimension[2];
                //Array ans = detections.GetData();
                //for (int n = 0; n < rows; n++)
                //{
                //    object confidence = ans.GetValue(0, 0, n, 2);
                //    object x1 = ans.GetValue(0, 0, n, 3);
                //    object y1 = ans.GetValue(0, 0, n, 4);
                //    object x2 = ans.GetValue(0, 0, n, 5);
                //    object y2 = ans.GetValue(0, 0, n, 6);
                //    ConfidenceRect cr = new ConfidenceRect(confidence, x1, y1, x2, y2, imageSize);
                //    if (cr.Confidence > 0)
                //    {
                //        Debug.WriteLine($"Confidence {cr.Confidence}");
                //    }
                //    if (cr.Confidence > Confidence)
                //    {
                //        Faces.Add(cr);
                //    }
                //}
                //// convert to a writeableBitmap
                //WriteableBitmap writeableBitmap = new WriteableBitmap(ImageSource);
                //ImageSource = ConvertWriteableBitmapToBitmapImage(writeableBitmap);
                //OnPropertyChanged("ImageSource");
                //DrawDnnOnImage?.Invoke(Faces, imageSize);
                //return Faces.Count.ToString();
            }
        }
    }
    return (0);
}
/// <summary>
/// Wraps an Emgu.CV Mat as an OpenCvSharp Mat via its native pointer.
/// NOTE(review): both Mats share the same native cv::Mat — the Emgu Mat must
/// remain alive (and undisposed) while the returned Mat is in use.
/// </summary>
/// <param name="emguMat">Source Emgu.CV Mat.</param>
/// <returns>OpenCvSharp Mat sharing the same native data.</returns>
public static OpenCvSharp.Mat MatEmguToOpenCVSharp(Emgu.CV.Mat emguMat)
{
    #region 正在应用,Emgu指针,new OpenCvSharp.Mat(IntPtr)
    var ptrMat = new OpenCvSharp.Mat(emguMat.Ptr);
    return (ptrMat);
    #endregion
}
/// <summary>
/// Creates an OpenCV version of a ZED Mat.
/// </summary>
/// <param name="zedmat">Source ZED Mat.</param>
/// <param name="zedmattype">Type of ZED Mat - data type and channel number.</param>
/// <returns>OpenCV Mat wrapping the ZED Mat's buffer (no copy — the ZED Mat
/// must outlive the returned Mat).</returns>
private static OpenCvSharp.Mat SLMat2CVMat(ref sl.Mat zedmat, MAT_TYPE zedmattype)
{
    // Convert SL Mat type (sl.MAT_TYPE) to the equivalent OpenCV type
    int cvmattype = SLMatType2CVMatType(zedmattype);
    // Wraps the existing buffer via GetPtr() — data is shared, not copied.
    OpenCvSharp.Mat cvmat = new OpenCvSharp.Mat(zedmat.GetHeight(), zedmat.GetWidth(), cvmattype, zedmat.GetPtr());
    return (cvmat);
}
/// <summary>
/// Useful for converting CV Mats to raw image bytes.
/// </summary>
/// <param name="img">Source Mat. NOTE(review): assumes the Mat is continuous
/// (no row padding) — confirm for ROI/submatrix inputs.</param>
/// <returns>Raw pixel bytes of the Mat.</returns>
public static byte[] ConvertMatToBytes(OpenCvSharp.Mat img)
{
    // Bug fix: use ElemSize() (bytes per pixel including depth) rather than
    // Channels() — Channels() undercounts for non-8-bit Mats (e.g. CV_16UC1).
    int renderSize = img.Width * img.Height * (int)img.ElemSize();
    byte[] ret = new byte[renderSize];
    Marshal.Copy(img.Data, ret, 0, renderSize);
    return (ret);
}
/// <summary>
/// Converts an OpenCvSharp Mat to an Emgu.CV Mat via the native pointer
/// (CvInvoke.CvArrToMat with copyData = true, so the result owns its data).
/// </summary>
/// <param name="opcvsMat">Source OpenCvSharp Mat.</param>
/// <returns>Equivalent Emgu.CV Mat.</returns>
public static Emgu.CV.Mat MatOpenCVSharpToEmgu(OpenCvSharp.Mat opcvsMat)
{
    #region 正在应用,OpenCvSharp CvPtr指针 Emgu CvArrToMat
    var emptr = Emgu.CV.CvInvoke.CvArrToMat(opcvsMat.CvPtr, true);
    return (emptr);
    #endregion
}
/// <summary>
/// Applies a Gaussian blur with a square kernel of the given size, coercing
/// the size to a valid positive odd value as GaussianBlur requires.
/// </summary>
/// <param name="color">Source image.</param>
/// <param name="result1">Destination image.</param>
/// <param name="kernelSize">Requested kernel size; even values are reduced
/// by one, and values below 1 are clamped to 1 (identity-like blur).</param>
public void Run(cv.Mat color, cv.Mat result1, int kernelSize)
{
    if (kernelSize % 2 == 0)
    {
        kernelSize -= 1; // kernel size must be odd
    }
    // Fix: guard against non-positive sizes (e.g. an input of 0 previously
    // became -1 and was passed straight to GaussianBlur, which rejects it).
    if (kernelSize < 1)
    {
        kernelSize = 1;
    }
    cv.Cv2.GaussianBlur(color, result1, new cv.Size(kernelSize, kernelSize), 0, 0);
}
/// <summary>
/// Initialization: converts the image for display and stores it as PNG-encoded
/// bytes in ImageData.
/// </summary>
/// <param name="image">Source image to proxy.</param>
public MatProxy(Mat image)
{
    using (var converted = new Mat())
    {
        Cv2.ConvertImage(image, converted);
        ImageData = converted.ToBytes(".png");
    }
}
/// <summary>
/// Loads the two match sample images and runs SIFT and SURF matching on them.
/// </summary>
public void Run()
{
    Mat src1 = new Mat(FilePath.Image.Match1, ImreadModes.Color);
    Mat src2 = new Mat(FilePath.Image.Match2, ImreadModes.Color);

    MatchBySift(src1, src2);
    MatchBySurf(src1, src2);
}
/// <summary>
/// Sets a visual vocabulary.
/// </summary>
/// <param name="vocabulary">Vocabulary (can be trained using the inheritor of
/// BOWTrainer). Each row of the vocabulary is a visual word (cluster center).</param>
public void SetVocabulary(Mat vocabulary)
{
    if (IsDisposed)
        throw new ObjectDisposedException(GetType().Name);
    if (vocabulary is null)
        throw new ArgumentNullException(nameof(vocabulary));

    NativeMethods.features2d_BOWImgDescriptorExtractor_setVocabulary(ptr, vocabulary.CvPtr);
    GC.KeepAlive(vocabulary);
}
/// <summary>
/// Compares the given image file against the images on ImageStack by
/// template matching and returns the id of the best match.
/// </summary>
/// <param name="fileName">Path of the image file to look up.</param>
/// <param name="fullScan">
/// When false, a hash-level match or the first score above <c>thresh</c>
/// returns immediately; when true, every stacked image is scored and the
/// best id wins.
/// </param>
/// <returns>The matching image id, or -1 when the file cannot be loaded or nothing matches.</returns>
public int CompareFile(string fileName, bool fullScan = false)
{
    ImageInfo isrc = new ImageInfo();
    if (!isrc.SetData(fileName))
    {
        isrc.Dispose(); // the original leaked isrc on this failure path
        return (-1);
    }

    try
    {
        // Fast path: exact match at the hash level.
        if (!fullScan)
        {
            int itp = CompareHash(isrc);
            if (itp >= 0)
            {
                return (itp);
            }
        }

        int idx = -1;
        double maxVal = 0;
        using (Mat src = Mat.FromStream(isrc.DataStream, ImreadModes.AnyColor))
        {
            foreach (ImageInfo ii in ImageStack)
            {
                // The original leaked m/roi/res on the early-return path below,
                // and never disposed res at all; using-blocks fix both.
                using (OpenCvSharp.Mat m = OpenCvSharp.Mat.FromStream(ii.DataStream, ImreadModes.AnyColor))
                // Clip the template so it is never larger than src in either dimension.
                using (OpenCvSharp.Mat roi = m[0, src.Height > m.Height ? m.Height : src.Height,
                                               0, src.Width > m.Width ? m.Width : src.Width])
                using (OpenCvSharp.Mat res = new Mat())
                {
                    Cv2.MatchTemplate(src, roi, res, TemplateMatchModes.CCoeffNormed);
                    double min, max;
                    Cv2.MinMaxLoc(res, out min, out max);
                    if (maxVal < max)
                    {
                        idx = ii.id;
                        maxVal = max;
                    }
                    // Early out on a good-enough score.
                    if (!fullScan && max > thresh)
                    {
                        return (ii.id);
                    }
                }
            }
        }
        return (idx);
    }
    finally
    {
        isrc.Dispose();
    }
}
/// <summary>
/// Creates a detector using the application's face and eye cascade files
/// and the given mask.
/// </summary>
/// <param name="mask">Mask image handed to SetMask.</param>
public TargetDetector(Mat mask)
{
    Cascade = new CascadeClassifier(App.FaceCascadeName);
    EyeCascade = new CascadeClassifier(App.EyeCascadeName);
    SetMask(mask);
    // Default detection tuning values.
    Scale = 1.04;
    ScaleFactor = 1.3;
    MinNeighbors = 2;
}
/// <summary>
/// Background capture loop: repeatedly grabs the screen into a reused Bitmap
/// and publishes the converted OpenCV Mat through the shared <c>mat</c> field
/// until <c>isQuitting</c> is set by another thread.
/// </summary>
/// <remarks>
/// NOTE(review): each iteration assigns a fresh Mat to the shared <c>mat</c>
/// field without disposing the previous one — likely native-memory leak — and
/// <c>bmp</c> is never disposed. Ownership of <c>mat</c>/<c>currentFrame</c>
/// by a consumer thread isn't visible from here; verify before changing.
/// </remarks>
private static void ThreadingMethod()
{
    Bitmap bmp = new Bitmap(width, height, PixelFormat.Format32bppArgb);
    while (!isQuitting)
    {
        // captureScreen takes bmp by ref — presumably reusing the buffer; confirm.
        currentFrame = captureScreen(ref bmp, false);
        mat = OpenCvSharp.Extensions.BitmapConverter.ToMat(currentFrame);
        Thread.Sleep(1); // yield briefly between captures
    }
}
/// <summary>
/// Saves an image to a specified file.
/// </summary>
/// <param name="fileName">Name of the file.</param>
/// <param name="img">Image to be saved.</param>
/// <param name="prms">Format-specific save parameters encoded as pairs</param>
/// <returns>True when the native imwrite call reports success.</returns>
/// <exception cref="ArgumentNullException">fileName is null/empty or img is null.</exception>
public static bool ImWrite(string fileName, Mat img, int[] prms = null)
{
    if (string.IsNullOrEmpty(fileName))
        throw new ArgumentNullException(nameof(fileName)); // refactor-safe name (was a bare string literal)
    if (img == null)
        throw new ArgumentNullException(nameof(img));
    if (prms == null)
        prms = new int[0]; // native call expects a real (possibly empty) array
    return NativeMethods.imgcodecs_imwrite(fileName, img.CvPtr, prms, prms.Length) != 0;
}
/// <summary>
/// Straightens the image in place: measures the current skew angle and
/// rotates the image by that angle (scale factor 1).
/// </summary>
/// <param name="a">Image to deskew; replaced with the rotated result.</param>
private void DeskewImage(ref OpenCvSharp.Mat a)
{
    double skewAngle = GetSkewAngle();
    RotateImage(a, ref a, skewAngle, 1);
}
/// <summary>
/// Detects corner features with the Harris-enabled good-features-to-track
/// detector and wraps them as KeyPoints.
/// </summary>
/// <param name="filename">Path of the image to analyze (loaded as grayscale).</param>
/// <returns>Up to 100 detected corners as KeyPoints (size 100).</returns>
private IEnumerable<KeyPoint> FindFeatures_HarrisCornerDetector(string filename)
{
    using (var src = new Mat(filename, ImreadModes.GrayScale))
    {
        // find interesting corners (useHarrisDetector: true)
        Point2f[] corners = src.GoodFeaturesToTrack(100, 0.01, 100, null, 15, true, 1);
        // Materialize eagerly: the original returned the lazy Select, which
        // would be re-projected on every enumeration by callers.
        return corners.Select(s => new KeyPoint(s, 100)).ToArray();
    }
}
/// <summary>
/// Feeds the raw pixels of <paramref name="src"/> to the dlib detector:
/// copies the Mat data into a byte buffer, loads it as a dlib image,
/// upsamples one pyramid level, and stores the detections in <c>rects</c>.
/// </summary>
/// <param name="src">Source image; assumed continuous with row stride Width * ElemSize() — confirm.</param>
public void Run(cv.Mat src)
{
    int rowStride = src.Width * src.ElemSize();
    var pixels = new byte[rowStride * src.Height];
    Marshal.Copy(src.Data, pixels, 0, pixels.Length);

    using (var dlibImage = Dlib.LoadImageData<byte>(pixels, (uint)src.Height, (uint)src.Width, (uint)rowStride))
    {
        Dlib.PyramidUp(dlibImage);
        rects = detector.Operator(dlibImage);
    }
}
/// <summary>
/// Converts the OpenCV points into the algorithm's Point type, runs the
/// ordered-list performance test at the source image's size, and replaces
/// <paramref name="src"/> with the rendered result.
/// </summary>
/// <param name="src">Input image (only its size is read); receives the output image.</param>
/// <param name="points">Sample points to feed the test.</param>
public void Run(ref cv.Mat src, List <cv.Point> points)
{
    var converted = new List<Point>(points.Count);
    foreach (cv.Point p in points)
    {
        converted.Add(new Point(p.X, p.Y));
    }
    src = AlgoOrderedList.TestPerformance(src.Width, src.Height, converted);
}
/// <summary>
/// Loads a multi-page image from a file.
/// </summary>
/// <param name="filename">Name of file to be loaded.</param>
/// <param name="mats">A vector of Mat objects holding each page, if more than one.</param>
/// <param name="flags">Flag that can take values of @ref cv::ImreadModes, default with IMREAD_ANYCOLOR.</param>
/// <returns>True when the native imreadmulti call reports success.</returns>
/// <exception cref="ArgumentNullException">filename is null.</exception>
public static bool ImReadMulti(string filename, out Mat[] mats, ImreadModes flags = ImreadModes.AnyColor)
{
    if (filename == null)
        throw new ArgumentNullException(nameof(filename)); // refactor-safe name (was a bare string literal)
    using (var matsVec = new VectorOfMat())
    {
        int ret = NativeMethods.imgcodecs_imreadmulti(filename, matsVec.CvPtr, (int) flags);
        // Copy pages out before the native vector is disposed.
        mats = matsVec.ToArray();
        return ret != 0;
    }
}
// grey scale comparison of translated image
/// <summary>
/// Loads the two selected files as grayscale, computes their absolute
/// per-pixel difference, and shows it until a key is pressed.
/// </summary>
private void button2_Click(object sender, EventArgs e)
{
    // Dispose all three Mats when done (the original leaked them).
    using (var src1 = new Mat(comboBox1.SelectedItem.ToString(), ImreadModes.GrayScale))
    using (var src2 = new Mat(comboBox2.SelectedItem.ToString(), ImreadModes.GrayScale))
    using (var output = new Mat())
    {
        Cv2.Absdiff(src1, src2, output);
        using (new Window("output of abs diff", WindowMode.KeepRatio, output))
        {
            Cv2.WaitKey();
        }
    }
}
/// <summary>
/// Writes the pixels of <paramref name="image"/> into an existing
/// WriteableBitmap, resizing first when the dimensions differ.
/// </summary>
/// <param name="image">Source Mat. NOTE(review): when sizes differ this Mat is
/// resized IN PLACE (Cv2.Resize writes back into the same Mat), mutating the
/// caller's image — confirm callers expect that before changing it.</param>
/// <param name="bmp">Target bitmap; its full rectangle is overwritten.</param>
public static void Write(Mat image, WriteableBitmap bmp)
{
    if (image.Rows != bmp.PixelHeight || image.Cols != bmp.PixelWidth)
    {
        Cv2.Resize(image, image, new OpenCvSharp.Size(bmp.PixelWidth, bmp.PixelHeight));
    }
    var pixels = GetPixels(image);
    // Bytes per row = columns * bytes-per-pixel.
    var stride = image.Cols * image.ElemSize();
    bmp.WritePixels(
        new Int32Rect(0, 0, bmp.PixelWidth, bmp.PixelHeight),
        pixels, stride, 0);
}
/// <summary>
/// Demonstrates channel operations: Split/Merge on a sample image, then
/// MixChannels reordering RGBA into BGR plus a separate alpha plane.
/// </summary>
public void Run()
{
    // Split/Merge Test
    {
        using (Mat src = new Mat(FilePath.Image.Lenna, ImreadModes.Color))
        using (Mat merged = new Mat())
        {
            // Split each plane
            Mat[] planes;
            Cv2.Split(src, out planes);

            Cv2.ImShow("planes 0", planes[0]);
            Cv2.ImShow("planes 1", planes[1]);
            Cv2.ImShow("planes 2", planes[2]);
            Cv2.WaitKey();
            Cv2.DestroyAllWindows();

            // Invert G plane
            Cv2.BitwiseNot(planes[1], planes[1]);

            // Merge
            Cv2.Merge(planes, merged);

            Cv2.ImShow("src", src);
            Cv2.ImShow("merged", merged);
            Cv2.WaitKey();
            Cv2.DestroyAllWindows();

            // Release the split planes (the original leaked them).
            foreach (Mat plane in planes)
                plane.Dispose();
        }
    }

    // MixChannels Test
    {
        using (Mat rgba = new Mat(300, 300, MatType.CV_8UC4, new Scalar(50, 100, 150, 200)))
        using (Mat bgr = new Mat(rgba.Rows, rgba.Cols, MatType.CV_8UC3))
        using (Mat alpha = new Mat(rgba.Rows, rgba.Cols, MatType.CV_8UC1))
        {
            Mat[] input = { rgba };
            Mat[] output = { bgr, alpha };
            // rgba[0] -> bgr[2], rgba[1] -> bgr[1],
            // rgba[2] -> bgr[0], rgba[3] -> alpha[0]
            int[] fromTo = { 0, 2, 1, 1, 2, 0, 3, 3 };
            Cv2.MixChannels(input, output, fromTo);

            Cv2.ImShow("rgba", rgba);
            Cv2.ImShow("bgr", bgr);
            Cv2.ImShow("alpha", alpha);
            Cv2.WaitKey();
            Cv2.DestroyAllWindows();
        }
    }
}
/// <summary>
/// Displays the image in the specified window
/// </summary>
/// <param name="winname">Name of the window.</param>
/// <param name="mat">Image to be shown.</param>
/// <exception cref="ArgumentNullException">winname is null/empty or mat is null.</exception>
public static void ImShow(string winname, Mat mat)
{
    if (string.IsNullOrEmpty(winname))
        throw new ArgumentNullException(nameof(winname)); // refactor-safe name (was a bare string literal)
    if (mat == null)
        throw new ArgumentNullException(nameof(mat));
    try
    {
        NativeMethods.highgui_imshow(winname, mat.CvPtr);
    }
    catch (BadImageFormatException ex)
    {
        // Presumably a native-binary load mismatch; wrap in a clearer exception.
        throw PInvokeHelper.CreateException(ex);
    }
}
/// <summary>
/// Saves an image to a specified file.
/// </summary>
/// <param name="fileName">Name of the file.</param>
/// <param name="img">Image to be saved.</param>
/// <param name="prms">Format-specific save parameters encoded as pairs</param>
/// <returns>True when the image was written successfully.</returns>
public static bool ImWrite(string fileName, Mat img, params ImageEncodingParam[] prms)
{
    if (prms == null)
        return ImWrite(fileName, img, (int[]) null);

    // Flatten each (EncodingId, Value) pair into the int[] form expected
    // by the int[] overload.
    var flattened = new List<int>();
    foreach (ImageEncodingParam param in prms)
    {
        flattened.Add((int) param.EncodingId);
        flattened.Add(param.Value);
    }
    return ImWrite(fileName, img, flattened.ToArray());
}
/// <summary>
/// Extracts MSER by C++-style code (cv::MSER) and draws each detected
/// region's points onto <paramref name="dst"/> in a random color.
/// </summary>
/// <param name="gray">Input grayscale image.</param>
/// <param name="dst">Image the region points are drawn on.</param>
private void CppStyleMSER(Mat gray, Mat dst)
{
    // Dispose the detector when done (the original leaked it).
    using (MSER mser = MSER.Create())
    {
        Point[][] contours;
        Rect[] bboxes;
        mser.DetectRegions(gray, out contours, out bboxes);
        foreach (Point[] pts in contours)
        {
            // One random color per region.
            Scalar color = Scalar.RandomColor();
            foreach (Point p in pts)
            {
                dst.Circle(p, 1, color);
            }
        }
    }
}
/// <summary>
/// Submatrix operations: demonstrates assigning into ranged views of a Mat,
/// inverting a sub-region in place, and filling a SubMat with a constant.
/// </summary>
private void SubMat()
{
    Mat src = Cv2.ImRead(FilePath.Image.Lenna);

    // Assign small image to mat.
    Mat small = new Mat();
    Cv2.Resize(src, small, new Size(100, 100));
    // Indexer ranges are [rowStart, rowEnd, colStart, colEnd]; assigning
    // copies the 100x100 image into those regions of src.
    src[10, 110, 10, 110] = small;
    src[370, 470, 400, 500] = small.T(); // transposed copy
    // ↑ This is same as the following:
    //small.T().CopyTo(src[370, 470, 400, 500]);

    // Get partial mat (similar to cvSetImageROI)
    Mat part = src[200, 400, 200, 360];
    // Invert partial pixel values — writes through to src via the view.
    Cv2.BitwiseNot(part, part);

    // Fill the region rows 50..100, cols 400..450 with the constant 128.
    // (The original comment claimed cols 100..150 and color (128, 0, 0),
    // which did not match the code.)
    part = src.SubMat(50, 100, 400, 450);
    part.SetTo(128);

    using (new Window("SubMat", src))
    {
        Cv2.WaitKey();
    }
}
/// <summary>
/// Converts a movie to a 640x480 Canny-edge AVI, then plays the result back
/// at the source frame rate.
/// </summary>
public void Run()
{
    const string OutVideoFile = "out.avi";

    // Opens MP4 file (ffmpeg is probably needed).
    // The original never disposed this capture.
    using (VideoCapture capture = new VideoCapture(FilePath.Movie.Bach))
    {
        // Read movie frames and write them to VideoWriter
        Size dsize = new Size(640, 480);
        using (VideoWriter writer = new VideoWriter(OutVideoFile, -1, capture.Fps, dsize))
        // Reuse the working Mats across iterations — the original allocated
        // fresh gray/canny/dst Mats every frame and never disposed them.
        using (Mat frame = new Mat())
        using (Mat gray = new Mat())
        using (Mat canny = new Mat())
        using (Mat dst = new Mat())
        {
            Console.WriteLine("Converting each movie frames...");
            while (true)
            {
                // Read image
                capture.Read(frame);
                if (frame.Empty())
                    break;

                Console.CursorLeft = 0;
                Console.Write("{0} / {1}", capture.PosFrames, capture.FrameCount);

                // grayscale -> canny -> resize
                Cv2.CvtColor(frame, gray, ColorConversionCodes.BGR2GRAY);
                Cv2.Canny(gray, canny, 100, 180);
                Cv2.Resize(canny, dst, dsize, 0, 0, InterpolationFlags.Linear);

                // Write mat to VideoWriter
                writer.Write(dst);
            }
            Console.WriteLine();
        }

        // Watch result movie
        using (VideoCapture capture2 = new VideoCapture(OutVideoFile))
        using (Window window = new Window("result"))
        using (Mat frame = new Mat())
        {
            // Per-frame delay derived from the source frame rate.
            int sleepTime = (int)(1000 / capture.Fps);
            while (true)
            {
                capture2.Read(frame);
                if (frame.Empty())
                    break;
                window.ShowImage(frame);
                Cv2.WaitKey(sleepTime);
            }
        }
    }
}