Example 1
      public StopSignDetector(IInputArray stopSignModel)
      {
         _detector = new SURF(500);
         using (Mat redMask = new Mat())
         {
            GetRedPixelMask(stopSignModel, redMask);
            _modelKeypoints = new VectorOfKeyPoint();
            _modelDescriptors = new Mat();
            _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
            if (_modelKeypoints.Size == 0)
               throw new Exception("No image feature has been found in the stop sign model");
         }

         _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
         _modelDescriptorMatcher.Add(_modelDescriptors);

         _octagon = new VectorOfPoint(
            new Point[]
            {
               new Point(1, 0),
               new Point(2, 0),
               new Point(3, 1),
               new Point(3, 2),
               new Point(2, 3),
               new Point(1, 3),
               new Point(0, 2),
               new Point(0, 1)
            });

      }
Example 2
        private static void usingCppInterface1()
        {
            // Cv2.ImRead
            using (var src = new Mat(@"..\..\Images\Penguin.Png", LoadMode.AnyDepth | LoadMode.AnyColor))
            using (var dst = new Mat())
            {
                src.CopyTo(dst);

                for (var y = 0; y < src.Height; y++)
                {
                    for (var x = 0; x < src.Width; x++)
                    {
                        var pixel = src.Get<Vec3b>(y, x);
                        var newPixel = new Vec3b
                        {
                            Item0 = (byte)(255 - pixel.Item0), // B
                            Item1 = (byte)(255 - pixel.Item1), // G
                            Item2 = (byte)(255 - pixel.Item2) // R
                        };
                        dst.Set(y, x, newPixel);
                    }
                }

                // [Cpp] Accessing Pixel
                // https://github.com/shimat/opencvsharp/wiki/%5BCpp%5D-Accessing-Pixel

                //Cv2.NamedWindow();
                //Cv2.ImShow();
                using (new Window("C++ Interface: Src", image: src))
                using (new Window("C++ Interface: Dst", image: dst))
                {
                    Cv2.WaitKey(0);
                }
            }
        }
Example 3
        private void MatchBySurf(Mat src1, Mat src2)
        {
            var gray1 = new Mat();
            var gray2 = new Mat();

            Cv2.CvtColor(src1, gray1, ColorConversion.BgrToGray);
            Cv2.CvtColor(src2, gray2, ColorConversion.BgrToGray);

            var surf = new SURF(500, 4, 2, true);

            // Detect the keypoints and generate their descriptors using SURF
            KeyPoint[] keypoints1, keypoints2;
            var descriptors1 = new MatOfFloat();
            var descriptors2 = new MatOfFloat();
            surf.Run(gray1, null, out keypoints1, descriptors1);
            surf.Run(gray2, null, out keypoints2, descriptors2);

            // Match descriptor vectors 
            var bfMatcher = new BFMatcher(NormType.L2, false);
            var flannMatcher = new FlannBasedMatcher();
            DMatch[] bfMatches = bfMatcher.Match(descriptors1, descriptors2);
            DMatch[] flannMatches = flannMatcher.Match(descriptors1, descriptors2);

            // Draw matches
            var bfView = new Mat();
            Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, bfMatches, bfView);
            var flannView = new Mat();
            Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, flannMatches, flannView);

            using (new Window("SURF matching (by BFMather)", WindowMode.AutoSize, bfView))
            using (new Window("SURF matching (by FlannBasedMatcher)", WindowMode.AutoSize, flannView))
            {
                Cv2.WaitKey();
            }
        }
Example 4
        public void ToBitmapGrayScale()
        {
            Mat img = new Mat(FilePath.Lenna511, LoadMode.GrayScale); // width % 4 != 0

            Bitmap bitmap = BitmapConverter2.ToBitmap(img);
            // Bitmap bitmap = img.ToBitmap();

            using (var form = new Form())
            using (var pb = new PictureBox())
            {
                pb.Image = bitmap;
                var size = new System.Drawing.Size(bitmap.Width, bitmap.Height);
                pb.ClientSize = size;
                form.ClientSize = size;
                form.Controls.Add(pb);
                form.KeyPreview = true;
                form.KeyDown += (sender, args) =>
                {
                    if (args.KeyCode.HasFlag(Keys.Enter))
                        ((Form)sender).Close();
                };
                form.Text = "Grayscale Mat to Bitmap Test";

                form.ShowDialog();
            }
        }
Example 5
        /// <summary>
        /// Get homography projection for the observedImage
        /// </summary>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The corners of the model image projected onto the observed image, or an empty array if no match was found.</returns>
        public static Point[] GetMatchingPoints(Mat observedImage, out long matchTime)
        {
            matchTime = 0;
            Point[] result = new Point[0];
            if (modelImage == null) return result;

            Mat homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;

            using (Mat gray = new Mat())
            {
                CvInvoke.CvtColor(observedImage, gray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
                using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                {
                    Mat mask;
                    FindMatch(modelImage, gray, out matchTime, out modelKeyPoints, out observedKeyPoints, matches, out mask, out homography);
                    if (homography != null)
                    {
                        Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                        PointF[] pts = new PointF[] { new PointF(rect.Left, rect.Bottom), new PointF(rect.Right, rect.Bottom), new PointF(rect.Right, rect.Top), new PointF(rect.Left, rect.Top) };
                        pts = CvInvoke.PerspectiveTransform(pts, homography);
                        result = Array.ConvertAll<PointF, Point>(pts, Point.Round);
                    }
                    return result;
                }
            }
        }
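A minimal usage sketch for GetMatchingPoints (the observed frame, the outline color, and the drawing helper are assumptions, not part of the original sample):
        // Hypothetical caller: project the model outline onto an observed frame and draw it.
        public static void DrawModelOutline(Mat observedFrame)
        {
            long matchTime;
            Point[] corners = GetMatchingPoints(observedFrame, out matchTime);
            if (corners.Length > 0)
            {
                using (VectorOfPoint outline = new VectorOfPoint(corners))
                    CvInvoke.Polylines(observedFrame, outline, true, new MCvScalar(0, 255, 0), 2);
            }
            Console.WriteLine("Match computed in {0} ms", matchTime);
        }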
Example 6
        public void Run()
        {
            var dst = new Mat(FilePath.Lenna, LoadMode.Color);
            var gray = new Mat(FilePath.Lenna, LoadMode.GrayScale);

            StarDetector detector = new StarDetector(45);
            KeyPoint[] keypoints = detector.Run(gray);

            if (keypoints != null)
            {
                var color = new Scalar(0, 255, 0);
                foreach (KeyPoint kpt in keypoints)
                {
                    float r = kpt.Size / 2;
                    Cv2.Circle(dst, kpt.Pt, (int)r, color, 1, LineType.Link8, 0);
                    Cv2.Line(dst, 
                        new Point2f(kpt.Pt.X + r, kpt.Pt.Y + r), 
                        new Point2f(kpt.Pt.X - r, kpt.Pt.Y - r), 
                        color, 1, LineType.Link8, 0);
                    Cv2.Line(dst, 
                        new Point2f(kpt.Pt.X - r, kpt.Pt.Y + r), 
                        new Point2f(kpt.Pt.X + r, kpt.Pt.Y - r), 
                        color, 1, LineType.Link8, 0);
                }
            }

            using (new Window("StarDetector features", dst))
            {
                Cv2.WaitKey();
            }
        }
Example 7
        private static void HDR()
        {
            var hdr = CalibrateDebevec.Create();

            Mat[] images = new Mat[3];
            images[0] = Cv2.ImRead(@"data\lenna.png", ImreadModes.AnyColor);
            images[1] = Cv2.ImRead(@"data\lenna.png", ImreadModes.AnyColor);
            images[2] = Cv2.ImRead(@"data\lenna.png", ImreadModes.AnyColor);

            float[] speeds = new float[3];
            speeds[0] = 1;
            speeds[1] = 1;
            speeds[2] = 1;

            Mat dst = new Mat();

            hdr.Process(images, dst, speeds);

            dst.ToString();

            for (int i = 0; i < Math.Max(dst.Rows, dst.Cols); i++)
            {
                Console.WriteLine(dst.At<float>(i));
            }
        }
Example 8
        /// <summary>
        /// Submatrix operations
        /// </summary>
        private void SubMat()
        {
            Mat src = Cv2.ImRead(FilePath.Image.Lenna);

            // Assign small image to mat
            Mat small = new Mat();
            Cv2.Resize(src, small, new Size(100, 100));
            src[10, 110, 10, 110] = small;
            src[370, 470, 400, 500] = small.T();
            // ↑ This is same as the following:
            //small.T().CopyTo(src[370, 470, 400, 500]);

            // Get partial mat (similar to cvSetImageROI)
            Mat part = src[200, 400, 200, 360];
            // Invert partial pixel values
            Cv2.BitwiseNot(part, part);

            // Fill the region (rows 50..100, cols 400..450) with the value 128
            part = src.SubMat(50, 100, 400, 450);
            part.SetTo(128);

            using (new Window("SubMat", src))
            {
                Cv2.WaitKey();
            }
        }
Example 9
        // Use this for initialization
        void Start()
        {
            Texture2D imgTexture = Resources.Load ("chessboard") as Texture2D;

            Mat imgMat = new Mat (imgTexture.height, imgTexture.width, CvType.CV_8UC3);

            Utils.texture2DToMat (imgTexture, imgMat);
            Debug.Log ("imgMat dst ToString " + imgMat.ToString ());

            Mat grayMat = new Mat ();
            Imgproc.cvtColor (imgMat, grayMat, Imgproc.COLOR_RGB2GRAY);

            Imgproc.Canny (grayMat, grayMat, 50, 200);

            Mat lines = new Mat ();

            Imgproc.HoughLinesP (grayMat, lines, 1, Mathf.PI / 180, 50, 50, 10);

            //Debug.Log ("lines toString " + lines.ToString ());
            //Debug.Log ("lines dump" + lines.dump ());

            int[] linesArray = new int[lines.cols () * lines.rows () * lines.channels ()];
            lines.get (0, 0, linesArray);

            for (int i = 0; i < linesArray.Length; i = i + 4) {
                Core.line (imgMat, new Point (linesArray [i + 0], linesArray [i + 1]), new Point (linesArray [i + 2], linesArray [i + 3]), new Scalar (255, 0, 0), 2);
            }

            Texture2D texture = new Texture2D (imgMat.cols (), imgMat.rows (), TextureFormat.RGBA32, false);
            Utils.matToTexture2D (imgMat, texture);

            gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
        }
Example 10
        // Use this for initialization
        void Start()
        {
            Texture2D imgTexture = Resources.Load ("lena") as Texture2D;

            Mat imgMat = new Mat (imgTexture.height, imgTexture.width, CvType.CV_8UC4);

            Utils.texture2DToMat (imgTexture, imgMat);
            Debug.Log ("imgMat dst ToString " + imgMat.ToString ());

            //CascadeClassifier cascade = new CascadeClassifier (Utils.getFilePath ("lbpcascade_frontalface.xml"));
            CascadeClassifier cascade = new CascadeClassifier (Utils.getFilePath ("haarcascade_frontalface_alt.xml"));

            Mat grayMat = new Mat ();
            Imgproc.cvtColor (imgMat, grayMat, Imgproc.COLOR_RGBA2GRAY);
            Imgproc.equalizeHist (grayMat, grayMat);

            MatOfRect faces = new MatOfRect ();

            if (cascade != null)
                cascade.detectMultiScale (grayMat, faces, 1.1, 2, 2,
                    new Size (20, 20), new Size ());

            OpenCVForUnity.Rect[] rects = faces.toArray ();
            for (int i = 0; i < rects.Length; i++) {
                Debug.Log ("detect faces " + rects [i]);

                Core.rectangle (imgMat, new Point (rects [i].x, rects [i].y), new Point (rects [i].x + rects [i].width, rects [i].y + rects [i].height), new Scalar (255, 0, 0, 255), 2);
            }

            Texture2D texture = new Texture2D (imgMat.cols (), imgMat.rows (), TextureFormat.RGBA32, false);

            Utils.matToTexture2D (imgMat, texture);

            gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
        }
Example 11
        // Use this for initialization
        void Start()
        {
            //Read the left and right images
            Texture2D texLeft = Resources.Load ("tsukuba_l") as Texture2D;
            Texture2D texRight = Resources.Load ("tsukuba_r") as Texture2D;
            Mat imgLeft = new Mat (texLeft.height, texLeft.width, CvType.CV_8UC1);
            Mat imgRight = new Mat (texRight.height, texRight.width, CvType.CV_8UC1);
            Utils.texture2DToMat (texLeft, imgLeft);
            Utils.texture2DToMat (texRight, imgRight);
            //or
            //Mat imgLeft = Imgcodecs.imread (Utils.getFilePath ("tsukuba_l.png"), Imgcodecs.IMREAD_GRAYSCALE);
            //Mat imgRight = Imgcodecs.imread (Utils.getFilePath ("tsukuba_r.png"), Imgcodecs.IMREAD_GRAYSCALE);

            Mat imgDisparity16S = new Mat (imgLeft.rows (), imgLeft.cols (), CvType.CV_16S);
            Mat imgDisparity8U = new Mat (imgLeft.rows (), imgLeft.cols (), CvType.CV_8UC1);

            if (imgLeft.empty () || imgRight.empty ()) {
                Debug.Log ("Error reading images ");
            }

            StereoBM sbm = StereoBM.create (16, 15);

            sbm.compute (imgLeft, imgRight, imgDisparity16S);

            //normalize to CvType.CV_8U
            Core.normalize (imgDisparity16S, imgDisparity8U, 0, 255, Core.NORM_MINMAX, CvType.CV_8U);

            Texture2D texture = new Texture2D (imgDisparity8U.cols (), imgDisparity8U.rows (), TextureFormat.RGBA32, false);

            Utils.matToTexture2D (imgDisparity8U, texture);

            gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
        }
Example 12
        /// <summary>
        /// Converts System.Drawing.Bitmap to Mat
        /// </summary>
        /// <param name="src">System.Drawing.Bitmap object to be converted</param>
        /// <returns>A Mat object which is converted from System.Drawing.Bitmap</returns>
        public static Mat ToMat(this Bitmap src)
        {
            if (src == null)
                throw new ArgumentNullException("src");

            int w = src.Width;
            int h = src.Height;
            int channels;
            switch (src.PixelFormat)
            {
                case PixelFormat.Format24bppRgb:
                case PixelFormat.Format32bppRgb:
                    channels = 3; break;
                case PixelFormat.Format32bppArgb:
                case PixelFormat.Format32bppPArgb:
                    channels = 4; break;
                case PixelFormat.Format8bppIndexed:
                case PixelFormat.Format1bppIndexed:
                    channels = 1; break;
                default:
                    throw new NotImplementedException();
            }

            Mat dst = new Mat(h, w, MatType.CV_8UC(channels));
            ToMat(src, dst);
            return dst;
        }
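A short usage sketch for the ToMat extension above (file names are placeholders):
        // Hypothetical usage: round-trip a Bitmap through Mat and save it with OpenCV.
        public static void BitmapToMatExample()
        {
            using (var bitmap = new Bitmap("input.png"))
            using (Mat mat = bitmap.ToMat())
            {
                Cv2.ImWrite("output.png", mat);
            }
        }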
Example 13
        public void Run()
        {
            var capture = new VideoCapture();
            capture.Set(CaptureProperty.FrameWidth, 640);
            capture.Set(CaptureProperty.FrameHeight, 480);
            capture.Open(-1);
            if (!capture.IsOpened())
                throw new Exception("capture initialization failed");

            var fs = FrameSource.CreateCameraSource(-1);
            var sr = SuperResolution.CreateBTVL1();
            sr.SetInput(fs);

            using (var normalWindow = new Window("normal"))
            using (var srWindow = new Window("super resolution"))
            {
                var normalFrame = new Mat();
                var srFrame = new Mat();
                while (true)
                {
                    capture.Read(normalFrame);
                    sr.NextFrame(srFrame);
                    if (normalFrame.Empty() || srFrame.Empty())
                        break;
                    normalWindow.ShowImage(normalFrame);
                    srWindow.ShowImage(srFrame);
                    Cv2.WaitKey(100);
                }
            }
        }
Example 14
        private static void FileStorageTest()
        {
            const string fileName = "foo.yml";

            try
            {
                using (var fs = new FileStorage(fileName, FileStorage.Mode.Write | FileStorage.Mode.FormatYaml))
                {
                    fs.Write("int", 123);
                    fs.Write("double", Math.PI);
                    using (var tempMat = new Mat("data/lenna.png"))
                    {
                        fs.Write("mat", tempMat);
                    }
                }

                using (var fs = new FileStorage(fileName, FileStorage.Mode.Read))
                {
                    Console.WriteLine("int: {0}", fs["int"].ReadInt());
                    Console.WriteLine("double: {0}", (double) fs["double"]);
                    using (var window = new Window("mat"))
                    {
                        window.ShowImage(fs["mat"].ReadMat());
                        Cv2.WaitKey();
                    }
                }
            }
            finally
            {
                File.Delete(fileName);
            }
        }
Example 15
        public static Mat ConvertToGrayScale(Mat mat)
        {
            Mat grayMat = new Mat();
            Cv2.CvtColor(mat, grayMat, ColorConversion.RgbToGray);

            return grayMat;
        }
Example 16
	// Update is called once per frame
	void Update () {
		
		cap.Read (frame);


		if (!frame.Empty()){

			//assume this part of the frame contains only background
			smoothed_img = frame.Blur(new Size(5,5));

			frame_hsv = frame.CvtColor (ColorConversionCodes.BGR2HSV);
			Scalar lb = new Scalar (0, 0, 50);
			Scalar ub = new Scalar (180, 70, 180);

			// threshold the HSV frame into the background mask
			// (the InRange call is missing from the original snippet; thresh is assumed to be an allocated Mat field)
			Cv2.InRange (frame_hsv, lb, ub, thresh);

			Mat disc = Cv2.GetStructuringElement (MorphShapes.Ellipse, new Size (7, 7));

			Cv2.MorphologyEx (thresh, thresh, MorphTypes.Close, disc, null, 3);


			contours = Cv2.FindContoursAsMat (thresh , RetrievalModes.List, ContourApproximationModes.ApproxSimple);


			mask = new Mat (thresh.Size (), thresh.Type (), Scalar.All (0));


			Cv2.Merge(new Mat[]{mask,mask,mask},mask);
			Cv2.BitwiseAnd (mask, frame, mask);

			//Cv2.Merge(new Mat[]{frame_backproj,frame_backproj,frame_backproj},frame_backproj);

			tex.LoadImage (smoothed_img.ToBytes (".png", new int[]{ 0 }));

		}

	}
Example 17
    // Update is called once per frame
    void Update()
    {
        using ( Mat image = new Mat() ) {
            // Grab a frame from the webcam
            video.Read( image );

            // Detect faces
            var faces = cascade.DetectMultiScale( image );
            if ( faces.Length > 0 ) {
                var face = faces[0];

                // Draw a rectangle around the face
                image.Rectangle( face, new Scalar( 255, 0, 0 ), 2 );

                // Compute the coordinates of the face center
                var x = face.TopLeft.X + (face.Size.Width / 2);
                var y = face.TopLeft.Y + (face.Size.Height / 2);

                // Move the target object
                if ( Object != null ) {
                    Object.transform.localPosition = Vector2ToVector3( new Vector2( x, y ) );
                }
            }

            // OpenCV data is BGR, so convert it to RGB,
            // then encode it as a bitmap and load it into the texture
            using(var cvtImage = image.CvtColor( ColorConversion.BgrToRgb )){
                texture.LoadRawTextureData( cvtImage.ImEncode( ".bmp" ) );
                texture.Apply();
            }
        }
    }
Example 18
		/// <summary>
		/// Computes the Hamming distance between the detected marker bits and the marker design.
		/// </summary>
		/// <returns>The Hamming distance.</returns>
		/// <param name="bits">The thresholded marker cells, one byte per cell.</param>
		/// <param name="markerDesign">The reference marker design.</param>
		public static int hammDistMarker (Mat bits, byte[,] markerDesign)
		{

				int dist = 0;

				int size = markerDesign.GetLength(0);

				byte[] b = new byte[size * size];

				bits.get (0, 0, b);
		
				for (int y=0; y<size; y++) {
						
						int sum = 0;
						
						for (int x=0; x<size; x++) {
					
								sum += (b [y*size + x] == markerDesign [y,x]) ? 0 : 1;
						}
						
						dist += sum;
				}
		
				return dist;
		}
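A small usage sketch for hammDistMarker (the marker size and the all-zero bit matrix are assumptions):
		// Hypothetical usage: build a bit matrix and compare it against a reference design.
		public static void HammDistExample (byte[,] markerDesign)
		{
				int size = markerDesign.GetLength (0);
				Mat bits = new Mat (size, size, CvType.CV_8UC1, new Scalar (0)); // thresholded marker cells (0/1), all zero here
				int distance = hammDistMarker (bits, markerDesign);
				Debug.Log ("Hamming distance from the design: " + distance);
		}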
Example 19
      private void ProcessImage(IInputOutputArray image)
      {
         Stopwatch watch = Stopwatch.StartNew(); // time the detection process

         List<IInputOutputArray> licensePlateImagesList = new List<IInputOutputArray>();
         List<IInputOutputArray> filteredLicensePlateImagesList = new List<IInputOutputArray>();
         List<RotatedRect> licenseBoxList = new List<RotatedRect>();
         List<string> words = _licensePlateDetector.DetectLicensePlate(
            image,
            licensePlateImagesList,
            filteredLicensePlateImagesList,
            licenseBoxList);

         watch.Stop(); //stop the timer
         processTimeLabel.Text = String.Format("License Plate Recognition time: {0} milliseconds", watch.Elapsed.TotalMilliseconds);

         panel1.Controls.Clear();
         Point startPoint = new Point(10, 10);
         for (int i = 0; i < words.Count; i++)
         {
            Mat dest = new Mat();
            CvInvoke.VConcat(licensePlateImagesList[i], filteredLicensePlateImagesList[i], dest);
            AddLabelAndImage(
               ref startPoint,
               String.Format("License: {0}", words[i]),
               dest);
            PointF[] verticesF = licenseBoxList[i].GetVertices();
            Point[] vertices = Array.ConvertAll(verticesF, Point.Round);
            using(VectorOfPoint pts = new VectorOfPoint(vertices))
               CvInvoke.Polylines(image, pts, true, new Bgr(Color.Red).MCvScalar, 2);
            
         }

      }
Example 20
        /// <summary>
        /// Detects faces in the sample image and draws an ellipse around each detected face.
        /// </summary>
        /// <param name="cascade">The cascade classifier used for face detection.</param>
        /// <returns>A copy of the source image with the detected faces marked.</returns>
        private Mat DetectFace(CascadeClassifier cascade)
        {
            Mat result;

            using (var src = new Mat(FilePath.Image.Yalta, LoadMode.Color))
            using (var gray = new Mat())
            {
                result = src.Clone();
                Cv2.CvtColor(src, gray, ColorConversion.BgrToGray, 0);

                // Detect faces
                Rect[] faces = cascade.DetectMultiScale(
                    gray, 1.08, 2, HaarDetectionType.ScaleImage, new Size(30, 30));

                // Render all detected faces
                foreach (Rect face in faces)
                {
                    var center = new Point
                    {
                        X = (int)(face.X + face.Width * 0.5),
                        Y = (int)(face.Y + face.Height * 0.5)
                    };
                    var axes = new Size
                    {
                        Width = (int)(face.Width * 0.5),
                        Height = (int)(face.Height * 0.5)
                    };
                    Cv2.Ellipse(result, center, axes, 0, 0, 360, new Scalar(255, 0, 255), 4);
                }
            }
            return result;
        }
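A possible caller for DetectFace (the cascade file path is an assumption):
        private void RunFaceDetection()
        {
            using (var cascade = new CascadeClassifier("haarcascade_frontalface_alt.xml"))
            using (Mat result = DetectFace(cascade))
            using (new Window("Face Detection", result))
            {
                Cv2.WaitKey();
            }
        }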
Example 21
   // Use this for initialization
   void Start()
   {  
		String[] textureNames = new string[] { "stitch1", "stitch2", "stitch3", "stitch4"};
		Mat[] imgs = new Mat[textureNames.Length];
		Mat tmp = new Mat ();
		for (int i = 0; i < textureNames.Length; i++) {
			Texture2D tex = Resources.Load<Texture2D>(textureNames[i]);
			imgs [i] = new Mat ();
			TextureConvert.Texture2dToOutputArray(tex, tmp);
			CvInvoke.Flip(tmp, tmp, FlipType.Vertical);
			CvInvoke.CvtColor (tmp, imgs [i], ColorConversion.Bgra2Bgr);
			if (imgs [i].IsEmpty)
				Debug.Log ("Image " + i + " is empty");
			else
				Debug.Log ("Image " + i + " is " + imgs[i].NumberOfChannels + " channels "  + imgs [i].Width + "x" + imgs [i].Height);
		}
		Emgu.CV.Stitching.Stitcher stitcher = new Emgu.CV.Stitching.Stitcher (false);
		Mat result = new Mat ();
		using (VectorOfMat vms = new VectorOfMat (imgs))
			stitcher.Stitch (vms, result);
		//CvInvoke.Flip(result, result, FlipType.Vertical);

		Texture2D texture = TextureConvert.InputArrayToTexture2D(result, FlipType.Vertical);

		this.GetComponent<GUITexture>().texture = texture;
		Size s = result.Size;
		this.GetComponent<GUITexture>().pixelInset = new Rect(-s.Width / 2, -s.Height / 2, s.Width, s.Height);

   }
Example 22
        private static void LineIterator()
        {
            var img = new Mat("data/lenna.png", ImreadModes.Color);
            var pt1 = new Point(100, 100);
            var pt2 = new Point(300, 300);
            var iterator = new LineIterator(img, pt1, pt2, PixelConnectivity.Connectivity8);

            // invert color
            foreach (var pixel in iterator)
            {
                Vec3b value = pixel.GetValue<Vec3b>();
                value.Item0 = (byte)~value.Item0;
                value.Item1 = (byte)~value.Item1;
                value.Item2 = (byte)~value.Item2;
                pixel.SetValue(value);
            }

            // re-enumeration works fine
            foreach (var pixel in iterator)
            {
                Vec3b value = pixel.GetValue<Vec3b>();
                Console.WriteLine("{0} = ({1},{2},{3})", pixel.Pos, value.Item0, value.Item1, value.Item2);
            }

            Window.ShowImages(img);
        }
Example 23
 /// <summary>
 /// Finds perspective transformation H=||h_ij|| between the source and the destination planes
 /// </summary>
 /// <param name="srcPoints">Point coordinates in the original plane</param>
 /// <param name="dstPoints">Point coordinates in the destination plane</param>
 /// <param name="homography">The output homography matrix</param>
 /// <param name="method">FindHomography method</param>
 /// <param name="ransacReprojThreshold">
 /// The maximum allowed reprojection error to treat a point pair as an inlier. 
 /// The parameter is only used in RANSAC-based homography estimation. 
 /// E.g. if dst_points coordinates are measured in pixels with pixel-accurate precision, it makes sense to set this parameter somewhere in the range ~1..3
 /// </param>
 /// <param name="mask">Optional output mask set by a robust method ( CV_RANSAC or CV_LMEDS ). Note that the input mask values are ignored.</param>
 /// <remarks>The estimated 3x3 homography matrix is written to <paramref name="homography"/>.</remarks>
 public static void FindHomography(
    PointF[] srcPoints,
    PointF[] dstPoints,
    IOutputArray homography,
    CvEnum.HomographyMethod method,
    double ransacReprojThreshold = 3,
    IOutputArray mask = null)
 {
    GCHandle srcHandle = GCHandle.Alloc(srcPoints, GCHandleType.Pinned);
    GCHandle dstHandle = GCHandle.Alloc(dstPoints, GCHandleType.Pinned);
    try
    {
       using (
          Mat srcPointMatrix = new Mat(srcPoints.Length, 2, DepthType.Cv32F, 1, srcHandle.AddrOfPinnedObject(), 8))
       using (
          Mat dstPointMatrix = new Mat(dstPoints.Length, 2, DepthType.Cv32F, 1, dstHandle.AddrOfPinnedObject(), 8))
       {
          CvInvoke.FindHomography(srcPointMatrix, dstPointMatrix, homography, method, ransacReprojThreshold, mask);
       }
    }
    finally
    {
       srcHandle.Free();
       dstHandle.Free();
    }
 }
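A usage sketch for the FindHomography wrapper above, estimating a perspective transform from four hand-picked correspondences (the point values and the HomographyMethod.Default member name are assumptions):
 public static void FindHomographyExample()
 {
    PointF[] srcPoints = { new PointF(0, 0), new PointF(100, 0), new PointF(100, 100), new PointF(0, 100) };
    PointF[] dstPoints = { new PointF(10, 5), new PointF(120, 10), new PointF(115, 110), new PointF(5, 95) };
    using (Mat homography = new Mat())
    {
       FindHomography(srcPoints, dstPoints, homography, CvEnum.HomographyMethod.Default);
       // homography now holds the estimated 3x3 transform
    }
 }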
Example 24
        // Use this for initialization
        void Start()
        {
            Texture2D inputTexture = Resources.Load ("lena") as Texture2D;

            Mat inputMat = new Mat (inputTexture.height, inputTexture.width, CvType.CV_8UC4);

            Utils.texture2DToMat (inputTexture, inputMat);
            Debug.Log ("inputMat dst ToString " + inputMat.ToString ());

            Mat src_mat = new Mat (4, 1, CvType.CV_32FC2);
            Mat dst_mat = new Mat (4, 1, CvType.CV_32FC2);

            src_mat.put (0, 0, 0.0, 0.0, inputMat.rows (), 0.0, 0.0, inputMat.cols (), inputMat.rows (), inputMat.cols ());
            dst_mat.put (0, 0, 0.0, 0.0, inputMat.rows (), 200.0, 0.0, inputMat.cols (), inputMat.rows (), inputMat.cols () - 200.0);
            Mat perspectiveTransform = Imgproc.getPerspectiveTransform (src_mat, dst_mat);

            Mat outputMat = inputMat.clone ();

            Imgproc.warpPerspective (inputMat, outputMat, perspectiveTransform, new Size (inputMat.rows (), inputMat.cols ()));

            Texture2D outputTexture = new Texture2D (outputMat.cols (), outputMat.rows (), TextureFormat.RGBA32, false);

            Utils.matToTexture2D (outputMat, outputTexture);

            gameObject.GetComponent<Renderer> ().material.mainTexture = outputTexture;
        }
Example 25
	public List<Point[]> detect (Mat im, float scaleFactor, int minNeighbours, OpenCVForUnity.Size minSize)
	{
		//convert image to greyscale
		Mat gray = null;
		if (im.channels () == 1) {
			gray = im;
		} else {
			gray = new Mat ();
			Imgproc.cvtColor (im, gray, Imgproc.COLOR_RGBA2GRAY);
		}


		using (Mat equalizeHistMat = new Mat ()) 
		using (MatOfRect faces = new MatOfRect ()) {
			
			Imgproc.equalizeHist (gray, equalizeHistMat);

			detector.detectMultiScale (equalizeHistMat, faces, scaleFactor, minNeighbours, 0
				| Objdetect.CASCADE_FIND_BIGGEST_OBJECT
				| Objdetect.CASCADE_SCALE_IMAGE, minSize, new Size ());
			
			
			if (faces.rows () < 1) {
				return new List<Point[]> ();
			}
			return convertMatOfRectToPoints (faces);
		}
				
	}
Example 26
    public void OnTrackablesUpdated()
    {
        if (!m_RegisteredFormat) {
            CameraDevice.Instance.SetFrameFormat (m_PixelFormat, true);
            m_RegisteredFormat = true;
        }

        CameraDevice cam = CameraDevice.Instance;
        Image image = cam.GetCameraImage (m_PixelFormat);
        if (image == null) {
            Debug.Log (m_PixelFormat + " image is not available yet");
        } else {

            if (inputMat == null) {
                inputMat = new Mat (image.Height, image.Width, CvType.CV_8UC1);
                //Debug.Log ("inputMat dst ToString " + inputMat.ToString ());
            }

            inputMat.put (0, 0, image.Pixels);

            Core.putText (inputMat, "CameraImageToMatSample " + inputMat.cols () + "x" + inputMat.rows (), new Point (5, inputMat.rows () - 5), Core.FONT_HERSHEY_PLAIN, 1.0, new Scalar (255, 0, 0, 255));

            if (outputTexture == null) {
                outputTexture = new Texture2D (inputMat.cols (), inputMat.rows (), TextureFormat.RGBA32, false);
            }

            Utils.matToTexture2D (inputMat, outputTexture);

            quad.transform.localScale = new Vector3 ((float)image.Width, (float)image.Height, 1.0f);
            quad.GetComponent<Renderer> ().material.mainTexture = outputTexture;

            mainCamera.orthographicSize = image.Height / 2;

        }
    }
Example 27
      protected override void OnCreate(Bundle bundle)
      {
         base.OnCreate(bundle);

         AppPreference preference = new AppPreference();


         OnButtonClick += delegate
         {
            CvInvoke.UseOpenCL = preference.UseOpenCL;
            String oclDeviceName = preference.OpenClDeviceName;
            if (!String.IsNullOrEmpty(oclDeviceName))
            {
               CvInvoke.OclSetDefaultDevice(oclDeviceName);
            }

            long time;
            using (Mat box = new Mat(Assets, "box.png"))
            using (Mat boxInScene = new Mat(Assets, "box_in_scene.png"))
            using (Mat result = DrawMatches.Draw(box, boxInScene, out time))
            {
               SetImageBitmap(result.ToBitmap(Bitmap.Config.Rgb565));

               String computeDevice = CvInvoke.UseOpenCL ? "OpenCL: " + Emgu.CV.Ocl.Device.Default.Name : "CPU";
               SetMessage(String.Format("Matched with '{0}' in {1} milliseconds.", computeDevice, time));
            }
         };
      }
Example 28
 /// <summary>
 /// Decode image stored in the buffer
 /// </summary>
 /// <param name="buf">The buffer</param>
 /// <param name="loadType">The image loading type</param>
 /// <param name="dst">The output placeholder for the decoded matrix.</param>
 public static void Imdecode(byte[] buf, CvEnum.LoadImageType loadType, Mat dst)
 {
    using (VectorOfByte vb = new VectorOfByte(buf))
    {
       Imdecode(vb, loadType, dst);
    }
 }
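A brief usage sketch for the Imdecode overload above (the file name is a placeholder and System.IO is assumed to be imported):
 public static Mat ImdecodeExample()
 {
    byte[] buffer = File.ReadAllBytes("box.png");
    Mat decoded = new Mat();
    Imdecode(buffer, CvEnum.LoadImageType.Color, decoded);
    return decoded; // decoded now contains the BGR image
 }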
Example 29
        // Use this for initialization
        void Start()
        {
            rgbMat = new Mat ();

            capture = new VideoCapture ();
            capture.open (Utils.getFilePath ("768x576_mjpeg.mjpeg"));

            if (capture.isOpened ()) {
                Debug.Log ("capture.isOpened() true");
            } else {
                Debug.Log ("capture.isOpened() false");
            }

            Debug.Log ("CAP_PROP_FORMAT: " + capture.get (Videoio.CAP_PROP_FORMAT));
            Debug.Log ("CV_CAP_PROP_PREVIEW_FORMAT: " + capture.get (Videoio.CV_CAP_PROP_PREVIEW_FORMAT));
            Debug.Log ("CAP_PROP_POS_MSEC: " + capture.get (Videoio.CAP_PROP_POS_MSEC));
            Debug.Log ("CAP_PROP_POS_FRAMES: " + capture.get (Videoio.CAP_PROP_POS_FRAMES));
            Debug.Log ("CAP_PROP_POS_AVI_RATIO: " + capture.get (Videoio.CAP_PROP_POS_AVI_RATIO));
            Debug.Log ("CAP_PROP_FRAME_COUNT: " + capture.get (Videoio.CAP_PROP_FRAME_COUNT));
            Debug.Log ("CAP_PROP_FPS: " + capture.get (Videoio.CAP_PROP_FPS));
            Debug.Log ("CAP_PROP_FRAME_WIDTH: " + capture.get (Videoio.CAP_PROP_FRAME_WIDTH));
            Debug.Log ("CAP_PROP_FRAME_HEIGHT: " + capture.get (Videoio.CAP_PROP_FRAME_HEIGHT));

            texture = new Texture2D ((int)(frameWidth), (int)(frameHeight), TextureFormat.RGBA32, false);
            gameObject.transform.localScale = new Vector3 ((float)frameWidth, (float)frameHeight, 1);
            float widthScale = (float)Screen.width / (float)frameWidth;
            float heightScale = (float)Screen.height / (float)frameHeight;
            if (widthScale < heightScale) {
                Camera.main.orthographicSize = ((float)frameWidth * (float)Screen.height / (float)Screen.width) / 2;
            } else {
                Camera.main.orthographicSize = (float)frameHeight / 2;
            }

            gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
        }
Example 30
        public FlannColoredModelPoints(List<Tuple<CvPoint3D64f, CvColor>> modelPoints, IndexParams indexParams, SearchParams searchParams, double colorScale)
        {
            _modelPoints = modelPoints;

            _modelMat = new CvMat(_modelPoints.Count, 6, MatrixType.F32C1);
            unsafe
            {
                float* modelArr = _modelMat.DataSingle;
                foreach (var tuple in _modelPoints)
                {
                    *(modelArr++) = (float)tuple.Item1.X;
                    *(modelArr++) = (float)tuple.Item1.Y;
                    *(modelArr++) = (float)tuple.Item1.Z;
                    *(modelArr++) = (float)(tuple.Item2.R * colorScale / 255);
                    *(modelArr++) = (float)(tuple.Item2.G * colorScale / 255);
                    *(modelArr++) = (float)(tuple.Item2.B * colorScale / 255);
                }
            }
            _colorScale = colorScale;
            _modelDataMat = new Mat(_modelMat);
            _indexParam = indexParams;
            _searchParam = searchParams;
            _indexParam.IsEnabledDispose = false;
            _searchParam.IsEnabledDispose = false;
            _flannIndex = new Index(_modelDataMat, _indexParam);
        }
Example 31
        /// <summary>
        /// Inverse Discrete Fourier transformation
        /// </summary>
        /// <param name="outMat">The output spatial-domain image</param>
        /// <param name="fourier">The Fourier coefficients to invert</param>
        private static void IDFT(Mat outMat, Complex[,] fourier)
        {
            var outIndexer = outMat.GetGenericIndexer <byte>();

            for (int y = 0; y < outMat.Height; y++)
            {
                for (int x = 0; x < outMat.Width; x++)
                {
                    var val = new Complex(0d, 0d);
                    for (int l = 0; l < outMat.Height; l++)
                    {
                        for (int k = 0; k < outMat.Width; k++)
                        {
                            var G     = fourier[l, k];
                            var theta = 2 * Math.PI * (k * (double)x / outMat.Width + (double)l * y / outMat.Height);
                            var temp  = new Complex(Math.Cos(theta), Math.Sin(theta)) * G;
                            val += temp;
                        }
                    }
                    var g = Complex.Abs(val) / Math.Sqrt(outMat.Height * outMat.Width);
                    outIndexer[y, x] = (byte)g;
                }
            }
        }
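The snippet only contains the inverse transform; below is a minimal forward-DFT companion sketch (an assumption, not part of the original) that produces the Complex[,] array consumed by IDFT, using the same 1/sqrt(M*N) scaling. Like the inverse above it is a naive O(N^4) loop, so it is only practical for small images.
        /// <summary>
        /// Forward Discrete Fourier transformation (naive reference implementation)
        /// </summary>
        /// <param name="inMat">The 8-bit single-channel input image</param>
        /// <returns>The Fourier coefficients, scaled by 1/sqrt(M*N)</returns>
        private static Complex[,] DFT(Mat inMat)
        {
            var inIndexer = inMat.GetGenericIndexer<byte>();
            var fourier = new Complex[inMat.Height, inMat.Width];

            for (int l = 0; l < inMat.Height; l++)
            {
                for (int k = 0; k < inMat.Width; k++)
                {
                    var sum = new Complex(0d, 0d);
                    for (int y = 0; y < inMat.Height; y++)
                    {
                        for (int x = 0; x < inMat.Width; x++)
                        {
                            var theta = -2 * Math.PI * (k * (double)x / inMat.Width + (double)l * y / inMat.Height);
                            sum += inIndexer[y, x] * new Complex(Math.Cos(theta), Math.Sin(theta));
                        }
                    }
                    fourier[l, k] = sum / Math.Sqrt(inMat.Height * inMat.Width);
                }
            }
            return fourier;
        }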
Example 32
        //javadoc: NormalBayesClassifier::predictProb(inputs, outputs, outputProbs)
        public float predictProb(Mat inputs, Mat outputs, Mat outputProbs)
        {
            ThrowIfDisposed();
            if (inputs != null)
            {
                inputs.ThrowIfDisposed();
            }
            if (outputs != null)
            {
                outputs.ThrowIfDisposed();
            }
            if (outputProbs != null)
            {
                outputProbs.ThrowIfDisposed();
            }

#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            float retVal = ml_NormalBayesClassifier_predictProb_11(nativeObj, inputs.nativeObj, outputs.nativeObj, outputProbs.nativeObj);

            return(retVal);
#else
            return(-1);
#endif
        }
Example 33
        private void DetectInRegion(Mat img, Rect r, List <Rect> detectedObjectsInRegions)
        {
            Rect r0 = new Rect(new Point(), img.size());
            Rect r1 = new Rect(r.x, r.y, r.width, r.height);

            Rect.inflate(r1, (int)((r1.width * coeffTrackingWindowSize) - r1.width) / 2,
                         (int)((r1.height * coeffTrackingWindowSize) - r1.height) / 2);
            r1 = Rect.intersect(r0, r1);

            if ((r1.width <= 0) || (r1.height <= 0))
            {
                Debug.Log("DetectionBasedTracker::detectInRegion: Empty intersection");
                return;
            }

            int d = Math.Min(r.width, r.height);

            d = (int)Math.Round(d * coeffObjectSizeToTrack);

            MatOfRect tmpobjects = new MatOfRect();

            Mat img1 = new Mat(img, r1); //subimage for rectangle -- without data copying

            cascade.detectMultiScale(img1, tmpobjects, 1.1, 2, 0 | Objdetect.CASCADE_DO_CANNY_PRUNING | Objdetect.CASCADE_SCALE_IMAGE | Objdetect.CASCADE_FIND_BIGGEST_OBJECT, new Size(d, d), new Size());


            Rect[] tmpobjectsArray = tmpobjects.toArray();
            int    len             = tmpobjectsArray.Length;

            for (int i = 0; i < len; i++)
            {
                Rect tmp    = tmpobjectsArray [i];
                Rect curres = new Rect(new Point(tmp.x + r1.x, tmp.y + r1.y), tmp.size());
                detectedObjectsInRegions.Add(curres);
            }
        }
Example 34
        public List <Tensor <float> > GetTensors()
        {
            if (box == null)
            {
                return(null);
            }

            if (box.Count == 0)
            {
                return(null);
            }

            List <Tensor <float> > tensors = new List <Tensor <float> >();

            foreach (List <Point> theRealBox in box.Values)
            {
                Mat roiMat = new Mat();
                GetRoiFromBox(roiMat, theRealBox);
                Tensor <float> tensorForBox = GetTensorInputFromImg(roiMat);
                tensors.Add(tensorForBox);
            }

            return(tensors);
        }
Example 35
        /// <summary>
        /// Recognize text using the tesseract-ocr API.
        /// Takes image on input and returns recognized text in the output_text parameter.
        /// Optionally provides also the Rects for individual text elements found(e.g.words),
        /// and the list of those text elements with their confidence values.
        /// </summary>
        /// <param name="image">Input image CV_8UC1 or CV_8UC3</param>
        /// <param name="outputText">Output text of the tesseract-ocr.</param>
        /// <param name="componentRects">If provided the method will output a list of Rects for the individual
        /// text elements found(e.g.words or text lines).</param>
        /// <param name="componentTexts">If provided the method will output a list of text strings for the
        /// recognition of individual text elements found(e.g.words or text lines).</param>
        /// <param name="componentConfidences">If provided the method will output a list of confidence values
        /// for the recognition of individual text elements found(e.g.words or text lines).</param>
        /// <param name="componentLevel">OCR_LEVEL_WORD (by default), or OCR_LEVEL_TEXT_LINE.</param>
        public override void Run(
            Mat image,
            out string outputText,
            out Rect[] componentRects,
            out string?[] componentTexts,
            out float[] componentConfidences,
            ComponentLevels componentLevel = ComponentLevels.Word)
        {
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            image.ThrowIfDisposed();

            using (var outputTextString = new StdString())
                using (var componentRectsVector = new VectorOfRect())
                    using (var componentTextsVector = new VectorOfString())
                        using (var componentConfidencesVector = new VectorOfFloat())
                        {
                            NativeMethods.text_OCRTesseract_run1(
                                ptr,
                                image.CvPtr,
                                outputTextString.CvPtr,
                                componentRectsVector.CvPtr,
                                componentTextsVector.CvPtr,
                                componentConfidencesVector.CvPtr,
                                (int)componentLevel);

                            outputText           = outputTextString.ToString();
                            componentRects       = componentRectsVector.ToArray();
                            componentTexts       = componentTextsVector.ToArray();
                            componentConfidences = componentConfidencesVector.ToArray();
                        }

            GC.KeepAlive(image);
        }
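A usage sketch for the override above (the tessdata path, language, image file, and OCRTesseract.Create arguments are assumptions):
        public static void RunOcrExample()
        {
            using (var ocr = OCRTesseract.Create(null, "eng"))
            using (var img = new Mat("text.png", ImreadModes.Grayscale))
            {
                ocr.Run(img, out var text, out var rects, out var texts, out var confidences);
                Console.WriteLine("Recognized text: {0} ({1} elements)", text, rects.Length);
            }
        }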
Example 36
        private void DCTTransformBitsToBlock()
        {
            int i;
            dctarray[0, 0] = 1024;

            for (i = 1; i < options.cellCount + 1; i++)
            {
                byte[] temp = Input.Take(options.density);

                SnakeArraySet(dctarray, i, BitsToCell(temp));
            }
            for (i = (options.cellCount) + 1; i < 64; i++)
            {
                SnakeArraySet(dctarray, i, 0);
            }

            byte[] ar = new byte[64];
            DFFrameBlock blok = new DFFrameBlock(ar);
            Mat mat = block.Body.Idct();
            mat.ConvertTo(blok.Body, MatType.CV_8U);
            mat.Dispose();
            Output.Add(blok.ToArray());
            blok.Free();
        }
Example 37
        public static Mat segment(Mat modelSizeImage)
        {
            float[] dataArray          = mat2tensorArray(modelSizeImage);
            float[] segmentationResult = call_dll_SendArray(dataArray);

            byte[] maskImageData = new byte[Constant.MODEL_HEIGHT * Constant.MODEL_WIDTH];

            for (var i = 0; i < maskImageData.Length; i++)
            {
                float[] pixel = new float[Constant.NUM_OF_CLASS];
                for (var j = 0; j < pixel.Length; j++)
                {
                    pixel[j] = segmentationResult[i * Constant.NUM_OF_CLASS + j];
                }
                // pick the most probable class: 0 (background), 1-5 (parts)
                maskImageData[i] = (byte)softmax(pixel);
            }

            Mat maskImage = new Mat(Constant.MODEL_HEIGHT, Constant.MODEL_WIDTH, CvType.CV_8UC1);

            maskImage.put(0, 0, maskImageData);

            return(maskImage);
        }
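The softmax helper called above is not included in the snippet; a minimal sketch, assuming it only needs to return the index of the highest-scoring class (an argmax over the per-pixel scores):
        // Assumed helper: returns the index of the largest score (argmax).
        private static int softmax(float[] scores)
        {
            int best = 0;
            for (var i = 1; i < scores.Length; i++)
            {
                if (scores[i] > scores[best])
                {
                    best = i;
                }
            }
            return best;
        }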
Example 38
        private void _processImage()
        {
            using (var buffA = new Mat(_img.Size, DepthType.Cv8U, 1))
            {
                CvInvoke.CvtColor(_img, buffA, ColorConversion.Bgr2Gray);

                if (_debug)
                {
                    Util.DebugViewImage(buffA);
                }

                if (_gaussBlur > 0)
                {
                    using (var buffB = new Mat(_img.Size, DepthType.Cv8U, 1))
                    {
                        CvInvoke.GaussianBlur(buffA, buffB, new Size(_gaussBlur, _gaussBlur), 0);

                        if (_debug)
                        {
                            Util.DebugViewImage(buffB);
                        }

                        CvInvoke.Canny(buffB, _mat, _cannyThreshold, _cannyThreshold * 3);
                    }
                }
                else
                {
                    CvInvoke.Canny(buffA, _mat, _cannyThreshold, _cannyThreshold * 2);
                }

                if (_debug)
                {
                    Util.DebugViewImage(_mat);
                }
            }
        }
Example 39
        public static Bitmap ImgPj(List <Bitmap> strfile)
        {
            Bitmap bmp = null;
            Mat    outimg;

            try {
                List <Mat> listmat = new List <Mat>();
                for (int i = 0; i < strfile.Count; i++)
                {
                    Bitmap            bmp1 = strfile[i];
                    Image <Bgr, byte> a    = new Image <Bgr, byte>(bmp1);
                    listmat.Add(a.Mat);
                }
                Stitcher stitcher = new Stitcher(false);
                outimg = new Mat();
                if (T_ConFigure.SfName.Trim().Length > 0)
                {
                    stitcher.Stitch(new VectorOfMat(listmat.ToArray()), outimg);
                }
            } catch {
                return(bmp);
            }
            return(outimg.Bitmap);
        }
Example 40
        public L3DS()
        {
            InitializeComponent();
            SetCultureInfo();
            splitContainer1.MinimumSize = new Size(400, splitContainer1.Height);
            frame         = new Mat();
            tkWidget      = new OpenTKWidget();
            tkWidget.Dock = DockStyle.Fill;
            splitContainer1.Panel2.Controls.Add(tkWidget);

            /*
             * Mat laser = CvInvoke.Imread("laser.png");
             * Mat bg = CvInvoke.Imread("laser_background.png");
             *
             * float[] lu, lv, ru, rv;
             *
             * Laser.Instance.GenerateWeightMatrix(1280, 360);
             *
             * Stopwatch ws = new Stopwatch();
             * ws.Start();
             *
             * float[] x, y, z;
             *
             * for (int i = 0; i < 2000; i++)
             * {
             *
             *  Laser.Instance.ProcessingImageCL(laser, bg, out lu, out lv, out ru, out rv);
             *  Laser.Instance.TransformPixelsTo3DPointCL(lu, lv, 0.18f, 0.18f, 0.18f, 0.0f, out x, out y, out z);
             *  Laser.Instance.TransformPixelsTo3DPointCL(ru, rv, 0.18f, 0.18f, 0.18f, 0.0f, out x, out y, out z);
             *
             *  lu = lv = ru = rv = x = y = z = null;
             * }
             *
             * ws.Stop();
             * Debug.WriteLine(ws.Elapsed);*/
        }
Example 41
        public void SolvePnPTestByMat()
        {
            var rvec         = new double[] { 0, 0, 0 };
            var tvec         = new double[] { 0, 0, 0 };
            var cameraMatrix = new double[3, 3]
            {
                { 1, 0, 0 },
                { 0, 1, 0 },
                { 0, 0, 1 }
            };
            var dist = new double[] { 0, 0, 0, 0, 0 };

            var objPts = new Point3f[]
            {
                new Point3f(0, 0, 1),
                new Point3f(1, 0, 1),
                new Point3f(0, 1, 1),
                new Point3f(1, 1, 1),
                new Point3f(1, 0, 2),
                new Point3f(0, 1, 2)
            };

            double[,] jacobian;
            Point2f[] imgPts;
            Cv2.ProjectPoints(objPts, rvec, tvec, cameraMatrix, dist, out imgPts, out jacobian);

            using (var objPtsMat = new Mat(objPts.Length, 1, MatType.CV_32FC3, objPts))     // wrap the object points
                using (var imgPtsMat = new Mat(imgPts.Length, 1, MatType.CV_32FC2, imgPts)) // wrap the projected image points
                    using (var cameraMatrixMat = Mat.Eye(3, 3, MatType.CV_64FC1))
                        using (var distMat = Mat.Zeros(5, 0, MatType.CV_64FC1))
                            using (var rvecMat = new Mat())
                                using (var tvecMat = new Mat())
                                {
                                    Cv2.SolvePnP(objPtsMat, imgPtsMat, cameraMatrixMat, distMat, rvecMat, tvecMat);
                                }
        }
Example 42
        private List <byte[]> GetFaces(byte[] image)
        {
            Mat src = Cv2.ImDecode(image, ImreadModes.Color);

            // Convert the byte array into jpeg image and Save the image coming from the source
            //in the root directory for testing purposes.
            src.SaveImage(@"Images\image.jpg", new ImageEncodingParam(ImwriteFlags.JpegProgressive, 255));
            var file        = Path.Combine(Directory.GetCurrentDirectory(), "CascadeFile", "haarcascade_frontalface_default.xml");
            var faceCascade = new CascadeClassifier();

            faceCascade.Load(file);
            var faces    = faceCascade.DetectMultiScale(src, 1.1, 6, HaarDetectionType.DoRoughSearch, new Size(60, 60));
            var faceList = new List <byte[]>();
            int j        = 0;

            foreach (var rect in faces)
            {
                var faceImage = new Mat(src, rect);
                faceList.Add(faceImage.ToBytes(".jpg"));
                faceImage.SaveImage(@"Images\face" + j + ".jpg", new ImageEncodingParam(ImwriteFlags.JpegProgressive, 255));
                j++;
            }
            return(faceList);
        }
Example 43
        public Bitmap gaussFilter(Bitmap bmpMyBitmap)
        {
            using (Image <Gray, Byte> img = new Image <Gray, Byte>(bmpMyBitmap))
            {
                // Grayscale image
                Image <Gray, byte> gray = new Image <Gray, byte>(bmpMyBitmap);
                //use image pyr to remove noise
                UMat pyrDown = new UMat();
                CvInvoke.PyrDown(gray, pyrDown);
                CvInvoke.PyrUp(pyrDown, gray);

                // Gaussian filter to remove noise
                Mat src1 = new Mat();
                src1 = img.Mat;
                Mat dstGaussBlur = new Mat();
                CvInvoke.Canny(gray, src1, 90, 50);
                CvInvoke.GaussianBlur(src1, dstGaussBlur, new System.Drawing.Size(25, 25), 0); // for more detail, lower the kernel size (e.g. 15x15); the larger the kernel, the more noise is filtered out and the less remains to binarize
                // Binarize to isolate the block
                Mat dstBinary = new Mat();
                CvInvoke.Threshold(dstGaussBlur, dstBinary, 10, 255, ThresholdType.Binary);

                return(dstBinary.Bitmap);
            }
        }
Example 44
        public void Run()
        {
            const string protoTxt    = @"Data\Text\bvlc_googlenet.prototxt";
            const string caffeModel  = "bvlc_googlenet.caffemodel";
            const string synsetWords = @"Data\Text\synset_words.txt";
            var          classNames  = File.ReadAllLines(synsetWords)
                                       .Select(line => line.Split(' ').Last())
                                       .ToArray();

            Console.Write("Downloading Caffe Model...");
            PrepareModel(caffeModel);
            Console.WriteLine(" Done");

            using (var net = CvDnn.ReadNetFromCaffe(protoTxt, caffeModel))
                using (var img = new Mat(@"Data\Image\space_shuttle.jpg"))
                {
                    Console.WriteLine("Layer names: {0}", string.Join(", ", net.GetLayerNames()));
                    Console.WriteLine();

                    // Convert Mat to batch of images
                    using (var inputBlob = CvDnn.BlobFromImage(img, 1, new Size(224, 224), new Scalar(104, 117, 123)))
                    {
                        net.SetInput(inputBlob, "data");
                        using (var prob = net.Forward("prob"))
                        {
                            // find the best class
                            GetMaxClass(prob, out int classId, out double classProb);
                            Console.WriteLine("Best class: #{0} '{1}'", classId, classNames[classId]);
                            Console.WriteLine("Probability: {0:P2}", classProb);

                            Console.WriteLine("Press any key to exit");
                            Console.Read();
                        }
                    }
                }
        }
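GetMaxClass and PrepareModel are referenced above but not included in the snippet; a minimal sketch of GetMaxClass, assuming the "prob" blob can be flattened to a single row and scanned for its maximum:
        private static void GetMaxClass(Mat probBlob, out int classId, out double classProb)
        {
            // reshape the 1x1000 probability blob to a single-row matrix and find its maximum
            using (var probMat = probBlob.Reshape(1, 1))
            {
                Cv2.MinMaxLoc(probMat, out _, out classProb, out _, out Point classNumber);
                classId = classNumber.X;
            }
        }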
Example 45
 public override void ViewDidLoad()
 {
     base.ViewDidLoad();
     ButtonText          = "Match";
     base.OnButtonClick +=
         delegate
     {
         long processingTime;
         Size frameSize = FrameSize;
         using (Mat modelImage = CvInvoke.Imread("box.png", Emgu.CV.CvEnum.ImreadModes.Grayscale))
             using (Mat observedImage = CvInvoke.Imread("box_in_scene.png", Emgu.CV.CvEnum.ImreadModes.Grayscale))
                 using (Mat image = DrawMatches.Draw(modelImage, observedImage, out processingTime))
                     using (Mat resized = new Mat())
                     {
                         double dx  = ((double)frameSize.Width) / image.Width;
                         double dy  = ((double)frameSize.Height) / image.Height;
                         double min = Math.Min(dx, dy);
                         CvInvoke.Resize(image, resized, Size.Empty, min, min);
                         //image.Resize(frameSize.Width, frameSize.Height, Emgu.CV.CvEnum.Inter.Nearest, true)
                         MessageText = String.Format("Matching Time: {0} milliseconds.", processingTime);
                         SetImage(resized);
                     }
     };
 }
Example 46
    // Segmentation using findContours
    private static Mat MyFindLargestRectangle(Mat original_image)
    {
        Mat imgSource = original_image;

        Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);
        Imgproc.Canny(imgSource, imgSource, 50, 50);
        Imgproc.GaussianBlur(imgSource, imgSource, new Size(5, 5), 5);
        List <MatOfPoint> contours = new List <MatOfPoint>();

        Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
        double       maxArea         = 0;
        int          maxAreaIdx      = -1;
        MatOfPoint   largest_contour = contours[0];
        MatOfPoint2f approxCurve     = new MatOfPoint2f();

        for (int idx = 0; idx < contours.Count; idx++)
        {
            MatOfPoint temp_contour = contours[idx];
            double     contourarea  = Imgproc.contourArea(temp_contour);
            if (contourarea - maxArea > 1)
            {
                maxArea         = contourarea;
                largest_contour = temp_contour;
                maxAreaIdx      = idx;
                MatOfPoint2f new_mat     = new MatOfPoint2f(temp_contour.toArray());
                int          contourSize = (int)temp_contour.total();
                Imgproc.approxPolyDP(new_mat, approxCurve, contourSize * 0.05, true);
            }
        }

        Imgproc.drawContours(imgSource, contours, -1, new Scalar(255, 0, 0), 1);
        Imgproc.fillConvexPoly(imgSource, largest_contour, new Scalar(255, 255, 255));
        Imgproc.drawContours(imgSource, contours, maxAreaIdx, new Scalar(0, 0, 255), 3);

        return(imgSource);
    }
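Note that approxCurve is filled for the largest contour but never used afterwards. A hypothetical follow-up, in the same Java-style OpenCV binding as the snippet (names and usage are assumptions), could turn the approximated polygon into an axis-aligned bounding rectangle:

        // Hypothetical continuation: bounding rectangle of the approximated largest contour.
        var boundingBox = Imgproc.boundingRect(new MatOfPoint(approxCurve.toArray()));
        // Crop the source to that region (assumes boundingBox lies fully inside the image).
        Mat cropped = new Mat(original_image, boundingBox);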
Esempio n. 47
0
 private void runCamera()
 {
     while (true)
     {
         try
         {
             if (conditionRunCam)
             {
                 if (captureV == null)
                 {
                     captureV = new VideoCapture(0);
                 }
                 captureV.Read(m);
                 Mat tempImage = new Mat();
                 CvInvoke.Resize(m, tempImage, new Size(imageBox1.Width, imageBox1.Height), 0, 0, Emgu.CV.CvEnum.Inter.Linear);
                 if (!m.IsEmpty)
                 {
                     imageBox1.Image = tempImage;
                 }
                 Thread.Sleep(50);
             }
             //if (!conditionRunCam)
             //    if (captureV != null) captureV.Dispose();
             plcFX3G.GetDevice("X13", out buttonRead);
             if (buttonRead == 1)
             {
                 Invoke(new MethodInvoker(delegate { btnCapture.PerformClick(); }));
                 break;
             }
         }
         catch
         {
             return;
         }
     }
 }
Esempio n. 48
0
 public ColorAnalyser(Mat image)
 {
     _image = image;
 }
Esempio n. 49
0
        private void ProcessImage(IInputOutputArray image)
        {
            Stopwatch watch = Stopwatch.StartNew(); // time the detection process

            List <IInputOutputArray> licensePlateImagesList         = new List <IInputOutputArray>();
            List <IInputOutputArray> filteredLicensePlateImagesList = new List <IInputOutputArray>();
            List <RotatedRect>       licenseBoxList = new List <RotatedRect>();

            var found = new List <string>();

            for (double rWidth = 1; rWidth < 12; rWidth += 0.2)
            {
                for (double rHeight = 1; rHeight < 12; rHeight += 0.2)
                {
                    List <string> words1 = _licensePlateDetector.DetectLicensePlate(
                        image,
                        licensePlateImagesList,
                        filteredLicensePlateImagesList,
                        licenseBoxList, rWidth, rHeight);

                    if (words1.Any())
                    {
                        var f = $"FOUND: {rWidth}-{rHeight} = {string.Concat(words1)}";

                        found.Add(f);

                        Console.WriteLine(f);
                    }
                    else
                    {
                        //Console.WriteLine($"FAILED: {rWidth}-{rHeight}");
                    }
                }
            }
            // NOTE: with the parameter sweep above, the results are collected in 'found';
            // 'words' stays empty here, so the drawing loop below does not execute.
            List <string> words = new List <string>();

            //List<string> words = _licensePlateDetector.DetectLicensePlate(
            //   image,
            //   licensePlateImagesList,
            //   filteredLicensePlateImagesList,
            //   licenseBoxList, 6, 12);

            watch.Stop(); //stop the timer
            //processTimeLabel.Text = String.Format("License Plate Recognition time: {0} milli-seconds", watch.Elapsed.TotalMilliseconds);

            //panel1.Controls.Clear();
            System.Drawing.Point startPoint = new System.Drawing.Point(10, 10);
            for (int i = 0; i < words.Count; i++)
            {
                Mat dest = new Mat();
                CvInvoke.VConcat(licensePlateImagesList[i], filteredLicensePlateImagesList[i], dest);
                AddLabelAndImage(
                    ref startPoint,
                    String.Format("License: {0}", words[i]),
                    dest);
                PointF[] verticesF = licenseBoxList[i].GetVertices();
                System.Drawing.Point[] vertices = Array.ConvertAll(verticesF, System.Drawing.Point.Round);
                using (VectorOfPoint pts = new VectorOfPoint(vertices))
                    CvInvoke.Polylines(image, pts, true, new Bgr(System.Drawing.Color.Red).MCvScalar, 2);
            }
        }
Esempio n. 50
0
        /// <summary>
        /// Draws a single prediction (bounding box and class label) on the frame.
        /// </summary>
        /// <param name="classId">Class identifier.</param>
        /// <param name="conf">Confidence score of the detection.</param>
        /// <param name="left">Left coordinate of the bounding box.</param>
        /// <param name="top">Top coordinate of the bounding box.</param>
        /// <param name="right">Right coordinate of the bounding box.</param>
        /// <param name="bottom">Bottom coordinate of the bounding box.</param>
        /// <param name="frame">Frame to draw on.</param>
        private void drawPred(int classId, float conf, int left, int top, int right, int bottom, Mat frame)
        {
            Imgproc.rectangle(frame, new Point(left, top), new Point(right, bottom), new Scalar(0, 255, 0, 255), 2);

            string label = conf.ToString();

            if (classNames != null && classNames.Count != 0)
            {
                if (classId < (int)classNames.Count)
                {
                    label = classNames[classId] + ": " + label;
                }
            }

            int[] baseLine  = new int[1];
            Size  labelSize = Imgproc.getTextSize(label, Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, 1, baseLine);

            top = Mathf.Max(top, (int)labelSize.height);
            Imgproc.rectangle(frame, new Point(left, top - labelSize.height),
                              new Point(left + labelSize.width, top + baseLine[0]), Scalar.all(255), Core.FILLED);
            Imgproc.putText(frame, label, new Point(left, top), Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(0, 0, 0, 255));
        }
        //watershed

        /* public static Mat C_Image_Watershed(Mat image, Mat Origi_Image, Scalar color)
         * {
         *   var componentCount = 0;
         *   var rnd = new Random();
         *   var watershedImage=new Mat();
         *   OpenCvSharp.Point[][] contours_data;
         *   HierarchyIndex[] hierarchyIndexes;
         *   Mat img = C_Image_GrayValue(image);
         *
         *   Cv2.FindContours(img, out contours_data, out hierarchyIndexes, RetrievalModes.CComp, ContourApproximationModes.ApproxSimple);
         *
         *   Mat markers= new Mat(Origi_Image.Size(), MatType.CV_32S, s: Scalar.All(0));
         *   for (int i = 0; i < contours_data.Length; i++)
         *   {
         *       Cv2.DrawContours(markers, contours_data, i, Scalar.All(componentCount + 1), -1, LineTypes.Link8, hierarchyIndexes);
         *       componentCount++;
         *   }
         *   if (componentCount != 0)
         *   {
         *       var colorTable = new List<Vec3b>();
         *       for (var i = 0; i < componentCount; i++)
         *       {
         *           //colorTable.Add(color.ToVec3b());
         *           var b = rnd.Next(0, 255); //Cv2.TheRNG().Uniform(0, 255);
         *           var g = rnd.Next(0, 255); //Cv2.TheRNG().Uniform(0, 255);
         *           var r = rnd.Next(0, 255); //Cv2.TheRNG().Uniform(0, 255);
         *
         *           colorTable.Add(new Vec3b((byte)b, (byte)g, (byte)r));
         *       }
         *
         *       Cv2.Watershed(Origi_Image, markers);
         *
         *       Cv2.ImShow("test", markers);
         *        watershedImage = new Mat(markers.Size(), MatType.CV_8UC3);
         *
         *       // paint the watershed image
         *       for (var i = 0; i < markers.Rows; i++)
         *       {
         *           for (var j = 0; j < markers.Cols; j++)
         *           {
         *               var idx = markers.At<int>(i, j);
         *               if (idx == -1)
         *               {
         *                   watershedImage.Set(i, j, new Vec3b(255, 255, 255));
         *               }
         *               else if (idx <= 0 || idx > componentCount)
         *               {
         *                   watershedImage.Set(i, j, new Vec3b(0, 0, 0));
         *               }
         *               else
         *               {
         *                   //watershedImage.Set(i, j, color.ToVec3b());
         *                  watershedImage.Set(i, j, colorTable[idx - 1]);
         *               }
         *           }
         *       }
         *      // watershedImage = watershedImage * 0.5 + Origi_Image * 0.5;
         *       // watershedImage = watershedImage * 0.5;
         *   }
         *
         *
         *   return watershedImage;
         * }*/

        //Convert from Mat to Bitmap
        public static Bitmap MatToBitmap(Mat image)
        {
            return(OpenCvSharp.Extensions.BitmapConverter.ToBitmap(image));
        }
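A short, hypothetical usage of the converter above (the file name and the PictureBox control are assumptions), e.g. for displaying an OpenCvSharp Mat in Windows Forms:

        // Hypothetical usage: load an image with OpenCvSharp and show it in a PictureBox.
        // BitmapConverter.ToBitmap copies the pixel data, so the Mat can be disposed afterwards.
        using (var mat = new Mat("input.png"))
        {
            pictureBox1.Image = MatToBitmap(mat);
        }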
Esempio n. 52
0
        // Use this for initialization
        void Run()
        {
            //If true, error logs from the native OpenCV side are displayed in the Unity Editor console.
            Utils.setDebugMode(true);

            if (!string.IsNullOrEmpty(classes))
            {
                classNames = readClassNames(classes_filepath);
                if (classNames == null)
                {
                    Debug.LogError(classes_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
                }
            }
            else if (classesList.Count > 0)
            {
                classNames = classesList;
            }

            Mat img = Imgcodecs.imread(input_filepath);

            if (img.empty())
            {
                Debug.LogError(input_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
                img = new Mat(424, 640, CvType.CV_8UC3, new Scalar(0, 0, 0));
            }


            //Adjust Quad.transform.localScale.
            gameObject.transform.localScale = new Vector3(img.width(), img.height(), 1);
            Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            float imageWidth  = img.width();
            float imageHeight = img.height();

            float widthScale  = (float)Screen.width / imageWidth;
            float heightScale = (float)Screen.height / imageHeight;

            if (widthScale < heightScale)
            {
                Camera.main.orthographicSize = (imageWidth * (float)Screen.height / (float)Screen.width) / 2;
            }
            else
            {
                Camera.main.orthographicSize = imageHeight / 2;
            }


            Net net = null;

            if (string.IsNullOrEmpty(config_filepath) || string.IsNullOrEmpty(model_filepath))
            {
                Debug.LogError(config_filepath + " or " + model_filepath + " is not loaded. Please see \"StreamingAssets/dnn/setup_dnn_module.pdf\". ");
            }
            else
            {
                //! [Initialize network]
                net = Dnn.readNet(model_filepath, config_filepath);
                //! [Initialize network]
            }


            if (net == null)
            {
                Imgproc.putText(img, "model file is not loaded.", new Point(5, img.rows() - 30), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
                Imgproc.putText(img, "Please read console message.", new Point(5, img.rows() - 10), Imgproc.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255), 2, Imgproc.LINE_AA, false);
            }
            else
            {
                outBlobNames = getOutputsNames(net);
                //for (int i = 0; i < outBlobNames.Count; i++)
                //{
                //    Debug.Log("names [" + i + "] " + outBlobNames[i]);
                //}

                outBlobTypes = getOutputsTypes(net);
                //for (int i = 0; i < outBlobTypes.Count; i++)
                //{
                //    Debug.Log("types [" + i + "] " + outBlobTypes[i]);
                //}


                // Create a 4D blob from a frame.
                Size inpSize = new Size(inpWidth > 0 ? inpWidth : img.cols(),
                                        inpHeight > 0 ? inpHeight : img.rows());
                Mat blob = Dnn.blobFromImage(img, scale, inpSize, mean, swapRB, false);


                // Run a model.
                net.setInput(blob);

                if (net.getLayer(new DictValue(0)).outputNameToIndex("im_info") != -1)
                {  // Faster-RCNN or R-FCN
                    Imgproc.resize(img, img, inpSize);
                    Mat imInfo = new Mat(1, 3, CvType.CV_32FC1);
                    imInfo.put(0, 0, new float[] {
                        (float)inpSize.height,
                        (float)inpSize.width,
                        1.6f
                    });
                    net.setInput(imInfo, "im_info");
                }


                TickMeter tm = new TickMeter();
                tm.start();


                List <Mat> outs = new List <Mat>();
                net.forward(outs, outBlobNames);


                tm.stop();
                Debug.Log("Inference time, ms: " + tm.getTimeMilli());


                postprocess(img, outs, net);

                for (int i = 0; i < outs.Count; i++)
                {
                    outs[i].Dispose();
                }
                blob.Dispose();
                net.Dispose();
            }


            Imgproc.cvtColor(img, img, Imgproc.COLOR_BGR2RGB);

            Texture2D texture = new Texture2D(img.cols(), img.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(img, texture);

            gameObject.GetComponent <Renderer>().material.mainTexture = texture;


            Utils.setDebugMode(false);
        }
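Run() relies on getOutputsNames and getOutputsTypes helpers that are not shown here. A minimal sketch of getOutputsNames, assuming the OpenCVForUnity API used above (the getUnconnectedOutLayers / Layer.get_name() calls mirror the standard OpenCV DNN sample and are assumptions), could be:

        // Hypothetical helper: collects the names of the unconnected (output) layers of the net.
        private List<string> getOutputsNames(Net net)
        {
            List<string> names = new List<string>();
            MatOfInt outLayers = net.getUnconnectedOutLayers();
            for (int i = 0; i < outLayers.total(); ++i)
            {
                int layerId = (int)outLayers.get(i, 0)[0];
                names.Add(net.getLayer(new DictValue(layerId)).get_name());
            }
            outLayers.Dispose();
            return names;
        }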
Esempio n. 53
0
        /// <summary>
        /// Postprocesses the network outputs: extracts detections, applies non-maximum suppression and draws them on the frame.
        /// </summary>
        /// <param name="frame">Frame the detections are drawn on.</param>
        /// <param name="outs">Output blobs produced by the network.</param>
        /// <param name="net">The network (used to inspect its first layer and output type).</param>
        private void postprocess(Mat frame, List <Mat> outs, Net net)
        {
            string outLayerType = outBlobTypes[0];


            List <int>   classIdsList    = new List <int>();
            List <float> confidencesList = new List <float>();
            List <OpenCVForUnity.CoreModule.Rect> boxesList = new List <OpenCVForUnity.CoreModule.Rect>();

            if (net.getLayer(new DictValue(0)).outputNameToIndex("im_info") != -1)
            {
                // Faster-RCNN or R-FCN
                // The network produces an output blob with shape 1x1xNx7, where N is the number of
                // detections and every detection is a vector of values
                // [batchId, classId, confidence, left, top, right, bottom]

                if (outs.Count == 1)
                {
                    outs[0] = outs[0].reshape(1, (int)outs[0].total() / 7);

                    //Debug.Log ("outs[i].ToString() " + outs [0].ToString ());

                    float[] data = new float[7];

                    for (int i = 0; i < outs[0].rows(); i++)
                    {
                        outs[0].get(i, 0, data);

                        float confidence = data[2];

                        if (confidence > confThreshold)
                        {
                            int class_id = (int)(data[1]);

                            int left   = (int)(data[3] * frame.cols());
                            int top    = (int)(data[4] * frame.rows());
                            int right  = (int)(data[5] * frame.cols());
                            int bottom = (int)(data[6] * frame.rows());
                            int width  = right - left + 1;
                            int height = bottom - top + 1;

                            classIdsList.Add((int)(class_id) - 0);
                            confidencesList.Add((float)confidence);
                            boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                        }
                    }
                }
            }
            else if (outLayerType == "DetectionOutput")
            {
                // The network produces an output blob with shape 1x1xNx7, where N is the number of
                // detections and every detection is a vector of values
                // [batchId, classId, confidence, left, top, right, bottom]

                if (outs.Count == 1)
                {
                    outs[0] = outs[0].reshape(1, (int)outs[0].total() / 7);

                    //Debug.Log ("outs[i].ToString() " + outs [0].ToString ());

                    float[] data = new float[7];

                    for (int i = 0; i < outs[0].rows(); i++)
                    {
                        outs[0].get(i, 0, data);

                        float confidence = data[2];

                        if (confidence > confThreshold)
                        {
                            int class_id = (int)(data[1]);

                            int left   = (int)(data[3] * frame.cols());
                            int top    = (int)(data[4] * frame.rows());
                            int right  = (int)(data[5] * frame.cols());
                            int bottom = (int)(data[6] * frame.rows());
                            int width  = right - left + 1;
                            int height = bottom - top + 1;

                            classIdsList.Add((int)(class_id) - 0);
                            confidencesList.Add((float)confidence);
                            boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                        }
                    }
                }
            }
            else if (outLayerType == "Region")
            {
                for (int i = 0; i < outs.Count; ++i)
                {
                    // The network produces an output blob with shape NxC, where N is the number of
                    // detected objects and C is the number of classes + 4; the first 4
                    // numbers are [center_x, center_y, width, height]

                    //Debug.Log ("outs[i].ToString() "+outs[i].ToString());

                    float[] positionData   = new float[5];
                    float[] confidenceData = new float[outs[i].cols() - 5];

                    for (int p = 0; p < outs[i].rows(); p++)
                    {
                        outs[i].get(p, 0, positionData);

                        outs[i].get(p, 5, confidenceData);

                        int   maxIdx     = confidenceData.Select((val, idx) => new { V = val, I = idx }).Aggregate((max, working) => (max.V > working.V) ? max : working).I;
                        float confidence = confidenceData[maxIdx];

                        if (confidence > confThreshold)
                        {
                            int centerX = (int)(positionData[0] * frame.cols());
                            int centerY = (int)(positionData[1] * frame.rows());
                            int width   = (int)(positionData[2] * frame.cols());
                            int height  = (int)(positionData[3] * frame.rows());
                            int left    = centerX - width / 2;
                            int top     = centerY - height / 2;

                            classIdsList.Add(maxIdx);
                            confidencesList.Add((float)confidence);
                            boxesList.Add(new OpenCVForUnity.CoreModule.Rect(left, top, width, height));
                        }
                    }
                }
            }
            else
            {
                Debug.Log("Unknown output layer type: " + outLayerType);
            }


            MatOfRect boxes = new MatOfRect();

            boxes.fromList(boxesList);

            MatOfFloat confidences = new MatOfFloat();

            confidences.fromList(confidencesList);


            MatOfInt indices = new MatOfInt();

            Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

            //Debug.Log ("indices.dump () "+indices.dump ());
            //Debug.Log ("indices.ToString () "+indices.ToString());

            for (int i = 0; i < indices.total(); ++i)
            {
                int idx = (int)indices.get(i, 0)[0];
                OpenCVForUnity.CoreModule.Rect box = boxesList[idx];
                drawPred(classIdsList[idx], confidencesList[idx], box.x, box.y,
                         box.x + box.width, box.y + box.height, frame);
            }

            indices.Dispose();
            boxes.Dispose();
            confidences.Dispose();
        }
Esempio n. 54
0
        public static void AnalyseSectionBackground(Section section, Rect[] rects, Mat image)
        {
            Vec3b[] colors;

            if (section.Layout.Type == Layout.LayoutType.Centered)
            {
                // we want to analyse pixels outside of the container

                var spaceWidth = (section.Rect.Width - (int)section.Layout.Width) / 2;
                if (spaceWidth < 0)
                {
                    spaceWidth = image.Width / 2;
                }
                var leftFrom  = section.Rect.X;
                var leftTo    = leftFrom + spaceWidth;
                var rightFrom = section.Rect.Width - spaceWidth;
                var rightTo   = section.Rect.Width;
                var yFrom     = section.Rect.Y;
                var yTo       = section.Rect.Y + section.Rect.Height;

                var random = new Random();
                var num    = 5;
                colors = new Vec3b[num * 2];
                // sample num random pixels on each side of the container (num * 2 in total)
                for (var i = 0; i < num; i++)
                {
                    // generate random coordinates
                    var x1 = random.Next(leftFrom, leftTo);
                    var x2 = random.Next(rightFrom, rightTo);
                    var y  = random.Next(yFrom, yTo);

                    // check pixels
                    colors[i]     = image.At <Vec3b>(y, x1);
                    colors[i + 5] = image.At <Vec3b>(y, x2);
                }
            }
            else
            {
                // we want to analyse all pixels except those saved in rects array
                var random = new Random();
                var num    = 5;
                colors = new Vec3b[num];
                var found = 0;

                var xFrom = section.Rect.X;
                var xTo   = section.Rect.X + section.Rect.Width;
                var yFrom = section.Rect.Y;
                var yTo   = section.Rect.Y + section.Rect.Height;

                while (found != num)
                {
                    // generate random coordinates
                    var x = random.Next(xFrom, xTo);
                    var y = random.Next(yFrom, yTo);

                    // check if coordinates don't collide with rects
                    var collides = false;
                    for (var i = 0; i < rects.Length; i++)
                    {
                        var rect = rects[i];
                        if (rect.Contains(new Rect(x, y, 1, 1)))
                        {
                            collides = true;
                        }

                        if (collides)
                        {
                            break;
                        }
                    }

                    // if doesn't collide check background color
                    if (!collides)
                    {
                        colors[found] = image.At <Vec3b>(y, x);
                        found++;
                    }
                }
            }

            var unique = colors.Distinct().Count();

            if (unique <= 2)
            {
                // background is just one color
                var color = colors.MostCommon();
                section.BackgroundColor = new int[] { color.Item2, color.Item1, color.Item0 };
            }
            else
            {
                // background seems to be more complicated (image)
                section.BackgroundImage = $"https://via.placeholder.com/{section.Rect.Width}x{section.Rect.Height}";
            }
        }
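AnalyseSectionBackground uses a MostCommon() extension that is not part of standard LINQ. A minimal sketch, assuming it simply returns the most frequent element of the sequence (requires System.Linq and must live in a static class), could be:

        // Hypothetical extension used above: returns the element that occurs most often
        // in the sequence (ties are resolved arbitrarily).
        public static T MostCommon<T>(this IEnumerable<T> source)
        {
            return source
                .GroupBy(item => item)
                .OrderByDescending(group => group.Count())
                .First()
                .Key;
        }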
Esempio n. 55
0
        public void Recognize(Mat m)
        {
            int[] dim = new int[] { 1, m.Height, m.Width, 3 };
            if (_imageTensor == null)
            {
                _imageTensor = new Tensor(Emgu.TF.DataType.Uint8, dim);
            }
            else
            {
                if (!(_imageTensor.Type == Emgu.TF.DataType.Uint8 && Enumerable.SequenceEqual(dim, _imageTensor.Dim)))
                {
                    _imageTensor.Dispose();
                    _imageTensor = new Tensor(Emgu.TF.DataType.Uint8, dim);
                }
            }

            Emgu.TF.TensorConvert.ReadTensorFromMatBgr(m, _imageTensor);

            MaskRcnnInceptionV2Coco.RecognitionResult[] results;
            if (_coldSession)
            {
                //First run of the recognition graph: here we compile the graph and initialize the session.
                //This is expected to take much longer than subsequent runs.
                results      = _inceptionGraph.Recognize(_imageTensor)[0];
                _coldSession = false;
            }

            //Here we are trying to time the execution of the graph after it is loaded
            Stopwatch sw = Stopwatch.StartNew();

            results = _inceptionGraph.Recognize(_imageTensor)[0];
            sw.Stop();
            int goodResultCount = 0;

            foreach (var r in results)
            {
                if (r.Probability > 0.5)
                {
                    float      x1    = r.Region[0] * m.Height;
                    float      y1    = r.Region[1] * m.Width;
                    float      x2    = r.Region[2] * m.Height;
                    float      y2    = r.Region[3] * m.Width;
                    RectangleF rectf = new RectangleF(y1, x1, y2 - y1, x2 - x1);
                    Rectangle  rect  = Rectangle.Round(rectf);

                    rect.Intersect(new Rectangle(Point.Empty, m.Size)); //only keep the region that is inside the image
                    if (rect.IsEmpty)
                    {
                        continue;
                    }

                    //draw the rectangle around the region
                    CvInvoke.Rectangle(m, rect, new Emgu.CV.Structure.MCvScalar(0, 0, 255), 2);

                    #region draw the mask
                    float[,] mask = r.Mask;
                    GCHandle handle = GCHandle.Alloc(mask, GCHandleType.Pinned);
                    using (Mat mk = new Mat(new Size(mask.GetLength(1), mask.GetLength(0)), Emgu.CV.CvEnum.DepthType.Cv32F, 1, handle.AddrOfPinnedObject(), mask.GetLength(1) * sizeof(float)))
                        using (Mat subRegion = new Mat(m, rect))
                            using (Mat maskLarge = new Mat())
                                using (Mat maskLargeInv = new Mat())
                                    using (Mat largeColor = new Mat(subRegion.Size, Emgu.CV.CvEnum.DepthType.Cv8U, 3))
                                    {
                                        CvInvoke.Resize(mk, maskLarge, subRegion.Size);

                                        //give the mask at least 30% transparency
                                        using (ScalarArray sa = new ScalarArray(0.7))
                                            CvInvoke.Min(sa, maskLarge, maskLarge);

                                        //Create the inverse mask for the original image
                                        using (ScalarArray sa = new ScalarArray(1.0))
                                            CvInvoke.Subtract(sa, maskLarge, maskLargeInv);

                                        //The mask color
                                        largeColor.SetTo(new Emgu.CV.Structure.MCvScalar(255, 0, 0));

                                        CvInvoke.BlendLinear(largeColor, subRegion, maskLarge, maskLargeInv, subRegion);
                                    }
                    handle.Free();
                    #endregion

                    //draw the label
                    CvInvoke.PutText(m, r.Label, Point.Round(rect.Location), Emgu.CV.CvEnum.FontFace.HersheyComplex, 1.0, new Emgu.CV.Structure.MCvScalar(0, 255, 0), 1);

                    goodResultCount++;
                }
            }

            String resStr = String.Format("{0} objects detected in {1} milliseconds.", goodResultCount, sw.ElapsedMilliseconds);

            if (_renderMat == null)
            {
                _renderMat = new Mat();
            }
            m.CopyTo(_renderMat);
            //Bitmap bmp = _renderMat.ToBitmap();

            if (InvokeRequired)
            {
                this.Invoke((MethodInvoker)(() =>
                {
                    messageLabel.Text = resStr;
                    pictureBox.Image = _renderMat;
                }));
            }
            else
            {
                messageLabel.Text = resStr;
                pictureBox.Image  = _renderMat;
            }
        }
 /// <summary>
 /// Computes an image descriptor using the set visual vocabulary.
 /// </summary>
 /// <param name="image">Image, for which the descriptor is computed</param>
 /// <param name="keypoints">Key points detected in the input image.</param>
 /// <param name="imgDescriptors">The output image descriptors.</param>
 public void Compute(IInputArray image, VectorOfKeyPoint keypoints, Mat imgDescriptors)
 {
     using (InputArray iaImage = image.GetInputArray())
         Features2DInvoke.cveBOWImgDescriptorExtractorCompute(_ptr, iaImage, keypoints, imgDescriptors);
 }
 /// <summary>
 /// Sets a visual vocabulary.
 /// </summary>
 /// <param name="vocabulary">The vocabulary</param>
 public void SetVocabulary(Mat vocabulary)
 {
     Features2DInvoke.cveBOWImgDescriptorExtractorSetVocabulary(_ptr, vocabulary);
 }
Esempio n. 58
0
 /// <summary>
 /// Sets the new value for the layer output blob.
 /// </summary>
 /// <param name="name">Descriptor of the updating layer output blob.</param>
 /// <param name="blob">Input blob</param>
 public void SetInput(Mat blob, String name)
 {
     using (CvString outputNameStr = new CvString(name))
         DnnInvoke.cveDnnNetSetInput(_ptr, blob, outputNameStr);
 }
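For context, a minimal, hypothetical use of this wrapper with Emgu CV's DNN module might look like the following (the file names, input size, mean values and layer names are assumptions):

 // Hypothetical usage sketch of Net.SetInput for a Caffe classification model.
 using (Net net = DnnInvoke.ReadNetFromCaffe("deploy.prototxt", "model.caffemodel"))
 using (Mat img = CvInvoke.Imread("input.jpg", Emgu.CV.CvEnum.ImreadModes.Color))
 using (Mat blob = DnnInvoke.BlobFromImage(img, 1.0, new Size(224, 224), new MCvScalar(104, 117, 123)))
 {
     net.SetInput(blob, "data");             // feed the blob to the "data" input layer
     using (Mat prob = net.Forward("prob"))  // run a forward pass up to the "prob" layer
     {
         // 'prob' now holds one probability per class; the best class is the argmax.
     }
 }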
Esempio n. 59
0
        static void Main(string[] args)
        {
            SerialPort pyboard = new SerialPort("COM6", 115200);
            pyboard.Open();
            pyboard.WriteLine("import paramove\r");
            var options = new DataflowBlockOptions();
            options.BoundedCapacity = 10;
            var pipe_buffer = new BufferBlock<CamData>(options);
            bool foundfish = false;
            int l_or_r = 0; 
            MCvScalar gray = new MCvScalar(128, 128, 128);
            int roidim = 80;
            string camera_id = "img0"; //this is the ID of the NI-IMAQ board in NI MAX. 
            var _session = new ImaqSession(camera_id);

            String camerawindow = "Camera Window";
            CvInvoke.NamedWindow(camerawindow);
            int frameWidth = 1280;
            int frameHeight = 1024;
            uint bufferCount = 3;
            uint buff_out = 0;
            int numchannels = 1;
            ContourProperties fishcontour = new ContourProperties();
            System.Drawing.Size framesize = new System.Drawing.Size(frameWidth, frameHeight);
            System.Drawing.Size roi_size = new System.Drawing.Size(roidim, roidim);
            Mat cvimage = new Mat(framesize, Emgu.CV.CvEnum.DepthType.Cv8U, numchannels);
            Mat modeimage = new Mat(framesize, Emgu.CV.CvEnum.DepthType.Cv8U, numchannels);
            Mat modeimage_roi = new Mat(roi_size, Emgu.CV.CvEnum.DepthType.Cv8U, numchannels);
            byte[,] data_2D = new byte[frameHeight, frameWidth];
            byte[,] data_2D_roi = new byte[roidim, roidim];
            byte[,] imagemode = new byte[frameHeight, frameWidth];
            ImaqBuffer image = null;
            List<byte[,]> imglist = new List<byte[,]>();
            ImaqBufferCollection buffcollection = _session.CreateBufferCollection((int)bufferCount, ImaqBufferCollectionType.VisionImage);
            _session.RingSetup(buffcollection, 0, false);
            _session.Acquisition.AcquireAsync();

            imglist = GetImageList(_session, 5000, 400);
            imagemode = FindMode(imglist);
            modeimage.SetTo(imagemode);
            imglist.Clear();
            CvInvoke.Imshow(camerawindow, modeimage);
            CvInvoke.WaitKey(0);
            Point f_center = new Point();
            Mat cv_roi = new Mat(roi_size, Emgu.CV.CvEnum.DepthType.Cv8U, numchannels);
            image = _session.Acquisition.Extract((uint)0, out buff_out);
            uint j = buff_out;
            Console.WriteLine("j followed by buff_out");
            Console.WriteLine(j.ToString());
            Console.WriteLine(buff_out.ToString());
            while (true)
            {
                image = _session.Acquisition.Extract(j, out buff_out);
                data_2D = image.ToPixelArray().U8;
                cvimage.SetTo(data_2D);
        
                if (foundfish)
                {
                    modeimage_roi.SetTo(SliceROI(imagemode, f_center.X, f_center.Y, roidim));
                    data_2D_roi = SliceROI(data_2D, f_center.X, f_center.Y, roidim);
                    cv_roi = new Mat(roi_size, Emgu.CV.CvEnum.DepthType.Cv8U, numchannels);
                    cv_roi.SetTo(data_2D_roi);
                    fishcontour = FishContour(cv_roi, modeimage_roi);
                    if (fishcontour.height != 0)
                    {
                        f_center.X = (int)fishcontour.center.X + f_center.X - roidim / 2;  // puts ROI coords into full frame coords
                        f_center.Y = (int)fishcontour.center.Y + f_center.Y - roidim / 2;
                    }

                    else
                    {
                        foundfish = false;
                    }
                }
                if (!foundfish)                
                {
                    fishcontour = FishContour(cvimage, modeimage);
                    if (fishcontour.height != 0)
                    {
                        f_center.X = (int)fishcontour.center.X;
                        f_center.Y = (int)fishcontour.center.Y;
//                        foundfish = true;
                        data_2D_roi = SliceROI(data_2D, f_center.X, f_center.Y, roidim);
                        cv_roi = new Mat(roi_size, Emgu.CV.CvEnum.DepthType.Cv8U, numchannels);
                        cv_roi.SetTo(data_2D_roi);                        
                    }
                    else
                    {
                        foundfish = false;
                        cv_roi = new Mat(roi_size, Emgu.CV.CvEnum.DepthType.Cv8U, numchannels);
                        cv_roi.SetTo(gray); // a gray ROI in the movie indicates that the program lost the fish on this frame
                   
                        if (j % 25 == 0)
                        {
                            CvInvoke.Imshow(camerawindow, cvimage);
                            CvInvoke.WaitKey(1);
                            Console.WriteLine("Missed Fish");
                            Console.WriteLine(fishcontour.height);
                        }
                        j = buff_out + 1;
                        continue; 
                    }
                }

                if (fishcontour.com.Y > fishcontour.center.Y)
                {
//                   pyboard.WriteLine("paramove.pull_up()\r");
                    l_or_r = 1;

                }
                else if (fishcontour.com.Y < fishcontour.center.Y)
                {
// pyboard.WriteLine("paramove.pull_down()\r");
                    l_or_r = 0;
                }
                // PROBABLY MAKE THIS SO IT DOESN'T DRAW DURING A STIMULUS
                if (j % 25 == 0)
                {
                    if (l_or_r == 0)
                    {
                        pyboard.WriteLine("paramove.pull_up()\r");
                        CvInvoke.Circle(cvimage, new Point(f_center.X, f_center.Y), 20, new MCvScalar(0, 0, 0));
//                        CvInvoke.Circle(cvimage, new Point(f_center.X - roidim / 2 + fish_head.X, f_center.Y - roidim / 2 + fish_head.Y), 4, new MCvScalar(255,0,0));
                        Console.WriteLine(fishcontour.height);
                    }
                    else if (l_or_r == 1)
                    {
                        pyboard.WriteLine("paramove.pull_down()\r");
                        CvInvoke.Circle(cvimage, new Point(f_center.X, f_center.Y), 20, new MCvScalar(255, 0, 0));
                        Console.WriteLine(fishcontour.height);
                    }
                  //  CvInvoke.Imshow(camerawindow, cvimage);
                  //  CvInvoke.WaitKey(1);
                }
                j = buff_out + 1;
            }


        }
Esempio n. 60
0
        private void Initialize()
        {
            rgbMat = new Mat();

            if (!capture.isOpened())
            {
                Debug.LogError("capture.isOpened() is false. Please copy from “OpenCVForUnity/StreamingAssets/” to “Assets/StreamingAssets/” folder. ");
            }

            Debug.Log("CAP_PROP_FORMAT: " + capture.get(Videoio.CAP_PROP_FORMAT));
            Debug.Log("CAP_PROP_POS_MSEC: " + capture.get(Videoio.CAP_PROP_POS_MSEC));
            Debug.Log("CAP_PROP_POS_FRAMES: " + capture.get(Videoio.CAP_PROP_POS_FRAMES));
            Debug.Log("CAP_PROP_POS_AVI_RATIO: " + capture.get(Videoio.CAP_PROP_POS_AVI_RATIO));
            Debug.Log("CAP_PROP_FRAME_COUNT: " + capture.get(Videoio.CAP_PROP_FRAME_COUNT));
            Debug.Log("CAP_PROP_FPS: " + capture.get(Videoio.CAP_PROP_FPS));
            Debug.Log("CAP_PROP_FRAME_WIDTH: " + capture.get(Videoio.CAP_PROP_FRAME_WIDTH));
            Debug.Log("CAP_PROP_FRAME_HEIGHT: " + capture.get(Videoio.CAP_PROP_FRAME_HEIGHT));
            double ext = capture.get(Videoio.CAP_PROP_FOURCC);

            Debug.Log("CAP_PROP_FOURCC: " + (char)((int)ext & 0XFF) + (char)(((int)ext & 0XFF00) >> 8) + (char)(((int)ext & 0XFF0000) >> 16) + (char)(((int)ext & 0XFF000000) >> 24));

            if (fpsMonitor != null)
            {
                fpsMonitor.Add("CAP_PROP_FORMAT", capture.get(Videoio.CAP_PROP_FORMAT).ToString());
                fpsMonitor.Add("CAP_PROP_POS_MSEC", capture.get(Videoio.CAP_PROP_POS_MSEC).ToString());
                fpsMonitor.Add("CAP_PROP_POS_FRAMES", capture.get(Videoio.CAP_PROP_POS_FRAMES).ToString());
                fpsMonitor.Add("CAP_PROP_POS_AVI_RATIO", capture.get(Videoio.CAP_PROP_POS_AVI_RATIO).ToString());
                fpsMonitor.Add("CAP_PROP_FRAME_COUNT", capture.get(Videoio.CAP_PROP_FRAME_COUNT).ToString());
                fpsMonitor.Add("CAP_PROP_FPS", capture.get(Videoio.CAP_PROP_FPS).ToString());
                fpsMonitor.Add("CAP_PROP_FRAME_WIDTH", capture.get(Videoio.CAP_PROP_FRAME_WIDTH).ToString());
                fpsMonitor.Add("CAP_PROP_FRAME_HEIGHT", capture.get(Videoio.CAP_PROP_FRAME_HEIGHT).ToString());
                fpsMonitor.Add("CAP_PROP_FOURCC", "" + (char)((int)ext & 0XFF) + (char)(((int)ext & 0XFF00) >> 8) + (char)(((int)ext & 0XFF0000) >> 16) + (char)(((int)ext & 0XFF000000) >> 24));
                fpsMonitor.Add("STATE", "");
            }

            capture.grab();
            capture.retrieve(rgbMat, 0);
            int frameWidth  = rgbMat.cols();
            int frameHeight = rgbMat.rows();

            texture = new Texture2D(frameWidth, frameHeight, TextureFormat.RGB24, false);
            gameObject.transform.localScale = new Vector3((float)frameWidth, (float)frameHeight, 1);
            float widthScale  = (float)Screen.width / (float)frameWidth;
            float heightScale = (float)Screen.height / (float)frameHeight;

            if (widthScale < heightScale)
            {
                Camera.main.orthographicSize = ((float)frameWidth * (float)Screen.height / (float)Screen.width) / 2;
            }
            else
            {
                Camera.main.orthographicSize = (float)frameHeight / 2;
            }
            capture.set(Videoio.CAP_PROP_POS_FRAMES, 0);

            gameObject.GetComponent <Renderer>().material.mainTexture = texture;

            StartCoroutine("WaitFrameTime");

            isPlaying = true;
        }