Inheritance: Emgu.CV.MatDataAllocator, IInputArray, IOutputArray, IInputOutputArray, IImage
 public EdgeDetectionWindow(Mat sourceImage)
 {
     _sourceImage = sourceImage;
     InitializeComponent();
     DataContext = this;
     BitmapSource = MainWindow.ToBitmapSource(sourceImage);
 }
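MainWindow.ToBitmapSource is referenced here but not shown. A minimal sketch of such a helper, assuming the standard WPF imaging types (System.IO, System.Windows.Media.Imaging) and not necessarily the original implementation, could encode the Mat to PNG in memory:

    // Hypothetical helper: converts an Emgu CV Mat to a WPF BitmapSource via an in-memory PNG.
    public static BitmapSource ToBitmapSource(Mat image)
    {
        using (VectorOfByte vb = new VectorOfByte())
        {
            CvInvoke.Imencode(".png", image, vb);
            BitmapImage bmp = new BitmapImage();
            bmp.BeginInit();
            bmp.StreamSource = new MemoryStream(vb.ToArray());
            bmp.CacheOption = BitmapCacheOption.OnLoad; // load fully so the stream can be released
            bmp.EndInit();
            return bmp;
        }
    }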
        public JanelaDetectarFace(Mat pImagem)
        {
            InitializeComponent();
            mImagem = pImagem;
          //  currentFrame = new Image<Bgr, byte>(new Size(320, 240));
          //  CvInvoke.Resize(mImagem, currentFrame, new Size(320, 240), 0, 0, Emgu.CV.CvEnum.Inter.Cubic);
           // imagemDetect.Image = currentFrame.ToBitmap();
            if (Eigen_Recog.IsTrained)
            {
               // message_bar.Text = "Training Data loaded";
            }
            else
            {
                //message_bar.Text = "No training data found, please train program using Train menu option";
            }
            currentFrame = new Image<Bgr, byte>(new Size(820, 780));
            CvInvoke.Resize(mImagem, currentFrame, new Size(820, 780), 0, 0, Emgu.CV.CvEnum.Inter.Cubic);


            //Convert it to Grayscale
            if (currentFrame != null)
            {
                gray_frame = currentFrame.Convert<Gray, Byte>();

                //Face Detector
                Rectangle[] facesDetected = Face.DetectMultiScale(gray_frame, 1.2, 10, new Size(50, 50), Size.Empty);

                //Action for each element detected
                for (int i = 0; i < facesDetected.Length; i++)
                {
                    //Shrink each Haar hit to focus in on the face; it's not perfect,
                    //but it removes a majority of the background noise
                    facesDetected[i].X += (int)(facesDetected[i].Width * 0.15);  //horizontal inset scales with the width
                    facesDetected[i].Y += (int)(facesDetected[i].Height * 0.22); //vertical inset scales with the height
                    facesDetected[i].Height -= (int)(facesDetected[i].Height * 0.3);
                    facesDetected[i].Width -= (int)(facesDetected[i].Width * 0.35);

                    result = currentFrame.Copy(facesDetected[i]).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.Inter.Cubic);
                    result._EqualizeHist();
                    //draw the detected face on the color frame in red
                    currentFrame.Draw(facesDetected[i], new Bgr(Color.Red), 2);

                    if (Eigen_Recog.IsTrained)
                    {
                        string name = Eigen_Recog.Recognise(result);
                        int match_value = (int)Eigen_Recog.Get_Eigen_Distance;

                        //Draw the label for each face detected and recognized
                        currentFrame.Draw(name + "", new Point(facesDetected[i].X - 2, facesDetected[i].Y - 2), Emgu.CV.CvEnum.FontFace.HersheyDuplex, 1, new Bgr(Color.LightGreen));
                        // currentFrame.Draw(name + " ", ref font, new Point(facesDetected[i].X - 2, facesDetected[i].Y - 2), new Bgr(Color.LightGreen));
                        //  ADD_Face_Found(result, name, match_value);
                    }
                }
                //Show the faces processed and recognized
                imagemDetect.Image = currentFrame.ToBitmap();
            }
        }
      private void ProcessImage(Mat image)
      {
         Stopwatch watch = Stopwatch.StartNew(); // time the detection process

         List<Mat> stopSignList = new List<Mat>();
         List<Rectangle> stopSignBoxList = new List<Rectangle>();
         _stopSignDetector.DetectStopSign(image, stopSignList, stopSignBoxList);

         watch.Stop(); //stop the timer
         processTimeLabel.Text = String.Format("Stop Sign Detection time: {0} milliseconds", watch.Elapsed.TotalMilliseconds);

         panel1.Controls.Clear();
         Point startPoint = new Point(10, 10);

         for (int i = 0; i < stopSignList.Count; i++)
         {
            Rectangle rect = stopSignBoxList[i];
            AddLabelAndImage(
               ref startPoint,
               String.Format("Stop Sign [{0},{1}]:", rect.Location.Y + rect.Width / 2, rect.Location.Y + rect.Height / 2),
               stopSignList[i]);
            CvInvoke.Rectangle(image, rect, new Bgr(Color.Aquamarine).MCvScalar, 2);

         }

         imageBox1.Image = image;
      }
Example #4
        public CameraTrackingUpdateReturnModel Update()
        {
            // capture frame

            Mat frame = _cameraCapture.QueryFrame();

            //filter out noise

            Mat smoothedFrame = new Mat();

            CvInvoke.GaussianBlur(
                frame,
                smoothedFrame,
                new Size(this.frameBlurStrength, this.frameBlurStrength),
                1);

            // get the foreground mask for preview

            Mat foregroundMask = new Mat();

            _fgDetector.Apply(smoothedFrame, foregroundMask);

            this.lastFrame = frame;
            this.lastMask  = foregroundMask;

            return(new CameraTrackingUpdateReturnModel()
            {
                Frame = frame,
                Mask = foregroundMask
            });
        }
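A hedged usage sketch for Update(); the owning class name CameraTracking is illustrative, not from the source:

    var tracking = new CameraTracking();                          // hypothetical owner of Update()
    CameraTrackingUpdateReturnModel result = tracking.Update();
    CvInvoke.Imshow("frame", result.Frame);                       // raw capture
    CvInvoke.Imshow("mask", result.Mask);                         // foreground mask from the BG/FG detector
    CvInvoke.WaitKey(1);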
Example #5
      static void Main(string[] args)
      {
         Mat _left = CvInvoke.Imread("imL.png", ImreadModes.Color);
         Mat _right = CvInvoke.Imread("imR.png", ImreadModes.Color);
         Mat disparityMap = new Mat();

         Stopwatch watch = Stopwatch.StartNew();
         UMat leftGray = new UMat();
         UMat rightGray = new UMat();
         CvInvoke.CvtColor(_left, leftGray, ColorConversion.Bgr2Gray);
         CvInvoke.CvtColor(_right, rightGray, ColorConversion.Bgr2Gray);
         Mat points = new Mat();
         Computer3DPointsFromStereoPair(leftGray, rightGray, disparityMap, points);
         watch.Stop();
         long disparityComputationTime = watch.ElapsedMilliseconds;

         Mat pointsArray = points.Reshape(points.NumberOfChannels, points.Rows*points.Cols);
         Mat colorArray = _left.Reshape(_left.NumberOfChannels, _left.Rows*_left.Cols);
         WCloud cloud = new WCloud(pointsArray, colorArray);

         Emgu.CV.Viz3d v = new Emgu.CV.Viz3d("Simple stereo reconstruction");
         WText wtext = new WText("3d point cloud", new System.Drawing.Point(20, 20), 20, new MCvScalar(255, 255, 255));
         WCoordinateSystem wCoordinate = new WCoordinateSystem(1.0);
         v.ShowWidget("text", wtext);
         //v.ShowWidget("coordinate", wCoordinate);
         v.ShowWidget("cloud", cloud);
         v.Spin();
      }
        private void buscarrosto(Bitmap frame)
        {
            Image <Rgb, Byte> imageCV = new Image <Rgb, byte>(frame);

            Emgu.CV.Mat mat   = imageCV.Mat;
            var         array = new byte[mat.Width * mat.Height * mat.ElementSize];

            mat.CopyTo(array);

            using (Array2D <RgbPixel> image = Dlib.LoadImageData <RgbPixel>(array, (uint)mat.Height, (uint)mat.Width, (uint)(mat.Width * mat.ElementSize)))
            {
                using (FrontalFaceDetector fd = Dlib.GetFrontalFaceDetector())

                {
                    var faces = fd.Operator(image);
                    foreach (DlibDotNet.Rectangle face in faces)
                    {
                        FullObjectDetection shape          = _ShapePredictor.Detect(image, face);
                        ChipDetails         faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                        Array2D <RgbPixel>  faceChip       = Dlib.ExtractImageChip <RgbPixel>(image, faceChipDetail);
                        Bitmap bitmap1 = faceChip.ToBitmap <RgbPixel>();
                        MainWindow.main.Statusa1 = bitmap1;
                        Dlib.DrawRectangle(image, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                    }
                }
                frame = image.ToBitmap <RgbPixel>();
                MainWindow.main.Statusa = frame;
            }
        }
Example #7
 /// <summary>
 /// Finds perspective transformation H=||h_ij|| between the source and the destination planes
 /// </summary>
 /// <param name="srcPoints">Point coordinates in the original plane</param>
 /// <param name="dstPoints">Point coordinates in the destination plane</param>
 /// <param name="homography">The output homography matrix</param>
 /// <param name="method">FindHomography method</param>
 /// <param name="ransacReprojThreshold">
 /// The maximum allowed reprojection error to treat a point pair as an inlier. 
 /// The parameter is only used in RANSAC-based homography estimation. 
 /// E.g. if dst_points coordinates are measured in pixels with pixel-accurate precision, it makes sense to set this parameter somewhere in the range ~1..3
 /// </param>
 /// <param name="mask">Optional output mask set by a robust method ( CV_RANSAC or CV_LMEDS ). Note that the input mask values are ignored.</param>
 /// <remarks>On success, the 3x3 homography matrix is written to <paramref name="homography"/>; it is left empty if no homography could be found.</remarks>
 public static void FindHomography(
    PointF[] srcPoints,
    PointF[] dstPoints,
    IOutputArray homography,
    CvEnum.HomographyMethod method,
    double ransacReprojThreshold = 3,
    IOutputArray mask = null)
 {
    GCHandle srcHandle = GCHandle.Alloc(srcPoints, GCHandleType.Pinned);
    GCHandle dstHandle = GCHandle.Alloc(dstPoints, GCHandleType.Pinned);
    try
    {
       using (
          Mat srcPointMatrix = new Mat(srcPoints.Length, 2, DepthType.Cv32F, 1, srcHandle.AddrOfPinnedObject(), 8))
       using (
          Mat dstPointMatrix = new Mat(dstPoints.Length, 2, DepthType.Cv32F, 1, dstHandle.AddrOfPinnedObject(), 8))
       {
          CvInvoke.FindHomography(srcPointMatrix, dstPointMatrix, homography, method, ransacReprojThreshold, mask);
       }
    }
    finally
    {
       srcHandle.Free();
       dstHandle.Free();
    }
 }
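A usage sketch for the wrapper above; the point sets are hypothetical stand-ins for real feature-matcher output, and the inlier mask is optional:

    PointF[] src = { new PointF(0, 0), new PointF(100, 0), new PointF(100, 100), new PointF(0, 100) };
    PointF[] dst = { new PointF(10, 5), new PointF(115, 8), new PointF(112, 110), new PointF(8, 104) };
    using (Mat homography = new Mat())
    using (Mat inlierMask = new Mat())
    {
        FindHomography(src, dst, homography, CvEnum.HomographyMethod.Ransac, 3, inlierMask);
        // homography now holds the 3x3 transform; inlierMask flags the pairs RANSAC kept
    }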
Example #8
        /// <summary>
        /// Frame Process Function called on Image Grabbed Event
        /// </summary>
        protected override void ProcessFrame(object sender, EventArgs e)
        {
            //Unsubscribe to stop receiving events
            _capture.ImageGrabbed -= ProcessFrame;
            try
            {
                Mat f = new Mat();
                _capture.Retrieve(f, 0);

                //Get frame from camera
                Frame frame = new Frame();
                frame.Timestamp = DateTime.Now;
                frame.Image = new Image<Gray, byte>(f.Bitmap);

                Data = frame;
                NotifyAll();
            }
            catch (Exception ex)
            {
                Log.Print(String.Format("Failed to deal with frame. Reason: {0}", ex.Message), eCategory.Error, LogTag.IMAGE);
            }
            finally
            {
                //Subscribe back to receive events
                _capture.ImageGrabbed += ProcessFrame;
            }
        }
Example #9
        public override void ImageGrabbedHandler(object sender, EventArgs e)
        {
            if (_transmitTask == null || _transmitTask.IsCompleted)
            {
                using (var matCaptured = new Mat())
                {
                    CameraCapture.Retrieve(matCaptured);
                    var bgrImage = matCaptured.ToImage<Bgr, byte>();
                    WriteText(bgrImage, 30, DateTime.Now.ToString("HH:mm:ss tt"));
                    imageBoxCaptured.Image = bgrImage;

                    IImageTransmitter transmitter = null;
                    if (radBsonImage.Checked)
                    {
                        transmitter = _imageTransmitter;
                    }

                    if (radBsonJpeg.Checked)
                    {
                        transmitter = _jpegTransmitter;
                    }

                    if (transmitter != null)
                    {
                        _transmitTask = transmitter.Transmit(bgrImage);
                    }
                }
            }
        }
Example #10
 /// <summary>
 /// The primal-dual algorithm solves special types of variational problems (that is, finding a function to minimize some functional).
 /// As image denoising, in particular, may be seen as a variational problem, the primal-dual algorithm can be used to perform
 /// denoising, and this is exactly what is implemented here.
 /// </summary>
 /// <param name="observations">This array should contain one or more noised versions of the image that is to be restored.</param>
 /// <param name="result">Here the denoised image will be stored. There is no need to pre-allocate storage space; it will be allocated automatically if necessary.</param>
 /// <param name="lambda">Corresponds to lambda in the formulas above. As it is enlarged, smooth (blurred) images are favored over detailed (but maybe more noised) ones. Roughly speaking, as it becomes smaller the result will be more blurred, but more severe outliers will be removed.</param>
 /// <param name="niters">Number of iterations the algorithm will run. The more iterations the better, but it is hard to quantitatively refine this statement, so just use the default and increase it if the results are poor.</param>
 public static void DenoiseTVL1(Mat[] observations, Mat result, double lambda, int niters)
 {
    using (Util.VectorOfMat vm = new Util.VectorOfMat(observations))
    {
       cveDenoiseTVL1(vm, result, lambda, niters);
    }
 }
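A minimal usage sketch, assuming a noisy 8-bit grayscale file named noisy.png; the observations array may also hold several noised captures of the same scene:

    Mat noisy = CvInvoke.Imread("noisy.png", ImreadModes.Grayscale);
    Mat denoised = new Mat();
    DenoiseTVL1(new Mat[] { noisy }, denoised, 1.0, 30); // OpenCV's documented default lambda and iteration count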
      protected override void OnCreate(Bundle bundle)
      {
         base.OnCreate(bundle);

         OnImagePicked += (sender, image) =>
         {
            using (Mat stopSignModel = new Mat(Assets, "stop-sign-model.png"))
            {             
               if (image == null)
                  return;

               Stopwatch watch = Stopwatch.StartNew(); // time the detection process

               List<Mat> stopSignList = new List<Mat>();
               List<Rectangle> stopSignBoxList = new List<Rectangle>();
               StopSignDetector detector = new StopSignDetector(stopSignModel);
               detector.DetectStopSign(image, stopSignList, stopSignBoxList);

               watch.Stop(); //stop the timer
               SetMessage(String.Format("Detection time: {0} milliseconds", watch.Elapsed.TotalMilliseconds));

               foreach (Rectangle rect in stopSignBoxList)
                  CvInvoke.Rectangle(image, rect, new Bgr(System.Drawing.Color.Red).MCvScalar, 2);  

               SetImageBitmap(image.ToBitmap());
               image.Dispose();
            }
         };

         OnButtonClick += (sender, args) =>
         {
            PickImage("stop-sign.jpg");
         };
      }
Example #12
      /// <summary>
      /// Given the left and right image, compute the disparity map and the 3D point cloud.
      /// </summary>
      /// <param name="left">The left image</param>
      /// <param name="right">The right image</param>
      /// <param name="outputDisparityMap">The left disparity map</param>
      /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
      private static void Computer3DPointsFromStereoPair(IInputArray left, IInputArray right, Mat outputDisparityMap, Mat points)
      {
         Size size;
         using (InputArray ia = left.GetInputArray())
            size = ia.GetSize();

         using (StereoBM stereoSolver = new StereoBM())
         {
            stereoSolver.Compute(left, right, outputDisparityMap);

            float scale = Math.Max(size.Width, size.Height);

            //Construct a simple Q matrix, if you have a matrix from cvStereoRectify, you should use that instead
            using (Matrix<double> q = new Matrix<double>(
               new double[,]
               {
                  {1.0, 0.0, 0.0, -size.Width/2}, //shift the x origin to image center
                  {0.0, -1.0, 0.0, size.Height/2}, //shift the y origin to image center and flip it upside down
                  {0.0, 0.0, -1.0, 0.0}, //Multiply the z value by -1.0, 
                  {0.0, 0.0, 0.0, scale}
               })) //scale the object's coordinate to within a [-0.5, 0.5] cube
            {
               
               CvInvoke.ReprojectImageTo3D(outputDisparityMap, points, q, false, DepthType.Cv32F);
               
            }
            //points = PointCollection.ReprojectImageTo3D(outputDisparityMap, q);
         }
      }
        public PeakPattern(Mat inImage, double highThresh = 180.0)
        {
            ImageHeight = inImage.Height;
            ImageWidth = inImage.Width;
            highThreshold = highThresh;

            //Should test a smaller bounding box than the peak-finding threshold.
            textRect = ProcessingTools.findTextEdge<Bgr, double>(inImage, new double[] { highThreshold, highThreshold, highThreshold });

            //resultList[0] = new BitArray(ImageWidth);
            Mat croppedImage = new Mat(inImage, textRect);
            //resultList.Add(ProcessingTools.testLine<Bgr, double>(inImage, new double[] { highThreshold, highThreshold, highThreshold },
            //    textRect.Top + textRect.Height / 4));
            //Console.WriteLine(textRect.Top + textRect.Height / 2);
            //resultList.Add(ProcessingTools.testLine<Bgr, double>(inImage, new double[] { highThreshold, highThreshold, highThreshold },
            //    textRect.Top + textRect.Height / 2));
            //resultList.Add(ProcessingTools.testLine<Bgr, double>(inImage, new double[] { highThreshold, highThreshold, highThreshold },
            //    (int)(textRect.Top + textRect.Height * 0.75)));

            resultList.Add(ProcessingTools.testLine<Bgr, double>(croppedImage, new double[] { highThreshold, highThreshold, highThreshold },
                textRect.Height / 4));
            //            Console.WriteLine(textRect.Top + textRect.Height / 2);
            resultList.Add(ProcessingTools.testLine<Bgr, double>(croppedImage, new double[] { highThreshold, highThreshold, highThreshold },
                textRect.Height / 2));
            resultList.Add(ProcessingTools.testLine<Bgr, double>(croppedImage, new double[] { highThreshold, highThreshold, highThreshold },
                (int)(textRect.Height * 0.75)));
            //Console.WriteLine("Disc using lines {0}, {1}, {2}",
            //    textRect.Height / 4,
            //    textRect.Height / 2,
            //    (int)(textRect.Height * 0.75));
        }
        ///////////////////////////////////////////////////////////////////////////////////////////
        void processFrameAndUpdateGUI(object sender, EventArgs arg) {
            Mat imgOriginal;

            imgOriginal = capWebcam.QueryFrame();

            if(imgOriginal == null) {
                MessageBox.Show("unable to read frame from webcam" + Environment.NewLine + Environment.NewLine +
                                "exiting program");
                Environment.Exit(0);
                return;
            }

            Mat imgGrayscale = new Mat(imgOriginal.Size, DepthType.Cv8U, 1);
            Mat imgBlurred = new Mat(imgOriginal.Size, DepthType.Cv8U, 1);
            Mat imgCanny = new Mat(imgOriginal.Size, DepthType.Cv8U, 1);

            CvInvoke.CvtColor(imgOriginal, imgGrayscale, ColorConversion.Bgr2Gray);

            CvInvoke.GaussianBlur(imgGrayscale, imgBlurred, new Size(5, 5), 1.5);

            CvInvoke.Canny(imgBlurred, imgCanny, 100, 200);

            ibOriginal.Image = imgOriginal;
            ibCanny.Image = imgCanny;
        }
Example #15
      static void Main()
      {
         Application.EnableVisualStyles();
         Application.SetCompatibleTextRenderingDefault(false);

         using (Mat image = new Mat("pedestrian.png"))
         {
            
            long processingTime;
            Rectangle[] results;

            if (CudaInvoke.HasCuda)
            {
               using (GpuMat gpuMat = new GpuMat(image))
                  results = FindPedestrian.Find(gpuMat, out processingTime);
            }
            else
            {
               using (UMat uImage = image.GetUMat(AccessType.ReadWrite))
                  results = FindPedestrian.Find(uImage, out processingTime);
            }
            
            foreach (Rectangle rect in results)
            {
               CvInvoke.Rectangle(image, rect, new Bgr(Color.Red).MCvScalar);
            }
            ImageViewer.Show(
               image,
               String.Format("Pedestrian detection using {0} in {1} milliseconds.",
                  CudaInvoke.HasCuda ? "GPU" : 
                  CvInvoke.UseOpenCL ? "OpenCL":
                  "CPU",
                  processingTime));
         }
      }
    public void DetectShapes()
    {
        StringBuilder msgBuilder = new StringBuilder("Performance: ");

        var fileImage = _SavePath + (_CaptureCounter - 1).ToString() + ".png";

        Mat image = CvInvoke.Imread(fileImage, Emgu.CV.CvEnum.LoadImageType.AnyColor);


        //Convert the image to grayscale and filter out the noise
        UMat uimage = new UMat();

        CvInvoke.CvtColor(image, uimage, ColorConversion.Bgr2Gray);

        //use image pyr to remove noise
        UMat pyrDown = new UMat();

        CvInvoke.PyrDown(uimage, pyrDown);
        CvInvoke.PyrUp(pyrDown, uimage);

        //Image<Gray, Byte> gray = img.Convert<Gray, Byte>().PyrDown().PyrUp();

        #region circle detection
        Stopwatch watch                      = Stopwatch.StartNew();
        double    cannyThreshold             = 180.0;
        double    circleAccumulatorThreshold = 120;
        CircleF[] circles                    = CvInvoke.HoughCircles(uimage, HoughType.Gradient, 2.0, 20.0, cannyThreshold, circleAccumulatorThreshold, 5);
        if (circles.Any())
        {
            m_MyAudioSource.Play(0);
            watch.Stop();
            SceneManager.LoadScene("Acertou");
        }
        #endregion
    }
Example #17
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchingTechnique">The feature matching technique to use.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public Bitmap GetImageWithDrawnMatches(Bitmap modelImage, Bitmap observedImage, MatchingTechnique matchingTechnique)
        {
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;

            using (Image<Bgr, byte> modelImg = new Image<Bgr, byte>(modelImage))
            using (Image<Bgr, byte> observedImg = new Image<Bgr, byte>(observedImage))
            using (Emgu.CV.Mat modelMat = modelImg.Mat)
            using (Emgu.CV.Mat observedMat = observedImg.Mat)
            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                ImageFeatureDetector.FindMatches(modelMat, observedMat, out modelKeyPoints, out observedKeyPoints, matches, out Mat mask, out Mat homography, matchingTechnique);

                try
                {
                    using (Mat result = new Mat())
                    {
                        Features2DToolbox.DrawMatches(modelMat, modelKeyPoints, observedMat, observedKeyPoints, matches, result, new MCvScalar(255, 0, 0), new MCvScalar(0, 0, 255), mask);
                        return result.ToBitmap();
                    }
                }
                finally
                {
                    mask?.Dispose();
                    homography?.Dispose();
                }
            }
        }
Example #18
        private void button1_Click(object sender, EventArgs e)
        {
            OpenFileDialog Openfile = new OpenFileDialog();

            if (Openfile.ShowDialog() == DialogResult.OK)
            {
                Image<Bgr, Byte> My_Image = new Image<Bgr, byte>(Openfile.FileName);
                //Mat My_Image = CvInvoke.Imread(Openfile.FileName, Emgu.CV.CvEnum.ImreadModes.Unchanged);
                // Gaussian blur
                CvInvoke.GaussianBlur(My_Image, My_Image, new Size(3, 3), 0, 0);
                CvInvoke.Imshow("GaussianBlur.", My_Image);
                // Binarize (note: thresholding is applied per channel; no explicit grayscale conversion is done here)
                CvInvoke.Threshold(My_Image, My_Image, 100, 255, Emgu.CV.CvEnum.ThresholdType.Binary);
                CvInvoke.Imshow("Gray.", My_Image);
                // Structuring element (kernel) for erosion/dilation
                Emgu.CV.Mat StructingElement = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Ellipse, new Size(7, 7), new Point(2, 2));
                CvInvoke.Erode(My_Image, My_Image, StructingElement, new Point(-1, -1), 10, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(0));
                Image <Bgr, Byte> image1 = My_Image.Clone();

                CvInvoke.Erode(My_Image, image1, StructingElement, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(0));
                image1 = My_Image - image1;
                CvInvoke.Imshow("GetStructuringElement.", image1);

                CvInvoke.Canny(image1, image1, trackBar1.Value, trackBar1.Value * 3, 3);
                imageBox1.Image = image1;
                ScanBarCodeZbar(image1.ToBitmap());
            }
        }
Example #19
        private void M_capture_ImageGrabbed(object sender, EventArgs e)
        {
            // Console.WriteLine("test: "  + startIndex.ToString());
            //  startIndex++;

            destin = SaveRecordingLocation_textbox.Text;

            if (fileChanged)
            {
                // destin = SaveRecordingLocation_textbox.Text;
                totalFrames = m_capture.GetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameCount);
                fps         = m_capture.GetCaptureProperty(Emgu.CV.CvEnum.CapProp.Fps);
                int    fourcc      = Convert.ToInt32(m_capture.GetCaptureProperty(Emgu.CV.CvEnum.CapProp.FourCC));
                int    frameHeight = Convert.ToInt32(m_capture.GetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameHeight));
                int    frameWidth  = Convert.ToInt32(m_capture.GetCaptureProperty(Emgu.CV.CvEnum.CapProp.FrameWidth));
                string destination = destin + i + ".avi";
                videoWriter = new Emgu.CV.VideoWriter(destination, Emgu.CV.VideoWriter.Fourcc('I', 'Y', 'U', 'V'), fps, new System.Drawing.Size(frameWidth, frameHeight), true);
                fileChanged = false;
            }


            Emgu.CV.Mat m = new Emgu.CV.Mat();
            m_capture.Retrieve(m);
            // pictureBox1.Image = m.ToImage<Bgr, byte>().Bitmap;
            videoWriter.Write(m);



            //throw new NotImplementedException();
        }
Example #20
        static List <int> entropyList <TColor, TDepth>(Emgu.CV.Mat image1, Emgu.CV.Mat image2, TDepth[] channelThresh)
            where TColor : struct, IColor
            where TDepth : IComparable <TDepth>, new()
        {
            List <int> results = new List <int>();

            for (int row = 0; row < image1.Height; row++)
            {
                BitArray test1 = ProcessingTools.testLine <TColor, TDepth>(image1, channelThresh, row);
                BitArray test2 = ProcessingTools.testLine <TColor, TDepth>(image2, channelThresh, row);

                BitArray resArray = test1.Xor(test2);

                int nIndyBits = 0;
                foreach (bool res in resArray)
                {
                    if (res)
                    {
                        ++nIndyBits;
                    }
                }
                //Console.WriteLine("Independant bits at " + row + ": " + result);
                results.Add(nIndyBits);
            }
            return(results);
        }
Example #21
        public Image<Gray, byte> Solve(Image<Gray, byte> left, Image<Gray, byte> right)
        {
            var size = left.Size;

            using (var leftGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var rightGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var disparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var filteredDisparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var filteredDisparity16S = new Mat(size, DepthType.Cv16S, 1))
            using (var filteredDisparity8U = new Mat(size, DepthType.Cv8U, 1))
            {
                leftGpu.Upload(left.Mat);
                rightGpu.Upload(right.Mat);

                algorithm.FindStereoCorrespondence(leftGpu, rightGpu, disparityGpu);

                filter.Apply(disparityGpu, leftGpu, filteredDisparityGpu);

                filteredDisparityGpu.Download(filteredDisparity16S);

                CvInvoke.MinMaxLoc(filteredDisparity16S, ref min, ref max, ref minPosition, ref maxPosition);

                filteredDisparity16S.ConvertTo(filteredDisparity8U, DepthType.Cv8U, 255.0/(Max - Min));

                return new Image<Gray, byte>(filteredDisparity8U.Bitmap);
            }
        }
Example #22
      void ProcessFrame(object sender, EventArgs e)
      {
         Mat frame = _cameraCapture.QueryFrame();
         Mat smoothedFrame = new Mat();
         CvInvoke.GaussianBlur(frame, smoothedFrame, new Size(3, 3), 1); //filter out noise
         //frame._SmoothGaussian(3);

         #region use the BG/FG detector to find the foreground mask
         Mat foregroundMask = new Mat();
         _fgDetector.Apply(smoothedFrame, foregroundMask);
         #endregion

         CvBlobs blobs = new CvBlobs();
         _blobDetector.Detect(foregroundMask.ToImage<Gray, byte>(), blobs);
         blobs.FilterByArea(100, int.MaxValue);

         float scale = (frame.Width + frame.Height)/2.0f; //use the average frame dimension as a scale
         _tracker.Update(blobs, 0.01 * scale, 5, 5);

         foreach (var pair in _tracker)
         {
            CvTrack b = pair.Value;
            CvInvoke.Rectangle(frame, b.BoundingBox, new MCvScalar(255.0, 255.0, 255.0), 2);
            CvInvoke.PutText(frame,  b.Id.ToString(), new Point((int)Math.Round(b.Centroid.X), (int)Math.Round(b.Centroid.Y)), FontFace.HersheyPlain, 1.0, new MCvScalar(255.0, 255.0, 255.0));
         }

         imageBox1.Image = frame;
         imageBox2.Image = foregroundMask;
      }
Example #23
        private void ProcessFrame(object sender, EventArgs arg)
        {
            Mat frame = new Mat();      //matrix to hold the captured picture
            capture.Retrieve(frame, 0); //retrieve the picture into the matrix
            Image<Bgr, byte> image = frame.ToImage<Bgr, byte>();
            FaceNo = 0;
            if (!frame.IsEmpty)
            {
                Image<Gray, byte> grayFrame = frame.ToImage<Gray, byte>(); // grayscale copy for the detector
                faces = cascade.DetectMultiScale(grayFrame, 1.1, 2, new Size(30, 30));

                Bitmap BitmapInput = grayFrame.ToBitmap();
                Bitmap ExtractedFace;
                Graphics FaceCanvas;
                //countTable.Text = faces.Count().ToString();
                if (faces.Count() > 0)
                {
                    foreach (var face in faces)
                    {
                        image.Draw(face, new Bgr(Color.Blue), 1); // draw rectangles in the picture
                        ExtractedFace = new Bitmap(face.Width, face.Height);
                        FaceCanvas = Graphics.FromImage(ExtractedFace);
                        FaceCanvas.DrawImage(BitmapInput, 0, 0, face, GraphicsUnit.Pixel);
                        ExtFaces.Add(ExtractedFace);
                        FaceNo++;
                    }
                }
                imageBox1.Image = image; // display the image in the imageBox
            }
        }
      public override void ViewDidLoad()
      {
         base.ViewDidLoad();
         ButtonText = "Detect Stop Sign";
         OnButtonClick +=
         delegate
         {
            using (Mat stopSignModel = new Mat("stop-sign-model.png"))
            using (Mat image = new Mat("stop-sign.jpg"))
            {
               Stopwatch watch = Stopwatch.StartNew(); // time the detection process

               List<Mat> stopSignList = new List<Mat>();
               List<Rectangle> stopSignBoxList = new List<Rectangle>();
               StopSignDetector detector = new StopSignDetector(stopSignModel);
               detector.DetectStopSign(image, stopSignList, stopSignBoxList);

               watch.Stop(); //stop the timer
               foreach (Rectangle rect in stopSignBoxList)
               {
                  CvInvoke.Rectangle(image, rect, new MCvScalar(0, 0, 255), 2);
               }
               Size frameSize = FrameSize;
               using (Mat resized = new Mat())
               {
                  CvInvoke.ResizeForFrame(image, resized, frameSize);
                  MessageText = String.Format("Detection time: {0} milliseconds", watch.Elapsed.TotalMilliseconds);
                  SetImage(resized);
               }

            }
         };

      }
Example #25
 private static Mat edge(Mat I)
 {
     Mat E = new Mat();
     CvInvoke.CvtColor(I,E,ColorConversion.Bgr2Gray);
     CvInvoke.Canny(E, E, 40, 80, 3);
     return E;
 }
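A usage sketch, assuming an input.jpg on disk:

    using (Mat color = CvInvoke.Imread("input.jpg", ImreadModes.Color))
    using (Mat edges = edge(color))
    {
        CvInvoke.Imshow("edges", edges);
        CvInvoke.WaitKey(0);
    }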
Example #26
      static void Run()
      {
         Mat image = new Mat("lena.jpg", LoadImageType.Color); //Read the files as an 8-bit Bgr image  
         long detectionTime;
         List<Rectangle> faces = new List<Rectangle>();
         List<Rectangle> eyes = new List<Rectangle>();

         //The cuda cascade classifier doesn't seem to be able to load "haarcascade_frontalface_default.xml" file in this release
         //disabling CUDA module for now
         bool tryUseCuda = false;
         bool tryUseOpenCL = true;

         DetectFace.Detect(
           image, "haarcascade_frontalface_default.xml", "haarcascade_eye.xml", 
           faces, eyes,
           tryUseCuda,
           tryUseOpenCL,
           out detectionTime);

         foreach (Rectangle face in faces)
            CvInvoke.Rectangle(image, face, new Bgr(Color.Red).MCvScalar, 2);
         foreach (Rectangle eye in eyes)
            CvInvoke.Rectangle(image, eye, new Bgr(Color.Blue).MCvScalar, 2);

         //display the image 
         ImageViewer.Show(image, String.Format(
            "Completed face and eye detection using {0} in {1} milliseconds", 
            (tryUseCuda && CudaInvoke.HasCuda) ? "GPU"
            : (tryUseOpenCL && CvInvoke.HaveOpenCLCompatibleGpuDevice) ? "OpenCL" 
            : "CPU",
            detectionTime));
      }
Example #27
 /// <summary>
 /// Gets the x and y offsets between two images.
 /// </summary>
 /// <param name="modelImagePath">The path to the model image.</param>
 /// <param name="observedImagePath">The path to the observed image.</param>
 /// <returns>A tuple of the x and y offsets between the modelImage and the observedImage.</returns>
 public static Tuple <int, int> GetXYOffsets(string modelImagePath, string observedImagePath)
 {
     using (Emgu.CV.Mat mat1 = Emgu.CV.CvInvoke.Imread(modelImagePath, ImreadModes.AnyColor))
         using (Emgu.CV.Mat mat2 = Emgu.CV.CvInvoke.Imread(observedImagePath, ImreadModes.AnyColor))
         {
             return(GetXYOffsets(mat1, mat2));
         }
 }
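A usage sketch with hypothetical file names; the Mat overload called inside does the actual matching:

    Tuple<int, int> offsets = GetXYOffsets("model.png", "observed.png");
    Console.WriteLine("x offset: {0}, y offset: {1}", offsets.Item1, offsets.Item2);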
 protected void OnImageAvailable(Mat image)
 {
     EventHandler<ImageAvailableEventArgs> handler = ImageAvailable;
     if (handler != null)
     {
         handler(this, new ImageAvailableEventArgs(image));
     }
 }
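A hedged subscription sketch; grabber stands for whatever class exposes ImageAvailable, and the Image accessor on ImageAvailableEventArgs is an assumption:

    grabber.ImageAvailable += (sender, args) =>
    {
        Mat frame = args.Image; // assumed property on ImageAvailableEventArgs
        CvInvoke.Imshow("latest frame", frame);
    };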
Example #29
      private void image1_Initialized(object sender, EventArgs e)
      {
         Mat image = new Mat(100, 400, DepthType.Cv8U, 3);
         image.SetTo(new Bgr(255, 255, 255).MCvScalar);
         CvInvoke.PutText(image, "Hello, world", new System.Drawing.Point(10, 50), Emgu.CV.CvEnum.FontFace.HersheyPlain, 3.0, new Bgr(255.0, 0.0, 0.0).MCvScalar);

         image1.Source = BitmapSourceConvert.ToBitmapSource(image);
      }
Example #30
        public static OpenCvSharp.Mat MatEmguToOpenCVSharp(Emgu.CV.Mat emguMat)
        {
            #region In production use: wrap the Emgu native pointer with new OpenCvSharp.Mat(IntPtr)
            var ptrMat = new OpenCvSharp.Mat(emguMat.Ptr);
            return(ptrMat);

            #endregion
        }
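Because the conversion wraps the Emgu Mat's native pointer instead of copying pixels, the returned OpenCvSharp Mat is only valid while the source Emgu Mat stays alive; a sketch of the safe pattern:

    OpenCvSharp.Mat shared = MatEmguToOpenCVSharp(emguMat); // shares the native buffer
    OpenCvSharp.Mat independent = shared.Clone();           // deep copy, safe after emguMat is disposed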
Example #31
        public void guardarArchivo(Emgu.CV.Mat img, String name)
        {
            Emgu.CV.Image <Emgu.CV.Structure.Bgr, Byte> memoryImageOut = img.ToImage <Emgu.CV.Structure.Bgr, Byte>();

            Bitmap memoryImageT = memoryImageOut.ToBitmap();

            memoryImageT.Save(ruta + "/files/" + name + ".bmp");
        }
      public override void ViewDidLoad()
      {
         base.ViewDidLoad();
         RootElement root = Root;
         root.UnevenRows = true;
         UIImageView imageView = new UIImageView(View.Frame);
         StringElement messageElement = new StringElement("");
         StringElement licenseElement = new StringElement("");

         root.Add(new Section()
                 { new StyledStringElement("Process", delegate {

            using (Image<Bgr, Byte> image = new Image<Bgr, byte>( "license-plate.jpg"))
            {
               LicensePlateDetector detector = new LicensePlateDetector(".");
               Stopwatch watch = Stopwatch.StartNew(); // time the detection process

               List<IInputOutputArray> licensePlateImagesList = new List<IInputOutputArray>();
               List<IInputOutputArray> filteredLicensePlateImagesList = new List<IInputOutputArray>();
               List<RotatedRect> licenseBoxList = new List<RotatedRect>();
               List<string> words = detector.DetectLicensePlate(
                  image,
                  licensePlateImagesList,
                  filteredLicensePlateImagesList,
                  licenseBoxList);

               watch.Stop(); //stop the timer
               messageElement.Value = String.Format("{0} milliseconds", watch.Elapsed.TotalMilliseconds);

               StringBuilder builder = new StringBuilder();
               foreach (String w in words)
                  builder.AppendFormat("{0} ", w);
               licenseElement.Value = builder.ToString();

               messageElement.GetImmediateRootElement().Reload(messageElement, UITableViewRowAnimation.Automatic);
               licenseElement.GetImmediateRootElement().Reload(licenseElement, UITableViewRowAnimation.Automatic);
               foreach (RotatedRect box in licenseBoxList)
               {
                          
                  image.Draw(box, new Bgr(Color.Red), 2);
               }
               Size frameSize = FrameSize;
               using (Mat resized = new Mat())
                  {
                     CvInvoke.ResizeForFrame(image, resized, frameSize);
                     imageView.Image = resized.ToUIImage();
                     imageView.Frame = new RectangleF(PointF.Empty, resized.Size);
                  }
               imageView.SetNeedsDisplay();
                  ReloadData();
            }
         }
         )});
         root.Add(new Section("Recognition Time") { messageElement });
         root.Add(new Section("License Plate") { licenseElement });
         root.Add(new Section() { imageView });
      }
Example #33
        public Emgu.CV.Mat detectarLineas(Mat img)
        {
            double cannyThreshold = umbral;
            double cannyThresholdLinking = umbral - 60;

            //Convert the image to grayscale and filter out the noise
            Emgu.CV.Mat uimage = img.Clone();
            Emgu.CV.Mat imgret = new Emgu.CV.Mat();

            Emgu.CV.Mat cannyEdges = new Emgu.CV.Mat();
            Emgu.CV.CvInvoke.Canny(uimage, cannyEdges, cannyThreshold, cannyThresholdLinking);

            Emgu.CV.Structure.LineSegment2D[] lines = Emgu.CV.CvInvoke.HoughLinesP(
                cannyEdges,
                1,               //Distance resolution in pixel-related units
                Math.PI / 180.0, //Angle resolution measured in radians
                255 - umbral,    //threshold
                30,              //min line width
                5);              //gap between lines

            Emgu.CV.Image<Emgu.CV.Structure.Gray, Byte> lineImage = new Emgu.CV.Image<Emgu.CV.Structure.Gray, Byte>(img.Width, img.Height);
            foreach (Emgu.CV.Structure.LineSegment2D line in lines)
            {
                lineImage.Draw(line, new Gray(255), 2);
            }

            Emgu.CV.CvInvoke.BitwiseNot(uimage, uimage);
            Emgu.CV.CvInvoke.BitwiseNot(lineImage, lineImage);

            guardarArchivo(lineImage.Mat, "3imagenmascara");
            guardarArchivo(uimage, "4imagenLineas");

            Emgu.CV.CvInvoke.BitwiseAnd(uimage, lineImage, imgret);
            Emgu.CV.CvInvoke.BitwiseNot(imgret, imgret);

            guardarArchivo(imgret, "5imagensinLineas");
            return(imgret);
        }
        public float ProbabilityMatch(Mat inImage)
        {
            List<BitArray> inFileResults = new List<BitArray>(3);

            var tempRect = ProcessingTools.findTextEdge<Bgr, double>(inImage, new double[] { highThreshold, highThreshold, highThreshold });
            //tempRect.Height = textRect.Height;
            //tempRect.Width = textRect.Width;

            //inFileResults.Add(ProcessingTools.testLine<Bgr, double>(inImage, new double[] { highThreshold, highThreshold, highThreshold },
            //    textRect.Top + textRect.Height / 4));
            //inFileResults.Add(ProcessingTools.testLine<Bgr, double>(inImage, new double[] { highThreshold, highThreshold, highThreshold },
            //    textRect.Top + textRect.Height / 2));
            //inFileResults.Add(ProcessingTools.testLine<Bgr, double>(inImage, new double[] { highThreshold, highThreshold, highThreshold },
            //    (int)(textRect.Top + textRect.Height * 0.75)));
            //CvInvoke.Rectangle(inImage, tempRect, new Bgr(0, 0, 255).MCvScalar);
            //ImageViewer.Show(inImage, "inPlaceTest");

            Mat croppedImage = new Mat(inImage, tempRect);
            inFileResults.Add(ProcessingTools.testLine<Bgr, double>(croppedImage, new double[] { highThreshold, highThreshold, highThreshold },
                tempRect.Height / 4));
            inFileResults.Add(ProcessingTools.testLine<Bgr, double>(croppedImage, new double[] { highThreshold, highThreshold, highThreshold },
                tempRect.Height / 2));
            inFileResults.Add(ProcessingTools.testLine<Bgr, double>(croppedImage, new double[] { highThreshold, highThreshold, highThreshold },
                (int)(tempRect.Height * 0.75)));

            //Console.WriteLine("Disc using lines {0}, {1}, {2}",
            //    textRect.Height / 4,
            //    textRect.Height / 2,
            //    (int)(textRect.Height * 0.75));

            float[] DotProduct = new float[] { 0, 0, 0 };

            for (int iRow = 0; iRow < 3; iRow++)
            {
                float MagIn = 0;
                float MagRes = 0;
                for (int pixel = 0; pixel < resultList[0].Length; pixel++)
                {

                    if (inFileResults[iRow].Length > pixel)
                    {
                        DotProduct[iRow] += Convert.ToInt32(inFileResults[iRow][pixel]) * Convert.ToInt32(resultList[iRow][pixel]);

                        if (inFileResults[iRow][pixel]) { MagIn++; }
                        if (resultList[iRow][pixel]) { MagRes++; }
                    }
                    else { break; }
                }
                if(MagIn == 0 || MagRes == 0) { continue; }
                double temp = Math.Sqrt(Convert.ToDouble(MagIn)) * Math.Sqrt(Convert.ToDouble(MagRes));
                DotProduct[iRow] /= (float)temp;

            }

            //Return an average of the three cosines.
            return (DotProduct[0] + DotProduct[1] + DotProduct[2]) / 3.0f;
        }
Example #35
 public Rectangle[] FindEyes(string eyeFileName, Image<Gray, Byte> imageFace)
 {
     using (CascadeClassifier eye = createClassifier(eyeFileName))
     {
         Rectangle[] eyeRegion = eye.DetectMultiScale(imageFace, eyescale, eyeneighbors, new Size(eyeminsize, eyeminsize), new Size(eyemaxsize, eyemaxsize));
         return eyeRegion;
     }
 }
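A usage sketch; the cascade file name and the pre-cropped face image are assumptions:

    Rectangle[] eyes = FindEyes("haarcascade_eye.xml", faceImage);
    foreach (Rectangle eyeRect in eyes)
        faceImage.Draw(eyeRect, new Gray(255), 2); // outline each detected eye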
Example #36
        public override void ImageGrabbedHandler(object sender, EventArgs e)
        {
            using (var matCaptured = new Mat())
            {
                var retrieveElapsed = Stopwatch.StartNew();
                CameraCapture.Retrieve(matCaptured);
                retrieveElapsed.Stop();

                if (_readyRectangle.IsEmpty)
                {
                    _detectorInput.ErodeDilateIterations = (int) spinDilateIterations.Value;
                    _detectorInput.Settings.Roi = GetRegionOfInterestFromControls();
                    _detectorInput.Captured = matCaptured;

                    var output = _colorDetector.Process(_detectorInput);

                    DrawReticle(output.CapturedImage, output.CentralPoint.ToPoint(), Color.Aqua);

                    if (output.IsDetected)
                    {
                        var radius = 50;
                        var circle = new CircleF(output.CentralPoint, radius);
                        var color = new Bgr(Color.Yellow);
                        output.CapturedImage.Draw(circle, color, 3);
                        var ballTextLocation = output.CentralPoint.ToPoint();
                        ballTextLocation.X += radius;
                        //  output.CapturedImage.Draw("ball", ballTextLocation, FontFace.HersheyPlain, 3, color);
                    }

                    if (checkBoxRoi.Checked)
                    {
                        output.CapturedImage.Draw(_detectorInput.Settings.Roi, Color.Green.ToBgr(), 3);
                    }

                    if (!_imageBoxSelector.SeedingRectangle.IsEmpty)
                    {
                        output.CapturedImage.Draw(_imageBoxSelector.SeedingRectangle, new Bgr(Color.Chartreuse));
                    }

                    imageBoxCaptured.Image = output.CapturedImage;
                    imageBoxFiltered.Image = output.ThresholdImage;

                    NotifyStatus(
                        "Retrieved frame in {0}, {1}"
                        , retrieveElapsed.Elapsed.ToHumanReadable(HumanReadableTimeSpanOptions.Abbreviated)
                        , output);
                }
                else
                {
                    DoAutoThresholding(matCaptured);
                }

                ResizeImageControls();

            }
        }
Example #37
        static Emgu.CV.Mat cannyEdges(Emgu.CV.Mat inImage)
        {
            double thresh1 = 180.0;
            double thresh2 = 160.0;

            Emgu.CV.Mat outImage = new Emgu.CV.Mat();
            CvInvoke.Canny(inImage, outImage, thresh1, thresh2);

            return(outImage);
        }
Example #38
        public int encontrarDimensionLetra()
        {
            int tamano = 0;

            System.Drawing.Bitmap bmp =
                new System.Drawing.Bitmap(System.Drawing.Image.FromFile(ruta + "files/6imagenthresherode.bmp"));
            Emgu.CV.Image <Emgu.CV.Structure.Gray, Byte> imgt = new Emgu.CV.Image <Emgu.CV.Structure.Gray, Byte>(bmp);

            Emgu.CV.Structure.MCvScalar sc = new Emgu.CV.Structure.MCvScalar();
            Size s = new Size(imgt.Width / 10, 1);

            Point p       = new Point(-1, -1);
            Mat   element = Emgu.CV.CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, s, p);


            Emgu.CV.CvInvoke.MorphologyEx(imgt, imgt, Emgu.CV.CvEnum.MorphOp.Close, element, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Constant, sc);
            guardarArchivo(imgt.Mat, "imagenCerrada");
            List <int> valores = new List <int>();



            Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint();

            Emgu.CV.Mat hier = new Emgu.CV.Mat();
            Emgu.CV.CvInvoke.FindContours(imgt, contours, hier, Emgu.CV.CvEnum.RetrType.Tree, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxNone);

            Emgu.CV.Util.VectorOfVectorOfPoint contours_poly = new Emgu.CV.Util.VectorOfVectorOfPoint(contours.Size);

            for (int i = 0; i < contours.Size; i++)
            {
                if (contours[i].Size > 100)//80
                {
                    Emgu.CV.CvInvoke.ApproxPolyDP((contours[i]), contours_poly[i], 3, true);

                    Rectangle appRect = Emgu.CV.CvInvoke.BoundingRectangle(contours_poly[i]);                //get the bounding rect
                    if (appRect.Height > 2)
                    {
                        valores.Add(appRect.Height);
                    }
                }
            }
            valores.Sort();
            if (valores.Count % 2 == 0)
            {
                //even count: the median is the average of the two middle values
                tamano = (valores[valores.Count / 2 - 1] + valores[valores.Count / 2]) / 2;
            }
            else
            {
                tamano = valores[valores.Count / 2];
            }



            return(tamano);
        }
Example #39
 private static CGImage RgbaByteMatToCGImage(Mat bgraByte)
 {
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
       bgraByte.DataPointer,
       bgraByte.Width, bgraByte.Height,
       8,
       bgraByte.Width*4,
       cspace,
       CGImageAlphaInfo.PremultipliedLast))
       return context.ToImage();
 }
Example #40
 public static string OcrImage(Emgu.CV.OCR.Tesseract _ocr, Emgu.CV.Mat image)
 {
     using (var imageColor = new Mat())
         using (Mat imgGrey = new Mat())
             using (Mat imgThresholded = new Mat())
             {
                 if (image.NumberOfChannels == 1)
                 {
                     CvInvoke.CvtColor(image, imageColor, ColorConversion.Gray2Bgr);
                 }
                 else
                 {
                     image.CopyTo(imageColor);
                 }
                 //Interfaces.Image.Util.SaveImageStamped(imageColor.Bitmap, "OcrImage-Color");
                 _ocr.SetImage(imageColor);
                 _ocr.AnalyseLayout();
                 if (_ocr.Recognize() != 0)
                 {
                     throw new Exception("Failed to recognizer image");
                 }
                 Emgu.CV.OCR.Tesseract.Character[] characters = _ocr.GetCharacters();
                 Log.Debug("GetCharacters found " + characters.Length + " with colors");
                 if (characters.Length == 0)
                 {
                     CvInvoke.CvtColor(image, imgGrey, ColorConversion.Bgr2Gray);
                     //Interfaces.Image.Util.SaveImageStamped(imgGrey.Bitmap, "OcrImage-Gray");
                     _ocr.SetImage(imgGrey);
                     _ocr.AnalyseLayout();
                     if (_ocr.Recognize() != 0)
                     {
                         throw new Exception("Failed to recognizer image");
                     }
                     characters = _ocr.GetCharacters();
                     Log.Debug("GetCharacters found " + characters.Length + " with grey scaled");
                     if (characters.Length == 0)
                     {
                         CvInvoke.Threshold(imgGrey, imgThresholded, 65, 255, ThresholdType.Binary);
                         //Interfaces.Image.Util.SaveImageStamped(imgThresholded.Bitmap, "OcrImage-Thresholded");
                         _ocr.SetImage(imgThresholded);
                         _ocr.AnalyseLayout();
                         if (_ocr.Recognize() != 0)
                         {
                             throw new Exception("Failed to recognizer image");
                         }
                         characters = _ocr.GetCharacters();
                         Log.Debug("GetCharacters found " + characters.Length + " thresholded");
                     }
                 }
                 return(_ocr.GetUTF8Text().TrimEnd(Environment.NewLine.ToCharArray()));
             }
 }
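A usage sketch, assuming a ./tessdata folder with eng.traineddata next to the executable:

    using (var ocr = new Emgu.CV.OCR.Tesseract("./tessdata", "eng", Emgu.CV.OCR.OcrEngineMode.Default))
    using (Mat page = CvInvoke.Imread("page.png", ImreadModes.Color))
    {
        string text = OcrImage(ocr, page);
        Console.WriteLine(text);
    }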
Example #41
        public static void Test(string img1, string img2)
        {
            long matchTime;
            long score;

            using (Mat modelImage = CvInvoke.Imread(img1, ImreadModes.Grayscale))
                using (Mat observedImage = CvInvoke.Imread(img2, ImreadModes.Grayscale))
                {
                    Mat result = Draw(modelImage, observedImage, out matchTime, out score);
                    var iv     = new emImageViewer(result, score);
                    iv.Show();
                }
        }
Example #42
 public override void ImageGrabbedHandler(object sender, EventArgs e)
 {
     using (var matCaptured = new Mat())
     {
         CameraCapture.Retrieve(matCaptured);
         var input = new CascadeDetectorInput { Captured = matCaptured };
         var result = _detector.Process(input);
         if (result.IsDetected)
         {
             Log.Info(m => m("{0}", result));
         }
     }
 }
        //Constructor with just filenames
        public fileImageProducer(IEnumerable<string> input)
        {
            imageList = new List<Image>();

            int fauxTime = 0;
            foreach (string i in input)
            {
                Mat imageTemp = CvInvoke.Imread(i, LoadImageType.Unchanged);
                imageList.Add(new Image(imageTemp, "local", fauxTime));
                fauxTime++;
            }
        }
Example #44
        //private FaceEyes Detect(byte [] imageBytes)
        private FaceEyes Detect(Mat image2)
        //private FaceEyes Detect(Mat ugray)
        {
            FaceEyes FE = new FaceEyes();

            if (/*face != null &&*/ eye != null)
            {
                //watch = Stopwatch.StartNew();
                Log.Error(TAG, "\t\t -- FaceEyes Detect()");
                //using (UMat ugray = new UMat())
                //{
                Mat ugray = new Mat();
                CvInvoke.CvtColor(image2, ugray, Emgu.CV.CvEnum.ColorConversion.Rgb2Gray);

                //normalizes brightness and increases contrast of the image
                CvInvoke.EqualizeHist(ugray, ugray);

                //Rectangle[] facesDetected = face.DetectMultiScale(
                //   ugray, 1.1, 10, new System.Drawing.Size(10,10));

                //FE.Faces.AddRange(facesDetected);

                //foreach (Rectangle f in facesDetected)
                //{
                //Log.Error(TAG, "\t\t -- FaceEyes Detect()\t FACE DETECTED");
                //Mat faceRegion = new Mat(ugray, f);
                //Get the region of interest on the faces
                //using (UMat faceRegion = new UMat(ugray, f))
                //{

                Rectangle[] eyesDetected = eye.DetectMultiScale(
                    ugray, 1.1, 20, new System.Drawing.Size(20, 20));
                //faceRegion, 1.1, 10, new System.Drawing.Size(20, 20));

                foreach (Rectangle e in eyesDetected)
                {
                    Log.Error(TAG, "\t\t -- FaceEyes Detect()\t EYE DETECTED");

                    Rectangle eyeRect = e;
                    //eyeRect.Offset(e.X,e.Y); //offset from top(x)-left(y)
                    FE.Eyes.Add(eyeRect);
                }
                //}
                //}
                //}
                //watch.Stop();
            }

            return(FE);
        }
Example #45
 public void SetImage(Emgu.CV.Mat image)
 {
     if (image == null)
     {
         this.DisplayImage.Source = null;
         return;
     }
     using (VectorOfByte vb = new VectorOfByte())
     {
         CvInvoke.Imencode(".jpg", image, vb);
         byte[] rawData = vb.ToArray();
         this.DisplayImage.Source = ImageSource.FromStream(() => new MemoryStream(rawData));
     }
 }
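A note on the design: encoding the Mat to an in-memory JPEG and serving it through a stream-backed ImageSource avoids per-platform bitmap conversions in Xamarin.Forms, at the cost of one compression round-trip per call; for high frame rates a platform-specific renderer would likely be cheaper.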
Example #46
    void Processar2()
    {
        Mat thresh = new Mat();
        VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
        Mat image = CvInvoke.Imread("C:\\Users\\guilherme.schafer\\Documents\\Geometric\\" + "circulo123.png", Emgu.CV.CvEnum.LoadImageType.AnyColor);

        //Mat image = CvInvoke.Imread(_SavePath + _CaptureCounter.ToString() + ".png", Emgu.CV.CvEnum.LoadImageType.AnyColor);
        //CvInvoke.Resize(image, image, new System.Drawing.Size(500, 500));
        CvInvoke.CvtColor(image, image, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
        CvInvoke.GaussianBlur(image, image, new System.Drawing.Size(5, 5), 0);
        Mat hierarchy = new Mat();

        CvInvoke.Threshold(image, thresh, 240, 255, Emgu.CV.CvEnum.ThresholdType.Binary);
        CvInvoke.FindContours(thresh, contours, hierarchy, Emgu.CV.CvEnum.RetrType.Tree, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
        for (var x = 0; x < contours.Size; x++)
        {
            var shape   = "";
            var peri    = CvInvoke.ArcLength(contours[x], true);
            // Approximate each contour with a tolerance of 1% of its perimeter.
            var epsilon = 0.01 * peri;
            var aprox   = new VectorOfPoint();
            CvInvoke.ApproxPolyDP(contours[x], aprox, epsilon, true);
            // Classify by the vertex count of the approximated polygon.
            if (aprox.Size == 3)
            {
                shape = "Triangulo";
            }
            else if (aprox.Size == 4)
            {
                shape = "Rectângulo";
            }
            else if (aprox.Size == 5)
            {
                shape = "Pentagono";
            }
            else if (aprox.Size >= 6 && aprox.Size < 15) // was "6 < aprox.Size", which misclassified hexagons as circles
            {
                shape = "Ellipse";
            }
            else
            {
                shape = "Circulo";
            }
            UnityEngine.Debug.Log(shape);
        }
    }
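The classification above keys purely on the vertex count returned by ApproxPolyDP. A hypothetical helper that mirrors those branches, useful for exercising the thresholds in isolation:

    // Map an approximated polygon's vertex count to the shape names used above.
    static string ClassifyByVertexCount(int size)
    {
        if (size == 3) return "Triangulo";
        if (size == 4) return "Rectângulo";
        if (size == 5) return "Pentagono";
        if (size >= 6 && size < 15) return "Ellipse";
        return "Circulo";
    }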
Example #47
 private void EmguFaceDetector()
 {
     Emgu.CV.CascadeClassifier emguFaceClassifier = null;
     if (File.Exists(this.ImagePath))
     {
         if (File.Exists(@"./haarcascade/haarcascade_frontalface_alt.xml"))
         {
             emguFaceClassifier = new Emgu.CV.CascadeClassifier(@"./haarcascade/haarcascade_frontalface_alt.xml");
             // Load as 3-channel BGR; the original mode 0 (grayscale) would make the Bgr2Gray conversion below fail.
             Emgu.CV.Mat src  = CvInvoke.Imread(this.ImagePath, Emgu.CV.CvEnum.ImreadModes.Color);
             Emgu.CV.Mat gray = new Emgu.CV.Mat();
             CvInvoke.CvtColor(src, gray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
             var faces   = emguFaceClassifier.DetectMultiScale(gray, 1.1, 2, new System.Drawing.Size(30, 30));
             int facecnt = faces.Length;
         }
     }
 }
Example #48
        private Mat Yuv2Rgb(Image yuvImg)
        {
            Log.Error(TAG, "\t\t--Yuv2Rgb, Thread:" + this);

            // Copy all three planes into one contiguous buffer. This assumes a
            // tightly packed planar YUV420 frame (no row padding between planes).
            byte[] bytes = new byte[yuvImg.Width * yuvImg.Height * 3 / 2];
            int offset = 0;
            foreach (Image.Plane plane in yuvImg.GetPlanes())
            {
                int count = plane.Buffer.Remaining();
                plane.Buffer.Get(bytes, offset, count);
                offset += count;
            }

            // The buffer holds raw pixels, not an encoded image, so the original
            // Imdecode call did not apply; wrap a height*3/2 x width single-channel Mat instead.
            Mat rgbMat = new Mat();
            GCHandle handle = GCHandle.Alloc(bytes, GCHandleType.Pinned);
            using (Mat yuvMat = new Mat(yuvImg.Height * 3 / 2, yuvImg.Width, Emgu.CV.CvEnum.DepthType.Cv8U, 1, handle.AddrOfPinnedObject(), yuvImg.Width))
            {
                CvInvoke.CvtColor(yuvMat, rgbMat, Emgu.CV.CvEnum.ColorConversion.Yuv420P2Rgb);
            }
            handle.Free();

            return(rgbMat);
        }
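A hypothetical call site, e.g. inside an ImageReader.IOnImageAvailableListener callback on Android (the reader instance is assumed):

            using (Image img = reader.AcquireLatestImage())
            using (Mat rgb = Yuv2Rgb(img))
            {
                // consume rgb here
            }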
Example #49
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Mat Draw(Mat modelImage, Mat observedImage, out long matchTime, out long score)
        {
            Mat homography;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;

            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                Mat mask;
                FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,
                          out mask, out homography, out score);

                //Draw the matched keypoints
                Mat result = new Mat();
                Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                              matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

                #region draw the projected region on the image

                if (homography != null)
                {
                    //draw a rectangle along the projected model
                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[]  pts  = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);

#if NETFX_CORE
                    Point[] points = Extensions.ConvertAll <PointF, Point>(pts, Point.Round);
#else
                    Point[] points = Array.ConvertAll <PointF, Point>(pts, Point.Round);
#endif
                    using (VectorOfPoint vp = new VectorOfPoint(points))
                    {
                        CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
                    }
                }
                #endregion

                return(result);
            }
        }
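A hedged usage sketch for Draw (the file names are placeholders; FindMatch is assumed from the surrounding class):

        long matchTime, score;
        using (Mat model = CvInvoke.Imread("model.png", Emgu.CV.CvEnum.ImreadModes.Color))
        using (Mat observed = CvInvoke.Imread("scene.png", Emgu.CV.CvEnum.ImreadModes.Color))
        using (Mat result = Draw(model, observed, out matchTime, out score))
        {
            CvInvoke.Imwrite("matches.png", result);
        }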
Example #50
        /// <summary>
        /// A method to get keypoints from a frame.
        /// </summary>
        /// <param name="frame">A copy of <see cref="m_frame"/>. It could be resized beforehand.</param>
        public void ProcessFrame(Emgu.CV.Mat frame)
        {
            if (!inprocessframe)
            {
                inprocessframe = true;
                DateTime start = DateTime.Now;

                m_posenet.Inference(frame);

                DateTime stop         = DateTime.Now;
                long     elapsedTicks = stop.Ticks - start.Ticks;
                TimeSpan elapsedSpan  = new TimeSpan(elapsedTicks);
                // Use TotalMilliseconds: the Milliseconds component wraps at 1000 and is 0 for sub-millisecond frames.
                Console.WriteLine(1000 / elapsedSpan.TotalMilliseconds);

                inprocessframe = false;
            }
        }
Example #51
        public static List <Rectangle> DetectFaces(Emgu.CV.Mat image)
        {
            List <Rectangle> faces = new List <Rectangle>();
            var facesCascade       = HttpContext.Current.Server.MapPath("~/face.xml");

            using (Emgu.CV.CascadeClassifier face = new Emgu.CV.CascadeClassifier(facesCascade))
            {
                using (UMat ugray = new UMat())
                {
                    CvInvoke.CvtColor(image, ugray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
                    CvInvoke.EqualizeHist(ugray, ugray);
                    Rectangle[] facesDetected = face.DetectMultiScale(
                        ugray,
                        1.1,
                        10,
                        new System.Drawing.Size(20, 20));
                    faces.AddRange(facesDetected);
                }
            }
            return(faces);
        }
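A hypothetical call site for DetectFaces (the image path is a placeholder):

        Mat photo = CvInvoke.Imread("upload.jpg", Emgu.CV.CvEnum.ImreadModes.Color);
        foreach (Rectangle face in DetectFaces(photo))
        {
            CvInvoke.Rectangle(photo, face, new MCvScalar(0, 0, 255), 2);
        }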
Example #52
        public static BitArray testLine <TColor, TDepth>(Emgu.CV.Mat imageLine, TDepth[] channelThresh, int row = 0)
            where TColor : struct, IColor
            where TDepth : IComparable <TDepth>, new()
        {
            if (imageLine.NumberOfChannels < channelThresh.Length)
            {
                throw new IndexOutOfRangeException("Array exceeds number of channels in image.");
            }

            Image <TColor, TDepth> tempImage = imageLine.ToImage <TColor, TDepth>();
            BitArray outArray = new BitArray(imageLine.Width);

            for (int pixel = 0; pixel < imageLine.Width; pixel++)
            {
                bool temp = true;
                for (int channel = 0; channel < channelThresh.Length; channel++)
                {
                    temp &= channelThresh[channel].CompareTo(tempImage.Data[row, pixel, channel]) <= 0;
                }
                outArray[pixel] = temp;
            }
            return(outArray);
        }
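A hedged usage sketch (imageLine is any 8-bit BGR Mat; the threshold values are arbitrary):

        // True wherever B, G and R in row 0 are all at least 100.
        BitArray mask = testLine<Bgr, byte>(imageLine, new byte[] { 100, 100, 100 });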
Example #53
        /// <summary>
        /// Read a file into Mat using native implementations
        /// </summary>
        /// <param name="fileName">The name of the file</param>
        /// <param name="mat">The Mat to read the file into</param>
        /// <param name="loadType">The image load type.</param>
        /// <returns>True if successful</returns>
        public static bool ReadFileToMat(String fileName, Mat mat, CvEnum.ImreadModes loadType)
        {
            if (_fileReaderMatArr == null)
            {
                Type[] readersTypes             = Emgu.Util.Toolbox.GetIntefaceImplementationFromAssembly <Emgu.CV.IFileReaderMat>();
                Emgu.CV.IFileReaderMat[] matArr = new IFileReaderMat[readersTypes.Length];
                for (int i = 0; i < readersTypes.Length; i++)
                {
                    matArr[i] = Activator.CreateInstance(readersTypes[i]) as Emgu.CV.IFileReaderMat;
                }

                _fileReaderMatArr = matArr;
            }

            foreach (IFileReaderMat reader in _fileReaderMatArr)
            {
                if (reader.ReadFile(fileName, mat, loadType))
                {
                    return(true);
                }
            }

            return(false);
        }
Example #54
        /// <summary>
        /// Write a Mat into a file using native implementations
        /// </summary>
        /// <param name="fileName">The name of the file</param>
        /// <param name="mat">The Mat to be written</param>
        /// <returns>True if successful</returns>
        public static bool WriteMatToFile(Mat mat, String fileName)
        {
            if (_fileWriterMatArr == null)
            {
                Type[] writerTypes = Emgu.Util.Toolbox.GetIntefaceImplementationFromAssembly <Emgu.CV.IFileWriterMat>();
                Emgu.CV.IFileWriterMat[] matArr = new IFileWriterMat[writerTypes.Length];
                for (int i = 0; i < writerTypes.Length; i++)
                {
                    matArr[i] = Activator.CreateInstance(writerTypes[i]) as Emgu.CV.IFileWriterMat;
                }

                _fileWriterMatArr = matArr;
            }

            foreach (IFileWriterMat writer in _fileWriterMatArr)
            {
                if (writer.WriteFile(mat, fileName))
                {
                    return(true);
                }
            }

            return(false);
        }
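A hedged round-trip sketch using the two native reader/writer helpers above (the paths are placeholders):

        Mat m = new Mat();
        if (ReadFileToMat("input.png", m, Emgu.CV.CvEnum.ImreadModes.Color))
        {
            WriteMatToFile(m, "output.png");
        }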
Example #55
      public static async Task<Mat> FromStorageFile(StorageFile file)
      {
         using (IRandomAccessStream fileStream = await file.OpenAsync(Windows.Storage.FileAccessMode.Read))
         {
            BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

            Size s = new Size((int)decoder.PixelWidth, (int)decoder.PixelHeight);

            BitmapTransform transform = new BitmapTransform();               
            PixelDataProvider pixelData = await decoder.GetPixelDataAsync(
            BitmapPixelFormat.Bgra8, BitmapAlphaMode.Straight, transform, ExifOrientationMode.IgnoreExifOrientation,
                ColorManagementMode.DoNotColorManage);

            byte[] sourcePixels = pixelData.DetachPixelData();
            GCHandle handle = GCHandle.Alloc(sourcePixels, GCHandleType.Pinned);
            using (Image<Bgra, Byte> img = new Image<Bgra, byte>(s.Width, s.Height, s.Width * 4, handle.AddrOfPinnedObject()))
            {
               Mat m = new Mat();
               CvInvoke.CvtColor(img, m, ColorConversion.Bgra2Bgr);
               handle.Free();
               return m;
            }
         }
      }
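A hypothetical async call site for FromStorageFile (assuming an already configured FileOpenPicker named picker):

       StorageFile file = await picker.PickSingleFileAsync();
       Mat m = await FromStorageFile(file);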
Example #56
        static void SuperimposeGT(string image_folder)
        {
            string[] image_files = System.IO.Directory.GetFiles(image_folder, "*.jpg");
            for (int i = 0; i < image_files.Length; i++)
            {
                string image_file = image_files[i];
                string xml_file   = image_file.Replace(@"\Image\", @"\GT\").Replace(".jpg", "_data.xml");

                if (System.IO.File.Exists(xml_file))
                {
                    iPhotoDrawAnnotation annotation = new iPhotoDrawAnnotation();
                    annotation.LoadRectObjects(xml_file);

                    Mat image = new Emgu.CV.Mat(image_file, Emgu.CV.CvEnum.LoadImageType.Color);
                    for (int j = 0; j < annotation.RectObjects.Count; j++)
                    {
                        RectObject rect_object = annotation.RectObjects[j];
                        MCvScalar  scalar      = rect_object.Name.StartsWith("panel") ? new MCvScalar(0, 0, 255) : new MCvScalar(255, 0, 0);
                        CvInvoke.Rectangle(image, rect_object.Rect, scalar);
                    }
                    CvInvoke.Imwrite(System.IO.Path.GetFileName(image_file), image);
                }
            }
        }
Example #57
        private void Capture_ImageGrabbed(object sender, EventArgs e)
        {
            try
            {
                Emgu.CV.Mat frame = new Emgu.CV.Mat();
                if (capture.Retrieve(frame))
                {
                    Emgu.CV.Mat grayFrame = new Emgu.CV.Mat();
                    Emgu.CV.CvInvoke.CvtColor(frame, grayFrame, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);

                    Rectangle[] faces = emguFaceClassifier.DetectMultiScale(grayFrame, ScaleFactor, Neighbors);
                    foreach (var face in faces)
                    {
                        Emgu.CV.CvInvoke.Rectangle(frame, face, new MCvScalar(0, 0, 255));
                    }
                    //ImageSource = ToBitmapSource(currentFrame);
                    //Bitmap bmi = frame.ToBitmap();
                    //ImageSource = ToBitmapImage(bmi);
                }
            }
            catch (Exception ex)
            {
                // Don't silently swallow capture errors; at least surface them while debugging.
                System.Diagnostics.Debug.WriteLine(ex);
            }
        }
Example #58
        /// <summary>
        /// Converts to CGImage
        /// </summary>
        /// <returns>The CGImage.</returns>
        public CGImage ToCGImage()
        {
            int       nchannels = NumberOfChannels;
            DepthType d         = Depth;

            if (nchannels == 4 && d == DepthType.Cv8U)
            {
                //bgra
                using (Mat tmp = new Mat())
                {
                    CvInvoke.CvtColor(this, tmp, ColorConversion.Bgra2Rgba);
                    return(RgbaByteMatToCGImage(tmp));
                }
            }
            else if (nchannels == 3 && d == DepthType.Cv8U)
            {
                //bgr
                using (Mat tmp = new Mat())
                {
                    CvInvoke.CvtColor(this, tmp, ColorConversion.Bgr2Rgba);
                    return(RgbaByteMatToCGImage(tmp));
                }
            }
            else if (nchannels == 1 && d == DepthType.Cv8U)
            {
                using (Mat tmp = new Mat())
                {
                    CvInvoke.CvtColor(this, tmp, ColorConversion.Gray2Rgba);
                    return(RgbaByteMatToCGImage(tmp));
                }
            }
            else
            {
                throw new Exception(String.Format("Converting from Mat of {0} channels {1} to CGImage is not supported. Please convert Mat to 3 channel Bgr image of Byte before calling this function.", nchannels, d));
            }
        }
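A hedged usage sketch on iOS (assuming the method above is a member of Mat, as its use of this suggests; the file name is a placeholder):

            using (Mat m = CvInvoke.Imread("photo.jpg", Emgu.CV.CvEnum.ImreadModes.Color))
            {
                CGImage cg = m.ToCGImage();
            }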
Example #59
 public override void ImageGrabbedHandler(object sender, EventArgs e)
 {
     using (var matCaptured = new Mat())
     {
         CameraCapture.Retrieve(matCaptured);
     }
 }
Example #60
        public void ProcessFrame(object sender, EventArgs arg)
        {
            _capture.FlipHorizontal = true;                      // Flip the image about the Y axis
            Mat imageMatrix = new Mat();                         // The matrix grabbed from the camera stream
            _capture.Retrieve(imageMatrix, 0);
            Image<Bgr, byte> imageFrameBGR = imageMatrix.ToImage<Bgr, byte>(); // Conversion
            Image<Gray, byte> imageFrameGray = RGBFilter(imageFrameBGR,
                                          red_color_min, red_color_max, // Threshold filtering on color values
                                          green_color_min, green_color_max,
                                          blue_color_min, blue_color_max);

            imageFrameGray = MassCenter(imageFrameGray);
            Display(imageMatrix, imageFrameGray);                //<--------------------- display

            setLabelValue(labelTimer, my_timer.ElapsedMilliseconds.ToString());

            if (_isDetected)                                     // If an object has been found, track its motion
            {
                x_coord.Add(center_x);                           // append its center-of-mass coordinates to the arrays
                y_coord.Add(center_y);
                _count_frames++;                                 // increment the frame counter
            }
            else if (x_coord != null)                            // If the object is no longer visible
            {
                moutionType(x_coord, y_coord);
            }
        }