private Mat find_ball()
        {
            MCvScalar orangeMin = new MCvScalar(0, 0, 212);     //HSV lower bound (earlier tuning: 10 120 100)
            MCvScalar orangeMax = new MCvScalar(131, 255, 255); //HSV upper bound (earlier tuning: 70 255 255)

            Mat img    = _frame;
            Mat hsvImg = new Mat();

            CvInvoke.CvtColor(img, hsvImg, ColorConversion.Bgr2Hsv);
            CvInvoke.InRange(hsvImg, new ScalarArray(orangeMin), new ScalarArray(orangeMax),
                             hsvImg);
            //CvInvoke.MorphologyEx(hsvImg, hsvImg, MorphOp.Close, new Mat(), new System.Drawing.Point(-1, -1), 5, BorderType.Default, new MCvScalar());
            SimpleBlobDetectorParams param = new SimpleBlobDetectorParams();

            param.FilterByCircularity = false;
            param.FilterByConvexity   = false;
            param.FilterByInertia     = false;
            param.FilterByColor       = false;
            param.MinArea             = 800;
            param.MaxArea             = 5000;
            SimpleBlobDetector detector = new SimpleBlobDetector(param);

            MKeyPoint[] keypoints = detector.Detect(hsvImg);
            Features2DToolbox.DrawKeypoints(img, new VectorOfKeyPoint(keypoints), img,
                                            new Bgr(255, 0, 0), Features2DToolbox.KeypointDrawType.DrawRichKeypoints);

            // if several blobs fall inside the allowed region, the last one wins
            foreach (var item in keypoints)
            {
                if ((int)item.Point.X > x_min && (int)item.Point.X < x_max && (int)item.Point.Y > y_min && (int)item.Point.Y < y_max)
                {
                    centerX = (int)item.Point.X;
                    centerY = (int)item.Point.Y;
                }
                else
                {
                    centerX = dX;
                    centerY = dY;

                    total_error_x = 0;

                    total_error_y = 0;
                }
            }
            if (keypoints.Length == 0)
            {
                centerX = dX;
                centerY = dY;

                total_error_x = 0;

                total_error_y = 0;
            }

            lbl_x.Content = "Center X: " + centerX;
            lbl_y.Content = "Center Y: " + centerY;

            return(img);
        }
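A note on the area bounds used above: in OpenCV's SimpleBlobDetector, the MinArea/MaxArea settings are only consulted while area filtering is enabled (the FilterByArea flag, which defaults to true). A minimal sketch that makes the dependency explicit:

        SimpleBlobDetectorParams param = new SimpleBlobDetectorParams();
        param.FilterByArea = true;   // MinArea/MaxArea have no effect unless this is true
        param.MinArea      = 800;    // reject blobs smaller than roughly 28x28 px
        param.MaxArea      = 5000;
        SimpleBlobDetector detector = new SimpleBlobDetector(param);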
Example #2
        static public Image <Rgb, byte> SetKeyPont(Image <Rgb, byte> image, OriantatioOnMap oriantation, Bgr color)
        {
            Image <Rgb, byte> result = image.Clone();

            Features2DToolbox.DrawKeypoints(image, oriantation.VectorMapKeyPoint, result, color);
            return(result);
        }
Example #3
        void Application_Idle(object sender, EventArgs e)
        {
            if (vc != null && !pause)
            {
                SIFT detector = new SIFT();

                Emgu.CV.Util.VectorOfKeyPoint keypoints = new Emgu.CV.Util.VectorOfKeyPoint();

                vc.Read(frame);
                System.Threading.Thread.Sleep((int)(1000.0 / rate - 5));
                //imageBox1.Image = frame;

                frLbl.Text = rate.ToString();
                cfLbl.Text = currentFrame.ToString();
                fcLbl.Text = frameCount.ToString();

                vc.Read(frame);   // second read: the frame grabbed before the sleep is discarded
                imageBox1.Image = frame;
                //detector.Detect(frame);
                detector.DetectRaw(frame, keypoints);
                numOfKeyPoints = keypoints.Size;
                kpLbl.Text     = numOfKeyPoints.ToString();
                Features2DToolbox.DrawKeypoints(frame, keypoints, siftFrame, new Bgr(Color.Blue));
                imageBox2.Image = siftFrame;
                GC.Collect();

                currentFrame++;

                if (currentFrame >= frameCount)
                {
                    pause           = true;
                    button4.Enabled = false;
                }
            }
        }
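Constructing a new SIFT detector on every idle tick, as this handler does, allocates a native object per frame and leans on GC.Collect() to clean up. A sketch of one alternative, assuming the same fields (vc, frame, siftFrame, imageBox1, imageBox2) as the snippet; the detector is created once and reused:

        // created once (e.g. in the form constructor) and disposed when the form closes
        private readonly SIFT _detector = new SIFT();

        void Application_Idle(object sender, EventArgs e)
        {
            using (var keypoints = new Emgu.CV.Util.VectorOfKeyPoint())
            {
                vc.Read(frame);
                _detector.DetectRaw(frame, keypoints);
                Features2DToolbox.DrawKeypoints(frame, keypoints, siftFrame, new Bgr(Color.Blue));
                imageBox1.Image = frame;
                imageBox2.Image = siftFrame;
            }
        }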
Example #4
        private void FindKeypoint(int ID, InputFileModel inputFile, IFeatureDetector detector, bool AddToList = true)
        {
            WindowsFormHelper.AddLogToConsole($"Start finding key points for: {inputFile.fileInfo.Name.ToString()}\n");

            var detectedKeyPoints = detector.DetectKeyPoints(new Mat(inputFile.fileInfo.FullName));

            if (AddToList)
            {
                DetectedKeyPoints.Add(ID, new KeyPointModel()
                {
                    DetectedKeyPoints = new VectorOfKeyPoint(detectedKeyPoints),
                    InputFile         = inputFile,
                    ID = ID
                }
                                      );
            }

            WindowsFormHelper.AddLogToConsole($"FINISH finding key points for: {inputFile.fileInfo.Name.ToString()}\n");


            // Save drawing image
            Mat output = new Mat();

            Directory.CreateDirectory($@"{tempDirectory}\DrawKeypoint");
            Features2DToolbox.DrawKeypoints(new Mat(inputFile.fileInfo.FullName), new VectorOfKeyPoint(detectedKeyPoints), output, new Bgr(0, 0, 255), KeypointDrawType.DrawRichKeypoints);
            output.Save(Path.Combine($@"{tempDirectory}\DrawKeypoint", $"{Path.GetFileNameWithoutExtension(inputFile.fileInfo.Name)}.JPG"));
            fileManager.listViewerModel._lastDrawnKeypoint = new Image <Bgr, byte>(output.Bitmap);

            var file       = new InputFileModel(Path.Combine($@"{tempDirectory}\DrawKeypoint", $"{Path.GetFileNameWithoutExtension(inputFile.fileInfo.Name)}.JPG"));
            var imageList  = _winForm.ImageList[(int)EListViewGroup.DrawnKeyPoint];
            var listViewer = _winForm.ListViews[(int)EListViewGroup.DrawnKeyPoint];

            fileManager.AddInputFileToList(file, fileManager.listViewerModel.ListOfListInputFolder[(int)EListViewGroup.DrawnKeyPoint], imageList, listViewer);
        }
Example #5
        /// <summary>
        /// Draws data objects onto image and presents
        /// (for selected data objects in data grid).
        /// </summary>
        /// <param name="objects">The data objects.</param>
        public void DrawObjects(IList objects)
        {
            // clone last image from engine vm
            var image = EngineVm.Image.Clone();

            // create MKeyPoint[] from
            // objects of type KeyPoint
            var keypoints = objects
                            .OfType <KeyPoint>()
                            .Select(o => o.GetKeyPoint())
                            .ToArray();

            // draw selected keypoints
            Features2DToolbox
            .DrawKeypoints(
                image,
                new VectorOfKeyPoint(keypoints),
                image,
                new Bgr(Color.Red),
                Features2DToolbox.KeypointDrawType.DrawRichKeypoints);

            // draw selected boxes
            foreach (var obj in objects.OfType <Box>())
            {
                image.Draw(obj.GetBox(), new Bgr(Color.Red));
            }

            // draw selected circles
            foreach (var obj in objects.OfType <Circle>())
            {
                image.Draw(obj.GetCircle(), new Bgr(Color.Red));
            }

            // draw selected contours
            foreach (var obj in objects.OfType <Contour>())
            {
                image.Draw(obj.GetContour(), new Bgr(Color.Red));
            }

            // draw selected rotated boxes
            foreach (var obj in objects.OfType <RotatedBox>())
            {
                image.Draw(obj.GetBox(), new Bgr(Color.Red), 1);
            }

            // draw selected segments
            foreach (var obj in objects.OfType <Segment>())
            {
                image.Draw(obj.GetSegment(), new Bgr(Color.Red), 1);
            }

            // draw selected ellipses
            foreach (var obj in objects.OfType <RotBoxEllipse>())
            {
                image.Draw(obj.GetEllipse(), new Bgr(Color.Red));
            }

            // present the annotated image
            SetImage(image);
        }
Example #6
        public bool FindPattern(Mat image)
        {
            VectorOfKeyPoint keypoints;
            Mat descriptors;

            var gray = GetGray(image);

            FeaturesUtils.ExtractFeatures(gray, out keypoints, out descriptors);

            Features2DToolbox.DrawKeypoints(gray, keypoints, image, new Bgr(Color.Red), Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);

            VectorOfVectorOfDMatch matches;
            Mat homography;

            FeaturesUtils.GetMatches(keypoints, descriptors, _pattern.Keypoints, _pattern.Descriptors, out matches, out homography);

            _patternInfo.Homography = homography;

            var pts = Array.ConvertAll <Point, PointF>(_pattern.Points2d.ToArray(), a => a);

            pts = CvInvoke.PerspectiveTransform(pts, homography);
            var points = Array.ConvertAll(pts, Point.Round);

            _patternInfo.Points2d = new VectorOfPoint(points);

            _patternInfo.Draw2dContour(image, new MCvScalar(0, 200, 0));

            return(true);
        }
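FindPattern assumes FeaturesUtils.GetMatches always yields a usable homography; if the pattern is not in view, the matrix can come back null or empty and CvInvoke.PerspectiveTransform will throw. A hedged sketch of the guard, reusing the snippet's own names:

        FeaturesUtils.GetMatches(keypoints, descriptors, _pattern.Keypoints, _pattern.Descriptors, out matches, out homography);

        if (homography == null || homography.IsEmpty)
        {
            return false;   // pattern not found in this frame
        }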
Example #7
        public void SURFDraw(Mat image, Mat testImage)
        {
            VectorOfKeyPoint keyPoint = new VectorOfKeyPoint();
            SURF             surfCPU  = new SURF(500, 4, 2, true, false);

            surfCPU.DetectRaw(image, keyPoint);
            Features2DToolbox.DrawKeypoints(image, keyPoint, testImage, new Bgr(Color.Red), Features2DToolbox.KeypointDrawType.Default);
        }
Example #8
        /// <summary>
        /// Draws the feature points onto the image
        /// </summary>
        /// <param name="surf">SURF feature data</param>
        /// <returns>The image with the feature points drawn on it</returns>
        public static Image <Bgr, byte> DrawSURFFeature(SURFFeatureData surf)
        {
            VectorOfKeyPoint keyPoints = surf.GetKeyPoints();
            //draw the features
            Image <Bgr, byte> result = Features2DToolbox.DrawKeypoints(surf.GetImg(), surf.GetKeyPoints(), new Bgr(255, 255, 255), Features2DToolbox.KeypointDrawType.DEFAULT);

            return(result);
        }
Example #9
        public void SIFTDraw(Mat image, Mat testImage)
        {
            SIFT             siftCPU  = new SIFT();
            VectorOfKeyPoint keyPoint = new VectorOfKeyPoint();

            siftCPU.DetectRaw(image, keyPoint);

            Features2DToolbox.DrawKeypoints(image, keyPoint, testImage, new Bgr(Color.GreenYellow), Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);
        }
Example #10
        public static Image <Bgr, Byte> DrawFeatures(Image <Gray, Byte> modelImage)
        {
            SURFDetector      surfCPU          = new SURFDetector(500, false);
            VectorOfKeyPoint  modelKeyPoints   = new VectorOfKeyPoint();
            Matrix <float>    modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);
            Image <Bgr, Byte> result           = Features2DToolbox.DrawKeypoints(modelImage, modelKeyPoints, new Bgr(0, 0, 255), Features2DToolbox.KeypointDrawType.DEFAULT);

            return(result);
        }
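This example targets the older Emgu CV 2.x API, in which SURFDetector.DetectAndCompute returns a Matrix<float> of descriptors and Features2DToolbox.DrawKeypoints returns a new Image<Bgr, Byte>. Under Emgu CV 3+, the equivalent would look roughly like this sketch (void overloads writing into caller-supplied buffers):

        SURF surf = new SURF(500);
        VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint();
        Mat modelDescriptors = new Mat();
        surf.DetectAndCompute(modelImage, null, modelKeyPoints, modelDescriptors, false);

        Mat result = new Mat();
        Features2DToolbox.DrawKeypoints(modelImage, modelKeyPoints, result, new Bgr(0, 0, 255));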
Example #11
        public List <int> DetectBanknotesResults(ref Mat imageEval)
        {
            var           imageBackup       = imageEval.Clone();
            List <Result> detectorResultOut = DetectBanknotesTrain(imageEval);
            List <int>    results           = new List <int>();
            var           total             = 0.0;

            for (int i = 0; i < detectorResultOut.Count; ++i)
            {
                Result detectorResult = detectorResultOut[i];
                results.Add(detectorResult.GetTrainValue());

                Features2DToolbox.DrawKeypoints(imageEval, detectorResult.GetInliersKeypoints(), imageEval, new Bgr(0, 255, 0), Features2DToolbox.KeypointDrawType.Default);

                var valorTexto = detectorResult.GetTrainValue().ToString();

                //Message in Portuguese
                _synthesizer.SpeakAsync(valorTexto + " reais");

                total += Double.Parse(valorTexto);

                Mat imageMatchesSingle = imageBackup;

                Mat matchesInliers = detectorResult.getInliersMatches(ref imageMatchesSingle);

                Rectangle boundingBox = CvInvoke.BoundingRectangle(detectorResult.GetTrainContour());
                _util.CorrectBoundingBox(ref boundingBox, imageEval.Cols, imageEval.Rows);
                InterfaceUtil.DrawLabelInCenterOfROI(valorTexto, ref imageEval, ref boundingBox);
                InterfaceUtil.DrawLabelInCenterOfROI(valorTexto, ref matchesInliers, ref boundingBox);
                _util.DrawContour(ref imageEval, detectorResult.GetTrainContour(), detectorResult.GetTrainContourColor(), 2);
                _util.DrawContour(ref matchesInliers, detectorResult.GetTrainContour(), detectorResult.GetTrainContourColor(), 2);
            }

            if (total == 0)
            {
                //Message in Portuguese
                _synthesizer.SpeakAsync("Nenhuma cédula foi identificada");
            }
            else
            {
                ImageViewer iv = new ImageViewer(imageEval, "Result ");
                iv.Show();

                //Message in Portuguese
                _synthesizer.SpeakAsync("Valor total é " + total + " reais");
            }

            results.Sort();

            return(results);
        }
Example #12
        private void CaptureOnImageGrabbed(object sender, EventArgs eventArgs)
        {
            var capture = (Capture)sender;

            //Show time stamp
            double timeIndex = capture.GetCaptureProperty(CapProp.PosMsec);

            ProgressTime = TimeSpan.FromMilliseconds(timeIndex).ToString("g");

            //show frame number
            double frameNumber = capture.GetCaptureProperty(CapProp.PosFrames);
            double totalFrames = capture.GetCaptureProperty(CapProp.FrameCount);

            _progress = frameNumber / totalFrames;
            RaisePropertyChanged("Progress");

            // Show image with keyPoints
            var frame = new Mat();

            _capture.Retrieve(frame);
            var keyFeatures = _projectFile.Model.GetKeyFeatures((int)frameNumber - 1);

            var imageFrame = new Mat();

            Features2DToolbox.DrawKeypoints(frame, keyFeatures, imageFrame, new Bgr(Color.DarkBlue),
                                            Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);

            if (frameNumber > 1)
            {
                var matches = _projectFile.Model.GetMatches((int)frameNumber - 1);
                foreach (var match in matches)
                {
                    CvInvoke.Line(imageFrame,
                                  Point.Round(match.Item1.Point),
                                  Point.Round(match.Item2.Point),
                                  new Bgr(Color.Red).MCvScalar,
                                  2);
                }
            }

            OriginImage = VideoImageSource = imageFrame;

            //Wait to display correct framerate
            var frameRate = capture.GetCaptureProperty(CapProp.Fps);
            var rightElapsedMilliseconds = 1000.0 / frameRate;
            var realElapsedMilliseconds  = _stopwatch.ElapsedMilliseconds;
            var waitingMilliseconds      = Math.Max(0, rightElapsedMilliseconds - realElapsedMilliseconds);

            Thread.Sleep((int)waitingMilliseconds);
            _stopwatch.Restart();

            if (frameNumber == totalFrames)
            {
                Stop();
            }
        }
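For concreteness on the frame pacing: with a 30 fps clip, rightElapsedMilliseconds is 1000/30 ≈ 33.3 ms, so if retrieving and drawing a frame took 20 ms the handler sleeps roughly 13 ms; when processing is slower than one frame period, the Math.Max clamp skips the sleep entirely.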
Example #13
        private void extractFeatureButton_Click(object sender, EventArgs e)
        {
            trainingExtractSurfData = SURFMatch.CalSURFFeature(extractFeatureImage, new MCvSURFParams(500, false));
            //draw the features
            Image <Bgr, byte> result = Features2DToolbox.DrawKeypoints(trainingExtractSurfData.GetImg(), trainingExtractSurfData.GetKeyPoints(), new Bgr(255, 255, 255), Features2DToolbox.KeypointDrawType.DEFAULT);
            //display the result
            ImageViewer viewer = new ImageViewer(result, "Extracted Feature");

            viewer.Show();
        }
Example #14
        public void ShowKeyPoints()
        {
            lstMat.Clear();
            lstModelDescriptors.Clear();
            var featureDetector = new SIFT();

            Emgu.CV.Flann.LinearIndexParams ip = new Emgu.CV.Flann.LinearIndexParams();
            Emgu.CV.Flann.SearchParams      sp = new SearchParams();
            DescriptorMatcher matcher          = new FlannBasedMatcher(ip, sp);
            Rectangle         cropRect         = new Rectangle(842, 646, 70, 70);
            Mat mask = new Mat(new Size(70, 70), DepthType.Cv8U, 1);

            CvInvoke.Rectangle(mask, new Rectangle(0, 0, 70, 70), new MCvScalar(255, 255, 255), -1); //fill the whole mask with white
            CvInvoke.Circle(mask, new Point(35, 37), 22, new MCvScalar(0, 0, 0), -1);                //black out the centre, leaving a ring-shaped search area


            lstMat.Add(mask);
            String[] folders = { @"Linage2\Main\PartyAuto", @"Linage2\Main\PartyManual" };
            foreach (String folder in folders)
            {
                DirectoryInfo imageFolder = new DirectoryInfo(folder);
                FileInfo[]    files       = Utils.GetFilesByExtensions(imageFolder, ".jpg", ".png").ToArray();
                foreach (FileInfo finfo in files)
                {
                    Mat img  = CvInvoke.Imread(finfo.FullName, ImreadModes.Color);
                    Mat crop = CVUtil.crop_color_frame(img, cropRect);
                    //lstMat.Add(crop);
                    VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint();
                    Mat modelDescriptors            = new Mat();
                    featureDetector.DetectAndCompute(crop, mask, modelKeyPoints, modelDescriptors, false);
                    lstModelDescriptors.Add(modelDescriptors);
                    Mat result = new Mat();
                    Features2DToolbox.DrawKeypoints(crop, modelKeyPoints, result, new Bgr(Color.Red));

                    lstMat.Add(result);
                    //BOWImgDescriptorExtractor bow = new BOWImgDescriptorExtractor(featureDetector, matcher);
                }
            }


            /*BOWKMeansTrainer bowtrainer = new BOWKMeansTrainer(1000, new MCvTermCriteria(10, 0.001), 1, Emgu.CV.CvEnum.KMeansInitType.PPCenters);
             * foreach (Mat m in lstModelDescriptors) {
             *  bowtrainer.Add(m);
             * }
             * Mat dict = new Mat();
             * bowtrainer.Cluster();
             * StringBuilder sb = new StringBuilder();
             * Image<Bgr, Byte> imgsave = dict.ToImage<Bgr, Byte>();
             *
             * (new XmlSerializer(typeof(Image<Bgr, Byte>))).Serialize(new StringWriter(sb), imgsave);
             * Console.WriteLine(sb.ToString());*/
        }
Example #15
        public static UMat Run(Mat img)
        {
            var modelKeyPoints = new VectorOfKeyPoint();
            var result         = new UMat();

            using (UMat uModelImage = img.ToUMat(AccessType.Read))
            {
                SIFT siftfCPU         = new SIFT();
                UMat modelDescriptors = new UMat();
                siftfCPU.DetectRaw(uModelImage, modelKeyPoints);
                Features2DToolbox.DrawKeypoints(img, modelKeyPoints, result, new Bgr(Color.Red), Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);
            }

            return(result);
        }
Example #16
        public KeyPoints SIFTDescriptor()
        {
            KeyPoints result = new KeyPoints();
            //SiFT Descriptor
            SIFT             siftAlgo           = null;
            VectorOfKeyPoint modelKeyPointsSift = null;

            try
            {
                siftAlgo           = new SIFT();
                modelKeyPointsSift = new VectorOfKeyPoint();

                MKeyPoint[] siftPoints = siftAlgo.Detect(preProcessedImageInGrayScale);
                modelKeyPointsSift.Push(siftPoints);
                UMat siftDescriptors = new UMat();
                siftAlgo.DetectAndCompute(preProcessedImageInGrayScale, null, modelKeyPointsSift, siftDescriptors, true);
                Image <Gray, Byte> outputImage = new Image <Gray, byte>(
                    preProcessedImageInGrayScale.Width,
                    preProcessedImageInGrayScale.Height);
                Features2DToolbox.DrawKeypoints(
                    preProcessedImageInGrayScale,
                    modelKeyPointsSift,
                    outputImage,
                    new Bgr(255, 255, 255),
                    Features2DToolbox.KeypointDrawType.Default);

                string folderName = @"C:\Projects\LeafService\SiftImage";
                string pathString = System.IO.Path.Combine(folderName, "Sift" + DateTime.UtcNow.Ticks);
                System.IO.Directory.CreateDirectory(pathString);
                if (Directory.Exists(pathString))
                {
                    string newFilePath = Path.Combine(pathString, "SiftImage" + DateTime.UtcNow.Ticks + ".jpg");
                    outputImage.Save(newFilePath);   //save into the per-run directory created above
                    outputImage.Save(@"C:\Projects\LeafService\SIFTgray.jpg");
                }


                //outputImage.Save("sift.jpg");
                result.Descriptor = siftDescriptors;
                result.Points     = siftPoints;
                return(result);
            }
            finally
            {
                siftAlgo.Dispose();
                modelKeyPointsSift.Dispose();
            }
        }
Example #17
        public static UMat Run(Mat img)
        {
            var modelKeyPoints = new VectorOfKeyPoint();
            var result         = new UMat();

            using (UMat uModelImage = img.ToUMat(AccessType.Read))
            {
                FastDetector fastCPU          = new FastDetector(10, true);
                Freak        freakCPU         = new Freak();
                UMat         modelDescriptors = new UMat();
                fastCPU.DetectRaw(uModelImage, modelKeyPoints);
                freakCPU.Compute(uModelImage, modelKeyPoints, modelDescriptors);
                Features2DToolbox.DrawKeypoints(img, modelKeyPoints, result, new Bgr(Color.Red), Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);
            }

            return(result);
        }
Example #18
        private void detectImgFeatures()
        {
            ORBDetector detector = new ORBDetector(100, 1.2f, 8);

            MKeyPoint[]      img0_keyPoints        = detector.Detect(imgs[0]);
            VectorOfKeyPoint img0_vector_keypoints = new VectorOfKeyPoint(img0_keyPoints);
            Matrix <Byte>    img0_descriptors      = new Matrix <Byte>(img0_vector_keypoints.Size, detector.DescriptorSize);

            MKeyPoint[]      img1_keyPoints        = detector.Detect(imgs[1]);
            VectorOfKeyPoint img1_vector_keypoints = new VectorOfKeyPoint(img1_keyPoints);
            Matrix <Byte>    img1_descriptors      = new Matrix <Byte>(img1_vector_keypoints.Size, detector.DescriptorSize);

            detector.Compute(imgs[0], img0_vector_keypoints, img0_descriptors);
            detector.Compute(imgs[1], img1_vector_keypoints, img1_descriptors);   //also compute descriptors for the second image before matching

            // display keypoints in red
            Image <Bgr, Byte> newImg = new Image <Bgr, Byte>(imgs[0].Width, imgs[0].Height);

            Features2DToolbox.DrawKeypoints(imgs[0], img0_vector_keypoints, newImg, new Bgr(255, 0, 255),
                                            Features2DToolbox.KeypointDrawType.DrawRichKeypoints);
            imgbox_original.Image = newImg;

            Image <Bgr, Byte> newImg2 = new Image <Bgr, Byte>(imgs[1].Width, imgs[1].Height);

            Features2DToolbox.DrawKeypoints(imgs[1], img1_vector_keypoints, newImg2, new Bgr(255, 0, 255),
                                            Features2DToolbox.KeypointDrawType.DrawRichKeypoints);
            imgbox_second.Image = newImg2;

            // apply BFMatcher to find matches in two images
            BFMatcher bfMatcher            = new BFMatcher(DistanceType.Hamming, true);
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            bfMatcher.Add(img0_descriptors);
            bfMatcher.KnnMatch(img1_descriptors, matches, 1, null);
            numberFoundPairs = matches.Size;   // read the count after KnnMatch has filled the vector

            // display final image as two merged images with keypoints
            Mat matched_image = new Mat();

            Features2DToolbox.DrawMatches(imgs[0], img0_vector_keypoints, imgs[1], img1_vector_keypoints,
                                          matches, matched_image, new MCvScalar(255, 0, 255), new MCvScalar(0, 255, 0));
            img_final = matched_image.ToImage <Bgr, Byte>();
        }
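Note that this BFMatcher is built with crossCheck enabled, which is why k = 1 is used in KnnMatch. When cross-checking is off, the usual alternative is k = 2 followed by Lowe's ratio test; a minimal sketch under that assumption:

        BFMatcher matcher = new BFMatcher(DistanceType.Hamming);   // crossCheck off
        matcher.Add(img0_descriptors);

        VectorOfVectorOfDMatch knn = new VectorOfVectorOfDMatch();
        matcher.KnnMatch(img1_descriptors, knn, 2, null);

        List<MDMatch> good = new List<MDMatch>();
        for (int i = 0; i < knn.Size; i++)
        {
            MDMatch[] pair = knn[i].ToArray();
            if (pair.Length == 2 && pair[0].Distance < 0.75f * pair[1].Distance)
            {
                good.Add(pair[0]);   // keep only matches clearly better than the runner-up
            }
        }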
Example #19
        public static UMat Run(Mat img)
        {
            double hessianThresh = 500;

            var modelKeyPoints = new VectorOfKeyPoint();
            var result         = new UMat();

            using (UMat uModelImage = img.ToUMat(AccessType.Read))
            {
                SURF surfCPU = new SURF(hessianThresh);
                //extract features from the object image
                UMat modelDescriptors = new UMat();
                //surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
                surfCPU.DetectRaw(uModelImage, modelKeyPoints);

                Features2DToolbox.DrawKeypoints(img, modelKeyPoints, result, new Bgr(Color.Red), Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);
            }

            return(result);
        }
Example #20
        public Mat PutFeaturesOnImage()
        {
            SIFT siftCPU = new SIFT();

            Details.modelKeyPoints = new VectorOfKeyPoint();

            mKeyPoints = siftCPU.Detect(Details.thinnedimage, null);

            Details.modelKeyPoints.Push(mKeyPoints);

            Mat o = new Mat();

            siftCPU.Compute(Details.thinnedimage, Details.modelKeyPoints, o);

            Mat resultimage = new Mat();

            Features2DToolbox.DrawKeypoints(Details.thinnedimage, Details.modelKeyPoints, resultimage, new Bgr(Color.Red), Features2DToolbox.KeypointDrawType.Default);

            return(resultimage);
        }
Example #21
        private void DrawKeypoints()
        {
            try
            {
                if (imgList["Input"] == null)
                {
                    return;
                }

                var img  = imgList["Input"].Clone();
                var gray = img.Convert <Gray, byte>();

                GFTTDetector detector = new GFTTDetector(2000, 0.06);
                var          corners  = detector.Detect(gray);
                dt.Rows.Clear();
                foreach (MKeyPoint pt in corners)
                {
                    dt.Rows.Add(pt.ClassId,
                                pt.Point.ToString(),
                                pt.Angle,
                                pt.Size,
                                pt.Octave,
                                pt.Response);
                }

                Mat outimg = new Mat();
                Features2DToolbox.DrawKeypoints(img, new VectorOfKeyPoint(corners), outimg, new Bgr(0, 0, 255));

                imageBoxEx1.Image        = outimg.ToBitmap();
                dataGridView1.DataSource = dt;
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
        }
Example #22
        public void FindBlobs(bool draw, bool undistort)
        {
            _mKeyPoints = _blobDetector.Detect(_searchMat);

            if (_mKeyPoints.Length != 0)
            {
                VectorOfKeyPoint _vectorOfKeyPoint = new VectorOfKeyPoint(_mKeyPoints);

                if (draw)
                {
                    Features2DToolbox.DrawKeypoints(_searchMat, _vectorOfKeyPoint, _searchMat, _dColor);
                }


                _points = new PointF[_vectorOfKeyPoint.Size];
                for (int i = 0; i < _vectorOfKeyPoint.Size; i++)
                {
                    _points[i] = _vectorOfKeyPoint[i].Point;
                }

                if (undistort)
                {
                    VectorOfPointF _vectorOfPointF = new VectorOfPointF(_points);
                    VectorOfPointF _uVectorOfPoint = new VectorOfPointF();

                    CvInvoke.UndistortPoints(_vectorOfPointF, _uVectorOfPoint, _cameraMatrix, _distCoeffs);
                    PointF[] pu = _uVectorOfPoint.ToArray();

                    for (int i = 0; i < pu.Length; i++)
                    {
                        _points[i].X = pu[i].X * (float)_fx + (float)_cx;
                        _points[i].Y = pu[i].Y * (float)_fy + (float)_cy;
                    }
                }

                OnBlobDetected?.Invoke(new BlobDetectorEventArgs(_points, _deviceNum));
            }
        }
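The tail of FindBlobs relies on CvInvoke.UndistortPoints returning points in normalized image coordinates: multiplying by the focal lengths and adding the principal point (u = fx·x′ + cx, v = fy·y′ + cy) maps them back to pixels. The same mapping as a small helper, for illustration:

        // map a normalized, undistorted point back to pixel coordinates
        static PointF ToPixels(PointF normalized, double fx, double fy, double cx, double cy)
        {
            return new PointF((float)(normalized.X * fx + cx),
                              (float)(normalized.Y * fy + cy));
        }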
Example #23
        private void ApplyFASTFeatureDetector(int threshold = 10)
        {
            try
            {
                if (imgList["Input"] == null)
                {
                    return;
                }

                var img  = imgList["Input"].Clone();
                var gray = img.Convert <Gray, byte>();

                FastFeatureDetector detector = new FastFeatureDetector(threshold);
                var corners = detector.Detect(gray);
                dt.Rows.Clear();
                lab_notes.Text = "Number of corners: " + corners.Length.ToString();
                foreach (MKeyPoint pt in corners)
                {
                    dt.Rows.Add(pt.ClassId,
                                pt.Point.ToString(),
                                pt.Angle,
                                pt.Size,
                                pt.Octave,
                                pt.Response);
                }
                Mat outimg = new Mat();
                Features2DToolbox.DrawKeypoints(img, new VectorOfKeyPoint(corners), outimg, new Bgr(0, 0, 255));

                imageBoxEx1.Image        = outimg.ToBitmap();
                dataGridView1.DataSource = dt;
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
        }
Example #24
        public override void Process(Image <Bgr, byte> image, out Image <Bgr, byte> annotatedImage, out List <object> data)
        {
            base.Process(image, out annotatedImage, out data);

            using (var dt = new SimpleBlobDetector(_params))
                using (var kp = new VectorOfKeyPoint())
                {
                    // detect the blobs
                    dt.DetectRaw(image, kp);

                    // draw the blobs
                    Features2DToolbox.DrawKeypoints(
                        image,
                        kp,
                        annotatedImage,
                        new Bgr(_annoColor.Color()),
                        Features2DToolbox.KeypointDrawType.DrawRichKeypoints);

                    // populate the data
                    data = new List <object>();
                    data.AddRange(kp.ToArray().Select(k => new KeyPoint(k)));
                }
        }
Example #25
        private VectorOfKeyPoint DoBlobDetection(Mat sourceMat)
        {
            Image <Gray, byte> blobImage = sourceMat.ToImage <Gray, byte>().PyrDown().PyrUp();

            blobImage = blobImage.SmoothBlur(10, 10, true);   //SmoothBlur returns a new image, so the result must be kept

            VectorOfKeyPoint kp = new VectorOfKeyPoint();

            if (blobTrackerMaskMat != null)
            {
                blobDetector.DetectRaw(blobImage, kp, blobTrackerMaskMat);
            }
            else
            {
                blobDetector.DetectRaw(blobImage, kp);
            }

            Mat decoratedMat = new Mat(sourceMat.Rows, sourceMat.Cols, DepthType.Cv8U, 3);

            Features2DToolbox.DrawKeypoints(blobImage.Mat, kp, decoratedMat, new Bgr(0, 0, 255));
            BlobDetectedImage = decoratedMat.ToImage <Bgr, byte>();
            return(kp);
        }
Example #26
        public void DrawSIFTDescriptor(string inputFile, string outputFile)
        {
            //SiFT Descriptor
            SIFT             siftAlgo           = null;
            VectorOfKeyPoint modelKeyPointsSift = null;

            try
            {
                siftAlgo           = new SIFT();
                modelKeyPointsSift = new VectorOfKeyPoint();

                using (Image <Bgr, byte> inputImage = new Image <Bgr, byte>(inputFile))
                {
                    MKeyPoint[] siftPoints = siftAlgo.Detect(inputImage);
                    modelKeyPointsSift.Push(siftPoints);
                    UMat siftDescriptors = new UMat();
                    siftAlgo.DetectAndCompute(inputImage, null, modelKeyPointsSift, siftDescriptors, true);
                    using (Image <Gray, Byte> outputImage = new Image <Gray, byte>(
                               inputImage.Width,
                               inputImage.Height))
                    {
                        Features2DToolbox.DrawKeypoints(
                            inputImage,
                            modelKeyPointsSift,
                            outputImage,
                            new Bgr(255, 255, 255),
                            Features2DToolbox.KeypointDrawType.Default);
                        outputImage.Save(outputFile);
                    }
                }
            }
            finally
            {
                siftAlgo.Dispose();
                modelKeyPointsSift.Dispose();
            }
        }
Example #27
        static void Main(string[] args)
        {
            String win1 = "Orange Detector"; //The name of the window

            CvInvoke.NamedWindow(win1);      //Create the window using the specific name

            MCvScalar orangeMin = new MCvScalar(10, 211, 140);
            MCvScalar orangeMax = new MCvScalar(18, 255, 255);

            Mat img    = new Mat("fruits.jpg", ImreadModes.AnyColor);
            Mat hsvImg = new Mat();

            CvInvoke.CvtColor(img, hsvImg, ColorConversion.Bgr2Hsv);

            CvInvoke.InRange(hsvImg, new ScalarArray(orangeMin), new ScalarArray(orangeMax), hsvImg);

            CvInvoke.MorphologyEx(hsvImg, hsvImg, MorphOp.Close, new Mat(), new Point(-1, -1), 5, BorderType.Default, new MCvScalar());

            SimpleBlobDetectorParams param = new SimpleBlobDetectorParams();

            param.FilterByCircularity = false;
            param.FilterByConvexity   = false;
            param.FilterByInertia     = false;
            param.FilterByColor       = false;
            param.MinArea             = 1000;
            param.MaxArea             = 50000;

            SimpleBlobDetector detector = new SimpleBlobDetector(param);

            MKeyPoint[] keypoints = detector.Detect(hsvImg);
            Features2DToolbox.DrawKeypoints(img, new VectorOfKeyPoint(keypoints), img, new Bgr(255, 0, 0), Features2DToolbox.KeypointDrawType.DrawRichKeypoints);

            CvInvoke.Imshow(win1, img); //Show image
            CvInvoke.WaitKey(0);        //Wait for key press before executing next line
            CvInvoke.DestroyWindow(win1);
        }
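The hue band 10-18 here is worth contrasting with the first example: after Bgr2Hsv, OpenCV stores hue in the 0-179 range (saturation and value in 0-255), so 10-18 is a plausible orange band. One way to tune a range is to sample pixels of the target object; a sketch (the coordinates are made up):

        Mat probe = new Mat();
        CvInvoke.CvtColor(img, probe, ColorConversion.Bgr2Hsv);
        Image<Hsv, byte> hsv = probe.ToImage<Hsv, byte>();
        MCvScalar px = hsv[120, 200].MCvScalar;   // row 120, column 200 (illustrative)
        Console.WriteLine($"H={px.V0} S={px.V1} V={px.V2}");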
Example #28
        List <float> ExtractSiftFeatureVector(TaggedImage image, int keyPointCount, SiftSortingMethod sortingMethod, bool doDrawImage)
        {
            // use the emgu functions to gather keypoints

            VectorOfKeyPoint vectorOfKeypoints = new VectorOfKeyPoint();

            Mat output = image.GetMat().Clone(); // only needed for drawing

            sift.DetectAndCompute(image.GetMat(), null, vectorOfKeypoints, output, false);

            // put it into useful data formats

            List <MKeyPoint> keyPoints = new List <MKeyPoint>(vectorOfKeypoints.ToArray());

            // sort

            switch (sortingMethod)
            {
            case SiftSortingMethod.Response:
                keyPoints.Sort((p1, p2) => p1.Response < p2.Response ? 1 : (p1.Response == p2.Response ? 0 : -1));
                break;

            case SiftSortingMethod.Size:
                keyPoints.Sort((p1, p2) => p1.Size < p2.Size ? 1 : (p1.Size == p2.Size ? 0 : -1));
                break;

            case SiftSortingMethod.None:
            default:
                break;
            }

            // expand/trim
            while (keyPoints.Count < keyPointCount)
            {
                keyPoints.Add(new MKeyPoint());
            }

            if (keyPoints.Count > keyPointCount)
            {
                keyPoints.RemoveRange(keyPointCount, keyPoints.Count - keyPointCount);
            }

            // visualize

            if (doDrawImage)
            {
                vectorOfKeypoints = new VectorOfKeyPoint(keyPoints.ToArray());

                Features2DToolbox.DrawKeypoints(image.GetMat(), vectorOfKeypoints, output, new Bgr(0, 0, 255), Features2DToolbox.KeypointDrawType.DrawRichKeypoints);

                String win1 = "SIFT";          //The name of the window
                CvInvoke.NamedWindow(win1);    //Create the window using the specific name

                CvInvoke.Imshow(win1, output); //Show the image
                CvInvoke.WaitKey(0);           //Wait for the key pressing event
                CvInvoke.DestroyWindow(win1);  //Destroy the window if key is pressed
            }

            // convert to list

            List <float> result = new List <float>(5 * keyPointCount);

            for (int i = 0; i < keyPoints.Count; ++i)
            {
                MKeyPoint current = keyPoints[i];

                result.Add(current.Point.X / (float)image.GetMat().Size.Width);    // normalize by the source image size
                result.Add(current.Point.Y / (float)image.GetMat().Size.Height);   // (output may hold descriptors rather than the image)
                result.Add(current.Size);
                result.Add(current.Angle);
                result.Add(current.Response);
            }

            return(result);
        }
Example #29
        private Mat Draw(Mat observedImage)
        {
            if (button1.Text == "UnLoad")
            {
                FindMatch(observedImage);
                Mat result = new Mat();

                if (matches.Size > 2)
                {
                    //Draw the matched keypoints

                    Features2DToolbox.DrawMatches(objImage, objKeyPoints, observedImage, observedKeyPoints, matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);
                    //Bgr rgb5 = new Bgr(250, 250, 250);
                    //Features2DToolbox.DrawKeypoints(observedImage, observedKeyPoints, observedImage, rgb5);
                    int   i;
                    float X     = 0;
                    float Y     = 0;
                    int   count = 0;
                    for (i = 0; i < matches.Size; i++)
                    {
                        if (mask.GetData(i)[0] == 1)
                        {
                            X += observedKeyPoints[matches[i][0].QueryIdx].Point.X - objKeyPoints[matches[i][0].TrainIdx].Point.X;
                            Y += observedKeyPoints[matches[i][0].QueryIdx].Point.Y - objKeyPoints[matches[i][0].TrainIdx].Point.Y;
                            count++;
                        }
                    }

                    if (count > 0)
                    {
                        X = X / count + (float)(Math.Abs(objwidth / 2.0));
                        Y = Y / count + (float)(Math.Abs(objheight / 2.0));
                    }
                    else
                    {
                        X = -1;
                        Y = -1;
                    }
                    if (CvInvoke.CountNonZero(mask) > 10)
                    {
                        if (trackings[trackings.Length - 1].Y >= 0)
                        {
                            Point[] newtrackings = new Point[2 * trackings.Length + 1];
                            for (i = 0; i < trackings.Length; i++)
                            {
                                newtrackings[i] = trackings[i];
                            }
                            newtrackings[i].X = (int)X;
                            newtrackings[i].Y = (int)Y;
                            for (i++; i < newtrackings.Length; i++)
                            {
                                newtrackings[i].X = -1;
                                newtrackings[i].Y = -1;
                            }
                            trackings = newtrackings;
                        }
                        else
                        {
                            for (i = 0; trackings[i].Y >= 0; i++)
                            {
                            }
                            trackings[i].X = (int)X;
                            trackings[i].Y = (int)Y;
                        }
                    }


                    if (homography != null)
                    {
                        //draw a rectangle along the projected model

                        System.Drawing.Rectangle rect = new System.Drawing.Rectangle(Point.Empty, objImage.Size);
                        PointF[] pts = new PointF[]
                        {
                            new PointF(rect.Left, rect.Bottom),
                            new PointF(rect.Right, rect.Bottom),
                            new PointF(rect.Right, rect.Top),
                            new PointF(rect.Left, rect.Top)
                        };
                        pts = CvInvoke.PerspectiveTransform(pts, homography);

                        Point[] points = Array.ConvertAll <PointF, Point>(pts, Point.Round);
                        using (VectorOfPoint vp = new VectorOfPoint(points))
                        {
                            //CvInvoke.Polylines(observedImage, vp, true, new MCvScalar(255, 0, 0, 255), 5);
                        }
                    }
                    float a = 2;
                    CvInvoke.Ellipse(result, new RotatedRect(new PointF(X, Y), new SizeF(30, 30), a), new MCvScalar(0, 250, 255, 255), 4);

                    return(result);
                }
            }
            else if (button1.Text == "Load")
            {
                uObservedImage = observedImage.GetUMat(AccessType.ReadWrite);
                // extract features from the observed image
                ORBCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                Bgr rgb1 = new Bgr(250, 250, 250);
                Features2DToolbox.DrawKeypoints(observedImage, observedKeyPoints, observedImage, rgb1);
            }
            return(observedImage);
        }
Example #30
//..........method that performs the SURF detection........................................................................................
        public void DoSURFDetectAndUpdateForm(object sender, EventArgs e)
        {
            try
            {
                imgSceneColor             = captureWebcam.QueryFrame(); //try to grab a single frame from the camera
                lbPreparingCamera.Visible = false;
            }
            catch (Exception ex)                                    //if that fails, display the error
            {
                this.Text = ex.Message;
            }


            if (imgSceneColor == null)
            {
                this.Text = "error: no frame was read from the camera";     //the next frame could not be read into the image variable
            }
            if (imgToFindColor == null)                                 //if we do not have an image to find yet...
            {
                ibResult.Image = imgSceneColor.ToBitmap();              //...just show the scene image in the image box
            }
            //if we got this far, both images are OK and we can start the SURF detection

            SURFDetector surfDetector = new SURFDetector(500, false);   //SURF object; parameters are the threshold (how strong a point must be to count) and the extended flag

            Image <Gray, Byte> imgSceneGray  = null;                    //grayscale scene image
            Image <Gray, Byte> imgToFindGray = null;                    //grayscale image to find

            VectorOfKeyPoint vkpSceneKeyPoints;                         //vector of key points in the scene image
            VectorOfKeyPoint vkpToFindKeyPoints;                        //vector of key points in the image to find

            Matrix <Single> mtxSceneDescriptors;                        //matrix of descriptors for nearest-neighbour queries
            Matrix <Single> mtxToFindDescriptor;                        //matrix of descriptors for the image being searched for

            Matrix <int>    mtxMatchIndices;                            //matrix of descriptor indices, filled while training the descriptors (KnnMatch())
            Matrix <Single> mtxDistance;                                //matrix of distance values, filled by the same training
            Matrix <Byte>   mtxMask;                                    //input and output for VoteForUniqueness(), indicating which rows match

            BruteForceMatcher <Single> bruteForceMatcher;               //for each descriptor in the first set, the matcher finds...
                                                                        //...the closest descriptor in the second set, established by training on each one

            HomographyMatrix homographyMatrix = null;                   //for ProjectPoints(), to place the location of the found image in the scene
            int    intKNumNearestNeighbors    = 2;                      //k, the number of nearest neighbours to search
            double dblUniquenessThreshold     = 0.8;                    //distance-ratio threshold for a match to count as unique

            int intNumNonZeroElements;                                  //holds the number of non-zero elements in the mask matrix,...
                                                                        //...also returned by GetHomographyMatrixFromMatchedFeatures()

            //parameters used in the calls to VoteForSizeAndOrientation()

            double dblScareIncrement = 1.5;                      //difference in scale between neighbouring bins
            int    intRotationBins   = 20;                       //number of rotation bins covering 360 degrees (if 20, each bin covers 18 degrees (20*18=360))

            double dblRansacReprojectionThreshold = 2.0;         //used with GetHomographyMatrixFromMatchedFeatures(); the maximum reprojection error...
                                                                 //...allowed for a point pair to count as an inlier

            Rectangle rectImageToFind = new Rectangle();         //rectangle covering the whole image to find

            PointF [] pointsF;                                   //4 points framing the location of the found image in the scene (float)
            Point []  points;                                    //4 points, the same thing, but (int)

            imgSceneGray = imgSceneColor.Convert <Gray, Byte>(); //convert the scene to gray

            if (isImgToFind == true)
            {
                try
                {
                    imgToFindGray = imgToFindColor.Convert <Gray, Byte>();       //convert the image to find to gray
                }
                catch (Exception ex)
                {
                    MessageBox.Show(ex.ToString());
                }

                vkpSceneKeyPoints   = surfDetector.DetectKeyPointsRaw(imgSceneGray, null);                       //detect key points in the scene; the second parameter is a mask, null when not needed
                mtxSceneDescriptors = surfDetector.ComputeDescriptorsRaw(imgSceneGray, null, vkpSceneKeyPoints); //compute the scene descriptors; the parameters are the scene image,...
                //...the mask, and the scene key points

                vkpToFindKeyPoints = surfDetector.DetectKeyPointsRaw(imgToFindGray, null);                          //detect key points in the image to find; the second parameter is the...
                //...mask, null because it is not needed

                mtxToFindDescriptor = surfDetector.ComputeDescriptorsRaw(imgToFindGray, null, vkpToFindKeyPoints);                //compute the descriptors of the image to find (image, mask, its key points)

                bruteForceMatcher = new BruteForceMatcher <Single>(DistanceType.L2);                                              //brute-force matcher object using L2, the squared Euclidean distance
                bruteForceMatcher.Add(mtxToFindDescriptor);                                                                       //add the matrix of descriptors to find to the brute-force matcher

                if (mtxSceneDescriptors != null)                                                                                  //guard for when the scene has no features, e.g. a blank wall
                {
                    mtxMatchIndices = new Matrix <int>(mtxSceneDescriptors.Rows, intKNumNearestNeighbors);                        //matrix of indices/bins (rows, columns)
                    mtxDistance     = new Matrix <Single>(mtxSceneDescriptors.Rows, intKNumNearestNeighbors);                     //the same for distances

                    bruteForceMatcher.KnnMatch(mtxSceneDescriptors, mtxMatchIndices, mtxDistance, intKNumNearestNeighbors, null); //find the k nearest matches (the mask is null because it is not needed)

                    mtxMask = new Matrix <Byte>(mtxDistance.Rows, 1);                                                             //mask matrix object
                    mtxMask.SetValue(255);                                                                                        //set the value of every element in the mask matrix

                    Features2DToolbox.VoteForUniqueness(mtxDistance, dblUniquenessThreshold, mtxMask);                            //filter the matched features: if a match is NOT unique it is rejected

                    intNumNonZeroElements = CvInvoke.cvCountNonZero(mtxMask);                                                     //get the number of non-zero elements in the mask matrix
                    if (intNumNonZeroElements >= 4)
                    {
                        //eliminate matched features whose scale and rotation do not agree with the scale and rotation of the majority
                        intNumNonZeroElements = Features2DToolbox.VoteForSizeAndOrientation(vkpToFindKeyPoints, vkpSceneKeyPoints, mtxMatchIndices, mtxMask, dblScareIncrement, intRotationBins);
                        if (intNumNonZeroElements >= 4)             //if there are still at least 4 non-zero elements

                        //get the homography matrix using RANSAC (random sample consensus)
                        {
                            homographyMatrix = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(vkpToFindKeyPoints, vkpSceneKeyPoints, mtxMatchIndices, mtxMask, dblRansacReprojectionThreshold);
                        }
                    }

                    imgCopyOfImageToFindWithBorder = imgToFindColor.Copy();     //copy the image to find so we can draw on the copy without altering the original

                    //draw a 2px border around the copy of the image to find, using the same colour as the box drawn around the found image
                    imgCopyOfImageToFindWithBorder.Draw(new Rectangle(1, 1, imgCopyOfImageToFindWithBorder.Width - 3, imgCopyOfImageToFindWithBorder.Height - 3), bgrFoundImageColor, 2);

                    //draw the scene image and the image to find together on the result image
                    //3 cases depending on which checkboxes are ticked (draw key points and/or draw matching lines)
                    if (ckDrawKeyPoints.Checked == true && ckDrawMatchingLines.Checked == true)
                    {
                        //use DrawMatches() to combine the scene image with the image to find, then draw the key points and lines
                        imgResult = Features2DToolbox.DrawMatches(imgCopyOfImageToFindWithBorder,
                                                                  vkpToFindKeyPoints,
                                                                  imgSceneColor,
                                                                  vkpSceneKeyPoints,
                                                                  mtxMatchIndices,
                                                                  bgrMatchingLineColor,
                                                                  bgrKeyPointColor,
                                                                  mtxMask,
                                                                  Features2DToolbox.KeypointDrawType.DEFAULT);
                    }
                    else if (ckDrawKeyPoints.Checked == true && ckDrawMatchingLines.Checked == false)
                    {
                        //draw the scene with key points on the result image
                        imgResult = Features2DToolbox.DrawKeypoints(imgSceneColor,
                                                                    vkpSceneKeyPoints,
                                                                    bgrKeyPointColor,
                                                                    Features2DToolbox.KeypointDrawType.DEFAULT);
                        //then draw the key points on the copy of the image to find
                        imgCopyOfImageToFindWithBorder = Features2DToolbox.DrawKeypoints(imgCopyOfImageToFindWithBorder,
                                                                                         vkpToFindKeyPoints,
                                                                                         bgrKeyPointColor,
                                                                                         Features2DToolbox.KeypointDrawType.DEFAULT);
                        //then concatenate the copy of the image to find onto the result image
                        imgResult = imgResult.ConcateHorizontal(imgCopyOfImageToFindWithBorder);
                    }
                    else if (ckDrawKeyPoints.Checked == false && ckDrawMatchingLines.Checked == false)
                    {
                        imgResult = imgSceneColor;                                                  //use the scene as the result image
                        imgResult = imgResult.ConcateHorizontal(imgCopyOfImageToFindWithBorder);    //attach the copy of the image to find to the result image
                    }
                    else
                    {
                        MessageBox.Show("Error");     //this point should never be reached
                    }
                }
                else
                {
                    imgResult = imgSceneColor;                                                  //use the scene as the result image
                    imgResult = imgResult.ConcateHorizontal(imgCopyOfImageToFindWithBorder);    //attach the copy of the image to find to the result image
                }

                if (homographyMatrix != null)    //make sure the matrix actually contains something
                {
                    //draw a frame on the scene portion of the result image, where the searched image was found
                    rectImageToFind.X      = 0;     //start by setting the rectangle to the full size of the image to find
                    rectImageToFind.Y      = 0;
                    rectImageToFind.Width  = imgToFindGray.Width;
                    rectImageToFind.Height = imgToFindGray.Height;

                    //create an array of PointF corresponding to the rectangle corners
                    pointsF = new PointF[] { new PointF(rectImageToFind.Left, rectImageToFind.Top),
                                             new PointF(rectImageToFind.Right, rectImageToFind.Top),
                                             new PointF(rectImageToFind.Right, rectImageToFind.Bottom),
                                             new PointF(rectImageToFind.Left, rectImageToFind.Bottom) };

                    //ProjectPoints() sets pointsF (by reference) to the location of the frame on the scene fragment where the searched image was found
                    homographyMatrix.ProjectPoints(pointsF);

                    //convert from PointF to Point, because ProjectPoints() uses PointF while DrawPolyline() uses Point
                    points = new Point[] { Point.Round(pointsF[0]),
                                           Point.Round(pointsF[1]),
                                           Point.Round(pointsF[2]),
                                           Point.Round(pointsF[3]) };

                    //draw the frame around the found image on the scene fragment of the result image
                    imgResult.DrawPolyline(points, true, new Bgr(0, 255, 0), 2);

                    //draw a red dash at the centre of the detected object
                    int x, y, x1, y1, xW, yW;

                    x  = Convert.ToInt32(points[0].X);
                    y  = Convert.ToInt32(points[0].Y);
                    x1 = Convert.ToInt32(points[2].X);
                    y1 = Convert.ToInt32(points[2].Y);

                    xW  = x1 - x;
                    xW /= 2;
                    xW += x;
                    yW  = y1 - y;
                    yW /= 2;
                    yW += y;
                    Point [] pp = new Point[] { new Point(xW, yW), new Point(xW + 10, yW) };    //mark the centre of the detected object
                    imgResult.DrawPolyline(pp, true, new Bgr(0, 0, 255), 5);

                    XX = xW.ToString();
                    YY = yW.ToString();
                    //////////when the object leaves the field of view
                    if (xW == 0 || yW == 0 || xW < -200 || yW < -200 || xW > 800 || yW > 800)
                    {
                        targetLost(-1);
                    }
                    else
                    {
                        targetLost(1);
                    }
                    //////////
                }
                else
                {
                    targetLost(-1);     //bull's-eye!
                }
                //end of SURF; update the whole form

                ibResult.Image = imgResult.ToBitmap();          //show the result in the image box
            }
        }
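This last walkthrough is written against the Emgu CV 2.x API (SURFDetector, BruteForceMatcher<Single>, HomographyMatrix, Matrix-based index/mask buffers). Under Emgu CV 3+ the same pipeline is typically expressed with Mat and VectorOfVectorOfDMatch; roughly, as a sketch rather than a drop-in replacement:

        SURF surf = new SURF(500);
        VectorOfKeyPoint sceneKp = new VectorOfKeyPoint(), modelKp = new VectorOfKeyPoint();
        Mat sceneDesc = new Mat(), modelDesc = new Mat();
        surf.DetectAndCompute(imgSceneGray, null, sceneKp, sceneDesc, false);
        surf.DetectAndCompute(imgToFindGray, null, modelKp, modelDesc, false);

        BFMatcher matcher = new BFMatcher(DistanceType.L2);
        matcher.Add(modelDesc);
        VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
        matcher.KnnMatch(sceneDesc, matches, 2, null);

        Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
        mask.SetTo(new MCvScalar(255));
        Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

        Mat homography = null;
        if (CvInvoke.CountNonZero(mask) >= 4)
        {
            int nonZero = Features2DToolbox.VoteForSizeAndOrientation(modelKp, sceneKp, matches, mask, 1.5, 20);
            if (nonZero >= 4)
            {
                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKp, sceneKp, matches, mask, 2.0);
            }
        }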