Example #1
        // Rendering handler: matches AKAZE features on each captured frame
        private void videoRendering(object sender, NewFrameEventArgs eventArgs)
        {
            Bitmap img = (Bitmap)eventArgs.Frame.Clone();

            // Debug.WriteLine(DateTime.Now + ":" + "frame update");
            // Debug.WriteLine(mode);

            try
            {
                //pictureBoxCamera.Image = img;

                temp = BitmapConverter.ToMat(img); // current frame to compare against

                // Detect keypoints and compute their descriptors
                akaze.DetectAndCompute(temp, null, out key_point2, descriptor2);

                // Draw image 2's keypoints onto output2
                Cv2.DrawKeypoints(temp, key_point2, output2);
                //Cv2.ImShow("output2", output2);

                pictureBoxCamera.Image = BitmapConverter.ToBitmap(output2);

                matcher = DescriptorMatcher.Create("BruteForce");
                matches = matcher.Match(descriptor1, descriptor2);

                // Count the matches whose distance is below the threshold
                // (reset each frame so the count does not accumulate)
                int good_match_length = 0;
                for (int i = 0; i < matches.Length; ++i)
                {
                    if (matches[i].Distance < threshold)
                    {
                        ++good_match_length;
                    }
                }

                DMatch[] good_matches = new DMatch[good_match_length]; // sized by the count below the threshold

                // Copy the qualifying matches into good_matches
                int j = 0;
                for (int i = 0; i < matches.Length; ++i)
                {
                    if (matches[i].Distance < threshold)
                    {
                        good_matches[j] = matches[i];
                        ++j;
                    }
                }

                // Debug-print the number of good matches
                Debug.WriteLine(j);
                Invoke((MethodInvoker) delegate()
                {
                    labelMatch.Text = j.ToString();
                });

                // If enough points match, react according to the checkbox states
                if (j >= 16)
                {
                    // Emergency stop (IFTTT webhook, currently commented out)
                    if (checkBoxStop.Checked)
                    {
                        //WebRequest request = WebRequest.Create("https://maker.ifttt.com/trigger/raspberry/with/key/gHPH_xDKR664IVIr2YtRRj6BbQoQi-K0mCowIJCGPF3");
                        //WebResponse response = request.GetResponse();
                    }

                    // Alert sound
                    if (checkBoxAlert.Checked)
                    {
                        // _mediaPlayer.settings.volume = 20;
                        _mediaPlayer.URL = @"D:\DCIM\app\AkazeAlert\PcCameraApp\Resources\decision1.mp3";
                        _mediaPlayer.controls.play();
                    }
                }

                Cv2.DrawMatches(mat, key_point1, temp, key_point2, good_matches, output3);
                //Cv2.ImShow("output3", output3);

                pictureBoxResult.Image = BitmapConverter.ToBitmap(output3);
            }
            catch
            {
                pictureBoxCamera.Image = img;
            }
        }
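The handler above relies on fields initialized elsewhere in the form: akaze, matcher, the reference image mat with its key_point1/descriptor1, plus temp, key_point2, descriptor2, output2, output3, and threshold. A minimal setup sketch, assuming the capture side uses AForge.Video (which is where NewFrameEventArgs comes from); the names introduced here (startCamera, videoDevice, reference.png) are illustrative, not from the original:

        // Sketch only: prepare the reference features and hook the handler
        // to a capture device. Assumes AForge.Video and OpenCvSharp.
        private void startCamera()
        {
            akaze       = AKAZE.Create();
            descriptor1 = new Mat();
            descriptor2 = new Mat();
            output2     = new Mat();
            output3     = new Mat();

            // Reference image and its features (compared against every frame)
            mat = BitmapConverter.ToMat(new Bitmap(@"reference.png"));
            akaze.DetectAndCompute(mat, null, out key_point1, descriptor1);

            // Call videoRendering on every captured frame
            var devices = new FilterInfoCollection(FilterCategory.VideoInputDevice);
            videoDevice = new VideoCaptureDevice(devices[0].MonikerString);
            videoDevice.NewFrame += videoRendering;
            videoDevice.Start();
        }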
Example #2
        public override void RunTest()
        {
            using var src             = Cv2.ImRead(ImagePath.Shapes);
            using var detectedCircles = new Mat();
            using var detectedOvals   = new Mat();

            // Invert the image. Shapes has a black background and SimpleBlobDetector doesn't seem to work well with that.
            Cv2.BitwiseNot(src, src);

            // Parameters tuned to detect only circles
            var circleParams = new SimpleBlobDetector.Params
            {
                MinThreshold = 10,
                MaxThreshold = 230,

                // The area is the number of pixels in the blob.
                FilterByArea = true,
                MinArea      = 500,
                MaxArea      = 50000,

                // Circularity is 4*pi*area / perimeter^2; a perfect circle is 1.0, and polygons with more sides score higher.
                FilterByCircularity = true,
                MinCircularity      = 0.9f,

                // Convexity is the ratio of the area of the blob to the area of its convex hull.
                FilterByConvexity = true,
                MinConvexity      = 0.95f,

                // A circle's inertia ratio is 1. A line's is 0. An oval is between 0 and 1.
                FilterByInertia = true,
                MinInertiaRatio = 0.95f
            };

            // Parameters tuned to find the ovals in the Shapes image.
            var ovalParams = new SimpleBlobDetector.Params
            {
                MinThreshold = 10,
                MaxThreshold = 230,
                FilterByArea = true,
                MinArea      = 500,
                // The ovals are the smallest blobs in Shapes, so we limit the max area to eliminate the larger blobs.
                MaxArea             = 10000,
                FilterByCircularity = true,
                MinCircularity      = 0.58f,
                FilterByConvexity   = true,
                MinConvexity        = 0.96f,
                FilterByInertia     = true,
                MinInertiaRatio     = 0.1f
            };

            using var circleDetector = SimpleBlobDetector.Create(circleParams);
            var circleKeyPoints = circleDetector.Detect(src);

            Cv2.DrawKeypoints(src, circleKeyPoints, detectedCircles, Scalar.HotPink, DrawMatchesFlags.DrawRichKeypoints);

            using var ovalDetector = SimpleBlobDetector.Create(ovalParams);
            var ovalKeyPoints = ovalDetector.Detect(src);

            Cv2.DrawKeypoints(src, ovalKeyPoints, detectedOvals, Scalar.HotPink, DrawMatchesFlags.DrawRichKeypoints);

            using var w1 = new Window("Detected Circles", detectedCircles);
            using var w2 = new Window("Detected Ovals", detectedOvals);

            Cv2.WaitKey();
        }
Example #3
        Vector3 triangulate(int j, HyperMegaStuff.HyperMegaLines drawer = null)
        {
            Ray[] rays         = new Ray[2];
            Mat   workingImage = new Mat(calibrationDevices[j].webcam.leftImage.Height,
                                         calibrationDevices[j].webcam.leftImage.Width,
                                         calibrationDevices[j].webcam.leftImage.Type(), 0);

            for (int i = 0; i < 2; i++)
            {
                Mat curMat = i == 0 ? calibrationDevices[j].webcam.leftImage :
                             calibrationDevices[j].webcam.rightImage;

                if (calibrationDevices[j].subtractionImage[i] != null)
                {
                    // Subtract the background from the current image
                    Cv2.Subtract(curMat, calibrationDevices[j].subtractionImage[i], workingImage);

                    // Threshold the image to separate black and white
                    Cv2.Threshold(workingImage, workingImage, blobThreshold, 255, ThresholdTypes.BinaryInv); // TODO: make the threshold tunable

                    // Detect blobs in the working image using the mask
                    // (the settings must be passed to Create, or they are ignored)
                    var settings = new SimpleBlobDetector.Params
                    {
                        FilterByArea        = false,
                        FilterByColor       = false,
                        FilterByInertia     = true,
                        FilterByConvexity   = true,
                        FilterByCircularity = false
                    };
                    SimpleBlobDetector detector = SimpleBlobDetector.Create(settings);
                    KeyPoint[]         blobs    = detector.Detect(workingImage, calibrationDevices[j].maskImage[i]);
                    Cv2.DrawKeypoints(workingImage, blobs, workingImage, 255);
                    int   biggest = -1;
                    float size    = 0;
                    for (int k = 0; k < blobs.Length; k++)
                    {
                        if (blobs[k].Size > size)
                        {
                            biggest = k;
                            size    = blobs[k].Size;
                        }
                    }

                    // If any blobs were found, assume the biggest one is the white circle
                    if (blobs.Length > 0)
                    {
                        float[] pointArr         = { blobs[biggest].Pt.X, blobs[biggest].Pt.Y };
                        Mat     point            = new Mat(1, 1, MatType.CV_32FC2, pointArr);
                        Mat     undistortedPoint = new Mat(1, 1, MatType.CV_32FC2, 0);
                        Cv2.UndistortPoints(point, undistortedPoint, calibrationDevices[j].calibration.cameras[i].cameraMatrixMat,
                                            calibrationDevices[j].calibration.cameras[i].distCoeffsMat,
                                            calibrationDevices[j].calibration.cameras[i].rectificationMatrixMat);
                        Point2f[] rectilinear = new Point2f[1];
                        undistortedPoint.GetArray(0, 0, rectilinear);
                        Transform camera = i == 0 ? calibrationDevices[j].LeftCamera : calibrationDevices[j].RightCamera;
                        rays[i] = new Ray(camera.position, camera.TransformDirection(
                                              new Vector3(-rectilinear[0].X, rectilinear[0].Y, 1f)));
                        if (drawer != null)
                        {
                            drawer.color = ((j == 0) != (i == 0)) ? Color.cyan : Color.red;
                            drawer.DrawRay(rays[i].origin, rays[i].direction);
                        }
                    }
                }
            }
            workingImage.Release();

            // Only accept the triangulated point if the rays match up closely enough
            if (rays[0].origin != Vector3.zero &&
                rays[1].origin != Vector3.zero)
            {
                Vector3 point1 = RayRayIntersection(rays[0], rays[1]);
                Vector3 point2 = RayRayIntersection(rays[1], rays[0]);

                if (Vector3.Distance(point1, point2) < 0.005f)
                {
                    return (point1 + point2) * 0.5f;
                }
                else
                {
                    return Vector3.zero;
                }
            }
            else
            {
                return Vector3.zero;
            }
        }
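RayRayIntersection is not part of this snippet. From the call pattern (invoked twice with the rays swapped, then the two results averaged), it presumably returns the point on the first ray closest to the second. A plausible implementation under that assumption, using the standard closest-point-between-two-lines formula:

        // Assumed helper: the point on ray a nearest to ray b
        Vector3 RayRayIntersection(Ray a, Ray b)
        {
            Vector3 w  = a.origin - b.origin;
            float   dd = Vector3.Dot(a.direction, a.direction);
            float   de = Vector3.Dot(a.direction, b.direction);
            float   ee = Vector3.Dot(b.direction, b.direction);
            float   dw = Vector3.Dot(a.direction, w);
            float   ew = Vector3.Dot(b.direction, w);

            float denom = dd * ee - de * de;
            if (Mathf.Approximately(denom, 0f))
            {
                return Vector3.zero; // parallel rays: no meaningful closest point
            }
            float t = (de * ew - ee * dw) / denom;
            return a.origin + a.direction * t;
        }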