Example #1
 // Compute the center of the 2D bounding box, scaled to display resolution.
 static sl.float2 getImagePosition(Vector2[] bounding_box_image, sl.float2 img_scale)
 {
     sl.float2 position;
     position.x = (bounding_box_image[0].X + (bounding_box_image[2].X - bounding_box_image[0].X) * 0.5f) * img_scale.x;
     position.y = (bounding_box_image[0].Y + (bounding_box_image[2].Y - bounding_box_image[0].Y) * 0.5f) * img_scale.y;
     return position;
 }
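A quick usage sketch with hypothetical values; the corner ordering (index 0 = top-left, index 2 = bottom-right) matches how the bounding box is consumed in Example #2:

 // Hypothetical 2D bounding box in full-resolution pixels, clockwise from top-left.
 Vector2[] bbox = {
     new Vector2(100, 50), new Vector2(300, 50),
     new Vector2(300, 250), new Vector2(100, 250)
 };
 // Scale factor mapping full resolution down to a half-size display.
 sl.float2 scale = new sl.float2(0.5f, 0.5f);
 sl.float2 center = getImagePosition(bbox, scale); // center = (100, 75)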
Example #2
    public static void render_2D(ref OpenCvSharp.Mat left_display, sl.float2 img_scale, ref sl.Objects objects, bool render_mask, bool isTrackingON)
    {
        OpenCvSharp.Mat  overlay    = left_display.Clone();
        OpenCvSharp.Rect roi_render = new OpenCvSharp.Rect(0, 0, left_display.Size().Width, left_display.Size().Height);

        OpenCvSharp.Mat mask = new OpenCvSharp.Mat(left_display.Rows, left_display.Cols, OpenCvSharp.MatType.CV_8UC1);

        int line_thickness = 2;

        for (int i = 0; i < objects.numObject; i++)
        {
            sl.ObjectData obj = objects.objectData[i];
            if (Utils.renderObject(obj, isTrackingON))
            {
                OpenCvSharp.Scalar base_color = Utils.generateColorID_u(obj.id);

                // Display the image-scale 2D bounding box
                if (obj.boundingBox2D.Length < 4)
                {
                    continue;
                }

                Point top_left_corner     = Utils.cvt(obj.boundingBox2D[0], img_scale);
                Point top_right_corner    = Utils.cvt(obj.boundingBox2D[1], img_scale);
                Point bottom_right_corner = Utils.cvt(obj.boundingBox2D[2], img_scale);
                Point bottom_left_corner  = Utils.cvt(obj.boundingBox2D[3], img_scale);

                // Draw the two horizontal lines
                Cv2.Line(left_display, top_left_corner, top_right_corner, base_color, line_thickness);
                Cv2.Line(left_display, bottom_left_corner, bottom_right_corner, base_color, line_thickness);
                // Draw the two vertical lines
                Utils.drawVerticalLine(ref left_display, bottom_left_corner, top_left_corner, base_color, line_thickness);
                Utils.drawVerticalLine(ref left_display, bottom_right_corner, top_right_corner, base_color, line_thickness);

                // Scaled ROI
                OpenCvSharp.Rect roi = new OpenCvSharp.Rect(top_left_corner.X, top_left_corner.Y, (int)top_right_corner.DistanceTo(top_left_corner), (int)bottom_right_corner.DistanceTo(top_right_corner));

                overlay.SubMat(roi).SetTo(base_color);

                sl.float2 position_image = getImagePosition(obj.boundingBox2D, img_scale);
                Cv2.PutText(left_display, obj.label.ToString(), new Point(position_image.x - 20, position_image.y - 12), HersheyFonts.HersheyComplexSmall, 0.5f, new Scalar(255, 255, 255, 255), 1);

                if (!float.IsInfinity(obj.position.Z))
                {
                    string text = Math.Abs(obj.position.Z).ToString("0.##M");
                    Cv2.PutText(left_display, text, new Point(position_image.x - 20, position_image.y), HersheyFonts.HersheyComplexSmall, 0.5, new Scalar(255, 255, 255, 255), 1);
                }
            }
        }

        // Here, overlay is the same as the left image, but with an opaque mask over each detected object
        Cv2.AddWeighted(left_display, 0.7, overlay, 0.3, 0.0, left_display);
    }
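For context, here is a minimal sketch of the grab loop that would typically drive this renderer. It assumes the standard ZED C# API (Grab, RetrieveImage, RetrieveObjects) and the fields initialized in Example #5; the loop structure itself is an assumption, not part of the original sample:

    // Hypothetical grab loop; names follow the fields set up in Example #5.
    while (viewer.isAvailable())
    {
        if (zedCamera.Grab(ref runtimeParameters) == ERROR_CODE.SUCCESS)
        {
            // Fetch the left image at display resolution and the detected objects.
            zedCamera.RetrieveImage(imageLeft, VIEW.LEFT, MEM.CPU, displayRes);
            zedCamera.RetrieveObjects(ref objects, ref obj_runtime_parameters);

            // imageRenderLeft wraps imageLeft's buffer; copy it into the global canvas.
            imageRenderLeft.CopyTo(imageLeftOcv);
            render_2D(ref imageLeftOcv, imgScale, ref objects, false, isTrackingON);

            Cv2.ImShow(window_name, globalImage);
            Cv2.WaitKey(10);
        }
    }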
Example #3
    public static void render_2D(ref OpenCvSharp.Mat left_display, sl.float2 img_scale, ref sl.Objects objects, bool showOnlyOK)
    {
        OpenCvSharp.Mat  overlay    = left_display.Clone();
        OpenCvSharp.Rect roi_render = new OpenCvSharp.Rect(1, 1, left_display.Size().Width, left_display.Size().Height); // clipping viewport for keypoints; starting at (1, 1) rejects points on the top/left border

        for (int i = 0; i < objects.numObject; i++)
        {
            sl.ObjectData obj = objects.objectData[i];
            if (renderObject(obj, showOnlyOK))
            {
                // Draw Skeleton bones
                OpenCvSharp.Scalar base_color = generateColorID(obj.id);
                foreach (var part in SKELETON_BONES)
                {
                    var kp_a = cvt(obj.keypoints2D[(int)part.Item1], img_scale);
                    var kp_b = cvt(obj.keypoints2D[(int)part.Item2], img_scale);
                    if (roi_render.Contains(kp_a) && roi_render.Contains(kp_b))
                    {
                        Cv2.Line(left_display, kp_a, kp_b, base_color, 1, LineTypes.AntiAlias);
                    }
                }

                var hip_left  = obj.keypoints2D[(int)sl.BODY_PARTS.LEFT_HIP];
                var hip_right = obj.keypoints2D[(int)sl.BODY_PARTS.RIGHT_HIP];
                var spine     = (hip_left + hip_right) / 2;
                var neck      = obj.keypoints2D[(int)sl.BODY_PARTS.NECK];

                if (hip_left.X > 0 && hip_left.Y > 0 && hip_right.X > 0 && hip_right.Y > 0 && neck.X > 0 && neck.Y > 0)
                {
                    var spine_a = cvt(spine, img_scale);
                    var spine_b = cvt(neck, img_scale);
                    if (roi_render.Contains(spine_a) && roi_render.Contains(spine_b))
                    {
                        Cv2.Line(left_display, spine_a, spine_b, base_color, 1, LineTypes.AntiAlias);
                    }
                }

                // Draw Skeleton joints
                foreach (var kp in obj.keypoints2D)
                {
                    Point cv_kp = cvt(kp, img_scale);
                    if (roi_render.Contains(cv_kp))
                    {
                        Cv2.Circle(left_display, cv_kp, 3, base_color, -1);
                    }
                }

                if (hip_left.X > 0 && hip_left.Y > 0 && hip_right.X > 0 && hip_right.Y > 0)
                {
                    Point cv_spine = cvt(spine, img_scale);
                    if (roi_render.Contains(cv_spine))
                    {
                        Cv2.Circle(left_display, cv_spine, 3, base_color, -1);
                    }
                }
            }
        }

        // overlay is the pre-draw clone of the left image; blending it back in makes the skeleton drawing slightly transparent
        Cv2.AddWeighted(left_display, 0.9, overlay, 0.1, 0.0, left_display);
    }
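SKELETON_BONES is not shown in this excerpt; it is presumably a table of joint pairs, one entry per bone, since the loop above reads part.Item1 and part.Item2 as sl.BODY_PARTS indices. A plausible shape under that assumption (the exact pairs here are illustrative, not the sample's definitive list):

    // Assumed shape of the bone table: pairs of connected sl.BODY_PARTS joints.
    private static readonly List<Tuple<sl.BODY_PARTS, sl.BODY_PARTS>> SKELETON_BONES =
        new List<Tuple<sl.BODY_PARTS, sl.BODY_PARTS>>
    {
        Tuple.Create(sl.BODY_PARTS.NOSE, sl.BODY_PARTS.NECK),
        Tuple.Create(sl.BODY_PARTS.NECK, sl.BODY_PARTS.RIGHT_SHOULDER),
        Tuple.Create(sl.BODY_PARTS.RIGHT_SHOULDER, sl.BODY_PARTS.RIGHT_ELBOW),
        Tuple.Create(sl.BODY_PARTS.RIGHT_ELBOW, sl.BODY_PARTS.RIGHT_WRIST),
        Tuple.Create(sl.BODY_PARTS.NECK, sl.BODY_PARTS.LEFT_SHOULDER),
        Tuple.Create(sl.BODY_PARTS.LEFT_SHOULDER, sl.BODY_PARTS.LEFT_ELBOW),
        Tuple.Create(sl.BODY_PARTS.LEFT_ELBOW, sl.BODY_PARTS.LEFT_WRIST),
        Tuple.Create(sl.BODY_PARTS.RIGHT_HIP, sl.BODY_PARTS.RIGHT_KNEE),
        Tuple.Create(sl.BODY_PARTS.RIGHT_KNEE, sl.BODY_PARTS.RIGHT_ANKLE),
        Tuple.Create(sl.BODY_PARTS.LEFT_HIP, sl.BODY_PARTS.LEFT_KNEE),
        Tuple.Create(sl.BODY_PARTS.LEFT_KNEE, sl.BODY_PARTS.LEFT_ANKLE),
    };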
Example #4
 // Scale a point from full camera resolution to display resolution.
 static Point cvt(Vector2 point, sl.float2 scale)
 {
     return new Point(point.X * scale.x, point.Y * scale.y);
 }
Example #5
    public MainWindow(string[] args)
    {
        // Set configuration parameters
        InitParameters init_params = new InitParameters();

        init_params.resolution             = RESOLUTION.HD1080;
        init_params.depthMode              = DEPTH_MODE.ULTRA;
        init_params.coordinateUnits        = UNIT.METER;
        init_params.coordinateSystem       = COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP;
        init_params.depthMaximumDistance   = 10f;
        init_params.cameraDisableSelfCalib = true;

        maxDepthDistance = init_params.depthMaximumDistance;
        parseArgs(args, ref init_params);
        // Open the camera
        zedCamera = new Camera(0);
        ERROR_CODE err = zedCamera.Open(ref init_params);

        if (err != ERROR_CODE.SUCCESS)
        {
            Console.WriteLine("Error opening camera: " + err);
            Environment.Exit(-1);
        }

        if (zedCamera.CameraModel != sl.MODEL.ZED2)
        {
            Console.WriteLine(" ERROR : Use ZED2 Camera only");
            return;
        }

        // Enable tracking (mandatory for object detection)
        Quaternion quat = Quaternion.Identity;
        Vector3    vec  = Vector3.Zero;

        zedCamera.EnablePositionalTracking(ref quat, ref vec);

        runtimeParameters = new RuntimeParameters();

        // Enable the Objects detection module
        ObjectDetectionParameters obj_det_params = new ObjectDetectionParameters();

        obj_det_params.enableObjectTracking = true; // track objects across frames instead of detecting on a per-image basis
        isTrackingON = obj_det_params.enableObjectTracking;
        obj_det_params.enable2DMask   = false;
        obj_det_params.imageSync      = true; // the object detection is synchronized to the image
        obj_det_params.detectionModel = sl.DETECTION_MODEL.MULTI_CLASS_BOX_ACCURATE;

        if (USE_BATCHING)
        {
            batchParameters                = new BatchParameters();
            batchParameters.latency        = 2.0f;
            batchParameters.enable         = true;
            batchHandler                   = new BatchSystemHandler((int)batchParameters.latency * 2);
            obj_det_params.batchParameters = batchParameters;
        }

        zedCamera.EnableObjectDetection(ref obj_det_params);

        // Configure object detection runtime parameters
        obj_runtime_parameters = new ObjectDetectionRuntimeParameters();
        detection_confidence   = 60;
        obj_runtime_parameters.detectionConfidenceThreshold = detection_confidence;
        obj_runtime_parameters.objectClassFilter            = new int[(int)OBJECT_CLASS.LAST];
        obj_runtime_parameters.objectClassFilter[(int)sl.OBJECT_CLASS.PERSON] = Convert.ToInt32(true);
        //obj_runtime_parameters.objectClassFilter[(int)sl.OBJECT_CLASS.VEHICLE] = Convert.ToInt32(true);
        // To set a specific threshold
        obj_runtime_parameters.objectConfidenceThreshold = new int[(int)OBJECT_CLASS.LAST];
        obj_runtime_parameters.objectConfidenceThreshold[(int)sl.OBJECT_CLASS.PERSON] = detection_confidence;
        //obj_runtime_parameters.object_confidence_threshold[(int)sl.OBJECT_CLASS.VEHICLE] = detection_confidence;

        // Create ZED Objects filled in the main loop
        objects   = new Objects();
        imageLeft = new sl.Mat();
        int Height = zedCamera.ImageHeight;
        int Width  = zedCamera.ImageWidth;

        displayRes = new Resolution(Math.Min((uint)Width, 1280), Math.Min((uint)Height, 720));
        Resolution tracksRes = new Resolution(400, (uint)displayRes.height);

        // create a global image to store both image and tracks view
        globalImage = new OpenCvSharp.Mat((int)displayRes.height, (int)displayRes.width + (int)tracksRes.width, OpenCvSharp.MatType.CV_8UC4);
        // reference to the image part of globalImage
        imageLeftOcv = new OpenCvSharp.Mat(globalImage, new OpenCvSharp.Rect(0, 0, (int)displayRes.width, (int)displayRes.height));
        // reference to the tracks part of globalImage
        imageTrackOcv = new OpenCvSharp.Mat(globalImage, new OpenCvSharp.Rect((int)displayRes.width, 0, (int)tracksRes.width, (int)tracksRes.height));
        // init an sl.Mat and wrap its CPU buffer in an OpenCV Mat (imageRenderLeft shares imageLeft's memory)
        imageLeft.Create(displayRes, MAT_TYPE.MAT_8U_C4, MEM.CPU);
        imageRenderLeft = new OpenCvSharp.Mat((int)displayRes.height, (int)displayRes.width, OpenCvSharp.MatType.CV_8UC4, imageLeft.GetPtr());
        imgScale        = new sl.float2((int)displayRes.width / (float)Width, (int)displayRes.height / (float)Height);

        // Create OpenGL Viewer
        viewer = new GLViewer();

        camWorldPose  = new Pose();
        camCameraPose = new Pose();
        pointCloud    = new sl.Mat();
        pcRes         = new Resolution(Math.Min((uint)Width, 720), Math.Min((uint)Height, 404));
        pointCloud.Create(pcRes, MAT_TYPE.MAT_32F_C4, MEM.CPU);

        // 2D tracks
        trackViewGenerator = new TrackingViewer(tracksRes, (int)zedCamera.GetCameraFPS(), maxDepthDistance, 3);
        trackViewGenerator.setCameraCalibration(zedCamera.GetCalibrationParameters());

        window_name = "ZED| 2D View and Birds view";
        Cv2.NamedWindow(window_name, WindowMode.Normal); // create the display window
        Cv2.CreateTrackbar("Confidence", window_name, ref detection_confidence, 100);

        // Create OpenGL window
        CreateWindow();
    }
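The "Confidence" trackbar above writes directly into detection_confidence by ref. A hedged sketch of how a per-frame update could propagate that value into the runtime parameters before each RetrieveObjects call; the helper name is hypothetical, the fields are the ones configured above:

    // Hypothetical per-frame refresh keeping the detection runtime parameters
    // in sync with the "Confidence" trackbar created in the constructor.
    private void UpdateDetectionConfidence()
    {
        obj_runtime_parameters.detectionConfidenceThreshold = detection_confidence;
        // The per-class threshold for PERSON mirrors the global slider value.
        obj_runtime_parameters.objectConfidenceThreshold[(int)sl.OBJECT_CLASS.PERSON] = detection_confidence;
    }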