Example #1
 public virtual void draw(Image image1, Image image2, Image outImage, KeypointArray keypoints_image1, KeypointArray keypoints_image2)
 {
     solar_api_displayPINVOKE.IMatchesOverlay_draw__SWIG_3(swigCPtr, Image.getCPtr(image1), Image.getCPtr(image2), Image.getCPtr(outImage), KeypointArray.getCPtr(keypoints_image1), KeypointArray.getCPtr(keypoints_image2));
     if (solar_api_displayPINVOKE.SWIGPendingException.Pending)
     {
         throw solar_api_displayPINVOKE.SWIGPendingException.Retrieve();
     }
 }
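A note on the pattern: every wrapper in these examples marshals its managed arguments to native pointers with getCPtr, calls the corresponding P/Invoke entry point, then checks SWIGPendingException and rethrows any pending C++ exception on the managed side. A minimal usage sketch for this overload, assuming an overlay component of type IMatchesOverlay and images/keypoints filled in beforehand (all names here are hypothetical):

    // Hypothetical sketch: render the keypoints of two images into one output image.
    Image outImage = SharedPtr.Alloc<Image>();   // allocation pattern taken from Example #12
    try
    {
        overlay.draw(image1, image2, outImage, keypoints1, keypoints2);
    }
    catch (Exception e)
    {
        // a native C++ exception surfaces here via SWIGPendingException.Retrieve()
        Console.Error.WriteLine("draw failed: " + e.Message);
    }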
Example #2
 public virtual void detect(Image image, KeypointArray keypoints)
 {
     solar_api_featuresPINVOKE.IKeypointDetector_detect(swigCPtr, Image.getCPtr(image), KeypointArray.getCPtr(keypoints));
     if (solar_api_featuresPINVOKE.SWIGPendingException.Pending)
     {
         throw solar_api_featuresPINVOKE.SWIGPendingException.Retrieve();
     }
 }
Example #3
 public virtual void drawCircles(KeypointArray keypoints, Image displayImage)
 {
     solar_api_displayPINVOKE.I2DOverlay_drawCircles__SWIG_1(swigCPtr, KeypointArray.getCPtr(keypoints), Image.getCPtr(displayImage));
     if (solar_api_displayPINVOKE.SWIGPendingException.Pending)
     {
         throw solar_api_displayPINVOKE.SWIGPendingException.Retrieve();
     }
 }
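Examples #2 and #3 combine naturally: detect keypoints on a frame, then draw them as circles onto that frame for display. A hedged sketch, assuming kpDetector and overlay2d components created with Create<IKeypointDetector>(...) and Create<I2DOverlay>(...) as in Example #12 (overlay2d is a hypothetical name):

    // Hypothetical sketch: visualize the detected keypoints on the current frame.
    KeypointArray keypoints = new KeypointArray();
    kpDetector.detect(camImage, keypoints);       // Example #2
    overlay2d.drawCircles(keypoints, camImage);   // Example #3: draws onto camImage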
Example #4
 public virtual void extract(Image image, KeypointArray keypoints, DescriptorBuffer descriptors)
 {
     solar_api_featuresPINVOKE.IDescriptorsExtractor_extract(swigCPtr, Image.getCPtr(image), KeypointArray.getCPtr(keypoints), DescriptorBuffer.getCPtr(descriptors));
     if (solar_api_featuresPINVOKE.SWIGPendingException.Pending)
     {
         throw solar_api_featuresPINVOKE.SWIGPendingException.Retrieve();
     }
 }
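Descriptor extraction consumes the keypoints produced by the detector, so the two calls are always chained, exactly as in the constructor of Example #12. A reduced sketch under the same component assumptions:

    // Hypothetical sketch: detect keypoints, then compute their descriptors.
    KeypointArray keypoints = new KeypointArray();
    DescriptorBuffer descriptors = SharedPtr.Alloc<DescriptorBuffer>();
    kpDetector.detect(image, keypoints);
    descriptorExtractor.extract(image, keypoints, descriptors);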
Example #5
        public virtual FrameworkReturnCode reindex(KeypointArray keypoints1, KeypointArray keypoints2, DescriptorMatchVector matches, Point2DfArray matchedKeypoints1, Point2DfArray matchedKeypoints2)
        {
            FrameworkReturnCode ret = (FrameworkReturnCode)solar_api_featuresPINVOKE.IKeypointsReIndexer_reindex(swigCPtr, KeypointArray.getCPtr(keypoints1), KeypointArray.getCPtr(keypoints2), DescriptorMatchVector.getCPtr(matches), Point2DfArray.getCPtr(matchedKeypoints1), Point2DfArray.getCPtr(matchedKeypoints2));

            if (solar_api_featuresPINVOKE.SWIGPendingException.Pending)
            {
                throw solar_api_featuresPINVOKE.SWIGPendingException.Retrieve();
            }
            return ret;
        }
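reindex converts a match list plus the two original keypoint arrays into two aligned point arrays, so that matchedKeypoints1[i] and matchedKeypoints2[i] form one correspondence. Example #13 chains it after matching and filtering; a reduced sketch (the Check() extension on FrameworkReturnCode is the pattern used in Examples #12 and #13):

    // Hypothetical sketch: turn filtered matches into two aligned 2D point arrays.
    Point2DfArray refPts = new Point2DfArray();
    Point2DfArray camPts = new Point2DfArray();
    keypointsReindexer.reindex(refKeypoints, camKeypoints, matches, refPts, camPts).Check();
    // refPts[i] in the reference image now corresponds to camPts[i] in the camera image.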
Example #6
        public virtual FrameworkReturnCode unproject(KeypointArray imageKeypoints, Point3DfArray worldPoints)
        {
            FrameworkReturnCode ret = (FrameworkReturnCode)solar_api_geomPINVOKE.IUnproject_unproject__SWIG_3(swigCPtr, KeypointArray.getCPtr(imageKeypoints), Point3DfArray.getCPtr(worldPoints));

            if (solar_api_geomPINVOKE.SWIGPendingException.Pending)
            {
                throw solar_api_geomPINVOKE.SWIGPendingException.Retrieve();
            }
            return ret;
        }
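This overload back-projects image keypoints to 3D world points. The component created in Example #12 ("SolARUnprojectPlanarPointsOpencv") does so under a planar-scene assumption, and Example #13 uses a Point2DfArray overload that also takes the current pose. A hedged sketch of the KeypointArray overload shown here:

    // Hypothetical sketch: lift detected 2D keypoints to 3D world coordinates.
    Point3DfArray worldPoints = new Point3DfArray();
    if (unprojection.unproject(camKeypoints, worldPoints) != FrameworkReturnCode._SUCCESS)
    {
        Console.Error.WriteLine("unprojection failed");
    }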
Example #7
        public virtual FrameworkReturnCode estimate(KeypointArray pointsView1, KeypointArray pointsView2, Transform3Df poseView1, Transform3Df poseView2, DescriptorMatchVector inlierMatches)
        {
            FrameworkReturnCode ret = (FrameworkReturnCode)solar_api_solver_posePINVOKE.I3DTransformFinderFrom2D2D_estimate__SWIG_1(swigCPtr, KeypointArray.getCPtr(pointsView1), KeypointArray.getCPtr(pointsView2), Transform3Df.getCPtr(poseView1), Transform3Df.getCPtr(poseView2), DescriptorMatchVector.getCPtr(inlierMatches));

            if (solar_api_solver_posePINVOKE.SWIGPendingException.Pending)
            {
                throw solar_api_solver_posePINVOKE.SWIGPendingException.Retrieve();
            }
            return ret;
        }
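Given the matched keypoints of two views and the pose of the first view, this component estimates the pose of the second view and keeps the matches that survive as inliers. A hedged sketch (poseFinder is a hypothetical I3DTransformFinderFrom2D2D instance; default constructors for the output objects are assumed):

    // Hypothetical sketch: estimate the second view's pose from 2D-2D correspondences.
    Transform3Df poseView2 = new Transform3Df();              // assumed default constructor
    DescriptorMatchVector inlierMatches = new DescriptorMatchVector();
    poseFinder.estimate(keypointsView1, keypointsView2, poseView1, poseView2, inlierMatches).Check();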
Example #8
        public virtual double triangulate(KeypointArray keypointsView1, KeypointArray keypointsView2, DescriptorBuffer descriptor1, DescriptorBuffer descriptor2, DescriptorMatchVector matches, PairUIntUInt working_views, Transform3Df poseView1, Transform3Df poseView2, CloudPointVector pcloud)
        {
            double ret = solar_api_solver_mapPINVOKE.ITriangulator_triangulate__SWIG_2(swigCPtr, KeypointArray.getCPtr(keypointsView1), KeypointArray.getCPtr(keypointsView2), DescriptorBuffer.getCPtr(descriptor1), DescriptorBuffer.getCPtr(descriptor2), DescriptorMatchVector.getCPtr(matches), PairUIntUInt.getCPtr(working_views), Transform3Df.getCPtr(poseView1), Transform3Df.getCPtr(poseView2), CloudPointVector.getCPtr(pcloud));

            if (solar_api_solver_mapPINVOKE.SWIGPendingException.Pending)
            {
                throw solar_api_solver_mapPINVOKE.SWIGPendingException.Retrieve();
            }
            return ret;
        }
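triangulate takes the keypoints, descriptors, matches and poses of two views and fills a cloud-point vector; the returned double is an error measure of the triangulation. A hedged sketch (the PairUIntUInt constructor taking the two view indices is an assumption, as are the instance names):

    // Hypothetical sketch: triangulate matched keypoints of two posed views.
    CloudPointVector pcloud = new CloudPointVector();
    PairUIntUInt workingViews = new PairUIntUInt(0, 1);   // assumed constructor
    double error = triangulator.triangulate(keypointsView1, keypointsView2,
                                            descriptors1, descriptors2, matches,
                                            workingViews, poseView1, poseView2, pcloud);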
Example #9
        public virtual FrameworkReturnCode estimate(Image previousImage, Image currentImage, KeypointArray pointsToTrack, Point2DfArray trackedPoints, UCharList status, FloatList error)
        {
            FrameworkReturnCode ret = (FrameworkReturnCode)solar_api_trackingPINVOKE.IOpticalFlowEstimator_estimate__SWIG_0(swigCPtr, Image.getCPtr(previousImage), Image.getCPtr(currentImage), KeypointArray.getCPtr(pointsToTrack), Point2DfArray.getCPtr(trackedPoints), UCharList.getCPtr(status), FloatList.getCPtr(error));

            if (solar_api_trackingPINVOKE.SWIGPendingException.Pending)
            {
                throw solar_api_trackingPINVOKE.SWIGPendingException.Retrieve();
            }
            return ret;
        }
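The optical-flow estimator tracks pointsToTrack from the previous image into the current one; status[i] records whether point i was tracked successfully and error[i] its tracking error. Example #13 shows the full usage; the status-filtering idiom it applies reduces to:

    // Sketch taken from Example #13: keep only the successfully tracked points.
    Point2DfArray trackedPoints = new Point2DfArray();
    UCharList status = new UCharList();
    FloatList err = new FloatList();
    opticalFlow.estimate(previousCamImage, camImage, imagePoints_track, trackedPoints, status, err);
    Point2DfArray pts2D = new Point2DfArray();
    for (int i = 0; i < status.Count; i++)
    {
        if (status[i] == 1)
        {
            pts2D.Add(trackedPoints[i]);
        }
    }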
Example #10
 public virtual void filter(DescriptorMatchVector inputMatches, DescriptorMatchVector outputMatches, KeypointArray inputKeyPoints1, KeypointArray inputKeyPoints2, Transform3Df pose1, Transform3Df pose2, Matrix3x3f intrinsicParams)
 {
     solar_api_featuresPINVOKE.IMatchesFilter_filter__SWIG_1(swigCPtr, DescriptorMatchVector.getCPtr(inputMatches), DescriptorMatchVector.getCPtr(outputMatches), KeypointArray.getCPtr(inputKeyPoints1), KeypointArray.getCPtr(inputKeyPoints2), Transform3Df.getCPtr(pose1), Transform3Df.getCPtr(pose2), Matrix3x3f.getCPtr(intrinsicParams));
     if (solar_api_featuresPINVOKE.SWIGPendingException.Pending)
     {
         throw solar_api_featuresPINVOKE.SWIGPendingException.Retrieve();
     }
 }
Example #11
 public virtual void filter(DescriptorMatchVector inputMatches, DescriptorMatchVector outputMatches, KeypointArray keyPoints_1, KeypointArray keyPoints_2)
 {
     solar_api_featuresPINVOKE.IMatchesFilter_filter__SWIG_0(swigCPtr, DescriptorMatchVector.getCPtr(inputMatches), DescriptorMatchVector.getCPtr(outputMatches), KeypointArray.getCPtr(keyPoints_1), KeypointArray.getCPtr(keyPoints_2));
     if (solar_api_featuresPINVOKE.SWIGPendingException.Pending)
     {
         throw solar_api_featuresPINVOKE.SWIGPendingException.Retrieve();
     }
 }
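Examples #10 and #11 are two overloads of IMatchesFilter.filter: the four-argument overload applies a purely match-based filter, while the seven-argument overload also checks geometric consistency against the two poses and the camera intrinsics. Example #13 chains a basic and a geometric filter component, both through the four-argument overload, filtering the match vector in place:

    // Sketch taken from Example #13: basic pass, then geometric pass, in place.
    basicMatchesFilter.filter(matches, matches, refKeypoints, camKeypoints);
    geomMatchesFilter.filter(matches, matches, refKeypoints, camKeypoints);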
Example #12
        public NaturalPipeline(IComponentManager xpcfComponentManager) : base(xpcfComponentManager)
        {
            imageViewerKeypoints = Create <IImageViewer>("SolARImageViewerOpencv", "keypoints");
            imageViewerResult    = Create <IImageViewer>("SolARImageViewerOpencv");
            marker               = Create <IMarker2DNaturalImage>("SolARMarker2DNaturalImageOpencv");
            kpDetector           = Create <IKeypointDetector>("SolARKeypointDetectorOpencv");
            kpDetectorRegion     = Create <IKeypointDetectorRegion>("SolARKeypointDetectorRegionOpencv");
            descriptorExtractor  = Create <IDescriptorsExtractor>("SolARDescriptorsExtractorAKAZE2Opencv");
            matcher              = Create <IDescriptorMatcher>("SolARDescriptorMatcherKNNOpencv");
            geomMatchesFilter    = Create <IMatchesFilter>("SolARGeometricMatchesFilterOpencv");
            poseEstimationPlanar = Create <I3DTransformSACFinderFrom2D3D>("SolARPoseEstimationPlanarPointsOpencv");
            opticalFlow          = Create <IOpticalFlowEstimator>("SolAROpticalFlowPyrLKOpencv");
            projection           = Create <IProject>("SolARProjectOpencv");
            unprojection         = Create <IUnproject>("SolARUnprojectPlanarPointsOpencv");
            img_mapper           = Create <IImage2WorldMapper>("SolARImage2WorldMapper4Marker2D");
            basicMatchesFilter   = Create <IMatchesFilter>("SolARBasicMatchesFilter");
            keypointsReindexer   = Create <IKeypointsReIndexer>("SolARKeypointsReIndexer");
            overlay3DComponent   = Create <I3DOverlay>("SolAR3DOverlayOpencv");
            /* in dynamic mode, we need to check that all components were created successfully */
            if (new object[] { imageViewerKeypoints, imageViewerResult, marker, kpDetector, kpDetectorRegion, descriptorExtractor, matcher,
                               geomMatchesFilter, poseEstimationPlanar, opticalFlow, projection, unprojection, img_mapper,
                               basicMatchesFilter, keypointsReindexer, overlay3DComponent }.Contains(null))
            {
                LOG_ERROR("Failed to create one or more components");
                return;
            }
            LOG_INFO("All components have been created");

            // Declare data structures used to exchange information between components
            refImage         = SharedPtr.Alloc <Image>().AddTo(subscriptions);
            previousCamImage = SharedPtr.Alloc <Image>().AddTo(subscriptions);

            //kpImageCam = SharedPtr.Alloc<Image>().AddTo(subscriptions);
            refDescriptors = SharedPtr.Alloc <DescriptorBuffer>().AddTo(subscriptions);
            camDescriptors = SharedPtr.Alloc <DescriptorBuffer>().AddTo(subscriptions);
            matches        = new DescriptorMatchVector().AddTo(subscriptions);

            // where to store detected keypoints in ref image and camera image
            refKeypoints = new KeypointArray().AddTo(subscriptions);
            camKeypoints = new KeypointArray().AddTo(subscriptions);

            markerWorldCorners = new Point3DfArray().AddTo(subscriptions);

            // load marker
            marker.loadMarker().Check();
            marker.getWorldCorners(markerWorldCorners).Check();
            marker.getImage(refImage).Check();

            // detect keypoints in reference image
            kpDetector.detect(refImage, refKeypoints);

            // extract descriptors in reference image
            descriptorExtractor.extract(refImage, refKeypoints, refDescriptors);



            // initialize image mapper with the reference image size and marker size
            var img_mapper_config = img_mapper.BindTo <IConfigurable>().AddTo(subscriptions);
            var refSize           = refImage.getSize();
            var mkSize            = marker.getSize();

            img_mapper_config.getProperty("digitalWidth").setIntegerValue((int)refSize.width);
            img_mapper_config.getProperty("digitalHeight").setIntegerValue((int)refSize.height);
            img_mapper_config.getProperty("worldWidth").setFloatingValue(mkSize.width);
            img_mapper_config.getProperty("worldHeight").setFloatingValue(mkSize.height);

            // vector of 4 corners in the marker
            refImgCorners = new Point2DfArray();
            float    w = refImage.getWidth(), h = refImage.getHeight();
            Point2Df corner0 = new Point2Df(0, 0);
            Point2Df corner1 = new Point2Df(w, 0);
            Point2Df corner2 = new Point2Df(w, h);
            Point2Df corner3 = new Point2Df(0, h);

            refImgCorners.Add(corner0);
            refImgCorners.Add(corner1);
            refImgCorners.Add(corner2);
            refImgCorners.Add(corner3);
        }
Example #13
        public override FrameworkReturnCode Proceed(Image camImage, Transform3Df pose, ICamera camera)
        {
            // initialize the 3D overlay component with the camera intrinsic parameters (please refer to the use of the intrinsic parameters file)
            overlay3DComponent.setCameraParameters(camera.getIntrinsicsParameters(), camera.getDistorsionParameters());

            // initialize pose estimation based on planar points with the camera intrinsic parameters (please refer to the use of the intrinsic parameters file)
            poseEstimationPlanar.setCameraParameters(camera.getIntrinsicsParameters(), camera.getDistorsionParameters());

            // initialize the projection component with the camera intrinsic parameters (please refer to the use of the intrinsic parameters file)
            projection.setCameraParameters(camera.getIntrinsicsParameters(), camera.getDistorsionParameters());

            // initialize the unprojection component with the camera intrinsic parameters (please refer to the use of the intrinsic parameters file)
            unprojection.setCameraParameters(camera.getIntrinsicsParameters(), camera.getDistorsionParameters());
            if (!isTrack)
            {
                kpDetector.detect(camImage, camKeypoints);
                descriptorExtractor.extract(camImage, camKeypoints, camDescriptors);
                matcher.match(refDescriptors, camDescriptors, matches);
                basicMatchesFilter.filter(matches, matches, refKeypoints, camKeypoints);
                geomMatchesFilter.filter(matches, matches, refKeypoints, camKeypoints);

                var ref2Dpoints = new Point2DfArray();
                var cam2Dpoints = new Point2DfArray();
                var ref3Dpoints = new Point3DfArray();

                if (matches.Count > 10)
                {
                    keypointsReindexer.reindex(refKeypoints, camKeypoints, matches, ref2Dpoints, cam2Dpoints).Check();
                    img_mapper.map(ref2Dpoints, ref3Dpoints).Check();
                    if (poseEstimationPlanar.estimate(cam2Dpoints, ref3Dpoints, imagePoints_inliers, worldPoints_inliers, pose) != FrameworkReturnCode._SUCCESS)
                    {
                        valid_pose = false;
                        //LOG_DEBUG("Wrong homography for this frame");
                    }
                    else
                    {
                        isTrack = true;
                        needNewTrackedPoints = true;
                        valid_pose           = true;
                        previousCamImage     = camImage.copy();
                        //LOG_INFO("Start tracking", pose.matrix());
                    }
                }
            }
            else
            {
                // initialize points to track
                if (needNewTrackedPoints)
                {
                    imagePoints_track.Clear();
                    worldPoints_track.Clear();
                    KeypointArray newKeypoints = new KeypointArray();
                    // Get the projection of the corners of the marker in the current image
                    projection.project(markerWorldCorners, projectedMarkerCorners, pose);

                    // Detect the keypoints within the contours of the marker defined by the projected corners
                    kpDetectorRegion.detect(previousCamImage, projectedMarkerCorners, newKeypoints);

                    if (newKeypoints.Count > updateTrackedPointThreshold)
                    {
                        foreach (var keypoint in newKeypoints)
                        {
                            //imagePoints_track.push_back(xpcf::utils::make_shared<Point2Df>(keypoint->getX(), keypoint->getY()));
                            imagePoints_track.Add(new Point2Df(keypoint.getX(), keypoint.getY()));
                        }
                        // get back the 3D positions of the detected keypoints in world space
                        unprojection.unproject(imagePoints_track, worldPoints_track, pose);
                        //LOG_DEBUG("Reinitialize points to track");
                    }
                    else
                    {
                        isTrack = false;
                        //LOG_DEBUG("Cannot reinitialize points to track");
                    }
                    needNewTrackedPoints = false;
                }

                // Tracking mode
                if (isTrack)
                {
                    Point2DfArray trackedPoints = new Point2DfArray();
                    Point2DfArray pts2D         = new Point2DfArray();
                    Point3DfArray pts3D         = new Point3DfArray();

                    UCharList status = new UCharList();
                    FloatList err    = new FloatList();

                    // tracking 2D-2D
                    opticalFlow.estimate(previousCamImage, camImage, imagePoints_track, trackedPoints, status, err);

                    for (int i = 0; i < status.Count; i++)
                    {
                        if (status[i] == 1)
                        {
                            pts2D.Add(trackedPoints[i]);
                            pts3D.Add(worldPoints_track[i]);
                        }
                    }
                    // Estimate the camera pose from the 2D-3D planar correspondences
                    if (poseEstimationPlanar.estimate(pts2D, pts3D, imagePoints_track, worldPoints_track, pose) != FrameworkReturnCode._SUCCESS)
                    {
                        isTrack              = false;
                        valid_pose           = false;
                        needNewTrackedPoints = false;
                        //LOG_INFO("Tracking lost");
                    }
                    else
                    {
                        valid_pose       = true;
                        previousCamImage = camImage.copy();
                        if (worldPoints_track.Count < updateTrackedPointThreshold)
                        {
                            needNewTrackedPoints = true;
                        }
                    }
                }
                //else
                //LOG_INFO("Tracking lost");
            }

            if (valid_pose)
            {
                // We draw a box at the location of the recognized natural marker
                overlay3DComponent.draw(pose, camImage);
            }
            //if (imageViewerResult.display(camImage) == FrameworkReturnCode._STOP) return FrameworkReturnCode._STOP;
            return FrameworkReturnCode._SUCCESS;
        }
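Putting Examples #12 and #13 together: the pipeline is constructed once with a component manager, then Proceed is called for every camera frame. A hypothetical driver loop (the ICamera component, its getNextImage method and the component-manager bootstrap are assumptions, not shown in these examples):

    // Hypothetical driver: feed camera frames through the pipeline.
    var pipeline = new NaturalPipeline(xpcfComponentManager);
    Image camImage = SharedPtr.Alloc<Image>();
    Transform3Df pose = new Transform3Df();       // assumed default constructor
    while (camera.getNextImage(camImage) == FrameworkReturnCode._SUCCESS)   // assumed ICamera API
    {
        if (pipeline.Proceed(camImage, pose, camera) == FrameworkReturnCode._STOP)
        {
            break;
        }
    }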