Example 1
        public override void func_zeroedyi_and_dzeroedyi_by_dxp_and_dzeroedyi_by_dyi(Vector yi, Vector xp)
        {
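            // Transform the feature position yi (world frame) into the robot frame,
            // and compute the Jacobians of the result w.r.t. the robot pose xp and yi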
            ThreeD_Motion_Model mm = ((Wide_Camera_Point_Feature_Measurement_Model)wide_model).threed_motion_model;

            // Extract cartesian and quaternion components of xp
            mm.func_r(xp);
            mm.func_q(xp);

            Vector3D yWiminusrW = new Vector3D((new Vector3D(yi)) - (mm.get_rRES()));

            Quaternion  qRES       = mm.get_qRES();
            Quaternion  qRW        = qRES.Inverse();
            MatrixFixed dqRW_by_dq = MatrixFixed.dqbar_by_dq();
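            // dqbar_by_dq: constant Jacobian of quaternion conjugation
            // (+1 on the scalar component, -1 on each vector component)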

            // Rotation RRW
            RotationMatrix RRW = qRW.RotationMatrix();

            // Position of feature relative to robot in robot frame
            Vector3D zeroedyi = new Vector3D(RRW * yWiminusrW);

            zeroedyiRES.Update(zeroedyi.GetVNL3());

            // Now calculate Jacobians
            // dzeroedyi_by_dyi is RRW
            dzeroedyi_by_dyiRES.Update(RRW);

            // dzeroedyi_by_dxp has 2 partitions:
            // dzeroedyi_by_dr (3 * 3)
            // dzeroedyi_by_dq (3 * 4)
            // dzeroedyi_by_dr = -RRW
            MatrixFixed dzeroedyi_by_dr = (MatrixFixed)RRW * -1.0f;

            // Chain rule through the conjugation:
            // dzeroedyi_by_dq (3x4) = dzeroedyi_by_dqRW (3x4) * dqRW_by_dq (4x4)
            MatrixFixed dzeroedyi_by_dqRW = MatrixFixed.dRq_times_a_by_dq(qRW, yWiminusrW);
            MatrixFixed dzeroedyi_by_dq   = dzeroedyi_by_dqRW * dqRW_by_dq;

            dzeroedyi_by_dxpRES.Update(dzeroedyi_by_dr, 0, 0);
            dzeroedyi_by_dxpRES.Update(dzeroedyi_by_dq, 0, 3);
        }
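For reference, the quantities this routine computes, reconstructed from the code above (W and R denote the world and robot frames, a = yi^W - r^W, and the quaternion component ordering is whatever dqbar_by_dq assumes):

    \[
    y_i^R = R(q^{RW})\,(y_i^W - r^W), \qquad q^{RW} = \bar{q}^{WR}
    \]
    \[
    \frac{\partial y_i^R}{\partial y_i^W} = R(q^{RW}), \qquad
    \frac{\partial y_i^R}{\partial r^W} = -R(q^{RW}), \qquad
    \frac{\partial y_i^R}{\partial q^{WR}} =
    \frac{\partial\, R(q^{RW})\,a}{\partial q^{RW}} \cdot
    \frac{\partial \bar{q}}{\partial q}
    \]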
Example 2
        // Find a region in the image, guided by the current motion prediction,
        // which doesn't overlap existing features
        public static bool FindNonOverlappingRegion(Scene_Single scene,
                                                    Vector local_u,
                                                    float delta_t,
                                                    Partially_Initialised_Feature_Measurement_Model default_feature_type_for_initialisation,
                                                    uint camera_width,
                                                    uint camera_height,
                                                    uint BOXSIZE,
                                                    ref int init_feature_search_ustart,
                                                    ref int init_feature_search_vstart,
                                                    ref int init_feature_search_ufinish,
                                                    ref int init_feature_search_vfinish,
                                                    uint FEATURE_INIT_STEPS_TO_PREDICT,
                                                    float FEATURE_INIT_DEPTH_HYPOTHESIS,
                                                    Random rnd)
        {
            ThreeD_Motion_Model threed_motion_model = (ThreeD_Motion_Model)scene.get_motion_model();

            Vector local_xv = new Vector(scene.get_xv());

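            // Predict the camera state FEATURE_INIT_STEPS_TO_PREDICT steps ahead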
            for (uint i = 0; i < FEATURE_INIT_STEPS_TO_PREDICT; i++)
            {
                scene.get_motion_model().func_fv_and_dfv_by_dxv(local_xv, local_u, delta_t);
                local_xv.Update(scene.get_motion_model().get_fvRES());
            }

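            // Extract the position/orientation part xp of the predicted state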
            threed_motion_model.func_xp(local_xv);
            Vector local_xp = new Vector(threed_motion_model.get_xpRES());

            threed_motion_model.func_r(local_xp);
            Vector3D rW = threed_motion_model.get_rRES();

            threed_motion_model.func_q(local_xp);
            Quaternion qWR = threed_motion_model.get_qRES();

            // yW =  rW + RWR hR
            Vector3D hR = new Vector3D(0.0f, 0.0f, FEATURE_INIT_DEPTH_HYPOTHESIS);

            // (An earlier version used Inverse + transpose here because the direct form wasn't compiling)
            Vector3D yW = new Vector3D(rW + qWR.RotationMatrix() * hR);

            // Then project that point into the current camera position
            scene.get_motion_model().func_xp(scene.get_xv());

            Fully_Initialised_Feature_Measurement_Model fully_init_fmm =
                (Fully_Initialised_Feature_Measurement_Model)(default_feature_type_for_initialisation.more_initialised_feature_measurement_model);


            Vector yWVNL = yW.GetVNL3();

            fully_init_fmm.func_hi_and_dhi_by_dxp_and_dhi_by_dyi(yWVNL, scene.get_motion_model().get_xpRES());

            // Now, this defines roughly how much we expect an initialised feature to move
            float suggested_u        = fully_init_fmm.get_hiRES()[0];
            float suggested_v        = fully_init_fmm.get_hiRES()[1];
            float predicted_motion_u = camera_width / 2.0f - suggested_u;
            float predicted_motion_v = camera_height / 2.0f - suggested_v;

            // So, the limits of a "safe" region within which we can initialise
            // features so that they end up staying within the screen
            // (Making the approximation that the whole screen moves like the
            // centre point)
            int safe_feature_search_ustart  = (int)(-predicted_motion_u);
            int safe_feature_search_vstart  = (int)(-predicted_motion_v);
            int safe_feature_search_ufinish = (int)(camera_width - predicted_motion_u);
            int safe_feature_search_vfinish = (int)(camera_height - predicted_motion_v);

            if (safe_feature_search_ustart < ((int)((BOXSIZE - 1) / 2) + 1))
            {
                safe_feature_search_ustart = (int)((BOXSIZE - 1) / 2 + 1);
            }
            if (safe_feature_search_ufinish > (int)camera_width - ((int)((BOXSIZE - 1) / 2) + 1))
            {
                safe_feature_search_ufinish = (int)(camera_width - (BOXSIZE - 1) / 2 - 1);
            }
            if (safe_feature_search_vstart < ((int)((BOXSIZE - 1) / 2) + 1))
            {
                safe_feature_search_vstart = (int)((BOXSIZE - 1) / 2 + 1);
            }
            if (safe_feature_search_vfinish > (int)camera_height - ((int)((BOXSIZE - 1) / 2) + 1))
            {
                safe_feature_search_vfinish = (int)(camera_height - (BOXSIZE - 1) / 2 - 1);
            }

            return(FindNonOverlappingRegionNoPredict(safe_feature_search_ustart,
                                                     safe_feature_search_vstart,
                                                     safe_feature_search_ufinish,
                                                     safe_feature_search_vfinish,
                                                     scene,
                                                     ref init_feature_search_ustart,
                                                     ref init_feature_search_vstart,
                                                     ref init_feature_search_ufinish,
                                                     ref init_feature_search_vfinish, rnd));
        }
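As a quick check of the BOXSIZE clamping arithmetic above, here is a minimal standalone C# sketch; the values BOXSIZE = 15 and camera_width = 320 are assumptions for illustration, not taken from the code:

            // Minimal sketch of the safe-region clamping, with assumed values
            uint BOXSIZE = 15, camera_width = 320;
            int margin  = (int)((BOXSIZE - 1) / 2) + 1;                 // template half-width + 1 = 8
            int ustart  = margin;                                        // earliest safe column: 8
            int ufinish = (int)(camera_width - (BOXSIZE - 1) / 2 - 1);   // last safe column: 312
            System.Console.WriteLine($"safe u range: [{ustart}, {ufinish}]");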
Example 3
        public void SetUp3DDisplays()
        {
            // Set display virtual camera parameters to match those of the real camera
            // being used in MonoSLAM
            Partially_Initialised_Feature_Measurement_Model default_pifmm =
                monoslaminterface.GetDefaultFeatureTypeForInitialisation();

            if (default_pifmm != null)
            {
                Line_Init_Wide_Point_Feature_Measurement_Model default_wide_pifmm =
                    (Line_Init_Wide_Point_Feature_Measurement_Model)default_pifmm;

                Graphics_Fku = default_wide_pifmm.get_camera().Fku();
                Graphics_Fkv = default_wide_pifmm.get_camera().Fkv();
                Graphics_U0  = default_wide_pifmm.get_camera().U0();
                Graphics_V0  = default_wide_pifmm.get_camera().V0();
                Graphics_Kd1 = default_wide_pifmm.get_camera().Kd1();

                // First display for external 3D view

                /*
                 * threedtool = new ThreeDToolGlowWidget(this,
                 *   controlpanel1->Width() + controlpanel2->Width(), 0,
                 *       monoslaminterface->GetCameraWidth(),
                 *       monoslaminterface->GetCameraHeight());
                 * threedtool->DrawEvent.Attach(this, &MonoSLAMGlow::Draw3D);
                 * threedtool->ProcessHitsEvent.Attach(this, &MonoSLAMGlow::ProcessHits);
                 * threedtool->SetCameraParameters(Graphics_Fku, Graphics_Fkv,
                 *    Graphics_U0, Graphics_V0);
                 */


                // Set start position for GL camera in 3D tool
                // This is x, y, z position
                Vector rthreed = new Vector(0.0f, 0.2f, -1.5f);
                // This is camera orientation in my normal coordinate system
                // (z forward, y up, x left)
                Quaternion qWRthreed = new Quaternion(0.0f, 0.0f, 0.0f, 1.0f);
                // We need to adjust by this rotation to fit GL coordinate frame
                // (z backward, y up, x right)
                // So rotate by pi about y axis
                Quaternion qROthreed = new Quaternion(0.0f, 1.0f, 0.0f, 0.0f);
                Quaternion qWOthreed = qWRthreed.Multiply(qROthreed);
                //threedtool.SetCameraPositionOrientation(rthreed, qWOthreed);

                // Second 3D display for images and augmented reality

                /*
                 * image_threedtool = new ThreeDToolGlowWidget(this,
                 *                         controlpanel1->Width() + controlpanel2->Width(), threedtool->Height(),
                 *                         monoslaminterface->GetCameraWidth(), monoslaminterface->GetCameraHeight());
                 * image_threedtool.DrawEvent.Attach(this, &MonoSLAMGlow::ImageDraw3D);
                 * image_threedtool.ProcessHitsEvent.Attach(this, &MonoSLAMGlow::ImageProcessHits);
                 * image_threedtool.SetCameraParameters(Graphics_Fku, Graphics_Fkv, Graphics_U0, Graphics_V0);
                 */

                // Set up initial state of virtual camera for image display to match
                // state vector
                Scene_Single scene = monoslaminterface.GetScene();
                if (scene != null)
                {
                    Vector v = scene.get_xv();
                    if (v != null)
                    {
                        scene.get_motion_model().func_xp(v);
                        ThreeD_Motion_Model threed_motion_model = (ThreeD_Motion_Model)scene.get_motion_model();
                        Vector3D            r   = threed_motion_model.get_rRES();
                        Quaternion          qWR = threed_motion_model.get_qRES();
                        // q is qWR between world frame and Scene robot frame
                        // We need to adjust by this rotation to fit GL coordinate frame
                        // (z backward, y up, x right)
                        // So rotate by pi about y axis
                        Quaternion qRO = new Quaternion(0.0f, 1.0f, 0.0f, 0.0f);
                        Quaternion qWO = qWR.Multiply(qRO);
                        //image_threedtool.SetCameraPositionOrientation(r, qWO);
                    }
                    else
                    {
                        Debug.WriteLine("Scene state vector xv not defined");
                    }
                }
                else
                {
                    Debug.WriteLine("No scene object defined");
                }
            }
            else
            {
                Debug.WriteLine("No partially initialised feature measurement model found");
            }
        }
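As an aside on the qROthreed / qRO values used above, a minimal sketch of why (0, 1, 0, 0) encodes a rotation of pi about the y axis, assuming the (x, y, z, w) component ordering implied by the identity quaternion (0, 0, 0, 1):

            // q = (ax*sin(theta/2), ay*sin(theta/2), az*sin(theta/2), cos(theta/2))
            // for a rotation of theta about the unit axis (ax, ay, az)
            double theta = System.Math.PI;
            double s = System.Math.Sin(theta / 2.0);   // = 1
            double c = System.Math.Cos(theta / 2.0);   // = 0 (up to rounding)
            // axis (0, 1, 0)  ->  q = (0*s, 1*s, 0*s, c) = (0, 1, 0, 0)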