/// <summary>
/// Simple overall prediction
/// </summary>
/// <param name="scene">the SLAM scene and its state</param>
/// <param name="u">control vector of accelerations</param>
/// <param name="delta_t">time step in seconds</param>
public void predict_filter_slow(Scene_Single scene, Vector u, float delta_t)
{
    Debug.WriteLine("*** SLOW PREDICTION ***");

    // What we need to do for the prediction:
    //   Calculate f and grad_f_x
    //   Calculate Q
    //   Form x(k+1|k) and P(k+1|k)

    int size = (int)scene.get_total_state_size();

    // First form original total state and covariance
    Vector x = new Vector(size);
    MatrixFixed P = new MatrixFixed(size, size);
    scene.construct_total_state_and_covariance(ref x, ref P);

    // Make model calculations: store results in RES matrices
    Vector xv = scene.get_xv();
    scene.get_motion_model().func_fv_and_dfv_by_dxv(xv, u, delta_t);
    scene.get_motion_model().func_Q(scene.get_xv(), u, delta_t);

    // Find new state f
    Vector f = new Vector(size);

    // Feature elements of f are the same as x
    f.Update(x);
    f.Update(scene.get_motion_model().get_fvRES(), 0);

    // Find new P
    // Since most elements of df_by_dx are zero...
    MatrixFixed df_by_dx = new MatrixFixed(size, size);
    df_by_dx.Fill(0.0f);

    // Fill the rest of the elements of df_by_dx: 1 on diagonal for features
    for (int i = (int)scene.get_motion_model().STATE_SIZE; i < df_by_dx.Rows; i++)
    {
        df_by_dx[i, i] = 1.0f;
    }
    df_by_dx.Update(scene.get_motion_model().get_dfv_by_dxvRES(), 0, 0);

    // Calculate the process noise
    MatrixFixed Q = new MatrixFixed(size, size);
    Q.Fill(0.0f);
    Q.Update(scene.get_motion_model().get_QxRES(), 0, 0);

    P.Update(df_by_dx * P * df_by_dx.Transpose());
    P += Q;

    scene.fill_state_and_covariance(f, P);
}
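// Note: predict_filter_slow implements the textbook EKF prediction over the
// full stacked state,
//
//   x(k+1|k) = f(x(k|k), u)
//   P(k+1|k) = F P(k|k) F^T + Q,   with F = df_by_dx
//
// where F is block-structured: the vehicle Jacobian dfv_by_dxv sits in the
// top-left corner and the remainder is the identity, because features are
// assumed stationary; for the same reason the process noise Q is non-zero
// only in the vehicle block. Building and multiplying the full size-by-size
// F costs O(size^3) time, which is what predict_filter_fast below avoids by
// exploiting this sparsity.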
/// <summary>
/// Fast version of the filter prediction step
/// </summary>
/// <param name="scene">the SLAM scene and its state</param>
/// <param name="u">control vector of accelerations</param>
/// <param name="delta_t">time step in seconds</param>
public void predict_filter_fast(Scene_Single scene, Vector u, float delta_t)
{
    Vector xv = scene.get_xv();

    // Make model calculations: results are stored in RES matrices
    scene.get_motion_model().func_fv_and_dfv_by_dxv(xv, u, delta_t);
    scene.get_motion_model().func_Q(xv, u, delta_t);

    Vector fvRES = scene.get_motion_model().get_fvRES();
    xv.Update(fvRES);

    Motion_Model mm = scene.get_motion_model();
    MatrixFixed Pxx = scene.get_Pxx();
    MatrixFixed dfv_by_dxvRES = mm.get_dfv_by_dxvRES();
    MatrixFixed dfv_by_dxvRES_transposed = dfv_by_dxvRES.Transpose();
    MatrixFixed QxRES = mm.get_QxRES();
    Pxx.Update((dfv_by_dxvRES * Pxx * dfv_by_dxvRES_transposed) + QxRES);

    // Change the covariances between vehicle state and feature states
    foreach (Feature it in scene.feature_list)
    {
        MatrixFixed Pxy = it.get_Pxy();
        Pxy.Update(dfv_by_dxvRES * Pxy);
    }
}
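// A minimal per-frame driver sketch (hypothetical: the method name, the
// three-component zero-acceleration control vector and the fixed 30 Hz frame
// rate are illustrative assumptions, not part of the original code). With a
// hand-held camera there is no odometry, so the control input is zero and
// all real motion is absorbed by the process noise Q.
public void predict_one_frame(Scene_Single scene)
{
    Vector u = new Vector(0.0f, 0.0f, 0.0f); // no measured accelerations
    float delta_t = 1.0f / 30.0f;            // time step in seconds
    predict_filter_fast(scene, u, delta_t);
    // ...then match features and apply the measurement update
}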
/// <summary>
/// Constructor for partially-initialised features.
/// </summary>
/// <param name="id">reference to the feature (identifying image patch)</param>
/// <param name="lab">label to assign to the feature</param>
/// <param name="list_pos">position of the new feature in the feature list</param>
/// <param name="scene">the SLAM scene and its state</param>
/// <param name="h">initial measurement of the feature</param>
/// <param name="p_i_f_m_m">partially-initialised feature measurement model for this feature type</param>
/// <param name="feature_colour">colour of the feature</param>
public Feature(byte[] id, uint lab, uint list_pos,
               Scene_Single scene, Vector h,
               Partially_Initialised_Feature_Measurement_Model p_i_f_m_m,
               Vector feature_colour)
{
    feature_measurement_model = p_i_f_m_m;
    partially_initialised_feature_measurement_model = p_i_f_m_m;
    fully_initialised_feature_measurement_model = null;

    // Stuff below substituted from Feature_common
    //   Feature_common(id, lab, list_pos, scene, h);
    feature_constructor_bookeeping();

    identifier = id;
    label = lab;
    position_in_list = list_pos;        // Position of new feature in list
    position_in_total_state_vector = 0; // This should be set properly
    colour = feature_colour;            // when feature is added

    // Save the vehicle position where this feature was acquired
    scene.get_motion_model().func_xp(scene.get_xv());
    xp_orig = new Vector(scene.get_motion_model().get_xpRES());

    // Call model functions to calculate feature state, measurement noise
    // and associated Jacobians. Results are stored in RES matrices.

    // First calculate "position state" and Jacobian
    scene.get_motion_model().func_xp(scene.get_xv());
    scene.get_motion_model().func_dxp_by_dxv(scene.get_xv());

    // Now ask the model to initialise the state vector and calculate Jacobians
    // so that we can go and calculate the covariance matrices
    partially_initialised_feature_measurement_model.func_ypi_and_dypi_by_dxp_and_dypi_by_dhi_and_Ri(
        h, scene.get_motion_model().get_xpRES());

    // State y
    y = new Vector(partially_initialised_feature_measurement_model.get_ypiRES());

    // Temp_FS1 will store dypi_by_dxv
    MatrixFixed Temp_FS1 =
        partially_initialised_feature_measurement_model.get_dypi_by_dxpRES() *
        scene.get_motion_model().get_dxp_by_dxvRES();

    // Pxy
    Pxy = scene.get_Pxx() * Temp_FS1.Transpose();

    // Pyy
    Pyy = Temp_FS1 * scene.get_Pxx() * Temp_FS1.Transpose() +
          partially_initialised_feature_measurement_model.get_dypi_by_dhiRES() *
          partially_initialised_feature_measurement_model.get_RiRES() *
          partially_initialised_feature_measurement_model.get_dypi_by_dhiRES().Transpose();

    // Covariances of this feature with others
    int j = 0;
    foreach (Feature it in scene.get_feature_list_noconst())
    {
        if (j < position_in_list)
        {
            // New Pypiyj = dypi_by_dxv . Pxyj
            // Size of this is FEATURE_STATE_SIZE(new) by FEATURE_STATE_SIZE(old)
            MatrixFixed m = it.get_Pxy();
            MatrixFixed newPyjypi_to_store = (Temp_FS1 * m).Transpose();

            // Add to the list
            matrix_block_list.Add(newPyjypi_to_store);
        }
        j++;
    }

    known_feature_label = -1;
}
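// Note: writing A = dypi_by_dxp * dxp_by_dxv (Temp_FS1 above), the new rows
// and columns that this constructor adds to the total covariance are
//
//   Pxy    = Pxx A^T                                       (vehicle / new feature)
//   Pyy    = A Pxx A^T + (dypi_by_dhi) Ri (dypi_by_dhi)^T  (new feature with itself)
//   Pyjypi = (A Pxyj)^T = Pxyj^T A^T                       (old feature j / new feature)
//
// i.e. the standard EKF state-augmentation equations, with the measurement
// noise Ri entering only the new feature's own covariance block.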
// Function to find a region in an image, guided by the current motion
// prediction, which doesn't overlap existing features
public static bool FindNonOverlappingRegion(
    Scene_Single scene,
    Vector local_u,
    float delta_t,
    Partially_Initialised_Feature_Measurement_Model default_feature_type_for_initialisation,
    uint camera_width,
    uint camera_height,
    uint BOXSIZE,
    ref int init_feature_search_ustart,
    ref int init_feature_search_vstart,
    ref int init_feature_search_ufinish,
    ref int init_feature_search_vfinish,
    uint FEATURE_INIT_STEPS_TO_PREDICT,
    float FEATURE_INIT_DEPTH_HYPOTHESIS,
    Random rnd)
{
    ThreeD_Motion_Model threed_motion_model = (ThreeD_Motion_Model)scene.get_motion_model();

    // Predict the vehicle state forward a few steps
    Vector local_xv = new Vector(scene.get_xv());
    for (uint i = 0; i < FEATURE_INIT_STEPS_TO_PREDICT; i++)
    {
        scene.get_motion_model().func_fv_and_dfv_by_dxv(local_xv, local_u, delta_t);
        local_xv.Update(scene.get_motion_model().get_fvRES());
    }

    threed_motion_model.func_xp(local_xv);
    Vector local_xp = new Vector(threed_motion_model.get_xpRES());

    threed_motion_model.func_r(local_xp);
    Vector3D rW = threed_motion_model.get_rRES();
    threed_motion_model.func_q(local_xp);
    Quaternion qWR = threed_motion_model.get_qRES();

    // yW = rW + RWR hR
    Vector3D hR = new Vector3D(0.0f, 0.0f, FEATURE_INIT_DEPTH_HYPOTHESIS);

    // Used Inverse + transpose because this wasn't compiling the normal way
    Vector3D yW = new Vector3D(rW + qWR.RotationMatrix() * hR);

    // Then project that point into the current camera position
    scene.get_motion_model().func_xp(scene.get_xv());

    Fully_Initialised_Feature_Measurement_Model fully_init_fmm =
        (Fully_Initialised_Feature_Measurement_Model)
        (default_feature_type_for_initialisation.more_initialised_feature_measurement_model);

    Vector yWVNL = yW.GetVNL3();
    fully_init_fmm.func_hi_and_dhi_by_dxp_and_dhi_by_dyi(yWVNL, scene.get_motion_model().get_xpRES());

    // Now, this defines roughly how much we expect a feature initialised here to move
    float suggested_u = fully_init_fmm.get_hiRES()[0];
    float suggested_v = fully_init_fmm.get_hiRES()[1];
    float predicted_motion_u = camera_width / 2.0f - suggested_u;
    float predicted_motion_v = camera_height / 2.0f - suggested_v;

    // So, the limits of a "safe" region within which we can initialise
    // features so that they end up staying within the screen
    // (making the approximation that the whole screen moves like the
    // centre point)
    int safe_feature_search_ustart = (int)(-predicted_motion_u);
    int safe_feature_search_vstart = (int)(-predicted_motion_v);
    int safe_feature_search_ufinish = (int)(camera_width - predicted_motion_u);
    int safe_feature_search_vfinish = (int)(camera_height - predicted_motion_v);

    // Clamp the safe region so a full BOXSIZE patch always fits on-screen
    if (safe_feature_search_ustart < (int)((BOXSIZE - 1) / 2) + 1)
        safe_feature_search_ustart = (int)((BOXSIZE - 1) / 2 + 1);
    if (safe_feature_search_ufinish > (int)camera_width - ((int)((BOXSIZE - 1) / 2) + 1))
        safe_feature_search_ufinish = (int)(camera_width - (BOXSIZE - 1) / 2 - 1);
    if (safe_feature_search_vstart < (int)((BOXSIZE - 1) / 2) + 1)
        safe_feature_search_vstart = (int)((BOXSIZE - 1) / 2 + 1);
    if (safe_feature_search_vfinish > (int)camera_height - ((int)((BOXSIZE - 1) / 2) + 1))
        safe_feature_search_vfinish = (int)(camera_height - (BOXSIZE - 1) / 2 - 1);

    return FindNonOverlappingRegionNoPredict(
        safe_feature_search_ustart, safe_feature_search_vstart,
        safe_feature_search_ufinish, safe_feature_search_vfinish,
        scene,
        ref init_feature_search_ustart, ref init_feature_search_vstart,
        ref init_feature_search_ufinish, ref init_feature_search_vfinish,
        rnd);
}
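// Worked example of the clamping above, assuming a hypothetical BOXSIZE of 11
// pixels (an odd patch size, so (BOXSIZE - 1) / 2 = 5 pixels either side of
// the centre): the safe region is clamped to ustart >= 6 and
// ufinish <= camera_width - 6, so an 11x11 correlation patch centred anywhere
// in the safe region always lies entirely on-screen.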
/// <summary>
/// Function to find a non-overlapping region without prediction;
/// this is really the service function called by the functions above.
/// </summary>
/// <param name="safe_feature_search_ustart">left edge of the safe search region</param>
/// <param name="safe_feature_search_vstart">top edge of the safe search region</param>
/// <param name="safe_feature_search_ufinish">right edge of the safe search region</param>
/// <param name="safe_feature_search_vfinish">bottom edge of the safe search region</param>
/// <param name="scene">the SLAM scene and its state</param>
/// <param name="init_feature_search_ustart">returned left edge of the chosen region</param>
/// <param name="init_feature_search_vstart">returned top edge of the chosen region</param>
/// <param name="init_feature_search_ufinish">returned right edge of the chosen region</param>
/// <param name="init_feature_search_vfinish">returned bottom edge of the chosen region</param>
/// <param name="rnd">random number generator</param>
/// <returns>true if a region clear of existing features was found</returns>
public static bool FindNonOverlappingRegionNoPredict(
    int safe_feature_search_ustart,
    int safe_feature_search_vstart,
    int safe_feature_search_ufinish,
    int safe_feature_search_vfinish,
    Scene_Single scene,
    ref int init_feature_search_ustart,
    ref int init_feature_search_vstart,
    ref int init_feature_search_ufinish,
    ref int init_feature_search_vfinish,
    Random rnd)
{
    int i, j;

    int INIT_FEATURE_SEARCH_WIDTH = 80;
    int INIT_FEATURE_SEARCH_HEIGHT = 60;

    // Within this, choose a random region
    // Check that we've got some room for manoeuvre
    if ((safe_feature_search_ufinish - safe_feature_search_ustart > INIT_FEATURE_SEARCH_WIDTH) &&
        (safe_feature_search_vfinish - safe_feature_search_vstart > INIT_FEATURE_SEARCH_HEIGHT))
    {
        // Try a few times to get one that's not overlapping with any features
        // we know about
        int NUMBER_OF_RANDOM_INIT_FEATURE_SEARCH_REGION_TRIES = 5;
        int FEATURE_SEPARATION_MINIMUM = (int)Camera_Constants.BOXSIZE * 3;

        // Build vectors of feature positions so we only have to work them out once
        ArrayList u_array = new ArrayList();
        ArrayList v_array = new ArrayList();
        Feature it;
        for (j = 0; j < scene.get_feature_list().Count; j++)
        {
            it = (Feature)(scene.get_feature_list())[j];
            if (it.get_feature_measurement_model().fully_initialised_flag)
            {
                Fully_Initialised_Feature_Measurement_Model fifmm =
                    (Fully_Initialised_Feature_Measurement_Model)(it.get_feature_measurement_model());
                fifmm.func_hi_and_dhi_by_dxp_and_dhi_by_dyi(it.get_y(), scene.get_motion_model().get_xpRES());

                // Check that this is not a feature behind the camera
                if (it.get_feature_measurement_model().feature_graphics_type == "THREED_POINT")
                {
                    fifmm.func_zeroedyigraphics_and_Pzeroedyigraphics(
                        it.get_y(), scene.get_xv(), scene.get_Pxx(), it.get_Pxy(), it.get_Pyy());

                    if (fifmm.get_zeroedyigraphicsRES()[2] > 0)
                    {
                        u_array.Add(fifmm.get_hiRES()[0]);
                        v_array.Add(fifmm.get_hiRES()[1]);
                    }
                }
            }
        }

        bool clear_region_found = false;
        i = 0;
        while (i < NUMBER_OF_RANDOM_INIT_FEATURE_SEARCH_REGION_TRIES)
        {
            int u_offset = (int)((safe_feature_search_ufinish - safe_feature_search_ustart -
                                  INIT_FEATURE_SEARCH_WIDTH) * (rnd.Next(10000) / 10000.0f));
            int v_offset = (int)((safe_feature_search_vfinish - safe_feature_search_vstart -
                                  INIT_FEATURE_SEARCH_HEIGHT) * (rnd.Next(10000) / 10000.0f));

            init_feature_search_ustart = safe_feature_search_ustart + u_offset;
            init_feature_search_ufinish = init_feature_search_ustart + INIT_FEATURE_SEARCH_WIDTH;
            init_feature_search_vstart = safe_feature_search_vstart + v_offset;
            init_feature_search_vfinish = init_feature_search_vstart + INIT_FEATURE_SEARCH_HEIGHT;

            bool found_a_feature_in_region_flag = false;

            // These arrays will be the same size
            float uit, vit;
            for (j = 0; j < u_array.Count; j++)
            {
                uit = (float)u_array[j];
                vit = (float)v_array[j];
                if ((uit >= init_feature_search_ustart - FEATURE_SEPARATION_MINIMUM) &&
                    (uit < init_feature_search_ufinish + FEATURE_SEPARATION_MINIMUM) &&
                    (vit >= init_feature_search_vstart - FEATURE_SEPARATION_MINIMUM) &&
                    (vit < init_feature_search_vfinish + FEATURE_SEPARATION_MINIMUM))
                {
                    found_a_feature_in_region_flag = true;
                    break;
                }
            }

            if (!found_a_feature_in_region_flag)
            {
                // This region is clear of existing features: use it
                clear_region_found = true;
                break;
            }
            i++;
        }

        if (!clear_region_found)
            return false;
    }
    else
        return false;

    return true;
}
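// A minimal calling sketch (hypothetical: the enclosing static class name
// SceneLib, the 320x240 image size and the pre-computed safe-region bounds
// are illustrative assumptions). The chosen region is returned through the
// ref parameters only when the function returns true:
//
//   int ustart = 0, vstart = 0, ufinish = 0, vfinish = 0;
//   bool ok = SceneLib.FindNonOverlappingRegionNoPredict(
//       6, 6, 314, 234,   // safe region for a 320x240 image with BOXSIZE = 11
//       scene,
//       ref ustart, ref vstart, ref ufinish, ref vfinish,
//       new Random());
//   if (ok)
//   {
//       // search for a new salient patch only inside
//       // (ustart..ufinish, vstart..vfinish)
//   }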
public void SetUp3DDisplays()
{
    // Set display virtual camera parameters to match those of the real camera
    // being used in MonoSLAM
    Partially_Initialised_Feature_Measurement_Model default_pifmm =
        monoslaminterface.GetDefaultFeatureTypeForInitialisation();

    if (default_pifmm != null)
    {
        Line_Init_Wide_Point_Feature_Measurement_Model default_wide_pifmm =
            (Line_Init_Wide_Point_Feature_Measurement_Model)default_pifmm;

        Graphics_Fku = default_wide_pifmm.get_camera().Fku();
        Graphics_Fkv = default_wide_pifmm.get_camera().Fkv();
        Graphics_U0 = default_wide_pifmm.get_camera().U0();
        Graphics_V0 = default_wide_pifmm.get_camera().V0();
        Graphics_Kd1 = default_wide_pifmm.get_camera().Kd1();

        // First display for external 3D view
        /*
        threedtool = new ThreeDToolGlowWidget(this,
            controlpanel1->Width() + controlpanel2->Width(), 0,
            monoslaminterface->GetCameraWidth(),
            monoslaminterface->GetCameraHeight());
        threedtool->DrawEvent.Attach(this, &MonoSLAMGlow::Draw3D);
        threedtool->ProcessHitsEvent.Attach(this, &MonoSLAMGlow::ProcessHits);
        threedtool->SetCameraParameters(Graphics_Fku, Graphics_Fkv,
                                        Graphics_U0, Graphics_V0);
        */

        // Set start position for GL camera in 3D tool
        // This is x, y, z position
        Vector rthreed = new Vector(0.0f, 0.2f, -1.5f);

        // This is camera orientation in my normal coordinate system
        // (z forward, y up, x left)
        Quaternion qWRthreed = new Quaternion(0.0f, 0.0f, 0.0f, 1.0f);

        // We need to adjust by this rotation to fit GL coordinate frame
        // (z backward, y up, x right)
        // So rotate by pi about y axis
        Quaternion qROthreed = new Quaternion(0.0f, 1.0f, 0.0f, 0.0f);
        Quaternion qWOthreed = qWRthreed.Multiply(qROthreed);
        //threedtool.SetCameraPositionOrientation(rthreed, qWOthreed);

        // Second 3D display for images and augmented reality
        /*
        image_threedtool = new ThreeDToolGlowWidget(this,
            controlpanel1->Width() + controlpanel2->Width(), threedtool->Height(),
            monoslaminterface->GetCameraWidth(), monoslaminterface->GetCameraHeight());
        image_threedtool.DrawEvent.Attach(this, &MonoSLAMGlow::ImageDraw3D);
        image_threedtool.ProcessHitsEvent.Attach(this, &MonoSLAMGlow::ImageProcessHits);
        image_threedtool.SetCameraParameters(Graphics_Fku, Graphics_Fkv, Graphics_U0, Graphics_V0);
        */

        // Set up initial state of virtual camera for image display to match
        // the state vector
        Scene_Single scene = monoslaminterface.GetScene();
        if (scene != null)
        {
            Vector v = scene.get_xv();
            if (v != null)
            {
                scene.get_motion_model().func_xp(v);
                ThreeD_Motion_Model threed_motion_model = (ThreeD_Motion_Model)scene.get_motion_model();

                // Compute r and q from the position state before reading the
                // RES results (mirroring FindNonOverlappingRegion above)
                threed_motion_model.func_r(threed_motion_model.get_xpRES());
                Vector3D r = threed_motion_model.get_rRES();
                threed_motion_model.func_q(threed_motion_model.get_xpRES());
                Quaternion qWR = threed_motion_model.get_qRES();

                // q is qWR between world frame and Scene robot frame
                // We need to adjust by this rotation to fit GL coordinate frame
                // (z backward, y up, x right)
                // So rotate by pi about y axis
                Quaternion qRO = new Quaternion(0.0f, 1.0f, 0.0f, 0.0f);
                Quaternion qWO = qWR.Multiply(qRO);
                //image_threedtool.SetCameraPositionOrientation(r, qWO);
            }
            else
                Debug.WriteLine("Scene state vector xv not defined");
        }
        else
            Debug.WriteLine("No scene object defined");
    }
    else
        Debug.WriteLine("No partially initialised feature measurement model found");
}
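// Note on the frame adjustment above: a quaternion for a rotation by angle
// theta about a unit axis a has vector part a*sin(theta/2) and scalar part
// cos(theta/2). In the (x, y, z, w) constructor ordering this code appears to
// use (the identity is written Quaternion(0, 0, 0, 1)), a rotation by pi
// about the y axis is therefore (0, 1, 0, 0), i.e.
// Quaternion(0.0f, 1.0f, 0.0f, 0.0f), which maps the Scene camera frame
// (z forward, x left) onto the GL frame (z backward, x right).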