/// <summary>
/// Fast version of the predict filter. Propagates the vehicle state xv, the
/// vehicle covariance Pxx and the vehicle-feature cross-covariances Pxy
/// through the motion model.
/// </summary>
/// <param name="scene">the scene (SLAM map) whose state and covariances are predicted forward</param>
/// <param name="u">control vector of accelerations</param>
/// <param name="delta_t">time step in seconds</param>
public void predict_filter_fast(Scene_Single scene, Vector u, float delta_t)
{
    Vector xv = scene.get_xv();

    // Make model calculations: results are stored in the motion model's RES matrices
    scene.get_motion_model().func_fv_and_dfv_by_dxv(xv, u, delta_t);
    scene.get_motion_model().func_Q(xv, u, delta_t);

    // Update the vehicle state: xv <- fv(xv, u, delta_t)
    Vector fvRES = scene.get_motion_model().get_fvRES();
    xv.Update(fvRES);

    Motion_Model mm = scene.get_motion_model();
    MatrixFixed Pxx = scene.get_Pxx();
    MatrixFixed dfv_by_dxvRES = mm.get_dfv_by_dxvRES();
    MatrixFixed dfv_by_dxvRES_transposed = dfv_by_dxvRES.Transpose();
    MatrixFixed QxRES = mm.get_QxRES();

    // Propagate the vehicle covariance: Pxx <- F Pxx F^T + Q
    Pxx.Update((dfv_by_dxvRES * Pxx * dfv_by_dxvRES_transposed) + QxRES);

    // Change the covariances between vehicle state and feature states: Pxy <- F Pxy
    foreach (Feature it in scene.feature_list)
    {
        MatrixFixed Pxy = it.get_Pxy();
        Pxy.Update(dfv_by_dxvRES * Pxy);
    }
}
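For reference, the update performed by predict_filter_fast is the standard EKF prediction step; the block below only restates the algebra already implemented above, in the notation used by the code (F = dfv_by_dxvRES, Q_x = QxRES).

```latex
% EKF prediction as implemented by predict_filter_fast
x_v \leftarrow f_v(x_v, u, \Delta t), \qquad
P_{xx} \leftarrow F \, P_{xx} \, F^{\top} + Q_x, \qquad
P_{x y_i} \leftarrow F \, P_{x y_i} \quad \text{for each feature } y_i
```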
/// <summary>
/// Constructor for partially-initialised features.
/// </summary>
/// <param name="id">unique identifier for the feature</param>
/// <param name="lab">label for the feature</param>
/// <param name="list_pos">position of the new feature in the scene's feature list</param>
/// <param name="scene">the scene (SLAM map) the feature is being added to</param>
/// <param name="h">initial measurement of the feature</param>
/// <param name="p_i_f_m_m">partially-initialised feature measurement model to use</param>
/// <param name="feature_colour">colour associated with the feature</param>
public Feature(byte[] id, uint lab, uint list_pos, Scene_Single scene, Vector h,
               Partially_Initialised_Feature_Measurement_Model p_i_f_m_m,
               Vector feature_colour)
{
    feature_measurement_model = p_i_f_m_m;
    partially_initialised_feature_measurement_model = p_i_f_m_m;
    fully_initialised_feature_measurement_model = null;

    // Stuff below substituted from Feature_common
    // Feature_common(id, lab, list_pos, scene, h);
    feature_constructor_bookeeping();

    identifier = id;
    label = lab;
    position_in_list = list_pos;          // Position of new feature in list
    // This should be set properly when the feature is added
    position_in_total_state_vector = 0;
    colour = feature_colour;

    // Save the vehicle position where this feature was acquired
    scene.get_motion_model().func_xp(scene.get_xv());
    //xp_orig = scene.get_motion_model().get_xpRES();
    xp_orig = new Vector(scene.get_motion_model().get_xpRES());

    // Call model functions to calculate feature state, measurement noise
    // and associated Jacobians. Results are stored in RES matrices.

    // First calculate "position state" and Jacobian
    scene.get_motion_model().func_xp(scene.get_xv());
    scene.get_motion_model().func_dxp_by_dxv(scene.get_xv());

    // Now ask the model to initialise the state vector and calculate Jacobians
    // so that we can go and calculate the covariance matrices
    partially_initialised_feature_measurement_model.func_ypi_and_dypi_by_dxp_and_dypi_by_dhi_and_Ri(
        h, scene.get_motion_model().get_xpRES());

    // State y
    //y = partially_initialised_feature_measurement_model.get_ypiRES();
    y = new Vector(partially_initialised_feature_measurement_model.get_ypiRES());

    // Temp_FS1 will store dypi_by_dxv
    MatrixFixed Temp_FS1 =
        partially_initialised_feature_measurement_model.get_dypi_by_dxpRES() *
        scene.get_motion_model().get_dxp_by_dxvRES();

    // Pxy
    Pxy = scene.get_Pxx() * Temp_FS1.Transpose();

    // Pyy
    Pyy = Temp_FS1 * scene.get_Pxx() * Temp_FS1.Transpose() +
          partially_initialised_feature_measurement_model.get_dypi_by_dhiRES() *
          partially_initialised_feature_measurement_model.get_RiRES() *
          partially_initialised_feature_measurement_model.get_dypi_by_dhiRES().Transpose();

    // Covariances of this feature with others
    int j = 0;
    foreach (Feature it in scene.get_feature_list_noconst())
    {
        if (j < position_in_list)
        {
            // new Pypiyj = dypi_by_dxv . Pxyj
            // Size of this is FEATURE_STATE_SIZE(new) by FEATURE_STATE_SIZE(old);
            // the transpose (Pyjypi) is what gets stored.
            MatrixFixed m = it.get_Pxy();
            MatrixFixed newPyjypi_to_store = (Temp_FS1 * m).Transpose();

            // add to the list
            matrix_block_list.Add(newPyjypi_to_store);
        }
        j++;
    }

    known_feature_label = -1;
}
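The covariance blocks set up by this constructor can be summarised as follows. This is only a restatement of the code above, with A standing for Temp_FS1, the Jacobian of the new partial feature state with respect to the vehicle state.

```latex
% Covariance blocks for a new partially-initialised feature
% (A = Temp_FS1 = dypi_by_dxp * dxp_by_dxv)
P_{xy} = P_{xx} A^{\top}, \qquad
P_{yy} = A P_{xx} A^{\top}
       + \frac{\partial y_{pi}}{\partial h_i}\, R_i
         \left( \frac{\partial y_{pi}}{\partial h_i} \right)^{\!\top}, \qquad
P_{y_j y} = \left( A \, P_{x y_j} \right)^{\top}
\;\; \text{for each existing feature } y_j \text{ earlier in the list}
```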
/// <summary>
/// Find a region of the image, clear of existing features, in which to search
/// for a new feature, without using prediction. This is really the service
/// function called by both of the functions above.
/// </summary>
/// <param name="safe_feature_search_ustart">left edge of the area within which the search region must lie</param>
/// <param name="safe_feature_search_vstart">top edge of the area within which the search region must lie</param>
/// <param name="safe_feature_search_ufinish">right edge of the area within which the search region must lie</param>
/// <param name="safe_feature_search_vfinish">bottom edge of the area within which the search region must lie</param>
/// <param name="scene">the scene containing the currently known features</param>
/// <param name="init_feature_search_ustart">returns the left edge of the chosen search region</param>
/// <param name="init_feature_search_vstart">returns the top edge of the chosen search region</param>
/// <param name="init_feature_search_ufinish">returns the right edge of the chosen search region</param>
/// <param name="init_feature_search_vfinish">returns the bottom edge of the chosen search region</param>
/// <param name="rnd">random number generator used to pick candidate regions</param>
/// <returns>true if a non-overlapping search region was found, otherwise false</returns>
public static bool FindNonOverlappingRegionNoPredict(
    int safe_feature_search_ustart,
    int safe_feature_search_vstart,
    int safe_feature_search_ufinish,
    int safe_feature_search_vfinish,
    Scene_Single scene,
    ref int init_feature_search_ustart,
    ref int init_feature_search_vstart,
    ref int init_feature_search_ufinish,
    ref int init_feature_search_vfinish,
    Random rnd)
{
    int i, j;

    //if (Camera_Constants.DEBUGDUMP) cout << "FNOLRNP timer start: " << timerlocal << endl;

    int INIT_FEATURE_SEARCH_WIDTH = 80;
    int INIT_FEATURE_SEARCH_HEIGHT = 60;

    // Within this, choose a random region
    // Check that we've got some room for manoeuvre
    if ((safe_feature_search_ufinish - safe_feature_search_ustart > INIT_FEATURE_SEARCH_WIDTH) &&
        (safe_feature_search_vfinish - safe_feature_search_vstart > INIT_FEATURE_SEARCH_HEIGHT))
    {
        // Try a few times to get one that's not overlapping with any features
        // we know about
        int NUMBER_OF_RANDOM_INIT_FEATURE_SEARCH_REGION_TRIES = 5;
        int FEATURE_SEPARATION_MINIMUM = (int)Camera_Constants.BOXSIZE * 3;

        // Build vectors of feature positions so we only have to work them out once
        ArrayList u_array = new ArrayList();
        ArrayList v_array = new ArrayList();
        Feature it;
        for (j = 0; j < scene.get_feature_list().Count; j++)
        {
            it = (Feature)(scene.get_feature_list())[j];
            if (it.get_feature_measurement_model().fully_initialised_flag)
            {
                //Vector z = it.get_z();
                //u_array.Add(z[0]);
                //v_array.Add(z[1]);
                Fully_Initialised_Feature_Measurement_Model fifmm =
                    (Fully_Initialised_Feature_Measurement_Model)(it.get_feature_measurement_model());
                fifmm.func_hi_and_dhi_by_dxp_and_dhi_by_dyi(it.get_y(),
                    scene.get_motion_model().get_xpRES());

                // Check that this is not a feature behind the camera
                if (it.get_feature_measurement_model().feature_graphics_type == "THREED_POINT")
                {
                    fifmm.func_zeroedyigraphics_and_Pzeroedyigraphics(it.get_y(), scene.get_xv(),
                        scene.get_Pxx(), it.get_Pxy(), it.get_Pyy());

                    if (fifmm.get_zeroedyigraphicsRES()[2] > 0)
                    {
                        u_array.Add(fifmm.get_hiRES()[0]);
                        v_array.Add(fifmm.get_hiRES()[1]);
                    }
                }
            }
        }

        //if (Camera_Constants.DEBUGDUMP) cout << "FNOLRNP timer after functions: " << timerlocal << endl;

        bool clear_region_found = false;
        i = 0;
        while (i < NUMBER_OF_RANDOM_INIT_FEATURE_SEARCH_REGION_TRIES)
        {
            // Pick a random candidate region inside the safe search area
            int u_offset = (int)((safe_feature_search_ufinish - safe_feature_search_ustart -
                                  INIT_FEATURE_SEARCH_WIDTH) * (rnd.Next(10000) / 10000.0f));
            int v_offset = (int)((safe_feature_search_vfinish - safe_feature_search_vstart -
                                  INIT_FEATURE_SEARCH_HEIGHT) * (rnd.Next(10000) / 10000.0f));

            init_feature_search_ustart = safe_feature_search_ustart + u_offset;
            init_feature_search_ufinish = init_feature_search_ustart + INIT_FEATURE_SEARCH_WIDTH;
            init_feature_search_vstart = safe_feature_search_vstart + v_offset;
            init_feature_search_vfinish = init_feature_search_vstart + INIT_FEATURE_SEARCH_HEIGHT;

            bool found_a_feature_in_region_flag = false;

            // These arrays will be the same size
            float uit, vit;
            for (j = 0; j < u_array.Count; j++)
            {
                uit = (float)u_array[j];
                vit = (float)v_array[j];
                if ((uit >= init_feature_search_ustart - FEATURE_SEPARATION_MINIMUM) &&
                    (uit < init_feature_search_ufinish + FEATURE_SEPARATION_MINIMUM) &&
                    (vit >= init_feature_search_vstart - FEATURE_SEPARATION_MINIMUM) &&
                    (vit < init_feature_search_vfinish + FEATURE_SEPARATION_MINIMUM))
                {
                    // An existing feature lies within (or too close to) this candidate region
                    found_a_feature_in_region_flag = true;
                    break;
                }
            }

            if (!found_a_feature_in_region_flag)
            {
                // No existing feature overlaps this region, so use it
                clear_region_found = true;
                break;
            }
            i++;
        }

        if (!clear_region_found)
        {
            return false;
        }
    }
    else
    {
        return false;
    }

    return true;
}
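Below is a minimal usage sketch for the region search above. It assumes a static containing class (called SceneLib here purely for illustration), an already-populated Scene_Single, and that the motion model's position state (xpRES) has been brought up to date beforehand, since the search projects existing features with it. Every name outside the method's own signature is hypothetical.

```csharp
// Hypothetical calling code: choose an 80x60 image region, clear of existing
// features, in which to try to initialise a new feature.
// "SceneLib", GetCurrentScene(), image_width/height and the margin choice are
// assumptions for illustration only.
Scene_Single scene = GetCurrentScene();               // hypothetical accessor
scene.get_motion_model().func_xp(scene.get_xv());     // make sure xpRES is current

int image_width = 320, image_height = 240;            // example camera resolution
int margin = (int)Camera_Constants.BOXSIZE;           // keep clear of the image border

int init_ustart = 0, init_vstart = 0, init_ufinish = 0, init_vfinish = 0;
Random rnd = new Random();

bool region_found = SceneLib.FindNonOverlappingRegionNoPredict(
        margin, margin,
        image_width - margin, image_height - margin,
        scene,
        ref init_ustart, ref init_vstart,
        ref init_ufinish, ref init_vfinish,
        rnd);

if (region_found)
{
    // Search only inside (init_ustart, init_vstart) .. (init_ufinish, init_vfinish)
    // for a salient patch to initialise as a new feature.
}
```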