/// <summary>
/// Fast version of predict filter
/// </summary>
/// <param name="scene">The SLAM map to predict forward</param>
/// <param name="u">Control vector of accelerations</param>
/// <param name="delta_t">time step in seconds</param>
public void predict_filter_fast(Scene_Single scene, Vector u, float delta_t)
{
    Motion_Model model = scene.get_motion_model();
    Vector state = scene.get_xv();

    // Run the motion model: results are stored in the model's RES matrices
    model.func_fv_and_dfv_by_dxv(state, u, delta_t);
    model.func_Q(state, u, delta_t);

    // Replace the robot state with its prediction f(x, u)
    state.Update(model.get_fvRES());

    // Pxx <- F Pxx F^T + Q, where F = dfv/dxv
    MatrixFixed jacobian = model.get_dfv_by_dxvRES();
    MatrixFixed jacobianT = jacobian.Transpose();
    MatrixFixed Pxx = scene.get_Pxx();
    Pxx.Update((jacobian * Pxx * jacobianT) + model.get_QxRES());

    // Cross-covariances between the robot state and every feature: Pxy <- F Pxy
    foreach (Feature feat in scene.feature_list)
    {
        MatrixFixed Pxy = feat.get_Pxy();
        Pxy.Update(jacobian * Pxy);
    }
}
/// <summary>
/// Find a non-overlapping region within the whole image (no motion prediction).
/// Computes the "safe" bounds that keep a BOXSIZE patch fully inside the image
/// and delegates the actual search to FindNonOverlappingRegionNoPredict().
/// </summary>
public static bool FindNonOverlappingRegionWholeImage(Scene_Single scene,
    uint camera_width, uint camera_height, uint BOXSIZE,
    ref int init_feature_search_ustart, ref int init_feature_search_vstart,
    ref int init_feature_search_ufinish, ref int init_feature_search_vfinish,
    Random rnd)
{
    // Half-width of a feature patch; a patch centred closer than this to the
    // image border would stick out of the image
    int half = (int)((BOXSIZE - 1) / 2);

    int safe_feature_search_ustart = half + 1;
    int safe_feature_search_vstart = half + 1;
    int safe_feature_search_ufinish = (int)camera_width - half - 1;
    int safe_feature_search_vfinish = (int)camera_height - half - 1;

    return FindNonOverlappingRegionNoPredict(safe_feature_search_ustart,
        safe_feature_search_vstart, safe_feature_search_ufinish,
        safe_feature_search_vfinish, scene,
        ref init_feature_search_ustart, ref init_feature_search_vstart,
        ref init_feature_search_ufinish, ref init_feature_search_vfinish,
        rnd);
}
/// <summary>
/// Process the list of pending features: any pending feature whose measured
/// image position is non-zero is tested for visibility, added if visible, and
/// removed from the pending list.
/// </summary>
/// <param name="scene">the current SLAM map</param>
public void update(Scene_Single scene)
{
    this.scene = scene;

    int i = 0;
    while (i < features_pending.Count)
    {
        Feature f = (Feature)features_pending[i];
        Vector pos = f.get_h();

        // (0, 0) means no measurement is available for this feature yet
        if ((pos[0] != 0) || (pos[1] != 0))
        {
            if (featureVisible(f)) addFeature(f);

            // BUG FIX: the original code called Remove(f) and then still
            // incremented i, skipping the element that shifted into slot i.
            // Remove in place and do NOT advance the index.
            features_pending.RemoveAt(i);
        }
        else
        {
            i++;
        }
    }
}
/// <summary>
/// Simple overall prediction
/// </summary>
/// <param name="scene">The SLAM map to predict forward</param>
/// <param name="u">Control vector of accelerations</param>
/// <param name="delta_t">time step in seconds</param>
public void predict_filter_slow(Scene_Single scene, Vector u, float delta_t)
{
    Debug.WriteLine("*** SLOW PREDICTION ***");

    // Prediction steps:
    //   calculate f and grad_f_x, calculate Q,
    //   then form x(k+1|k) and P(k+1|k)
    int state_size = (int)scene.get_total_state_size();

    // Original total state and covariance
    Vector x = new Vector(state_size);
    MatrixFixed P = new MatrixFixed(state_size, state_size);
    scene.construct_total_state_and_covariance(ref x, ref P);

    // Run the motion model: results are stored in the RES matrices
    Vector xv = scene.get_xv();
    scene.get_motion_model().func_fv_and_dfv_by_dxv(xv, u, delta_t);
    scene.get_motion_model().func_Q(scene.get_xv(), u, delta_t);

    // New state f: feature elements are unchanged, the vehicle part is
    // replaced by the predicted vehicle state
    Vector f = new Vector(state_size);
    f.Update(x);
    f.Update(scene.get_motion_model().get_fvRES(), 0);

    // Full Jacobian df_by_dx: identity over the feature block, the motion
    // model Jacobian over the vehicle block, zero everywhere else
    MatrixFixed df_by_dx = new MatrixFixed(state_size, state_size);
    df_by_dx.Fill(0.0f);
    for (int r = (int)scene.get_motion_model().STATE_SIZE; r < df_by_dx.Rows; r++)
    {
        df_by_dx[r, r] = 1.0f;
    }
    df_by_dx.Update(scene.get_motion_model().get_dfv_by_dxvRES(), 0, 0);

    // Process noise: only the vehicle block is non-zero
    MatrixFixed Q = new MatrixFixed(state_size, state_size);
    Q.Fill(0.0f);
    Q.Update(scene.get_motion_model().get_QxRES(), 0, 0);

    // P(k+1|k) = J P J^T + Q
    P.Update(df_by_dx * P * df_by_dx.Transpose());
    P += Q;

    scene.fill_state_and_covariance(f, P);
}
/// <summary>
/// Constructor for known features. The different number of arguments
/// differentiates it from the constructor for partially-initialised features.
/// </summary>
/// <param name="id">reference to the feature identifier</param>
/// <param name="lab">label for the feature</param>
/// <param name="list_pos">position of the new feature in the feature list</param>
/// <param name="scene">the SLAM map</param>
/// <param name="y_known">the known feature state</param>
/// <param name="xp_o">vehicle position state at acquisition</param>
/// <param name="f_m_m">the measurement model to use for this feature</param>
/// <param name="k_f_l">the known-feature label</param>
public Feature(byte[] id, uint lab, uint list_pos, Scene_Single scene,
               Vector y_known, Vector xp_o, Feature_Measurement_Model f_m_m,
               uint k_f_l)
{
    feature_measurement_model = f_m_m;
    feature_constructor_bookeeping();

    identifier = id;
    label = lab;
    position_in_list = list_pos;   // Position of new feature in list

    // Save the vehicle position where this feature was acquired
    xp_orig = new Vector(xp_o);

    // The state is known exactly, so covariances start at zero
    y = y_known;
    Pxy = new MatrixFixed(scene.get_motion_model().STATE_SIZE,
                          feature_measurement_model.FEATURE_STATE_SIZE);
    Pxy.Fill(0.0f);
    Pyy = new MatrixFixed(feature_measurement_model.FEATURE_STATE_SIZE,
                          feature_measurement_model.FEATURE_STATE_SIZE);
    Pyy.Fill(0.0f);

    // Allocate a cross-covariance block with every feature earlier in the list
    int index = 0;
    foreach (Feature other in scene.get_feature_list_noconst())
    {
        if (index < position_in_list)
        {
            MatrixFixed newPyjyi_to_store = new MatrixFixed(
                other.get_feature_measurement_model().FEATURE_STATE_SIZE,
                feature_measurement_model.FEATURE_STATE_SIZE);
            matrix_block_list.Add(newPyjyi_to_store);
        }
        index++;
    }

    known_feature_label = (int)k_f_l;

    // Record which specialised model interface this feature exposes
    if (feature_measurement_model.fully_initialised_flag)
    {
        partially_initialised_feature_measurement_model = null;
        fully_initialised_feature_measurement_model =
            (Fully_Initialised_Feature_Measurement_Model)feature_measurement_model;
    }
    else
    {
        fully_initialised_feature_measurement_model = null;
        partially_initialised_feature_measurement_model =
            (Partially_Initialised_Feature_Measurement_Model)feature_measurement_model;
    }
}
/// <summary>
/// Update the filter with an internal measurement (simple overall version).
/// </summary>
/// <param name="scene">The SLAM map</param>
/// <param name="i">index of the internal measurement to use</param>
public void update_filter_internal_measurement_slow(Scene_Single scene, uint i)
{
    Internal_Measurement im =
        (Internal_Measurement)(scene.internal_measurement_vector[(int)i]);
    uint meas_size = im.get_internal_measurement_model().MEASUREMENT_SIZE;
    uint state_size = scene.get_total_state_size();

    Vector x = new Vector(state_size);
    MatrixFixed P = new MatrixFixed(state_size, state_size);
    scene.construct_total_state_and_covariance(ref x, ref P);

    // 1. Form nu and dh_by_dx
    Vector nu_tot = new Vector(meas_size);
    MatrixFixed dh_by_dx_tot = new MatrixFixed(meas_size, state_size);
    MatrixFixed R_tot = new MatrixFixed(meas_size, meas_size);
    scene.construct_total_internal_measurement_stuff(nu_tot, dh_by_dx_tot, R_tot, i);

    // 2. Innovation covariance S(k+1) = H P H^T + R
    MatrixFixed dh_by_dx_totT = dh_by_dx_tot.Transpose();
    MatrixFixed S = new MatrixFixed(meas_size, meas_size);
    S.Update(dh_by_dx_tot * P * dh_by_dx_totT);
    S += R_tot;

    // 3. Kalman gain W(k+1) = P H^T S^-1
    Cholesky S_cholesky = new Cholesky(S);
    MatrixFixed W = P * dh_by_dx_totT * S_cholesky.Inverse();

    // 4. State update x(k+1|k+1)
    x += W * nu_tot;

    // 5. Covariance update P(k+1|k+1)
    P -= W * S * W.Transpose();

    scene.fill_state_and_covariance(x, P);
}
/// <summary>
/// Constructor. The class is initialised with no particles, so these must be
/// added later using add_particle().
/// </summary>
/// <param name="sc">The Scene class.</param>
/// <param name="f">The feature for which this class represents additional information.</param>
public FeatureInitInfo(Scene_Single sc, Feature f)
{
    scene = sc;
    fp = f;

    // Dimension of the free-parameter space of the feature's partial model
    PARTICLE_DIMENSION =
        f.get_partially_initialised_feature_measurement_model().FREE_PARAMETER_SIZE;

    number_of_match_attempts = 0;
    nonzerovariance = false;

    // Distribution over the free parameters, filled in as particles are added
    mean = new Vector(PARTICLE_DIMENSION);
    covariance = new MatrixFixed(PARTICLE_DIMENSION, PARTICLE_DIMENSION);
}
/// <summary>
/// Update the filter in a simple overall way
/// </summary>
/// <param name="scene">The SLAM map to update</param>
public void total_update_filter_slow(Scene_Single scene)
{
    // Steps to update the total filter:
    //   1. Form h and dh_by_dx and R(k+1) and z
    //   2. Calculate S(k+1)
    //   3. Calculate W(k+1)
    //   4. Calculate x(k+1|k+1)
    //   5. Calculate P(k+1|k+1)
    uint meas_size = scene.get_successful_measurement_vector_size();
    uint state_size = scene.get_total_state_size();

    Vector x = new Vector(state_size);
    MatrixFixed P = new MatrixFixed(state_size, state_size);
    scene.construct_total_state_and_covariance(ref x, ref P);

    // 1. Form nu and dh_by_dx
    Vector nu_tot = new Vector(meas_size);
    MatrixFixed dh_by_dx_tot = new MatrixFixed(meas_size, state_size);
    MatrixFixed R_tot = new MatrixFixed(meas_size, meas_size);
    scene.construct_total_measurement_stuff(nu_tot, dh_by_dx_tot, R_tot);

    // 2. S = H P H^T + R; P H^T is cached because it is reused for the gain
    MatrixFixed PHT = P * dh_by_dx_tot.Transpose();
    MatrixFixed S = dh_by_dx_tot * PHT;
    S += R_tot;

    // 3. Kalman gain W = P H^T S^-1
    Cholesky S_cholesky = new Cholesky(S);
    MatrixFixed W = PHT * S_cholesky.Inverse();

    // 4. x(k+1|k+1)
    x += W * nu_tot;

    // 5. P(k+1|k+1)
    P -= W * S * W.Transpose();

    scene.fill_state_and_covariance(x, P);
}
/// <summary>
/// Make measurements of all the currently-selected features. Features can be
/// selected using Scene_Single::auto_select_n_features(), or manually using
/// Scene_Single::select_feature(). This calls
/// Scene_Single::starting_measurements() and then Sim_Or_Rob::measure_feature()
/// for each selected feature. Each feature for which a measurement
/// attempt is made has its Feature::attempted_measurements_of_feature and
/// Feature::successful_measurements_of_feature counts updated.
/// </summary>
/// <param name="scene">The SLAM map to use</param>
/// <param name="sim_or_rob">The class to use for measuring features.</param>
/// <returns>The number of features successfully measured.</returns>
public static int make_measurements(Scene_Single scene, Sim_Or_Rob sim_or_rob, Random rnd)
{
    if (scene.get_no_selected() == 0)
    {
        Debug.WriteLine("No features selected.");
        return 0;
    }

    scene.starting_measurements();

    int count = 0;
    foreach (Feature feat in scene.selected_feature_list)
    {
        // best position for the feature within the image
        Vector z = feat.get_z_noconst();
        bool located = sim_or_rob.measure_feature(feat.get_identifier(), ref z,
                                                  feat.get_vz(), feat.get_h(),
                                                  feat.get_S(), rnd);
        if (located)
        {
            // the feature was found!
            scene.successful_measurement_of_feature(feat);
            feat.update_velocity(true);
            count++;
        }
        else
        {
            // couldn't locate the feature
            scene.failed_measurement_of_feature(feat);
            feat.update_velocity(false);
        }
    }
    return count;
}
/// <summary>
/// Convert a partially-initialised feature to a fully-initialised feature,
/// given information about the free parameters \vct{\lambda}.
/// The new state \vct{y}_{fi} is given by calling
/// Partially_Initialised_Feature_Measurement_Model::func_yfi_and_dyfi_by_dypi_and_dyfi_by_dlambda().
/// where the various Jacobians are returned by calls to
/// Partially_Initialised_Feature_Measurement_Model, and the covariance matrices
/// \mat{P}_{kl} are already known and stored in the class, except for
/// \mat{P}_{\vct{\lambda}}, which is passed to the function.
/// </summary>
/// <param name="lambda">The mean value for \vct{\lambda}</param>
/// <param name="Plambda">The covariance for \vct{\lambda}</param>
/// <param name="scene">The SLAM map</param>
public void convert_from_partially_to_fully_initialised(
    Vector lambda, MatrixFixed Plambda, Scene_Single scene)
{
    // We'll do all the work here in feature.cc though probably this only
    // works with scene_single...

    // We calculate new state yfi(ypi, lambda)
    // New feature covariance
    // Pyfiyfi = dyfi_by_dypi Pypiypi dyfi_by_dypiT +
    //           dyfi_by_dlambda Plambda dyfi_by_dlambdaT
    // And we change cross covariances as follows:
    // Pxyfi = Pxypi dyfi_by_dypiT
    // Pyjyfi = Pyjypi dyfi_by_dypiT for j < i (since we only store top-right
    // Pyfiyj = dyfi_by_dypi Pypiyj for j > i part of covariance matrix)

    // Compute yfi and its Jacobians; results land in the model's RES members
    partially_initialised_feature_measurement_model.func_yfi_and_dyfi_by_dypi_and_dyfi_by_dlambda(y, lambda);

    MatrixFixed dyfi_by_dypiT = partially_initialised_feature_measurement_model.get_dyfi_by_dypiRES().Transpose();
    MatrixFixed dyfi_by_dlambdaT = partially_initialised_feature_measurement_model.get_dyfi_by_dlambdaRES().Transpose();

    // Replace y with the fully-initialised state
    y = new Vector(partially_initialised_feature_measurement_model.get_yfiRES());

    // Replace Pxy
    Pxy = Pxy * dyfi_by_dypiT;

    // Replace Pyy
    MatrixFixed Pypiypi_1 = partially_initialised_feature_measurement_model.get_dyfi_by_dypiRES() * Pyy * dyfi_by_dypiT;
    MatrixFixed Pypiypi_2 = partially_initialised_feature_measurement_model.get_dyfi_by_dlambdaRES() * Plambda * dyfi_by_dlambdaT;
    Pyy = Pypiypi_1 + Pypiypi_2;

    // Pyjyi elements for j < i (covariance with features before i in list)
    uint i = position_in_list;
    MatrixFixed m_it;
    int j;
    for (j = 0; j < position_in_list; j++)
    {
        m_it = (MatrixFixed)matrix_block_list[j];
        matrix_block_list[j] = m_it * dyfi_by_dypiT;
    }

    // Pyfiyj elements for j > i: pre-multiply the block each later feature
    // stores for this one, and shift those features' state-vector positions
    Feature it;
    int idx = scene.feature_list.IndexOf(this);
    for (j = idx + 1; j < scene.feature_list.Count; j++)
    {
        it = (Feature)(scene.feature_list[j]);
        it.matrix_block_list[(int)i] = partially_initialised_feature_measurement_model.get_dyfi_by_dypiRES() * (MatrixFixed)it.matrix_block_list[(int)i];
        // NOTE(review): this shifts later features down by the OLD (partial)
        // state size, while the scene total below changes by (size1 - size2)
        // -- looks inconsistent; confirm against the original SceneLib code.
        it.increment_position_in_total_state_vector(-(int)feature_measurement_model.FEATURE_STATE_SIZE);
    }

    // Change the total state size in scene, here with a negative increment
    uint size1 = partially_initialised_feature_measurement_model.more_initialised_feature_measurement_model.FEATURE_STATE_SIZE;
    uint size2 = partially_initialised_feature_measurement_model.FEATURE_STATE_SIZE;
    scene.increment_total_state_size((int)size1 - (int)size2);

    // Change fmm for this model to fully-initialised one
    feature_measurement_model = partially_initialised_feature_measurement_model.more_initialised_feature_measurement_model;
    partially_initialised_feature_measurement_model = null;
    fully_initialised_feature_measurement_model = (Fully_Initialised_Feature_Measurement_Model)feature_measurement_model;
    //assert(fully_initialised_feature_measurement_model != NULL);

    // Need to reallocate any other matrices
    // Assume that measurement size doesn't change
    dh_by_dy.Resize(feature_measurement_model.MEASUREMENT_SIZE, feature_measurement_model.FEATURE_STATE_SIZE);
}
/// <summary>
/// Constructor for partially-initialised features.
/// </summary>
/// <param name="id">reference to the feature identifier</param>
/// <param name="lab">label for the feature</param>
/// <param name="list_pos">position of the new feature in the feature list</param>
/// <param name="scene">the SLAM map</param>
/// <param name="h">initial measurement of the feature</param>
/// <param name="p_i_f_m_m">the partially-initialised measurement model to use</param>
/// <param name="feature_colour">colour assigned to the feature</param>
public Feature(byte[] id, uint lab, uint list_pos, Scene_Single scene, Vector h,
               Partially_Initialised_Feature_Measurement_Model p_i_f_m_m,
               Vector feature_colour)
{
    feature_measurement_model = p_i_f_m_m;
    partially_initialised_feature_measurement_model = p_i_f_m_m;
    fully_initialised_feature_measurement_model = null;

    // Bookkeeping previously done by Feature_common(id, lab, list_pos, scene, h)
    feature_constructor_bookeeping();
    identifier = id;
    label = lab;
    position_in_list = list_pos;        // Position of new feature in list
    position_in_total_state_vector = 0; // Set properly when feature is added
    colour = feature_colour;

    // Save the vehicle position where this feature was acquired
    scene.get_motion_model().func_xp(scene.get_xv());
    xp_orig = new Vector(scene.get_motion_model().get_xpRES());

    // Call model functions to calculate feature state, measurement noise
    // and associated Jacobians. Results are stored in RES matrices.
    // First calculate "position state" and Jacobian
    scene.get_motion_model().func_xp(scene.get_xv());
    scene.get_motion_model().func_dxp_by_dxv(scene.get_xv());

    // Now ask the model to initialise the state vector and calculate Jacobians
    // so the covariance matrices can be formed below
    partially_initialised_feature_measurement_model.func_ypi_and_dypi_by_dxp_and_dypi_by_dhi_and_Ri(
        h, scene.get_motion_model().get_xpRES());

    // State y
    y = new Vector(partially_initialised_feature_measurement_model.get_ypiRES());

    // dypi_by_dxv = dypi_by_dxp . dxp_by_dxv
    MatrixFixed dypi_by_dxv =
        partially_initialised_feature_measurement_model.get_dypi_by_dxpRES() *
        scene.get_motion_model().get_dxp_by_dxvRES();

    // Pxy
    Pxy = scene.get_Pxx() * dypi_by_dxv.Transpose();

    // Pyy
    Pyy = dypi_by_dxv * scene.get_Pxx() * dypi_by_dxv.Transpose() +
          partially_initialised_feature_measurement_model.get_dypi_by_dhiRES() *
          partially_initialised_feature_measurement_model.get_RiRES() *
          partially_initialised_feature_measurement_model.get_dypi_by_dhiRES().Transpose();

    // Covariances of this feature with those earlier in the list
    int index = 0;
    foreach (Feature other in scene.get_feature_list_noconst())
    {
        if (index < position_in_list)
        {
            // new Pypiyj = dypi_by_dxv . Pxyj, stored transposed:
            // size is FEATURE_STATE_SIZE(old) by FEATURE_STATE_SIZE(new)
            MatrixFixed other_Pxy = other.get_Pxy();
            MatrixFixed newPyjypi_to_store = (dypi_by_dxv * other_Pxy).Transpose();
            matrix_block_list.Add(newPyjypi_to_store);
        }
        index++;
    }

    known_feature_label = -1;
}
// Function to find a region in an image guided by current motion prediction
// which doesn't overlap existing features
public static bool FindNonOverlappingRegion(Scene_Single scene, Vector local_u,
    float delta_t,
    Partially_Initialised_Feature_Measurement_Model default_feature_type_for_initialisation,
    uint camera_width, uint camera_height, uint BOXSIZE,
    ref int init_feature_search_ustart, ref int init_feature_search_vstart,
    ref int init_feature_search_ufinish, ref int init_feature_search_vfinish,
    uint FEATURE_INIT_STEPS_TO_PREDICT, float FEATURE_INIT_DEPTH_HYPOTHESIS,
    Random rnd)
{
    ThreeD_Motion_Model threed_motion_model = (ThreeD_Motion_Model)scene.get_motion_model();

    // Predict the vehicle state a few steps into the future
    Vector local_xv = new Vector(scene.get_xv());
    for (uint step = 0; step < FEATURE_INIT_STEPS_TO_PREDICT; step++)
    {
        scene.get_motion_model().func_fv_and_dfv_by_dxv(local_xv, local_u, delta_t);
        local_xv.Update(scene.get_motion_model().get_fvRES());
    }

    // Camera pose at the predicted state
    threed_motion_model.func_xp(local_xv);
    Vector local_xp = new Vector(threed_motion_model.get_xpRES());
    threed_motion_model.func_r(local_xp);
    Vector3D rW = threed_motion_model.get_rRES();
    threed_motion_model.func_q(local_xp);
    Quaternion qWR = threed_motion_model.get_qRES();

    // Hypothesise a point at the default depth straight ahead of the camera:
    // yW = rW + RWR hR
    Vector3D hR = new Vector3D(0.0f, 0.0f, FEATURE_INIT_DEPTH_HYPOTHESIS);
    // Used Inverse + transpose because this was't compiling the normal way
    Vector3D yW = new Vector3D(rW + qWR.RotationMatrix() * hR);

    // Then project that point into the current camera position
    scene.get_motion_model().func_xp(scene.get_xv());
    Fully_Initialised_Feature_Measurement_Model fully_init_fmm =
        (Fully_Initialised_Feature_Measurement_Model)
        (default_feature_type_for_initialisation.more_initialised_feature_measurement_model);
    Vector yWVNL = yW.GetVNL3();
    fully_init_fmm.func_hi_and_dhi_by_dxp_and_dhi_by_dyi(yWVNL, scene.get_motion_model().get_xpRES());

    // Now, this defines roughly how much we expect an initialised feature to move
    float suggested_u = fully_init_fmm.get_hiRES()[0];
    float suggested_v = fully_init_fmm.get_hiRES()[1];
    float predicted_motion_u = camera_width / 2.0f - suggested_u;
    float predicted_motion_v = camera_height / 2.0f - suggested_v;

    // So, the limits of a "safe" region within which we can initialise
    // features so that they end up staying within the screen
    // (Making the approximation that the whole screen moves like the
    // centre point)
    int safe_feature_search_ustart = (int)(-predicted_motion_u);
    int safe_feature_search_vstart = (int)(-predicted_motion_v);
    int safe_feature_search_ufinish = (int)(camera_width - predicted_motion_u);
    int safe_feature_search_vfinish = (int)(camera_height - predicted_motion_v);

    // Clamp so that a BOXSIZE patch always fits inside the image
    int margin = (int)((BOXSIZE - 1) / 2) + 1;
    if (safe_feature_search_ustart < margin)
        safe_feature_search_ustart = margin;
    if (safe_feature_search_ufinish > (int)camera_width - margin)
        safe_feature_search_ufinish = (int)camera_width - margin;
    if (safe_feature_search_vstart < margin)
        safe_feature_search_vstart = margin;
    if (safe_feature_search_vfinish > (int)camera_height - margin)
        safe_feature_search_vfinish = (int)camera_height - margin;

    return FindNonOverlappingRegionNoPredict(safe_feature_search_ustart,
        safe_feature_search_vstart, safe_feature_search_ufinish,
        safe_feature_search_vfinish, scene,
        ref init_feature_search_ustart, ref init_feature_search_vstart,
        ref init_feature_search_ufinish, ref init_feature_search_vfinish,
        rnd);
}
/// <summary>
/// Function to find non-overlapping region over without prediction;
/// this is really the service function called by both versions above.
/// </summary>
/// <param name="safe_feature_search_ustart">left bound of the usable image area</param>
/// <param name="safe_feature_search_vstart">top bound of the usable image area</param>
/// <param name="safe_feature_search_ufinish">right bound of the usable image area</param>
/// <param name="safe_feature_search_vfinish">bottom bound of the usable image area</param>
/// <param name="scene">the SLAM map with the currently known features</param>
/// <param name="init_feature_search_ustart">left bound of the chosen region (output)</param>
/// <param name="init_feature_search_vstart">top bound of the chosen region (output)</param>
/// <param name="init_feature_search_ufinish">right bound of the chosen region (output)</param>
/// <param name="init_feature_search_vfinish">bottom bound of the chosen region (output)</param>
/// <param name="rnd">random number generator for picking candidate regions</param>
/// <returns>true if a search region clear of known features was found; the
/// init_feature_search_* parameters are then set to its bounds</returns>
public static bool FindNonOverlappingRegionNoPredict(
    int safe_feature_search_ustart, int safe_feature_search_vstart,
    int safe_feature_search_ufinish, int safe_feature_search_vfinish,
    Scene_Single scene,
    ref int init_feature_search_ustart, ref int init_feature_search_vstart,
    ref int init_feature_search_ufinish, ref int init_feature_search_vfinish,
    Random rnd)
{
    int INIT_FEATURE_SEARCH_WIDTH = 80;
    int INIT_FEATURE_SEARCH_HEIGHT = 60;

    // Check that we've got some room for manouevre
    if ((safe_feature_search_ufinish - safe_feature_search_ustart <= INIT_FEATURE_SEARCH_WIDTH) ||
        (safe_feature_search_vfinish - safe_feature_search_vstart <= INIT_FEATURE_SEARCH_HEIGHT))
    {
        return false;
    }

    int NUMBER_OF_RANDOM_INIT_FEATURE_SEARCH_REGION_TRIES = 5;
    int FEATURE_SEPARATION_MINIMUM = (int)Camera_Constants.BOXSIZE * 3;

    // Build vectors of the predicted image positions of the fully-initialised
    // features so we only have to work them out once
    ArrayList u_array = new ArrayList();
    ArrayList v_array = new ArrayList();
    for (int j = 0; j < scene.get_feature_list().Count; j++)
    {
        Feature it = (Feature)(scene.get_feature_list())[j];
        if (!it.get_feature_measurement_model().fully_initialised_flag)
            continue;

        Fully_Initialised_Feature_Measurement_Model fifmm =
            (Fully_Initialised_Feature_Measurement_Model)(it.get_feature_measurement_model());
        fifmm.func_hi_and_dhi_by_dxp_and_dhi_by_dyi(it.get_y(), scene.get_motion_model().get_xpRES());

        // Check that this is not a feature behind the camera
        if (it.get_feature_measurement_model().feature_graphics_type == "THREED_POINT")
        {
            fifmm.func_zeroedyigraphics_and_Pzeroedyigraphics(it.get_y(), scene.get_xv(),
                scene.get_Pxx(), it.get_Pxy(), it.get_Pyy());
            if (fifmm.get_zeroedyigraphicsRES()[2] > 0)
            {
                u_array.Add(fifmm.get_hiRES()[0]);
                v_array.Add(fifmm.get_hiRES()[1]);
            }
        }
    }

    // Try a few random regions and accept the first one far enough away from
    // every known feature.
    // BUG FIX: the original returned the INVERSE result -- it reported failure
    // when the first random region was already clear, and reported success
    // when every try overlapped a feature. We now track explicitly whether a
    // clear region was found.
    bool clear_region_found = false;
    int tries = 0;
    while (tries < NUMBER_OF_RANDOM_INIT_FEATURE_SEARCH_REGION_TRIES)
    {
        int u_offset = (int)((safe_feature_search_ufinish - safe_feature_search_ustart
                              - INIT_FEATURE_SEARCH_WIDTH) * (rnd.Next(10000) / 10000.0f));
        int v_offset = (int)((safe_feature_search_vfinish - safe_feature_search_vstart
                              - INIT_FEATURE_SEARCH_HEIGHT) * (rnd.Next(10000) / 10000.0f));

        init_feature_search_ustart = safe_feature_search_ustart + u_offset;
        init_feature_search_ufinish = init_feature_search_ustart + INIT_FEATURE_SEARCH_WIDTH;
        init_feature_search_vstart = safe_feature_search_vstart + v_offset;
        init_feature_search_vfinish = init_feature_search_vstart + INIT_FEATURE_SEARCH_HEIGHT;

        bool found_a_feature_in_region_flag = false;
        // These arrays are the same size
        for (int j = 0; j < u_array.Count; j++)
        {
            float uit = (float)u_array[j];
            float vit = (float)v_array[j];
            if ((uit >= init_feature_search_ustart - FEATURE_SEPARATION_MINIMUM) &&
                (uit < init_feature_search_ufinish + FEATURE_SEPARATION_MINIMUM) &&
                (vit >= init_feature_search_vstart - FEATURE_SEPARATION_MINIMUM) &&
                (vit < init_feature_search_vfinish + FEATURE_SEPARATION_MINIMUM))
            {
                found_a_feature_in_region_flag = true;
                break;
            }
        }

        if (!found_a_feature_in_region_flag)
        {
            clear_region_found = true;
            break;
        }
        tries++;
    }

    return clear_region_found;
}
/// <summary>
/// Set display virtual camera parameters to match those of the real camera
/// being used in MonoSLAM, and set up the initial virtual-camera poses for
/// the two 3D views.
/// </summary>
public void SetUp3DDisplays()
{
    Partially_Initialised_Feature_Measurement_Model default_pifmm =
        monoslaminterface.GetDefaultFeatureTypeForInitialisation();
    if (default_pifmm == null)
    {
        Debug.WriteLine("No partially initialised feature measurement model found");
        return;
    }

    // Copy the camera calibration parameters for the graphics
    Line_Init_Wide_Point_Feature_Measurement_Model default_wide_pifmm =
        (Line_Init_Wide_Point_Feature_Measurement_Model)default_pifmm;
    Graphics_Fku = default_wide_pifmm.get_camera().Fku();
    Graphics_Fkv = default_wide_pifmm.get_camera().Fkv();
    Graphics_U0 = default_wide_pifmm.get_camera().U0();
    Graphics_V0 = default_wide_pifmm.get_camera().V0();
    Graphics_Kd1 = default_wide_pifmm.get_camera().Kd1();

    // First display: external 3D view. (The GLOW widget setup from the
    // original C++ code has not been ported yet; the widget creation and
    // SetCameraParameters/SetCameraPositionOrientation calls stay disabled.)

    // Start position for the GL camera in the 3D tool: x, y, z position...
    Vector rthreed = new Vector(0.0f, 0.2f, -1.5f);
    // ...and camera orientation in my normal coordinate system
    // (z forward, y up, x left)
    Quaternion qWRthreed = new Quaternion(0.0f, 0.0f, 0.0f, 1.0f);
    // We need to adjust by a rotation of pi about the y axis to fit the GL
    // coordinate frame (z backward, y up, x right)
    Quaternion qROthreed = new Quaternion(0.0f, 1.0f, 0.0f, 0.0f);
    Quaternion qWOthreed = qWRthreed.Multiply(qROthreed);
    //threedtool.SetCameraPositionOrientation(rthreed, qWOthreed);

    // Second 3D display: images and augmented reality. Set up the initial
    // state of its virtual camera to match the state vector.
    Scene_Single scene = monoslaminterface.GetScene();
    if (scene == null)
    {
        Debug.WriteLine("No scene object defined");
        return;
    }

    Vector v = scene.get_xv();
    if (v == null)
    {
        Debug.WriteLine("Scene xp not defined");
        return;
    }

    scene.get_motion_model().func_xp(v);
    ThreeD_Motion_Model threed_motion_model = (ThreeD_Motion_Model)scene.get_motion_model();
    Vector3D r = threed_motion_model.get_rRES();
    Quaternion qWR = threed_motion_model.get_qRES();

    // q is qWR between world frame and Scene robot frame; again rotate by pi
    // about the y axis to fit the GL coordinate frame
    Quaternion qRO = new Quaternion(0.0f, 1.0f, 0.0f, 0.0f);
    Quaternion qWO = qWR.Multiply(qRO);
    //image_threedtool.SetCameraPositionOrientation(r, qWO);
}
/**************************Initialise Known Features**************************/

/// <summary>
/// Initialise the Scene_Single class with some known features, read from the
/// Settings. Each known feature has its own section, starting with
/// <code>[KnownFeature1]</code> and counting upwards. The feature type is
/// identified with the entry <code>FeatureMeasurementModel=</code>. Further
/// settings are loaded by the feature measurement model itself.
/// </summary>
/// <param name="settings">the settings to read the known features from</param>
/// <param name="model_creator">factory used to build feature measurement models</param>
/// <param name="sim_or_rob">simulation or robot interface used to read feature data</param>
/// <param name="scene">the SLAM map to add the features to</param>
/// <param name="path">path prefix for feature data files</param>
/// <param name="MAXIMUM_ANGLE_DIFFERENCE">angle limit passed to the model creator</param>
/// <returns>the number of features successfully initialised</returns>
public static uint initialise_known_features(Settings settings,
    Feature_Measurement_Model_Creator model_creator, Sim_Or_Rob sim_or_rob,
    Scene_Single scene, String path, float MAXIMUM_ANGLE_DIFFERENCE)
{
    uint num_features = 0;

    // Step through the section names [KnownFeature1], [KnownFeature2], ...
    for (uint feature_no = 1; ; feature_no++)
    {
        String section_name = "KnownFeature" + Convert.ToString(feature_no);
        Settings.Section section = settings.get_section(section_name);

        // Does this section exist? If not, we have read them all
        if (section == null)
            return num_features;

        ArrayList values = section.get_entry("FeatureMeasurementModel");
        if (values == null)
        {
            Debug.WriteLine("No FeatureMeasurementModel entry under the section [" +
                            section_name + "] in initalisation file.");
            continue;
        }

        String type = (String)values[0];
        Feature_Measurement_Model f_m_m = model_creator.create_model(type,
            scene.get_motion_model(), MAXIMUM_ANGLE_DIFFERENCE);
        if (f_m_m == null)
        {
            Debug.WriteLine("Unable to create a feature measurement model of type " +
                            type + " as requested in initalisation file.");
            continue;
        }

        // Initialise the feature measurement model with any settings
        f_m_m.read_parameters(settings);

        // Read the feature state
        Vector yi = new Vector(3);
        Vector xp_orig = new Vector(7);
        f_m_m.read_initial_state(section, yi, xp_orig);

        // Initialise the feature
        classimage_mono identifier = sim_or_rob.initialise_known_feature(f_m_m, yi, section, path);
        if (identifier == null)
        {
            Debug.WriteLine("Trouble reading known feature " + section_name + " : skipping.");
        }
        else
        {
            scene.add_new_known_feature(identifier, yi, xp_orig, f_m_m, feature_no);
            Debug.WriteLine("Added known feature " + Convert.ToString(feature_no));
            num_features++;
        }
    }
}