コード例 #1
0
ファイル: widecamera.cs プロジェクト: kasertim/sentience
        /// <summary>
        /// Project a point from an ideal (Euclidean) camera frame into image
        /// coordinates, applying one-coefficient radial distortion.
        ///
        /// First the camera-relative position vector y is perspective-projected
        /// onto the undistorted image plane (origin at the optical centre):
        ///
        ///     u_c = (-F_ku * y_x / y_z,  -F_kv * y_y / y_z)
        ///
        /// The negated focal-length factors flip between image coordinates
        /// ((0,0) at the top left) and camera coordinates ((0,0) at the bottom).
        ///
        /// Radial distortion is then applied and the origin moved back to the
        /// top left of the image:
        ///
        ///     h = u_c / sqrt(1 + 2 * k1 * |u_c|^2) + (u_0, v_0)
        /// </summary>
        /// <param name="camera">3D point position relative to the camera</param>
        /// <returns>2D image location of the projected point</returns>
        public virtual Vector Project(Vector camera)
        {
            // Cache the camera-frame position so later Jacobian queries can
            // refer to the point this projection was evaluated at.
            m_last_camera = camera;

            // Perspective projection onto the undistorted, centre-origin
            // image plane.
            Vector u_c = new Vector(2);
            u_c[0] = -m_Fku * camera[0] / camera[2];
            u_c[1] = -m_Fkv * camera[1] / camera[2];

            m_last_image_centred = u_c;

            // One-coefficient radial distortion model: shrink the centred
            // location by sqrt(1 + 2*k1*r^2), then shift the origin back to
            // the image's top-left corner.
            float r2 = u_c.SquaredMagnitude();
            float shrink = (float)Math.Sqrt(1 + 2 * m_Kd1 * r2);
            return u_c / shrink + m_centre;
        }
コード例 #2
0
ファイル: monoSLAM.cs プロジェクト: kasertim/sentience
        /// <summary>
        /// Step the MonoSLAM application on by one frame. This should be called
        /// every time a new frame is captured (and care should be taken to avoid
        /// skipping frames). Before calling this function,
        /// Scene_Single::load_new_image() should be called (e.g. using
        /// <code>monoslaminterface.GetRobotNoConst()->load_new_image()</code>).
        /// 
        /// GoOneStep() performs the following processing steps:
        /// - Kalman filter prediction step (by calling Kalman::predict_filter_fast(),
        ///   with a zero control vector)
        /// - Select a set of features to make measurements from (set by
        ///   SetNumberOfFeaturesToSelect())
        /// - Predict the locations and make measurements of those features
        /// - Kalman filter update step
        /// - Delete any bad features (those that have repeatedly failed to be matched)
        /// - If we are not currently initialising enough new features, and the camera is
        ///   translating, and <code>currently_mapping_flag</code> is set, initialise a new
        ///   feature somewhere sensible
        /// - Update the partially-initialised features
        /// </summary>
        /// <param name="delta_t">The time between frames in seconds; a step is only performed when this is positive</param>
        /// <param name="currently_mapping_flag">Set to be true if new features should be detected and added to the map.</param>
        /// <returns>always true</returns>
        public bool GoOneStep(float delta_t, bool currently_mapping_flag)
        {
            // Guard: with a zero or negative frame interval there is nothing to
            // predict (and the velocity estimate below would divide by zero).
            if (delta_t > 0)
            {
                // update the integral image for use in feature detection
                //img.updateIntegralImage();

                // nullify image selection
                robot.nullify_image_selection();

                init_feature_search_region_defined_flag = false;

                // Control vector of accelerations — zero, since no odometry /
                // control input is available to this filter.
                Vector u = new Vector(3);
                u.Fill(0.0f);

                sim_or_rob.set_control(u, delta_t);

                // Record the current position so that I can estimate velocity
                // (We can guarantee that the state vector has position; we can't 
                // guarantee that it has velocity.)
                Vector xv = scene.get_xv();
                scene.get_motion_model().func_xp(xv);
                Vector prev_xp_pos = (scene.get_motion_model().get_xpRES()).Extract(3);

                // Prediction step (zero control vector u)
                if (currently_mapping_flag)
                    kalman.predict_filter_fast(scene, u, delta_t);

                // if features are not seen the first time try a few more times;
                // loop until at least 2 features are selected or 5 attempts made
                int tries = 0;
                number_of_visible_features = 0;
                while (((tries == 0) || (scene.get_no_selected() < 2)) && (tries < 5))
                {
                    number_of_visible_features = scene.auto_select_n_features(NUMBER_OF_FEATURES_TO_SELECT);

                    if (scene.get_no_selected() > 0)
                    {
                        //scene.predict_measurements(); // commented out in original code                        

                        number_of_matched_features = (uint)SceneLib.make_measurements(scene, sim_or_rob, rnd);

                        // Only run the (expensive) update when at least one
                        // measurement actually succeeded
                        if (scene.get_successful_measurement_vector_size() != 0)
                        {
                            // this function is the slowest part of the algorithm
                            kalman.total_update_filter_slow(scene);

                            scene.normalise_state();
                        }

                    }

                    tries++;
                }


                // Remove features that have repeatedly failed to be matched
                if (currently_mapping_flag)
                    scene.delete_bad_features();

                // Let's enforce symmetry of covariance matrix...
                // Add to transpose and divide by 2 (numerical error can make
                // the covariance drift away from being symmetric)
                uint tot_state_size = scene.get_total_state_size();
                MatrixFixed Pxx = new MatrixFixed(tot_state_size, tot_state_size);
                scene.construct_total_covariance(ref Pxx);
                MatrixFixed PxxT = Pxx.Transpose();                

                Pxx.Update(Pxx * 0.5f + PxxT * 0.5f);
                scene.fill_covariances(Pxx);
                
                // Look at camera speed estimate
                // Get the current position and estimate the speed from it
                // by finite-differencing against the position recorded above
                xv = scene.get_xv();
                scene.get_motion_model().func_xp(xv);
                Vector xp_pos = scene.get_motion_model().get_xpRES().Extract(3);
                velocity = (xp_pos - prev_xp_pos) / delta_t;
                speed = (float)Math.Sqrt(velocity.SquaredMagnitude());

                // This section of code is a workaround for a situation where
                // the camera suddenly shoots off at high speed, which must be
                // a bug perhaps in the state update.  If the camera suddenly accelerates
                // at a high rate then this just sets the state back to the previous one.
                if (prev_speed == 0) 
                    prev_speed = speed;
                else
                {
                    // speed_change > 1.2 means a >20% jump in one frame;
                    // combined with speed > 1 this is treated as a glitch
                    float speed_change = speed / prev_speed;
                    if ((speed > 1) && (speed_change > 1.2) && (prev_xv != null))
                    {
                        // Revert the state vector to the previous frame's value
                        xv.Update(prev_xv);
                    }
                }
                prev_speed = speed;
                // Keep a copy of the (possibly reverted) state for next frame
                if (prev_xv != null)  // TODO: minor bug here with vector != operator
                    prev_xv.Update(xv);
                else
                    prev_xv = new Vector(xv);

                // Initialise new features only when mapping and the camera is
                // translating fast enough for depth to be observable
                if (currently_mapping_flag)
                {
                    if (speed > 0.2)
                    {
                        if ((number_of_visible_features < NUMBER_OF_FEATURES_TO_KEEP_VISIBLE) &&
                            (scene.get_feature_init_info_vector().Count < (uint)(MAX_FEATURES_TO_INIT_AT_ONCE)))
                        {
                            // if the number of features is low make more attempts
                            // to initialise new ones
                            tries = 1;
                            if (number_of_visible_features < 8) tries = 2;
                            if (number_of_visible_features < 5) tries = 3;
                            for (int i = 0; i < tries; i++) AutoInitialiseFeature(u, delta_t);
                        }
                    }
                }

                // Update the partially-initialised features
                MatchPartiallyInitialisedFeatures();                

                recordCameraHistory();
                 
            }
            return true;
        }