Example #1
        public override void func_zeroedyigraphics_and_Pzeroedyigraphics(Vector yi, Vector xv,
	                    MatrixFixed Pxx, MatrixFixed Pxyi, MatrixFixed Pyiyi)
        {
            ((Wide_Camera_Point_Feature_Measurement_Model)wide_model).threed_motion_model.func_xp(xv);

            // In this case (where the feature state is the same as the graphics
            // state) zeroedyigraphics is the same as zeroedyi
            func_zeroedyi_and_dzeroedyi_by_dxp_and_dzeroedyi_by_dyi(yi, ((Wide_Camera_Point_Feature_Measurement_Model)wide_model).threed_motion_model.get_xpRES());
            zeroedyigraphicsRES.Update(zeroedyiRES);

            MatrixFixed dzeroedyigraphics_by_dxv = dzeroedyi_by_dxpRES * ((Wide_Camera_Point_Feature_Measurement_Model)wide_model).threed_motion_model.get_dxp_by_dxvRES();

            PzeroedyigraphicsRES.Update(dzeroedyigraphics_by_dxv * Pxx * dzeroedyigraphics_by_dxv.Transpose() +
                                        dzeroedyi_by_dyiRES * Pxyi.Transpose() * dzeroedyigraphics_by_dxv.Transpose() +
                                        dzeroedyigraphics_by_dxv * Pxyi * dzeroedyi_by_dyiRES.Transpose() +
                                        dzeroedyi_by_dyiRES * Pyiyi * dzeroedyi_by_dyiRES.Transpose());
        }
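The covariance update above is the standard Jacobian propagation of the joint robot/feature covariance into the zeroed-graphics frame. Writing J_x = dzeroedyi_by_dxp * dxp_by_dxv and J_y = dzeroedyi_by_dyi (shorthand introduced here, not names from the code), the expression being assembled is:

\[
P_{z_i} = J_x P_{xx} J_x^{T} + J_y P_{x y_i}^{T} J_x^{T} + J_x P_{x y_i} J_y^{T} + J_y P_{y_i y_i} J_y^{T}
\]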
Example #2
        /// <summary>
        /// Step the MonoSLAM application on by one frame. This should be called every time
        /// a new frame is captured (and care should be taken to avoid skipping frames).
        /// Before calling this function, Scene_Single::load_new_image() should be called
        /// (e.g. using
        /// <code>monoslaminterface.GetRobotNoConst()->load_new_image()</code>), since the
        /// camera image is no longer passed to GoOneStep() directly.
        /// 
        /// GoOneStep() performs the following processing steps:
        /// - Kalman filter prediction step (by calling Kalman::predict_filter_fast(),
        ///   with a zero control vector)
        /// - Select a set of features to make measurements from (set by
        ///   SetNumberOfFeaturesToSelect())
        /// - Predict the locations of, and make measurements of, those features
        /// - Kalman filter update step
        /// - Delete any bad features (those that have repeatedly failed to be matched)
        /// - If we are not currently initialising enough new features, and the camera is
        ///   translating, and <code>currently_mapping_flag</code> is set, initialise a new
        ///   feature somewhere sensible
        /// - Update the partially-initialised features
        /// </summary>
        /// <param name="img">camera image</param>
        /// <param name="delta_t">The time between frames in seconds</param>
        /// <param name="currently_mapping_flag">Set to be true if new features should be detected and added to the map.</param>
        /// <returns>Currently always returns true</returns>
        public bool GoOneStep(float delta_t, bool currently_mapping_flag)
        {
            if (delta_t > 0)
            {
                // update the integral image for use in feature detection
                //img.updateIntegralImage();

                // nullify image selection
                robot.nullify_image_selection();

                init_feature_search_region_defined_flag = false;

                // Control vector of accelerations
                Vector u = new Vector(3);
                u.Fill(0.0f);

                sim_or_rob.set_control(u, delta_t);

                // Record the current position so that I can estimate velocity
                // (We can guarantee that the state vector has position; we can't 
                // guarantee that it has velocity.)
                Vector xv = scene.get_xv();
                scene.get_motion_model().func_xp(xv);
                Vector prev_xp_pos = (scene.get_motion_model().get_xpRES()).Extract(3);

                // Prediction step
                if (currently_mapping_flag)
                    kalman.predict_filter_fast(scene, u, delta_t);

                // if features are not seen the first time try a few more times
                int tries = 0;
                number_of_visible_features = 0;
                while (((tries == 0) || (scene.get_no_selected() < 2)) && (tries < 5))
                {
                    number_of_visible_features = scene.auto_select_n_features(NUMBER_OF_FEATURES_TO_SELECT);

                    if (scene.get_no_selected() > 0)
                    {
                        //scene.predict_measurements(); // commented out in original code                        

                        number_of_matched_features = (uint)SceneLib.make_measurements(scene, sim_or_rob, rnd);

                        if (scene.get_successful_measurement_vector_size() != 0)
                        {
                            // this function is the slowest part of the algorithm
                            kalman.total_update_filter_slow(scene);

                            scene.normalise_state();
                        }

                    }

                    tries++;
                }


                if (currently_mapping_flag)
                    scene.delete_bad_features();

                // Let's enforce symmetry of covariance matrix...
                // Add to transpose and divide by 2                
                uint tot_state_size = scene.get_total_state_size();
                MatrixFixed Pxx = new MatrixFixed(tot_state_size, tot_state_size);
                scene.construct_total_covariance(ref Pxx);
                MatrixFixed PxxT = Pxx.Transpose();                

                Pxx.Update(Pxx * 0.5f + PxxT * 0.5f);
                scene.fill_covariances(Pxx);
                
                // Look at camera speed estimate
                // Get the current position and estimate the speed from it
                xv = scene.get_xv();
                scene.get_motion_model().func_xp(xv);
                Vector xp_pos = scene.get_motion_model().get_xpRES().Extract(3);
                velocity = (xp_pos - prev_xp_pos) / delta_t;
                speed = (float)Math.Sqrt(velocity.SquaredMagnitude());

                // This section of code is a workaround for a situation where
                // the camera estimate suddenly shoots off at high speed, presumably
                // due to a bug in the state update.  If the camera suddenly accelerates
                // at a high rate, this just sets the state back to the previous one.
                if (prev_speed == 0) 
                    prev_speed = speed;
                else
                {
                    float speed_change = speed / prev_speed;
                    if ((speed > 1) && (speed_change > 1.2) && (prev_xv != null))
                    {
                        xv.Update(prev_xv);
                    }
                }
                prev_speed = speed;
                if (prev_xv != null)  // TODO: minor bug here with vector != operator
                    prev_xv.Update(xv);
                else
                    prev_xv = new Vector(xv);

                if (currently_mapping_flag)
                {
                    if (speed > 0.2)
                    {
                        if ((number_of_visible_features < NUMBER_OF_FEATURES_TO_KEEP_VISIBLE) &&
                            (scene.get_feature_init_info_vector().Count < (uint)(MAX_FEATURES_TO_INIT_AT_ONCE)))
                        {
                            // if the number of features is low make more attempts
                            // to initialise new ones
                            tries = 1;
                            if (number_of_visible_features < 8) tries = 2;
                            if (number_of_visible_features < 5) tries = 3;
                            for (int i = 0; i < tries; i++) AutoInitialiseFeature(u, delta_t);
                        }
                    }
                }

                MatchPartiallyInitialisedFeatures();                

                recordCameraHistory();
                 
            }
            return true;
        }
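A minimal per-frame calling sketch, assuming GoOneStep() and GetRobotNoConst() are exposed on a MonoSLAMInterface-style wrapper as described in the summary comment above; the monoslaminterface field, the GrabFrame() helper and the fixed frame rate are illustrative assumptions only:

        // Hypothetical per-frame driver (sketch, not part of the library):
        // only load_new_image() and GoOneStep() come from the documentation above.
        public void ProcessNextFrame(bool currently_mapping_flag)
        {
            // 1. Hand the freshly captured image to the robot before stepping the filter
            monoslaminterface.GetRobotNoConst().load_new_image(GrabFrame());

            // 2. Predict, measure, update and maintain the map for this frame
            //    (assuming a fixed 30 fps capture rate here)
            monoslaminterface.GoOneStep(1.0f / 30.0f, currently_mapping_flag);
        }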
Example #3
        //*************************Some important operations***************************


        public void zero_axes()
        {
            uint size = get_total_state_size();
            Vector x = new Vector(size);
            x.Fill(0.0f);
            MatrixFixed dxnew_by_dxold = new MatrixFixed(size, size);
            dxnew_by_dxold.Fill(0.0f);

            // We form the new state and Jacobian of the new state w.r.t. the old state
            uint state_position = 0;
  
            // Robot part
            motion_model.func_zeroedxv_and_dzeroedxv_by_dxv(xv);
            x.Update(motion_model.get_zeroedxvRES(), 0);
            dxnew_by_dxold.Update(motion_model.get_dzeroedxv_by_dxvRES(), 0, 0);

            state_position += motion_model.STATE_SIZE;

            // Each feature in turn
            uint feature_no = 0;

            // Copy these to be on the safe side
            motion_model.func_xp(xv);
            Vector local_xp = new Vector(motion_model.get_xpRES());
            //Vector local_xp = motion_model.get_xpRES();
            motion_model.func_dxp_by_dxv(xv);
            //MatrixFixed local_dxp_by_dxv = motion_model.get_dxp_by_dxvRES();
            MatrixFixed local_dxp_by_dxv = new MatrixFixed(motion_model.get_dxp_by_dxvRES());

            Feature it;
            MatrixFixed Temp_FS1;
            for (int i=0; i < feature_list.Count; i++)
            {
                it = (Feature)feature_list[i];
                it.get_feature_measurement_model().func_zeroedyi_and_dzeroedyi_by_dxp_and_dzeroedyi_by_dyi(it.get_y(), local_xp);
                x.Update(it.get_feature_measurement_model().get_zeroedyiRES(), (int)state_position);

                // Calculate dzeroedyi_by_dxv in Temp_FS1
                Temp_FS1 = it.get_feature_measurement_model().get_dzeroedyi_by_dxpRES() * local_dxp_by_dxv;

                dxnew_by_dxold.Update(Temp_FS1, (int)state_position, 0);
                dxnew_by_dxold.Update(it.get_feature_measurement_model().get_dzeroedyi_by_dyiRES(), 
                    (int)state_position, (int)state_position);

                // We also want to redefine xp_orig for this feature: the robot
                // position at which it was initialised into the map (used by
                // functions for checking visibility)
                motion_model.func_xpredef_and_dxpredef_by_dxp_and_dxpredef_by_dxpdef(
                                                        it.get_xp_orig(), local_xp);
                it.set_xp_orig(it.get_feature_measurement_model().get_motion_model().get_xpredefRES());

                feature_no++;
                state_position += it.get_feature_measurement_model().FEATURE_STATE_SIZE;
            }

            // Check we've counted properly
            //assert (feature_no == feature_list.size() && 
	        //  state_position == total_state_size);

            // Do a Jacobian transform to get the new covariance
            MatrixFixed P = new MatrixFixed(size, size);
            construct_total_covariance(ref P);

            P = dxnew_by_dxold * P * dxnew_by_dxold.Transpose();

            // Finally load the scene data structures with the new state and covariance
            fill_states(x);
            fill_covariances(P);
        }
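The loop above fills dxnew_by_dxold block by block: the zeroed robot state depends only on the old robot state, while each zeroed feature depends on the old robot state (through x_p) and on its own old value. In block form (a restatement of what the code builds, not new behaviour):

\[
\frac{\partial x_{new}}{\partial x_{old}} =
\begin{pmatrix}
\frac{\partial \hat{x}_v}{\partial x_v} & 0 & 0 & \cdots \\
\frac{\partial \hat{y}_1}{\partial x_p}\frac{\partial x_p}{\partial x_v} & \frac{\partial \hat{y}_1}{\partial y_1} & 0 & \cdots \\
\frac{\partial \hat{y}_2}{\partial x_p}\frac{\partial x_p}{\partial x_v} & 0 & \frac{\partial \hat{y}_2}{\partial y_2} & \cdots \\
\vdots & & & \ddots
\end{pmatrix},
\qquad
P_{new} = \frac{\partial x_{new}}{\partial x_{old}} \, P_{old} \, \frac{\partial x_{new}}{\partial x_{old}}^{T}
\]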
Example #4
        /// <summary>
        /// Simple overall prediction 
        /// </summary>
        /// <param name="scene"></param>
        /// <param name="u"></param>
        /// <param name="delta_t"></param>
        public void predict_filter_slow (Scene_Single scene, Vector u, float delta_t)
        {
            Debug.WriteLine("*** SLOW PREDICTION ***");

            // What we need to do for the prediction:
     
            //    Calculate f and grad_f_x
            //    Calculate Q
            //    Form x(k+1|k) and P(k+1|k)

            int size = (int)scene.get_total_state_size();

            // First form original total state and covariance
            Vector x = new Vector(size);
            MatrixFixed P = new MatrixFixed(size, size);
            scene.construct_total_state_and_covariance(ref x, ref P);

            // Make model calculations: store results in RES matrices
            Vector xv = scene.get_xv();
            //Vector xv = new Vector(scene.get_xv());
            scene.get_motion_model().func_fv_and_dfv_by_dxv(xv, u, delta_t);
            scene.get_motion_model().func_Q(scene.get_xv(), u, delta_t);

            // Find new state f
            Vector f = new Vector(size);

            // Feature elements of f are the same as x 
            f.Update(x);
            f.Update(scene.get_motion_model().get_fvRES(), 0);

            // Find new P

            // Since most elements of df_by_dx are zero... 
            MatrixFixed df_by_dx = new MatrixFixed(size, size);
            df_by_dx.Fill(0.0f);

            // Fill the rest of the elements of df_by_dx: 1 on diagonal for features
            for (int i = (int)scene.get_motion_model().STATE_SIZE; i < df_by_dx.Rows; i++)
                df_by_dx[i,i] = 1.0f;

            df_by_dx.Update(scene.get_motion_model().get_dfv_by_dxvRES(), 0, 0);

            // Calculate the process noise
            MatrixFixed Q = new MatrixFixed(size, size);
            Q.Fill(0.0f);
            Q.Update(scene.get_motion_model().get_QxRES(), 0, 0);

            P.Update(df_by_dx * P * df_by_dx.Transpose());

            P += Q;

            scene.fill_state_and_covariance(f, P);
        }
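In standard EKF notation the three steps listed at the top of this method are (with x_v the camera part of the state and y the map features, which the process model leaves untouched):

\[
x(k+1|k) = \begin{pmatrix} f_v(x_v, u, \Delta t) \\ y \end{pmatrix},
\qquad
\frac{\partial f}{\partial x} = \begin{pmatrix} \frac{\partial f_v}{\partial x_v} & 0 \\ 0 & I \end{pmatrix},
\qquad
P(k+1|k) = \frac{\partial f}{\partial x} \, P(k|k) \, \frac{\partial f}{\partial x}^{T} + Q
\]

where Q carries the process noise Q_x in its camera block and zeros elsewhere, exactly as df_by_dx and Q are assembled in the code above.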
Example #5
        /// <summary>
        /// Fill the noise covariance matrix Pnn: this is the covariance of 
        /// the noise vector n = (V, Omega) that gets added to the state. 
        /// Form of this could change later, but for now assume that 
        /// V and Omega are independent, and that each of their components is
        /// independent... 
        /// </summary>
        /// <param name="xv"></param>
        /// <param name="v"></param>
        /// <param name="delta_t"></param>
        public override void func_Q(Vector xv, Vector v, float delta_t)
        {
            float linear_velocity_noise_variance = 
                SD_A_component_filter * SD_A_component_filter * delta_t * delta_t;
            float angular_velocity_noise_variance =
                SD_alpha_component_filter * SD_alpha_component_filter * delta_t * delta_t;

            // Independence means that the matrix is diagonal
            MatrixFixed Pnn = new MatrixFixed(6,6);
            Pnn.Fill(0.0f);
            Pnn.Put(0, 0, linear_velocity_noise_variance);
            Pnn.Put(1, 1, linear_velocity_noise_variance);
            Pnn.Put(2, 2, linear_velocity_noise_variance);
            Pnn.Put(3, 3, angular_velocity_noise_variance);
            Pnn.Put(4, 4, angular_velocity_noise_variance);
            Pnn.Put(5, 5, angular_velocity_noise_variance);

            // Form Jacobian dxnew_by_dn
            // Is like this:
            // I * delta_t     0
            // 0               dqnew_by_dOmega
            // I               0
            // 0               I

            // Start by zeroing
            MatrixFixed dxnew_by_dn = new MatrixFixed(13,6);
            dxnew_by_dn.Fill(0.0f);

            // Fill in easy bits first
            MatrixFixed Temp33A = new MatrixFixed(3,3);
            Temp33A.SetIdentity();
  
            dxnew_by_dn.Update(Temp33A, 7, 0);
            dxnew_by_dn.Update(Temp33A, 10, 3);
            Temp33A *= delta_t;
            dxnew_by_dn.Update(Temp33A, 0, 0);

            // Tricky bit is dqnew_by_dOmega
            // Is actually the same calculation as in func_fv...
            // Since omega and Omega are additive...?
            Vector3D rold = new Vector3D(0, 0, 0);
            Vector3D vold = new Vector3D(0, 0, 0);
            Vector3D omegaold = new Vector3D(0, 0, 0);
            Quaternion qold=new Quaternion();
            extract_r_q_v_omega(xv, rold, qold, vold, omegaold); // overkill but easy
            // Fill in dqnew_by_domega = d(q x qwt)_by_dqwt . dqwt_by_domega
            // Temp44A is d(q x qwt) by dqwt
            MatrixFixed Temp44A = MatrixFixed.dq3_by_dq1(qold); 
            // Use function below for dqwt_by_domega
            MatrixFixed Temp43A = new MatrixFixed(4,3);
            dqomegadt_by_domega(omegaold, delta_t, Temp43A);
            // Multiply them together
            MatrixFixed Temp43B = Temp44A * Temp43A;
            // And then plug into Jacobian
            dxnew_by_dn.Update(Temp43B, 3, 3);

            // Finally do Q = dxnew_by_dn . Pnn . dxnew_by_dnT
            QxRES.Update( dxnew_by_dn * Pnn * dxnew_by_dn.Transpose() );

            //  cout << "QxRES" << QxRES;
        }
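Collecting the pieces, the process noise projected into the 13-dimensional state x = (r, q, v, omega) is (restating what the code assembles; n = (V, Omega) is the 6-dimensional noise vector, sigma_a = SD_A_component_filter, sigma_alpha = SD_alpha_component_filter):

\[
Q_x = \frac{\partial x_{new}}{\partial n} \, P_{nn} \, \frac{\partial x_{new}}{\partial n}^{T},
\qquad
\frac{\partial x_{new}}{\partial n} =
\begin{pmatrix}
I \,\Delta t & 0 \\
0 & \frac{\partial q_{new}}{\partial \Omega} \\
I & 0 \\
0 & I
\end{pmatrix},
\qquad
P_{nn} =
\begin{pmatrix}
\sigma_a^2 \,\Delta t^2 \, I_3 & 0 \\
0 & \sigma_\alpha^2 \,\Delta t^2 \, I_3
\end{pmatrix}
\]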
Example #6
        public void update_filter_internal_measurement_slow(Scene_Single scene, uint i)
        {
            // Size of measurement vector
            uint size = ((Internal_Measurement)(scene.internal_measurement_vector[(int)i])).get_internal_measurement_model().MEASUREMENT_SIZE;

            uint size2 = scene.get_total_state_size();   // Size of state vector

            Vector x = new Vector(size2);
            MatrixFixed P = new MatrixFixed(size2, size2);

            scene.construct_total_state_and_covariance(ref x, ref P);

            // cout << "x:" << x;
            // cout << "P:" << P;

            // 1. Form nu and dh_by_dx 
            Vector nu_tot = new Vector(size);
            MatrixFixed dh_by_dx_tot = new MatrixFixed(size, size2);
            MatrixFixed R_tot = new MatrixFixed(size, size);

            scene.construct_total_internal_measurement_stuff(nu_tot, dh_by_dx_tot, R_tot, i);

            // 2. Calculate S(k+1) 
            MatrixFixed S = new MatrixFixed(size, size);

            //MatrixFixed Tempss2 = new MatrixFixed(size, size2);
            //MatrixFixed Temps2s = new MatrixFixed(size2, size);

            MatrixFixed dh_by_dx_totT = dh_by_dx_tot.Transpose();

            S.Update(dh_by_dx_tot * P * dh_by_dx_totT);
            S += R_tot;

            // cout << "R_tot:" << R_tot;
            //  cout << "S:" << S;
            // cout << "dh_by_dx_tot:" << dh_by_dx_tot;
            // cout << "dh_by_dx_totT:" << dh_by_dx_totT;

            // 3. Calculate W(k+1) 
            Cholesky S_cholesky = new Cholesky(S);
            MatrixFixed W = P * dh_by_dx_totT * S_cholesky.Inverse();

            // cout << "W:" << W;

            // 4. Calculate x(k+1|k+1) 
            x += W * nu_tot;

            // 5. Calculate P(k+1|k+1) 
            P -= W * S * W.Transpose();

            scene.fill_state_and_covariance(x, P);

            // cout << "x after update:" << x;
            // cout << "P after update:" << P;
        }
Example #7
        /// <summary>
        /// Update the filter in a simple overall way 
        /// </summary>
        /// <param name="scene"></param>
        public void total_update_filter_slow(Scene_Single scene)
        {
            // Steps to update the total filter:     
            // 1. Form h and dh_by_dx and R(k+1) and z
            // 2. Calculate S(k+1)
            // 3. Calculate W(k+1)
            // 4. Calculate x(k+1|k+1)
            // 5. Calculate P(k+1|k+1)

            uint size = scene.get_successful_measurement_vector_size(); // Size of measurement vector
                                                  
            uint size2 = scene.get_total_state_size();                  // Size of state vector

            Vector x = new Vector(size2);
            MatrixFixed P = new MatrixFixed(size2, size2);

            scene.construct_total_state_and_covariance(ref x, ref P);

            // 1. Form nu and dh_by_dx 
            Vector nu_tot = new Vector(size);
            MatrixFixed dh_by_dx_tot = new MatrixFixed(size, size2);
            MatrixFixed R_tot = new MatrixFixed(size, size);

            scene.construct_total_measurement_stuff(nu_tot, dh_by_dx_tot, R_tot);

            // 2. Calculate S(k+1)
            MatrixFixed temp_matrix = P * dh_by_dx_tot.Transpose();  //pre-calculate to speed up subsequent stuff
            MatrixFixed S = dh_by_dx_tot * temp_matrix;
            S += R_tot;

            // 3. Calculate W(k+1) 
            Cholesky S_cholesky = new Cholesky(S);

            MatrixFixed W = temp_matrix * S_cholesky.Inverse();

            // 4. Calculate x(k+1|k+1) 
            x += W * nu_tot;

            // 5. Calculate P(k+1|k+1) 
            P -= W * S * W.Transpose();

            scene.fill_state_and_covariance(x, P);

        }
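The five numbered steps are the standard EKF update, restated here with H = dh_by_dx_tot, R = R_tot and nu = nu_tot:

\[
S = H P H^{T} + R,
\qquad
W = P H^{T} S^{-1},
\qquad
x(k+1|k+1) = x(k+1|k) + W \nu,
\qquad
P(k+1|k+1) = P(k+1|k) - W S W^{T}
\]

The Cholesky factorisation of S is used only to form S^{-1}; update_filter_internal_measurement_slow() in the previous example applies exactly the same equations to the internal measurement.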
Example #8
        /// <summary>
        /// Calculate the innovation covariance. This is the overall measurement
        /// uncertainty in the feature, a combination of the uncertainty in the robot
        /// state, the feature state, and the measurement uncertainty. This calculation is
        /// generic to all feature measurement models, but could be over-ridden if necessary
        /// (e.g. for an efficient implementation). The innovation covariance is given by
        /// \f[S_i = \frac{\partial h_i}{\partial x_v} P_{xx}
        /// \frac{\partial h_i}{\partial x_v}^T
        /// + \frac{\partial h_i}{\partial x_v} P_{xy_i}
        /// \frac{\partial h_i}{\partial y_i}^T
        /// + \frac{\partial h_i}{\partial y_i} P_{y_i x}
        /// \frac{\partial h_i}{\partial x_v}^T
        /// + \frac{\partial h_i}{\partial y_i} P_{y_i y_i}
        /// \frac{\partial h_i}{\partial y_i}^T
        /// + R_i \f]
        /// where R_i is the noise covariance of measurements (usually assumed to
        /// be diagonal with magnitude determined by the image resolution).
        /// </summary>
        /// <param name="Pxx">The covariance P_{xy_i} between the robot state x_v and the feature state y_i .</param>
        /// <param name="Pxyi">The covariance of the feature, P_{y_iy_i} </param>
        /// <param name="Pyiyi">The Jacobian \frac{\partial h_i}{\partial x_v} between the feature measurement h_i and the robot state x_v </param>
        /// <param name="dhi_by_dxv">The Jacobian  \frac{\partial h_i}{\partial y_i} between the feature measurement h_i and the feature state y_i </param>
        /// <param name="dhi_by_dyi">The innovation covariance R_i</param>
        /// <param name="Ri"></param>
        public void func_Si(MatrixFixed Pxx, 
                            MatrixFixed Pxyi,
                            MatrixFixed Pyiyi,
                            MatrixFixed dhi_by_dxv,
                            MatrixFixed dhi_by_dyi,
                            MatrixFixed Ri)
        {
            // Zero SiRES and add bits on
            SiRES.Fill(0.0f);

            SiRES += dhi_by_dxv * Pxx * dhi_by_dxv.Transpose();
  
            MatrixFixed Temp_MM1 = dhi_by_dxv * Pxyi * dhi_by_dyi.Transpose();
            SiRES += Temp_MM1;

            SiRES += Temp_MM1.Transpose();

            SiRES += dhi_by_dyi * Pyiyi * dhi_by_dyi.Transpose();

            SiRES += Ri;
        }    
Example #9
        public void func_Sv(MatrixFixed Pxx,
                            MatrixFixed dhv_by_dxv,
                            MatrixFixed Rv)
        {
            // Zero SvRES and add bits on
            SvRES.Fill(0.0f);

            SvRES += dhv_by_dxv * Pxx * dhv_by_dxv.Transpose();

            SvRES += Rv;
        }
Example #10
        /// <summary>
        /// Form the covariance matrix  Q  of the process noise associated with  x_v .
        /// </summary>
        /// <param name="xv"></param>
        /// <param name="v"></param>
        /// <param name="delta_t"></param>
        public override void func_Q(Vector xv, Vector v, float delta_t)
        {
            // Fill noise covariance matrix Pnn: this is the covariance of 
            // the noise vector (V)
            //                  (Omega)
            // that gets added to the state. 
            // Form of this could change later, but for now assume that 
            // V and Omega are independent, and that each of their components is
            // independent... 
            float linear_velocity_noise_variance =
                       SD_A_component_filter * SD_A_component_filter * delta_t * delta_t;
            float angular_velocity_noise_variance =
                       SD_alpha_component_filter * SD_alpha_component_filter * delta_t * delta_t;

            // Independence means that the matrix is diagonal
            MatrixFixed Pnn = new MatrixFixed(6, 6);
            Pnn.Fill(0.0f);
            Pnn.Put(0, 0, linear_velocity_noise_variance);
            Pnn.Put(1, 1, linear_velocity_noise_variance);
            Pnn.Put(2, 2, linear_velocity_noise_variance);
            Pnn.Put(3, 3, angular_velocity_noise_variance);
            Pnn.Put(4, 4, angular_velocity_noise_variance);
            Pnn.Put(5, 5, angular_velocity_noise_variance);

            // Form Jacobian dxnew_by_dn
            // Is like this:
            // I * delta_t     0
            // 0               dqnew_by_dOmega

            // Start by zeroing
            MatrixFixed dxnew_by_dn = new MatrixFixed(7, 6);
            dxnew_by_dn.Fill(0.0f);

            // The translation part is just I \Delta t
            MatrixFixed Temp33A = new MatrixFixed(3, 3);
            Temp33A.SetIdentity();
            Temp33A *= delta_t;
            dxnew_by_dn.Update(Temp33A, 0, 0);

            // qnew = q x \Omega \Deltat
            // dqnew_by_d\Omega = dqnew_by_d\Omega\Delta t . d\Omega\Delta t_by_d\Omega

            // Get the first part
            Vector qRXYZ = xv.Extract(4, 3);
            Quaternion qold = new Quaternion();
            qold.SetRXYZ(qRXYZ);
            MatrixFixed Temp44A = MatrixFixed.dq3_by_dq1(qold);

            // Use function below for dqwt_by_dOmega
            Vector Omega = new Vector(3);
            Omega.Fill(SD_alpha_component_filter);

            MatrixFixed Temp43A = new MatrixFixed(4, 3);
            dqomegadt_by_domega(new Vector3D(Omega), delta_t, Temp43A);
            // Multiply them together
            MatrixFixed Temp43B = Temp44A * Temp43A;
            // And then plug into Jacobian
            dxnew_by_dn.Update(Temp43B, 3, 3);

            // Finally do Q = dxnew_by_dn . Pnn . dxnew_by_dnT
            QxRES.Update(dxnew_by_dn * Pnn * dxnew_by_dn.Transpose());

        }