示例#1
0
        /// <summary>
        /// Render detected features onto the supplied image, using the
        /// requested display mode.
        /// </summary>
        /// <param name="img">mono image to draw into</param>
        /// <param name="feature_type">one of the MonoSLAM.DISPLAY_* modes</param>
        public void ShowFeatures(classimage_mono img, int feature_type)
        {
            // ellipses are only drawn when explicitly requested below
            show_ellipses = false;

            if (feature_type == MonoSLAM.DISPLAY_FEATURES)
            {
                monoslaminterface.ShowFeatures(img);
            }
            else if (feature_type == MonoSLAM.DISPLAY_ELLIPSES)
            {
                show_ellipses = true;
                monoslaminterface.ShowFeatures(img);
            }
            else if (feature_type == MonoSLAM.DISPLAY_PROBABILITIES)
            {
                monoslaminterface.showProbabilities(0, img);
            }
            else if (feature_type == MonoSLAM.DISPLAY_MAP)
            {
                // overhead map view; 2.5 x 2.5 presumably metres — TODO confirm units
                monoslaminterface.ShowOverheadView(img, 2.5f, 2.5f);
            }
        }
示例#2
0
 /// <summary>
 /// Store references to the latest camera frames and the output buffer.
 /// </summary>
 /// <param name="imageptr">mono camera image</param>
 /// <param name="imageptr_colour">colour camera image</param>
 /// <param name="outputimageptr">image used for visual output</param>
 /// <param name="show_ellipses">whether search ellipses should be overlaid</param>
 public virtual void load_new_image(classimage_mono imageptr, classimage imageptr_colour, classimage_mono outputimageptr, bool show_ellipses)
 {
     this.image         = imageptr;
     this.image_colour  = imageptr_colour;
     this.outputimage   = outputimageptr;
     this.show_ellipses = show_ellipses;
 }
示例#3
0
 /// <summary>
 /// Base implementation of the initial feature measurement; derived
 /// classes override this to perform the actual measurement.
 /// </summary>
 /// <param name="z">the location of the feature to measure</param>
 /// <param name="id">the identifier to fill with the feature measurements</param>
 /// <param name="f_m_m">the feature measurement model for this partially-initialised feature</param>
 /// <param name="patch_colour">colour of the patch, to be filled in by overrides</param>
 /// <returns>always false in this base implementation</returns>
 public virtual bool make_initial_measurement_of_feature(Vector z,
                                                         ref classimage_mono id,
                                                         Partially_Initialised_Feature_Measurement_Model f_m_m,
                                                         Vector patch_colour)
 {
     // default: no measurement is made
     return false;
 }
示例#4
0
        /// <summary>
        /// Constructor for known features. The different number of
        /// arguments differentiates it from the constructor for partially-initialised
        /// features.
        /// </summary>
        /// <param name="id">reference to the feature identifier (an image patch)</param>
        /// <param name="lab">label for this feature</param>
        /// <param name="list_pos">position of the new feature in the scene's feature list</param>
        /// <param name="scene">the scene this feature is being added to</param>
        /// <param name="y_known">the known feature state</param>
        /// <param name="xp_o">vehicle position at which the feature was acquired</param>
        /// <param name="f_m_m">the feature measurement model to use</param>
        /// <param name="k_f_l">known feature label</param>
        public Feature(classimage_mono id, uint lab, uint list_pos,
                       Scene_Single scene, Vector y_known,
                       Vector xp_o,
                       Feature_Measurement_Model f_m_m, uint k_f_l)
        {
            feature_measurement_model = f_m_m;
            feature_constructor_bookeeping();

            identifier       = id;
            label            = lab;
            position_in_list = list_pos;   // Position of new feature in list

            // Save the vehicle position where this feature was acquired
            xp_orig = new Vector(xp_o);

            // Straightforward initialisation of state and covariances:
            // the state is known, so the covariance blocks are zero-filled
            y   = y_known;
            Pxy = new MatrixFixed(scene.get_motion_model().STATE_SIZE, feature_measurement_model.FEATURE_STATE_SIZE);
            Pxy.Fill(0.0f);
            Pyy = new MatrixFixed(feature_measurement_model.FEATURE_STATE_SIZE, feature_measurement_model.FEATURE_STATE_SIZE);
            Pyy.Fill(0.0f);

            int         i = 0;
            MatrixFixed newPyjyi_to_store;

            // Allocate one cross-covariance block for every feature that comes
            // before this one in the scene's feature list
            foreach (Feature it in scene.get_feature_list_noconst())
            {
                if (i < position_in_list)
                {
                    newPyjyi_to_store = new MatrixFixed(
                        it.get_feature_measurement_model().FEATURE_STATE_SIZE,
                        feature_measurement_model.FEATURE_STATE_SIZE);

                    //add to the list
                    matrix_block_list.Add(newPyjyi_to_store);
                }

                i++;
            }

            known_feature_label = (int)k_f_l;

            // Cache the downcast of the model matching its initialisation state,
            // nulling the other so only one is ever set
            if (feature_measurement_model.fully_initialised_flag)
            {
                partially_initialised_feature_measurement_model = null;
                fully_initialised_feature_measurement_model     =
                    (Fully_Initialised_Feature_Measurement_Model)feature_measurement_model;
            }
            else
            {
                fully_initialised_feature_measurement_model     = null;
                partially_initialised_feature_measurement_model =
                    (Partially_Initialised_Feature_Measurement_Model)feature_measurement_model;
            }
        }
示例#5
0
 /// <summary>
 /// Render the stored line-history buffer into the given image, scaling
 /// the history (x axis) and line points (y axis) to the image size.
 /// </summary>
 /// <param name="img">image to draw the history into</param>
 public void show(classimage_mono img)
 {
     for (int x = 0; x < img.width; x++)
     {
         // map image column onto a history index
         int h = x * history / img.width;
         for (int y = 0; y < img.height; y++)
         {
             // map image row onto a point index along the line
             int p = y * no_of_points / img.height;
             img.image[x, y] = pixel_intensity[p, h];
         }
     }
 }
示例#6
0
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="image">The image to search within</param>
        /// <param name="patch">The image patch to search for</param>
        /// <param name="BOXSIZE">The side length of the image patch</param>
        public SearchMultipleOverlappingEllipses(classimage_mono image, classimage_mono patch, uint BOXSIZE)
        {
            m_boxsize = BOXSIZE;
            m_patch   = patch;
            m_image   = image;

            // Rather than computing a tight bounding box over all the ellipses,
            // allocate one correlation result per image pixel — simpler, at the
            // cost of some memory.
            correlation_image = new float[m_image.width, m_image.height];
        }
示例#7
0
        //**************************Write Image Patch to Disk**************************

        /// <summary>
        /// Save the currently-selected patch to a bitmap file. Does nothing if
        /// no patch location is currently selected.
        /// </summary>
        /// <param name="name">filename to save the patch as</param>
        public void write_patch(String name)
        {
            if (location_selected_flag)
            {
                // Only allocate the temporary patch image when there is actually
                // something to save (previously it was allocated unconditionally,
                // wasting an allocation when no location was selected)
                classimage_mono hip = new classimage_mono();
                hip.createImage((int)Camera_Constants.BOXSIZE, (int)Camera_Constants.BOXSIZE);

                // Copy the selected patch, centred at (uu, vv), into the save buffer
                copy_into_patch(image, hip, uu, vv);
                hip.SaveAsBitmapMono(name);
            }
        }
示例#8
0
        /// <summary>
        /// Copy the square image region centred at (uu, vv) into the supplied patch.
        /// </summary>
        /// <param name="im">the image to extract the patch from</param>
        /// <param name="patch">the destination patch</param>
        /// <param name="uu">x coordinate of the centre of the patch within the image</param>
        /// <param name="vv">y coordinate of the centre of the patch within the image</param>
        public void copy_into_patch(classimage_mono im, classimage_mono patch,
                                    uint uu, uint vv)
        {
            // half-width of the patch (assumes BOXSIZE is odd — TODO confirm)
            uint halfbox = (Camera_Constants.BOXSIZE - 1) / 2;

            for (uint r = 0; r < Camera_Constants.BOXSIZE; r++)
            {
                // source row for this patch row
                int y = (int)(vv + r - halfbox);
                for (uint c = 0; c < Camera_Constants.BOXSIZE; c++)
                {
                    int x = (int)(uu + c - halfbox);
                    patch.image[c, r] = im.image[x, y];
                }
            }
        }
示例#9
0
        /// <summary>
        /// Sample image intensities along the line between the two features and
        /// append them to the circular line-history buffer.
        /// </summary>
        /// <param name="img">current camera image to sample from</param>
        public void update(classimage_mono img)
        {
            Vector position1 = feature1.get_z();
            Vector position2 = feature2.get_z();

            // record the current 3D positions of both endpoint features
            for (int axis = 0; axis < 3; axis++)
            {
                feature_position[curr_index, 0][axis] = feature1.get_y()[axis];
                feature_position[curr_index, 1][axis] = feature2.get_y()[axis];
            }

            // offset between the two features in image coordinates
            // NOTE(review): the sampling below walks from position1 in the
            // direction (position1 - position2), i.e. away from feature2 —
            // confirm this is intended
            float dist_u = position1[0] - position2[0];
            float dist_v = position1[1] - position2[1];

            marked_for_deletion = false;
            for (int p = 0; p < no_of_points; p++)
            {
                int x = (int)(position1[0] + (p * dist_u / no_of_points));
                int y = (int)(position1[1] + (p * dist_v / no_of_points));

                if ((x > 0) && (x < img.width) &&
                    (y > 0) && (y < img.height))
                {
                    pixel_intensity[p, curr_index] = img.image[x, y];
                }
                else
                {
                    // a sample point fell outside the image: flag the line
                    marked_for_deletion = true;
                }
            }

            // advance the circular buffer index
            curr_index++;
            if (curr_index >= history)
            {
                curr_index = 0;
            }

            // saturating hit counter
            if (hits < 254)
            {
                hits++;
            }
        }
示例#10
0
        /// <summary>
        /// Initialise a known feature. In this case it is assumed that a known feature
        /// is an image patch to be loaded from a file <code>known_patch?.bmp</code>,
        /// where ? is the known feature label.
        /// </summary>
        /// <param name="fmm">feature measurement model (unused here)</param>
        /// <param name="v">feature state (unused here)</param>
        /// <param name="known_feature_label">label selecting which patch file to load</param>
        /// <param name="path">directory the patch file is loaded from</param>
        /// <returns>the loaded patch, or null if the file could not be loaded</returns>
        public override classimage_mono initialise_known_feature(Feature_Measurement_Model fmm,
                                                                 Vector v, uint known_feature_label, String path)
        {
            String name = Camera_Constants.known_point_patch_stem + known_feature_label + ".bmp";

            classimage_mono patch = new classimage_mono();
            bool loaded = patch.loadFromBitmapMono(path + name,
                                                   (int)Camera_Constants.BOXSIZE,
                                                   (int)Camera_Constants.BOXSIZE);

            return loaded ? patch : null;
        }
示例#11
0
        /// <summary>
        /// Make measurements of a feature which is represented by a set of particles.
        /// This is typically a partially-initialised feature (see
        /// Partially_Initialised_Feature_Measurement_Model), where the particles represent
        /// the probability distribution in the direction of the free parameters.
        /// </summary>
        /// <param name="patch">the identifier for this feature (an image patch)</param>
        /// <param name="particle_vector">the particles; each particle's covariance defines the region over which the feature is matched and measured</param>
        public void measure_feature_with_multiple_priors(classimage_mono patch, ArrayList particle_vector)
        {
            SearchMultipleOverlappingEllipses ellipse_search =
                new SearchMultipleOverlappingEllipses(image, patch, Camera_Constants.BOXSIZE);

            // register one search ellipse per particle
            foreach (Particle part in particle_vector)
            {
                ellipse_search.add_ellipse(part.get_SInv(), part.get_h());
            }

            ellipse_search.search();

            // copy each search result back into its corresponding particle
            // (results are stored in the same order the ellipses were added)
            for (int i = 0; i < particle_vector.Count; i++)
            {
                Particle    part  = (Particle)particle_vector[i];
                SearchDatum datum = (SearchDatum)ellipse_search.m_searchdata[i];

                if (datum.result_flag)
                {
                    // save the matched image location back into the particle
                    Vector z_local = new Vector(2);
                    z_local[0] = datum.result_u;
                    z_local[1] = datum.result_v;
                    part.set_z(z_local);
                    part.set_successful_measurement_flag(true);
                }
                else
                {
                    part.set_successful_measurement_flag(false);
                }
            }
        }
示例#12
0
        /// <summary>
        /// Initialise a known feature, in this case by loading an image file. The name
        /// of the file is read from the "Identifier" entry of the Settings Section
        /// passed to this function.
        /// </summary>
        /// <param name="fmm">feature measurement model (unused here)</param>
        /// <param name="v">feature state (unused here)</param>
        /// <param name="section">settings section holding the "Identifier" entry</param>
        /// <param name="path">directory the patch file is loaded from</param>
        /// <returns>the loaded patch, or null if the file could not be loaded</returns>
        public override classimage_mono initialise_known_feature(Feature_Measurement_Model fmm,
                                                                 Vector v, Settings.Section section,
                                                                 String path)
        {
            // the patch filename is stored under the "Identifier" key
            ArrayList values = section.get_entry("Identifier");
            String    name   = (String)values[0];

            classimage_mono patch = new classimage_mono();
            bool loaded = patch.loadFromBitmapMono(path + name,
                                                   (int)Camera_Constants.BOXSIZE,
                                                   (int)Camera_Constants.BOXSIZE);

            return loaded ? patch : null;
        }
示例#13
0
        /// <summary>
        /// Make the initial measurement of the currently-selected feature. This fills in
        /// the location and the identifier (a copy of the image patch) for the current
        /// feature. The feature location is set using set_image_selection_automatically()
        /// or manually by the user using set_image_selection(). This function just calls
        /// partially_initialise_point_feature() to fill in the measurement and identifier.
        /// </summary>
        /// <param name="z">The measurement vector to be filled in.</param>
        /// <param name="patch">The identifier (image patch) to be filled in.</param>
        /// <param name="m">The partially-initialised feature measurement model (unused here).</param>
        /// <param name="patch_colour">Filled in with the colour at the selected location.</param>
        /// <returns>true on success, or false on failure (e.g. no patch is currently selected).</returns>
        public override bool make_initial_measurement_of_feature(Vector z, ref classimage_mono patch, Partially_Initialised_Feature_Measurement_Model m, Vector patch_colour)
        {
            patch = partially_initialise_point_feature(z);

            // sample the colour at the selected location; this happens whether or
            // not a patch was produced, matching the original behaviour
            for (int c = 0; c < 3; c++)
            {
                patch_colour[c] = image_colour.image[uu, vv, c];
            }

            return patch != null;
        }
示例#14
0
File: mesh.cs  Project: kvyu168/monoslam
 /// <summary>
 /// Draw the mesh into the given image: for every visible feature, render
 /// each of its visible triangles.
 /// </summary>
 /// <param name="img">image to draw the mesh into</param>
 public void Show(classimage_mono img)
 {
     foreach (Feature feat in features)
     {
         // skip features that are not currently visible
         if (!featureVisible(feat))
         {
             continue;
         }

         foreach (model_triangle tri in feat.triangles)
         {
             if (tri.isVisible())
             {
                 tri.Show(img);
             }
         }
     }
 }
示例#15
0
        /// <summary>
        /// Make a measurement of a feature. This calls elliptical_search() to find
        /// the best match within three standard deviations of the predicted location.
        /// </summary>
        /// <param name="patch">the identifier for this feature (an image patch)</param>
        /// <param name="z">the best image location match, filled in on success</param>
        /// <param name="vz">expected image velocity of the feature — TODO confirm</param>
        /// <param name="h">the expected image location</param>
        /// <param name="S">the expected location covariance, defining the search region</param>
        /// <param name="rnd">random number source used by the search</param>
        /// <returns>true if the feature was successfully matched</returns>
        public override bool measure_feature(classimage_mono patch, ref Vector z, Vector vz, Vector h, MatrixFixed S, Random rnd)
        {
            // invert the covariance via its Cholesky decomposition
            MatrixFixed Sinv = new Cholesky(S).Inverse();

            uint u_found = 0, v_found = 0;
            bool matched = SceneLib.elliptical_search(image, patch,
                                                      h, Sinv, ref u_found, ref v_found, vz,
                                                      Camera_Constants.BOXSIZE, outputimage,
                                                      show_ellipses, calibrating, rnd);

            if (!matched)
            {
                // feature not successfully matched
                return false;
            }

            z.Put(0, (float)u_found);
            z.Put(1, (float)v_found);

            return true;
        }
示例#16
0
        /// <summary>
        /// Scan a window of the image for the patch position whose four half-box
        /// integral sums differ from each other the most — a simple interest
        /// measure computed from the image's integral image.
        /// </summary>
        /// <param name="image">the image to scan</param>
        /// <param name="ubest">x coordinate of the best patch found</param>
        /// <param name="vbest">y coordinate of the best patch found</param>
        /// <param name="evbest">score of the best patch found</param>
        /// <param name="BOXSIZE">side length of the patch</param>
        /// <param name="ustart">x coordinate of the start of the search window</param>
        /// <param name="vstart">y coordinate of the start of the search window</param>
        /// <param name="ufinish">x coordinate of the end of the search window</param>
        /// <param name="vfinish">y coordinate of the end of the search window</param>
        public static void find_best_patch_inside_region_test(
            classimage_mono image,
            ref uint ubest, ref uint vbest, ref float evbest,
            uint BOXSIZE, uint ustart, uint vstart,
            uint ufinish, uint vfinish)
        {
            // minimum score a candidate must beat before it is recorded
            long best_score = MIN_PATCH_DIFFERENCE * BOXSIZE * BOXSIZE / 2;

            for (int x = (int)ustart; x < ufinish; x++)
            {
                for (int y = (int)vstart; y < vfinish; y++)
                {
                    // bounding box of the patch centred at (x, y)
                    int left   = (int)(x - (BOXSIZE - 1) / 2);
                    int top    = (int)(y - (BOXSIZE - 1) / 2);
                    int right  = (int)(left + BOXSIZE);
                    int bottom = (int)(top + BOXSIZE);

                    // integral-image sums over the four half-boxes of the patch
                    long leftval   = image.getIntegral(left, top, x, bottom);
                    long rightval  = image.getIntegral(x, top, right, bottom);
                    long topval    = image.getIntegral(left, top, right, y);
                    long bottomval = image.getIntegral(left, y, right, bottom);

                    // sum of pairwise absolute differences between half-box sums
                    long score = Math.Abs(leftval - rightval) +
                                 Math.Abs(topval - bottomval) +
                                 Math.Abs(leftval - topval) +
                                 Math.Abs(rightval - topval) +
                                 Math.Abs(leftval - bottomval) +
                                 Math.Abs(rightval - bottomval);

                    if (score > best_score)
                    {
                        best_score = score;
                        ubest      = (uint)x;
                        vbest      = (uint)y;
                        evbest     = score;
                    }
                }
            }
        }
示例#17
0
File: mesh.cs  Project: kvyu168/monoslam
        /// <summary>
        /// Draw this triangle into the given image by joining the screen
        /// positions of its three vertex features with lines.
        /// </summary>
        /// <param name="img">image to draw into</param>
        public void Show(classimage_mono img)
        {
            // project the three vertices to integer screen coordinates
            int[,] screen_position = new int[3, 2];
            for (int v = 0; v < 3; v++)
            {
                Vector screen_pos = ((model_vertex)vertices[v]).vertex_feature.get_h();
                screen_position[v, 0] = (int)screen_pos[0];
                screen_position[v, 1] = (int)screen_pos[1];
            }

            // connect every pair of vertices (each edge is drawn twice,
            // once in each direction, matching the original behaviour)
            for (int a = 0; a < 3; a++)
            {
                for (int b = 0; b < 3; b++)
                {
                    if (a == b)
                    {
                        continue;
                    }
                    img.drawLine(screen_position[a, 0], screen_position[a, 1],
                                 screen_position[b, 0], screen_position[b, 1], 0);
                }
            }
        }
示例#18
0
        /// <summary>
        /// Creates a new image patch representing the currently-selected region,
        /// and returns its location in the parameter z. The current image patch
        /// is set using set_image_selection_automatically() or manually by the
        /// user using set_image_selection().
        /// </summary>
        /// <param name="z">the measurement vector to be filled in with (uu, vv)</param>
        /// <returns>the newly-created patch image, or null if no patch is currently selected</returns>
        public classimage_mono partially_initialise_point_feature(Vector z)
        {
            if (!location_selected_flag)
            {
                // no patch currently selected
                return null;
            }

            // allocate the patch and copy the fixated region into it
            classimage_mono hip = new classimage_mono();
            hip.createImage((int)Camera_Constants.BOXSIZE, (int)Camera_Constants.BOXSIZE);
            copy_into_patch(image, hip, uu, vv);

            // record the measurement location
            z.Put(0, uu);
            z.Put(1, vv);

            return hip;
        }
示例#19
0
 /// <summary>
 /// Base implementation of feature measurement; derived classes override
 /// this to perform the actual search.
 /// </summary>
 /// <param name="id">the identifier for this feature</param>
 /// <param name="z">the measurement of the state, filled in by overrides</param>
 /// <param name="vz">expected image velocity of the feature — TODO confirm</param>
 /// <param name="h">the expected measurement</param>
 /// <param name="S">the measurement covariance</param>
 /// <param name="rnd">random number source</param>
 /// <returns>always false in this base implementation</returns>
 public virtual bool measure_feature(classimage_mono id, ref Vector z, Vector vz, Vector h, MatrixFixed S, Random rnd)
 {
     // default: no measurement is made
     return false;
 }
示例#20
0
        /// <summary>
        /// Perform one update cycle: load the current images into the robot,
        /// compute the elapsed frame time, step the SLAM state, then manage the
        /// mapping-enable / initial-registration state machine.
        /// </summary>
        /// <param name="img">current mono camera image</param>
        /// <param name="img_colour">current colour camera image</param>
        /// <param name="output_img">image used for visual output</param>
        public void GoOneStep(classimage_mono img, classimage img_colour, classimage_mono output_img)
        {
            float delta_t;

            // half the field of view (degrees) converted to radians
            MAXIMUM_ANGLE_DIFFERENCE = 3.1415927f * (field_of_vision / 2) / 180.0f;

            //set calibration parameters
            if (calibrating)
            {
                monoslaminterface.set_camera_calibration(9e-06f, lens_distortion, lens_distortion, centre_of__distortion_x, centre_of__distortion_y, 1);
            }

            //load in the current raw image
            Robot r = monoslaminterface.GetRobot();

            // the robot calibrates whenever mapping is disabled or calibration is forced
            r.calibrating = (!enable_mapping) | calibrating;
            r.load_new_image(img, img_colour, output_img, show_ellipses);

            stopwatch.Stop();

            //get the elapsed time in seconds
            delta_t = stopwatch.ElapsedMilliseconds / 1000.0f;

            //if using the simulation set the frame rate at a fixed value
            if (simulation_mode)
            {
                delta_t = 1.0f / 20.0f;
            }

            if (delta_t > 0)
            {
                // frames per second, truncated to one decimal place
                frames_per_second = (int)((1.0f / delta_t) * 10) / 10.0f;

                //report if its taking a long time
                frame_rate_warning = false;
                if (delta_t > 0.2)
                {
                    frame_rate_warning = true;
                    Debug.WriteLine("Time between frames (sec): " + Convert.ToString(delta_t));
                }

                //update the state of the system
                monoslaminterface.GoOneStep(delta_t, enable_mapping);
            }

            // restart the timer for the next frame
            stopwatch.Reset();
            stopwatch.Start();

            // current speed, truncated to two decimal places
            speed = (int)(monoslaminterface.Speed() * 100) / 100.0f;

            // too few matched features to keep mapping: disable and re-initialise
            if ((enable_mapping) && (monoslaminterface.number_of_matched_features < 2))
            {
                enable_mapping = false;
                init();
            }

            // enough features matched continuously for over a second (and not
            // calibrating): turn mapping on; otherwise reset the registration timer
            if ((!enable_mapping) && (monoslaminterface.number_of_matched_features >= 3) && (!calibrating))
            {
                registered_seconds += delta_t;
                if (registered_seconds > 1)
                {
                    Debug.WriteLine("Ready to go");
                    enable_mapping     = true;
                    registered_seconds = 0;
                }
            }
            else
            {
                if (registered_seconds > 0)
                {
                    Debug.WriteLine("Waiting for initial registration");
                }
                registered_seconds = 0;
            }
        }
示例#21
0
        /// <summary>
        /// Normalised sum-of-squared-difference between two patches. Assumes that the
        /// images are continuous memory blocks in raster order. The two ref arguments
        /// return the standard deviations of the patches: if either of these are low,
        /// the results can be misleading.
        /// NOTE(review): the summation loops run x0copy over [0, x0lim) and y0copy over
        /// [0, y0lim) offset by (x0, y0), whereas n is (x0lim - x0) * (y0lim - y0);
        /// these agree only when x0 = y0 = 0 — confirm against callers.
        /// </summary>
        /// <param name="x0">Start x-co-ordinate of patch in first image</param>
        /// <param name="y0">Start y-co-ordinate of patch in first image</param>
        /// <param name="x0lim">End x-co-ordinate of patch in first image</param>
        /// <param name="y0lim">End y-co-ordinate of patch in first image</param>
        /// <param name="x1">Start x-co-ordinate of patch in second image</param>
        /// <param name="y1">Start y-co-ordinate of patch in second image</param>
        /// <param name="p0">First image</param>
        /// <param name="p1">Second image</param>
        /// <param name="sd0ptr">Standard deviation of patch from first image</param>
        /// <param name="sd1ptr">Standard deviation of patch from second image</param>
        /// <returns>mean squared number of standard deviations from the mean of the pixels</returns>
        public static float correlate2_warning(
            int x0, int y0,
            int x0lim, int y0lim,
            int x1, int y1,
            ref classimage_mono p0, ref classimage_mono p1,
            ref float sd0ptr, ref float sd1ptr)
        {
            // Make these int rather than unsigned int for speed
            int patchwidth = x0lim - x0;
            // NOTE(review): p0skip and p1skip are computed but never read below
            int p0skip = p0.width - patchwidth;
            int p1skip = p1.width - patchwidth;
            // running sums: pixel values, cross products and squares for both patches
            int Sg0 = 0, Sg1 = 0, Sg0g1 = 0, Sg0sq = 0, Sg1sq = 0;

            float n = (x0lim - x0) * (y0lim - y0);     // to hold total no of pixels

            float varg0 = 0.0f, varg1 = 0.0f, sigmag0 = 0.0f, sigmag1 = 0.0f, g0bar = 0.0f, g1bar = 0.0f;

            // variances, standard deviations, means
            float Sg0doub = 0.0f, Sg1doub = 0.0f, Sg0g1doub = 0.0f, Sg0sqdoub = 0.0f, Sg1sqdoub = 0.0f;

            float C = 0.0f;       // to hold the final result
            float k = 0.0f;       // to hold an intermediate result

            // at the moment the far right and bottom pixels aren't included
            int v0, v1;


            // accumulate the sums over both patches in a single pass
            for (int y0copy = y0lim - 1; y0copy >= 0; y0copy--)
            {
                int yy0 = y0 + y0copy;
                int yy1 = y1 + y0copy;
                for (int x0copy = x0lim - 1; x0copy >= 0; x0copy--)
                {
                    v0 = p0.image[x0 + x0copy, yy0];
                    v1 = p1.image[x1 + x0copy, yy1];

                    Sg0   += v0;
                    Sg1   += v1;
                    Sg0g1 += v0 * v1;
                    Sg0sq += v0 * v0;
                    Sg1sq += v1 * v1;
                }
            }


            // promote the integer sums to float for the statistics below
            Sg0doub   = Sg0;
            Sg1doub   = Sg1;
            Sg0g1doub = Sg0g1;
            Sg0sqdoub = Sg0sq;
            Sg1sqdoub = Sg1sq;

            // means of each patch
            g0bar = Sg0doub / n;
            g1bar = Sg1doub / n;

            // variances: E[g^2] - (E[g])^2
            varg0 = Sg0sqdoub / n - (g0bar * g0bar);
            varg1 = Sg1sqdoub / n - (g1bar * g1bar);

            sigmag0 = (float)Math.Sqrt(varg0);
            sigmag1 = (float)Math.Sqrt(varg1);

            // report the standard deviations back to the caller
            sd0ptr = (float)sigmag0;
            sd1ptr = (float)sigmag1;

            if (sigmag0 == 0.0f)    // special checks for this algorithm
            {                       // to avoid division by zero
                if (sigmag1 == 0.0f)
                {
                    return(0.0f);
                }
                else
                {
                    return(1.0f);
                }
            }

            if (sigmag1 == 0.0f)
            {
                return(1.0f);
            }

            // difference between the normalised means of the two patches
            k = g0bar / sigmag0 - g1bar / sigmag1;

            C = Sg0sqdoub / varg0 + Sg1sqdoub / varg1 + n * (k * k)
                - Sg0g1doub * 2.0f / (sigmag0 * sigmag1)
                - Sg0doub * 2.0f * k / sigmag0 + Sg1doub * 2.0f * k / sigmag1;

            return(C / n);     // returns mean square no of s.d. from mean of pixels
        }
示例#22
0
        /// <summary>
        /// Function to scan over (a window in an) image and find the best patch by the Shi
        /// and Tomasi criterion.
        /// Method: as described in notes from 1/7/97. Form sums in an incremental
        /// way to speed things up.
        /// </summary>
        /// <param name="image">The image to scan</param>
        /// <param name="ubest">The x-co-ordinate of the best patch</param>
        /// <param name="vbest">The y-co-ordinate of the best patch</param>
        /// <param name="evbest">The smallest eigenvalue of the best patch (larger is better)</param>
        /// <param name="BOXSIZE">The size of the patch to use</param>
        /// <param name="ustart">The x-co-ordinate of the start of the search window</param>
        /// <param name="vstart">The y-co-ordinate of the start of the search window</param>
        /// <param name="ufinish">The x-co-ordinate of the end of the search window</param>
        /// <param name="vfinish">The v-co-ordinate of the end of the search window</param>
        public static void find_best_patch_inside_region(
            classimage_mono image,
            ref uint ubest, ref uint vbest, ref float evbest,
            uint BOXSIZE, uint ustart, uint vstart,
            uint ufinish, uint vfinish)
        {
            // Check that these limits aren't too close to the image edges.
            // Note that we can't use the edge pixels because we can't form
            // gradients here.
            if (ustart < (BOXSIZE - 1) / 2 + 1)
            {
                ustart = (BOXSIZE - 1) / 2 + 1;
            }
            if (ufinish > image.width - (BOXSIZE - 1) / 2 - 1)
            {
                ufinish = (uint)(image.width - (BOXSIZE - 1) / 2 - 1);
            }
            if (vstart < (BOXSIZE - 1) / 2 + 1)
            {
                vstart = (BOXSIZE - 1) / 2 + 1;
            }
            if (vfinish > image.height - (BOXSIZE - 1) / 2 - 1)
            {
                vfinish = (uint)(image.height - (BOXSIZE - 1) / 2 - 1);
            }

            // Is there anything left to search? If not, set the score to zero and return.
            if ((vstart >= vfinish) || (ustart >= ufinish))
            {
                ubest  = ustart;
                vbest  = vstart;
                evbest = 0;
                return;
            }

            // Calculate the width we need to find gradients in.
            uint calc_width = ufinish - ustart + BOXSIZE - 1;

            // Arrays which store column sums of height BOXSIZE
            float[] CSgxsq = new float[calc_width];
            float[] CSgysq = new float[calc_width];
            float[] CSgxgy = new float[calc_width];

            // For the final sums at each position (u, v)
            float TSgxsq = 0.0f, TSgysq = 0.0f, TSgxgy = 0.0f;

            float gx, gy;
            uint  u = ustart, v = vstart;
            float eval1 = 0, eval2 = 0;

            // Initial stage: fill these sums for the first horizontal position
            uint cstart  = ustart - (BOXSIZE - 1) / 2;
            uint cfinish = ufinish + (BOXSIZE - 1) / 2;
            uint rstart  = vstart - (BOXSIZE - 1) / 2;
            uint i;
            uint c, r;

            for (c = cstart, i = 0; c < cfinish; c++, i++)
            {
                CSgxsq[i] = 0; CSgysq[i] = 0; CSgxgy[i] = 0;
                for (r = rstart; r < rstart + BOXSIZE; r++)
                {
                    gx = (image.image[c + 1, r] -
                          image.image[c - 1, r]) / 2.0f;
                    gy = (image.image[c, r + 1] -
                          image.image[c, r - 1]) / 2.0f;

                    CSgxsq[i] += gx * gx;
                    CSgysq[i] += gy * gy;
                    CSgxgy[i] += gx * gy;
                }
            }


            // Now loop through u and v to form sums
            evbest = 0;
            for (v = vstart; v < vfinish; v++)
            {
                u = ustart;

                // Form first sums for u = ustart
                TSgxsq = 0.0f;
                TSgysq = 0.0f;
                TSgxgy = 0.0f;
                for (i = 0; i < BOXSIZE; i++)
                {
                    TSgxsq += CSgxsq[i];
                    TSgysq += CSgysq[i];
                    TSgxgy += CSgxgy[i];
                }

                for (u = ustart; u < ufinish; u++)
                {
                    if (u != ustart)
                    {
                        // Subtract old column, add new one
                        TSgxsq += CSgxsq[u - ustart + BOXSIZE - 1] - CSgxsq[u - ustart - 1];
                        TSgysq += CSgysq[u - ustart + BOXSIZE - 1] - CSgysq[u - ustart - 1];
                        TSgxgy += CSgxgy[u - ustart + BOXSIZE - 1] - CSgxgy[u - ustart - 1];
                    }

                    find_eigenvalues(TSgxsq, TSgxgy, TSgysq, ref eval1, ref eval2);

                    // eval2 will be the smaller eigenvalue. Compare it with the one
                    // we've already got
                    if (eval2 > evbest)
                    {
                        ubest  = u;
                        vbest  = v;
                        evbest = eval2;
                    }
                }

                if (v != vfinish - 1)
                {
                    // Update the column sums for the next v
                    for (c = cstart, i = 0; c < cfinish; c++, i++)
                    {
                        // Subtract the old top pixel
                        gx = (image.image[c + 1, v - (BOXSIZE - 1) / 2] -
                              image.image[c - 1, v - (BOXSIZE - 1) / 2]) / 2.0f;
                        gy = (image.image[c, v - (BOXSIZE - 1) / 2 + 1] -
                              image.image[c, v - (BOXSIZE - 1) / 2 - 1]) / 2.0f;
                        CSgxsq[i] -= gx * gx;
                        CSgysq[i] -= gy * gy;
                        CSgxgy[i] -= gx * gy;

                        // Add the new bottom pixel
                        gx = (image.image[c + 1, v + (BOXSIZE - 1) / 2 + 1] -
                              image.image[c - 1, v + (BOXSIZE - 1) / 2 + 1]) / 2.0f;
                        gy = (image.image[c, v + (BOXSIZE - 1) / 2 + 1 + 1] -
                              image.image[c, v + (BOXSIZE - 1) / 2 + 1 - 1]) / 2.0f;
                        CSgxsq[i] += gx * gx;
                        CSgysq[i] += gy * gy;
                        CSgxgy[i] += gx * gy;
                    }
                }
            }
        }
示例#23
0
        /// <summary>
        /// Version to find the best n patches within an image.
        /// Not very efficiently implemented I'm afraid.
        /// Now we expect the arguments ubest, vbest, evbest to be arrays of
        /// size n.
        /// The score of a candidate position is the smaller eigenvalue of the
        /// 2x2 matrix of gradient products summed over a BOXSIZE x BOXSIZE
        /// window (a minimum-eigenvalue corner score, larger is better); the
        /// n highest-scoring mutually non-overlapping positions are returned.
        /// </summary>
        /// <param name="image">The image to scan</param>
        /// <param name="n">Number of patches</param>
        /// <param name="ubest">Array containing x-co-ordinates of n best patches</param>
        /// <param name="vbest">Array containing y-co-ordinates of n best patches</param>
        /// <param name="evbest">Array containing smallest eigenvalues of best patches (larger is better)</param>
        /// <param name="BOXSIZE">The size of the patch to use</param>
        /// <param name="ustart">The x-co-ordinate of the start of the search window</param>
        /// <param name="vstart">The y-co-ordinate of the start of the search window</param>
        /// <param name="ufinish">The x-co-ordinate of the end of the search window</param>
        /// <param name="vfinish">The y-co-ordinate of the end of the search window</param>
        public void find_best_n_patches_inside_region(
            classimage_mono image,
            uint n,
            uint[] ubest, uint[] vbest,
            float[] evbest,
            uint BOXSIZE,
            uint ustart, uint vstart,
            uint ufinish, uint vfinish)
        {
            // Check that these limits aren't too close to the image edges.
            // Note that we can't use the edge pixels because we can't form
            // gradients here.
            if (ustart < (BOXSIZE - 1) / 2 + 1)
            {
                ustart = (BOXSIZE - 1) / 2 + 1;
            }
            if (ufinish > image.width - (BOXSIZE - 1) / 2 - 1)
            {
                ufinish = (uint)(image.width - (BOXSIZE - 1) / 2 - 1);
            }
            if (vstart < (BOXSIZE - 1) / 2 + 1)
            {
                vstart = (BOXSIZE - 1) / 2 + 1;
            }
            if (vfinish > image.height - (BOXSIZE - 1) / 2 - 1)
            {
                vfinish = (uint)(image.height - (BOXSIZE - 1) / 2 - 1);
            }

            // NOTE(review): unlike the single-patch version of this search,
            // there is no early-out when the clamped window is empty
            // (ustart >= ufinish or vstart >= vfinish); in that case the
            // unsigned subtraction below would wrap around. Confirm callers
            // always supply a non-degenerate window.

            // Calculate the width we need to find gradients in.
            uint calc_width = ufinish - ustart + BOXSIZE - 1;

            // Arrays which store column sums of height BOXSIZE
            float[] CSgxsq = new float[calc_width];
            float[] CSgysq = new float[calc_width];
            float[] CSgxgy = new float[calc_width];

            // For the final sums at each position (u, v)
            float TSgxsq = 0.0f, TSgysq = 0.0f, TSgxgy = 0.0f;

            float gx, gy;
            uint  u = ustart, v = vstart;
            float eval1 = 0, eval2 = 0;

            // Initial stage: fill these sums for the first horizontal position
            uint cstart  = ustart - (BOXSIZE - 1) / 2;
            uint cfinish = ufinish + (BOXSIZE - 1) / 2;
            uint rstart  = vstart - (BOXSIZE - 1) / 2;
            uint i;
            uint c, r;

            for (c = cstart, i = 0; c < cfinish; c++, i++)
            {
                CSgxsq[i] = 0;
                CSgysq[i] = 0;
                CSgxgy[i] = 0;
                for (r = rstart; r < rstart + BOXSIZE; r++)
                {
                    // Central-difference image gradients in x and y
                    gx = (image.image[c + 1, r] - image.image[c - 1, r]) / 2.0f;
                    gy = (image.image[c, r + 1] - image.image[c, r - 1]) / 2.0f;

                    CSgxsq[i] += gx * gx;
                    CSgysq[i] += gy * gy;
                    CSgxgy[i] += gx * gy;
                }
            }

            // Corner score for every candidate position in the search window
            float[,] evarray = new float[vfinish - vstart, ufinish - ustart];

            // Now loop through u and v to form sums
            for (v = vstart; v < vfinish; v++)
            {
                u = ustart;

                // Form first sums for u = ustart
                TSgxsq = 0.0f;
                TSgysq = 0.0f;
                TSgxgy = 0.0f;
                for (i = 0; i < BOXSIZE; i++)
                {
                    TSgxsq += CSgxsq[i];
                    TSgysq += CSgysq[i];
                    TSgxgy += CSgxgy[i];
                }

                for (u = ustart; u < ufinish; u++)
                {
                    if (u != ustart)
                    {
                        /* Subtract old column, add new one */
                        TSgxsq += CSgxsq[u - ustart + BOXSIZE - 1] - CSgxsq[u - ustart - 1];
                        TSgysq += CSgysq[u - ustart + BOXSIZE - 1] - CSgysq[u - ustart - 1];
                        TSgxgy += CSgxgy[u - ustart + BOXSIZE - 1] - CSgxgy[u - ustart - 1];
                    }

                    find_eigenvalues(TSgxsq, TSgxgy, TSgysq, ref eval1, ref eval2);

                    // eval2 will be the smaller eigenvalue. Store in the array
                    evarray[v - vstart, u - ustart] = eval2;
                }

                if (v != vfinish - 1)
                {
                    // Update the column sums for the next v: slide each
                    // BOXSIZE-high column window down by one row.
                    for (c = cstart, i = 0; c < cfinish; c++, i++)
                    {
                        // Subtract the old top pixel
                        gx = (image.image[c + 1, v - (BOXSIZE - 1) / 2]
                              - image.image[c - 1, v - (BOXSIZE - 1) / 2]) / 2.0f;
                        gy = (image.image[c, v - (BOXSIZE - 1) / 2 + 1]
                              - image.image[c, v - (BOXSIZE - 1) / 2 - 1]) / 2.0f;
                        CSgxsq[i] -= gx * gx;
                        CSgysq[i] -= gy * gy;
                        CSgxgy[i] -= gx * gy;

                        // Add the new bottom pixel
                        gx = (image.image[c + 1, v + (BOXSIZE - 1) / 2 + 1]
                              - image.image[c - 1, v + (BOXSIZE - 1) / 2 + 1]) / 2.0f;
                        gy = (image.image[c, v + (BOXSIZE - 1) / 2 + 1 + 1]
                              - image.image[c, v + (BOXSIZE - 1) / 2 + 1 - 1]) / 2.0f;
                        CSgxsq[i] += gx * gx;
                        CSgysq[i] += gy * gy;
                        CSgxgy[i] += gx * gy;
                    }
                }
            }

            // Now: work out the best n patches which don't overlap each other.
            // Greedy selection: each pass scans the whole score array for the
            // best score strictly below the previous pick (next_highest),
            // rejecting positions within BOXSIZE (in both axes) of any
            // earlier pick.
            float best_so_far;
            int   xdist, ydist;
            bool  OKflag;
            float next_highest = 1000000000000.0f;

            for (i = 0; i < n; i++)
            {
                best_so_far = 0.0f;

                for (uint y = 0; y < vfinish - vstart; y++)
                {
                    for (uint x = 0; x < ufinish - ustart; x++)
                    {
                        if (evarray[y, x] > best_so_far && evarray[y, x] < next_highest)
                        {
                            // We've found a high one: check it doesn't overlap with higher ones

                            OKflag = true;
                            for (uint j = 0; j < i; j++)
                            {
                                xdist = (int)(x + ustart - ubest[j]);
                                ydist = (int)(y + vstart - vbest[j]);
                                xdist = (xdist >= 0 ? xdist : -xdist);
                                ydist = (ydist >= 0 ? ydist : -ydist);
                                if ((xdist < (int)BOXSIZE) && (ydist < (int)BOXSIZE))
                                {
                                    OKflag = false;
                                    break;
                                }
                            }
                            if (OKflag)
                            {
                                ubest[i]    = x + ustart;
                                vbest[i]    = y + vstart;
                                evbest[i]   = evarray[y, x];
                                best_so_far = evarray[y, x];
                            }
                        }
                    }
                }
                // NOTE(review): if no acceptable candidate is found on this
                // pass, ubest[i]/vbest[i]/evbest[i] keep their incoming values
                // and next_highest is then taken from that stale entry —
                // confirm callers cope with fewer than n usable patches.
                next_highest = evbest[i];
            }
        }
示例#24
0
        /// <summary>
        /// Do a search for patch in image within an elliptical region. The
        /// search region is parameterised an inverse covariance matrix (a distance of
        /// NO_SIGMA is used). The co-ordinates returned are those of centre of the patch.
        /// Positions inside the ellipse are sampled probabilistically (biased
        /// towards the centre via search_probability) rather than exhaustively,
        /// and the position with the lowest correlation score is kept.
        /// </summary>
        /// <param name="image">The image to search</param>
        /// <param name="patch">The patch to search for</param>
        /// <param name="centre">The centre of the search ellipse</param>
        /// <param name="PuInv">The inverse covariance matrix to use</param>
        /// <param name="u">The x-co-ordinate of the best patch location</param>
        /// <param name="v">The y-co-ordinate of the best patch location</param>
        /// <param name="vz">Predicted feature motion; currently only its component ratio is used, to shape the search ellipse (the positional offset is disabled — see vz_x/vz_y below)</param>
        /// <param name="uBOXSIZE">The size of the image patch (TODO: Shouldn't this be the same as the size of patch?)</param>
        /// <param name="outputimage">Image into which the ellipse boundary is drawn when show_ellipses or calibrating is set</param>
        /// <param name="show_ellipses">Whether to draw the search ellipse boundary into outputimage</param>
        /// <param name="calibrating">Also forces the ellipse boundary to be drawn</param>
        /// <param name="rnd">Random number generator driving the probabilistic sampling</param>
        /// <returns>true if a good match is found (best correlation score no greater than CORRTHRESH2 — lower scores are better), false otherwise</returns>
        public static bool elliptical_search(
            classimage_mono image,
            classimage_mono patch,
            Vector centre,                                  //1 dimensional x,y coords
            MatrixFixed PuInv,                              //2x2 matrix
            ref uint u, ref uint v,
            Vector vz,
            uint uBOXSIZE,
            classimage_mono outputimage,
            bool show_ellipses,
            bool calibrating,
            Random rnd)
        {
            float aspect;

            // Shape the ellipse by the ratio of the predicted motion
            // components, clamped to [0.5, 1.5]; default to circular (1)
            // when either component is zero.
            if ((vz[1] != 0) && (vz[0] != 0))
            {
                aspect = Math.Abs(vz[0] / vz[1]);
                if (aspect < 0.5)
                {
                    aspect = 0.5f;
                }
                if (aspect > 1.5)
                {
                    aspect = 1.5f;
                }
            }
            else
            {
                aspect = 1;
            }
            // Positional offset from the predicted motion is disabled
            // (always zero) — the commented-out expressions show the
            // original intent.
            int vz_x = 0; // (int)(vz[0] / 2);
            int vz_y = 0; // (int)(vz[1] / 2);

            // We want to pass BOXSIZE as an unsigned int since it is,
            // but if we use it in the if statements below then C++ casts the
            // whole calculation to unsigned ints, so it is never < 0!
            // Force it to get the right answer by using an int version of BOXSIZE
            int BOXSIZE = (int)uBOXSIZE;

            // The dimensions of the bounding box of the ellipse we want to search in
            uint halfwidth = (uint)(NO_SIGMA * aspect /
                                    Math.Sqrt(PuInv[0, 0] - PuInv[0, 1] * PuInv[0, 1] / PuInv[1, 1]));

            uint halfheight = (uint)(NO_SIGMA / aspect /
                                     Math.Sqrt(PuInv[1, 1] - PuInv[0, 1] * PuInv[0, 1] / PuInv[0, 0]));

            // stop the search ellipse from expanding to large sizes!
            uint MAX_SEARCH_RADIUS = uBOXSIZE * 2;

            if (halfwidth > MAX_SEARCH_RADIUS)
            {
                halfwidth = MAX_SEARCH_RADIUS;
            }
            if (halfheight > MAX_SEARCH_RADIUS)
            {
                halfheight = MAX_SEARCH_RADIUS;
            }
            // don't allow the ellipse to get too small
            uint MIN_SEARCH_RADIUS = uBOXSIZE / 2;

            if (halfwidth < MIN_SEARCH_RADIUS)
            {
                halfwidth = MIN_SEARCH_RADIUS;
            }
            if (halfheight < MIN_SEARCH_RADIUS)
            {
                halfheight = MIN_SEARCH_RADIUS;
            }

            // Round the predicted centre to the nearest pixel
            int ucentre = (int)(centre[0] + 0.5);
            int vcentre = (int)(centre[1] + 0.5);

            // Limits of search
            int urelstart  = -(int)(halfwidth);
            int urelfinish = (int)(halfwidth);
            int vrelstart  = -(int)(halfheight);
            int vrelfinish = (int)(halfheight);

            // Check these limits aren't outside the image
            // (the patch centred at any searched position must fit entirely
            // inside the image)
            if (ucentre + urelstart + vz_x - (BOXSIZE - 1) / 2 < 0)
            {
                urelstart = (BOXSIZE - 1) / 2 - ucentre - vz_x;
            }
            if (ucentre + urelfinish + vz_x - (BOXSIZE - 1) / 2 > (int)(image.width) - BOXSIZE)
            {
                urelfinish = image.width - BOXSIZE - ucentre - vz_x + (BOXSIZE - 1) / 2;
            }
            if (vcentre + vrelstart + vz_y - (BOXSIZE - 1) / 2 < 0)
            {
                vrelstart = (BOXSIZE - 1) / 2 - vcentre - vz_y;
            }
            if (vcentre + vrelfinish + vz_y - (BOXSIZE - 1) / 2 > (int)(image.height) - BOXSIZE)
            {
                vrelfinish = (int)(image.height) - BOXSIZE - vcentre - vz_y + (BOXSIZE - 1) / 2;
            }

            // Search counters
            int urel, vrel;

            // Best (lowest) correlation score seen so far; starts at a huge
            // sentinel so the first valid correlation always wins.
            float corrmax = 1000000.0f;
            float corr;

            // For passing to and_correlate2_warning
            float sdpatch = 0, sdimage = 0;

            // Do the search
            float max_dist = MAX_SEARCH_RADIUS;
            float v1 = PuInv[0, 0];
            float v2 = PuInv[0, 1];
            float v3 = PuInv[1, 1];
            float urelsq, vrelsq;
            bool  inside_ellipse;
            int   xx, yy, xx2, yy2;

            for (urel = urelstart; urel <= urelfinish; urel += 1)
            {
                urelsq = urel * urel;
                float urel2 = v1 * urelsq;
                inside_ellipse = false;
                for (vrel = vrelstart; vrel <= vrelfinish; vrel += 1)
                {
                    vrelsq = vrel * vrel;
                    // Inside the NO_SIGMA confidence ellipse when the
                    // quadratic form [urel vrel] PuInv [urel vrel]^T is
                    // below NO_SIGMA^2
                    if (urel2
                        + 2 * v2 * urel * vrel
                        + v3 * vrelsq < NO_SIGMA * NO_SIGMA)
                    {
                        if ((show_ellipses) || (calibrating))
                        {
                            // Mark the first pixel of this column inside the
                            // ellipse (its upper boundary), scaled into the
                            // output image's resolution.
                            // NOTE(review): this edge is offset by
                            // -(BOXSIZE-1)/2 while the exit edge below is
                            // not — confirm the asymmetry is intentional.
                            if (!inside_ellipse)
                            {
                                xx  = ucentre + urel + vz_x - (BOXSIZE - 1) / 2;
                                yy  = vcentre + vrel + vz_y - (BOXSIZE - 1) / 2;
                                xx2 = xx * outputimage.width / image.width;
                                yy2 = yy * outputimage.height / image.height;
                                outputimage.image[xx2, yy2] = (Byte)255;
                            }
                            inside_ellipse = true;
                        }


                        // searching within the ellipse is probablistic,
                        // using something like a gaussian distribution:
                        // search_probability(dist) is compared against a
                        // uniform draw out of 1000
                        float dist = (float)Math.Sqrt(urelsq + vrelsq) / max_dist;
                        if (rnd.Next(1000) < search_probability(dist))
                        {
                            int offset_x = ucentre + urel + vz_x;
                            int offset_y = vcentre + vrel + vz_y;

                            corr = correlate2_warning(0, 0, BOXSIZE, BOXSIZE,
                                                      offset_x - (BOXSIZE - 1) / 2,
                                                      offset_y - (BOXSIZE - 1) / 2,
                                                      ref patch, ref image, ref sdpatch, ref sdimage);

                            // Keep the lowest correlation score, but reject
                            // positions where either the patch or the image
                            // region is too uniform to give a reliable match
                            if (corr <= corrmax)
                            {
                                if (sdpatch < CORRELATION_SIGMA_THRESHOLD)
                                {
                                    // cout << "Low patch sigma." << endl;
                                }
                                else
                                if (sdimage < CORRELATION_SIGMA_THRESHOLD)
                                {
                                    // cout << "Low image sigma." << endl;
                                }
                                else
                                {
                                    corrmax = corr;
                                    u       = (uint)offset_x;
                                    v       = (uint)offset_y;
                                }
                            }


                            //show ellipses in the output image

                            /*
                             * if ((show_ellipses) || (calibrating))
                             * {
                             *  int xx = offset_x;
                             *  int yy = offset_y;
                             *  int xx2 = xx * outputimage.width / image.width;
                             *  int yy2 = yy * outputimage.height / image.height;
                             *
                             *  if (!calibrating)
                             *  {
                             *      outputimage.image[xx2, yy2, 2] = (Byte)255;
                             *  }
                             *  else
                             *  {
                             *      outputimage.image[xx2, yy2, 0] = (Byte)255;
                             *      outputimage.image[xx2, yy2, 1] = (Byte)255;
                             *      outputimage.image[xx2, yy2, 2] = 0;
                             *  }
                             * }
                             */
                        }
                    }
                    else
                    {
                        if ((show_ellipses) || (calibrating))
                        {
                            // Just left the ellipse: mark its lower boundary
                            // for this column
                            if (inside_ellipse)
                            {
                                xx  = ucentre + urel + vz_x;
                                yy  = vcentre + vrel + vz_y;
                                xx2 = xx * outputimage.width / image.width;
                                yy2 = yy * outputimage.height / image.height;
                                outputimage.image[xx2, yy2] = (Byte)255;
                            }
                            inside_ellipse = false;
                        }
                    }
                }
            }



            // Accept the match only if the best score beats the threshold
            // (lower corrmax is better)
            if (corrmax > CORRTHRESH2)
            {
                return(false);
            }

            return(true);
        }
示例#25
0
        /**************************Initialise Known Features**************************/

        /// <summary>
        /// Initialise the Scene_Single class with some known features, read from the
        /// Settings. Each known feature has its own section, starting with
        /// <code>[KnownFeature1]</code> and counting upwards. The feature type is
        /// identified with the entry <code>FeatureMeasurementModel=</code>. Further
        /// settings are loaded by the feature measurement model itself.
        /// </summary>
        /// <param name="settings">Settings object holding the [KnownFeatureN] sections</param>
        /// <param name="model_creator">Factory used to create a feature measurement model from its type name</param>
        /// <param name="sim_or_rob">Simulation or robot interface used to initialise each known feature</param>
        /// <param name="scene">The scene to which successfully-read features are added</param>
        /// <param name="path">Path passed through to the feature initialisation</param>
        /// <param name="MAXIMUM_ANGLE_DIFFERENCE">Passed through to the model creator</param>
        /// <returns>The number of features successfully added to the scene</returns>
        public static uint initialise_known_features(Settings settings,
                                                     Feature_Measurement_Model_Creator model_creator,
                                                     Sim_Or_Rob sim_or_rob,
                                                     Scene_Single scene,
                                                     String path,
                                                     float MAXIMUM_ANGLE_DIFFERENCE)
        {
            uint feature_no   = 1;
            uint num_features = 0;

            // Step through the sections [KnownFeature1], [KnownFeature2], ...
            // until one is missing. (The original do/while condition on the
            // section could never be false here: a missing section returns
            // directly from inside the loop.)
            while (true)
            {
                String section_name = "KnownFeature" + Convert.ToString(feature_no);
                Settings.Section section = settings.get_section(section_name);

                // Does this section exist? If not, we have read them all.
                if (section == null)
                {
                    return(num_features);
                }

                ArrayList values = section.get_entry("FeatureMeasurementModel");
                if (values == null)
                {
                    Debug.WriteLine("No FeatureMeasurementModel entry under the section [" +
                                    section_name + "] in initialisation file.");
                }
                else
                {
                    String type = (String)values[0];
                    Feature_Measurement_Model f_m_m =
                        model_creator.create_model(type, scene.get_motion_model(), MAXIMUM_ANGLE_DIFFERENCE);
                    if (f_m_m == null)
                    {
                        Debug.WriteLine("Unable to create a feature measurement model of type " +
                                        type + " as requested in initialisation file.");
                    }
                    else
                    {
                        // Initialise the feature measurement model with any settings
                        f_m_m.read_parameters(settings);

                        // Read the feature state (3D position and the 7-element
                        // vehicle "position state" it was acquired from)
                        Vector yi      = new Vector(3);
                        Vector xp_orig = new Vector(7);
                        f_m_m.read_initial_state(section, yi, xp_orig);

                        // Initialise the feature; a null identifier signals a
                        // read failure, which we skip without aborting the rest
                        classimage_mono identifier =
                            sim_or_rob.initialise_known_feature(f_m_m, yi, section, path);
                        if (identifier == null)
                        {
                            Debug.WriteLine("Trouble reading known feature " +
                                            section_name + " : skipping.");
                        }
                        else
                        {
                            scene.add_new_known_feature(identifier, yi, xp_orig, f_m_m, feature_no);
                            Debug.WriteLine("Added known feature " + Convert.ToString(feature_no));
                            num_features++;
                        }
                    }
                }
                feature_no++;
            }
        }
示例#26
0
        //**************************Write Image Patch to Disk**************************

        /// <summary>
        /// Save the currently-selected patch to a file.
        /// Does nothing if no location has been selected.
        /// </summary>
        /// <param name="name">File name to save the patch bitmap to</param>
        public void write_patch(String name)
        {
            if (location_selected_flag)
            {
                // Only allocate the scratch patch image when there is
                // actually something to save (the original allocated it
                // unconditionally).
                classimage_mono hip = new classimage_mono();
                hip.createImage((int)Camera_Constants.BOXSIZE, (int)Camera_Constants.BOXSIZE);

                // Copy the selected patch (centred at uu, vv) into the
                // scratch image and write it out as a bitmap
                copy_into_patch(image, hip, uu, vv);
                hip.SaveAsBitmapMono(name);
            }
        }
示例#27
0
        /// <summary>
        /// Constructor for partially-initialised features.
        /// Computes the feature's initial state estimate y from the
        /// measurement h and the current vehicle state, plus its covariance
        /// with the vehicle (Pxy), its own covariance (Pyy), and its
        /// cross-covariances with previously-added features.
        /// </summary>
        /// <param name="id">reference to the feature (image patch identifier)</param>
        /// <param name="lab">label assigned to this feature</param>
        /// <param name="list_pos">position of the new feature in the scene's feature list</param>
        /// <param name="scene">the scene the feature belongs to (supplies vehicle state and covariance)</param>
        /// <param name="h">the initial measurement of the feature</param>
        /// <param name="p_i_f_m_m">the partially-initialised feature measurement model to use</param>
        /// <param name="feature_colour">colour assigned to this feature</param>
        public Feature(classimage_mono id, uint lab, uint list_pos,
                       Scene_Single scene, Vector h,
                       Partially_Initialised_Feature_Measurement_Model p_i_f_m_m,
                       Vector feature_colour)
        {
            feature_measurement_model = p_i_f_m_m;
            partially_initialised_feature_measurement_model = p_i_f_m_m;
            fully_initialised_feature_measurement_model     = null;

            // Stuff below substituted from Feature_common
            //   Feature_common(id, lab, list_pos, scene, h);

            feature_constructor_bookeeping();

            identifier       = id;
            label            = lab;
            position_in_list = list_pos;        // Position of new feature in list
            position_in_total_state_vector = 0; // This should be set properly
            colour = feature_colour;
            //when feature is added

            // Save the vehicle position where this feature was acquired
            scene.get_motion_model().func_xp(scene.get_xv());
            //xp_orig = scene.get_motion_model().get_xpRES();
            // Copy (rather than alias) the model's RES vector, so later
            // calls into the motion model can't overwrite our saved state
            xp_orig = new Vector(scene.get_motion_model().get_xpRES());

            // Call model functions to calculate feature state, measurement noise
            // and associated Jacobians. Results are stored in RES matrices

            // First calculate "position state" and Jacobian
            scene.get_motion_model().func_xp(scene.get_xv());
            scene.get_motion_model().func_dxp_by_dxv(scene.get_xv());

            // Now ask the model to initialise the state vector and calculate Jacobians
            // so that I can go and calculate the covariance matrices
            partially_initialised_feature_measurement_model.func_ypi_and_dypi_by_dxp_and_dypi_by_dhi_and_Ri(h, scene.get_motion_model().get_xpRES());

            // State y
            //y = partially_initialised_feature_measurement_model.get_ypiRES();
            // Again copy rather than alias the model's RES buffer
            y = new Vector(partially_initialised_feature_measurement_model.get_ypiRES());

            // Temp_FS1 will store dypi_by_dxv
            MatrixFixed Temp_FS1 =
                partially_initialised_feature_measurement_model.get_dypi_by_dxpRES() *
                scene.get_motion_model().get_dxp_by_dxvRES();

            // Pxy = Pxx . dypi_by_dxv^T
            Pxy = scene.get_Pxx() * Temp_FS1.Transpose();

            // Pyy = dypi_by_dxv . Pxx . dypi_by_dxv^T
            //       + dypi_by_dhi . Ri . dypi_by_dhi^T
            Pyy = Temp_FS1 * scene.get_Pxx() * Temp_FS1.Transpose()
                  + partially_initialised_feature_measurement_model.get_dypi_by_dhiRES()
                  * partially_initialised_feature_measurement_model.get_RiRES()
                  * partially_initialised_feature_measurement_model.get_dypi_by_dhiRES().Transpose();

            // Covariances of this feature with others already in the list
            // (only features that precede this one in the list)
            int j = 0;

            foreach (Feature it in scene.get_feature_list_noconst())
            {
                if (j < position_in_list)
                {
                    // new Pypiyj = dypi_by_dxv . Pxyj
                    // Size of this is FEATURE_STATE_SIZE(new) by FEATURE_STATE_SIZE(old)
                    MatrixFixed m = it.get_Pxy();
                    MatrixFixed newPyjypi_to_store = (Temp_FS1 * m).Transpose();

                    //add to the list
                    matrix_block_list.Add(newPyjypi_to_store);
                }
                j++;
            }

            // -1 marks this as not a known (pre-mapped) feature
            known_feature_label = -1;
        }