Code example #1
File: linefeature.cs Project: iManbot/monoslam
        /// <summary>
        /// update the line history
        /// </summary>
        /// <param name="img"></param>
        public void update(classimage_mono img)
        {
            int i;
            Vector position1 = feature1.get_z();
            Vector position2 = feature2.get_z();

            // store the feature positions
            for (i = 0; i < 3; i++)
            {
                feature_position[curr_index, 0][i] = feature1.get_y()[i];
                feature_position[curr_index, 1][i] = feature2.get_y()[i];
            }

            // distances between features in image coordinates
            float dist_u = position1[0] - position2[0];
            float dist_v = position1[1] - position2[1];

            marked_for_deletion = false;
            for (i = 0; i < no_of_points; i++)
            {
                int x = (int)(position1[0] + (i * dist_u / no_of_points));
                if ((x > 0) && (x < img.width))
                {
                    int y = (int)(position1[1] + (i * dist_v / no_of_points));
                    if ((y > 0) && (y < img.height))
                        pixel_intensity[i, curr_index] = img.image[x, y];
                    else 
                        marked_for_deletion = true;
                }
                else marked_for_deletion = true;
            }
            curr_index++;
            if (curr_index >= history) curr_index = 0;
            if (hits < 254) hits++;
        }
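Taken together, update records one slot of intensity samples per frame and show renders the accumulated buffer. A minimal usage sketch (the class name linefeature is assumed from the file name; the frames sequence and the get_tracked_line accessor are hypothetical):

    linefeature line = get_tracked_line();        // hypothetical accessor
    classimage_mono history_img = new classimage_mono();
    history_img.createImage(320, 240);            // assumed display size

    foreach (classimage_mono frame in frames)     // 'frames': assumed image sequence
        line.update(frame);                       // sample intensities along the line

    line.show(history_img);                       // render the history buffer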
Code example #2
File: linefeature.cs Project: iManbot/monoslam
 /// <summary>
 /// display the line history as an image
 /// </summary>
 /// <param name="img"></param> 
 public void show(classimage_mono img)
 {
     for (int x = 0; x < img.width; x++)
     {
         int xx = x * history / img.width;
         for (int y = 0; y < img.height; y++)
         {                    
             int yy = y * no_of_points / img.height;
             img.image[x, y] = pixel_intensity[yy, xx];
         }
     }
 }
Code example #3
        //------------------------------------------------------------------------------------------------------------------------
        //update from another image
        //------------------------------------------------------------------------------------------------------------------------
        public void updateFromImage(classimage_mono img)
        {
            int x, y, xx, yy;

            for (x = 0; x < width; x++)
            {
                xx = (x * img.width) / width;
                for (y = 0; y < height; y++)
                {
                    yy          = (y * img.height) / height;
                    image[x, y] = img.image[xx, yy];
                }
            }
        }
Code example #4
        //---------------------------------------------------------------------------------------------
        //copy from an image
        //---------------------------------------------------------------------------------------------
        public void copyImage(classimage_mono source_img)
        {
            int  x, y;
            Byte v;

            for (x = 0; x < width; x++)
            {
                for (y = 0; y < height; y++)
                {
                    v           = source_img.image[x, y];
                    image[x, y] = v;
                }
            }
        }
Code example #5
File: histogram.cs Project: iManbot/monoslam
 public void Show(classimage_mono img)
 {
     int prev_x = 0, prev_y = 0;
     img.clear();
     for (int i = 0; i < no_of_levels; i++)
     {
         int x = i * img.width / no_of_levels;
         int y = img.height - (level[i] * (img.height * 8 / 10) / maxval);
         if (i > 0)
         {
             img.drawLine(prev_x, prev_y, x, y, 0);
         }
         prev_x = x;
         prev_y = y;
     }
 }
Code example #6
        public void Show(classimage_mono img)
        {
            int prev_x = 0, prev_y = 0;

            img.clear();
            for (int i = 0; i < no_of_levels; i++)
            {
                int x = i * img.width / no_of_levels;
                int y = img.height - (level[i] * (img.height * 8 / 10) / maxval);
                if (i > 0)
                {
                    img.drawLine(prev_x, prev_y, x, y, 0);
                }
                prev_x = x;
                prev_y = y;
            }
        }
Code example #7
        //------------------------------------------------------------------------------------------------------------------------
        //  sample the given image within the given bounding box
        //------------------------------------------------------------------------------------------------------------------------
        public void sampleFromImage(classimage_mono example_img, int tx, int ty, int bx, int by)
        {
            int x, y, xx, yy, dx, dy;

            dx = bx - tx;
            dy = by - ty;

            for (x = 0; x < width; x++)
            {
                xx = tx + ((x * dx) / width);

                for (y = 0; y < height; y++)
                {
                    yy = ty + ((y * dy) / height);

                    image[x, y] = example_img.image[xx, yy];
                }
            }
            //updateIntegralImage();
        }
Code example #8
File: monoSLAM.cs Project: iManbot/monoslam
 public void ShowMesh(classimage_mono img)
 {
     mesh.Show(img);
 }
Code example #9
File: monoSLAM.cs Project: iManbot/monoslam
        /// <summary>
        /// show tracked features within the given image
        /// </summary>
        /// <param name="img"></param>
        public void ShowFeatures(classimage_mono img)
        {
            int x, y, i, j;
            Vector position;
            int size_factor = 50;
            int point_radius_x = img.width / size_factor;
            if (point_radius_x < 1) point_radius_x = 1;

            int point_radius_y = img.height / size_factor;
            if (point_radius_y < 1) point_radius_y = 1;

            for (i = 0; i < scene.selected_feature_list.Count; i++)
            {
                Feature f = (Feature)scene.selected_feature_list[i];

                if ((f.get_successful_measurement_flag()) &&
                    (f.get_feature_measurement_model().fully_initialised_flag))
                {

                    Vector measured_image_position = f.get_z(); //measured position within the image (bottom up)
                    Vector map_image_position = f.get_h(); //position from within the 3D map (top down)

                    // note: only j == 0 (the measured position) is currently drawn
                    for (j = 0; j < 1; j++)
                    {
                        if (j == 0)
                            position = measured_image_position;
                        else
                            position = map_image_position;

                        //scale into the given image dimensions
                        position[0] = (position[0] * img.width / CAMERA_WIDTH);
                        position[1] = (position[1] * img.height / CAMERA_HEIGHT);

                        if ((position[0] > point_radius_x) && (position[0] < img.width - point_radius_x))
                            if ((position[1] > point_radius_y) && (position[1] < img.height - point_radius_y))
                            {
                                
                                for (x = (int)position[0] - point_radius_x; x < (int)position[0] + point_radius_x; x++)
                                    for (y = (int)position[1] - point_radius_y; y < (int)position[1] + point_radius_y; y++)
                                    {
                                        if ((x == (int)position[0]) || (y == (int)position[1]))
                                        {
                                            // draw a white crosshair through the feature position
                                            img.image[x, y] = 255;
                                        }
                                    }
                                

                                /*
                                for (x = 0; x < Camera_Constants.BOXSIZE; x++)
                                {
                                    xx = x + (int)position[0] - (int)(Camera_Constants.BOXSIZE / 2);
                                    if ((xx > 0) && (xx < img.width))
                                    {
                                        for (y = 0; y < Camera_Constants.BOXSIZE; y++)
                                        {
                                            yy = y + (int)position[1] - (int)(Camera_Constants.BOXSIZE / 2);
                                            if ((yy > 0) && (yy < img.height))
                                            {
                                                for (c = 0; c < 3; c++) img.image[xx, yy, c] = f.get_identifier().image[x, y, c];
                                            }
                                        }
                                    }
                                }
                                */

                            }
                    }
                }
            }
        }
Code example #10
File: classimage_mono.cs Project: iManbot/monoslam
        //------------------------------------------------------------------------------------------------------------------------
        //update from another image
        //------------------------------------------------------------------------------------------------------------------------
        public void updateFromImage(classimage_mono img)
        {
            int x, y, xx, yy;

            for (x = 0; x < width; x++)
            {
                xx = (x * img.width) / width;
                for (y = 0; y < height; y++)
                {
                    yy = (y * img.height) / height;
                    image[x, y] = img.image[xx, yy];
                }
            }
        }
Code example #11
File: SceneLib.cs Project: iManbot/monoslam
        /// <summary>
        /// Normalised sum-of-squared-difference between two patches. Assumes that the images
        /// are continuous memory blocks in raster order. The two pointer arguments return
        /// the standard deviations of the patches: if either of these are low, the results
        /// can be misleading.
        /// </summary>
        /// <param name="x0">Start x-co-ordinate of patch in first image</param>
        /// <param name="y0">End x-co-ordinate of patch in first image</param>
        /// <param name="x0lim">End x-co-ordinate of patch in first image</param>
        /// <param name="y0lim">End y-co-ordinate of patch in first image</param>
        /// <param name="x1">Start x-co-ordinate of patch in second image</param>
        /// <param name="y1">Start y-co-ordinate of patch in second image</param>
        /// <param name="p0">First image</param>
        /// <param name="p1">Second image</param>
        /// <param name="sd0ptr">Standard deviation of patch from first image</param>
        /// <param name="sd1ptr">Standard deviation of patch from second image</param>
        /// <returns>Mean square number of standard deviations from the mean over the patch pixels (lower means a better match)</returns>
        public static float correlate2_warning(
                          int x0, int y0,
                          int x0lim, int y0lim,
                          int x1, int y1,
                          ref classimage_mono p0, ref classimage_mono p1,
                          ref float sd0ptr, ref float sd1ptr)
        {
            // Make these int rather than unsigned int for speed
            int patchwidth = x0lim - x0;
            // row skip widths, retained from the original raster-pointer C++
            // implementation (unused in this port)
            int p0skip = p0.width - patchwidth;
            int p1skip = p1.width - patchwidth;
            int Sg0 = 0, Sg1 = 0, Sg0g1 = 0, Sg0sq = 0, Sg1sq = 0;

            float n = (x0lim - x0) * (y0lim - y0);     // to hold total no of pixels

            float varg0 = 0.0f, varg1 = 0.0f, sigmag0 = 0.0f, sigmag1 = 0.0f, g0bar = 0.0f, g1bar = 0.0f;

            // variances, standard deviations, means 
            float Sg0doub = 0.0f, Sg1doub = 0.0f, Sg0g1doub = 0.0f, Sg0sqdoub = 0.0f, Sg1sqdoub = 0.0f;

            float C = 0.0f;       // to hold the final result 
            float k = 0.0f;       // to hold an intermediate result 

            // at the moment the far right and bottom pixels aren't included 
            int v0, v1;

            
            for (int y0copy = y0lim - 1; y0copy >= 0; y0copy--)
            {
                int yy0 = y0 + y0copy;
                int yy1 = y1 + y0copy;
                for (int x0copy = x0lim - 1; x0copy >= 0; x0copy--)
                {

                    v0 = p0.image[x0 + x0copy, yy0];
                    v1 = p1.image[x1 + x0copy, yy1];
                    
                    Sg0 += v0;
                    Sg1 += v1;
                    Sg0g1 += v0 * v1;
                    Sg0sq += v0 * v0;
                    Sg1sq += v1 * v1;
                    
                }
            }
            

            Sg0doub = Sg0;
            Sg1doub = Sg1;
            Sg0g1doub = Sg0g1;
            Sg0sqdoub = Sg0sq;
            Sg1sqdoub = Sg1sq;

            g0bar = Sg0doub / n;
            g1bar = Sg1doub / n;

            varg0 = Sg0sqdoub / n - (g0bar * g0bar);
            varg1 = Sg1sqdoub / n - (g1bar * g1bar);

            sigmag0 = (float)Math.Sqrt(varg0);
            sigmag1 = (float)Math.Sqrt(varg1);

            sd0ptr = (float)sigmag0;
            sd1ptr = (float)sigmag1;

            if (sigmag0 == 0.0f)     // special checks for this algorithm 
            {                       // to avoid division by zero 
                if (sigmag1 == 0.0f)
                    return 0.0f;
                else
                    return 1.0f;
            }

            if (sigmag1 == 0.0f)
                return 1.0f;

            k = g0bar / sigmag0 - g1bar / sigmag1;

            C = Sg0sqdoub / varg0 + Sg1sqdoub / varg1 + n * (k * k)
                - Sg0g1doub * 2.0f / (sigmag0 * sigmag1)
                - Sg0doub * 2.0f * k / sigmag0 + Sg1doub * 2.0f * k / sigmag1;

            return (C / n);    // returns mean square no of s.d. from mean of pixels 
        }
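For reference, a minimal call sketch mirroring how elliptical_search below invokes this function (the containing class SceneLib is taken from the file name; the helper itself is hypothetical):

    // hypothetical helper: score a candidate location (u, v) in 'image'
    // against a boxsize x boxsize 'patch'; lower scores mean better matches
    static float ScoreCandidate(classimage_mono patch, classimage_mono image,
                                int u, int v, int boxsize)
    {
        float sd_patch = 0.0f, sd_image = 0.0f;
        float score = SceneLib.correlate2_warning(
            0, 0, boxsize, boxsize,
            u - (boxsize - 1) / 2, v - (boxsize - 1) / 2,
            ref patch, ref image, ref sd_patch, ref sd_image);

        // low standard deviations mean the score can be misleading
        if (sd_patch == 0.0f || sd_image == 0.0f)
            return float.MaxValue;
        return score;
    }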
Code example #12
File: scene_single.cs Project: iManbot/monoslam
 public void add_new_known_feature(classimage_mono id, Vector y_known, 
                                   Vector xp_o, Feature_Measurement_Model f_m_m,
                                   uint known_feature_label)
 {
     Feature nf = new Feature(id, next_free_label, (uint)feature_list.Count, 
                              this, y_known, xp_o, f_m_m, known_feature_label);
     add_new_feature_bookeeping(nf);
 }
Code example #13
File: feature.cs Project: iManbot/monoslam
        /// <summary>
        /// Constructor for known features. The different number of
        /// arguments distinguishes it from the constructor for
        /// partially-initialised features.
        /// </summary>
        /// <param name="id">reference to the feature identifier</param>
        /// <param name="?"></param>
        public Feature(classimage_mono id, uint lab, uint list_pos,
                       Scene_Single scene, Vector y_known,
                       Vector xp_o,
                       Feature_Measurement_Model f_m_m, uint k_f_l)
        {
            feature_measurement_model = f_m_m;
            feature_constructor_bookeeping();

            identifier = id;
            label = lab;
            position_in_list = list_pos;   // Position of new feature in list

            // Save the vehicle position where this feature was acquired 
            xp_orig = new Vector(xp_o);

            // Straightforward initialisation of state and covariances
            y = y_known;
            Pxy = new MatrixFixed(scene.get_motion_model().STATE_SIZE, feature_measurement_model.FEATURE_STATE_SIZE);
            Pxy.Fill(0.0f);
            Pyy = new MatrixFixed(feature_measurement_model.FEATURE_STATE_SIZE, feature_measurement_model.FEATURE_STATE_SIZE);
            Pyy.Fill(0.0f);

            int i = 0;
            MatrixFixed newPyjyi_to_store;
            foreach (Feature it in scene.get_feature_list_noconst())
            {
                if (i < position_in_list)
                {
                    newPyjyi_to_store = new MatrixFixed(
                        it.get_feature_measurement_model().FEATURE_STATE_SIZE,
                        feature_measurement_model.FEATURE_STATE_SIZE);

                    //add to the list
                    matrix_block_list.Add(newPyjyi_to_store);
                }

                i++;
            }

            known_feature_label = (int)k_f_l;

            if (feature_measurement_model.fully_initialised_flag)
            {
                partially_initialised_feature_measurement_model = null;
                fully_initialised_feature_measurement_model =
                    (Fully_Initialised_Feature_Measurement_Model)feature_measurement_model;
            }
            else
            {
                fully_initialised_feature_measurement_model = null;
                partially_initialised_feature_measurement_model =
                    (Partially_Initialised_Feature_Measurement_Model)feature_measurement_model;
            }
        }
Code example #14
File: monoSLAM_test.cs Project: iManbot/monoslam
        /// <summary>
        /// perform one processing cycle update
        /// </summary>
        /// <param name="img">raw mono image for the current frame</param>
        /// <param name="img_colour">colour version of the current frame</param>
        /// <param name="output_img">image into which tracking output is drawn</param>
        public void GoOneStep(classimage_mono img, classimage img_colour, classimage_mono output_img)
        {
            float delta_t;
            MAXIMUM_ANGLE_DIFFERENCE = 3.1415927f * (field_of_vision/2) / 180.0f;

            //set calibration parameters
            if (calibrating)
                monoslaminterface.set_camera_calibration(9e-06f, lens_distortion, lens_distortion, centre_of__distortion_x, centre_of__distortion_y, 1);

            //load in the current raw image
            Robot r = monoslaminterface.GetRobot();
            r.calibrating = (!enable_mapping) | calibrating;
            r.load_new_image(img, img_colour, output_img, show_ellipses);

            stopwatch.Stop();

            //get the elapsed time in seconds
            delta_t = stopwatch.ElapsedMilliseconds / 1000.0f;

            //if using the simulation set the frame rate at a fixed value
            if (simulation_mode) delta_t = 1.0f / 20.0f;

            if (delta_t > 0)
            {
                frames_per_second = (int)((1.0f / delta_t) * 10) / 10.0f;

                //report if it's taking a long time
                frame_rate_warning = false;
                if (delta_t > 0.2)
                {
                    frame_rate_warning = true;
                    Debug.WriteLine("Time between frames (sec): " + Convert.ToString(delta_t));
                }

                //update the state of the system
                monoslaminterface.GoOneStep(delta_t, enable_mapping);
            }

            stopwatch.Reset();
            stopwatch.Start();

            speed = (int)(monoslaminterface.Speed() * 100) / 100.0f;

            if ((enable_mapping) && (monoslaminterface.number_of_matched_features < 2))
            {
                enable_mapping = false;
                init();
            }

            if ((!enable_mapping) && (monoslaminterface.number_of_matched_features >= 3) && (!calibrating))
            {
                registered_seconds += delta_t;
                if (registered_seconds > 1)
                {
                    Debug.WriteLine("Ready to go");
                    enable_mapping = true;
                    registered_seconds = 0;
                }                
            }
            else
            {
                if (registered_seconds > 0) Debug.WriteLine("Waiting for initial registration");
                registered_seconds = 0;
            }
        }
Code example #15
File: frmMain.cs Project: iManbot/monoslam
        private void timUpdate_Tick(object sender, EventArgs e)
        {
            bool image_loaded;
            int wdth;
            int hght;

            if ((left_imageloaded) || (left_camera_running))
            {
                //get images from the two cameras
                captureCameraImages();

                //record depth images if necessary
                //recordDepthImages();

                if ((!test.frame_rate_warning) || (!test.enable_mapping))
                {
                    if (!test.enable_mapping)
                        cmdBeginTracking.Visible = true;
                    else
                        cmdBeginTracking.Visible = false;

                    lblTracking.Visible = !cmdBeginTracking.Visible;
                    lblFrameRateWarning.Visible = false;
                }
                else
                {
                    lblTracking.Visible = false;
                    lblFrameRateWarning.Visible = true;
                }


                if (simulation_mode)
                {
                    //update from loaded images
                    image_loaded = loadVideoFrame(video_path, video_frame_number);
                    if (image_loaded)
                    {
                        wdth = picLeftImage.Image.Width;
                        hght = picLeftImage.Image.Height;

                        if (global_variables.left_bmp == null)
                        {
                            global_variables.left_bmp = new Byte[wdth * hght * 3];
                            raw_image = new classimage_mono();
                            raw_image.createImage(wdth, hght);
                            raw_image_colour = new classimage();
                            raw_image_colour.createImage(wdth, hght);
                        }

                        updatebitmap((Bitmap)picLeftImage.Image, global_variables.left_bmp);

                        raw_image_colour.updateFromBitmap(global_variables.left_bmp, 1, wdth, hght);
                        raw_image.copyImage(raw_image_colour);

                        if (!outputInitialised)
                        {
                            picOutput1.Image = new Bitmap(global_variables.standard_width, global_variables.standard_height, PixelFormat.Format24bppRgb);
                            background_bitmap = new Bitmap(global_variables.standard_width, global_variables.standard_height, PixelFormat.Format24bppRgb);
                            disp_bmp_data = new Byte[global_variables.standard_width * global_variables.standard_height * 3];
                            output_image = new classimage_mono();
                            output_image.createImage(global_variables.standard_width,global_variables.standard_height);
                            outputInitialised = true;
                        }
                        output_image.updateFromImage(raw_image);

                        txtCaptureTime.Text = Convert.ToString(endStopWatch());
                        beginStopWatch();

                        test.GoOneStep(raw_image, raw_image_colour, output_image);

                        txtProcessingTime.Text = Convert.ToString(endStopWatch());
                        txtfps.Text = Convert.ToString(test.frames_per_second);
                        txtSpeed.Text = Convert.ToString(test.speed);

                        beginStopWatch();

                        test.ShowFeatures(output_image, selected_display);
                        output_image.saveToBitmap(disp_bmp_data, global_variables.standard_width, global_variables.standard_height);

                        // either show the output image in the picturebox, or transfer it
                        // to a separate background bitmap for use during 3D rendering
                        if ((selected_display != MonoSLAM.DISPLAY_AUGMENTED_REALITY) ||
                            ((selected_display == MonoSLAM.DISPLAY_AUGMENTED_REALITY) && (!(initialised_3D && test.enable_mapping))))
                        {
                            updatebitmap(disp_bmp_data, (Bitmap)picOutput1.Image);
                            picOutput1.Refresh();
                        }
                        else
                        {
                            updatebitmap(disp_bmp_data, background_bitmap);
                        }

                        // render 3D objects if augmented reality is enabled
                        render3D(d3dDevice);


                        if (!single_step) video_frame_number++;
                    }
                    else
                    {
                        video_frame_number = 1;
                        test.init();
                    }
                }
                else
                {
                    //update from cameras
                    if (captureState[0] == 2)
                    {
                        captureState[0] = 0;
                        captureState[1] = 0;

                        wdth = picLeftImage.Image.Width;
                        hght = picLeftImage.Image.Height;

                        if (raw_image == null)
                        {
                            // initialise raw image object
                            raw_image = new classimage_mono();
                            raw_image.createImage(wdth, hght);
                            raw_image_colour = new classimage();
                            raw_image_colour.createImage(wdth, hght);
                        }
                        
                        // shove the bitmap from the camera into an image object
                        raw_image_colour.updateFromBitmap(global_variables.left_bmp, 1, wdth, hght);
                        raw_image.copyImage(raw_image_colour);

                        if (!outputInitialised)
                        {
                            // initialise image bitmaps
                            picOutput1.Image = new Bitmap(global_variables.standard_width, global_variables.standard_height, PixelFormat.Format24bppRgb);
                            background_bitmap = new Bitmap(global_variables.standard_width, global_variables.standard_height, PixelFormat.Format24bppRgb);
                            disp_bmp_data = new Byte[global_variables.standard_width * global_variables.standard_height * 3];
                            output_image = new classimage_mono();
                            output_image.createImage(global_variables.standard_width, global_variables.standard_height);
                            outputInitialised = true;
                        }
                        output_image.updateFromImage(raw_image);

                        txtCaptureTime.Text = Convert.ToString(endStopWatch());
                        beginStopWatch();

                        // one processing cycle update
                        test.GoOneStep(raw_image, raw_image_colour, output_image);

                        //show info
                        txtProcessingTime.Text = Convert.ToString(endStopWatch());
                        txtfps.Text = Convert.ToString(test.frames_per_second);
                        txtSpeed.Text = Convert.ToString(test.speed);

                        beginStopWatch();

                        // show crosshair features or overhead map
                        test.ShowFeatures(output_image, selected_display);
                        output_image.saveToBitmap(disp_bmp_data, global_variables.standard_width, global_variables.standard_height);

                        // either show the output image in the picturebox, or transfer it
                        // to a separate background bitmap for use during 3D rendering
                        if ((selected_display != MonoSLAM.DISPLAY_AUGMENTED_REALITY) ||
                            ((selected_display == MonoSLAM.DISPLAY_AUGMENTED_REALITY) && (!(initialised_3D && test.enable_mapping))))
                        {
                            updatebitmap(disp_bmp_data, (Bitmap)picOutput1.Image);
                            picOutput1.Refresh();
                        }
                        else
                        {
                            updatebitmap(disp_bmp_data, background_bitmap);
                        }

                        // render 3D objects if augmented reality is enabled
                        render3D(d3dDevice);

                        if (reset)
                        {
                            test.init();
                            reset = false;
                        }
                    }
                }

            }

        }
Code example #16
File: SceneLib.cs Project: iManbot/monoslam
        /// <summary>
        /// Do a search for patch in image within an elliptical region. The
        /// search region is parameterised by an inverse covariance matrix (a distance of
        /// NO_SIGMA is used). The co-ordinates returned are those of the centre of the patch.
        /// </summary>
        /// <param name="image">The image to search</param>
        /// <param name="patch">The patch  to search for</param>
        /// <param name="centre">The centre of the search ellipse</param>
        /// <param name="PuInv">The inverse covariance matrix to use</param>
        /// <param name="u">The x-co-ordinate of the best patch location</param>
        /// <param name="v">The y-co-ordinate of the best patch location</param>
        /// <param name="uBOXSIZE">The size of the image patch (TODO: Shouldn't this be the same as the size of patch?)</param>
        /// <returns>true if a good match is found (correlation score below CORRTHRESH2), false otherwise</returns>
        public static bool elliptical_search(
                          classimage_mono image,
                          classimage_mono patch,
                          Vector centre,                    //1 dimensional x,y coords
                          MatrixFixed PuInv,                //2x2 matrix
                          ref uint u, ref uint v,
                          Vector vz,
                          uint uBOXSIZE,
                          classimage_mono outputimage,
                          bool show_ellipses,
                          bool calibrating,
                          Random rnd)
        {
            float aspect;
            if ((vz[1] != 0) && (vz[0] != 0))
            {
                aspect = Math.Abs(vz[0] / vz[1]);
                if (aspect < 0.5) aspect = 0.5f;
                if (aspect > 1.5) aspect = 1.5f;
            }
            else
                aspect = 1;
            int vz_x = 0; // (int)(vz[0] / 2);
            int vz_y = 0; // (int)(vz[1] / 2);

            // BOXSIZE is passed as an unsigned int, but the original C++ code
            // found that using it directly in the if statements below promoted
            // the whole calculation to unsigned, so it could never be < 0.
            // An int copy of BOXSIZE keeps the comparisons signed.
            int BOXSIZE = (int)uBOXSIZE;

            // The dimensions of the bounding box of the ellipse we want to search in 
            uint halfwidth = (uint) (NO_SIGMA * aspect / 
                Math.Sqrt( PuInv[0, 0] - PuInv[0, 1] * PuInv[0, 1] / PuInv[1, 1] ));

            uint halfheight = (uint) (NO_SIGMA / aspect / 
                Math.Sqrt( PuInv[1, 1] - PuInv[0, 1] * PuInv[0, 1] / PuInv[0, 0] ));

            // stop the search ellipse from expanding to large sizes!
            uint MAX_SEARCH_RADIUS = uBOXSIZE * 2;
            if (halfwidth > MAX_SEARCH_RADIUS) halfwidth = MAX_SEARCH_RADIUS;
            if (halfheight > MAX_SEARCH_RADIUS) halfheight = MAX_SEARCH_RADIUS;
            // don't allow the ellipse to get too small
            uint MIN_SEARCH_RADIUS = uBOXSIZE / 2;
            if (halfwidth < MIN_SEARCH_RADIUS) halfwidth = MIN_SEARCH_RADIUS;
            if (halfheight < MIN_SEARCH_RADIUS) halfheight = MIN_SEARCH_RADIUS;

            int ucentre = (int)(centre[0] + 0.5);
            int vcentre = (int)(centre[1] + 0.5);

            // Limits of search 
            int urelstart = -(int)(halfwidth);
            int urelfinish = (int)(halfwidth);
            int vrelstart = -(int)(halfheight);
            int vrelfinish = (int)(halfheight);

            // Check these limits aren't outside the image 
            if (ucentre + urelstart + vz_x - (BOXSIZE - 1) / 2 < 0)
                urelstart = (BOXSIZE - 1) / 2 - ucentre - vz_x;
            if (ucentre + urelfinish + vz_x - (BOXSIZE - 1) / 2 > (int)(image.width) - BOXSIZE)
                urelfinish = image.width - BOXSIZE - ucentre - vz_x + (BOXSIZE - 1) / 2;
            if (vcentre + vrelstart + vz_y - (BOXSIZE - 1) / 2 < 0)
                vrelstart = (BOXSIZE - 1) / 2 - vcentre - vz_y;
            if (vcentre + vrelfinish + vz_y - (BOXSIZE - 1) / 2 > (int)(image.height) - BOXSIZE)
                vrelfinish = (int)(image.height) - BOXSIZE - vcentre - vz_y + (BOXSIZE - 1) / 2;

            // Search counters 
            int urel, vrel;

            float corrmax = 1000000.0f;
            float corr;

            // For passing to correlate2_warning
            float sdpatch=0, sdimage=0;

            // Do the search 
            float max_dist = MAX_SEARCH_RADIUS;
            float v1 = PuInv[0, 0];
            float v2 = PuInv[0, 1];
            float v3 = PuInv[1, 1];
            float urelsq, vrelsq;
            bool inside_ellipse;
            int xx, yy, xx2, yy2;
            for (urel = urelstart; urel <= urelfinish; urel+=1)
            {
                urelsq = urel * urel;
                float urel2 = v1 * urelsq;
                inside_ellipse = false;
                for (vrel = vrelstart; vrel <= vrelfinish; vrel+=1)
                {
                        vrelsq = vrel * vrel;
                        if (urel2
                            + 2 * v2 * urel * vrel
                            + v3 * vrelsq < NO_SIGMA * NO_SIGMA)
                        {
                            if ((show_ellipses) || (calibrating))
                            {
                                if (!inside_ellipse)
                                {
                                    xx = ucentre + urel + vz_x - (BOXSIZE - 1) / 2;
                                    yy = vcentre + vrel + vz_y - (BOXSIZE - 1) / 2;
                                    xx2 = xx * outputimage.width / image.width;
                                    yy2 = yy * outputimage.height / image.height;
                                    outputimage.image[xx2, yy2] = (Byte)255;
                                }
                                inside_ellipse = true;
                            }


                            // searching within the ellipse is probabilistic,
                            // using something like a Gaussian distribution
                            float dist = (float)Math.Sqrt(urelsq + vrelsq) / max_dist;
                            if (rnd.Next(1000) < search_probability(dist))
                            {

                                int offset_x = ucentre + urel + vz_x;
                                int offset_y = vcentre + vrel + vz_y;

                                corr = correlate2_warning(0, 0, BOXSIZE, BOXSIZE,
                                    offset_x - (BOXSIZE - 1) / 2,
                                    offset_y - (BOXSIZE - 1) / 2,
                                    ref patch, ref image, ref sdpatch, ref sdimage);

                                if (corr <= corrmax)
                                {
                                    if (sdpatch < CORRELATION_SIGMA_THRESHOLD)
                                    {
                                        // cout << "Low patch sigma." << endl;
                                    }
                                    else
                                        if (sdimage < CORRELATION_SIGMA_THRESHOLD)
                                        {
                                            // cout << "Low image sigma." << endl;
                                        }
                                        else
                                        {
                                            corrmax = corr;
                                            u = (uint)offset_x;
                                            v = (uint)offset_y;
                                        }
                                }


                                //show ellipses in the output image
                                /*
                                if ((show_ellipses) || (calibrating))
                                {
                                    int xx = offset_x;
                                    int yy = offset_y;
                                    int xx2 = xx * outputimage.width / image.width;
                                    int yy2 = yy * outputimage.height / image.height;
                                    
                                    if (!calibrating)
                                    {
                                        outputimage.image[xx2, yy2, 2] = (Byte)255;
                                    }
                                    else
                                    {
                                        outputimage.image[xx2, yy2, 0] = (Byte)255;
                                        outputimage.image[xx2, yy2, 1] = (Byte)255;
                                        outputimage.image[xx2, yy2, 2] = 0;
                                    }
                                }
                                */
                            }

                        }
                        else
                        {
                            if ((show_ellipses) || (calibrating))
                            {
                                if (inside_ellipse)
                                {
                                    xx = ucentre + urel + vz_x; 
                                    yy = vcentre + vrel + vz_y; 
                                    xx2 = xx * outputimage.width / image.width;
                                    yy2 = yy * outputimage.height / image.height;
                                    outputimage.image[xx2, yy2] = (Byte)255;
                                }
                                inside_ellipse = false;
                            }
                        }
                }
            }

            

            if (corrmax > CORRTHRESH2)
            {
                return false;
            }

            return true;
        }
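search_probability is not among these examples. Its result is compared against rnd.Next(1000) above, so it presumably maps the normalised distance to a per-mille sampling probability. A plausible sketch, assuming a Gaussian falloff (the sigma value is an assumption):

    // hypothetical sketch: probability (out of 1000) of testing a candidate
    // at normalised distance 'dist' from the ellipse centre (0..1)
    private static int search_probability(float dist)
    {
        const float sigma = 0.5f;                 // assumed falloff rate
        double p = Math.Exp(-(dist * dist) / (2.0f * sigma * sigma));
        return (int)(1000.0 * p);                 // compared against rnd.Next(1000)
    }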
Code example #17
File: ellipses.cs Project: iManbot/monoslam
        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="image">The image to search</param>
        /// <param name="patch">The image patch to search for</param>
        /// <param name="BOXSIZE">The size of the image patch to use</param>
        public SearchMultipleOverlappingEllipses(classimage_mono image, classimage_mono patch, uint BOXSIZE)
        {
            m_image = image;
            m_patch = patch;
            m_boxsize = BOXSIZE;

            // Rather than working out an overall bounding box, we'll be slightly
            // lazy and make an array for correlation results which is the same size 
            // as the image
            // Pixels in this array in raster order
            correlation_image = new float[m_image.width, m_image.height];
        }
Code example #18
File: SceneLib.cs Project: iManbot/monoslam
        /// <summary>
        /// Version to find the best n patches within an image.
        /// Not very efficiently implemented I'm afraid.
        /// Now we expect the arguments ubest, vbest, evbest to be arrays of
        /// size n.
        /// </summary>
        /// <param name="image">The image to scan</param>
        /// <param name="n">Number of patches</param>
        /// <param name="ubest">Array containing x-co-ordinates of n best patches</param>
        /// <param name="vbest">Array containing y-co-ordinates of n best patches</param>
        /// <param name="evbest">Array containing smallest eigenvalues of best patches (larger is better)</param>
        /// <param name="BOXSIZE">The size of the patch to use</param>
        /// <param name="ustart">The x-co-ordinate of the start of the search window</param>
        /// <param name="vstart">The y-co-ordinate of the start of the search window</param>
        /// <param name="ufinish">The x-co-ordinate of the end of the search window</param>
        /// <param name="vfinish">The v-co-ordinate of the end of the search window</param>
        public void find_best_n_patches_inside_region(
                          classimage_mono image,
                          uint n,
                          uint[] ubest, uint[] vbest,
                          float[] evbest,
                          uint BOXSIZE,
                          uint ustart, uint vstart,
                          uint ufinish, uint vfinish)
        {
            // Check that these limits aren't too close to the image edges.
            // Note that we can't use the edge pixels because we can't form 
            // gradients here.
            if (ustart < (BOXSIZE - 1) / 2 + 1)
                ustart = (BOXSIZE - 1) / 2 + 1;
            if (ufinish > image.width - (BOXSIZE - 1) / 2 - 1)
                ufinish = (uint)(image.width - (BOXSIZE - 1) / 2 - 1);
            if (vstart < (BOXSIZE - 1) / 2 + 1)
                vstart = (BOXSIZE - 1) / 2 + 1;
            if (vfinish > image.height - (BOXSIZE - 1) / 2 - 1)
                vfinish = (uint)(image.height - (BOXSIZE - 1) / 2 - 1);

            // Calculate the width we need to find gradients in. 
            uint calc_width = ufinish - ustart + BOXSIZE - 1;

            // Arrays which store column sums of height BOXSIZE 
            float[] CSgxsq = new float[calc_width];
            float[] CSgysq = new float[calc_width];
            float[] CSgxgy = new float[calc_width];

            // For the final sums at each position (u, v) 
            float TSgxsq = 0.0f, TSgysq = 0.0f, TSgxgy = 0.0f;

            float gx, gy;
            uint u = ustart, v = vstart;
            float eval1 = 0, eval2 = 0;

            // Initial stage: fill these sums for the first horizontal position 
            uint cstart = ustart - (BOXSIZE - 1) / 2;
            uint cfinish = ufinish + (BOXSIZE - 1) / 2;
            uint rstart = vstart - (BOXSIZE - 1) / 2;
            uint i;
            uint c, r;
            for (c = cstart, i = 0; c < cfinish; c++, i++)
            {
                CSgxsq[i] = 0;
                CSgysq[i] = 0;
                CSgxgy[i] = 0;
                for (r = rstart; r < rstart + BOXSIZE; r++)
                {
                    gx = (image.image[c + 1, r] - image.image[c - 1, r]) / 2.0f;
                    gy = (image.image[c, r + 1] - image.image[c, r - 1]) / 2.0f;

                    CSgxsq[i] += gx * gx;
                    CSgysq[i] += gy * gy;
                    CSgxgy[i] += gx * gy;
                }
            }

            float[,] evarray = new float[vfinish - vstart, ufinish - ustart];

            // Now loop through u and v to form sums 
            for (v = vstart; v < vfinish; v++)
            {
                u = ustart;

                // Form first sums for u = ustart 
                TSgxsq = 0.0f;
                TSgysq = 0.0f;
                TSgxgy = 0.0f;
                for (i = 0; i < BOXSIZE; i++)
                {
                    TSgxsq += CSgxsq[i];
                    TSgysq += CSgysq[i];
                    TSgxgy += CSgxgy[i];
                }

                for (u = ustart; u < ufinish; u++)
                {
                    if (u != ustart)
                    {
                        /* Subtract old column, add new one */
                        TSgxsq += CSgxsq[u - ustart + BOXSIZE - 1] - CSgxsq[u - ustart - 1];
                        TSgysq += CSgysq[u - ustart + BOXSIZE - 1] - CSgysq[u - ustart - 1];
                        TSgxgy += CSgxgy[u - ustart + BOXSIZE - 1] - CSgxgy[u - ustart - 1];
                    }

                    find_eigenvalues(TSgxsq, TSgxgy, TSgysq, ref eval1, ref eval2);

                    // eval2 will be the smaller eigenvalue. Store in the array 
                    evarray[v - vstart, u - ustart] = eval2;
                }

                if (v != vfinish - 1)
                {
                    // Update the column sums for the next v 
                    for (c = cstart, i = 0; c < cfinish; c++, i++)
                    {
                        // Subtract the old top pixel
                        gx = (image.image[c + 1, v - (BOXSIZE - 1) / 2]
                             - image.image[c - 1, v - (BOXSIZE - 1) / 2]) / 2.0f;
                        gy = (image.image[c, v - (BOXSIZE - 1) / 2 + 1]
                             - image.image[c, v - (BOXSIZE - 1) / 2 - 1]) / 2.0f;
                        CSgxsq[i] -= gx * gx;
                        CSgysq[i] -= gy * gy;
                        CSgxgy[i] -= gx * gy;

                        // Add the new bottom pixel
                        gx = (image.image[c + 1, v + (BOXSIZE - 1) / 2 + 1]
                             - image.image[c - 1, v + (BOXSIZE - 1) / 2 + 1]) / 2.0f;
                        gy = (image.image[c, v + (BOXSIZE - 1) / 2 + 1 + 1]
                             - image.image[c, v + (BOXSIZE - 1) / 2 + 1 - 1]) / 2.0f;
                        CSgxsq[i] += gx * gx;
                        CSgysq[i] += gy * gy;
                        CSgxgy[i] += gx * gy;
                    }
                }
            }

            // Now: work out the best n patches which don't overlap each other 
            float best_so_far;
            int xdist, ydist;
            bool OKflag;
            float next_highest = 1000000000000.0f;
            for (i = 0; i < n; i++)
            {
                best_so_far = 0.0f;

                for (uint y = 0; y < vfinish - vstart; y++)
                    for (uint x = 0; x < ufinish - ustart; x++)
                    {

                        if (evarray[y, x] > best_so_far && evarray[y, x] < next_highest)
                        {
                            // We've found a high one: check it doesn't overlap with higher ones

                            OKflag = true;
                            for (uint j = 0; j < i; j++)
                            {
                                xdist = (int)(x + ustart - ubest[j]);
                                ydist = (int)(y + vstart - vbest[j]);
                                xdist = (xdist >= 0 ? xdist : -xdist);
                                ydist = (ydist >= 0 ? ydist : -ydist);
                                if ((xdist < (int)BOXSIZE) && (ydist < (int)BOXSIZE))
                                {
                                    OKflag = false;
                                    break;
                                }
                            }
                            if (OKflag)
                            {
                                ubest[i] = x + ustart;
                                vbest[i] = y + vstart;
                                evbest[i] = evarray[y, x];
                                best_so_far = evarray[y, x];
                            }

                        }

                    }
                next_highest = evbest[i];
            }
        }
Code example #19
File: SceneLib.cs Project: iManbot/monoslam
        /// <summary>
        /// Function to scan over (a window in an) image and find the best patch by the Shi
        /// and Tomasi criterion. 
        /// Method: as described in notes from 1/7/97. Form sums in an incremental
        /// way to speed things up.
        /// </summary>
        /// <param name="image">The image to scan</param>
        /// <param name="ubest">The x-co-ordinate of the best patch</param>
        /// <param name="vbest">The y-co-ordinate of the best patch</param>
        /// <param name="evbest">The smallest eigenvalue of the best patch (larger is better)</param>
        /// <param name="BOXSIZE">The size of the patch to use</param>
        /// <param name="ustart">The x-co-ordinate of the start of the search window</param>
        /// <param name="vstart">The y-co-ordinate of the start of the search window</param>
        /// <param name="ufinish">The x-co-ordinate of the end of the search window</param>
        /// <param name="vfinish">The v-co-ordinate of the end of the search window</param>
        public static void find_best_patch_inside_region(
                          classimage_mono image,
                          ref uint ubest, ref uint vbest, ref float evbest,
                          uint BOXSIZE, uint ustart, uint vstart,
                          uint ufinish, uint vfinish)
        {
            // Check that these limits aren't too close to the image edges.
            // Note that we can't use the edge pixels because we can't form 
            // gradients here. 
            if (ustart < (BOXSIZE - 1) / 2 + 1)
                ustart = (BOXSIZE - 1) / 2 + 1;
            if (ufinish > image.width - (BOXSIZE - 1) / 2 - 1)
                ufinish = (uint)(image.width - (BOXSIZE - 1) / 2 - 1);
            if (vstart < (BOXSIZE - 1) / 2 + 1)
                vstart = (BOXSIZE - 1) / 2 + 1;
            if (vfinish > image.height - (BOXSIZE - 1) / 2 - 1)
                vfinish = (uint)(image.height - (BOXSIZE - 1) / 2 - 1);

            // Is there anything left to search? If not, set the score to zero and return.
            if ((vstart >= vfinish) || (ustart >= ufinish))
            {
                ubest = ustart;
                vbest = vstart;
                evbest = 0;
                return;
            }

            // Calculate the width we need to find gradients in. 
            uint calc_width = ufinish - ustart + BOXSIZE - 1;

            // Arrays which store column sums of height BOXSIZE 
            float[] CSgxsq = new float[calc_width];
            float[] CSgysq = new float[calc_width];
            float[] CSgxgy = new float[calc_width];

            // For the final sums at each position (u, v)
            float TSgxsq = 0.0f, TSgysq = 0.0f, TSgxgy = 0.0f;

            float gx, gy;
            uint u = ustart, v = vstart;
            float eval1 = 0, eval2 = 0;

            // Initial stage: fill these sums for the first horizontal position 
            uint cstart = ustart - (BOXSIZE - 1) / 2;
            uint cfinish = ufinish + (BOXSIZE - 1) / 2;
            uint rstart = vstart - (BOXSIZE - 1) / 2;
            uint i;
            uint c, r;
            for (c = cstart, i = 0; c < cfinish; c++, i++)
            {
                CSgxsq[i] = 0; CSgysq[i] = 0; CSgxgy[i] = 0;
                for (r = rstart; r < rstart + BOXSIZE; r++)
                {
                    gx = (image.image[c + 1, r] -
                        image.image[c - 1, r]) / 2.0f;
                    gy = (image.image[c, r + 1] -
                        image.image[c, r - 1]) / 2.0f;

                    CSgxsq[i] += gx * gx;
                    CSgysq[i] += gy * gy;
                    CSgxgy[i] += gx * gy;
                }
            }


            // Now loop through u and v to form sums 
            evbest = 0;
            for (v = vstart; v < vfinish; v++)
            {
                u = ustart;

                // Form first sums for u = ustart 
                TSgxsq = 0.0f;
                TSgysq = 0.0f;
                TSgxgy = 0.0f;
                for (i = 0; i < BOXSIZE; i++)
                {
                    TSgxsq += CSgxsq[i];
                    TSgysq += CSgysq[i];
                    TSgxgy += CSgxgy[i];
                }

                for (u = ustart; u < ufinish; u++)
                {
                    if (u != ustart)
                    {
                        // Subtract old column, add new one 
                        TSgxsq += CSgxsq[u - ustart + BOXSIZE - 1] - CSgxsq[u - ustart - 1];
                        TSgysq += CSgysq[u - ustart + BOXSIZE - 1] - CSgysq[u - ustart - 1];
                        TSgxgy += CSgxgy[u - ustart + BOXSIZE - 1] - CSgxgy[u - ustart - 1];
                    }

                    find_eigenvalues(TSgxsq, TSgxgy, TSgysq, ref eval1, ref eval2);

                    // eval2 will be the smaller eigenvalue. Compare it with the one
                    // we've already got 
                    if (eval2 > evbest)
                    {
                        ubest = u;
                        vbest = v;
                        evbest = eval2;
                    }
                }

                if (v != vfinish - 1)
                {
                    // Update the column sums for the next v 
                    for (c = cstart, i = 0; c < cfinish; c++, i++)
                    {
                        // Subtract the old top pixel
                        gx = (image.image[c + 1, v - (BOXSIZE - 1) / 2] -
                              image.image[c - 1, v - (BOXSIZE - 1) / 2]) / 2.0f;
                        gy = (image.image[c, v - (BOXSIZE - 1) / 2 + 1] -
                              image.image[c, v - (BOXSIZE - 1) / 2 - 1]) / 2.0f;
                        CSgxsq[i] -= gx * gx;
                        CSgysq[i] -= gy * gy;
                        CSgxgy[i] -= gx * gy;

                        // Add the new bottom pixel
                        gx = (image.image[c + 1, v + (BOXSIZE - 1) / 2 + 1] -
                              image.image[c - 1, v + (BOXSIZE - 1) / 2 + 1]) / 2.0f;
                        gy = (image.image[c, v + (BOXSIZE - 1) / 2 + 1 + 1] -
                              image.image[c, v + (BOXSIZE - 1) / 2 + 1 - 1]) / 2.0f;
                        CSgxsq[i] += gx * gx;
                        CSgysq[i] += gy * gy;
                        CSgxgy[i] += gx * gy;
                    }
                }
            }
        }
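find_eigenvalues is not listed here, but the call sites pass the structure-tensor sums (TSgxsq, TSgxgy, TSgysq) and the comments note that eval2 is the smaller eigenvalue. The closed form for a symmetric 2x2 matrix yields a sketch consistent with that usage:

    // eigenvalues of the symmetric 2x2 matrix [[A, B], [B, C]]:
    // lambda = (A + C)/2 +/- sqrt(((A - C)/2)^2 + B^2)
    public static void find_eigenvalues(float A, float B, float C,
                                        ref float eval1, ref float eval2)
    {
        float half_trace = (A + C) / 2.0f;
        float half_diff = (A - C) / 2.0f;
        float disc = (float)Math.Sqrt(half_diff * half_diff + B * B);
        eval1 = half_trace + disc;  // larger eigenvalue
        eval2 = half_trace - disc;  // smaller eigenvalue (the Shi-Tomasi score)
    }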
Code example #20
File: SceneLib.cs Project: iManbot/monoslam
        public static void find_best_patch_inside_region_test(
                          classimage_mono image,
                          ref uint ubest, ref uint vbest, ref float evbest,
                          uint BOXSIZE, uint ustart, uint vstart,
                          uint ufinish, uint vfinish)
        {
            long corr;
            long corrmax = MIN_PATCH_DIFFERENCE * BOXSIZE * BOXSIZE / 2;
            int tx, ty, bx, by;

            for (int x = (int)ustart; x < ufinish; x++)
            {
                for (int y = (int)vstart; y < vfinish; y++)
                {
                    tx = (int)(x - (BOXSIZE - 1) / 2);
                    ty = (int)(y - (BOXSIZE - 1) / 2);
                    bx = (int)(tx + BOXSIZE);
                    by = (int)(ty + BOXSIZE);
                    long leftval = image.getIntegral(tx, ty, x, by);
                    long rightval = image.getIntegral(x, ty, bx, by);
                    long topval = image.getIntegral(tx, ty, bx, y);
                    long bottomval = image.getIntegral(tx, y, bx, by);

                    corr = Math.Abs(leftval - rightval) +
                           Math.Abs(topval - bottomval) + 
                           Math.Abs(leftval - topval) +
                           Math.Abs(rightval - topval) +
                           Math.Abs(leftval - bottomval) +
                           Math.Abs(rightval - bottomval);
                    if (corr > corrmax)
                    {
                        corrmax = corr;
                        ubest = (uint)x;
                        vbest = (uint)y;
                        evbest = corr;
                    }
                    

                }
            }
        }
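getIntegral is also not shown. With a summed-area table, the intensity sum over any axis-aligned rectangle needs only four lookups; a sketch under the assumption that classimage_mono keeps an 'integral' array of cumulative sums, with integral[x, y] covering pixels [0, x) x [0, y):

    // hypothetical sketch: sum of image intensities over [tx, bx) x [ty, by)
    // ('integral' is an assumed summed-area-table field of classimage_mono)
    public long getIntegral(int tx, int ty, int bx, int by)
    {
        return integral[bx, by] - integral[tx, by]
             - integral[bx, ty] + integral[tx, ty];
    }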
Code example #21
File: monoSLAM.cs Project: iManbot/monoslam
        public void ShowTriangles(classimage_mono img)
        {
            int i, j;

            for (i = 0; i < scene.selected_feature_list.Count; i++)
            {
                Feature f1 = (Feature)scene.selected_feature_list[i];

                if ((f1.get_successful_measurement_flag()) &&
                    (f1.get_feature_measurement_model().fully_initialised_flag))
                {

                    Vector measured_image_position1 = f1.get_z(); //measured position within the image (bottom up)

                    //scale into the given image dimensions
                    measured_image_position1[0] = (measured_image_position1[0] * img.width / CAMERA_WIDTH);
                    measured_image_position1[1] = (measured_image_position1[1] * img.height / CAMERA_HEIGHT);

                    for (j = i + 1; j < scene.selected_feature_list.Count; j++)
                    {
                        Feature f2 = (Feature)scene.selected_feature_list[j];

                        if ((f2.get_successful_measurement_flag()) &&
                            (f2.get_feature_measurement_model().fully_initialised_flag))
                        {
                            Vector measured_image_position2 = f2.get_z(); //measured position within the image (bottom up)

                            //scale into the given image dimensions
                            measured_image_position2[0] = (measured_image_position2[0] * img.width / CAMERA_WIDTH);
                            measured_image_position2[1] = (measured_image_position2[1] * img.height / CAMERA_HEIGHT);

                            img.drawLine((int)measured_image_position1[0], (int)measured_image_position1[1],
                                         (int)measured_image_position2[0], (int)measured_image_position2[1], 0);
                        }
                    }
                }
            }

        }
Code Example #22
File: robot.cs Project: iManbot/monoslam
        /// <summary>
        /// Make measurements of a feature which is represented by a set of particles.
        /// This is typically a partially-initialised feature (see
        /// Partially_Initialised_Feature_Measurement_Model), where the particles represent
        /// the probability distribution in the direction of the free parameters.
        /// </summary>
        /// <param name="id">The Identified for this feature (in this case this will be an image patch)</param>
        /// <param name="particle_vector">The vector of particles. The covariance of each particle is used to define the region over which the feature is matched and measured.</param>
        public void measure_feature_with_multiple_priors(classimage_mono patch, ArrayList particle_vector)
        {
            
            SearchMultipleOverlappingEllipses ellipse_search =
                new SearchMultipleOverlappingEllipses(image, patch, Camera_Constants.BOXSIZE);

            SearchDatum e;
            foreach (Particle part in particle_vector)
            {
                ellipse_search.add_ellipse(part.get_SInv(), part.get_h());

                //if (Camera_Constants.DEBUGDUMP) cout << "Particle at " << part->get_lambda()
                //                                     << " h " << part->get_h()
                //                                     << " SInv " << part->get_SInv() << endl;
            }

            // cout << "MFWMP timer before search_multiple: " << timerlocal << endl;
            
            ellipse_search.search();

            // cout << "MFWMP timer after search_multiple: " << timerlocal << endl;

            // Turn results into matrix forms
            int i = 0;
            foreach (Particle it in particle_vector)
            {
                e = (SearchDatum)ellipse_search.m_searchdata[i];

                if (e.result_flag)
                {
                    // Save the measurement location back into the particle
                    Vector z_local = new Vector(2);
                    z_local[0] = e.result_u;
                    z_local[1] = e.result_v;
                    it.set_z(z_local);
                    it.set_successful_measurement_flag(true);
                }
                else
                {
                    it.set_successful_measurement_flag(false);
                }
                i++;
            }

            // cout << "MFWMP timer end: " << timerlocal << endl;
            
        }
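After the search, each particle carries its own measurement result. A sketch of reading the results back (a hedged assumption: Particle exposes get_z() and get_successful_measurement_flag() accessors matching the set_ calls used above):

            // Sketch: use the particles that were successfully matched this frame
            foreach (Particle p in particle_vector)
            {
                if (p.get_successful_measurement_flag())
                {
                    Vector z_meas = p.get_z(); // image location found by the ellipse search
                    // ... use z_meas to update this particle's probability ...
                }
            }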
Code Example #23
File: monoSLAM.cs Project: iManbot/monoslam
        /// <summary>
        /// Create some default features for use with a target image
        /// </summary>
        /// <param name="path">directory path (used as a filename prefix, so it should end with a separator) where the patch bitmaps are saved</param>
        private void createDefaultKnownFeatures(String path)
        {
            Byte value = 0;
            Byte high_value = 180;
            Byte low_value = 60;
            classimage_mono known_feature = new classimage_mono();
            known_feature.createImage((int)Camera_Constants.BOXSIZE, (int)Camera_Constants.BOXSIZE);

            for (int i = 0; i < 4; i++)
            {
                for (int x = 0; x < Camera_Constants.BOXSIZE; x++)
                {
                    for (int y = 0; y < Camera_Constants.BOXSIZE; y++)
                    {
                        switch (i)
                        {
                            case 0:
                                {
                                    if ((x > Camera_Constants.BOXSIZE / 2) &&
                                        (y > Camera_Constants.BOXSIZE / 2))
                                        value = low_value;
                                    else
                                        value = high_value;
                                    break;
                                }
                            case 1:
                                {
                                    if ((x < Camera_Constants.BOXSIZE / 2) &&
                                        (y > Camera_Constants.BOXSIZE / 2))
                                        value = low_value;
                                    else
                                        value = high_value;
                                    break;
                                }
                            case 2:
                                {
                                    if ((x > Camera_Constants.BOXSIZE / 2) &&
                                        (y < Camera_Constants.BOXSIZE / 2))
                                        value = low_value;
                                    else
                                        value = high_value;
                                    break;
                                }
                            case 3:
                                {
                                    if ((x < Camera_Constants.BOXSIZE / 2) &&
                                        (y < Camera_Constants.BOXSIZE / 2))
                                        value = low_value;
                                    else
                                        value = high_value;
                                    break;
                                }
                        }

                        known_feature.image[x, y] = value;
                    }
                }
                known_feature.SaveAsBitmapMono(path + "known_patch" + Convert.ToString(i) + ".bmp");
            }
        }
Code Example #24
File: Sim_Or_Rob.cs Project: iManbot/monoslam
 /// <summary>
 /// Make a measurement of a feature.
 /// </summary>
 /// <param name="id">The identifier for this feature</param>
 /// <param name="z">The measurement of the state, to be filled in by this function</param>
 /// <param name="h">The expected measurement</param>
 /// <param name="S">The measurement covariance.</param>
 /// <returns></returns>
 public virtual bool measure_feature(classimage_mono id, ref Vector z, Vector vz, Vector h, MatrixFixed S, Random rnd) { return (false); }
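Sim_Or_Rob supplies only a do-nothing default here; the robot and simulation subclasses override it. A hedged sketch of the override shape (illustrative only, body elided):

 // Sketch of a concrete override (not the project's actual implementation)
 public override bool measure_feature(classimage_mono id, ref Vector z,
                                      Vector vz, Vector h, MatrixFixed S, Random rnd)
 {
     // search the current camera image for patch `id` in the region around h,
     // using S to bound the search ellipse; on success fill in z and return true
     return false;
 }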
Code Example #25
File: scene_single.cs Project: iManbot/monoslam
        /// <summary>
        /// Create a new partially-initialised feature. This creates a new Feature, and
        /// creates a new empty FeatureInitInfo to store the extra initialisation
        /// information, which must be then filled in by the caller of this function.
        /// </summary>
        /// <param name="id">The unique identifier for this feature (e.g. the image patch)</param>
        /// <param name="h">The initial measured state for this feature (e.g. the image location)</param>
        /// <param name="f_m_m">The partially-initialised feature measurement model to apply to this feature.</param>
        /// <returns>A pointer to the FeatureInitInfo object to be filled in with further initialisation information.</returns>
        public FeatureInitInfo add_new_partially_initialised_feature(classimage_mono id, 
                               Vector h, Partially_Initialised_Feature_Measurement_Model f_m_m,
                               Vector feature_colour)
        {
            Feature nf = new Feature(id, next_free_label, (uint)feature_list.Count, this, h, f_m_m, feature_colour);
            
            add_new_feature_bookeeping(nf);

            // Set stuff to store extra probability information for partially
            // initialised feature
            FeatureInitInfo feat = new FeatureInitInfo(this, nf);
            feature_init_info_vector.Add(feat);

            return (feat);
        }
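The returned FeatureInitInfo is deliberately empty, and the caller seeds it with depth hypotheses. A sketch under the assumption that FeatureInitInfo exposes add_particle(lambda, probability) as in the original SceneLib; scene, patch, z, partially_init_model and patch_colour are illustrative names:

            // Sketch: seed the new feature with uniformly weighted depth particles
            FeatureInitInfo feat_info = scene.add_new_partially_initialised_feature(
                patch, z, partially_init_model, patch_colour);
            int N = 100;
            for (int p = 0; p < N; p++)
            {
                Vector lambda = new Vector(1);
                lambda[0] = 0.5f + p * 4.5f / N;          // depth hypothesis (metres)
                feat_info.add_particle(lambda, 1.0f / N); // equal initial weight
            }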
Code Example #26
File: robot.cs Project: iManbot/monoslam
        /// <summary>
        /// Creates a new ImageMonoExtraData to represent the currently-selected image
        /// patch, and also returns its location in the parameter z. The current
        /// image patch is set using set_image_selection_automatically() or manually by the
        /// user using set_image_selection().
        /// </summary>
        /// <param name="z">The measurement vector to be filled in</param>
        /// <returns>The classimage_mono holding this image patch information (a newly created instance), or null if no patch is currently selected.</returns>
        public classimage_mono partially_initialise_point_feature(Vector z)
        {
            if (location_selected_flag) // Patch selected
            {
                // Go and initialise it in scene + 3D  
                classimage_mono hip = new classimage_mono();
                hip.createImage((int)Camera_Constants.BOXSIZE, (int)Camera_Constants.BOXSIZE);

                // Final save of fixated image patch
                copy_into_patch(image, hip, uu, vv);

                // And set measurement
                z.Put(0, uu);
                z.Put(1, vv);

                // return the patch
                return hip;
            }
            else
            {
                // No patch selected
                return null;
            }
        }
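A minimal calling sketch showing the null contract (robot is an illustrative name for the enclosing class instance):

            // Sketch: try to turn the currently selected patch into a new feature
            Vector z = new Vector(2);
            classimage_mono patch = robot.partially_initialise_point_feature(z);
            if (patch != null)
            {
                // patch holds the BOXSIZE x BOXSIZE pixels and z its image location;
                // both can now be passed to add_new_partially_initialised_feature
            }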
Code Example #27
File: feature.cs Project: iManbot/monoslam
        /// <summary>
        /// Constructor for partially-initialised features.
        /// </summary>
        /// <param name="id">the identifier for this feature (e.g. an image patch)</param>
        /// <param name="lab">unique label for this feature</param>
        /// <param name="list_pos">position of the new feature in the feature list</param>
        /// <param name="scene">the scene holding the state estimates</param>
        /// <param name="h">the initial measurement of this feature</param>
        /// <param name="p_i_f_m_m">the partially-initialised feature measurement model to apply</param>
        /// <param name="feature_colour">the colour used when displaying this feature</param>
        public Feature(classimage_mono id, uint lab, uint list_pos,
                       Scene_Single scene, Vector h,
                       Partially_Initialised_Feature_Measurement_Model p_i_f_m_m,
                       Vector feature_colour)
        {
            feature_measurement_model = p_i_f_m_m;
            partially_initialised_feature_measurement_model = p_i_f_m_m;
            fully_initialised_feature_measurement_model = null;

            // Stuff below substituted from Feature_common
            //   Feature_common(id, lab, list_pos, scene, h);

            feature_constructor_bookeeping();

            identifier = id;
            label = lab;
            position_in_list = list_pos;        // Position of new feature in list
            position_in_total_state_vector = 0; // This should be set properly
                                                // when the feature is added
            colour = feature_colour;

            // Save the vehicle position where this feature was acquired 
            scene.get_motion_model().func_xp(scene.get_xv());
            //xp_orig = scene.get_motion_model().get_xpRES();
            xp_orig = new Vector(scene.get_motion_model().get_xpRES());

            // Call model functions to calculate feature state, measurement noise
            // and associated Jacobians. Results are stored in RES matrices 

            // First calculate "position state" and Jacobian
            scene.get_motion_model().func_xp(scene.get_xv());
            scene.get_motion_model().func_dxp_by_dxv(scene.get_xv());

            // Now ask the model to initialise the state vector and calculate Jacobians
            // so that I can go and calculate the covariance matrices
            partially_initialised_feature_measurement_model.func_ypi_and_dypi_by_dxp_and_dypi_by_dhi_and_Ri(h, scene.get_motion_model().get_xpRES());

            // State y
            //y = partially_initialised_feature_measurement_model.get_ypiRES();
            y = new Vector(partially_initialised_feature_measurement_model.get_ypiRES());

            // Temp_FS1 will store dypi_by_dxv
            MatrixFixed Temp_FS1 =
                     partially_initialised_feature_measurement_model.get_dypi_by_dxpRES() *
                     scene.get_motion_model().get_dxp_by_dxvRES();

            // Pxy  
            Pxy = scene.get_Pxx() * Temp_FS1.Transpose();

            // Pyy
            Pyy = Temp_FS1 * scene.get_Pxx() * Temp_FS1.Transpose()
                  + partially_initialised_feature_measurement_model.get_dypi_by_dhiRES()
                  * partially_initialised_feature_measurement_model.get_RiRES()
                  * partially_initialised_feature_measurement_model.get_dypi_by_dhiRES().Transpose();

            // Covariances of this feature with others
            int j = 0;
            foreach (Feature it in scene.get_feature_list_noconst())
            {
                if (j < position_in_list)
                {
                    // new Pypiyj = dypi_by_dxv . Pxyj
                    // Size of this is FEATURE_STATE_SIZE(new) by FEATURE_STATE_SIZE(old)
                    MatrixFixed m = it.get_Pxy();
                    MatrixFixed newPyjypi_to_store = (Temp_FS1 * m).Transpose();

                    //add to the list
                    matrix_block_list.Add(newPyjypi_to_store);
                }
                j++;
            }

            known_feature_label = -1;
        }
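In matrix terms the constructor is a chain-rule covariance propagation: writing J = dypi_by_dxp · dxp_by_dxv (the code's Temp_FS1) and G = dypi_by_dhi, the blocks computed above are Pxy = Pxx · J^T, Pyy = J · Pxx · J^T + G · Ri · G^T, and, for each existing feature j, Pyjypi = (J · Pxyj)^T. G maps the measurement noise Ri from measurement space into the new feature's state space.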
Code Example #28
File: monoSLAM.cs Project: iManbot/monoslam
        public void ShowOverheadView(classimage_mono img, float world_dimension_x, float world_dimension_z)
        {
            int xx, yy, x, z, feature_size_x, feature_size_z, i, t, prev_x = 0, prev_z = 0;
            Vector position;
            float half_x = world_dimension_x/2;
            float half_z = world_dimension_z/2;

            img.clear();

            //draw the camera trace
            
            for (i = 0; i < TRACE_LENGTH; i++)
            {
                t = trace_index - i;
                if (t < 0) t += TRACE_LENGTH;
                if (!((camera_trace[t, 0] == 0) && (camera_trace[t, 1] == 0) && (camera_trace[t, 2] == 0)))
                {
                    x = img.width-(int)((camera_trace[t,0] + half_x) / world_dimension_x * img.width);
                    z = img.height-(int)((camera_trace[t, 2] + half_z) / world_dimension_z * img.height);
                    if (i > 1)
                    {
                        img.drawLine(x, z, prev_x, prev_z, 0);
                    }
                    prev_x = x;
                    prev_z = z;
                }
            }
            
            //draw the camera
            ThreeD_Motion_Model threed_motion_model = (ThreeD_Motion_Model)scene.get_motion_model();
            threed_motion_model.func_xp(scene.get_xv());
            threed_motion_model.func_r(threed_motion_model.get_xpRES());
            threed_motion_model.func_q(threed_motion_model.get_xpRES());
            //Quaternion qWO = threed_motion_model.get_qRES() * qRO;
            Vector3D r_local = threed_motion_model.get_rRES();

            position = scene.get_xv();
            float scale_factor = 1.0f + ((position[1] + (world_dimension_x/2))/world_dimension_x);
            feature_size_x = (int)(img.width * scale_factor / 50);
            feature_size_z = (int)(img.height * scale_factor / 50);
            x = (int)((r_local.GetX() + half_x) / world_dimension_x * img.width);
            if ((x > 0) && (x < img.width))
            {
                z = (int)((r_local.GetZ() + half_z) / world_dimension_z * img.height);
                if ((z > 0) && (z < img.height))
                {
                    for (xx = x - feature_size_x; xx < x + feature_size_x; xx++)
                    {
                        if ((xx > 0) && (xx < img.width))
                        {
                            for (yy = z - feature_size_z; yy < z + feature_size_z; yy++)
                            {
                                if ((yy > 0) && (yy < img.height))
                                {                                    
                                    img.image[img.width - xx, img.height - yy] = 255;
                                }
                            }
                        }
                    }
                }
            }

            
            //draw ranged features
            /*
            scale_factor = 1;
            feature_size_x = (int)(img.width * scale_factor / 50);
            feature_size_z = (int)(img.height * scale_factor / 50);
            foreach (Feature v in ranged_features)
            {
                position = v.get_y();
                x = (int)((position[0] + half_x) / world_dimension_x * img.width);
                if ((x > 0) && (x < img.width))
                {
                    z = (int)((position[2] + half_z) / world_dimension_z * img.height);
                    if ((z > 0) && (z < img.height))
                    {
                        for (xx = x - feature_size_x; xx < x + feature_size_x; xx++)
                        {
                            if ((xx > 0) && (xx < img.width))
                            {
                                for (yy = z - feature_size_z; yy < z + feature_size_z; yy++)
                                {
                                    if ((yy > 0) && (yy < img.height))
                                    {
                                        img.image[img.width - xx, img.height - yy] = (Byte)v.colour[0];
                                    }
                                }
                            }
                        }
                    }
                }
            }
            */

            
            //draw the features
            scale_factor = 1;
            feature_size_x = (int)(img.width * scale_factor / 50);
            feature_size_z = (int)(img.height * scale_factor / 50);
            foreach (Feature f in scene.feature_list)
            {
                //if (f.get_fully_initialised_feature_measurement_model() != null)
                if (f.get_successful_measurement_flag())
                {
                    position = f.get_y();
                    x = (int)((position[0] + half_x) / world_dimension_x * img.width);
                    if ((x > 0) && (x < img.width))
                    {
                        z = (int)((position[2] + half_z) / world_dimension_z * img.height);
                        if ((z > 0) && (z < img.height))
                        {
                            for (xx = x - feature_size_x; xx < x + feature_size_x; xx++)
                            {
                                if ((xx > 0) && (xx < img.width))
                                {
                                    for (yy = z - feature_size_z; yy < z + feature_size_z; yy++)
                                    {
                                        if ((yy > 0) && (yy < img.height))
                                        {
                                            img.image[img.width - xx, img.height - yy] = 255;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
            


            //show the particles associated with partially initialised features
            /*
            foreach (FeatureInitInfo feat in scene.get_feature_init_info_vector())
            {
                foreach (Particle p in feat.particle_vector)
                {
                    position = p.get_h;
                    if (position != null)
                    {
                        x = (int)((position[0] + half_x) / world_dimension_x * img.width);
                        z = (int)((position[2] + half_z) / world_dimension_z * img.height);
                        if ((x > 0) && (x < img.width) && (z > 0) && (z < img.height))
                        {
                            for (c = 0; c < 3; c++)
                                img.image[img.width - x, img.height - z, c] = 255;
                        }
                    }
                }
            }
            */
            
        }
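The overhead projection maps world coordinates onto mirrored image pixels: for the camera trace, x_img = width - (x_world + half_x) / world_dimension_x * width, and likewise for z. As a worked example, with world_dimension_x = 2.5 (the value ShowFeatures passes below) and a 200-pixel-wide image, a camera at x_world = 0 lands at 200 - (0 + 1.25) / 2.5 * 200 = 100, the horizontal centre.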
Code Example #29
File: classimage_mono.cs Project: iManbot/monoslam
        //------------------------------------------------------------------------------------------------------------------------
        //  sample the given image within the given bounding box
        //------------------------------------------------------------------------------------------------------------------------
        public void sampleFromImage(classimage_mono example_img, int tx, int ty, int bx, int by)
        {
            int x, y, xx, yy, dx, dy;

            dx = bx - tx;
            dy = by - ty;

            for (x = 0; x < width; x++)
            {
                xx = tx + ((x * dx) / width);

                for (y = 0; y < height; y++)
                {
                    yy = ty + ((y * dy) / height);

                    image[x, y] = example_img.image[xx, yy];
                }
            }
            //updateIntegralImage();
        }
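This is plain nearest-neighbour resampling of the source rectangle onto the whole destination image. A sketch of extracting a fixated patch with it (u, v, frame and patch are illustrative names, and the box is assumed to lie inside the frame):

            // Sketch: resample the box around (u, v) in a full camera frame
            // into a BOXSIZE x BOXSIZE patch image
            int r = ((int)Camera_Constants.BOXSIZE - 1) / 2;
            patch.sampleFromImage(frame, u - r, v - r, u + r + 1, v + r + 1);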
Code Example #30
File: monoSLAM.cs Project: iManbot/monoslam
        public void showProbabilities(int feature_index, classimage_mono img)
        {
            img.clear();
            if (feature_index < scene.get_feature_init_info_vector().Count)
            {
                int i = 0;
                histogram hist = new histogram();
                hist.init(100);

                FeatureInitInfo feat = (FeatureInitInfo)scene.get_feature_init_info_vector()[feature_index];
                foreach (Particle p in feat.particle_vector)
                {
                    hist.add(i, (int)(p.probability*10000));
                    i++;
                }
                hist.Show(img);
            }
        }
Code Example #32
File: monoSLAM_test.cs Project: iManbot/monoslam
 /// <summary>
 /// show detected features within the given image
 /// </summary>
 /// <param name="img"></param>
 public void ShowFeatures(classimage_mono img, int feature_type)
 {
     show_ellipses = false;
     switch (feature_type)
     {
         case MonoSLAM.DISPLAY_FEATURES:
             {
                 monoslaminterface.ShowFeatures(img);
                 break;
             }
         case MonoSLAM.DISPLAY_ELLIPSES:
             {
                 show_ellipses = true;
                 monoslaminterface.ShowFeatures(img);                        
                 break;
             }
         case MonoSLAM.DISPLAY_PROBABILITIES:
             {
                 monoslaminterface.showProbabilities(0, img);
                 //monoslaminterface.ShowTriangles(img);
                 //monoslaminterface.ShowLineHistory(img);
                 //monoslaminterface.ShowMesh(img);
                 break;
             }
         case MonoSLAM.DISPLAY_MAP:
             {
                 monoslaminterface.ShowOverheadView(img, 2.5f, 2.5f);
                 break;
             }
     }
 }
Code Example #33
File: Sim_Or_Rob.cs Project: iManbot/monoslam
 /// <summary>
 /// Make the first measurement of a feature.
 /// </summary>
 /// <param name="z">The location of the feature to measure</param>
 /// <param name="id">The identifier to fill with the feature measurements</param>
 /// <param name="f_m_m">The feature measurement model to use for this partially-initialised feature</param>
 /// <param name="patch_colour">The colour of the selected patch, to be filled in</param>
 /// <returns>true on success; this base-class default always returns false</returns>
 public virtual bool make_initial_measurement_of_feature(Vector z,
                                                  ref classimage_mono id,
                                                  Partially_Initialised_Feature_Measurement_Model f_m_m,
                                                  Vector patch_colour) { return (false); }
Code Example #34
File: monoSLAM.cs Project: iManbot/monoslam
 /// <summary>
 /// show the history of a line
 /// </summary>
 /// <param name="img"></param>
 public void ShowLineHistory(classimage_mono img)
 {
     if (persistent_line != null)
     {
         persistent_line.show(img);
     }
 }