Code example #1
File: evidenceRay.cs Project: williammc/sentience
        /// <summary>
        /// returns a version of this evidence ray rotated through the given
        /// pan angle in the XY plane.  This is used for generating trial poses.
        /// </summary>
        /// <param name="extra_pan">pan angle to be added</param>
        /// <param name="translation_x">new obsevation x coordinate</param>
        /// <param name="translation_y">new obsevation y coordinate</param>
        /// <returns>evidence ray object</returns>
        public evidenceRay trialPose(float extra_pan,
                                     float translation_x, float translation_y)
        {
            evidenceRay rotated_ray   = new evidenceRay();
            float       new_pan_angle = extra_pan + pan_angle;

            // as observed from the centre of the camera baseline
            rotated_ray.observedFrom = new pos3D(translation_x, translation_y, 0);
            rotated_ray.fattestPoint = fattestPoint;

            rotated_ray.vertices[0]   = new pos3D(0, 0, 0);
            rotated_ray.vertices[0].x = translation_x + (start_dist * (float)Math.Sin(new_pan_angle));
            rotated_ray.vertices[0].y = translation_y + (start_dist * (float)Math.Cos(new_pan_angle));
            rotated_ray.vertices[0].z = vertices[0].z;

            rotated_ray.vertices[1]   = new pos3D(0, 0, 0);
            rotated_ray.vertices[1].x = translation_x + ((start_dist + length) * (float)Math.Sin(new_pan_angle));
            rotated_ray.vertices[1].y = translation_y + ((start_dist + length) * (float)Math.Cos(new_pan_angle));
            rotated_ray.vertices[1].z = vertices[1].z;

            rotated_ray.pan_angle  = new_pan_angle;
            rotated_ray.pan_index  = (int)(new_pan_angle * pan_steps / 6.2831854f);
            rotated_ray.length     = length;
            rotated_ray.width      = width;
            rotated_ray.start_dist = start_dist;
            rotated_ray.disparity  = disparity;

            for (int col = 2; col >= 0; col--)
            {
                rotated_ray.colour[col] = colour[col];
            }

            return(rotated_ray);
        }
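
The pan_index assignment above quantises the pan angle into one of pan_steps discrete buckets over a full turn (6.2831854f ≈ 2π), which is what lets matchingScore in example #15 compare rays by bucket rather than by raw angle. A minimal standalone sketch of the same quantisation; WrapAngle is a hypothetical helper (not part of the sentience sources) used to keep the angle in [0, 2π):

        // sketch: quantise a pan angle (radians) into one of pan_steps buckets
        // WrapAngle is a hypothetical helper, not part of the sentience sources
        static float WrapAngle(float angle)
        {
            const float TWO_PI = 6.2831854f;
            angle = angle % TWO_PI;
            if (angle < 0) angle += TWO_PI;   // keep the result within [0, 2*PI)
            return angle;
        }

        static int PanIndex(float pan_angle, int pan_steps)
        {
            // same formula as in trialPose above
            return (int)(WrapAngle(pan_angle) * pan_steps / 6.2831854f);
        }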
Code example #2
File: viewpoint.cs Project: williammc/sentience
        /// <summary>
        /// shows rays within the viewpoint from the front
        /// </summary>
        /// <param name="img"></param>
        /// <param name="scale"></param>
        public void showFront(Byte[] img, int img_width, int img_height, int scale,
                              Byte r, Byte g, Byte b, bool showProbabilities)
        {
            Byte  rr, gg, bb;
            int   half_width = img_width / 2;
            int   half_height = img_height / 2;
            float start_x, start_z, end_x, end_z;

            //img.clear();
            for (int cam = 0; cam < rays.Length; cam++)
            {
                for (int ry = 0; ry < rays[cam].Count; ry++)
                {
                    evidenceRay ray = (evidenceRay)rays[cam][ry];

                    rr = r;
                    gg = g;
                    bb = b;

                    if (!showProbabilities)
                    {
                        start_x = ray.vertices[0].x * half_width / scale;
                        start_z = ray.vertices[0].z * half_height / scale;
                        end_x   = ray.vertices[1].x * half_width / scale;
                        end_z   = ray.vertices[1].z * half_height / scale;

                        util.drawLine(img, img_width, img_height, half_width + (int)start_x, half_height + (int)start_z, half_width + (int)end_x, half_height + (int)end_z, rr, gg, bb, 0, true);
                    }
                    else
                    {
                        float dx = ray.vertices[1].x - ray.vertices[0].x;
                        float dy = ray.vertices[1].y - ray.vertices[0].y;
                        float dz = ray.vertices[1].z - ray.vertices[0].z;

                        float sx = ray.vertices[0].x;
                        float sz = ray.vertices[0].z;
                        for (int l = 1; l < 50; l++)
                        {
                            float ex = ray.vertices[0].x + (dx * l / 50);
                            float ey = ray.vertices[0].y + (dy * l / 50);
                            float ez = ray.vertices[0].z + (dz * l / 50);

                            int intensity = (int)(ray.probability(ex, ey) * 255);
                            rr = (Byte)(r * intensity / 255);
                            gg = (Byte)(g * intensity / 255);
                            bb = (Byte)(b * intensity / 255);

                            start_x = sx * half_width / scale;
                            start_z = sz * half_height / scale;
                            end_x   = ex * half_width / scale;
                            end_z   = ez * half_height / scale;
                            util.drawLine(img, img_width, img_height, half_width + (int)start_x, half_height + (int)start_z, half_width + (int)end_x, half_height + (int)end_z, rr, gg, bb, 0, true);

                            sx = ex;
                            sz = ez;
                        }
                    }
                }
            }
        }
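
When showProbabilities is set, the loop above divides each ray into 50 segments and shades each one by evaluating ray.probability at the segment end. That shading step, isolated as a small sketch:

        // sketch: scale a base colour by a probability in [0,1], as the
        // probability-shaded branch above does for each ray segment
        static void ShadeColour(float probability, Byte r, Byte g, Byte b,
                                out Byte rr, out Byte gg, out Byte bb)
        {
            int intensity = (int)(probability * 255);
            rr = (Byte)(r * intensity / 255);
            gg = (Byte)(g * intensity / 255);
            bb = (Byte)(b * intensity / 255);
        }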
Code example #3
        /// <summary>
        /// insert a stereo ray into the grid
        /// </summary>
        /// <param name="ray">stereo ray</param>
        /// <param name="sensormodel">sensor model for each grid level</param>
        /// <param name="left_camera_location">left stereo camera position and orientation</param>
        /// <param name="right_camera_location">right stereo camera position and orientation</param>
        /// <param name="localiseOnly">whether we are mapping or localising</param>
        /// <returns>localisation matching score</returns>
        public float Insert(
            evidenceRay ray,
            stereoModel[] sensormodel,
            pos3D left_camera_location,
            pos3D right_camera_location,
            bool localiseOnly)
        {
            float matchingScore = occupancygridBase.NO_OCCUPANCY_EVIDENCE;

            switch (grid_type)
            {
            case TYPE_SIMPLE:
            {
                for (int grid_level = 0; grid_level < grid.Length; grid_level++)
                {
                    rayModelLookup      sensormodel_lookup = sensormodel[grid_level].ray_model;
                    occupancygridSimple grd = (occupancygridSimple)grid[grid_level];
                    float score             = grd.Insert(ray, sensormodel_lookup, left_camera_location, right_camera_location, localiseOnly);

                    if (score != occupancygridBase.NO_OCCUPANCY_EVIDENCE)
                    {
                        if (matchingScore == occupancygridBase.NO_OCCUPANCY_EVIDENCE)
                        {
                            matchingScore = 0;
                        }
                        matchingScore += grid_weight[grid_level] * score;
                    }
                }
                break;
            }
            }
            return(matchingScore);
        }
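
The pattern here is a weighted sum across grid levels, with NO_OCCUPANCY_EVIDENCE acting as a sentinel meaning "no score yet" rather than a score of zero. A stripped-down sketch of that accumulation logic; the sentinel value and the input arrays are illustrative assumptions:

        // sketch: combine per-level scores into one weighted matching score,
        // treating NO_OCCUPANCY_EVIDENCE as "no evidence" rather than zero
        // (assumption: the real constant is defined in occupancygridBase)
        const float NO_OCCUPANCY_EVIDENCE = 999999;

        static float CombineScores(float[] level_scores, float[] grid_weight)
        {
            float matchingScore = NO_OCCUPANCY_EVIDENCE;
            for (int grid_level = 0; grid_level < level_scores.Length; grid_level++)
            {
                float score = level_scores[grid_level];
                if (score != NO_OCCUPANCY_EVIDENCE)
                {
                    // first real evidence: switch from the sentinel to a running total
                    if (matchingScore == NO_OCCUPANCY_EVIDENCE) matchingScore = 0;
                    matchingScore += grid_weight[grid_level] * score;
                }
            }
            return matchingScore;
        }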
Code example #4
        /// <summary>
        /// insert a stereo ray into the grid
        /// </summary>
        /// <param name="ray">stereo ray</param>
        /// <param name="origin">pose from which this observation was made</param>
        /// <param name="sensormodel">sensor model for each grid level</param>
        /// <param name="left_camera_location">left stereo camera position and orientation</param>
        /// <param name="right_camera_location">right stereo camera position and orientation</param>
        /// <param name="localiseOnly">whether we are mapping or localising</param>
        /// <returns>localisation matching score</returns>
        public float Insert(
            evidenceRay ray,
            particlePose origin,
            stereoModel[] sensormodel,
            pos3D left_camera_location,
            pos3D right_camera_location,
            bool localiseOnly)
        {
            float matchingScore = 0;

            switch (grid_type)
            {
            case TYPE_MULTI_HYPOTHESIS:
            {
                for (int grid_level = 0; grid_level < grid.Length; grid_level++)
                {
                    rayModelLookup sensormodel_lookup = sensormodel[grid_level].ray_model;
                    occupancygridMultiHypothesis grd  = (occupancygridMultiHypothesis)grid[grid_level];
                    matchingScore += grid_weight[grid_level] * grd.Insert(ray, origin, sensormodel_lookup, left_camera_location, right_camera_location, localiseOnly);
                }
                break;
            }
            }
            return(matchingScore);
        }
Code example #5
File: viewpoint.cs Project: williammc/sentience
        /// <summary>
        /// create a new viewpoint which is a trial pose
        /// </summary>
        /// <param name="extra_pan"></param>
        /// <param name="translation_x"></param>
        /// <param name="translation_y"></param>
        /// <returns></returns>
        public viewpoint createTrialPose(float extra_pan,
                                         float translation_x, float translation_y)
        {
            viewpoint trialPose = new viewpoint(rays.Length);

            for (int cam = 0; cam < rays.Length; cam++)
            {
                for (int r = 0; r < rays[cam].Count; r++)
                {
                    evidenceRay ray       = (evidenceRay)rays[cam][r];
                    evidenceRay trial_ray = ray.trialPose(extra_pan, translation_x, translation_y);
                    trialPose.rays[cam].Add(trial_ray);
                }
            }
            return(trialPose);
        }
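
createTrialPose pairs naturally with matchingScore (example #15): generate trial viewpoints at candidate pan/translation offsets and keep the best-scoring one. A hedged sketch of such a search loop; the offset ranges, separation tolerance and ray thickness below are invented for illustration and are not taken from the sentience sources:

        // sketch: search a small set of candidate poses for the best match
        // against a reference viewpoint (all search parameters illustrative)
        static viewpoint BestTrialPose(viewpoint observed, viewpoint reference)
        {
            viewpoint best = null;
            float best_score = -1;
            for (int p = -2; p <= 2; p++)
                for (int xi = -2; xi <= 2; xi++)
                    for (int yi = -2; yi <= 2; yi++)
                    {
                        viewpoint trial = observed.createTrialPose(p * 0.05f, xi * 25, yi * 25);
                        float score = trial.matchingScore(reference, 10.0f, 1, null);
                        if (score > best_score)
                        {
                            best_score = score;
                            best = trial;
                        }
                    }
            return best;
        }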
Code example #6
File: viewpoint.cs Project: kasertim/sentience
        public void Load(BinaryReader binfile)
        {
            odometry_position.x = binfile.ReadSingle();
            odometry_position.y = binfile.ReadSingle();
            odometry_position.z = binfile.ReadSingle();

            int no_of_cameras = binfile.ReadInt32();
            init(no_of_cameras);
            for (int cam = 0; cam < rays.Length; cam++)
            {
                int no_of_rays = binfile.ReadInt32();
                for (int r = 0; r < no_of_rays; r++)
                {
                    evidenceRay new_ray = new evidenceRay();
                    new_ray.Load(binfile);
                    new_ray.observedFrom.x = odometry_position.x;
                    new_ray.observedFrom.y = odometry_position.y;
                    new_ray.observedFrom.z = odometry_position.z;
                    rays[cam].Add(new_ray);
                }
            }
        }
Code example #7
File: viewpoint.cs Project: williammc/sentience
        public void Load(BinaryReader binfile)
        {
            odometry_position.x = binfile.ReadSingle();
            odometry_position.y = binfile.ReadSingle();
            odometry_position.z = binfile.ReadSingle();

            int no_of_cameras = binfile.ReadInt32();

            init(no_of_cameras);
            for (int cam = 0; cam < rays.Length; cam++)
            {
                int no_of_rays = binfile.ReadInt32();
                for (int r = 0; r < no_of_rays; r++)
                {
                    evidenceRay new_ray = new evidenceRay();
                    new_ray.Load(binfile);
                    new_ray.observedFrom.x = odometry_position.x;
                    new_ray.observedFrom.y = odometry_position.y;
                    new_ray.observedFrom.z = odometry_position.z;
                    rays[cam].Add(new_ray);
                }
            }
        }
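
From the two Load implementations above, the binary layout is: odometry x, y, z as singles, then the number of cameras, then for each camera a ray count followed by the serialised rays. A sketch of the matching Save method, assuming evidenceRay exposes a Save(BinaryWriter) counterpart to its Load (an assumption; that method is not shown here):

        // sketch: Save counterpart mirroring the layout read by Load above
        // (assumes evidenceRay.Save(BinaryWriter) matches its Load)
        public void Save(BinaryWriter binfile)
        {
            binfile.Write(odometry_position.x);
            binfile.Write(odometry_position.y);
            binfile.Write(odometry_position.z);

            binfile.Write(rays.Length);             // number of cameras
            for (int cam = 0; cam < rays.Length; cam++)
            {
                binfile.Write(rays[cam].Count);     // number of rays for this camera
                for (int r = 0; r < rays[cam].Count; r++)
                    ((evidenceRay)rays[cam][r]).Save(binfile);
            }
        }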
Code example #8
File: stereoModel.cs Project: kasertim/sentience
        /// <summary>
        /// creates a ray object which may be used to update occupancy grids
        /// </summary>
        /// <param name="x">x position of the feature within the camera image</param>
        /// <param name="y">y position of the feature within the camera image</param>
        /// <param name="disparity">stereo disparity in pixels</param>
        /// <param name="uncertainty">standard deviation</param>
        /// <param name="r">red colour component of the ray</param>
        /// <param name="g">green colour component of the ray</param>
        /// <param name="b">blue colour component of the ray</param>
        /// <returns>evidence ray object</returns>
        public evidenceRay createRay(
            float x, float y, float disparity,
            float uncertainty,
            byte r, byte g, byte b)
        {
            evidenceRay ray = null;
            float x1 = x + disparity;
            float x2 = x;
            float x_start = 0, y_start = 0;
            float x_end = 0, y_end = 0;
            float x_left = 0, y_left = 0;
            float x_right = 0, y_right = 0;
            int grid_dimension = 2000;            
            float distance = 100; //DisparityToDistance(disparity, focal_length, sensor_pixels_per_mm, baseline);

            raysIntersection(x1, x2, grid_dimension, uncertainty, distance,
                             ref x_start, ref y_start, ref x_end, ref y_end,
                             ref x_left, ref y_left, ref x_right, ref y_right);
            if (y_start < -1) y_start = -y_start;
            if (y_end < -1) y_end = -y_end;

            if (x_right >= x_left)
            {
                // calc uncertainty in angle (+/- half a pixel)
                float angular_uncertainty = FOV_vertical / (image_height * 2);

                // convert y image position to z height
                int half_height = image_height / 2;
                float angle = ((y - half_height) * FOV_vertical / image_height);
                float z = (int)(y_left * Math.Sin(angle));
                float z_start = (int)(y_start * Math.Sin(angle));
                float z_end = (int)(y_end * Math.Sin(angle));

                //use an offset so that the point from which the ray was observed was (0,0,0)
                float x_offset = grid_dimension * 0.5f;
                ray = new evidenceRay();
                ray.vertices[0] = new pos3D(x_start - x_offset, y_start, z_start);
                
                ray.vertices[1] = new pos3D(x_end - x_offset, y_end, z_end);
                ray.fattestPoint = (y_left - y_start) / (y_end - y_start);
                //ray.vertices[2] = new pos3D(x_left - x_offset, y_left, z);
                //ray.vertices[3] = new pos3D(x_right - x_offset, y_right, z);
                //ray.centre = new pos3D(x_left + ((x_right-x_left)*0.5f) - x_offset, y_left, z);
                ray.width = x_right - x_left;

                ray.colour[0] = r;
                ray.colour[1] = g;
                ray.colour[2] = b;

                ray.uncertainty = uncertainty;
                ray.disparity = disparity;
                ray.sigma = sigma;
            }
            return (ray);
        }
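
fattestPoint is stored as a normalised position along the ray, (y_left - y_start) / (y_end - y_start), so the 3D location of the widest point can be recovered by interpolating between the two vertices, exactly as the grid insertion in example #11 does. A small sketch:

        // sketch: recover the 3D position of the ray's widest point from the
        // normalised fattestPoint parameter by interpolating between vertices
        static pos3D FattestPointPosition(evidenceRay ray)
        {
            float dx = ray.vertices[1].x - ray.vertices[0].x;
            float dy = ray.vertices[1].y - ray.vertices[0].y;
            float dz = ray.vertices[1].z - ray.vertices[0].z;
            return new pos3D(
                ray.vertices[0].x + (dx * ray.fattestPoint),
                ray.vertices[0].y + (dy * ray.fattestPoint),
                ray.vertices[0].z + (dz * ray.fattestPoint));
        }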
Code example #9
File: metagrid.cs Project: kasertim/sentience
        /// <summary>
        /// insert a stereo ray into the grid
        /// </summary>
        /// <param name="ray">stereo ray</param>
        /// <param name="sensormodel">sensor model for each grid level</param>
        /// <param name="left_camera_location">left stereo camera position and orientation</param>
        /// <param name="right_camera_location">right stereo camera position and orientation</param>
        /// <param name="localiseOnly">whether we are mapping or localising</param>
        /// <returns>localisation matching score</returns>
        public float Insert(
            evidenceRay ray,
            stereoModel[] sensormodel,
            pos3D left_camera_location,
            pos3D right_camera_location,
            bool localiseOnly)
        {
            float matchingScore = occupancygridBase.NO_OCCUPANCY_EVIDENCE;
            switch (grid_type)
            {
                case TYPE_SIMPLE:
                {
                    for (int grid_level = 0; grid_level < grid.Length; grid_level++)
                    {
                        rayModelLookup sensormodel_lookup = sensormodel[grid_level].ray_model;
                        occupancygridSimple grd = (occupancygridSimple)grid[grid_level];
                        float score = grd.Insert(ray, sensormodel_lookup, left_camera_location, right_camera_location, localiseOnly);

                        if (score != occupancygridBase.NO_OCCUPANCY_EVIDENCE)
                        {
                            if (matchingScore == occupancygridBase.NO_OCCUPANCY_EVIDENCE) matchingScore = 0;
                            matchingScore += grid_weight[grid_level] * score;
                        }
                    }
                    break;
                }                
            }
            return (matchingScore);
        }
Code example #10
File: metagrid.cs Project: kasertim/sentience
        /// <summary>
        /// insert a stereo ray into the grid
        /// </summary>
        /// <param name="ray">stereo ray</param>
        /// <param name="origin">pose from which this observation was made</param>
        /// <param name="sensormodel">sensor model for each grid level</param>
        /// <param name="left_camera_location">left stereo camera position and orientation</param>
        /// <param name="right_camera_location">right stereo camera position and orientation</param>
        /// <param name="localiseOnly">whether we are mapping or localising</param>
        /// <returns>localisation matching score</returns>
        public float Insert(
            evidenceRay ray,
            particlePose origin,
            stereoModel[] sensormodel,
            pos3D left_camera_location,
            pos3D right_camera_location,
            bool localiseOnly)
        {
            float matchingScore = 0;

            switch (grid_type)
            {
                case TYPE_MULTI_HYPOTHESIS:
                    {
                        for (int grid_level = 0; grid_level < grid.Length; grid_level++)
                        {
                            rayModelLookup sensormodel_lookup = sensormodel[grid_level].ray_model;
                            occupancygridMultiHypothesis grd = (occupancygridMultiHypothesis)grid[grid_level];
                            matchingScore += grid_weight[grid_level] * grd.Insert(ray, origin, sensormodel_lookup, left_camera_location, right_camera_location, localiseOnly);
                        }
                        break;
                    }
            }
            return (matchingScore);
        }
Code example #11
        /// <summary>
        /// inserts the given ray into the grid
        /// There are three components to the sensor model used here:
        /// two vacancy components (one for each camera) covering the
        /// probably vacant area, and one for the probably occupied space
        /// </summary>
        /// <param name="ray">ray object to be inserted into the grid</param>
        /// <param name="origin">the pose of the robot</param>
        /// <param name="sensormodel">the sensor model to be used</param>
        /// <param name="leftcam_x">x position of the left camera in millimetres</param>
        /// <param name="leftcam_y">y position of the left camera in millimetres</param>
        /// <param name="rightcam_x">x position of the right camera in millimetres</param>
        /// <param name="rightcam_y">y position of the right camera in millimetres</param>
        /// <param name="localiseOnly">if true does not add any mapping particles (pure localisation)</param>
        /// <returns>matching probability, expressed as log odds</returns>
        public float Insert(evidenceRay ray, 
                            particlePose origin,
                            rayModelLookup sensormodel_lookup,
                            pos3D left_camera_location, 
                            pos3D right_camera_location,
                            bool localiseOnly)
        {
            // some constants to aid readability
            const int OCCUPIED_SENSORMODEL = 0;
            const int VACANT_SENSORMODEL_LEFT_CAMERA = 1;
            const int VACANT_SENSORMODEL_RIGHT_CAMERA = 2;

            const int X_AXIS = 0;
            const int Y_AXIS = 1;

            // which disparity index in the lookup table to use
            // we multiply by 2 because the lookup is in half pixel steps
            int sensormodel_index = (int)Math.Round(ray.disparity * 2);
            
            // the initial models are blank, so just default to the one-pixel disparity model
            bool small_disparity_value = false;
            if (sensormodel_index < 2)
            {
                sensormodel_index = 2;
                small_disparity_value = true;
            }

            // beyond a certain disparity the ray model for occupied space
            // is always going to be only a single grid cell
            if (sensormodel_index >= sensormodel_lookup.probability.GetLength(0))
                sensormodel_index = sensormodel_lookup.probability.GetLength(0) - 1;

            float xdist_mm=0, ydist_mm=0, zdist_mm=0, xx_mm=0, yy_mm=0, zz_mm=0;
            float occupied_dx = 0, occupied_dy = 0, occupied_dz = 0;
            float intersect_x = 0, intersect_y = 0, intersect_z = 0;
            float centre_prob = 0, prob = 0, prob_localisation = 0; // probability values at the centre axis and outside
            float matchingScore = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;  // total localisation matching score
            int rayWidth = 0;         // widest point in the ray in cells
            int widest_point;         // widest point index
            int step_size = 1;
            particleGridCell hypothesis;

            // ray width at the fattest point in cells
            rayWidth = (int)Math.Round(ray.width / (cellSize_mm * 2));

            // calculate the centre position of the grid in millimetres
            int half_grid_width_mm = dimension_cells * cellSize_mm / 2;
            //int half_grid_width_vertical_mm = dimension_cells_vertical * cellSize_mm / 2;
            int grid_centre_x_mm = (int)(x - half_grid_width_mm);
            int grid_centre_y_mm = (int)(y - half_grid_width_mm);
            int grid_centre_z_mm = (int)z;

            int max_dimension_cells = dimension_cells - rayWidth;

            // in turbo mode only use a single vacancy ray
            int max_modelcomponent = VACANT_SENSORMODEL_RIGHT_CAMERA;
            if (TurboMode) max_modelcomponent = VACANT_SENSORMODEL_LEFT_CAMERA;

            float[][] sensormodel_lookup_probability = sensormodel_lookup.probability;

            // consider each of the three parts of the sensor model
            for (int modelcomponent = OCCUPIED_SENSORMODEL; modelcomponent <= max_modelcomponent; modelcomponent++)
            {
                // the range from the cameras at which insertion of data begins
                // for vacancy rays this will be zero, but non-zero for the occupancy area
                int starting_range_cells = 0;

                switch (modelcomponent)
                {
                    case OCCUPIED_SENSORMODEL:
                        {
                            // distance between the beginning and end of the probably
                            // occupied area
                            occupied_dx = ray.vertices[1].x - ray.vertices[0].x;
                            occupied_dy = ray.vertices[1].y - ray.vertices[0].y;
                            occupied_dz = ray.vertices[1].z - ray.vertices[0].z;
                            intersect_x = ray.vertices[0].x + (occupied_dx * ray.fattestPoint);
                            intersect_y = ray.vertices[0].y + (occupied_dy * ray.fattestPoint);
                            intersect_z = ray.vertices[0].z + (occupied_dz * ray.fattestPoint);

                            xdist_mm = occupied_dx;
                            ydist_mm = occupied_dy;
                            zdist_mm = occupied_dz;

                            // begin insertion at the beginning of the 
                            // probably occupied area
                            xx_mm = ray.vertices[0].x;
                            yy_mm = ray.vertices[0].y;
                            zz_mm = ray.vertices[0].z;
                            break;
                        }
                    case VACANT_SENSORMODEL_LEFT_CAMERA:
                        {
                            // distance between the left camera and the left side of
                            // the probably occupied area of the sensor model                            
                            xdist_mm = intersect_x - left_camera_location.x;
                            ydist_mm = intersect_y - left_camera_location.y;
                            zdist_mm = intersect_z - left_camera_location.z;

                            // begin insertion from the left camera position
                            xx_mm = left_camera_location.x;
                            yy_mm = left_camera_location.y;
                            zz_mm = left_camera_location.z;
                            step_size = 2;
                            break;
                        }
                    case VACANT_SENSORMODEL_RIGHT_CAMERA:
                        {
                            // distance between the right camera and the right side of
                            // the probably occupied area of the sensor model
                            xdist_mm = intersect_x - right_camera_location.x;
                            ydist_mm = intersect_y - right_camera_location.y;
                            zdist_mm = intersect_z - right_camera_location.z;

                            // begin insertion from the right camera position
                            xx_mm = right_camera_location.x;
                            yy_mm = right_camera_location.y;
                            zz_mm = right_camera_location.z;
                            step_size = 2;
                            break;
                        }
                }

                // which is the longest axis ?
                int longest_axis = X_AXIS;
                float longest = Math.Abs(xdist_mm);
                if (Math.Abs(ydist_mm) > longest)
                {
                    // y has the longest length
                    longest = Math.Abs(ydist_mm);
                    longest_axis = Y_AXIS;
                }

                // ensure that the vacancy model does not overlap
                // the probably occupied area
                // This is crude and could potentially leave a gap
                if (modelcomponent != OCCUPIED_SENSORMODEL)
                    longest -= ray.width;

                int steps = (int)(longest / cellSize_mm);
                if (steps < 1) steps = 1;

                // calculate the range from the cameras to the start of the ray in grid cells
                if (modelcomponent == OCCUPIED_SENSORMODEL)
                {
                    if (longest_axis == Y_AXIS)
                        starting_range_cells = (int)Math.Abs((ray.vertices[0].y - ray.observedFrom.y) / cellSize_mm);
                    else
                        starting_range_cells = (int)Math.Abs((ray.vertices[0].x - ray.observedFrom.x) / cellSize_mm);
                }

                // what is the widest point of the ray in cells
                if (modelcomponent == OCCUPIED_SENSORMODEL)
                    widest_point = (int)(ray.fattestPoint * steps / ray.length);
                else
                    widest_point = steps;

                // calculate increment values in millimetres
                float x_incr_mm = xdist_mm / steps;
                float y_incr_mm = ydist_mm / steps;
                float z_incr_mm = zdist_mm / steps;

                // step through the ray, one grid cell at a time
                int grid_step = 0;
                while (grid_step < steps)
                {
                    // is this position inside the maximum mapping range
                    bool withinMappingRange = true;
                    if (grid_step + starting_range_cells > max_mapping_range_cells)
                    {
                        withinMappingRange = false;
                        if ((grid_step == 0) && (modelcomponent == OCCUPIED_SENSORMODEL))
                        {
                            // the entire occupied area is out of range, so skip
                            // this stepping loop and the remaining model components
                            grid_step = steps;
                            modelcomponent = 9999;
                        }
                    }

                    // calculate the width of the ray in cells at this point
                    // using a diamond-shaped ray model
                    int ray_wdth = 0;
                    if (rayWidth > 0)
                    {
                        if (grid_step < widest_point)
                            ray_wdth = grid_step * rayWidth / widest_point;
                        else
                        {
                            if (!small_disparity_value)
                                // most disparity values tail off to some point in the distance
                                ray_wdth = (steps - grid_step + widest_point) * rayWidth / (steps - widest_point);
                            else
                                // for very small disparity values the ray model has an infinite tail
                                // and maintains its width after the widest point
                                ray_wdth = rayWidth; 
                        }
                    }

                    // localisation rays are wider, to enable a more effective matching score
                    // which is not too narrowly focussed and brittle
                    int ray_wdth_localisation = ray_wdth + 1; //localisation_search_cells;

                    xx_mm += x_incr_mm*step_size;
                    yy_mm += y_incr_mm*step_size;
                    zz_mm += z_incr_mm*step_size;
                    // convert the x millimetre position into a grid cell position
                    int x_cell = (int)Math.Round((xx_mm - grid_centre_x_mm) / (float)cellSize_mm);
                    if ((x_cell > ray_wdth_localisation) && (x_cell < dimension_cells - ray_wdth_localisation))
                    {
                        // convert the y millimetre position into a grid cell position
                        int y_cell = (int)Math.Round((yy_mm - grid_centre_y_mm) / (float)cellSize_mm);
                        if ((y_cell > ray_wdth_localisation) && (y_cell < dimension_cells - ray_wdth_localisation))
                        {
                            // convert the z millimetre position into a grid cell position
                            int z_cell = (int)Math.Round((zz_mm - grid_centre_z_mm) / (float)cellSize_mm);
                            if ((z_cell >= 0) && (z_cell < dimension_cells_vertical))
                            {

                                int x_cell2 = x_cell;
                                int y_cell2 = y_cell;

                                // get the probability at this point 
                                // for the central axis of the ray using the inverse sensor model
                                if (modelcomponent == OCCUPIED_SENSORMODEL)
                                    centre_prob = 0.5f + (sensormodel_lookup_probability[sensormodel_index][grid_step] * 0.5f);
                                else
                                    // calculate the probability from the vacancy model
                                    centre_prob = vacancyFunction(grid_step / (float)steps, steps);


                                // width of the localisation ray
                                for (int width = -ray_wdth_localisation; width <= ray_wdth_localisation; width++)
                                {
                                    // is the width currently inside the mapping area of the ray ?
                                    bool isInsideMappingRayWidth = false;
                                    if ((width >= -ray_wdth) && (width <= ray_wdth))
                                        isInsideMappingRayWidth = true;

                                    // adjust the x or y cell position depending upon the 
                                    // deviation from the main axis of the ray
                                    if (longest_axis == Y_AXIS)
                                        x_cell2 = x_cell + width;
                                    else
                                        y_cell2 = y_cell + width;

                                    // probability at the central axis
                                    prob = centre_prob;
                                    prob_localisation = centre_prob;

                                    // probabilities are symmetrical about the axis of the ray
                                    // this multiplier implements a gaussian distribution around the centre
                                    if (width != 0) // don't bother if this is the centre of the ray
                                    {
                                        // the probability used for wide localisation rays
                                        prob_localisation *= gaussianLookup[Math.Abs(width) * 9 / ray_wdth_localisation];

                                        // the probability used for narrower mapping rays
                                        if (isInsideMappingRayWidth)
                                            prob *= gaussianLookup[Math.Abs(width) * 9 / ray_wdth];
                                    }

                                    if ((cell[x_cell2][y_cell2] != null) && (withinMappingRange))
                                    {
                                        // only localise using occupancy, not vacancy
                                        if (modelcomponent == OCCUPIED_SENSORMODEL)
                                        {
                                            // update the matching score, by combining the probability
                                            // of the grid cell with the probability from the localisation ray
                                            // longest_axis is always X_AXIS or Y_AXIS, and the
                                            // calculation is identical in either case
                                            float score = matchingProbability(x_cell2, y_cell2, z_cell, origin, prob_localisation, ray.colour);

                                            if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                                            {
                                                if (matchingScore != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                                                    matchingScore += score;
                                                else
                                                    matchingScore = score;
                                            }
                                        }
                                    }

                                    if ((isInsideMappingRayWidth) && 
                                        (withinMappingRange) &&
                                        (!localiseOnly))
                                    {
                                        // add a new hypothesis to this grid coordinate
                                        // note that this is also added to the original pose
                                        hypothesis = new particleGridCell(x_cell2, y_cell2, z_cell, 
                                                                          prob, origin,
                                                                          ray.colour);
                                        if (origin.AddHypothesis(hypothesis, max_mapping_range_cells, dimension_cells, dimension_cells_vertical))
                                        {
                                            // generate a grid cell if necessary
                                            if (cell[x_cell2][y_cell2] == null)
                                                cell[x_cell2][y_cell2] = new occupancygridCellMultiHypothesis(dimension_cells_vertical);

                                            cell[x_cell2][y_cell2].AddHypothesis(hypothesis);                                            
                                            total_valid_hypotheses++;
                                        }
                                    }
                                }
                            }
                            else grid_step = steps;  // it's the end of the ray, break out of the loop
                        }
                        else grid_step = steps;  // it's the end of the ray, break out of the loop
                    }
                    else grid_step = steps;  // outside the grid, break out of the loop
                    grid_step += step_size;
                }
            }

            return (matchingScore);
        }
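
The width calculation inside the stepping loop gives the ray a diamond-shaped profile: width grows linearly from zero up to the widest point, then tapers off, except for very small disparities where the model keeps its full width indefinitely. The same logic isolated as a pure function (a sketch; it assumes widest_point < steps on the tapering branch, as the occupied-model path above arranges):

        // sketch: width of the ray (in cells) at a given step along its length,
        // reproducing the diamond-shaped profile used by Insert above
        static int RayWidthAtStep(int grid_step, int steps, int widest_point,
                                  int rayWidth, bool small_disparity_value)
        {
            if (rayWidth <= 0) return 0;

            if (grid_step < widest_point)
                // expanding towards the widest point
                return grid_step * rayWidth / widest_point;

            if (small_disparity_value)
                // very small disparities: infinite tail, width stays constant
                return rayWidth;

            // tapering off after the widest point
            return (steps - grid_step + widest_point) * rayWidth / (steps - widest_point);
        }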
Code example #12
File: evidenceRay.cs Project: kasertim/sentience
        /// <summary>
        /// returns a version of this evidence ray rotated through the given 
        /// pan angle in the XY plane.  This is used for generating trial poses.
        /// </summary>
        /// <param name="extra_pan">pan angle to be added</param>
        /// <param name="translation_x">new obsevation x coordinate</param>
        /// <param name="translation_y">new obsevation y coordinate</param>
        /// <returns>evidence ray object</returns>
        public evidenceRay trialPose(float extra_pan,
                                     float translation_x, float translation_y)
        {
            evidenceRay rotated_ray = new evidenceRay();
            float new_pan_angle = extra_pan + pan_angle;

            // as observed from the centre of the camera baseline
            rotated_ray.observedFrom = new pos3D(translation_x, translation_y, 0);
            rotated_ray.fattestPoint = fattestPoint;

            rotated_ray.vertices[0] = new pos3D(0, 0, 0);
            rotated_ray.vertices[0].x = translation_x + (start_dist * (float)Math.Sin(new_pan_angle));
            rotated_ray.vertices[0].y = translation_y + (start_dist * (float)Math.Cos(new_pan_angle));
            rotated_ray.vertices[0].z = vertices[0].z;

            rotated_ray.vertices[1] = new pos3D(0, 0, 0);
            rotated_ray.vertices[1].x = translation_x + ((start_dist + length) * (float)Math.Sin(new_pan_angle));
            rotated_ray.vertices[1].y = translation_y + ((start_dist + length) * (float)Math.Cos(new_pan_angle));
            rotated_ray.vertices[1].z = vertices[1].z;

            rotated_ray.pan_angle = new_pan_angle;
            rotated_ray.pan_index = (int)(new_pan_angle * pan_steps / 6.2831854f);
            rotated_ray.length = length;
            rotated_ray.width = width;
            rotated_ray.start_dist = start_dist;
            rotated_ray.disparity = disparity;

            for (int col = 2; col >= 0; col--)
                rotated_ray.colour[col] = colour[col];

            return (rotated_ray);
        }
Code example #13
File: particlePose.cs Project: williammc/sentience
        /// <summary>
        /// add an observation taken from this pose
        /// </summary>
        /// <param name="stereo_rays">list of ray objects in this observation</param>
        /// <param name="rob">robot object</param>
        /// <param name="LocalGrid">occupancy grid into which to insert the observation</param>
        /// <param name="localiseOnly">if true does not add any mapping particles (pure localisation)</param>
        /// <returns>localisation matching score</returns>
        public float AddObservation(List <evidenceRay>[] stereo_rays,
                                    robot rob,
                                    occupancygridMultiHypothesis LocalGrid,
                                    bool localiseOnly)
        {
            // clear the localisation score
            float localisation_score = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;

            // get the positions of the head and cameras for this pose
            pos3D head_location = new pos3D(0, 0, 0);

            pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] left_camera_location   = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] right_camera_location  = new pos3D[rob.head.no_of_stereo_cameras];
            calculateCameraPositions(rob, ref head_location,
                                     ref camera_centre_location,
                                     ref left_camera_location,
                                     ref right_camera_location);

            // iterate over each stereo camera
            int cam = stereo_rays.Length - 1;

            while (cam >= 0)
            {
                // iterate through each ray
                int r = stereo_rays[cam].Count - 1;
                while (r >= 0)
                {
                    // observed ray.  Note that this is in an egocentric
                    // coordinate frame relative to the head of the robot
                    evidenceRay ray = stereo_rays[cam][r];

                    // translate and rotate this ray appropriately for the pose
                    evidenceRay trial_ray = ray.trialPose(camera_centre_location[cam].pan,
                                                          camera_centre_location[cam].x,
                                                          camera_centre_location[cam].y);

                    // update the grid cells for this ray and update the
                    // localisation score accordingly
                    float score =
                        LocalGrid.Insert(trial_ray, this,
                                         rob.head.sensormodel[cam],
                                         left_camera_location[cam],
                                         right_camera_location[cam],
                                         localiseOnly);
                    if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                    {
                        if (localisation_score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                        {
                            localisation_score += score;
                        }
                        else
                        {
                            localisation_score = score;
                        }
                    }
                    r--;
                }
                cam--;
            }

            return(localisation_score);
        }
Code example #14
File: viewpoint.cs Project: williammc/sentience
        /// <summary>
        /// shows rays within the viewpoint from above
        /// </summary>
        /// <param name="img"></param>
        /// <param name="scale"></param>
        public void showAbove(Byte[] img, int img_width, int img_height, int scale,
                              Byte r, Byte g, Byte b, bool showProbabilities, int vertical_adjust,
                              bool lowUncertaintyOnly)
        {
            Byte  rr, gg, bb;
            int   half_width = img_width / 2;
            int   half_height = (img_height / 2);
            int   vertAdjust = vertical_adjust * img_height / 100;
            float start_x, start_y, end_x, end_y;

            //img.clear();
            for (int cam = 0; cam < rays.Length; cam++)
            {
                for (int ry = 0; ry < rays[cam].Count; ry++)
                {
                    evidenceRay ray = (evidenceRay)rays[cam][ry];
                    if ((!lowUncertaintyOnly) || (ray.uncertainty < 0.8f))
                    {
                        rr = r;
                        gg = g;
                        bb = b;

                        if (!showProbabilities)
                        {
                            start_x = ray.vertices[0].x * half_width / scale;
                            start_y = ray.vertices[0].y * half_height / scale;
                            end_x   = ray.vertices[1].x * half_width / scale;
                            end_y   = ray.vertices[1].y * half_height / scale;

                            start_y -= vertAdjust;
                            end_y   -= vertAdjust;

                            util.drawLine(img, img_width, img_height, half_width + (int)start_x, half_height + (int)start_y, half_width + (int)end_x, half_height + (int)end_y, rr, gg, bb, 0, true);
                        }
                        else
                        {
                            float dx = ray.vertices[1].x - ray.vertices[0].x;
                            float dy = ray.vertices[1].y - ray.vertices[0].y;

                            float sx = ray.vertices[0].x;
                            float sy = ray.vertices[0].y;
                            for (int l = 1; l < 50; l++)
                            {
                                float ex = ray.vertices[0].x + (dx * l / 50);
                                float ey = ray.vertices[0].y + (dy * l / 50);

                                int intensity = (int)(ray.probability(ex, ey) * 255);
                                rr = (Byte)(r * intensity / 255);
                                gg = (Byte)(g * intensity / 255);
                                bb = (Byte)(b * intensity / 255);

                                start_x = sx * half_width / scale;
                                start_y = sy * half_height / scale;
                                end_x   = ex * half_width / scale;
                                end_y   = ey * half_height / scale;

                                start_y -= vertAdjust;
                                end_y   -= vertAdjust;

                                util.drawLine(img, img_width, img_height, half_width + (int)start_x, half_height + (int)start_y, half_width + (int)end_x, half_height + (int)end_y, rr, gg, bb, 0, true);

                                sx = ex;
                                sy = ey;
                            }
                        }
                    }
                }
            }
        }
Code example #15
File: viewpoint.cs Project: williammc/sentience
        /// <summary>
        /// returns a score indicating how closely matched the two viewpoints are
        /// </summary>
        /// <param name="other"></param>
        /// <param name="intersects"></param>
        /// <returns></returns>
        public float matchingScore(viewpoint other, float separation_tollerance, int ray_thickness, ArrayList intersects)
        {
            float separation;
            float score        = 0;
            pos3D intersectPos = new pos3D(0, 0, 0);

            if (ray_thickness < 1)
            {
                ray_thickness = 1;
            }

            for (int cam1 = 0; cam1 < rays.Length; cam1++)
            {
                for (int ry1 = 0; ry1 < rays[cam1].Count; ry1++)
                {
                    evidenceRay ray1       = (evidenceRay)rays[cam1][ry1];
                    int         pan_index1 = ray1.pan_index - 1;
                    int         pan_index2 = ray1.pan_index;
                    int         pan_index3 = ray1.pan_index + 1;
                    if (pan_index1 < 0)
                    {
                        pan_index1 = evidenceRay.pan_steps - 1;
                    }
                    if (pan_index3 >= evidenceRay.pan_steps)
                    {
                        pan_index3 = 0;
                    }

                    int cam2 = cam1;
                    //for (int cam2 = 0; cam2 < other.rays.Length; cam2++)
                    {
                        for (int ry2 = 0; ry2 < other.rays[cam2].Count; ry2++)
                        {
                            evidenceRay ray2       = (evidenceRay)other.rays[cam2][ry2];
                            int         pan_index4 = ray2.pan_index;
                            if ((pan_index1 == pan_index4) ||
                                (pan_index2 == pan_index4) ||
                                (pan_index3 == pan_index4))
                            {
                                //do these rays intersect
                                separation = 999;
                                if (stereoModel.raysOverlap(ray1, ray2, intersectPos, separation_tollerance, ray_thickness, ref separation))
                                {
                                    float p1   = ray1.probability(intersectPos.x, intersectPos.y);
                                    float p2   = ray2.probability(intersectPos.x, intersectPos.y);
                                    float prob = (p1 * p2) + ((1.0f - p1) * (1.0f - p2));

                                    if (intersects != null)
                                    {
                                        // add the intersection to the list
                                        intersects.Add(intersectPos);
                                        intersectPos = new pos3D(0, 0, 0);
                                    }

                                    //increment the matching score
                                    score += 1.0f / (1.0f + ((1.0f - prob) * separation * separation));
                                    //score += 1.0f / (1.0f + (separation * separation) + (ray2.length * ray1.length / 2));
                                    //score += 1.0f / (1.0f + (separation * separation));
                                }
                            }
                        }
                    }
                }
            }
            return(score);
        }
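
Each intersection's contribution combines the agreement of the two rays, prob = p1*p2 + (1-p1)(1-p2), with their separation, so that close intersections between mutually probable rays score near 1 while distant or contradictory ones decay towards zero. The increment isolated as a sketch:

        // sketch: contribution of a single ray intersection to the matching
        // score, as computed in matchingScore above
        static float IntersectionScore(float p1, float p2, float separation)
        {
            // probability that both rays agree (both probable or both improbable)
            float prob = (p1 * p2) + ((1.0f - p1) * (1.0f - p2));

            // close, high-agreement intersections score near 1;
            // distant or low-agreement ones decay towards 0
            return 1.0f / (1.0f + ((1.0f - prob) * separation * separation));
        }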