/// <summary>
/// returns a version of this evidence ray rotated through the given
/// pan, tilt and roll angles and observed from the given translated
/// position.  This is used for generating trial poses.
/// </summary>
/// <param name="extra_pan">pan angle to be added</param>
/// <param name="extra_tilt">tilt angle to be added</param>
/// <param name="extra_roll">roll angle to be added</param>
/// <param name="translation_x">new observation x coordinate</param>
/// <param name="translation_y">new observation y coordinate</param>
/// <param name="translation_z">new observation z coordinate</param>
/// <returns>rotated and translated evidence ray object</returns>
public evidenceRay trialPose(
    float extra_pan,
    float extra_tilt,
    float extra_roll,
    float translation_x,
    float translation_y,
    float translation_z)
{
    evidenceRay rotated_ray = new evidenceRay();
    float new_pan_angle = extra_pan + pan_angle;
    float new_tilt_angle = extra_tilt + tilt_angle;
    float new_roll_angle = extra_roll + roll_angle;

    // as observed from the centre of the camera baseline
    rotated_ray.observedFrom = new pos3D(translation_x, translation_y, translation_z);
    rotated_ray.fattestPoint = fattestPoint;

    // rotate and translate the start vertex of the ray
    pos3D r1 = new pos3D(0, start_dist, 0);
    rotated_ray.vertices[0] = r1.rotate(new_pan_angle, new_tilt_angle, new_roll_angle);
    rotated_ray.vertices[0].x += translation_x;
    rotated_ray.vertices[0].y += translation_y;
    rotated_ray.vertices[0].z += translation_z;

    // rotate and translate the end vertex of the ray
    pos3D r2 = new pos3D(0, start_dist + length, 0);
    rotated_ray.vertices[1] = r2.rotate(new_pan_angle, new_tilt_angle, new_roll_angle);
    rotated_ray.vertices[1].x += translation_x;
    rotated_ray.vertices[1].y += translation_y;
    rotated_ray.vertices[1].z += translation_z;

    rotated_ray.pan_angle = new_pan_angle;
    rotated_ray.tilt_angle = new_tilt_angle;
    rotated_ray.roll_angle = new_roll_angle;

    // quantise the angles into indexes (6.2831854f = 2 * pi)
    rotated_ray.pan_index = (int)(new_pan_angle * pan_steps / 6.2831854f);
    rotated_ray.tilt_index = (int)(new_tilt_angle * pan_steps / 6.2831854f);
    rotated_ray.roll_index = (int)(new_roll_angle * pan_steps / 6.2831854f);

    rotated_ray.length = length;
    rotated_ray.width = width;
    rotated_ray.start_dist = start_dist;
    rotated_ray.disparity = disparity;

    for (int col = 2; col >= 0; col--)
        rotated_ray.colour[col] = colour[col];

    return (rotated_ray);
}
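// For context, a typical caller generates trial poses by perturbing the
// current pose estimate and re-projecting each observed ray through
// trialPose.  The sketch below is illustrative only: the sampling ranges
// and the scoring call (grid.MatchScore) are assumptions, not part of
// this class.
private evidenceRay bestTrialRay(evidenceRay ray, dpslam grid)
{
    Random rnd = new Random();
    float best_score = float.MinValue;
    evidenceRay best_ray = null;
    for (int sample = 0; sample < 100; sample++)
    {
        // perturb pan within +/- 5 degrees and position within +/- 50mm
        float trial_pan = (float)((rnd.NextDouble() - 0.5) * 10 * Math.PI / 180);
        float trial_x = (float)((rnd.NextDouble() - 0.5) * 100);
        float trial_y = (float)((rnd.NextDouble() - 0.5) * 100);

        // rotate and translate the observed ray into the candidate pose
        evidenceRay trial_ray = ray.trialPose(trial_pan, 0, 0, trial_x, trial_y, 0);

        // score the candidate against the grid (hypothetical method,
        // standing in for whatever matching score the grid provides)
        float score = grid.MatchScore(trial_ray);
        if (score > best_score)
        {
            best_score = score;
            best_ray = trial_ray;
        }
    }
    return best_ray;
}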
/// <summary>
/// add an observation taken from this pose
/// </summary>
/// <param name="stereo_rays">lists of ray objects in this observation, one list per stereo camera</param>
/// <param name="rob">robot object</param>
/// <param name="LocalGrid">occupancy grid into which to insert the observation</param>
/// <param name="localiseOnly">if true does not add any mapping particles (pure localisation)</param>
/// <returns>localisation matching score</returns>
public float AddObservation(
    List<evidenceRay>[] stereo_rays,
    robot rob,
    dpslam LocalGrid,
    bool localiseOnly)
{
    // clear the localisation score
    float localisation_score = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;

    // get the positions of the head and cameras for this pose
    pos3D head_location = new pos3D(0, 0, 0);
    pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
    pos3D[] left_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
    pos3D[] right_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
    calculateCameraPositions(
        rob,
        ref head_location,
        ref camera_centre_location,
        ref left_camera_location,
        ref right_camera_location);

    // iterate over each stereo camera
    int cam = stereo_rays.Length - 1;
    while (cam >= 0)
    {
        // iterate through each ray
        int r = stereo_rays[cam].Count - 1;
        while (r >= 0)
        {
            // observed ray.  Note that this is in an egocentric
            // coordinate frame relative to the head of the robot
            evidenceRay ray = stereo_rays[cam][r];

            // translate and rotate this ray appropriately for the pose
            evidenceRay trial_ray = ray.trialPose(
                camera_centre_location[cam].pan,
                camera_centre_location[cam].tilt,
                camera_centre_location[cam].roll,
                camera_centre_location[cam].x,
                camera_centre_location[cam].y,
                camera_centre_location[cam].z);

            // update the grid cells for this ray and update the
            // localisation score accordingly
            float score = LocalGrid.Insert(
                trial_ray, this,
                rob.head.sensormodel[cam],
                left_camera_location[cam],
                right_camera_location[cam],
                localiseOnly);

            // accumulate the score, using NO_OCCUPANCY_EVIDENCE as a
            // sentinel meaning "no valid score yet"
            if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
            {
                if (localisation_score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                    localisation_score += score;
                else
                    localisation_score = score;
            }
            r--;
        }
        cam--;
    }
    return (localisation_score);
}
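// The score accumulation in AddObservation (and again in Insert below)
// treats NO_OCCUPANCY_EVIDENCE as a sentinel meaning "no valid score yet".
// A minimal sketch of that pattern factored into a helper, under the
// assumption that the sentinel is a value which genuine scores never take
// (the helper itself is an illustration, not part of the original class):
private static float AccumulateScore(float total, float score, float no_evidence)
{
    if (score == no_evidence) return total;  // nothing to add
    if (total == no_evidence) return score;  // first valid score
    return total + score;                    // running sum
}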
/// <summary>
/// creates a ray object which may be used to update occupancy grids
/// </summary>
/// <param name="x">x position of the feature within the camera image</param>
/// <param name="y">y position of the feature within the camera image</param>
/// <param name="disparity">stereo disparity in pixels</param>
/// <param name="uncertainty">standard deviation</param>
/// <param name="r">red colour component of the ray</param>
/// <param name="g">green colour component of the ray</param>
/// <param name="b">blue colour component of the ray</param>
/// <returns>evidence ray object, or null if the ray geometry is invalid</returns>
public evidenceRay createRay(
    float x, float y, float disparity,
    float uncertainty,
    byte r, byte g, byte b)
{
    evidenceRay ray = null;
    float x1 = x + disparity;
    float x2 = x;
    float x_start = 0, y_start = 0;
    float x_end = 0, y_end = 0;
    float x_left = 0, y_left = 0;
    float x_right = 0, y_right = 0;
    int grid_dimension = 2000;
    float distance = 100; //DisparityToDistance(disparity, focal_length, sensor_pixels_per_mm, baseline);

    // locate the start, end and widest points of the ray's
    // probably occupied region
    raysIntersection(
        x1, x2, grid_dimension,
        uncertainty, distance,
        ref x_start, ref y_start,
        ref x_end, ref y_end,
        ref x_left, ref y_left,
        ref x_right, ref y_right);
    if (y_start < -1) y_start = -y_start;
    if (y_end < -1) y_end = -y_end;

    if (x_right >= x_left)
    {
        // calc uncertainty in angle (+/- half a pixel)
        //float angular_uncertainty = FOV_vertical / (image_height * 2);

        // convert y image position to z height
        int half_height = image_height / 2;
        float angle = ((y - half_height) * FOV_vertical / image_height);

        // project the start and end heights (truncated to integer values)
        float z_start = (int)(y_start * Math.Sin(angle));
        float z_end = (int)(y_end * Math.Sin(angle));

        // use an offset so that the point from which the ray
        // was observed is at (0,0,0)
        float x_offset = grid_dimension * 0.5f;
        ray = new evidenceRay();
        ray.vertices[0] = new pos3D(x_start - x_offset, y_start, z_start);
        ray.vertices[1] = new pos3D(x_end - x_offset, y_end, z_end);

        // widest point as a fraction of the ray's length
        ray.fattestPoint = (y_left - y_start) / (y_end - y_start);

        // width of the ray at its widest point
        ray.width = x_right - x_left;

        ray.colour[0] = r;
        ray.colour[1] = g;
        ray.colour[2] = b;

        ray.uncertainty = uncertainty;
        ray.disparity = disparity;
        ray.sigma = sigma;
    }
    return (ray);
}
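// The commented-out DisparityToDistance call above hints at the standard
// pinhole stereo relation: range is inversely proportional to disparity.
// A sketch of that conversion, assuming focal length is given in
// millimetres and sensor resolution in pixels per millimetre (the
// parameter names mirror the commented call; the original implementation
// is not shown in this section):
private static float DisparityToDistanceSketch(
    float disparity_pixels,
    float focal_length_mm,
    float sensor_pixels_per_mm,
    float baseline_mm)
{
    if (disparity_pixels <= 0) return float.MaxValue;  // feature at infinity

    // focal length expressed in pixels
    float focal_length_pixels = focal_length_mm * sensor_pixels_per_mm;

    // range[mm] = f[pixels] * baseline[mm] / disparity[pixels]
    return focal_length_pixels * baseline_mm / disparity_pixels;
}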
/// <summary>
/// inserts the given ray into the grid
/// There are three components to the sensor model used here:
/// two regions of probably vacant space (one per camera) and one
/// region of probably occupied space
/// </summary>
/// <param name="ray">ray object to be inserted into the grid</param>
/// <param name="origin">the pose of the robot</param>
/// <param name="sensormodel_lookup">the sensor model lookup table to be used</param>
/// <param name="left_camera_location">position of the left camera in millimetres</param>
/// <param name="right_camera_location">position of the right camera in millimetres</param>
/// <param name="localiseOnly">if true does not add any mapping particles (pure localisation)</param>
/// <returns>matching probability, expressed as log odds</returns>
public float Insert(
    evidenceRay ray,
    particlePose origin,
    rayModelLookup sensormodel_lookup,
    pos3D left_camera_location,
    pos3D right_camera_location,
    bool localiseOnly)
{
    // some constants to aid readability
    const int OCCUPIED_SENSORMODEL = 0;
    const int VACANT_SENSORMODEL_LEFT_CAMERA = 1;
    const int VACANT_SENSORMODEL_RIGHT_CAMERA = 2;

    const int X_AXIS = 0;
    const int Y_AXIS = 1;

    if (sensormodel_lookup == null)
        Console.WriteLine("Sensor models are missing");

    // which disparity index in the lookup table to use
    // the lookup table is quantised in steps of ray_model_interval_pixels
    float ray_model_interval_pixels = sensormodel_lookup.ray_model_interval_pixels;
    int sensormodel_index = (int)Math.Round(ray.disparity / ray_model_interval_pixels);

    // the initial models are blank, so just default to the one disparity pixel model
    bool small_disparity_value = false;
    if (sensormodel_index < 2)
    {
        sensormodel_index = 2;
        small_disparity_value = true;
    }

    // beyond a certain disparity the ray model for occupied space
    // is only ever going to be a single grid cell
    if (sensormodel_index >= sensormodel_lookup.probability.GetLength(0))
        sensormodel_index = sensormodel_lookup.probability.GetLength(0) - 1;

    float xdist_mm = 0, ydist_mm = 0, zdist_mm = 0;
    float xx_mm = 0, yy_mm = 0, zz_mm = 0;
    float occupied_dx = 0, occupied_dy = 0, occupied_dz = 0;
    float intersect_x = 0, intersect_y = 0, intersect_z = 0;

    // probability values at the centre axis and outside
    float centre_prob = 0, prob = 0, prob_localisation = 0;

    // total localisation matching score
    float matchingScore = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;

    int rayWidth = 0;      // widest point in the ray in cells
    int widest_point;      // widest point index
    int step_size = 1;
    particleGridCell hypothesis;

    // ray width at the fattest point in cells
    rayWidth = (int)Math.Round(ray.width / (cellSize_mm * 2));

    // calculate the centre position of the grid in millimetres
    int half_grid_width_mm = dimension_cells * cellSize_mm / 2;
    int grid_centre_x_mm = (int)(x - half_grid_width_mm);
    int grid_centre_y_mm = (int)(y - half_grid_width_mm);
    int grid_centre_z_mm = (int)z;

    // in turbo mode only use a single vacancy ray
    int max_modelcomponent = VACANT_SENSORMODEL_RIGHT_CAMERA;
    if (TurboMode) max_modelcomponent = VACANT_SENSORMODEL_LEFT_CAMERA;

    float[][] sensormodel_lookup_probability = sensormodel_lookup.probability;

    // consider each of the three parts of the sensor model
    for (int modelcomponent = OCCUPIED_SENSORMODEL; modelcomponent <= max_modelcomponent; modelcomponent++)
    {
        // the range from the cameras at which insertion of data begins
        // for vacancy rays this will be zero, but will be non-zero for the occupied area
        int starting_range_cells = 0;

        switch (modelcomponent)
        {
            case OCCUPIED_SENSORMODEL:
            {
                // distance between the beginning and end of the probably
                // occupied area
                occupied_dx = ray.vertices[1].x - ray.vertices[0].x;
                occupied_dy = ray.vertices[1].y - ray.vertices[0].y;
                occupied_dz = ray.vertices[1].z - ray.vertices[0].z;
                intersect_x = ray.vertices[0].x + (occupied_dx * ray.fattestPoint);
                intersect_y = ray.vertices[0].y + (occupied_dy * ray.fattestPoint);
                intersect_z = ray.vertices[0].z + (occupied_dz * ray.fattestPoint);

                xdist_mm = occupied_dx;
                ydist_mm = occupied_dy;
                zdist_mm = occupied_dz;

                // begin insertion at the beginning of the
                // probably occupied area
                xx_mm = ray.vertices[0].x;
                yy_mm = ray.vertices[0].y;
                zz_mm = ray.vertices[0].z;
                break;
            }
            case VACANT_SENSORMODEL_LEFT_CAMERA:
            {
                // distance between the left camera and the left side of
                // the probably occupied area of the sensor model
                xdist_mm = intersect_x - left_camera_location.x;
                ydist_mm = intersect_y - left_camera_location.y;
                zdist_mm = intersect_z - left_camera_location.z;

                // begin insertion from the left camera position
                xx_mm = left_camera_location.x;
                yy_mm = left_camera_location.y;
                zz_mm = left_camera_location.z;
                step_size = 2;
                break;
            }
            case VACANT_SENSORMODEL_RIGHT_CAMERA:
            {
                // distance between the right camera and the right side of
                // the probably occupied area of the sensor model
                xdist_mm = intersect_x - right_camera_location.x;
                ydist_mm = intersect_y - right_camera_location.y;
                zdist_mm = intersect_z - right_camera_location.z;

                // begin insertion from the right camera position
                xx_mm = right_camera_location.x;
                yy_mm = right_camera_location.y;
                zz_mm = right_camera_location.z;
                step_size = 2;
                break;
            }
        }

        // which is the longest axis?
        int longest_axis = X_AXIS;
        float longest = Math.Abs(xdist_mm);
        if (Math.Abs(ydist_mm) > longest)
        {
            // y has the longest length
            longest = Math.Abs(ydist_mm);
            longest_axis = Y_AXIS;
        }

        // ensure that the vacancy model does not overlap
        // the probably occupied area
        // This is crude and could potentially leave a gap
        if (modelcomponent != OCCUPIED_SENSORMODEL)
            longest -= ray.width;

        int steps = (int)(longest / cellSize_mm);
        if (steps < 1) steps = 1;

        // calculate the range from the cameras to the start of the ray in grid cells
        if (modelcomponent == OCCUPIED_SENSORMODEL)
        {
            if (longest_axis == Y_AXIS)
                starting_range_cells = (int)Math.Abs((ray.vertices[0].y - ray.observedFrom.y) / cellSize_mm);
            else
                starting_range_cells = (int)Math.Abs((ray.vertices[0].x - ray.observedFrom.x) / cellSize_mm);
        }

        // what is the widest point of the ray in cells?
        if (modelcomponent == OCCUPIED_SENSORMODEL)
            widest_point = (int)(ray.fattestPoint * steps / ray.length);
        else
            widest_point = steps;

        // calculate increment values in millimetres
        float x_incr_mm = xdist_mm / steps;
        float y_incr_mm = ydist_mm / steps;
        float z_incr_mm = zdist_mm / steps;

        // step through the ray, one grid cell at a time
        int grid_step = 0;
        while (grid_step < steps)
        {
            // is this position inside the maximum mapping range?
            bool withinMappingRange = true;
            if (grid_step + starting_range_cells > max_mapping_range_cells)
            {
                withinMappingRange = false;
                if ((grid_step == 0) && (modelcomponent == OCCUPIED_SENSORMODEL))
                {
                    // the entire ray is out of range, so
                    // terminate both the inner and outer loops
                    grid_step = steps;
                    modelcomponent = 9999;
                }
            }

            // calculate the width of the ray in cells at this point
            // using a diamond shape ray model
            int ray_wdth = 0;
            if (rayWidth > 0)
            {
                if (grid_step < widest_point)
                    ray_wdth = grid_step * rayWidth / widest_point;
                else
                {
                    if (!small_disparity_value)
                        // most disparity values tail off to some point in the distance
                        ray_wdth = (steps - grid_step + widest_point) * rayWidth / (steps - widest_point);
                    else
                        // for very small disparity values the ray model has an infinite tail
                        // and maintains its width after the widest point
                        ray_wdth = rayWidth;
                }
            }

            // localisation rays are wider, to enable a more effective matching score
            // which is not too narrowly focussed and brittle
            int ray_wdth_localisation = ray_wdth + 1;

            xx_mm += x_incr_mm * step_size;
            yy_mm += y_incr_mm * step_size;
            zz_mm += z_incr_mm * step_size;

            // convert the x millimetre position into a grid cell position
            int x_cell = (int)Math.Round((xx_mm - grid_centre_x_mm) / (float)cellSize_mm);
            if ((x_cell > ray_wdth_localisation) && (x_cell < dimension_cells - ray_wdth_localisation))
            {
                // convert the y millimetre position into a grid cell position
                int y_cell = (int)Math.Round((yy_mm - grid_centre_y_mm) / (float)cellSize_mm);
                if ((y_cell > ray_wdth_localisation) && (y_cell < dimension_cells - ray_wdth_localisation))
                {
                    // convert the z millimetre position into a grid cell position
                    int z_cell = (int)Math.Round((zz_mm - grid_centre_z_mm) / (float)cellSize_mm);
                    if ((z_cell >= 0) && (z_cell < dimension_cells_vertical))
                    {
                        int x_cell2 = x_cell;
                        int y_cell2 = y_cell;

                        // get the probability at this point
                        // for the central axis of the ray using the inverse sensor model
                        if (modelcomponent == OCCUPIED_SENSORMODEL)
                            centre_prob = ray_model_interval_pixels + (sensormodel_lookup_probability[sensormodel_index][grid_step] * ray_model_interval_pixels);
                        else
                            // calculate the probability from the vacancy model
                            centre_prob = vacancyFunction(grid_step / (float)steps, steps);

                        // width of the localisation ray
                        for (int width = -ray_wdth_localisation; width <= ray_wdth_localisation; width++)
                        {
                            // is the width currently inside the mapping area of the ray?
                            bool isInsideMappingRayWidth = false;
                            if ((width >= -ray_wdth) && (width <= ray_wdth))
                                isInsideMappingRayWidth = true;

                            // adjust the x or y cell position depending upon the
                            // deviation from the main axis of the ray
                            if (longest_axis == Y_AXIS)
                                x_cell2 = x_cell + width;
                            else
                                y_cell2 = y_cell + width;

                            // probability at the central axis
                            prob = centre_prob;
                            prob_localisation = centre_prob;

                            // probabilities are symmetrical about the axis of the ray
                            // this multiplier implements a gaussian distribution around the centre
                            if (width != 0)  // don't bother if this is the centre of the ray
                            {
                                // the probability used for wide localisation rays
                                prob_localisation *= gaussianLookup[Math.Abs(width) * 9 / ray_wdth_localisation];

                                // the probability used for narrower mapping rays
                                if (isInsideMappingRayWidth)
                                    prob *= gaussianLookup[Math.Abs(width) * 9 / ray_wdth];
                            }

                            if ((cell[x_cell2][y_cell2] != null) && (withinMappingRange))
                            {
                                // only localise using occupancy, not vacancy
                                if (modelcomponent == OCCUPIED_SENSORMODEL)
                                {
                                    // update the matching score, by combining the probability
                                    // of the grid cell with the probability from the
                                    // localisation ray (the call is the same for both axes)
                                    float score = matchingProbability(
                                        x_cell2, y_cell2, z_cell,
                                        origin, prob_localisation, ray.colour);

                                    if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                                    {
                                        if (matchingScore != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                                            matchingScore += score;
                                        else
                                            matchingScore = score;
                                    }
                                }
                            }

                            if ((isInsideMappingRayWidth) &&
                                (withinMappingRange) &&
                                (!localiseOnly))
                            {
                                // add a new hypothesis to this grid coordinate
                                // note that this is also added to the original pose
                                hypothesis = new particleGridCell(
                                    x_cell2, y_cell2, z_cell,
                                    prob, origin, ray.colour);
                                if (origin.AddHypothesis(
                                    hypothesis,
                                    max_mapping_range_cells,
                                    dimension_cells,
                                    dimension_cells_vertical))
                                {
                                    // generate a grid cell if necessary
                                    if (cell[x_cell2][y_cell2] == null)
                                        cell[x_cell2][y_cell2] = new occupancygridCellMultiHypothesis(dimension_cells_vertical);
                                    cell[x_cell2][y_cell2].AddHypothesis(hypothesis);
                                    total_valid_hypotheses++;
                                }
                            }
                        }
                    }
                    else grid_step = steps;  // it's the end of the ray, break out of the loop
                }
                else grid_step = steps;  // it's the end of the ray, break out of the loop
            }
            else grid_step = steps;  // time to bail out chaps!

            grid_step += step_size;
        }
    }
    return (matchingScore);
}
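// The diamond-shaped ray model in Insert computes the ray's width at each
// step along its length: it widens linearly from zero to rayWidth at the
// widest point, then tapers off towards the far end, except for very small
// disparities where the tail keeps its full width.  Isolated here as a pure
// function for clarity (a sketch mirroring the in-line computation above;
// this helper is not part of the original class):
private static int RayWidthAtStep(
    int grid_step, int steps, int widest_point,
    int rayWidth, bool small_disparity_value)
{
    if (rayWidth <= 0) return 0;

    // widening section, from the start of the ray to the widest point
    if (grid_step < widest_point)
        return grid_step * rayWidth / widest_point;

    // very small disparities maintain their width after the widest point,
    // as does a ray whose widest point coincides with its end
    if (small_disparity_value || steps == widest_point)
        return rayWidth;

    // tapering section, from the widest point towards the end of the ray
    return (steps - grid_step + widest_point) * rayWidth / (steps - widest_point);
}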