public processstereo()
{
    disparities = new float[MAX_FEATURES * 3];
    tracking = new sentienceTracking();
    radar = new sentienceRadar();
    robot_head = new stereoHead(1);
    stereo_model = new stereoModel();
}
/// <summary>
/// initialise variables prior to performing test routines
/// </summary>
private void init()
{
    grid_layer = new float[grid_dimension, grid_dimension, 3];
    pos3D_x = new float[4];
    pos3D_y = new float[4];
    stereo_model = new stereoModel();
    robot_head = new stereoHead(4);
    stereo_features = new float[900];
    stereo_uncertainties = new float[900];
    img_rays = new byte[standard_width * standard_height * 3];
    imgOutput.Pixbuf = GtkBitmap.createPixbuf(standard_width, standard_height);
    GtkBitmap.setBitmap(img_rays, imgOutput);
}
public frmMain()
{
    InitializeComponent();

    // set the help text description for the FolderBrowserDialog
    folderBrowserDialog1.Description =
        "Select the directory where stereo images are located";

    // do not allow the user to create new files via the FolderBrowserDialog
    folderBrowserDialog1.ShowNewFolderButton = false;
    txtImagesDirectory.Text = images_directory;

    openFileDialog1.DefaultExt = "xml";
    openFileDialog1.Filter = "xml files (*.xml)|*.xml";
    txtCalibrationFilename.Text = calibration_filename;

    // sensor model used for mapping
    inverseSensorModel = new stereoModel();
}
public void RaysIntersection()
{
    int debug_img_width = 640;
    int debug_img_height = 480;
    byte[] debug_img = new byte[debug_img_width * debug_img_height * 3];
    for (int i = (debug_img_width * debug_img_height * 3) - 1; i >= 0; i--)
        debug_img[i] = 255;
    Bitmap bmp = new Bitmap(debug_img_width, debug_img_height, System.Drawing.Imaging.PixelFormat.Format24bppRgb);

    float min_x = float.MaxValue, max_x = float.MinValue;
    float min_y = 0, max_y = float.MinValue;
    float ray_uncertainty = 0.5f;

    List<float> x_start = new List<float>();
    List<float> y_start = new List<float>();
    List<float> x_end = new List<float>();
    List<float> y_end = new List<float>();
    List<float> x_left = new List<float>();
    List<float> y_left = new List<float>();
    List<float> x_right = new List<float>();
    List<float> y_right = new List<float>();

    float disparity = 7;
    float x1 = 640 / 2;
    float x2 = x1 - disparity;
    int grid_dimension = 2000;
    float focal_length = 5;
    float sensor_pixels_per_mm = 100;
    float baseline = 100;

    stereoModel inverseSensorModel = new stereoModel();
    inverseSensorModel.image_width = 640;
    inverseSensorModel.image_height = 480;

    for (disparity = 15; disparity >= 15; disparity -= 5)
    {
        for (int example = 0; example < 640 / 40; example++)
        {
            x1 = example * 40;
            x2 = x1 - disparity;

            float distance = stereoModel.DisparityToDistance(disparity, focal_length, sensor_pixels_per_mm, baseline);

            float curr_x_start = 0;
            float curr_y_start = 0;
            float curr_x_end = 0;
            float curr_y_end = 0;
            float curr_x_left = 0;
            float curr_y_left = 0;
            float curr_x_right = 0;
            float curr_y_right = 0;

            inverseSensorModel.raysIntersection(
                x1, x2, grid_dimension, ray_uncertainty, distance,
                ref curr_x_start, ref curr_y_start,
                ref curr_x_end, ref curr_y_end,
                ref curr_x_left, ref curr_y_left,
                ref curr_x_right, ref curr_y_right);

            /*
            curr_y_start = -curr_y_start;
            curr_y_end = -curr_y_end;
            curr_y_left = -curr_y_left;
            curr_y_right = -curr_y_right;
            */

            x_start.Add(curr_x_start);
            y_start.Add(curr_y_start);
            x_end.Add(curr_x_end);
            y_end.Add(curr_y_end);
            x_left.Add(curr_x_left);
            y_left.Add(curr_y_left);
            x_right.Add(curr_x_right);
            y_right.Add(curr_y_right);

            if (curr_x_end < min_x) min_x = curr_x_end;
            if (curr_x_end > max_x) max_x = curr_x_end;
            if (curr_x_left < min_x) min_x = curr_x_left;
            if (curr_x_right > max_x) max_x = curr_x_right;
            if (curr_y_start < min_y) min_y = curr_y_start;
            if (curr_y_end > max_y) max_y = curr_y_end;

            Console.WriteLine("curr_y_start: " + curr_y_start.ToString());
        }
    }

    for (int i = 0; i < x_start.Count; i++)
    {
        float curr_x_start = (x_start[i] - min_x) * debug_img_width / (max_x - min_x);
        float curr_y_start = (y_start[i] - min_y) * debug_img_height / (max_y - min_y);
        float curr_x_end = (x_end[i] - min_x) * debug_img_width / (max_x - min_x);
        float curr_y_end = (y_end[i] - min_y) * debug_img_height / (max_y - min_y);
        float curr_x_left = (x_left[i] - min_x) * debug_img_width / (max_x - min_x);
        float curr_y_left = (y_left[i] - min_y) * debug_img_height / (max_y - min_y);
        float curr_x_right = (x_right[i] - min_x) * debug_img_width / (max_x - min_x);
        float curr_y_right = (y_right[i] - min_y) * debug_img_height / (max_y - min_y);

        curr_y_start = debug_img_height - 1 - curr_y_start;
        curr_y_end = debug_img_height - 1 - curr_y_end;
        curr_y_left = debug_img_height - 1 - curr_y_left;
        curr_y_right = debug_img_height - 1 - curr_y_right;

        //Console.WriteLine("max: " + max.ToString());

        drawing.drawLine(debug_img, debug_img_width, debug_img_height, (int)curr_x_start, (int)curr_y_start, (int)curr_x_left, (int)curr_y_left, 0, 0, 0, 0, false);
        drawing.drawLine(debug_img, debug_img_width, debug_img_height, (int)curr_x_end, (int)curr_y_end, (int)curr_x_left, (int)curr_y_left, 0, 0, 0, 0, false);
        drawing.drawLine(debug_img, debug_img_width, debug_img_height, (int)curr_x_end, (int)curr_y_end, (int)curr_x_right, (int)curr_y_right, 0, 0, 0, 0, false);
        drawing.drawLine(debug_img, debug_img_width, debug_img_height, (int)curr_x_start, (int)curr_y_start, (int)curr_x_right, (int)curr_y_right, 0, 0, 0, 0, false);
    }

    BitmapArrayConversions.updatebitmap_unsafe(debug_img, bmp);
    bmp.Save("tests_occupancygrid_simple_RaysIntersection.jpg", System.Drawing.Imaging.ImageFormat.Jpeg);
}
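// For reference, the distance used above is presumably the standard pinhole stereo relation:
// range = focal_length_pixels * baseline / disparity, with the focal length in pixels derived
// from the focal length in mm and the sensor resolution. The helper below is a hypothetical
// restatement of that relation (the authoritative version is stereoModel.DisparityToDistance),
// shown only to make the magnitudes in the test above easier to check by hand.
public static float DisparityToDistanceSketch(
    float disparity_pixels,
    float focal_length_mm,
    float sensor_pixels_per_mm,
    float baseline_mm)
{
    // focal length expressed in pixels
    float focal_length_pixels = focal_length_mm * sensor_pixels_per_mm;

    // range in the same units as the baseline (millimetres here)
    return (focal_length_pixels * baseline_mm / disparity_pixels);
}

// Example: focal length 5mm, 100 pixels/mm, baseline 100mm and disparity 15 pixels
// gives roughly 5 * 100 * 100 / 15 ≈ 3333mm.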
/// <summary> /// Update the current grid with new mapping rays loaded from the disparities file /// </summary> /// <param name="body_width_mm">width of the robot body in millimetres</param> /// <param name="body_length_mm">length of the robot body in millimetres</param> /// <param name="body_centre_of_rotation_x">x centre of rotation of the robot, relative to the top left corner</param> /// <param name="body_centre_of_rotation_y">y centre of rotation of the robot, relative to the top left corner</param> /// <param name="body_centre_of_rotation_z">z centre of rotation of the robot, relative to the top left corner</param> /// <param name="head_centroid_x">head centroid x position in millimetres relative to the top left corner of the body</param> /// <param name="head_centroid_y">head centroid y position in millimetres relative to the top left corner of the body</param> /// <param name="head_centroid_z">head centroid z position in millimetres relative to the top left corner of the body</param> /// <param name="baseline_mm">stereo camera baseline in millimetres</param> /// <param name="stereo_camera_position_x">stereo camera x position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_position_y">stereo camera y position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_position_z">stereo camera z position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_pan">stereo camera pan in radians relative to the head</param> /// <param name="stereo_camera_tilt">stereo camera tilt in radians relative to the head</param> /// <param name="stereo_camera_roll">stereo camera roll in radians relative to the head</param> /// <param name="image_width">image width for each stereo camera</param> /// <param name="image_height">image height for each stereo camera</param> /// <param name="FOV_degrees">field of view for each stereo camera in degrees</param> /// <param name="sensormodel">sensor model for each stereo camera</param> protected void UpdateMap( float body_width_mm, float body_length_mm, float body_centre_of_rotation_x, float body_centre_of_rotation_y, float body_centre_of_rotation_z, float head_centroid_x, float head_centroid_y, float head_centroid_z, float[] baseline_mm, float[] stereo_camera_position_x, float[] stereo_camera_position_y, float[] stereo_camera_position_z, float[] stereo_camera_pan, float[] stereo_camera_tilt, float[] stereo_camera_roll, int[] image_width, int[] image_height, float[] FOV_degrees, stereoModel[][] sensormodel) { const bool use_compression = false; if (disparities_file_open) { float[] stereo_features; byte[,] stereo_features_colour; float[] stereo_features_uncertainties; pos3D robot_pose = new pos3D(0,0,0); //int next_grid_index = current_grid_index + 1; //if (next_grid_index >= grid_centres.Count / 3) next_grid_index = (grid_centres.Count / 3) - 1; int next_grid_index = current_grid_index; //if (current_grid_index < 1) // next_grid_index = 1; int next_disparity_index = disparities_index[next_grid_index]; for (int i = current_disparity_index; i < next_disparity_index; i++) { long time_long = disparities_reader.ReadInt64(); robot_pose.x = disparities_reader.ReadSingle(); robot_pose.y = disparities_reader.ReadSingle(); robot_pose.pan = disparities_reader.ReadSingle(); float head_pan = disparities_reader.ReadSingle(); float head_tilt = disparities_reader.ReadSingle(); float head_roll = disparities_reader.ReadSingle(); int stereo_camera_index = disparities_reader.ReadInt32(); 
int features_count = disparities_reader.ReadInt32(); if (use_compression) { int features_bytes = disparities_reader.ReadInt32(); byte[] fb = new byte[features_bytes]; disparities_reader.Read(fb, 0, features_bytes); byte[] packed_stereo_features2 = AcedInflator.Instance.Decompress(fb, 0, 0, 0); stereo_features = ArrayConversions.ToFloatArray(packed_stereo_features2); } else { byte[] fb = new byte[features_count * 3 * 4]; disparities_reader.Read(fb, 0, fb.Length); stereo_features = ArrayConversions.ToFloatArray(fb); } byte[] packed_stereo_feature_colours = null; if (use_compression) { int colour_bytes = disparities_reader.ReadInt32(); byte[] cb = new byte[colour_bytes]; disparities_reader.Read(cb, 0, colour_bytes); packed_stereo_feature_colours = AcedInflator.Instance.Decompress(cb, 0, 0, 0); } else { packed_stereo_feature_colours = new byte[features_count * 3]; disparities_reader.Read(packed_stereo_feature_colours, 0, packed_stereo_feature_colours.Length); } // unpack stereo features int ctr = 0; stereo_features_colour = new byte[features_count,3]; stereo_features_uncertainties = new float[features_count]; for (int f = 0; f < features_count; f++) { stereo_features_uncertainties[f] = 1; stereo_features_colour[f, 0] = packed_stereo_feature_colours[ctr++]; stereo_features_colour[f, 1] = packed_stereo_feature_colours[ctr++]; stereo_features_colour[f, 2] = packed_stereo_feature_colours[ctr++]; } // insert the rays into the map Map(body_width_mm, body_length_mm, body_centre_of_rotation_x, body_centre_of_rotation_y, body_centre_of_rotation_z, head_centroid_x, head_centroid_y, head_centroid_z, head_pan, head_tilt, head_roll, stereo_camera_index, baseline_mm[stereo_camera_index], stereo_camera_position_x[stereo_camera_index], stereo_camera_position_y[stereo_camera_index], stereo_camera_position_z[stereo_camera_index], stereo_camera_pan[stereo_camera_index], stereo_camera_tilt[stereo_camera_index], stereo_camera_roll[stereo_camera_index], image_width[stereo_camera_index], image_height[stereo_camera_index], FOV_degrees[stereo_camera_index], stereo_features, stereo_features_colour, stereo_features_uncertainties, sensormodel, robot_pose); stereo_features = null; stereo_features_colour = null; stereo_features_uncertainties = null; } current_disparity_index = next_disparity_index; } else { Console.WriteLine("disparities file not open"); } }
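// For clarity, the uncompressed record format implied by the reads above is, in order:
// Int64 time stamp, float x, float y, float pan (robot pose), float head pan / tilt / roll,
// Int32 stereo camera index, Int32 feature count, then feature_count * 3 floats
// (x, y, disparity per feature) followed by feature_count * 3 bytes (r, g, b per feature).
// The writer below is a hypothetical sketch of producing one such record, assuming
// use_compression == false; it is not part of the original code.
static void WriteDisparitiesRecordSketch(
    System.IO.BinaryWriter writer,
    long time_stamp,
    float x_mm, float y_mm, float pan_radians,
    float head_pan, float head_tilt, float head_roll,
    int stereo_camera_index,
    float[] stereo_features,       // x, y, disparity triplets
    byte[] stereo_feature_colours) // r, g, b triplets
{
    int features_count = stereo_features.Length / 3;
    writer.Write(time_stamp);
    writer.Write(x_mm);
    writer.Write(y_mm);
    writer.Write(pan_radians);
    writer.Write(head_pan);
    writer.Write(head_tilt);
    writer.Write(head_roll);
    writer.Write(stereo_camera_index);
    writer.Write(features_count);
    for (int i = 0; i < stereo_features.Length; i++) writer.Write(stereo_features[i]);
    writer.Write(stereo_feature_colours, 0, features_count * 3);
}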
/// <summary> /// Mapping /// </summary> /// <param name="body_width_mm">width of the robot body in millimetres</param> /// <param name="body_length_mm">length of the robot body in millimetres</param> /// <param name="body_centre_of_rotation_x">x centre of rotation of the robot, relative to the top left corner</param> /// <param name="body_centre_of_rotation_y">y centre of rotation of the robot, relative to the top left corner</param> /// <param name="body_centre_of_rotation_z">z centre of rotation of the robot, relative to the top left corner</param> /// <param name="head_centroid_x">head centroid x position in millimetres relative to the top left corner of the body</param> /// <param name="head_centroid_y">head centroid y position in millimetres relative to the top left corner of the body</param> /// <param name="head_centroid_z">head centroid z position in millimetres relative to the top left corner of the body</param> /// <param name="head_pan">head pan angle in radians</param> /// <param name="head_tilt">head tilt angle in radians</param> /// <param name="head_roll">head roll angle in radians</param> /// <param name="baseline_mm">stereo camera baseline in millimetres</param> /// <param name="stereo_camera_position_x">stereo camera x position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_position_y">stereo camera y position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_position_z">stereo camera z position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_pan">stereo camera pan in radians relative to the head</param> /// <param name="stereo_camera_tilt">stereo camera tilt in radians relative to the head</param> /// <param name="stereo_camera_roll">stereo camera roll in radians relative to the head</param> /// <param name="image_width">image width for each stereo camera</param> /// <param name="image_height">image height for each stereo camera</param> /// <param name="FOV_degrees">field of view for each stereo camera in degrees</param> /// <param name="stereo_features">stereo features (disparities) for each stereo camera</param> /// <param name="stereo_features_colour">stereo feature colours for each stereo camera</param> /// <param name="stereo_features_uncertainties">stereo feature uncertainties (priors) for each stereo camera</param> /// <param name="sensormodel">sensor model for each stereo camera</param> /// <param name="robot_pose">current estimated position and orientation of the robots centre of rotation</param> protected void Map( float body_width_mm, float body_length_mm, float body_centre_of_rotation_x, float body_centre_of_rotation_y, float body_centre_of_rotation_z, float head_centroid_x, float head_centroid_y, float head_centroid_z, float head_pan, float head_tilt, float head_roll, int stereo_camera_index, float baseline_mm, float stereo_camera_position_x, float stereo_camera_position_y, float stereo_camera_position_z, float stereo_camera_pan, float stereo_camera_tilt, float stereo_camera_roll, int image_width, int image_height, float FOV_degrees, float[] stereo_features, byte[,] stereo_features_colour, float[] stereo_features_uncertainties, stereoModel[][] sensormodel, pos3D robot_pose) { Parallel.For(0, 2, delegate(int i) { pos3D left_camera_location = null; pos3D right_camera_location = null; buffer[i].Map( body_width_mm, body_length_mm, body_centre_of_rotation_x, body_centre_of_rotation_y, body_centre_of_rotation_z, head_centroid_x, head_centroid_y, 
head_centroid_z, head_pan, head_tilt, head_roll, stereo_camera_index, baseline_mm, stereo_camera_position_x, stereo_camera_position_y, stereo_camera_position_z, stereo_camera_pan, stereo_camera_tilt, stereo_camera_roll, image_width, image_height, FOV_degrees, stereo_features, stereo_features_colour, stereo_features_uncertainties, sensormodel, ref left_camera_location, ref right_camera_location, robot_pose); }); }
/// <summary> /// Localisation /// </summary> /// <param name="body_width_mm">width of the robot body in millimetres</param> /// <param name="body_length_mm">length of the robot body in millimetres</param> /// <param name="body_centre_of_rotation_x">x centre of rotation of the robot, relative to the top left corner</param> /// <param name="body_centre_of_rotation_y">y centre of rotation of the robot, relative to the top left corner</param> /// <param name="body_centre_of_rotation_z">z centre of rotation of the robot, relative to the top left corner</param> /// <param name="head_centroid_x">head centroid x position in millimetres relative to the top left corner of the body</param> /// <param name="head_centroid_y">head centroid y position in millimetres relative to the top left corner of the body</param> /// <param name="head_centroid_z">head centroid z position in millimetres relative to the top left corner of the body</param> /// <param name="head_pan">head pan angle in radians</param> /// <param name="head_tilt">head tilt angle in radians</param> /// <param name="head_roll">head roll angle in radians</param> /// <param name="baseline_mm">stereo camera baseline in millimetres</param> /// <param name="stereo_camera_position_x">stereo camera x position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_position_y">stereo camera y position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_position_z">stereo camera z position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_pan">stereo camera pan in radians relative to the head</param> /// <param name="stereo_camera_tilt">stereo camera tilt in radians relative to the head</param> /// <param name="stereo_camera_roll">stereo camera roll in radians relative to the head</param> /// <param name="image_width">image width for each stereo camera</param> /// <param name="image_height">image height for each stereo camera</param> /// <param name="FOV_degrees">field of view for each stereo camera in degrees</param> /// <param name="stereo_features">stereo features (disparities) for each stereo camera</param> /// <param name="stereo_features_colour">stereo feature colours for each stereo camera</param> /// <param name="stereo_features_uncertainties">stereo feature uncertainties (priors) for each stereo camera</param> /// <param name="sensormodel">sensor model for each stereo camera</param> /// <param name="left_camera_location">returned position and orientation of the left camera on each stereo camera</param> /// <param name="right_camera_location">returned position and orientation of the right camera on each stereo camera</param> /// <param name="no_of_samples">number of sample poses</param> /// <param name="sampling_radius_major_mm">major radius for samples, in the direction of robot movement</param> /// <param name="sampling_radius_minor_mm">minor radius for samples, perpendicular to the direction of robot movement</param> /// <param name="robot_pose">current estimated position and orientation of the robots centre of rotation, for each stereo camera observation</param> /// <param name="max_orientation_variance">maximum variance in orientation in radians, used to create sample poses</param> /// <param name="max_tilt_variance">maximum variance in tilt angle in radians, used to create sample poses</param> /// <param name="max_roll_variance">maximum variance in roll angle in radians, used to create sample poses</param> /// <param name="poses">list of poses 
tried</param> /// <param name="pose_score">list of pose matching scores</param> /// <param name="pose_offset">offset of the best pose from the current one</param> /// <param name="rnd">random number generator</param> /// <param name="buffer_transition">have we transitioned to the next grid buffer?</param> /// <param name="overall_map_filename">filename to save an overall map of the path</param> /// <param name="overall_map_img">overall map image data</param> /// <param name="overall_map_dimension_mm">dimension of the overall map in millimetres</param> /// <param name="overall_map_centre_x_mm">centre x position of the overall map in millimetres</param> /// <param name="overall_map_centre_y_mm">centre x position of the overall map in millimetres</param> /// <returns>best localisation matching score</returns> protected float Localise( float body_width_mm, float body_length_mm, float body_centre_of_rotation_x, float body_centre_of_rotation_y, float body_centre_of_rotation_z, float head_centroid_x, float head_centroid_y, float head_centroid_z, float head_pan, float head_tilt, float head_roll, float[] baseline_mm, float[] stereo_camera_position_x, float[] stereo_camera_position_y, float[] stereo_camera_position_z, float[] stereo_camera_pan, float[] stereo_camera_tilt, float[] stereo_camera_roll, int[] image_width, int[] image_height, float[] FOV_degrees, float[][] stereo_features, byte[][,] stereo_features_colour, float[][] stereo_features_uncertainties, stereoModel[][] sensormodel, ref pos3D[] left_camera_location, ref pos3D[] right_camera_location, int no_of_samples, float sampling_radius_major_mm, float sampling_radius_minor_mm, pos3D[] robot_pose, float max_orientation_variance, float max_tilt_variance, float max_roll_variance, List<pos3D> poses, List<float> pose_score, Random rnd, ref pos3D pose_offset, ref bool buffer_transition, string debug_mapping_filename, float known_best_pose_x_mm, float known_best_pose_y_mm, string overall_map_filename, ref byte[] overall_map_img, int overall_img_width, int overall_img_height, int overall_map_dimension_mm, int overall_map_centre_x_mm, int overall_map_centre_y_mm) { bool update_map = false; // move to the next grid buffer_transition = MoveToNextLocalGrid( ref current_grid_index, ref current_disparity_index, robot_pose[0], buffer, ref current_buffer_index, grid_centres, ref update_map, debug_mapping_filename, overall_map_filename, ref overall_map_img, overall_img_width, overall_img_height, overall_map_dimension_mm, overall_map_centre_x_mm, overall_map_centre_y_mm); // create the map if necessary if (update_map) { UpdateMap( body_width_mm, body_length_mm, body_centre_of_rotation_x, body_centre_of_rotation_y, body_centre_of_rotation_z, head_centroid_x, head_centroid_y, head_centroid_z, baseline_mm, stereo_camera_position_x, stereo_camera_position_y, stereo_camera_position_z, stereo_camera_pan, stereo_camera_tilt, stereo_camera_roll, image_width, image_height, FOV_degrees, sensormodel); } int img_poses_width = 640; int img_poses_height = 480; byte[] img_poses = null; if ((debug_mapping_filename != null) && (debug_mapping_filename != "")) { img_poses = new byte[img_poses_width * img_poses_height * 3]; } // localise within the currently active grid float matching_score = buffer[current_buffer_index].Localise( body_width_mm, body_length_mm, body_centre_of_rotation_x, body_centre_of_rotation_y, body_centre_of_rotation_z, head_centroid_x, head_centroid_y, head_centroid_z, head_pan, head_tilt, head_roll, baseline_mm, stereo_camera_position_x, 
stereo_camera_position_y, stereo_camera_position_z, stereo_camera_pan, stereo_camera_tilt, stereo_camera_roll, image_width, image_height, FOV_degrees, stereo_features, stereo_features_colour, stereo_features_uncertainties, sensormodel, ref left_camera_location, ref right_camera_location, no_of_samples, sampling_radius_major_mm, sampling_radius_minor_mm, robot_pose, max_orientation_variance, max_tilt_variance, max_roll_variance, poses, pose_score, rnd, ref pose_offset, img_poses, img_poses_width, img_poses_height, known_best_pose_x_mm, known_best_pose_y_mm); if (matching_score != occupancygridBase.NO_OCCUPANCY_EVIDENCE) { // add this to the list of localisations localisations.Add(robot_pose[0].x + pose_offset.x); localisations.Add(robot_pose[0].y + pose_offset.y); localisations.Add(robot_pose[0].z + pose_offset.z); localisations.Add(pose_offset.pan); localisations.Add(matching_score); if ((debug_mapping_filename != null) && (debug_mapping_filename != "")) { string[] str = debug_mapping_filename.Split('.'); string poses_filename = str[0] + "_gridcells.gif"; Bitmap poses_bmp = new Bitmap(img_poses_width, img_poses_height, System.Drawing.Imaging.PixelFormat.Format24bppRgb); BitmapArrayConversions.updatebitmap_unsafe(img_poses, poses_bmp); poses_bmp.Save(poses_filename, System.Drawing.Imaging.ImageFormat.Gif); } } return(matching_score); }
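// Downstream, the returned pose_offset is intended as a course correction relative to the
// current pose estimate. The helper below is a hypothetical illustration of applying it,
// assuming pos3D exposes the x, y, z and pan fields used elsewhere in this class; it is not
// part of the original code.
private static void ApplyPoseOffsetSketch(pos3D current_pose, pos3D pose_offset)
{
    // shift the estimated position by the localisation offset
    current_pose.x += pose_offset.x;
    current_pose.y += pose_offset.y;
    current_pose.z += pose_offset.z;

    // and correct the heading by the relative orientation of the best matching pose
    current_pose.pan += pose_offset.pan;
}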
/// <summary>
/// parse an xml node to extract sensor model parameters
/// </summary>
/// <param name="xnod">xml node to parse</param>
/// <param name="level">recursion level</param>
/// <param name="no_of_stereo_cameras">returned number of stereo cameras</param>
/// <param name="no_of_grid_levels">returned number of grid levels</param>
/// <param name="camera_index">index of the stereo camera currently being loaded</param>
/// <param name="grid_level">grid level currently being loaded</param>
public void LoadFromXmlSensorModels(
    XmlNode xnod, int level,
    ref int no_of_stereo_cameras,
    ref int no_of_grid_levels,
    ref int camera_index,
    ref int grid_level)
{
    XmlNode xnodWorking;

    if (xnod.Name == "NoOfStereoCameras")
    {
        no_of_stereo_cameras = Convert.ToInt32(xnod.InnerText);
    }

    if (xnod.Name == "NoOfGridLevels")
    {
        no_of_grid_levels = Convert.ToInt32(xnod.InnerText);
        camera_index = 0;
        grid_level = -1;
        sensormodel = new stereoModel[no_of_stereo_cameras][];
        for (int stereo_cam = 0; stereo_cam < no_of_stereo_cameras; stereo_cam++)
        {
            sensormodel[stereo_cam] = new stereoModel[no_of_grid_levels];
            for (int size = 0; size < no_of_grid_levels; size++)
            {
                sensormodel[stereo_cam][size] = new stereoModel();
                sensormodel[stereo_cam][size].ray_model = new rayModelLookup(1, 1);
            }
        }
    }

    if (xnod.Name == "Model")
    {
        grid_level++;
        if (grid_level >= no_of_grid_levels)
        {
            grid_level = 0;
            camera_index++;
        }
        List<string> rayModelsData = new List<string>();
        sensormodel[camera_index][grid_level].ray_model.LoadFromXml(xnod, level + 1, rayModelsData);
        sensormodel[camera_index][grid_level].ray_model.LoadSensorModelData(rayModelsData);
        if (rayModelsData.Count == 0)
        {
            Console.WriteLine("Warning: ray models not loaded");
        }
    }

    // call recursively on all children of the current node
    if (xnod.HasChildNodes)
    {
        xnodWorking = xnod.FirstChild;
        while (xnodWorking != null)
        {
            LoadFromXmlSensorModels(
                xnodWorking, level + 1,
                ref no_of_stereo_cameras,
                ref no_of_grid_levels,
                ref camera_index,
                ref grid_level);
            xnodWorking = xnodWorking.NextSibling;
        }
    }
}
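// The recursive parser above implies an XML layout along the following lines. The element
// names are taken from the checks in the code; the enclosing element name and the contents
// of each <Model> node are assumptions, since those are handled by rayModelLookup.LoadFromXml:
//
//   <SensorModels>
//       <NoOfStereoCameras>2</NoOfStereoCameras>
//       <NoOfGridLevels>2</NoOfGridLevels>
//       <Model> ... ray model data for camera 0, grid level 0 ... </Model>
//       <Model> ... ray model data for camera 0, grid level 1 ... </Model>
//       <Model> ... ray model data for camera 1, grid level 0 ... </Model>
//       <Model> ... ray model data for camera 1, grid level 1 ... </Model>
//   </SensorModels>
//
// with the grid level varying fastest, because grid_level is incremented on each <Model>
// node and wraps back to zero as camera_index advances.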
/// <summary>
/// initialise with the given number of stereo cameras
/// </summary>
/// <param name="no_of_stereo_cameras">the number of stereo cameras on the robot (not the total number of cameras)</param>
/// <param name="rays_per_stereo_camera">the number of rays which will be thrown from each stereo camera per time step</param>
/// <param name="mapping_type">the type of mapping to be used</param>
private void init(
    int no_of_stereo_cameras,
    int rays_per_stereo_camera,
    int mapping_type)
{
    this.no_of_stereo_cameras = no_of_stereo_cameras;
    this.mapping_type = mapping_type;

    // head and shoulders
    head = new stereoHead(no_of_stereo_cameras);

    // sensor model used for mapping
    inverseSensorModel = new stereoModel();

    // set the number of stereo features to be detected and inserted into the grid
    // on each time step. This figure should be large enough to get reasonable
    // detail, but not so large that the mapping consumes a huge amount of
    // processing resource
    inverseSensorModel.no_of_stereo_features = rays_per_stereo_camera;

    correspondence = new stereoCorrespondence[no_of_stereo_cameras];
    for (int i = 0; i < no_of_stereo_cameras; i++)
        correspondence[i] = new stereoCorrespondence(inverseSensorModel.no_of_stereo_features);

    if (mapping_type == MAPPING_DPSLAM)
    {
        // add local occupancy grids
        LocalGrid = new occupancygridMultiHypothesis[mapping_threads];
        for (int i = 0; i < mapping_threads; i++)
            createLocalGrid(i);

        // create a motion model for each possible grid
        motion = new motionModel[mapping_threads];
        for (int i = 0; i < mapping_threads; i++)
            motion[i] = new motionModel(this, LocalGrid[i], 100 * (i + 1));
    }

    if (mapping_type == MAPPING_SIMPLE)
    {
    }

    // a list of places where the robot might work or make observations
    worksites = new kmlZone();

    // zero encoder positions
    prev_left_wheel_encoder = 0;
    prev_right_wheel_encoder = 0;
}
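// As a rough sizing note (illustrative only, not taken from the original code): since
// rays_per_stereo_camera is the number of rays thrown from each stereo camera per time step,
// each mapping update inserts on the order of no_of_stereo_cameras * rays_per_stereo_camera
// rays into the grid, which is the quantity the comment above asks to keep large enough for
// detail but small enough to limit processing cost.
static int RaysPerTimeStepSketch(int no_of_stereo_cameras, int rays_per_stereo_camera)
{
    // total rays thrown into the occupancy grid on one time step
    return no_of_stereo_cameras * rays_per_stereo_camera;
}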
private void gaussianFunctionToolStripMenuItem_Click(object sender, EventArgs e)
{
    grid_layer = new float[grid_dimension, grid_dimension, 3];
    pos3D_x = new float[4];
    pos3D_y = new float[4];
    stereo_model = new stereoModel();
    robot_head = new stereoHead(4);
    stereo_features = new float[900];
    stereo_uncertainties = new float[900];
    img_rays = new byte[standard_width * standard_height * 3];
    rays = new Bitmap(standard_width, standard_height, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    picRays.Image = rays;

    stereo_model.showDistribution(img_rays, standard_width, standard_height);

    BitmapArrayConversions.updatebitmap_unsafe(img_rays, (Bitmap)picRays.Image);
}
private void multipleStereoRaysToolStripMenuItem_Click(object sender, EventArgs e)
{
    grid_layer = new float[grid_dimension, grid_dimension, 3];
    pos3D_x = new float[4];
    pos3D_y = new float[4];
    stereo_model = new stereoModel();
    robot_head = new stereoHead(4);
    stereo_features = new float[900];
    stereo_uncertainties = new float[900];
    img_rays = new byte[standard_width * standard_height * 3];
    rays = new Bitmap(standard_width, standard_height, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    picRays.Image = rays;

    bool mirror = false;
    stereo_model.showProbabilities(
        grid_layer, grid_dimension,
        img_rays, standard_width, standard_height,
        false, true, mirror);

    BitmapArrayConversions.updatebitmap_unsafe(img_rays, (Bitmap)picRays.Image);
}
/// <summary> /// Localisation /// </summary> /// <param name="body_width_mm">width of the robot body in millimetres</param> /// <param name="body_length_mm">length of the robot body in millimetres</param> /// <param name="body_centre_of_rotation_x">x centre of rotation of the robot, relative to the top left corner</param> /// <param name="body_centre_of_rotation_y">y centre of rotation of the robot, relative to the top left corner</param> /// <param name="body_centre_of_rotation_z">z centre of rotation of the robot, relative to the top left corner</param> /// <param name="head_centroid_x">head centroid x position in millimetres relative to the top left corner of the body</param> /// <param name="head_centroid_y">head centroid y position in millimetres relative to the top left corner of the body</param> /// <param name="head_centroid_z">head centroid z position in millimetres relative to the top left corner of the body</param> /// <param name="head_pan">head pan angle in radians</param> /// <param name="head_tilt">head tilt angle in radians</param> /// <param name="head_roll">head roll angle in radians</param> /// <param name="baseline_mm">stereo camera baseline in millimetres</param> /// <param name="stereo_camera_position_x">stereo camera x position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_position_y">stereo camera y position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_position_z">stereo camera z position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_pan">stereo camera pan in radians relative to the head</param> /// <param name="stereo_camera_tilt">stereo camera tilt in radians relative to the head</param> /// <param name="stereo_camera_roll">stereo camera roll in radians relative to the head</param> /// <param name="image_width">image width for each stereo camera</param> /// <param name="image_height">image height for each stereo camera</param> /// <param name="FOV_degrees">field of view for each stereo camera in degrees</param> /// <param name="stereo_features">stereo features (disparities) for each stereo camera</param> /// <param name="stereo_features_colour">stereo feature colours for each stereo camera</param> /// <param name="stereo_features_uncertainties">stereo feature uncertainties (priors) for each stereo camera</param> /// <param name="sensormodel">sensor model for each stereo camera</param> /// <param name="left_camera_location">returned position and orientation of the left camera on each stereo camera</param> /// <param name="right_camera_location">returned position and orientation of the right camera on each stereo camera</param> /// <param name="no_of_samples">number of sample poses</param> /// <param name="sampling_radius_major_mm">major radius for samples, in the direction of robot movement</param> /// <param name="sampling_radius_minor_mm">minor radius for samples, perpendicular to the direction of robot movement</param> /// <param name="robot_pose">current estimated position and orientation of the robots centre of rotation, from which each stereo camera took its observations</param> /// <param name="max_orientation_variance">maximum variance in orientation in radians, used to create sample poses</param> /// <param name="max_tilt_variance">maximum variance in tilt angle in radians, used to create sample poses</param> /// <param name="max_roll_variance">maximum variance in roll angle in radians, used to create sample poses</param> /// <param 
name="poses">list of poses tried</param> /// <param name="pose_score">list of pose matching scores</param> /// <param name="pose_offset">offset of the best pose from the current one</param> /// <param name="rnd">random number generator</param> /// <param name="pose_offset">returned pose offset</param> /// <param name="img_poses">optional image within which to show poses</param> /// <param name="img_poses_width">width of the poses image</param> /// <param name="img_poses_height">height of the poses image</param> /// <returns>best localisation matching score</returns> public float Localise( float body_width_mm, float body_length_mm, float body_centre_of_rotation_x, float body_centre_of_rotation_y, float body_centre_of_rotation_z, float head_centroid_x, float head_centroid_y, float head_centroid_z, float head_pan, float head_tilt, float head_roll, float[] baseline_mm, float[] stereo_camera_position_x, float[] stereo_camera_position_y, float[] stereo_camera_position_z, float[] stereo_camera_pan, float[] stereo_camera_tilt, float[] stereo_camera_roll, int[] image_width, int[] image_height, float[] FOV_degrees, float[][] stereo_features, byte[][,] stereo_features_colour, float[][] stereo_features_uncertainties, stereoModel[][] sensormodel, ref pos3D[] left_camera_location, ref pos3D[] right_camera_location, int no_of_samples, float sampling_radius_major_mm, float sampling_radius_minor_mm, pos3D[] robot_pose, float max_orientation_variance, float max_tilt_variance, float max_roll_variance, List<pos3D> poses, List<float> pose_score, Random rnd, ref pos3D pose_offset, byte[] img_poses, int img_poses_width, int img_poses_height, float known_best_pose_x_mm, float known_best_pose_y_mm) { float best_matching_score = float.MinValue; poses.Clear(); pose_score.Clear(); gridCells.CreateMoireGrid( sampling_radius_major_mm, sampling_radius_minor_mm, no_of_samples, robot_pose[0].pan, robot_pose[0].tilt, robot_pose[0].roll, max_orientation_variance, max_tilt_variance, max_roll_variance, rnd, ref poses, null, null, 0, 0); // positions of the left and right camera relative to the robots centre of rotation pos3D head_location = new pos3D(0, 0, 0); left_camera_location = new pos3D[baseline_mm.Length]; right_camera_location = new pos3D[baseline_mm.Length]; pos3D[] camera_centre_location = new pos3D[baseline_mm.Length]; pos3D[] relative_left_cam = new pos3D[baseline_mm.Length]; pos3D[] relative_right_cam = new pos3D[baseline_mm.Length]; for (int cam = 0; cam < baseline_mm.Length; cam++) { occupancygridBase.calculateCameraPositions( body_width_mm, body_length_mm, body_centre_of_rotation_x, body_centre_of_rotation_y, body_centre_of_rotation_z, head_centroid_x, head_centroid_y, head_centroid_z, head_pan, head_tilt, head_roll, baseline_mm[cam], stereo_camera_position_x[cam], stereo_camera_position_y[cam], stereo_camera_position_z[cam], stereo_camera_pan[cam], stereo_camera_tilt[cam], stereo_camera_roll[cam], ref head_location, ref camera_centre_location[cam], ref relative_left_cam[cam], ref relative_right_cam[cam]); left_camera_location[cam] = relative_left_cam[cam].translate(robot_pose[cam].x, robot_pose[cam].y, robot_pose[cam].z); right_camera_location[cam] = relative_right_cam[cam].translate(robot_pose[cam].x, robot_pose[cam].y, robot_pose[cam].z); } pose_score.Clear(); for (int p = 0; p < poses.Count; p++) { pose_score.Add(0); } int no_of_zero_probabilities = 0; // try a number of random poses // we can do this in parallel Parallel.For(0, poses.Count, delegate(int i) //for (int i = 0; i < poses.Count; i++) { pos3D 
sample_pose = poses[i]; float matching_score = 0; for (int cam = 0; cam < baseline_mm.Length; cam++) { // update the position of the left camera for this pose pos3D sample_pose_left_cam = relative_left_cam[cam].add(sample_pose); sample_pose_left_cam.pan = 0; sample_pose_left_cam.tilt = 0; sample_pose_left_cam.roll = 0; sample_pose_left_cam = sample_pose_left_cam.rotate(sample_pose.pan, sample_pose.tilt, sample_pose.roll); sample_pose_left_cam.x += robot_pose[cam].x; sample_pose_left_cam.y += robot_pose[cam].y; sample_pose_left_cam.z += robot_pose[cam].z; // update the position of the right camera for this pose pos3D sample_pose_right_cam = relative_right_cam[cam].add(sample_pose); sample_pose_right_cam.pan = 0; sample_pose_right_cam.tilt = 0; sample_pose_right_cam.roll = 0; sample_pose_right_cam = sample_pose_right_cam.rotate(sample_pose.pan, sample_pose.tilt, sample_pose.roll); sample_pose_right_cam.x += robot_pose[cam].x; sample_pose_right_cam.y += robot_pose[cam].y; sample_pose_right_cam.z += robot_pose[cam].z; // centre position between the left and right cameras pos3D stereo_camera_centre = new pos3D( sample_pose_left_cam.x + ((sample_pose_right_cam.x - sample_pose_left_cam.x) * 0.5f), sample_pose_left_cam.y + ((sample_pose_right_cam.y - sample_pose_left_cam.y) * 0.5f), sample_pose_left_cam.z + ((sample_pose_right_cam.z - sample_pose_left_cam.z) * 0.5f)); stereo_camera_centre.pan = head_pan + stereo_camera_pan[cam] + sample_pose.pan; stereo_camera_centre.tilt = head_tilt + stereo_camera_tilt[cam] + sample_pose.tilt; stereo_camera_centre.roll = head_roll + stereo_camera_roll[cam] + sample_pose.roll; // create a set of stereo rays as observed from this pose List<evidenceRay> rays = sensormodel[cam][0].createObservation( stereo_camera_centre, baseline_mm[cam], image_width[cam], image_height[cam], FOV_degrees[cam], stereo_features[cam], stereo_features_colour[cam], stereo_features_uncertainties[cam], true); // insert rays into the occupancy grid for (int r = 0; r < rays.Count; r++) { float score = Insert(rays[r], sensormodel[cam], sample_pose_left_cam, sample_pose_right_cam, true); if (score != occupancygridBase.NO_OCCUPANCY_EVIDENCE) { if (matching_score == occupancygridBase.NO_OCCUPANCY_EVIDENCE) matching_score = 0; matching_score += score; } } } // add the pose to the list sample_pose.pan -= robot_pose[0].pan; sample_pose.tilt -= robot_pose[0].tilt; sample_pose.roll -= robot_pose[0].roll; if (Math.Abs(sample_pose.pan) > max_orientation_variance) { Console.WriteLine("Pose variance out of range"); } if (matching_score != occupancygridBase.NO_OCCUPANCY_EVIDENCE) { float prob = probabilities.LogOddsToProbability(matching_score); if (prob == 0) no_of_zero_probabilities++; pose_score[i] = prob; //pose_score[i] = matching_score; } } ); if (no_of_zero_probabilities == poses.Count) { Console.WriteLine("Localisation failure"); pose_offset = new pos3D(0, 0, 0); best_matching_score = occupancygridBase.NO_OCCUPANCY_EVIDENCE; } else { // locate the best possible pose pos3D best_robot_pose = new pos3D(0, 0, 0); gridCells.FindBestPose(poses, pose_score, ref best_robot_pose, sampling_radius_major_mm, img_poses, null, img_poses_width, img_poses_height, known_best_pose_x_mm, known_best_pose_y_mm); // rotate the best pose to the robot's current orientation // this becomes an offset, which may be used for course correction pose_offset = best_robot_pose.rotate(robot_pose[0].pan, robot_pose[0].tilt, robot_pose[0].roll); // orientation relative to the current heading pose_offset.pan = best_robot_pose.pan; 
pose_offset.tilt = best_robot_pose.tilt; pose_offset.roll = best_robot_pose.roll; // range checks if (Math.Abs(pose_offset.pan) > max_orientation_variance) Console.WriteLine("pose_offset pan out of range"); if (Math.Abs(pose_offset.tilt) > max_tilt_variance) Console.WriteLine("pose_offset tilt out of range"); if (Math.Abs(pose_offset.roll) > max_roll_variance) Console.WriteLine("pose_offset roll out of range"); } return (best_matching_score); }
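// The pose scores above are converted from accumulated log odds to a probability before the
// best pose is chosen. Assuming the standard definition of log odds, L = ln(p / (1 - p)),
// the conversion performed by probabilities.LogOddsToProbability is equivalent to the sketch
// below; it is shown for reference only, the authoritative implementation lives in that class.
static float LogOddsToProbabilitySketch(float log_odds)
{
    // inverse of L = ln(p / (1 - p))
    return 1.0f - (1.0f / (1.0f + (float)Math.Exp(log_odds)));
}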
/// <summary> /// Mapping /// </summary> /// <param name="body_width_mm">width of the robot body in millimetres</param> /// <param name="body_length_mm">length of the robot body in millimetres</param> /// <param name="body_centre_of_rotation_x">x centre of rotation of the robot, relative to the top left corner</param> /// <param name="body_centre_of_rotation_y">y centre of rotation of the robot, relative to the top left corner</param> /// <param name="body_centre_of_rotation_z">z centre of rotation of the robot, relative to the top left corner</param> /// <param name="head_centroid_x">head centroid x position in millimetres relative to the top left corner of the body</param> /// <param name="head_centroid_y">head centroid y position in millimetres relative to the top left corner of the body</param> /// <param name="head_centroid_z">head centroid z position in millimetres relative to the top left corner of the body</param> /// <param name="head_pan">head pan angle in radians</param> /// <param name="head_tilt">head tilt angle in radians</param> /// <param name="head_roll">head roll angle in radians</param> /// <param name="baseline_mm">stereo camera baseline in millimetres</param> /// <param name="stereo_camera_position_x">stereo camera x position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_position_y">stereo camera y position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_position_z">stereo camera z position in millimetres relative to the head centroid</param> /// <param name="stereo_camera_pan">stereo camera pan in radians relative to the head</param> /// <param name="stereo_camera_tilt">stereo camera tilt in radians relative to the head</param> /// <param name="stereo_camera_roll">stereo camera roll in radians relative to the head</param> /// <param name="image_width">image width for each stereo camera</param> /// <param name="image_height">image height for each stereo camera</param> /// <param name="FOV_degrees">field of view for each stereo camera in degrees</param> /// <param name="stereo_features">stereo features (disparities) for each stereo camera</param> /// <param name="stereo_features_colour">stereo feature colours for each stereo camera</param> /// <param name="stereo_features_uncertainties">stereo feature uncertainties (priors) for each stereo camera</param> /// <param name="sensormodel">sensor model for each grid level</param> /// <param name="left_camera_location">returned position and orientation of the left camera on each stereo camera</param> /// <param name="right_camera_location">returned position and orientation of the right camera on each stereo camera</param> /// <param name="robot_pose">current estimated position and orientation of the robots centre of rotation</param> public void Map( float body_width_mm, float body_length_mm, float body_centre_of_rotation_x, float body_centre_of_rotation_y, float body_centre_of_rotation_z, float head_centroid_x, float head_centroid_y, float head_centroid_z, float head_pan, float head_tilt, float head_roll, int stereo_camera_index, float baseline_mm, float stereo_camera_position_x, float stereo_camera_position_y, float stereo_camera_position_z, float stereo_camera_pan, float stereo_camera_tilt, float stereo_camera_roll, int image_width, int image_height, float FOV_degrees, float[] stereo_features, byte[,] stereo_features_colour, float[] stereo_features_uncertainties, stereoModel[][] sensormodel, ref pos3D left_camera_location, ref pos3D 
right_camera_location, pos3D robot_pose) { // positions of the left and right camera relative to the robots centre of rotation pos3D head_location = new pos3D(0,0,0); pos3D camera_centre_location = null; pos3D relative_left_cam = null; pos3D relative_right_cam = null; occupancygridBase.calculateCameraPositions( body_width_mm, body_length_mm, body_centre_of_rotation_x, body_centre_of_rotation_y, body_centre_of_rotation_z, head_centroid_x, head_centroid_y, head_centroid_z, head_pan, head_tilt, head_roll, baseline_mm, stereo_camera_position_x, stereo_camera_position_y, stereo_camera_position_z, stereo_camera_pan, stereo_camera_tilt, stereo_camera_roll, ref head_location, ref camera_centre_location, ref relative_left_cam, ref relative_right_cam); left_camera_location = relative_left_cam.translate(robot_pose.x, robot_pose.y, robot_pose.z); right_camera_location = relative_right_cam.translate(robot_pose.x, robot_pose.y, robot_pose.z); pos3D stereo_camera_centre = new pos3D(0, 0, 0); // update the grid // centre position between the left and right cameras stereo_camera_centre.x = left_camera_location.x + ((right_camera_location.x - left_camera_location.x) * 0.5f); stereo_camera_centre.y = left_camera_location.y + ((right_camera_location.y - left_camera_location.y) * 0.5f); stereo_camera_centre.z = left_camera_location.z + ((right_camera_location.z - left_camera_location.z) * 0.5f); stereo_camera_centre.pan = robot_pose.pan + head_pan + stereo_camera_pan; stereo_camera_centre.tilt = robot_pose.tilt + head_tilt + stereo_camera_tilt; stereo_camera_centre.roll = robot_pose.roll + head_roll + stereo_camera_roll; // create a set of stereo rays as observed from this pose List<evidenceRay> rays = sensormodel[stereo_camera_index][0].createObservation( stereo_camera_centre, baseline_mm, image_width, image_height, FOV_degrees, stereo_features, stereo_features_colour, stereo_features_uncertainties, true); // insert rays into the occupancy grid for (int r = 0; r < rays.Count; r++) { Insert(rays[r], sensormodel[stereo_camera_index], left_camera_location, right_camera_location, false); } }
/// <summary>
/// insert a stereo ray into the grid
/// </summary>
/// <param name="ray">stereo ray</param>
/// <param name="sensormodel">sensor model for each grid level</param>
/// <param name="left_camera_location">left stereo camera position and orientation</param>
/// <param name="right_camera_location">right stereo camera position and orientation</param>
/// <param name="localiseOnly">whether we are mapping or localising</param>
/// <returns>localisation matching score</returns>
public float Insert(
    evidenceRay ray,
    stereoModel[] sensormodel,
    pos3D left_camera_location,
    pos3D right_camera_location,
    bool localiseOnly)
{
    float matchingScore = occupancygridBase.NO_OCCUPANCY_EVIDENCE;

    switch (grid_type)
    {
        case TYPE_SIMPLE:
        {
            for (int grid_level = 0; grid_level < grid.Length; grid_level++)
            {
                rayModelLookup sensormodel_lookup = sensormodel[grid_level].ray_model;
                occupancygridSimple grd = (occupancygridSimple)grid[grid_level];
                float score = grd.Insert(ray, sensormodel_lookup, left_camera_location, right_camera_location, localiseOnly);
                if (score != occupancygridBase.NO_OCCUPANCY_EVIDENCE)
                {
                    if (matchingScore == occupancygridBase.NO_OCCUPANCY_EVIDENCE)
                        matchingScore = 0;
                    matchingScore += grid_weight[grid_level] * score;
                }
            }
            break;
        }
    }
    return (matchingScore);
}
/// <summary>
/// insert a stereo ray into the grid
/// </summary>
/// <param name="ray">stereo ray</param>
/// <param name="origin">pose from which this observation was made</param>
/// <param name="sensormodel">sensor model for each grid level</param>
/// <param name="left_camera_location">left stereo camera position and orientation</param>
/// <param name="right_camera_location">right stereo camera position and orientation</param>
/// <param name="localiseOnly">whether we are mapping or localising</param>
/// <returns>localisation matching score</returns>
public float Insert(
    evidenceRay ray,
    particlePose origin,
    stereoModel[] sensormodel,
    pos3D left_camera_location,
    pos3D right_camera_location,
    bool localiseOnly)
{
    float matchingScore = 0;

    switch (grid_type)
    {
        case TYPE_MULTI_HYPOTHESIS:
        {
            for (int grid_level = 0; grid_level < grid.Length; grid_level++)
            {
                rayModelLookup sensormodel_lookup = sensormodel[grid_level].ray_model;
                occupancygridMultiHypothesis grd = (occupancygridMultiHypothesis)grid[grid_level];
                matchingScore += grid_weight[grid_level] * grd.Insert(ray, origin, sensormodel_lookup, left_camera_location, right_camera_location, localiseOnly);
            }
            break;
        }
    }
    return (matchingScore);
}
public void EvidenceRayRotation()
{
    int debug_img_width = 640;
    int debug_img_height = 480;
    byte[] debug_img = new byte[debug_img_width * debug_img_height * 3];
    for (int i = (debug_img_width * debug_img_height * 3) - 1; i >= 0; i--)
        debug_img[i] = 255;
    Bitmap bmp = new Bitmap(debug_img_width, debug_img_height, System.Drawing.Imaging.PixelFormat.Format24bppRgb);

    int cellSize_mm = 32;
    int image_width = 320;
    int image_height = 240;

    Console.WriteLine("Creating sensor models");
    stereoModel inverseSensorModel = new stereoModel();
    inverseSensorModel.createLookupTable(cellSize_mm, image_width, image_height);

    // create a ray
    float FOV_horizontal = 78 * (float)Math.PI / 180.0f;
    inverseSensorModel.FOV_horizontal = FOV_horizontal;
    inverseSensorModel.FOV_vertical = FOV_horizontal * image_height / image_width;
    evidenceRay ray =
        inverseSensorModel.createRay(
            image_width / 2, image_height / 2, 4,
            0, 255, 255, 255);

    Assert.AreNotEqual(null, ray, "No ray was created");
    Assert.AreNotEqual(null, ray.vertices, "No ray vertices were created");

    pos3D[] start_vertices = (pos3D[])ray.vertices.Clone();
    Console.WriteLine("x,y,z: " + start_vertices[0].x.ToString() + ", " + start_vertices[0].y.ToString() + ", " + start_vertices[0].z.ToString());

    // draw the ray before rotation
    for (int i = 0; i < ray.vertices.Length; i++)
    {
        int j = i + 1;
        if (j == ray.vertices.Length) j = 0;
        int x0 = (debug_img_width / 2) + (int)ray.vertices[i].x / 50;
        int y0 = (debug_img_height / 2) + (int)ray.vertices[i].y / 50;
        int x1 = (debug_img_width / 2) + (int)ray.vertices[j].x / 50;
        int y1 = (debug_img_height / 2) + (int)ray.vertices[j].y / 50;
        drawing.drawLine(debug_img, debug_img_width, debug_img_height, x0, y0, x1, y1, 0, 255, 0, 0, false);
    }

    // rotate the ray by 30 degrees of pan
    // (pan is expressed in radians, as elsewhere in the code, so use the converted value)
    float angle_degrees = 30;
    float angle_radians = angle_degrees / 180.0f * (float)Math.PI;
    pos3D rotation = new pos3D(0, 0, 0);
    rotation.pan = angle_radians;
    ray.translateRotate(rotation);

    Console.WriteLine("x,y,z: " + ray.vertices[0].x.ToString() + ", " + ray.vertices[0].y.ToString() + ", " + ray.vertices[0].z.ToString());

    // draw the rotated ray
    for (int i = 0; i < ray.vertices.Length; i++)
    {
        int j = i + 1;
        if (j == ray.vertices.Length) j = 0;
        int x0 = (debug_img_width / 2) + (int)ray.vertices[i].x / 50;
        int y0 = (debug_img_height / 2) + (int)ray.vertices[i].y / 50;
        int x1 = (debug_img_width / 2) + (int)ray.vertices[j].x / 50;
        int y1 = (debug_img_height / 2) + (int)ray.vertices[j].y / 50;
        drawing.drawLine(debug_img, debug_img_width, debug_img_height, x0, y0, x1, y1, 255, 0, 0, 0, false);
    }

    BitmapArrayConversions.updatebitmap_unsafe(debug_img, bmp);
    bmp.Save("tests_occupancygrid_simple_EvidenceRayRotation.jpg", System.Drawing.Imaging.ImageFormat.Jpeg);
}
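// For reference, a pan rotation of the kind tested above amounts to rotating each vertex in
// the x-y plane. The sketch below shows one common sign convention; the actual convention is
// whatever pos3D.rotate / evidenceRay.translateRotate implement, so treat this as an
// illustration rather than a definition.
static void RotatePanSketch(float x, float y, float pan_radians, out float x_rotated, out float y_rotated)
{
    // planar rotation about the vertical axis (one possible sign convention)
    x_rotated = (x * (float)Math.Cos(pan_radians)) - (y * (float)Math.Sin(pan_radians));
    y_rotated = (x * (float)Math.Sin(pan_radians)) + (y * (float)Math.Cos(pan_radians));
}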
public void InsertRays() { int no_of_stereo_features = 2000; int image_width = 640; int image_height = 480; int no_of_stereo_cameras = 1; int localisationRadius_mm = 16000; int maxMappingRange_mm = 16000; int cellSize_mm = 32; int dimension_cells = 16000 / cellSize_mm; int dimension_cells_vertical = dimension_cells/2; float vacancyWeighting = 0.5f; float FOV_horizontal = 78 * (float)Math.PI / 180.0f; // create a grid Console.WriteLine("Creating grid"); occupancygridSimple grid = new occupancygridSimple( dimension_cells, dimension_cells_vertical, cellSize_mm, localisationRadius_mm, maxMappingRange_mm, vacancyWeighting); Assert.AreNotEqual(grid, null, "object occupancygridSimple was not created"); Console.WriteLine("Creating sensor models"); stereoModel inverseSensorModel = new stereoModel(); inverseSensorModel.FOV_horizontal = FOV_horizontal; inverseSensorModel.FOV_vertical = FOV_horizontal * image_height / image_width; inverseSensorModel.createLookupTable(cellSize_mm, image_width, image_height); //Assert.AreNotEqual(0, inverseSensorModel.ray_model.probability[1][5], "Ray model probabilities not updated"); // observer parameters int pan_angle_degrees = 0; pos3D observer = new pos3D(0,0,0); observer.pan = pan_angle_degrees * (float)Math.PI / 180.0f; float stereo_camera_baseline_mm = 100; pos3D left_camera_location = new pos3D(stereo_camera_baseline_mm*0.5f,0,0); pos3D right_camera_location = new pos3D(-stereo_camera_baseline_mm*0.5f,0,0); left_camera_location = left_camera_location.rotate(observer.pan, observer.tilt, observer.roll); right_camera_location = right_camera_location.rotate(observer.pan, observer.tilt, observer.roll); left_camera_location = left_camera_location.translate(observer.x, observer.y, observer.z); right_camera_location = right_camera_location.translate(observer.x, observer.y, observer.z); float FOV_degrees = 78; float[] stereo_features = new float[no_of_stereo_features * 3]; byte[,] stereo_features_colour = new byte[no_of_stereo_features, 3]; float[] stereo_features_uncertainties = new float[no_of_stereo_features]; // create some stereo disparities within the field of view Console.WriteLine("Adding disparities"); //MersenneTwister rnd = new MersenneTwister(0); Random rnd = new Random(0); for (int correspondence = 0; correspondence < no_of_stereo_features; correspondence++) { float x = rnd.Next(image_width-1); float y = rnd.Next(image_height/50) + (image_height/2); float disparity = 7; if ((x < image_width/5) || (x > image_width * 4/5)) { disparity = 7; //15; } byte colour_red = (byte)rnd.Next(255); byte colour_green = (byte)rnd.Next(255); byte colour_blue = (byte)rnd.Next(255); stereo_features[correspondence*3] = x; stereo_features[(correspondence*3)+1] = y; stereo_features[(correspondence*3)+2] = disparity; stereo_features_colour[correspondence, 0] = colour_red; stereo_features_colour[correspondence, 1] = colour_green; stereo_features_colour[correspondence, 2] = colour_blue; stereo_features_uncertainties[correspondence] = 0; } // create an observation as a set of rays from the stereo correspondence results List<evidenceRay>[] stereo_rays = new List<evidenceRay>[no_of_stereo_cameras]; for (int cam = 0; cam < no_of_stereo_cameras; cam++) { Console.WriteLine("Creating rays"); stereo_rays[cam] = inverseSensorModel.createObservation( observer, stereo_camera_baseline_mm, image_width, image_height, FOV_degrees, stereo_features, stereo_features_colour, stereo_features_uncertainties, true); // insert rays into the grid Console.WriteLine("Throwing rays"); for (int ray = 0; ray < 
stereo_rays[cam].Count; ray++) { grid.Insert(stereo_rays[cam][ray], inverseSensorModel.ray_model, left_camera_location, right_camera_location, false); } } // save the result as an image Console.WriteLine("Saving grid"); int debug_img_width = 640; int debug_img_height = 480; byte[] debug_img = new byte[debug_img_width * debug_img_height * 3]; Bitmap bmp = new Bitmap(debug_img_width, debug_img_height, System.Drawing.Imaging.PixelFormat.Format24bppRgb); grid.Show(debug_img, debug_img_width, debug_img_height, false, false); BitmapArrayConversions.updatebitmap_unsafe(debug_img, bmp); bmp.Save("tests_occupancygrid_simple_InsertRays_overhead.jpg", System.Drawing.Imaging.ImageFormat.Jpeg); grid.ShowFront(debug_img, debug_img_width, debug_img_height, true); BitmapArrayConversions.updatebitmap_unsafe(debug_img, bmp); bmp.Save("tests_occupancygrid_simple_InsertRays_front.jpg", System.Drawing.Imaging.ImageFormat.Jpeg); // side view of the probabilities float max_prob = -1; float min_prob = 1; float[] probs = new float[dimension_cells/2]; float[] mean_colour = new float[3]; for (int y = dimension_cells / 2; y < dimension_cells; y++) { float p = grid.GetProbability(dimension_cells / 2, y, mean_colour); probs[y - (dimension_cells / 2)] = p; if (p != occupancygridSimple.NO_OCCUPANCY_EVIDENCE) { if (p < min_prob) min_prob = p; if (p > max_prob) max_prob = p; } } for (int i = 0; i < debug_img.Length; i++) debug_img[i] = 255; int prev_x = -1; int prev_y = debug_img_height / 2; for (int i = 0; i < probs.Length; i++) { if (probs[i] != occupancygridSimple.NO_OCCUPANCY_EVIDENCE) { int x = i * (debug_img_width - 1) / probs.Length; int y = debug_img_height - 1 - (int)((probs[i] - min_prob) / (max_prob - min_prob) * (debug_img_height - 1)); int n = ((y * debug_img_width) + x) * 3; if (prev_x > -1) { int r = 255; int g = 0; int b = 0; if (probs[i] > 0.5f) { r = 0; g = 255; b = 0; } drawing.drawLine(debug_img, debug_img_width, debug_img_height, prev_x, prev_y, x, y, r, g, b, 0, false); } prev_x = x; prev_y = y; } } int y_zero = debug_img_height - 1 - (int)((0.5f-min_prob) / (max_prob - min_prob) * (debug_img_height - 1)); drawing.drawLine(debug_img, debug_img_width, debug_img_height, 0, y_zero, debug_img_width - 1, y_zero, 0, 0, 0, 0, false); BitmapArrayConversions.updatebitmap_unsafe(debug_img, bmp); bmp.Save("tests_occupancygrid_simple_InsertRays_probs.jpg", System.Drawing.Imaging.ImageFormat.Jpeg); }
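// As a rough sanity check on where the occupied band should appear in the images saved above:
// with a 78 degree horizontal field of view over 640 pixels, a 100mm baseline and a 7 pixel
// disparity, a simple pinhole model (an assumption; the actual mapping is whatever stereoModel
// implements) puts the features at roughly
//
//     focal_length_pixels = (640 / 2) / tan(39 degrees) ≈ 395 pixels
//     range ≈ 395 * 100 / 7 ≈ 5600mm
//
// so the cluster of occupied cells should sit several metres in front of the observer, well
// inside the 16 metre mapping range configured for the grid.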
public void CreateSensorModels(
    metagridBuffer buf)
{
    List<int> cell_sizes = buf.GetCellSizes();

    sensormodel = new stereoModel[image_width.Length][];
    for (int stereo_cam = 0; stereo_cam < image_width.Length; stereo_cam++)
        sensormodel[stereo_cam] = new stereoModel[cell_sizes.Count];

    for (int stereo_cam = 0; stereo_cam < image_width.Length; stereo_cam++)
    {
        for (int grid_level = 0; grid_level < cell_sizes.Count; grid_level++)
        {
            // if this camera has the same image width as the previous one,
            // reuse the previous camera's sensor model rather than rebuilding it
            if (stereo_cam > 0)
            {
                if (image_width[stereo_cam - 1] == image_width[stereo_cam])
                {
                    sensormodel[stereo_cam][grid_level] = sensormodel[stereo_cam - 1][grid_level];
                }
            }

            if (sensormodel[stereo_cam][grid_level] == null)
            {
                sensormodel[stereo_cam][grid_level] = new stereoModel();
                sensormodel[stereo_cam][grid_level].createLookupTable(
                    cell_sizes[grid_level],
                    image_width[stereo_cam],
                    image_height[stereo_cam]);
            }
        }
    }
}
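// A hypothetical usage sketch (names other than CreateSensorModels are assumptions): the
// image_width and image_height arrays are expected to hold one entry per stereo camera before
// this call, and the buffer supplies one cell size per grid level, so the loops above build
// one ray model lookup table per (camera, grid level) pair, while a camera that shares the
// previous camera's image width simply reuses its models instead of recomputing them.
//
//     image_width  = new int[] { 320, 320 };   // two stereo cameras with the same resolution
//     image_height = new int[] { 240, 240 };
//     CreateSensorModels(localGridBuffer);     // the second camera reuses the first camera's models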