public void Add(robot rob, String path, String trackName, int starting_index,
                String TrackName, int trackType, bool mappingOnly)
{
    int starting_index1 = starting_index;
    int starting_index2 = starting_index;
    //starting_index1 += 42;
    //rob.correspondence.LoadCalibration(path + trackName + "\\calibration.txt", ref calibration_offset_x, ref calibration_offset_y);

    Name = trackName;

    robotPath p;
    if (!mappingOnly)
    {
        // "L" indicates the localisation track
        p = getPath(rob, path, trackName, "L", starting_index1, TrackName, trackType);
        Add(p, false);
    }

    // "M" indicates the mapping track
    p = getPath(rob, path, trackName, "M", starting_index2, TrackName, trackType);
    Add(p, true);
}
public void update()
{
    // create a new robot object with the required number of cameras,
    // carrying over the existing calibration and sensor models.
    // Note: this copy assumes the number of cameras has not been reduced
    int no_of_cameras = Convert.ToInt32(txtNoOfCameras.Text);
    robot new_rob = new robot(no_of_cameras, robot.MAPPING_DPSLAM);
    for (int i = 0; i < rob.head.no_of_stereo_cameras; i++)
    {
        new_rob.head.calibration[i] = rob.head.calibration[i];
        new_rob.head.sensormodel[i] = rob.head.sensormodel[i];
    }
    rob = new_rob;

    // general properties
    rob.Name = txtName.Text;
    rob.TotalMass_kg = Convert.ToSingle(txtTotalMass.Text);
    rob.SetMappingThreads(Convert.ToInt32(txtNoOfThreads.Text));

    // body dimensions and shape
    rob.BodyWidth_mm = Convert.ToSingle(txtBodyWidth.Text);
    rob.BodyLength_mm = Convert.ToSingle(txtBodyLength.Text);
    rob.BodyHeight_mm = Convert.ToSingle(txtBodyHeight.Text);
    rob.BodyShape = cmbBodyShape.SelectedIndex;

    // propulsion
    rob.propulsionType = Convert.ToInt32(cmbPropulsion.SelectedIndex);
    rob.WheelBase_mm = Convert.ToSingle(txtWheelBase.Text);
    rob.WheelBaseForward_mm = Convert.ToSingle(txtWheelBaseForward.Text);
    rob.WheelDiameter_mm = Convert.ToSingle(txtWheelDiameter.Text);
    rob.WheelPositionFeedbackType = Convert.ToInt32(cmbWheelFeedback.SelectedIndex);
    rob.GearRatio = Convert.ToInt32(txtGearRatio.Text);
    rob.CountsPerRev = Convert.ToInt32(txtCountsPerRev.Text);
    rob.MotorNoLoadSpeedRPM = Convert.ToSingle(txtMotorNoLoadSpeedRPM.Text);
    rob.MotorTorqueKgMm = Convert.ToSingle(txtMotorTorque.Text);

    // stereo camera calibration
    for (int i = 0; i < rob.head.no_of_stereo_cameras; i++)
    {
        rob.head.calibration[i].baseline = Convert.ToSingle(txtCameraBaseline.Text);
        rob.head.calibration[i].leftcam.camera_FOV_degrees = Convert.ToSingle(txtCameraFOV.Text);
        rob.head.calibration[i].rightcam.camera_FOV_degrees = Convert.ToSingle(txtCameraFOV.Text);
        rob.head.calibration[i].positionOrientation.roll = Convert.ToSingle(txtRollAngle.Text) * (float)Math.PI / 180.0f;
    }

    // head geometry
    rob.HeadType = Convert.ToInt32(cmbHeadType.SelectedIndex);
    rob.HeadSize_mm = Convert.ToSingle(txtHeadSize.Text);
    rob.HeadShape = Convert.ToInt32(cmbHeadShape.SelectedIndex);
    rob.head.x = Convert.ToSingle(txtHeadPositionLeft.Text);
    rob.head.y = Convert.ToSingle(txtHeadPositionForward.Text);
    rob.head.z = Convert.ToSingle(txtHeadHeightFromGround.Text);
    rob.head.no_of_stereo_cameras = Convert.ToInt32(txtNoOfCameras.Text);
    rob.CameraOrientation = Convert.ToInt32(cmbCameraOrientation.SelectedIndex);

    // occupancy grid parameters
    rob.LocalGridCellSize_mm = Convert.ToSingle(txtGridCellDimension.Text);
    rob.LocalGridLevels = Convert.ToInt32(txtGridLevels.Text);
    rob.LocalGridDimension = (int)(Convert.ToInt32(txtGridWidth.Text) / rob.LocalGridCellSize_mm);
    rob.LocalGridDimensionVertical = (int)(Convert.ToInt32(txtGridHeight.Text) / rob.LocalGridCellSize_mm);
    rob.LocalGridInterval_mm = Convert.ToSingle(txtGridInterval.Text);
    rob.LocalGridMappingRange_mm = Convert.ToSingle(txtMappingRange.Text);
    rob.LocalGridLocalisationRadius_mm = Convert.ToSingle(txtLocalGridLocalisationRadius.Text);

    // motion model parameters
    rob.SetMotionModelTrialPoses(Convert.ToInt32(txtTrialPoses.Text));
    rob.SetMotionModelCullingThreshold(Convert.ToInt32(txtCullingThreshold.Text));
    rob.EnableScanMatching = chkEnableScanMatching.Checked;
}
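/// <summary>
/// Illustrative helper (not in the original source): parse a text box value,
/// falling back to a default when the text is not a valid number.
/// Convert.ToSingle, used throughout update() above, instead throws a
/// FormatException on invalid input.
/// </summary>
private static float parseSingleOrDefault(string text, float default_value)
{
    float value;
    return float.TryParse(text, out value) ? value : default_value;
}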
/// <summary>
/// calculate the position of the robot's head and cameras for this pose
/// </summary>
/// <param name="rob">robot object</param>
/// <param name="head_location">location of the centre of the head</param>
/// <param name="camera_centre_location">location of the centre of each stereo camera</param>
/// <param name="left_camera_location">location of the left camera within each stereo camera</param>
/// <param name="right_camera_location">location of the right camera within each stereo camera</param>
protected void calculateCameraPositions(
    robot rob,
    ref pos3D head_location,
    ref pos3D[] camera_centre_location,
    ref pos3D[] left_camera_location,
    ref pos3D[] right_camera_location)
{
    // calculate the position of the centre of the head relative to
    // the centre of rotation of the robot's body
    pos3D head_centroid = new pos3D(-(rob.BodyWidth_mm / 2) + rob.head.x,
                                    -(rob.BodyLength_mm / 2) + rob.head.y,
                                    rob.head.z);

    // location of the centre of the head on the grid map,
    // adjusted for the robot pose and the head pan and tilt angle.
    // Note that the positions and orientations of individual cameras
    // on the head have already been accounted for within stereoModel.createObservation
    pos3D head_locn = head_centroid.rotate(rob.head.pan + pan, rob.head.tilt, 0);
    head_locn = head_locn.translate(x, y, 0);
    head_location.copyFrom(head_locn);

    for (int cam = 0; cam < rob.head.no_of_stereo_cameras; cam++)
    {
        // calculate the position of the centre of the stereo camera
        // (baseline centre point)
        pos3D camera_centre_locn = new pos3D(rob.head.calibration[cam].positionOrientation.x,
                                             rob.head.calibration[cam].positionOrientation.y,
                                             rob.head.calibration[cam].positionOrientation.z);
        camera_centre_locn = camera_centre_locn.rotate(rob.head.calibration[cam].positionOrientation.pan + rob.head.pan + pan,
                                                       rob.head.calibration[cam].positionOrientation.tilt,
                                                       rob.head.calibration[cam].positionOrientation.roll);
        camera_centre_location[cam] = camera_centre_locn.translate(head_location.x, head_location.y, head_location.z);

        // where are the left and right cameras?
        // we need to know this for the origins of the vacancy models
        float half_baseline_length = rob.head.calibration[cam].baseline / 2;
        pos3D left_camera_locn = new pos3D(-half_baseline_length, 0, 0);
        left_camera_locn = left_camera_locn.rotate(rob.head.calibration[cam].positionOrientation.pan + rob.head.pan + pan,
                                                   rob.head.calibration[cam].positionOrientation.tilt,
                                                   rob.head.calibration[cam].positionOrientation.roll);
        // the right camera is mirrored through the baseline centre point
        pos3D right_camera_locn = new pos3D(-left_camera_locn.x, -left_camera_locn.y, -left_camera_locn.z);
        left_camera_location[cam] = left_camera_locn.translate(camera_centre_location[cam].x, camera_centre_location[cam].y, camera_centre_location[cam].z);
        right_camera_location[cam] = right_camera_locn.translate(camera_centre_location[cam].x, camera_centre_location[cam].y, camera_centre_location[cam].z);
        right_camera_location[cam].pan = left_camera_location[cam].pan;
    }
}
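/// <summary>
/// Illustrative sketch (not part of the original code): composes a camera
/// offset using the same rotate-then-translate convention as
/// calculateCameraPositions above. Assumes pos3D.rotate(pan, tilt, roll)
/// and pos3D.translate(x, y, z) return new pos3D instances, as they are
/// used above.
/// </summary>
private static pos3D exampleLeftCameraPosition(pos3D head, float total_pan, float baseline_mm)
{
    // the left camera sits half a baseline to the left of the baseline centre
    pos3D left = new pos3D(-baseline_mm / 2, 0, 0);
    // rotate the offset into the head's orientation, then translate it
    // to the head's position
    left = left.rotate(total_pan, 0, 0);
    return left.translate(head.x, head.y, head.z);
}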
private void test_motion_model(bool closed_loop)
{
    robot rob = new robot(1);

    int min_x_mm = 0;
    int min_y_mm = 0;
    int max_x_mm = 1000;
    int max_y_mm = 1000;
    int step_size = (max_y_mm - min_y_mm) / 15;
    int x = (max_x_mm - min_x_mm) / 2;
    bool initial = true;
    float pan = 0; // (float)Math.PI / 4;

    // move the robot in steps along a straight line, updating the
    // motion model at each step
    for (int y = min_y_mm; y < max_y_mm; y += step_size)
    {
        if (closed_loop) surveyPosesDummy(rob);
        rob.updateFromKnownPosition(null, x, y, pan);

        rob.GetBestMotionModel().Show(img_rays, standard_width, standard_height,
                                      min_x_mm, min_y_mm, max_x_mm, max_y_mm,
                                      initial);
        initial = false;
    }
}
/*
private void test_head()
{
    pos3D robotPosition = new pos3D(0, 0, 0);
    pos3D robotOrientation = new pos3D(0, 0, 0);

    beginStopWatch();

    for (int i = 0; i < view.Length; i++)
    {
        for (int cam = 0; cam < 4; cam++)
        {
            //invent some stereo features
            for (int f = 0; f < stereo_features.Length; f += 3)
            {
                stereo_features[f] = rnd.Next(robot_head.image_width - 1);
                stereo_features[f + 1] = rnd.Next(robot_head.image_height - 1);
                //stereo_features[f] = (robot_head.image_width*1/10) + (rnd.Next(robot_head.image_width/300) - 1);
                //stereo_features[f + 1] = rnd.Next(robot_head.image_height - 1);
                stereo_features[f + 2] = rnd.Next(robot_head.image_width * 35 / 100);
            }
            robot_head.setStereoFeatures(cam, stereo_features, stereo_uncertainties, stereo_features.Length);
        }

        view[i] = stereo_model.createViewpoint(robot_head, robotOrientation);
        view[0].showAbove(img_rays, standard_width, standard_height, 2000, 255, 255, 255, true, 0, false);

        robotPosition.y += 0;
        robotPosition.pan += 0.0f;
    }

    long mappingTime = endStopWatch();
    txtMappingTime.Text = Convert.ToString(mappingTime);
}
*/

/*
private void test_head2()
{
    robot_head = new stereoHead(2);
    robot_head.initDualCam();

    pos3D robotPosition = new pos3D(0, 0, 0);
    pos3D robotOrientation = new pos3D(0, 0, 0);

    beginStopWatch();

    for (int i = 0; i < view.Length; i++)
    {
        for (int cam = 0; cam < 2; cam++)
        {
            //invent some stereo features
            for (int f = 0; f < stereo_features.Length; f += 3)
            {
                stereo_features[f] = rnd.Next(robot_head.image_width - 1);
                stereo_features[f + 1] = robot_head.image_height / 2; // rnd.Next(robot_head.image_height - 1);
                //stereo_features[f] = (robot_head.image_width*1/10) + (rnd.Next(robot_head.image_width/300) - 1);
                //stereo_features[f + 1] = rnd.Next(robot_head.image_height - 1);
                stereo_features[f + 2] = rnd.Next(robot_head.image_width * 35 / 100);
            }
            robot_head.setStereoFeatures(cam, stereo_features, stereo_uncertainties, stereo_features.Length);
        }

        view[i] = stereo_model.createViewpoint(robot_head, robotOrientation);
        view[0].showFront(img_rays, standard_width, standard_height, 2000, 255, 255, 255, true);

        robotPosition.y += 0;
        robotPosition.pan += 0.0f;
    }

    long mappingTime = endStopWatch();
    txtMappingTime.Text = Convert.ToString(mappingTime);
}
*/

/*
private void test_grid()
{
    occupancygridClassic grd = new occupancygridClassic(128, 32);

    pos3D robotPosition = new pos3D(0, 0, 0);
    pos3D robotOrientation = new pos3D(0, 0, 0);

    beginStopWatch();

    //int i = 0;
    //for (i = 0; i < 9; i++)
    {
        for (int cam = 0; cam < 4; cam++)
        {
            //invent some stereo features
            for (int f = 0; f < stereo_features.Length; f += 3)
            {
                stereo_features[f] = rnd.Next(robot_head.image_width - 1);
                stereo_features[f + 1] = rnd.Next(robot_head.image_height - 1);
                stereo_features[f + 2] = rnd.Next(robot_head.image_width * 35 / 100);
            }
            robot_head.setStereoFeatures(cam, stereo_features, stereo_uncertainties, stereo_features.Length);
        }

        view[0] = stereo_model.createViewpoint(robot_head, robotOrientation);
        pos3D centre = new pos3D(0, 0, 0);
        grd.insert(view[0], true, centre);
        //view[0].show(img_rays, standard_width, standard_height, 2000, 255, 255, 255, true);

        robotPosition.y += 50;
        robotPosition.pan += 0.0f;
    }

    long mappingTime = endStopWatch();
    txtMappingTime.Text = Convert.ToString(mappingTime);
    grd.show(img_rays, standard_width, standard_height);
}
*/

/// <summary>
/// generate scores for poses. This is only for testing purposes.
/// </summary>
/// <param name="rob">robot object</param>
private void surveyPosesDummy(robot rob)
{
    motionModel motion_model = rob.GetBestMotionModel();

    // examine the pose list
    for (int sample = 0; sample < motion_model.survey_trial_poses; sample++)
    {
        particlePath path = (particlePath)motion_model.Poses[sample];
        particlePose pose = path.current_pose;

        // the score falls off with euclidean distance from the
        // robot's true position
        float dx = rob.x - pose.x;
        float dy = rob.y - pose.y;
        float dist = (float)Math.Sqrt((dx * dx) + (dy * dy));
        float score = 1.0f / (1 + dist);

        // update the score for this pose
        motion_model.updatePoseScore(path, score);
    }

    // indicate that the pose scores have been updated
    motion_model.PosesEvaluated = true;
}
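/// <summary>
/// Illustrative sketch (not in the original source) of the inverse-distance
/// scoring used above: a pose at the true position scores 1.0, and the score
/// decays towards zero with distance, e.g. 1mm away -> 0.5, 9mm away -> 0.1,
/// 999mm away -> 0.001.
/// </summary>
private static float examplePoseScore(float dx, float dy)
{
    float dist = (float)Math.Sqrt((dx * dx) + (dy * dy));
    return 1.0f / (1 + dist);
}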
public static void Main(string[] args)
{
    // extract command line parameters
    ArrayList parameters = commandline.ParseCommandLineParameters(args, "-", GetValidParameters());

    string help_str = commandline.GetParameterValue("help", parameters);
    if (help_str != "")
    {
        ShowHelp();
    }
    else
    {
        Console.WriteLine("stereosensormodel version 0.1");

        int no_of_cameras = 1;
        robot rob = new robot(no_of_cameras, robot.MAPPING_DPSLAM);

        string filename = commandline.GetParameterValue("filename", parameters);
        if (filename == "") filename = "sensormodel.xml";

        rob.integer_sensor_model_values = false;
        string integer_sensor_model_values_str = commandline.GetParameterValue("integer", parameters);
        if (integer_sensor_model_values_str != "")
        {
            Console.WriteLine("Use integer sensor models");
            rob.integer_sensor_model_values = true;
        }

        float baseline_mm = 107;
        string baseline_mm_str = commandline.GetParameterValue("baseline", parameters);
        if (baseline_mm_str != "")
        {
            baseline_mm = Convert.ToSingle(baseline_mm_str);
        }

        float camera_FOV_degrees = 90;
        string camera_FOV_degrees_str = commandline.GetParameterValue("fov", parameters);
        if (camera_FOV_degrees_str != "")
        {
            camera_FOV_degrees = Convert.ToSingle(camera_FOV_degrees_str);
        }

        float LocalGridCellSize_mm = 40;
        string LocalGridCellSize_mm_str = commandline.GetParameterValue("cellsize", parameters);
        if (LocalGridCellSize_mm_str != "")
        {
            LocalGridCellSize_mm = Convert.ToSingle(LocalGridCellSize_mm_str);
        }

        int image_width = 320;
        string image_width_str = commandline.GetParameterValue("imagewidth", parameters);
        if (image_width_str != "")
        {
            image_width = Convert.ToInt32(image_width_str);
        }
        int image_height = image_width / 2;

        for (int i = 0; i < rob.head.no_of_stereo_cameras; i++)
        {
            rob.head.calibration[i].baseline = baseline_mm;
            rob.head.calibration[i].leftcam.camera_FOV_degrees = camera_FOV_degrees;
            rob.head.calibration[i].rightcam.camera_FOV_degrees = camera_FOV_degrees;
            rob.head.calibration[i].positionOrientation.roll = 0;
            // note: the calibration image size is fixed at 640x480 here,
            // independently of the -imagewidth option
            rob.head.calibration[i].leftcam.image_width = 640;
            rob.head.calibration[i].leftcam.image_height = 480;
        }

        // load an existing sensor model if one exists
        if (File.Exists(filename))
        {
            Console.WriteLine("Loading " + filename);
            rob.head.sensormodel = new rayModelLookup[no_of_cameras];
            rob.head.sensormodel[0] = new rayModelLookup(image_width / 2, 4000);
            rob.head.sensormodel[0].Load(filename);
        }

        // create the sensor models
        Console.Write("Creating sensor models, please wait...");
        rob.inverseSensorModel.createLookupTables(rob.head, (int)LocalGridCellSize_mm);
        Console.WriteLine("Done");

        // save an image showing the ray model
        string raymodel_image_filename = "ray_model.jpg";
        Console.WriteLine("Saving ray model image: " + raymodel_image_filename);
        int img_width = 640;
        int img_height = 480;
        byte[] img = new byte[img_width * img_height * 3];
        rob.inverseSensorModel.ray_model_to_graph_image(img, img_width, img_height);
        Bitmap bmp = new Bitmap(img_width, img_height, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
        BitmapArrayConversions.updatebitmap_unsafe(img, bmp);
        bmp.Save(raymodel_image_filename, System.Drawing.Imaging.ImageFormat.Jpeg);

        // save an image showing the gaussian distribution
        string distribution_image_filename = "gaussian_distribution.jpg";
        Console.WriteLine("Saving gaussian distribution image: " + distribution_image_filename);
        rob.inverseSensorModel.showDistribution(img, img_width, img_height);
        BitmapArrayConversions.updatebitmap_unsafe(img, bmp);
        bmp.Save(distribution_image_filename, System.Drawing.Imaging.ImageFormat.Jpeg);

        Console.WriteLine("Saving " + filename);
        rob.Save(filename);
    }
}
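// Example invocation (the executable name is assumed; the flags are those
// parsed above):
//
//   stereosensormodel -filename sensormodel.xml -baseline 107 -fov 90
//                     -cellsize 40 -imagewidth 320 -integer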
public motionModel(robot rob, occupancygridMultiHypothesis LocalGrid, int random_seed)
{
    // seed the random number generator
    rnd = new Random(random_seed); // MersenneTwister(random_seed);

    this.rob = rob;
    this.LocalGrid = LocalGrid;
    Poses = new List<particlePath>();
    ActivePoses = new List<particlePath>();

    // fill the noise array in three bands with some default values
    motion_noise = new float[6];
    int i = 0;
    while (i < 2)
    {
        motion_noise[i] = default_motion_noise_1;
        i++;
    }
    while (i < 4)
    {
        motion_noise[i] = default_motion_noise_2;
        i++;
    }
    while (i < motion_noise.Length)
    {
        motion_noise[i] = default_motion_noise_3;
        i++;
    }

    // create some initial poses
    createNewPoses(rob.x, rob.y, rob.pan);
}
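/// <summary>
/// Minimal sketch (not in the original source) showing the banded fill
/// performed above: indices 0-1 receive the first default noise value,
/// indices 2-3 the second, and the remainder the third.
/// </summary>
private static float[] exampleMotionNoise(float band1, float band2, float band3)
{
    float[] noise = new float[6];
    for (int i = 0; i < noise.Length; i++)
        noise[i] = (i < 2) ? band1 : ((i < 4) ? band2 : band3);
    return noise;
}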
/// <summary>
/// add an observation taken from this pose
/// </summary>
/// <param name="stereo_rays">list of ray objects in this observation</param>
/// <param name="rob">robot object</param>
/// <param name="LocalGrid">occupancy grid into which to insert the observation</param>
/// <param name="localiseOnly">if true does not add any mapping particles (pure localisation)</param>
/// <returns>localisation matching score</returns>
public float AddObservation(List<evidenceRay>[] stereo_rays,
                            robot rob,
                            occupancygridMultiHypothesis LocalGrid,
                            bool localiseOnly)
{
    // clear the localisation score
    float localisation_score = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;

    // get the positions of the head and cameras for this pose
    pos3D head_location = new pos3D(0, 0, 0);
    pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
    pos3D[] left_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
    pos3D[] right_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
    calculateCameraPositions(rob, ref head_location,
                             ref camera_centre_location,
                             ref left_camera_location,
                             ref right_camera_location);

    // iterate over each stereo camera
    int cam = stereo_rays.Length - 1;
    while (cam >= 0)
    {
        // iterate through each ray
        int r = stereo_rays[cam].Count - 1;
        while (r >= 0)
        {
            // observed ray. Note that this is in an egocentric
            // coordinate frame relative to the head of the robot
            evidenceRay ray = stereo_rays[cam][r];

            // translate and rotate this ray appropriately for the pose
            evidenceRay trial_ray = ray.trialPose(camera_centre_location[cam].pan,
                                                  camera_centre_location[cam].x,
                                                  camera_centre_location[cam].y);

            // update the grid cells for this ray and update the
            // localisation score accordingly
            float score = LocalGrid.Insert(trial_ray, this,
                                           rob.head.sensormodel[cam],
                                           left_camera_location[cam],
                                           right_camera_location[cam],
                                           localiseOnly);

            // accumulate only valid scores
            if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                if (localisation_score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                    localisation_score += score;
                else
                    localisation_score = score;
            r--;
        }
        cam--;
    }
    return (localisation_score);
}
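/// <summary>
/// Illustrative equivalent (not in the original source) of the sentinel
/// handling above: a score only contributes when it is valid, with
/// NO_OCCUPANCY_EVIDENCE meaning "no measurement yet".
/// </summary>
private static float accumulateLocalisationScore(float total, float score, float no_evidence)
{
    if (score == no_evidence) return total;  // ignore invalid scores
    if (total == no_evidence) return score;  // first valid score
    return total + score;                    // accumulate subsequent scores
}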
/// <summary>
/// Show a diagram of the robot in this pose.
/// This is useful for checking that the positions of cameras have
/// been calculated correctly
/// </summary>
/// <param name="rob">robot object</param>
/// <param name="img">image as a byte array</param>
/// <param name="width">width of the image</param>
/// <param name="height">height of the image</param>
/// <param name="clearBackground">clear the background before drawing</param>
/// <param name="min_x_mm">top left x coordinate</param>
/// <param name="min_y_mm">top left y coordinate</param>
/// <param name="max_x_mm">bottom right x coordinate</param>
/// <param name="max_y_mm">bottom right y coordinate</param>
/// <param name="line_width">line width to use when drawing</param>
/// <param name="showFieldOfView">whether to show the field of view of each camera</param>
public void Show(robot rob,
                 byte[] img, int width, int height, bool clearBackground,
                 int min_x_mm, int min_y_mm,
                 int max_x_mm, int max_y_mm, int line_width,
                 bool showFieldOfView)
{
    if (clearBackground)
        for (int i = 0; i < width * height * 3; i++)
            img[i] = 255;

    // get the positions of the head and cameras for this pose
    pos3D head_location = new pos3D(0, 0, 0);
    pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
    pos3D[] left_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
    pos3D[] right_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
    calculateCameraPositions(rob, ref head_location,
                             ref camera_centre_location,
                             ref left_camera_location,
                             ref right_camera_location);

    int w = max_x_mm - min_x_mm;
    int h = max_y_mm - min_y_mm;

    // draw the body
    int xx = (int)((x - min_x_mm) * width / w);
    int yy = (int)((y - min_y_mm) * height / h);
    int wdth = (int)(rob.BodyWidth_mm * width / w);
    int hght = (int)(rob.BodyLength_mm * height / h);
    drawing.drawBox(img, width, height, xx, yy, wdth, hght, pan, 0, 255, 0, line_width);

    // draw the head
    xx = (int)((head_location.x - min_x_mm) * width / w);
    yy = (int)((head_location.y - min_y_mm) * height / h);
    int radius = (int)(rob.HeadSize_mm * width / w);
    drawing.drawBox(img, width, height, xx, yy, radius, radius, head_location.pan, 0, 255, 0, line_width);

    // draw the cameras
    for (int cam = 0; cam < rob.head.no_of_stereo_cameras; cam++)
    {
        // draw the left camera
        int xx1 = (int)((left_camera_location[cam].x - min_x_mm) * width / w);
        int yy1 = (int)((left_camera_location[cam].y - min_y_mm) * height / h);
        wdth = (int)((rob.head.calibration[cam].baseline / 4) * width / w);
        hght = (int)((rob.head.calibration[cam].baseline / 12) * height / h);
        if (hght < 1) hght = 1;
        drawing.drawBox(img, width, height, xx1, yy1, wdth, hght,
                        left_camera_location[cam].pan + (float)(Math.PI / 2), 0, 255, 0, line_width);

        // draw the right camera
        int xx2 = (int)((right_camera_location[cam].x - min_x_mm) * width / w);
        int yy2 = (int)((right_camera_location[cam].y - min_y_mm) * height / h);
        drawing.drawBox(img, width, height, xx2, yy2, wdth, hght,
                        right_camera_location[cam].pan + (float)(Math.PI / 2), 0, 255, 0, line_width);

        if (showFieldOfView)
        {
            // half the field of view in radians
            float half_FOV = rob.head.calibration[cam].leftcam.camera_FOV_degrees * (float)Math.PI / 360.0f;

            // left camera field of view
            int xx_ray = xx1 + (int)(width * Math.Sin(left_camera_location[cam].pan + half_FOV));
            int yy_ray = yy1 + (int)(width * Math.Cos(left_camera_location[cam].pan + half_FOV));
            drawing.drawLine(img, width, height, xx1, yy1, xx_ray, yy_ray, 200, 200, 255, 0, false);
            xx_ray = xx1 + (int)(width * Math.Sin(left_camera_location[cam].pan - half_FOV));
            yy_ray = yy1 + (int)(width * Math.Cos(left_camera_location[cam].pan - half_FOV));
            drawing.drawLine(img, width, height, xx1, yy1, xx_ray, yy_ray, 200, 200, 255, 0, false);

            // right camera field of view
            xx_ray = xx2 + (int)(width * Math.Sin(right_camera_location[cam].pan + half_FOV));
            yy_ray = yy2 + (int)(width * Math.Cos(right_camera_location[cam].pan + half_FOV));
            drawing.drawLine(img, width, height, xx2, yy2, xx_ray, yy_ray, 200, 200, 255, 0, false);
            xx_ray = xx2 + (int)(width * Math.Sin(right_camera_location[cam].pan - half_FOV));
            yy_ray = yy2 + (int)(width * Math.Cos(right_camera_location[cam].pan - half_FOV));
            drawing.drawLine(img, width, height, xx2, yy2, xx_ray, yy_ray, 200, 200, 255, 0, false);
        }
    }
}
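/// <summary>
/// Sketch (not in the original source) of the conversion used when drawing
/// the field of view above: multiplying a full field of view in degrees by
/// PI/360 gives half that angle in radians, e.g. 90 degrees -> PI/4.
/// </summary>
private static float exampleHalfFOVRadians(float camera_FOV_degrees)
{
    return camera_FOV_degrees * (float)Math.PI / 360.0f;
}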
/// <summary>
/// reset the simulation.
/// Note that the path or pathSegment data is not reset
/// </summary>
public void Reset()
{
    // create the robot object
    rob = new robot();

    // load the design file
    rob.Load(RobotDesignFile);

    current_time_step = 0;
    position_error_mm = 0;
    updatePath();
}
public float loadGlimpseSentience(robot rob, String path, String TrackName,
                                  String track, int distance_index,
                                  int pathPoints, String index)
{
    float matching_score = 0;
    Byte[] left_bmp = null;
    Byte[] right_bmp = null;
    String image_filename;

    // "M" indicates a mapping track; anything else is pure localisation
    bool mapping = true;
    if (index != "M") mapping = false;

    int stereo_cam_index = 0;
    bool left_camera = true;
    // alternate between the left and right images of each stereo camera
    for (int i = 0; i < rob.head.no_of_cameras * 2; i++)
    {
        if (left_camera)
        {
            image_filename = path + TrackName + "_left_" +
                             Convert.ToString(distance_index + (stereo_cam_index * pathPoints)) + ".bmp";
            if (!File.Exists(image_filename))
                errorMessage = image_filename + " not found";
            rob.head.imageFilename[i] = image_filename;
            left_bmp = util.loadFromBitmap(image_filename, rob.head.image_width, rob.head.image_height, 3);
        }
        else
        {
            image_filename = path + TrackName + "_right_" +
                             Convert.ToString(distance_index + (stereo_cam_index * pathPoints)) + ".bmp";
            if (!File.Exists(image_filename))
                errorMessage = image_filename + " not found";
            rob.head.imageFilename[i] = image_filename;
            right_bmp = util.loadFromBitmap(image_filename, rob.head.image_width, rob.head.image_height, 3);

            // both images of the pair are now loaded, so run stereo matching
            matching_score += rob.loadRectifiedImages(stereo_cam_index, left_bmp, right_bmp, 3, mapping);

            // store images and features for later display
            if (stereo_cam_index == 0)
            {
                image_filenames_stereoCamera0.Add(rob.head.imageFilename[i - 1]);
                image_filenames_stereoCamera0.Add(rob.head.imageFilename[i]);
                features_stereoCamera0.Add(rob.head.features[stereo_cam_index]);
            }
            else
            {
                image_filenames_stereoCamera1.Add(rob.head.imageFilename[i - 1]);
                image_filenames_stereoCamera1.Add(rob.head.imageFilename[i]);
                features_stereoCamera1.Add(rob.head.features[stereo_cam_index]);
            }
            stereo_cam_index++;
        }
        left_camera = !left_camera;
    }
    return (matching_score);
}
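// Example of the filename convention assumed above (the values are made up):
// with path "C:\\tracks\\", TrackName "office", distance_index 3,
// pathPoints 20 and stereo_cam_index 1, the left image loaded would be
//
//   C:\tracks\office_left_23.bmp   // 3 + (1 * 20) = 23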
private robotPath getPath(robot rob, String path, String track, String index,
                          int starting_index, String TrackName, int trackType)
{
    robotPath rpath = new robotPath();
    robotOdometry odometry = new robotOdometry();

    odometry.readPathFileSentience(path + track + "\\" + TrackName + ".path", index, rob);

    switch (trackType)
    {
        case 1: // mapping X track
            {
                standardXTrack(2000, 500, odometry, index);
                break;
            }
        case 2: // localisation X track
            {
                standardXTrack(2000, -500, odometry, index);
                break;
            }
        case 3: // there and back track
            {
                odometry.Clear();
                thereAndBack(100, 46, odometry, 110, 0, 0);
                break;
            }
        case 4: // square
            {
                odometry.Clear();
                squareAndBack(100, 10, 6, odometry, 110);
                break;
            }
        case 5: // special
            {
                odometry.Clear();
                specialTrack(100, 16, 21, 3, odometry, 110);
                break;
            }
    }

    if (odometry.no_of_measurements == 0)
    {
        errorMessage = path + track + "\\" + TrackName + ".path not found or contains no data. ";
        errorMessage += "Check that the path file is included as 'Content' within the project folder";
    }

    // step along the path, loading the stereo images for each position
    for (int distance_index = 0; distance_index < odometry.no_of_measurements; distance_index++)
    {
        pos3D pos = odometry.getPosition(distance_index);
        rob.x = pos.x;
        rob.y = pos.y;
        rob.z = pos.z;
        rob.pan = pos.pan;
        rob.tilt = pos.tilt;
        rob.roll = pos.roll;
        loadGlimpseSentience(rob, path + track + "\\", TrackName, index,
                             distance_index + starting_index,
                             odometry.no_of_measurements, index);
        rob.updatePath(rpath);
    }
    return (rpath);
}
public void testLocalisation(robot rob,
                             int localisation_track_index,
                             int localisation_test_index,
                             occupancygridMultiResolution grid,
                             int ray_thickness,
                             int no_of_trial_poses,
                             bool pruneSurvey, int survey_diameter_mm,
                             int randomSeed, int pruneThreshold, float survey_angular_offset,
                             float momentum,
                             ref float error_x, ref float error_y, ref float error_pan,
                             pos3D estimatedPos)
{
    robotPath pathLocalisation = getLocalisationTrack(localisation_track_index);
    robotPath pathMap = getMappingTrack(localisation_track_index);

    viewpoint view_localisation = pathLocalisation.getViewpoint(localisation_test_index);
    viewpoint view_map = pathMap.getViewpoint(localisation_test_index);
    float max_score = 0;

    // position estimate from odometry, which positions the robot within the grid
    // as an initial estimate from which to search
    pos3D local_odometry_position = view_map.odometry_position.subtract(pathMap.pathCentre());

    // survey x,y positions and pan angles around the initial estimate
    ArrayList survey_results = rob.sensorModelLocalisation.surveyXYP(view_localisation,
                                                                     grid, survey_diameter_mm, survey_diameter_mm,
                                                                     no_of_trial_poses, ray_thickness, pruneSurvey,
                                                                     randomSeed, pruneThreshold, survey_angular_offset,
                                                                     local_odometry_position, momentum,
                                                                     ref max_score);
    float peak_x = 0;
    float peak_y = 0;
    float peak_pan = 0;
    rob.sensorModelLocalisation.SurveyPeak(survey_results, ref peak_x, ref peak_y);

    //float[] peak = rob.sensorModel.surveyPan(view_map, view_localisation, separation_tollerance, ray_thickness, peak_x, peak_y);
    float[] peak = rob.sensorModelLocalisation.surveyPan(view_localisation, grid,
                                                         ray_thickness, peak_x, peak_y);
    peak_pan = rob.sensorModelLocalisation.SurveyPeakPan(peak);

    // compare the localisation estimate against the known odometry positions
    float half_track_length = 1000;
    float dx = view_localisation.odometry_position.x - view_map.odometry_position.x;
    float dy = view_localisation.odometry_position.y - (view_map.odometry_position.y - half_track_length);
    float dp = view_localisation.odometry_position.pan - view_map.odometry_position.pan;
    error_x = dx + peak_x;
    error_y = dy + peak_y;
    error_pan = dp + peak_pan;
    error_pan = error_pan / (float)Math.PI * 180; // convert to degrees

    estimatedPos.x = view_map.odometry_position.x - peak_x;
    estimatedPos.y = view_map.odometry_position.y - peak_y;
    estimatedPos.pan = view_map.odometry_position.pan - peak_pan;
}