/// <summary>
/// calculate the position of the robots head and cameras for this pose
/// </summary>
/// <param name="rob">robot object</param>
/// <param name="head_location">location of the centre of the head</param>
/// <param name="camera_centre_location">location of the centre of each stereo camera</param>
/// <param name="left_camera_location">location of the left camera within each stereo camera</param>
/// <param name="right_camera_location">location of the right camera within each stereo camera</param>
protected void calculateCameraPositions(
    robot rob,
    ref pos3D head_location,
    ref pos3D[] camera_centre_location,
    ref pos3D[] left_camera_location,
    ref pos3D[] right_camera_location)
{
    // calculate the position of the centre of the head relative to
    // the centre of rotation of the robots body
    pos3D head_centroid =
        new pos3D(
            -(rob.BodyWidth_mm / 2) + rob.head.x,
            -(rob.BodyLength_mm / 2) + rob.head.y,
            rob.head.z);

    // location of the centre of the head on the grid map
    // adjusted for the robot pose and the head pan and tilt angle.
    // Note that the positions and orientations of individual cameras
    // on the head have already been accounted for within
    // stereoModel.createObservation
    pos3D head_locn = head_centroid.rotate(rob.head.pan + pan, rob.head.tilt, 0);
    head_locn = head_locn.translate(x, y, 0);
    head_location.copyFrom(head_locn);

    for (int cam = 0; cam < rob.head.no_of_stereo_cameras; cam++)
    {
        // calculate the position of the centre of the stereo camera
        // (baseline centre point)
        // BUGFIX: the z coordinate previously re-used positionOrientation.y,
        // so any camera mounted above/below the head centre was mis-placed
        pos3D camera_centre_locn =
            new pos3D(
                rob.head.calibration[cam].positionOrientation.x,
                rob.head.calibration[cam].positionOrientation.y,
                rob.head.calibration[cam].positionOrientation.z);
        camera_centre_locn =
            camera_centre_locn.rotate(
                rob.head.calibration[cam].positionOrientation.pan + rob.head.pan + pan,
                rob.head.calibration[cam].positionOrientation.tilt,
                rob.head.calibration[cam].positionOrientation.roll);
        camera_centre_location[cam] =
            camera_centre_locn.translate(head_location.x, head_location.y, head_location.z);

        // where are the left and right cameras?
        // we need to know this for the origins of the vacancy models
        float half_baseline_length = rob.head.calibration[cam].baseline / 2;
        pos3D left_camera_locn = new pos3D(-half_baseline_length, 0, 0);
        left_camera_locn =
            left_camera_locn.rotate(
                rob.head.calibration[cam].positionOrientation.pan + rob.head.pan + pan,
                rob.head.calibration[cam].positionOrientation.tilt,
                rob.head.calibration[cam].positionOrientation.roll);
        // the right camera is the left camera mirrored through the baseline centre
        pos3D right_camera_locn =
            new pos3D(-left_camera_locn.x, -left_camera_locn.y, -left_camera_locn.z);
        left_camera_location[cam] =
            left_camera_locn.translate(
                camera_centre_location[cam].x,
                camera_centre_location[cam].y,
                camera_centre_location[cam].z);
        right_camera_location[cam] =
            right_camera_locn.translate(
                camera_centre_location[cam].x,
                camera_centre_location[cam].y,
                camera_centre_location[cam].z);
        right_camera_location[cam].pan = left_camera_location[cam].pan;
    }
}
/// <summary>
/// build an array of robots: indices 0-2 are base robots, 3-5 are
/// rotating variants and 6-8 are translating variants
/// </summary>
/// <returns>array of robots of length SIZE</returns>
public static robot[] generateRobot()
{
    int batterylife = 1;
    robot[] arr = new robot[SIZE];

    for (int i = 0; i < 9; i++)
    {
        if (i < 3)
        {
            arr[i] = new robot(directory, batterylife);
        }
        else if (i < 6)
        {
            arr[i] = new RotatingRobot(directory, batterylife);
        }
        else
        {
            arr[i] = new TranRobot(directory, batterylife);
        }
    }
    return arr;
}
/// <summary>
/// add the localisation ("L") and mapping ("M") paths for a track
/// </summary>
/// <param name="rob">robot object</param>
/// <param name="path">base directory for track data</param>
/// <param name="trackName">name assigned to this track</param>
/// <param name="starting_index">offset added to each image index</param>
/// <param name="TrackName">name of the .path odometry file</param>
/// <param name="trackType">selects one of the predefined track shapes</param>
/// <param name="mappingOnly">if true only the mapping path is added</param>
public void Add(robot rob, String path, String trackName, int starting_index, String TrackName, int trackType, bool mappingOnly)
{
    Name = trackName;

    // localisation pass is skipped when only mapping is required
    if (!mappingOnly)
    {
        robotPath localisation_path = getPath(rob, path, trackName, "L", starting_index, TrackName, trackType);
        Add(localisation_path, false);
    }

    // mapping pass always runs
    robotPath mapping_path = getPath(rob, path, trackName, "M", starting_index, TrackName, trackType);
    Add(mapping_path, true);
}
/// <summary>
/// draw 2000 samples from the motion model's normal distribution,
/// histogram them, save a plot image and assert the peak is central
/// </summary>
public void SampleNormalDistribution()
{
    int image_width = 640;
    robot rob = new robot(1);
    motionModel mm = new motionModel(rob, rob.LocalGrid, 1);
    byte[] img_rays = new byte[image_width * image_width * 3];
    Bitmap bmp = new Bitmap(image_width, image_width, System.Drawing.Imaging.PixelFormat.Format24bppRgb);

    int[] hist = new int[20];
    int max = 1;
    int max_index = 0;

    // accumulate the sample histogram, tracking the tallest bin
    for (int sample = 0; sample < 2000; sample++)
    {
        float v = mm.sample_normal_distribution(20);
        int bin = (hist.Length / 2) + ((int)v / (hist.Length / 2));
        if ((bin > -1) && (bin < hist.Length))
        {
            hist[bin]++;
            if (hist[bin] > max)
            {
                max = hist[bin];
                max_index = bin;
            }
        }
    }
    max += 5;

    // render the histogram as a column per image x position
    for (int x = 0; x < image_width; x++)
    {
        int bin = x * hist.Length / image_width;
        drawing.drawLine(
            img_rays, image_width, image_width,
            x, image_width - 1,
            x, image_width - 1 - (hist[bin] * image_width / max),
            255, 255, 255, 0, false);
    }

    BitmapArrayConversions.updatebitmap_unsafe(img_rays, bmp);
    bmp.Save("motionmodel_tests_SampleNormalDistribution.bmp", System.Drawing.Imaging.ImageFormat.Bmp);

    Assert.IsTrue(max_index == hist.Length / 2, "Peak of normal distribution is offset");
}
/// <summary>
/// generate scores for poses. This is only for testing purposes.
/// Each trial pose is scored by its distance from the true robot position.
/// </summary>
/// <param name="rob">robot object whose motion model is scored</param>
private void surveyPosesDummy(robot rob)
{
    motionModel motion_model = rob.motion;

    // examine the pose list: closer poses get higher scores
    for (int i = 0; i < motion_model.survey_trial_poses; i++)
    {
        particlePath trial_path = (particlePath)motion_model.Poses[i];
        particlePose current = trial_path.current_pose;

        float dx = rob.x - current.x;
        float dy = rob.y - current.y;
        float separation = (float)Math.Sqrt((dx * dx) + (dy * dy));

        // update the score for this pose
        motion_model.updatePoseScore(trial_path, 1.0f / (1 + separation));
    }

    // indicate that the pose scores have been updated
    motion_model.PosesEvaluated = true;
}
/// <summary>
/// create a motion model for the given robot and occupancy grid
/// </summary>
/// <param name="rob">robot object</param>
/// <param name="LocalGrid">occupancy grid used for localisation</param>
/// <param name="random_seed">seed for the random number generator</param>
public motionModel(
    robot rob,
    dpslam LocalGrid,
    int random_seed)
{
    // seed the random number generator
    rnd = new MersenneTwister(random_seed);

    this.rob = rob;
    this.LocalGrid = LocalGrid;
    Poses = new List<particlePath>();
    ActivePoses = new List<particlePath>();

    // some default noise values: entries 0-1, 2-3 and 4+ each share a default
    motion_noise = new float[6];
    for (int i = 0; i < motion_noise.Length; i++)
    {
        if (i < 2)
        {
            motion_noise[i] = default_motion_noise_1;
        }
        else if (i < 4)
        {
            motion_noise[i] = default_motion_noise_2;
        }
        else
        {
            motion_noise[i] = default_motion_noise_3;
        }
    }

    // create some initial poses
    createNewPoses(rob.x, rob.y, rob.z, rob.pan, rob.tilt, rob.roll);
}
/// <summary>
/// construct the main window: open the serial link, wire the
/// message-decoding pipeline and start the periodic UI refresh
/// </summary>
public MainWindow()
{
    InitializeComponent();

    // serial link and message pipeline components
    SerialStream = new ReliableSerialPort("COM6", 115200, System.IO.Ports.Parity.None, 8, System.IO.Ports.StopBits.One);
    FrameDecoder = new msgDecoder();
    FrameProcessor = new msgProcessor();
    RobotModel = new robot(); //block logic

    // refresh the UI on a 100ms tick
    UI_Updater = new DispatcherTimer();
    UI_Updater.Interval = new TimeSpan(0, 0, 0, 0, 100);
    UI_Updater.Tick += UpdateUI;

    // wire the pipeline: serial bytes -> decoder -> processor -> UI handlers
    SerialStream.DataReceived += FrameDecoder.DecodeMessage;
    FrameDecoder.OnDataDecodedEvent += FrameProcessor.ProcessMessage;
    FrameProcessor.OnTextMessageProcessedEvent += FrameProcessor_OnTextMessageProcessedEvent;
    FrameProcessor.OnCheckSumErrorOccuredEvent += FrameProcessor_OnCheckSumErrorOccuredEvent;
    FrameProcessor.OnIrMessageProcessedEvent += FrameProcessor_OnIrMessageProcessedEvent;
    FrameProcessor.OnSpeedMessageProcessedEvent += FrameProcessor_OnSpeedMessageProcessedEvent;

    SerialStream.Open();
    UI_Updater.Start();
}
/// <summary>
/// test the motion model in open or closed loop mode by marching the
/// robot up the y axis and displaying the model at each step
/// </summary>
/// <param name="closed_loop">use closed loop mode</param>
private void test_motion_model(bool closed_loop)
{
    robot rob = new robot(1);
    motionModel motion_model = rob.GetBestMotionModel();

    // extent of the test area in millimetres
    int min_x_mm = 0;
    int min_y_mm = 0;
    int max_x_mm = 1000;
    int max_y_mm = 1000;
    int step_size = (max_y_mm - min_y_mm) / 15;
    int x_mm = (max_x_mm - min_x_mm) / 2;
    float pan = 0; // (float)Math.PI / 4;

    bool initial = true;
    for (int y_mm = min_y_mm; y_mm < max_y_mm; y_mm += step_size)
    {
        // in closed loop mode, score the trial poses first
        if (closed_loop)
        {
            surveyPosesDummy(rob);
        }
        rob.updateFromKnownPosition(null, x_mm, y_mm, pan);

        motion_model.Show(
            img_rays, standard_width, standard_height,
            min_x_mm, min_y_mm, max_x_mm, max_y_mm,
            initial);
        initial = false;
    }
}
/// <summary>
/// generate scores for poses. This is only for testing purposes.
/// Scores fall off with distance from the robot's true position.
/// </summary>
/// <param name="rob">robot object whose best motion model is scored</param>
private void surveyPosesDummy(robot rob)
{
    motionModel motion_model = rob.GetBestMotionModel();

    // examine the pose list
    for (int sample = 0; sample < motion_model.survey_trial_poses; sample++)
    {
        particlePath path = (particlePath)motion_model.Poses[sample];
        particlePose pose = path.current_pose;

        // distance between the trial pose and the true position
        float dx = rob.x - pose.x;
        float dy = rob.y - pose.y;
        float dist = (float)Math.Sqrt((dx * dx) + (dy * dy));

        // update the score for this pose
        float score = 1.0f / (1 + dist);
        motion_model.updatePoseScore(path, score);
    }

    // indicate that the pose scores have been updated
    motion_model.PosesEvaluated = true;
}
/// <summary>
/// load the left/right stereo image pair(s) for one point along a path
/// and feed them to the robot, accumulating a stereo matching score
/// </summary>
/// <param name="rob">robot whose head receives the images and features</param>
/// <param name="path">directory containing the track images</param>
/// <param name="TrackName">base name of the track image files</param>
/// <param name="track">track identifier (not referenced in this body)</param>
/// <param name="distance_index">index of the current position along the path</param>
/// <param name="pathPoints">total number of points on the path, used to offset per-camera image indices</param>
/// <param name="index">"M" indicates a mapping pass, anything else is localisation</param>
/// <returns>sum of stereo matching scores over all stereo cameras</returns>
public float loadGlimpseSentience(robot rob, String path, String TrackName, String track, int distance_index, int pathPoints, String index)
{
    float matching_score = 0;
    Byte[] left_bmp = null;
    Byte[] right_bmp = null;
    String image_filename;

    // only the "M" pass performs mapping
    bool mapping = true;
    if (index != "M")
    {
        mapping = false;
    }

    int stereo_cam_index = 0;
    bool left_camera = true;
    // alternate left/right: even iterations load a left image, odd iterations
    // load the right image and process the pair for the current stereo camera
    for (int i = 0; i < rob.head.no_of_cameras * 2; i++)
    {
        if (left_camera)
        {
            image_filename = path + TrackName + "_left_" + Convert.ToString(distance_index + (stereo_cam_index * pathPoints)) + ".bmp";
            if (!File.Exists(image_filename))
            {
                errorMessage = image_filename + " not found";
            }
            rob.head.imageFilename[i] = image_filename;
            left_bmp = util.loadFromBitmap(image_filename, rob.head.image_width, rob.head.image_height, 3);
        }
        else
        {
            image_filename = path + TrackName + "_right_" + Convert.ToString(distance_index + (stereo_cam_index * pathPoints)) + ".bmp";
            if (!File.Exists(image_filename))
            {
                errorMessage = image_filename + " not found";
            }
            rob.head.imageFilename[i] = image_filename;
            right_bmp = util.loadFromBitmap(image_filename, rob.head.image_width, rob.head.image_height, 3);

            // left_bmp was loaded on the previous (left) iteration
            matching_score += rob.loadRectifiedImages(stereo_cam_index, left_bmp, right_bmp, 3, mapping);

            // store images and features for later display
            // (i - 1 is the left image loaded on the previous iteration)
            if (stereo_cam_index == 0)
            {
                image_filenames_stereoCamera0.Add(rob.head.imageFilename[i - 1]);
                image_filenames_stereoCamera0.Add(rob.head.imageFilename[i]);
                features_stereoCamera0.Add(rob.head.features[stereo_cam_index]);
            }
            else
            {
                image_filenames_stereoCamera1.Add(rob.head.imageFilename[i - 1]);
                image_filenames_stereoCamera1.Add(rob.head.imageFilename[i]);
                features_stereoCamera1.Add(rob.head.features[stereo_cam_index]);
            }
            stereo_cam_index++;
        }
        left_camera = !left_camera;
    }
    return (matching_score);
}
/// <summary>
/// Show a diagram of the robot in this pose
/// This is useful for checking that the positions of cameras have
/// been calculated correctly
/// </summary>
/// <param name="rob">robot object</param>
/// <param name="img">image as a byte array</param>
/// <param name="width">width of the image</param>
/// <param name="height">height of the image</param>
/// <param name="clearBackground">clear the background before drawing</param>
/// <param name="min_x_mm">top left x coordinate</param>
/// <param name="min_y_mm">top left y coordinate</param>
/// <param name="max_x_mm">bottom right x coordinate</param>
/// <param name="max_y_mm">bottom right y coordinate</param>
/// <param name="line_width">line width in pixels used for drawing</param>
/// <param name="showFieldOfView">whether to show the field of view of each camera</param>
public void Show(
    robot rob,
    byte[] img, int width, int height,
    bool clearBackground,
    int min_x_mm, int min_y_mm,
    int max_x_mm, int max_y_mm,
    int line_width,
    bool showFieldOfView)
{
    // white background
    if (clearBackground)
    {
        for (int i = 0; i < width * height * 3; i++)
        {
            img[i] = 255;
        }
    }

    // get the positions of the head and cameras for this pose
    pos3D head_location = new pos3D(0, 0, 0);
    pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
    pos3D[] left_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
    pos3D[] right_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
    calculateCameraPositions(rob,
                             ref head_location,
                             ref camera_centre_location,
                             ref left_camera_location,
                             ref right_camera_location);

    // extent of the viewed area in millimetres
    int w = max_x_mm - min_x_mm;
    int h = max_y_mm - min_y_mm;

    // draw the body
    int xx = (int)((x - min_x_mm) * width / w);
    int yy = (int)((y - min_y_mm) * height / h);
    int wdth = (int)(rob.BodyWidth_mm * width / w);
    int hght = (int)(rob.BodyLength_mm * height / h);
    drawing.drawBox(img, width, height, xx, yy, wdth, hght, pan, 0, 255, 0, line_width);

    // draw the head
    xx = (int)((head_location.x - min_x_mm) * width / w);
    yy = (int)((head_location.y - min_y_mm) * height / h);
    int radius = (int)(rob.HeadSize_mm * width / w);
    drawing.drawBox(img, width, height, xx, yy, radius, radius, head_location.pan, 0, 255, 0, line_width);

    // draw the cameras
    for (int cam = 0; cam < rob.head.no_of_stereo_cameras; cam++)
    {
        // draw the left camera; camera boxes are sized relative to the baseline
        int xx1 = (int)((left_camera_location[cam].x - min_x_mm) * width / w);
        int yy1 = (int)((left_camera_location[cam].y - min_y_mm) * height / h);
        wdth = (int)((rob.head.cameraBaseline[cam] / 4) * width / w);
        hght = (int)((rob.head.cameraBaseline[cam] / 12) * height / h);
        if (hght < 1)
        {
            hght = 1;
        }
        drawing.drawBox(img, width, height, xx1, yy1, wdth, hght, left_camera_location[cam].pan + (float)(Math.PI / 2), 0, 255, 0, line_width);

        // draw the right camera
        int xx2 = (int)((right_camera_location[cam].x - min_x_mm) * width / w);
        int yy2 = (int)((right_camera_location[cam].y - min_y_mm) * height / h);
        drawing.drawBox(img, width, height, xx2, yy2, wdth, hght, right_camera_location[cam].pan + (float)(Math.PI / 2), 0, 255, 0, line_width);

        if (showFieldOfView)
        {
            // half the field of view in radians (degrees * PI / 360)
            float half_FOV = rob.head.cameraFOVdegrees[cam] * (float)Math.PI / 360.0f;

            // rays marking the edges of the left camera's field of view
            int xx_ray = xx1 + (int)(width * Math.Sin(left_camera_location[cam].pan + half_FOV));
            int yy_ray = yy1 + (int)(width * Math.Cos(left_camera_location[cam].pan + half_FOV));
            drawing.drawLine(img, width, height, xx1, yy1, xx_ray, yy_ray, 200, 200, 255, 0, false);
            xx_ray = xx1 + (int)(width * Math.Sin(left_camera_location[cam].pan - half_FOV));
            yy_ray = yy1 + (int)(width * Math.Cos(left_camera_location[cam].pan - half_FOV));
            drawing.drawLine(img, width, height, xx1, yy1, xx_ray, yy_ray, 200, 200, 255, 0, false);

            // rays marking the edges of the right camera's field of view
            xx_ray = xx2 + (int)(width * Math.Sin(right_camera_location[cam].pan + half_FOV));
            yy_ray = yy2 + (int)(width * Math.Cos(right_camera_location[cam].pan + half_FOV));
            drawing.drawLine(img, width, height, xx2, yy2, xx_ray, yy_ray, 200, 200, 255, 0, false);
            xx_ray = xx2 + (int)(width * Math.Sin(right_camera_location[cam].pan - half_FOV));
            yy_ray = yy2 + (int)(width * Math.Cos(right_camera_location[cam].pan - half_FOV));
            drawing.drawLine(img, width, height, xx2, yy2, xx_ray, yy_ray, 200, 200, 255, 0, false);
        }
    }
}
/// <summary>
/// build one trajectory series: 15 unit moves, turning by +turn_degrees
/// at step 5 and by -turn_degrees at step 10, straight otherwise
/// </summary>
/// <param name="turn_degrees">turn magnitude in degrees</param>
/// <returns>line series of the robot's (x, y) positions</returns>
private LineSeries BuildTurnSeries(double turn_degrees)
{
    var series = new LineSeries();
    var car = new robot();
    car.set(0, 0, 0);
    for (int i = 0; i < 15; i++)
    {
        // record position before each move
        series.Points.Add(new DataPoint(car.x, car.y));
        if (i == 5)
        {
            car.move(turn_degrees * (Math.PI / 180), 1);
        }
        else if (i == 10)
        {
            car.move(-turn_degrees * (Math.PI / 180), 1);
        }
        else
        {
            car.move(0, 1);
        }
    }
    return series;
}

/// <summary>
/// plot three robot trajectories with turns of decreasing magnitude
/// (15, 5 and 1 degrees) to compare their paths
/// </summary>
private void Window_Loaded_1(object sender, RoutedEventArgs e)
{
    // the three repeated simulation blocks are deduplicated into BuildTurnSeries
    var temp = new PlotModel();
    temp.Series.Add(BuildTurnSeries(15));
    temp.Series.Add(BuildTurnSeries(5));
    temp.Series.Add(BuildTurnSeries(1));
    Plot.Model = temp;
}
/// <summary>
/// write the robot's row and column coordinates to the console
/// </summary>
/// <param name="obj">robot whose coordinates are printed</param>
public static void printRobotCoord(robot obj)
{
    var row = obj.getRow();
    var column = obj.getColumn();
    Console.WriteLine("row:{0} Column:{1}", row, column);
}
// Use this for initialization: cache the parent robot reference and
// this object's box collider
void Start()
{
    mCollider = GetComponent<BoxCollider2D>();
    robotRef = GetComponentInParent<robot>();
}
/// <summary>
/// entry point: parse command line options, configure the robot's stereo
/// camera calibration, build inverse sensor model lookup tables and save
/// the resulting model plus diagnostic images
/// </summary>
/// <param name="args">command line arguments (see GetValidParameters)</param>
public static void Main(string[] args)
{
    // extract command line parameters
    ArrayList parameters = commandline.ParseCommandLineParameters(args, "-", GetValidParameters());
    string help_str = commandline.GetParameterValue("help", parameters);
    if (help_str != "")
    {
        ShowHelp();
    }
    else
    {
        Console.WriteLine("stereosensormodel version 0.1");
        int no_of_cameras = 1;
        robot rob = new robot(no_of_cameras, robot.MAPPING_DPSLAM);

        // output filename, defaulting to sensormodel.xml
        string filename = commandline.GetParameterValue("filename", parameters);
        if (filename == "")
        {
            filename = "sensormodel.xml";
        }

        // optionally use integer rather than floating point model values
        rob.integer_sensor_model_values = false;
        string integer_sensor_model_values_str = commandline.GetParameterValue("integer", parameters);
        if (integer_sensor_model_values_str != "")
        {
            Console.WriteLine("Use integer sensor models");
            rob.integer_sensor_model_values = true;
        }

        // stereo baseline in millimetres
        float baseline_mm = 107;
        string baseline_mm_str = commandline.GetParameterValue("baseline", parameters);
        if (baseline_mm_str != "")
        {
            baseline_mm = Convert.ToSingle(baseline_mm_str);
        }

        // camera field of view in degrees
        float camera_FOV_degrees = 90;
        string camera_FOV_degrees_str = commandline.GetParameterValue("fov", parameters);
        if (camera_FOV_degrees_str != "")
        {
            camera_FOV_degrees = Convert.ToSingle(camera_FOV_degrees_str);
        }

        // size of each local grid cell in millimetres
        float LocalGridCellSize_mm = 40;
        string LocalGridCellSize_mm_str = commandline.GetParameterValue("cellsize", parameters);
        if (LocalGridCellSize_mm_str != "")
        {
            LocalGridCellSize_mm = Convert.ToSingle(LocalGridCellSize_mm_str);
        }

        // image width used to size the ray model lookup below
        int image_width = 320;
        string image_width_str = commandline.GetParameterValue("imagewidth", parameters);
        if (image_width_str != "")
        {
            image_width = Convert.ToInt32(image_width_str);
        }
        int image_height = image_width / 2;

        // apply the calibration values to every stereo camera
        // NOTE(review): leftcam image size is hard-coded to 640x480 here even
        // though an 'imagewidth' option is parsed above — confirm intent
        for (int i = 0; i < rob.head.no_of_stereo_cameras; i++)
        {
            rob.head.calibration[i].baseline = baseline_mm;
            rob.head.calibration[i].leftcam.camera_FOV_degrees = camera_FOV_degrees;
            rob.head.calibration[i].rightcam.camera_FOV_degrees = camera_FOV_degrees;
            rob.head.calibration[i].positionOrientation.roll = 0;
            rob.head.calibration[i].leftcam.image_width = 640;
            rob.head.calibration[i].leftcam.image_height = 480;
        }

        // reload an existing sensor model if one was previously saved
        // NOTE(review): only sensormodel[0] is loaded even with multiple cameras
        if (File.Exists(filename))
        {
            Console.WriteLine("Loading " + filename);
            rob.head.sensormodel = new rayModelLookup[no_of_cameras];
            rob.head.sensormodel[0] = new rayModelLookup(image_width / 2, 4000);
            rob.head.sensormodel[0].Load(filename);
        }

        // create the sensor models
        Console.Write("Creating sensor models, please wait...");
        rob.inverseSensorModel.createLookupTables(rob.head, (int)LocalGridCellSize_mm);
        Console.WriteLine("Done");

        // save a graph image of the ray model
        string raymodel_image_filename = "ray_model.jpg";
        Console.WriteLine("Saving ray model image: " + raymodel_image_filename);
        int img_width = 640;
        int img_height = 480;
        byte[] img = new byte[img_width * img_height * 3];
        rob.inverseSensorModel.ray_model_to_graph_image(img, img_width, img_height);
        Bitmap bmp = new Bitmap(img_width, img_height, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
        BitmapArrayConversions.updatebitmap_unsafe(img, bmp);
        bmp.Save(raymodel_image_filename, System.Drawing.Imaging.ImageFormat.Jpeg);

        // save an image of the gaussian distribution
        string distribution_image_filename = "gaussian_distribution.jpg";
        Console.WriteLine("Saving gaussian distribution image: " + distribution_image_filename);
        rob.inverseSensorModel.showDistribution(img, img_width, img_height);
        BitmapArrayConversions.updatebitmap_unsafe(img, bmp);
        bmp.Save(distribution_image_filename, System.Drawing.Imaging.ImageFormat.Jpeg);

        // save the sensor model itself
        Console.WriteLine("Saving " + filename);
        rob.Save(filename);
    }
}
// entry point: construct a robot from the grid definition file
static void Main(string[] args)
{
    var gridRobot = new robot("/Users/shaunlee/Projects/P3/P3/grid.txt", 1);
}
// GET: Robot/Delete/5
/// <summary>
/// show the delete view for the robot with the given id
/// (SingleOrDefault: null view model when no match, throws on duplicates)
/// </summary>
/// <param name="id">robot identifier</param>
public ActionResult Delete(int id)
{
    var record = db.robot.SingleOrDefault(r => r.id_robot == id);
    return View(record);
}
/// <summary>
/// create a dpslam server for a robot with the given number of stereo cameras
/// </summary>
/// <param name="no_of_stereo_cameras">number of stereo cameras on the robot's head</param>
public dpslamServer(int no_of_stereo_cameras)
{
    rob = new robot(no_of_stereo_cameras);
    // NOTE(review): CreateSim looks like a test helper — confirm it is
    // intentionally invoked from the server constructor
    dpslam_tests.CreateSim();
}
//[Test()]
/// <summary>
/// build a small simulated environment from a hard-coded 14x14 occupancy
/// map, drive the robot through it for a few seconds and save diagnostic
/// bitmap images of the grid and motion model
/// </summary>
public static void CreateSim()
{
    // simulation grid parameters
    // NOTE(review): only dimension_cells and cellSize_mm are read below;
    // the other values are declared but unused in this body
    int dimension_cells = 100;
    int dimension_cells_vertical = 20;
    int cellSize_mm = 50;
    int localisationRadius_mm = 2000;
    int maxMappingRange_mm = 2000;
    float vacancyWeighting = 0.8f;

    // 14x14 occupancy map: 1 = wall, 0 = free space
    int map_dim = 14;
    byte[] map = {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };
    dpslam sim = CreateSimulation(map_dim, map, 50);

    // render the initial simulated grid and save it
    particlePose pose = null;
    int img_width = 640;
    byte[] img = new byte[img_width * img_width * 3];
    sim.Show(0, img, img_width, img_width, pose, true, true);
    Bitmap bmp = new Bitmap(img_width, img_width, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    BitmapArrayConversions.updatebitmap_unsafe(img, bmp);
    bmp.Save("dpslam_tests_CreateSimulation1.bmp", System.Drawing.Imaging.ImageFormat.Bmp);

    // configure a robot with a single stereo camera
    robot rob = new robot(1);
    rob.WheelBase_mm = 90;
    rob.WheelDiameter_mm = 30;
    rob.x = 0;
    rob.y = 0;
    rob.pan = 90 * (float)Math.PI / 180.0f;
    rob.head.cameraFOVdegrees[0] = 90;
    rob.head.cameraSensorSizeMm[0] = 4.17f;
    rob.head.cameraFocalLengthMm[0] = 3.6f;
    rob.head.cameraImageWidth[0] = 320;
    rob.head.cameraImageHeight[0] = 240;

    // sensor model for the camera
    rayModelLookup sensor_model = new rayModelLookup(10, 10);
    sensor_model.InitSurveyorSVS();
    rob.head.sensormodel[0] = sensor_model;

    // drive forward at constant velocity for 4 seconds, updating and
    // displaying the motion model at each step
    float time_elapsed_sec = 1;
    float forward_velocity = 50;
    float angular_velocity_pan = 0;
    float angular_velocity_tilt = 0;
    float angular_velocity_roll = 0;
    float min_x_mm = -((dimension_cells / 2) * cellSize_mm) / 3;
    float min_y_mm = -((dimension_cells / 2) * cellSize_mm) / 3;
    float max_x_mm = -min_x_mm;
    float max_y_mm = -min_y_mm;
    for (float t = 0.0f; t < 4; t += time_elapsed_sec)
    {
        rob.updateFromVelocities(sim, forward_velocity, angular_velocity_pan, angular_velocity_tilt, angular_velocity_roll, time_elapsed_sec);
        Console.WriteLine("xy: " + rob.x.ToString() + " " + rob.y.ToString());
        // clear the image only on the first iteration (t == 0.0f)
        rob.motion.Show(img, img_width, img_width, min_x_mm, min_y_mm, max_x_mm, max_y_mm, true, false, t == 0.0f);
    }

    // save the final grid and motion model images
    rob.SaveGridImage("dpslam_tests_CreateSimulation2.bmp");
    BitmapArrayConversions.updatebitmap_unsafe(img, bmp);
    bmp.Save("dpslam_tests_CreateSimulation3.bmp", System.Drawing.Imaging.ImageFormat.Bmp);
}
/// <summary>
/// add an observation taken from this pose
/// </summary>
/// <param name="stereo_rays">list of ray objects in this observation</param>
/// <param name="rob">robot object</param>
/// <param name="LocalGrid">occupancy grid into which to insert the observation</param>
/// <param name="localiseOnly">if true does not add any mapping particles (pure localisation)</param>
/// <returns>localisation matching score</returns>
public float AddObservation(
    List<evidenceRay>[] stereo_rays,
    robot rob,
    dpslam LocalGrid,
    bool localiseOnly)
{
    // clear the localisation score
    float localisation_score = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;

    // get the positions of the head and cameras for this pose
    pos3D head_location = new pos3D(0, 0, 0);
    pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
    pos3D[] left_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
    pos3D[] right_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
    calculateCameraPositions(
        rob,
        ref head_location,
        ref camera_centre_location,
        ref left_camera_location,
        ref right_camera_location);

    // iterate over the stereo cameras, highest index first
    for (int cam = stereo_rays.Length - 1; cam >= 0; cam--)
    {
        // iterate through each ray of this camera's observation
        for (int r = stereo_rays[cam].Count - 1; r >= 0; r--)
        {
            // observed ray.  Note that this is in an egocentric
            // coordinate frame relative to the head of the robot
            evidenceRay ray = stereo_rays[cam][r];

            // translate and rotate this ray appropriately for the pose
            evidenceRay trial_ray = ray.trialPose(
                camera_centre_location[cam].pan,
                camera_centre_location[cam].tilt,
                camera_centre_location[cam].roll,
                camera_centre_location[cam].x,
                camera_centre_location[cam].y,
                camera_centre_location[cam].z);

            // update the grid cells for this ray and update the
            // localisation score accordingly
            float score = LocalGrid.Insert(
                trial_ray, this,
                rob.head.sensormodel[cam],
                left_camera_location[cam],
                right_camera_location[cam],
                localiseOnly);

            // only accumulate valid (non-sentinel) scores
            if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
            {
                if (localisation_score == occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                {
                    localisation_score = score;
                }
                else
                {
                    localisation_score += score;
                }
            }
        }
    }
    return localisation_score;
}
/// <summary>
/// construct a robot path for the given track: read (or synthesise) odometry,
/// then place the robot at each recorded pose and load the corresponding
/// stereo images
/// </summary>
/// <param name="rob">robot object; its pose is mutated while walking the path</param>
/// <param name="path">base directory for track data</param>
/// <param name="track">track sub-directory name</param>
/// <param name="index">"L" for localisation pass, "M" for mapping pass</param>
/// <param name="starting_index">offset added to each image index</param>
/// <param name="TrackName">name of the .path odometry file</param>
/// <param name="trackType">selects one of the predefined track shapes (1-5)</param>
/// <returns>the assembled robot path</returns>
private robotPath getPath(robot rob, String path, String track, String index, int starting_index, String TrackName, int trackType)
{
    robotPath rpath = new robotPath();
    robotOdometry odometry = new robotOdometry();
    odometry.readPathFileSentience(path + track + "\\" + TrackName + ".path", index, rob);

    // choose the track shape; types 3-5 discard the file odometry and
    // generate a synthetic path instead
    switch (trackType)
    {
    case 1:
    {
        // mapping X track
        standardXTrack(2000, 500, odometry, index);
        break;
    }

    case 2:
    {
        //localisation X track
        standardXTrack(2000, -500, odometry, index);
        break;
    }

    case 3:
    {
        //there and back track
        odometry.Clear();
        thereAndBack(100, 46, odometry, 110, 0, 0);
        break;
    }

    case 4:
    {
        //square
        odometry.Clear();
        squareAndBack(100, 10, 6, odometry, 110);
        break;
    }

    case 5:
    {
        //special
        odometry.Clear();
        specialTrack(100, 16, 21, 3, odometry, 110);
        break;
    }
    }

    // no measurements means the path file was missing or empty
    if (odometry.no_of_measurements == 0)
    {
        errorMessage = path + track + "\\" + TrackName + ".path not found or contains no data. ";
        errorMessage += "Check that the path file is included as 'Content' within the project folder";
    }

    // walk the odometry, positioning the robot at each pose, loading the
    // stereo images for that point and appending to the path
    for (int distance_index = 0; distance_index < odometry.no_of_measurements; distance_index++)
    {
        pos3D pos = odometry.getPosition(distance_index);
        rob.x = pos.x;
        rob.y = pos.y;
        rob.z = pos.z;
        rob.pan = pos.pan;
        rob.tilt = pos.tilt;
        rob.roll = pos.roll;
        loadGlimpseSentience(rob, path + track + "\\", TrackName, index, distance_index + starting_index, odometry.no_of_measurements, index);
        rob.updatePath(rpath);
    }
    return (rpath);
}