Ejemplo n.º 1
0
        /// <summary>
        /// Assigns a score to every survey pose based on its distance from
        /// the robot's true position.  This is only for testing purposes.
        /// </summary>
        /// <param name="rob">robot whose trial poses are to be scored</param>
        private void surveyPosesDummy(robot rob)
        {
            motionModel motion_model = rob.motion;

            // score each trial pose: the closer it is to the actual
            // robot position the higher its score
            for (int i = 0; i < motion_model.survey_trial_poses; i++)
            {
                particlePath path = (particlePath)motion_model.Poses[i];
                particlePose pose = path.current_pose;

                // euclidean distance between hypothesis and true position
                float dx = rob.x - pose.x;
                float dy = rob.y - pose.y;
                float separation = (float)Math.Sqrt((dx * dx) + (dy * dy));

                // score peaks at 1.0 for zero separation and falls off with distance
                motion_model.updatePoseScore(path, 1.0f / (1 + separation));
            }

            // indicate that the pose scores have been updated
            motion_model.PosesEvaluated = true;
        }
Ejemplo n.º 2
0
		/// <summary>
		/// Draws a histogram of 2000 samples taken from the motion model's
		/// normal distribution, saves it as a bitmap, and asserts that the
		/// peak of the distribution lies in the centre bucket.
		/// </summary>
		public void SampleNormalDistribution()
		{
			int image_width = 640;
			robot rob = new robot(1);
			motionModel mm = new motionModel(rob, rob.LocalGrid, 1);
			byte[] img_rays = new byte[image_width * image_width * 3];
			Bitmap bmp = new Bitmap(image_width, image_width, System.Drawing.Imaging.PixelFormat.Format24bppRgb);

			// accumulate samples into a histogram, tracking the tallest bucket
			int[] hist = new int[20];
			int half_buckets = hist.Length / 2;
			int max = 1;
			int max_index = 0;
			for (int sample = 0; sample < 2000; sample++)
			{
				float v = mm.sample_normal_distribution(20);
				// bucket index, offset so that v == 0 lands in the centre bucket
				int index = half_buckets + ((int)v / half_buckets);
				if ((index > -1) && (index < hist.Length))
				{
					hist[index]++;
					if (hist[index] > max)
					{
						max = hist[index];
						max_index = index;
					}
				}
			}

			// add a little headroom so the tallest bar doesn't touch the top
			max += 5;

			// render each histogram bucket as a column of vertical lines
			for (int x = 0; x < image_width; x++)
			{
				int index = x * hist.Length / image_width;
				drawing.drawLine(img_rays, image_width, image_width, x, image_width - 1, x, image_width - 1 - (hist[index] * image_width / max), 255, 255, 255, 0, false);
			}

			BitmapArrayConversions.updatebitmap_unsafe(img_rays, bmp);
			bmp.Save("motionmodel_tests_SampleNormalDistribution.bmp", System.Drawing.Imaging.ImageFormat.Bmp);

			Assert.IsTrue(max_index == half_buckets, "Peak of normal distribution is offset");
		}
Ejemplo n.º 3
0
        /// <summary>
        /// calculate the position of the robots head and cameras for this pose
        /// </summary>
        /// <param name="rob">robot object</param>
        /// <param name="head_location">returned location of the centre of the head</param>
        /// <param name="camera_centre_location">returned location of the centre of each stereo camera</param>
        /// <param name="left_camera_location">returned location of the left camera within each stereo camera</param>
        /// <param name="right_camera_location">returned location of the right camera within each stereo camera</param>
        protected void calculateCameraPositions(
            robot rob,
            ref pos3D head_location,
            ref pos3D[] camera_centre_location,
            ref pos3D[] left_camera_location,
            ref pos3D[] right_camera_location)
        {
            // calculate the position of the centre of the head relative to 
            // the centre of rotation of the robots body
            pos3D head_centroid = 
				new pos3D(
				    -(rob.BodyWidth_mm / 2) + rob.head.x,
                    -(rob.BodyLength_mm / 2) + rob.head.y,
                    rob.head.z);

            // location of the centre of the head on the grid map
            // adjusted for the robot pose and the head pan and tilt angle.
            // Note that the positions and orientations of individual cameras
            // on the head have already been accounted for within stereoModel.createObservation
            pos3D head_locn = head_centroid.rotate(rob.head.pan + pan, rob.head.tilt, 0);
            head_locn = head_locn.translate(x, y, 0);
            head_location.copyFrom(head_locn);

            for (int cam = 0; cam < rob.head.no_of_stereo_cameras; cam++)
            {
                // calculate the position of the centre of the stereo camera
                // (baseline centre point)
                // Bug fix: the z coordinate was previously built from the y
                // component of the head-relative camera position
                pos3D camera_centre_locn = new pos3D(
                    rob.head.cameraPositionHeadRelative[cam].x,
                    rob.head.cameraPositionHeadRelative[cam].y,
                    rob.head.cameraPositionHeadRelative[cam].z);
                camera_centre_locn = camera_centre_locn.rotate(
                    rob.head.cameraPositionHeadRelative[cam].pan + rob.head.pan + pan,
                    rob.head.cameraPositionHeadRelative[cam].tilt,
                    rob.head.cameraPositionHeadRelative[cam].roll);
                camera_centre_location[cam] = camera_centre_locn.translate(head_location.x, head_location.y, head_location.z);

                // where are the left and right cameras?
                // we need to know this for the origins of the vacancy models
                float half_baseline_length = rob.head.cameraBaseline[cam] / 2;
                pos3D left_camera_locn = new pos3D(-half_baseline_length, 0, 0);
                left_camera_locn = left_camera_locn.rotate(
                    rob.head.cameraPositionHeadRelative[cam].pan + rob.head.pan + pan,
                    rob.head.cameraPositionHeadRelative[cam].tilt,
                    rob.head.cameraPositionHeadRelative[cam].roll);

                // the right camera mirrors the left camera about the baseline centre
                pos3D right_camera_locn = new pos3D(-left_camera_locn.x, -left_camera_locn.y, -left_camera_locn.z);
                left_camera_location[cam] = left_camera_locn.translate(camera_centre_location[cam].x, camera_centre_location[cam].y, camera_centre_location[cam].z);
                right_camera_location[cam] = right_camera_locn.translate(camera_centre_location[cam].x, camera_centre_location[cam].y, camera_centre_location[cam].z);
                right_camera_location[cam].pan = left_camera_location[cam].pan;
            }
        }
Ejemplo n.º 4
0
        /// <summary>
        /// create a list of rays using the current grid as a simulation
        /// </summary>
        /// <param name="rob">robot object</param>
        /// <param name="stereo_camera_index">index of the stereo camera</param>
        /// <returns>list of evidence rays, centred at (0, 0, 0)</returns>
        public List<evidenceRay> createSimulatedObservation(		                                                    
		    robot rob,
		    int stereo_camera_index)
        {
			stereoHead head = rob.head;
			stereoModel inverseSensorModel = rob.inverseSensorModel;
			float max_range_mm = rob.LocalGridMappingRange_mm;
            List<evidenceRay> result = new List<evidenceRay>();
			
            // get essential data for this stereo camera
			int image_width = head.cameraImageWidth[stereo_camera_index];
			int image_height = head.cameraImageHeight[stereo_camera_index];

			// probe the map and return ranges
			pos3D camPose = head.cameraPosition[stereo_camera_index];			
			int step_size = 10;
			// one range reading per step_size x step_size image patch
			float[] range_buffer = new float[(image_width/step_size)*(image_height/step_size)];
			ProbeView(
			    null, 
			    camPose.x, camPose.y, camPose.z, 
			    camPose.pan, camPose.tilt, camPose.roll, 
			    head.cameraFOVdegrees[stereo_camera_index], 
			    image_width, image_height, 
			    step_size, 
			    max_range_mm, true, 
			    range_buffer);
			
			// generate stereo features from the ranges
			bool nothing_seen = true;
			// focal length converted from millimetres into pixels
			float focal_length_pixels = 
				head.cameraFocalLengthMm[stereo_camera_index] * image_width / 
				head.cameraSensorSizeMm[stereo_camera_index];
			float baseline_mm = head.cameraBaseline[stereo_camera_index];
			int n = 0;
			int idx = 0;
			int no_of_features = 0;
			// four floats per feature; the first value is always 1 here
			// (presumably a probability/uncertainty weight - TODO confirm)
			float[] features = new float[range_buffer.Length*4];
			// three colour bytes per feature, filled with a neutral grey
			byte[] featureColours = new byte[range_buffer.Length*3];
			for (int i = 0; i < featureColours.Length; i++) featureColours[i] = 100;
			for (int camy = 0; camy < image_height; camy+=step_size)
			{
			    for (int camx = 0; camx < image_width; camx+=step_size, n++)
			    {
					if (idx < head.MAX_STEREO_FEATURES-5)
					{
						// NOTE(review): a feature is appended even when the probe
						// returned no hit (range_buffer[n] <= -1) - confirm whether
						// such entries should instead be skipped
						if (range_buffer[n] > -1) nothing_seen = false;
					    float disparity_pixels = 
							stereoModel.DistanceToDisparity(
							    range_buffer[n], 
							    focal_length_pixels, 
							    baseline_mm);
						//Console.WriteLine("range:        " + range_buffer[n].ToString());
						//Console.WriteLine("focal length: " + head.cameraFocalLengthMm[stereo_camera_index] + " " + focal_length_pixels.ToString());
						//Console.WriteLine("disparity:    " + disparity_pixels.ToString());
					    features[idx++] = 1;
					    features[idx++] = camx;
					    features[idx++] = camy;
					    features[idx++] = disparity_pixels;						
						no_of_features++;
					}
				}
			}

            Console.WriteLine("no_of_features " + no_of_features.ToString());

			if (nothing_seen) Console.WriteLine("createSimulatedObservation: nothing seen");
			head.setStereoFeatures(stereo_camera_index, features, no_of_features);
			head.setStereoFeatureColours(stereo_camera_index, featureColours, no_of_features);
			
			// convert the packed features into evidence rays via the inverse sensor model
            result = inverseSensorModel.createObservation(
			    head.cameraPosition[stereo_camera_index],
			    head.cameraBaseline[stereo_camera_index],
			    head.cameraImageWidth[stereo_camera_index],
			    head.cameraImageHeight[stereo_camera_index],
			    head.cameraFOVdegrees[stereo_camera_index],
			    head.features[stereo_camera_index],
			    head.featureColours[stereo_camera_index],
			    false);
			
            return (result);
        }
Ejemplo n.º 5
0
		/// <summary>
		/// Builds a small simulated world from a bitmap-style occupancy map,
		/// drives a robot through it for a few seconds, and writes diagnostic
		/// images of the grid and motion model to disk.
		/// </summary>
		//[Test()]
		public static void CreateSim()
		{
			int dimension_cells = 100;
			int dimension_cells_vertical = 20;
			int cellSize_mm = 50;
			int localisationRadius_mm = 2000;
			int maxMappingRange_mm = 2000;
			float vacancyWeighting = 0.8f;

			// occupancy map used to build the simulation (1 = wall)
			int map_dim = 14;
			byte[] map = {
				0,0,0,0,0,0,0,0,0,0,0,0,0,0,
				0,0,0,0,0,0,0,0,0,0,0,0,0,0,				
				0,0,0,0,1,1,1,1,0,0,0,0,0,0,
				0,0,0,0,1,0,0,1,0,0,0,0,0,0,
				0,0,0,0,1,0,0,1,0,0,0,0,0,0,
				0,0,0,0,1,0,0,1,1,1,1,0,0,0,
				0,0,0,0,1,0,0,0,0,0,1,0,0,0,
				0,0,0,0,1,0,0,0,0,0,1,0,0,0,
				0,0,0,0,1,0,0,0,0,0,1,0,0,0,
				0,0,0,0,1,1,1,0,1,1,1,0,0,0,
				0,0,0,0,0,0,0,0,0,0,0,0,0,0,
				0,0,0,0,0,0,0,0,0,0,0,0,0,0,			
				0,0,0,0,0,0,0,0,0,0,0,0,0,0,
				0,0,0,0,0,0,0,0,0,0,0,0,0,0
			};
			dpslam sim = CreateSimulation(map_dim, map, 50);

			// save an overhead view of the simulated world
			particlePose no_pose = null;
			int frame_width = 640;
			byte[] frame = new byte[frame_width * frame_width * 3];
			sim.Show(0, frame, frame_width, frame_width, no_pose, true, true);
			Bitmap output_bmp = new Bitmap(frame_width, frame_width, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
			BitmapArrayConversions.updatebitmap_unsafe(frame, output_bmp);
			output_bmp.Save("dpslam_tests_CreateSimulation1.bmp", System.Drawing.Imaging.ImageFormat.Bmp);

			// configure a robot with a single stereo camera
			robot rob = new robot(1);
			rob.WheelBase_mm = 90;
			rob.WheelDiameter_mm = 30;
			rob.x = 0;
			rob.y = 0;
			rob.pan = 90 * (float)Math.PI / 180.0f;
			rob.head.cameraFOVdegrees[0] = 90;
			rob.head.cameraSensorSizeMm[0] = 4.17f;
			rob.head.cameraFocalLengthMm[0] = 3.6f;
			rob.head.cameraImageWidth[0] = 320;
			rob.head.cameraImageHeight[0] = 240;

			// attach a Surveyor SVS sensor model to the camera
			rayModelLookup svs_model = new rayModelLookup(10, 10);
			svs_model.InitSurveyorSVS();
			rob.head.sensormodel[0] = svs_model;

			// drive forwards for four seconds, drawing the motion model each step
			float time_elapsed_sec = 1;
			float forward_velocity = 50;
			float angular_velocity_pan = 0;
			float angular_velocity_tilt = 0;
			float angular_velocity_roll = 0;
			float min_x_mm = -((dimension_cells / 2) * cellSize_mm) / 3;
			float min_y_mm = -((dimension_cells / 2) * cellSize_mm) / 3;
			float max_x_mm = -min_x_mm;
			float max_y_mm = -min_y_mm;
			for (float elapsed = 0.0f; elapsed < 4; elapsed += time_elapsed_sec)
			{
				rob.updateFromVelocities(sim, forward_velocity, angular_velocity_pan, angular_velocity_tilt, angular_velocity_roll, time_elapsed_sec);
				Console.WriteLine("xy: " + rob.x.ToString() + " " + rob.y.ToString());

				rob.motion.Show(frame, frame_width, frame_width, min_x_mm, min_y_mm, max_x_mm, max_y_mm, true, false, elapsed == 0.0f);
			}
			rob.SaveGridImage("dpslam_tests_CreateSimulation2.bmp");
			BitmapArrayConversions.updatebitmap_unsafe(frame, output_bmp);
			output_bmp.Save("dpslam_tests_CreateSimulation3.bmp", System.Drawing.Imaging.ImageFormat.Bmp);
		}
Ejemplo n.º 6
0
        /// <summary>
        /// Creates a motion model for the given robot operating within the
        /// given local occupancy grid.
        /// </summary>
        /// <param name="rob">robot which owns this motion model</param>
        /// <param name="LocalGrid">occupancy grid used for localisation</param>
        /// <param name="random_seed">seed for the random number generator</param>
        public motionModel(
		    robot rob, 
            dpslam LocalGrid,
            int random_seed)
        {
            // seed the random number generator
            rnd = new MersenneTwister(random_seed);

            this.rob = rob;
            this.LocalGrid = LocalGrid;
            Poses = new List<particlePath>();
            ActivePoses = new List<particlePath>();

            // default noise values: the first two entries, the next two,
            // and the remainder each use their own default level
            motion_noise = new float[6];
            for (int i = 0; i < motion_noise.Length; i++)
            {
                if (i < 2)
                    motion_noise[i] = default_motion_noise_1;
                else if (i < 4)
                    motion_noise[i] = default_motion_noise_2;
                else
                    motion_noise[i] = default_motion_noise_3;
            }

            // create some initial poses
            createNewPoses(rob.x, rob.y, rob.z, rob.pan, rob.tilt, rob.roll);
        }
Ejemplo n.º 7
0
        /// <summary>
        /// Moves the robot in a straight line using known positions (open
        /// loop), saves an image of the motion model, and asserts that the
        /// robot travelled the expected distance.
        /// </summary>
        public void OpenLoop()
        {
			int image_width = 640;
			byte[] img_rays = new byte[image_width * image_width * 3];
			Bitmap bmp = new Bitmap(image_width, image_width, System.Drawing.Imaging.PixelFormat.Format24bppRgb);

			bool closed_loop = false;
            robot rob = new robot(1);

            // rectangular area within which the robot moves
            int min_x_mm = 0, min_y_mm = 0;
            int max_x_mm = 1000, max_y_mm = 1000;
            int step_size = (max_y_mm - min_y_mm) / 15;

            // drive straight up the middle of the area with zero pan
            int x = min_x_mm + ((max_x_mm - min_x_mm) / 2);
            float pan = 0;
            bool initial = true;
            for (int y = min_y_mm; y <= max_y_mm; y += step_size)
            {
                if (closed_loop) surveyPosesDummy(rob);

                // no stereo features observed in open loop mode
				List<ushort[]> stereo_matches = new List<ushort[]>();
                rob.updateFromKnownPosition(stereo_matches, x, y, 0, pan, 0, 0);

                rob.motion.Show(
				    img_rays, image_width, image_width, 
                    min_x_mm, min_y_mm, max_x_mm, max_y_mm,
				    true, false,
                    initial);
                initial = false;
            }

			BitmapArrayConversions.updatebitmap_unsafe(img_rays, bmp);
			bmp.Save("motionmodel_tests_OpenLoop.bmp", System.Drawing.Imaging.ImageFormat.Bmp);

			Assert.IsTrue(rob.y > max_y_mm - 50, "The robot did not move far enough " + rob.y.ToString());
        }
Ejemplo n.º 8
0
        /// <summary>
        /// Creates a dpslam server for a robot with the given number of stereo cameras.
        /// </summary>
        /// <param name="no_of_stereo_cameras">number of stereo cameras on the robot</param>
        public dpslamServer(int no_of_stereo_cameras)
        {
			rob = new robot(no_of_stereo_cameras);
			
			// NOTE(review): running a test simulation from a server constructor
			// looks like leftover debug code - confirm whether this is intended
			dpslam_tests.CreateSim();
        }
Ejemplo n.º 9
0
        /// <summary>
        /// add an observation taken from this pose
        /// </summary>
        /// <param name="stereo_rays">list of ray objects in this observation, one list per stereo camera</param>
        /// <param name="rob">robot object</param>
        /// <param name="LocalGrid">occupancy grid into which to insert the observation</param>
        /// <param name="localiseOnly">if true does not add any mapping particles (pure localisation)</param>
        /// <returns>localisation matching score</returns>
        public float AddObservation(
		    List<evidenceRay>[] stereo_rays,
            robot rob, 
            dpslam LocalGrid,
            bool localiseOnly)
        {
            // no localisation evidence accumulated yet
            float localisation_score = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;

            // work out where the head and the cameras are for this pose
            pos3D head_location = new pos3D(0, 0, 0);
            pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] left_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] right_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
            calculateCameraPositions(
                rob,
                ref head_location,
                ref camera_centre_location,
                ref left_camera_location,
                ref right_camera_location);

            // insert the rays from each stereo camera, last camera first
            for (int cam = stereo_rays.Length - 1; cam >= 0; cam--)
            {
                for (int r = stereo_rays[cam].Count - 1; r >= 0; r--)
                {
                    // observed ray.  Note that this is in an egocentric
                    // coordinate frame relative to the head of the robot
                    evidenceRay observed_ray = stereo_rays[cam][r];

                    // translate and rotate this ray appropriately for the pose
                    evidenceRay trial_ray =
                        observed_ray.trialPose(
                            camera_centre_location[cam].pan,
                            camera_centre_location[cam].tilt,
                            camera_centre_location[cam].roll,
                            camera_centre_location[cam].x,
                            camera_centre_location[cam].y,
                            camera_centre_location[cam].z);

                    // update the grid cells for this ray and fold any
                    // localisation evidence into the running score
                    float score =
                        LocalGrid.Insert(
                            trial_ray, this,
                            rob.head.sensormodel[cam],
                            left_camera_location[cam],
                            right_camera_location[cam],
                            localiseOnly);
                    if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                    {
                        if (localisation_score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                            localisation_score += score;
                        else
                            localisation_score = score;
                    }
                }
            }

            return (localisation_score);
        }
Ejemplo n.º 10
0
        /// <summary>
        /// Show a diagram of the robot in this pose
        /// This is useful for checking that the positions of cameras have 
        /// been calculated correctly
        /// </summary>
        /// <param name="rob">robot object</param>
        /// <param name="img">image as a byte array</param>
        /// <param name="width">width of the image</param>
        /// <param name="height">height of the image</param>
        /// <param name="clearBackground">clear the background before drawing</param>
        /// <param name="min_x_mm">top left x coordinate</param>
        /// <param name="min_y_mm">top left y coordinate</param>
        /// <param name="max_x_mm">bottom right x coordinate</param>
        /// <param name="max_y_mm">bottom right y coordinate</param>
        /// <param name="line_width">line width to use when drawing</param>
        /// <param name="showFieldOfView">whether to show the field of view of each camera</param>
        public void Show(
		    robot rob, 
            byte[] img, int width, int height, bool clearBackground,
            int min_x_mm, int min_y_mm,
            int max_x_mm, int max_y_mm, int line_width,
            bool showFieldOfView)
        {
            // optionally blank the image to white before drawing
            if (clearBackground)
                for (int i = 0; i < width * height * 3; i++)
                    img[i] = 255;

            // get the positions of the head and cameras for this pose
            pos3D head_location = new pos3D(0, 0, 0);
            pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] left_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] right_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
            calculateCameraPositions(rob, ref head_location,
                                     ref camera_centre_location,
                                     ref left_camera_location,
                                     ref right_camera_location);
            
            // extent of the viewing area in millimetres
            int w = max_x_mm - min_x_mm;
            int h = max_y_mm - min_y_mm;

            // draw the body
            int xx = (int)((x - min_x_mm) * width / w);
            int yy = (int)((y - min_y_mm) * height / h);
            int wdth = (int)(rob.BodyWidth_mm * width / w);
            int hght = (int)(rob.BodyLength_mm * height / h);
            drawing.drawBox(img, width, height, xx, yy, wdth, hght, pan, 0, 255, 0, line_width);

            // draw the head
            xx = (int)((head_location.x - min_x_mm) * width / w);
            yy = (int)((head_location.y - min_y_mm) * height / h);
            int radius = (int)(rob.HeadSize_mm * width / w);
            drawing.drawBox(img, width, height, xx, yy, radius, radius, head_location.pan, 0, 255, 0, line_width);

            // draw the cameras
            for (int cam = 0; cam < rob.head.no_of_stereo_cameras; cam++)
            {
                // draw the left camera
                int xx1 = (int)((left_camera_location[cam].x - min_x_mm) * width / w);
                int yy1 = (int)((left_camera_location[cam].y - min_y_mm) * height / h);
                wdth = (int)((rob.head.cameraBaseline[cam] / 4) * width / w);
                hght = (int)((rob.head.cameraBaseline[cam] / 12) * height / h);
                if (hght < 1) hght = 1;
                drawing.drawBox(img, width, height, xx1, yy1, wdth, hght, left_camera_location[cam].pan + (float)(Math.PI / 2), 0, 255, 0, line_width);

                // draw the right camera
                int xx2 = (int)((right_camera_location[cam].x - min_x_mm) * width / w);
                int yy2 = (int)((right_camera_location[cam].y - min_y_mm) * height / h);
                drawing.drawBox(img, width, height, xx2, yy2, wdth, hght, right_camera_location[cam].pan + (float)(Math.PI / 2), 0, 255, 0, line_width);

                if (showFieldOfView)
                {
                    // half the field of view in radians (degrees * PI / 360)
                    float half_FOV = rob.head.cameraFOVdegrees[cam] * (float)Math.PI / 360.0f;

                    // draw the two edges of the field of view for each camera
                    int xx_ray = xx1 + (int)(width * Math.Sin(left_camera_location[cam].pan + half_FOV));
                    int yy_ray = yy1 + (int)(width * Math.Cos(left_camera_location[cam].pan + half_FOV));
                    drawing.drawLine(img, width, height, xx1, yy1, xx_ray, yy_ray, 200, 200, 255, 0, false);
                    xx_ray = xx1 + (int)(width * Math.Sin(left_camera_location[cam].pan - half_FOV));
                    yy_ray = yy1 + (int)(width * Math.Cos(left_camera_location[cam].pan - half_FOV));
                    drawing.drawLine(img, width, height, xx1, yy1, xx_ray, yy_ray, 200, 200, 255, 0, false);
                    xx_ray = xx2 + (int)(width * Math.Sin(right_camera_location[cam].pan + half_FOV));
                    yy_ray = yy2 + (int)(width * Math.Cos(right_camera_location[cam].pan + half_FOV));
                    drawing.drawLine(img, width, height, xx2, yy2, xx_ray, yy_ray, 200, 200, 255, 0, false);
                    xx_ray = xx2 + (int)(width * Math.Sin(right_camera_location[cam].pan - half_FOV));
                    yy_ray = yy2 + (int)(width * Math.Cos(right_camera_location[cam].pan - half_FOV));
                    drawing.drawLine(img, width, height, xx2, yy2, xx_ray, yy_ray, 200, 200, 255, 0, false);
                }
            }
        }