Example #1
        /// <summary>
        /// translate and rotate the ray by the given values
        /// </summary>
        /// <param name="r">object containing the desired translation and rotation</param>
        public void translateRotate(pos3D r)
        {
            //take a note of where the ray was observed from
            //in egocentric coordinates.  This will help to
            //restrict localisation searching
            observedFrom      = new pos3D(r.x, r.y, r.z);
            observedFrom.pan  = r.pan;
            observedFrom.tilt = r.tilt;

            //centre = centre.rotate(r.pan, r.tilt, r.roll);
            for (int v = 0; v < 2; v++)
            {
                vertices[v] = vertices[v].rotate(r.pan, r.tilt, r.roll);
            }

            //store the XY plane pan angle, which may later be used
            //to reduce the searching during localisation
            pan_angle  = vertices[0].new_pan_angle;
            pan_index  = (int)(pan_angle * pan_steps / (2 * 3.1415927f));
            start_dist = vertices[0].dist_xy;
            length     = vertices[1].dist_xy - start_dist;

            //centre = centre.translate(r.x, r.y, r.z);
            for (int v = 0; v < 2; v++)
            {
                vertices[v] = vertices[v].translate(r.x, r.y, r.z);
            }
        }
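
The ordering inside translateRotate matters: the vertices are rotated first and only then translated. A standalone sketch (not from this codebase, pan-only for brevity) showing that the two orderings give different results:

    using System;

    class RotateTranslateOrder
    {
        static (float x, float y) Rotate(float x, float y, float pan)
        {
            float c = (float)Math.Cos(pan), s = (float)Math.Sin(pan);
            return ((c * x) - (s * y), (s * x) + (c * y));
        }

        static void Main()
        {
            float pan = (float)(Math.PI / 2);   // 90 degree pan
            float tx = 100, ty = 0;             // translation

            // rotate first, then translate (as translateRotate does)
            var (rx, ry) = Rotate(10, 0, pan);
            Console.WriteLine($"rotate then translate: ({rx + tx}, {ry + ty})");   // (100, 10)

            // translate first, then rotate: a different point
            var (qx, qy) = Rotate(10 + tx, 0 + ty, pan);
            Console.WriteLine($"translate then rotate: ({qx}, {qy})");             // (~0, 110)
        }
    }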
Example #2
 public pos3D add(pos3D other)
 {
     pos3D sum = new pos3D(x + other.x, y + other.y, z + other.z);
     sum.pan = pan + other.pan;
     sum.tilt = tilt + other.tilt;
     sum.roll = roll + other.roll;
     return (sum);
 }
Example #3
 public pos3D subtract(pos3D other)
 {
     pos3D sum = new pos3D(x - other.x, y - other.y, z - other.z);
     sum.pan = pan - other.pan;
     sum.tilt = tilt - other.tilt;
     sum.roll = roll - other.roll;
     return (sum);
 }
Example #4
        /// <summary>
        /// return a translated version of the point
        /// </summary>
        /// <param name="x"></param>
        /// <param name="y"></param>
        /// <param name="z"></param>
        /// <returns></returns>
        public pos3D translate(float x, float y, float z)
        {
            pos3D result = new pos3D(this.x + x, this.y + y, this.z + z);

            result.pan  = pan;
            result.tilt = tilt;
            result.roll = roll;
            return(result);
        }
Example #5
 /// <summary>
 /// copy the position from another
 /// </summary>
 /// <param name="other"></param>
 /// <returns></returns>
 public void copyFrom(pos3D other)
 {
     x    = other.x;
     y    = other.y;
     z    = other.z;
     pan  = other.pan;
     tilt = other.tilt;
     roll = other.roll;
 }
Example #6
        public pos3D Subtract(pos3D p)
        {
            pos3D new_p = new pos3D(x - p.x, y - p.y, z - p.z);

            new_p.pan  = pan - p.pan;
            new_p.tilt = tilt - p.tilt;
            new_p.roll = roll - p.roll;
            return(new_p);
        }
Example #7
        public pos3D add(pos3D other)
        {
            pos3D sum = new pos3D(x + other.x, y + other.y, z + other.z);

            sum.pan  = pan + other.pan;
            sum.tilt = tilt + other.tilt;
            sum.roll = roll + other.roll;
            return(sum);
        }
Example #8
        public pos3D subtract(pos3D other)
        {
            pos3D sum = new pos3D(x - other.x, y - other.y, z - other.z);

            sum.pan  = pan - other.pan;
            sum.tilt = tilt - other.tilt;
            sum.roll = roll - other.roll;
            return(sum);
        }
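
The add/subtract/translate methods above compose poses component-wise: positions and angles are simply summed, so subtract undoes add exactly. Note that angle composition here is plain addition rather than true 3D rotation composition. A standalone round-trip sketch (not from this codebase) of the same pattern:

    using System;

    class PoseArithmetic
    {
        struct Pose { public float x, y, z, pan, tilt, roll; }

        static Pose Add(Pose a, Pose b) => new Pose {
            x = a.x + b.x, y = a.y + b.y, z = a.z + b.z,
            pan = a.pan + b.pan, tilt = a.tilt + b.tilt, roll = a.roll + b.roll
        };

        static Pose Subtract(Pose a, Pose b) => new Pose {
            x = a.x - b.x, y = a.y - b.y, z = a.z - b.z,
            pan = a.pan - b.pan, tilt = a.tilt - b.tilt, roll = a.roll - b.roll
        };

        static void Main()
        {
            Pose p = new Pose { x = 1, y = 2, pan = 0.5f };
            Pose q = new Pose { x = 10, y = 20, pan = 0.1f };
            Pose r = Subtract(Add(p, q), q);              // recovers p, up to float rounding
            Console.WriteLine($"{r.x} {r.y} {r.pan}");    // 1 2 0.5
        }
    }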
Example #9
        public pos3D rotate_old(float pan, float tilt, float roll)
        {
            float hyp;
            pos3D rotated = new pos3D(x,y,z);

            // roll            
            //if (roll != 0)
            {
                hyp = (float)Math.Sqrt((rotated.x * rotated.x) + (rotated.z * rotated.z));
                if (hyp > 0)
                {
                    float roll_angle = (float)Math.Acos(rotated.z / hyp);
                    if (rotated.x < 0) roll_angle = (float)(Math.PI * 2) - roll_angle;
                    float new_roll_angle = roll + roll_angle;
                    rotated.x = hyp * (float)Math.Sin(new_roll_angle);
                    rotated.z = hyp * (float)Math.Cos(new_roll_angle);
                }
            }
            if (tilt != 0)
            {
                // tilt
                hyp = (float)Math.Sqrt((rotated.y * rotated.y) + (rotated.z * rotated.z));
                if (hyp > 0)
                {
                    float tilt_angle = (float)Math.Acos(rotated.y / hyp);
                    if (rotated.z < 0) tilt_angle = (float)(Math.PI * 2) - tilt_angle;
                    float new_tilt_angle = tilt + tilt_angle;
                    rotated.y = hyp * (float)Math.Sin(new_tilt_angle);
                    rotated.z = hyp * (float)Math.Cos(new_tilt_angle);
                }
            }

            //if (pan != 0)
            {
                // pan                
                hyp = (float)Math.Sqrt((rotated.x * rotated.x) + (rotated.y * rotated.y));
                if (hyp > 0)
                {
                    float pan_angle = (float)Math.Acos(rotated.y / hyp);
                    if (rotated.x < 0) pan_angle = (float)(Math.PI * 2) - pan_angle;
                    rotated.new_pan_angle = pan - pan_angle;
                    rotated.dist_xy = hyp;
                    rotated.x = hyp * (float)Math.Sin(rotated.new_pan_angle);
                    rotated.y = hyp * (float)Math.Cos(rotated.new_pan_angle);
                }
            }
            rotated.pan = this.pan + pan;
            rotated.tilt = this.tilt + tilt;
            rotated.roll = this.roll + roll;
            return (rotated);
        }
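
Each block in rotate_old recovers a polar angle with Math.Acos plus a quadrant fix, then re-projects with sin/cos. A standalone sketch (not from this codebase) showing that the acos pattern is equivalent, modulo 2*pi, to a single Math.Atan2 call, which also handles negative components without the explicit quadrant test:

    using System;

    class AngleRecovery
    {
        static void Main()
        {
            float x = -3, z = 4;
            float hyp = (float)Math.Sqrt((x * x) + (z * z));

            // the pattern used in rotate_old
            float a = (float)Math.Acos(z / hyp);
            if (x < 0) a = (float)(Math.PI * 2) - a;

            // equivalent single call, wrapped into [0, 2*pi)
            float b = (float)Math.Atan2(x, z);
            if (b < 0) b += (float)(Math.PI * 2);

            Console.WriteLine($"{a} vs {b}");   // both ~5.6397
        }
    }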
Example #10
        /// <summary>
        /// returns a version of this evidence ray rotated through the given
        /// pan, tilt and roll angles.  This is used for generating trial poses.
        /// </summary>
        /// <param name="extra_pan">pan angle to be added</param>
        /// <param name="extra_tilt">tilt angle to be added</param>
        /// <param name="extra_roll">roll angle to be added</param>
        /// <param name="translation_x">new observation x coordinate</param>
        /// <param name="translation_y">new observation y coordinate</param>
        /// <param name="translation_z">new observation z coordinate</param>
        /// <returns>evidence ray object</returns>
        public evidenceRay trialPose(
            float extra_pan,
            float extra_tilt,
            float extra_roll,
            float translation_x,
            float translation_y,
            float translation_z)
        {
            evidenceRay rotated_ray    = new evidenceRay();
            float       new_pan_angle  = extra_pan + pan_angle;
            float       new_tilt_angle = extra_tilt + tilt_angle;
            float       new_roll_angle = extra_roll + roll_angle;

            // as observed from the centre of the camera baseline
            rotated_ray.observedFrom = new pos3D(translation_x, translation_y, translation_z);
            rotated_ray.fattestPoint = fattestPoint;

            pos3D r1 = new pos3D(0, start_dist, 0);

            rotated_ray.vertices[0]    = r1.rotate(new_pan_angle, new_tilt_angle, new_roll_angle);
            rotated_ray.vertices[0].x += translation_x;
            rotated_ray.vertices[0].y += translation_y;
            rotated_ray.vertices[0].z += translation_z;

            pos3D r2 = new pos3D(0, start_dist + length, 0);

            rotated_ray.vertices[1]    = r2.rotate(new_pan_angle, new_tilt_angle, new_roll_angle);
            rotated_ray.vertices[1].x += translation_x;
            rotated_ray.vertices[1].y += translation_y;
            rotated_ray.vertices[1].z += translation_z;

            rotated_ray.pan_angle  = new_pan_angle;
            rotated_ray.tilt_angle = new_tilt_angle;
            rotated_ray.roll_angle = new_roll_angle;
            rotated_ray.pan_index  = (int)(new_pan_angle * pan_steps / 6.2831854f);
            rotated_ray.tilt_index = (int)(new_tilt_angle * pan_steps / 6.2831854f);
            rotated_ray.roll_index = (int)(new_roll_angle * pan_steps / 6.2831854f);
            rotated_ray.length     = length;
            rotated_ray.width      = width;
            rotated_ray.start_dist = start_dist;
            rotated_ray.disparity  = disparity;

            for (int col = 2; col >= 0; col--)
            {
                rotated_ray.colour[col] = colour[col];
            }

            return(rotated_ray);
        }
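
The pan_index/tilt_index/roll_index values quantise an angle into pan_steps discrete bins so that candidate rays can be binned and compared cheaply during localisation. A standalone sketch of that arithmetic (pan_steps = 36 is a hypothetical value); note that the (int) cast truncates toward zero, so a negative angle yields a negative index unless it is wrapped first:

    using System;

    class PanIndexSketch
    {
        static void Main()
        {
            int pan_steps = 36;                        // hypothetical: 10 degrees per bin
            float two_pi = 6.2831854f;                 // same constant as used above

            float pan_angle = (float)(Math.PI / 4);    // 45 degrees
            Console.WriteLine((int)(pan_angle * pan_steps / two_pi));   // 4

            float negative = -(float)(Math.PI / 4);    // -45 degrees
            float wrapped = ((negative % two_pi) + two_pi) % two_pi;
            Console.WriteLine((int)(wrapped * pan_steps / two_pi));     // 31
        }
    }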
Example #11
        public scanMatching[] scanmatch;           // simple scan matching

        /// <summary>
        /// constructor
        /// </summary>
        /// <param name="no_of_stereo_cameras">
        /// A <see cref="System.Int32"/>
        /// The number of stereo cameras
        /// </param>
        public stereoHead(int no_of_stereo_cameras) : base(0,0,0)
        {
            this.no_of_stereo_cameras = no_of_stereo_cameras;
            
            // create feature lists
            features = new float[no_of_stereo_cameras][];
            featureColours = new byte[no_of_stereo_cameras][];
            no_of_features = new int[no_of_stereo_cameras];
                        
            // create the cameras
            cameraPosition = new pos3D[no_of_stereo_cameras];
            cameraPositionHeadRelative = new pos3D[no_of_stereo_cameras];
            cameraBaseline = new float[no_of_stereo_cameras];
            cameraFOVdegrees = new float[no_of_stereo_cameras];
            cameraImageWidth = new int[no_of_stereo_cameras];
            cameraImageHeight = new int[no_of_stereo_cameras];
            cameraFocalLengthMm = new float[no_of_stereo_cameras];
            cameraSensorSizeMm = new float[no_of_stereo_cameras];
                        
            // create objects for each stereo camera
            for (int cam = 0; cam < no_of_stereo_cameras; cam++)
            {
                cameraPosition[cam] = new pos3D(0, 0, 0);
                cameraPositionHeadRelative[cam] = new pos3D(0, 0, 0);
                cameraBaseline[cam] = 120;
                cameraFOVdegrees[cam] = 68;
                cameraImageWidth[cam] = 320;
                cameraImageHeight[cam] = 240;
                cameraFocalLengthMm[cam] = 3.6f;
                cameraSensorSizeMm[cam] = 4;
                features[cam] = null; // new stereoFeatures();
                featureColours[cam] = null;
            }

            // sensor models
            sensormodel = new rayModelLookup[no_of_stereo_cameras];

            //scan matching
            scanmatch = new scanMatching[no_of_stereo_cameras];

            /*
            if (no_of_cameras == 4) initQuadCam();
            if (no_of_cameras == 2) initDualCam();
            if (no_of_cameras == 1) initSingleStereoCamera(false);
             */
        }
Example #12
        public scanMatching[] scanmatch;           // simple scan matching

        /// <summary>
        /// constructor
        /// </summary>
        /// <param name="no_of_stereo_cameras">
        /// A <see cref="System.Int32"/>
        /// The number of stereo cameras
        /// </param>
        public stereoHead(int no_of_stereo_cameras) : base(0, 0, 0)
        {
            this.no_of_stereo_cameras = no_of_stereo_cameras;

            // create feature lists
            features       = new float[no_of_stereo_cameras][];
            featureColours = new byte[no_of_stereo_cameras][];
            no_of_features = new int[no_of_stereo_cameras];

            // create the cameras
            cameraPosition             = new pos3D[no_of_stereo_cameras];
            cameraPositionHeadRelative = new pos3D[no_of_stereo_cameras];
            cameraBaseline             = new float[no_of_stereo_cameras];
            cameraFOVdegrees           = new float[no_of_stereo_cameras];
            cameraImageWidth           = new int[no_of_stereo_cameras];
            cameraImageHeight          = new int[no_of_stereo_cameras];
            cameraFocalLengthMm        = new float[no_of_stereo_cameras];
            cameraSensorSizeMm         = new float[no_of_stereo_cameras];

            // create objects for each stereo camera
            for (int cam = 0; cam < no_of_stereo_cameras; cam++)
            {
                cameraPosition[cam]             = new pos3D(0, 0, 0);
                cameraPositionHeadRelative[cam] = new pos3D(0, 0, 0);
                cameraBaseline[cam]             = 120;
                cameraFOVdegrees[cam]           = 68;
                cameraImageWidth[cam]           = 320;
                cameraImageHeight[cam]          = 240;
                cameraFocalLengthMm[cam]        = 3.6f;
                cameraSensorSizeMm[cam]         = 4;
                features[cam]       = null; // new stereoFeatures();
                featureColours[cam] = null;
            }

            // sensor models
            sensormodel = new rayModelLookup[no_of_stereo_cameras];

            //scan matching
            scanmatch = new scanMatching[no_of_stereo_cameras];

            /*
             * if (no_of_cameras == 4) initQuadCam();
             * if (no_of_cameras == 2) initDualCam();
             * if (no_of_cameras == 1) initSingleStereoCamera(false);
             */
        }
Example #13
        public void UpdateSurveyor()
        {
            float    horizon = 2000;
            DateTime save_motion_model_time = new DateTime(1990, 1, 1);
            pos3D    map_centre             = new pos3D(x, y, z);

            updating = true;
            while (updating)
            {
                if (current_speed_mmsec != 0)
                {
                    motion.speed_uncertainty_forward = current_speed_uncertainty_mmsec;
                    motion.speed_uncertainty_angular = 0.5f / 180.0f * (float)Math.PI;
                }
                else
                {
                    motion.speed_uncertainty_forward = 0;
                    motion.speed_uncertainty_angular = 0;
                }

                UpdatePosition();
                TimeSpan diff = DateTime.Now.Subtract(save_motion_model_time);
                if (diff.TotalSeconds >= 5)
                {
                    float dx         = map_centre.x - curr_pos.x;
                    float dy         = map_centre.y - curr_pos.y;
                    float dz         = map_centre.z - curr_pos.z;
                    bool  redraw_map = false;
                    if (!displaying_motion_model)
                    {
                        float dist = (float)Math.Sqrt(dx * dx + dy * dy + dz * dz);
                        if (dist > 1000)
                        {
                            map_centre.x = curr_pos.x;
                            map_centre.y = curr_pos.y;
                            map_centre.z = curr_pos.z;
                            redraw_map   = true;
                        }
                        SaveMotionModel("motion_model.jpg", map_centre.x - horizon, map_centre.y - horizon, map_centre.x + horizon, map_centre.y + horizon, redraw_map);
                    }
                }
                Thread.Sleep(500);
            }
            thread_stopped = true;
        }
Example #14
        /// <summary>
        /// rotates this position, using Y forward convention
        /// </summary>
        /// <param name="pan">pan angle in radians</param>
        /// <param name="tilt">tilt angle in radians</param>
        /// <param name="roll">roll angle in radians</param>
        /// <returns>rotated position</returns>
        public pos3D rotate(float pan, float tilt, float roll)
        {
            float x2 = x;
            float y2 = y;
            float z2 = z;

            float x3 = x;
            float y3 = y;
            float z3 = z;

            // Rotation about the y axis
            if (roll != 0)
            {
                x3 = (float)((Math.Cos(roll) * x2) + (Math.Sin(roll) * z2));
                z3 = (float)(-(Math.Sin(roll) * x2) + (Math.Cos(roll) * z2));
                x2 = x3;
                z2 = z3;
            }

            // Rotation about the x axis
            if (tilt != 0)
            {
                float tilt2 = tilt;
                z3 = (float)((Math.Cos(tilt2) * y2) - (Math.Sin(tilt2) * z2));
                y3 = (float)((Math.Sin(tilt2) * y2) + (Math.Cos(tilt2) * z2));
                y2 = y3;
                z2 = z3;
            }

            // Rotation about the z axis
            if (pan != 0)
            {
                x3 = (float)((Math.Cos(pan) * x2) - (Math.Sin(pan) * y2));
                y3 = (float)((Math.Sin(pan) * x2) + (Math.Cos(pan) * y2));
            }

            pos3D rotated = new pos3D(x3, y3, z3);

            rotated.pan  = this.pan + pan;
            rotated.tilt = this.tilt + tilt;
            rotated.roll = this.roll + roll;
            return(rotated);
        }
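
rotate applies roll, then tilt, then pan, and that order is part of the convention: 3D rotations do not commute. A standalone sketch (using System.Numerics rather than this codebase's pos3D) demonstrating non-commutativity:

    using System;
    using System.Numerics;

    class RotationOrder
    {
        static void Main()
        {
            var v = new Vector3(1, 0, 0);
            var rotX = Matrix4x4.CreateRotationX((float)(Math.PI / 2));   // tilt-like
            var rotZ = Matrix4x4.CreateRotationZ((float)(Math.PI / 2));   // pan-like

            // X then Z is not the same as Z then X
            Console.WriteLine(Vector3.Transform(Vector3.Transform(v, rotX), rotZ));   // ~<0, 1, 0>
            Console.WriteLine(Vector3.Transform(Vector3.Transform(v, rotZ), rotX));   // ~<0, 0, 1>
        }
    }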
Example #15
        /// <summary>
        /// calculate the position of the robot's head and cameras for this pose
        /// </summary>
        /// <param name="rob"></param>
        /// <param name="head_location">location of the centre of the head</param>
        /// <param name="camera_centre_location">location of the centre of each stereo camera</param>
        /// <param name="left_camera_location">location of the left camera within each stereo camera</param>
        /// <param name="right_camera_location">location of the right camera within each stereo camera</param>
        public void calculateCameraPositions(
            robot rob,
            ref pos3D head_location,
            ref pos3D[] camera_centre_location,
            ref pos3D[] left_camera_location,
            ref pos3D[] right_camera_location)
        {
            // calculate the position of the centre of the head relative to
            // the centre of rotation of the robot's body
            pos3D head_centroid =
                new pos3D(
                    -(rob.BodyWidth_mm / 2) + rob.head.x,
                    -(rob.BodyLength_mm / 2) + rob.head.y,
                    rob.head.z);

            // location of the centre of the head on the grid map
            // adjusted for the robot pose and the head pan and tilt angle.
            // Note that the positions and orientations of individual cameras
            // on the head have already been accounted for within stereoModel.createObservation
            pos3D head_locn = head_centroid.rotate(rob.head.pan + pan, rob.head.tilt + tilt, rob.head.roll + roll);

            head_locn = head_locn.translate(x, y, 0);
            head_location.copyFrom(head_locn);

            for (int cam = 0; cam < rob.head.no_of_stereo_cameras; cam++)
            {
                // calculate the position of the centre of the stereo camera
                // (baseline centre point)
                pos3D camera_centre_locn = new pos3D(rob.head.cameraPositionHeadRelative[cam].x, rob.head.cameraPositionHeadRelative[cam].y, rob.head.cameraPositionHeadRelative[cam].z);
                camera_centre_locn          = camera_centre_locn.rotate(rob.head.cameraPositionHeadRelative[cam].pan + rob.head.pan + pan, rob.head.cameraPositionHeadRelative[cam].tilt, rob.head.cameraPositionHeadRelative[cam].roll);
                camera_centre_location[cam] = camera_centre_locn.translate(head_location.x, head_location.y, head_location.z);

                // where are the left and right cameras?
                // we need to know this for the origins of the vacancy models
                float half_baseline_length = rob.head.cameraBaseline[cam] / 2;
                pos3D left_camera_locn     = new pos3D(-half_baseline_length, 0, 0);
                left_camera_locn = left_camera_locn.rotate(rob.head.cameraPositionHeadRelative[cam].pan + rob.head.pan + pan, rob.head.cameraPositionHeadRelative[cam].tilt, rob.head.cameraPositionHeadRelative[cam].roll);
                pos3D right_camera_locn = new pos3D(-left_camera_locn.x, -left_camera_locn.y, -left_camera_locn.z);
                left_camera_location[cam]      = left_camera_locn.translate(camera_centre_location[cam].x, camera_centre_location[cam].y, camera_centre_location[cam].z);
                right_camera_location[cam]     = right_camera_locn.translate(camera_centre_location[cam].x, camera_centre_location[cam].y, camera_centre_location[cam].z);
                right_camera_location[cam].pan = left_camera_location[cam].pan;
            }
        }
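
Each stage above is one link of a kinematic chain: rotate the child's local offset by the accumulated orientation, then translate by the parent's position. A minimal standalone sketch of a single link (2D, pan-only, with made-up offsets, not the codebase's pos3D):

    using System;

    class KinematicLink
    {
        // rotate a local offset (ox, oy) by the parent pan angle,
        // then translate by the parent position (px, py)
        static (float x, float y) Link(float ox, float oy, float pan, float px, float py)
        {
            float c = (float)Math.Cos(pan), s = (float)Math.Sin(pan);
            return (px + (c * ox) - (s * oy), py + (s * ox) + (c * oy));
        }

        static void Main()
        {
            float body_pan = (float)(Math.PI / 2);               // robot facing 90 degrees
            var head = Link(0, 50, body_pan, 1000, 2000);        // head 50mm ahead of body centre
            var cam = Link(-60, 0, body_pan, head.x, head.y);    // half-baseline offset of 60mm
            Console.WriteLine($"head ({head.x}, {head.y}) camera ({cam.x}, {cam.y})");
        }
    }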
Example #16
        /// <summary>
        /// constructor
        /// </summary>
        public robotSurveyor() : base(1)
        {
            Name = "Surveyor SVS";

            map_image_width = 640;
            map             = new byte[map_image_width * map_image_width * 3];
            map_bitmap      = new Bitmap(map_image_width, map_image_width, System.Drawing.Imaging.PixelFormat.Format24bppRgb);

            WheelBase_mm        = 90;
            WheelDiameter_mm    = 30;
            WheelBaseForward_mm = 0;

            stereo_matches       = new List <ushort[]>();
            curr_pos             = new pos3D(0, 0, 0);
            last_position_update = DateTime.Now;
            UpdatePosition();

            // start the thread which updates the position
            update_thread2         = new robotSurveyorThread(new WaitCallback(Callback), this);
            update_thread          = new Thread(new ThreadStart(update_thread2.Execute));
            update_thread.Priority = ThreadPriority.Normal;
            update_thread.Start();
        }
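
The constructor hands the long-running update loop to a dedicated thread via ThreadStart. A standalone sketch of the same start/stop pattern (the worker class here is hypothetical, not the codebase's robotSurveyorThread):

    using System;
    using System.Threading;

    class UpdateWorker
    {
        public volatile bool updating;

        public void Execute()
        {
            updating = true;
            while (updating)
            {
                // ... update the position estimate here ...
                Thread.Sleep(500);   // same 500ms cadence as UpdateSurveyor
            }
        }
    }

    class Program
    {
        static void Main()
        {
            var worker = new UpdateWorker();
            var thread = new Thread(new ThreadStart(worker.Execute));
            thread.Priority = ThreadPriority.Normal;
            thread.IsBackground = true;     // don't keep the process alive
            thread.Start();
            Thread.Sleep(1200);
            worker.updating = false;        // signal the loop to exit
            thread.Join();
        }
    }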
Example #17
        /// <summary>
        /// constructor
        /// </summary>
        public robotSurveyor() : base(1)
        {
            Name = "Surveyor SVS";

            map_image_width = 640;
            map = new byte[map_image_width * map_image_width * 3];
            map_bitmap = new Bitmap(map_image_width, map_image_width, System.Drawing.Imaging.PixelFormat.Format24bppRgb);

            WheelBase_mm = 90;
            WheelDiameter_mm = 30;
            WheelBaseForward_mm = 0;

            stereo_matches = new List<ushort[]>();
            curr_pos = new pos3D(0, 0, 0);
            last_position_update = DateTime.Now;
            UpdatePosition();

            // start the thread which updates the position
            update_thread2 = new robotSurveyorThread(new WaitCallback(Callback), this);
            update_thread = new Thread(new ThreadStart(update_thread2.Execute));
            update_thread.Priority = ThreadPriority.Normal;
            update_thread.Start();
        }
Example #18
        /// <summary>
        /// add an observation taken from this pose
        /// </summary>
        /// <param name="stereo_rays">list of ray objects in this observation</param>
        /// <param name="rob">robot object</param>
        /// <param name="LocalGrid">occupancy grid into which to insert the observation</param>
        /// <param name="localiseOnly">if true does not add any mapping particles (pure localisation)</param>
        /// <returns>localisation matching score</returns>
        public float AddObservation(
            List<evidenceRay>[] stereo_rays,
            robot rob,
            dpslam LocalGrid,
            bool localiseOnly)
        {
            // clear the localisation score
            float localisation_score = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;

            // get the positions of the head and cameras for this pose
            pos3D head_location = new pos3D(0,0,0);
            pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] left_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] right_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
            calculateCameraPositions(
                rob,
                ref head_location,
                ref camera_centre_location,
                ref left_camera_location,
                ref right_camera_location);
            
            // iterate over each stereo camera
            int cam = stereo_rays.Length - 1;
            while (cam >= 0)
            {
                // iterate through each ray
                int r = stereo_rays[cam].Count - 1;
                while (r >= 0)
                {
                    // observed ray.  Note that this is in an egocentric
                    // coordinate frame relative to the head of the robot
                    evidenceRay ray = stereo_rays[cam][r];

                    // translate and rotate this ray appropriately for the pose
                    evidenceRay trial_ray =
                        ray.trialPose(
                            camera_centre_location[cam].pan,
                            camera_centre_location[cam].tilt,
                            camera_centre_location[cam].roll,
                            camera_centre_location[cam].x,
                            camera_centre_location[cam].y,
                            camera_centre_location[cam].z);

                    //Console.WriteLine("ray.vert[0] " + (trial_ray.vertices[0] == null).ToString());
                    //Console.WriteLine("ray.vert[1] " + (trial_ray.vertices[1] == null).ToString());

                    // update the grid cells for this ray and update the
                    // localisation score accordingly

                    float score =
                        LocalGrid.Insert(
                            trial_ray, this,
                            rob.head.sensormodel[cam],
                            left_camera_location[cam],
                            right_camera_location[cam],
                            localiseOnly);
                    if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                    {
                        if (localisation_score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                            localisation_score += score;
                        else
                            localisation_score = score;
                    }
                    r--;
                }
                cam--;
            }

            return (localisation_score);
        }
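
The score accumulation at the end of the loop treats NO_OCCUPANCY_EVIDENCE as a sentinel: the first real score replaces it and later scores are summed, so an unmatched ray never contributes a spurious zero. A standalone sketch of that pattern (the sentinel value here is hypothetical):

    using System;

    class SentinelAccumulator
    {
        const float NO_EVIDENCE = 999999;   // hypothetical stand-in for NO_OCCUPANCY_EVIDENCE

        static void Main()
        {
            float total = NO_EVIDENCE;
            float[] scores = { NO_EVIDENCE, 0.3f, NO_EVIDENCE, 0.5f };
            foreach (float score in scores)
            {
                if (score != NO_EVIDENCE)
                {
                    if (total != NO_EVIDENCE)
                        total += score;     // accumulate subsequent real scores
                    else
                        total = score;      // first real score replaces the sentinel
                }
            }
            Console.WriteLine(total);   // 0.8
        }
    }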
Example #19
        /// <summary>
        /// removes low probability poses
        /// Note that a maturity threshold is used, so that poses which may initially
        /// be unfairly judged as improbable have time to prove their worth
        /// </summary>
        private void Prune()
        {
            float max_score = float.MinValue;

            best_path = null;

            // sort poses by score
            for (int i = 0; i < Poses.Count - 1; i++)
            {
                particlePath p1 = Poses[i];

                // keep track of the bounding region within which the path tree occurs
                if (p1.current_pose.x < min_tree_x)
                {
                    min_tree_x = p1.current_pose.x;
                }
                if (p1.current_pose.x > max_tree_x)
                {
                    max_tree_x = p1.current_pose.x;
                }
                if (p1.current_pose.y < min_tree_y)
                {
                    min_tree_y = p1.current_pose.y;
                }
                if (p1.current_pose.y > max_tree_y)
                {
                    max_tree_y = p1.current_pose.y;
                }

                max_score = p1.total_score;
                particlePath winner       = null;
                int          winner_index = 0;
                for (int j = i + 1; j < Poses.Count; j++)
                {
                    particlePath p2 = Poses[j];
                    float        sc = p2.total_score;
                    if ((sc > max_score) ||
                        ((max_score == 0) && (sc != 0)))
                    {
                        max_score    = sc;
                        winner       = p2;
                        winner_index = j;
                    }
                }
                if (winner != null)
                {
                    Poses[i]            = winner;
                    Poses[winner_index] = p1;
                }
            }

            // the best path is at the top
            best_path = Poses[0];

            // It's culling season
            int cull_index = (100 - cull_threshold) * Poses.Count / 100;

            if (cull_index > Poses.Count - 2)
            {
                cull_index = Poses.Count - 2;
            }
            for (int i = Poses.Count - 1; i > cull_index; i--)
            {
                particlePath path = Poses[i];
                if (path.path.Count >= pose_maturation)
                {
                    // remove mapping hypotheses for this path
                    path.Remove(LocalGrid);

                    // now remove the path itself
                    Poses.RemoveAt(i);
                }
            }

            // garbage collect any dead paths (R.I.P.)
            List <particlePath> possible_roots = new List <particlePath>(); // stores paths where all branches have converged to a single possibility

            for (int i = ActivePoses.Count - 1; i >= 0; i--)
            {
                particlePath path = ActivePoses[i];

                if ((!path.Enabled) ||
                    ((path.total_children == 0) && (path.branch_pose == null) && (path.current_pose.time_step < time_step - pose_maturation - 5)))
                {
                    ActivePoses.RemoveAt(i);
                }
                else
                {
                    // record any fully collapsed paths
                    if (!path.Collapsed)
                    {
                        if (path.branch_pose == null)
                        {
                            possible_roots.Add(path);
                        }
                        else
                        {
                            if (path.branch_pose.path.Collapsed)
                            {
                                possible_roots.Add(path);
                            }
                        }
                    }
                }
            }

            if (possible_roots.Count == 1)
            {
                // collapse the path
                particlePath path = possible_roots[0];

                if (path.branch_pose != null)
                {
                    particlePath previous_path = path.branch_pose.path;
                    previous_path.Distill(LocalGrid);
                    path.branch_pose.parent = null;
                    path.branch_pose        = null;
                }

                path.Collapsed = true;

                // take note of the time step.  This is for use with the display functions only
                root_time_step = path.current_pose.time_step;
            }

            if (best_path != null)
            {
                // update the current robot position with the best available pose
                if (current_robot_pose == null)
                {
                    current_robot_pose =
                        new pos3D(best_path.current_pose.x,
                                  best_path.current_pose.y,
                                  0);
                }
                current_robot_pose.pan   = best_path.current_pose.pan;
                current_robot_path_score = best_path.total_score;

                // generate new poses from the ones which have survived culling
                int new_poses_required = survey_trial_poses; // -Poses.Count;
                int max = Poses.Count;
                int n = 0, added = 0;
                while ((max > 0) &&
                       (n < new_poses_required * 4) &&
                       (added < new_poses_required))
                {
                    // identify a potential parent at random,
                    // from one of the surviving paths
                    int          random_parent_index = rnd.Next(max - 1);
                    particlePath parent_path         = Poses[random_parent_index];

                    // only mature parents can have children
                    if (parent_path.path.Count >= pose_maturation)
                    {
                        // generate a child branching from the parent path
                        particlePath child_path = new particlePath(parent_path, path_ID, rob.LocalGridDimension);
                        createNewPose(child_path);
                        added++;
                    }
                    n++;
                }

                // like salmon, after parents spawn they die
                if (added > 0)
                {
                    for (int i = max - 1; i >= 0; i--)
                    {
                        Poses.RemoveAt(i);
                    }
                }

                // if the robot has rotated through a large angle since
                // the previous time step then reset the scan matching estimate
                //if (Math.Abs(best_path.current_pose.pan - rob.pan) > rob.ScanMatchingMaxPanAngleChange * Math.PI / 180)
                rob.ScanMatchingPanAngleEstimate = scanMatching.NOT_MATCHED;
            }


            PosesEvaluated = false;
        }
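
The cull_index arithmetic keeps roughly the top (100 - cull_threshold) percent of poses; the clamp guarantees the culling loop can always reach at least the lowest-ranked pose. A standalone worked example of the integer arithmetic (pose count and threshold are hypothetical values):

    using System;

    class CullIndexSketch
    {
        static void Main()
        {
            int pose_count = 100, cull_threshold = 25;                    // hypothetical values
            int cull_index = (100 - cull_threshold) * pose_count / 100;   // 75
            if (cull_index > pose_count - 2) cull_index = pose_count - 2;
            // poses at indices 76..99 (the bottom ~25%) are culling candidates,
            // subject to the pose_maturation check
            Console.WriteLine(cull_index);   // 75
        }
    }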
Example #20
		public void CreateObservation()
		{			
		    float baseline = 120;
		    int image_width = 320;
		    int image_height = 240;
		    float FOV_degrees = 68;
			int no_of_stereo_features = 10;
			float[] stereo_features = new float[no_of_stereo_features*4];
			byte[] stereo_features_colour = new byte[no_of_stereo_features*3];
			bool translate = false;
			
			for (int i = 0; i < no_of_stereo_features; i++)
			{
				stereo_features[i*4] = 1;
				stereo_features[i*4+1] = i * image_width / no_of_stereo_features;
				stereo_features[i*4+2] = image_height/2;
				stereo_features[i*4+3] = 1;
				stereo_features_colour[i*3] = 200;
				stereo_features_colour[i*3+1] = 200;
				stereo_features_colour[i*3+2] = 200;
			}
			
			for (int rotation_degrees = 0; rotation_degrees < 360; rotation_degrees += 90)
			{
				stereoModel model = new stereoModel();
				pos3D observer = new pos3D(0,0,0);
				observer = observer.rotate(rotation_degrees/180.0f*(float)Math.PI,0,0);	
	            List<evidenceRay> rays = model.createObservation(
			        observer,
			        baseline,
			        image_width,
			        image_height,
			        FOV_degrees,
			        stereo_features,
			        stereo_features_colour,
			        translate);
				
				float tx = float.MaxValue;
				float ty = float.MaxValue;
				float bx = float.MinValue;
				float by = float.MinValue;
				for (int i = 0; i < no_of_stereo_features; i++)
				{
					//float pan_degrees = rays[i].pan_angle * 180 / (float)Math.PI; 
					//Console.WriteLine(pan_degrees.ToString());
					for (int j = 0; j < rays[i].vertices.Length; j++)
					{
					    Console.WriteLine("Vertex " + j.ToString());
					    Console.WriteLine("xyz: " + rays[i].vertices[j].x.ToString() + " " + rays[i].vertices[j].y.ToString() + " " + rays[i].vertices[j].z.ToString());
						
						if (rays[i].vertices[j].x < tx) tx = rays[i].vertices[j].x;
						if (rays[i].vertices[j].x > bx) bx = rays[i].vertices[j].x;
	
						if (rays[i].vertices[j].y < ty) ty = rays[i].vertices[j].y;
						if (rays[i].vertices[j].y > by) by = rays[i].vertices[j].y;
					}
				}
				
				int img_width = 640;
				Bitmap bmp = new Bitmap(img_width, img_width, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
				byte[] img = new byte[img_width * img_width * 3];
				for (int i = 0; i < img.Length; i++) img[i] = 255;
	
				for (int i = 0; i < no_of_stereo_features; i++)
				{
					int x0 = (int)((rays[i].vertices[0].x - tx) * img_width / (bx - tx));
					int y0 = (int)((rays[i].vertices[0].y - ty) * img_width / (by - ty));
					int x1 = (int)((rays[i].vertices[1].x - tx) * img_width / (bx - tx));
					int y1 = (int)((rays[i].vertices[1].y - ty) * img_width / (by - ty));
					drawing.drawLine(img, img_width, img_width, x0,y0,x1,y1,0,0,0,0,false);
				}
				
				BitmapArrayConversions.updatebitmap_unsafe(img, bmp);
				bmp.Save("dpslam_tests_createobservation_" + rotation_degrees.ToString() + ".bmp", System.Drawing.Imaging.ImageFormat.Bmp);
				Console.WriteLine("dpslam_tests_createobservation_" + rotation_degrees.ToString() + ".bmp");
			}
		}
Example #21
        /// <summary>
        /// removes low probability poses
        /// Note that a maturity threshold is used, so that poses which may initially 
        /// be unfairly judged as improbable have time to prove their worth
        /// </summary>
        private void Prune()
        {
            float max_score = float.MinValue;
            best_path = null;

            // sort poses by score
            for (int i = 0; i < Poses.Count-1; i++)
            {
                particlePath p1 = Poses[i];

                // keep track of the bounding region within which the path tree occurs
                if (p1.current_pose.x < min_tree_x) min_tree_x = p1.current_pose.x;
                if (p1.current_pose.x > max_tree_x) max_tree_x = p1.current_pose.x;
                if (p1.current_pose.y < min_tree_y) min_tree_y = p1.current_pose.y;
                if (p1.current_pose.y > max_tree_y) max_tree_y = p1.current_pose.y;

                max_score = p1.total_score;
                particlePath winner = null;
                int winner_index = 0;
                for (int j = i + 1; j < Poses.Count; j++)
                {
                    particlePath p2 = Poses[j];
                    float sc = p2.total_score;
                    if ((sc > max_score) ||
                        ((max_score == 0) && (sc != 0)))
                    {
                        max_score = sc;
                        winner = p2;
                        winner_index = j;
                    }
                }
                if (winner != null)
                {
                    Poses[i] = winner;
                    Poses[winner_index] = p1;
                }
            }            

            // the best path is at the top
            best_path = Poses[0];

            // It's culling season
            int cull_index = (100 - cull_threshold) * Poses.Count / 100;
            if (cull_index > Poses.Count - 2) cull_index = Poses.Count - 2;
            for (int i = Poses.Count - 1; i > cull_index; i--)
            {
                particlePath path = Poses[i];
                if (path.path.Count >= pose_maturation)
                {                    
                    // remove mapping hypotheses for this path
                    path.Remove(LocalGrid);

                    // now remove the path itself
                    Poses.RemoveAt(i);                    
                }
            }

            // garbage collect any dead paths (R.I.P.)
            List<particlePath> possible_roots = new List<particlePath>(); // stores paths where all branches have converged to a single possibility
            for (int i = ActivePoses.Count - 1; i >= 0; i--)
            {
                particlePath path = ActivePoses[i];

                if ((!path.Enabled) ||
                    ((path.total_children == 0) && (path.branch_pose == null) && (path.current_pose.time_step < time_step - pose_maturation - 5)))
                {
                    ActivePoses.RemoveAt(i);
                }
                else
                {
                    // record any fully collapsed paths
                    if (!path.Collapsed)
                        if (path.branch_pose == null)
                            possible_roots.Add(path);
                        else
                        {
                            if (path.branch_pose.path.Collapsed)
                                possible_roots.Add(path);
                        }
                }
            }

            if (possible_roots.Count == 1)
            {
                // collapse the path
                particlePath path = possible_roots[0];

                if (path.branch_pose != null)
                {
                    particlePath previous_path = path.branch_pose.path;
                    previous_path.Distill(LocalGrid);
                    path.branch_pose.parent = null;
                    path.branch_pose = null;                    
                }
                
                path.Collapsed = true;

                // take note of the time step.  This is for use with the display functions only
                root_time_step = path.current_pose.time_step;
            }

            if (best_path != null)
            {
                // update the current robot position with the best available pose
                if (current_robot_pose == null) current_robot_pose = 
                    new pos3D(best_path.current_pose.x,
                              best_path.current_pose.y,
                              0);
                current_robot_pose.pan = best_path.current_pose.pan;
                current_robot_path_score = best_path.total_score;

                // generate new poses from the ones which have survived culling
                int new_poses_required = survey_trial_poses; // -Poses.Count;
                int max = Poses.Count;
                int n = 0, added = 0;
                while ((max > 0) &&
                       (n < new_poses_required*4) && 
                       (added < new_poses_required))
                {
                    // identify a potential parent at random, 
                    // from one of the surviving paths
                    int random_parent_index = rnd.Next(max-1);
                    particlePath parent_path = Poses[random_parent_index];
                    
                    // only mature parents can have children
                    if (parent_path.path.Count >= pose_maturation)
                    {
                        // generate a child branching from the parent path
                        particlePath child_path = new particlePath(parent_path, path_ID, rob.LocalGridDimension);
                        createNewPose(child_path);
                        added++;
                    }
                    n++;
                }
                
                // like salmon, after parents spawn they die
                if (added > 0)
                    for (int i = max - 1; i >= 0; i--)
                        Poses.RemoveAt(i);

                // if the robot has rotated through a large angle since
                // the previous time step then reset the scan matching estimate
                //if (Math.Abs(best_path.current_pose.pan - rob.pan) > rob.ScanMatchingMaxPanAngleChange * Math.PI / 180)
                    rob.ScanMatchingPanAngleEstimate = scanMatching.NOT_MATCHED;
            }


            PosesEvaluated = false;
        }
Example #22
        /// <summary>
        /// inserts the given ray into the grid
        /// There are three components to the sensor model used here:
        /// two areas describing the probably vacant space and one for
        /// the probably occupied space
        /// </summary>
        /// <param name="ray">ray object to be inserted into the grid</param>
        /// <param name="origin">the pose of the robot</param>
        /// <param name="sensormodel_lookup">the sensor model to be used</param>
        /// <param name="left_camera_location">position of the left camera in millimetres</param>
        /// <param name="right_camera_location">position of the right camera in millimetres</param>
        /// <param name="localiseOnly">if true does not add any mapping particles (pure localisation)</param>
        /// <returns>matching probability, expressed as log odds</returns>
        public float Insert(
            evidenceRay ray,
            particlePose origin,
            rayModelLookup sensormodel_lookup,
            pos3D left_camera_location,
            pos3D right_camera_location,
            bool localiseOnly)
        {
            // some constants to aid readability
            const int OCCUPIED_SENSORMODEL = 0;
            const int VACANT_SENSORMODEL_LEFT_CAMERA = 1;
            const int VACANT_SENSORMODEL_RIGHT_CAMERA = 2;

            const int X_AXIS = 0;
            const int Y_AXIS = 1;

            // which disparity index in the lookup table to use
            // the lookup table is quantised in steps of ray_model_interval_pixels
            if (sensormodel_lookup == null) Console.WriteLine("Sensor models are missing");
            float ray_model_interval_pixels = sensormodel_lookup.ray_model_interval_pixels;
            int sensormodel_index = (int)Math.Round(ray.disparity / ray_model_interval_pixels);
            
            // the initial models are blank, so just default to the one disparity pixel model
            bool small_disparity_value = false;
            if (sensormodel_index < 2)
            {
                sensormodel_index = 2;
                small_disparity_value = true;
            }

            // beyond a certain disparity the ray model for occupied space
            // is only ever going to be a single grid cell
            if (sensormodel_index >= sensormodel_lookup.probability.GetLength(0))
                sensormodel_index = sensormodel_lookup.probability.GetLength(0) - 1;

            float xdist_mm=0, ydist_mm=0, zdist_mm=0, xx_mm=0, yy_mm=0, zz_mm=0;
            float occupied_dx = 0, occupied_dy = 0, occupied_dz = 0;
            float intersect_x = 0, intersect_y = 0, intersect_z = 0;
            float centre_prob = 0, prob = 0, prob_localisation = 0; // probability values at the centre axis and outside
            float matchingScore = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;  // total localisation matching score
            int rayWidth = 0;         // widest point in the ray in cells
            int widest_point;         // widest point index
            int step_size = 1;
            particleGridCell hypothesis;

            // ray width at the fattest point in cells
            rayWidth = (int)Math.Round(ray.width / (cellSize_mm * 2));

            // calculate the centre position of the grid in millimetres
            int half_grid_width_mm = dimension_cells * cellSize_mm / 2;
            //int half_grid_width_vertical_mm = dimension_cells_vertical * cellSize_mm / 2;
            int grid_centre_x_mm = (int)(x - half_grid_width_mm);
            int grid_centre_y_mm = (int)(y - half_grid_width_mm);
            int grid_centre_z_mm = (int)z;

            //int max_dimension_cells = dimension_cells - rayWidth;

            // in turbo mode only use a single vacancy ray
            int max_modelcomponent = VACANT_SENSORMODEL_RIGHT_CAMERA;
            if (TurboMode) max_modelcomponent = VACANT_SENSORMODEL_LEFT_CAMERA;

			float[][] sensormodel_lookup_probability = sensormodel_lookup.probability;
			
            // consider each of the three parts of the sensor model
            for (int modelcomponent = OCCUPIED_SENSORMODEL; modelcomponent <= max_modelcomponent; modelcomponent++)
            {				
			
                // the range from the cameras from which insertion of data begins
                // for vacancy rays this will be zero, but will be non-zero for the occupancy area
                int starting_range_cells = 0;

                switch (modelcomponent)
                {
                    case OCCUPIED_SENSORMODEL:
                        {
                            // distance between the beginning and end of the probably
                            // occupied area
                            occupied_dx = ray.vertices[1].x - ray.vertices[0].x;
                            occupied_dy = ray.vertices[1].y - ray.vertices[0].y;
                            occupied_dz = ray.vertices[1].z - ray.vertices[0].z;
                            intersect_x = ray.vertices[0].x + (occupied_dx * ray.fattestPoint);
                            intersect_y = ray.vertices[0].y + (occupied_dy * ray.fattestPoint);
                            intersect_z = ray.vertices[0].z + (occupied_dz * ray.fattestPoint);

                            xdist_mm = occupied_dx;
                            ydist_mm = occupied_dy;
                            zdist_mm = occupied_dz;

                            // begin insertion at the beginning of the 
                            // probably occupied area
                            xx_mm = ray.vertices[0].x;
                            yy_mm = ray.vertices[0].y;
                            zz_mm = ray.vertices[0].z;
                            break;
                        }
                    case VACANT_SENSORMODEL_LEFT_CAMERA:
                        {
                            // distance between the left camera and the left side of
                            // the probably occupied area of the sensor model                            
                            xdist_mm = intersect_x - left_camera_location.x;
                            ydist_mm = intersect_y - left_camera_location.y;
                            zdist_mm = intersect_z - left_camera_location.z;

                            // begin insertion from the left camera position
                            xx_mm = left_camera_location.x;
                            yy_mm = left_camera_location.y;
                            zz_mm = left_camera_location.z;
                            step_size = 2;
                            break;
                        }
                    case VACANT_SENSORMODEL_RIGHT_CAMERA:
                        {
                            // distance between the right camera and the right side of
                            // the probably occupied area of the sensor model
                            xdist_mm = intersect_x - right_camera_location.x;
                            ydist_mm = intersect_y - right_camera_location.y;
                            zdist_mm = intersect_z - right_camera_location.z;

                            // begin insertion from the right camera position
                            xx_mm = right_camera_location.x;
                            yy_mm = right_camera_location.y;
                            zz_mm = right_camera_location.z;
                            step_size = 2;
                            break;
                        }
                }

                // which is the longest axis ?
                int longest_axis = X_AXIS;
                float longest = Math.Abs(xdist_mm);
                if (Math.Abs(ydist_mm) > longest)
                {
                    // y has the longest length
                    longest = Math.Abs(ydist_mm);
                    longest_axis = Y_AXIS;
                }

                // ensure that the vacancy model does not overlap
                // the probably occupied area
                // This is crude and could potentially leave a gap
                if (modelcomponent != OCCUPIED_SENSORMODEL)
                    longest -= ray.width;

                int steps = (int)(longest / cellSize_mm);
                if (steps < 1) steps = 1;

                // calculate the range from the cameras to the start of the ray in grid cells
                if (modelcomponent == OCCUPIED_SENSORMODEL)
                {
                    if (longest_axis == Y_AXIS)
                        starting_range_cells = (int)Math.Abs((ray.vertices[0].y - ray.observedFrom.y) / cellSize_mm);
                    else
                        starting_range_cells = (int)Math.Abs((ray.vertices[0].x - ray.observedFrom.x) / cellSize_mm);
                }

                // what is the widest point of the ray in cells
                if (modelcomponent == OCCUPIED_SENSORMODEL)
                    widest_point = (int)(ray.fattestPoint * steps / ray.length);
                else
                    widest_point = steps;

                // calculate increment values in millimetres
                float x_incr_mm = xdist_mm / steps;
                float y_incr_mm = ydist_mm / steps;
                float z_incr_mm = zdist_mm / steps;

                // step through the ray, one grid cell at a time
                int grid_step = 0;
                while (grid_step < steps)
                {
                    // is this position inside the maximum mapping range
                    bool withinMappingRange = true;
                    if (grid_step + starting_range_cells > max_mapping_range_cells)
                    {
                        withinMappingRange = false;
                        if ((grid_step==0) && (modelcomponent == OCCUPIED_SENSORMODEL))
                        {
                            grid_step = steps;
                            modelcomponent = 9999;
                        }
                    }

                    // calculate the width of the ray in cells at this point
                    // using a diamond shape ray model
                    int ray_wdth = 0;
                    if (rayWidth > 0)
                    {
                        if (grid_step < widest_point)
                            ray_wdth = grid_step * rayWidth / widest_point;
                        else
                        {
                            if (!small_disparity_value)
                                // most disparity values tail off to some point in the distance
                                ray_wdth = (steps - grid_step + widest_point) * rayWidth / (steps - widest_point);
                            else
                                // for very small disparity values the ray model has an infinite tail
                                // and maintains its width after the widest point
                                ray_wdth = rayWidth; 
                        }
                    }

                    // localisation rays are wider, to enable a more effective matching score
                    // which is not too narrowly focussed and brittle
                    int ray_wdth_localisation = ray_wdth + 1; //localisation_search_cells;

                    xx_mm += x_incr_mm*step_size;
                    yy_mm += y_incr_mm*step_size;
                    zz_mm += z_incr_mm*step_size;
                    // convert the x millimetre position into a grid cell position
                    int x_cell = (int)Math.Round((xx_mm - grid_centre_x_mm) / (float)cellSize_mm);
                    if ((x_cell > ray_wdth_localisation) && (x_cell < dimension_cells - ray_wdth_localisation))
                    {
                        // convert the y millimetre position into a grid cell position
                        int y_cell = (int)Math.Round((yy_mm - grid_centre_y_mm) / (float)cellSize_mm);
                        if ((y_cell > ray_wdth_localisation) && (y_cell < dimension_cells - ray_wdth_localisation))
                        {
                            // convert the z millimetre position into a grid cell position
                            int z_cell = (int)Math.Round((zz_mm - grid_centre_z_mm) / (float)cellSize_mm);
                            if ((z_cell >= 0) && (z_cell < dimension_cells_vertical))
                            {
								
                                int x_cell2 = x_cell;
                                int y_cell2 = y_cell;

                                // get the probability at this point 
                                // for the central axis of the ray using the inverse sensor model
                                if (modelcomponent == OCCUPIED_SENSORMODEL)
                                    centre_prob = ray_model_interval_pixels + (sensormodel_lookup_probability[sensormodel_index][grid_step] * ray_model_interval_pixels);
                                else
                                    // calculate the probability from the vacancy model
                                    centre_prob = vacancyFunction(grid_step / (float)steps, steps);


                                // width of the localisation ray
                                for (int width = -ray_wdth_localisation; width <= ray_wdth_localisation; width++)
                                {
                                    // is the width currently inside the mapping area of the ray ?
                                    bool isInsideMappingRayWidth = false;
                                    if ((width >= -ray_wdth) && (width <= ray_wdth))
                                        isInsideMappingRayWidth = true;

                                    // adjust the x or y cell position depending upon the 
                                    // deviation from the main axis of the ray
                                    if (longest_axis == Y_AXIS)
                                        x_cell2 = x_cell + width;
                                    else
                                        y_cell2 = y_cell + width;

                                    // probability at the central axis
                                    prob = centre_prob;
                                    prob_localisation = centre_prob;

                                    // probabilities are symmetrical about the axis of the ray
                                    // this multiplier implements a gaussian distribution around the centre
                                    if (width != 0) // don't bother if this is the centre of the ray
                                    {
                                        // the probability used for wide localisation rays
                                        prob_localisation *= gaussianLookup[Math.Abs(width) * 9 / ray_wdth_localisation];

                                        // the probability used for narrower mapping rays
                                        if (isInsideMappingRayWidth)
                                            prob *= gaussianLookup[Math.Abs(width) * 9 / ray_wdth];
                                    }

                                    if ((cell[x_cell2][y_cell2] != null) && (withinMappingRange))
                                    {
                                        // only localise using occupancy, not vacancy
                                        if (modelcomponent == OCCUPIED_SENSORMODEL)
                                        {
                                            // update the matching score, by combining the probability
                                            // of the grid cell with the probability from the localisation ray
                                            float score = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;
                                            if ((longest_axis == X_AXIS) || (longest_axis == Y_AXIS))
                                                score = matchingProbability(x_cell2, y_cell2, z_cell, origin, prob_localisation, ray.colour);

                                            if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                                            {
                                                if (matchingScore != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                                                    matchingScore += score;
                                                else
                                                    matchingScore = score;
                                            }
                                        }
                                    }

                                    if ((isInsideMappingRayWidth) && 
                                        (withinMappingRange) &&
                                        (!localiseOnly))
                                    {
                                        // add a new hypothesis to this grid coordinate
                                        // note that this is also added to the original pose
                                        hypothesis = new particleGridCell(x_cell2, y_cell2, z_cell, 
                                                                          prob, origin,
                                                                          ray.colour);
                                        if (origin.AddHypothesis(hypothesis, max_mapping_range_cells, dimension_cells, dimension_cells_vertical))
                                        {
                                            // generate a grid cell if necessary
                                            if (cell[x_cell2][y_cell2] == null)
                                                cell[x_cell2][y_cell2] = new occupancygridCellMultiHypothesis(dimension_cells_vertical);

                                            cell[x_cell2][y_cell2].AddHypothesis(hypothesis);                                            
                                            total_valid_hypotheses++;
                                        }
                                    }
                                }
                            }
                            else grid_step = steps;  // it's the end of the ray, break out of the loop
                        }
                        else grid_step = steps;  // it's the end of the ray, break out of the loop
                    }
                    else grid_step = steps;  // it's the end of the ray, break out of the loop
                    grid_step += step_size;
                }
            }

            return (matchingScore);
        }
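
        // The gaussianLookup table indexed above has ten entries, running from
        // the ray centre (index 0) out to its edge (index 9).  A minimal sketch
        // of how such a table might be initialised; the spread constant is an
        // assumption for illustration, not taken from the original source.
        private static float[] CreateGaussianLookup()
        {
            const int levels = 10;  // matches Math.Abs(width) * 9 / ray_wdth
            float[] lookup = new float[levels];
            for (int i = 0; i < levels; i++)
            {
                // fraction of the ray half-width: 0 at the centre, 1 at the edge
                float fraction = i / (float)(levels - 1);
                lookup[i] = (float)Math.Exp(-2.0f * fraction * fraction);
            }
            return lookup;
        }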
Example #23
0
        public pos3D rotate_old(float pan, float tilt, float roll)
        {
            float hyp;
            pos3D rotated = new pos3D(x, y, z);

            // roll
            // (applied unconditionally; with roll == 0 this block reduces to the identity)
            {
                hyp = (float)Math.Sqrt((rotated.x * rotated.x) + (rotated.z * rotated.z));
                if (hyp > 0)
                {
                    float roll_angle = (float)Math.Acos(rotated.z / hyp);
                    if (rotated.x < 0)
                    {
                        roll_angle = (float)(Math.PI * 2) - roll_angle;
                    }
                    float new_roll_angle = roll + roll_angle;
                    rotated.x = hyp * (float)Math.Sin(new_roll_angle);
                    rotated.z = hyp * (float)Math.Cos(new_roll_angle);
                }
            }
            if (tilt != 0)
            {
                // tilt
                hyp = (float)Math.Sqrt((rotated.y * rotated.y) + (rotated.z * rotated.z));
                if (hyp > 0)
                {
                    float tilt_angle = (float)Math.Acos(rotated.y / hyp);
                    if (rotated.z < 0)
                    {
                        tilt_angle = (float)(Math.PI * 2) - tilt_angle;
                    }
                    float new_tilt_angle = tilt + tilt_angle;
                    rotated.y = hyp * (float)Math.Sin(new_tilt_angle);
                    rotated.z = hyp * (float)Math.Cos(new_tilt_angle);
                }
            }

            // pan
            // (applied unconditionally, so that new_pan_angle and dist_xy are always set)
            {
                // pan
                hyp = (float)Math.Sqrt((rotated.x * rotated.x) + (rotated.y * rotated.y));
                if (hyp > 0)
                {
                    float pan_angle = (float)Math.Acos(rotated.y / hyp);
                    if (rotated.x < 0)
                    {
                        pan_angle = (float)(Math.PI * 2) - pan_angle;
                    }
                    rotated.new_pan_angle = pan - pan_angle;
                    rotated.dist_xy       = hyp;
                    rotated.x             = hyp * (float)Math.Sin(rotated.new_pan_angle);
                    rotated.y             = hyp * (float)Math.Cos(rotated.new_pan_angle);
                }
            }
            rotated.pan  = this.pan + pan;
            rotated.tilt = this.tilt + tilt;
            rotated.roll = this.roll + roll;
            return(rotated);
        }
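
        // A hypothetical sanity check of rotate_old (not part of the original
        // source): a point one unit ahead on the Y axis, panned by 90 degrees,
        // should land on the positive X axis, because the pan block computes
        // new_pan_angle = pan - pan_angle and then re-projects with sin/cos.
        public static void CheckRotateOldPan()
        {
            pos3D p = new pos3D(0, 1, 0);
            pos3D q = p.rotate_old((float)(Math.PI / 2), 0, 0);
            // expect roughly x = 1, y = 0
            Console.WriteLine("x={0:F3} y={1:F3}", q.x, q.y);
        }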
Example #24
0
        /// <summary>
        /// Calculate the position of the robot's head and cameras for this pose.
        /// The positions returned are relative to the robot's centre of rotation.
        /// </summary>
        /// <param name="body_width_mm">width of the robot body in millimetres</param>
        /// <param name="body_length_mm">length of the robot body in millimetres</param>
        /// <param name="body_centre_of_rotation_x">x centre of rotation of the robot, relative to the top left corner</param>
        /// <param name="body_centre_of_rotation_y">y centre of rotation of the robot, relative to the top left corner</param>
        /// <param name="body_centre_of_rotation_z">z centre of rotation of the robot, relative to the top left corner</param>
        /// <param name="head_centroid_x">head centroid x position in millimetres relative to the top left corner of the body</param>
        /// <param name="head_centroid_y">head centroid y position in millimetres relative to the top left corner of the body</param>
        /// <param name="head_centroid_z">head centroid z position in millimetres relative to the top left corner of the body</param>
        /// <param name="head_pan">head pan angle in radians</param>
        /// <param name="head_tilt">head tilt angle in radians</param>
        /// <param name="head_roll">head roll angle in radians</param>
        /// <param name="baseline_mm">stereo camera baseline in millimetres</param>
        /// <param name="stereo_camera_position_x">stereo camera x position in millimetres relative to the head centroid</param>
        /// <param name="stereo_camera_position_y">stereo camera y position in millimetres relative to the head centroid</param>
        /// <param name="stereo_camera_position_z">stereo camera z position in millimetres relative to the head centroid</param>
        /// <param name="stereo_camera_pan">stereo camera pan in radians relative to the head</param>
        /// <param name="stereo_camera_tilt">stereo camera tilt in radians relative to the head</param>
        /// <param name="stereo_camera_roll">stereo camera roll in radians relative to the head</param>
        /// <param name="head_location">returned location/orientation of the robot head</param>
        /// <param name="camera_centre_location">returned stereo camera centre position/orientation</param>
        /// <param name="left_camera_location">returned left camera position/orientation</param>
        /// <param name="right_camera_location">returned right camera position/orientation</param>
        public static void calculateCameraPositions(
		    float body_width_mm,
		    float body_length_mm,
		    float body_centre_of_rotation_x,
		    float body_centre_of_rotation_y,
		    float body_centre_of_rotation_z,
		    float head_centroid_x,
		    float head_centroid_y,
		    float head_centroid_z,
		    float head_pan,
		    float head_tilt,
		    float head_roll,
		    float baseline_mm,
		    float stereo_camera_position_x,
		    float stereo_camera_position_y,
		    float stereo_camera_position_z,
		    float stereo_camera_pan,
		    float stereo_camera_tilt,
		    float stereo_camera_roll,
            ref pos3D head_location,
            ref pos3D camera_centre_location,
            ref pos3D left_camera_location,
            ref pos3D right_camera_location)
        {
            // calculate the position of the centre of the head relative to
            // the centre of rotation of the robot's body
            pos3D head_centroid = 
				new pos3D(
				    -(body_width_mm * 0.5f) + (body_centre_of_rotation_x - (body_width_mm * 0.5f)) + head_centroid_x,
                    -(body_length_mm * 0.5f) + (body_centre_of_rotation_y - (body_length_mm * 0.5f)) + head_centroid_y,
                    head_centroid_z);

            // location of the centre of the head
            // adjusted for the robot pose and the head pan and tilt angle.
            // Note that the positions and orientations of individual cameras
            // on the head have already been accounted for within stereoModel.createObservation
            pos3D head_locn = 
				head_centroid.rotate(
				    head_pan, head_tilt, 0);
            head_location.copyFrom(head_locn);

            // calculate the position of the centre of the stereo camera
            // (baseline centre point)
            pos3D camera_centre_locn = 
				new pos3D(
				    stereo_camera_position_x, 
				    stereo_camera_position_y, 
				    stereo_camera_position_z);
			
			// rotate the stereo camera	    	    
            camera_centre_locn = 
                camera_centre_locn.rotate(
                    stereo_camera_pan + head_pan, 
                    stereo_camera_tilt + head_tilt, 
                    stereo_camera_roll + head_roll);
            
            // move the stereo camera relative to the head position
            camera_centre_location = 
                camera_centre_locn.translate(
                    head_location.x, 
                    head_location.y, 
                    head_location.z);

            // where are the left and right cameras?
            // we need to know this for the origins of the vacancy models
            float half_baseline_length = baseline_mm * 0.5f;
            pos3D left_camera_locn = new pos3D(-half_baseline_length, 0, 0);
            left_camera_locn = left_camera_locn.rotate(stereo_camera_pan + head_pan, stereo_camera_tilt + head_tilt, stereo_camera_roll + head_roll);            
            pos3D right_camera_locn = new pos3D(-left_camera_locn.x, -left_camera_locn.y, -left_camera_locn.z);
            left_camera_location = left_camera_locn.translate(camera_centre_location.x, camera_centre_location.y, camera_centre_location.z);
            right_camera_location = right_camera_locn.translate(camera_centre_location.x, camera_centre_location.y, camera_centre_location.z);
            right_camera_location.pan = left_camera_location.pan;
        }
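
        // A usage sketch with made-up geometry; every figure below is
        // illustrative rather than taken from any particular robot.
        public static void ExampleCameraPositions()
        {
            pos3D head = new pos3D(0, 0, 0);
            pos3D cam_centre = new pos3D(0, 0, 0);
            pos3D cam_left = new pos3D(0, 0, 0);
            pos3D cam_right = new pos3D(0, 0, 0);
            calculateCameraPositions(
                400, 500,        // body width and length, mm
                200, 250, 0,     // centre of rotation, relative to the top left corner
                200, 250, 900,   // head centroid, relative to the top left corner
                0, 0, 0,         // head pan, tilt, roll
                100,             // camera baseline, mm
                0, 0, 0,         // stereo camera position relative to the head centroid
                0, 0, 0,         // stereo camera pan, tilt, roll relative to the head
                ref head, ref cam_centre, ref cam_left, ref cam_right);
            // with zero rotations the two cameras should straddle the camera
            // centre, 50mm either side along the baseline
        }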
Example #25
0
        /// <summary>
        /// returns a version of this evidence ray rotated through the given
        /// pan, tilt and roll angles and translated to the given position.
        /// This is used for generating trial poses.
        /// </summary>
        /// <param name="extra_pan">pan angle to be added</param>
        /// <param name="extra_tilt">tilt angle to be added</param>
        /// <param name="extra_roll">roll angle to be added</param>
        /// <param name="translation_x">new observation x coordinate</param>
        /// <param name="translation_y">new observation y coordinate</param>
        /// <param name="translation_z">new observation z coordinate</param>
        /// <returns>evidence ray object</returns>
        public evidenceRay trialPose(
		    float extra_pan,
		    float extra_tilt,
		    float extra_roll,
            float translation_x, 
		    float translation_y,
		    float translation_z)
        {
            evidenceRay rotated_ray = new evidenceRay();
            float new_pan_angle = extra_pan + pan_angle;
            float new_tilt_angle = extra_tilt + tilt_angle;
            float new_roll_angle = extra_roll + roll_angle;

            // as observed from the centre of the camera baseline
            rotated_ray.observedFrom = new pos3D(translation_x, translation_y, translation_z);
            rotated_ray.fattestPoint = fattestPoint;

			pos3D r1 = new pos3D(0, start_dist, 0);			
            rotated_ray.vertices[0] = r1.rotate(new_pan_angle, new_tilt_angle, new_roll_angle);
            rotated_ray.vertices[0].x += translation_x;
            rotated_ray.vertices[0].y += translation_y;
            rotated_ray.vertices[0].z += translation_z;

			pos3D r2 = new pos3D(0, start_dist+length, 0);			
            rotated_ray.vertices[1] = r2.rotate(new_pan_angle, new_tilt_angle, new_roll_angle);
            rotated_ray.vertices[1].x += translation_x;
            rotated_ray.vertices[1].y += translation_y;
            rotated_ray.vertices[1].z += translation_z;
			
            rotated_ray.pan_angle = new_pan_angle;
			rotated_ray.tilt_angle = new_tilt_angle;
			rotated_ray.roll_angle = new_roll_angle;
            rotated_ray.pan_index = (int)(new_pan_angle * pan_steps / 6.2831854f);
            rotated_ray.tilt_index = (int)(new_tilt_angle * pan_steps / 6.2831854f);
            rotated_ray.roll_index = (int)(new_roll_angle * pan_steps / 6.2831854f);
            rotated_ray.length = length;
            rotated_ray.width = width;
            rotated_ray.start_dist = start_dist;
            rotated_ray.disparity = disparity;

            for (int col = 2; col >= 0; col--)
                rotated_ray.colour[col] = colour[col];

            return (rotated_ray);
        }
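
        // A sketch of how trialPose might drive a localisation search, scoring
        // small pan perturbations of an observed ray against the grid; the
        // argument names follow the calls made in the surrounding examples and
        // are assumptions for illustration.
        public static float BestPanOffset(
            evidenceRay ray, dpslam grid, particlePose pose,
            robot rob, int cam, pos3D left_cam, pos3D right_cam,
            float pose_x_mm, float pose_y_mm)
        {
            float best_score = float.MinValue;
            float best_pan = 0;
            for (float extra_pan = -0.1f; extra_pan <= 0.1f; extra_pan += 0.02f)
            {
                evidenceRay candidate = ray.trialPose(extra_pan, 0, 0, pose_x_mm, pose_y_mm, 0);
                // localiseOnly = true: score the ray without adding mapping hypotheses
                float score = grid.Insert(candidate, pose, rob.head.sensormodel[cam], left_cam, right_cam, true);
                if (score > best_score)
                {
                    best_score = score;
                    best_pan = extra_pan;
                }
            }
            return best_pan;
        }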
Example #26
0
        /// <summary>
        /// mapping update, which can consist of multiple threads running concurrently
        /// </summary>
        /// <param name="stereo_rays">rays to be thrown into the grid map</param>
        private void MappingUpdate(
		    List<evidenceRay>[] stereo_rays)
        {
		    pos3D pose = new pos3D(x, y, z);

            // object used for taking benchmark timings
            stopwatch clock = new stopwatch();

            clock.Start();

            // update all current poses with the observed rays
            motion.LocalGrid = LocalGrid;
            motion.AddObservation(stereo_rays, false);

            clock.Stop();
            benchmark_observation_update = clock.time_elapsed_mS;

            // what's the relative position of the robot inside the grid?
            pos3D relative_position = new pos3D(pose.x - LocalGrid.x, pose.y - LocalGrid.y, 0);
            relative_position.pan = pose.pan - LocalGrid.pan;

            clock.Start();

            // garbage collect dead occupancy hypotheses
            LocalGrid.GarbageCollect();

            clock.Stop();
            benchmark_garbage_collection = clock.time_elapsed_mS;
				
            // update the robots position and orientation
            // with the pose from the highest scoring path
			if (motion.current_robot_pose != null)
			{
	            x = motion.current_robot_pose.x;
	            y = motion.current_robot_pose.y;
	            z = motion.current_robot_pose.z;
	            pan = motion.current_robot_pose.pan;
				//Console.WriteLine("Position " + x.ToString() + " " + y.ToString());
			}

        }
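
        // Since the update is designed to run concurrently, one hedged way to
        // dispatch it is on a pool thread, so that sensor capture can continue
        // while the grid is updated.  The pending_stereo_rays parameter is an
        // assumption for illustration.
        private void QueueMappingUpdate(List<evidenceRay>[] pending_stereo_rays)
        {
            System.Threading.ThreadPool.QueueUserWorkItem(
                state => MappingUpdate(pending_stereo_rays));
        }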
Example #27
0
        /// <summary>
        /// Show a diagram of the robot in this pose
        /// This is useful for checking that the positions of cameras have 
        /// been calculated correctly
        /// </summary>
        /// <param name="robot">robot object</param>
        /// <param name="img">image as a byte array</param>
        /// <param name="width">width of the image</param>
        /// <param name="height">height of the image</param>
        /// <param name="clearBackground">clear the background before drawing</param>
        /// <param name="min_x_mm">top left x coordinate</param>
        /// <param name="min_y_mm">top left y coordinate</param>
        /// <param name="max_x_mm">bottom right x coordinate</param>
        /// <param name="max_y_mm">bottom right y coordinate</param>
        /// <param name="showFieldOfView">whether to show the field of view of each camera</param>
        public void Show(
		    robot rob, 
            byte[] img, int width, int height, bool clearBackground,
            int min_x_mm, int min_y_mm,
            int max_x_mm, int max_y_mm, int line_width,
            bool showFieldOfView)
        {
            if (clearBackground)
                for (int i = 0; i < width * height * 3; i++)
                    img[i] = 255;

            // get the positions of the head and cameras for this pose
            pos3D head_location = new pos3D(0, 0, 0);
            pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] left_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] right_camera_location = new pos3D[rob.head.no_of_stereo_cameras];
            calculateCameraPositions(rob, ref head_location,
                                     ref camera_centre_location,
                                     ref left_camera_location,
                                     ref right_camera_location);
            
            int w = max_x_mm - min_x_mm;
            int h = max_y_mm - min_y_mm;

            // draw the body
            int xx = (int)((x - min_x_mm) * width / w);
            int yy = (int)((y - min_y_mm) * height / h);
            int wdth = (int)(rob.BodyWidth_mm * width / w);
            int hght = (int)(rob.BodyLength_mm * height / h);
            drawing.drawBox(img, width, height, xx, yy, wdth, hght, pan, 0, 255, 0, line_width);

            // draw the head
            xx = (int)((head_location.x - min_x_mm) * width / w);
            yy = (int)((head_location.y - min_y_mm) * height / h);
            int radius = (int)(rob.HeadSize_mm * width / w);
            drawing.drawBox(img, width, height, xx, yy, radius, radius, head_location.pan, 0, 255, 0, line_width);

            // draw the cameras
            for (int cam = 0; cam < rob.head.no_of_stereo_cameras; cam++)
            {
                // draw the left camera
                int xx1 = (int)((left_camera_location[cam].x - min_x_mm) * width / w);
                int yy1 = (int)((left_camera_location[cam].y - min_y_mm) * height / h);
                wdth = (int)((rob.head.cameraBaseline[cam] / 4) * width / w);
                hght = (int)((rob.head.cameraBaseline[cam] / 12) * height / h);
                if (hght < 1) hght = 1;
                drawing.drawBox(img, width, height, xx1, yy1, wdth, hght, left_camera_location[cam].pan + (float)(Math.PI / 2), 0, 255, 0, line_width);

                // draw the right camera
                int xx2 = (int)((right_camera_location[cam].x - min_x_mm) * width / w);
                int yy2 = (int)((right_camera_location[cam].y - min_y_mm) * height / h);
                drawing.drawBox(img, width, height, xx2, yy2, wdth, hght, right_camera_location[cam].pan + (float)(Math.PI / 2), 0, 255, 0, line_width);

                if (showFieldOfView)
                {
                    float half_FOV = rob.head.cameraFOVdegrees[cam] * (float)Math.PI / 360.0f;
                    int xx_ray = xx1 + (int)(width * Math.Sin(left_camera_location[cam].pan + half_FOV));
                    int yy_ray = yy1 + (int)(width * Math.Cos(left_camera_location[cam].pan + half_FOV));
                    drawing.drawLine(img, width, height, xx1, yy1, xx_ray, yy_ray, 200, 200, 255, 0, false);
                    xx_ray = xx1 + (int)(width * Math.Sin(left_camera_location[cam].pan - half_FOV));
                    yy_ray = yy1 + (int)(width * Math.Cos(left_camera_location[cam].pan - half_FOV));
                    drawing.drawLine(img, width, height, xx1, yy1, xx_ray, yy_ray, 200, 200, 255, 0, false);
                    xx_ray = xx2 + (int)(width * Math.Sin(right_camera_location[cam].pan + half_FOV));
                    yy_ray = yy2 + (int)(width * Math.Cos(right_camera_location[cam].pan + half_FOV));
                    drawing.drawLine(img, width, height, xx2, yy2, xx_ray, yy_ray, 200, 200, 255, 0, false);
                    xx_ray = xx2 + (int)(width * Math.Sin(right_camera_location[cam].pan - half_FOV));
                    yy_ray = yy2 + (int)(width * Math.Cos(right_camera_location[cam].pan - half_FOV));
                    drawing.drawLine(img, width, height, xx2, yy2, xx_ray, yy_ray, 200, 200, 255, 0, false);
                }
            }
        }
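
        // A usage sketch, assuming the object exposing Show is a particlePose
        // and that a robot instance exists elsewhere; writing the buffer out
        // as a bitmap is left to whatever image helper is available.
        public static void ExampleShow(particlePose pose, robot rob)
        {
            int width = 640, height = 480;
            byte[] img = new byte[width * height * 3];
            // plan view covering a 4x4 metre area centred on the origin
            pose.Show(rob, img, width, height, true,
                      -2000, -2000, 2000, 2000, 1, true);
            // img now holds a 24bpp RGB diagram of the body, head and cameras
        }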
Example #28
0
        /// <summary>
        /// translate and rotate the ray by the given values
        /// </summary>
        /// <param name="r">object containing the desired translation and rotation</param>        
        public void translateRotate(pos3D r)
        {
            //take a note of where the ray was observed from
            //in egocentric coordinates.  This will help to
            //restrict localisation searching
            observedFrom = new pos3D(r.x, r.y, r.z);
            observedFrom.pan = r.pan;
            observedFrom.tilt = r.tilt;

            //centre = centre.rotate(r.pan, r.tilt, r.roll);
            for (int v = 0; v < 2; v++)
                vertices[v] = vertices[v].rotate(r.pan, r.tilt, r.roll);

            //store the XY plane pan angle, which may later be used
            //to reduce the searching during localisation            
            pan_angle = vertices[0].new_pan_angle;
            pan_index = (int)(pan_angle * pan_steps / (2 * 3.1415927f));
            start_dist = vertices[0].dist_xy;
            length = vertices[1].dist_xy - start_dist;            

            //centre = centre.translate(r.x, r.y, r.z);
            for (int v = 0; v < 2; v++)
                vertices[v] = vertices[v].translate(r.x, r.y, r.z);
        }
Example #29
0
        /// <summary>
        /// Calculate the position of the robot's head and cameras for this pose.
        /// The positions returned are relative to the robot's centre of rotation.
        /// </summary>
        /// <param name="body_width_mm">width of the robot body in millimetres</param>
        /// <param name="body_length_mm">length of the robot body in millimetres</param>
        /// <param name="body_centre_of_rotation_x">x centre of rotation of the robot, relative to the top left corner</param>
        /// <param name="body_centre_of_rotation_y">y centre of rotation of the robot, relative to the top left corner</param>
        /// <param name="body_centre_of_rotation_z">z centre of rotation of the robot, relative to the top left corner</param>
        /// <param name="head_centroid_x">head centroid x position in millimetres relative to the top left corner of the body</param>
        /// <param name="head_centroid_y">head centroid y position in millimetres relative to the top left corner of the body</param>
        /// <param name="head_centroid_z">head centroid z position in millimetres relative to the top left corner of the body</param>
        /// <param name="head_pan">head pan angle in radians</param>
        /// <param name="head_tilt">head tilt angle in radians</param>
        /// <param name="head_roll">head roll angle in radians</param>
        /// <param name="baseline_mm">stereo camera baseline in millimetres</param>
        /// <param name="stereo_camera_position_x">stereo camera x position in millimetres relative to the head centroid</param>
        /// <param name="stereo_camera_position_y">stereo camera y position in millimetres relative to the head centroid</param>
        /// <param name="stereo_camera_position_z">stereo camera z position in millimetres relative to the head centroid</param>
        /// <param name="stereo_camera_pan">stereo camera pan in radians relative to the head</param>
        /// <param name="stereo_camera_tilt">stereo camera tilt in radians relative to the head</param>
        /// <param name="stereo_camera_roll">stereo camera roll in radians relative to the head</param>
        /// <param name="head_location">returned location/orientation of the robot head</param>
        /// <param name="camera_centre_location">returned stereo camera centre position/orientation</param>
        /// <param name="left_camera_location">returned left camera position/orientation</param>
        /// <param name="right_camera_location">returned right camera position/orientation</param>
        public static void calculateCameraPositions(
            float body_width_mm,
            float body_length_mm,
            float body_centre_of_rotation_x,
            float body_centre_of_rotation_y,
            float body_centre_of_rotation_z,
            float head_centroid_x,
            float head_centroid_y,
            float head_centroid_z,
            float head_pan,
            float head_tilt,
            float head_roll,
            float baseline_mm,
            float stereo_camera_position_x,
            float stereo_camera_position_y,
            float stereo_camera_position_z,
            float stereo_camera_pan,
            float stereo_camera_tilt,
            float stereo_camera_roll,
            ref pos3D head_location,
            ref pos3D camera_centre_location,
            ref pos3D left_camera_location,
            ref pos3D right_camera_location)
        {
            // calculate the position of the centre of the head relative to
            // the centre of rotation of the robot's body
            pos3D head_centroid =
                new pos3D(
                    -(body_width_mm * 0.5f) + (body_centre_of_rotation_x - (body_width_mm * 0.5f)) + head_centroid_x,
                    -(body_length_mm * 0.5f) + (body_centre_of_rotation_y - (body_length_mm * 0.5f)) + head_centroid_y,
                    head_centroid_z);

            // location of the centre of the head
            // adjusted for the robot pose and the head pan and tilt angle.
            // Note that the positions and orientations of individual cameras
            // on the head have already been accounted for within stereoModel.createObservation
            pos3D head_locn =
                head_centroid.rotate(
                    head_pan, head_tilt, 0);

            head_location.copyFrom(head_locn);

            // calculate the position of the centre of the stereo camera
            // (baseline centre point)
            pos3D camera_centre_locn =
                new pos3D(
                    stereo_camera_position_x,
                    stereo_camera_position_y,
                    stereo_camera_position_z);

            // rotate the stereo camera
            camera_centre_locn =
                camera_centre_locn.rotate(
                    stereo_camera_pan + head_pan,
                    stereo_camera_tilt + head_tilt,
                    stereo_camera_roll + head_roll);

            // move the stereo camera relative to the head position
            camera_centre_location =
                camera_centre_locn.translate(
                    head_location.x,
                    head_location.y,
                    head_location.z);

            // where are the left and right cameras?
            // we need to know this for the origins of the vacancy models
            float half_baseline_length = baseline_mm * 0.5f;
            pos3D left_camera_locn     = new pos3D(-half_baseline_length, 0, 0);

            left_camera_locn = left_camera_locn.rotate(stereo_camera_pan + head_pan, stereo_camera_tilt + head_tilt, stereo_camera_roll + head_roll);
            pos3D right_camera_locn = new pos3D(-left_camera_locn.x, -left_camera_locn.y, -left_camera_locn.z);

            left_camera_location      = left_camera_locn.translate(camera_centre_location.x, camera_centre_location.y, camera_centre_location.z);
            right_camera_location     = right_camera_locn.translate(camera_centre_location.x, camera_centre_location.y, camera_centre_location.z);
            right_camera_location.pan = left_camera_location.pan;
        }
Example #30
0
		public void ProbeView()
		{		
			int map_dim = 14;
			byte[] map = {
				0,0,0,0,0,0,0,0,0,0,0,0,0,0,
				0,0,0,0,0,0,0,0,0,0,0,0,0,0,				
				0,0,0,0,1,1,1,1,0,0,0,0,0,0,
				0,0,0,0,1,0,0,1,0,0,0,0,0,0,
				0,0,0,0,1,0,0,1,0,0,0,0,0,0,
				0,0,0,0,1,0,0,1,1,1,1,0,0,0,
				0,0,0,0,1,0,0,0,0,0,1,0,0,0,
				0,0,0,0,1,0,0,0,0,0,1,0,0,0,
				0,0,0,0,1,0,0,0,0,0,1,0,0,0,
				0,0,0,0,1,1,1,0,1,1,1,0,0,0,
				0,0,0,0,0,0,0,0,0,0,0,0,0,0,
				0,0,0,0,0,0,0,0,0,0,0,0,0,0,			
				0,0,0,0,0,0,0,0,0,0,0,0,0,0,
				0,0,0,0,0,0,0,0,0,0,0,0,0,0
			};
			dpslam sim = CreateSimulation(map_dim, map, 50);
			
			int image_width = 320;
			int image_height = 240;
			float FOV_degrees = 90;
			float max_range_mm = 2000;
			pos3D camPose = new pos3D(0,0,0);
			
			float x0_mm = 0;
			float y0_mm = 0;
			float z0_mm = 100;
			float x1_mm = 0;
			float y1_mm = 1500;
			float z1_mm = -300;
			bool use_ground_plane = true;
			float range = sim.ProbeRange(
		        null,
                x0_mm, y0_mm, z0_mm,
	            x1_mm, y1_mm, z1_mm,
		        use_ground_plane);
			Assert.IsTrue(range > -1, "Ground plane was not detected");
			Assert.IsTrue(range > 550);
			Assert.IsTrue(range < 650);
			
			int step_size = 10;
			float[] range_buffer = new float[(image_width/step_size)*(image_height/step_size)];
			sim.ProbeView(
			    null, 
			    camPose.x, camPose.y, camPose.z, 
			    camPose.pan, camPose.tilt, camPose.roll, 
			    FOV_degrees, 
			    image_width, image_height, 
			    step_size, 
			    max_range_mm, false, 
			    range_buffer);
			
			int ctr=0;
			for (int i = 0; i < range_buffer.Length; i++)
			{
				if (range_buffer[i] > -1) ctr++;
				//Console.WriteLine("Range: " + range_buffer[i].ToString());
			}
			Assert.IsTrue(ctr > 0, "No objects were ranged within the simulation");
		}
Example #31
0
		/// <summary>
		/// returns a set of evidence rays based upon the given observation
		/// </summary>
		/// <param name="observer">observer position and orientation</param>
		/// <param name="baseline">baseline distance</param>
		/// <param name="image_width">camera image width</param>
		/// <param name="image_height">camera image height</param>
		/// <param name="FOV_degrees">horizontal camera field of view in degrees</param>
		/// <param name="stereo_features">stereo features, as groups of three figures</param>
		/// <param name="stereo_features_colour">colour of each stereo feature</param>
		/// <param name="stereo_features_uncertainties">uncertainty value associated with each stereo feature</param>
		/// <param name="translate">translate the ray to be relative to the observer position, otherwise it's relative to (0,0,0)</param>
		/// <returns>list of evidence rays, centred at (0, 0, 0)</returns>
        public List<evidenceRay> createObservation(
		    pos3D observer,
		    float baseline,
		    int image_width,
		    int image_height,
		    float FOV_degrees,
		    float[] stereo_features,
		    byte[] stereo_features_colour,
		    bool translate)
        {
            List<evidenceRay> result = new List<evidenceRay>();

            // get essential data for this stereo camera
            FOV_horizontal = FOV_degrees * (float)Math.PI / 180.0f;
            FOV_vertical = FOV_horizontal * image_height / image_width;

            // calc uncertainty in angle (+/- half a pixel, i.e. one pixel width in radians)
            sigma = FOV_horizontal / image_width;

            // some head geometry
            pos3D headOrientation = observer;
            pos3D cameraOrientation = observer;
			if (!translate) cameraOrientation = new pos3D(0, 0, 0);
            cameraOrientation.pan = headOrientation.pan;
            cameraOrientation.tilt = headOrientation.tilt;
            cameraOrientation.roll = headOrientation.roll;

            if (stereo_features != null)  // if there are stereo features associated with this camera
            {
                int f2 = 0;
                for (int f = 0; f < stereo_features.Length; f += 4)
                {
                    // get the parameters of the feature
					//float match_prob = stereo_features[f];
                    float image_x = stereo_features[f + 1];
                    float image_y = stereo_features[f + 2];
                    float disparity = stereo_features[f + 3];

                    // create a ray
                    evidenceRay ray = 
						createRay(
						    image_x, image_y, disparity, 
						    1,
                            stereo_features_colour[f2*3],
                            stereo_features_colour[f2*3+1],
                            stereo_features_colour[f2*3+2]);

                    if (ray != null)
                    {
                        // convert from camera-centric coordinates to head coordinates
                        ray.translateRotate(cameraOrientation);
						
                        // add to the result
                        result.Add(ray);
                    }
                    f2++;
                }
            }

            return (result);
        }
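
		// A sketch of the expected input layout, called from within the stereo
		// model: four values per feature (matching probability, image x,
		// image y, disparity) and three colour bytes per feature.  All figures
		// are illustrative only.
		public List<evidenceRay> ExampleObservation(pos3D observer)
		{
			float[] stereo_features = {
			//  prob   x    y    disparity
			    0.9f, 160, 120, 12.5f,
			    0.8f, 200, 130,  8.0f
			};
			byte[] stereo_features_colour = {
			    255, 0, 0,   // colour of the first feature
			    0, 255, 0    // colour of the second feature
			};
			return createObservation(
			    observer, 107, 320, 240, 65,
			    stereo_features, stereo_features_colour, true);
		}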
Example #32
0
		/// <summary>
		/// probes the grid and returns a set of ranges
		/// </summary>
		/// <param name="pose">current best pose estimate</param>
		/// <param name="camera_x_mm">camera sensor x coordinate</param>
		/// <param name="camera_y_mm">camera sensor y coordinate</param>
		/// <param name="camera_z_mm">camera sensor z coordinate</param>
		/// <param name="camera_pan">camera pan in radians</param>
		/// <param name="camera_tilt">camera tilt in radians</param>
		/// <param name="camera_roll">camera roll in radians</param>
		/// <param name="camera_FOV_degrees">camera field of view in degrees</param>
		/// <param name="camera_resolution_width">camera resolution pixels horizontal</param>
		/// <param name="camera_resolution_height">camera resolution pixels vertical</param>
		/// <param name="step_size">sampling step size in pixels</param>
		/// <param name="max_range_mm">maximum range</param>
		/// <param name="use_ground_plane">whether to detect the ground plane, z=0</param>
		/// <param name="range_buffer">buffer storing returned ranges (-1 = range unknown)</param>
		public void ProbeView(
		    particlePose pose,
		    float camera_x_mm,
		    float camera_y_mm,
		    float camera_z_mm,
		    float camera_pan,
		    float camera_tilt,
		    float camera_roll,
		    float camera_FOV_degrees,
		    int camera_resolution_width,
		    int camera_resolution_height,
		    int step_size,
		    float max_range_mm,
		    bool use_ground_plane,
		    float[] range_buffer)
		{
			float camera_FOV_radians = camera_FOV_degrees * (float)Math.PI / 180.0f;
			float camera_FOV_radians2 = camera_FOV_radians * camera_resolution_height / camera_resolution_width;			
			int n = 0;
			for (int camy = 0; camy < camera_resolution_height; camy+=step_size)
			{
				float tilt = (camy * camera_FOV_radians2 / camera_resolution_height) - (camera_FOV_radians2*0.5f);
				float z = max_range_mm*(float)Math.Tan(tilt);
			    for (int camx = 0; camx < camera_resolution_width; camx+=step_size, n++)
			    {
				    float pan = (camx * camera_FOV_radians / camera_resolution_width) - (camera_FOV_radians*0.5f);
					float x = max_range_mm*(float)Math.Tan(pan);
					pos3D p = new pos3D(x,max_range_mm,z);
					pos3D p2 = p.rotate(camera_pan, camera_tilt, camera_roll);

					range_buffer[n] = ProbeRange(
		                pose,
	                    camera_x_mm,
					    camera_y_mm,
					    camera_z_mm,
		                camera_x_mm + p2.x,
		                camera_y_mm + p2.y,
		                camera_z_mm + p2.z,
					    use_ground_plane);
				}
			}
		}
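
		// The buffer is filled row by row following the two sampling loops
		// above, so it can be read back as a low resolution range image.
		// A sketch, assuming the image width divides evenly by step_size.
		public static void PrintRanges(
		    float[] range_buffer, int camera_resolution_width, int step_size)
		{
			int cols = camera_resolution_width / step_size;
			for (int n = 0; n < range_buffer.Length; n++)
			{
				int camx = (n % cols) * step_size;
				int camy = (n / cols) * step_size;
				if (range_buffer[n] > -1)
					Console.WriteLine("pixel ({0},{1}) range {2}mm",
					                  camx, camy, range_buffer[n]);
			}
		}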
Example #33
0
        /// <summary>
        /// rotates this position, using Y forward convention
        /// </summary>
        /// <param name="pan">pan angle in radians</param>
        /// <param name="tilt">tilt angle in radians</param>
        /// <param name="roll">roll angle in radians</param>
        /// <returns>rotated position</returns>                
        public pos3D rotate(float pan, float tilt, float roll)
        {
            float x2 = x;
            float y2 = y;
            float z2 = z;
            
            float x3 = x;
            float y3 = y;
            float z3 = z;

            // Rotation about the y axis
            if (roll != 0)
            {
                float roll2 = roll + (float)Math.PI;
                x3 = (float)((Math.Cos(roll2) * x2) + (Math.Sin(roll2) * z2));
                z3 = (float)(-(Math.Sin(roll) * x2) + (Math.Cos(roll) * z2));
                x2 = x3;
                z2 = z3;
            }

            // Rotation about the x axis
            if (tilt != 0)
            {
                float tilt2 = tilt;
                z3 = (float)((Math.Cos(tilt2) * y2) - (Math.Sin(tilt2) * z2));
                y3 = (float)((Math.Sin(tilt2) * y2) + (Math.Cos(tilt2) * z2));
                y2 = y3;
                z2 = z3;
            }
                                    
            // Rotation about the z axis: 
            if (pan != 0)
            {                
                float pan2 = pan + (float)Math.PI;
                x3 = (float)((Math.Cos(pan2) * x2) - (Math.Sin(pan2) * y2));
                y3 = (float)((Math.Sin(pan) * x2) + (Math.Cos(pan) * y2));                 
            }

            pos3D rotated = new pos3D(x3, y3, z3);
            rotated.pan = this.pan + pan;
            rotated.tilt = this.tilt + tilt;
            rotated.roll = this.roll + roll;
            return (rotated);
        }
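
        // A hypothetical check of the sign convention (not from the original
        // source).  Because the pan branch offsets the angle by PI on the x
        // term, a positive pan turns a forward point to the right (clockwise
        // viewed from above), compass style, rather than anticlockwise.
        public static void CheckPanConvention()
        {
            pos3D p = new pos3D(0, 1, 0);
            pos3D q = p.rotate((float)(Math.PI / 2), 0, 0);
            // expect roughly x = 1, y = 0
            Console.WriteLine("x={0:F3} y={1:F3}", q.x, q.y);
        }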
Example #34
0
 /// <summary>
 /// return a translated version of the point
 /// </summary>
 /// <param name="x"></param>
 /// <param name="y"></param>
 /// <param name="z"></param>
 /// <returns></returns>
 public pos3D translate(float x, float y, float z)
 {
     pos3D result = new pos3D(this.x + x, this.y + y, this.z + z);
     result.pan = pan;
     result.tilt = tilt;
     result.roll = roll;
     return(result);
 }
Example #35
0
        /// <summary>
        /// calculate the position of the robot's head and cameras for this pose
        /// </summary>
        /// <param name="rob">robot object</param>
        /// <param name="head_location">location of the centre of the head</param>
        /// <param name="camera_centre_location">location of the centre of each stereo camera</param>
        /// <param name="left_camera_location">location of the left camera within each stereo camera</param>
        /// <param name="right_camera_location">location of the right camera within each stereo camera</param>
        protected void calculateCameraPositions(
            robot rob,
            ref pos3D head_location,
            ref pos3D[] camera_centre_location,
            ref pos3D[] left_camera_location,
            ref pos3D[] right_camera_location)
        {
            // calculate the position of the centre of the head relative to
            // the centre of rotation of the robot's body
            pos3D head_centroid = 
				new pos3D(
				    -(rob.BodyWidth_mm / 2) + rob.head.x,
                    -(rob.BodyLength_mm / 2) + rob.head.y,
                    rob.head.z);

            // location of the centre of the head on the grid map
            // adjusted for the robot pose and the head pan and tilt angle.
            // Note that the positions and orientations of individual cameras
            // on the head have already been accounted for within stereoModel.createObservation
            pos3D head_locn = head_centroid.rotate(rob.head.pan + pan, rob.head.tilt, 0);
            head_locn = head_locn.translate(x, y, 0);
            head_location.copyFrom(head_locn);

            for (int cam = 0; cam < rob.head.no_of_stereo_cameras; cam++)
            {
                // calculate the position of the centre of the stereo camera
                // (baseline centre point)
                pos3D camera_centre_locn = new pos3D(rob.head.cameraPositionHeadRelative[cam].x, rob.head.cameraPositionHeadRelative[cam].y, rob.head.cameraPositionHeadRelative[cam].z);
                camera_centre_locn = camera_centre_locn.rotate(rob.head.cameraPositionHeadRelative[cam].pan + rob.head.pan + pan, rob.head.cameraPositionHeadRelative[cam].tilt, rob.head.cameraPositionHeadRelative[cam].roll);
                camera_centre_location[cam] = camera_centre_locn.translate(head_location.x, head_location.y, head_location.z);

                // where are the left and right cameras?
                // we need to know this for the origins of the vacancy models
                float half_baseline_length = rob.head.cameraBaseline[cam] / 2;
                pos3D left_camera_locn = new pos3D(-half_baseline_length, 0, 0);
                left_camera_locn = left_camera_locn.rotate(rob.head.cameraPositionHeadRelative[cam].pan + rob.head.pan + pan, rob.head.cameraPositionHeadRelative[cam].tilt, rob.head.cameraPositionHeadRelative[cam].roll);
                pos3D right_camera_locn = new pos3D(-left_camera_locn.x, -left_camera_locn.y, -left_camera_locn.z);
                left_camera_location[cam] = left_camera_locn.translate(camera_centre_location[cam].x, camera_centre_location[cam].y, camera_centre_location[cam].z);
                right_camera_location[cam] = right_camera_locn.translate(camera_centre_location[cam].x, camera_centre_location[cam].y, camera_centre_location[cam].z);
                right_camera_location[cam].pan = left_camera_location[cam].pan;
            }
        }
Example #36
0
 /// <summary>
 /// copy the position from another
 /// </summary>
 /// <param name="other"></param>
 /// <returns></returns>
 public void copyFrom(pos3D other)
 {
     x = other.x;
     y = other.y;
     z = other.z;
     pan = other.pan;
     tilt = other.tilt;
     roll = other.roll;
 }
Example #37
0
        /// <summary>
        /// Show a diagram of the robot in this pose
        /// This is useful for checking that the positions of cameras have
        /// been calculated correctly
        /// </summary>
        /// <param name="robot">robot object</param>
        /// <param name="img">image as a byte array</param>
        /// <param name="width">width of the image</param>
        /// <param name="height">height of the image</param>
        /// <param name="clearBackground">clear the background before drawing</param>
        /// <param name="min_x_mm">top left x coordinate</param>
        /// <param name="min_y_mm">top left y coordinate</param>
        /// <param name="max_x_mm">bottom right x coordinate</param>
        /// <param name="max_y_mm">bottom right y coordinate</param>
        /// <param name="showFieldOfView">whether to show the field of view of each camera</param>
        public void Show(
            robot rob,
            byte[] img, int width, int height, bool clearBackground,
            int min_x_mm, int min_y_mm,
            int max_x_mm, int max_y_mm, int line_width,
            bool showFieldOfView)
        {
            if (clearBackground)
            {
                for (int i = 0; i < width * height * 3; i++)
                {
                    img[i] = 255;
                }
            }

            // get the positions of the head and cameras for this pose
            pos3D head_location = new pos3D(0, 0, 0);

            pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] left_camera_location   = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] right_camera_location  = new pos3D[rob.head.no_of_stereo_cameras];
            calculateCameraPositions(rob, ref head_location,
                                     ref camera_centre_location,
                                     ref left_camera_location,
                                     ref right_camera_location);

            int w = max_x_mm - min_x_mm;
            int h = max_y_mm - min_y_mm;

            // draw the body
            int xx   = (int)((x - min_x_mm) * width / w);
            int yy   = (int)((y - min_y_mm) * height / h);
            int wdth = (int)(rob.BodyWidth_mm * width / w);
            int hght = (int)(rob.BodyLength_mm * height / h);

            drawing.drawBox(img, width, height, xx, yy, wdth, hght, pan, 0, 255, 0, line_width);

            // draw the head
            xx = (int)((head_location.x - min_x_mm) * width / w);
            yy = (int)((head_location.y - min_y_mm) * height / h);
            int radius = (int)(rob.HeadSize_mm * width / w);

            drawing.drawBox(img, width, height, xx, yy, radius, radius, head_location.pan, 0, 255, 0, line_width);

            // draw the cameras
            for (int cam = 0; cam < rob.head.no_of_stereo_cameras; cam++)
            {
                // draw the left camera
                int xx1 = (int)((left_camera_location[cam].x - min_x_mm) * width / w);
                int yy1 = (int)((left_camera_location[cam].y - min_y_mm) * height / h);
                wdth = (int)((rob.head.cameraBaseline[cam] / 4) * width / w);
                hght = (int)((rob.head.cameraBaseline[cam] / 12) * height / h);
                if (hght < 1)
                {
                    hght = 1;
                }
                drawing.drawBox(img, width, height, xx1, yy1, wdth, hght, left_camera_location[cam].pan + (float)(Math.PI / 2), 0, 255, 0, line_width);

                // draw the right camera
                int xx2 = (int)((right_camera_location[cam].x - min_x_mm) * width / w);
                int yy2 = (int)((right_camera_location[cam].y - min_y_mm) * height / h);
                drawing.drawBox(img, width, height, xx2, yy2, wdth, hght, right_camera_location[cam].pan + (float)(Math.PI / 2), 0, 255, 0, line_width);

                if (showFieldOfView)
                {
                    float half_FOV = rob.head.cameraFOVdegrees[cam] * (float)Math.PI / 360.0f;
                    int   xx_ray   = xx1 + (int)(width * Math.Sin(left_camera_location[cam].pan + half_FOV));
                    int   yy_ray   = yy1 + (int)(width * Math.Cos(left_camera_location[cam].pan + half_FOV));
                    drawing.drawLine(img, width, height, xx1, yy1, xx_ray, yy_ray, 200, 200, 255, 0, false);
                    xx_ray = xx1 + (int)(width * Math.Sin(left_camera_location[cam].pan - half_FOV));
                    yy_ray = yy1 + (int)(width * Math.Cos(left_camera_location[cam].pan - half_FOV));
                    drawing.drawLine(img, width, height, xx1, yy1, xx_ray, yy_ray, 200, 200, 255, 0, false);
                    xx_ray = xx2 + (int)(width * Math.Sin(right_camera_location[cam].pan + half_FOV));
                    yy_ray = yy2 + (int)(width * Math.Cos(right_camera_location[cam].pan + half_FOV));
                    drawing.drawLine(img, width, height, xx2, yy2, xx_ray, yy_ray, 200, 200, 255, 0, false);
                    xx_ray = xx2 + (int)(width * Math.Sin(right_camera_location[cam].pan - half_FOV));
                    yy_ray = yy2 + (int)(width * Math.Cos(right_camera_location[cam].pan - half_FOV));
                    drawing.drawLine(img, width, height, xx2, yy2, xx_ray, yy_ray, 200, 200, 255, 0, false);
                }
            }
        }
Example #38
0
 public pos3D Subtract(pos3D p)
 {
     pos3D new_p = new pos3D(x - p.x, y - p.y, z - p.z);
     new_p.pan = pan - p.pan;
     new_p.tilt = tilt - p.tilt;
     new_p.roll = roll - p.roll;
     return (new_p);
 }
Example #39
0
        /// <summary>
        /// add an observation taken from this pose
        /// </summary>
        /// <param name="stereo_rays">list of ray objects in this observation</param>
        /// <param name="rob">robot object</param>
        /// <param name="LocalGrid">occupancy grid into which to insert the observation</param>
        /// <param name="localiseOnly">if true does not add any mapping particles (pure localisation)</param>
        /// <returns>localisation matching score</returns>
        public float AddObservation(
            List <evidenceRay>[] stereo_rays,
            robot rob,
            dpslam LocalGrid,
            bool localiseOnly)
        {
            // clear the localisation score
            float localisation_score = occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE;

            // get the positions of the head and cameras for this pose
            pos3D head_location = new pos3D(0, 0, 0);

            pos3D[] camera_centre_location = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] left_camera_location   = new pos3D[rob.head.no_of_stereo_cameras];
            pos3D[] right_camera_location  = new pos3D[rob.head.no_of_stereo_cameras];
            calculateCameraPositions(
                rob,
                ref head_location,
                ref camera_centre_location,
                ref left_camera_location,
                ref right_camera_location);

            // iterate over each stereo camera
            int cam = stereo_rays.Length - 1;

            while (cam >= 0)
            {
                // iterate through each ray
                int r = stereo_rays[cam].Count - 1;
                while (r >= 0)
                {
                    // observed ray.  Note that this is in an egocentric
                    // coordinate frame relative to the head of the robot
                    evidenceRay ray = stereo_rays[cam][r];

                    // translate and rotate this ray appropriately for the pose
                    evidenceRay trial_ray =
                        ray.trialPose(
                            camera_centre_location[cam].pan,
                            camera_centre_location[cam].tilt,
                            camera_centre_location[cam].roll,
                            camera_centre_location[cam].x,
                            camera_centre_location[cam].y,
                            camera_centre_location[cam].z);

                    //Console.WriteLine("ray.vert[0] " + (trial_ray.vertices[0] == null).ToString());
                    //Console.WriteLine("ray.vert[1] " + (trial_ray.vertices[1] == null).ToString());

                    // update the grid cells for this ray and update the
                    // localisation score accordingly

                    float score =
                        LocalGrid.Insert(
                            trial_ray, this,
                            rob.head.sensormodel[cam],
                            left_camera_location[cam],
                            right_camera_location[cam],
                            localiseOnly);
                    if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                    {
                        if (localisation_score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE)
                        {
                            localisation_score += score;
                        }
                        else
                        {
                            localisation_score = score;
                        }
                    }
                    r--;
                }
                cam--;
            }

            return(localisation_score);
        }
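
        // A sketch of how the returned score might rank candidate poses during
        // particle filtering; the candidate_poses collection is an assumption
        // for illustration.
        public static particlePose BestPose(
            List<particlePose> candidate_poses,
            List<evidenceRay>[] stereo_rays,
            robot rob, dpslam LocalGrid)
        {
            particlePose best = null;
            float best_score = 0;
            foreach (particlePose candidate in candidate_poses)
            {
                // localiseOnly = true: score without adding mapping particles
                float score = candidate.AddObservation(stereo_rays, rob, LocalGrid, true);
                if (score != occupancygridCellMultiHypothesis.NO_OCCUPANCY_EVIDENCE &&
                    (best == null || score > best_score))
                {
                    best_score = score;
                    best = candidate;
                }
            }
            return best;
        }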
Example #40
0
		public void UpdateSurveyor()
		{
			float horizon = 2000;
			DateTime save_motion_model_time = new DateTime(1990,1,1);
			pos3D map_centre = new pos3D(x,y,z);
			updating = true;
			while (updating)
			{
				if (current_speed_mmsec != 0)
				{
			        motion.speed_uncertainty_forward = current_speed_uncertainty_mmsec;
			        motion.speed_uncertainty_angular = 0.5f / 180.0f * (float)Math.PI;
				}
				else
				{
			        motion.speed_uncertainty_forward = 0;
			        motion.speed_uncertainty_angular = 0;
				}

				UpdatePosition();
				TimeSpan diff = DateTime.Now.Subtract(save_motion_model_time);
				if (diff.TotalSeconds >= 5)
				{
					float dx = map_centre.x - curr_pos.x;
					float dy = map_centre.y - curr_pos.y;
					float dz = map_centre.z - curr_pos.z;
					bool redraw_map = false;
					if (!displaying_motion_model)
					{
					    float dist = (float)Math.Sqrt(dx*dx + dy*dy + dz*dz);
						if (dist > 1000)
						{
							map_centre.x = curr_pos.x;
							map_centre.y = curr_pos.y;
							map_centre.z = curr_pos.z;
							redraw_map = true;
						}
						SaveMotionModel("motion_model.jpg", map_centre.x-horizon, map_centre.y-horizon, map_centre.x+horizon, map_centre.y+horizon, redraw_map);
					}
				}
				Thread.Sleep(500);				
			}
			thread_stopped = true;
		}
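
		// A hedged sketch of running the loop on its own thread and shutting
		// it down via the updating flag; names other than UpdateSurveyor,
		// updating and thread_stopped are assumptions for illustration.
		public void StartSurveyor()
		{
			Thread survey_thread = new Thread(new ThreadStart(UpdateSurveyor));
			survey_thread.Start();
		}

		public void StopSurveyor()
		{
			// clear the flag and wait for the loop to notice and exit
			updating = false;
			while (!thread_stopped) Thread.Sleep(50);
		}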