Example #1
        /// <summary>
        /// creates a lookup table for sensor models at different visual disparities
        /// </summary>
        public void createLookupTables(
            stereoHead robot_head,
            int gridCellSize_mm)
        {
            int width = 100;
            int height = 100;
            Byte[] img_result = new Byte[width * height * 3];

            for (int cam = 0; cam < robot_head.no_of_stereo_cameras; cam++)
            {
                // update parameters based upon the calibration data
                image_width = robot_head.cameraImageWidth[cam];
                image_height = robot_head.cameraImageHeight[cam];
                baseline = robot_head.cameraBaseline[cam];
                FOV_horizontal = robot_head.cameraFOVdegrees[cam] * (float)Math.PI / 180.0f;
                FOV_vertical = FOV_horizontal * image_height / image_width;

                // create the lookup
                createLookupTable(gridCellSize_mm, img_result, width, height);

                // attach the lookup table to the relevant camera
                robot_head.sensormodel[cam] = ray_model;
            }
        }
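
The method above iterates over every stereo camera on the head, copies that camera's calibration (image size, baseline, field of view) into the model's state, builds the lookup table, and attaches the resulting ray model to the camera. Below is a minimal usage sketch; the enclosing stereoModel class as the owner of this method, the two-camera head, and the 40 mm grid cell size are assumptions for illustration, not taken from the example.

        // usage sketch (assumed context): build sensor model lookup tables
        // for a head with two stereo cameras, using 40mm occupancy grid cells
        stereoHead robot_head = new stereoHead(2);       // hypothetical two-camera head
        stereoModel sensor_model = new stereoModel();    // assumed to contain createLookupTables

        // populates robot_head.sensormodel[cam] for each stereo camera
        sensor_model.createLookupTables(robot_head, 40);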
Example #2
        /// <summary>
        /// create a list of rays to be stored within poses
        /// </summary>
        /// <param name="head">head configuration</param>
        /// <param name="stereo_camera_index">index number for the stereo camera</param>
        /// <returns>list of evidence rays, centred at (0, 0, 0)</returns>
        public List<evidenceRay> createObservation(
            stereoHead head,
            int stereo_camera_index)
        {
            List<evidenceRay> result;

            if (head.features[stereo_camera_index] != null)
            {
                result = createObservation(
                    head.cameraPosition[stereo_camera_index],
                    head.cameraBaseline[stereo_camera_index],
                    head.cameraImageWidth[stereo_camera_index],
                    head.cameraImageHeight[stereo_camera_index],
                    head.cameraFOVdegrees[stereo_camera_index],
                    head.features[stereo_camera_index],
                    head.featureColours[stereo_camera_index],
                    false);
            }
            else
            {
                result = new List<evidenceRay>();
            }

            return (result);
        }
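
This overload dispatches to a lower-level createObservation using the per-camera calibration and feature arrays stored on the head, and falls back to an empty list when no features have been detected. A hedged usage sketch follows; the stereoModel owner object, the populated head, and the camera index 0 are illustrative assumptions.

        // usage sketch (assumptions: sensor_model owns createObservation, and
        // head is a stereoHead whose features[0] were filled by stereo matching)
        stereoModel sensor_model = new stereoModel();
        int stereo_camera_index = 0;
        List<evidenceRay> rays = sensor_model.createObservation(head, stereo_camera_index);

        // when no features were detected the list is simply empty,
        // so callers can iterate without a null check
        foreach (evidenceRay ray in rays)
        {
            // e.g. transform the ray into the robot's pose and insert it into a grid
        }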
Example #3
File: robot.cs Project: kasertim/sentience
        /// <summary>
        /// initialise with the given number of stereo cameras
        /// </summary>
        /// <param name="no_of_stereo_cameras">the number of stereo cameras on the robot (not the total number of cameras)</param>
        /// <param name="rays_per_stereo_camera">the number of rays which will be thrown from each stereo camera per time step</param>
        private void init(
            int no_of_stereo_cameras,
            int rays_per_stereo_camera)
        {
            this.no_of_stereo_cameras = no_of_stereo_cameras;

            // head and shoulders
            head = new stereoHead(no_of_stereo_cameras);

            // sensor model used for mapping
            inverseSensorModel = new stereoModel();

            // set the number of stereo features to be detected and inserted into the grid
            // on each time step.  This figure should be large enough to get reasonable
            // detail, but not so large that the mapping consumes a huge amount of
            // processing resources
            inverseSensorModel.no_of_stereo_features = rays_per_stereo_camera;

            // add local occupancy grid
            createLocalGrid();
	
            // create a motion model for each possible grid
            motion = new motionModel(this, LocalGrid, 100);
            
            // zero encoder positions
            prev_left_wheel_encoder = 0;
            prev_right_wheel_encoder = 0;
        }
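
Because init is private, it would normally be invoked from the robot's constructor once the number of stereo cameras is known. The sketch below shows one plausible way of wiring that up; the constructor signature and the figure of 100 rays per stereo camera per time step are assumptions, not taken from robot.cs.

        // hypothetical constructor wiring (assumed, for illustration only):
        // throw 100 evidence rays from each stereo camera on every time step
        public robot(int no_of_stereo_cameras)
        {
            init(no_of_stereo_cameras, 100);
        }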