예제 #1
0
 /// <summary>
 /// constructor: allocates the disparity buffer and creates the objects
 /// used for stereo processing
 /// </summary>
 public processstereo()
 {
     // 3 values per feature -- presumably (x, y, disparity); confirm against usage
     disparities = new float[MAX_FEATURES * 3];
     tracking = new sentienceTracking();
     radar = new sentienceRadar();
     // head with a single stereo camera
     robot_head = new stereoHead(1);
     stereo_model = new stereoModel();
 }
예제 #2
0
    /// <summary>
    /// initialise variables prior to performing test routines
    /// </summary>
    private void init()
    {
        // 3 values per cell across a grid_dimension x grid_dimension layer
        grid_layer = new float[grid_dimension, grid_dimension, 3];

        pos3D_x = new float[4];
        pos3D_y = new float[4];

        // sensor model and a head with 4 stereo cameras used by the tests
        stereo_model = new stereoModel();
        robot_head = new stereoHead(4);
        // 900 = 300 features x 3 values each -- presumably; confirm against usage
        stereo_features = new float[900];
        stereo_uncertainties = new float[900];

        // RGB output buffer: 3 bytes per pixel
        img_rays = new byte[standard_width * standard_height * 3];

        // attach a pixbuf of the standard size, then render the buffer into it
        imgOutput.Pixbuf = GtkBitmap.createPixbuf(standard_width, standard_height);
        GtkBitmap.setBitmap(img_rays, imgOutput);
    }
예제 #3
0
        /// <summary>
        /// create a list of rays to be stored within poses
        /// </summary>
        /// <param name="head">head configuration</param>
        /// <param name="stereo_camera_index">index number for the stereo camera</param>
        /// <returns>list of evidence rays, centred at (0, 0, 0)</returns>
        public List<evidenceRay> createObservation(
            stereoHead head, 
            int stereo_camera_index)
        {
            // no features were detected for this camera: return an empty list
            if (head.features[stereo_camera_index] == null)
                return (new List<evidenceRay>());

            // delegate to the lower-level overload, passing this camera's
            // position, calibration geometry and detected features
            return (createObservation(
                head.cameraPosition[stereo_camera_index],
                head.calibration[stereo_camera_index].baseline,
                head.calibration[stereo_camera_index].leftcam.image_width,
                head.calibration[stereo_camera_index].leftcam.image_height,
                head.calibration[stereo_camera_index].leftcam.camera_FOV_degrees,
                head.features[stereo_camera_index].features,
                head.features[stereo_camera_index].colour,
                head.features[stereo_camera_index].uncertainties,
                false));
        }
예제 #4
0
        /// <summary>
        /// creates a lookup table for sensor models at different visual disparities,
        /// one per stereo camera on the head
        /// </summary>
        /// <param name="robot_head">stereo head whose cameras are modelled</param>
        /// <param name="gridCellSize_mm">grid cell size in millimetres</param>
        public void createLookupTables(stereoHead robot_head, int gridCellSize_mm)
        {
            // scratch image used when generating each lookup table
            const int img_width = 100;
            const int img_height = 100;
            Byte[] lookup_img = new Byte[img_width * img_height * 3];

            for (int cam_index = 0; cam_index < robot_head.no_of_stereo_cameras; cam_index++)
            {
                // pull this camera's calibration data into the model parameters
                image_width = robot_head.calibration[cam_index].leftcam.image_width;
                image_height = robot_head.calibration[cam_index].leftcam.image_height;
                baseline = robot_head.calibration[cam_index].baseline;
                // convert the horizontal field of view from degrees to radians
                FOV_horizontal = robot_head.calibration[cam_index].leftcam.camera_FOV_degrees * (float)Math.PI / 180.0f;
                // vertical FOV scaled from the horizontal FOV by the aspect ratio
                FOV_vertical = FOV_horizontal * image_height / image_width;

                // generate the lookup table for this camera geometry
                createLookupTable(gridCellSize_mm, lookup_img, img_width, img_height);

                // attach the resulting sensor model to the camera
                robot_head.sensormodel[cam_index] = ray_model;
            }
        }
예제 #5
0
 /// <summary>
 /// load a pair of rectified images
 /// </summary>
 /// <param name="stereo_cam_index">index of the stereo camera</param>
 /// <param name="fullres_left">left image</param>
 /// <param name="fullres_right">right image</param>
 /// <param name="head">stereo head geometry and features</param>
 /// <param name="no_of_stereo_features">maximum number of stereo features to detect</param>
 /// <param name="bytes_per_pixel">number of bytes per pixel</param>
 /// <param name="algorithm_type">stereo matching algorithm to use</param>
 /// <returns>average matching score, or -1 when no features were selected</returns>
 public float loadRectifiedImages(int stereo_cam_index, Byte[] fullres_left, Byte[] fullres_right, stereoHead head, int no_of_stereo_features, int bytes_per_pixel,
                                  int algorithm_type)
 {
     // images are already rectified: disable calibration entirely
     setCalibration(null);
     float matching_score = loadImages(stereo_cam_index, fullres_left, fullres_right, head, no_of_stereo_features, bytes_per_pixel, algorithm_type);
     return (matching_score);
 }
예제 #6
0
        /// <summary>
        /// load a pair of images into the stereo correspondence engine,
        /// run stereo matching, and update the head with the detected features
        /// </summary>
        /// <param name="stereo_cam_index">index of the stereo camera</param>
        /// <param name="fullres_left">left image data</param>
        /// <param name="fullres_right">right image data</param>
        /// <param name="head">stereo head geometry and features</param>
        /// <param name="no_of_stereo_features">maximum number of stereo features</param>
        /// <param name="bytes_per_pixel">number of bytes per pixel</param>
        /// <param name="algorithm_type">stereo matching algorithm to use</param>
        /// <returns>average matching score, or -1 when no features were selected</returns>
        private float loadImages(int stereo_cam_index, Byte[] fullres_left, Byte[] fullres_right, stereoHead head, int no_of_stereo_features, int bytes_per_pixel,
                                 int algorithm_type)
        {
            // load the left (true) and right (false) images at this camera's resolution
            stereointerface.loadImage(fullres_left, head.calibration[stereo_cam_index].leftcam.image_width, head.calibration[stereo_cam_index].leftcam.image_height, true, bytes_per_pixel);
            stereointerface.loadImage(fullres_right, head.calibration[stereo_cam_index].leftcam.image_width, head.calibration[stereo_cam_index].leftcam.image_height, false, bytes_per_pixel);

            // calculate stereo disparity features
            int peaks_per_row = 5;
            stereointerface.stereoMatchRun(0, peaks_per_row, algorithm_type);

            // retrieve the features found by the matcher
            stereoFeatures feat = new stereoFeatures(no_of_stereo_features);
            int no_of_selected_features = stereointerface.getSelectedPointFeatures(feat.features);

            // guard clause: nothing matched (was previously tested twice;
            // the second test was a remnant of an edited-out threshold)
            if (no_of_selected_features <= 0)
                return (-1);

            stereoFeatures features;
            if (no_of_selected_features == no_of_stereo_features)
            {
                // every requested feature was found: reuse the buffer directly
                features = feat;
            }
            else
            {
                // copy across only the selected features (3 values per feature)
                features = new stereoFeatures(no_of_selected_features);
                for (int f = 0; f < no_of_selected_features * 3; f++)
                    features.features[f] = feat.features[f];
            }

            // update the head with these features
            head.setStereoFeatures(stereo_cam_index, features);

            // update the colours for each feature
            head.updateFeatureColours(stereo_cam_index, fullres_left);

            return (stereointerface.getAverageMatchingScore());
        }
예제 #7
0
 /// <summary>
 /// load a pair of raw (unrectified) images
 /// </summary>
 /// <param name="stereo_cam_index">index of the stereo camera</param>
 /// <param name="fullres_left">left image</param>
 /// <param name="fullres_right">right image</param>
 /// <param name="head">stereo head geometry and features</param>
 /// <param name="no_of_stereo_features">maximum number of stereo features to detect</param>
 /// <param name="bytes_per_pixel">number of bytes per pixel</param>
 /// <param name="algorithm_type">stereo matching algorithm to use</param>
 /// <returns>average matching score, or -1 when no features were selected</returns>
 public float loadRawImages(int stereo_cam_index, Byte[] fullres_left, Byte[] fullres_right, stereoHead head, int no_of_stereo_features, int bytes_per_pixel,
                            int algorithm_type)
 {
     // raw images require rectification: install this camera's calibration first
     setCalibration(head.calibration[stereo_cam_index]);
     float matching_score = loadImages(stereo_cam_index, fullres_left, fullres_right, head, no_of_stereo_features, bytes_per_pixel, algorithm_type);
     return (matching_score);
 }
예제 #8
0
 /// <summary>
 /// load a pair of rectified images
 /// </summary>
 /// <param name="stereo_cam_index">index of the stereo camera</param>
 /// <param name="fullres_left">left image</param>
 /// <param name="fullres_right">right image</param>
 /// <param name="head">stereo head geometry and features</param>
 /// <param name="no_of_stereo_features">maximum number of stereo features to detect</param>
 /// <param name="bytes_per_pixel">number of bytes per pixel</param>
 /// <param name="algorithm_type">stereo matching algorithm to use</param>
 /// <returns>average matching score, or -1 when no features were selected</returns>
 public float loadRectifiedImages(int stereo_cam_index, Byte[] fullres_left, Byte[] fullres_right, stereoHead head, int no_of_stereo_features, int bytes_per_pixel,
                                  int algorithm_type)
 {
     // the images are already rectified, so no calibration data is applied
     setCalibration(null);
     float score = loadImages(stereo_cam_index, fullres_left, fullres_right, head, no_of_stereo_features, bytes_per_pixel, algorithm_type);
     return (score);
 }
예제 #9
0
        /// <summary>
        /// load a pair of images, run stereo matching and update the head
        /// with the detected stereo features
        /// </summary>
        /// <param name="stereo_cam_index">index of the stereo camera</param>
        /// <param name="fullres_left">left image data</param>
        /// <param name="fullres_right">right image data</param>
        /// <param name="head">stereo head geometry and features</param>
        /// <param name="no_of_stereo_features">maximum number of stereo features</param>
        /// <param name="bytes_per_pixel">number of bytes per pixel</param>
        /// <param name="algorithm_type">stereo matching algorithm to use</param>
        /// <returns>average matching score, or -1 when no features were selected</returns>
        private float loadImages(int stereo_cam_index, Byte[] fullres_left, Byte[] fullres_right, stereoHead head, int no_of_stereo_features, int bytes_per_pixel,
                                 int algorithm_type)
        {
            // load left (true) and right (false) images at this camera's resolution
            stereointerface.loadImage(fullres_left, head.calibration[stereo_cam_index].leftcam.image_width, head.calibration[stereo_cam_index].leftcam.image_height, true, bytes_per_pixel);
            stereointerface.loadImage(fullres_right, head.calibration[stereo_cam_index].leftcam.image_width, head.calibration[stereo_cam_index].leftcam.image_height, false, bytes_per_pixel);

            // calculate stereo disparity features
            int peaks_per_row = 5;
            stereointerface.stereoMatchRun(0, peaks_per_row, algorithm_type);

            // retrieve the features found by the matcher
            stereoFeatures feat = new stereoFeatures(no_of_stereo_features);
            int no_of_selected_features = stereointerface.getSelectedPointFeatures(feat.features);

            // no features selected: report failure
            // (the success condition was previously tested twice; the second
            // test was the remnant of an edited-out minimum-feature threshold)
            if (no_of_selected_features <= 0)
            {
                return (-1);
            }

            stereoFeatures features;
            if (no_of_selected_features == no_of_stereo_features)
            {
                // every requested feature was found: reuse the buffer directly
                features = feat;
            }
            else
            {
                // copy across only the selected features (3 values per feature)
                features = new stereoFeatures(no_of_selected_features);
                for (int f = 0; f < no_of_selected_features * 3; f++)
                {
                    features.features[f] = feat.features[f];
                }
            }

            // update the head with these features
            head.setStereoFeatures(stereo_cam_index, features);

            // update the colours for each feature
            head.updateFeatureColours(stereo_cam_index, fullres_left);

            return (stereointerface.getAverageMatchingScore());
        }
예제 #10
0
 /// <summary>
 /// load a pair of raw (unrectified) images
 /// </summary>
 /// <param name="stereo_cam_index">index of the stereo camera</param>
 /// <param name="fullres_left">left image</param>
 /// <param name="fullres_right">right image</param>
 /// <param name="head">stereo head geometry and features</param>
 /// <param name="no_of_stereo_features">maximum number of stereo features to detect</param>
 /// <param name="bytes_per_pixel">number of bytes per pixel</param>
 /// <param name="algorithm_type">stereo matching algorithm to use</param>
 /// <returns>average matching score, or -1 when no features were selected</returns>
 public float loadRawImages(int stereo_cam_index, Byte[] fullres_left, Byte[] fullres_right, stereoHead head, int no_of_stereo_features, int bytes_per_pixel,
                            int algorithm_type)
 {
     // unrectified input: load the calibration settings for this camera
     // so that rectification can be applied
     setCalibration(head.calibration[stereo_cam_index]);
     float score = loadImages(stereo_cam_index, fullres_left, fullres_right, head, no_of_stereo_features, bytes_per_pixel, algorithm_type);
     return (score);
 }
예제 #11
0
        /// <summary>
        /// menu handler: renders the stereo model's distribution
        /// into the picRays picture box
        /// </summary>
        /// <param name="sender">menu item that raised the event</param>
        /// <param name="e">event arguments</param>
        private void gaussianFunctionToolStripMenuItem_Click(object sender, EventArgs e)
        {
            // 3 values per cell across a grid_dimension x grid_dimension layer
            grid_layer = new float[grid_dimension, grid_dimension, 3];

            pos3D_x = new float[4];
            pos3D_y = new float[4];

            stereo_model = new stereoModel();
            robot_head = new stereoHead(4);
            // 900 = 300 features x 3 values each -- presumably; confirm against usage
            stereo_features = new float[900];
            stereo_uncertainties = new float[900];

            // 24bpp RGB buffer plus the bitmap displayed in the picture box
            img_rays = new Byte[standard_width * standard_height * 3];
            rays = new Bitmap(standard_width, standard_height, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
            picRays.Image = rays;
            
            // draw the distribution into the byte buffer...
            stereo_model.showDistribution(img_rays, standard_width, standard_height);

            // ...then copy the buffer into the displayed bitmap
            BitmapArrayConversions.updatebitmap_unsafe(img_rays, (Bitmap)picRays.Image);
        }
예제 #12
0
        /// <summary>
        /// menu handler: renders multiple stereo ray probabilities
        /// into the picRays picture box
        /// </summary>
        /// <param name="sender">menu item that raised the event</param>
        /// <param name="e">event arguments</param>
        private void multipleStereoRaysToolStripMenuItem_Click(object sender, EventArgs e)
        {
            // 3 values per cell across a grid_dimension x grid_dimension layer
            grid_layer = new float[grid_dimension, grid_dimension, 3];

            pos3D_x = new float[4];
            pos3D_y = new float[4];

            stereo_model = new stereoModel();
            robot_head = new stereoHead(4);
            // 900 = 300 features x 3 values each -- presumably; confirm against usage
            stereo_features = new float[900];
            stereo_uncertainties = new float[900];

            // 24bpp RGB buffer plus the bitmap displayed in the picture box
            img_rays = new Byte[standard_width * standard_height * 3];
            rays = new Bitmap(standard_width, standard_height, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
            picRays.Image = rays;

            // render the ray probabilities into the buffer without mirroring,
            // then copy the buffer into the displayed bitmap
            bool mirror = false;
            stereo_model.showProbabilities(grid_layer, grid_dimension, img_rays, standard_width, standard_height, false, true, mirror);
            BitmapArrayConversions.updatebitmap_unsafe(img_rays, (Bitmap)picRays.Image);

        }
예제 #13
0
File: robot.cs  Project: kasertim/sentience
        /// <summary>
        /// initialise with the given number of stereo cameras
        /// </summary>
        /// <param name="no_of_stereo_cameras">the number of stereo cameras on the robot (not the total number of cameras)</param>
        /// <param name="rays_per_stereo_camera">the number of rays which will be thrown from each stereo camera per time step</param>
        /// <param name="mapping_type">the type of mapping to be used (MAPPING_DPSLAM or MAPPING_SIMPLE)</param>
        private void init(int no_of_stereo_cameras, 
                          int rays_per_stereo_camera,
                          int mapping_type)
        {
            this.no_of_stereo_cameras = no_of_stereo_cameras;
            this.mapping_type = mapping_type;

            // head and shoulders
            head = new stereoHead(no_of_stereo_cameras);

            // sensor model used for mapping
            inverseSensorModel = new stereoModel();

            // set the number of stereo features to be detected and inserted into the grid
            // on each time step.  This figure should be large enough to get reasonable
            // detail, but not so large that the mapping consumes a huge amount of 
            // processing resource
            inverseSensorModel.no_of_stereo_features = rays_per_stereo_camera;

            // one stereo correspondence engine per stereo camera
            correspondence = new stereoCorrespondence[no_of_stereo_cameras];
			for (int i = 0; i < no_of_stereo_cameras; i++)
                correspondence[i] = new stereoCorrespondence(inverseSensorModel.no_of_stereo_features);

            if (mapping_type == MAPPING_DPSLAM)
            {
	            // add local occupancy grids, one per mapping thread
	            LocalGrid = new occupancygridMultiHypothesis[mapping_threads];
	            for (int i = 0; i < mapping_threads; i++) createLocalGrid(i);
	
	            // create a motion model for each possible grid
	            // (100 * (i+1) presumably gives each model a distinct seed/id -- confirm)
	            motion = new motionModel[mapping_threads];
	            for (int i = 0; i < mapping_threads; i++) motion[i] = new motionModel(this, LocalGrid[i], 100 * (i+1));
            }
            
            if (mapping_type == MAPPING_SIMPLE)
            {
                // NOTE(review): intentionally empty? simple mapping appears to
                // need no extra setup here -- confirm this branch is deliberate
            }

            // a list of places where the robot might work or make observations
            worksites = new kmlZone();

            // zero encoder positions
            prev_left_wheel_encoder = 0;
            prev_right_wheel_encoder = 0;
        }