/// <summary>
        /// Compute the disparity map and the 3D point cloud for a rectified stereo pair,
        /// using the SGBM parameters stored in the Calibration settings.
        /// </summary>
        /// <param name="left">The left image</param>
        /// <param name="right">The right image</param>
        /// <param name="disparityMap">The left disparity map</param>
        /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
        private void Computer3DPointsFromStereoPair(Image <Gray, Byte> left, Image <Gray, Byte> right, out Image <Gray, short> disparityMap, out MCvPoint3D32f[] points)
        {
            disparityMap = new Image <Gray, short>(left.Size);

            // Smoothness penalties recommended by OpenCV's stereo_match sample:
            // 8 * channels * SADWindowSize^2 and 32 * channels * SADWindowSize^2 (P2 > P1 required).
            int penalty1 = 8 * 1 * Calibration.SAD * Calibration.SAD;
            int penalty2 = 32 * 1 * Calibration.SAD * Calibration.SAD;

            using (StereoSGBM matcher = new StereoSGBM(
                       Calibration.MinDisparities,
                       Calibration.NumDisparities,
                       Calibration.SAD,
                       penalty1,
                       penalty2,
                       Calibration.MaxDiff,
                       Calibration.PrefilterCap,
                       Calibration.UniquenessRatio,
                       Calibration.Speckle,
                       Calibration.SpeckleRange,
                       Calibration.DisparityMode))
            {
                // Semi-global block matching (modified H. Hirschmuller HH08 algorithm).
                matcher.FindStereoCorrespondence(left, right, disparityMap);

                // Reproject the disparity image into 3D space with the calibration's Q matrix.
                points = PointCollection.ReprojectImageTo3D(disparityMap, Calibration.Q);
            }
        }
Example #2
0
        /// <summary>
        /// Compute the left disparity map and the 3D point cloud for a stereo pair
        /// using the graph-cut (StereoGC) correspondence algorithm.
        /// </summary>
        /// <param name="left">The left image</param>
        /// <param name="right">The right image</param>
        /// <param name="leftDisparityMap">The left disparity map</param>
        /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
        private static void Computer3DPointsFromImages(Image <Gray, Byte> left, Image <Gray, Byte> right, out Image <Gray, Int16> leftDisparityMap, out MCvPoint3D32f[] points)
        {
            Size imageSize = left.Size;

            using (Image <Gray, Int16> dispLeft = new Image <Gray, Int16>(imageSize))
            using (Image <Gray, Int16> dispRight = new Image <Gray, Int16>(imageSize))
            using (StereoGC matcher = new StereoGC(16, 2))
            {
                matcher.FindStereoCorrespondence(left, right, dispLeft, dispRight);

                // Scale by -16 for the output map; this mirrors the -16.0 z entry in the
                // Q matrix below (presumably compensating StereoGC's disparity sign/scale —
                // NOTE(review): confirm against the StereoGC documentation).
                leftDisparityMap = dispLeft * (-16);

                // Hand-built Q matrix; prefer the one produced by cvStereoRectify when available.
                double[,] qValues = new double[, ] {
                    { 1.0, 0.0, 0.0, -imageSize.Width / 2 },  // shift the x origin to the image center
                    { 0.0, 1.0, 0.0, -imageSize.Height / 2 }, // shift the y origin to the image center
                    { 0.0, 0.0, -16.0, 0.0 },                 // multiply the z value by -16
                    { 0.0, 0.0, 0.0, 1.0 }
                };
                using (Matrix <double> q = new Matrix <double>(qValues))
                {
                    points = PointCollection.ReprojectImageTo3D(dispLeft, q);
                }
            }
        }
        /// <summary>
        /// Compute the disparity map and the 3D point cloud for a rectified stereo pair
        /// using block matching, scaling the cloud into a [-0.5, 0.5] cube.
        /// </summary>
        /// <param name="left">The left image</param>
        /// <param name="right">The right image</param>
        /// <param name="disparityMap">The left disparity map</param>
        /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
        private static void Computer3DPointsFromStereoPair(Image <Gray, Byte> left, Image <Gray, Byte> right, out Image <Gray, short> disparityMap, out MCvPoint3D32f[] points)
        {
            Size imageSize = left.Size;

            disparityMap = new Image <Gray, short>(imageSize);

            using (StereoBM matcher = new StereoBM())
            {
                matcher.Compute(left, right, disparityMap);

                // Dividing by the larger image dimension keeps coordinates within a unit cube.
                float normalizer = Math.Max(imageSize.Width, imageSize.Height);

                // Hand-built Q matrix; prefer the one produced by cvStereoRectify when available.
                double[,] qValues = new double[, ] {
                    { 1.0, 0.0, 0.0, -imageSize.Width / 2 },  // shift the x origin to the image center
                    { 0.0, -1.0, 0.0, imageSize.Height / 2 }, // shift the y origin to the image center, flipped upside down
                    { 0.0, 0.0, -1.0, 0.0 },                  // multiply the z value by -1.0
                    { 0.0, 0.0, 0.0, normalizer }             // scale coordinates into a [-0.5, 0.5] cube
                };
                using (Matrix <double> q = new Matrix <double>(qValues))
                {
                    points = PointCollection.ReprojectImageTo3D(disparityMap, q);
                }
            }
        }
 /// <summary>
 /// Reproject a disparity map into a 3D point cloud using the current calibration.
 /// </summary>
 /// <param name="disparityMap">The disparity map to reproject.</param>
 /// <returns>The reprojected 3D points, or null when no calibration model is loaded.</returns>
 private MCvPoint3D32f[] Computer3DPointsFromStereoPair(Mat disparityMap)
 {
     if (_calibrationManager != null && _calibrationManager.calibrationModel != null)
     {
         // Bug fix: the reprojected points were previously assigned to a local variable
         // and discarded, so the method always returned null even with a valid calibration.
         return(PointCollection.ReprojectImageTo3D(disparityMap, _calibrationManager.calibrationModel.Q));
     }
     return(null);
 }
        /// <summary>
        /// Given the left and right image, compute the left disparity map and the 3D point cloud.
        /// </summary>
        /// <param name="left">The left image</param>
        /// <param name="right">The right image</param>
        /// <param name="disparityMap">The left disparity map</param>
        /// <param name="points">The reprojected 3D point cloud</param>
        private void Computer3DPointsFromStereoPair(Image <Gray, Byte> left, Image <Gray, Byte> right, out Image <Gray, short> disparityMap, out MCvPoint3D32f[] points)
        {
            Size size = left.Size;

            disparityMap = new Image <Gray, short>(size);
            // NOTE(review): `stereoSolver` is not declared in this method — presumably a
            // field of the enclosing class (the commented-out using below suggests it was
            // once a local StereoSGBM). Confirm it is initialized and disposed elsewhere.
            //using (StereoSGBM stereoSolver = new StereoSGBM(5, 64, 0, 0, 0, 0, 0, 0, 0, 0, false))
            stereoSolver.FindStereoCorrespondence(left, right, disparityMap);

            // NOTE(review): `Q` also resolves from the enclosing scope — presumably the
            // rectification matrix from cvStereoRectify; verify before reuse.
            //Construct a simple Q matrix, if you have a matrix from cvStereoRectify, you should use that instead
            points = PointCollection.ReprojectImageTo3D(disparityMap, Q);
        }
Example #6
0
        /// <summary>
        /// Compute the disparity map and the 3D point cloud for a rectified stereo pair
        /// using basic block matching and an identity-scaled Q matrix.
        /// </summary>
        /// <param name="left">The left image</param>
        /// <param name="right">The right image</param>
        /// <param name="disparityMap">The left disparity map</param>
        /// <param name="points">The reprojected 3D point cloud</param>
        private static void Computer3DPointsFromStereoPair(Image <Gray, Byte> left, Image <Gray, Byte> right, out Image <Gray, short> disparityMap, out MCvPoint3D32f[] points)
        {
            Size imageSize = left.Size;

            disparityMap = new Image <Gray, short>(imageSize);

            using (StereoBM matcher = new StereoBM(Emgu.CV.CvEnum.STEREO_BM_TYPE.BASIC, 0))
            {
                matcher.FindStereoCorrespondence(left, right, disparityMap);

                // Hand-built Q matrix; prefer the one produced by cvStereoRectify when available.
                double[,] qValues = new double[, ] {
                    { 1.0, 0.0, 0.0, -imageSize.Width / 2 },  // shift the x origin to the image center
                    { 0.0, 1.0, 0.0, -imageSize.Height / 2 }, // shift the y origin to the image center
                    { 0.0, 0.0, 1.0, 0.0 },                   // keep the z value unchanged
                    { 0.0, 0.0, 0.0, 1.0 }
                };
                using (Matrix <double> q = new Matrix <double>(qValues))
                {
                    points = PointCollection.ReprojectImageTo3D(disparityMap, q);
                }
            }
        }
Example #7
0
        /// <summary>
        /// Compute the disparity map for a rectified stereo pair and — when a Q matrix
        /// is available — reproject it into a 3D point cloud.
        /// </summary>
        /// <param name="left">The left image</param>
        /// <param name="right">The right image</param>
        /// <param name="cfg">Optional SGBM tuning parameters; defaults are used when null.</param>
        /// <returns>The disparity map, plus the 3D points (null when no Q matrix is set).</returns>
        public Computer3DPointsFromStereoPairOutput Computer3DPointsFromStereoPair(Image <Gray, Byte> left, Image <Gray, Byte> right, Compute3DFromStereoCfg cfg = null)
        {
            if (cfg == null)
            {
                cfg = new Compute3DFromStereoCfg();
            }

            Computer3DPointsFromStereoPairOutput result = new Computer3DPointsFromStereoPairOutput();
            result.disparityMap = new Image <Gray, short>(left.Size);

            // cfg.fullDP enables the full-scale 2-pass dynamic programming algorithm,
            // which consumes O(W*H*numDisparities) bytes — large for 640x480, huge for HD.
            using (StereoSGBM matcher = new StereoSGBM(cfg.minDispatities, cfg.numDisparities, cfg.SAD, cfg.P1, cfg.P2, cfg.disp12MaxDiff, cfg.PreFilterCap, cfg.UniquenessRatio, cfg.Speckle, cfg.SpeckleRange, cfg.fullDP))
            {
                // Semi-global block matching (modified H. Hirschmuller HH08 algorithm).
                matcher.Compute(left, right, result.disparityMap);

                // Reproject to 3D only when a rectification Q matrix has been provided.
                if (Q != null)
                {
                    result.points = PointCollection.ReprojectImageTo3D(result.disparityMap, Q);
                }
            }
            return(result);
        }
        /// <summary>
        /// Compute the disparity map and the 3D point cloud for a rectified stereo pair
        /// using block matching, scaling the cloud into a [-0.5, 0.5] cube.
        /// </summary>
        /// <param name="left">The left image</param>
        /// <param name="right">The right image</param>
        /// <param name="outputDisparityMap">The left disparity map</param>
        /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
        private static void Computer3DPointsFromStereoPair(IInputArray left, IInputArray right, Mat outputDisparityMap, out MCvPoint3D32f[] points)
        {
            Size imageSize;

            using (InputArray ia = left.GetInputArray())
                imageSize = ia.GetSize();

            using (StereoBM matcher = new StereoBM())
            {
                matcher.Compute(left, right, outputDisparityMap);

                // Dividing by the larger image dimension keeps coordinates within a unit cube.
                float normalizer = Math.Max(imageSize.Width, imageSize.Height);

                // Hand-built Q matrix; prefer the one produced by cvStereoRectify when available.
                double[,] qValues = new double[, ] {
                    { 1.0, 0.0, 0.0, -imageSize.Width / 2 },  // shift the x origin to the image center
                    { 0.0, -1.0, 0.0, imageSize.Height / 2 }, // shift the y origin to the image center, flipped upside down
                    { 0.0, 0.0, -1.0, 0.0 },                  // multiply the z value by -1.0
                    { 0.0, 0.0, 0.0, normalizer }             // scale coordinates into a [-0.5, 0.5] cube
                };
                using (Matrix <double> q = new Matrix <double>(qValues))
                {
                    points = PointCollection.ReprojectImageTo3D(outputDisparityMap, q);
                }
            }
        }
        /// <summary>
        /// Compute the disparity map and the 3D point cloud for a rectified stereo pair,
        /// reading all SGBM tuning parameters from the UI sliders.
        /// </summary>
        /// <param name="left">The left image</param>
        /// <param name="right">The right image</param>
        /// <param name="disparityMap">The left disparity map</param>
        /// <param name="points">The reprojected 3D point cloud</param>
        private void Computer3DPointsFromStereoPair(Image <Gray, Byte> left, Image <Gray, Byte> right, out Image <Gray, short> disparityMap, out MCvPoint3D32f[] points)
        {
            disparityMap = new Image <Gray, short>(left.Size);

            // Maximum minus minimum disparity; always > 0 and must be divisible by 16.
            int disparityRange = GetSliderValue(Num_Disparities);

            // Minimum possible disparity; usually 0 unless rectification shifted the images.
            int minimumDisparity = GetSliderValue(Min_Disparities);

            // Matched block size: an odd number >= 1, normally in the 3..11 range.
            int blockSize = GetSliderValue(SAD_Window);

            // Smoothness penalties (the algorithm requires P2 > P1): penalty on a disparity
            // change of +/-1 and of more than 1 between neighbor pixels respectively.
            // The stereo_match.cpp sample recommends 8 * channels * blockSize^2 and
            // 32 * channels * blockSize^2.
            int smoothnessP1 = 8 * 1 * blockSize * blockSize;
            int smoothnessP2 = 32 * 1 * blockSize * blockSize;

            // Maximum allowed difference (integer pixels) in the left-right disparity check;
            // non-positive disables the check.
            int maxLeftRightDiff = GetSliderValue(Disp12MaxDiff);

            // Truncation bound for the prefiltered x-derivative values that feed the
            // Birchfield-Tomasi pixel cost function.
            int prefilterCap = GetSliderValue(pre_filter_cap);

            // Margin in percent by which the best (minimum) cost must beat the second best;
            // 5-15 is normally good enough.
            int uniqueness = GetSliderValue(uniquenessRatio);

            // Speckle filtering window; set to a positive multiple of 16 to enable
            // (16 or 32 is normally good enough).
            int speckleWindow = GetSliderValue(Speckle_Window);

            // Maximum disparity variation within each connected component.
            int speckleDelta = GetSliderValue(specklerange);

            // `fullDP` (set globally for ease) enables the full-scale 2-pass dynamic
            // programming algorithm: O(W*H*numDisparities) bytes — large for 640x480,
            // huge for HD-size pictures.
            using (StereoSGBM stereoSolver = new StereoSGBM(minimumDisparity, disparityRange, blockSize, smoothnessP1, smoothnessP2, maxLeftRightDiff, prefilterCap, uniqueness, speckleWindow, speckleDelta, fullDP))
            {
                // Semi-global block matching (modified H. Hirschmuller HH08 algorithm).
                stereoSolver.FindStereoCorrespondence(left, right, disparityMap);

                // Reproject the disparity image to 3D space using the class-level Q matrix.
                points = PointCollection.ReprojectImageTo3D(disparityMap, Q);
            }
        }