This is a variation of "Stereo Processing by Semiglobal Matching and Mutual Information" by Heiko Hirschmüller. We match blocks rather than individual pixels; hence the algorithm is called SGBM (semi-global block matching).
Inheritance: Emgu.Util.UnmanagedObject
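Typical usage follows the pattern in the examples below. Here is a minimal sketch assuming the Emgu CV 3.x API (named constructor parameters, the Compute extension method, and CvInvoke.Normalize, as used in the later examples); the class name, method name, file paths, and parameter values are illustrative placeholders, not recommendations.

using Emgu.CV;
using Emgu.CV.Structure;

public static class StereoSgbmSketch
{
    // Computes an 8-bit disparity map from a rectified grayscale stereo pair.
    public static Image<Gray, byte> ComputeDisparity(string leftPath, string rightPath)
    {
        using (var left = new Image<Gray, byte>(leftPath))
        using (var right = new Image<Gray, byte>(rightPath))
        using (var disparity = new Image<Gray, short>(left.Size))   // raw output is CV_16S, scaled by 16
        using (var sgbm = new StereoSGBM(
            minDisparity: 0,
            numDisparities: 64,          // must be divisible by 16
            blockSize: 9,                // odd, typically 3..11
            p1: 8 * 1 * 9 * 9,           // 8  * channels * blockSize^2
            p2: 32 * 1 * 9 * 9,          // 32 * channels * blockSize^2
            disp12MaxDiff: 1,
            preFilterCap: 31,
            uniquenessRatio: 10,
            speckleWindowSize: 100,
            speckleRange: 2,
            mode: StereoSGBM.Mode.SGBM))
        {
            sgbm.Compute(left, right, disparity);

            // Normalize the 16-bit disparity to CV_8U for display.
            var disp8 = new Image<Gray, byte>(left.Size);
            CvInvoke.Normalize(disparity, disp8, 0, 255,
                Emgu.CV.CvEnum.NormType.MinMax, Emgu.CV.CvEnum.DepthType.Cv8U);
            return disp8;
        }
    }
}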
Example #1
        private Image<Gray, short> GetDispMap(VideoSource.StereoFrameSequenceElement stereoFrame)
        {
            int numDisparities = GetSliderValue(Num_Disparities);
            int minDisparities = GetSliderValue(Min_Disparities);
            int SAD = GetSliderValue(SAD_Window);
            int P1 = 8 * 1 * SAD * SAD;//GetSliderValue(P1_Slider);
            int P2 = 32 * 1 * SAD * SAD;//GetSliderValue(P2_Slider);
            int disp12MaxDiff = GetSliderValue(Disp12MaxDiff);
            int PreFilterCap = GetSliderValue(pre_filter_cap);
            int UniquenessRatio = GetSliderValue(uniquenessRatio);
            int SpeckleWindow = GetSliderValue(Speckle_Window);
            int SpeckleRange = GetSliderValue(specklerange);

            using (var gpuSBM = new Emgu.CV.GPU.GpuStereoBM(numDisparities, SAD))
            using (StereoSGBM stereoSolver = new StereoSGBM(
                            minDisparity: minDisparities,
                            numDisparities: numDisparities,
                            blockSize: SAD,
                            p1: P1,
                            p2: P2,
                            disp12MaxDiff: disp12MaxDiff,
                            preFilterCap: PreFilterCap,
                            uniquenessRatio: UniquenessRatio,
                            speckleRange: SpeckleRange,
                            speckleWindowSize: SpeckleWindow,
                            mode: StereoSGBM.Mode.SGBM
                            ))
            using (var leftImg = new Image<Gray, byte>(stereoFrame.LeftRawFrame))
            using (var rightImg = new Image<Gray, byte>(stereoFrame.RightRawFrame))
            using (var dispImg = new Image<Gray, short>(leftImg.Size))
            using (var gpuLeftImg = new Emgu.CV.GPU.GpuImage<Gray, byte>(leftImg))
            using (var gpuRightImg = new Emgu.CV.GPU.GpuImage<Gray, byte>(rightImg))
            using (var gpuDispImg = new Emgu.CV.GPU.GpuImage<Gray, byte>(leftImg.Size))
            {
                var dispMap = new Image<Gray, short>(leftImg.Size);
                //CPU
                //stereoSolver.FindStereoCorrespondence(leftImg, rightImg, dispImg);
                //dispMap = dispImg.Convert<Gray, short>();
                //
                //GPU
                gpuSBM.FindStereoCorrespondence(gpuLeftImg, gpuRightImg, gpuDispImg, null);
                dispMap = gpuDispImg.ToImage().Convert<Gray, short>();
                //

                return dispMap;
            }
        }
Example #2
        private MCvPoint3D32f[] Get3DFeatures(StereoCameraParams stereoParams, VideoSource.StereoFrameSequenceElement stereoFrame, out Image<Gray, short> disparityImg)
        {
            using (var gpuSBM = new Emgu.CV.GPU.GpuStereoBM(128, 19))
            using (StereoSGBM stereoSolver = new StereoSGBM(
                            minDisparity: 0,
                            numDisparities: 32,
                            blockSize: 0,
                            p1: 0,
                            p2: 0,
                            disp12MaxDiff: 0,
                            preFilterCap: 0,
                            uniquenessRatio: 0,
                            speckleRange: 0,
                            speckleWindowSize: 0,
                            mode: StereoSGBM.Mode.HH
                            ))
            using (var leftImg = new Image<Gray, byte>(stereoFrame.LeftRawFrame))
            using (var rightImg = new Image<Gray, byte>(stereoFrame.RightRawFrame))
            using (var dispImg = new Image<Gray, short>(leftImg.Size))
            using (var gpuLeftImg = new Emgu.CV.GPU.GpuImage<Gray, byte>(leftImg))
            using (var gpuRightImg = new Emgu.CV.GPU.GpuImage<Gray, byte>(rightImg))
            using (var gpuDispImg = new Emgu.CV.GPU.GpuImage<Gray, byte>(leftImg.Size))
            {
                var dispMap = new Image<Gray, short>(leftImg.Size);
                //CPU
                //stereoSolver.FindStereoCorrespondence(leftImg, rightImg, dispImg);
                //dispMap = dispImg.Convert<Gray, short>();
                //
                //GPU
                gpuSBM.FindStereoCorrespondence(gpuLeftImg, gpuRightImg, gpuDispImg, null);
                dispMap = gpuDispImg.ToImage().Convert<Gray, short>();
                //

                var points = PointCollection.ReprojectImageTo3D(dispMap, stereoParams.Q);
                disparityImg = dispMap;
                return points;
            }
        }
Example #3
        public Image<Gray, short> GetDispMapCPU(Image<Gray, byte> leftImg, Image<Gray, byte> rightImg, DispMapFounderParameters parameters)
        {
            var ap = (StereoSGBMDispMapFounderParameters)parameters;

            using (StereoSGBM sgbm = new StereoSGBM(
                minDisparity: ap.MinDisparity,
                numDisparities: ap.NumDisparities,
                blockSize: ap.BlockSize,
                p1: ap.P1,
                p2: ap.P2,
                disp12MaxDiff: ap.Disp12MaxDiff,
                preFilterCap: ap.PreFilterCap,
                uniquenessRatio: ap.UniquenessRatio,
                speckleWindowSize: ap.SpeckleWindowSize,
                speckleRange: ap.SpeckleRange,
                mode: ap.Mode))
            //using (var leftProcessImg = leftImg.Copy())
            //using (var rightProcessImg = rightImg.Copy())
            {
                var leftProcessImg = leftImg;
                var rightProcessImg = rightImg;
                var dispMap = new Image<Gray, short>(leftProcessImg.Size);
                //TODO: dirty hack
                try
                {
                    sgbm.FindStereoCorrespondence(leftProcessImg, rightProcessImg, dispMap);
                }
                catch
                {
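                    // Swallow whatever FindStereoCorrespondence throws (see the TODO above)
                    // and fall through to returning the disparity map as-is.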

                }
                return dispMap;
            }
        }
Example #4
 private Image<Gray, short> FindDisparity1(Image<Bgr, Byte> image1, Image<Bgr, Byte> image2)
 {
     var disparity = new Image<Gray, short>(image1.Size);
     using (
         StereoSGBM stereoSolver = new StereoSGBM(-(int)minDispSlider_.Value, (int)numDispSlider_.Value, (int)sadWindowSizeSlider_.Value, (int)p1Slider_.Value, (int)p2Slider_.Value,
             (int)disp12MaxDiffSlider_.Value, (int)preFilterCapSlider_.Value, (int)uniquenessRatioSlider_.Value, (int)speckleWindowSizeSlider_.Value, (int)speckleRangeSlider_.Value, StereoSGBM.Mode.SGBM)
         )
     {
         stereoSolver.FindStereoCorrespondence(image1.Convert<Gray, Byte>(), image2.Convert<Gray, Byte>(),
             disparity);
     }
     return disparity;
 }
Example #5
        public static Func<Bitmap> Go(uint w,uint h, byte[] one, byte[] two)
        {
            GCHandle gchOne = default(GCHandle), gchTwo = default(GCHandle);
            try {
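                // Pin the managed input buffers so their memory can be wrapped by Image<,>
                // and read by the native OpenCV code through a stable pointer.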
                gchOne = GCHandle.Alloc(one,GCHandleType.Pinned);
                var ptrOne = gchOne.AddrOfPinnedObject();
                gchTwo = GCHandle.Alloc(two,GCHandleType.Pinned);
                var ptrTwo = gchTwo.AddrOfPinnedObject();

                var l_image = new Image<Gray,short>((int)w,(int)h,1,ptrOne)
                    .Resize(0.25,Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
                var r_image = new Image<Gray,short>((int)w,(int)h,1,ptrTwo)
                    .Resize(0.25,Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);

                var disparity = new Image<Gray, short>(l_image.Size);
                using (StereoSGBM stereoSolver = new StereoSGBM(
                     Config.MinDisparity
                    ,Config.NumDisparities
                    ,Config.SADWindowSize
                    ,Config.P1
                    ,Config.P2
                    ,Config.Disp12MaxDiff
                    ,Config.PreFilterCap
                    ,Config.UniquenessRatio
                    ,Config.SpeckleWindowSize
                    ,Config.SpeckleRange
                    ,StereoSGBM.Mode.SGBM
                )) {
                    stereoSolver.FindStereoCorrespondence(
                        l_image.Convert<Gray, byte>()
                        ,r_image.Convert<Gray, byte>()
                        ,disparity
                    );
                }
                // make it lazy so that it doesn't have to copy it twice
                return () => disparity.Bitmap;
            }
            finally {
                if (gchOne.IsAllocated) { gchOne.Free(); }
                if (gchTwo.IsAllocated) { gchTwo.Free(); }
            }
        }
Example #6
 public static Image<Gray, byte> Compute(StereoSgbmModel model)
 {
     var disparity = new Image<Gray, short>(model.Image1.Size);
     using (var stereoSolver = new StereoSGBM(
         model.MinDisparity,
         model.NumDisparity,
         model.SadWindowSize,
         model.P1,
         model.P2,
         model.Disparity12MaxDiff,
         model.PreFilterCap,
         model.UniquenessRatio,
         model.SpeckleWindowSize,
         model.SpeckleRange,
         model.Mode))
     {
         stereoSolver.FindStereoCorrespondence(model.Image1, model.Image2, disparity);
     }
     return disparity.Convert<Gray, byte>();
 }
Example #7
        public void TestStereoSGBMCorrespondence()
        {
            Image<Gray, Byte> left = new Image<Gray, byte>("left.jpg");
             Image<Gray, Byte> right = new Image<Gray, byte>("right.jpg");
             Size size = left.Size;

             Image<Gray, Int16> disparity = new Image<Gray, Int16>(size);

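            // Note: this uses an older Emgu API in which the last constructor argument is a bool fullDP flag;
            // newer versions take a StereoSGBM.Mode value instead (see the similar test in a later example).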
            StereoSGBM bm = new StereoSGBM(10, 64, 0, 0, 0, 0, 0, 0, 0, 0, false);
            Stopwatch watch = Stopwatch.StartNew();
            bm.FindStereoCorrespondence(left, right, disparity);
            watch.Stop();

            Trace.WriteLine(String.Format("Time used: {0} milliseconds", watch.ElapsedMilliseconds));

            Matrix<double> q = new Matrix<double>(4, 4);
            q.SetIdentity();
            MCvPoint3D32f[] points = PointCollection.ReprojectImageTo3D(disparity * (-16), q);

            float min = (float)1.0e10, max = 0;
            foreach (MCvPoint3D32f p in points)
            {
                if (p.z < min) min = p.z;
                else if (p.z > max) max = p.z;
            }
            Trace.WriteLine(String.Format("Min : {0}\r\nMax : {1}", min, max));
        }
Example #8
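 // In the Emgu.CV sources this P/Invoke declaration carries a [DllImport] attribute (omitted from
 // this excerpt) binding it to Emgu's native library (cvextern).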
 internal static extern IntPtr CvStereoSGBMCreate(
    int minDisparity, int numDisparities, int blockSize,
    int P1, int P2, int disp12MaxDiff,
    int preFilterCap, int uniquenessRatio,
    int speckleWindowSize, int speckleRange,
    StereoSGBM.Mode mode, ref IntPtr stereoMatcher);
Example #9
      public void TestStereoSGBMCorrespondence()
      {
         Image<Gray, Byte> left = EmguAssert.LoadImage<Gray, byte>("aloeL.jpg");
         Image<Gray, Byte> right = EmguAssert.LoadImage<Gray, byte>("aloeR.jpg");
         Size size = left.Size;

         Image<Gray, Int16> disparity = new Image<Gray, Int16>(size);

         StereoSGBM bm = new StereoSGBM(10, 64, 0, 0, 0, 0, 0, 0, 0, 0, StereoSGBM.Mode.SGBM);
         Stopwatch watch = Stopwatch.StartNew();
         bm.Compute(left, right, disparity);
         watch.Stop();

         EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds", watch.ElapsedMilliseconds));

         Matrix<double> q = new Matrix<double>(4, 4);
         q.SetIdentity();
         Image<Gray, Int16> disparityScaled = disparity * (-16);
         MCvPoint3D32f[] points = PointCollection.ReprojectImageTo3D(disparityScaled.Mat, q);

         float min = (float) 1.0e10, max = 0;
         foreach (MCvPoint3D32f p in points)
         {
            if (p.Z < min)
               min = p.Z;
            else if (p.Z > max)
               max = p.Z;
         }
         EmguAssert.WriteLine(String.Format("Min : {0}\r\nMax : {1}", min, max));

      }
Example #10
        /// <summary>
        /// Given the left and right image, compute the disparity map and the 3D point cloud.
        /// </summary>
        /// <param name="left">The left image</param>
        /// <param name="right">The right image</param>
        /// <param name="disparityMap">The left disparity map</param>
        /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
        private void Computer3DPointsFromStereoPair(Image<Gray, Byte> left, Image<Gray, Byte> right, out Image<Gray, short> disparityMap, out MCvPoint3D32f[] points)
        {
            Size size = left.Size;

            disparityMap = new Image<Gray, short>(size);

            int P1 = 8 * 1 * Calibration.SAD * Calibration.SAD;//GetSliderValue(P1_Slider);
            int P2 = 32 * 1 * Calibration.SAD * Calibration.SAD;//GetSliderValue(P2_Slider);

            using (StereoSGBM stereoSolver = new StereoSGBM(
                Calibration.MinDisparities, 
                Calibration.NumDisparities, 
                Calibration.SAD, 
                P1, 
                P2, 
                Calibration.MaxDiff, 
                Calibration.PrefilterCap,
                Calibration.UniquenessRatio,
                Calibration.Speckle,
                Calibration.SpeckleRange,
                Calibration.DisparityMode))
            //using (StereoBM stereoSolver = new StereoBM(Emgu.CV.CvEnum.STEREO_BM_TYPE.BASIC, 0))
            {
                stereoSolver.FindStereoCorrespondence(left, right, disparityMap);//Computes the disparity map using: 
                /*GC: graph cut-based algorithm
                  BM: block matching algorithm
                  SGBM: modified H. Hirschmuller algorithm HH08*/
                points = PointCollection.ReprojectImageTo3D(disparityMap, Calibration.Q); //Reprojects disparity image to 3D space.
            }
        }
Example #11
 private void OnParametersChanged(object sender, System.EventArgs e)
 {
     algorithm = new StereoSGBM(configuration.MinDisparity, configuration.NumDisparities,
         configuration.BlockSize, configuration.P1, configuration.P2, configuration.Disp12MaxDiff,
         configuration.PreFilterCap, configuration.UniquenessRatio);
 }
Example #12
        /// <summary>
        /// Given the left and right image, compute the disparity map and the 3D point cloud.
        /// </summary>
        /// <param name="leftImage">Left image</param>
        /// <param name="rightImage">Right image</param>
        /// <param name="disparityMap">Left disparity map</param>
        /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
        private void Computer3DPointsFromStereoPair(Image<Gray, Byte> leftImage, Image<Gray, Byte> rightImage, out Image<Gray, short> disparityMap, out MCvPoint3D32f[] points)
        {
            // Create disparity map.
            disparityMap = new Image<Gray, short>(leftImage.Size);

            // This is maximum disparity minus minimum disparity.
            // Always greater than 0. In the current implementation this parameter must be divisible by 16.
            int numDisparities = this.GetSliderValue(this.trbDisparities);

            // The minimum possible disparity value. Normally it is 0,
            // but sometimes rectification algorithms can shift images,
            // so this parameter needs to be adjusted accordingly.
            int minDisparities = this.GetSliderValue(this.trbMinDisparities);

            // The matched block size. Must be an odd number >=1.
            // Normally, it should be somewhere in 3..11 range.
            int SAD = this.GetSliderValue(this.trbSADWindow);

            // P1, P2 – Parameters that control disparity smoothness. The larger the values, the smoother the disparity.
            // P1 is the penalty on the disparity change by plus or minus 1 between neighbor pixels.
            // P2 is the penalty on the disparity change by more than 1 between neighbor pixels.
            // The algorithm requires P2 > P1.
            // See stereo_match.cpp sample where some reasonably good P1 and P2 values are shown 
            // (like 8 * number_of_image_channels * SADWindowSize * SADWindowSize and 32 * number_of_image_channels * SADWindowSize*SADWindowSize , respectively).
            int P1 = 8 * 1 * SAD * SAD;
            int P2 = 32 * 1 * SAD * SAD;

            // Maximum allowed difference (in integer pixel units) in the left-right disparity check.
            // Set it to non-positive value to disable the check.
            int disp12MaxDiff = GetSliderValue(trbDisp12MaxDiff);

            // Truncation value for the prefiltered image pixels. 
            // The algorithm first computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval. 
            // The result values are passed to the Birchfield-Tomasi pixel cost function.
            int PreFilterCap = GetSliderValue(trbPreFilterCap);

            // The margin in percent by which the best (minimum) computed cost function value
            // should “win” the second best value to consider the found match correct.
            // Normally, a value within the 5-15 range is good enough.
            int UniquenessRatio = GetSliderValue(trbUniquenessRatio);

            // Maximum size of smooth disparity regions to consider their noise speckles and invalidate.
            // Set it to 0 to disable speckle filtering.
            // Otherwise, set it somewhere in the 50-200 range.
            int Speckle = GetSliderValue(trbSpeckleWindow);

            // Maximum disparity variation within each connected component.
            // If you do speckle filtering, set it to some positive value,
            // multiple of 16. Normally, 16 or 32 is good enough.
            int SpeckleRange = GetSliderValue(trbSpeckleRange);

            // Set the mode to HH to run the full-scale two-pass dynamic programming algorithm.
            // It will consume O(W*H*numDisparities) bytes, which is large for 640x480 stereo and huge for HD-size pictures.
            // The mode is set globally (this.stereoMode); the default is SGBM.

            using (StereoSGBM stereoSolver = new StereoSGBM(minDisparities, numDisparities, SAD, P1, P2, disp12MaxDiff, PreFilterCap, UniquenessRatio, Speckle, SpeckleRange, this.stereoMode))
            {
                // Computes the disparity map using:
                stereoSolver.FindStereoCorrespondence(leftImage, rightImage, disparityMap);
                // GC: Graph Cut-based algorithm
                // BM: Block Matching algorithm
                // SGBM: modified H. Hirschmuller algorithm HH08.

                // Reprojects disparity image to 3D space.
                points = PointCollection.ReprojectImageTo3D(disparityMap, this.Q); 
            }
        }
Example #13
        private void GetDisparityMap()
        {
            minDisparity = TBminDisparity.Value;        //Minimum possible disparity value. Normally, it is zero but sometimes rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
                                                        //Default is zero; it should be set to a negative value if negative disparities are possible (depends on the angle between the cameras' views and the distance of the measured object to the cameras).
            numDisparities = TBnumDisparities.Value;    //Maximum disparity minus minimum disparity. The value is always greater than zero. In the current implementation, this parameter must be divisible by 16.
            SADWindowSize = TBSADWindowSize.Value;      //(= blockSize) Matched block size. It must be an odd number >=1 . Normally, it should be somewhere in the 3..11 range. Use 0 for default.
            P1 = TBp1.Value;                            //The first parameter controlling the disparity smoothness. It is the penalty on the disparity change by plus or minus 1 between neighbor pixels.
                                                        //Reasonably good value is 8*number_of_image_channels*SADWindowSize*SADWindowSize. Use 0 for default
            P2 = TBp2.Value;                            //The second parameter controlling the disparity smoothness. It is the penalty on the disparity change by more than 1 between neighbor pixels.
                                                        //The algorithm requires p2 > p1. Reasonably good value is 32*number_of_image_channels*SADWindowSize*SADWindowSize. Use 0 for default
            disp12MaxDiff = TBdisp12MaxDiff.Value;      //Maximum allowed difference (in integer pixel units) in the left-right disparity check. Set it to a non-positive value to disable the check.
            preFilterCap = TBpreFilterCap.Value;        //Truncation value for the prefiltered image pixels. The algorithm first computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval.
                                                        //The result values are passed to the Birchfield-Tomasi pixel cost function.
            uniquenessRatio = TBuniquenessRatio.Value;  //Margin in percentage by which the best (minimum) computed cost function value should “win” the second best value to consider the found match correct.
                                                        //Normally, a value within the 5-15 range is good enough.
            speckleWindowSize = TBspeckleWindowSize.Value;   //Maximum size of smooth disparity regions to consider their noise speckles and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the 50-200 range
            speckleRange = TBspeckleRange.Value;        //Maximum disparity variation within each connected component. If you do speckle filtering, set the parameter to a positive value,
                                                        //it will be implicitly multiplied by 16. Normally, 1 or 2 is good enough.
            StereoSGBM.Mode mode = StereoSGBM.Mode.SGBM;
                                                        //mode (Optional): Set it to HH to run the full-scale two-pass dynamic programming algorithm. It will consume O(W*H*numDisparities) bytes,
                                                        //which is large for 640x480 stereo and huge for HD-size pictures. By default, it is set to SGBM.

            //Computes disparity map for the specified stereo pair
            using (StereoSGBM sgbm = new StereoSGBM(minDisparity, numDisparities, SADWindowSize, P1, P2, disp12MaxDiff, preFilterCap, uniquenessRatio, speckleWindowSize, speckleRange, mode))
            {
                sgbm.Compute(imageLeft, imageRight, disparity);
            }
            //Since disparity will be either CV_16S or CV_32F, it needs to be compressed and normalized to CV_8U
            CvInvoke.Normalize(disparity, disp8, 0, 255, Emgu.CV.CvEnum.NormType.MinMax, Emgu.CV.CvEnum.DepthType.Cv8U);
        }