/// <summary>
/// Given the left and right image, compute the disparity map and the 3D point cloud.
/// </summary>
/// <param name="left">The left image</param>
/// <param name="right">The right image</param>
/// <param name="leftDisparityMap">The left disparity map</param>
/// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
private static void Computer3DPointsFromImages(Image<Gray, Byte> left, Image<Gray, Byte> right, out Image<Gray, Int16> leftDisparityMap, out MCvPoint3D32f[] points)
{
   Size size = left.Size;

   using (Image<Gray, Int16> leftDisparity = new Image<Gray, Int16>(size))
   using (Image<Gray, Int16> rightDisparity = new Image<Gray, Int16>(size))
   using (StereoGC gc = new StereoGC(16, 2))
   {
      gc.FindStereoCorrespondence(left, right, leftDisparity, rightDisparity);

      //Negate and scale the graph-cut disparity by 16 for the output map.
      leftDisparityMap = leftDisparity * (-16);

      float scale = Math.Max(size.Width, size.Height);

      //Construct a simple Q matrix, if you have a matrix from cvStereoRectify, you should use that instead.
      //NOTE: divide by 2.0 (not 2) — integer division would truncate the image-center
      //offset by half a pixel for odd-sized images.
      using (Matrix<double> q = new Matrix<double>(
         new double[,] {
            {1.0, 0.0, 0.0, -size.Width / 2.0},  //shift the x origin to image center
            {0.0, -1.0, 0.0, size.Height / 2.0}, //shift the y origin to image center and flip it upside down
            {0.0, 0.0, 16.0, 0.0},               //Multiply the z value by 16
            {0.0, 0.0, 0.0, scale}}))            //scale the object's coordinates to within a [-0.5, 0.5] cube
         points = PointCollection.ReprojectImageTo3D(leftDisparity, q);
   }
}
/// <summary>
/// Exercise StereoGC correspondence on a sample stereo pair, time the solve,
/// and trace the min/max z of the reprojected 3D points.
/// </summary>
public void TestStereoGCCorrespondence()
{
   //All Emgu images/matrices wrap native memory; dispose deterministically.
   using (Image<Gray, Byte> left = new Image<Gray, byte>("left.jpg"))
   using (Image<Gray, Byte> right = new Image<Gray, byte>("right.jpg"))
   using (Image<Gray, Int16> leftDisparity = new Image<Gray, Int16>(left.Size))
   using (Image<Gray, Int16> rightDisparity = new Image<Gray, Int16>(left.Size))
   using (StereoGC stereoSolver = new StereoGC(10, 5))
   using (Matrix<double> q = new Matrix<double>(4, 4))
   {
      Stopwatch watch = Stopwatch.StartNew();
      stereoSolver.FindStereoCorrespondence(left, right, leftDisparity, rightDisparity);
      watch.Stop();
      Trace.WriteLine(String.Format("Time used: {0} milliseconds", watch.ElapsedMilliseconds));

      q.SetIdentity();

      MCvPoint3D32f[] points;
      //leftDisparity * (-16) produces a temporary image that must also be disposed.
      using (Image<Gray, Int16> scaledDisparity = leftDisparity * (-16))
         points = PointCollection.ReprojectImageTo3D(scaledDisparity, q);

      float min = float.MaxValue, max = float.MinValue;
      foreach (MCvPoint3D32f p in points)
      {
         //Independent checks (not else-if): with else-if, a point that lowers min
         //would skip the max test, leaving max stale (e.g. uniform depth => max never set).
         if (p.z < min) min = p.z;
         if (p.z > max) max = p.z;
      }
      Trace.WriteLine(String.Format("Min : {0}\r\nMax : {1}", min, max));
      //ImageViewer.Show(leftDisparity*(-16));
   }
}
/// <summary>
/// Given the left and right image, compute the disparity map and the 3D point cloud.
/// </summary>
/// <param name="left">The left image</param>
/// <param name="right">The right image</param>
/// <param name="leftDisparityMap">The left disparity map</param>
/// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
private static void Computer3DPointsFromImages(Image<Gray, Byte> left, Image<Gray, Byte> right, out Image<Gray, Int16> leftDisparityMap, out MCvPoint3D32f[] points)
{
   Size size = left.Size;

   using (Image<Gray, Int16> leftDisparity = new Image<Gray, Int16>(size))
   using (Image<Gray, Int16> rightDisparity = new Image<Gray, Int16>(size))
   using (StereoGC stereoSolver = new StereoGC(16, 2))
   {
      stereoSolver.FindStereoCorrespondence(left, right, leftDisparity, rightDisparity);

      //Negate and scale the graph-cut disparity by 16 for the output map.
      leftDisparityMap = leftDisparity * (-16);

      //Construct a simple Q matrix, if you have a matrix from cvStereoRectify, you should use that instead.
      //NOTE: divide by 2.0 (not 2) — integer division would truncate the image-center
      //offset by half a pixel for odd-sized images.
      using (Matrix<double> q = new Matrix<double>(
         new double[,] {
            {1.0, 0.0, 0.0, -size.Width / 2.0},  //shift the x origin to image center
            {0.0, 1.0, 0.0, -size.Height / 2.0}, //shift the y origin to image center
            {0.0, 0.0, -16.0, 0.0},              //Multiply the z value by -16
            {0.0, 0.0, 0.0, 1.0}}))
         points = PointCollection.ReprojectImageTo3D(leftDisparity, q);
   }
}