//Driven by a WinForms timer
private void timer_disparityMeasure_Tick(object sender, EventArgs e)
{
    if (IS_USE_BM_FLAG)
    {
        if (BM_IMG_FLAG)
        {
            //Controls whether images are read from the main form
            tLeftImg = ShareData.LeftImg;
            tRightImg = ShareData.RightImg;
            if (BM_DIS_CAL_FLAG && !IS_IMG_SHOW_PAUSE)
            {
                validDisROI = Cv2.GetValidDisparityROI(ShareData.PixROI1, ShareData.PixROI2, bm_minDisparity, bm_numOfDisparities, bm_SADWinSize);
                //Convert to grayscale
                Cv2.CvtColor(tLeftImg, tLeftImg_g, ColorConversionCodes.BGR2GRAY);
                Cv2.CvtColor(tRightImg, tRightImg_g, ColorConversionCodes.BGR2GRAY);
                //Compute the disparity map with BM
                bmMatch.Compute(tLeftImg_g, tRightImg_g, bmDisImg);
                //Convert to an 8-bit grayscale image
                double min, max;
                Cv2.MinMaxLoc(bmDisImg, out min, out max);
                bmDist8U = new Mat(bmDisImg.Size(), MatType.CV_8UC1);
                bmDisImg.ConvertTo(bmDist8U, MatType.CV_8UC1, 255 / (max - min), -255 * min / (max - min));
                //Convert to a pseudo-color image
                bmDist24U = new Mat(bmDisImg.Size(), MatType.CV_8UC3);
                Cv2.ApplyColorMap(bmDist8U, bmDist24U, ColormapTypes.Jet);
            }
        }
        //Control which image is displayed
        if (IS_SHOW_BM_DISPARITY_FLAG)
        {
            //Tag the type of image being displayed
            IMG_TYPE_TAG = 2;
            Cv2.Rectangle(bmDist24U, validDisROI, new Scalar(0, 0, 255), 1);
            this.pictureBoxIpl_Img.ImageIpl = bmDist24U;
        }
        else
        {
            //Tag the type of image being displayed
            IMG_TYPE_TAG = 1;
            Cv2.Rectangle(tLeftImg, validDisROI, new Scalar(0, 0, 255), 1);
            this.pictureBoxIpl_Img.ImageIpl = tLeftImg; //show the left view
        }
        //validDisROI = new Rect();
    }
    if (IS_USE_SGBM_FLAG)
    {
    }
}
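// The handler above assumes a pre-configured matcher `bmMatch`. A minimal
// sketch of that setup in OpenCvSharp; the parameter values are illustrative
// placeholders, not taken from the original:
using OpenCvSharp;

static StereoBM CreateBmMatcher()
{
    // numDisparities must be a positive multiple of 16,
    // and the SAD window (block size) must be odd.
    StereoBM bm = StereoBM.Create(64, 21);
    bm.MinDisparity = 0;         // plays the role of bm_minDisparity above
    bm.SpeckleWindowSize = 100;  // optional speckle post-filtering
    bm.SpeckleRange = 32;
    return bm;
}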
/// <summary> /// Given the left and right image, computer the disparity map and the 3D point cloud. /// </summary> /// <param name="left">The left image</param> /// <param name="right">The right image</param> /// <param name="outputDisparityMap">The left disparity map</param> /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param> private static void Computer3DPointsFromStereoPair(IInputArray left, IInputArray right, Mat outputDisparityMap, Mat points) { Size size; using (InputArray ia = left.GetInputArray()) size = ia.GetSize(); using (StereoBM stereoSolver = new StereoBM()) { stereoSolver.Compute(left, right, outputDisparityMap); float scale = Math.Max(size.Width, size.Height); //Construct a simple Q matrix, if you have a matrix from cvStereoRectify, you should use that instead using (Matrix <double> q = new Matrix <double>( new double[, ] { { 1.0, 0.0, 0.0, -size.Width / 2 }, //shift the x origin to image center { 0.0, -1.0, 0.0, size.Height / 2 }, //shift the y origin to image center and flip it upside down { 0.0, 0.0, -1.0, 0.0 }, //Multiply the z value by -1.0, { 0.0, 0.0, 0.0, scale } })) //scale the object's coordinate to within a [-0.5, 0.5] cube { CvInvoke.ReprojectImageTo3D(outputDisparityMap, points, q, false, DepthType.Cv32F); } //points = PointCollection.ReprojectImageTo3D(outputDisparityMap, q); } }
/// <summary> /// Given the left and right image, computer the disparity map and the 3D point cloud. /// </summary> /// <param name="left">The left image</param> /// <param name="right">The right image</param> /// <param name="disparityMap">The left disparity map</param> /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param> private static void Computer3DPointsFromStereoPair(Image <Gray, Byte> left, Image <Gray, Byte> right, out Image <Gray, short> disparityMap, out MCvPoint3D32f[] points) { Size size = left.Size; disparityMap = new Image <Gray, short>(size); //using (StereoSGBM stereoSolver = new StereoSGBM(5, 64, 0)) using (StereoBM stereoSolver = new StereoBM()) //using (Mat dm = new Mat()) { stereoSolver.Compute(left, right, disparityMap); float scale = Math.Max(size.Width, size.Height); //Construct a simple Q matrix, if you have a matrix from cvStereoRectify, you should use that instead using (Matrix <double> q = new Matrix <double>( new double[, ] { { 1.0, 0.0, 0.0, -size.Width / 2 }, //shift the x origin to image center { 0.0, -1.0, 0.0, size.Height / 2 }, //shift the y origin to image center and flip it upside down { 0.0, 0.0, -1.0, 0.0 }, //Multiply the z value by -1.0, { 0.0, 0.0, 0.0, scale } })) //scale the object's corrdinate to within a [-0.5, 0.5] cube points = PointCollection.ReprojectImageTo3D(disparityMap, q); } }
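// A possible call site for the variant above; the file names are
// placeholders standing in for a rectified grayscale stereo pair:
using Emgu.CV;
using Emgu.CV.Structure;

using (Image<Gray, byte> left = new Image<Gray, byte>("left.png"))
using (Image<Gray, byte> right = new Image<Gray, byte>("right.png"))
{
    Image<Gray, short> disparityMap;
    MCvPoint3D32f[] points;
    Computer3DPointsFromStereoPair(left, right, out disparityMap, out points);
    // StereoBM writes 16-bit fixed-point values (true disparity * 16);
    // points holds one 3D point per pixel, scaled into a [-0.5, 0.5] cube.
    disparityMap.Dispose();
}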
/// <summary> /// Given the left and right image, computer the disparity map and the 3D point cloud. /// </summary> /// <param name="left">The left image</param> /// <param name="right">The right image</param> /// <param name="outputDisparityMap">The left disparity map</param> /// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param> private static void Computer3DPointsFromStereoPair(IInputArray left, IInputArray right, Mat outputDisparityMap, Mat points, bool handleMissingValues = true) { System.Drawing.Size size; using (InputArray ia = left.GetInputArray()) size = ia.GetSize(); using (StereoBM leftMatcher = new StereoBM()) using (RightMatcher rightMatcher = new RightMatcher(leftMatcher)) using (Mat leftDisparity = new Mat()) using (Mat rightDisparity = new Mat()) using (DisparityWLSFilter wls = new DisparityWLSFilter(leftMatcher)) { leftMatcher.Compute(left, right, leftDisparity); rightMatcher.Compute(right, left, rightDisparity); wls.Filter(leftDisparity, left, outputDisparityMap, rightDisparity, rightView: right); float scale = Math.Max(size.Width, size.Height); //Construct a simple Q matrix, if you have a matrix from cvStereoRectify, you should use that instead using (Matrix <double> q = new Matrix <double>( new double[, ] { { 1.0, 0.0, 0.0, -size.Width / 2 }, //shift the x origin to image center { 0.0, -1.0, 0.0, size.Height / 2 }, //shift the y origin to image center and flip it upside down { 0.0, 0.0, -1.0, 0.0 }, //Multiply the z value by -1.0, { 0.0, 0.0, 0.0, scale } })) //scale the object's coordinate to within a [-0.5, 0.5] cube { CvInvoke.ReprojectImageTo3D(outputDisparityMap, points, q, handleMissingValues, DepthType.Cv32F); //CvInvoke.ReprojectImageTo3D(leftDisparity, points, q, false, DepthType.Cv32F); //CvInvoke.ReprojectImageTo3D(leftDisparity, points, q, handleMissingValues, DepthType.Cv32F); } //points = PointCollection.ReprojectImageTo3D(outputDisparityMap, q); } }
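// A possible call site for the WLS-filtered variant above. The file names
// and the display normalization are assumptions, not from the original:
using Emgu.CV;
using Emgu.CV.CvEnum;

using (Mat left = CvInvoke.Imread("left.png", ImreadModes.Grayscale))
using (Mat right = CvInvoke.Imread("right.png", ImreadModes.Grayscale))
using (Mat disparity = new Mat())
using (Mat points = new Mat())
using (Mat disparity8U = new Mat())
{
    Computer3DPointsFromStereoPair(left, right, disparity, points);
    // Scale the filtered 16-bit disparity into 0..255 for a quick visual check
    CvInvoke.Normalize(disparity, disparity8U, 0, 255, NormType.MinMax, DepthType.Cv8U);
    CvInvoke.Imwrite("disparity.png", disparity8U);
}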
private void Display3D(Mat left, Mat right)
{
    //TODO: try to use StereoSGBM
    using (StereoBM stereoSolver = new StereoBM())
    {
        Mat output = new Mat();
        Mat left8bit = ConvertInto8bitMat(left);
        Mat right8bit = ConvertInto8bitMat(right);
        Mat points = new Mat();
        float scale = Math.Max(left.Size.Width, left.Size.Height);
        if (!_isCalibrate)
        {
            //Construct a simple Q matrix; if you have a matrix from cvStereoRectify, you should use that instead.
            //It scales the object's coordinates to within a [-0.5, 0.5] cube.
            Q = new Matrix<double>(
                new double[,]
                {
                    { 1.0, 0.0, 0.0, -left.Width / 2 },  //shift the x origin to the image center
                    { 0.0, -1.0, 0.0, left.Height / 2 }, //shift the y origin to the image center and flip it upside down
                    { 0.0, 0.0, -1.0, 0.0 },             //multiply the z value by -1.0
                    { 0.0, 0.0, 0.0, scale }
                });
            _isCalibrate = true;
        }
        if (_isCameraMatrixCount)
        {
            //Rectify both views before matching
            Mat map11 = new Mat();
            Mat map12 = new Mat();
            Mat map21 = new Mat();
            Mat map22 = new Mat();
            CvInvoke.InitUndistortRectifyMap(cameraMatrix1, distCoeff1, R1, P1, left8bit.Size, DepthType.Cv16S, map11, map12);
            CvInvoke.InitUndistortRectifyMap(cameraMatrix2, distCoeff2, R2, P2, left8bit.Size, DepthType.Cv16S, map21, map22);
            Mat img1r = new Mat();
            Mat img2r = new Mat();
            CvInvoke.Remap(left8bit, img1r, map11, map12, Inter.Linear);
            CvInvoke.Remap(right8bit, img2r, map21, map22, Inter.Linear);
            left8bit = img1r;
            right8bit = img2r;
        }
        //Compute the disparity map on the (possibly rectified) 8-bit images
        stereoSolver.Compute(left8bit, right8bit, output);
        //stereoSolver.FindStereoCorrespondence(left, right, disparityMap);
        CvInvoke.ReprojectImageTo3D(output, points, Q, false, DepthType.Cv32F);
        //points = PointCollection.ReprojectImageTo3D(output, Q);
        //Flatten the point and color images into one-row-per-pixel arrays for the point cloud
        Mat pointsArray = points.Reshape(points.NumberOfChannels, points.Rows * points.Cols);
        Mat colorArray = left.Reshape(left.NumberOfChannels, left.Rows * left.Cols);
        Mat colorArrayFloat = new Mat();
        colorArray.ConvertTo(colorArrayFloat, DepthType.Cv32F); //note: the float copy is not used below
        WCloud cloud = new WCloud(pointsArray, colorArray);
        Display3DImage(cloud);
    }
}
public override Mat ComputeDepthMap(Image<Bgr, byte> leftImage, Image<Bgr, byte> rightImage)
{
    StereoBM _stereoBM = CreateStereoBM();
    ConvertImageToGray(leftImage, rightImage);

    Mat imageDisparity = new Mat();
    Mat imageToSave = new Mat();
    _stereoBM.Compute(LeftGrayImage, RightGrayImage, imageDisparity);
    imageDisparity.ConvertTo(imageToSave, DepthType.Cv8U); //8-bit copy for saving/preview; the raw 16-bit map is returned
    return imageDisparity;
}
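// CreateStereoBM and ConvertImageToGray are helpers not shown here.
// A plausible sketch of the former; the actual parameters are unknown:
using Emgu.CV;

private StereoBM CreateStereoBM()
{
    // 64 disparity levels, 21x21 SAD window -- illustrative values only
    return new StereoBM(64, 21);
}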
private void ObjectPointsCal(object state)
{
    if (BM_CAL_FLAG) //BM disparity map computation
    {
        //Grab the source images
        Data.leftImg.CopyTo(bm_lsrc);
        Data.rightImg.CopyTo(bm_rsrc);
        //Convert to grayscale; the BM algorithm only handles grayscale images
        CvInvoke.CvtColor(bm_lsrc, bm_lsrc, ColorConversion.Bgr2Gray);
        CvInvoke.CvtColor(bm_rsrc, bm_rsrc, ColorConversion.Bgr2Gray);
        bm.Compute(bm_lsrc, bm_rsrc, bm_distImg);
        //Divide by 16 to obtain the true disparity map -- this data is used again below
        bm_distImg.ConvertTo(bm_distImg, DepthType.Cv32F, 1.0 / 16);
        bm_distImg8U = new Mat(bm_distImg.Size, DepthType.Cv8U, 1);
        //Normalize, then display
        CvInvoke.Normalize(bm_distImg, bm_distImg8U, 0, 255, NormType.MinMax, DepthType.Cv8U);
        this.imageBox1.Image = bm_distImg8U;
    }
    if (SHOWCONTOURS_FLAG) //contour and corner detection
    {
        contourImg.SetTo(new MCvScalar(0, 0, 0));
        Data.leftImg.CopyTo(contourSrc);
        CvInvoke.CvtColor(contourSrc, grayImg, ColorConversion.Bgr2Gray);
        CvInvoke.Canny(grayImg, cannyImg, 100, 200, 3, false); //convert to a binary edge image
        CvInvoke.FindContours(cannyImg, contours, hierarchy, RetrType.External, ChainApproxMethod.ChainApproxNone);
        CvInvoke.DrawContours(contourImg, contours, -1, new MCvScalar(255, 255, 255), 2); //draw all contours
        cornerPoints = gFTT.Detect(grayImg); //detect corner points
        for (int i = 0; i < cornerPoints.Length; i++)
        {
            Point pt = new Point();
            pt.X = (int)cornerPoints[i].Point.X;
            pt.Y = (int)cornerPoints[i].Point.Y;
            CvInvoke.Circle(contourImg, pt, 3, new MCvScalar(0, 0, 255), -1); //draw all corners
        }
        this.imageBox2.Image = contourImg; //display the result
    }
    if (OBJPOINTS_CAL_FLAG) //compute the corners' 3D coordinates and show them in the TextBox
    {
        //Two possible approaches
        Image<Gray, Single> bm_distImg_C = bm_distImg.ToImage<Gray, Single>();
        try
        {
            for (int i = 0; i < cornerPoints.Length; i++)
            {
                MCvPoint3D32f[] ptf = new MCvPoint3D32f[1]; //holds the converted corner; remember to clear the vector afterwards
                ptf[0].X = cornerPoints[i].Point.X;
                ptf[0].Y = cornerPoints[i].Point.Y;
                //Array bounds check
                if (ptf[0].X >= bm_distImg_C.Width)
                {
                    ptf[0].X = bm_distImg_C.Width - 1;
                }
                if (ptf[0].Y >= bm_distImg_C.Height)
                {
                    ptf[0].Y = bm_distImg_C.Height - 1;
                }
                //Read the corner's disparity (in pixels); Image.Data is indexed [row, column, channel]
                ptf[0].Z = bm_distImg_C.Data[(int)ptf[0].Y, (int)ptf[0].X, 0];
                cornerPoints_vec.Push(ptf); //store the converted value
            }
            //Perspective transform: obtain the sparse feature points' coordinates in the camera frame
            CvInvoke.PerspectiveTransform(cornerPoints_vec, objXYZ, Data.Q);

            //Approach 2:
            //CvInvoke.ReprojectImageTo3D(bm_distImg, bm_image3D, Data.Q, true);
            //for (int i = 0; i < cornerPoints.Length; i++)
            //{
            //    MCvPoint3D32f[] ptf = new MCvPoint3D32f[1];
            //    //the same out-of-bounds handling is needed here
            //    ptf[0].X = this.bm_image3D.Data[(int)cornerPoints[i].Point.Y, (int)cornerPoints[i].Point.X, 0];
            //    ptf[0].Y = this.bm_image3D.Data[(int)cornerPoints[i].Point.Y, (int)cornerPoints[i].Point.X, 1];
            //    ptf[0].Z = this.bm_image3D.Data[(int)cornerPoints[i].Point.Y, (int)cornerPoints[i].Point.X, 2];
            //    objXYZ.Push(ptf); //store the computed 3D points
            //}

            //Writing the data into the TextBox must go through Invoke (cross-thread UI access)
            this.Invoke(new UpdateTextBox(UpdateTextBoxFunc), new object[] { });
        }
        catch (Exception e)
        {
            Data.LogString = "[error] " + e.Message;
        }
    }
    cornerPoints_vec.Clear(); //clear the elements; the vector keeps accumulating otherwise
}