/// <summary>
/// Given the left and right image, compute the disparity map and the 3D point cloud.
/// </summary>
/// <param name="left">The left image</param>
/// <param name="right">The right image</param>
/// <param name="outputDisparityMap">The left disparity map</param>
/// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
private static void Computer3DPointsFromStereoPair(IInputArray left, IInputArray right, Mat outputDisparityMap, Mat points)
{
   Size size;
   using (InputArray ia = left.GetInputArray())
      size = ia.GetSize();

   using (StereoBM stereoSolver = new StereoBM())
   {
      stereoSolver.Compute(left, right, outputDisparityMap);

      float scale = Math.Max(size.Width, size.Height);

      //Construct a simple Q matrix; if you have a matrix from cvStereoRectify, you should use that instead
      using (Matrix<double> q = new Matrix<double>(
         new double[,]
         {
            {1.0, 0.0, 0.0, -size.Width / 2},  //shift the x origin to the image center
            {0.0, -1.0, 0.0, size.Height / 2}, //shift the y origin to the image center and flip it upside down
            {0.0, 0.0, -1.0, 0.0},             //multiply the z value by -1.0
            {0.0, 0.0, 0.0, scale}             //scale the object's coordinates to within a [-0.5, 0.5] cube
         }))
      {
         CvInvoke.ReprojectImageTo3D(outputDisparityMap, points, q, false, DepthType.Cv32F);
      }
   }
}
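For context, a minimal driver for the method above might look like the sketch below. The file names, the wrapper method name Run3DReconstruction, and the Mat lifetimes are illustrative assumptions; note that StereoBM only produces a meaningful disparity map when the input pair is already rectified.

// Hypothetical driver (file names and sizes are placeholders); the input
// pair must already be rectified for StereoBM to produce meaningful disparity.
private static void Run3DReconstruction()
{
   using (Mat left = CvInvoke.Imread("left.jpg", Emgu.CV.CvEnum.ImreadModes.Grayscale))
   using (Mat right = CvInvoke.Imread("right.jpg", Emgu.CV.CvEnum.ImreadModes.Grayscale))
   using (Mat disparityMap = new Mat())
   using (Mat points = new Mat())
   {
      Computer3DPointsFromStereoPair(left, right, disparityMap, points);
      // points now holds one 32-bit float (x, y, z) triple per pixel
      Console.WriteLine("Disparity map: {0}x{1}", disparityMap.Width, disparityMap.Height);
   }
}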
public void TestStereoBMCorrespondence()
{
   Image<Gray, Byte> left = new Image<Gray, byte>("left.jpg");
   Image<Gray, Byte> right = new Image<Gray, byte>("right.jpg");
   Image<Gray, Int16> leftDisparity = new Image<Gray, Int16>(left.Size);
   Image<Gray, Int16> rightDisparity = new Image<Gray, Int16>(left.Size);

   StereoBM bm = new StereoBM(Emgu.CV.CvEnum.STEREO_BM_TYPE.BASIC, 0);

   Stopwatch watch = Stopwatch.StartNew();
   bm.FindStereoCorrespondence(left, right, leftDisparity);
   watch.Stop();
   Trace.WriteLine(String.Format("Time used: {0} milliseconds", watch.ElapsedMilliseconds));

   Matrix<double> q = new Matrix<double>(4, 4);
   q.SetIdentity();
   MCvPoint3D32f[] points = PointCollection.ReprojectImageTo3D(leftDisparity * (-16), q);

   float min = (float)1.0e10, max = 0;
   foreach (MCvPoint3D32f p in points)
   {
      if (p.z < min) min = p.z;
      if (p.z > max) max = p.z; //check both bounds; 'else if' here would skip the max test whenever min is updated
   }
   Trace.WriteLine(String.Format("Min : {0}\r\nMax : {1}", min, max));
}
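Note that this test targets the legacy Emgu CV 2.x API (STEREO_BM_TYPE, FindStereoCorrespondence), while the first snippet uses the 3.x API. A minimal sketch of the same correspondence step on Emgu CV 3.x and later follows; the parameter values 64 and 21 are assumptions chosen only for illustration.

// Sketch of the equivalent call on Emgu CV 3.x+: StereoBM is constructed
// with (numberOfDisparities, blockSize) and Compute replaces
// FindStereoCorrespondence. Parameter values below are assumptions.
using (StereoBM bm = new StereoBM(64, 21))
using (Mat disparity = new Mat())
{
   bm.Compute(left, right, disparity); //output is 16-bit fixed point, scaled by 16
}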
private void ProcessImage(Bitmap image)
{
   // Apply filtering and enhancement to the captured frame before processing for blobs
   Bitmap CombinedImage = FilterImage(image);

   // Create the processed L and R images for blob processing
   Bitmap ProcessedL = filterL.Apply(CombinedImage);
   Bitmap ProcessedR = filterR.Apply(CombinedImage);

   // Free the memory from the filtered combined image
   CombinedImage.Dispose();

   // Create the final L and R images based on the source
   Bitmap imageL = filterL.Apply(image);
   Bitmap imageR = filterR.Apply(image);
   // TODO: once we are done experimenting with different techniques, eliminate the L and R images and keep only the overlay.

   // Compute the disparity map
   StereoBM bm = new StereoBM(Emgu.CV.CvEnum.STEREO_BM_TYPE.BASIC, 0);
   disparity = new Image<Gray, float>(xMax / 2, yMax);
   bm.FindStereoCorrespondence(new Image<Gray, Byte>(ProcessedL), new Image<Gray, Byte>(ProcessedR), disparity);
   //CvInvoke.cvConvertScale(disparity, disparity, 16, 0);
   //CvInvoke.cvNormalize(disparity, disparity, 0, 255, Emgu.CV.CvEnum.NORM_TYPE.CV_MINMAX, IntPtr.Zero);
   pictureBoxD.Image = disparity.ToBitmap(320, 240);

   // Process the left image
   pictureBoxL.Image = ProcessBlobs(imageL, ProcessedL, Input.Left);
   // Process the right image
   pictureBoxR.Image = ProcessBlobs(imageR, ProcessedR, Input.Right);
   // Process the combined data
   pictureBoxC.Image = ProcessFingerData();

   // Free memory that is no longer needed
   disparity.Dispose();
   ProcessedL.Dispose();
   ProcessedR.Dispose();
   imageL.Dispose();
   imageR.Dispose();
}
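The commented-out cvConvertScale/cvNormalize lines point at a common display problem: raw disparity values rarely fall inside 0..255, so the picture box tends to show a nearly black image. Below is a minimal normalization sketch using the Image<,> arithmetic operators and MinMax, which are part of Emgu's Image class; it is one way to achieve what those commented-out calls were aiming for, not the only one.

// Linearly stretch the raw disparity into 0..255 before display.
double[] minValues, maxValues;
Point[] minLocations, maxLocations;
disparity.MinMax(out minValues, out maxValues, out minLocations, out maxLocations);

double range = maxValues[0] - minValues[0];
if (range > 0)
{
   // Map [min, max] onto [0, 255] so the disparity has visible contrast
   using (Image<Gray, Byte> display =
      ((disparity - minValues[0]) * (255.0 / range)).Convert<Gray, Byte>())
   {
      pictureBoxD.Image = display.ToBitmap(320, 240);
   }
}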