/// <summary>
/// Returns an undistorted copy of <paramref name="sourceImage"/> using the
/// precomputed intrinsic parameters (<c>icp</c>).
/// The source image is possibly at reduced size; it is upscaled first so the
/// undistortion map (computed for the full calibration size) applies correctly.
/// </summary>
/// <param name="sourceImage">Input image; may be smaller than the calibration size.</param>
/// <returns>A new 24bpp Bitmap of the calibration size, undistorted. Caller owns it.</returns>
public Bitmap GetUndistortedImage(Bitmap sourceImage)
{
    Matrix<float> mapx;
    Matrix<float> mapy;
    icp.InitUndistortMap(imageSize.Width, imageSize.Height, out mapx, out mapy);

    // Upscale the input onto a scratch bitmap at the calibration size.
    Bitmap source = new Bitmap(imageSize.Width, imageSize.Height, PixelFormat.Format24bppRgb);
    // FIX: dispose the Graphics object (it was leaked in the original).
    using (Graphics g = Graphics.FromImage(source))
    {
        g.DrawImage(sourceImage, 0, 0, imageSize.Width, imageSize.Height);
    }

    Bitmap result = new Bitmap(source.Width, source.Height, PixelFormat.Format24bppRgb);

    BitmapData sourceImageData = source.LockBits(
        new Rectangle(0, 0, source.Width, source.Height),
        ImageLockMode.ReadOnly,
        source.PixelFormat);
    // FIX: the result bitmap is written through Scan0, so lock it WriteOnly
    // (the original locked the destination ReadOnly).
    BitmapData resultImageData = result.LockBits(
        new Rectangle(0, 0, result.Width, result.Height),
        ImageLockMode.WriteOnly,
        result.PixelFormat);
    try
    {
        // Wrap the locked pixel buffers as Emgu images (no copy) and remap.
        Image<Bgr, Byte> cvSource = new Image<Bgr, Byte>(sourceImageData.Width, sourceImageData.Height, sourceImageData.Stride, sourceImageData.Scan0);
        Image<Bgr, Byte> cvResult = new Image<Bgr, Byte>(resultImageData.Width, resultImageData.Height, resultImageData.Stride, resultImageData.Scan0);
        CvInvoke.cvRemap(cvSource, cvResult, mapx, mapy, 0, new MCvScalar(0));
    }
    finally
    {
        source.UnlockBits(sourceImageData);
        result.UnlockBits(resultImageData);
        // FIX: release the scratch bitmap and the native map matrices (were leaked).
        source.Dispose();
        mapx.Dispose();
        mapy.Dispose();
    }
    return (result);
}
/// <summary>
/// Grabs an initial frame from the capture device and precomputes the
/// undistortion maps sized for that frame.
/// </summary>
private void Initialize()
{
    // Cache the first frame; its dimensions drive the undistort-map size.
    this.OriginalImage = m_Capture.QueryFrame();

    var frame = this.OriginalImage;
    m_IntrinsicCameraParameters.InitUndistortMap(
        frame.Width,
        frame.Height,
        out m_UndistortMapX,
        out m_UndistortMapY);
}
/// <summary>
/// Undistorts <paramref name="img"/> using the calibrated intrinsic parameters.
/// </summary>
/// <param name="img">Distorted input image; not modified.</param>
/// <returns>A new undistorted image of the same size.</returns>
public Image<Bgr, byte> GetCorrectImage(Image<Bgr, byte> img)
{
    Matrix<float> map1, map2;
    intrinsicParameters.InitUndistortMap(img.Width, img.Height, out map1, out map2);

    // FIX: dispose the native map matrices once the remap is done (they were leaked).
    using (map1)
    using (map2)
    {
        //remap the image to the particular intrinsics
        //In the current version of EMGU any pixel that is not corrected is set to transparent allowing the original image to be displayed if the same
        //image is mapped back; in the future this should be controllable through the flag '0'
        Image<Bgr, Byte> temp = img.CopyBlank();
        CvInvoke.cvRemap(img, temp, map1, map2, 0, new MCvScalar(0));
        return (temp);
    }
}
/// <summary>
/// Main image-processing callback, raised for every frame grabbed by the capture
/// device. Depending on <c>currentMode</c> it either collects chessboard frames,
/// runs the intrinsic calibration, or remaps (undistorts) the live image.
/// </summary>
/// <param name="sender">Event source (the capture device).</param>
/// <param name="e">Unused event arguments.</param>
void _Capture_ImageGrabbed(object sender, EventArgs e)
{
    // Grab a frame from the capture device; keep a gray copy for detection.
    img = _Capture.RetrieveBgrFrame();
    Gray_Frame = img.Convert<Gray, Byte>();

    // Stage 1: detect the chessboard and (optionally) buffer frames for calibration.
    if (currentMode == Mode.SavingFrames)
    {
        corners = CameraCalibration.FindChessboardCorners(Gray_Frame, patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
        //we use this loop so we can show a colour image rather than a gray:
        //CameraCalibration.DrawChessboardCorners(Gray_Frame, patternSize, corners);
        if (corners != null) // chessboard found
        {
            // Refine the detected corners to sub-pixel accuracy.
            Gray_Frame.FindCornerSubPix(new PointF[1][] { corners }, new System.Drawing.Size(11, 11), new System.Drawing.Size(-1, -1), new MCvTermCriteria(30, 0.1));

            // If the Go button has been pressed, start acquiring frames;
            // otherwise we only display the detected points.
            if (start_Flag)
            {
                Frame_array_buffer[frame_buffer_savepoint] = Gray_Frame.Copy(); // store the frame
                frame_buffer_savepoint++;                                       // advance buffer position
                // Buffer full -> switch to the calibration stage.
                if (frame_buffer_savepoint == Frame_array_buffer.Length)
                {
                    currentMode = Mode.Caluculating_Intrinsics;
                }
            }
            // Draw the detected corners: first one highlighted, the rest connected by lines.
            img.Draw(new CircleF(corners[0], 3), new Bgr(System.Drawing.Color.Yellow), 1);
            for (int i = 1; i < corners.Length; i++)
            {
                img.Draw(new LineSegment2DF(corners[i - 1], corners[i]), line_colour_array[i], 2);
                img.Draw(new CircleF(corners[i], 3), new Bgr(System.Drawing.Color.Yellow), 1);
            }
            // Fixed delay so the user can move the board between captured frames.
            // NOTE(review): sleeping inside the grab callback stalls the capture thread.
            Thread.Sleep(100);
        }
        corners = null; // reset for the next frame
    }

    // Stage 2: run the intrinsic calibration over the buffered frames.
    if (currentMode == Mode.Caluculating_Intrinsics)
    {
        // (could be folded into the loop above to increase speed)
        for (int k = 0; k < Frame_array_buffer.Length; k++)
        {
            corners_points_list[k] =
                CameraCalibration.FindChessboardCorners(Frame_array_buffer[k], patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
            // Sub-pixel refinement of the detected corners.
            // NOTE(review): the receiver here is Gray_Frame (the latest live frame),
            // not Frame_array_buffer[k] the corners came from — looks like a bug; confirm.
            Gray_Frame.FindCornerSubPix(corners_points_list, new System.Drawing.Size(11, 11), new System.Drawing.Size(-1, -1), new MCvTermCriteria(30, 0.1));

            // Real-world object points: a flat grid with 20-unit spacing (Z = 0).
            List<MCvPoint3D32f> object_list = new List<MCvPoint3D32f>();
            for (int i = 0; i < height; i++)
            {
                for (int j = 0; j < width; j++)
                {
                    object_list.Add(new MCvPoint3D32f(j * 20.0F, i * 20.0F, 0.0F));
                }
            }
            corners_object_list[k] = object_list.ToArray();
        }

        // The reprojection error should be as close to 0 as possible.
        //If Emgu.CV.CvEnum.CALIB_TYPE == CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function
        //if you use FIX_ASPECT_RATIO and FIX_FOCAL_LEGNTH options, these values needs to be set in the intrinsic parameters before the CalibrateCamera function is called. Otherwise 0 values are used as default.
        double error = CameraCalibration.CalibrateCamera(corners_object_list, corners_points_list, Gray_Frame.Size, IC, Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_RATIONAL_MODEL, new MCvTermCriteria(30, 0.1), out EX_Param);

        System.Windows.Forms.MessageBox.Show("Intrinsic Calculation Error: " + error.ToString(), "Results", MessageBoxButtons.OK, MessageBoxIcon.Information); //display the results to the user
        currentMode = Mode.Calibrated;
        // Re-enable the Write button on the UI thread.
        this.Dispatcher.Invoke((Action)(() =>
        {
            Write_BTN.IsEnabled = true;
        }));
    }

    // Stage 3: once calibrated, undistort every incoming frame.
    if (currentMode == Mode.Calibrated)
    {
        // Compute the undistortion maps from the calibrated intrinsics.
        Matrix<float> Map1, Map2;
        IC.InitUndistortMap(img.Width, img.Height, out Map1, out Map2);

        //remap the image to the particular intrinsics
        //In the current version of EMGU any pixel that is not corrected is set to transparent allowing the original image to be displayed if the same
        //image is mapped backed, in the future this should be controllable through the flag '0'
        Image<Bgr, Byte> temp = img.CopyBlank();
        CvInvoke.cvRemap(img, temp, Map1, Map2, 0, new MCvScalar(0));
        img = temp.Copy();

        // Re-enable the UI so another calibration can be started.
        SetButtonState(true);
        start_Flag = false;
    }

    // Display the (possibly corrected) frame scaled to the picture box width.
    Image<Bgr, byte> mainImage = img.Resize(((double)Main_Picturebox.Width / (double)img.Width), Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
    Main_Picturebox.Image = mainImage;
}
/// <summary>
/// Capture/processing loop (runs forever on its own thread). While capture is
/// enabled it reads frames, undistorts them with a dummy calibration, converts
/// RGB -> BGR and pushes the result to the UI; otherwise it releases the device
/// and polls again after a short delay.
/// </summary>
public void Process()
{
    Mat m = new Mat();
    Mat mProcessed = new Mat();
    while (true)
    {
        if (_captureEnabled)
        {
            try
            {
                // Lazily create the capture device the first time capture is enabled.
                if (_capture == null)
                {
                    _capture = new VideoCapture();
                }

                //Read the camera data to the mat
                //Must use VideoCapture.Read function for UWP to read image from capture.
                //Note that m is in 3 channel RGB color space,
                //our default color space is BGR for 3 channel Mat
                _capture.Read(m);

                if (!m.IsEmpty)
                {
                    if (p == null)
                    {
                        //Create a dummy camera calibration matrix for testing
                        //Use your own if you have calibrated your camera
                        p = new IntrinsicCameraParameters(5);
                        // Principal point at the frame center (width -> column, height -> row).
                        int centerY = m.Width >> 1;
                        int centerX = m.Height >> 1;
                        CvInvoke.SetIdentity(p.IntrinsicMatrix, new MCvScalar(1.0));
                        p.IntrinsicMatrix.Data[0, 2] = centerY;
                        p.IntrinsicMatrix.Data[1, 2] = centerX;
                        p.IntrinsicMatrix.Data[2, 2] = 1;
                        // Tiny radial distortion so the undistort has a visible effect.
                        p.DistortionCoeffs.Data[0, 0] = -0.000003;
                        // NOTE(review): mapx/mapy are computed here but never used below —
                        // Undistort() works from the matrices directly; consider
                        // CvInvoke.Remap(m, mProcessed, mapx, mapy, ...) or dropping this call.
                        p.InitUndistortMap(m.Width, m.Height, out mapx, out mapy);
                    }
                    // NOTE(review): this copy is immediately overwritten by Undistort below;
                    // it looks redundant — confirm before removing.
                    m.CopyTo(mProcessed);
                    CvInvoke.Undistort(m, mProcessed, p.IntrinsicMatrix, p.DistortionCoeffs);

                    //mProcess is in the same color space as m, which is RGB,
                    //needed to change to BGR
                    CvInvoke.CvtColor(mProcessed, mProcessed, ColorConversion.Rgb2Bgr);

                    //Can apply simple image processing to the captured image, let just invert the pixels
                    //CvInvoke.BitwiseNot(m, m);

                    //render the processed image on the top imageview
                    // NOTE(review): the IAsyncAction returned by RunAsync is discarded
                    // (fire-and-forget); UI updates may lag behind the capture loop.
                    CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, async () =>
                    {
                        var wb = mProcessed.ToWritableBitmap();
                        image1.Source = wb;
                    });

                    //The data in the mat that is read from the camera will
                    //be drawn to the Image control
                    CvInvoke.WinrtImshow();
                }
            }
            catch (Exception e)
            {
                // Surface any capture/processing error in the UI text block.
                CoreApplication.MainView.CoreWindow.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, async () =>
                {
                    textBlock.Text = e.Message;
                });
            }
        }
        else
        {
            // Capture disabled: release the device and poll again after ~100 ms.
            if (_capture != null)
            {
                _capture.Dispose();
                _capture = null;
            }
            Task t = Task.Delay(100);
            t.Wait();
        }
    }
}
/// <summary>
/// Experimental calibration routine: loads a fixed test image, pushes a set of
/// hand-picked chessboard-like corners, runs CalibrateCamera against a synthetic
/// object-point grid, and shows the source and annotated images side by side.
/// </summary>
public void MainStuff()
{
    SRC_Img = new Image<Gray, byte>(@"C:\Users\Админ\Downloads\image63341262,2002.png");
    Corrected_Img = SRC_Img.Clone();
    //CvInvoke.CLAHE(SRC_Img, 40, new Size(8, 8), Corrected_Img);
    //CvInvoke.FindChessboardCorners(SRC_Img, new Size(8,8), vec);

    // Hand-picked image-space corner coordinates (4 columns x 3 rows).
    #region
    PointF[] corners = new PointF[] {
        new PointF(100, 196), new PointF(261, 190), new PointF(417, 192), new PointF(584, 201),
        new PointF(111, 277), new PointF(284, 287), new PointF(458, 291), new PointF(580, 284),
        new PointF(130, 368), new PointF(276, 395), new PointF(429, 391), new PointF(563, 365)
    };
    #endregion
    VectorOfPointF vec = new VectorOfPointF();
    vec.Push(corners);

    // Matching world-space grid: X at thirds of the width, Y at halves of the height, Z = 0.
    // X: 0 - 480 / 3 ||0 159 329 479
    // Y: 0 - 210 / 2 || 0 104 209
    MCvPoint3D32f[] objCorners = new MCvPoint3D32f[] {
        new MCvPoint3D32f(0, 0, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, 0, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, 0, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, 0, 0.0f),
        new MCvPoint3D32f(0, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, SRC_Img.Height / 2 - 1, 0.0f),
        new MCvPoint3D32f(0, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, SRC_Img.Height - 1, 0.0f)
    };
    /*
     * for (int i = 0; i < objCorners.Length; i++)
     * {
     *     objCorners[i].X += SRC_Img.Width / 2;
     *     objCorners[i].Y += SRC_Img.Height / 2;
     * }*/
    //VectorOfPointF objvec = new VectorOfPointF();
    //objvec.Push(objCorners);

    //Corrected_Img = FindTable(SRC_Img);

    // Identity-scaled camera matrix with a guessed principal point.
    Matrix<double> CameraMatrix = new Matrix<double>(3, 3, 1);
    CameraMatrix[0, 0] = 1;
    CameraMatrix[1, 1] = 1;
    CameraMatrix[2, 2] = 1;
    CameraMatrix[0, 2] = 349.417;
    CameraMatrix[1, 2] = 286.417;
    Mat newCameraMatrix = CvInvoke.GetDefaultNewCameraMatrix(CameraMatrix);
    //CvInvoke.Undistort(SRC_Img, Corrected_Img,
    //CvInvoke.FindChessboardCorners(SRC_Img, new System.Drawing.Size(5,5),

    // Outputs for CalibrateCamera.
    Mat distCoeffs = new Mat(1, 5, DepthType.Cv32F, 1);
    Mat rotCoeffs = new Mat();
    Mat translVectors = new Mat();
    MCvTermCriteria TermCriteria = new MCvTermCriteria(30, 0.1);

    // Draw the hand-picked corners on a fresh copy for visual inspection.
    Corrected_Img = SRC_Img.Clone();
    CvInvoke.DrawChessboardCorners(Corrected_Img, new System.Drawing.Size(4, 3), vec, true);
    //CvInvoke.CornerSubPix(SRC_Img, vec, new Size(2, 2), new Size(-1, -1), TermCriteria);
    //CvInvoke.DrawChessboardCorners(SRC_Img, new System.Drawing.Size(4, 3), objvec, true);
    /*
     * try
     * {
     *     CvInvoke.Remap(SRC_Img, Corrected_Img, vec, objvec, Inter.Nearest, BorderType.Constant);
     * } catch (Exception ex) { string s = ex.Message; }
     */
    VectorOfPoint3D32F obj3dvec = new VectorOfPoint3D32F();
    obj3dvec.Push(objCorners);

    try
    {
        MCvPoint3D32f[][] corners_object_list = new MCvPoint3D32f[1][];
        PointF[][] corners_points_list = new PointF[1][];
        corners_object_list[0] = objCorners;
        corners_points_list[0] = corners;
        // Single-view calibration; r is the RMS reprojection error.
        double r = CvInvoke.CalibrateCamera(obj3dvec, vec, SRC_Img.Size, CameraMatrix, distCoeffs, rotCoeffs, translVectors, CalibType.Default, TermCriteria);
        //double error = CameraCalibration.CalibrateCamera(corners_object_list, corners_points_list, Gray_Frame.Size, IC, Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_RATIONAL_MODEL, out EX_Param);
        r += 0; // no-op; handy as a debugger breakpoint anchor
        //Matrix<float> dist = new Matrix<float>( new float[] {
        //CvInvoke.Undistort(SRC_Img, Corrected_Img, cameraMatrix, );
    }
    catch (Exception ex)
    {
        // NOTE(review): calibration failures are swallowed silently and 'ex' is unused;
        // consider at least logging the message.
    }

    // NOTE(review): IC, Map1/Map2 and 'stuff' below are computed but never used for display.
    IntrinsicCameraParameters IC = new IntrinsicCameraParameters(8);
    Matrix<float> Map1, Map2;
    IC.InitUndistortMap(SRC_Img.Width, SRC_Img.Height, out Map1, out Map2);
    Image<Gray, Byte> stuff = Undistort(SRC_Img);

    // Show source and annotated images, both scaled to imageBox1's dimensions.
    imageBox1.Image = SRC_Img.Resize(imageBox1.Width, imageBox1.Height, Inter.Linear);
    imageBox2.Image = Corrected_Img.Resize(imageBox1.Width, imageBox1.Height, Inter.Linear);
}
/// <summary>
/// Runs stereo calibration over the collected left/right chessboard corner sets,
/// computes undistortion maps for both cameras, rectifies the pair, and stores
/// everything in <c>Options.StereoCalibrationOptions</c>.
/// </summary>
/// <param name="sender">Event source (the Calibrate button).</param>
/// <param name="e">Unused event arguments.</param>
private void OnButtonCalibrateClick(object sender, EventArgs e)
{
    // Need at least 3 image pairs for a meaningful calibration.
    if (_leftImages.Count < 3)
    {
        return;
    }
    IsCalibrating = true;

    // Stereo rectification outputs.
    Matrix<double> R1;
    Matrix<double> R2;
    Matrix<double> P1;
    Matrix<double> P2;
    Matrix<double> Q;
    try
    {
        MCvPoint3D32f[][] objectPoints = null;
        PointF[][] imagePointsLeft = new PointF[_leftImages.Count][];
        PointF[][] imagePointsRight = new PointF[_leftImages.Count][]; // count should be equal to _rightImages.Count
        var firstLeft = _leftImages.First();

        #region ObjectPoints
        // Same planar chessboard grid (Nx x Ny, SquareSize spacing, Z = 0) for every view.
        objectPoints = new MCvPoint3D32f[_leftImages.Count][];
        objectPoints[0] = new MCvPoint3D32f[Nx * Ny];
        for (int i = 0; i < Ny; i++)
        {
            for (int j = 0; j < Nx; j++)
            {
                objectPoints[0][i * Nx + j] = new MCvPoint3D32f(i * SquareSize, j * SquareSize, 0);
            }
        }
        for (int i = 1; i < _leftImages.Count; i++)
        {
            objectPoints[i] = new MCvPoint3D32f[Nx * Ny];
            objectPoints[0].CopyTo(objectPoints[i], 0);
        }
        #endregion

        #region ImagePoints
        for (int i = 0; i < _leftImages.Count; i++)
        {
            imagePointsLeft[i] = _leftImages[i];
            imagePointsRight[i] = _rightImages[i];
        }
        #endregion

        var intrinsicCameraParametersLeft = new IntrinsicCameraParameters();
        var intrinsicCameraParametersRight = new IntrinsicCameraParameters();
        ExtrinsicCameraParameters extrinsicCameraParameters;
        Matrix<double> foundamentalMatrix;
        Matrix<double> essentialMatrix;

        CameraCalibration.StereoCalibrate(objectPoints, imagePointsLeft, imagePointsRight,
                                          intrinsicCameraParametersLeft, intrinsicCameraParametersRight,
                                          new Size(_imageWidth, _imageHeight), CALIB_TYPE.DEFAULT,
                                          new MCvTermCriteria(100, 1e-5)
                                          {
                                              type = TERMCRIT.CV_TERMCRIT_EPS | TERMCRIT.CV_TERMCRIT_ITER
                                          },
                                          out extrinsicCameraParameters, out foundamentalMatrix, out essentialMatrix);

        Matrix<float> mapXLeft;
        Matrix<float> mapYLeft;
        Matrix<float> mapXRight;
        Matrix<float> mapYRight;
        intrinsicCameraParametersLeft.InitUndistortMap(_imageWidth, _imageHeight,
                                                       out mapXLeft, out mapYLeft);
        // FIX: the right-camera maps were computed from the LEFT intrinsics
        // (copy-paste bug); they must come from the right camera's parameters.
        intrinsicCameraParametersRight.InitUndistortMap(_imageWidth, _imageHeight,
                                                        out mapXRight, out mapYRight);

        var validPixROI1 = new Rectangle();
        var validPixROI2 = new Rectangle();
        StereoRectify(intrinsicCameraParametersLeft, intrinsicCameraParametersRight,
                      new Size(_imageWidth, _imageHeight), extrinsicCameraParameters,
                      out R1, out R2, out P1, out P2, out Q, 0, 0,
                      new Size(_imageWidth, _imageHeight), ref validPixROI1, ref validPixROI2);

        // Persist every calibration artifact for later rectification/undistortion.
        Options.StereoCalibrationOptions = new StereoCalibrationOptions
        {
            IntrinsicCameraParametersLeft = intrinsicCameraParametersLeft,
            IntrinsicCameraParametersRight = intrinsicCameraParametersRight,
            EssentialMatrix = essentialMatrix,
            ExtrinsicCameraParameters = extrinsicCameraParameters,
            FoundamentalMatrix = foundamentalMatrix,
            MapXLeft = mapXLeft,
            MapYLeft = mapYLeft,
            MapXRight = mapXRight,
            MapYRight = mapYRight,
            P1 = P1,
            P2 = P2,
            R1 = R1,
            R2 = R2,
            Q = Q
        };
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.ToString());
    }
    finally
    {
        IsCalibrating = false;
    }
}
/// <summary>
/// Analyzes one captured frame: detects chessboard corners while saving frames,
/// runs the intrinsic calibration once the buffer is full, and undistorts the
/// frame once calibrated. Results are pushed to the UI via the Dispatcher.
/// </summary>
/// <param name="img">The BGR frame to analyze; may be drawn on and replaced.</param>
private void AnalyzeImage(Image<Bgr, byte> img)
{
    Gray_Frame = img.Convert<Gray, Byte>();

    // Stage 1: chessboard detection while collecting frames.
    if (currentMode == Mode.SavingFrames)
    {
        var corners = CameraCalibration.FindChessboardCorners(Gray_Frame, patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
        //we use this loop so we can show a colour image rather than a gray:
        //CameraCalibration.DrawChessboardCorners(Gray_Frame, patternSize, corners);
        // Only allow adding a frame when capture has started and a board was found.
        Dispatcher.Invoke(() => addButton.IsEnabled = start_Flag && corners != null);
        if (corners != null) // chessboard found
        {
            // Refine the detected corners to sub-pixel accuracy.
            Gray_Frame.FindCornerSubPix(new PointF[1][] { corners }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

            // Draw the detected corners: first one highlighted, the rest connected by lines.
            img.Draw(new CircleF(corners[0], 3), new Bgr(Color.Yellow), 1);
            for (int i = 1; i < corners.Length; i++)
            {
                img.Draw(new LineSegment2DF(corners[i - 1], corners[i]), line_colour_array[i], 2);
                img.Draw(new CircleF(corners[i], 3), new Bgr(Color.Yellow), 1);
            }
            // Fixed delay so the user can move the board between frames.
            Thread.Sleep(100);
        }
        corners = null; // reset for the next frame
    }

    // Stage 2: run the intrinsic calibration over the buffered frames.
    if (currentMode == Mode.Caluculating_Intrinsics)
    {
        // (could be folded into the loop above to increase speed)
        for (int k = 0; k < Frame_array_buffer.Length; k++)
        {
            corners_points_list[k] = CameraCalibration.FindChessboardCorners(Frame_array_buffer[k], patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
            // Sub-pixel refinement.
            // NOTE(review): the receiver here is Gray_Frame (the latest frame), not
            // Frame_array_buffer[k] the corners came from — looks like a bug; confirm.
            Gray_Frame.FindCornerSubPix(corners_points_list, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

            // Real-world object points: flat grid with 20-unit spacing (Z = 0).
            List<MCvPoint3D32f> object_list = new List<MCvPoint3D32f>();
            for (int i = 0; i < height; i++)
            {
                for (int j = 0; j < width; j++)
                {
                    object_list.Add(new MCvPoint3D32f(j * 20.0F, i * 20.0F, 0.0F));
                }
            }
            corners_object_list[k] = object_list.ToArray();
        }

        // The reprojection error should be as close to 0 as possible.
        //If Emgu.CV.CvEnum.CALIB_TYPE == CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function
        //if you use FIX_ASPECT_RATIO and FIX_FOCAL_LEGNTH options, these values needs to be set in the intrinsic parameters before the CalibrateCamera function is called. Otherwise 0 values are used as default.
        double error = CameraCalibration.CalibrateCamera(corners_object_list, corners_points_list, Gray_Frame.Size, IC, Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_RATIONAL_MODEL, new MCvTermCriteria(10), out EX_Param);
        MessageBox.Show("Intrinsci Calculation Error: " + error.ToString(), "Results"); //display the results to the user
        currentMode = Mode.Calibrated;
    }

    // Stage 3: once calibrated, undistort the frame.
    if (currentMode == Mode.Calibrated)
    {
        // Show the original (distorted) image in the secondary view.
        Dispatcher.Invoke(() => Sub_PicturBox.Source = img.ToBitmapSource());

        // Compute the undistortion maps from the calibrated intrinsics.
        Matrix<float> Map1, Map2;
        IC.InitUndistortMap(img.Width, img.Height, out Map1, out Map2);

        //remap the image to the particular intrinsics
        //In the current version of EMGU any pixel that is not corrected is set to transparent allowing the original image to be displayed if the same
        //image is mapped backed, in the future this should be controllable through the flag '0'
        Image<Bgr, Byte> temp = img.CopyBlank();
        CvInvoke.cvRemap(img, temp, Map1, Map2, 0, new MCvScalar(0));
        img = temp.Copy();

        //set up to allow another calculation
        //SetButtonState(true);
        start_Flag = false;
    }

    // Push the (possibly corrected) frame to the main view and release the analysis flag.
    Dispatcher.Invoke(() => Main_Picturebox.Source = img.ToBitmapSource());
    isAnalyzing = false;
    Debug.WriteLine("分析完毕"); // "analysis complete"
}