Code Example #1
File: Form1.cs Project: petrind/SRTesis2
 /// <summary>
 /// loading of the form
 /// </summary>
 public Form1()
 {
     try
     {
         IC = new IntrinsicCameraParameters();
     }
     catch (Exception ex)
     {
         MessageBox.Show("Error: " + ex.Message);
     }
     InitializeComponent();
     //fill line colour array
     Random R = new Random();
     for (int i = 0; i < line_colour_array.Length; i++)
     {
         line_colour_array[i] = new Bgr(R.Next(0, 255), R.Next(0, 255), R.Next(0, 255));
     }
     //set up capture as normal
     try
     {
         _Capture = new Capture();
         _Capture.ImageGrabbed += new Emgu.CV.Capture.GrabEventHandler(_Capture_ImageGrabbed);
         _Capture.Start();
     }
     catch (Exception ex)
     {
         MessageBox.Show("Error: " + ex.Message);
     }
 }
Code Example #2
        /// <summary>
        /// Computes projections of 3D points to the image plane given intrinsic and extrinsic camera parameters.
        /// Optionally, the function computes Jacobians: matrices of partial derivatives of the image points with respect to particular input parameters, intrinsic and/or extrinsic.
        /// The Jacobians are used during the global optimization in cvCalibrateCamera2 and cvFindExtrinsicCameraParams2.
        /// The function itself is also used to compute the back-projection error with the current intrinsic and extrinsic parameters.
        /// </summary>
        /// <remarks>Note that with intrinsic and/or extrinsic parameters set to special values, the function can be used to compute just the extrinsic transformation or just the intrinsic transformation (i.e. distortion of a sparse set of points).</remarks>
        /// <param name="objectPoints">The array of object points.</param>
        /// <param name="extrin">Extrinsic parameters</param>
        /// <param name="intrin">Intrinsic parameters</param>
        /// <param name="mats">Optional matrix supplied in the following order: dpdrot, dpdt, dpdf, dpdc, dpddist</param>
        /// <returns>The array of image points which is the projection of <paramref name="objectPoints"/></returns>
        public static PointF[] ProjectPoints(
            MCvPoint3D32f[] objectPoints,
            ExtrinsicCameraParameters extrin,
            IntrinsicCameraParameters intrin,
            params Matrix<float>[] mats)
        {
            PointF[] imagePoints = new PointF[objectPoints.Length];

            int      matsLength = mats.Length;
            GCHandle handle1    = GCHandle.Alloc(objectPoints, GCHandleType.Pinned);
            GCHandle handle2    = GCHandle.Alloc(imagePoints, GCHandleType.Pinned);

            using (Matrix<float> pointMatrix = new Matrix<float>(objectPoints.Length, 1, 3, handle1.AddrOfPinnedObject(), 3 * sizeof(float)))
                using (Matrix<float> imagePointMatrix = new Matrix<float>(imagePoints.Length, 1, 2, handle2.AddrOfPinnedObject(), 2 * sizeof(float)))
                    CvInvoke.ProjectPoints(
                        pointMatrix,
                        extrin.RotationVector,
                        extrin.TranslationVector,
                        intrin.IntrinsicMatrix,
                        intrin.DistortionCoeffs,
                        imagePointMatrix);
            handle1.Free();
            handle2.Free();
            return imagePoints;
        }
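A minimal usage sketch for ProjectPoints: project known 3D points with a calibration result and compare them against detected corners to obtain a mean reprojection error. The variables objectPts, detectedCorners, extrin and intrin are placeholders assumed to come from an earlier calibration step.

 // Hedged sketch: objectPts, detectedCorners, extrin and intrin are assumed inputs.
 PointF[] projected = CameraCalibration.ProjectPoints(objectPts, extrin, intrin);
 double sum = 0;
 for (int i = 0; i < projected.Length; i++)
 {
     double dx = projected[i].X - detectedCorners[i].X;
     double dy = projected[i].Y - detectedCorners[i].Y;
     sum += Math.Sqrt(dx * dx + dy * dy);   // per-point pixel distance
 }
 double meanError = sum / projected.Length; // mean reprojection error in pixels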
Code Example #3
		public double Calibrate(Size resolution)
		{
			MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[1][];
			PointF[][] imagePoints = new PointF[1][];

			int count = this.FObjectPoints.Count;

			objectPoints[0] = new MCvPoint3D32f[count];
			imagePoints[0] = new PointF[count];

			for (int i = 0; i < count; i++)
			{
				objectPoints[0][i] = FObjectPoints[i];
				imagePoints[0][i] = FImagePoints[i];
			}

			IntrinsicCameraParameters intrinsicParam = new IntrinsicCameraParameters();
			ExtrinsicCameraParameters[] extrinsicParams;

			Matrix<double> mat = intrinsicParam.IntrinsicMatrix;
			mat[0, 0] = resolution.Width;
			mat[1, 1] = resolution.Height;
			mat[0, 2] = resolution.Width / 2.0d;
			mat[1, 2] = resolution.Height / 2.0d;
			mat[2, 2] = 1;

			CALIB_TYPE flags;
			flags = CALIB_TYPE.CV_CALIB_FIX_K1 | CALIB_TYPE.CV_CALIB_FIX_K2 | CALIB_TYPE.CV_CALIB_FIX_K3 | CALIB_TYPE.CV_CALIB_FIX_K4 | CALIB_TYPE.CV_CALIB_FIX_K5 | CALIB_TYPE.CV_CALIB_FIX_K6 | CALIB_TYPE.CV_CALIB_USE_INTRINSIC_GUESS | CALIB_TYPE.CV_CALIB_ZERO_TANGENT_DIST;

			double error = CameraCalibration.CalibrateCamera(objectPoints, imagePoints, resolution, intrinsicParam, flags, out extrinsicParams);
			this.FIntrinsics = new Intrinsics(intrinsicParam, resolution);
			this.FExtrinsics = new Extrinsics(extrinsicParams[0]);
			return error;
		}
Code Example #4
File: Calibration.cs Project: zyh329/adas
        public CalibrationCameraResult()
        {
            IntrinsicParameters = new IntrinsicCameraParameters();

            R = new Matrix<double>(3, 3);
            P = new Matrix<double>(3, 4);
        }
Code Example #5
File: ImageFilter.cs Project: tkcast/emgucv
        public override void ProcessData(Mat sourceImage, Mat destImage)
        {
            if (!sourceImage.Size.Equals(_size))
            {
                if (_mapX != null)
                {
                    _mapX.Dispose();
                    _mapX = null;
                }
                if (_mapY != null)
                {
                    _mapY.Dispose();
                    _mapY = null;
                }

                _size = sourceImage.Size;
            }

            if (_mapX == null || _mapY == null)
            {
                IntrinsicCameraParameters p = new IntrinsicCameraParameters(5);
                int centerY = (int)(_size.Width * _centerY);
                int centerX = (int)(_size.Height * _centerX);
                CvInvoke.SetIdentity(p.IntrinsicMatrix, new MCvScalar(1.0));
                p.IntrinsicMatrix.Data[0, 2]  = centerY;
                p.IntrinsicMatrix.Data[1, 2]  = centerX;
                p.IntrinsicMatrix.Data[2, 2]  = 1;
                p.DistortionCoeffs.Data[0, 0] = _distorCoeff / (_size.Width * _size.Height);

                p.InitUndistortMap(_size.Width, _size.Height, out _mapX, out _mapY);
            }

            CvInvoke.Remap(sourceImage, destImage, _mapX, _mapY, Emgu.CV.CvEnum.Inter.Linear);
        }
Code Example #6
		//called when data for any output pin is requested
		public void Evaluate(int SpreadMax)
		{
			//if (!FPinInIntrinsics.IsChanged)
			//    return;

            if (FPinInCameraTransform[0] == null)
			{
                FPinOutIntrinsics.SliceCount = 0;
			}
			else
			{
                FPinOutIntrinsics.SliceCount = SpreadMax;

				for (int i = 0; i < SpreadMax; i++)
				{
                    IntrinsicCameraParameters cp = new IntrinsicCameraParameters();
                    // Inspiration / details from http://strawlab.org/2011/11/05/augmented-reality-with-OpenGL/ - See Intrinsics.cs too
                    cp.IntrinsicMatrix[0, 0] = FPinInCameraTransform[i][0, 0] * FPinInSensorSize[i].x / 2;// =L15*P8/2
                    cp.IntrinsicMatrix[1, 1] = FPinInCameraTransform[i][1, 1] * FPinInSensorSize[i].y / 2;// =L15*P8/2
                    cp.IntrinsicMatrix[0, 2] = (FPinInCameraTransform[i][2, 0] + 1)* FPinInSensorSize[i].x / 2;// =L15*P8/2
                    cp.IntrinsicMatrix[1, 2] = (FPinInCameraTransform[i][2, 1] +1) * FPinInSensorSize[i].y / 2;// =L15*P8/2
                    cp.IntrinsicMatrix[2, 2] = 1;
                    FPinOutIntrinsics[i] = new Intrinsics(cp, new Size((int)FPinInSensorSize[i].x, (int)FPinInSensorSize[i].y));
				}
			}
		}
Code Example #7
		public static void Save(IntrinsicCameraParameters cameraParameters, string filePath)
		{
			using (TextWriter writer = new StreamWriter(filePath))
			{
				WriteIntrinsics(cameraParameters.IntrinsicMatrix, writer);
				writer.WriteLine();
				WriteDistortionCoeffs(cameraParameters.DistortionCoeffs, writer);
			}
		}
Code Example #8
 void DrawCoordinateFrame(Emgu.CV.Image <Emgu.CV.Structure.Bgr, byte> img)
 {
     Emgu.CV.IntrinsicCameraParameters icp = _icp;
     if (_ec != null && _pattern.PatternFound && icp != null)
     {
         Emgu.CV.ExtrinsicCameraParameters ecp = _ec.Calibrate(_pattern.ImagePoints);
         _pattern.DrawCoordinateFrame(img, ecp, icp);
     }
 }
Code Example #9
 /// <summary>
 /// Initializes a new instance of the CameraCalibrationModel class.
 /// </summary>
 public CameraCalibrationModel()
 {
     _frameBufferSize = 100;
     _currentMode = CameraCalibrationMode.SavingFrames;
     _intrinsicParameters = new IntrinsicCameraParameters();
     _extrinsicParameters = new ExtrinsicCameraParameters[5];
     _translationToWorld = new Matrix<double>(3, 1);
     _rotationToWorld = new Matrix<double>(3, 3);
     _correspondingPoints = new List<PointF>();
 }
Code Example #10
File: Camera.cs Project: sivarajankumar/dentalsmile
 public Camera(SerializationInfo info, StreamingContext context)
 {
   _device_index = -1;
   int dev_id = (int)info.GetValue("device_index", typeof(int));
   _intrinsics = (Emgu.CV.IntrinsicCameraParameters)info.GetValue("intrinsic", typeof(Emgu.CV.IntrinsicCameraParameters));
   this.DeviceIndex = dev_id;
   System.Drawing.Size last_frame_size = (System.Drawing.Size)info.GetValue("last_frame_size", typeof(System.Drawing.Size));
   this.FrameWidth = last_frame_size.Width;
   this.FrameHeight = last_frame_size.Height;
 }
Code Example #11
        /// <summary>
        /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="intrinsicParam">The intrisinc parameters, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="calibrationType">cCalibration type</param>
        /// <param name="termCriteria">The termination criteria</param>
        /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
        /// <returns>The final reprojection error</returns>
        public static double CalibrateCamera(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints,
            Size imageSize,
            IntrinsicCameraParameters intrinsicParam,
            CvEnum.CALIB_TYPE calibrationType,
            MCvTermCriteria termCriteria,
            out ExtrinsicCameraParameters[] extrinsicParams)
        {
            Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for objects points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            #region get the array that represent the point counts
            int[] pointCounts = new int[objectPoints.Length];
            for (int i = 0; i < objectPoints.Length; i++)
            {
                Debug.Assert(objectPoints[i].Length == imagePoints[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
                pointCounts[i] = objectPoints[i].Length;
            }
            #endregion

            double reprojectionError = -1;
            using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
                using (Matrix<float> imagePointMatrix = ToMatrix(imagePoints))
                    using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
                        using (Matrix<double> rotationVectors = new Matrix<double>(imageCount, 3))
                            using (Matrix<double> translationVectors = new Matrix<double>(imageCount, 3))
                            {
                                reprojectionError = CvInvoke.cvCalibrateCamera2(
                                    objectPointMatrix.Ptr,
                                    imagePointMatrix.Ptr,
                                    pointCountsMatrix.Ptr,
                                    imageSize,
                                    intrinsicParam.IntrinsicMatrix,
                                    intrinsicParam.DistortionCoeffs,
                                    rotationVectors,
                                    translationVectors,
                                    calibrationType,
                                    termCriteria);

                                extrinsicParams = new ExtrinsicCameraParameters[imageCount];
                                IntPtr matPtr = Marshal.AllocHGlobal(StructSize.MCvMat);
                                for (int i = 0; i < imageCount; i++)
                                {
                                    ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
                                    CvInvoke.cvGetRow(rotationVectors.Ptr, matPtr, i);
                                    CvInvoke.cvTranspose(matPtr, p.RotationVector.Ptr);
                                    CvInvoke.cvGetRow(translationVectors.Ptr, matPtr, i);
                                    CvInvoke.cvTranspose(matPtr, p.TranslationVector.Ptr);
                                    extrinsicParams[i] = p;
                                }
                                Marshal.FreeHGlobal(matPtr);
                            }
            return reprojectionError;
        }
Code Example #12
File: Camera.cs Project: thystonius/parsley
        public Camera(SerializationInfo info, StreamingContext context)
        {
            _device_index = -1;
            int dev_id = (int)info.GetValue("device_index", typeof(int));

            _intrinsics      = (Emgu.CV.IntrinsicCameraParameters)info.GetValue("intrinsic", typeof(Emgu.CV.IntrinsicCameraParameters));
            this.DeviceIndex = dev_id;
            System.Drawing.Size last_frame_size = (System.Drawing.Size)info.GetValue("last_frame_size", typeof(System.Drawing.Size));
            this.FrameWidth  = last_frame_size.Width;
            this.FrameHeight = last_frame_size.Height;
        }
Code Example #13
        /// <summary>
        /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="intrinsicParam">The intrisinc parameters, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="flags">Flags</param>
        /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
        /// <returns>The final reprojection error</returns>
        public static double CalibrateCamera(
         MCvPoint3D32f[][] objectPoints,
         PointF[][] imagePoints,
         Size imageSize,
         IntrinsicCameraParameters intrinsicParam,
         CvEnum.CALIB_TYPE flags,
         out ExtrinsicCameraParameters[] extrinsicParams)
        {
            Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for objects points should be equal to the number of images for image points");
             int imageCount = objectPoints.Length;

             #region get the array that represent the point counts
             int[] pointCounts = new int[objectPoints.Length];
             for (int i = 0; i < objectPoints.Length; i++)
             {
            Debug.Assert(objectPoints[i].Length == imagePoints[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
            pointCounts[i] = objectPoints[i].Length;
             }
             #endregion

             double reprojectionError = -1;
             using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
             using (Matrix<float> imagePointMatrix = ToMatrix(imagePoints))
             using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
             using (Matrix<double> rotationVectors = new Matrix<double>(imageCount, 3))
             using (Matrix<double> translationVectors = new Matrix<double>(imageCount, 3))
             {
            reprojectionError = CvInvoke.cvCalibrateCamera2(
                objectPointMatrix.Ptr,
                imagePointMatrix.Ptr,
                pointCountsMatrix.Ptr,
                imageSize,
                intrinsicParam.IntrinsicMatrix,
                intrinsicParam.DistortionCoeffs,
                rotationVectors,
                translationVectors,
                flags);

            extrinsicParams = new ExtrinsicCameraParameters[imageCount];
            IntPtr matPtr = Marshal.AllocHGlobal(StructSize.MCvMat);
            for (int i = 0; i < imageCount; i++)
            {
               ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
               CvInvoke.cvGetRow(rotationVectors.Ptr, matPtr, i);
               CvInvoke.cvTranspose(matPtr, p.RotationVector.Ptr);
               CvInvoke.cvGetRow(translationVectors.Ptr, matPtr, i);
               CvInvoke.cvTranspose(matPtr, p.TranslationVector.Ptr);
               extrinsicParams[i] = p;
            }
            Marshal.FreeHGlobal(matPtr);
             }
             return reprojectionError;
        }
Code Example #14
 /// <summary>
 /// Estimates the extrinsic camera parameters for a view from known intrinsic parameters. The coordinates of the 3D object points and their corresponding 2D projections must be specified. This function also minimizes back-projection error.
 /// </summary>
 /// <param name="objectPoints">The array of object points</param>
 /// <param name="imagePoints">The array of corresponding image points</param>
 /// <param name="intrin">The intrinsic parameters</param>
 /// <param name="method">Method for solving a PnP problem</param>
 /// <returns>The extrinsic parameters</returns>
 public static ExtrinsicCameraParameters SolvePnP(
    MCvPoint3D32f[] objectPoints,
    PointF[] imagePoints,
    IntrinsicCameraParameters intrin, 
    CvEnum.SolvePnpMethod method = CvEnum.SolvePnpMethod.Iterative)
 {
    ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
    using (VectorOfPoint3D32F objPtVec = new VectorOfPoint3D32F(objectPoints))
    using (VectorOfPointF imgPtVec = new VectorOfPointF(imagePoints))
       CvInvoke.SolvePnP(objPtVec, imgPtVec, intrin.IntrinsicMatrix, intrin.DistortionCoeffs, p.RotationVector, p.TranslationVector, false, method);
    return p;
 }
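A short usage sketch for the method above, assuming 3D-2D correspondences (objectPts, imagePts) and calibrated intrinsics (intrin) are already available; the names are placeholders:

 // Recover the pose of a single view from known correspondences.
 ExtrinsicCameraParameters pose = CameraCalibration.SolvePnP(objectPts, imagePts, intrin);
 // pose.RotationVector holds a Rodrigues rotation vector,
 // pose.TranslationVector the corresponding camera translation.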
Code Example #15
File: Intrinsics.cs Project: JaapSuter/Pentacorn
        private static IntrinsicCameraParameters EstimateLens(Size imageSize, PointF principalPoint, double focalLength)
        {
            var lens = new IntrinsicCameraParameters();

            lens.IntrinsicMatrix[0, 2] = principalPoint.X;
            lens.IntrinsicMatrix[1, 2] = principalPoint.Y;

            lens.IntrinsicMatrix[0, 0] = focalLength;
            lens.IntrinsicMatrix[1, 1] = focalLength;
            lens.IntrinsicMatrix[2, 2] = 1;

            return lens;
        }
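For reference, the matrix assembled above is the standard pinhole intrinsic matrix with a single focal length f and principal point (c_x, c_y):

$$ K = \begin{pmatrix} f & 0 & c_x \\ 0 & f & c_y \\ 0 & 0 & 1 \end{pmatrix} $$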
Code Example #16
        /// <summary>
        /// Estimates transformation between the 2 cameras making a stereo pair. If we have a stereo camera, where the relative position and orientation of the 2 cameras is fixed, and if we computed poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2), respectively (that can be done with cvFindExtrinsicCameraParams2), obviously those poses will relate to each other, i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need to know the position and orientation of the 2nd camera relative to the 1st camera. That's what the described function does. It computes (R, T) such that:
        /// R2=R*R1,
        /// T2=R*T1 + T
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="intrinsicParam1">The intrisinc parameters for camera 1, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="intrinsicParam2">The intrisinc parameters for camera 2, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="flags">Different flags</param>
        /// <param name="extrinsicParams">The extrinsic parameters which contains:
        /// R - The rotation matrix between the 1st and the 2nd cameras' coordinate systems;
        /// T - The translation vector between the cameras' coordinate systems. </param>
        /// <param name="essentialMatrix">The essential matrix</param>
        /// <param name="termCrit">Termination criteria for the iterative optimiziation algorithm </param>
        /// <param name="foundamentalMatrix">The fundamental matrix</param>
        public static void StereoCalibrate(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints1,
            PointF[][] imagePoints2,
            IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            CvEnum.CALIB_TYPE flags,
            MCvTermCriteria termCrit,
            out ExtrinsicCameraParameters extrinsicParams,
            out Matrix<double> foundamentalMatrix,
            out Matrix<double> essentialMatrix)
        {
            Debug.Assert(objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length, "The number of images for objects points should be equal to the number of images for image points");

            #region get the matrix that represent the point counts
            int[,] pointCounts = new int[objectPoints.Length, 1];
            for (int i = 0; i < objectPoints.Length; i++)
            {
                Debug.Assert(objectPoints[i].Length == imagePoints1[i].Length && objectPoints[i].Length == imagePoints2[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
                pointCounts[i, 0] = objectPoints[i].Length;
            }
            #endregion

            using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
                using (Matrix<float> imagePointMatrix1 = ToMatrix(imagePoints1))
                    using (Matrix<float> imagePointMatrix2 = ToMatrix(imagePoints2))
                        using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
                        {
                            extrinsicParams    = new ExtrinsicCameraParameters();
                            essentialMatrix    = new Matrix<double>(3, 3);
                            foundamentalMatrix = new Matrix<double>(3, 3);

                            CvInvoke.cvStereoCalibrate(
                                objectPointMatrix.Ptr,
                                imagePointMatrix1.Ptr,
                                imagePointMatrix2.Ptr,
                                pointCountsMatrix.Ptr,
                                intrinsicParam1.IntrinsicMatrix,
                                intrinsicParam1.DistortionCoeffs,
                                intrinsicParam2.IntrinsicMatrix,
                                intrinsicParam2.DistortionCoeffs,
                                imageSize,
                                extrinsicParams.RotationVector,
                                extrinsicParams.TranslationVector,
                                essentialMatrix.Ptr,
                                foundamentalMatrix.Ptr,
                                termCrit,
                                flags);
                        }
        }
Code Example #17
File: Kamera.cs Project: JaapSuter/Pentacorn
        public void CalibrateIntrinsics(PointF[][] imagePoints, MCvPoint3D32f[][] worldPoints)
        {
            if (Intrinsics == null)
                Intrinsics = new IntrinsicCameraParameters();

            var calibType = CALIB_TYPE.CV_CALIB_FIX_K3 | CALIB_TYPE.CV_CALIB_ZERO_TANGENT_DIST;
            ExtrinsicCameraParameters[] extrinsics;

            var totalErr = CameraCalibration.CalibrateCamera(worldPoints, imagePoints, new Size(ImageWidth, ImageHeight), Intrinsics, calibType, out extrinsics);
            var err = Math.Sqrt(totalErr / (imagePoints.Length + imagePoints[0].Length));

            Console.WriteLine("Calibration Finished, Reprojection Error: {0}", err);

            Save(IntrinsicsFile, Intrinsics, ImageWidth, ImageHeight);            
        }
Code Example #18
 void HandleCalibrateRequest()
 {
     Core.CalibrationPattern pattern = _pattern;
     if (_calibrate_request)
     {
         _icp = _ic.Calibrate();
         _ec  = new Parsley.Core.ExtrinsicCalibration(pattern.ObjectPoints, _icp);
         this.Logger.Info("Calibration succeeded");
         this.Invoke((MethodInvoker) delegate {
             _btn_calibrate.Enabled  = false;
             _btn_take_image.Enabled = true;
             _cb_auto_take.Enabled   = true;
             _cb_auto_take.Checked   = false;
         });
     }
     _calibrate_request = false;
 }
Code Example #19
        /// <summary>
        /// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="intrinsicParam">The intrisinc parameters, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="calibrationType">cCalibration type</param>
        /// <param name="termCriteria">The termination criteria</param>
        /// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
        /// <returns>The final reprojection error</returns>
        public static double CalibrateCamera(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints,
            Size imageSize,
            IntrinsicCameraParameters intrinsicParam,
            CvEnum.CalibType calibrationType,
            MCvTermCriteria termCriteria,
            out ExtrinsicCameraParameters[] extrinsicParams)
        {
            Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for objects points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints))
                {
                    double reprojectionError = -1;
                    using (VectorOfMat rotationVectors = new VectorOfMat())
                        using (VectorOfMat translationVectors = new VectorOfMat())
                        {
                            Mat cameraMat   = new Mat();
                            Mat distorCoeff = new Mat();
                            reprojectionError = CvInvoke.CalibrateCamera(
                                vvObjPts,
                                vvImgPts,
                                imageSize,
                                intrinsicParam.IntrinsicMatrix,
                                intrinsicParam.DistortionCoeffs,
                                rotationVectors,
                                translationVectors,
                                calibrationType,
                                termCriteria);

                            extrinsicParams = new ExtrinsicCameraParameters[imageCount];
                            for (int i = 0; i < imageCount; i++)
                            {
                                ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
                                using (Mat matR = rotationVectors[i])
                                    matR.CopyTo(p.RotationVector);
                                using (Mat matT = translationVectors[i])
                                    matT.CopyTo(p.TranslationVector);
                                extrinsicParams[i] = p;
                            }
                        }
                    return reprojectionError;
                }
        }
Code Example #20
        /// <summary>
        /// Estimates the extrinsic camera parameters for a view from known intrinsic parameters. The coordinates of the 3D object points and their corresponding 2D projections must be specified. This function also minimizes back-projection error.
        /// </summary>
        /// <param name="objectPoints">The array of object points</param>
        /// <param name="imagePoints">The array of corresponding image points</param>
        /// <param name="intrin">The intrinsic parameters</param>
        /// <returns>The extrinsic parameters</returns>
        public static ExtrinsicCameraParameters FindExtrinsicCameraParams2(
            MCvPoint3D32f[] objectPoints,
            PointF[] imagePoints,
            IntrinsicCameraParameters intrin)
        {
            ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();

            GCHandle handle1 = GCHandle.Alloc(objectPoints, GCHandleType.Pinned);
            GCHandle handle2 = GCHandle.Alloc(imagePoints, GCHandleType.Pinned);

            using (Matrix<float> objectPointMatrix = new Matrix<float>(objectPoints.Length, 3, handle1.AddrOfPinnedObject()))
                using (Matrix<float> imagePointMatrix = new Matrix<float>(imagePoints.Length, 2, handle2.AddrOfPinnedObject()))
                    CvInvoke.cvFindExtrinsicCameraParams2(objectPointMatrix, imagePointMatrix, intrin.IntrinsicMatrix.Ptr, intrin.DistortionCoeffs.Ptr, p.RotationVector.Ptr, p.TranslationVector.Ptr, 0);
            handle1.Free();
            handle2.Free();

            return p;
        }
Code Example #21
 /// <summary>
 /// loading of the form
 /// </summary>
 public FrameShooterCameraCalibNao()
 {
     try
     {
         IC = new IntrinsicCameraParameters();
     }
     catch (Exception ex)
     {
         MessageBox.Show("Error: " + ex.Message);
     }
     InitializeComponent();
     //fill line colour array
     Random R = new Random();
     for (int i = 0; i < line_colour_array.Length; i++)
     {
         line_colour_array[i] = new Bgr(R.Next(0, 255), R.Next(0, 255), R.Next(0, 255));
     }
     
 }
Code Example #22
File: PinHole.cs Project: JaapSuter/Pentacorn
        private static Ray[] RaysThrough(IntrinsicCameraParameters intrinsics, PointF[] pixels)
        {
            var rays = new Ray[pixels.Length];
            var undistorted = intrinsics.Undistort(pixels, null, intrinsics.IntrinsicMatrix);

            var cx = intrinsics.IntrinsicMatrix.Data[0, 2];
            var cy = intrinsics.IntrinsicMatrix.Data[1, 2];
            var fx = intrinsics.IntrinsicMatrix.Data[0, 0];
            var fy = intrinsics.IntrinsicMatrix.Data[1, 1];

            for (int i = 0; i < undistorted.Length; ++i)
            {
                PointF pixel = undistorted[i];
                rays[i] = new Ray(Vector3.Zero, new Vector3((float)((pixel.X - cx) / fx), (float)((pixel.Y - cy) / fy), 1));
            }

            return rays;
        }
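Each ray direction built in the loop above is the usual pinhole back-projection of an undistorted pixel (u, v), with the ray anchored at the camera origin:

$$ d = \left( \frac{u - c_x}{f_x},\; \frac{v - c_y}{f_y},\; 1 \right)^{\top} $$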
Code Example #23
File: Intrinsics.cs Project: JaapSuter/Pentacorn
        private static void Calibrate(IntrinsicCameraParameters lens, Size imageSize,
            IEnumerable<PointF[]> worldPoints, IEnumerable<PointF[]> imagePoints,
            int numRadialDistortionCoefficients, bool expectTangentDistortion, out double reprojectionError)
        {
            var calibType = CALIB_TYPE.CV_CALIB_USE_INTRINSIC_GUESS;

            if (!expectTangentDistortion) calibType |= CALIB_TYPE.CV_CALIB_ZERO_TANGENT_DIST;
            if (numRadialDistortionCoefficients < 3) calibType |= CALIB_TYPE.CV_CALIB_FIX_K3;
            if (numRadialDistortionCoefficients < 2) calibType |= CALIB_TYPE.CV_CALIB_FIX_K2;
            if (numRadialDistortionCoefficients < 1) calibType |= CALIB_TYPE.CV_CALIB_FIX_K1;

            var worldPointsArray = worldPoints.Select(ps => ps.ToArrayOf(p => new MCvPoint3D32f(p.X, p.Y, 0))).ToArray();
            var imagePointsArray = imagePoints.ToArray();

            WorkAroundForPrincipalPointMustResideWithinImage(lens, imageSize);

            ExtrinsicCameraParameters[] extrinsics;
            reprojectionError = CameraCalibration.CalibrateCamera(worldPointsArray, imagePointsArray, imageSize, lens, calibType, out extrinsics);
        }
Code Example #24
        /// <summary>
        /// Estimates transformation between the 2 cameras making a stereo pair. If we have a stereo camera, where the relative position and orientation of the 2 cameras is fixed, and if we computed poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2), respectively (that can be done with cvFindExtrinsicCameraParams2), obviously those poses will relate to each other, i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need to know the position and orientation of the 2nd camera relative to the 1st camera. That's what the described function does. It computes (R, T) such that:
        /// R2=R*R1,
        /// T2=R*T1 + T
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="intrinsicParam1">The intrisinc parameters for camera 1, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="intrinsicParam2">The intrisinc parameters for camera 2, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="flags">Different flags</param>
        /// <param name="extrinsicParams">The extrinsic parameters which contains:
        /// R - The rotation matrix between the 1st and the 2nd cameras' coordinate systems;
        /// T - The translation vector between the cameras' coordinate systems. </param>
        /// <param name="essentialMatrix">The essential matrix</param>
        /// <param name="termCrit">Termination criteria for the iterative optimiziation algorithm </param>
        /// <param name="foundamentalMatrix">The fundamental matrix</param>
        public static void StereoCalibrate(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints1,
            PointF[][] imagePoints2,
            IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            CvEnum.CalibType flags,
            MCvTermCriteria termCrit,
            out ExtrinsicCameraParameters extrinsicParams,
            out Matrix<double> foundamentalMatrix,
            out Matrix<double> essentialMatrix)
        {
            Debug.Assert(objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length, "The number of images for objects points should be equal to the number of images for image points");

            using (VectorOfVectorOfPoint3D32F objectPointVec = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF imagePoints1Vec = new VectorOfVectorOfPointF(imagePoints1))
                    using (VectorOfVectorOfPointF imagePoints2Vec = new VectorOfVectorOfPointF(imagePoints2))
                    {
                        extrinsicParams    = new ExtrinsicCameraParameters();
                        essentialMatrix    = new Matrix<double>(3, 3);
                        foundamentalMatrix = new Matrix<double>(3, 3);

                        CvInvoke.StereoCalibrate(
                            objectPointVec,
                            imagePoints1Vec,
                            imagePoints2Vec,

                            intrinsicParam1.IntrinsicMatrix,
                            intrinsicParam1.DistortionCoeffs,
                            intrinsicParam2.IntrinsicMatrix,
                            intrinsicParam2.DistortionCoeffs,
                            imageSize,
                            extrinsicParams.RotationVector,
                            extrinsicParams.TranslationVector,
                            essentialMatrix,
                            foundamentalMatrix,
                            flags,
                            termCrit);
                    }
        }
Code Example #25
		public static CameraParameters Load(string filePath)
		{
			Dictionary<string, double> valuesByName = new Dictionary<string, double>(StringComparer.InvariantCultureIgnoreCase);
			using (TextReader reader = new StreamReader(filePath))
			{
				string line;
				while ((line = reader.ReadLine()) != null)
				{
					line = line.Trim();
					if (line.Length == 0 || line.StartsWith("#"))
					{
						continue;
					}

					string[] lineParts = line.Split('\t');
					string name = lineParts[0];
					double value = double.Parse(lineParts[1], CultureInfo.InvariantCulture);
					valuesByName.Add(name, value);
				}
			}

			IntrinsicCameraParameters intrinsicCameraParameters = new IntrinsicCameraParameters();

			// Intrinsic parameters
			intrinsicCameraParameters.IntrinsicMatrix[0, 0] = valuesByName["fx:"];
			intrinsicCameraParameters.IntrinsicMatrix[1, 1] = valuesByName["fy:"];
			intrinsicCameraParameters.IntrinsicMatrix[0, 2] = valuesByName["cx:"];
			intrinsicCameraParameters.IntrinsicMatrix[1, 2] = valuesByName["cy:"];
			intrinsicCameraParameters.IntrinsicMatrix[2, 2] = 1.0;

			// Distortion coefficients
			intrinsicCameraParameters.DistortionCoeffs[0, 0] = valuesByName["k1:"];
			intrinsicCameraParameters.DistortionCoeffs[1, 0] = valuesByName["k2:"];
			intrinsicCameraParameters.DistortionCoeffs[2, 0] = valuesByName["p1:"];
			intrinsicCameraParameters.DistortionCoeffs[3, 0] = valuesByName["p2:"];
			intrinsicCameraParameters.DistortionCoeffs[4, 0] = valuesByName["k3:"];

			return new CameraParameters(intrinsicCameraParameters);
		}
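For illustration, a file this loader would accept looks like the following: one name/value pair per line separated by a tab, with lines starting with '#' ignored. All numeric values here are hypothetical.

 # intrinsic parameters
 fx:	820.4
 fy:	818.9
 cx:	319.5
 cy:	239.5
 # distortion coefficients
 k1:	-0.12
 k2:	0.05
 p1:	0.001
 p2:	-0.0007
 k3:	0.0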
Code Example #26
        public StereoCameraCalibrationModel()
        {
            _frameBufferSize = 100;
            _currentMode = CameraCalibrationMode.SavingFrames;
            _intrinsicCameraParameters1 = new IntrinsicCameraParameters();
            _intrinsicCameraParameters2 = new IntrinsicCameraParameters();
            _squareSizeX = 55.0f;
            _squareSizeY = 55.0f;

            BitmapSource bs = BitmapHelper.ToBitmapSource(new Image<Gray, byte>(640, 480, new Gray(1.0)));
            bs.Freeze();
            _disparityImageSource = bs;

            _numDisparities = 64;
            _minDispatities = 0;
            _sad = 3;
            _maxDiff = -1;
            _prefilterCap = 0;
            _uniquenessRatio = 0;
            _speckle = 16;
            _speckleRange = 16;
            _disparityMode = StereoSGBM.Mode.SGBM;
        }
Code Example #27
File: Offline.cs Project: JaapSuter/Pentacorn
            public Camera(string path, int width, int height, PointF[] imageCorners, MCvPoint3D32f[] worldCorners)
            {
                Width = width;
                Height = height;

                Intrinsics = Markers.Chessboard.Load(path, width, height, out UndistortX, out UndistortY);
                Extrinsics = CameraCalibration.FindExtrinsicCameraParams2(worldCorners, imageCorners, Intrinsics);

                var ext = Extrinsics;
                View = new Matrix((float)ext.ExtrinsicMatrix[0, 0], -(float)ext.ExtrinsicMatrix[1, 0], -(float)ext.ExtrinsicMatrix[2, 0], 0,
                                         (float)ext.ExtrinsicMatrix[0, 1], -(float)ext.ExtrinsicMatrix[1, 1], -(float)ext.ExtrinsicMatrix[2, 1], 0,
                                         (float)ext.ExtrinsicMatrix[0, 2], -(float)ext.ExtrinsicMatrix[1, 2], -(float)ext.ExtrinsicMatrix[2, 2], 0,
                                         (float)ext.ExtrinsicMatrix[0, 3], -(float)ext.ExtrinsicMatrix[1, 3], -(float)ext.ExtrinsicMatrix[2, 3], 1);
                Intrinsics.GetIntrinsicMatrixValues(width, height, 0, 0,
                    out FovX, out FovY,
                    out FocalLength, out PrincipalPoint,
                    out PixelAspectRatio);
                World = Matrix.Invert(View);
                Projection = Matrix.CreatePerspectiveFieldOfView(MathHelper.ToRadians((float)FovY), ((float)width) / ((float)height), 0.1f, 4.0f);
                BoundingFrustum = new BoundingFrustum(View * Projection);

                if (Global.No)
                    using (var img = new Image<Bgr, byte>(@"C:\Users\Jaap\My Dropbox\Data\Pentacorn.Vision\Offline\Casio Ex S5\Tape on Melamine\CIMG0606.JPG"))
                    {
                        foreach (var p in imageCorners)
                            img.Draw(new Cross2DF(p, 20, 20), new Bgr(255, 0, 255), 1);

                        var projectedCorners = CameraCalibration.ProjectPoints(worldCorners, Extrinsics, Intrinsics);
                        foreach (var p in projectedCorners)
                            img.Draw(new Cross2DF(p, 6, 6), new Bgr(255, 255, 0), 1);

                        var und = Intrinsics.Undistort(img);

                        img.Save(@"C:\Users\Jaap\Temp\img.png");
                        und.Save(@"C:\Users\Jaap\Temp\und.png");
                    }
            }
Code Example #28
File: AutoTestCalibration.cs Project: Delaley/emgucv
      public void TestChessboardCalibration()
      {
         Size patternSize = new Size(9, 6);

         Image<Gray, Byte> chessboardImage = EmguAssert.LoadImage<Gray, byte>("left01.jpg");
         Util.VectorOfPointF corners = new Util.VectorOfPointF();
         bool patternWasFound = CvInvoke.FindChessboardCorners(chessboardImage, patternSize, corners);

         chessboardImage.FindCornerSubPix(
            new PointF[][] {corners.ToArray()},
            new Size(10, 10),
            new Size(-1, -1),
            new MCvTermCriteria(0.05));

         MCvPoint3D32f[] objectPts = CalcChessboardCorners(patternSize, 1.0f);
         IntrinsicCameraParameters intrisic = new IntrinsicCameraParameters(8);
         ExtrinsicCameraParameters[] extrinsic;
         double error = CameraCalibration.CalibrateCamera(new MCvPoint3D32f[][] { objectPts }, new PointF[][] { corners.ToArray() },
            chessboardImage.Size, intrisic, CvEnum.CalibType.Default, new MCvTermCriteria(30, 1.0e-10),  out extrinsic);
         CvInvoke.DrawChessboardCorners(chessboardImage, patternSize, corners, patternWasFound);
         //CameraCalibration.DrawChessboardCorners(chessboardImage, patternSize, corners);
         Image<Gray, Byte> undistorted = intrisic.Undistort(chessboardImage);
         //UI.ImageViewer.Show(undistorted, String.Format("Reprojection error: {0}", error));
      }
Code Example #29
        private void OnButtonCalibrateClick(object sender, EventArgs e)
        {
            if (_leftFaceRegions.Count < 3)
            {
                return;
            }

            IsCalibrating = true;

            try
            {
                var objectPoints = new MCvPoint3D32f[_leftFaceRegions.Count][];

                var firstLeft = _leftFaceRegions.First();

                for (int i = 0; i < _leftFaceRegions.Count; i++)
                {
                    objectPoints[i] = new MCvPoint3D32f[4];

                    objectPoints[i][0] = new MCvPoint3D32f(firstLeft.Mouth.Location.X, firstLeft.Mouth.Location.Y, 0);
                    objectPoints[i][1] = new MCvPoint3D32f(firstLeft.LeftEye.Location.X, firstLeft.LeftEye.Location.Y, 0);
                    objectPoints[i][2] = new MCvPoint3D32f(firstLeft.RightEye.Location.X, firstLeft.RightEye.Location.Y, 0);
                    objectPoints[i][3] = new MCvPoint3D32f(firstLeft.Face.Location.X, firstLeft.Face.Location.Y, 0);
                }

                var leftImagePoints = new PointF[_leftFaceRegions.Count][];

                for (int i = 0; i < _leftFaceRegions.Count; i++)
                {
                    leftImagePoints[i] = new PointF[4];

                    leftImagePoints[i][0] = new PointF(_leftFaceRegions[i].Mouth.Location.X, _leftFaceRegions[i].Mouth.Location.Y);
                    leftImagePoints[i][1] = new PointF(_leftFaceRegions[i].LeftEye.Location.X, _leftFaceRegions[i].LeftEye.Location.Y);
                    leftImagePoints[i][2] = new PointF(_leftFaceRegions[i].RightEye.Location.X, _leftFaceRegions[i].RightEye.Location.Y);
                    leftImagePoints[i][3] = new PointF(_leftFaceRegions[i].Face.Location.X, _leftFaceRegions[i].Face.Location.Y);
                }

                var rightImagePoints = new PointF[_leftFaceRegions.Count][];

                for (int i = 0; i < _leftFaceRegions.Count; i++)
                {
                    rightImagePoints[i] = new PointF[4];

                    rightImagePoints[i][0] = new PointF(_rightFaceRegions[i].Mouth.Location.X, _rightFaceRegions[i].Mouth.Location.Y);
                    rightImagePoints[i][1] = new PointF(_rightFaceRegions[i].LeftEye.Location.X, _rightFaceRegions[i].LeftEye.Location.Y);
                    rightImagePoints[i][2] = new PointF(_rightFaceRegions[i].RightEye.Location.X, _rightFaceRegions[i].RightEye.Location.Y);
                    rightImagePoints[i][3] = new PointF(_rightFaceRegions[i].Face.Location.X, _rightFaceRegions[i].Face.Location.Y);
                }

                var intrinsicCameraParameters1 = new Emgu.CV.IntrinsicCameraParameters();
                var intrinsicCameraParameters2 = new Emgu.CV.IntrinsicCameraParameters();

                ExtrinsicCameraParameters extrinsicCameraParameters;
                Matrix<double> foundamentalMatrix;
                Matrix<double> essentialMatrix;



                Emgu.CV.CameraCalibration.StereoCalibrate(objectPoints, leftImagePoints, rightImagePoints,
                                                          intrinsicCameraParameters1, intrinsicCameraParameters2,
                                                          new Size(firstLeft.SourceImage.Width, firstLeft.SourceImage.Height),
                                                          Emgu.CV.CvEnum.CALIB_TYPE.DEFAULT,
                                                          new MCvTermCriteria(), out extrinsicCameraParameters, out foundamentalMatrix, out essentialMatrix);



                Options.StereoCalibrationOptions = new StereoCalibrationOptions()
                {
                    EssentialMatrix           = essentialMatrix,
                    ExtrinsicCameraParameters = extrinsicCameraParameters,
                    FoundamentalMatrix        = foundamentalMatrix
                };
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.ToString());
            }
            finally
            {
                IsCalibrating = false;
            }
        }
Code Example #30
File: AutoTestVarious.cs Project: samuto/UnityOpenCV
        public void TestIntrinsicCameraParametersRuntimeSerialize()
        {
            IntrinsicCameraParameters param = new IntrinsicCameraParameters();

             param.DistortionCoeffs.SetRandUniform(new MCvScalar(), new MCvScalar(1.0));
             param.IntrinsicMatrix.SetRandUniform(new MCvScalar(), new MCvScalar(100));

             System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
             formatter = new System.Runtime.Serialization.Formatters.Binary.BinaryFormatter();

             Byte[] bytes;
             using (MemoryStream ms = new MemoryStream())
             {
            formatter.Serialize(ms, param);
            bytes = ms.GetBuffer();
             }
             using (MemoryStream ms2 = new MemoryStream(bytes))
             {
            IntrinsicCameraParameters param2 = (IntrinsicCameraParameters)formatter.Deserialize(ms2);

            Assert.IsTrue(param.Equals(param2));
             }
        }
Code Example #31
File: AutoTestVarious.cs Project: samuto/UnityOpenCV
        public void TestProjectPoints()
        {
            IntrinsicCameraParameters intrin = new IntrinsicCameraParameters();
             intrin.IntrinsicMatrix.SetIdentity();
             ExtrinsicCameraParameters extrin = new ExtrinsicCameraParameters();
             MCvPoint3D32f point = new MCvPoint3D32f();

             PointF[] points = CameraCalibration.ProjectPoints(new MCvPoint3D32f[] { point }, extrin, intrin);
        }
Code Example #32
File: CvHelper.cs Project: cosmo1911/UniMoveStation
        public static IList<PointF> NormalizePoints(IList<PointF> points, IntrinsicCameraParameters icp)
        {
            float fx = (float)icp.IntrinsicMatrix[0, 0];
            float fy = (float)icp.IntrinsicMatrix[1, 1];

            IList<PointF> list = new List<PointF>();
            foreach (PointF point in points)
            {
                list.Add(new PointF(point.X / fx, point.Y / fy));
            }
            return list;
        }
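Note that this helper only rescales by the focal lengths; a full pinhole normalization would also remove the principal point offset first:

$$ x_n = \frac{u - c_x}{f_x}, \qquad y_n = \frac{v - c_y}{f_y} $$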
Code Example #33
File: Camera.cs Project: thystonius/parsley
 /// <summary>
 /// Initialize camera from device index
 /// </summary>
 /// <param name="device_index">Device index starting at zero.</param>
 public Camera(int device_index)
 {
     _device_index    = -1;
     this.DeviceIndex = device_index;
     _intrinsics      = null;
 }
Code Example #34
        public void CalibrateSingleCamera(int index, int patternX, int patternY,
                                            ref Image<Bgr, byte> slikaVhodna,
                                            ref Image<Gray, Byte> slikaRobovi,
                                            ref Image<Bgr, byte> slikaTransformirana,
                                            ref IntrinsicCameraParameters intrinsic_param )
        {
            setCamCapture(index);

            int chessboard_number = 3;

            int corners_number = patternY * patternX;
            int success = 0;

            int count = 0;
            int board_dt = 20;

            MCvPoint3D32f[][] object_points = new MCvPoint3D32f[chessboard_number][];
            PointF[][] image_points = new PointF[chessboard_number][];

            ExtrinsicCameraParameters[] extrinsic_param;

            Image<Bgr, byte> slika2 = slikaVhodna;

            Image<Gray, Byte> slika = slika2.Convert<Gray, Byte>();
            slikaVhodna = slika2;

            PointF[] corners = new PointF[] { };
            Size patternSize = new Size(patternX, patternY);

            while (success < chessboard_number)
            {

                if ((count++ % board_dt) == 0)
                {

                    bool found = CameraCalibration.FindChessboardCorners(slika, patternSize, CALIB_CB_TYPE.DEFAULT, out corners);

                    CameraCalibration.DrawChessboardCorners(slika, patternSize, corners, found);
                    slikaRobovi = slika;

                    slika.FindCornerSubPix(new PointF[][] { corners }, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(300, 0.05));

                    if (corners.Length == corners_number)
                    {
                        object_points[success] = new MCvPoint3D32f[corners_number];
                        for (int j = 0; j < corners_number; j++)
                        {
                            image_points[success] = corners;
                            // grid index: row = j / patternX, column = j % patternX
                            object_points[success][j].x = j / patternX;
                            object_points[success][j].y = j % patternX;
                            object_points[success][j].z = 0.0f;
                        }

                        success++;
                    }
                }

                slika2 = CamCapture.QueryFrame();
                slika = slika2.Convert<Gray, Byte>();

            }

            intrinsic_param = new IntrinsicCameraParameters();
            extrinsic_param = new ExtrinsicCameraParameters[success];

            for (int i = 0; i < success; i++)
            {
                extrinsic_param[i] = new ExtrinsicCameraParameters();
            }

            CameraCalibration.CalibrateCamera(object_points, image_points, new Size(slika.Width, slika.Height), intrinsic_param, CALIB_TYPE.DEFAULT, out extrinsic_param);

            uintdisort(ref slikaTransformirana, ref intrinsic_param, ref slika2, ref slika);
        }
Code Example #35
        public void uintdisort(ref Image<Bgr, byte> slikaTransformirana, ref IntrinsicCameraParameters intrinsic_param
                               , ref Image<Bgr, byte> slika2, ref Image<Gray, Byte> slika)
        {
            Matrix<float> mapx = new Matrix<float>(new Size(slika.Width, slika.Height));
            Matrix<float> mapy = new Matrix<float>(new Size(slika.Width, slika.Height));

            intrinsic_param.InitUndistortMap(slika.Width, slika.Height, out mapx, out mapy);

            Image<Bgr, byte> image_calibrated = slika2.Clone();

            CvInvoke.cvRemap(slika2.Ptr, image_calibrated.Ptr, mapx.Ptr, mapy.Ptr, 8 /*(int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS*/, new MCvScalar(0));

            slikaTransformirana = image_calibrated;
        }
Code Example #36
        public PointF Calibrate(float image_x, float image_y)
        {
            // If not enough mapping points are given, don't do the calibration
            if (worldPoints_list.Count < 4)
                return new PointF(0, 0);

            
            try
            {
                intrinsic = new IntrinsicCameraParameters();
            }
            catch (Exception e) { MessageBox.Show(e.InnerException.Message); }

            worldPoints[0] = new MCvPoint3D32f[] { (worldPoints_list[0])[0], (worldPoints_list[1])[0], (worldPoints_list[2])[0], (worldPoints_list[3])[0] };
            imagePoints[0] = new PointF[] { (imagePoints_list[0])[0], (imagePoints_list[1])[0], (imagePoints_list[2])[0], (imagePoints_list[3])[0] };
   
            
            CameraCalibration.CalibrateCamera(worldPoints, imagePoints, frame_size, intrinsic, CALIB_TYPE.DEFAULT, out extrinsic);


            Matrix<double> extrinsicMatrix = extrinsic[0].ExtrinsicMatrix;

            // Build the H matrix out of the extrinsic pramater Matrix.
            // we don't use the 3rd column, since we assume that world_z = 0 (multiplying by world_z = 0 would contribute nothing anyway)
            Matrix<double> H = new Matrix<double>(3, 3);
            H[0, 0] = extrinsicMatrix[0, 0]; H[0, 1] = extrinsicMatrix[0, 1]; H[0, 2] = extrinsicMatrix[0, 3];
            H[1, 0] = extrinsicMatrix[1, 0]; H[1, 1] = extrinsicMatrix[1, 1]; H[1, 2] = extrinsicMatrix[1, 3];
            H[2, 0] = extrinsicMatrix[2, 0]; H[2, 1] = extrinsicMatrix[2, 1]; H[2, 2] = extrinsicMatrix[2, 3];
            Matrix<double> H_inverse = new Matrix<double>(3, 3);
            CvInvoke.cvInvert(H, H_inverse, SOLVE_METHOD.CV_LU); 


            // intrinsic parameters include focal length, offset, etc
            Matrix<double> intrinsicMatrix_inverse = new Matrix<double>(3, 3);
            CvInvoke.cvInvert(intrinsic.IntrinsicMatrix, intrinsicMatrix_inverse, SOLVE_METHOD.CV_LU);

            Matrix<double> HI = H_inverse.Mul(intrinsicMatrix_inverse);


            // This is the image point we want to transform to world 3D coordinates.
            // We use homogeneous coordinates, which lets us apply the translation as part
            // of a matrix multiplication. We also set z = 1 because we assume the camera
            // image plane is at z = 1; the inverse intrinsic matrix accounts for this.
            Matrix<double> arbitraryPointMatrix = new Matrix<double>(3, 1);
            arbitraryPointMatrix[0, 0] = image_x;
            arbitraryPointMatrix[1, 0] = image_y;
            arbitraryPointMatrix[2, 0] = 1;


            // Do the projective transformation
            Matrix<double> result3DMatrix = HI.Mul(arbitraryPointMatrix);


            // Normalize the homogeneous coordinates by dividing by z = result3DMatrix[2, 0]
            // (afterwards z = 1, since the image plane is assumed to be at z = 1)
            PointF point3D = new PointF();
            point3D.X = (float)(result3DMatrix[0, 0] / result3DMatrix[2, 0]);
            point3D.Y = (float)(result3DMatrix[1, 0] / result3DMatrix[2, 0]);

            return point3D;
            
        }
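A hedged usage sketch for the method above; the pixel coordinates are arbitrary, and it assumes the four world/image correspondences have already been added to the *_list fields:

            // Map a pixel back onto the world plane z = 0.
            PointF world = Calibrate(320.0f, 240.0f);
            Console.WriteLine("World position on the z=0 plane: ({0}, {1})", world.X, world.Y);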
Code example #37
File: Offline.cs Project: JaapSuter/Pentacorn
        public static void Run(Checkers checkers)
        {
            Emgu.CV.Util.OptimizeCV(true);

            PointF[] imageCornersCamera = new PointF[]
            {
                new PointF(3141, 2144),
                new PointF(2035, 2599),
                new PointF(1594, 1503),
                new PointF(2683, 1019),
                new PointF(2363, 1824),
                new PointF(2137, 1258),
                new PointF(1783,  400),
                new PointF( 599, 1948),
            };

            MCvPoint3D32f[] worldCornersCamera = new MCvPoint3D32f[]
            {
                new MCvPoint3D32f(0, 0, 0),
                new MCvPoint3D32f(0, 0.5f, 0),
                new MCvPoint3D32f(0.5f, 0.5f, 0),
                new MCvPoint3D32f(0.5f, 0, 0),
                new MCvPoint3D32f(0.25f, 0.25f, 0),
                new MCvPoint3D32f(0.5f, 0.25f, 0),
                new MCvPoint3D32f(0.9f, 0.25f, 0),
                new MCvPoint3D32f(0.5f, 1.0f, 0),            
            };

            var dstDir = Path.Combine(Global.TmpDir);
            var srcDir = Path.Combine(Global.DatDir, @"Offline\Optoma EW1610\Raw");
            var proPath = Path.Combine(Global.DatDir, @"Offline\Optoma EW1610\intrinsics.txt");
            var camPath = Path.Combine(Global.DatDir, @"Offline\Casio Ex S5\intrinsics.txt");

            var camera = new Camera(camPath, 3648, 2736, imageCornersCamera, worldCornersCamera);

            var proSize = new Size(800, 1280);

            var zPlane = new Plane(Vector3.UnitZ, 0);

            var invertedBlackAndWhite = false;

            var srcFiles = Directory.EnumerateFiles(srcDir, "*.jpg").ToArray();
            PointF[][] imageCorners = checkers.GetImagePointsCopy(srcFiles.Length);
            var worldCorners = Projector.GetObjectPointsCopy(srcFiles.Length, checkers.Saddles.Width, checkers.Saddles.Height, (float)checkers.Square.Width, (float)checkers.Square.Height);

            var sw = Stopwatch.StartNew();
            Console.WriteLine("Finding corners in {0} images.", srcFiles.Length);

            Parallel.ForEach(srcFiles, new ParallelOptions() { MaxDegreeOfParallelism = 1, }, (srcFile, loopState, lon) =>
            {
                Console.WriteLine("    Finding corners in image: {0}/{2}, {1}.", lon, srcFile, srcFiles.Length);
                var sw2 = Stopwatch.StartNew();

                using (var img = new Image<Bgr, byte>(srcFile))
                using (var gray = img.Convert<Gray, byte>())
                {
                    if (invertedBlackAndWhite)
                        gray._Not();

                    ImageViewer.Show(gray);

                    var und = camera.Intrinsics.Undistort(img);
                    und.Save(@"C:\Users\Jaap\Temp\und.png");

                    var patternSize = checkers.Saddles;
                    PointF[] corners;
                    var ccbt = CALIB_CB_TYPE.FILTER_QUADS | CALIB_CB_TYPE.ADAPTIVE_THRESH;
                    var found = CameraCalibration.FindChessboardCorners(gray, patternSize, ccbt, out corners);

                    CvInvoke.cvDrawChessboardCorners(img.Ptr, patternSize, corners, corners.Length, patternWasFound: found ? 1 : 0);

                    if (found)
                    {
                        var window = new Size(32, 32);
                        var zeroZone = new Size(-1, -1);
                        var criteria = new MCvTermCriteria(21, 0.0001);
                        Console.WriteLine("    Image {0}/{1} has corners, subpixing begins at {2}.", lon, srcFiles.Length, sw2.Elapsed);
                        gray.FindCornerSubPix(new PointF[][] { corners }, window, zeroZone, criteria);
                        
                        for (int i = 0; i < corners.Length; ++i)
                        {
                            img.Draw(new Cross2DF(corners[i], 20, 20), new Bgr(255, 0, 255), 1);

                            var ray = camera.BoundingFrustum.ThroughPixel(camera.World.Translation, corners[i].X, corners[i].Y, camera.Width, camera.Height);
                            var intersect = ray.Intersects(zPlane);
                            if (intersect.HasValue)
                            {
                                var rayCastWorldCorner = ray.Position + intersect.Value * ray.Direction;
                                worldCorners[lon][i] = new MCvPoint3D32f(rayCastWorldCorner.X, rayCastWorldCorner.Y, rayCastWorldCorner.Z);
                            }                            
                        }

                        corners = CameraCalibration.ProjectPoints(worldCorners[lon], camera.Extrinsics, camera.Intrinsics);
                        for (int i = 0; i < corners.Length; ++i)
                            img.Draw(new Cross2DF(corners[i], 10, 10), new Bgr(255, 255, 0), 1);                        
                    }
                    else
                    {
                        worldCorners[lon] = null;
                    }

                    img.Save(@"C:\Users\Jaap\Temp\img.png");
                    
                    imageCorners[lon] = found ? corners : null;
                    var prefix = found ? "ok." : "failed.";
                    var dstFile = Path.Combine(dstDir, prefix + Path.GetFileNameWithoutExtension(srcFile) + ".png");
                    if (File.Exists(dstFile))
                        File.Delete(dstFile);

                    img.Save(dstFile);

                    if (lon == 0)
                        ImageViewer.Show(img);

                    Console.WriteLine("    Image {0}/{1} is done {2}, found: {3}, time: {4}.", lon, srcFiles.Length, dstFile, found, sw2.Elapsed);
                }
            });

            // Early-out toggle: when set, the calibration and undistortion stages below are skipped.
            if (Global.Yes)
                return;

            worldCorners = worldCorners.Where(c => c != null).ToArray();
            imageCorners = checkers.GetImagePointsCopy(worldCorners.Length);

            var intrinsics = new IntrinsicCameraParameters();

            Console.WriteLine("Found corners in {0}/{1} images, time: {2}.", worldCorners.Length, srcFiles.Length, sw.Elapsed);
            Console.WriteLine("Now calibrating...");
            sw.Restart();

            ExtrinsicCameraParameters[] extrinsics;

            // Fix k3 and the tangential distortion terms; the principal point is left free.
            var ct = CALIB_TYPE.CV_CALIB_FIX_K3 | CALIB_TYPE.CV_CALIB_ZERO_TANGENT_DIST;

            var reprojErr = CameraCalibration.CalibrateCamera(
                worldCorners, imageCorners, proSize, intrinsics, ct, out extrinsics);
            var reprojErrPerCornerSquared = reprojErr / (imageCorners.Length * checkers.Saddles.Area());
            var reprojErrPerCorner = Math.Sqrt(reprojErrPerCornerSquared);

            Console.WriteLine("Calibration done in: {0}, reprojection error = {1}, {2}, {3}", sw.Elapsed, reprojErr, reprojErrPerCornerSquared, reprojErrPerCorner);

            Markers.Chessboard.Save(proPath, intrinsics, proSize.Width, proSize.Height);

            Matrix<float> mdx, mdy;
            intrinsics.InitUndistortMap(proSize.Width, proSize.Height, out mdx, out mdy);

            Console.WriteLine("Now undistorting and saving...");
            sw.Restart();

            if (Global.No)
                Parallel.ForEach(srcFiles, new ParallelOptions() { MaxDegreeOfParallelism = 2, }, (srcFile, loopState, lon) =>
                {
                    var dstFile = Path.Combine(dstDir, Path.GetFileNameWithoutExtension(srcFile) + ".undistorted.png");
                    if (File.Exists(dstFile))
                        File.Delete(dstFile);

                    using (var img = new Image<Bgr, byte>(srcFile))
                    using (var jmg = img.Copy())
                    {
                        CvInvoke.cvRemap(jmg.Ptr, img.Ptr, mdx, mdy, (int)INTER.CV_INTER_LINEAR | (int)Emgu.CV.CvEnum.WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(255.0f, 0.0f, 255.0f, 255.0f));
                        img.Save(dstFile);
                    }

                    Console.WriteLine("    Undistorted {0}/{2}, {1}", lon, dstFile, srcFiles.Length);
                });

            Console.WriteLine("Undistortion done in {0}, press a key to continue.", sw.Elapsed);
            Console.ReadKey();
            Console.WriteLine("One more key please...");
        }
Code example #38
		//called when data for any output pin is requested
		public void Evaluate(int SpreadMax)
		{
			if (FPinInDo[0])
			{
				int nPointsPerImage = FPinInObject.SliceCount;
				bool useVVVVCoords = FPinInCoordSystem[0] == TCoordinateSystem.VVVV;

				if (nPointsPerImage == 0)
				{
					FStatus[0] = "Insufficient points";
					return;
				}
				int nImages = FPinInImage.SliceCount / nPointsPerImage;

				MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[nImages][];
				PointF[][] imagePoints = new PointF[nImages][];
				Size imageSize = new Size( (int) FPinInSensorSize[0].x, (int) FPinInSensorSize[0].y);
				IntrinsicCameraParameters intrinsicParam = new IntrinsicCameraParameters();
				ExtrinsicCameraParameters[] extrinsicsPerView;
				CALIB_TYPE flags;
				GetFlags(out flags);

				if (flags.HasFlag(CALIB_TYPE.CV_CALIB_USE_INTRINSIC_GUESS))
				{
					if (FPinInIntrinsics[0] == null)
					{
						Matrix<double> mat = intrinsicParam.IntrinsicMatrix;
						mat[0, 0] = FPinInSensorSize[0].x / 2.0d;
						mat[1, 1] = FPinInSensorSize[0].y / 2.0d;
						mat[0, 2] = FPinInSensorSize[0].x / 2.0d;
						mat[1, 2] = FPinInSensorSize[0].y / 2.0d;
						mat[2, 2] = 1;
					}
					else
					{
						intrinsicParam.DistortionCoeffs = FPinInIntrinsics[0].intrinsics.DistortionCoeffs.Clone();
						intrinsicParam.IntrinsicMatrix = FPinInIntrinsics[0].intrinsics.IntrinsicMatrix.Clone();
					}

				}

				imagePoints = MatrixUtils.ImagePoints(FPinInImage, nPointsPerImage);

				for (int i=0; i<nImages; i++)
				{
					objectPoints[i] = MatrixUtils.ObjectPoints(FPinInObject, useVVVVCoords);
				}

				try
				{
					FPinOutError[0] = CameraCalibration.CalibrateCamera(objectPoints, imagePoints, imageSize, intrinsicParam, flags, out extrinsicsPerView);

					Intrinsics intrinsics = new Intrinsics(intrinsicParam, imageSize);
					FPinOutIntrinsics[0] = intrinsics;
					// Both coordinate systems currently receive the same projection matrix.
					FPinOutProjection[0] = intrinsics.Matrix;

					FPinOutExtrinsics.SliceCount = nImages;
					FPinOutView.SliceCount = nImages;
					for (int i = 0; i < nImages; i++)
					{
						Extrinsics extrinsics = new Extrinsics(extrinsicsPerView[i]);
						FPinOutExtrinsics[i] = extrinsics;

						if (useVVVVCoords)
							FPinOutView[i] = MatrixUtils.ConvertToVVVV(extrinsics.Matrix);
						else
							FPinOutView[i] = extrinsics.Matrix;
					}

					FPinOutSuccess[0] = true;
					FStatus[0] = "OK";
				}
				catch (Exception e)  {
					FPinOutSuccess[0] = false;
					FStatus[0] = e.Message;
				}
			}

		}
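The intrinsic-guess branch above seeds both focal lengths with half the sensor size, which under the pinhole model corresponds to a 90-degree field of view; a worked check with illustrative values (not part of the original node):

			// fov = 2 * atan(w / (2 * fx)); with fx = w / 2 this is 2 * atan(1) = 90 degrees.
			double w = 640.0;
			double fx = w / 2.0;
			double fovDegrees = 2.0 * Math.Atan(w / (2.0 * fx)) * 180.0 / Math.PI; // 90.0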
Code example #39
File: calibrate_camera.cs Project: qida/qcv
        public void OnCalibrate(Dictionary<string, object> bundle)
        {
            // Require at least three recorded views before attempting calibration.
            if (_image_points.Count >= 3)
            {
                IntrinsicCameraParameters icp = new IntrinsicCameraParameters();
                ExtrinsicCameraParameters[] ecp;

                Emgu.CV.CameraCalibration.CalibrateCamera(
                    _object_points.ToArray(),
                    _image_points.ToArray(),
                    bundle.GetImage("source").Size,
                    icp,
                    Emgu.CV.CvEnum.CALIB_TYPE.DEFAULT,
                    out ecp);

                bundle["intrinsics"] = icp;
            }
        }
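Before OnCalibrate can run, _object_points and _image_points must be filled with at least three views. A plausible accumulation step, sketched under the assumption of a 9x6 chessboard detected in a grayscale frame gray (boardPoints would be the precomputed z = 0 grid for that board; these names are not from the original example):

            PointF[] corners;
            bool found = Emgu.CV.CameraCalibration.FindChessboardCorners(
                gray, new Size(9, 6), Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH, out corners);
            if (found)
            {
                // Store one view's 2D detections and the matching 3D board model.
                _image_points.Add(corners);
                _object_points.Add(boardPoints); // MCvPoint3D32f[] with z = 0
            }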
Code example #40
        /// <summary>
        /// Estimates the transformation between the two cameras making up a stereo pair. If we have a stereo rig where the relative position and orientation of the two cameras is fixed, and we have computed the pose of an object relative to the first and to the second camera, (R1, T1) and (R2, T2) respectively (which can be done with cvFindExtrinsicCameraParams2), then those poses are related: given (R1, T1) we can compute (R2, T2) once we know the position and orientation of the 2nd camera relative to the 1st. That is what this function does. It computes (R, T) such that:
        /// R2=R*R1,
        /// T2=R*T1 + T
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="intrinsicParam1">The intrisinc parameters for camera 1, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="intrinsicParam2">The intrisinc parameters for camera 2, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="flags">Different flags</param>
        /// <param name="extrinsicParams">The extrinsic parameters which contains:
        /// R - The rotation matrix between the 1st and the 2nd cameras' coordinate systems; 
        /// T - The translation vector between the cameras' coordinate systems. </param>
        /// <param name="essentialMatrix">essential matrix</param>
        /// <param name="termCrit"> Termination criteria for the iterative optimiziation algorithm </param>
        /// <param name="foundamentalMatrix">fundamental matrix</param>
        public static void StereoCalibrate(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints1,
            PointF[][] imagePoints2,
            IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            CvEnum.CALIB_TYPE flags,
            MCvTermCriteria termCrit,
            out ExtrinsicCameraParameters extrinsicParams,
            out Matrix<double> foundamentalMatrix,
            out Matrix<double> essentialMatrix)
        {
            Debug.Assert(objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length, "The number of images for objects points should be equal to the number of images for image points");

            #region get the matrix that represent the point counts
            int[,] pointCounts = new int[objectPoints.Length, 1];
            for (int i = 0; i < objectPoints.Length; i++)
            {
                Debug.Assert(objectPoints[i].Length == imagePoints1[i].Length && objectPoints[i].Length == imagePoints2[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
                pointCounts[i, 0] = objectPoints[i].Length;
            }
            #endregion

            using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
            using (Matrix<float> imagePointMatrix1 = ToMatrix(imagePoints1))
            using (Matrix<float> imagePointMatrix2 = ToMatrix(imagePoints2))
            using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
            {
                extrinsicParams = new ExtrinsicCameraParameters();
                essentialMatrix = new Matrix<double>(3, 3);
                foundamentalMatrix = new Matrix<double>(3, 3);

                CvInvoke.cvStereoCalibrate(
                    objectPointMatrix.Ptr,
                    imagePointMatrix1.Ptr,
                    imagePointMatrix2.Ptr,
                    pointCountsMatrix.Ptr,
                    intrinsicParam1.IntrinsicMatrix,
                    intrinsicParam1.DistortionCoeffs,
                    intrinsicParam2.IntrinsicMatrix,
                    intrinsicParam2.DistortionCoeffs,
                    imageSize,
                    extrinsicParams.RotationVector,
                    extrinsicParams.TranslationVector,
                    essentialMatrix.Ptr,
                    foundamentalMatrix.Ptr,
                    termCrit,
                    flags);
            }
        }
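A call sketch for the function above; all inputs are assumed to come from paired chessboard views, and the image size and termination criteria are placeholders:

            IntrinsicCameraParameters leftIntrinsics = new IntrinsicCameraParameters();
            IntrinsicCameraParameters rightIntrinsics = new IntrinsicCameraParameters();
            ExtrinsicCameraParameters stereoExtrinsics;
            Matrix<double> fundamental, essential;

            StereoCalibrate(
                objectPoints, imagePointsLeft, imagePointsRight,
                leftIntrinsics, rightIntrinsics,
                new Size(640, 480),
                CALIB_TYPE.DEFAULT,
                new MCvTermCriteria(30, 1e-5),
                out stereoExtrinsics, out fundamental, out essential);

            // stereoExtrinsics now holds the (R, T) relating the second
            // camera's coordinate frame to the first camera's frame.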
Code example #41
        /// <summary>
        /// Estimates extrinsic camera parameters for a view from known intrinsic parameters. The coordinates of 3D object points and their corresponding 2D projections must be specified. This function also minimizes back-projection error.
        /// </summary>
        /// <param name="objectPoints">The array of object points</param>
        /// <param name="imagePoints">The array of corresponding image points</param>
        /// <param name="intrin">The intrinsic parameters</param>
        /// <returns>The extrinsic parameters</returns>
        public static ExtrinsicCameraParameters FindExtrinsicCameraParams2(
            MCvPoint3D32f[] objectPoints,
            PointF[] imagePoints,
            IntrinsicCameraParameters intrin)
        {
            ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();

            GCHandle handle1 = GCHandle.Alloc(objectPoints, GCHandleType.Pinned);
            GCHandle handle2 = GCHandle.Alloc(imagePoints, GCHandleType.Pinned);
            using (Matrix<float> objectPointMatrix = new Matrix<float>(objectPoints.Length, 3, handle1.AddrOfPinnedObject()))
            using (Matrix<float> imagePointMatrix = new Matrix<float>(imagePoints.Length, 2, handle2.AddrOfPinnedObject()))
                CvInvoke.cvFindExtrinsicCameraParams2(objectPointMatrix, imagePointMatrix, intrin.IntrinsicMatrix.Ptr, intrin.DistortionCoeffs.Ptr, p.RotationVector.Ptr, p.TranslationVector.Ptr, 0);
            handle1.Free();
            handle2.Free();

            return p;
        }
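A pose-recovery sketch using the function above; objectPoints and imagePoints are assumed to be matched correspondences for a single view of an already-calibrated camera:

            ExtrinsicCameraParameters pose = FindExtrinsicCameraParams2(objectPoints, imagePoints, intrinsics);

            // The rotation comes back in Rodrigues form; the full 3x3 matrix
            // is available through the RotationMatrix property.
            Matrix<double> rotation = pose.RotationVector.RotationMatrix;
            Matrix<double> translation = pose.TranslationVector;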
Code example #42
        /// <summary>
        /// Computes projections of 3D points to the image plane given intrinsic and extrinsic camera parameters. 
        /// Optionally, the function computes jacobians - matrices of partial derivatives of image points as functions of all the input parameters w.r.t. the particular parameters, intrinsic and/or extrinsic. 
        /// The jacobians are used during the global optimization in cvCalibrateCamera2 and cvFindExtrinsicCameraParams2. 
        /// The function itself is also used to compute the back-projection error with the current intrinsic and extrinsic parameters. 
        /// </summary>
        /// <remarks>Note, that with intrinsic and/or extrinsic parameters set to special values, the function can be used to compute just extrinsic transformation or just intrinsic transformation (i.e. distortion of a sparse set of points) </remarks>
        /// <param name="objectPoints">The array of object points.</param>
        /// <param name="extrin">Extrinsic parameters</param>
        /// <param name="intrin">Intrinsic parameters</param>
        /// <param name="mats">Optional matrix supplied in the following order: dpdrot, dpdt, dpdf, dpdc, dpddist</param>
        /// <returns>The array of image points which is the projection of <paramref name="objectPoints"/></returns>
        public static PointF[] ProjectPoints(
            MCvPoint3D32f[] objectPoints,
            ExtrinsicCameraParameters extrin,
            IntrinsicCameraParameters intrin,
            params Matrix<float>[] mats)
        {
            PointF[] imagePoints = new PointF[objectPoints.Length];

            int matsLength = mats.Length;
            GCHandle handle1 = GCHandle.Alloc(objectPoints, GCHandleType.Pinned);
            GCHandle handle2 = GCHandle.Alloc(imagePoints, GCHandleType.Pinned);
            using (Matrix<float> pointMatrix = new Matrix<float>(objectPoints.Length, 1, 3, handle1.AddrOfPinnedObject(), 3 * sizeof(float)))
            using (Matrix<float> imagePointMatrix = new Matrix<float>(imagePoints.Length, 1, 2, handle2.AddrOfPinnedObject(), 2 * sizeof(float)))
                CvInvoke.cvProjectPoints2(
                    pointMatrix,
                    extrin.RotationVector.Ptr,
                    extrin.TranslationVector.Ptr,
                    intrin.IntrinsicMatrix.Ptr,
                    intrin.DistortionCoeffs.Ptr,
                    imagePointMatrix,
                    matsLength > 0 ? mats[0].Ptr : IntPtr.Zero,
                    matsLength > 1 ? mats[1].Ptr : IntPtr.Zero,
                    matsLength > 2 ? mats[2].Ptr : IntPtr.Zero,
                    matsLength > 3 ? mats[3].Ptr : IntPtr.Zero,
                    matsLength > 4 ? mats[4].Ptr : IntPtr.Zero);
            handle1.Free();
            handle2.Free();
            return imagePoints;
        }
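ProjectPoints is also a convenient way to measure reprojection error by hand; a sketch, assuming observed holds the detected image points matching objectPoints:

            PointF[] projected = ProjectPoints(objectPoints, extrin, intrin);

            double sum = 0;
            for (int i = 0; i < projected.Length; i++)
            {
                double dx = projected[i].X - observed[i].X;
                double dy = projected[i].Y - observed[i].Y;
                sum += dx * dx + dy * dy;
            }
            // Root-mean-square reprojection error in pixels.
            double rms = Math.Sqrt(sum / projected.Length);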
Code example #43
File: Calibration.cs Project: guozanhua/ProjMap64
 private void ApplyCalibrationToUnityCamera(IntrinsicCameraParameters intrinsic, ExtrinsicCameraParameters extrinsics)
 {
     Matrix<double> rotationInverse = flipZAxis(extrinsics.RotationVector.RotationMatrix).Transpose(); // transpose is same as inverse for rotation matrix
     Matrix<double> transFinal = (rotationInverse * -1) * extrinsics.TranslationVector;
     _mainCamera.projectionMatrix = LoadProjectionMatrix((float)intrinsic.IntrinsicMatrix[0, 0], (float)intrinsic.IntrinsicMatrix[1, 1], (float)intrinsic.IntrinsicMatrix[0, 2], (float)intrinsic.IntrinsicMatrix[1, 2]);
     ApplyTranslationAndRotationToCamera(transFinal, RotationConversion.RotationMatrixToEulerZXY(rotationInverse));
 }
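LoadProjectionMatrix is not shown in this example. A common construction that maps pinhole intrinsics onto a UnityEngine.Matrix4x4 projection is sketched below; this is a hypothetical reconstruction, _width, _height, _near and _far are assumed fields, and the sign of the principal-point terms depends on the image-origin convention:

 private Matrix4x4 LoadProjectionMatrix(float fx, float fy, float cx, float cy)
 {
     // Hypothetical sketch, not the project's actual code.
     Matrix4x4 m = Matrix4x4.zero;
     m[0, 0] = 2f * fx / _width;                 // focal length, x
     m[1, 1] = 2f * fy / _height;                // focal length, y
     m[0, 2] = 1f - 2f * cx / _width;            // principal point offset, x
     m[1, 2] = 2f * cy / _height - 1f;           // principal point offset, y
     m[2, 2] = -(_far + _near) / (_far - _near); // depth remapping
     m[2, 3] = -2f * _far * _near / (_far - _near);
     m[3, 2] = -1f;                              // perspective divide
     return m;
 }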