private void CoordinateMapper_CoordinateMappingChanged(object sender, CoordinateMappingChangedEventArgs e)
{
    // Re-post the depth-frame-to-camera-space lookup table whenever the sensor's mapping changes.
    if (this.DepthFrameToCameraSpaceTable.HasSubscribers)
    {
        this.DepthFrameToCameraSpaceTable.Post(this.kinectSensor.CoordinateMapper.GetDepthFrameToCameraSpaceTable(), this.pipeline.GetCurrentTime());
    }

    if (this.configuration.OutputCalibration)
    {
        // Calibration is posted at most once (guarded by this.calibrationPosted).
        if (!this.calibrationPosted)
        {
            // Extract and create old-style calibration from the sensor.
            var kinectInternalCalibration = new KinectInternalCalibration();
            kinectInternalCalibration.RecoverCalibrationFromSensor(this.kinectSensor);

            // Copy the 3x3 color/depth camera intrinsic matrices into MathNet matrices.
            Matrix<double> colorCameraMatrix = Matrix<double>.Build.Dense(3, 3);
            Matrix<double> depthCameraMatrix = Matrix<double>.Build.Dense(3, 3);
            for (int i = 0; i < 3; i++)
            {
                for (int j = 0; j < 3; j++)
                {
                    colorCameraMatrix[i, j] = kinectInternalCalibration.colorCameraMatrix[i, j];
                    depthCameraMatrix[i, j] = kinectInternalCalibration.depthCameraMatrix[i, j];
                }
            }

            // Copy the 5-element lens distortion coefficient vectors.
            Vector<double> colorLensDistortion = Vector<double>.Build.Dense(5);
            Vector<double> depthLensDistortion = Vector<double>.Build.Dense(5);
            for (int i = 0; i < 5; i++)
            {
                colorLensDistortion[i] = kinectInternalCalibration.colorLensDistortion[i];
                depthLensDistortion[i] = kinectInternalCalibration.depthLensDistortion[i];
            }

            // Copy the 4x4 depth-to-color extrinsic transform.
            Matrix<double> depthToColorTransform = Matrix<double>.Build.Dense(4, 4);
            for (int i = 0; i < 4; i++)
            {
                for (int j = 0; j < 4; j++)
                {
                    depthToColorTransform[i, j] = kinectInternalCalibration.depthToColorTransform[i, j];
                }
            }

            // Kinect uses a basis under the hood that assumes Forward=Z, Left=X, Up=Y.
            var kinectBasis = new CoordinateSystem(default, UnitVector3D.ZAxis, UnitVector3D.XAxis, UnitVector3D.YAxis);
            // NOTE(review): this fragment is truncated at this point — the closing braces and
            // any use of kinectBasis are not visible in this chunk; do not assume what follows.
private void CoordinateMapper_CoordinateMappingChanged(object sender, CoordinateMappingChangedEventArgs e)
{
    // Re-post the depth-frame-to-camera-space lookup table whenever the sensor's mapping changes.
    if (this.DepthFrameToCameraSpaceTable.HasSubscribers)
    {
        this.DepthFrameToCameraSpaceTable.Post(this.kinectSensor.CoordinateMapper.GetDepthFrameToCameraSpaceTable(), this.pipeline.GetCurrentTime());
    }

    if (this.configuration.OutputCalibration)
    {
        // Calibration is posted at most once (guarded by this.calibrationPosted).
        if (!this.calibrationPosted)
        {
            // Extract and create old-style calibration from the sensor.
            var kinectInternalCalibration = new KinectInternalCalibration();
            kinectInternalCalibration.RecoverCalibrationFromSensor(this.kinectSensor);

            // Kinect uses a basis under the hood that assumes Forward=Z, Left=X, Up=Y.
            var kinectBasis = new CoordinateSystem(default, UnitVector3D.ZAxis, UnitVector3D.XAxis, UnitVector3D.YAxis);
            // NOTE(review): this fragment is truncated at this point — the closing braces and
            // any use of kinectBasis are not visible in this chunk; do not assume what follows.
/// <summary>
/// Refines color camera intrinsics (fx, fy, cx, cy, k1, k2) and extrinsics
/// (axis-angle rotation, translation) by minimizing reprojection error with
/// Levenberg-Marquardt, seeded from a DLT estimate.
/// </summary>
/// <param name="worldPoints">3D calibration points in world coordinates.</param>
/// <param name="imagePoints">Corresponding observed 2D image points.</param>
/// <param name="cameraMatrix">3x3 intrinsic matrix; refined in place.</param>
/// <param name="distCoeffs">Distortion coefficients; k1, k2 refined in place.</param>
/// <param name="rotation">Axis-angle rotation; estimated and refined in place.</param>
/// <param name="translation">Translation vector; estimated and refined in place.</param>
/// <param name="silent">When false, prints per-step RMS error and final parameters.</param>
/// <returns>The final RMS reprojection error.</returns>
private static double CalibrateColorCamera(List<Vector<double>> worldPoints, List<System.Drawing.PointF> imagePoints, Matrix<double> cameraMatrix, Vector<double> distCoeffs, Vector<double> rotation, Vector<double> translation, bool silent = true)
{
    int pointCount = worldPoints.Count;

    // Seed the extrinsics with a direct linear transform estimate.
    {
        DLT(cameraMatrix, distCoeffs, worldPoints, imagePoints, out Matrix<double> initialR, out Vector<double> initialT);
        RotationExtensions.MatrixToAxisAngle(initialR).CopyTo(rotation);
        initialT.CopyTo(translation);
    }

    // Parameter vector layout (12 entries):
    //   [0..3]  fx, fy, cx, cy
    //   [4..5]  k1, k2
    //   [6..8]  rotation (axis-angle)
    //   [9..11] translation
    const int parameterCount = 12;
    var parameters = Vector<double>.Build.DenseOfArray(new[]
    {
        cameraMatrix[0, 0], // fx
        cameraMatrix[1, 1], // fy
        cameraMatrix[0, 2], // cx
        cameraMatrix[1, 2], // cy
        distCoeffs[0],      // k1
        distCoeffs[1],      // k2
        rotation[0],
        rotation[1],
        rotation[2],
        translation[0],
        translation[1],
        translation[2],
    });

    // Two residual entries (x and y) per observed point.
    int residualCount = pointCount * 2;

    // Residual function handed to the optimizer: observed - projected.
    Vector<double> Residuals(Vector<double> p)
    {
        var fvec = Vector<double>.Build.Dense(residualCount);

        // Rebuild the intrinsic matrix from the current parameter estimate.
        var K = Matrix<double>.Build.DenseIdentity(3, 3);
        K[0, 0] = p[0]; // fx
        K[1, 1] = p[1]; // fy
        K[0, 2] = p[2]; // cx
        K[1, 2] = p[3]; // cy

        // Only k1 and k2 are optimized; remaining coefficients stay zero.
        var d = Vector<double>.Build.Dense(5, 0);
        d[0] = p[4];
        d[1] = p[5];

        var r = Vector<double>.Build.Dense(3);
        r[0] = p[6];
        r[1] = p[7];
        r[2] = p[8];

        var t = Vector<double>.Build.Dense(3);
        t[0] = p[9];
        t[1] = p[10];
        t[2] = p[11];

        var R = RotationExtensions.AxisAngleToMatrix(r);

        int fveci = 0;
        for (int i = 0; i < worldPoints.Count; i++)
        {
            // Transform the world point into local camera coordinates.
            var x = (R * worldPoints[i]) + t;

            // fvec_i = y_i - f(x_i)
            KinectInternalCalibration.Project(K, d, x[0], x[1], x[2], out double u, out double v);
            fvec[fveci++] = imagePoints[i].X - u;
            fvec[fveci++] = imagePoints[i].Y - v;
        }

        return fvec;
    }

    // Iterate Levenberg-Marquardt until it leaves the Running state.
    var calibrate = new LevenbergMarquardt(Residuals);
    while (calibrate.State == LevenbergMarquardt.States.Running)
    {
        var rmsError = calibrate.MinimizeOneStep(parameters);
        if (!silent)
        {
            Console.WriteLine("rms error = " + rmsError);
        }
    }

    if (!silent)
    {
        for (int i = 0; i < parameterCount; i++)
        {
            Console.WriteLine(parameters[i] + "\t");
        }

        Console.WriteLine();
    }

    // Write the refined estimate back into the caller's containers.
    cameraMatrix[0, 0] = parameters[0]; // fx
    cameraMatrix[1, 1] = parameters[1]; // fy
    cameraMatrix[0, 2] = parameters[2]; // cx
    cameraMatrix[1, 2] = parameters[3]; // cy
    distCoeffs[0] = parameters[4];      // k1
    distCoeffs[1] = parameters[5];      // k2
    rotation[0] = parameters[6];
    rotation[1] = parameters[7];
    rotation[2] = parameters[8];
    translation[0] = parameters[9];
    translation[1] = parameters[10];
    translation[2] = parameters[11];

    return calibrate.RMSError;
}
/// <summary>
/// Refines depth camera intrinsics (fx, fy, cx, cy, k1, k2) by minimizing
/// reprojection error with Levenberg-Marquardt. World points are assumed to
/// already be in the depth camera's coordinate frame (no extrinsics solved).
/// </summary>
/// <param name="worldPoints">3D points in the depth camera's coordinate frame.</param>
/// <param name="imagePoints">Corresponding observed 2D image points.</param>
/// <param name="cameraMatrix">3x3 intrinsic matrix; refined in place.</param>
/// <param name="distCoeffs">Distortion coefficients; k1, k2 refined in place.</param>
/// <param name="silent">When false, prints per-step RMS error and final parameters.</param>
/// <returns>The final RMS reprojection error.</returns>
private static double CalibrateDepthCamera(List<Vector<double>> worldPoints, List<System.Drawing.PointF> imagePoints, Matrix<double> cameraMatrix, Vector<double> distCoeffs, bool silent = true)
{
    int pointCount = worldPoints.Count;

    // Parameter vector layout (6 entries): fx, fy, cx, cy, k1, k2.
    const int parameterCount = 6;
    var parameters = Vector<double>.Build.DenseOfArray(new[]
    {
        cameraMatrix[0, 0], // fx
        cameraMatrix[1, 1], // fy
        cameraMatrix[0, 2], // cx
        cameraMatrix[1, 2], // cy
        distCoeffs[0],      // k1
        distCoeffs[1],      // k2
    });

    // Two residual entries (x and y) per observed point.
    int residualCount = pointCount * 2;

    // Residual function handed to the optimizer: observed - projected.
    Vector<double> Residuals(Vector<double> p)
    {
        var fvec = Vector<double>.Build.Dense(residualCount);

        // Rebuild the intrinsic matrix from the current parameter estimate.
        var K = Matrix<double>.Build.DenseIdentity(3, 3);
        K[0, 0] = p[0]; // fx
        K[1, 1] = p[1]; // fy
        K[0, 2] = p[2]; // cx
        K[1, 2] = p[3]; // cy

        // Only k1 and k2 are optimized; remaining coefficients stay zero.
        var d = Vector<double>.Build.Dense(5, 0);
        d[0] = p[4];
        d[1] = p[5];

        int fveci = 0;
        for (int i = 0; i < worldPoints.Count; i++)
        {
            var x = worldPoints[i];
            KinectInternalCalibration.Project(K, d, x[0], x[1], x[2], out double u, out double v);
            fvec[fveci++] = imagePoints[i].X - u;
            fvec[fveci++] = imagePoints[i].Y - v;
        }

        return fvec;
    }

    // Iterate Levenberg-Marquardt until it leaves the Running state.
    var calibrate = new LevenbergMarquardt(Residuals);
    while (calibrate.State == LevenbergMarquardt.States.Running)
    {
        var rmsError = calibrate.MinimizeOneStep(parameters);
        if (!silent)
        {
            Console.WriteLine("rms error = " + rmsError);
        }
    }

    if (!silent)
    {
        for (int i = 0; i < parameterCount; i++)
        {
            Console.WriteLine(parameters[i] + "\t");
        }

        Console.WriteLine();
    }

    // Write the refined intrinsics back into the caller's containers.
    cameraMatrix[0, 0] = parameters[0]; // fx
    cameraMatrix[1, 1] = parameters[1]; // fy
    cameraMatrix[0, 2] = parameters[2]; // cx
    cameraMatrix[1, 2] = parameters[3]; // cy
    distCoeffs[0] = parameters[4];      // k1
    distCoeffs[1] = parameters[5];      // k2

    return calibrate.RMSError;
}
/// <summary>
/// Handles the sensor's CoordinateMappingChanged event: re-posts the depth-frame-to-
/// camera-space lookup table, and (once per run, when configured) extracts the sensor
/// calibration and posts it on the Calibration stream.
/// </summary>
/// <param name="sender">The event sender.</param>
/// <param name="e">The event arguments.</param>
private void CoordinateMapper_CoordinateMappingChanged(object sender, CoordinateMappingChangedEventArgs e)
{
    // Re-post the depth-frame-to-camera-space lookup table whenever the mapping changes.
    if (this.DepthFrameToCameraSpaceTable.HasSubscribers)
    {
        this.DepthFrameToCameraSpaceTable.Post(this.kinectSensor.CoordinateMapper.GetDepthFrameToCameraSpaceTable(), this.pipeline.GetCurrentTime());
    }

    if (this.configuration.OutputCalibration)
    {
        // Calibration is posted at most once (guarded by this.calibrationPosted).
        if (!this.calibrationPosted)
        {
            // Extract and create old-style calibration from the sensor.
            var kinectInternalCalibration = new KinectInternalCalibration();
            kinectInternalCalibration.RecoverCalibrationFromSensor(this.kinectSensor);

            // Copy the 3x3 color/depth camera intrinsic matrices into MathNet matrices.
            Matrix<double> colorCameraMatrix = Matrix<double>.Build.Dense(3, 3);
            Matrix<double> depthCameraMatrix = Matrix<double>.Build.Dense(3, 3);
            for (int i = 0; i < 3; i++)
            {
                for (int j = 0; j < 3; j++)
                {
                    colorCameraMatrix[i, j] = kinectInternalCalibration.colorCameraMatrix[i, j];
                    depthCameraMatrix[i, j] = kinectInternalCalibration.depthCameraMatrix[i, j];
                }
            }

            // FIX: removed dead locals colorLensDistortion/depthLensDistortion — they were
            // copied element-by-element but never read; the KinectCalibration constructor
            // below reads the distortion coefficients directly from kinectInternalCalibration.

            // Copy the 4x4 depth-to-color extrinsic transform.
            Matrix<double> depthToColorTransform = Matrix<double>.Build.Dense(4, 4);
            for (int i = 0; i < 4; i++)
            {
                for (int j = 0; j < 4; j++)
                {
                    depthToColorTransform[i, j] = kinectInternalCalibration.depthToColorTransform[i, j];
                }
            }

            // Extract and create new-style calibration. Only the first two radial
            // distortion coefficients (k1, k2) carry over; the other terms are set to 0.
            this.kinectCalibration = new KinectCalibration(
                this.kinectSensor.ColorFrameSource.FrameDescription.Width,
                this.kinectSensor.ColorFrameSource.FrameDescription.Height,
                colorCameraMatrix,
                kinectInternalCalibration.colorLensDistortion[0],
                kinectInternalCalibration.colorLensDistortion[1],
                0.0,
                0.0,
                depthToColorTransform,
                this.kinectSensor.DepthFrameSource.FrameDescription.Width,
                this.kinectSensor.DepthFrameSource.FrameDescription.Height,
                depthCameraMatrix,
                kinectInternalCalibration.depthLensDistortion[0],
                kinectInternalCalibration.depthLensDistortion[1],
                0.0,
                0.0);

            /* Warning about comments being preceded by blank line */
#pragma warning disable SA1515
            // KinectCalibrationOld's original ToColorSpace() method flips the Y axis (height-Y). To account
            // for this we adjust our Transform.
            // Matrix<double> flipY = Matrix<double>.Build.DenseIdentity(3, 3);
            // flipY[1, 1] = -1.0;
            // flipY[1, 2] = this.kinectSensor.ColorFrameSource.FrameDescription.Height;
            // this.kinectCalibration.ColorIntrinsics.Transform = flipY * this.kinectCalibration.ColorIntrinsics.Transform;
#pragma warning restore SA1515
            this.Calibration.Post(this.kinectCalibration, this.pipeline.GetCurrentTime());
            this.calibrationPosted = true;
        }
    }
}
/// <summary>
/// Legacy (RoomAlive-style Matrix) variant: refines color camera intrinsics
/// (fx, fy, cx, cy, k1, k2) and extrinsics (Rodrigues rotation, translation) by
/// minimizing reprojection error with Levenberg-Marquardt, seeded from a DLT estimate.
/// </summary>
/// <param name="worldPoints">3D calibration points in world coordinates (3x1 matrices).</param>
/// <param name="imagePoints">Corresponding observed 2D image points.</param>
/// <param name="cameraMatrix">3x3 intrinsic matrix; refined in place.</param>
/// <param name="distCoeffs">Distortion coefficients; k1, k2 refined in place.</param>
/// <param name="rotation">Rotation vector; estimated and refined in place.</param>
/// <param name="translation">Translation vector; estimated and refined in place.</param>
/// <param name="silent">When false, prints per-step RMS error and final parameters.</param>
/// <returns>The final RMS reprojection error.</returns>
static double CalibrateColorCamera(List<Matrix> worldPoints, List<System.Drawing.PointF> imagePoints, Matrix cameraMatrix, Matrix distCoeffs, Matrix rotation, Matrix translation, bool silent)
{
    int pointCount = worldPoints.Count;

    // Seed the extrinsics with a direct linear transform estimate.
    {
        Matrix initialR, initialT;
        DLT(cameraMatrix, distCoeffs, worldPoints, imagePoints, out initialR, out initialT);
        rotation.Copy(Orientation.RotationVector(initialR));
        translation.Copy(initialT);
    }

    // Parameter vector layout (12 entries):
    //   [0..3]  fx, fy, cx, cy
    //   [4..5]  k1, k2
    //   [6..8]  rotation vector
    //   [9..11] translation
    const int parameterCount = 12;
    var parameters = new Matrix(parameterCount, 1);
    parameters[0] = cameraMatrix[0, 0]; // fx
    parameters[1] = cameraMatrix[1, 1]; // fy
    parameters[2] = cameraMatrix[0, 2]; // cx
    parameters[3] = cameraMatrix[1, 2]; // cy
    parameters[4] = distCoeffs[0];      // k1
    parameters[5] = distCoeffs[1];      // k2
    parameters[6] = rotation[0];
    parameters[7] = rotation[1];
    parameters[8] = rotation[2];
    parameters[9] = translation[0];
    parameters[10] = translation[1];
    parameters[11] = translation[2];

    // Two residual entries (x and y) per observed point.
    int residualCount = pointCount * 2;

    // Residual function handed to the optimizer: observed - projected.
    Matrix Residuals(Matrix p)
    {
        var fvec = new Matrix(residualCount, 1);

        // Rebuild the intrinsic matrix from the current parameter estimate.
        var K = Matrix.Identity(3, 3);
        K[0, 0] = p[0]; // fx
        K[1, 1] = p[1]; // fy
        K[0, 2] = p[2]; // cx
        K[1, 2] = p[3]; // cy

        // Only k1 and k2 are optimized; remaining coefficients stay zero.
        var d = Matrix.Zero(5, 1);
        d[0] = p[4];
        d[1] = p[5];

        var r = new Matrix(3, 1);
        r[0] = p[6];
        r[1] = p[7];
        r[2] = p[8];

        var t = new Matrix(3, 1);
        t[0] = p[9];
        t[1] = p[10];
        t[2] = p[11];

        var R = Orientation.Rodrigues(r);

        // Scratch vector reused across iterations; Mult presumably writes R * point
        // into x (matches the original's per-point reuse) — TODO confirm against Matrix API.
        var x = new Matrix(3, 1);
        int fveci = 0;
        for (int i = 0; i < worldPoints.Count; i++)
        {
            // Transform world point to local camera coordinates.
            x.Mult(R, worldPoints[i]);
            x.Add(t);

            // fvec_i = y_i - f(x_i)
            double u, v;
            KinectInternalCalibration.Project(K, d, x[0], x[1], x[2], out u, out v);
            fvec[fveci++] = imagePoints[i].X - u;
            fvec[fveci++] = imagePoints[i].Y - v;
        }

        return fvec;
    }

    // Iterate Levenberg-Marquardt until it leaves the Running state.
    var calibrate = new LevenbergMarquardt(Residuals);
    while (calibrate.State == LevenbergMarquardt.States.Running)
    {
        var rmsError = calibrate.MinimizeOneStep(parameters);
        if (!silent)
        {
            Console.WriteLine("rms error = " + rmsError);
        }
    }

    if (!silent)
    {
        for (int i = 0; i < parameterCount; i++)
        {
            Console.WriteLine(parameters[i] + "\t");
        }

        Console.WriteLine();
    }

    // Write the refined estimate back into the caller's containers.
    cameraMatrix[0, 0] = parameters[0]; // fx
    cameraMatrix[1, 1] = parameters[1]; // fy
    cameraMatrix[0, 2] = parameters[2]; // cx
    cameraMatrix[1, 2] = parameters[3]; // cy
    distCoeffs[0] = parameters[4];      // k1
    distCoeffs[1] = parameters[5];      // k2
    rotation[0] = parameters[6];
    rotation[1] = parameters[7];
    rotation[2] = parameters[8];
    translation[0] = parameters[9];
    translation[1] = parameters[10];
    translation[2] = parameters[11];

    return calibrate.RMSError;
}