Example #1
        public static void LeastSquares(Matrix x, Matrix A, Matrix b)
        {
            // use SVD to solve the overdetermined system A*x = b in the least-squares sense:
            // x = V * diag(1/w_j) * U^T * b
            // Numerical Recipes in C, p. 66

            int m = A.m;
            int n = A.n;

            Matrix U = new Matrix(m, n), V = new Matrix(n, n), w = new Matrix(n, 1), W = new Matrix(n, n);

            A.SVD(U, w, V);
            w.Reciprocal(); // 1/w_j; note NRC recommends zeroing these for near-zero singular values, which is not done here
            W.Diag(w);

            Matrix M = new Matrix(n, n);
            M.Mult(V, W); // M = V * diag(1/w_j)

            Matrix N = new Matrix(n, m);
            N.MultAAT(M, U); // N = M * U^T

            x.Mult(N, b); // x = V * diag(1/w_j) * U^T * b
        }
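
A minimal usage sketch (hypothetical; it assumes only the Matrix API used above): fit the line y = a*x + b to the points (0,1), (1,3), (2,5) by stacking a 3x2 design matrix and solving in the least-squares sense.

        public static void LeastSquaresExample()
        {
            var A = new Matrix(3, 2); // design matrix: row i is [x_i, 1]
            var b = new Matrix(3, 1); // observations y_i
            double[] xs = { 0, 1, 2 };
            double[] ys = { 1, 3, 5 };
            for (int i = 0; i < 3; i++)
            {
                A[i, 0] = xs[i];
                A[i, 1] = 1;
                b[i] = ys[i];
            }

            var x = new Matrix(2, 1); // receives [a; b]
            LeastSquares(x, A, b);    // exact fit here: a = 2, b = 1
        }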
Example #2
        public static void PlanarDLT(Matrix cameraMatrix, Matrix distCoeffs, List<Matrix> worldPoints, List<System.Drawing.PointF> imagePoints, Matrix Rplane, Matrix Tplane, out Matrix R, out Matrix t)
        {
            // transform world points to plane
            int n = worldPoints.Count;
            var worldPlanePoints = new List<Matrix>();

            for (int i = 0; i < n; i++)
            {
                var planePoint = new Matrix(3, 1);
                planePoint.Mult(Rplane, worldPoints[i]);
                planePoint.Add(Tplane);
                worldPlanePoints.Add(planePoint);
            }

            Matrix Rdlt, Tdlt;
            PlanarDLT(cameraMatrix, distCoeffs, worldPlanePoints, imagePoints, out Rdlt, out Tdlt);

            R = new Matrix(3, 3);
            t = new Matrix(3, 1);

            // compose the two transforms: x_cam = Rdlt*(Rplane*X + Tplane) + Tdlt
            //                                   = (Rdlt*Rplane)*X + (Rdlt*Tplane + Tdlt)
            t.Mult(Rdlt, Tplane);
            t.Add(Tdlt);

            R.Mult(Rdlt, Rplane);
        }
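
The composition above can be read off by substitution: the inner DLT recovers x_cam = Rdlt*p + Tdlt for plane points p = Rplane*X + Tplane, so x_cam = (Rdlt*Rplane)*X + (Rdlt*Tplane + Tdlt). A numeric sanity check of that identity (illustrative sketch only, using the Matrix API above):

        static void CheckComposedPose(Matrix Rplane, Matrix Tplane, Matrix Rdlt, Matrix Tdlt, Matrix X)
        {
            // two steps: plane transform, then DLT pose
            var p = new Matrix(3, 1);
            p.Mult(Rplane, X);
            p.Add(Tplane);
            var twoStep = new Matrix(3, 1);
            twoStep.Mult(Rdlt, p);
            twoStep.Add(Tdlt);

            // one step: the composed R, t returned by PlanarDLT
            var R = new Matrix(3, 3);
            var t = new Matrix(3, 1);
            R.Mult(Rdlt, Rplane);
            t.Mult(Rdlt, Tplane);
            t.Add(Tdlt);
            var oneStep = new Matrix(3, 1);
            oneStep.Mult(R, X);
            oneStep.Add(t);

            // twoStep and oneStep should agree to machine precision
        }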
        public void DepthImageToColorImage(double depthX, double depthY, double depthMeters, out double colorX, out double colorY)
        {
            double xUndistorted, yUndistorted;

            // convert to depth camera space
            float fx = (float)depthCameraMatrix[0, 0];
            float fy = (float)depthCameraMatrix[1, 1];
            float cx = (float)depthCameraMatrix[0, 2];
            float cy = (float)depthCameraMatrix[1, 2];

            float[] kappa = new float[] { (float)depthLensDistortion[0], (float)depthLensDistortion[1] };
            // flip y because our calibration expects y up (right handed coordinates at all times)
            CameraMath.Undistort(fx, fy, cx, cy, kappa, depthX, (depthImageHeight - depthY), out xUndistorted, out yUndistorted);

            var depthCamera = new Matrix(4, 1);

            depthCamera[0] = xUndistorted * depthMeters;
            depthCamera[1] = yUndistorted * depthMeters;
            depthCamera[2] = depthMeters;
            depthCamera[3] = 1;

            // convert to color camera space
            var colorCamera = new Matrix(4, 1);

            colorCamera.Mult(depthToColorTransform, depthCamera);

            // project to color image
            CameraMath.Project(colorCameraMatrix, colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorX, out colorY);

            // convert back to Y down
            colorY = colorImageHeight - colorY;
        }
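
A call sketch (hypothetical; calibration stands for an instance of the containing calibration class, with intrinsics and depthToColorTransform already populated): map the depth pixel (256, 212), measured at 1.5 m, into the color image.

        double colorX, colorY;
        calibration.DepthImageToColorImage(256.0, 212.0, 1.5, out colorX, out colorY);
        Console.WriteLine("color pixel: ({0:F1}, {1:F1})", colorX, colorY);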
        public void DepthImageToColorImage(int depthX, int depthY, double depthMeters, System.Drawing.PointF[] depthFrameToCameraSpaceTable, out double colorX, out double colorY)
        {
            double xUndistorted, yUndistorted;

            // convert to depth camera space
            // use lookup table to perform undistortion; this will be faster when converting lots of points
            // this matches the Kinect SDK's depthFrameToCameraSpace table (y down)
            var point = depthFrameToCameraSpaceTable[depthY * Kinect2Calibration.depthImageWidth + depthX];

            xUndistorted = point.X;
            yUndistorted = point.Y;

            var depthCamera = new Matrix(4, 1);

            depthCamera[0] = xUndistorted * depthMeters;
            depthCamera[1] = yUndistorted * depthMeters;
            depthCamera[2] = depthMeters;
            depthCamera[3] = 1;

            // convert to color camera space
            var colorCamera = new Matrix(4, 1);

            colorCamera.Mult(depthToColorTransform, depthCamera);

            // project to color image
            CameraMath.Project(colorCameraMatrix, colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorX, out colorY);

            // convert back to Y down
            colorY = colorImageHeight - colorY;
        }
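
When converting many points, the table variant skips the per-point undistortion. A whole-frame sketch (hypothetical names; ComputeDepthFrameToCameraSpaceTable is the same method used later in CalibrateProjectorGroups, and depthImage is assumed to be a ShortImage of millimeter depths):

        var table = calibration.ComputeDepthFrameToCameraSpaceTable(); // compute once, reuse every frame
        for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
            for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
            {
                double depthMeters = depthImage[x, y] / 1000.0;
                if (depthMeters == 0)
                    continue;
                double colorX, colorY;
                calibration.DepthImageToColorImage(x, y, depthMeters, table, out colorX, out colorY);
                // ... use (colorX, colorY)
            }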
        public void ColorImageToDepthImage(double colorX, double colorY, ShortImage depthImage, out Matrix depthPoint, out double depthX, out double depthY)
        {
            double xUndistorted, yUndistorted;

            // convert to color camera space
            float fx = (float)colorCameraMatrix[0, 0];
            float fy = (float)colorCameraMatrix[1, 1];
            float cx = (float)colorCameraMatrix[0, 2];
            float cy = (float)colorCameraMatrix[1, 2];

            float[] kappa = new float[] { (float)colorLensDistortion[0], (float)colorLensDistortion[1] };
            // flip y because our calibration expects y up (right handed coordinates at all times)
            CameraMath.Undistort(fx, fy, cx, cy, kappa, colorX, (colorImageHeight - colorY), out xUndistorted, out yUndistorted);

            var colorToDepthTransform = new Matrix(4, 4);

            colorToDepthTransform.Inverse(depthToColorTransform);

            var colorPoint = new Matrix(4, 1);

            depthPoint = new Matrix(4, 1);
            depthX = 0; depthY = 0;

            // walk along ray in color camera
            bool found = false;

            for (int s = 400; (s < 4500) && !found; s++) // TODO: confirm these limits (mm)
            {
                // convert to a 3D point along ray, in meters
                colorPoint[0] = xUndistorted * s / 1000.0;
                colorPoint[1] = yUndistorted * s / 1000.0;
                colorPoint[2] = s / 1000.0;
                colorPoint[3] = 1;

                // transform to depth camera 3D point and project
                depthPoint.Mult(colorToDepthTransform, colorPoint);
                CameraMath.Project(depthCameraMatrix, depthLensDistortion, depthPoint[0], depthPoint[1], depthPoint[2], out depthX, out depthY);

                int x = (int)depthX;
                // Y down, since we are indexing into an image
                int y = depthImageHeight - (int)depthY;
                if ((x >= 0) && (x < depthImageWidth) && (y >= 0) && (y < depthImageHeight))
                {
                    int z = depthImage[x, y];
                    if ((z != 0) && (z < s))
                    {
                        found = true;
                    }
                }
            }
            // convert back to Y down
            depthY = depthImageHeight - depthY;
        }
        public void ColorImageToDepthImage(int colorX, int colorY, ShortImage depthImage, System.Drawing.PointF[] colorFrameToCameraSpaceTable, out Matrix depthPoint, out double depthX, out double depthY)
        {
            double xUndistorted, yUndistorted;

            // convert to color camera space
            // use lookup table to perform undistortion; this will be faster when converting lots of points
            var point = colorFrameToCameraSpaceTable[colorY * Kinect2Calibration.colorImageWidth + colorX];

            xUndistorted = point.X;
            yUndistorted = point.Y;

            var colorToDepthTransform = new Matrix(4, 4);

            colorToDepthTransform.Inverse(depthToColorTransform);

            var colorPoint = new Matrix(4, 1);

            depthPoint = new Matrix(4, 1);
            depthX = 0; depthY = 0;

            // walk along ray in color camera
            bool found = false;

            for (int s = 400; (s < 4500) && !found; s++) // TODO: confirm these limits (mm)
            {
                // convert to a 3D point along ray, in meters
                colorPoint[0] = xUndistorted * s / 1000.0;
                colorPoint[1] = yUndistorted * s / 1000.0;
                colorPoint[2] = s / 1000.0;
                colorPoint[3] = 1;

                // transform to depth camera 3D point and project
                depthPoint.Mult(colorToDepthTransform, colorPoint);
                CameraMath.Project(depthCameraMatrix, depthLensDistortion, depthPoint[0], depthPoint[1], depthPoint[2], out depthX, out depthY);

                int x = (int)depthX;
                // Y down, since we are indexing into an image
                int y = depthImageHeight - (int)depthY;
                if ((x >= 0) && (x < depthImageWidth) && (y >= 0) && (y < depthImageHeight))
                {
                    int z = depthImage[x, y];
                    if ((z != 0) && (z < s))
                    {
                        found = true;
                    }
                }
            }
            // convert back to Y down
            depthY = depthImageHeight - depthY;
        }
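
A call sketch for the reverse mapping (hypothetical names; calibration is again the containing class instance): find the depth pixel behind color pixel (960, 540) given the current depth frame. Note that each call inverts depthToColorTransform and walks the 0.4-4.5 m ray against the depth image, so this suits a handful of query points rather than whole frames.

        Matrix depthPoint;
        double depthX, depthY;
        calibration.ColorImageToDepthImage(960.0, 540.0, depthImage, out depthPoint, out depthX, out depthY);
        // depthPoint is the 4x1 depth-camera-space point at which the marched ray
        // crosses the measured surface; depthX/depthY are its pixel coordinates
        Console.WriteLine("depth pixel: ({0:F1}, {1:F1})", depthX, depthY);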
Example #8
        public static void TestPlanarDLT()
        {
            var cameraMatrix = Matrix.Identity(3, 3);
            cameraMatrix[0, 0] = 300;
            cameraMatrix[1, 1] = 300;
            cameraMatrix[0, 2] = 250;
            cameraMatrix[1, 2] = 220;

            var distCoeffs = new Matrix(5, 1);
            distCoeffs[0] = 0.05;
            distCoeffs[1] = -0.1;

            // generate a bunch of points in a plane
            // project under some other camera (view)

            var R = new Matrix(3, 3);
            R.RotEuler2Matrix(0.3, -0.2, 0.6);

            var t = new Matrix(3, 1);
            t[0] = 0.2;
            t[1] = 0.3;
            t[2] = 2;

            var modelR = new Matrix(3, 3);
            modelR.RotEuler2Matrix(-0.6, 0.2, 0.3);

            var modelT = new Matrix(3, 1);
            modelT[0] = -0.1;
            modelT[1] = 1.0;
            modelT[2] = 1.5;

            var worldPoints = new List<Matrix>();
            var worldTransformedPoints = new List<Matrix>();
            var imagePoints = new List<System.Drawing.PointF>();
            var zero3 = Matrix.Zero(3, 1);

            for (float y = -1f; y <= 1.0f; y += 0.2f)
                for (float x = -1f; x <= 1.0f; x += 0.2f)
                {
                    var model = new Matrix(3, 1);
                    model[0] = x;
                    model[1] = y;
                    model[2] = 0;

                    var noise = Matrix.GaussianSample(zero3, 0.1 * 0.1);

                    var world = new Matrix(3, 1);
                    world.Mult(modelR, model);
                    world.Add(modelT);
                    world.Add(noise);
                    worldPoints.Add(world);

                    // under some camera:
                    var worldTransformed = new Matrix(3, 1);
                    worldTransformed.Mult(R, world);
                    worldTransformed.Add(t);
                    worldTransformedPoints.Add(worldTransformed);

                    double u, v;
                    Project(cameraMatrix, distCoeffs, worldTransformed[0], worldTransformed[1], worldTransformed[2], out u, out v);

                    var image = new System.Drawing.PointF();
                    image.X = (float)u;
                    image.Y = (float)v;
                    imagePoints.Add(image);
                }

            Console.WriteLine("R\n" + R);
            Console.WriteLine("t\n" + t);

            Matrix Rplane, Tplane;
            PlaneFit(worldPoints, out Rplane, out Tplane);

            Matrix Rest, test;
            PlanarDLT(cameraMatrix, distCoeffs, worldPoints, imagePoints, Rplane, Tplane, out Rest, out test);

            Console.WriteLine("Rest\n" + Rest);
            Console.WriteLine("test\n" + test);
        }
Example #9
        public static void TestDLT()
        {
            var cameraMatrix = Matrix.Identity(3, 3);
            cameraMatrix[0, 0] = 700;
            cameraMatrix[1, 1] = 700;
            cameraMatrix[0, 2] = 250;
            cameraMatrix[1, 2] = 220;

            var distCoeffs = new Matrix(5, 1);
            distCoeffs[0] = 0.05;
            distCoeffs[1] = -0.1;

            // generate a bunch of points in a volume
            // project under some other camera (view)

            var R = new Matrix(3, 3);
            R.RotEuler2Matrix(0.2, 0.3, 0.3);

            var t = new Matrix(3, 1);
            t[0] = 2;
            t[1] = 0;
            t[2] = -4;

            var modelPoints = new List<Matrix>();
            var imagePoints = new List<System.Drawing.PointF>();
            var zero3 = Matrix.Zero(3, 1);

            for (float z = 1f; z <= 3.0f; z += 0.4f)
                for (float y = -1f; y <= 1.0f; y += 0.4f)
                    for (float x = -1f; x <= 1.0f; x += 0.4f)
                    {
                        var model = new Matrix(3, 1);
                        model[0] = x;
                        model[1] = y;
                        model[2] = z;
                        modelPoints.Add(model);

                        // under our camera:
                        var transformedPoint = new Matrix(3, 1);
                        transformedPoint.Mult(R, model);
                        transformedPoint.Add(t);

                        var noise = Matrix.GaussianSample(zero3, 0.1 * 0.1);
                        transformedPoint.Add(noise);

                        double u, v;
                        Project(cameraMatrix, distCoeffs, transformedPoint[0], transformedPoint[1], transformedPoint[2], out u, out v);

                        var image = new System.Drawing.PointF();
                        image.X = (float)u;
                        image.Y = (float)v;
                        imagePoints.Add(image);
                    }

            Console.WriteLine("x = [");
            for (int i = 0; i < imagePoints.Count; i++)
                Console.WriteLine("{0} {1}", imagePoints[i].X, imagePoints[i].Y);
            Console.WriteLine("]';");

            Console.WriteLine("X = [");
            for (int i = 0; i < modelPoints.Count; i++)
                Console.WriteLine("{0} {1} {2}", modelPoints[i][0], modelPoints[i][1], modelPoints[i][2]);
            Console.WriteLine("]';");

            Console.WriteLine("fc = [{0} {1}];", cameraMatrix[0, 0], cameraMatrix[1, 1]);
            Console.WriteLine("cc = [{0} {1}];", cameraMatrix[0, 2], cameraMatrix[1, 2]);
            Console.WriteLine("kc = [{0} {1} 0 0 0];", distCoeffs[0], distCoeffs[1]);
            Console.WriteLine();

            Console.WriteLine("R\n" + R);
            Console.WriteLine("t\n" + t);

            Matrix Rest, test;
            DLT(cameraMatrix, distCoeffs, modelPoints, imagePoints, out Rest, out test);

            Console.WriteLine("Rest\n" + Rest);
            Console.WriteLine("test\n" + test);
        }
        public void RecoverCalibrationFromSensor(KinectSensor kinectSensor)
        {
            var stopWatch = new System.Diagnostics.Stopwatch();
            stopWatch.Start();

            var objectPoints1 = new List<RoomAliveToolkit.Matrix>();
            var colorPoints1 = new List<System.Drawing.PointF>();
            var depthPoints1 = new List<System.Drawing.PointF>();

            int n = 0;
            for (float x = -2f; x < 2f; x += 0.2f)
                for (float y = -2f; y < 2f; y += 0.2f)
                    for (float z = 0.4f; z < 4.5f; z += 0.4f)
                    {
                        var kinectCameraPoint = new CameraSpacePoint();
                        kinectCameraPoint.X = x;
                        kinectCameraPoint.Y = y;
                        kinectCameraPoint.Z = z;

                        // use SDK's projection
                        // adjust Y to make a right-handed coordinate system that is a projection of the Kinect 3D points
                        var kinectColorPoint = kinectSensor.CoordinateMapper.MapCameraPointToColorSpace(kinectCameraPoint);
                        kinectColorPoint.Y = colorImageHeight - kinectColorPoint.Y;
                        var kinectDepthPoint = kinectSensor.CoordinateMapper.MapCameraPointToDepthSpace(kinectCameraPoint);
                        kinectDepthPoint.Y = depthImageHeight - kinectDepthPoint.Y;

                        if ((kinectColorPoint.X >= 0) && (kinectColorPoint.X < colorImageWidth) &&
                            (kinectColorPoint.Y >= 0) && (kinectColorPoint.Y < colorImageHeight) &&
                            (kinectDepthPoint.X >= 0) && (kinectDepthPoint.X < depthImageWidth) &&
                            (kinectDepthPoint.Y >= 0) && (kinectDepthPoint.Y < depthImageHeight))
                        {
                            n++;
                            var objectPoint = new RoomAliveToolkit.Matrix(3, 1);
                            objectPoint[0] = kinectCameraPoint.X;
                            objectPoint[1] = kinectCameraPoint.Y;
                            objectPoint[2] = kinectCameraPoint.Z;
                            objectPoints1.Add(objectPoint);

                            var colorPoint = new System.Drawing.PointF();
                            colorPoint.X = kinectColorPoint.X;
                            colorPoint.Y = kinectColorPoint.Y;
                            colorPoints1.Add(colorPoint);

                            //Console.WriteLine(objectPoint[0] + "\t" + objectPoint[1] + "\t" + colorPoint.X + "\t" + colorPoint.Y);

                            var depthPoint = new System.Drawing.PointF();
                            depthPoint.X = kinectDepthPoint.X;
                            depthPoint.Y = kinectDepthPoint.Y;
                            depthPoints1.Add(depthPoint);
                        }
                    }

            colorCameraMatrix[0, 0] = 1000; //fx
            colorCameraMatrix[1, 1] = 1000; //fy
            colorCameraMatrix[0, 2] = colorImageWidth / 2; //cx
            colorCameraMatrix[1, 2] = colorImageHeight / 2; //cy
            colorCameraMatrix[2, 2] = 1;

            var rotation = new Matrix(3, 1);
            var translation = new Matrix(3, 1);
            var colorError = CalibrateColorCamera(objectPoints1, colorPoints1, colorCameraMatrix, colorLensDistortion, rotation, translation);
            //var rotationMatrix = Orientation.Rodrigues(rotation);
            var rotationMatrix = RoomAliveToolkit.ProjectorCameraEnsemble.RotationMatrixFromRotationVector(rotation);

            depthToColorTransform = Matrix.Identity(4, 4);
            for (int i = 0; i < 3; i++)
            {
                depthToColorTransform[i, 3] = translation[i];
                for (int j = 0; j < 3; j++)
                    depthToColorTransform[i, j] = rotationMatrix[i, j];
            }

            depthCameraMatrix[0, 0] = 360; //fx
            depthCameraMatrix[1, 1] = 360; //fy
            depthCameraMatrix[0, 2] = depthImageWidth / 2; //cx
            depthCameraMatrix[1, 2] = depthImageHeight / 2; //cy
            depthCameraMatrix[2, 2] = 1;

            var depthError = CalibrateDepthCamera(objectPoints1, depthPoints1, depthCameraMatrix, depthLensDistortion);

            //// latest SDK gives access to depth intrinsics directly -- this gives slightly higher projection error; not sure why
            //var depthIntrinsics = kinectSensor.CoordinateMapper.GetDepthCameraIntrinsics();
            //depthCameraMatrix[0, 0] = depthIntrinsics.FocalLengthX;
            //depthCameraMatrix[1, 1] = depthIntrinsics.FocalLengthY;
            //depthCameraMatrix[0, 2] = depthIntrinsics.PrincipalPointX;
            //depthCameraMatrix[1, 2] = depthImageHeight - depthIntrinsics.PrincipalPointY; // note flip in Y!
            //depthDistCoeffs[0] = depthIntrinsics.RadialDistortionSecondOrder;
            //depthDistCoeffs[1] = depthIntrinsics.RadialDistortionFourthOrder;

            // check projections
            double depthProjectionError = 0;
            double colorProjectionError = 0;
            var color = new RoomAliveToolkit.Matrix(4, 1);
            var testObjectPoint4 = new RoomAliveToolkit.Matrix(4, 1);
            for (int i = 0; i < n; i++)
            {
                var testObjectPoint = objectPoints1[i];
                var testDepthPoint = depthPoints1[i];
                var testColorPoint = colorPoints1[i];

                // "camera space" == depth camera space
                // depth camera projection
                double depthU, depthV;
                CameraMath.Project(depthCameraMatrix, depthLensDistortion, testObjectPoint[0], testObjectPoint[1], testObjectPoint[2], out depthU, out depthV);

                double dx = testDepthPoint.X - depthU;
                double dy = testDepthPoint.Y - depthV;
                depthProjectionError += (dx * dx) + (dy * dy);

                // color camera projection
                testObjectPoint4[0] = testObjectPoint[0];
                testObjectPoint4[1] = testObjectPoint[1];
                testObjectPoint4[2] = testObjectPoint[2];
                testObjectPoint4[3] = 1;

                color.Mult(depthToColorTransform, testObjectPoint4);
                color.Scale(1.0 / color[3]); // not necessary for this transform

                double colorU, colorV;
                CameraMath.Project(colorCameraMatrix, colorLensDistortion, color[0], color[1], color[2], out colorU, out colorV);

                dx = testColorPoint.X - colorU;
                dy = testColorPoint.Y - colorV;
                colorProjectionError += (dx * dx) + (dy * dy);
            }
            depthProjectionError /= n;
            colorProjectionError /= n;

            stopWatch.Stop();
            Console.WriteLine("FakeCalibration :");
            Console.WriteLine("n = " + n);
            Console.WriteLine("color error = " + colorError);
            Console.WriteLine("depth error = " + depthError);
            Console.WriteLine("depth reprojection error = " + depthProjectionError);
            Console.WriteLine("color reprojection error = " + colorProjectionError);
            Console.WriteLine("depth camera matrix = \n" + depthCameraMatrix);
            Console.WriteLine("depth lens distortion = \n" + depthLensDistortion);
            Console.WriteLine("color camera matrix = \n" + colorCameraMatrix);
            Console.WriteLine("color lens distortion = \n" + colorLensDistortion);

            Console.WriteLine(stopWatch.ElapsedMilliseconds + " ms");

            //// get camera space table
            //// this does not change frame to frame (or so I believe)
            //var tableEntries = kinectSensor.CoordinateMapper.GetDepthFrameToCameraSpaceTable();

            //// compute our own version of the camera space table and compare it to the SDK's
            //stopWatch.Restart();

            //var tableEntries2 = ComputeDepthFrameToCameraSpaceTable();
            //Console.WriteLine("ComputeDepthFrameToCameraSpaceTable took " + stopWatch.ElapsedMilliseconds + " ms");

            //{
            //    float error = 0;
            //    for (int framey = 0; framey < depthImageHeight; framey++)
            //        for (int framex = 0; framex < depthImageWidth; framex++)
            //        {
            //            var point1 = tableEntries[depthImageWidth * framey + framex];
            //            var point2 = tableEntries2[depthImageWidth * framey + framex];

            //            error += (float)Math.Sqrt((point1.X - point2.X) * (point1.X - point2.X) + (point1.Y - point2.Y) * (point1.Y - point2.Y));
            //        }
            //    error /= (float)(depthImageHeight * depthImageWidth);
            //    Console.WriteLine("error = " + error);
            //}
        }
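
A typical call sequence (sketch; assuming these methods belong to the Kinect2Calibration class referenced above, and that the sensor's coordinate mapper is ready when the mapping calls are made):

        var kinectSensor = KinectSensor.GetDefault();
        kinectSensor.Open();
        var calibration = new Kinect2Calibration();
        calibration.RecoverCalibrationFromSensor(kinectSensor); // prints fit and reprojection errors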
        public double MinimizeOneStep(Matrix parameters)
        {
            // initial value of the function; callee knows the size of the returned vector
            var errorVector = function(parameters);
            var error = errorVector.Dot(errorVector);

            // Jacobian; callee knows the size of the returned matrix
            var J = jacobianFunction(parameters);

            // J'*J
            var JtJ = new Matrix(parameters.Size, parameters.Size);
            //stopWatch.Restart();
            //JtJ.MultATA(J, J); // this is the big calculation that could be parallelized
            JtJ.MultATAParallel(J, J);
            //Console.WriteLine("JtJ: J size {0}x{1} {2}ms", J.Rows, J.Cols, stopWatch.ElapsedMilliseconds);

            // J'*error
            var JtError = new Matrix(parameters.Size, 1);
            //stopWatch.Restart();
            JtError.MultATA(J, errorVector); // error vector must be a column vector
            //Console.WriteLine("JtError: errorVector size {0}x{1} {2}ms", errorVector.Rows, errorVector.Cols, stopWatch.ElapsedMilliseconds);



            // allocate some space
            var JtJaugmented = new Matrix(parameters.Size, parameters.Size);
            var JtJinv = new Matrix(parameters.Size, parameters.Size);
            var delta = new Matrix(parameters.Size, 1);
            var newParameters = new Matrix(parameters.Size, 1);

            // find a value of lambda that reduces error
            double lambda = initialLambda;
            while (true)
            {
                // augment J'*J: scale the diagonal by (1 + lambda), i.e. J'*J += lambda*diag(J'*J)
                JtJaugmented.Copy(JtJ);
                for (int i = 0; i < parameters.Size; i++)
                    JtJaugmented[i, i] = (1.0 + lambda) * JtJ[i, i];

                //WriteMatrixToFile(errorVector, "errorVector");
                //WriteMatrixToFile(J, "J");
                //WriteMatrixToFile(JtJaugmented, "JtJaugmented");
                //WriteMatrixToFile(JtError, "JtError");


                // solve for delta: (J'*J + lambda*diag(J'*J))*delta = J'*error
                JtJinv.Inverse(JtJaugmented);
                delta.Mult(JtJinv, JtError);

                // new parameters = parameters - delta: fvec holds the residual y - f(x), so its
                // Jacobian is the negative of f's; solving with J'*error on the right-hand side
                // flips the sign of the usual Gauss-Newton '+ delta' update
                newParameters.Sub(parameters, delta);

                // evaluate function, compute error
                var newErrorVector = function(newParameters);
                double newError = newErrorVector.Dot(newErrorVector);

                // if error is reduced, divide lambda by 10
                bool improvement;
                if (newError < error)
                {
                    lambda /= lambdaIncrement;
                    improvement = true;
                }
                else // if not, multiply lambda by 10
                {
                    lambda *= lambdaIncrement;
                    improvement = false;
                }

                // termination criteria:
                // reduction in error is too small
                var diff = new Matrix(errorVector.Size, 1);
                diff.Sub(errorVector, newErrorVector);
                double diffSq = diff.Dot(diff);
                double errorDelta = Math.Sqrt(diffSq / error);

                if (errorDelta < minimumReduction)
                    state = States.ReductionStepTooSmall;

                // lambda is too big
                if (lambda > maximumLambda)
                    state = States.LambdaTooLarge;

                // change in parameters is too small [not implemented]

                // if we made an improvement, accept the new parameters
                if (improvement)
                {
                    parameters.Copy(newParameters);
                    error = newError;
                    break;
                }

                // if we meet termination criteria, break
                if (state != States.Running)
                    break;
            }

            rmsError = Math.Sqrt(error / errorVector.Size);
            return rmsError;
        }
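
For context, a minimal use of the optimizer on a small curve fit (sketch; the LevenbergMarquardt.Function delegate, constructor, and Minimize call mirror their use in the calibration code below, and the class is assumed to fall back to a numerical Jacobian when none is supplied): fit a and b in y = a*exp(b*x).

        static void FitExponential(double[] xs, double[] ys)
        {
            LevenbergMarquardt.Function f = delegate (Matrix p)
            {
                // residual vector: fvec_i = y_i - a*exp(b*x_i), with p = [a; b]
                var fvec = new Matrix(xs.Length, 1);
                for (int i = 0; i < xs.Length; i++)
                    fvec[i] = ys[i] - p[0] * Math.Exp(p[1] * xs[i]);
                return fvec;
            };

            var parameters = new Matrix(2, 1);
            parameters[0] = 1; // initial guess for a
            parameters[1] = 0; // initial guess for b

            var lm = new LevenbergMarquardt(f);
            lm.Minimize(parameters); // parameters now holds the fitted [a; b]
        }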
Example #14
        public static double CalibrateCameraExtrinsicsOnly(List<List<Matrix>> worldPointSets, List<List<System.Drawing.PointF>> imagePointSets,
            Matrix cameraMatrix, ref List<Matrix> rotations, ref List<Matrix> translations)
        {
            int nSets = worldPointSets.Count;
            int nPoints = 0;

            for (int i = 0; i < nSets; i++)
                nPoints += worldPointSets[i].Count; // for later

            var distCoeffs = Matrix.Zero(2, 1);


            //// if necessary run DLT on each point set to get initial rotation and translations
            //if (rotations == null)
            //{
            //    rotations = new List<Matrix>();
            //    translations = new List<Matrix>();

            //    for (int i = 0; i < nSets; i++)
            //    {
            //        Matrix R, t;
            //        CameraMath.DLT(cameraMatrix, distCoeffs, worldPointSets[i], imagePointSets[i], out R, out t);

            //        var r = CameraMath.RotationVectorFromRotationMatrix(R);

            //        rotations.Add(r);
            //        translations.Add(t);
            //    }
            //}

            // Levenberg-Marquardt for camera matrix (ignore lens distortion for now)

            // pack parameters into vector
            // parameters: camera has f, cx, cy; each point set has rotation + translation (6)
            //int nParameters = 3 + 6 * nSets;
            int nParameters = 6 * nSets;
            var parameters = new Matrix(nParameters, 1);

            {
                int pi = 0;
                //parameters[pi++] = cameraMatrix[0, 0]; // f
                //parameters[pi++] = cameraMatrix[0, 2]; // cx
                //parameters[pi++] = cameraMatrix[1, 2]; // cy
                for (int i = 0; i < nSets; i++)
                {
                    parameters[pi++] = rotations[i][0];
                    parameters[pi++] = rotations[i][1];
                    parameters[pi++] = rotations[i][2];
                    parameters[pi++] = translations[i][0];
                    parameters[pi++] = translations[i][1];
                    parameters[pi++] = translations[i][2];
                }
            }

            // size of our error vector
            int nValues = nPoints * 2; // each component (x,y) is a separate entry



            LevenbergMarquardt.Function function = delegate (Matrix p)
            {
                var fvec = new Matrix(nValues, 1);

                // unpack parameters
                int pi = 0;
                //double f = p[pi++];
                //double cx = p[pi++];
                //double cy = p[pi++];

                var K = Matrix.Identity(3, 3);
                //K[0, 0] = f;
                //K[1, 1] = f;
                //K[0, 2] = cx;
                //K[1, 2] = cy;

                K[0, 0] = cameraMatrix[0, 0];
                K[1, 1] = cameraMatrix[1, 1];
                K[0, 2] = cameraMatrix[0, 2];
                K[1, 2] = cameraMatrix[1, 2];


                var d = Matrix.Zero(2, 1);

                int fveci = 0;

                for (int i = 0; i < nSets; i++)
                {
                    var rotation = new Matrix(3, 1);
                    rotation[0] = p[pi++];
                    rotation[1] = p[pi++];
                    rotation[2] = p[pi++];
                    var R = RotationMatrixFromRotationVector(rotation);

                    var t = new Matrix(3, 1);
                    t[0] = p[pi++];
                    t[1] = p[pi++];
                    t[2] = p[pi++];

                    var worldPoints = worldPointSets[i];
                    var imagePoints = imagePointSets[i];
                    var x = new Matrix(3, 1);

                    for (int j = 0; j < worldPoints.Count; j++)
                    {
                        // transform world point to local camera coordinates
                        x.Mult(R, worldPoints[j]);
                        x.Add(t);

                        // fvec_i = y_i - f(x_i)
                        double u, v;
                        CameraMath.Project(K, d, x[0], x[1], x[2], out u, out v);

                        var imagePoint = imagePoints[j];
                        fvec[fveci++] = imagePoint.X - u;
                        fvec[fveci++] = imagePoint.Y - v;
                    }
                }
                return fvec;
            };

            // optimize
            var calibrate = new LevenbergMarquardt(function);
            calibrate.minimumReduction = 1.0e-4;
            calibrate.Minimize(parameters);

            //while (calibrate.State == LevenbergMarquardt.States.Running)
            //{
            //    var rmsError = calibrate.MinimizeOneStep(parameters);
            //    Console.WriteLine("rms error = " + rmsError);
            //}
            //for (int i = 0; i < nParameters; i++)
            //    Console.WriteLine(parameters[i] + "\t");
            //Console.WriteLine();

            // unpack parameters
            {
                int pi = 0;
                //double f = parameters[pi++];
                //double cx = parameters[pi++];
                //double cy = parameters[pi++];
                //cameraMatrix[0, 0] = f;
                //cameraMatrix[1, 1] = f;
                //cameraMatrix[0, 2] = cx;
                //cameraMatrix[1, 2] = cy;

                for (int i = 0; i < nSets; i++)
                {
                    rotations[i][0] = parameters[pi++];
                    rotations[i][1] = parameters[pi++];
                    rotations[i][2] = parameters[pi++];

                    translations[i][0] = parameters[pi++];
                    translations[i][1] = parameters[pi++];
                    translations[i][2] = parameters[pi++];
                }
            }

            return calibrate.RMSError;
        }
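
A call sketch (hypothetical data; note that rotations and translations must be pre-populated with initial estimates, since the DLT initialization above is commented out):

        var worldPointSets = new List<List<Matrix>> { worldPoints };
        var imagePointSets = new List<List<System.Drawing.PointF>> { imagePoints };

        var rotations = new List<Matrix> { initialRotationVector };       // 3x1 Rodrigues vector
        var translations = new List<Matrix> { initialTranslationVector }; // 3x1 translation

        double rms = CalibrateCameraExtrinsicsOnly(worldPointSets, imagePointSets,
            cameraMatrix, ref rotations, ref translations);
        Console.WriteLine("rms error = " + rms);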
        public void CalibrateProjectorGroups(string directory)
        {
            // for all cameras, take depth image points to color image points
            var depthImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var validMask = new ByteImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);

            foreach (var camera in cameras)
            {
                Console.WriteLine("projecting depth points to color camera " + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");
                varianceImage.LoadFromFile(cameraDirectory + "/variance.bin");
                validMask.Zero();

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();

                // TODO: consider using just one 4x4 in calibration class
                var colorCamera = new Matrix(4, 1);
                camera.colorImagePoints = new List<Matrix>();
                camera.depthCameraPoints = new List<Matrix>();
                var depthCamera4 = new Matrix(4, 1);

                // for each valid point in depth image
                int numRejected = 0;
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y += 1)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x += 1)
                    {
                        float depth = depthImage[x, y] / 1000f; // m
                        float variance = varianceImage[x, y];

                        if (depth == 0)
                            continue;
                        if (variance > 6 * 6)
                        {
                            numRejected++;
                            continue;
                        }
                        validMask[x, y] = (byte)255;

                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                        depthCamera4[0] = point.X * depth;
                        depthCamera4[1] = point.Y * depth;
                        depthCamera4[2] = depth;
                        depthCamera4[3] = 1;

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera4);
                        //colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);

                        if ((colorU >= 0) && (colorU < (Kinect2Calibration.colorImageWidth - 1)) && (colorV >= 0) && (colorV < (Kinect2Calibration.colorImageHeight - 1))) // BEWARE: later do we round or truncate??
                        {
                            var colorImagePoint = new Matrix(2, 1);
                            colorImagePoint[0] = colorU;
                            colorImagePoint[1] = colorV;
                            camera.colorImagePoints.Add(colorImagePoint);

                            // expect a 3-vector?
                            var depthCamera = new Matrix(3, 1);
                            depthCamera[0] = depthCamera4[0];
                            depthCamera[1] = depthCamera4[1];
                            depthCamera[2] = depthCamera4[2];

                            camera.depthCameraPoints.Add(depthCamera);

                            //Console.WriteLine(depthCamera[0] + "\t" + depthCamera[1] + "\t -> " + colorImagePoint[0] + "\t" + colorImagePoint[1]);
                        }

                    }
                SaveToTiff(imagingFactory, validMask, cameraDirectory + "/validMask.tiff");

                Console.WriteLine("rejected " + 100 * (float)numRejected / (float)(Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight) + "% pixels for high variance");

            }

            // colorImagePoints and depthCameraPoints are never saved, so the preceding step must be run in the same session

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // use decoded Gray code images to create calibration point sets
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                projector.calibrationPointSets = new Dictionary<Camera, CalibrationPointSet>();

                foreach (var camera in cameras)
                {
                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    LoadFromTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    LoadFromTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    LoadFromTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    // we have a bunch of color camera / depth camera point correspondences
                    // use the Gray code to find the position of the color camera point in the projector frame

                    // find 2D projector coordinates from decoded Gray code images
                    var imagePoints = new List<System.Drawing.PointF>();
                    var worldPoints = new List<Matrix>();

                    for (int i = 0; i < camera.colorImagePoints.Count; i++)
                    {
                        var colorImagePoint = camera.colorImagePoints[i];

                        // We would like to relate projected color points to color images stored in memory.
                        // The Kinect SDK and our camera calibration assumes X left, Y up (from the POV of the camera).
                        // We index images in memory with X right and Y down.
                        // Our Gray code images are flipped in the horizontal direction.
                        // Therefore to map an image space coordinate to a memory location we flip Y (and not X):
                        int x = (int)(colorImagePoint[0] + 0.5f);
                        int y = Kinect2Calibration.colorImageHeight - (int)(colorImagePoint[1] + 0.5f);

                        if ((x < 0) || (x >= Kinect2Calibration.colorImageWidth) || (y < 0) || (y >= Kinect2Calibration.colorImageHeight))
                        {
                            //Console.WriteLine("out of bounds");
                            continue;
                        }

                        if (mask[x, y] > 0) // Gray code is valid
                        {
                            // We would like to relate decoded row/column values to projector coordinates.
                            // To match the camera, we want projector's coordinate system X left, Y up (from the POV of the projector).
                            // We assume that the projector is configured in front projection mode (i.e., projected text looks correct in the real world).
                            // In that case decoded columns run X right (in the real world), decoded rows run Y down (in the real world).
                            // So we need to flip both X and Y decoded values.

                            var projectorImagePoint = new System.Drawing.PointF(projector.width - decodedColumns[x, y], projector.height - decodedRows[x, y]);
                            var depthCameraPoint = camera.depthCameraPoints[i];

                            imagePoints.Add(projectorImagePoint);
                            worldPoints.Add(depthCameraPoint);

                            //Console.WriteLine(depthCameraPoint[0] + "\t" + depthCameraPoint[1] + "\t" + depthCameraPoint[2] + "-> \t" + x + "\t" + y + "-> \t" + projectorImagePoint.X + "\t" + projectorImagePoint.Y);
                        }
                    }

                    if (worldPoints.Count > 1000)
                    {
                        var pointSet = new CalibrationPointSet();
                        pointSet.worldPoints = worldPoints;
                        pointSet.imagePoints = imagePoints;
                        projector.calibrationPointSets[camera] = pointSet;
                        Console.WriteLine("projector " + projector.name + " is seen by camera " + camera.name + " (" + worldPoints.Count + " points)");
                    }
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // calibration
            foreach (var projector in projectors)
            {
                Console.WriteLine("calibrating projector " + projector.name);

                string projectorDirectory = directory + "/projector" + projector.name;

                // RANSAC
                double minError = Double.PositiveInfinity;
                var random = new Random(0); // provide seed to ease debugging

                int numCompletedFits = 0;

                for (int i = 0; (numCompletedFits < 4) && (i < 10); i++)
                {
                    Console.WriteLine("RANSAC iteration " + i);

                    // randomly select small number of points from each calibration set
                    var worldPointSubsets = new List<List<Matrix>>();
                    var imagePointSubsets = new List<List<System.Drawing.PointF>>();

                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointSubset = new List<Matrix>();
                        var imagePointSubset = new List<System.Drawing.PointF>();

                        bool nonCoplanar = false;
                        int nTries = 0;

                        while (!nonCoplanar)
                        {
                            for (int j = 0; j < 100; j++)
                            {
                                int k = random.Next(pointSet.worldPoints.Count);
                                worldPointSubset.Add(pointSet.worldPoints[k]);
                                imagePointSubset.Add(pointSet.imagePoints[k]);
                            }

                            // check that points are not coplanar
                            Matrix X;
                            double D;
                            double ssdToPlane = PlaneFit(worldPointSubset, out X, out D);
                            int numOutliers = 0;
                            foreach (var point in worldPointSubset)
                            {
                                double distanceFromPlane = X.Dot(point) + D;
                                if (Math.Abs(distanceFromPlane) > 0.1f)
                                    numOutliers++;
                            }
                            nonCoplanar = (numOutliers > worldPointSubset.Count * 0.10f);
                            if (!nonCoplanar)
                            {
                                Console.WriteLine("points are coplanar (try #{0})", nTries);
                                worldPointSubset.Clear();
                                imagePointSubset.Clear();
                            }
                            if (nTries++ > 1000)
                            {
                                throw new CalibrationFailedException("Unable to find noncoplanar points.");
                                // consider moving this check up with variance check (when calibration point sets are formed)
                            }
                        }

                        worldPointSubsets.Add(worldPointSubset);
                        imagePointSubsets.Add(imagePointSubset);
                    }

                    var cameraMatrix = new Matrix(3, 3);
                    cameraMatrix[0, 0] = 1000; //fx TODO: can we instead init this from FOV?
                    cameraMatrix[1, 1] = 1000; //fy
                    cameraMatrix[0, 2] = projector.width / 2; //cx
                    cameraMatrix[1, 2] = 0; // projector lens shift; note this assumes desktop projection mode
                    cameraMatrix[2, 2] = 1;
                    var distCoeffs = new RoomAliveToolkit.Matrix(2, 1);
                    List<RoomAliveToolkit.Matrix> rotations = null;
                    List<RoomAliveToolkit.Matrix> translations = null;

                    var error = CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
                    Console.WriteLine("error = " + error);
                    //Console.WriteLine("intrinsics = \n" + cameraMatrix);

                    //// we differ from opencv's 'error' in that we do not distinguish between x and y.
                    //// i.e. opencv uses the method below; this number would match if we used pointsInSum2*2 in the divisor.
                    //// double check opencv's error
                    //{
                    //    double sumError2 = 0;
                    //    int pointsInSum2 = 0;
                    //    for (int ii = 0; ii < worldPointSubsets.Count; ii++)
                    //    {
                    //        var R = Orientation.Rodrigues(rotations[ii]);
                    //        var t = translations[ii];
                    //        var p = new Matrix(3, 1);

                    //        var worldPointSet = worldPointSubsets[ii];
                    //        var imagePointSet = imagePointSubsets[ii];

                    //        for (int k = 0; k < worldPointSet.Count; k++)
                    //        {
                    //            p.Mult(R, worldPointSet[k]);
                    //            p.Add(t);
                    //            double u, v;
                    //            Kinect2.Kinect2Calibration.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                    //            double dx = imagePointSet[k].X - u;
                    //            double dy = imagePointSet[k].Y - v;

                    //            double thisError = dx * dx + dy * dy;
                    //            sumError2 += thisError;
                    //            pointsInSum2++;
                    //        }
                    //    }

                    //    // opencv's error is rms but over both x and y combined

                    //    Console.WriteLine("average projection error = " + Math.Sqrt(sumError2 / (float)(pointsInSum2)));
                    //}

                    // find inliers from overall dataset
                    var worldPointInlierSets = new List<List<Matrix>>();
                    var imagePointInlierSets = new List<List<System.Drawing.PointF>>();
                    int setIndex = 0;

                    bool enoughInliers = true;
                    double sumError = 0;
                    int pointsInSum = 0;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointInlierSet = new List<Matrix>();
                        var imagePointInlierSet = new List<System.Drawing.PointF>();

                        //var R = Vision.Orientation.Rodrigues(rotations[setIndex]);
                        var R = RotationMatrixFromRotationVector(rotations[setIndex]);
                        var t = translations[setIndex];
                        var p = new Matrix(3, 1);

                        for (int k = 0; k < pointSet.worldPoints.Count; k++)
                        {
                            p.Mult(R, pointSet.worldPoints[k]);
                            p.Add(t);

                            double u, v;
                            CameraMath.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                            double dx = pointSet.imagePoints[k].X - u;
                            double dy = pointSet.imagePoints[k].Y - v;
                            double thisError = Math.Sqrt((dx * dx) + (dy * dy));

                            if (thisError < 1.0) // inlier threshold: one pixel of reprojection error
                            {
                                worldPointInlierSet.Add(pointSet.worldPoints[k]);
                                imagePointInlierSet.Add(pointSet.imagePoints[k]);
                            }
                            sumError += thisError * thisError;
                            pointsInSum++;
                        }
                        setIndex++;

                        // require that each view has a minimum number of inliers
                        enoughInliers = enoughInliers && (worldPointInlierSet.Count > 1000);

                        worldPointInlierSets.Add(worldPointInlierSet);
                        imagePointInlierSets.Add(imagePointInlierSet);

                    }

                    // if number of inliers > some threshold (should be for each subset)
                    if (enoughInliers) // should this threshold be a function of the number of cameras, a percentage?
                    {
                        var error2 = CalibrateCamera(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);

                        Console.WriteLine("error with inliers = " + error2);
                        Console.Write("camera matrix = \n" + cameraMatrix);

                        numCompletedFits++;

                        // if err < besterr save model (save rotation and translation to calibrationPointSets, cameraMatrix and distortion coeffs to projector)
                        // compare the inlier-refit error, since cameraMatrix now reflects the inlier fit
                        if (error2 < minError)
                        {
                            minError = error2;
                            projector.cameraMatrix = cameraMatrix;
                            projector.lensDistortion = distCoeffs;
                            setIndex = 0;

                            foreach (var pointSet in projector.calibrationPointSets.Values)
                            {
                                // convert to 4x4 transform
                                var R = RotationMatrixFromRotationVector(rotations[setIndex]);
                                var t = translations[setIndex];

                                var T = new Matrix(4, 4);
                                T.Identity();
                                for (int ii = 0; ii < 3; ii++)
                                {
                                    for (int jj = 0; jj < 3; jj++)
                                        T[ii, jj] = R[ii, jj];
                                    T[ii, 3] = t[ii];
                                }
                                pointSet.pose = T;
                                pointSet.worldPointInliers = worldPointInlierSets[setIndex];
                                pointSet.imagePointInliers = imagePointInlierSets[setIndex];

                                setIndex++;
                            }
                        }
                    }

                }

                if (numCompletedFits == 0)
                    throw new CalibrationFailedException("Unable to successfully calibrate projector: " + projector.name);

                Console.WriteLine("final calibration:");
                Console.Write("camera matrix = \n" + projector.cameraMatrix);
                Console.Write("distortion = \n" + projector.lensDistortion);
                Console.WriteLine("error = " + minError);

                foreach (var camera in projector.calibrationPointSets.Keys)
                {
                    Console.WriteLine("camera " + camera.name + " pose:");
                    Console.Write(projector.calibrationPointSets[camera].pose);
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);
        }
Example #16
        // Use DLT to obtain estimate of calibration rig pose; in our case this is the pose of the Kinect camera.
        // This pose estimate will provide a good initial estimate for subsequent projector calibration.
        // Note for a full PnP solution we should probably refine with Levenberg-Marquardt.
        // DLT is described in Hartley and Zisserman p. 178
        public static void DLT(Matrix cameraMatrix, Matrix distCoeffs, List <Matrix> worldPoints, List <System.Drawing.PointF> imagePoints, out Matrix R, out Matrix t)
        {
            int n = worldPoints.Count;

            var A = Matrix.Zero(2 * n, 12);

            for (int j = 0; j < n; j++)
            {
                var X          = worldPoints[j];
                var imagePoint = imagePoints[j];

                double x, y;
                Undistort(cameraMatrix, distCoeffs, imagePoint.X, imagePoint.Y, out x, out y);

                double w = 1;

                int ii = 2 * j;
                A[ii, 4] = -w * X[0];
                A[ii, 5] = -w * X[1];
                A[ii, 6] = -w * X[2];
                A[ii, 7] = -w;

                A[ii, 8]  = y * X[0];
                A[ii, 9]  = y * X[1];
                A[ii, 10] = y * X[2];
                A[ii, 11] = y;

                ii++; // next row
                A[ii, 0] = w * X[0];
                A[ii, 1] = w * X[1];
                A[ii, 2] = w * X[2];
                A[ii, 3] = w;

                A[ii, 8]  = -x * X[0];
                A[ii, 9]  = -x * X[1];
                A[ii, 10] = -x * X[2];
                A[ii, 11] = -x;
            }

            var Pcolumn = new Matrix(12, 1);
            {
                var U  = new Matrix(2 * n, 2 * n); // full SVD; alas, U is 2n x 2n, so this only supports a small number of points
                var V  = new Matrix(12, 12);
                var ww = new Matrix(12, 1);

                A.SVD(U, ww, V);

                // find smallest singular value
                int min = 0;
                ww.Minimum(ref min);

                // Pcolumn is the column of V corresponding to the smallest singular value
                Pcolumn.CopyCol(V, min);
            }

            // reshape into 3x4 projection matrix
            var P = new Matrix(3, 4);

            P.Reshape(Pcolumn);

            // x = P * X
            // P = K [ R | t ]
            // inv(K) P = [ R | t ]

            //var Kinv = new Matrix(3, 3);
            //Kinv.Inverse(cameraMatrix);
            //var Rt = new Matrix(3, 4);
            //Rt.Mult(Kinv, P);

            var Rt = new Matrix(3, 4);

            Rt.Copy(P); // P excludes the camera matrix, because the image points were undistorted to normalized coordinates above

            R = new Matrix(3, 3);
            t = new Matrix(3, 1);

            for (int ii = 0; ii < 3; ii++)
            {
                t[ii] = Rt[ii, 3];
                for (int jj = 0; jj < 3; jj++)
                {
                    R[ii, jj] = Rt[ii, jj];
                }
            }

            //R.Copy(0, 0, Rt);
            //t.CopyCol(Rt, 3);

            if (R.Det3x3() < 0)
            {
                R.Scale(-1); t.Scale(-1);
            }

            // orthogonalize R
            {
                var U  = new Matrix(3, 3);
                var Vt = new Matrix(3, 3);
                var V  = new Matrix(3, 3);
                var ww = new Matrix(3, 1);

                R.SVD(U, ww, V);
                Vt.Transpose(V);

                R.Mult(U, Vt);
                double s = ww.Sum() / 3.0; // P is recovered only up to scale; the mean singular value of R estimates that scale
                t.Scale(1.0 / s);
            }

            // compute error? (see the reprojection-error sketch below)
        }
Example #17
        public static double CalibrateCameraExtrinsicsOnly(List <List <Matrix> > worldPointSets, List <List <System.Drawing.PointF> > imagePointSets,
                                                           Matrix cameraMatrix, ref List <Matrix> rotations, ref List <Matrix> translations)
        {
            int nSets   = worldPointSets.Count;
            int nPoints = 0;

            for (int i = 0; i < nSets; i++)
            {
                nPoints += worldPointSets[i].Count; // for later
            }
            var distCoeffs = Matrix.Zero(2, 1);


            //// if necessary run DLT on each point set to get initial rotation and translations
            //if (rotations == null)
            //{
            //    rotations = new List<Matrix>();
            //    translations = new List<Matrix>();

            //    for (int i = 0; i < nSets; i++)
            //    {
            //        Matrix R, t;
            //        CameraMath.DLT(cameraMatrix, distCoeffs, worldPointSets[i], imagePointSets[i], out R, out t);

            //        var r = CameraMath.RotationVectorFromRotationMatrix(R);

            //        rotations.Add(r);
            //        translations.Add(t);
            //    }
            //}

            // Levenberg-Marquardt for camera matrix (ignore lens distortion for now)

            // pack parameters into vector
            // parameters: camera has f, cx, cy; each point set has rotation + translation (6)
            //int nParameters = 3 + 6 * nSets;
            int nParameters = 6 * nSets;
            var parameters  = new Matrix(nParameters, 1);

            {
                int pi = 0;
                //parameters[pi++] = cameraMatrix[0, 0]; // f
                //parameters[pi++] = cameraMatrix[0, 2]; // cx
                //parameters[pi++] = cameraMatrix[1, 2]; // cy
                for (int i = 0; i < nSets; i++)
                {
                    parameters[pi++] = rotations[i][0];
                    parameters[pi++] = rotations[i][1];
                    parameters[pi++] = rotations[i][2];
                    parameters[pi++] = translations[i][0];
                    parameters[pi++] = translations[i][1];
                    parameters[pi++] = translations[i][2];
                }
            }

            // size of our error vector
            int nValues = nPoints * 2; // each component (x,y) is a separate entry



            LevenbergMarquardt.Function function = delegate(Matrix p)
            {
                var fvec = new Matrix(nValues, 1);

                // unpack parameters
                int pi = 0;
                //double f = p[pi++];
                //double cx = p[pi++];
                //double cy = p[pi++];

                var K = Matrix.Identity(3, 3);
                //K[0, 0] = f;
                //K[1, 1] = f;
                //K[0, 2] = cx;
                //K[1, 2] = cy;

                K[0, 0] = cameraMatrix[0, 0];
                K[1, 1] = cameraMatrix[1, 1];
                K[0, 2] = cameraMatrix[0, 2];
                K[1, 2] = cameraMatrix[1, 2];


                var d = Matrix.Zero(2, 1);

                int fveci = 0;

                for (int i = 0; i < nSets; i++)
                {
                    var rotation = new Matrix(3, 1);
                    rotation[0] = p[pi++];
                    rotation[1] = p[pi++];
                    rotation[2] = p[pi++];
                    var R = RotationMatrixFromRotationVector(rotation);

                    var t = new Matrix(3, 1);
                    t[0] = p[pi++];
                    t[1] = p[pi++];
                    t[2] = p[pi++];

                    var worldPoints = worldPointSets[i];
                    var imagePoints = imagePointSets[i];
                    var x           = new Matrix(3, 1);

                    for (int j = 0; j < worldPoints.Count; j++)
                    {
                        // transform world point to local camera coordinates
                        x.Mult(R, worldPoints[j]);
                        x.Add(t);

                        // fvec_i = y_i - f(x_i)
                        double u, v;
                        CameraMath.Project(K, d, x[0], x[1], x[2], out u, out v);

                        var imagePoint = imagePoints[j];
                        fvec[fveci++] = imagePoint.X - u;
                        fvec[fveci++] = imagePoint.Y - v;
                    }
                }
                return(fvec);
            };

            // optimize
            var calibrate = new LevenbergMarquardt(function);

            calibrate.minimumReduction = 1.0e-4;
            calibrate.Minimize(parameters);

            //while (calibrate.State == LevenbergMarquardt.States.Running)
            //{
            //    var rmsError = calibrate.MinimizeOneStep(parameters);
            //    Console.WriteLine("rms error = " + rmsError);
            //}
            //for (int i = 0; i < nParameters; i++)
            //    Console.WriteLine(parameters[i] + "\t");
            //Console.WriteLine();

            // unpack parameters
            {
                int pi = 0;
                //double f = parameters[pi++];
                //double cx = parameters[pi++];
                //double cy = parameters[pi++];
                //cameraMatrix[0, 0] = f;
                //cameraMatrix[1, 1] = f;
                //cameraMatrix[0, 2] = cx;
                //cameraMatrix[1, 2] = cy;

                for (int i = 0; i < nSets; i++)
                {
                    rotations[i][0] = parameters[pi++];
                    rotations[i][1] = parameters[pi++];
                    rotations[i][2] = parameters[pi++];

                    translations[i][0] = parameters[pi++];
                    translations[i][1] = parameters[pi++];
                    translations[i][2] = parameters[pi++];
                }
            }

            return(calibrate.RMSError);
        }
Example #18
        public static Matrix Homography(List <Matrix> worldPoints, List <System.Drawing.PointF> imagePoints)
        {
            int n = worldPoints.Count;

            // normalize image coordinates
            var mu = new Matrix(2, 1);

            for (int i = 0; i < n; i++)
            {
                mu[0] += imagePoints[i].X;
                mu[1] += imagePoints[i].Y;
            }
            mu.Scale(1.0 / n);
            var muAbs = new Matrix(2, 1);

            for (int i = 0; i < n; i++)
            {
                muAbs[0] += Math.Abs(imagePoints[i].X - mu[0]);
                muAbs[1] += Math.Abs(imagePoints[i].Y - mu[1]);
            }
            muAbs.Scale(1.0 / n);

            var Hnorm = Matrix.Identity(3, 3);

            Hnorm[0, 0] = 1 / muAbs[0];
            Hnorm[1, 1] = 1 / muAbs[1];
            Hnorm[0, 2] = -mu[0] / muAbs[0];
            Hnorm[1, 2] = -mu[1] / muAbs[1];

            var invHnorm = Matrix.Identity(3, 3);

            invHnorm[0, 0] = muAbs[0];
            invHnorm[1, 1] = muAbs[1];
            invHnorm[0, 2] = mu[0];
            invHnorm[1, 2] = mu[1];


            var A = Matrix.Zero(2 * n, 9);

            for (int i = 0; i < n; i++)
            {
                var X          = worldPoints[i];
                var imagePoint = imagePoints[i];

                var x = new Matrix(3, 1);
                x[0] = imagePoint.X;
                x[1] = imagePoint.Y;
                x[2] = 1;

                var xn = new Matrix(3, 1);
                xn.Mult(Hnorm, x);

                // Zhang's formulation; Hartley's is similar
                int ii = 2 * i;
                A[ii, 0] = X[0];
                A[ii, 1] = X[1];
                A[ii, 2] = 1;

                A[ii, 6] = -xn[0] * X[0];
                A[ii, 7] = -xn[0] * X[1];
                A[ii, 8] = -xn[0];

                ii++; // next row
                A[ii, 3] = X[0];
                A[ii, 4] = X[1];
                A[ii, 5] = 1;

                A[ii, 6] = -xn[1] * X[0];
                A[ii, 7] = -xn[1] * X[1];
                A[ii, 8] = -xn[1];
            }

            // h is the eigenvector of ATA with the smallest eigenvalue
            var h = new Matrix(9, 1);
            {
                var ATA = new Matrix(9, 9);
                ATA.MultATA(A, A);

                var V  = new Matrix(9, 9);
                var ww = new Matrix(9, 1);
                ATA.Eig(V, ww);

                h.CopyCol(V, 0);
            }

            var Hn = new Matrix(3, 3);

            Hn.Reshape(h);

            var H = new Matrix(3, 3);

            H.Mult(invHnorm, Hn);

            return(H);
        }
Example #19
        static public void PlaneFit(IList <Matrix> X, out Matrix R, out Matrix t, out Matrix d2)
        {
            int n = X.Count;

            var mu = new Matrix(3, 1);

            for (int i = 0; i < n; i++)
            {
                mu.Add(X[i]);
            }
            mu.Scale(1f / (float)n);

            var A  = new Matrix(3, 3);
            var xc = new Matrix(3, 1);
            var M  = new Matrix(3, 3);

            for (int i = 0; i < X.Count; i++)
            {
                var x = X[i];
                xc.Sub(x, mu);
                M.Outer(xc, xc);
                A.Add(M);
            }
            var V = new Matrix(3, 3);
            var d = new Matrix(3, 1);

            A.Eig(V, d); // eigenvalues in ascending order

            // arrange in descending order so that z = 0
            var V2 = new Matrix(3, 3);

            for (int i = 0; i < 3; i++)
            {
                V2[i, 2] = V[i, 0];
                V2[i, 1] = V[i, 1];
                V2[i, 0] = V[i, 2];
            }

            d2    = new Matrix(3, 1);
            d2[2] = d[0];
            d2[1] = d[1];
            d2[0] = d[2];

            R = new Matrix(3, 3);
            R.Transpose(V2);

            if (R.Det3x3() < 0)
            {
                R.Scale(-1);
            }

            t = new Matrix(3, 1);
            t.Mult(R, mu);
            t.Scale(-1);

            // eigenvalues are the sum of squared distances in each direction
            // i.e., min eigenvalue is the sum of squared distances to the plane = d2[2]

            // compute the distance to the plane by transforming to the plane and take z-coordinate:
            // xPlane = R*x + t; distance = xPlane[2]
        }
Example #20
        public static void TestPlanarDLT()
        {
            var cameraMatrix = Matrix.Identity(3, 3);

            cameraMatrix[0, 0] = 300;
            cameraMatrix[1, 1] = 300;
            cameraMatrix[0, 2] = 250;
            cameraMatrix[1, 2] = 220;

            var distCoeffs = new Matrix(5, 1);

            distCoeffs[0] = 0.05;
            distCoeffs[1] = -0.1;

            // generate a bunch of points in a plane
            // project under some other camera (view)

            var R = new Matrix(3, 3);

            R.RotEuler2Matrix(0.3, -0.2, 0.6);

            var t = new Matrix(3, 1);

            t[0] = 0.2;
            t[1] = 0.3;
            t[2] = 2;

            var modelR = new Matrix(3, 3);

            modelR.RotEuler2Matrix(-0.6, 0.2, 0.3);

            var modelT = new Matrix(3, 1);

            modelT[0] = -0.1;
            modelT[1] = 1.0;
            modelT[2] = 1.5;

            var worldPoints            = new List <Matrix>();
            var worldTransformedPoints = new List <Matrix>();
            var imagePoints            = new List <System.Drawing.PointF>();
            var zero3 = Matrix.Zero(3, 1);

            for (float y = -1f; y <= 1.0f; y += 0.2f)
            {
                for (float x = -1f; x <= 1.0f; x += 0.2f)
                {
                    var model = new Matrix(3, 1);
                    model[0] = x;
                    model[1] = y;
                    model[2] = 0;

                    var noise = Matrix.GaussianSample(zero3, 0.1 * 0.1);

                    var world = new Matrix(3, 1);
                    world.Mult(modelR, model);
                    world.Add(modelT);
                    world.Add(noise);
                    worldPoints.Add(world);

                    // under some camera:
                    var worldTransformed = new Matrix(3, 1);
                    worldTransformed.Mult(R, world);
                    worldTransformed.Add(t);
                    worldTransformedPoints.Add(worldTransformed);

                    double u, v;
                    Project(cameraMatrix, distCoeffs, worldTransformed[0], worldTransformed[1], worldTransformed[2], out u, out v);

                    var image = new System.Drawing.PointF();
                    image.X = (float)u;
                    image.Y = (float)v;
                    imagePoints.Add(image);
                }
            }

            Console.WriteLine("R\n" + R);
            Console.WriteLine("t\n" + t);

            var Rplane = new Matrix(3, 1);
            var Tplane = new Matrix(3, 1);

            PlaneFit(worldPoints, out Rplane, out Tplane);

            var Rest = new Matrix(3, 3);
            var test = new Matrix(3, 1);

            PlanarDLT(cameraMatrix, distCoeffs, worldPoints, imagePoints, Rplane, Tplane, out Rest, out test);

            Console.WriteLine("Rest\n" + Rest);
            Console.WriteLine("test\n" + test);
        }
Example #21
        static public void PlaneFit(IList<Matrix> X, out Matrix R, out Matrix t, out Matrix d2)
        {
            int n = X.Count;

            var mu = new Matrix(3, 1);
            for (int i = 0; i < n; i++)
                mu.Add(X[i]);
            mu.Scale(1f / (float)n);

            var A = new Matrix(3, 3);
            var xc = new Matrix(3, 1);
            var M = new Matrix(3, 3);
            for (int i = 0; i < X.Count; i++)
            {
                var x = X[i];
                xc.Sub(x, mu);
                M.Outer(xc, xc);
                A.Add(M);
            }
            var V = new Matrix(3, 3);
            var d = new Matrix(3, 1);
            A.Eig(V, d); // eigenvalues in ascending order

            // arrange in descending order so that z = 0
            var V2 = new Matrix(3, 3);
            for (int i = 0; i < 3; i++)
            {
                V2[i, 2] = V[i, 0];
                V2[i, 1] = V[i, 1];
                V2[i, 0] = V[i, 2];
            }

            d2 = new Matrix(3, 1);
            d2[2] = d[0];
            d2[1] = d[1];
            d2[0] = d[2];

            R = new Matrix(3, 3);
            R.Transpose(V2);

            if (R.Det3x3() < 0)
                R.Scale(-1);

            t = new Matrix(3, 1);
            t.Mult(R, mu);
            t.Scale(-1);

            // eigenvalues are the sum of squared distances in each direction
            // i.e., min eigenvalue is the sum of squared distances to the plane = d2[2]

            // compute the distance to the plane by transforming to the plane and take z-coordinate:
            // xPlane = R*x + t; distance = xPlane[2]
        }
Example #22
        public static Matrix Homography(List<Matrix> worldPoints, List<System.Drawing.PointF> imagePoints)
        {
            int n = worldPoints.Count;

            // normalize image coordinates
            var mu = new Matrix(2, 1);
            for (int i = 0; i < n; i++)
            {
                mu[0] += imagePoints[i].X;
                mu[1] += imagePoints[i].Y;
            }
            mu.Scale(1.0 / n);
            var muAbs = new Matrix(2, 1);
            for (int i = 0; i < n; i++)
            {
                muAbs[0] += Math.Abs(imagePoints[i].X - mu[0]);
                muAbs[1] += Math.Abs(imagePoints[i].Y - mu[1]);
            }
            muAbs.Scale(1.0 / n);

            var Hnorm = Matrix.Identity(3, 3);
            Hnorm[0, 0] = 1 / muAbs[0];
            Hnorm[1, 1] = 1 / muAbs[1];
            Hnorm[0, 2] = -mu[0] / muAbs[0];
            Hnorm[1, 2] = -mu[1] / muAbs[1];

            var invHnorm = Matrix.Identity(3, 3);
            invHnorm[0, 0] = muAbs[0];
            invHnorm[1, 1] = muAbs[1];
            invHnorm[0, 2] = mu[0];
            invHnorm[1, 2] = mu[1];


            var A = Matrix.Zero(2 * n, 9);
            for (int i = 0; i < n; i++)
            {
                var X = worldPoints[i];
                var imagePoint = imagePoints[i];

                var x = new Matrix(3, 1);
                x[0] = imagePoint.X;
                x[1] = imagePoint.Y;
                x[2] = 1;

                var xn = new Matrix(3, 1);
                xn.Mult(Hnorm, x);
 
                // Zhang's formulation; Hartley's is similar
                int ii = 2 * i;
                A[ii, 0] = X[0];
                A[ii, 1] = X[1];
                A[ii, 2] = 1;

                A[ii, 6] = -xn[0] * X[0];
                A[ii, 7] = -xn[0] * X[1];
                A[ii, 8] = -xn[0];

                ii++; // next row
                A[ii, 3] = X[0];
                A[ii, 4] = X[1];
                A[ii, 5] = 1;

                A[ii, 6] = -xn[1] * X[0];
                A[ii, 7] = -xn[1] * X[1];
                A[ii, 8] = -xn[1];
            }

            // h is the eigenvector of ATA with the smallest eigenvalue
            var h = new Matrix(9, 1);
            {
                var ATA = new Matrix(9, 9);
                ATA.MultATA(A, A);

                var V = new Matrix(9, 9);
                var ww = new Matrix(9, 1);
                ATA.Eig(V, ww);

                h.CopyCol(V, 0);
            }

            var Hn = new Matrix(3, 3);
            Hn.Reshape(h);

            var H = new Matrix(3, 3);
            H.Mult(invHnorm, Hn);

            return H;
        }
Example #23
        public void RecoverCalibrationFromSensor(KinectSensor kinectSensor)
        {
            var stopWatch = new System.Diagnostics.Stopwatch();

            stopWatch.Start();

            var objectPoints1 = new List <RoomAliveToolkit.Matrix>();
            var colorPoints1  = new List <System.Drawing.PointF>();
            var depthPoints1  = new List <System.Drawing.PointF>();

            int n = 0;

            for (float x = -2f; x < 2f; x += 0.2f)
            {
                for (float y = -2f; y < 2f; y += 0.2f)
                {
                    for (float z = 0.4f; z < 4.5f; z += 0.4f)
                    {
                        var kinectCameraPoint = new CameraSpacePoint();
                        kinectCameraPoint.X = x;
                        kinectCameraPoint.Y = y;
                        kinectCameraPoint.Z = z;

                        // use the SDK's projection
                        // adjust Y to make a right-handed coordinate system that is a projection of Kinect 3D points
                        var kinectColorPoint = kinectSensor.CoordinateMapper.MapCameraPointToColorSpace(kinectCameraPoint);
                        kinectColorPoint.Y = colorImageHeight - kinectColorPoint.Y;
                        var kinectDepthPoint = kinectSensor.CoordinateMapper.MapCameraPointToDepthSpace(kinectCameraPoint);
                        kinectDepthPoint.Y = depthImageHeight - kinectDepthPoint.Y;

                        if ((kinectColorPoint.X >= 0) && (kinectColorPoint.X < colorImageWidth) &&
                            (kinectColorPoint.Y >= 0) && (kinectColorPoint.Y < colorImageHeight) &&
                            (kinectDepthPoint.X >= 0) && (kinectDepthPoint.X < depthImageWidth) &&
                            (kinectDepthPoint.Y >= 0) && (kinectDepthPoint.Y < depthImageHeight))
                        {
                            n++;
                            var objectPoint = new RoomAliveToolkit.Matrix(3, 1);
                            objectPoint[0] = kinectCameraPoint.X;
                            objectPoint[1] = kinectCameraPoint.Y;
                            objectPoint[2] = kinectCameraPoint.Z;
                            objectPoints1.Add(objectPoint);

                            var colorPoint = new System.Drawing.PointF();
                            colorPoint.X = kinectColorPoint.X;
                            colorPoint.Y = kinectColorPoint.Y;
                            colorPoints1.Add(colorPoint);


                            //Console.WriteLine(objectPoint[0] + "\t" + objectPoint[1] + "\t" + colorPoint.X + "\t" + colorPoint.Y);

                            var depthPoint = new System.Drawing.PointF();
                            depthPoint.X = kinectDepthPoint.X;
                            depthPoint.Y = kinectDepthPoint.Y;
                            depthPoints1.Add(depthPoint);
                        }
                    }
                }
            }

            colorCameraMatrix[0, 0] = 1000;                 //fx
            colorCameraMatrix[1, 1] = 1000;                 //fy
            colorCameraMatrix[0, 2] = colorImageWidth / 2;  //cx
            colorCameraMatrix[1, 2] = colorImageHeight / 2; //cy
            colorCameraMatrix[2, 2] = 1;

            var rotation    = new Matrix(3, 1);
            var translation = new Matrix(3, 1);
            var colorError  = CalibrateColorCamera(objectPoints1, colorPoints1, colorCameraMatrix, colorLensDistortion, rotation, translation);
            //var rotationMatrix = Orientation.Rodrigues(rotation);
            var rotationMatrix = RoomAliveToolkit.ProjectorCameraEnsemble.RotationMatrixFromRotationVector(rotation);

            depthToColorTransform = Matrix.Identity(4, 4);
            for (int i = 0; i < 3; i++)
            {
                depthToColorTransform[i, 3] = translation[i];
                for (int j = 0; j < 3; j++)
                {
                    depthToColorTransform[i, j] = rotationMatrix[i, j];
                }
            }


            depthCameraMatrix[0, 0] = 360;                  //fx
            depthCameraMatrix[1, 1] = 360;                  //fy
            depthCameraMatrix[0, 2] = depthImageWidth / 2;  //cx
            depthCameraMatrix[1, 2] = depthImageHeight / 2; //cy
            depthCameraMatrix[2, 2] = 1;

            var depthError = CalibrateDepthCamera(objectPoints1, depthPoints1, depthCameraMatrix, depthLensDistortion);

            //// latest SDK gives access to depth intrinsics directly -- this gives slightly higher projection error; not sure why
            //var depthIntrinsics = kinectSensor.CoordinateMapper.GetDepthCameraIntrinsics();
            //depthCameraMatrix[0, 0] = depthIntrinsics.FocalLengthX;
            //depthCameraMatrix[1, 1] = depthIntrinsics.FocalLengthY;
            //depthCameraMatrix[0, 2] = depthIntrinsics.PrincipalPointX;
            //depthCameraMatrix[1, 2] = depthImageHeight - depthIntrinsics.PrincipalPointY; // note flip in Y!
            //depthDistCoeffs[0] = depthIntrinsics.RadialDistortionSecondOrder;
            //depthDistCoeffs[1] = depthIntrinsics.RadialDistortionFourthOrder;


            // check projections
            double depthProjectionError = 0;
            double colorProjectionError = 0;
            var    color            = new RoomAliveToolkit.Matrix(4, 1);
            var    testObjectPoint4 = new RoomAliveToolkit.Matrix(4, 1);

            for (int i = 0; i < n; i++)
            {
                var testObjectPoint = objectPoints1[i];
                var testDepthPoint  = depthPoints1[i];
                var testColorPoint  = colorPoints1[i];

                // "camera space" == depth camera space
                // depth camera projection
                double depthU, depthV;
                CameraMath.Project(depthCameraMatrix, depthLensDistortion, testObjectPoint[0], testObjectPoint[1], testObjectPoint[2], out depthU, out depthV);

                double dx = testDepthPoint.X - depthU;
                double dy = testDepthPoint.Y - depthV;
                depthProjectionError += (dx * dx) + (dy * dy);

                // color camera projection
                testObjectPoint4[0] = testObjectPoint[0];
                testObjectPoint4[1] = testObjectPoint[1];
                testObjectPoint4[2] = testObjectPoint[2];
                testObjectPoint4[3] = 1;

                color.Mult(depthToColorTransform, testObjectPoint4);
                color.Scale(1.0 / color[3]); // not necessary for this transform

                double colorU, colorV;
                CameraMath.Project(colorCameraMatrix, colorLensDistortion, color[0], color[1], color[2], out colorU, out colorV);

                dx = testColorPoint.X - colorU;
                dy = testColorPoint.Y - colorV;
                colorProjectionError += (dx * dx) + (dy * dy);
            }
            depthProjectionError /= n; // mean squared error, in pixels^2
            colorProjectionError /= n;


            stopWatch.Stop();
            Console.WriteLine("FakeCalibration :");
            Console.WriteLine("n = " + n);
            Console.WriteLine("color error = " + colorError);
            Console.WriteLine("depth error = " + depthError);
            Console.WriteLine("depth reprojection error = " + depthProjectionError);
            Console.WriteLine("color reprojection error = " + colorProjectionError);
            Console.WriteLine("depth camera matrix = \n" + depthCameraMatrix);
            Console.WriteLine("depth lens distortion = \n" + depthLensDistortion);
            Console.WriteLine("color camera matrix = \n" + colorCameraMatrix);
            Console.WriteLine("color lens distortion = \n" + colorLensDistortion);

            Console.WriteLine(stopWatch.ElapsedMilliseconds + " ms");


            //// get camera space table
            //// this does not change frame to frame (or so I believe)
            //var tableEntries = kinectSensor.CoordinateMapper.GetDepthFrameToCameraSpaceTable();

            //// compute our own version of the camera space table and compare it to the SDK's
            //stopWatch.Restart();

            //var tableEntries2 = ComputeDepthFrameToCameraSpaceTable();
            //Console.WriteLine("ComputeDepthFrameToCameraSpaceTable took " + stopWatch.ElapsedMilliseconds + " ms");

            //{
            //    float error = 0;
            //    for (int framey = 0; framey < depthImageHeight; framey++)
            //        for (int framex = 0; framex < depthImageWidth; framex++)
            //        {
            //            var point1 = tableEntries[depthImageWidth * framey + framex];
            //            var point2 = tableEntries2[depthImageWidth * framey + framex];

            //            error += (float)Math.Sqrt((point1.X - point2.X) * (point1.X - point2.X) + (point1.Y - point2.Y) * (point1.Y - point2.Y));
            //        }
            //    error /= (float)(depthImageHeight * depthImageWidth);
            //    Console.WriteLine("error = " + error);
            //}
        }
Example #24
        public double MinimizeOneStep(Matrix parameters)
        {
            // initial value of the function; callee knows the size of the returned vector
            var errorVector = function(parameters);
            var error       = errorVector.Dot(errorVector);

            // Jacobian; callee knows the size of the returned matrix
            var J = jacobianFunction(parameters);

            // J'*J
            var JtJ = new Matrix(parameters.Size, parameters.Size);

            //stopWatch.Restart();
            //JtJ.MultATA(J, J); // this is the big calculation that could be parallelized
            JtJ.MultATAParallel(J, J);
            //Console.WriteLine("JtJ: J size {0}x{1} {2}ms", J.Rows, J.Cols, stopWatch.ElapsedMilliseconds);

            // J'*error
            var JtError = new Matrix(parameters.Size, 1);

            //stopWatch.Restart();
            JtError.MultATA(J, errorVector); // error vector must be a column vector
            //Console.WriteLine("JtError: errorVector size {0}x{1} {2}ms", errorVector.Rows, errorVector.Cols, stopWatch.ElapsedMilliseconds);



            // allocate some space
            var JtJaugmented  = new Matrix(parameters.Size, parameters.Size);
            var JtJinv        = new Matrix(parameters.Size, parameters.Size);
            var delta         = new Matrix(parameters.Size, 1);
            var newParameters = new Matrix(parameters.Size, 1);

            // find a value of lambda that reduces error
            double lambda = initialLambda;

            while (true)
            {
                // augment J'*J: J'*J += lambda*diag(J'*J)
                JtJaugmented.Copy(JtJ);
                for (int i = 0; i < parameters.Size; i++)
                {
                    JtJaugmented[i, i] = (1.0 + lambda) * JtJ[i, i];
                }

                //WriteMatrixToFile(errorVector, "errorVector");
                //WriteMatrixToFile(J, "J");
                //WriteMatrixToFile(JtJaugmented, "JtJaugmented");
                //WriteMatrixToFile(JtError, "JtError");


                // solve for delta: (J'*J + lambda*diag(J'*J))*delta = J'*error
                JtJinv.Inverse(JtJaugmented);
                delta.Mult(JtJinv, JtError);

                // new parameters = parameters - delta: the Gauss-Newton step is p - (J'J)^-1 * J'*error, and delta was computed without the minus sign
                newParameters.Sub(parameters, delta);

                // evaluate function, compute error
                var    newErrorVector = function(newParameters);
                double newError       = newErrorVector.Dot(newErrorVector);

                // if error is reduced, divide lambda by 10
                bool improvement;
                if (newError < error)
                {
                    lambda     /= lambdaIncrement;
                    improvement = true;
                }
                else // if not, multiply lambda by 10
                {
                    lambda     *= lambdaIncrement;
                    improvement = false;
                }

                // termination criteria:
                // reduction in error is too small
                var diff = new Matrix(errorVector.Size, 1);
                diff.Sub(errorVector, newErrorVector);
                double diffSq     = diff.Dot(diff);
                double errorDelta = Math.Sqrt(diffSq / error);

                if (errorDelta < minimumReduction)
                {
                    state = States.ReductionStepTooSmall;
                }

                // lambda is too big
                if (lambda > maximumLambda)
                {
                    state = States.LambdaTooLarge;
                }

                // change in parameters is too small [not implemented]

                // if we made an improvement, accept the new parameters
                if (improvement)
                {
                    parameters.Copy(newParameters);
                    error = newError;
                    break;
                }

                // if we meet termination criteria, break
                if (state != States.Running)
                {
                    break;
                }
            }

            rmsError = Math.Sqrt(error / errorVector.Size);
            return(rmsError);
        }
Example #25
        static double CalibrateColorCamera(List<Matrix> worldPoints, List<System.Drawing.PointF> imagePoints, Matrix cameraMatrix, Matrix distCoeffs, Matrix rotation, Matrix translation)
        {
            int nPoints = worldPoints.Count;

            {
                Matrix R, t;
                CameraMath.DLT(cameraMatrix, distCoeffs, worldPoints, imagePoints, out R, out t);
                var r = CameraMath.RotationVectorFromRotationMatrix(R);
                rotation.Copy(r);
                translation.Copy(t);
            }

            // pack parameters into vector
            // parameters: fx, fy, cx, cy, k1, k2, + 3 for rotation, 3 translation = 12
            int nParameters = 12;
            var parameters = new Matrix(nParameters, 1);

            {
                int pi = 0;
                parameters[pi++] = cameraMatrix[0, 0]; // fx
                parameters[pi++] = cameraMatrix[1, 1]; // fy
                parameters[pi++] = cameraMatrix[0, 2]; // cx
                parameters[pi++] = cameraMatrix[1, 2]; // cy
                parameters[pi++] = distCoeffs[0]; // k1
                parameters[pi++] = distCoeffs[1]; // k2
                parameters[pi++] = rotation[0];
                parameters[pi++] = rotation[1];
                parameters[pi++] = rotation[2];
                parameters[pi++] = translation[0];
                parameters[pi++] = translation[1];
                parameters[pi++] = translation[2];

            }

            // size of our error vector
            int nValues = nPoints * 2; // each component (x,y) is a separate entry

            LevenbergMarquardt.Function function = delegate(Matrix p)
            {
                var fvec = new Matrix(nValues, 1);


                // unpack parameters
                int pi = 0;
                double fx = p[pi++];
                double fy = p[pi++];
                double cx = p[pi++];
                double cy = p[pi++];

                double k1 = p[pi++];
                double k2 = p[pi++];

                var K = Matrix.Identity(3, 3);
                K[0, 0] = fx;
                K[1, 1] = fy;
                K[0, 2] = cx;
                K[1, 2] = cy;

                var d = Matrix.Zero(5, 1);
                d[0] = k1;
                d[1] = k2;

                var r = new Matrix(3, 1);
                r[0] = p[pi++];
                r[1] = p[pi++];
                r[2] = p[pi++];

                var t = new Matrix(3, 1);
                t[0] = p[pi++];
                t[1] = p[pi++];
                t[2] = p[pi++];

                var R = CameraMath.RotationMatrixFromRotationVector(r);



                var x = new Matrix(3, 1);

                int fveci = 0;
                for (int i = 0; i < worldPoints.Count; i++)
                {
                    // transform world point to local camera coordinates
                    x.Mult(R, worldPoints[i]);
                    x.Add(t);

                    // fvec_i = y_i - f(x_i)
                    double u, v;
                    CameraMath.Project(K, d, x[0], x[1], x[2], out u, out v);

                    var imagePoint = imagePoints[i];
                    fvec[fveci++] = imagePoint.X - u;
                    fvec[fveci++] = imagePoint.Y - v;
                }
                return fvec;
            };

            // optimize
            var calibrate = new LevenbergMarquardt(function);
            while (calibrate.State == LevenbergMarquardt.States.Running)
            {
                var rmsError = calibrate.MinimizeOneStep(parameters);
                Console.WriteLine("rms error = " + rmsError);
            }
            for (int i = 0; i < nParameters; i++)
                Console.WriteLine(parameters[i] + "\t");
            Console.WriteLine();

            // unpack parameters
            {
                int pi = 0;
                double fx = parameters[pi++];
                double fy = parameters[pi++];
                double cx = parameters[pi++];
                double cy = parameters[pi++];
                double k1 = parameters[pi++];
                double k2 = parameters[pi++];
                cameraMatrix[0, 0] = fx;
                cameraMatrix[1, 1] = fy;
                cameraMatrix[0, 2] = cx;
                cameraMatrix[1, 2] = cy;
                distCoeffs[0] = k1;
                distCoeffs[1] = k2;
                rotation[0] = parameters[pi++];
                rotation[1] = parameters[pi++];
                rotation[2] = parameters[pi++];
                translation[0] = parameters[pi++];
                translation[1] = parameters[pi++];
                translation[2] = parameters[pi++];
            }


            return calibrate.RMSError;
        }
        static double CalibrateColorCamera(List <Matrix> worldPoints, List <System.Drawing.PointF> imagePoints, Matrix cameraMatrix, Matrix distCoeffs, Matrix rotation, Matrix translation)
        {
            int nPoints = worldPoints.Count;

            {
                Matrix R, t;
                CameraMath.DLT(cameraMatrix, distCoeffs, worldPoints, imagePoints, out R, out t);
                //var r = Orientation.RotationVector(R);
                var r = RoomAliveToolkit.ProjectorCameraEnsemble.RotationVectorFromRotationMatrix(R);
                rotation.Copy(r);
                translation.Copy(t);
            }

            // pack parameters into vector
            // parameters: fx, fy, cx, cy, k1, k2, + 3 for rotation, 3 translation = 12
            int nParameters = 12;
            var parameters  = new Matrix(nParameters, 1);

            {
                int pi = 0;
                parameters[pi++] = cameraMatrix[0, 0]; // fx
                parameters[pi++] = cameraMatrix[1, 1]; // fy
                parameters[pi++] = cameraMatrix[0, 2]; // cx
                parameters[pi++] = cameraMatrix[1, 2]; // cy
                parameters[pi++] = distCoeffs[0];      // k1
                parameters[pi++] = distCoeffs[1];      // k2
                parameters[pi++] = rotation[0];
                parameters[pi++] = rotation[1];
                parameters[pi++] = rotation[2];
                parameters[pi++] = translation[0];
                parameters[pi++] = translation[1];
                parameters[pi++] = translation[2];
            }

            // size of our error vector
            int nValues = nPoints * 2; // each component (x,y) is a separate entry

            LevenbergMarquardt.Function function = delegate(Matrix p)
            {
                var fvec = new Matrix(nValues, 1);


                // unpack parameters
                int    pi = 0;
                double fx = p[pi++];
                double fy = p[pi++];
                double cx = p[pi++];
                double cy = p[pi++];

                double k1 = p[pi++];
                double k2 = p[pi++];

                var K = Matrix.Identity(3, 3);
                K[0, 0] = fx;
                K[1, 1] = fy;
                K[0, 2] = cx;
                K[1, 2] = cy;

                var d = Matrix.Zero(5, 1);
                d[0] = k1;
                d[1] = k2;

                var r = new Matrix(3, 1);
                r[0] = p[pi++];
                r[1] = p[pi++];
                r[2] = p[pi++];

                var t = new Matrix(3, 1);
                t[0] = p[pi++];
                t[1] = p[pi++];
                t[2] = p[pi++];

                //var R = Orientation.Rodrigues(r);
                var R = RoomAliveToolkit.ProjectorCameraEnsemble.RotationMatrixFromRotationVector(r);



                var x = new Matrix(3, 1);

                int fveci = 0;
                for (int i = 0; i < worldPoints.Count; i++)
                {
                    // transform world point to local camera coordinates
                    x.Mult(R, worldPoints[i]);
                    x.Add(t);

                    // fvec_i = y_i - f(x_i)
                    double u, v;
                    CameraMath.Project(K, d, x[0], x[1], x[2], out u, out v);

                    var imagePoint = imagePoints[i];
                    fvec[fveci++] = imagePoint.X - u;
                    fvec[fveci++] = imagePoint.Y - v;
                }
                return(fvec);
            };

            // optimize
            var calibrate = new LevenbergMarquardt(function);

            while (calibrate.State == LevenbergMarquardt.States.Running)
            {
                var rmsError = calibrate.MinimizeOneStep(parameters);
                Console.WriteLine("rms error = " + rmsError);
            }
            for (int i = 0; i < nParameters; i++)
            {
                Console.WriteLine(parameters[i] + "\t");
            }
            Console.WriteLine();

            // unpack parameters
            {
                int    pi = 0;
                double fx = parameters[pi++];
                double fy = parameters[pi++];
                double cx = parameters[pi++];
                double cy = parameters[pi++];
                double k1 = parameters[pi++];
                double k2 = parameters[pi++];
                cameraMatrix[0, 0] = fx;
                cameraMatrix[1, 1] = fy;
                cameraMatrix[0, 2] = cx;
                cameraMatrix[1, 2] = cy;
                distCoeffs[0]      = k1;
                distCoeffs[1]      = k2;
                rotation[0]        = parameters[pi++];
                rotation[1]        = parameters[pi++];
                rotation[2]        = parameters[pi++];
                translation[0]     = parameters[pi++];
                translation[1]     = parameters[pi++];
                translation[2]     = parameters[pi++];
            }


            return(calibrate.RMSError);
        }
Example #27
        public void DepthImageToColorImage(int depthX, int depthY, double depthMeters, System.Drawing.PointF[] depthFrameToCameraSpaceTable, out double colorX, out double colorY)
        {
            double xUndistorted, yUndistorted;

            // convert to depth camera space
            // use lookup table to perform undistortion; this will be faster when converting lots of points
            // this matches the Kinect SDK's depthFrameToCameraSpace table (y down)
            var point = depthFrameToCameraSpaceTable[depthY * Kinect2Calibration.depthImageWidth + depthX];
            xUndistorted = point.X;
            yUndistorted = point.Y;

            var depthCamera = new Matrix(4, 1);
            depthCamera[0] = xUndistorted * depthMeters;
            depthCamera[1] = yUndistorted * depthMeters;
            depthCamera[2] = depthMeters;
            depthCamera[3] = 1;

            // convert to color camera space
            var colorCamera = new Matrix(4, 1);
            colorCamera.Mult(depthToColorTransform, depthCamera);

            // project to color image
            CameraMath.Project(colorCameraMatrix, colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorX, out colorY);

            // convert back to Y down
            colorY = colorImageHeight - colorY;
        }
        public void SaveToOBJ(string directory, string objPath)
        {
            var objFilename = Path.GetFileNameWithoutExtension(objPath);
            var objDirectory = Path.GetDirectoryName(objPath);

            if (!Directory.Exists(objDirectory))
                Directory.CreateDirectory(objDirectory);

            // Because we need to form triangles, we go back to the depth image
            var quadOffsets = new System.Drawing.Point[]
            {
                new System.Drawing.Point(0, 0),
                new System.Drawing.Point(1, 0),
                new System.Drawing.Point(0, 1),
                new System.Drawing.Point(1, 0),
                new System.Drawing.Point(1, 1),
                new System.Drawing.Point(0, 1),
            };

            var streamWriter = new CultureInvariantStreamWriter(objDirectory + "/" + objFilename + ".obj");
            var mtlFileWriter = new CultureInvariantStreamWriter(objDirectory + "/" + objFilename + ".mtl");
            streamWriter.WriteLine("mtllib " + objFilename + ".mtl");
            uint nextVertexIndex = 1;
            var depthImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);

            foreach (var camera in cameras)
            {
                mtlFileWriter.WriteLine("newmtl camera" + camera.name);
                mtlFileWriter.WriteLine("Ka 1.000000 1.000000 1.000000");
                mtlFileWriter.WriteLine("Kd 1.000000 1.000000 1.000000");
                mtlFileWriter.WriteLine("Ks 0.000000 0.000000 0.000000");
                mtlFileWriter.WriteLine("Tr 1.000000");
                mtlFileWriter.WriteLine("illum 1");
                mtlFileWriter.WriteLine("Ns 0.000000");
                mtlFileWriter.WriteLine("map_Kd " + objFilename + "_" + camera.name + ".jpg");

                File.Copy(directory + "/camera" + camera.name + "/color.jpg", objDirectory + "/" + objFilename + "_" + camera.name + ".jpg", true);

                streamWriter.WriteLine("usemtl camera" + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();
                var vertices = new Vertex[Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight];
                var colorCamera = new Matrix(4, 1);
                var depthCamera = new Matrix(4, 1);
                var world = new Matrix(4, 1);

                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                    {
                        // depth camera coords
                        var depth = depthImage[x, y] / 1000f; // m
                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[Kinect2Calibration.depthImageWidth * y + x];
                        depthCamera[0] = point.X * depth;
                        depthCamera[1] = point.Y * depth;
                        depthCamera[2] = depth;
                        depthCamera[3] = 1;

                        // world coordinates
                        world.Mult(camera.pose, depthCamera);
                        //world.Scale(1.0 / world[3]); not necessary for this transform

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera);
                        colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);
                        colorU /= (double)Kinect2Calibration.colorImageWidth;
                        colorV /= (double)Kinect2Calibration.colorImageHeight;

                        var vertex = new Vertex();
                        vertex.x = (float)world[0];
                        vertex.y = (float)world[1];
                        vertex.z = (float)world[2];
                        vertex.u = (float)colorU;
                        vertex.v = (float)colorV;
                        vertices[Kinect2Calibration.depthImageWidth * y + x] = vertex;

                    }

                streamWriter.WriteLine("g camera" + camera.name);
                streamWriter.WriteLine("usemtl camera" + camera.name);

                // examine each triangle
                for (int y = 0; y < Kinect2Calibration.depthImageHeight - 1; y++)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth - 1; x++)
                    {
                        int offseti = 0;
                        for (int tri = 0; tri < 2; tri++)
                        {
                            // the indexes of the vertices of this triangle
                            var i0 = Kinect2Calibration.depthImageWidth * (y + quadOffsets[offseti].Y) + (x + quadOffsets[offseti].X);
                            var i1 = Kinect2Calibration.depthImageWidth * (y + quadOffsets[offseti + 1].Y) + (x + quadOffsets[offseti + 1].X);
                            var i2 = Kinect2Calibration.depthImageWidth * (y + quadOffsets[offseti + 2].Y) + (x + quadOffsets[offseti + 2].X);

                            // is triangle valid?
                            bool nonZero = (vertices[i0].z != 0) && (vertices[i1].z != 0) && (vertices[i2].z != 0);

                            // keep only edges shorter than 20 cm; a longer edge indicates a depth discontinuity
                            bool jump01 = Vertex.DistanceSquared(vertices[i0], vertices[i1]) < 0.2 * 0.2;
                            bool jump02 = Vertex.DistanceSquared(vertices[i0], vertices[i2]) < 0.2 * 0.2;
                            bool jump12 = Vertex.DistanceSquared(vertices[i1], vertices[i2]) < 0.2 * 0.2;

                            bool valid = nonZero && jump01 && jump02 && jump12;
                            if (valid)
                            {
                                // only add the vertex if we haven't already
                                if (vertices[i0].index == 0)
                                {
                                    streamWriter.WriteLine(vertices[i0]);
                                    vertices[i0].index = nextVertexIndex++;
                                }
                                if (vertices[i1].index == 0)
                                {
                                    streamWriter.WriteLine(vertices[i1]);
                                    vertices[i1].index = nextVertexIndex++;
                                }
                                if (vertices[i2].index == 0)
                                {
                                    streamWriter.WriteLine(vertices[i2]);
                                    vertices[i2].index = nextVertexIndex++;
                                }
                                streamWriter.WriteLine("f {0}/{0} {1}/{1} {2}/{2}", vertices[i0].index, vertices[i1].index, vertices[i2].index);
                            }
                            offseti += 3;
                        }
                    }
            }
            streamWriter.Close();
            mtlFileWriter.Close();
        }
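What follows is not part of the listing above, but a minimal sketch of a Vertex type the exporter could rely on, assuming the OBJ conventions used there; the toolkit's actual Vertex type is not shown in this listing. The one hard requirement is that WriteLine(vertex) emits matching "v" (position) and "vt" (texture coordinate) records, since the faces written later use the same index for both ("f i/i j/j k/k").

        // Hypothetical sketch, not the toolkit's definition.
        struct Vertex
        {
            public float x, y, z; // position in world coordinates (meters)
            public float u, v;    // color texture coordinates, normalized to [0, 1]
            public int index;     // 1-based OBJ vertex index; 0 means "not yet written"

            public static float DistanceSquared(Vertex a, Vertex b)
            {
                float dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;
                return dx * dx + dy * dy + dz * dz;
            }

            // one WriteLine(vertex) call emits both the v and vt records
            public override string ToString()
            {
                return string.Format("v {0} {1} {2}\nvt {3} {4}", x, y, z, u, v);
            }
        }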
Example No. 29
        public static void LeastSquares(Matrix x, Matrix A, Matrix b)
        {
            // Solve the overdetermined system A*x = b via the SVD pseudoinverse:
            // x = V * diag(1/w_j) * U^T * b
            // Numerical Recipes in C, p. 66

            int m = A.m;
            int n = A.n;

            Matrix U = new Matrix(m, n), V = new Matrix(n, n), w = new Matrix(n, 1), W = new Matrix(n, n);
            A.SVD(U, w, V);
            w.Reciprocal();
            W.Diag(w);

            Matrix M = new Matrix(n, n);
            M.Mult(V, W);

            Matrix N = new Matrix(n, m);
            N.MultAAT(M, U);

            x.Mult(N, b);
        }
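A quick usage sketch for LeastSquares with hypothetical data, assuming the Matrix indexers used elsewhere in this listing: fit the line y = a*x + c to three points by stacking one row [x_i, 1] of A per observation.

        // points (0,1), (1,3), (2,5) lie exactly on y = 2x + 1
        var A = new Matrix(3, 2);
        var b = new Matrix(3, 1);
        double[] xs = { 0, 1, 2 };
        double[] ys = { 1, 3, 5 };
        for (int i = 0; i < 3; i++)
        {
            A[i, 0] = xs[i]; // slope coefficient
            A[i, 1] = 1;     // intercept coefficient
            b[i] = ys[i];
        }
        var solution = new Matrix(2, 1);
        LeastSquares(solution, A, b); // expect solution[0] = 2 (slope), solution[1] = 1 (intercept)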
        public void UnifyPose()
        {
            // unify extrinsics

            // greedily assign poses to projectors and cameras

            // The first camera is assumed to be in world coordinates already; its pose will not be modified.
            // In this way users can place the system in a useful world coordinate system that is external to calibration.

            // Set all camera poses except for the first to null.
            for (int i = 1; i < cameras.Count; i++)
                cameras[i].pose = null;

            // Keep a list of all projectors that haven't been dealt with.
            var unfixed = new List<Projector>();
            unfixed.AddRange(projectors);

            // While "unfixed" is not empty
            while (unfixed.Count > 0)
            {
                // For each projector in "unfixed"
                Projector projectorToRemove = null;
                foreach (var projector in unfixed)
                {
                    // Is it associated with a camera that has its pose set?
                    Camera fixedCamera = null;
                    foreach (var camera in projector.calibrationPointSets.Keys)
                        if (camera.pose != null)
                        {
                            fixedCamera = camera;
                            break;
                        }

                    // If so, set pose of projector and all associated cameras; remove the projector from "unfixed" (be careful to remove outside loop)
                    if (fixedCamera != null)
                    {
                        // find pose of projector as concatenation of the fixed camera pose and pose from calibration
                        var T_CjStarW = fixedCamera.pose;
                        var T_CjStarPk = projector.calibrationPointSets[fixedCamera].pose;
                        var T_PkCjStar = new Matrix(4, 4);
                        T_PkCjStar.Inverse(T_CjStarPk);
                        var T_PkW = new Matrix(4, 4);
                        T_PkW.Mult(T_CjStarW, T_PkCjStar);
                        projector.pose = T_PkW;

                        // for all other cameras that do not have their pose set
                        foreach (var camera in projector.calibrationPointSets.Keys)
                        {
                            if (camera.pose == null)
                            {
                                // concatenate projector and local pose
                                var T_CjPk = projector.calibrationPointSets[camera].pose;
                                var T_CjW = new Matrix(4, 4);
                                T_CjW.Mult(T_PkW, T_CjPk);
                                camera.pose = T_CjW;
                            }
                        }

                        projectorToRemove = projector;
                        break;
                    }
                }
                // if no projector was fixed in this pass, the calibration graph is not fully connected
                if (projectorToRemove == null)
                    throw new CalibrationFailedException("Calibration graph is not fully connected; a projector shares no camera with the posed set.");
                unfixed.Remove(projectorToRemove);
            }
        }
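The bookkeeping above follows a T_AB naming convention: T_AB maps coordinates in frame A to frame B, so poses chain by multiplying with inverses. A hedged sketch of the projector-pose composition factored out on its own (same Matrix API as above; the helper name is ours, not the toolkit's):

        // T_PkW = T_CjW * inverse(T_CjPk): projector-to-world from
        // camera-to-world and camera-to-projector transforms (all 4x4).
        static Matrix ProjectorPoseFromCamera(Matrix cameraToWorld, Matrix cameraToProjector)
        {
            var projectorToCamera = new Matrix(4, 4);
            projectorToCamera.Inverse(cameraToProjector);
            var projectorToWorld = new Matrix(4, 4);
            projectorToWorld.Mult(cameraToWorld, projectorToCamera);
            return projectorToWorld;
        }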
        public void CalibrateProjectorGroups(string directory)
        {
            // for all cameras, take depth image points to color image points
            var depthImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var validMask = new ByteImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);

            foreach (var camera in cameras)
            {
                Console.WriteLine("projecting depth points to color camera " + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");
                varianceImage.LoadFromFile(cameraDirectory + "/variance.bin");
                validMask.Zero();

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();

                // TODO: consider using just one 4x4 in calibration class
                var colorCamera = new Matrix(4, 1);
                camera.colorImagePoints = new List<Matrix>();
                camera.depthCameraPoints = new List<Matrix>();
                var depthCamera4 = new Matrix(4, 1);

                // for each valid point in depth image
                int numRejected = 0;
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y += 1)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x += 1)
                    {
                        float depth = depthImage[x, y] / 1000f; // convert mm to meters
                        float variance = varianceImage[x, y]; // depth variance, in mm^2

                        if (depth == 0) // no depth reading at this pixel
                            continue;
                        if (variance > 6 * 6) // reject pixels whose depth std dev exceeds 6 mm
                        {
                            numRejected++;
                            continue;
                        }
                        validMask[x, y] = (byte)255;

                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                        depthCamera4[0] = point.X * depth;
                        depthCamera4[1] = point.Y * depth;
                        depthCamera4[2] = depth;
                        depthCamera4[3] = 1;

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera4);
                        //colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);

                        if ((colorU >= 0) && (colorU < (Kinect2Calibration.colorImageWidth - 1)) && (colorV >= 0) && (colorV < (Kinect2Calibration.colorImageHeight - 1))) // BEWARE: downstream code must be consistent about rounding vs. truncating these coordinates
                        {
                            var colorImagePoint = new Matrix(2, 1);
                            colorImagePoint[0] = colorU;
                            colorImagePoint[1] = colorV;
                            camera.colorImagePoints.Add(colorImagePoint);

                            // the calibration routines below expect a 3-vector
                            var depthCamera = new Matrix(3, 1);
                            depthCamera[0] = depthCamera4[0];
                            depthCamera[1] = depthCamera4[1];
                            depthCamera[2] = depthCamera4[2];

                            camera.depthCameraPoints.Add(depthCamera);

                            //Console.WriteLine(depthCamera[0] + "\t" + depthCamera[1] + "\t -> " + colorImagePoint[0] + "\t" + colorImagePoint[1]);
                        }

                    }
                SaveToTiff(imagingFactory, validMask, cameraDirectory + "/validMask.tiff");

                Console.WriteLine("rejected " + 100 * (float)numRejected / (float)(Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight) + "% pixels for high variance");

            }

            // colorImagePoints and depthCameraPoints are never saved to disk, so the step above must be re-run in any session that needs them

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // use decoded Gray code images to create calibration point sets
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                projector.calibrationPointSets = new Dictionary<Camera, CalibrationPointSet>();

                foreach (var camera in cameras)
                {
                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    LoadFromTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    LoadFromTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    LoadFromTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    // we have a bunch of color camera / depth camera point correspondences
                    // use the Gray code to find the position of the color camera point in the projector frame

                    // find 2D projector coordinates from decoded Gray code images
                    var imagePoints = new List<System.Drawing.PointF>();
                    var worldPoints = new List<Matrix>();

                    for (int i = 0; i < camera.colorImagePoints.Count; i++)
                    {
                        var colorImagePoint = camera.colorImagePoints[i];

                        // We would like to relate projected color points to color images stored in memory.
                        // The Kinect SDK and our camera calibration assume X left, Y up (from the POV of the camera).
                        // We index images in memory with X right and Y down.
                        // Our Gray code images are flipped in the horizontal direction.
                        // Therefore, to map an image space coordinate to a memory location, we flip Y (and not X):
                        int x = (int)(colorImagePoint[0] + 0.5f);
                        int y = Kinect2Calibration.colorImageHeight - (int)(colorImagePoint[1] + 0.5f);

                        if ((x < 0) || (x >= Kinect2Calibration.colorImageWidth) || (y < 0) || (y >= Kinect2Calibration.colorImageHeight))
                        {
                            //Console.WriteLine("out of bounds");
                            continue;
                        }

                        if (mask[x, y] > 0) // Gray code is valid
                        {
                            // We would like to relate decoded row/column values to projector coordinates.
                            // To match the camera, we want the projector's coordinate system to be X left, Y up (from the POV of the projector).
                            // We assume that the projector is configured in front projection mode (i.e., projected text looks correct in the real world).
                            // In that case decoded columns run X right (in the real world), decoded rows run Y down (in the real world).
                            // So we need to flip both X and Y decoded values.

                            var projectorImagePoint = new System.Drawing.PointF(projector.width - decodedColumns[x, y], projector.height - decodedRows[x, y]);
                            var depthCameraPoint = camera.depthCameraPoints[i];

                            imagePoints.Add(projectorImagePoint);
                            worldPoints.Add(depthCameraPoint);

                            //Console.WriteLine(depthCameraPoint[0] + "\t" + depthCameraPoint[1] + "\t" + depthCameraPoint[2] + "-> \t" + x + "\t" + y + "-> \t" + projectorImagePoint.X + "\t" + projectorImagePoint.Y);
                        }
                    }

                    if (worldPoints.Count > 1000)
                    {
                        var pointSet = new CalibrationPointSet();
                        pointSet.worldPoints = worldPoints;
                        pointSet.imagePoints = imagePoints;
                        projector.calibrationPointSets[camera] = pointSet;
                        Console.WriteLine("projector " + projector.name + " is seen by camera " + camera.name + " (" + worldPoints.Count + " points)");
                    }
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // calibration
            foreach (var projector in projectors)
            {
                Console.WriteLine("calibrating projector " + projector.name);

                string projectorDirectory = directory + "/projector" + projector.name;

                // RANSAC
                double minError = Double.PositiveInfinity;
                var random = new Random(0); // provide seed to ease debugging

                int numCompletedFits = 0;

                for (int i = 0; (numCompletedFits < 4) && (i < 10); i++)
                {
                    Console.WriteLine("RANSAC iteration " + i);

                    // randomly select small number of points from each calibration set
                    var worldPointSubsets = new List<List<Matrix>>();
                    var imagePointSubsets = new List<List<System.Drawing.PointF>>();

                    bool foundNonplanarSubset = false;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointSubset = new List<Matrix>();
                        var imagePointSubset = new List<System.Drawing.PointF>();

                        // try to find a nonplanar subset
                        bool planar = true;
                        int nTries = 0;
                        while (planar && (nTries++ < 1000))
                        {
                            worldPointSubset.Clear();
                            imagePointSubset.Clear();

                            for (int j = 0; j < 100; j++)
                            {
                                int k = random.Next(pointSet.worldPoints.Count);
                                worldPointSubset.Add(pointSet.worldPoints[k]);
                                imagePointSubset.Add(pointSet.imagePoints[k]);
                            }

                            // planar?
                            Matrix Rplane, tplane, d;
                            CameraMath.PlaneFit(worldPointSubset, out Rplane, out tplane, out d);
                            //Console.WriteLine("planar : " + d[2] / d[1]);
                            planar = (d[2] / d[1]) < 0.001f;
                        }

                        worldPointSubsets.Add(worldPointSubset);
                        imagePointSubsets.Add(imagePointSubset);

                        // we can't initialize extrinsics yet, because we don't know which intrinsics we'll be using

                        if (!planar)
                            foundNonplanarSubset = true;
                    }

                    // we do not optimize intrinsics if all the point sets are planar, or if the projector intrinsics are marked as locked
                    bool fixIntrinsics = (!foundNonplanarSubset) || (projector.lockIntrinsics);

                    var rotations = new List<Matrix>();
                    var translations = new List<Matrix>();
                    var cameraMatrix = new Matrix(3, 3);
                    var distCoeffs = new Matrix(2, 1);

                    if (fixIntrinsics)
                    {
                        cameraMatrix.Copy(projector.cameraMatrix);
                        distCoeffs.Copy(projector.lensDistortion);
                    }
                    else // nonplanar, so we can optimize intrinsics
                    {
                        cameraMatrix[0, 0] = 1000; //fx TODO: can we instead init this from FOV?
                        cameraMatrix[1, 1] = 1000; //fy
                        cameraMatrix[0, 2] = projector.width / 2; //cx
                        cameraMatrix[1, 2] = 0; // projector lens shift; note this assumes desktop projection mode
                        cameraMatrix[2, 2] = 1;
                    }

                    // init extrinsics
                    for (int ii = 0; ii < worldPointSubsets.Count; ii++)
                    {
                        Matrix R, t;
                        CameraMath.ExtrinsicsInit(cameraMatrix, distCoeffs, worldPointSubsets[ii], imagePointSubsets[ii], out R, out t);
                        rotations.Add(CameraMath.RotationVectorFromRotationMatrix(R));
                        translations.Add(t);
                    }

                    // initial RANSAC fit on subset of points
                    double error;
                    if (fixIntrinsics)
                        error = CameraMath.CalibrateCameraExtrinsicsOnly(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
                    else
                        error = CameraMath.CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);

                    Console.WriteLine("error on subset = " + error);

                    // RANSAC: find inliers from overall dataset
                    var worldPointInlierSets = new List<List<Matrix>>();
                    var imagePointInlierSets = new List<List<System.Drawing.PointF>>();
                    int setIndex = 0;

                    bool enoughInliers = true;
                    double sumError = 0;
                    int pointsInSum = 0;
                    int totalInliers = 0;
                    int totalPoints = 0;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointInlierSet = new List<Matrix>();
                        var imagePointInlierSet = new List<System.Drawing.PointF>();

                        var R = CameraMath.RotationMatrixFromRotationVector(rotations[setIndex]);
                        var t = translations[setIndex];
                        var p = new Matrix(3, 1);

                        for (int k = 0; k < pointSet.worldPoints.Count; k++)
                        {
                            p.Mult(R, pointSet.worldPoints[k]);
                            p.Add(t);

                            double u, v;
                            CameraMath.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                            double dx = pointSet.imagePoints[k].X - u;
                            double dy = pointSet.imagePoints[k].Y - v;
                            double thisError = Math.Sqrt((dx * dx) + (dy * dy));

                            if (thisError < 2.0f) // TODO: how to set this?
                            {
                                worldPointInlierSet.Add(pointSet.worldPoints[k]);
                                imagePointInlierSet.Add(pointSet.imagePoints[k]);
                            }
                            sumError += thisError * thisError;
                            pointsInSum++;
                        }
                        setIndex++;

                        // require that each view has a minimum number of inliers
                        enoughInliers = enoughInliers && (worldPointInlierSet.Count > 500); // should be related to min number of points in set (above)

                        totalPoints += pointSet.worldPoints.Count;
                        totalInliers += worldPointInlierSet.Count;

                        worldPointInlierSets.Add(worldPointInlierSet);
                        imagePointInlierSets.Add(imagePointInlierSet);
                    }

                    Console.WriteLine("{0}/{1} inliers", totalInliers, totalPoints);

                    // if number of inliers > some threshold (should be for each subset)
                    if (enoughInliers) // should this threshold be a function of the number of cameras, a percentage?
                    {
                        double error2;
                        if (fixIntrinsics)
                            error2 = CameraMath.CalibrateCameraExtrinsicsOnly(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);
                        else
                            error2 = CameraMath.CalibrateCamera(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);

                        Console.WriteLine("error with inliers = " + error2);
                        Console.Write("camera matrix = \n" + cameraMatrix);

                        numCompletedFits++;

                        // if reduced error save model (save rotation and translation to calibrationPointSets, cameraMatrix and distortion coeffs to projector)
                        if (error2 < minError)
                        {
                            minError = error2;
                            projector.cameraMatrix = cameraMatrix;
                            projector.lensDistortion = distCoeffs;
                            setIndex = 0;

                            foreach (var pointSet in projector.calibrationPointSets.Values)
                            {
                                // convert to 4x4 transform
                                var R = CameraMath.RotationMatrixFromRotationVector(rotations[setIndex]);
                                var t = translations[setIndex];

                                var T = new Matrix(4, 4);
                                T.Identity();
                                for (int ii = 0; ii < 3; ii++)
                                {
                                    for (int jj = 0; jj < 3; jj++)
                                        T[ii, jj] = R[ii, jj];
                                    T[ii, 3] = t[ii];
                                }
                                pointSet.pose = T;
                                pointSet.worldPointInliers = worldPointInlierSets[setIndex];
                                pointSet.imagePointInliers = imagePointInlierSets[setIndex];

                                setIndex++;
                            }
                        }
                    }

                }

                if (numCompletedFits == 0)
                    throw new CalibrationFailedException("Unable to successfully calibrate projector: " + projector.name);

                Console.WriteLine("final calibration:");
                Console.Write("camera matrix = \n" + projector.cameraMatrix);
                Console.Write("distortion = \n" + projector.lensDistortion);
                Console.WriteLine("error = " + minError);

                foreach (var camera in projector.calibrationPointSets.Keys)
                {
                    Console.WriteLine("camera " + camera.name + " pose:");
                    Console.Write(projector.calibrationPointSets[camera].pose);
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            //Console.WriteLine("x = [");
            //for (int ii = 0; ii < imagePointSubsets[0].Count; ii++)
            //    Console.WriteLine("{0} {1}", imagePointSubsets[0][ii].X, imagePointSubsets[0][ii].Y);
            //Console.WriteLine("]';");
            //Console.WriteLine("X = [");
            //for (int ii = 0; ii < worldPointSubsets[0].Count; ii++)
            //    Console.WriteLine("{0} {1} {2}", worldPointSubsets[0][ii][0], worldPointSubsets[0][ii][1], worldPointSubsets[0][ii][2]);
            //Console.WriteLine("]';");
            //Console.WriteLine("fc = [{0} {1}];", projector.cameraMatrix[0, 0], projector.cameraMatrix[1, 1]);
            //Console.WriteLine("cc = [{0} {1}];", projector.cameraMatrix[0, 2], projector.cameraMatrix[1, 2]);

            //Matrix thisR, thist;

            //{
            //    Matrix Rplane, tplane;
            //    CameraMath.PlaneFit(worldPointSubsets[0], out Rplane, out tplane);

            //    CameraMath.PlanarDLT(projector.cameraMatrix, projector.lensDistortion, worldPointSubsets[0], imagePointSubsets[0], Rplane, tplane, out thisR, out thist);
            //    //Console.WriteLine("DLT---------");
            //    //Console.WriteLine(thisR);
            //    //Console.WriteLine(thist);

            //}

            //// if pattern is not planar, we can recover projector intrinsics

            //List<RoomAliveToolkit.Matrix> rotations = null;
            //List<RoomAliveToolkit.Matrix> translations = null;

            //var error = CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
            //Console.WriteLine("error = " + error);

            // We check whether each view is planar so that we can use the correct version of DLT.
            // The overall set may not be planar even when each view is, so the union of points must also be checked.
            // If the overall set is planar, the intrinsics are left alone.
        }
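The RANSAC inlier test above reduces to a per-point pixel reprojection error. For reference, a sketch of that computation factored into a helper (CameraMath and Matrix used as elsewhere in this listing; the helper itself is ours):

        // Pixel reprojection error of one world point under extrinsics (R, t).
        static double ReprojectionError(Matrix cameraMatrix, Matrix distCoeffs,
            Matrix R, Matrix t, Matrix worldPoint, System.Drawing.PointF imagePoint)
        {
            var p = new Matrix(3, 1);
            p.Mult(R, worldPoint); // rotate into camera coordinates
            p.Add(t);              // then translate

            double u, v;
            CameraMath.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

            double dx = imagePoint.X - u;
            double dy = imagePoint.Y - v;
            return Math.Sqrt(dx * dx + dy * dy); // compare against the 2-pixel inlier threshold
        }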
Example No. 32
        public void DepthImageToColorImage(double depthX, double depthY, double depthMeters, out double colorX, out double colorY)
        {
            double xUndistorted, yUndistorted;

            // convert to depth camera space
            float fx = (float)depthCameraMatrix[0, 0];
            float fy = (float)depthCameraMatrix[1, 1];
            float cx = (float)depthCameraMatrix[0, 2];
            float cy = (float)depthCameraMatrix[1, 2];
            float[] kappa = new float[] { (float)depthLensDistortion[0], (float)depthLensDistortion[1] };
            // flip y because our calibration expects y up (right handed coordinates at all times)
            CameraMath.Undistort(fx, fy, cx, cy, kappa, depthX, (depthImageHeight - depthY), out xUndistorted, out yUndistorted);

            var depthCamera = new Matrix(4, 1);
            depthCamera[0] = xUndistorted * depthMeters;
            depthCamera[1] = yUndistorted * depthMeters;
            depthCamera[2] = depthMeters;
            depthCamera[3] = 1;

            // convert to color camera space
            var colorCamera = new Matrix(4, 1);
            colorCamera.Mult(depthToColorTransform, depthCamera);

            // project to color image
            CameraMath.Project(colorCameraMatrix, colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorX, out colorY);

            // convert back to Y down
            colorY = colorImageHeight - colorY;
        }
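A minimal usage sketch, assuming an initialized calibration instance (the variable name is ours): map the center pixel of a 512x424 Kinect v2 depth frame, observed at 2 m, into color image coordinates.

        double colorX, colorY;
        calibration.DepthImageToColorImage(256, 212, 2.0, out colorX, out colorY);
        Console.WriteLine("depth (256, 212) at 2 m -> color ({0:F1}, {1:F1})", colorX, colorY);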
        public void OptimizePose()
        {
            UnifyPose();

            // joint estimate of projector and camera pose

            // minimize wrt T_CjW, T_WPk:  Sum_ijk  v_ijk [ p_k( T_WPk T_CjW x_i ) - y_ik ]^2

            // cameras observe points x_i (in camera coordinates)
            // point x_i is observed to project to point y_ik in projector k
            // v_ijk = 1 if point i is observed by camera j and imaged by projector k
            // p_k(x) projects point x in projector k; x in projector coordinates
            // T_CjW maps camera j local coordinates to world coordinates
            // T_WPk maps world to projector k coordinates

            // efficient implementation: list of points x_ijk for which v_ijk != 0; store j, k with each point x_i
            // solve for C_j, P_k; C_0 is not in the set of parameters

            // parameters: for each projector and camera: 1 rotation + 1 translation = 6 parameters
            //    We leave T_C0W fixed, so have 6 * (numProjectors + numCameras - 1) parameters
            int nParameters = 6 * (projectors.Count + cameras.Count - 1);
            //double[] parameters = new double[nParameters];
            var parameters = new Matrix(nParameters, 1);

            // loop over room.cameras, room.projectors to form up parameters array
            {
                int pi = 0; // index into our parameter array
                for (int i = 1; i < cameras.Count; i++) // skip first one, which is our root
                {
                    var T = cameras[i].pose;
                    var R = new Matrix(3, 3);
                    var t = new Matrix(3, 1);
                    for (int ii = 0; ii < 3; ii++)
                    {
                        t[ii] = T[ii, 3];
                        for (int jj = 0; jj < 3; jj++)
                            R[ii, jj] = T[ii, jj];
                    }

                    var r = CameraMath.RotationVectorFromRotationMatrix(R);

                    for (int ii = 0; ii < 3; ii++)
                        parameters[pi++] = r[ii];
                    for (int ii = 0; ii < 3; ii++)
                        parameters[pi++] = t[ii];
                }

                for (int i = 0; i < projectors.Count; i++)
                {
                    var T = projectors[i].pose;
                    var R = new Matrix(3, 3);
                    var t = new Matrix(3, 1);
                    for (int ii = 0; ii < 3; ii++)
                    {
                        t[ii] = T[ii, 3];
                        for (int jj = 0; jj < 3; jj++)
                            R[ii, jj] = T[ii, jj];
                    }

                    var r = CameraMath.RotationVectorFromRotationMatrix(R);

                    for (int ii = 0; ii < 3; ii++)
                        parameters[pi++] = r[ii];
                    for (int ii = 0; ii < 3; ii++)
                        parameters[pi++] = t[ii];
                }
            }

            // count the number of values
            // use only inliers from previous step
            int nValues = 0;
            foreach (var projector in projectors)
                foreach (var camera in projector.calibrationPointSets.Keys)
                    nValues += projector.calibrationPointSets[camera].worldPointInliers.Count * 2; // count components

            LevenbergMarquardt.Function optimize = delegate(Matrix p)
            {
                var fvec = new Matrix(nValues, 1);

                // unpack parameter vector p into camera and projector poses
                int pi = 0; // index into our parameter array
                for (int i = 1; i < cameras.Count; i++) // skip first one, which is our root
                {
                    var r = new Matrix(3, 1);
                    r[0] = p[pi++];
                    r[1] = p[pi++];
                    r[2] = p[pi++];
                    var R = CameraMath.RotationMatrixFromRotationVector(r);

                    var t = new Matrix(3, 1);
                    t[0] = p[pi++];
                    t[1] = p[pi++];
                    t[2] = p[pi++];

                    var T = new Matrix(4, 4);
                    T.Identity();
                    for (int ii = 0; ii < 3; ii++)
                    {
                        for (int jj = 0; jj < 3; jj++)
                            T[ii, jj] = R[ii, jj];
                        T[ii, 3] = t[ii];
                    }
                    cameras[i].pose = T;
                }

                for (int i = 0; i < projectors.Count; i++)
                {
                    var r = new Matrix(3, 1);
                    r[0] = p[pi++];
                    r[1] = p[pi++];
                    r[2] = p[pi++];
                    var R = CameraMath.RotationMatrixFromRotationVector(r);

                    var t = new Matrix(3, 1);
                    t[0] = p[pi++];
                    t[1] = p[pi++];
                    t[2] = p[pi++];

                    var T = new Matrix(4, 4);
                    T.Identity();
                    for (int ii = 0; ii < 3; ii++)
                    {
                        for (int jj = 0; jj < 3; jj++)
                            T[ii, jj] = R[ii, jj];
                        T[ii, 3] = t[ii];
                    }
                    projectors[i].pose = T;
                }

                int fveci = 0; // index into our fvec array

                foreach (var projector in projectors)
                {
                    // T_WPk is inverse of T_PkW, projector pose
                    var T_WPk = new Matrix(4, 4);
                    T_WPk.Inverse(projector.pose);

                    foreach (var camera in projector.calibrationPointSets.Keys)
                    {
                        var cameraPoints = projector.calibrationPointSets[camera].worldPointInliers;
                        var projectorPoints = projector.calibrationPointSets[camera].imagePointInliers;

                        // transforms camera to projector coordinates
                        var T_CjW = camera.pose;
                        var T_CjPk = new Matrix(4, 4);
                        T_CjPk.Mult(T_WPk, T_CjW);

                        var cameraInProjector4 = new Matrix(4, 1);
                        cameraInProjector4[3] = 1;

                        var cameraPoint4 = new Matrix(4, 1);
                        cameraPoint4[3] = 1;

                        for (int i = 0; i < cameraPoints.Count; i++)
                        {
                            var cameraPoint = cameraPoints[i];

                            cameraPoint4[0] = cameraPoint[0];
                            cameraPoint4[1] = cameraPoint[1];
                            cameraPoint4[2] = cameraPoint[2];

                            cameraInProjector4.Mult(T_CjPk, cameraPoint4);

                            cameraInProjector4.Scale(1.0 / cameraInProjector4[3]);

                            // fvec_i = y_i - p_k( T_CjPk x_i );
                            double u, v;
                            CameraMath.Project(projector.cameraMatrix, projector.lensDistortion, cameraInProjector4[0], cameraInProjector4[1], cameraInProjector4[2], out u, out v);

                            var projectorPoint = projectorPoints[i];
                            fvec[fveci++] = projectorPoint.X - u;
                            fvec[fveci++] = projectorPoint.Y - v;
                        }
                    }
                }

                //double sum = 0;
                //for (int i = 0; i < nValues; i++)
                //    sum += fvec[i] * fvec[i];

                //double rms = Math.Sqrt(sum / (double)nValues);
                //Console.WriteLine("in functor, rms == " + rms);

                return fvec;

            };

            // TODO: maybe compute error before final optimization

            var calibrate = new LevenbergMarquardt(optimize);
            calibrate.minimumReduction = 1.0e-4;
            while (calibrate.State == LevenbergMarquardt.States.Running)
            {
                double rmsError = calibrate.MinimizeOneStep(parameters);
                Console.WriteLine("rms error = " + rmsError);
            }

            //for (int i = 0; i < nParameters; i++)
            //    Console.WriteLine(parameters[i] + "\t");
            //Console.WriteLine();

            // convert back to transforms and put back in our structures
            {
                int pi = 0; // index into our parameter array
                for (int i = 1; i < cameras.Count; i++) // skip first one, which is our root
                {
                    var r = new Matrix(3, 1);
                    r[0] = parameters[pi++];
                    r[1] = parameters[pi++];
                    r[2] = parameters[pi++];
                    var R = CameraMath.RotationMatrixFromRotationVector(r);

                    var t = new Matrix(3, 1);
                    t[0] = parameters[pi++];
                    t[1] = parameters[pi++];
                    t[2] = parameters[pi++];

                    var T = new Matrix(4, 4);
                    T.Identity();
                    for (int ii = 0; ii < 3; ii++)
                    {
                        for (int jj = 0; jj < 3; jj++)
                            T[ii, jj] = R[ii, jj];
                        T[ii, 3] = t[ii];
                    }
                    cameras[i].pose = T;
                }

                for (int i = 0; i < projectors.Count; i++)
                {
                    var r = new Matrix(3, 1);
                    r[0] = parameters[pi++];
                    r[1] = parameters[pi++];
                    r[2] = parameters[pi++];
                    var R = CameraMath.RotationMatrixFromRotationVector(r);

                    var t = new Matrix(3, 1);
                    t[0] = parameters[pi++];
                    t[1] = parameters[pi++];
                    t[2] = parameters[pi++];

                    var T = new Matrix(4, 4);
                    T.Identity();
                    for (int ii = 0; ii < 3; ii++)
                    {
                        for (int jj = 0; jj < 3; jj++)
                            T[ii, jj] = R[ii, jj];
                        T[ii, 3] = t[ii];
                    }
                    projectors[i].pose = T;
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);
        }
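The rotation-vector/translation packing above appears four times (camera and projector, both into and out of the optimizer). A hedged sketch of that round trip factored into helpers (names are ours; CameraMath and Matrix as used above):

        // Write pose T (4x4) as rotation vector + translation into parameters[pi..pi+5].
        static void PackPose(Matrix T, Matrix parameters, ref int pi)
        {
            var R = new Matrix(3, 3);
            var t = new Matrix(3, 1);
            for (int i = 0; i < 3; i++)
            {
                t[i] = T[i, 3];
                for (int j = 0; j < 3; j++)
                    R[i, j] = T[i, j];
            }
            var r = CameraMath.RotationVectorFromRotationMatrix(R);
            for (int i = 0; i < 3; i++)
                parameters[pi++] = r[i];
            for (int i = 0; i < 3; i++)
                parameters[pi++] = t[i];
        }

        // Read rotation vector + translation from parameters[pi..pi+5] back into a 4x4 pose.
        static Matrix UnpackPose(Matrix parameters, ref int pi)
        {
            var r = new Matrix(3, 1);
            for (int i = 0; i < 3; i++)
                r[i] = parameters[pi++];
            var R = CameraMath.RotationMatrixFromRotationVector(r);

            var t = new Matrix(3, 1);
            for (int i = 0; i < 3; i++)
                t[i] = parameters[pi++];

            var T = new Matrix(4, 4);
            T.Identity();
            for (int i = 0; i < 3; i++)
            {
                for (int j = 0; j < 3; j++)
                    T[i, j] = R[i, j];
                T[i, 3] = t[i];
            }
            return T;
        }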
Example No. 34
        public void ColorImageToDepthImage(int colorX, int colorY, ShortImage depthImage, System.Drawing.PointF[] colorFrameToCameraSpaceTable, out Matrix depthPoint, out double depthX, out double depthY)
        {
            double xUndistorted, yUndistorted;

            // convert to color camera space
            // use lookup table to perform undistortion; this will be faster when converting lots of points
            var point = colorFrameToCameraSpaceTable[colorY * Kinect2Calibration.colorImageWidth + colorX];
            xUndistorted = point.X;
            yUndistorted = point.Y;

            var colorToDepthTransform = new Matrix(4, 4);
            colorToDepthTransform.Inverse(depthToColorTransform);

            var colorPoint = new Matrix(4, 1);
            depthPoint = new Matrix(4, 1);
            depthX = 0; depthY = 0;

            // walk along ray in color camera
            bool found = false;
            for (int s = 400; (s < 4500) && !found; s++) // TODO: confirm these limits (mm)
            {
                // convert to a 3D point along ray, in meters
                colorPoint[0] = xUndistorted * s / 1000.0;
                colorPoint[1] = yUndistorted * s / 1000.0;
                colorPoint[2] = s / 1000.0;
                colorPoint[3] = 1;

                // transform to depth camera 3D point and project
                depthPoint.Mult(colorToDepthTransform, colorPoint);
                CameraMath.Project(depthCameraMatrix, depthLensDistortion, depthPoint[0], depthPoint[1], depthPoint[2], out depthX, out depthY);

                int x = (int)depthX;
                // Y down, since we are indexing into an image
                int y = depthImageHeight - (int)depthY;
                if ((x >= 0) && (x < depthImageWidth) && (y >= 0) && (y < depthImageHeight))
                {
                    int z = depthImage[x, y]; // measured depth in mm
                    // stop once the measured surface is nearer than the current sample distance along the ray
                    if ((z != 0) && (z < s))
                        found = true;
                }
            }
            // convert back to Y down
            depthY = depthImageHeight - depthY;
        }
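A usage sketch under stated assumptions: depthImage is the current ShortImage depth frame, calibration is an initialized instance, and ComputeColorFrameToCameraSpaceTable() is assumed to exist as the color-side analog of the ComputeDepthFrameToCameraSpaceTable() call seen earlier. It finds the 3D point and depth-image coordinates under the center color pixel of a 1920x1080 frame.

        var table = calibration.ComputeColorFrameToCameraSpaceTable(); // assumed analog of the depth-side table
        Matrix depthPoint;
        double depthX, depthY;
        calibration.ColorImageToDepthImage(960, 540, depthImage, table, out depthPoint, out depthX, out depthY);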
Example No. 35
        public static List<RoomAliveToolkit.Matrix> TransformPoints(RoomAliveToolkit.Matrix A, List<RoomAliveToolkit.Matrix> points)
        {
            var transformedPoints = new List<RoomAliveToolkit.Matrix>();
            var point4 = new RoomAliveToolkit.Matrix(4, 1);
            point4[3] = 1;
            var transformedPoint4 = new RoomAliveToolkit.Matrix(4, 1);
            foreach (var point in points)
            {
                point4[0] = point[0];
                point4[1] = point[1];
                point4[2] = point[2];

                transformedPoint4.Mult(A, point4);
                transformedPoint4.Scale(1.0 / transformedPoint4[3]);

                var transformedPoint = new RoomAliveToolkit.Matrix(3, 1);
                transformedPoint[0] = transformedPoint4[0];
                transformedPoint[1] = transformedPoint4[1];
                transformedPoint[2] = transformedPoint4[2];
                transformedPoints.Add(transformedPoint);
            }
            return transformedPoints;
        }
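A short usage sketch with hypothetical data: build a pure translation and run it through TransformPoints. Since a rigid transform leaves w = 1, the homogeneous divide is a no-op here.

        // shift a list of 3x1 points by +0.5 m along X
        var T = new RoomAliveToolkit.Matrix(4, 4);
        T.Identity();
        T[0, 3] = 0.5;
        var shiftedPoints = TransformPoints(T, points); // 'points' is a List<RoomAliveToolkit.Matrix> of 3x1 vectors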