Code example #1
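    // Converts calibrated camera intrinsics into a Unity/OpenGL-style projection
    // matrix: the principal point is flipped from Kinect conventions into GL
    // conventions, packed into normalized clip-space form, then transposed for
    // Unity's column-major Matrix4x4.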
    private Matrix4x4 GetProjectionMatrix(RoomAliveToolkit.Matrix intrinsics, float zNear, float zFar)
    {
        float c_x = (float)intrinsics[0, 2];
        float c_y = (float)intrinsics[1, 2];

        // the intrinsics are in Kinect coordinates: X - left, Y - up, Z - forward
        //we need the coordinates to be: X - right, Y - down, Z - forward
        c_x = imageWidth - c_x;
        c_y = imageHeight - c_y;

        // http://spottrlabs.blogspot.com/2012/07/opencv-and-opengl-not-always-friends.html
        // http://opencv.willowgarage.com/wiki/Posit
        Matrix4x4 projMat = new Matrix4x4();

        projMat[0, 0] = (float)(2.0 * intrinsics[0, 0] / imageWidth);
        projMat[1, 1] = (float)(2.0 * intrinsics[1, 1] / imageHeight);
        projMat[2, 0] = (float)(-1.0f + 2 * c_x / imageWidth);
        projMat[2, 1] = (float)(-1.0f + 2 * c_y / imageHeight);

        // Note this changed from previous code
        // see here: http://www.songho.ca/opengl/gl_projectionmatrix.html
        projMat[2, 2] = -(zFar + zNear) / (zFar - zNear);
        projMat[3, 2] = -2.0f * zNear * zFar / (zFar - zNear);
        projMat[2, 3] = -1;

        // Transpose to fit Unity's column-major matrices (in contrast to the row-major convention in most vision code).
        projMat = projMat.transpose;

        return projMat;
    }
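
A minimal usage sketch (not part of the original source): assuming this method lives on a Unity component that also defines imageWidth/imageHeight, the result can be assigned to Camera.projectionMatrix (standard Unity API). `calibration.colorCameraMatrix` and `targetCamera` are illustrative names, not from the source.

    // Hypothetical caller: replace the Unity camera's FOV-derived projection
    // with the calibrated one. The near/far plane values are example choices.
    private void ApplyCalibratedProjection()
    {
        Matrix4x4 proj = GetProjectionMatrix(calibration.colorCameraMatrix, 0.1f, 10.0f);
        targetCamera.projectionMatrix = proj;
    }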
Code example #2
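        // Recovers a "fake" calibration directly from the Kinect SDK: samples a 3D
        // grid of camera-space points, projects them with the SDK's CoordinateMapper,
        // then fits color/depth intrinsics and the depth-to-color transform to the
        // resulting synthetic correspondences, reporting reprojection errors.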
        public void RecoverCalibrationFromSensor(KinectSensor kinectSensor)
        {
            var stopWatch = new System.Diagnostics.Stopwatch();

            stopWatch.Start();

            var objectPoints1 = new List<RoomAliveToolkit.Matrix>();
            var colorPoints1  = new List<System.Drawing.PointF>();
            var depthPoints1  = new List<System.Drawing.PointF>();

            int n = 0;

            for (float x = -2f; x < 2f; x += 0.2f)
            {
                for (float y = -2f; y < 2f; y += 0.2f)
                {
                    for (float z = 0.4f; z < 4.5f; z += 0.4f)
                    {
                        var kinectCameraPoint = new CameraSpacePoint();
                        kinectCameraPoint.X = x;
                        kinectCameraPoint.Y = y;
                        kinectCameraPoint.Z = z;

                        // use SDK's projection
                        // adjust Y to make a right-handed coordinate system that is a projection of the Kinect 3D points
                        var kinectColorPoint = kinectSensor.CoordinateMapper.MapCameraPointToColorSpace(kinectCameraPoint);
                        kinectColorPoint.Y = colorImageHeight - kinectColorPoint.Y;
                        var kinectDepthPoint = kinectSensor.CoordinateMapper.MapCameraPointToDepthSpace(kinectCameraPoint);
                        kinectDepthPoint.Y = depthImageHeight - kinectDepthPoint.Y;

                        if ((kinectColorPoint.X >= 0) && (kinectColorPoint.X < colorImageWidth) &&
                            (kinectColorPoint.Y >= 0) && (kinectColorPoint.Y < colorImageHeight) &&
                            (kinectDepthPoint.X >= 0) && (kinectDepthPoint.X < depthImageWidth) &&
                            (kinectDepthPoint.Y >= 0) && (kinectDepthPoint.Y < depthImageHeight))
                        {
                            n++;
                            var objectPoint = new RoomAliveToolkit.Matrix(3, 1);
                            objectPoint[0] = kinectCameraPoint.X;
                            objectPoint[1] = kinectCameraPoint.Y;
                            objectPoint[2] = kinectCameraPoint.Z;
                            objectPoints1.Add(objectPoint);

                            var colorPoint = new System.Drawing.PointF();
                            colorPoint.X = kinectColorPoint.X;
                            colorPoint.Y = kinectColorPoint.Y;
                            colorPoints1.Add(colorPoint);


                            //Console.WriteLine(objectPoint[0] + "\t" + objectPoint[1] + "\t" + colorPoint.X + "\t" + colorPoint.Y);

                            var depthPoint = new System.Drawing.PointF();
                            depthPoint.X = kinectDepthPoint.X;
                            depthPoint.Y = kinectDepthPoint.Y;
                            depthPoints1.Add(depthPoint);
                        }
                    }
                }
            }

            colorCameraMatrix[0, 0] = 1000;                 //fx
            colorCameraMatrix[1, 1] = 1000;                 //fy
            colorCameraMatrix[0, 2] = colorImageWidth / 2;  //cx
            colorCameraMatrix[1, 2] = colorImageHeight / 2; //cy
            colorCameraMatrix[2, 2] = 1;

            var rotation    = new Matrix(3, 1);
            var translation = new Matrix(3, 1);
            var colorError  = CalibrateColorCamera(objectPoints1, colorPoints1, colorCameraMatrix, colorLensDistortion, rotation, translation);
            //var rotationMatrix = Orientation.Rodrigues(rotation);
            var rotationMatrix = RoomAliveToolkit.ProjectorCameraEnsemble.RotationMatrixFromRotationVector(rotation);

            depthToColorTransform = Matrix.Identity(4, 4);
            for (int i = 0; i < 3; i++)
            {
                depthToColorTransform[i, 3] = translation[i];
                for (int j = 0; j < 3; j++)
                {
                    depthToColorTransform[i, j] = rotationMatrix[i, j];
                }
            }


            depthCameraMatrix[0, 0] = 360;                  //fx
            depthCameraMatrix[1, 1] = 360;                  //fy
            depthCameraMatrix[0, 2] = depthImageWidth / 2;  //cx
            depthCameraMatrix[1, 2] = depthImageHeight / 2; //cy
            depthCameraMatrix[2, 2] = 1;

            var depthError = CalibrateDepthCamera(objectPoints1, depthPoints1, depthCameraMatrix, depthLensDistortion);

            //// latest SDK gives access to depth intrinsics directly -- this gives slightly higher projection error; not sure why
            //var depthIntrinsics = kinectSensor.CoordinateMapper.GetDepthCameraIntrinsics();
            //depthCameraMatrix[0, 0] = depthIntrinsics.FocalLengthX;
            //depthCameraMatrix[1, 1] = depthIntrinsics.FocalLengthY;
            //depthCameraMatrix[0, 2] = depthIntrinsics.PrincipalPointX;
            //depthCameraMatrix[1, 2] = depthImageHeight - depthIntrinsics.PrincipalPointY; // note flip in Y!
            //depthDistCoeffs[0] = depthIntrinsics.RadialDistortionSecondOrder;
            //depthDistCoeffs[1] = depthIntrinsics.RadialDistortionFourthOrder;


            // check projections
            double depthProjectionError = 0;
            double colorProjectionError = 0;
            var    color            = new RoomAliveToolkit.Matrix(4, 1);
            var    testObjectPoint4 = new RoomAliveToolkit.Matrix(4, 1);

            for (int i = 0; i < n; i++)
            {
                var testObjectPoint = objectPoints1[i];
                var testDepthPoint  = depthPoints1[i];
                var testColorPoint  = colorPoints1[i];

                // "camera space" == depth camera space
                // depth camera projection
                double depthU, depthV;
                CameraMath.Project(depthCameraMatrix, depthLensDistortion, testObjectPoint[0], testObjectPoint[1], testObjectPoint[2], out depthU, out depthV);

                double dx = testDepthPoint.X - depthU;
                double dy = testDepthPoint.Y - depthV;
                depthProjectionError += (dx * dx) + (dy * dy);

                // color camera projection
                testObjectPoint4[0] = testObjectPoint[0];
                testObjectPoint4[1] = testObjectPoint[1];
                testObjectPoint4[2] = testObjectPoint[2];
                testObjectPoint4[3] = 1;

                color.Mult(depthToColorTransform, testObjectPoint4);
                color.Scale(1.0 / color[3]); // not necessary for this transform

                double colorU, colorV;
                CameraMath.Project(colorCameraMatrix, colorLensDistortion, color[0], color[1], color[2], out colorU, out colorV);

                dx = testColorPoint.X - colorU;
                dy = testColorPoint.Y - colorV;
                colorProjectionError += (dx * dx) + (dy * dy);
            }
            depthProjectionError /= n;
            colorProjectionError /= n;


            stopWatch.Stop();
            Console.WriteLine("FakeCalibration :");
            Console.WriteLine("n = " + n);
            Console.WriteLine("color error = " + colorError);
            Console.WriteLine("depth error = " + depthError);
            Console.WriteLine("depth reprojection error = " + depthProjectionError);
            Console.WriteLine("color reprojection error = " + colorProjectionError);
            Console.WriteLine("depth camera matrix = \n" + depthCameraMatrix);
            Console.WriteLine("depth lens distortion = \n" + depthLensDistortion);
            Console.WriteLine("color camera matrix = \n" + colorCameraMatrix);
            Console.WriteLine("color lens distortion = \n" + colorLensDistortion);

            Console.WriteLine(stopWatch.ElapsedMilliseconds + " ms");


            //// get camera space table
            //// this does not change frame to frame (or so I believe)
            //var tableEntries = kinectSensor.CoordinateMapper.GetDepthFrameToCameraSpaceTable();

            //// compute our own version of the camera space table and compare it to the SDK's
            //stopWatch.Restart();

            //var tableEntries2 = ComputeDepthFrameToCameraSpaceTable();
            //Console.WriteLine("ComputeDepthFrameToCameraSpaceTable took " + stopWatch.ElapsedMilliseconds + " ms");

            //{
            //    float error = 0;
            //    for (int framey = 0; framey < depthImageHeight; framey++)
            //        for (int framex = 0; framex < depthImageWidth; framex++)
            //        {
            //            var point1 = tableEntries[depthImageWidth * framey + framex];
            //            var point2 = tableEntries2[depthImageWidth * framey + framex];

            //            error += (float)Math.Sqrt((point1.X - point2.X) * (point1.X - point2.X) + (point1.Y - point2.Y) * (point1.Y - point2.Y));
            //        }
            //    error /= (float)(depthImageHeight * depthImageWidth);
            //    Console.WriteLine("error = " + error);
            //}
        }
Code example #3
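        // For every Kinect camera, projects valid depth pixels into the color image;
        // uses the decoded Gray code images to map those color points to projector
        // coordinates; then calibrates each projector's intrinsics and per-camera
        // pose with a small RANSAC loop over non-coplanar point subsets.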
        public void CalibrateProjectorGroups(string directory)
        {
            // for all cameras, take depth image points to color image points
            var depthImage = new FloatImage(depthWidth, depthHeight);
            var varianceImage = new FloatImage(depthWidth, depthHeight);
            var validMask = new ByteImage(depthWidth, depthHeight);

            foreach (var camera in cameras)
            {
                Console.WriteLine("projecting depth points to color camera " + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");
                varianceImage.LoadFromFile(cameraDirectory + "/variance.bin");
                validMask.Zero();

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();

                // TODO: consider using just one 4x4 in calibration class
                var colorCamera = new Matrix(4, 1);
                camera.colorImagePoints = new List<Matrix>();
                camera.depthCameraPoints = new List<Matrix>();
                var depthCamera4 = new Matrix(4, 1);

                // for each valid point in depth image
                int numRejected = 0;
                for (int y = 0; y < depthHeight; y += 1)
                    for (int x = 0; x < depthWidth; x += 1)
                    {
                        float depth = depthImage[x, y] / 1000f; // m
                        float variance = varianceImage[x, y];

                        if (depth == 0)
                            continue;
                        if (variance > 6 * 6)
                        {
                            numRejected++;
                            continue;
                        }
                        validMask[x, y] = (byte)255;

                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[y * depthWidth + x];
                        depthCamera4[0] = point.X * depth;
                        depthCamera4[1] = point.Y * depth;
                        depthCamera4[2] = depth;
                        depthCamera4[3] = 1;

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera4);
                        //colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);

                        if ((colorU >= 0) && (colorU < (colorWidth - 1)) && (colorV >= 0) && (colorV < (colorHeight - 1))) // BEWARE: later do we round or truncate??
                        {
                            var colorImagePoint = new Matrix(2, 1);
                            colorImagePoint[0] = colorU;
                            colorImagePoint[1] = colorV;
                            camera.colorImagePoints.Add(colorImagePoint);

                            // expect a 3-vector?
                            var depthCamera = new Matrix(3, 1);
                            depthCamera[0] = depthCamera4[0];
                            depthCamera[1] = depthCamera4[1];
                            depthCamera[2] = depthCamera4[2];

                            camera.depthCameraPoints.Add(depthCamera);

                            //Console.WriteLine(depthCamera[0] + "\t" + depthCamera[1] + "\t -> " + colorImagePoint[0] + "\t" + colorImagePoint[1]);
                        }

                    }
                SaveToTiff(imagingFactory, validMask, cameraDirectory + "/validMask.tiff");

                Console.WriteLine("rejected " + 100 * (float)numRejected / (float)(depthWidth * depthHeight) + "% pixels for high variance");

            }

            // we never save colorImagePoints / depthCameraPoints, so the projection step above must always be run before this one

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // use decoded Gray code images to create calibration point sets
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                projector.calibrationPointSets = new Dictionary<Camera, CalibrationPointSet>();

                foreach (var camera in cameras)
                {
                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    var decodedColumns = new ShortImage(colorWidth, colorHeight);
                    var decodedRows = new ShortImage(colorWidth, colorHeight);
                    var mask = new ByteImage(colorWidth, colorHeight);

                    LoadFromTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    LoadFromTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    LoadFromTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    // we have a bunch of color camera / depth camera point correspondences
                    // use the Gray code to find the position of the color camera point in the projector frame

                    // find 2D projector coordinates from decoded Gray code images
                    var imagePoints = new List<System.Drawing.PointF>();
                    var worldPoints = new List<Matrix>();

                    for (int i = 0; i < camera.colorImagePoints.Count; i++)
                    {
                        var colorImagePoint = camera.colorImagePoints[i];

                        // We would like to relate projected color points to color images stored in memory.
                        // The Kinect SDK and our camera calibration assume X left, Y up (from the POV of the camera).
                        // We index images in memory with X right and Y down.
                        // Our Gray code images are flipped in the horizontal direction.
                        // Therefore to map an image space coordinate to a memory location we flip Y (and not X):
                        int x = (int)(colorImagePoint[0] + 0.5f);
                        int y = colorHeight - (int)(colorImagePoint[1] + 0.5f);

                        if ((x < 0) || (x >= colorWidth) || (y < 0) || (y >= colorHeight))
                        {
                            //Console.WriteLine("out of bounds");
                            continue;
                        }

                        if (mask[x, y] > 0) // Gray code is valid
                        {
                            // We would like to relate decoded row/column values to projector coordinates.
                            // To match the camera, we want the projector's coordinate system to be X left, Y up (from the POV of the projector).
                            // We assume that the projector is configured in front projection mode (i.e., projected text looks correct in the real world).
                            // In that case decoded columns run X right (in the real world), decoded rows run Y down (in the real world).
                            // So we need to flip both X and Y decoded values.

                            var projectorImagePoint = new System.Drawing.PointF(projector.width - decodedColumns[x, y], projector.height - decodedRows[x, y]);
                            var depthCameraPoint = camera.depthCameraPoints[i];

                            imagePoints.Add(projectorImagePoint);
                            worldPoints.Add(depthCameraPoint);

                            //Console.WriteLine(depthCameraPoint[0] + "\t" + depthCameraPoint[1] + "\t" + depthCameraPoint[2] + "-> \t" + x + "\t" + y + "-> \t" + projectorImagePoint.X + "\t" + projectorImagePoint.Y);
                        }
                    }

                    if (worldPoints.Count > 1000)
                    {
                        var pointSet = new CalibrationPointSet();
                        pointSet.worldPoints = worldPoints;
                        pointSet.imagePoints = imagePoints;
                        projector.calibrationPointSets[camera] = pointSet;
                        Console.WriteLine("projector " + projector.name + " is seen by camera " + camera.name + " (" + worldPoints.Count + " points)");
                    }
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // calibration
            foreach (var projector in projectors)
            {
                Console.WriteLine("calibrating projector " + projector.name);

                string projectorDirectory = directory + "/projector" + projector.name;

                // RANSAC
                double minError = Double.PositiveInfinity;
                var random = new Random(0); // provide seed to ease debugging
                for (int i = 0; i < 4; i++)
                {
                    Console.WriteLine("RANSAC iteration " + i);

                    // randomly select small number of points from each calibration set
                    var worldPointSubsets = new List<List<Matrix>>();
                    var imagePointSubsets = new List<List<System.Drawing.PointF>>();

                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointSubset = new List<Matrix>();
                        var imagePointSubset = new List<System.Drawing.PointF>();

                        bool nonCoplanar = false;
                        int nTries = 0;

                        while (!nonCoplanar)
                        {
                            for (int j = 0; j < 100; j++)
                            {
                                int k = random.Next(pointSet.worldPoints.Count);
                                worldPointSubset.Add(pointSet.worldPoints[k]);
                                imagePointSubset.Add(pointSet.imagePoints[k]);
                            }

                            // check that points are not coplanar
                            Matrix X;
                            double D;
                            double ssdToPlane = PlaneFit(worldPointSubset, out X, out D);
                            int numOutliers = 0;
                            foreach (var point in worldPointSubset)
                            {
                                double distanceFromPlane = X.Dot(point) + D;
                                if (Math.Abs(distanceFromPlane) > 0.1f)
                                    numOutliers++;
                            }
                            nonCoplanar = (numOutliers > worldPointSubset.Count * 0.10f);
                            if (!nonCoplanar)
                            {
                                Console.WriteLine("points are coplanar (try #{0})", nTries);
                                worldPointSubset.Clear();
                                imagePointSubset.Clear();
                            }
                            if (nTries++ > 1000)
                            {
                                Console.WriteLine("Unable to find noncoplanar points.");
                                return; // TODO: should return an error, throw an exception, or try to recover
                                // consider moving this check up with variance check (when calibration point sets are formed)
                            }
                        }

                        worldPointSubsets.Add(worldPointSubset);
                        imagePointSubsets.Add(imagePointSubset);
                    }

                    var cameraMatrix = new Matrix(3, 3);
                    cameraMatrix[0, 0] = 1000; //fx TODO: can we instead init this from FOV?
                    cameraMatrix[1, 1] = 1000; //fy
                    cameraMatrix[0, 2] = projector.width / 2; //cx
                    cameraMatrix[1, 2] = 0; // projector lens shift; note this assumes desktop projection mode
                    cameraMatrix[2, 2] = 1;
                    var distCoeffs = new RoomAliveToolkit.Matrix(2, 1);
                    List<RoomAliveToolkit.Matrix> rotations = null;
                    List<RoomAliveToolkit.Matrix> translations = null;

                    var error = CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
                    Console.WriteLine("error = " + error);
                    //Console.WriteLine("intrinsics = \n" + cameraMatrix);

                    //// we differ from opencv's 'error' in that we do not distinguish between x and y.
                    //// i.e. opencv uses the method below; this number would match if we used pointsInSum2*2 in the divisor.
                    //// double check opencv's error
                    //{
                    //    double sumError2 = 0;
                    //    int pointsInSum2 = 0;
                    //    for (int ii = 0; ii < worldPointSubsets.Count; ii++)
                    //    {
                    //        var R = Orientation.Rodrigues(rotations[ii]);
                    //        var t = translations[ii];
                    //        var p = new Matrix(3, 1);

                    //        var worldPointSet = worldPointSubsets[ii];
                    //        var imagePointSet = imagePointSubsets[ii];

                    //        for (int k = 0; k < worldPointSet.Count; k++)
                    //        {
                    //            p.Mult(R, worldPointSet[k]);
                    //            p.Add(t);
                    //            double u, v;
                    //            Kinect2.Kinect2Calibration.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                    //            double dx = imagePointSet[k].X - u;
                    //            double dy = imagePointSet[k].Y - v;

                    //            double thisError = dx * dx + dy * dy;
                    //            sumError2 += thisError;
                    //            pointsInSum2++;
                    //        }
                    //    }

                    //    // opencv's error is rms but over both x and y combined

                    //    Console.WriteLine("average projection error = " + Math.Sqrt(sumError2 / (float)(pointsInSum2)));
                    //}

                    // find inliers from overall dataset
                    var worldPointInlierSets = new List<List<Matrix>>();
                    var imagePointInlierSets = new List<List<System.Drawing.PointF>>();
                    int setIndex = 0;

                    bool enoughInliers = true;
                    double sumError = 0;
                    int pointsInSum = 0;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointInlierSet = new List<Matrix>();
                        var imagePointInlierSet = new List<System.Drawing.PointF>();

                        //var R = Vision.Orientation.Rodrigues(rotations[setIndex]);
                        var R = RotationMatrixFromRotationVector(rotations[setIndex]);
                        var t = translations[setIndex];
                        var p = new Matrix(3, 1);

                        for (int k = 0; k < pointSet.worldPoints.Count; k++)
                        {
                            p.Mult(R, pointSet.worldPoints[k]);
                            p.Add(t);

                            double u, v;
                            CameraMath.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                            double dx = pointSet.imagePoints[k].X - u;
                            double dy = pointSet.imagePoints[k].Y - v;
                            double thisError = Math.Sqrt((dx * dx) + (dy * dy));

                            if (thisError < 1.0f)
                            {
                                worldPointInlierSet.Add(pointSet.worldPoints[k]);
                                imagePointInlierSet.Add(pointSet.imagePoints[k]);
                            }
                            sumError += thisError * thisError;
                            pointsInSum++;
                        }
                        setIndex++;

                        // require that each view has a minimum number of inliers
                        enoughInliers = enoughInliers && (worldPointInlierSet.Count > 1000);

                        worldPointInlierSets.Add(worldPointInlierSet);
                        imagePointInlierSets.Add(imagePointInlierSet);

                    }

                    // if number of inliers > some threshold (should be for each subset)
                    if (enoughInliers) // should this threshold be a function of the number of cameras, a percentage?
                    {
                        var error2 = CalibrateCamera(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);

                        Console.WriteLine("error with inliers = " + error2);
                        Console.Write("camera matrix = \n" + cameraMatrix);

                        // if err < besterr save model (save rotation and translation to calibrationPointSets, cameraMatrix and distortion coeffs to projector)
                        if (error < minError)
                        {
                            minError = error;
                            projector.cameraMatrix = cameraMatrix;
                            projector.lensDistortion = distCoeffs;
                            setIndex = 0;
                            foreach (var pointSet in projector.calibrationPointSets.Values)
                            {
                                // convert to 4x4 transform
                                //var R = Vision.Orientation.Rodrigues(rotations[setIndex]);
                                var R = RotationMatrixFromRotationVector(rotations[setIndex]);
                                var t = translations[setIndex];

                                var T = new Matrix(4, 4);
                                T.Identity();
                                for (int ii = 0; ii < 3; ii++)
                                {
                                    for (int jj = 0; jj < 3; jj++)
                                        T[ii, jj] = R[ii, jj];
                                    T[ii, 3] = t[ii];
                                }
                                pointSet.pose = T;
                                pointSet.worldPointInliers = worldPointInlierSets[setIndex];
                                pointSet.imagePointInliers = imagePointInlierSets[setIndex];

                                setIndex++;
                            }
                        }
                    }

                }

                Console.WriteLine("final calibration:");
                Console.Write("camera matrix = \n" + projector.cameraMatrix);
                Console.Write("distortion = \n" + projector.lensDistortion);
                Console.WriteLine("error = " + minError);

                foreach (var camera in projector.calibrationPointSets.Keys)
                {
                    Console.WriteLine("camera " + camera.name + " pose:");
                    Console.Write(projector.calibrationPointSets[camera].pose);
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);
        }
Code example #4
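 // Applies the 4x4 homogeneous transform A to each 3x1 point, divides by w,
 // and returns the transformed points as new 3x1 matrices.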
 public static List<RoomAliveToolkit.Matrix> TransformPoints(RoomAliveToolkit.Matrix A, List<RoomAliveToolkit.Matrix> points)
 {
     var transformedPoints = new List<RoomAliveToolkit.Matrix>();
     var point4 = new RoomAliveToolkit.Matrix(4, 1);
     point4[3] = 1;
     var transformedPoint4 = new RoomAliveToolkit.Matrix(4, 1);
     foreach (var point in points)
     {
         point4[0] = point[0]; point4[1] = point[1]; point4[2] = point[2];
         transformedPoint4.Mult(A, point4);
         transformedPoint4.Scale(1.0f / transformedPoint4[3]);
         var transformedPoint = new RoomAliveToolkit.Matrix(3, 1);
         transformedPoint[0] = transformedPoint4[0]; transformedPoint[1] = transformedPoint4[1]; transformedPoint[2] = transformedPoint4[2];
         transformedPoints.Add(transformedPoint);
     }
     return transformedPoints;
 }
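
A one-line usage sketch (hypothetical names, following code example #2): apply the recovered 4x4 depth-to-color transform to a list of depth-camera-space points.

 // `calibration` and `depthCameraPoints` stand in for objects from the earlier examples.
 var colorCameraPoints = TransformPoints(calibration.depthToColorTransform, depthCameraPoints);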
Code example #5
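        // Least-squares plane fit via PCA: builds the scatter matrix of the points
        // about their centroid and takes the eigenvector of the smallest eigenvalue
        // as the plane normal X, with offset D = -X.Dot(mu). Returns that eigenvalue,
        // i.e. the sum of squared point-to-plane distances.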
        public static double PlaneFit(IList<Matrix> points, out Matrix X, out double D)
        {
            X = new Matrix(3, 1);

            var mu = new RoomAliveToolkit.Matrix(3, 1);
            for (int i = 0; i < points.Count; i++)
                mu.Add(points[i]);
            mu.Scale(1f / (float)points.Count);

            var A = new RoomAliveToolkit.Matrix(3, 3);
            var pc = new RoomAliveToolkit.Matrix(3, 1);
            var M = new RoomAliveToolkit.Matrix(3, 3);
            for (int i = 0; i < points.Count; i++)
            {
                var p = points[i];
                pc.Sub(p, mu);
                M.Outer(pc, pc);
                A.Add(M);
            }

            var V = new RoomAliveToolkit.Matrix(3, 3);
            var d = new RoomAliveToolkit.Matrix(3, 1);
            A.Eig(V, d); // TODO: replace with 3x3 version?

            //Console.WriteLine("------");
            //Console.WriteLine(A);
            //Console.WriteLine(V);
            //Console.WriteLine(d);

            double minEigenvalue = Double.MaxValue;
            int minEigenvaluei = 0;
            for (int i = 0; i < 3; i++)
                if (d[i] < minEigenvalue)
                {
                    minEigenvalue = d[i];
                    minEigenvaluei = i;
                }

            X.CopyCol(V, minEigenvaluei);

            D = -X.Dot(mu);

            // min eigenvalue is the sum of squared distances to the plane
            // signed distance is: double distance = X.Dot(point) + D;

            return minEigenvalue;
        }
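
A short usage sketch (not in the original), relying only on the contract above: X is the unit plane normal, D the offset, and the signed distance of a point is X.Dot(point) + D. It mirrors the coplanarity test in code example #3; `worldPointSubset` is the same kind of List<Matrix> of 3x1 points used there.

        // Hypothetical caller: fit a plane and count points farther than 0.1 m
        // from it, as in the RANSAC coplanarity check above.
        Matrix planeNormal;
        double planeOffset;
        double ssd = PlaneFit(worldPointSubset, out planeNormal, out planeOffset);

        int numOutliers = 0;
        foreach (var point in worldPointSubset)
        {
            double distanceFromPlane = planeNormal.Dot(point) + planeOffset;
            if (Math.Abs(distanceFromPlane) > 0.1)
                numOutliers++;
        }
        Console.WriteLine("sum of squared distances = " + ssd + ", outliers = " + numOutliers);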