Example #1
        public void Copy(FloatImage x, FloatImage y, FloatImage z)
        {
            Float3* p = data;

            float* px = x.Data(0, 0);
            float* py = y.Data(0, 0);
            float* pz = z.Data(0, 0);

            for (int i = 0; i < width * height; i++)
            {
                p->x = *px++;
                p->y = *py++;
                p->z = *pz++;
                p++;
            }
        }
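A minimal usage sketch (identifiers r, g, b, and packed are hypothetical): interleaving three planar FloatImages into a single Float3Image, assuming all images share the same width and height.

        // r, g, b are FloatImages with the same dimensions as packed
        var packed = new Float3Image(width, height);
        packed.Copy(r, g, b); // per pixel: p->x = *r, p->y = *g, p->z = *b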
Example #2
        public void Copy(FloatImage a, float min, float max)
        {
            float *pa = a.Data(0, 0);
            byte * p  = data;
            float  s  = 255.0f / (max - min);

            for (int i = 0; i < width * height; i++)
            {
                int value = (int)(s * (*pa++ - min));

                if (value < 0)
                {
                    *p++ = 0;
                }
                else if (value > 255)
                {
                    *p++ = (byte)255;
                }
                else
                {
                    *p++ = (byte)value;
                }
            }
        }
        public void CaptureDepthAndColor(string directory)
        {
            // foreach camera:
            // average a bunch of frames to find a good depth image
            // get calibration
            // TODO: parallelize

            foreach (var camera in cameras)
            {
                string cameraDirectory = directory + "/camera" + camera.name;
                if (!Directory.Exists(cameraDirectory))
                    Directory.CreateDirectory(cameraDirectory);

                // compute mean and variance of depth image
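                // Running sums over N frames let us compute, per pixel,
                //   mean = sum / count   and   variance = sumSquared / count - mean * mean
                // without storing the individual frames; zero (invalid) depths are skipped.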
                var sum = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                sum.Zero();
                var sumSquared = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                sumSquared.Zero();
                var count = new ShortImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                count.Zero();
                var depth = new ShortImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                for (int i = 0; i < 100; i++)
                {
                    var depthBytes = camera.Client.LatestDepthImage();
                    Marshal.Copy(depthBytes, 0, depth.DataIntPtr, Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight * 2);
                    Console.WriteLine("acquired depth image " + i);
                    for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                        for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                            if (depth[x, y] != 0)
                            {
                                ushort d = depth[x, y];
                                count[x, y]++;
                                sum[x, y] += d;
                                sumSquared[x, y] += d * d;
                            }
                }

                var meanImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                meanImage.Zero(); // not all pixels will be assigned
                var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                varianceImage.Zero(); // not all pixels will be assigned

                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                    {
                        if (count[x, y] > 50)
                        {
                            float mean = sum[x, y] / count[x, y];
                            meanImage[x, y] = mean;
                            float variance = sumSquared[x, y] / count[x, y] - mean * mean;
                            varianceImage[x, y] = variance;
                        }
                    }

                // WIC doesn't support encoding float tiff images, so for now we write to a binary file
                meanImage.SaveToFile(cameraDirectory + "/mean.bin");
                varianceImage.SaveToFile(cameraDirectory + "/variance.bin");

                // create a ShortImage version that WIC can encode, used only for debugging
                var meanDepthShortImage = new ShortImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                        meanDepthShortImage[x, y] = (ushort)meanImage[x, y];
                SaveToTiff(imagingFactory, meanDepthShortImage, cameraDirectory + "/mean.tiff");

                // convert to world coordinates and save to ply file
                camera.calibration = camera.Client.GetCalibration();
                var depthFrameToCameraSpaceTable = camera.calibration.ComputeDepthFrameToCameraSpaceTable();
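                // each table entry holds the (X, Y) ray factors of a depth pixel at unit depth,
                // so the camera-space point is simply (X * z, Y * z, z) with z in meters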
                var world = new Float3Image(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight); // TODO: move out/reuse
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                    {
                        var pointF = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                        float meanDepthMeters = meanImage[x, y] / 1000.0f;

                        Float3 worldPoint;
                        worldPoint.x = pointF.X * meanDepthMeters;
                        worldPoint.y = pointF.Y * meanDepthMeters;
                        worldPoint.z = meanDepthMeters;
                        world[x, y] = worldPoint;
                    }
                SaveToPly(cameraDirectory + "/mean.ply", world);

                // TODO: consider writing OBJ instead
            }

            //// connect to projectors
            //foreach (var projector in projectors)
            //{
            //    projector.Client.OpenDisplay(projector.displayIndex);
            //}

            // collect color images; this is not necessary for calibration, but is nice to have for visualization
            //foreach (var projector in projectors)
            //    projector.Client.SetColor(projector.displayIndex, 0f, 0f, 0f);
            //System.Threading.Thread.Sleep(5000);
            foreach (var camera in cameras)
            {
                // save color image
                string cameraDirectory = directory + "/camera" + camera.name;
                var jpegBytes = camera.Client.LatestJPEGImage();
                File.WriteAllBytes(cameraDirectory + "/color.jpg", jpegBytes);
                var colorBytes = camera.Client.LatestRGBImage();
                var image = new ARGBImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                Marshal.Copy(colorBytes, 0, image.DataIntPtr, Kinect2Calibration.colorImageWidth * Kinect2Calibration.colorImageHeight * 4);
                SaveToTiff(imagingFactory, image, cameraDirectory + "/color.tiff");
                image.Dispose();

            }

            //// close all displays
            //foreach (var projector in projectors)
            //{
            //    projector.Client.CloseDisplay(projector.displayIndex);
            //}
        }
        public void CalibrateProjectorGroups(string directory)
        {
            // for all cameras, take depth image points to color image points
            var depthImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var validMask = new ByteImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);

            foreach (var camera in cameras)
            {
                Console.WriteLine("projecting depth points to color camera " + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");
                varianceImage.LoadFromFile(cameraDirectory + "/variance.bin");
                validMask.Zero();

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();

                // TODO: consider using just one 4x4 in calibration class
                var colorCamera = new Matrix(4, 1);
                camera.colorImagePoints = new List<Matrix>();
                camera.depthCameraPoints = new List<Matrix>();
                var depthCamera4 = new Matrix(4, 1);

                // for each valid point in depth image
                int numRejected = 0;
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y += 1)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x += 1)
                    {
                        float depth = depthImage[x, y] / 1000f; // m
                        float variance = varianceImage[x, y];

                        if (depth == 0)
                            continue;
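                        // depth samples were accumulated in millimeters, so variance is in mm^2;
                        // 6 * 6 rejects pixels whose standard deviation exceeds 6 mm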
                        if (variance > 6 * 6)
                        {
                            numRejected++;
                            continue;
                        }
                        validMask[x, y] = (byte)255;

                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                        depthCamera4[0] = point.X * depth;
                        depthCamera4[1] = point.Y * depth;
                        depthCamera4[2] = depth;
                        depthCamera4[3] = 1;

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera4);
                        //colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);

                        if ((colorU >= 0) && (colorU < (Kinect2Calibration.colorImageWidth - 1)) && (colorV >= 0) && (colorV < (Kinect2Calibration.colorImageHeight - 1))) // BEWARE: later do we round or truncate??
                        {
                            var colorImagePoint = new Matrix(2, 1);
                            colorImagePoint[0] = colorU;
                            colorImagePoint[1] = colorV;
                            camera.colorImagePoints.Add(colorImagePoint);

                            // expect a 3-vector?
                            var depthCamera = new Matrix(3, 1);
                            depthCamera[0] = depthCamera4[0];
                            depthCamera[1] = depthCamera4[1];
                            depthCamera[2] = depthCamera4[2];

                            camera.depthCameraPoints.Add(depthCamera);

                            //Console.WriteLine(depthCamera[0] + "\t" + depthCamera[1] + "\t -> " + colorImagePoint[0] + "\t" + colorImagePoint[1]);
                        }

                    }
                SaveToTiff(imagingFactory, validMask, cameraDirectory + "/validMask.tiff");

                Console.WriteLine("rejected " + 100 * (float)numRejected / (float)(Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight) + "% pixels for high variance");

            }

            // we never save colorImagePoints and depthCameraPoints, so we must remember to run the previous pass first

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // use decoded Gray code images to create calibration point sets
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                projector.calibrationPointSets = new Dictionary<Camera, CalibrationPointSet>();

                foreach (var camera in cameras)
                {
                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    LoadFromTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    LoadFromTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    LoadFromTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    // we have a bunch of color camera / depth camera point correspondences
                    // use the Gray code to find the position of the color camera point in the projector frame

                    // find 2D projector coordinates from decoded Gray code images
                    var imagePoints = new List<System.Drawing.PointF>();
                    var worldPoints = new List<Matrix>();

                    for (int i = 0; i < camera.colorImagePoints.Count; i++)
                    {
                        var colorImagePoint = camera.colorImagePoints[i];

                        // We would like to relate projected color points to color images stored in memory.
                        // The Kinect SDK and our camera calibration assume X left, Y up (from the POV of the camera).
                        // We index images in memory with X right and Y down.
                        // Our Gray code images are flipped in the horizontal direction.
                        // Therefore to map an image space coordinate to a memory location we flip Y (and not X):
                        int x = (int)(colorImagePoint[0] + 0.5f);
                        int y = Kinect2Calibration.colorImageHeight - (int)(colorImagePoint[1] + 0.5f);

                        if ((x < 0) || (x >= Kinect2Calibration.colorImageWidth) || (y < 0) || (y >= Kinect2Calibration.colorImageHeight))
                        {
                            //Console.WriteLine("out of bounds");
                            continue;
                        }

                        if (mask[x, y] > 0) // Gray code is valid
                        {
                            // We would like to relate decoded row/column values to projector coordinates.
                            // To match the camera, we want the projector's coordinate system X left, Y up (from the POV of the projector).
                            // We assume that the projector is configured in front projection mode (i.e., projected text looks correct in the real world).
                            // In that case decoded columns run X right (in the real world), decoded rows run Y down (in the real world).
                            // So we need to flip both X and Y decoded values.

                            var projectorImagePoint = new System.Drawing.PointF(projector.width - decodedColumns[x, y], projector.height - decodedRows[x, y]);
                            var depthCameraPoint = camera.depthCameraPoints[i];

                            imagePoints.Add(projectorImagePoint);
                            worldPoints.Add(depthCameraPoint);

                            //Console.WriteLine(depthCameraPoint[0] + "\t" + depthCameraPoint[1] + "\t" + depthCameraPoint[2] + "-> \t" + x + "\t" + y + "-> \t" + projectorImagePoint.X + "\t" + projectorImagePoint.Y);
                        }
                    }

                    if (worldPoints.Count > 1000)
                    {
                        var pointSet = new CalibrationPointSet();
                        pointSet.worldPoints = worldPoints;
                        pointSet.imagePoints = imagePoints;
                        projector.calibrationPointSets[camera] = pointSet;
                        Console.WriteLine("projector " + projector.name + " is seen by camera " + camera.name + " (" + worldPoints.Count + " points)");
                    }
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // calibration
            foreach (var projector in projectors)
            {
                Console.WriteLine("calibrating projector " + projector.name);

                string projectorDirectory = directory + "/projector" + projector.name;

                // RANSAC
                double minError = Double.PositiveInfinity;
                var random = new Random(0); // provide seed to ease debugging

                int numCompletedFits = 0;

                for (int i = 0; (numCompletedFits < 4) && (i < 10); i++)
                {
                    Console.WriteLine("RANSAC iteration " + i);

                    // randomly select small number of points from each calibration set
                    var worldPointSubsets = new List<List<Matrix>>();
                    var imagePointSubsets = new List<List<System.Drawing.PointF>>();

                    bool foundNonplanarSubset = false;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointSubset = new List<Matrix>();
                        var imagePointSubset = new List<System.Drawing.PointF>();

                        // try to find a nonplanar subset
                        bool planar = true;
                        int nTries = 0;
                        while (planar && (nTries++ < 1000))
                        {
                            worldPointSubset.Clear();
                            imagePointSubset.Clear();

                            for (int j = 0; j < 100; j++)
                            {
                                int k = random.Next(pointSet.worldPoints.Count);
                                worldPointSubset.Add(pointSet.worldPoints[k]);
                                imagePointSubset.Add(pointSet.imagePoints[k]);
                            }

                            // planar?
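                            // d appears to hold the principal spreads of the subset; when the
                            // out-of-plane spread d[2] is under 0.1% of d[1], the points are
                            // effectively coplanar and cannot constrain the intrinsics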
                            Matrix Rplane, tplane, d;
                            CameraMath.PlaneFit(worldPointSubset, out Rplane, out tplane, out d);
                            //Console.WriteLine("planar : " + d[2] / d[1]);
                            planar = (d[2] / d[1]) < 0.001f;
                        }

                        worldPointSubsets.Add(worldPointSubset);
                        imagePointSubsets.Add(imagePointSubset);

                        // we can't initialize extrinsics yet, because we don't know which intrinsics we'll be using

                        if (!planar)
                            foundNonplanarSubset = true;
                    }

                    // we do not optimize intrinsics if all the point sets are planar, or if the projector intrinsics are marked as locked
                    bool fixIntrinsics = (!foundNonplanarSubset) || (projector.lockIntrinsics); // TODO: add option to lock intrinsics

                    var rotations = new List<Matrix>();
                    var translations = new List<Matrix>();
                    var cameraMatrix = new Matrix(3, 3);
                    var distCoeffs = new Matrix(2, 1);

                    if (fixIntrinsics)
                    {
                        cameraMatrix.Copy(projector.cameraMatrix);
                        distCoeffs.Copy(projector.lensDistortion);
                    }
                    else // nonplanar, so we can optimize intrinsics
                    {
                        cameraMatrix[0, 0] = 1000; //fx TODO: can we instead init this from FOV?
                        cameraMatrix[1, 1] = 1000; //fy
                        cameraMatrix[0, 2] = projector.width / 2; //cx
                        cameraMatrix[1, 2] = 0; // projector lens shift; note this assumes desktop projection mode
                        cameraMatrix[2, 2] = 1;
                    }

                    // init extrinsics
                    for (int ii = 0; ii < worldPointSubsets.Count; ii++)
                    {
                        Matrix R, t;
                        CameraMath.ExtrinsicsInit(cameraMatrix, distCoeffs, worldPointSubsets[ii], imagePointSubsets[ii], out R, out t);
                        rotations.Add(CameraMath.RotationVectorFromRotationMatrix(R));
                        translations.Add(t);
                    }

                    // initial RANSAC fit on subset of points
                    double error;
                    if (fixIntrinsics)
                        error = CameraMath.CalibrateCameraExtrinsicsOnly(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
                    else
                        error = CameraMath.CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);

                    Console.WriteLine("error on subset = " + error);

                    // RANSAC: find inliers from overall dataset
                    var worldPointInlierSets = new List<List<Matrix>>();
                    var imagePointInlierSets = new List<List<System.Drawing.PointF>>();
                    int setIndex = 0;

                    bool enoughInliers = true;
                    double sumError = 0;
                    int pointsInSum = 0;
                    int totalInliers = 0;
                    int totalPoints = 0;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointInlierSet = new List<Matrix>();
                        var imagePointInlierSet = new List<System.Drawing.PointF>();

                        var R = CameraMath.RotationMatrixFromRotationVector(rotations[setIndex]);
                        var t = translations[setIndex];
                        var p = new Matrix(3, 1);

                        for (int k = 0; k < pointSet.worldPoints.Count; k++)
                        {
                            p.Mult(R, pointSet.worldPoints[k]);
                            p.Add(t);

                            double u, v;
                            CameraMath.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                            double dx = pointSet.imagePoints[k].X - u;
                            double dy = pointSet.imagePoints[k].Y - v;
                            double thisError = Math.Sqrt((dx * dx) + (dy * dy));

                            if (thisError < 2.0f) // TODO: how to set this?
                            {
                                worldPointInlierSet.Add(pointSet.worldPoints[k]);
                                imagePointInlierSet.Add(pointSet.imagePoints[k]);
                            }
                            sumError += thisError * thisError;
                            pointsInSum++;
                        }
                        setIndex++;

                        // require that each view has a minimum number of inliers
                        enoughInliers = enoughInliers && (worldPointInlierSet.Count > 500); // should be related to min number of points in set (above)

                        totalPoints += pointSet.worldPoints.Count;
                        totalInliers += worldPointInlierSet.Count;

                        worldPointInlierSets.Add(worldPointInlierSet);
                        imagePointInlierSets.Add(imagePointInlierSet);
                    }

                    Console.WriteLine("{0}/{1} inliers", totalInliers, totalPoints);

                    // if number of inliers > some threshold (should be for each subset)
                    if (enoughInliers) // should this threshold be a function of the number of cameras, a percentage?
                    {
                        double error2;
                        if (fixIntrinsics)
                            error2 = CameraMath.CalibrateCameraExtrinsicsOnly(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);
                        else
                            error2 = CameraMath.CalibrateCamera(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);

                        Console.WriteLine("error with inliers = " + error2);
                        Console.Write("camera matrix = \n" + cameraMatrix);

                        numCompletedFits++;

                        // if reduced error save model (save rotation and translation to calibrationPointSets, cameraMatrix and distortion coeffs to projector)
                        if (error2 < minError)
                        {
                            minError = error2;
                            projector.cameraMatrix = cameraMatrix;
                            projector.lensDistortion = distCoeffs;
                            setIndex = 0;

                            foreach (var pointSet in projector.calibrationPointSets.Values)
                            {
                                // convert to 4x4 transform
                                var R = CameraMath.RotationMatrixFromRotationVector(rotations[setIndex]);
                                var t = translations[setIndex];

                                var T = new Matrix(4, 4);
                                T.Identity();
                                for (int ii = 0; ii < 3; ii++)
                                {
                                    for (int jj = 0; jj < 3; jj++)
                                        T[ii, jj] = R[ii, jj];
                                    T[ii, 3] = t[ii];
                                }
                                pointSet.pose = T;
                                pointSet.worldPointInliers = worldPointInlierSets[setIndex];
                                pointSet.imagePointInliers = imagePointInlierSets[setIndex];

                                setIndex++;
                            }
                        }
                    }

                }

                if (numCompletedFits == 0)
                    throw new CalibrationFailedException("Unable to successfully calibrate projector: " + projector.name);

                Console.WriteLine("final calibration:");
                Console.Write("camera matrix = \n" + projector.cameraMatrix);
                Console.Write("distortion = \n" + projector.lensDistortion);
                Console.WriteLine("error = " + minError);

                foreach (var camera in projector.calibrationPointSets.Keys)
                {
                    Console.WriteLine("camera " + camera.name + " pose:");
                    Console.Write(projector.calibrationPointSets[camera].pose);
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            //Console.WriteLine("x = [");
            //for (int ii = 0; ii < imagePointSubsets[0].Count; ii++)
            //    Console.WriteLine("{0} {1}", imagePointSubsets[0][ii].X, imagePointSubsets[0][ii].Y);
            //Console.WriteLine("]';");
            //Console.WriteLine("X = [");
            //for (int ii = 0; ii < worldPointSubsets[0].Count; ii++)
            //    Console.WriteLine("{0} {1} {2}", worldPointSubsets[0][ii][0], worldPointSubsets[0][ii][1], worldPointSubsets[0][ii][2]);
            //Console.WriteLine("]';");
            //Console.WriteLine("fc = [{0} {1}];", projector.cameraMatrix[0, 0], projector.cameraMatrix[1, 1]);
            //Console.WriteLine("cc = [{0} {1}];", projector.cameraMatrix[0, 2], projector.cameraMatrix[1, 2]);

            //Matrix thisR, thist;

            //{
            //    Matrix Rplane, tplane;
            //    CameraMath.PlaneFit(worldPointSubsets[0], out Rplane, out tplane);

            //    CameraMath.PlanarDLT(projector.cameraMatrix, projector.lensDistortion, worldPointSubsets[0], imagePointSubsets[0], Rplane, tplane, out thisR, out thist);
            //    //Console.WriteLine("DLT---------");
            //    //Console.WriteLine(thisR);
            //    //Console.WriteLine(thist);

            //}

            //// if pattern is not planar, we can recover projector intrinsics

            //List<RoomAliveToolkit.Matrix> rotations = null;
            //List<RoomAliveToolkit.Matrix> translations = null;

            //var error = CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
            //Console.WriteLine("error = " + error);

            // we check whether each view is planar, so that we can use the correct version of DLT

            // the overall set may not be planar however, so we have to check the union of points

            // if overall set is planar, leave intrinsics alone

            //
        }
        public void SaveToOBJ(string directory, string objPath)
        {
            var objFilename = Path.GetFileNameWithoutExtension(objPath);
            var objDirectory = Path.GetDirectoryName(objPath);

            if (!Directory.Exists(objDirectory))
                Directory.CreateDirectory(objDirectory);

            // Because we need to form triangles, we go back to the depth image
            var quadOffsets = new System.Drawing.Point[]
            {
                new System.Drawing.Point(0, 0),
                new System.Drawing.Point(1, 0),
                new System.Drawing.Point(0, 1),
                new System.Drawing.Point(1, 0),
                new System.Drawing.Point(1, 1),
                new System.Drawing.Point(0, 1),
            };
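            // each 2x2 neighborhood of depth pixels yields two triangles:
            // (0,0)-(1,0)-(0,1) and (1,0)-(1,1)-(0,1)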

            var streamWriter = new CultureInvariantStreamWriter(objDirectory + "/" + objFilename + ".obj");
            var mtlFileWriter = new CultureInvariantStreamWriter(objDirectory + "/" + objFilename + ".mtl");
            streamWriter.WriteLine("mtllib " + objFilename + ".mtl");
            uint nextVertexIndex = 1;
            var depthImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);

            foreach (var camera in cameras)
            {
                mtlFileWriter.WriteLine("newmtl camera" + camera.name);
                mtlFileWriter.WriteLine("Ka 1.000000 1.000000 1.000000");
                mtlFileWriter.WriteLine("Kd 1.000000 1.000000 1.000000");
                mtlFileWriter.WriteLine("Ks 0.000000 0.000000 0.000000");
                mtlFileWriter.WriteLine("Tr 1.000000");
                mtlFileWriter.WriteLine("illum 1");
                mtlFileWriter.WriteLine("Ns 0.000000");
                mtlFileWriter.WriteLine("map_Kd " + objFilename + "_" + camera.name + ".jpg");

                File.Copy(directory + "/camera" + camera.name + "/color.jpg", objDirectory + "/" + objFilename + "_" + camera.name + ".jpg", true);

                streamWriter.WriteLine("usemtl camera" + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();
                var vertices = new Vertex[Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight];
                var colorCamera = new Matrix(4, 1);
                var depthCamera = new Matrix(4, 1);
                var world = new Matrix(4, 1);

                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                    {
                        // depth camera coords
                        var depth = depthImage[x, y] / 1000f; // m
                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[Kinect2Calibration.depthImageWidth * y + x];
                        depthCamera[0] = point.X * depth;
                        depthCamera[1] = point.Y * depth;
                        depthCamera[2] = depth;
                        depthCamera[3] = 1;

                        // world coordinates
                        world.Mult(camera.pose, depthCamera);
                        //world.Scale(1.0 / world[3]); not necessary for this transform

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera);
                        colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);
                        colorU /= (double)Kinect2Calibration.colorImageWidth;
                        colorV /= (double)Kinect2Calibration.colorImageHeight;

                        var vertex = new Vertex();
                        vertex.x = (float)world[0];
                        vertex.y = (float)world[1];
                        vertex.z = (float)world[2];
                        vertex.u = (float)colorU;
                        vertex.v = (float)colorV;
                        vertices[Kinect2Calibration.depthImageWidth * y + x] = vertex;

                    }

                streamWriter.WriteLine("g camera" + camera.name);
                streamWriter.WriteLine("usemtl camera" + camera.name);

                // examine each triangle
                for (int y = 0; y < Kinect2Calibration.depthImageHeight - 1; y++)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth - 1; x++)
                    {
                        int offseti = 0;
                        for (int tri = 0; tri < 2; tri++)
                        {
                            // the indexes of the vertices of this triangle
                            var i0 = Kinect2Calibration.depthImageWidth * (y + quadOffsets[offseti].Y) + (x + quadOffsets[offseti].X);
                            var i1 = Kinect2Calibration.depthImageWidth * (y + quadOffsets[offseti + 1].Y) + (x + quadOffsets[offseti + 1].X);
                            var i2 = Kinect2Calibration.depthImageWidth * (y + quadOffsets[offseti + 2].Y) + (x + quadOffsets[offseti + 2].X);

                            // is triangle valid?
                            bool nonZero = (vertices[i0].z != 0) && (vertices[i1].z != 0) && (vertices[i2].z != 0);

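                            // reject triangles that span a depth discontinuity: each flag is
                            // true only when its edge is shorter than 0.2 m (i.e., no jump)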
                            bool jump01 = Vertex.DistanceSquared(vertices[i0], vertices[i1]) < 0.2 * 0.2;
                            bool jump02 = Vertex.DistanceSquared(vertices[i0], vertices[i2]) < 0.2 * 0.2;
                            bool jump12 = Vertex.DistanceSquared(vertices[i1], vertices[i2]) < 0.2 * 0.2;

                            bool valid = nonZero && jump01 && jump02 && jump12;
                            if (valid)
                            {
                                // only add the vertex if we haven't already
                                if (vertices[i0].index == 0)
                                {
                                    streamWriter.WriteLine(vertices[i0]);
                                    vertices[i0].index = nextVertexIndex++;
                                }
                                if (vertices[i1].index == 0)
                                {
                                    streamWriter.WriteLine(vertices[i1]);
                                    vertices[i1].index = nextVertexIndex++;
                                }
                                if (vertices[i2].index == 0)
                                {
                                    streamWriter.WriteLine(vertices[i2]);
                                    vertices[i2].index = nextVertexIndex++;
                                }
                                streamWriter.WriteLine("f {0}/{0} {1}/{1} {2}/{2}", vertices[i0].index, vertices[i1].index, vertices[i2].index);
                            }
                            offseti += 3;
                        }
                    }
            }
            streamWriter.Close();
            mtlFileWriter.Close();
        }
        public void CalibrateProjectorGroups(string directory)
        {
            // for all cameras, take depth image points to color image points
            var depthImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var validMask = new ByteImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);

            foreach (var camera in cameras)
            {
                Console.WriteLine("projecting depth points to color camera " + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");
                varianceImage.LoadFromFile(cameraDirectory + "/variance.bin");
                validMask.Zero();

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();

                // TODO: consider using just one 4x4 in calibration class
                var colorCamera = new Matrix(4, 1);
                camera.colorImagePoints = new List<Matrix>();
                camera.depthCameraPoints = new List<Matrix>();
                var depthCamera4 = new Matrix(4, 1);

                // for each valid point in depth image
                int numRejected = 0;
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y += 1)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x += 1)
                    {
                        float depth = depthImage[x, y] / 1000f; // m
                        float variance = varianceImage[x, y];

                        if (depth == 0)
                            continue;
                        if (variance > 6 * 6)
                        {
                            numRejected++;
                            continue;
                        }
                        validMask[x, y] = (byte)255;

                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                        depthCamera4[0] = point.X * depth;
                        depthCamera4[1] = point.Y * depth;
                        depthCamera4[2] = depth;
                        depthCamera4[3] = 1;

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera4);
                        //colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);

                        if ((colorU >= 0) && (colorU < (Kinect2Calibration.colorImageWidth - 1)) && (colorV >= 0) && (colorV < (Kinect2Calibration.colorImageHeight - 1))) // BEWARE: later do we round or truncate??
                        {
                            var colorImagePoint = new Matrix(2, 1);
                            colorImagePoint[0] = colorU;
                            colorImagePoint[1] = colorV;
                            camera.colorImagePoints.Add(colorImagePoint);

                            // expect a 3-vector?
                            var depthCamera = new Matrix(3, 1);
                            depthCamera[0] = depthCamera4[0];
                            depthCamera[1] = depthCamera4[1];
                            depthCamera[2] = depthCamera4[2];

                            camera.depthCameraPoints.Add(depthCamera);

                            //Console.WriteLine(depthCamera[0] + "\t" + depthCamera[1] + "\t -> " + colorImagePoint[0] + "\t" + colorImagePoint[1]);
                        }

                    }
                SaveToTiff(imagingFactory, validMask, cameraDirectory + "/validMask.tiff");

                Console.WriteLine("rejected " + 100 * (float)numRejected / (float)(Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight) + "% pixels for high variance");

            }

            // we never save colorImagePoints and depthCameraPoints, so we must remember to run the previous pass first

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // use decoded Gray code images to create calibration point sets
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                projector.calibrationPointSets = new Dictionary<Camera, CalibrationPointSet>();

                foreach (var camera in cameras)
                {
                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    LoadFromTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    LoadFromTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    LoadFromTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    // we have a bunch of color camera / depth camera point correspondences
                    // use the Gray code to find the position of the color camera point in the projector frame

                    // find 2D projector coordinates from decoded Gray code images
                    var imagePoints = new List<System.Drawing.PointF>();
                    var worldPoints = new List<Matrix>();

                    for (int i = 0; i < camera.colorImagePoints.Count; i++)
                    {
                        var colorImagePoint = camera.colorImagePoints[i];

                        // We would like to relate projected color points to color images stored in memory.
                        // The Kinect SDK and our camera calibration assume X left, Y up (from the POV of the camera).
                        // We index images in memory with X right and Y down.
                        // Our Gray code images are flipped in the horizontal direction.
                        // Therefore to map an image space coordinate to a memory location we flip Y (and not X):
                        int x = (int)(colorImagePoint[0] + 0.5f);
                        int y = Kinect2Calibration.colorImageHeight - (int)(colorImagePoint[1] + 0.5f);

                        if ((x < 0) || (x >= Kinect2Calibration.colorImageWidth) || (y < 0) || (y >= Kinect2Calibration.colorImageHeight))
                        {
                            //Console.WriteLine("out of bounds");
                            continue;
                        }

                        if (mask[x, y] > 0) // Gray code is valid
                        {
                            // We would like to relate decoded row/column values to projector coordinates.
                            // To match the camera, we want the projector's coordinate system X left, Y up (from the POV of the projector).
                            // We assume that the projector is configured in front projection mode (i.e., projected text looks correct in the real world).
                            // In that case decoded columns run X right (in the real world), decoded rows run Y down (in the real world).
                            // So we need to flip both X and Y decoded values.

                            var projectorImagePoint = new System.Drawing.PointF(projector.width - decodedColumns[x, y], projector.height - decodedRows[x, y]);
                            var depthCameraPoint = camera.depthCameraPoints[i];

                            imagePoints.Add(projectorImagePoint);
                            worldPoints.Add(depthCameraPoint);

                            //Console.WriteLine(depthCameraPoint[0] + "\t" + depthCameraPoint[1] + "\t" + depthCameraPoint[2] + "-> \t" + x + "\t" + y + "-> \t" + projectorImagePoint.X + "\t" + projectorImagePoint.Y);
                        }
                    }

                    if (worldPoints.Count > 1000)
                    {
                        var pointSet = new CalibrationPointSet();
                        pointSet.worldPoints = worldPoints;
                        pointSet.imagePoints = imagePoints;
                        projector.calibrationPointSets[camera] = pointSet;
                        Console.WriteLine("projector " + projector.name + " is seen by camera " + camera.name + " (" + worldPoints.Count + " points)");
                    }
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // calibration
            foreach (var projector in projectors)
            {
                Console.WriteLine("calibrating projector " + projector.name);

                string projectorDirectory = directory + "/projector" + projector.name;

                // RANSAC
                double minError = Double.PositiveInfinity;
                var random = new Random(0); // provide seed to ease debugging

                int numCompletedFits = 0;

                for (int i = 0; (numCompletedFits < 4) && (i < 10); i++)
                {
                    Console.WriteLine("RANSAC iteration " + i);

                    // randomly select small number of points from each calibration set
                    var worldPointSubsets = new List<List<Matrix>>();
                    var imagePointSubsets = new List<List<System.Drawing.PointF>>();

                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointSubset = new List<Matrix>();
                        var imagePointSubset = new List<System.Drawing.PointF>();

                        bool nonCoplanar = false;
                        int nTries = 0;

                        while (!nonCoplanar)
                        {
                            for (int j = 0; j < 100; j++)
                            {
                                int k = random.Next(pointSet.worldPoints.Count);
                                worldPointSubset.Add(pointSet.worldPoints[k]);
                                imagePointSubset.Add(pointSet.imagePoints[k]);
                            }

                            // check that points are not coplanar
                            Matrix X;
                            double D;
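                            // fit a plane X . p + D = 0 to the subset; it counts as noncoplanar
                            // only when more than 10% of the points lie over 0.1 m off the plane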
                            double ssdToPlane = PlaneFit(worldPointSubset, out X, out D);
                            int numOutliers = 0;
                            foreach (var point in worldPointSubset)
                            {
                                double distanceFromPlane = X.Dot(point) + D;
                                if (Math.Abs(distanceFromPlane) > 0.1f)
                                    numOutliers++;
                            }
                            nonCoplanar = (numOutliers > worldPointSubset.Count * 0.10f);
                            if (!nonCoplanar)
                            {
                                Console.WriteLine("points are coplanar (try #{0})", nTries);
                                worldPointSubset.Clear();
                                imagePointSubset.Clear();
                            }
                            if (nTries++ > 1000)
                            {
                                throw new CalibrationFailedException("Unable to find noncoplanar points.");
                                // consider moving this check up with variance check (when calibration point sets are formed)
                            }
                        }

                        worldPointSubsets.Add(worldPointSubset);
                        imagePointSubsets.Add(imagePointSubset);
                    }

                    var cameraMatrix = new Matrix(3, 3);
                    cameraMatrix[0, 0] = 1000; //fx TODO: can we instead init this from FOV?
                    cameraMatrix[1, 1] = 1000; //fy
                    cameraMatrix[0, 2] = projector.width / 2; //cx
                    cameraMatrix[1, 2] = 0; // projector lens shift; note this assumes desktop projection mode
                    cameraMatrix[2, 2] = 1;
                    var distCoeffs = new RoomAliveToolkit.Matrix(2, 1);
                    List<RoomAliveToolkit.Matrix> rotations = null;
                    List<RoomAliveToolkit.Matrix> translations = null;

                    var error = CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
                    Console.WriteLine("error = " + error);
                    //Console.WriteLine("intrinsics = \n" + cameraMatrix);

                    //// we differ from opencv's 'error' in that we do not distinguish between x and y.
                    //// i.e. opencv uses the method below; this number would match if we used pointsInSum2*2 in the divisor.
                    //// double check opencv's error
                    //{
                    //    double sumError2 = 0;
                    //    int pointsInSum2 = 0;
                    //    for (int ii = 0; ii < worldPointSubsets.Count; ii++)
                    //    {
                    //        var R = Orientation.Rodrigues(rotations[ii]);
                    //        var t = translations[ii];
                    //        var p = new Matrix(3, 1);

                    //        var worldPointSet = worldPointSubsets[ii];
                    //        var imagePointSet = imagePointSubsets[ii];

                    //        for (int k = 0; k < worldPointSet.Count; k++)
                    //        {
                    //            p.Mult(R, worldPointSet[k]);
                    //            p.Add(t);
                    //            double u, v;
                    //            Kinect2.Kinect2Calibration.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                    //            double dx = imagePointSet[k].X - u;
                    //            double dy = imagePointSet[k].Y - v;

                    //            double thisError = dx * dx + dy * dy;
                    //            sumError2 += thisError;
                    //            pointsInSum2++;
                    //        }
                    //    }

                    //    // opencv's error is rms but over both x and y combined

                    //    Console.WriteLine("average projection error = " + Math.Sqrt(sumError2 / (float)(pointsInSum2)));
                    //}

                    // find inliers from overall dataset
                    var worldPointInlierSets = new List<List<Matrix>>();
                    var imagePointInlierSets = new List<List<System.Drawing.PointF>>();
                    int setIndex = 0;

                    bool enoughInliers = true;
                    double sumError = 0;
                    int pointsInSum = 0;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointInlierSet = new List<Matrix>();
                        var imagePointInlierSet = new List<System.Drawing.PointF>();

                        //var R = Vision.Orientation.Rodrigues(rotations[setIndex]);
                        var R = RotationMatrixFromRotationVector(rotations[setIndex]);
                        var t = translations[setIndex];
                        var p = new Matrix(3, 1);

                        for (int k = 0; k < pointSet.worldPoints.Count; k++)
                        {
                            p.Mult(R, pointSet.worldPoints[k]);
                            p.Add(t);

                            double u, v;
                            CameraMath.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                            double dx = pointSet.imagePoints[k].X - u;
                            double dy = pointSet.imagePoints[k].Y - v;
                            double thisError = Math.Sqrt((dx * dx) + (dy * dy));

                            if (thisError < 1.0f)
                            {
                                worldPointInlierSet.Add(pointSet.worldPoints[k]);
                                imagePointInlierSet.Add(pointSet.imagePoints[k]);
                            }
                            sumError += thisError * thisError;
                            pointsInSum++;
                        }
                        setIndex++;

                        // require that each view has a minimum number of inliers
                        enoughInliers = enoughInliers && (worldPointInlierSet.Count > 1000);

                        worldPointInlierSets.Add(worldPointInlierSet);
                        imagePointInlierSets.Add(imagePointInlierSet);

                    }

                    // if number of inliers > some threshold (should be for each subset)
                    if (enoughInliers) // should this threshold be a function of the number of cameras, a percentage?
                    {
                        var error2 = CalibrateCamera(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);

                        Console.WriteLine("error with inliers = " + error2);
                        Console.Write("camera matrix = \n" + cameraMatrix);

                        numCompletedFits++;

                        // if err < besterr save model (save rotation and translation to calibrationPointSets, cameraMatrix and distortion coeffs to projector)
                        if (error2 < minError)
                        {
                            minError = error2;
                            projector.cameraMatrix = cameraMatrix;
                            projector.lensDistortion = distCoeffs;
                            setIndex = 0;

                            foreach (var pointSet in projector.calibrationPointSets.Values)
                            {
                                // convert to 4x4 transform
                                var R = RotationMatrixFromRotationVector(rotations[setIndex]);
                                var t = translations[setIndex];

                                var T = new Matrix(4, 4);
                                T.Identity();
                                for (int ii = 0; ii < 3; ii++)
                                {
                                    for (int jj = 0; jj < 3; jj++)
                                        T[ii, jj] = R[ii, jj];
                                    T[ii, 3] = t[ii];
                                }
                                pointSet.pose = T;
                                pointSet.worldPointInliers = worldPointInlierSets[setIndex];
                                pointSet.imagePointInliers = imagePointInlierSets[setIndex];

                                setIndex++;
                            }
                        }
                    }

                }

                if (numCompletedFits == 0)
                    throw new CalibrationFailedException("Unable to successfully calibrate projector: " + projector.name);

                Console.WriteLine("final calibration:");
                Console.Write("camera matrix = \n" + projector.cameraMatrix);
                Console.Write("distortion = \n" + projector.lensDistortion);
                Console.WriteLine("error = " + minError);

                foreach (var camera in projector.calibrationPointSets.Keys)
                {
                    Console.WriteLine("camera " + camera.name + " pose:");
                    Console.Write(projector.calibrationPointSets[camera].pose);
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);
        }
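The calibration loop above relies on RotationMatrixFromRotationVector to turn the 3-vector returned by the solver into a 3x3 rotation matrix. That helper is not shown here; below is a minimal sketch of the standard Rodrigues conversion, assuming the same Matrix type used above (a (rows, cols) constructor, Identity(), and single- and double-index accessors). The original helper may differ in detail.

        // Sketch only: Rodrigues' formula R = cos(theta) I + sin(theta) K + (1 - cos(theta)) k k^T,
        // where k is the unit rotation axis and K its cross-product matrix.
        public static Matrix RotationMatrixFromRotationVectorSketch(Matrix r)
        {
            double theta = Math.Sqrt(r[0] * r[0] + r[1] * r[1] + r[2] * r[2]);
            var R = new Matrix(3, 3);
            R.Identity();
            if (theta < 1e-12)
                return R; // no rotation

            double kx = r[0] / theta, ky = r[1] / theta, kz = r[2] / theta;
            double c = Math.Cos(theta), s = Math.Sin(theta), t = 1 - c;

            R[0, 0] = t * kx * kx + c;      R[0, 1] = t * kx * ky - s * kz; R[0, 2] = t * kx * kz + s * ky;
            R[1, 0] = t * kx * ky + s * kz; R[1, 1] = t * ky * ky + c;      R[1, 2] = t * ky * kz - s * kx;
            R[2, 0] = t * kx * kz - s * ky; R[2, 1] = t * ky * kz + s * kx; R[2, 2] = t * kz * kz + c;
            return R;
        }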
        public void Blur3x3(FloatImage a)
        {
            // 3x3 box blur using a sliding window of three column sums: each pixel
            // reuses the previous window's total instead of re-reading all nine
            // neighbors. Pixels near the left edge receive partially filled windows;
            // only interior pixels get a full 3x3 average.
            float* output = data + width + 1;

            float* pb02 = a.data + 2;             // top row of the incoming column
            float* pb12 = a.data + width + 2;     // middle row
            float* pb22 = a.data + 2 * width + 2; // bottom row

            float s0, s1, s2; // sums of the three columns currently in the window
            float h;          // running sum of the full 3x3 window

            for (int y = 0; y < height - 2; y++)
            {
                h = 0;
                s0 = 0; s1 = 0; s2 = 0;
                for (int x = 0; x < width; x++)
                {
                    h -= s0; // drop the column leaving the window

                    s0 = s1;
                    s1 = s2;

                    s2 = *pb02++ + *pb12++ + *pb22++; // sum the incoming column

                    h += s2; // add the column entering the window

                    *output++ = h / 9;
                }
            }
        }
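For reference, a direct (slower but obviously correct) version of the same 3x3 box filter, written without unsafe pointers, might look like the sketch below. It assumes FloatImage exposes width, height, and an [x, y] indexer as elsewhere in these examples, and it computes only interior pixels, leaving the one-pixel border untouched.

        // Sketch only: naive 3x3 box blur, for comparison with the running-sum
        // version above.
        public void Blur3x3Naive(FloatImage a)
        {
            for (int y = 1; y < height - 1; y++)
                for (int x = 1; x < width - 1; x++)
                {
                    float sum = 0;
                    for (int dy = -1; dy <= 1; dy++)
                        for (int dx = -1; dx <= 1; dx++)
                            sum += a[x + dx, y + dy];
                    this[x, y] = sum / 9;
                }
        }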
Example #9
        public void Threshold(FloatImage a, float threshold)
        {
            float* pa = a.Data(0, 0);
            byte* p = data;
            for (int i = 0; i < width * height; i++)
            {
                if (*pa++ > threshold)
                    *p++ = 255;
                else
                    *p++ = 0;
            }
        }
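Threshold binarizes a float image against a fixed cutoff and is naturally chained after a blur. A hypothetical usage sketch follows; the image names, sizes, and the ByteImage type for the 8-bit destination are illustrative assumptions, not taken from the original source.

        // Sketch only: blur a float image, then binarize it. All names here are
        // illustrative; ByteImage stands in for whatever 8-bit image type hosts
        // the Threshold method above.
        var smoothed = new FloatImage(depthWidth, depthHeight);
        smoothed.Blur3x3(varianceImage);   // 3x3 box blur of the variance image
        var mask = new ByteImage(depthWidth, depthHeight);
        mask.Threshold(smoothed, 100f);    // 255 where smoothed > 100, else 0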
Example #10
        public void CaptureDepthAndColor(string directory)
        {
            // foreach camera:
            // average a bunch of frames to find a good depth image
            // get calibration
            // TODO: parallelize

            foreach (var camera in cameras)
            {
                string cameraDirectory = directory + "/camera" + camera.name;
                if (!Directory.Exists(cameraDirectory))
                    Directory.CreateDirectory(cameraDirectory);

                // compute mean and variance of depth image
                var sum = new FloatImage(depthWidth, depthHeight);
                sum.Zero();
                var sumSquared = new FloatImage(depthWidth, depthHeight);
                sumSquared.Zero();
                var count = new ShortImage(depthWidth, depthHeight);
                count.Zero();
                var depth = new ShortImage(depthWidth, depthHeight);
                for (int i = 0; i < 100; i++)
                {
                    var depthBytes = camera.Client.LatestDepthImage();
                    Marshal.Copy(depthBytes, 0, depth.DataIntPtr, depthWidth * depthHeight * 2);
                    Console.WriteLine("acquired depth image " + i);
                    for (int y = 0; y < depthHeight; y++)
                        for (int x = 0; x < depthWidth; x++)
                            if (depth[x, y] != 0)
                            {
                                ushort d = depth[x, y];
                                count[x, y]++;
                                sum[x, y] += d;
                                sumSquared[x, y] += d * d;
                            }
                }

                var meanImage = new FloatImage(depthWidth, depthHeight);
                meanImage.Zero(); // not all pixels will be assigned
                var varianceImage = new FloatImage(depthWidth, depthHeight);
                varianceImage.Zero(); // not all pixels will be assigned

                for (int y = 0; y < depthHeight; y++)
                    for (int x = 0; x < depthWidth; x++)
                    {
                        if (count[x, y] > 50)
                        {
                            float mean = sum[x, y] / count[x, y];
                            meanImage[x, y] = mean;
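                            // Var(d) = E[d^2] - E[d]^2, from the running sums accumulated above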
                            float variance = sumSquared[x, y] / count[x, y] - mean * mean;
                            varianceImage[x, y] = variance;
                        }
                    }

                // WIC doesn't support encoding float tiff images, so for now we write to a binary file
                meanImage.SaveToFile(cameraDirectory + "/mean.bin");
                varianceImage.SaveToFile(cameraDirectory + "/variance.bin");

                // create a short version that we can write, used only for debugging
                var meanDepthShortImage = new ShortImage(depthWidth, depthHeight);
                for (int y = 0; y < depthHeight; y++)
                    for (int x = 0; x < depthWidth; x++)
                        meanDepthShortImage[x, y] = (ushort)meanImage[x, y];
                SaveToTiff(imagingFactory, meanDepthShortImage, cameraDirectory + "/mean.tiff");

                // convert to world coordinates and save to ply file
                camera.calibration = camera.Client.GetCalibration();
                var depthFrameToCameraSpaceTable = camera.calibration.ComputeDepthFrameToCameraSpaceTable();
                var world = new Float3Image(depthWidth, depthHeight); // TODO: move out/reuse
                for (int y = 0; y < depthHeight; y++)
                    for (int x = 0; x < depthWidth; x++)
                    {
                        var pointF = depthFrameToCameraSpaceTable[y * depthWidth + x];
                        Float3 worldPoint;
                        worldPoint.x = pointF.X * meanImage[x, y];
                        worldPoint.y = pointF.Y * meanImage[x, y];
                        worldPoint.z = meanImage[x, y];
                        world[x, y] = worldPoint;
                    }
                SaveToPly(cameraDirectory + "/mean.ply", world);

                // TODO: consider writing OBJ instead
            }

            // connect to projectors
            foreach (var projector in projectors)
            {
                //var binding = new NetTcpBinding();
                //binding.Security.Mode = SecurityMode.None;
                //var uri = "net.tcp://" + projector.hostNameOrAddress + ":9001/ProjectorServer/service";
                //var address = new EndpointAddress(uri);
                //projector.client = new ProjectorServerClient(binding, address);
                projector.Client.OpenDisplay(projector.displayIndex);
            }

            // collect color images when projecting all white and all black
            // set projectors to white
            foreach (var projector in projectors)
                projector.Client.SetColor(projector.displayIndex, 1f, 1f, 1f);
            System.Threading.Thread.Sleep(5000);
            foreach (var camera in cameras)
            {
                // save color image
                string cameraDirectory = directory + "/camera" + camera.name;
                var jpegBytes = camera.Client.LatestJPEGImage();
                File.WriteAllBytes(cameraDirectory + "/color.jpg", jpegBytes);
                var colorBytes = camera.Client.LatestRGBImage();
                var image = new ARGBImage(colorWidth, colorHeight);
                Marshal.Copy(colorBytes, 0, image.DataIntPtr, colorWidth * colorHeight * 4);
                SaveToTiff(imagingFactory, image, cameraDirectory + "/color.tiff");
                image.Dispose();

            }
            foreach (var projector in projectors)
                projector.Client.SetColor(projector.displayIndex, 0f, 0f, 0f);
            System.Threading.Thread.Sleep(5000);
            foreach (var camera in cameras)
            {
                // save color image
                string cameraDirectory = directory + "/camera" + camera.name;
                var jpegBytes = camera.Client.LatestJPEGImage();
                File.WriteAllBytes(cameraDirectory + "/colorDark.jpg", jpegBytes);
                var colorBytes = camera.Client.LatestRGBImage();
                var image = new ARGBImage(colorWidth, colorHeight);
                Marshal.Copy(colorBytes, 0, image.DataIntPtr, colorWidth * colorHeight * 4);
                SaveToTiff(imagingFactory, image, cameraDirectory + "/colorDark.tiff");
                image.Dispose();

            }

            // close all displays
            foreach (var projector in projectors)
            {
                projector.Client.CloseDisplay(projector.displayIndex);
            }
        }
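CaptureDepthAndColor hands the per-pixel world coordinates to SaveToPly, which is not shown here. A minimal ASCII PLY point-cloud writer along the lines below would do the job (the real helper may write binary PLY or filter out zero-depth points). Width and height are passed in explicitly to avoid assuming accessors on Float3Image; System.IO provides StreamWriter.

        // Sketch only: minimal ASCII PLY writer for a grid of Float3 points.
        // Assumes Float3Image's [x, y] indexer returns a Float3 with x/y/z fields,
        // as used in CaptureDepthAndColor above.
        static void SaveToPlySketch(string filename, Float3Image world, int width, int height)
        {
            using (var writer = new System.IO.StreamWriter(filename))
            {
                writer.WriteLine("ply");
                writer.WriteLine("format ascii 1.0");
                writer.WriteLine("element vertex " + (width * height));
                writer.WriteLine("property float x");
                writer.WriteLine("property float y");
                writer.WriteLine("property float z");
                writer.WriteLine("end_header");
                for (int y = 0; y < height; y++)
                    for (int x = 0; x < width; x++)
                    {
                        var p = world[x, y];
                        writer.WriteLine(p.x + " " + p.y + " " + p.z);
                    }
            }
        }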