public void Copy(ByteImage a, float scale)
{
    // Convert the byte image a into this float image, multiplying each pixel by scale.
    byte* src = a.Data(0, 0);
    float* dst = data;
    int remaining = width * height;
    while (remaining-- > 0)
    {
        *dst++ = *src++ * scale;
    }
}
示例#2
0
 public void Copy(ByteImage byteImage)
 {
     // Broadcast each grayscale byte into the R, G, and B channels of this RGB24 image.
     byte* src = byteImage.Data(0, 0);
     RGB24* dst = data;
     int n = width * height;
     for (int i = 0; i < n; i++)
     {
         byte gray = *src++;
         dst->R = gray;
         dst->G = gray;
         dst->B = gray;
         dst++;
     }
 }
示例#3
0
        public void Copy(ByteImage byteImage)
        {
            // Copy a grayscale image into this RGB24 image (R = G = B = gray value).
            byte* pIn = byteImage.Data(0, 0);
            RGB24* pDst = data;

            for (int remaining = width * height; remaining > 0; remaining--)
            {
                pDst->B = *pIn;
                pDst->G = *pIn;
                pDst->R = *pIn;
                pDst++;
                pIn++;
            }
        }
示例#4
0
        public void Invert(ByteImage a)
        {
            // Write the photometric negative of a into this image (255 - pixel).
            byte* src = a.data;
            byte* dst = data;
            int n = width * height;

            for (int i = 0; i < n; i++)
                *dst++ = (byte)(255 - *src++);
        }
示例#5
0
        public void Copy(ByteImage a, float scale)
        {
            // Convert a byte image to float, multiplying every pixel by scale.
            byte* src = a.Data(0, 0);
            float* dst = data;

            int count = width * height;
            while (count-- > 0)
                *dst++ = *src++ * scale;
        }
示例#6
0
        public void And(ByteImage a)
        {
            // In-place bitwise AND of this image with a (typically used for masks).
            byte* pa = a.Data(0, 0);
            byte* p = data;
            int n = width * height;

            for (int i = 0; i < n; i++, p++, pa++)
            {
                *p &= *pa;
            }
        }
示例#7
0
        public void Add(ByteImage a)
        {
            // Accumulate byte image a into this float image, element-wise.
            byte* src = a.Data(0, 0);
            float* acc = data;
            int n = width * height;

            for (int i = 0; i < n; i++)
            {
                *acc++ += *src++;
            }
        }
示例#8
0
        public void Add(ByteImage a)
        {
            // Accumulate byte image a into this ushort image, element-wise.
            // NOTE(review): sums above 65535 wrap silently due to the ushort cast —
            // confirm accumulated values stay in range for the expected inputs.
            byte* src = a.Data(0, 0);
            ushort* acc = data;
            int n = width * height;

            for (int i = 0; i < n; i++, acc++, src++)
            {
                *acc = (ushort)(*acc + *src);
            }
        }
示例#9
0
        public void Decimate(ByteImage a, int factor)
        {
            // Downsample a into this image by point-sampling every factor-th
            // pixel in both x and y (no filtering).
            byte* dst = data;

            for (int y = 0; y < height; y++)
            {
                byte* src = a.Data(0, y * factor);
                for (int x = 0; x < width; x++, src += factor)
                    *dst++ = *src;
            }
        }
示例#10
0
        public void Decimate(ByteImage a, int factor)
        {
            // Point-sample every factor-th source pixel to shrink a into this image.
            byte* pOut = data;

            int row = 0;
            while (row < height)
            {
                byte* pIn = a.Data(0, row * factor);
                int col = 0;
                while (col < width)
                {
                    *pOut++ = *pIn;
                    pIn += factor;
                    col++;
                }
                row++;
            }
        }
示例#11
0
        /// <summary>
        /// Copies a into this image mirrored about the vertical axis: each row
        /// is reversed left-to-right.
        /// </summary>
        public void XMirror(ByteImage a)
        {
            byte *pOut = data;

            for (int yy = 0; yy < height; yy++)
            {
                // Start at the right edge of the source row and walk left.
                // (The original also initialized pIn from a.data before the loop;
                // that value was never read — dead store removed.)
                byte *pIn = a.Data(width - 1, yy);
                for (int xx = 0; xx < width; xx++)
                {
                    *pOut++ = *pIn--;
                }
            }
        }
示例#12
0
        public unsafe void Decode(ByteImage[] capturedImages, ShortImage decodedX, ShortImage decodedY, ByteImage mask)
        {
            // mask accumulates validity across both the X and Y decode passes
            mask.Set(255);

            // the first numXBits*2 captures encode the X (column) Gray code
            Decode(capturedImages, decodedX, mask, numXBits, width);

            // TODO: this is a little awkward — the remaining captures encode the
            // Y (row) Gray code, so slice them off into their own array
            var yImages = new ByteImage[numYBits * 2];
            Array.Copy(capturedImages, numXBits * 2, yImages, 0, numYBits * 2);

            Decode(yImages, decodedY, mask, numYBits, height);
        }
示例#13
0
        public void Threshold(ByteImage a, byte threshold)
        {
            // Binarize a into this image: 255 where a strictly exceeds threshold, else 0.
            byte *src = a.data;
            byte *dst = data;
            int n = a.width * a.height;

            for (int i = 0; i < n; i++)
            {
                *dst++ = (*src++ > threshold) ? (byte)255 : (byte)0;
            }
        }
示例#14
0
        public void ThresholdHighPass(ByteImage a, byte threshold)
        {
            // Keep pixels of a that strictly exceed threshold; zero out the rest.
            byte *src = a.data;
            byte *dst = data;
            int n = a.width * a.height;

            for (int i = 0; i < n; i++, src++)
            {
                *dst++ = (*src > threshold) ? *src : (byte)0;
            }
        }
示例#15
0
        /// <summary>
        /// Copies a region of the grayscale byteImage into this ARGB32 image,
        /// replicating the gray value into R, G and B and setting A to 255.
        /// NOTE(review): the loops run j in [startY, h) and i in [startX, w),
        /// i.e. w and h act as absolute end coordinates rather than a rectangle
        /// size — confirm that callers pass them that way.
        /// </summary>
        public void CopyRectangle(ByteImage byteImage, int startX, int startY, int w, int h)
        {
            byte *  pOrig    = byteImage.Data(0, 0);
            ARGB32 *pOutOrig = data;
            byte *  p;
            ARGB32 *pOut;

            for (int j = startY; j < h; j++)
            {
                for (int i = startX; i < w; i++)
                {
                    // index both images at (i, j); source and destination row strides may differ
                    p    = pOrig + j * byteImage.Width + i;
                    pOut = pOutOrig + j * width + i;

                    pOut->A = 255;
                    pOut->R = pOut->G = pOut->B = *p;
                }
            }
        }
示例#16
0
        /// <summary>
        /// Copies a region of the grayscale byteImage into this ARGB32 image,
        /// replicating the gray value into R, G and B and setting A to 255.
        /// NOTE(review): the loops run j in [startY, h) and i in [startX, w),
        /// i.e. w and h act as absolute end coordinates rather than a rectangle
        /// size — confirm that callers pass them that way.
        /// </summary>
        public void CopyRectangle(ByteImage byteImage, int startX, int startY, int w, int h)
        {
            byte* pOrig = byteImage.Data(0, 0);
            ARGB32* pOutOrig = data;
            byte* p;
            ARGB32* pOut;

            for (int j = startY; j < h; j++)
            {
                for (int i = startX; i < w; i++)
                {
                    // index both images at (i, j); source and destination row strides may differ
                    p = pOrig + j * byteImage.Width + i;
                    pOut = pOutOrig + j * width + i;

                    pOut->A = 255;
                    pOut->R = pOut->G = pOut->B = *p;
                }
            }
        }
示例#17
0
        // BEWARE: threshold on absdiff, and mask level settings*
        /// <summary>
        /// Decodes nBits Gray code bit-planes into per-pixel values in decoded,
        /// ANDing per-pixel validity into mask. capturedImages holds, for each
        /// bit i, the pattern image at index 2*i and its inverse at 2*i + 1
        /// (most significant bit first). Pixels whose decoded value falls
        /// outside [0, max) are masked out at the end.
        /// </summary>
        public unsafe void Decode(ByteImage[] capturedImages, ShortImage decoded, ByteImage mask, int nBits, int max)
        {
            decoded.Zero();

            int capturedWidth  = decoded.Width;
            int capturedHeight = decoded.Height;

            // stores decoded bit from previous level (Gray -> binary conversion
            // needs the previously decoded bit: b_i = g_i XOR b_{i-1})
            var bits = new ByteImage(capturedWidth, capturedHeight);

            // process bit-planes from most significant to least significant
            for (int i = 0; i < nBits; i++)
            {
                var capturedImage         = capturedImages[2 * i];
                var invertedCapturedImage = capturedImages[2 * i + 1];

                // weight of this bit in the final decoded value
                int bitValue = (int)Math.Pow(2.0, nBits - i - 1);

                // walk all five images in lockstep, one pointer each
                ushort *decodedp  = decoded.Data(0, 0);
                byte *  capturedp = capturedImage.Data(0, 0);
                byte *  invertedp = invertedCapturedImage.Data(0, 0);
                byte *  maskp     = mask.Data(0, 0);
                byte *  bitsp     = bits.Data(0, 0);

                for (int y = 0; y < capturedHeight; y++)
                {
                    for (int x = 0; x < capturedWidth; x++)
                    {
                        // a bit is considered valid if the diff is greater than some threshold; this value is tricky to set given AGC
                        byte valid = (Math.Abs(*capturedp - *invertedp) > 10) ? (byte)255 : (byte)0;
                        byte bit   = (*capturedp >= *invertedp) ? (byte)255 : (byte)0;
                        // Gray code bit: XOR with the previous level's decoded bit
                        *bitsp = (byte)(bit ^ *bitsp);
                        if (*bitsp == 255)
                        {
                            *decodedp = (ushort)(*decodedp + bitValue);
                        }

                        // stop updating the mask for the least significant levels (but continue decoding)
                        // *FIX: this is pretty fragile, perhaps better to record how many bits of rows and column have been recorded and walk back from that
                        if (i < nBits - 4)
                        {
                            *maskp = (byte)(valid & (*maskp));
                        }

                        decodedp++;
                        capturedp++;
                        invertedp++;
                        maskp++;
                        bitsp++;
                    }
                }
            }
            bits.Dispose();

            // check that decoded values are within range
            for (int y = 0; y < capturedHeight; y++)
            {
                for (int x = 0; x < capturedWidth; x++)
                {
                    int d = decoded[x, y]; // can this be negative?
                    if ((d >= max) || (d < 0))
                    {
                        mask[x, y] = 0;
                    }
                }
            }
        }
        /// <summary>
        /// Loads the Gray code captures stored under directory for every
        /// projector/camera pair, decodes them into column/row images plus a
        /// validity mask, and writes the results (and masked variants) back to
        /// disk as TIFFs.
        /// </summary>
        public void DecodeGrayCodeImages(string directory)
        {
            stopWatch.Start();

            // decode Gray code captures
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                var grayCode = new GrayCode(projector.width, projector.height);

                // allocate space for captured images; count varies by projector
                int nCapturedImages = 2 * (grayCode.numXBits + grayCode.numYBits);
                var capturedImages = new ByteImage[nCapturedImages];
                for (int i = 0; i < nCapturedImages; i++)
                    capturedImages[i] = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                foreach (var camera in cameras)
                {
                    Console.WriteLine("decoding Gray code images for projector " + projector.name + ", camera " + camera.name);

                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    // load and decode Gray code images
                    for (int i = 0; i < nCapturedImages; i++)
                        LoadFromTiff(imagingFactory, capturedImages[i], cameraDirectory + "/grayCode" + i + ".tiff");

                    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    // TODO: there are a couple of interesting thresholds in Decode; they should be surfaced here
                    grayCode.Decode(capturedImages, decodedColumns, decodedRows, mask);

                    SaveToTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    SaveToTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    SaveToTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    var decodedColumnsMasked = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRowsMasked = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    // zero out decoded values wherever the mask says the Gray code was invalid
                    for (int y = 0; y < Kinect2Calibration.colorImageHeight; y++)
                        for (int x = 0; x < Kinect2Calibration.colorImageWidth; x++)
                        {
                            if (mask[x, y] > 0)
                            {
                                decodedColumnsMasked[x, y] = decodedColumns[x, y];
                                decodedRowsMasked[x, y] = decodedRows[x, y];
                            }
                            else
                            {
                                decodedColumnsMasked[x, y] = 0;
                                decodedRowsMasked[x, y] = 0;
                            }
                        }
                    SaveToTiff(imagingFactory, decodedColumnsMasked, cameraDirectory + "/decodedColumnsMasked.tiff");
                    SaveToTiff(imagingFactory, decodedRowsMasked, cameraDirectory + "/decodedRowsMasked.tiff");

                    // release the unmanaged mask buffer (was leaked previously)
                    mask.Dispose();
                    // TODO(review): the ShortImage instances (decodedColumns, decodedRows,
                    // decodedColumnsMasked, decodedRowsMasked) likely leak as well — dispose
                    // them here if ShortImage exposes Dispose().
                }

                // release the per-projector capture buffers (ByteImage wraps unmanaged memory)
                foreach (var capturedImage in capturedImages)
                    capturedImage.Dispose();
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);
        }
 /// <summary>
 /// Convenience overload: loads a TIFF into image by forwarding to the full
 /// LoadFromTiff overload with a final argument of 1 (presumably bytes per
 /// pixel / channel count — confirm against the four-argument overload).
 /// </summary>
 public static void LoadFromTiff(SharpDX.WIC.ImagingFactory imagingFactory, ByteImage image, string filename)
 {
     LoadFromTiff(imagingFactory, image, filename, 1);
 }
        /// <summary>
        /// Displays each projector's Gray code patterns in turn and captures a
        /// grayscale image from every camera for each pattern, saving captures
        /// as directory/projector#/camera#/grayCode#.tiff. Also records each
        /// projector's display size and captures depth/color beforehand.
        /// </summary>
        public void CaptureGrayCodes(string directory)
        {
            // for each projector
            //    for each gray code
            //       display gray code
            //       for each camera (fork?)
            //          capture color image; save to file

            // store as projector#/camera#/grayCode#

            // foreach camera
            //   get calibration
            //   save depth map to file

            // scratch buffer reused for every capture (YUY2 converted to gray)
            var grayImage = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

            // pick up projector image dimensions from server and save them in configuration
            // put up projector's name on each
            foreach (var projector in projectors)
            {
                var size = projector.Client.Size(projector.displayIndex);
                projector.width = size.Width;
                projector.height = size.Height;
                projector.Client.OpenDisplay(projector.displayIndex);
                projector.Client.DisplayName(projector.displayIndex, projector.name);
            }

            // let AGC settle
            System.Threading.Thread.Sleep(2000);

            CaptureDepthAndColor(directory);

            //// save an image with projector name displayed, useful for later for visualization of results
            //foreach (var camera in cameras)
            //{
            //    string cameraDirectory = directory + "/camera" + camera.name;
            //    if (!Directory.Exists(cameraDirectory))
            //        Directory.CreateDirectory(cameraDirectory);
            //    //var jpegBytes = camera.client.LatestJPEGImage();
            //    //File.WriteAllBytes(cameraDirectory + "/projectorLabels.jpg", jpegBytes);
            //    var colorBytes = camera.Client.LatestRGBImage();
            //    var image = new ARGBImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
            //    Marshal.Copy(colorBytes, 0, image.DataIntPtr, Kinect2Calibration.colorImageWidth * Kinect2Calibration.colorImageHeight * 4);
            //    SaveToTiff(imagingFactory, image, cameraDirectory + "/projectorLabels.tiff");
            //    image.Dispose();
            //}

            //// TODO: consider combining with later aquiring color and depth

            // set all projectors to black
            foreach (var projector in projectors)
                projector.Client.SetColor(projector.displayIndex, 0, 0, 0);

            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;
                if (!Directory.Exists(projectorDirectory))
                    Directory.CreateDirectory(projectorDirectory);

                int numberOfGrayCodeImages = projector.Client.NumberOfGrayCodeImages(projector.displayIndex);

                // set display to gray, to give AGC a chance to settle
                projector.Client.SetColor(projector.displayIndex, 0.5f, 0.5f, 0.5f);
                System.Threading.Thread.Sleep(1500);

                for (int i = 0; i < numberOfGrayCodeImages; i++)
                {
                    projector.Client.DisplayGrayCode(projector.displayIndex, i);

                    // wait for the image to be displayed and give camera AGC time to settle
                    System.Threading.Thread.Sleep(500);

                    // TODO: parallelize?
                    foreach (var camera in cameras)
                    {
                        string cameraDirectory = projectorDirectory + "/camera" + camera.name;
                        if (!Directory.Exists(cameraDirectory))
                            Directory.CreateDirectory(cameraDirectory);

                        //// acquire color frames until exposure and gain have settled to a stable value
                        //int numUnchanging = 0;
                        //long lastExposureTime = 0;
                        //float lastGain = 0;
                        //const int numUnchangingThreshold = 5;
                        //byte[] colorImageBytes = null;
                        //while (numUnchanging < numUnchangingThreshold)
                        //{
                        //    colorImageBytes = camera.client.NextColorImage(); // consider providing a way of getting color exposure etc. w/o calling NextColorImage
                        //    long exposureTime = camera.client.LastColorExposureTimeTicks();
                        //    float gain = camera.client.LastColorGain();
                        //    if ((gain == lastGain) && (exposureTime == lastExposureTime))
                        //        numUnchanging++;
                        //    lastGain = gain;
                        //    lastExposureTime = exposureTime;
                        //}

                        var colorImageBytes = camera.Client.LatestYUVImage();

                        // convert YUY2 to grayscale by taking every other byte (the luma samples)
                        for (int y = 0; y < Kinect2Calibration.colorImageHeight; y++)
                            for (int x = 0; x < Kinect2Calibration.colorImageWidth; x++)
                                grayImage[x, y] = colorImageBytes[2 * (Kinect2Calibration.colorImageWidth * y + x)];

                        // save to file
                        SaveToTiff(imagingFactory, grayImage, cameraDirectory + "/grayCode" + i + ".tiff");
                    }
                }
                projector.Client.SetColor(projector.displayIndex, 0, 0, 0);
            }

            // close all displays
            foreach (var projector in projectors)
            {
                projector.Client.CloseDisplay(projector.displayIndex);
            }

            // release the unmanaged capture buffer (was leaked previously)
            grayImage.Dispose();
        }
        public void CalibrateProjectorGroups(string directory)
        {
            // for all cameras, take depth image points to color image points
            var depthImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var validMask = new ByteImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);

            foreach (var camera in cameras)
            {
                Console.WriteLine("projecting depth points to color camera " + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");
                varianceImage.LoadFromFile(cameraDirectory + "/variance.bin");
                validMask.Zero();

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();

                // TODO: consider using just one 4x4 in calibration class
                var colorCamera = new Matrix(4, 1);
                camera.colorImagePoints = new List<Matrix>();
                camera.depthCameraPoints = new List<Matrix>();
                var depthCamera4 = new Matrix(4, 1);

                // for each valid point in depth image
                int numRejected = 0;
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y += 1)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x += 1)
                    {
                        float depth = depthImage[x, y] / 1000f; // m
                        float variance = varianceImage[x, y];

                        if (depth == 0)
                            continue;
                        if (variance > 6 * 6)
                        {
                            numRejected++;
                            continue;
                        }
                        validMask[x, y] = (byte)255;

                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                        depthCamera4[0] = point.X * depth;
                        depthCamera4[1] = point.Y * depth;
                        depthCamera4[2] = depth;
                        depthCamera4[3] = 1;

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera4);
                        //colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);

                        if ((colorU >= 0) && (colorU < (Kinect2Calibration.colorImageWidth - 1)) && (colorV >= 0) && (colorV < (Kinect2Calibration.colorImageHeight - 1))) // BEWARE: later do we round or truncate??
                        {
                            var colorImagePoint = new Matrix(2, 1);
                            colorImagePoint[0] = colorU;
                            colorImagePoint[1] = colorV;
                            camera.colorImagePoints.Add(colorImagePoint);

                            // expect a 3-vector?
                            var depthCamera = new Matrix(3, 1);
                            depthCamera[0] = depthCamera4[0];
                            depthCamera[1] = depthCamera4[1];
                            depthCamera[2] = depthCamera4[2];

                            camera.depthCameraPoints.Add(depthCamera);

                            //Console.WriteLine(depthCamera[0] + "\t" + depthCamera[1] + "\t -> " + colorImagePoint[0] + "\t" + colorImagePoint[1]);
                        }

                    }
                SaveToTiff(imagingFactory, validMask, cameraDirectory + "/validMask.tiff");

                Console.WriteLine("rejected " + 100 * (float)numRejected / (float)(Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight) + "% pixels for high variance");

            }

            // we never save colorImagePoints, depthCameraPoints, so we must remember to run previous

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // use decoded Gray code images to create calibration point sets
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                projector.calibrationPointSets = new Dictionary<Camera, CalibrationPointSet>();

                foreach (var camera in cameras)
                {
                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    LoadFromTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    LoadFromTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    LoadFromTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    // we have a bunch of color camera / depth camera point corrspondences
                    // use the Gray code to find the position of the color camera point in the projector frame

                    // find 2D projector coordinates from decoded Gray code images
                    var imagePoints = new List<System.Drawing.PointF>();
                    var worldPoints = new List<Matrix>();

                    for (int i = 0; i < camera.colorImagePoints.Count; i++)
                    {
                        var colorImagePoint = camera.colorImagePoints[i];

                        // We would like to relate projected color points to color images stored in memory.
                        // The Kinect SDK and our camera calibration assumes X left, Y up (from the POV of the camera).
                        // We index images in memory with X right and Y down.
                        // Our Gray code images are flipped in the horizontal direction.
                        // Therefore to map an image space coordinate to a memory location we flip Y (and not X):
                        int x = (int)(colorImagePoint[0] + 0.5f);
                        int y = Kinect2Calibration.colorImageHeight - (int)(colorImagePoint[1] + 0.5f);

                        if ((x < 0) || (x >= Kinect2Calibration.colorImageWidth) || (y < 0) || (y >= Kinect2Calibration.colorImageHeight))
                        {
                            //Console.WriteLine("out of bounds");
                            continue;
                        }

                        if (mask[x, y] > 0) // Gray code is valid
                        {
                            // We would like to relate decoded row/column values to projector coordinates.
                            // To match the camera, we want projector's coordinate system X left, Y up (from the POV of the projector).
                            // We assume that the projector is configured in front projection mode (i.e., projected text looks correct in the real world).
                            // In that case decoded columns run X right (in the real world), decoded rows run Y down (in the real world).
                            // So we need to flip both X and Y decoded values.

                            var projectorImagePoint = new System.Drawing.PointF(projector.width - decodedColumns[x, y], projector.height - decodedRows[x, y]);
                            var depthCameraPoint = camera.depthCameraPoints[i];

                            imagePoints.Add(projectorImagePoint);
                            worldPoints.Add(depthCameraPoint);

                            //Console.WriteLine(depthCameraPoint[0] + "\t" + depthCameraPoint[1] + "\t" + depthCameraPoint[2] + "-> \t" + x + "\t" + y + "-> \t" + projectorImagePoint.X + "\t" + projectorImagePoint.Y);
                        }
                    }

                    if (worldPoints.Count > 1000)
                    {
                        var pointSet = new CalibrationPointSet();
                        pointSet.worldPoints = worldPoints;
                        pointSet.imagePoints = imagePoints;
                        projector.calibrationPointSets[camera] = pointSet;
                        Console.WriteLine("projector " + projector.name + " is seen by camera " + camera.name + " (" + worldPoints.Count + " points)");
                    }
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // calibration
            foreach (var projector in projectors)
            {
                Console.WriteLine("calibrating projector " + projector.name);

                string projectorDirectory = directory + "/projector" + projector.name;

                // RANSAC
                double minError = Double.PositiveInfinity;
                var random = new Random(0); // provide seed to ease debugging

                int numCompletedFits = 0;

                for (int i = 0; (numCompletedFits < 4) && (i < 10); i++)
                {
                    Console.WriteLine("RANSAC iteration " + i);

                    // randomly select small number of points from each calibration set
                    var worldPointSubsets = new List<List<Matrix>>();
                    var imagePointSubsets = new List<List<System.Drawing.PointF>>();

                    bool foundNonplanarSubset = false;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointSubset = new List<Matrix>();
                        var imagePointSubset = new List<System.Drawing.PointF>();

                        // try to find a nonplanar subset
                        bool planar = true;
                        int nTries = 0;
                        while (planar && (nTries++ < 1000))
                        {
                            worldPointSubset.Clear();
                            imagePointSubset.Clear();

                            for (int j = 0; j < 100; j++)
                            {
                                int k = random.Next(pointSet.worldPoints.Count);
                                worldPointSubset.Add(pointSet.worldPoints[k]);
                                imagePointSubset.Add(pointSet.imagePoints[k]);
                            }

                            // planar?
                            Matrix Rplane, tplane, d;
                            CameraMath.PlaneFit(worldPointSubset, out Rplane, out tplane, out d);
                            //Console.WriteLine("planar : " + d[2] / d[1]);
                            planar = (d[2] / d[1]) < 0.001f;
                        }

                        worldPointSubsets.Add(worldPointSubset);
                        imagePointSubsets.Add(imagePointSubset);

                        // we can't initialize extrinsics yet, because we don't know which intrinsics we'll be using

                        if (!planar)
                            foundNonplanarSubset = true;
                    }

                    // we do not optimize intrinsics if all the point sets are planar, or if the projector intrinsics are marked as locked
                    bool fixIntrinsics = (!foundNonplanarSubset) || (projector.lockIntrinsics); // TODO: add option to lock intrinsics

                    var rotations = new List<Matrix>();
                    var translations = new List<Matrix>();
                    var cameraMatrix = new Matrix(3, 3);
                    var distCoeffs = new Matrix(2, 1);

                    if (fixIntrinsics)
                    {
                        cameraMatrix.Copy(projector.cameraMatrix);
                        distCoeffs.Copy(projector.lensDistortion);
                    }
                    else // nonplanar, so we can optimize intrinsics
                    {
                        cameraMatrix[0, 0] = 1000; //fx TODO: can we instead init this from FOV?
                        cameraMatrix[1, 1] = 1000; //fy
                        cameraMatrix[0, 2] = projector.width / 2; //cx
                        cameraMatrix[1, 2] = 0; // projector lens shift; note this assumes desktop projection mode
                        cameraMatrix[2, 2] = 1;
                    }

                    // init extrinsics
                    for (int ii = 0; ii < worldPointSubsets.Count; ii++)
                    {
                        Matrix R, t;
                        CameraMath.ExtrinsicsInit(cameraMatrix, distCoeffs, worldPointSubsets[ii], imagePointSubsets[ii], out R, out t);
                        rotations.Add(CameraMath.RotationVectorFromRotationMatrix(R));
                        translations.Add(t);
                    }

                    // initial RANSAC fit on subset of points
                    double error;
                    if (fixIntrinsics)
                        error = CameraMath.CalibrateCameraExtrinsicsOnly(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
                    else
                        error = CameraMath.CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);

                    Console.WriteLine("error on subset = " + error);

                    // RANSAC: find inliers from overall dataset
                    var worldPointInlierSets = new List<List<Matrix>>();
                    var imagePointInlierSets = new List<List<System.Drawing.PointF>>();
                    int setIndex = 0;

                    bool enoughInliers = true;
                    double sumError = 0;
                    int pointsInSum = 0;
                    int totalInliers = 0;
                    int totalPoints = 0;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointInlierSet = new List<Matrix>();
                        var imagePointInlierSet = new List<System.Drawing.PointF>();

                        var R = CameraMath.RotationMatrixFromRotationVector(rotations[setIndex]);
                        var t = translations[setIndex];
                        var p = new Matrix(3, 1);

                        for (int k = 0; k < pointSet.worldPoints.Count; k++)
                        {
                            p.Mult(R, pointSet.worldPoints[k]);
                            p.Add(t);

                            double u, v;
                            CameraMath.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                            double dx = pointSet.imagePoints[k].X - u;
                            double dy = pointSet.imagePoints[k].Y - v;
                            double thisError = Math.Sqrt((dx * dx) + (dy * dy));

                            if (thisError < 2.0f) // TODO: how to set this?
                            {
                                worldPointInlierSet.Add(pointSet.worldPoints[k]);
                                imagePointInlierSet.Add(pointSet.imagePoints[k]);
                            }
                            sumError += thisError * thisError;
                            pointsInSum++;
                        }
                        setIndex++;

                        // require that each view has a minimum number of inliers
                        enoughInliers = enoughInliers && (worldPointInlierSet.Count > 500); // should be related to min number of points in set (above)

                        totalPoints += pointSet.worldPoints.Count;
                        totalInliers += worldPointInlierSet.Count;

                        worldPointInlierSets.Add(worldPointInlierSet);
                        imagePointInlierSets.Add(imagePointInlierSet);
                    }

                    Console.WriteLine("{0}/{1} inliers", totalInliers, totalPoints);

                    // if number of inliers > some threshold (should be for each subset)
                    if (enoughInliers) // should this threshold be a function of the number of cameras, a percentage?
                    {
                        double error2;
                        if (fixIntrinsics)
                            error2 = CameraMath.CalibrateCameraExtrinsicsOnly(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);
                        else
                            error2 = CameraMath.CalibrateCamera(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);

                        Console.WriteLine("error with inliers = " + error2);
                        Console.Write("camera matrix = \n" + cameraMatrix);

                        numCompletedFits++;

                        // if reduced error save model (save rotation and translation to calibrationPointSets, cameraMatrix and distortion coeffs to projector)
                        if (error2 < minError)
                        {
                            minError = error2;
                            projector.cameraMatrix = cameraMatrix;
                            projector.lensDistortion = distCoeffs;
                            setIndex = 0;

                            foreach (var pointSet in projector.calibrationPointSets.Values)
                            {
                                // convert to 4x4 transform
                                var R = CameraMath.RotationMatrixFromRotationVector(rotations[setIndex]);
                                var t = translations[setIndex];

                                var T = new Matrix(4, 4);
                                T.Identity();
                                for (int ii = 0; ii < 3; ii++)
                                {
                                    for (int jj = 0; jj < 3; jj++)
                                        T[ii, jj] = R[ii, jj];
                                    T[ii, 3] = t[ii];
                                }
                                pointSet.pose = T;
                                pointSet.worldPointInliers = worldPointInlierSets[setIndex];
                                pointSet.imagePointInliers = imagePointInlierSets[setIndex];

                                setIndex++;
                            }
                        }
                    }

                }

                if (numCompletedFits == 0)
                    throw new CalibrationFailedException("Unable to successfully calibrate projector: " + projector.name);

                Console.WriteLine("final calibration:");
                Console.Write("camera matrix = \n" + projector.cameraMatrix);
                Console.Write("distortion = \n" + projector.lensDistortion);
                Console.WriteLine("error = " + minError);

                foreach (var camera in projector.calibrationPointSets.Keys)
                {
                    Console.WriteLine("camera " + camera.name + " pose:");
                    Console.Write(projector.calibrationPointSets[camera].pose);
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            //Console.WriteLine("x = [");
            //for (int ii = 0; ii < imagePointSubsets[0].Count; ii++)
            //    Console.WriteLine("{0} {1}", imagePointSubsets[0][ii].X, imagePointSubsets[0][ii].Y);
            //Console.WriteLine("]';");
            //Console.WriteLine("X = [");
            //for (int ii = 0; ii < worldPointSubsets[0].Count; ii++)
            //    Console.WriteLine("{0} {1} {2}", worldPointSubsets[0][ii][0], worldPointSubsets[0][ii][1], worldPointSubsets[0][ii][2]);
            //Console.WriteLine("]';");
            //Console.WriteLine("fc = [{0} {1}];", projector.cameraMatrix[0, 0], projector.cameraMatrix[1, 1]);
            //Console.WriteLine("cc = [{0} {1}];", projector.cameraMatrix[0, 2], projector.cameraMatrix[1, 2]);

            //Matrix thisR, thist;

            //{
            //    Matrix Rplane, tplane;
            //    CameraMath.PlaneFit(worldPointSubsets[0], out Rplane, out tplane);

            //    CameraMath.PlanarDLT(projector.cameraMatrix, projector.lensDistortion, worldPointSubsets[0], imagePointSubsets[0], Rplane, tplane, out thisR, out thist);
            //    //Console.WriteLine("DLT---------");
            //    //Console.WriteLine(thisR);
            //    //Console.WriteLine(thist);

            //}

            //// if pattern is not planar, we can recover projector intrinsics

            //List<RoomAliveToolkit.Matrix> rotations = null;
            //List<RoomAliveToolkit.Matrix> translations = null;

            //var error = CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
            //Console.WriteLine("error = " + error);

            // we check whether each view is planar, so that we can use the correct version of DLT

            // the overall set may not be planar however, so we have to check the union of points

            // if overall set is planar, leave intrinsics alone

            //
        }
 /// <summary>
 /// Convenience overload: saves <paramref name="image"/> to <paramref name="filename"/> as a TIFF,
 /// delegating to the full overload with Format8bppGray and a final argument of 1
 /// (see the full overload for that parameter's meaning).
 /// </summary>
 public static void SaveToTiff(SharpDX.WIC.ImagingFactory imagingFactory, ByteImage image, string filename)
     => SaveToTiff(imagingFactory, image, filename, SharpDX.WIC.PixelFormat.Format8bppGray, 1);
Example #23
0
 /// <summary>
 /// Binary-thresholds image <paramref name="a"/> into this image:
 /// pixels strictly greater than <paramref name="threshold"/> become 255, all others 0.
 /// Iterates over a.width * a.height pixels; assumes this image is at least that large.
 /// </summary>
 public void Threshold(ByteImage a, byte threshold)
 {
     byte* src = a.data;
     byte* dst = data;
     int count = a.width * a.height;
     for (int i = 0; i < count; i++)
     {
         byte v = *src++;
         *dst++ = (v > threshold) ? (byte)255 : (byte)0;
     }
 }
Example #24
0
        /// <summary>
        /// Decodes a full Gray code capture sequence into X and Y coordinate images.
        /// The first numXBits*2 captured images encode X (normal/inverted pairs); the
        /// following numYBits*2 images encode Y. The mask accumulates pixel validity
        /// across both passes (it is ANDed down inside the per-axis Decode).
        /// </summary>
        public unsafe void Decode(ByteImage[] capturedImages, ShortImage decodedX, ShortImage decodedY, ByteImage mask)
        {
            mask.Set(255); // cumulative across X and Y

            // X pass uses the leading images directly.
            Decode(capturedImages, decodedX, mask, numXBits, width);

            // TODO: this is a little awkward — slice out the Y images into their own array.
            int yImageCount = numYBits * 2;
            var yImages = new ByteImage[yImageCount];
            Array.Copy(capturedImages, numXBits * 2, yImages, 0, yImageCount);

            Decode(yImages, decodedY, mask, numYBits, height);
        }
Example #25
0
 /// <summary>
 /// High-pass thresholds image <paramref name="a"/> into this image:
 /// pixels strictly greater than <paramref name="threshold"/> keep their value, all others become 0.
 /// Iterates over a.width * a.height pixels; assumes this image is at least that large.
 /// </summary>
 public void ThresholdHighPass(ByteImage a, byte threshold)
 {
     byte* src = a.data;
     byte* dst = data;
     int count = a.width * a.height;
     for (int i = 0; i < count; i++)
     {
         byte v = *src++;
         *dst++ = (v > threshold) ? v : (byte)0;
     }
 }
Example #26
0
 /// <summary>
 /// Writes the photometric negative of image <paramref name="a"/> into this image
 /// (each output pixel is 255 minus the corresponding input pixel).
 /// Iterates over this image's width * height pixels; assumes <paramref name="a"/> matches.
 /// </summary>
 public void Invert(ByteImage a)
 {
     byte* src = a.data;
     byte* dst = data;
     int count = width * height;
     for (int i = 0; i < count; i++)
         *dst++ = (byte)(255 - *src++);
 }
        /// <summary>
        /// Calibrates each projector against the cameras that can see it.
        /// Steps: (1) for each camera, project valid depth pixels into the color image to
        /// build depth-camera-point / color-image-point correspondences; (2) use the decoded
        /// Gray code images to map color points to projector pixels, forming per
        /// projector/camera calibration point sets; (3) run a RANSAC-style loop that fits
        /// projector intrinsics and per-view extrinsics on random noncoplanar subsets,
        /// gathers inliers, refits on the inliers, and keeps the lowest-error model.
        /// Results are stored on each projector (cameraMatrix, lensDistortion) and on each
        /// point set (pose, inlier lists).
        /// </summary>
        /// <param name="directory">Root directory containing per-camera and per-projector capture data.</param>
        /// <exception cref="CalibrationFailedException">
        /// Thrown when noncoplanar point subsets cannot be found, or when no RANSAC
        /// iteration completes a successful fit for a projector.
        /// </exception>
        public void CalibrateProjectorGroups(string directory)
        {
            // for all cameras, take depth image points to color image points
            var depthImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var validMask = new ByteImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);

            foreach (var camera in cameras)
            {
                Console.WriteLine("projecting depth points to color camera " + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");
                varianceImage.LoadFromFile(cameraDirectory + "/variance.bin");
                validMask.Zero();

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();

                // TODO: consider using just one 4x4 in calibration class
                var colorCamera = new Matrix(4, 1);
                camera.colorImagePoints = new List<Matrix>();
                camera.depthCameraPoints = new List<Matrix>();
                var depthCamera4 = new Matrix(4, 1);

                // for each valid point in depth image
                int numRejected = 0;
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y += 1)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x += 1)
                    {
                        float depth = depthImage[x, y] / 1000f; // m
                        float variance = varianceImage[x, y];

                        // skip holes and high-variance (noisy) depth samples
                        if (depth == 0)
                            continue;
                        if (variance > 6 * 6)
                        {
                            numRejected++;
                            continue;
                        }
                        validMask[x, y] = (byte)255;

                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                        depthCamera4[0] = point.X * depth;
                        depthCamera4[1] = point.Y * depth;
                        depthCamera4[2] = depth;
                        depthCamera4[3] = 1;

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera4);
                        //colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);

                        if ((colorU >= 0) && (colorU < (Kinect2Calibration.colorImageWidth - 1)) && (colorV >= 0) && (colorV < (Kinect2Calibration.colorImageHeight - 1))) // BEWARE: later do we round or truncate??
                        {
                            var colorImagePoint = new Matrix(2, 1);
                            colorImagePoint[0] = colorU;
                            colorImagePoint[1] = colorV;
                            camera.colorImagePoints.Add(colorImagePoint);

                            // expect a 3-vector?
                            var depthCamera = new Matrix(3, 1);
                            depthCamera[0] = depthCamera4[0];
                            depthCamera[1] = depthCamera4[1];
                            depthCamera[2] = depthCamera4[2];

                            camera.depthCameraPoints.Add(depthCamera);

                            //Console.WriteLine(depthCamera[0] + "\t" + depthCamera[1] + "\t -> " + colorImagePoint[0] + "\t" + colorImagePoint[1]);
                        }

                    }
                SaveToTiff(imagingFactory, validMask, cameraDirectory + "/validMask.tiff");

                Console.WriteLine("rejected " + 100 * (float)numRejected / (float)(Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight) + "% pixels for high variance");

            }

            // we never save colorImagePoints, depthCameraPoints, so we must remember to run previous

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // use decoded Gray code images to create calibration point sets
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                projector.calibrationPointSets = new Dictionary<Camera, CalibrationPointSet>();

                foreach (var camera in cameras)
                {
                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    LoadFromTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    LoadFromTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    LoadFromTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    // we have a bunch of color camera / depth camera point corrspondences
                    // use the Gray code to find the position of the color camera point in the projector frame

                    // find 2D projector coordinates from decoded Gray code images
                    var imagePoints = new List<System.Drawing.PointF>();
                    var worldPoints = new List<Matrix>();

                    for (int i = 0; i < camera.colorImagePoints.Count; i++)
                    {
                        var colorImagePoint = camera.colorImagePoints[i];

                        // We would like to relate projected color points to color images stored in memory.
                        // The Kinect SDK and our camera calibration assumes X left, Y up (from the POV of the camera).
                        // We index images in memory with X right and Y down.
                        // Our Gray code images are flipped in the horizontal direction.
                        // Therefore to map an image space coordinate to a memory location we flip Y (and not X):
                        int x = (int)(colorImagePoint[0] + 0.5f);
                        int y = Kinect2Calibration.colorImageHeight - (int)(colorImagePoint[1] + 0.5f);

                        if ((x < 0) || (x >= Kinect2Calibration.colorImageWidth) || (y < 0) || (y >= Kinect2Calibration.colorImageHeight))
                        {
                            //Console.WriteLine("out of bounds");
                            continue;
                        }

                        if (mask[x, y] > 0) // Gray code is valid
                        {
                            // We would like to relate decoded row/column values to projector coordinates.
                            // To match the camera, we want projector's coordinate system X left, Y up (from the POV of the projector).
                            // We assume that the projector is configured in front projection mode (i.e., projected text looks correct in the real world).
                            // In that case decoded columns run X right (in the real world), decoded rows run Y down (in the real world).
                            // So we need to flip both X and Y decoded values.

                            var projectorImagePoint = new System.Drawing.PointF(projector.width - decodedColumns[x, y], projector.height - decodedRows[x, y]);
                            var depthCameraPoint = camera.depthCameraPoints[i];

                            imagePoints.Add(projectorImagePoint);
                            worldPoints.Add(depthCameraPoint);

                            //Console.WriteLine(depthCameraPoint[0] + "\t" + depthCameraPoint[1] + "\t" + depthCameraPoint[2] + "-> \t" + x + "\t" + y + "-> \t" + projectorImagePoint.X + "\t" + projectorImagePoint.Y);
                        }
                    }

                    // only keep pairs with enough correspondences to be useful for calibration
                    if (worldPoints.Count > 1000)
                    {
                        var pointSet = new CalibrationPointSet();
                        pointSet.worldPoints = worldPoints;
                        pointSet.imagePoints = imagePoints;
                        projector.calibrationPointSets[camera] = pointSet;
                        Console.WriteLine("projector " + projector.name + " is seen by camera " + camera.name + " (" + worldPoints.Count + " points)");
                    }
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // calibration
            foreach (var projector in projectors)
            {
                Console.WriteLine("calibrating projector " + projector.name);

                // RANSAC
                double minError = Double.PositiveInfinity;
                var random = new Random(0); // provide seed to ease debugging

                int numCompletedFits = 0;

                for (int i = 0; (numCompletedFits < 4) && (i < 10); i++)
                {
                    Console.WriteLine("RANSAC iteration " + i);

                    // randomly select small number of points from each calibration set
                    var worldPointSubsets = new List<List<Matrix>>();
                    var imagePointSubsets = new List<List<System.Drawing.PointF>>();

                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointSubset = new List<Matrix>();
                        var imagePointSubset = new List<System.Drawing.PointF>();

                        bool nonCoplanar = false;
                        int nTries = 0;

                        // resample until the subset is noncoplanar; intrinsics cannot be
                        // recovered from a planar point set
                        while (!nonCoplanar)
                        {
                            for (int j = 0; j < 100; j++)
                            {
                                int k = random.Next(pointSet.worldPoints.Count);
                                worldPointSubset.Add(pointSet.worldPoints[k]);
                                imagePointSubset.Add(pointSet.imagePoints[k]);
                            }

                            // check that points are not coplanar
                            Matrix X;
                            double D;
                            double ssdToPlane = PlaneFit(worldPointSubset, out X, out D);
                            int numOutliers = 0;
                            foreach (var point in worldPointSubset)
                            {
                                double distanceFromPlane = X.Dot(point) + D;
                                if (Math.Abs(distanceFromPlane) > 0.1f)
                                    numOutliers++;
                            }
                            nonCoplanar = (numOutliers > worldPointSubset.Count * 0.10f);
                            if (!nonCoplanar)
                            {
                                Console.WriteLine("points are coplanar (try #{0})", nTries);
                                worldPointSubset.Clear();
                                imagePointSubset.Clear();
                            }
                            if (nTries++ > 1000)
                            {
                                throw new CalibrationFailedException("Unable to find noncoplanar points.");
                                // consider moving this check up with variance check (when calibration point sets are formed)
                            }
                        }

                        worldPointSubsets.Add(worldPointSubset);
                        imagePointSubsets.Add(imagePointSubset);
                    }

                    var cameraMatrix = new Matrix(3, 3);
                    cameraMatrix[0, 0] = 1000; //fx TODO: can we instead init this from FOV?
                    cameraMatrix[1, 1] = 1000; //fy
                    cameraMatrix[0, 2] = projector.width / 2; //cx
                    cameraMatrix[1, 2] = 0; // projector lens shift; note this assumes desktop projection mode
                    cameraMatrix[2, 2] = 1;
                    var distCoeffs = new RoomAliveToolkit.Matrix(2, 1);
                    List<RoomAliveToolkit.Matrix> rotations = null;
                    List<RoomAliveToolkit.Matrix> translations = null;

                    // initial fit on the random subsets
                    var error = CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
                    Console.WriteLine("error = " + error);
                    //Console.WriteLine("intrinsics = \n" + cameraMatrix);

                    // NOTE: we differ from opencv's 'error' in that we do not distinguish between x and y;
                    // opencv's rms is computed over both combined (divisor pointsInSum*2).

                    // find inliers from overall dataset
                    var worldPointInlierSets = new List<List<Matrix>>();
                    var imagePointInlierSets = new List<List<System.Drawing.PointF>>();
                    int setIndex = 0;

                    bool enoughInliers = true;
                    // accumulated for diagnostics (not currently reported)
                    double sumError = 0;
                    int pointsInSum = 0;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointInlierSet = new List<Matrix>();
                        var imagePointInlierSet = new List<System.Drawing.PointF>();

                        //var R = Vision.Orientation.Rodrigues(rotations[setIndex]);
                        var R = RotationMatrixFromRotationVector(rotations[setIndex]);
                        var t = translations[setIndex];
                        var p = new Matrix(3, 1);

                        for (int k = 0; k < pointSet.worldPoints.Count; k++)
                        {
                            // transform world point into the projector's frame and reproject
                            p.Mult(R, pointSet.worldPoints[k]);
                            p.Add(t);

                            double u, v;
                            CameraMath.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                            double dx = pointSet.imagePoints[k].X - u;
                            double dy = pointSet.imagePoints[k].Y - v;
                            double thisError = Math.Sqrt((dx * dx) + (dy * dy));

                            // a point is an inlier if its reprojection error is under 1 pixel
                            if (thisError < 1.0f)
                            {
                                worldPointInlierSet.Add(pointSet.worldPoints[k]);
                                imagePointInlierSet.Add(pointSet.imagePoints[k]);
                            }
                            sumError += thisError * thisError;
                            pointsInSum++;
                        }
                        setIndex++;

                        // require that each view has a minimum number of inliers
                        enoughInliers = enoughInliers && (worldPointInlierSet.Count > 1000);

                        worldPointInlierSets.Add(worldPointInlierSet);
                        imagePointInlierSets.Add(imagePointInlierSet);

                    }

                    // if number of inliers > some threshold (should be for each subset)
                    if (enoughInliers) // should this threshold be a function of the number of cameras, a percentage?
                    {
                        var error2 = CalibrateCamera(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);

                        Console.WriteLine("error with inliers = " + error2);
                        Console.Write("camera matrix = \n" + cameraMatrix);

                        numCompletedFits++;

                        // if err < besterr save model (save rotation and translation to calibrationPointSets, cameraMatrix and distortion coeffs to projector)
                        // BUG FIX: previously compared and stored 'error' (the subset fit) rather than
                        // 'error2' (the inlier refit actually being accepted here).
                        if (error2 < minError)
                        {
                            minError = error2;
                            projector.cameraMatrix = cameraMatrix;
                            projector.lensDistortion = distCoeffs;
                            setIndex = 0;

                            foreach (var pointSet in projector.calibrationPointSets.Values)
                            {
                                // convert to 4x4 transform
                                var R = RotationMatrixFromRotationVector(rotations[setIndex]);
                                var t = translations[setIndex];

                                var T = new Matrix(4, 4);
                                T.Identity();
                                for (int ii = 0; ii < 3; ii++)
                                {
                                    for (int jj = 0; jj < 3; jj++)
                                        T[ii, jj] = R[ii, jj];
                                    T[ii, 3] = t[ii];
                                }
                                pointSet.pose = T;
                                pointSet.worldPointInliers = worldPointInlierSets[setIndex];
                                pointSet.imagePointInliers = imagePointInlierSets[setIndex];

                                setIndex++;
                            }
                        }
                    }

                }

                if (numCompletedFits == 0)
                    throw new CalibrationFailedException("Unable to successfully calibrate projector: " + projector.name);

                Console.WriteLine("final calibration:");
                Console.Write("camera matrix = \n" + projector.cameraMatrix);
                Console.Write("distortion = \n" + projector.lensDistortion);
                Console.WriteLine("error = " + minError);

                foreach (var camera in projector.calibrationPointSets.Keys)
                {
                    Console.WriteLine("camera " + camera.name + " pose:");
                    Console.Write(projector.calibrationPointSets[camera].pose);
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);
        }
Example #28
0
        // BEWARE: threshold on absdiff, and mask level settings*
        /// <summary>
        /// Decodes a Gray code image sequence into per-pixel coordinate values.
        /// capturedImages holds normal/inverted pairs, most significant bit first:
        /// images [2*i] and [2*i+1] encode bit i. The Gray code is converted to binary
        /// on the fly (each binary bit is the XOR of the current Gray bit with the
        /// previous binary bit) and accumulated into <paramref name="decoded"/>.
        /// The mask is ANDed down: a pixel stays valid only if every significant bit
        /// has sufficient contrast, and decoded values outside [0, max) are masked out.
        /// </summary>
        public unsafe void Decode(ByteImage[] capturedImages, ShortImage decoded, ByteImage mask, int nBits, int max)
        {
            decoded.Zero();

            int capturedWidth = decoded.Width;
            int capturedHeight = decoded.Height;

            // stores decoded bit from previous level; 'using' guarantees disposal even if
            // decoding throws (previously Dispose was only reached on the success path)
            using (var bits = new ByteImage(capturedWidth, capturedHeight))
            {
                for (int i = 0; i < nBits; i++)
                {
                    var capturedImage = capturedImages[2 * i];
                    var invertedCapturedImage = capturedImages[2 * i + 1];

                    // value contributed by this bit plane (MSB first); exact integer shift
                    // instead of the previous Math.Pow round trip through double
                    int bitValue = 1 << (nBits - i - 1);

                    ushort* decodedp = decoded.Data(0, 0);
                    byte* capturedp = capturedImage.Data(0, 0);
                    byte* invertedp = invertedCapturedImage.Data(0, 0);
                    byte* maskp = mask.Data(0, 0);
                    byte* bitsp = bits.Data(0, 0);

                    for (int y = 0; y < capturedHeight; y++)
                        for (int x = 0; x < capturedWidth; x++)
                        {
                            // a bit is considered valid if the diff is greater than some threshold; this value is tricky to set given AGC
                            byte valid = (Math.Abs(*capturedp - *invertedp) > 10) ? (byte)255 : (byte)0;
                            byte bit = (*capturedp >= *invertedp) ? (byte)255 : (byte)0;
                            // Gray code bit: XOR with the previous level's binary bit
                            *bitsp = (byte)(bit ^ *bitsp);
                            if (*bitsp == 255)
                                *decodedp = (ushort)(*decodedp + bitValue);

                            // stop updating the mask for the least significant levels (but continue decoding)
                            // *FIX: this is pretty fragile, perhaps better to record how many bits of rows and column have been recorded and walk back from that
                            if (i < nBits - 4)
                                *maskp = (byte)(valid & (*maskp));

                            decodedp++;
                            capturedp++;
                            invertedp++;
                            maskp++;
                            bitsp++;
                        }
                }
            }

            // check that decoded values are within range
            for (int y = 0; y < capturedHeight; y++)
                for (int x = 0; x < capturedWidth; x++)
                {
                    int d = decoded[x, y]; // can this be negative?
                    if ((d >= max) || (d < 0))
                        mask[x, y] = 0;
                }
        }