Code example #1
 /// <summary>
 /// Demonstrates how to use our Kinect calibration to convert a depth image point to color image coordinates.
 /// </summary>
 /// <param name="calibration">Kinect calibration to use for the conversion.</param>
 /// <param name="kinectSensor">Kinect sensor to read depth frames from.</param>
 public void Run(Kinect2Calibration calibration, KinectSensor kinectSensor)
 {
     this.calibration               = calibration;
     this.kinectSensor              = kinectSensor;
     depthImage                     = new ShortImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
     depthFrameReader               = kinectSensor.DepthFrameSource.OpenReader();
     depthFrameReader.FrameArrived += depthFrameReader_FrameArrived;
 }
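A minimal sketch of driving this sample, assuming the method above lives in a class named Sample (a hypothetical name) and that the Kinect2Calibration instance was produced beforehand by the RoomAlive calibration tools:

    // hypothetical usage sketch (Kinect SDK v2)
    var sensor = KinectSensor.GetDefault();
    sensor.Open();
    var calibration = LoadCalibration(); // assumed helper that deserializes a saved Kinect2Calibration
    new Sample().Run(calibration, sensor);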
Code example #2
        // Copies a 16-bit image into this 8-bit image, right-shifting each pixel by 'shift' bits.
        public void Copy(ShortImage a, int shift)
        {
            ushort *pa = a.Data(0, 0);
            byte *  p  = data;

            for (int i = 0; i < width * height; i++)
            {
                *p++ = (byte)(*pa++ >> shift);
            }
        }
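The shift parameter compresses the 16-bit range into a byte; the (byte) cast keeps only the low 8 bits of the shifted value, so anything above (255 << shift) wraps. A hedged usage sketch: Kinect v2 depth values are at most roughly 8000 mm, so a shift of 5 (divide by 32) fits the useful range into a byte.

    // hypothetical usage sketch: 16-bit depth (mm) -> 8-bit image for display
    var depth8 = new ByteImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
    depth8.Copy(depthImage, 5); // 8000 >> 5 == 250, within byte range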
Code example #3
File: FloatImage.cs Project: perepichka/RoomAlive
        public void Add(ShortImage a)
        {
            ushort *pa = a.Data(0, 0);
            float * p  = data;

            for (int i = 0; i < width * height; i++)
            {
                *p = *p + *pa;
                p++;
                pa++;
            }
        }
Code example #4
File: ShortImage.cs Project: perepichka/RoomAlive
        public void Add(ShortImage a)
        {
            ushort *p_a = a.Data(0, 0);
            ushort *p   = data;

            for (int i = 0; i < width * height; i++)
            {
                *p = (ushort)((*p_a) + (*p)); // note: wraps modulo 65536 on overflow
                p++;
                p_a++;
            }
        }
Code example #5
        public void ColorImageToDepthImage(double colorX, double colorY, ShortImage depthImage, out Matrix depthPoint, out double depthX, out double depthY)
        {
            double xUndistorted, yUndistorted;

            // convert to color camera space
            float fx = (float)colorCameraMatrix[0, 0];
            float fy = (float)colorCameraMatrix[1, 1];
            float cx = (float)colorCameraMatrix[0, 2];
            float cy = (float)colorCameraMatrix[1, 2];

            float[] kappa = new float[] { (float)colorLensDistortion[0], (float)colorLensDistortion[1] };
            // flip y because our calibration expects y up (right handed coordinates at all times)
            CameraMath.Undistort(fx, fy, cx, cy, kappa, colorX, (colorImageHeight - colorY), out xUndistorted, out yUndistorted);

            var colorToDepthTransform = new Matrix(4, 4);

            colorToDepthTransform.Inverse(depthToColorTransform);

            var colorPoint = new Matrix(4, 1);

            depthPoint = new Matrix(4, 1);
            depthX     = 0; depthY = 0;

            // walk along ray in color camera
            bool found = false;

            for (int s = 400; (s < 4500) && !found; s++) // TODO: confirm these limits (mm)
            {
                // convert to a 3D point along ray, in meters
                colorPoint[0] = xUndistorted * s / 1000.0;
                colorPoint[1] = yUndistorted * s / 1000.0;
                colorPoint[2] = s / 1000.0;
                colorPoint[3] = 1;

                // transform to depth camera 3D point and project
                depthPoint.Mult(colorToDepthTransform, colorPoint);
                CameraMath.Project(depthCameraMatrix, depthLensDistortion, depthPoint[0], depthPoint[1], depthPoint[2], out depthX, out depthY);

                int x = (int)depthX;
                // Y down, since we are indexing into an image
                int y = depthImageHeight - (int)depthY;
                if ((x >= 0) && (x < depthImageWidth) && (y >= 0) && (y < depthImageHeight))
                {
                    int z = depthImage[x, y];
                    if ((z != 0) && (z < s))
                    {
                        found = true;
                    }
                }
            }
            // convert back to Y down
            depthY = depthImageHeight - depthY;
        }
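A sketch of calling this conversion, assuming calibration is a populated Kinect2Calibration and depthImage holds the current depth frame in millimeters:

    // hypothetical usage sketch
    Matrix depthPoint;
    double depthX, depthY;
    calibration.ColorImageToDepthImage(960.0, 540.0, depthImage, out depthPoint, out depthX, out depthY);
    // depthPoint is the 3D point in depth camera coordinates (meters); depthX/depthY are image coordinates (Y down)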
Code example #6
        public void Copy(ShortImage shortImage)
        {
            ushort *p    = shortImage.Data(0, 0);
            ARGB32 *pOut = data;

            for (int i = 0; i < width * height; i++)
            {
                pOut->A   = 255;
                pOut->R   = (byte)*p;
                pOut->G   = (byte)*p;
                pOut++->B = (byte)*p++;
            }
        }
Code example #7
File: ShortImage.cs Project: perepichka/RoomAlive
        public void XMirror(ShortImage a)
        {
            ushort *pOut = data;
            ushort *pIn  = a.data;

            for (int yy = 0; yy < height; yy++)
            {
                pIn = a.Data(width - 1, yy);
                for (int xx = 0; xx < width; xx++)
                {
                    *pOut++ = *pIn--;
                }
            }
        }
Code example #8
File: ShortImage.cs Project: perepichka/RoomAlive
        public void YMirror(ShortImage a)
        {
            ushort *pOut = data;
            ushort *pIn  = a.data;

            for (int yy = 0; yy < height; yy++)
            {
                pIn = a.Data(0, height - 1 - yy); // last input row maps to first output row
                for (int xx = 0; xx < width; xx++)
                {
                    *pOut++ = *pIn++;
                }
            }
        }
Code example #9
        public void ColorImageToDepthImage(int colorX, int colorY, ShortImage depthImage, System.Drawing.PointF[] colorFrameToCameraSpaceTable, out Matrix depthPoint, out double depthX, out double depthY)
        {
            double xUndistorted, yUndistorted;

            // convert to color camera space
            // use lookup table to perform undistortion; this will be faster when converting lots of points
            var point = colorFrameToCameraSpaceTable[colorY * Kinect2Calibration.colorImageWidth + colorX];

            xUndistorted = point.X;
            yUndistorted = point.Y;

            var colorToDepthTransform = new Matrix(4, 4);

            colorToDepthTransform.Inverse(depthToColorTransform);

            var colorPoint = new Matrix(4, 1);

            depthPoint = new Matrix(4, 1);
            depthX     = 0; depthY = 0;

            // walk along ray in color camera
            bool found = false;

            for (int s = 400; (s < 4500) && !found; s++) // TODO: confirm these limits (mm)
            {
                // convert to a 3D point along ray, in meters
                colorPoint[0] = xUndistorted * s / 1000.0;
                colorPoint[1] = yUndistorted * s / 1000.0;
                colorPoint[2] = s / 1000.0;
                colorPoint[3] = 1;

                // transform to depth camera 3D point and project
                depthPoint.Mult(colorToDepthTransform, colorPoint);
                CameraMath.Project(depthCameraMatrix, depthLensDistortion, depthPoint[0], depthPoint[1], depthPoint[2], out depthX, out depthY);

                int x = (int)depthX;
                // Y down, since we are indexing into an image
                int y = depthImageHeight - (int)depthY;
                if ((x >= 0) && (x < depthImageWidth) && (y >= 0) && (y < depthImageHeight))
                {
                    int z = depthImage[x, y];
                    if ((z != 0) && (z < s))
                    {
                        found = true;
                    }
                }
            }
            // convert back to Y down
            depthY = depthImageHeight - depthY;
        }
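This overload replaces the per-point undistortion math with a table lookup, which pays off when converting many points. A sketch, assuming the table is precomputed once from the calibration (the method name here mirrors ComputeDepthFrameToCameraSpaceTable used elsewhere in this project and is an assumption):

    // hypothetical usage sketch: precompute the table once, reuse per point
    var table = calibration.ComputeColorFrameToCameraSpaceTable();
    Matrix depthPoint;
    double depthX, depthY;
    calibration.ColorImageToDepthImage(960, 540, depthImage, table, out depthPoint, out depthX, out depthY);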
Code example #10
        //Converts a 16-bit grayscale depth frame into a 32-bit frame
        public void CopyShortImageForGrayscaleDisplay(ShortImage shortImage, ushort maxVal)
        {
            ushort *pIn        = shortImage.Data(0, 0);
            double  multiplier = 255.0 / maxVal;
            ARGB32 *pOut       = data;

            for (int i = 0; i < width * height; i++)
            {
                byte intensity = (byte)(multiplier * *pIn++);//(255 * *pIn++ / maxVal);
                pOut->A   = 255;
                pOut->R   = intensity;
                pOut->G   = intensity;
                pOut++->B = intensity;
            }
        }
Code example #11
File: GrayCode.cs Project: perepichka/RoomAlive
        public unsafe void Decode(ByteImage[] capturedImages, ShortImage decodedX, ShortImage decodedY, ByteImage mask)
        {
            mask.Set(255); // cumulative across X and Y

            Decode(capturedImages, decodedX, mask, numXBits, width);

            // TODO: this is a little awkward
            var Yimages = new ByteImage[numYBits * 2];

            for (int i = 0; i < numYBits * 2; i++)
            {
                Yimages[i] = capturedImages[numXBits * 2 + i];
            }

            Decode(Yimages, decodedY, mask, numYBits, height);
        }
Code example #12
        public void Threshold(ShortImage a, ushort threshold)
        {
            ushort *pa = a.Data(0, 0);
            byte *  p  = data;

            for (int i = 0; i < width * height; i++)
            {
                if (*pa++ > threshold)
                {
                    *p++ = 255;
                }
                else
                {
                    *p++ = 0;
                }
            }
        }
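A hedged usage sketch: since the output is 255 wherever the input exceeds the threshold, thresholding a depth image in millimeters yields a mask of pixels farther than the given distance.

    // hypothetical usage sketch: mask of pixels farther than 1.5 m
    var mask = new ByteImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
    mask.Threshold(depthImage, 1500);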
Code example #13
File: ShortImage.cs Project: perepichka/RoomAlive
        //this is a special function which respects the YUYV ordering when mirroring
        public void XMirror_YUYSpecial(ShortImage a)
        {
            //Y1 U Y2 V  ---> Y2 U Y1 V
            byte *pOut = (byte *)data;
            byte *pIn  = (byte *)a.data;

            for (int yy = 0; yy < height; yy++)
            {
                pIn = (byte *)a.Data(width - 2, yy);

                for (int xx = 0; xx < width; xx += 2)
                {
                    *pOut++ = *(pIn + 2);
                    *pOut++ = *(pIn + 1);
                    *pOut++ = *pIn;
                    *pOut++ = *(pIn + 3);
                    pIn -= 4;
                }
            }
        }
Code example #14
File: ShortImage.cs Project: perepichka/RoomAlive
        /// <summary>
        /// n should be odd; if n is even, the function behaves as if n were n + 1
        /// </summary>
        /// <param name="a"></param>
        /// <param name="n"></param>
        public void BlurNxNNonZero(ShortImage a, int n)
        {
            if (n % 2 == 0)
            {
                n++;
            }

            ushort *input;
            ushort *output;

            ushort *[] pbs = new ushort *[n];
            int[]      s   = new int[n];
            int[]      c   = new int[n];

            int h, hc;
            int sumS, sumC;
            int lastElement = n - 1;

            for (int y = 0; y < (height - (n - 1)); y++)
            {
                input  = a.Data((int)(n / 2), y + n / 2); // center of the n x n window
                output = this.Data((int)(n / 2), y + n / 2);

                for (int cnt = 0; cnt < n; cnt++)
                {
                    pbs[cnt] = a.Data((n - 1), y + cnt);
                    s[cnt]   = 0;
                    c[cnt]   = 0;
                }

                h  = 0;
                hc = 0;

                for (int x = 0; x < (width - (n - 1)); x++)
                {
                    h  -= s[0];
                    hc -= c[0];

                    int i = 0;
                    for (i = 0; i < (n - 1); i++)
                    {
                        s[i] = s[i + 1];
                        c[i] = c[i + 1];
                    }

                    sumS = 0;
                    sumC = 0;
                    for (i = 0; i < n; i++)
                    {
                        ushort bsi = *pbs[i];
                        sumC += ((bsi > 0) ? 1 : 0);
                        sumS += bsi;
                        pbs[i]++;
                    }

                    c[lastElement] = sumC;
                    s[lastElement] = sumS;

                    h  += sumS;
                    hc += sumC;

                    int g = 0;
                    if (hc > 0)
                    {
                        g = h / hc;
                    }

                    //if (g > ushort.MaxValue)
                    //    g = ushort.MaxValue;

                    if (*input++ != (ushort)0)
                    {
                        *output++ = (ushort)g;
                    }
                    else
                    {
                        *output++ = (ushort)0;
                    }
                }
            }
        }
Code example #15
File: ShortImage.cs Project: perepichka/RoomAlive
        public void Blur3x3NonZero(ShortImage a)
        {
            ushort *input;
            ushort *output;

            ushort *pb02;
            ushort *pb12;
            ushort *pb22;

            int s0, s1, s2; //pixel values
            int c0, c1, c2; //valid pixel counts (where value > 0)
            int h, hc;

            for (int y = 0; y < height - 2; y++)
            {
                input  = a.Data(1, y + 1);
                output = this.Data(1, y + 1);

                pb02 = a.Data(2, y);
                pb12 = a.Data(2, y + 1);
                pb22 = a.Data(2, y + 2);

                h  = 0;
                hc = 0;

                s0 = 0; s1 = 0; s2 = 0;
                c0 = 0; c1 = 0; c2 = 0;

                for (int x = 0; x < width - 2; x++)
                {
                    h  -= s0;
                    hc -= c0;

                    s0 = s1;
                    s1 = s2;

                    c0 = c1;
                    c1 = c2;

                    c2 = (((*pb02) > 0) ? 1 : 0) + (((*pb12) > 0) ? 1 : 0) + (((*pb22) > 0) ? 1 : 0);
                    s2 = *pb02++ + *pb12++ + *pb22++;

                    h  += s2;
                    hc += c2;

                    int g = 0;
                    if (hc > 0)
                    {
                        g = h / hc;
                    }

                    if (g > ushort.MaxValue)
                    {
                        g = ushort.MaxValue;
                    }

                    if (*input++ != (ushort)0) // comment out this check if you want to fill holes and smooth edges
                    {
                        *output++ = (ushort)g;
                    }
                    else
                    {
                        *output++ = (ushort)0;
                    }
                }
            }
        }
Code example #16
 public static void LoadFromTiff(SharpDX.WIC.ImagingFactory imagingFactory, ShortImage image, string filename)
 {
     LoadFromTiff(imagingFactory, image, filename, 2); // 2 bytes per pixel (16bpp grayscale)
 }
Code example #17
File: GrayCode.cs Project: perepichka/RoomAlive
        // BEWARE: threshold on absdiff, and mask level settings*
        public unsafe void Decode(ByteImage[] capturedImages, ShortImage decoded, ByteImage mask, int nBits, int max)
        {
            decoded.Zero();

            int capturedWidth  = decoded.Width;
            int capturedHeight = decoded.Height;

            // stores decoded bit from previous level
            var bits = new ByteImage(capturedWidth, capturedHeight);

            for (int i = 0; i < nBits; i++)
            {
                var capturedImage         = capturedImages[2 * i];
                var invertedCapturedImage = capturedImages[2 * i + 1];

                int bitValue = (int)Math.Pow(2.0, nBits - i - 1);

                ushort *decodedp  = decoded.Data(0, 0);
                byte *  capturedp = capturedImage.Data(0, 0);
                byte *  invertedp = invertedCapturedImage.Data(0, 0);
                byte *  maskp     = mask.Data(0, 0);
                byte *  bitsp     = bits.Data(0, 0);

                for (int y = 0; y < capturedHeight; y++)
                {
                    for (int x = 0; x < capturedWidth; x++)
                    {
                        // a bit is considered valid if the diff is greater than some threshold; this value is tricky to set given AGC
                        byte valid = (Math.Abs(*capturedp - *invertedp) > 10) ? (byte)255 : (byte)0;
                        byte bit   = (*capturedp >= *invertedp) ? (byte)255 : (byte)0;
                        // Gray code bit
                        *bitsp = (byte)(bit ^ *bitsp);
                        if (*bitsp == 255)
                        {
                            *decodedp = (ushort)(*decodedp + bitValue);
                        }

                        // stop updating the mask for the least significant levels (but continue decoding)
                        // *FIX: this is pretty fragile, perhaps better to record how many bits of rows and column have been recorded and walk back from that
                        if (i < nBits - 4)
                        {
                            *maskp = (byte)(valid & (*maskp));
                        }

                        decodedp++;
                        capturedp++;
                        invertedp++;
                        maskp++;
                        bitsp++;
                    }
                }
            }
            bits.Dispose();

            // check that decoded values are within range
            for (int y = 0; y < capturedHeight; y++)
            {
                for (int x = 0; x < capturedWidth; x++)
                {
                    int d = decoded[x, y]; // can this be negative?
                    if ((d >= max) || (d < 0))
                    {
                        mask[x, y] = 0;
                    }
                }
            }
        }
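A sketch of driving the decoder for one projector/camera pair, patterned on DecodeGrayCodeImages below; capturedImages must hold the X-bit image pairs first, followed by the Y-bit pairs, which is the layout the two-argument Decode above assumes:

    // hypothetical usage sketch (projectorWidth/projectorHeight and capturedImages are assumed variables)
    var grayCode = new GrayCode(projectorWidth, projectorHeight);
    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
    grayCode.Decode(capturedImages, decodedColumns, decodedRows, mask);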
Code example #18
        public void CaptureDepthAndColor(string directory)
        {
            // foreach camera:
            // average a bunch of frames to find a good depth image
            // get calibration
            // TODO: parallelize

            foreach (var camera in cameras)
            {
                string cameraDirectory = directory + "/camera" + camera.name;
                if (!Directory.Exists(cameraDirectory))
                    Directory.CreateDirectory(cameraDirectory);

                // compute mean and variance of depth image
                var sum = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                sum.Zero();
                var sumSquared = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                sumSquared.Zero();
                var count = new ShortImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                count.Zero();
                var depth = new ShortImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                for (int i = 0; i < 100; i++)
                {
                    var depthBytes = camera.Client.LatestDepthImage();
                    Marshal.Copy(depthBytes, 0, depth.DataIntPtr, Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight * 2);
                    Console.WriteLine("acquired depth image " + i);
                    for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                        for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                            if (depth[x, y] != 0)
                            {
                                ushort d = depth[x, y];
                                count[x, y]++;
                                sum[x, y] += d;
                                sumSquared[x, y] += d * d;
                            }
                }

                var meanImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                meanImage.Zero(); // not all pixels will be assigned
                var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                varianceImage.Zero(); // not all pixels will be assigned

                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                    {
                        if (count[x, y] > 50)
                        {
                            float mean = sum[x, y] / count[x, y];
                            meanImage[x, y] = mean;
                            float variance = sumSquared[x, y] / count[x, y] - mean * mean; // Var[X] = E[X^2] - (E[X])^2
                            varianceImage[x, y] = variance;
                        }
                    }

                // WIC doesn't support encoding float tiff images, so for now we write to a binary file
                meanImage.SaveToFile(cameraDirectory + "/mean.bin");
                varianceImage.SaveToFile(cameraDirectory + "/variance.bin");

                // create a short version that we can write, used only for debugging
                var meanDepthShortImage = new ShortImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                        meanDepthShortImage[x, y] = (ushort)meanImage[x, y];
                SaveToTiff(imagingFactory, meanDepthShortImage, cameraDirectory + "/mean.tiff");

                // convert to world coordinates and save to ply file
                camera.calibration = camera.Client.GetCalibration();
                var depthFrameToCameraSpaceTable = camera.calibration.ComputeDepthFrameToCameraSpaceTable();
                var world = new Float3Image(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight); // TODO: move out/reuse
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                    {
                        var pointF = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                        float meanDepthMeters = meanImage[x, y] / 1000.0f;

                        Float3 worldPoint;
                        worldPoint.x = pointF.X * meanDepthMeters;
                        worldPoint.y = pointF.Y * meanDepthMeters;
                        worldPoint.z = meanDepthMeters;
                        world[x, y] = worldPoint;
                    }
                SaveToPly(cameraDirectory + "/mean.ply", world);

                // TODO: consider writing OBJ instead
            }

            //// connect to projectors
            //foreach (var projector in projectors)
            //{
            //    projector.Client.OpenDisplay(projector.displayIndex);
            //}

            // collect color images; this is not necessary for calibration, but is nice to have for visualization
            //foreach (var projector in projectors)
            //    projector.Client.SetColor(projector.displayIndex, 0f, 0f, 0f);
            //System.Threading.Thread.Sleep(5000);
            foreach (var camera in cameras)
            {
                // save color image
                string cameraDirectory = directory + "/camera" + camera.name;
                var jpegBytes = camera.Client.LatestJPEGImage();
                File.WriteAllBytes(cameraDirectory + "/color.jpg", jpegBytes);
                var colorBytes = camera.Client.LatestRGBImage();
                var image = new ARGBImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                Marshal.Copy(colorBytes, 0, image.DataIntPtr, Kinect2Calibration.colorImageWidth * Kinect2Calibration.colorImageHeight * 4);
                SaveToTiff(imagingFactory, image, cameraDirectory + "/color.tiff");
                image.Dispose();

            }

            //// close all displays
            //foreach (var projector in projectors)
            //{
            //    projector.Client.CloseDisplay(projector.displayIndex);
            //}
        }
Code example #19
        public void DecodeGrayCodeImages(string directory)
        {
            stopWatch.Start();

            // decode Gray code captures
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                var grayCode = new GrayCode(projector.width, projector.height);

                // allocate space for captured images
                int nCapturedImages = 2 * (grayCode.numXBits + grayCode.numYBits); // varies by projector
                var capturedImages = new ByteImage[nCapturedImages];
                for (int i = 0; i < nCapturedImages; i++) // varies by projector
                    capturedImages[i] = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                foreach (var camera in cameras)
                {
                    Console.WriteLine("decoding Gray code images for projector " + projector.name + ", camera " + camera.name);

                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    // load and decode Gray code images
                    for (int i = 0; i < nCapturedImages; i++)
                        LoadFromTiff(imagingFactory, capturedImages[i], cameraDirectory + "/grayCode" + i + ".tiff");

                    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    // TODO: there are a couple of interesting thresholds in Decode; they should be surfaced here
                    grayCode.Decode(capturedImages, decodedColumns, decodedRows, mask);

                    //Console.WriteLine("saving camera " + camera.displayName);

                    SaveToTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    SaveToTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    SaveToTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    var decodedColumnsMasked = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRowsMasked = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    for (int y = 0; y < Kinect2Calibration.colorImageHeight; y++)
                        for (int x = 0; x < Kinect2Calibration.colorImageWidth; x++)
                        {
                            if (mask[x, y] > 0)
                            {
                                decodedColumnsMasked[x, y] = decodedColumns[x, y];
                                decodedRowsMasked[x, y] = decodedRows[x, y];
                            }
                            else
                            {
                                decodedColumnsMasked[x, y] = 0;
                                decodedRowsMasked[x, y] = 0;
                            }
                        }
                    SaveToTiff(imagingFactory, decodedColumnsMasked, cameraDirectory + "/decodedColumnsMasked.tiff");
                    SaveToTiff(imagingFactory, decodedRowsMasked, cameraDirectory + "/decodedRowsMasked.tiff");
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);
        }
Code example #20
 public static void SaveToTiff(SharpDX.WIC.ImagingFactory imagingFactory, ShortImage image, string filename)
 {
     SaveToTiff(imagingFactory, image, filename, SharpDX.WIC.PixelFormat.Format16bppGray, 2);
 }
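The trailing 2 is the number of bytes per pixel, matching the 16bpp grayscale pixel format. A sketch of saving a decoded Gray code image, assuming a WIC factory is at hand:

    // hypothetical usage sketch
    var imagingFactory = new SharpDX.WIC.ImagingFactory();
    SaveToTiff(imagingFactory, decodedColumns, "decodedColumns.tiff");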
Code example #21
        public void CalibrateProjectorGroups(string directory)
        {
            // for all cameras, take depth image points to color image points
            var depthImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var validMask = new ByteImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);

            foreach (var camera in cameras)
            {
                Console.WriteLine("projecting depth points to color camera " + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");
                varianceImage.LoadFromFile(cameraDirectory + "/variance.bin");
                validMask.Zero();

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();

                // TODO: consider using just one 4x4 in calibration class
                var colorCamera = new Matrix(4, 1);
                camera.colorImagePoints = new List<Matrix>();
                camera.depthCameraPoints = new List<Matrix>();
                var depthCamera4 = new Matrix(4, 1);

                // for each valid point in depth image
                int numRejected = 0;
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y += 1)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x += 1)
                    {
                        float depth = depthImage[x, y] / 1000f; // m
                        float variance = varianceImage[x, y];

                        if (depth == 0)
                            continue;
                        if (variance > 6 * 6)
                        {
                            numRejected++;
                            continue;
                        }
                        validMask[x, y] = (byte)255;

                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                        depthCamera4[0] = point.X * depth;
                        depthCamera4[1] = point.Y * depth;
                        depthCamera4[2] = depth;
                        depthCamera4[3] = 1;

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera4);
                        //colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);

                        if ((colorU >= 0) && (colorU < (Kinect2Calibration.colorImageWidth - 1)) && (colorV >= 0) && (colorV < (Kinect2Calibration.colorImageHeight - 1))) // BEWARE: later do we round or truncate??
                        {
                            var colorImagePoint = new Matrix(2, 1);
                            colorImagePoint[0] = colorU;
                            colorImagePoint[1] = colorV;
                            camera.colorImagePoints.Add(colorImagePoint);

                            // expect a 3-vector?
                            var depthCamera = new Matrix(3, 1);
                            depthCamera[0] = depthCamera4[0];
                            depthCamera[1] = depthCamera4[1];
                            depthCamera[2] = depthCamera4[2];

                            camera.depthCameraPoints.Add(depthCamera);

                            //Console.WriteLine(depthCamera[0] + "\t" + depthCamera[1] + "\t -> " + colorImagePoint[0] + "\t" + colorImagePoint[1]);
                        }

                    }
                SaveToTiff(imagingFactory, validMask, cameraDirectory + "/validMask.tiff");

                Console.WriteLine("rejected " + 100 * (float)numRejected / (float)(Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight) + "% pixels for high variance");

            }

            // colorImagePoints and depthCameraPoints are never saved, so the previous step must be run in the same session

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // use decoded Gray code images to create calibration point sets
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                projector.calibrationPointSets = new Dictionary<Camera, CalibrationPointSet>();

                foreach (var camera in cameras)
                {
                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    LoadFromTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    LoadFromTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    LoadFromTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    // we have a bunch of color camera / depth camera point correspondences
                    // use the Gray code to find the position of the color camera point in the projector frame

                    // find 2D projector coordinates from decoded Gray code images
                    var imagePoints = new List<System.Drawing.PointF>();
                    var worldPoints = new List<Matrix>();

                    for (int i = 0; i < camera.colorImagePoints.Count; i++)
                    {
                        var colorImagePoint = camera.colorImagePoints[i];

                        // We would like to relate projected color points to color images stored in memory.
                        // The Kinect SDK and our camera calibration assume X left, Y up (from the POV of the camera).
                        // We index images in memory with X right and Y down.
                        // Our Gray code images are flipped in the horizontal direction.
                        // Therefore to map an image space coordinate to a memory location we flip Y (and not X):
                        int x = (int)(colorImagePoint[0] + 0.5f);
                        int y = Kinect2Calibration.colorImageHeight - (int)(colorImagePoint[1] + 0.5f);

                        if ((x < 0) || (x >= Kinect2Calibration.colorImageWidth) || (y < 0) || (y >= Kinect2Calibration.colorImageHeight))
                        {
                            //Console.WriteLine("out of bounds");
                            continue;
                        }

                        if (mask[x, y] > 0) // Gray code is valid
                        {
                            // We would like to relate decoded row/column values to projector coordinates.
                            // To match the camera, we want projector's coordinate system X left, Y up (from the POV of the projector).
                            // We assume that the projector is configured in front projection mode (i.e., projected text looks correct in the real world).
                            // In that case decoded columns run X right (in the real world), decoded rows run Y down (in the real world).
                            // So we need to flip both X and Y decoded values.

                            var projectorImagePoint = new System.Drawing.PointF(projector.width - decodedColumns[x, y], projector.height - decodedRows[x, y]);
                            var depthCameraPoint = camera.depthCameraPoints[i];

                            imagePoints.Add(projectorImagePoint);
                            worldPoints.Add(depthCameraPoint);

                            //Console.WriteLine(depthCameraPoint[0] + "\t" + depthCameraPoint[1] + "\t" + depthCameraPoint[2] + "-> \t" + x + "\t" + y + "-> \t" + projectorImagePoint.X + "\t" + projectorImagePoint.Y);
                        }
                    }

                    if (worldPoints.Count > 1000)
                    {
                        var pointSet = new CalibrationPointSet();
                        pointSet.worldPoints = worldPoints;
                        pointSet.imagePoints = imagePoints;
                        projector.calibrationPointSets[camera] = pointSet;
                        Console.WriteLine("projector " + projector.name + " is seen by camera " + camera.name + " (" + worldPoints.Count + " points)");
                    }
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // calibration
            foreach (var projector in projectors)
            {
                Console.WriteLine("calibrating projector " + projector.name);

                string projectorDirectory = directory + "/projector" + projector.name;

                // RANSAC
                double minError = Double.PositiveInfinity;
                var random = new Random(0); // provide seed to ease debugging

                int numCompletedFits = 0;

                for (int i = 0; (numCompletedFits < 4) && (i < 10); i++)
                {
                    Console.WriteLine("RANSAC iteration " + i);

                    // randomly select small number of points from each calibration set
                    var worldPointSubsets = new List<List<Matrix>>();
                    var imagePointSubsets = new List<List<System.Drawing.PointF>>();

                    bool foundNonplanarSubset = false;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointSubset = new List<Matrix>();
                        var imagePointSubset = new List<System.Drawing.PointF>();

                        // try to find a nonplanar subset
                        bool planar = true;
                        int nTries = 0;
                        while (planar && (nTries++ < 1000))
                        {
                            worldPointSubset.Clear();
                            imagePointSubset.Clear();

                            for (int j = 0; j < 100; j++)
                            {
                                int k = random.Next(pointSet.worldPoints.Count);
                                worldPointSubset.Add(pointSet.worldPoints[k]);
                                imagePointSubset.Add(pointSet.imagePoints[k]);
                            }

                            // planar?
                            Matrix Rplane, tplane, d;
                            CameraMath.PlaneFit(worldPointSubset, out Rplane, out tplane, out d);
                            //Console.WriteLine("planar : " + d[2] / d[1]);
                            planar = (d[2] / d[1]) < 0.001f;
                        }

                        worldPointSubsets.Add(worldPointSubset);
                        imagePointSubsets.Add(imagePointSubset);

                        // we can't initialize extrinsics yet, because we don't know which intrinsics we'll be using

                        if (!planar)
                            foundNonplanarSubset = true;
                    }

                    // we do not optimize intrinsics if all the point sets are planar, or if the projector intrinsics are marked as locked
                    bool fixIntrinsics = (!foundNonplanarSubset) || (projector.lockIntrinsics); // TODO: add option to lock intrinsics

                    var rotations = new List<Matrix>();
                    var translations = new List<Matrix>();
                    var cameraMatrix = new Matrix(3, 3);
                    var distCoeffs = new Matrix(2, 1);

                    if (fixIntrinsics)
                    {
                        cameraMatrix.Copy(projector.cameraMatrix);
                        distCoeffs.Copy(projector.lensDistortion);
                    }
                    else // nonplanar, so we can optimize intrinsics
                    {
                        cameraMatrix[0, 0] = 1000; //fx TODO: can we instead init this from FOV?
                        cameraMatrix[1, 1] = 1000; //fy
                        cameraMatrix[0, 2] = projector.width / 2; //cx
                        cameraMatrix[1, 2] = 0; // projector lens shift; note this assumes desktop projection mode
                        cameraMatrix[2, 2] = 1;
                    }

                    // init extrinsics
                    for (int ii = 0; ii < worldPointSubsets.Count; ii++)
                    {
                        Matrix R, t;
                        CameraMath.ExtrinsicsInit(cameraMatrix, distCoeffs, worldPointSubsets[ii], imagePointSubsets[ii], out R, out t);
                        rotations.Add(CameraMath.RotationVectorFromRotationMatrix(R));
                        translations.Add(t);
                    }

                    // initial RANSAC fit on subset of points
                    double error;
                    if (fixIntrinsics)
                        error = CameraMath.CalibrateCameraExtrinsicsOnly(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
                    else
                        error = CameraMath.CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);

                    Console.WriteLine("error on subset = " + error);

                    // RANSAC: find inliers from overall dataset
                    var worldPointInlierSets = new List<List<Matrix>>();
                    var imagePointInlierSets = new List<List<System.Drawing.PointF>>();
                    int setIndex = 0;

                    bool enoughInliers = true;
                    double sumError = 0;
                    int pointsInSum = 0;
                    int totalInliers = 0;
                    int totalPoints = 0;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointInlierSet = new List<Matrix>();
                        var imagePointInlierSet = new List<System.Drawing.PointF>();

                        var R = CameraMath.RotationMatrixFromRotationVector(rotations[setIndex]);
                        var t = translations[setIndex];
                        var p = new Matrix(3, 1);

                        for (int k = 0; k < pointSet.worldPoints.Count; k++)
                        {
                            p.Mult(R, pointSet.worldPoints[k]);
                            p.Add(t);

                            double u, v;
                            CameraMath.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                            double dx = pointSet.imagePoints[k].X - u;
                            double dy = pointSet.imagePoints[k].Y - v;
                            double thisError = Math.Sqrt((dx * dx) + (dy * dy));

                            if (thisError < 2.0f) // TODO: how to set this?
                            {
                                worldPointInlierSet.Add(pointSet.worldPoints[k]);
                                imagePointInlierSet.Add(pointSet.imagePoints[k]);
                            }
                            sumError += thisError * thisError;
                            pointsInSum++;
                        }
                        setIndex++;

                        // require that each view has a minimum number of inliers
                        enoughInliers = enoughInliers && (worldPointInlierSet.Count > 500); // should be related to min number of points in set (above)

                        totalPoints += pointSet.worldPoints.Count;
                        totalInliers += worldPointInlierSet.Count;

                        worldPointInlierSets.Add(worldPointInlierSet);
                        imagePointInlierSets.Add(imagePointInlierSet);
                    }

                    Console.WriteLine("{0}/{1} inliers", totalInliers, totalPoints);

                    // if number of inliers > some threshold (should be for each subset)
                    if (enoughInliers) // should this threshold be a function of the number of cameras, a percentage?
                    {
                        double error2;
                        if (fixIntrinsics)
                            error2 = CameraMath.CalibrateCameraExtrinsicsOnly(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);
                        else
                            error2 = CameraMath.CalibrateCamera(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);

                        Console.WriteLine("error with inliers = " + error2);
                        Console.Write("camera matrix = \n" + cameraMatrix);

                        numCompletedFits++;

                        // if reduced error save model (save rotation and translation to calibrationPointSets, cameraMatrix and distortion coeffs to projector)
                        if (error2 < minError)
                        {
                            minError = error2;
                            projector.cameraMatrix = cameraMatrix;
                            projector.lensDistortion = distCoeffs;
                            setIndex = 0;

                            foreach (var pointSet in projector.calibrationPointSets.Values)
                            {
                                // convert to 4x4 transform
                                var R = CameraMath.RotationMatrixFromRotationVector(rotations[setIndex]);
                                var t = translations[setIndex];

                                var T = new Matrix(4, 4);
                                T.Identity();
                                for (int ii = 0; ii < 3; ii++)
                                {
                                    for (int jj = 0; jj < 3; jj++)
                                        T[ii, jj] = R[ii, jj];
                                    T[ii, 3] = t[ii];
                                }
                                pointSet.pose = T;
                                pointSet.worldPointInliers = worldPointInlierSets[setIndex];
                                pointSet.imagePointInliers = imagePointInlierSets[setIndex];

                                setIndex++;
                            }
                        }
                    }

                }

                if (numCompletedFits == 0)
                    throw new CalibrationFailedException("Unable to successfully calibrate projector: " + projector.name);

                Console.WriteLine("final calibration:");
                Console.Write("camera matrix = \n" + projector.cameraMatrix);
                Console.Write("distortion = \n" + projector.lensDistortion);
                Console.WriteLine("error = " + minError);

                foreach (var camera in projector.calibrationPointSets.Keys)
                {
                    Console.WriteLine("camera " + camera.name + " pose:");
                    Console.Write(projector.calibrationPointSets[camera].pose);
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            //Console.WriteLine("x = [");
            //for (int ii = 0; ii < imagePointSubsets[0].Count; ii++)
            //    Console.WriteLine("{0} {1}", imagePointSubsets[0][ii].X, imagePointSubsets[0][ii].Y);
            //Console.WriteLine("]';");
            //Console.WriteLine("X = [");
            //for (int ii = 0; ii < worldPointSubsets[0].Count; ii++)
            //    Console.WriteLine("{0} {1} {2}", worldPointSubsets[0][ii][0], worldPointSubsets[0][ii][1], worldPointSubsets[0][ii][2]);
            //Console.WriteLine("]';");
            //Console.WriteLine("fc = [{0} {1}];", projector.cameraMatrix[0, 0], projector.cameraMatrix[1, 1]);
            //Console.WriteLine("cc = [{0} {1}];", projector.cameraMatrix[0, 2], projector.cameraMatrix[1, 2]);

            //Matrix thisR, thist;

            //{
            //    Matrix Rplane, tplane;
            //    CameraMath.PlaneFit(worldPointSubsets[0], out Rplane, out tplane);

            //    CameraMath.PlanarDLT(projector.cameraMatrix, projector.lensDistortion, worldPointSubsets[0], imagePointSubsets[0], Rplane, tplane, out thisR, out thist);
            //    //Console.WriteLine("DLT---------");
            //    //Console.WriteLine(thisR);
            //    //Console.WriteLine(thist);

            //}

            //// if pattern is not planar, we can recover projector intrinsics

            //List<RoomAliveToolkit.Matrix> rotations = null;
            //List<RoomAliveToolkit.Matrix> translations = null;

            //var error = CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
            //Console.WriteLine("error = " + error);

            // we check whether each view is planar, so that we can use the correct version of DLT

            // the overall set may not be planar however, so we have to check the union of points

            // if overall set is planar, leave intrinsics alone

            //
        }
コード例 #27
0
        public void CalibrateProjectorGroups(string directory)
        {
            // for all cameras, take depth image points to color image points
            var depthImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
            var validMask = new ByteImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);

            foreach (var camera in cameras)
            {
                Console.WriteLine("projecting depth points to color camera " + camera.name);

                // load depth image
                string cameraDirectory = directory + "/camera" + camera.name;
                depthImage.LoadFromFile(cameraDirectory + "/mean.bin");
                varianceImage.LoadFromFile(cameraDirectory + "/variance.bin");
                validMask.Zero();

                var calibration = camera.calibration;
                var depthFrameToCameraSpaceTable = calibration.ComputeDepthFrameToCameraSpaceTable();

                // TODO: consider using just one 4x4 in calibration class
                var colorCamera = new Matrix(4, 1);
                camera.colorImagePoints = new List<Matrix>();
                camera.depthCameraPoints = new List<Matrix>();
                var depthCamera4 = new Matrix(4, 1);

                // for each valid point in depth image
                int numRejected = 0;
                for (int y = 0; y < Kinect2Calibration.depthImageHeight; y += 1)
                    for (int x = 0; x < Kinect2Calibration.depthImageWidth; x += 1)
                    {
                        float depth = depthImage[x, y] / 1000f; // m
                        float variance = varianceImage[x, y];

                        if (depth == 0)
                            continue;
                        if (variance > 6 * 6) // reject pixels whose depth standard deviation exceeds 6 mm
                        {
                            numRejected++;
                            continue;
                        }
                        validMask[x, y] = (byte)255;

                        // convert to depth camera space
                        var point = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                        depthCamera4[0] = point.X * depth;
                        depthCamera4[1] = point.Y * depth;
                        depthCamera4[2] = depth;
                        depthCamera4[3] = 1;

                        // convert to color camera space
                        colorCamera.Mult(calibration.depthToColorTransform, depthCamera4);
                        //colorCamera.Scale(1.0 / colorCamera[3]);

                        // project to color image
                        double colorU, colorV;
                        CameraMath.Project(calibration.colorCameraMatrix, calibration.colorLensDistortion, colorCamera[0], colorCamera[1], colorCamera[2], out colorU, out colorV);

                        if ((colorU >= 0) && (colorU < (Kinect2Calibration.colorImageWidth - 1)) && (colorV >= 0) && (colorV < (Kinect2Calibration.colorImageHeight - 1))) // NOTE: downstream code must use a consistent rounding convention for these coordinates
                        {
                            var colorImagePoint = new Matrix(2, 1);
                            colorImagePoint[0] = colorU;
                            colorImagePoint[1] = colorV;
                            camera.colorImagePoints.Add(colorImagePoint);

                            // downstream calibration code expects world points as 3-vectors
                            var depthCamera = new Matrix(3, 1);
                            depthCamera[0] = depthCamera4[0];
                            depthCamera[1] = depthCamera4[1];
                            depthCamera[2] = depthCamera4[2];

                            camera.depthCameraPoints.Add(depthCamera);

                            //Console.WriteLine(depthCamera[0] + "\t" + depthCamera[1] + "\t -> " + colorImagePoint[0] + "\t" + colorImagePoint[1]);
                        }

                    }
                SaveToTiff(imagingFactory, validMask, cameraDirectory + "/validMask.tiff");

                Console.WriteLine("rejected " + 100 * (float)numRejected / (float)(Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight) + "% pixels for high variance");

            }

            // colorImagePoints and depthCameraPoints are never saved to disk, so the projection step above must run in the same session as the code below

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // use decoded Gray code images to create calibration point sets
            foreach (var projector in projectors)
            {
                string projectorDirectory = directory + "/projector" + projector.name;

                projector.calibrationPointSets = new Dictionary<Camera, CalibrationPointSet>();

                foreach (var camera in cameras)
                {
                    string cameraDirectory = projectorDirectory + "/camera" + camera.name;

                    var decodedColumns = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var decodedRows = new ShortImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
                    var mask = new ByteImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);

                    LoadFromTiff(imagingFactory, decodedColumns, cameraDirectory + "/decodedColumns.tiff");
                    LoadFromTiff(imagingFactory, decodedRows, cameraDirectory + "/decodedRows.tiff");
                    LoadFromTiff(imagingFactory, mask, cameraDirectory + "/mask.tiff");

                    // we have a bunch of color camera / depth camera point correspondences
                    // use the Gray code to find the position of the color camera point in the projector frame

                    // find 2D projector coordinates from decoded Gray code images
                    var imagePoints = new List<System.Drawing.PointF>();
                    var worldPoints = new List<Matrix>();

                    for (int i = 0; i < camera.colorImagePoints.Count; i++)
                    {
                        var colorImagePoint = camera.colorImagePoints[i];

                        // We would like to relate projected color points to color images stored in memory.
                        // The Kinect SDK and our camera calibration assumes X left, Y up (from the POV of the camera).
                        // We index images in memory with X right and Y down.
                        // Our Gray code images are flipped in the horizontal direction.
                        // Therefore to map an image space coordinate to a memory location we flip Y (and not X):
                        int x = (int)(colorImagePoint[0] + 0.5f);
                        int y = Kinect2Calibration.colorImageHeight - (int)(colorImagePoint[1] + 0.5f);

                        if ((x < 0) || (x >= Kinect2Calibration.colorImageWidth) || (y < 0) || (y >= Kinect2Calibration.colorImageHeight))
                        {
                            //Console.WriteLine("out of bounds");
                            continue;
                        }

                        if (mask[x, y] > 0) // Gray code is valid
                        {
                            // We would like to relate decoded row/column values to projector coordinates.
                            // To match the camera, we want projector's coordinate system X left, Y up (from the POV of the projector).
                            // We assume that the projector is configured in front projection mode (i.e., projected text looks correct in the real world).
                            // In that case decoded columns run X right (in the real world), decoded rows run Y down (in the real world).
                            // So we need to flip both X and Y decoded values.

                            var projectorImagePoint = new System.Drawing.PointF(projector.width - decodedColumns[x, y], projector.height - decodedRows[x, y]);
                            var depthCameraPoint = camera.depthCameraPoints[i];

                            imagePoints.Add(projectorImagePoint);
                            worldPoints.Add(depthCameraPoint);

                            //Console.WriteLine(depthCameraPoint[0] + "\t" + depthCameraPoint[1] + "\t" + depthCameraPoint[2] + "-> \t" + x + "\t" + y + "-> \t" + projectorImagePoint.X + "\t" + projectorImagePoint.Y);
                        }
                    }

                    if (worldPoints.Count > 1000)
                    {
                        var pointSet = new CalibrationPointSet();
                        pointSet.worldPoints = worldPoints;
                        pointSet.imagePoints = imagePoints;
                        projector.calibrationPointSets[camera] = pointSet;
                        Console.WriteLine("projector " + projector.name + " is seen by camera " + camera.name + " (" + worldPoints.Count + " points)");
                    }
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);

            // calibration
            foreach (var projector in projectors)
            {
                Console.WriteLine("calibrating projector " + projector.name);

                string projectorDirectory = directory + "/projector" + projector.name;

                // RANSAC
                double minError = Double.PositiveInfinity;
                var random = new Random(0); // provide seed to ease debugging

                int numCompletedFits = 0;

                for (int i = 0; (numCompletedFits < 4) && (i < 10); i++)
                {
                    Console.WriteLine("RANSAC iteration " + i);

                    // randomly select small number of points from each calibration set
                    var worldPointSubsets = new List<List<Matrix>>();
                    var imagePointSubsets = new List<List<System.Drawing.PointF>>();

                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointSubset = new List<Matrix>();
                        var imagePointSubset = new List<System.Drawing.PointF>();

                        bool nonCoplanar = false;
                        int nTries = 0;

                        while (!nonCoplanar)
                        {
                            for (int j = 0; j < 100; j++)
                            {
                                int k = random.Next(pointSet.worldPoints.Count);
                                worldPointSubset.Add(pointSet.worldPoints[k]);
                                imagePointSubset.Add(pointSet.imagePoints[k]);
                            }

                            // check that points are not coplanar
                            Matrix X;
                            double D;
                            double ssdToPlane = PlaneFit(worldPointSubset, out X, out D);
                            int numOutliers = 0;
                            foreach (var point in worldPointSubset)
                            {
                                double distanceFromPlane = X.Dot(point) + D;
                                if (Math.Abs(distanceFromPlane) > 0.1f)
                                    numOutliers++;
                            }
                            nonCoplanar = (numOutliers > worldPointSubset.Count * 0.10f);
                            if (!nonCoplanar)
                            {
                                Console.WriteLine("points are coplanar (try #{0})", nTries);
                                worldPointSubset.Clear();
                                imagePointSubset.Clear();
                            }
                            if (nTries++ > 1000)
                            {
                                // consider moving this check up beside the variance check (when calibration point sets are formed)
                                throw new CalibrationFailedException("Unable to find noncoplanar points.");
                            }
                        }

                        worldPointSubsets.Add(worldPointSubset);
                        imagePointSubsets.Add(imagePointSubset);
                    }

                    var cameraMatrix = new Matrix(3, 3);
                    cameraMatrix[0, 0] = 1000; //fx TODO: can we instead init this from FOV?
                    cameraMatrix[1, 1] = 1000; //fy
                    cameraMatrix[0, 2] = projector.width / 2; //cx
                    cameraMatrix[1, 2] = 0; // projector lens shift; note this assumes desktop projection mode
                    cameraMatrix[2, 2] = 1;
                    var distCoeffs = new RoomAliveToolkit.Matrix(2, 1);
                    List<RoomAliveToolkit.Matrix> rotations = null;
                    List<RoomAliveToolkit.Matrix> translations = null;

                    var error = CalibrateCamera(worldPointSubsets, imagePointSubsets, cameraMatrix, ref rotations, ref translations);
                    Console.WriteLine("error = " + error);
                    //Console.WriteLine("intrinsics = \n" + cameraMatrix);

                    //// we differ from opencv's 'error' in that we do not distinguish between x and y.
                    //// i.e. opencv uses the method below; this number would match if we used pointsInSum2*2 in the divisor.
                    //// double check opencv's error
                    //{
                    //    double sumError2 = 0;
                    //    int pointsInSum2 = 0;
                    //    for (int ii = 0; ii < worldPointSubsets.Count; ii++)
                    //    {
                    //        var R = Orientation.Rodrigues(rotations[ii]);
                    //        var t = translations[ii];
                    //        var p = new Matrix(3, 1);

                    //        var worldPointSet = worldPointSubsets[ii];
                    //        var imagePointSet = imagePointSubsets[ii];

                    //        for (int k = 0; k < worldPointSet.Count; k++)
                    //        {
                    //            p.Mult(R, worldPointSet[k]);
                    //            p.Add(t);
                    //            double u, v;
                    //            Kinect2.Kinect2Calibration.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                    //            double dx = imagePointSet[k].X - u;
                    //            double dy = imagePointSet[k].Y - v;

                    //            double thisError = dx * dx + dy * dy;
                    //            sumError2 += thisError;
                    //            pointsInSum2++;
                    //        }
                    //    }

                    //    // opencv's error is rms but over both x and y combined

                    //    Console.WriteLine("average projection error = " + Math.Sqrt(sumError2 / (float)(pointsInSum2)));
                    //}

                    // find inliers from overall dataset
                    var worldPointInlierSets = new List<List<Matrix>>();
                    var imagePointInlierSets = new List<List<System.Drawing.PointF>>();
                    int setIndex = 0;

                    bool enoughInliers = true;
                    double sumError = 0;
                    int pointsInSum = 0;
                    foreach (var pointSet in projector.calibrationPointSets.Values)
                    {
                        var worldPointInlierSet = new List<Matrix>();
                        var imagePointInlierSet = new List<System.Drawing.PointF>();

                        //var R = Vision.Orientation.Rodrigues(rotations[setIndex]);
                        var R = RotationMatrixFromRotationVector(rotations[setIndex]);
                        var t = translations[setIndex];
                        var p = new Matrix(3, 1);

                        for (int k = 0; k < pointSet.worldPoints.Count; k++)
                        {
                            p.Mult(R, pointSet.worldPoints[k]);
                            p.Add(t);

                            double u, v;
                            CameraMath.Project(cameraMatrix, distCoeffs, p[0], p[1], p[2], out u, out v);

                            double dx = pointSet.imagePoints[k].X - u;
                            double dy = pointSet.imagePoints[k].Y - v;
                            double thisError = Math.Sqrt((dx * dx) + (dy * dy));

                            if (thisError < 1.0f)
                            {
                                worldPointInlierSet.Add(pointSet.worldPoints[k]);
                                imagePointInlierSet.Add(pointSet.imagePoints[k]);
                            }
                            sumError += thisError * thisError;
                            pointsInSum++;
                        }
                        setIndex++;

                        // require that each view has a minimum number of inliers
                        enoughInliers = enoughInliers && (worldPointInlierSet.Count > 1000);

                        worldPointInlierSets.Add(worldPointInlierSet);
                        imagePointInlierSets.Add(imagePointInlierSet);

                    }

                    // if number of inliers > some threshold (should be for each subset)
                    if (enoughInliers) // should this threshold be a function of the number of cameras, a percentage?
                    {
                        var error2 = CalibrateCamera(worldPointInlierSets, imagePointInlierSets, cameraMatrix, ref rotations, ref translations);

                        Console.WriteLine("error with inliers = " + error2);
                        Console.Write("camera matrix = \n" + cameraMatrix);

                        numCompletedFits++;

                        // if the refit on inliers improves on the best error so far, save the model
                        // (rotation and translation to calibrationPointSets, cameraMatrix and distortion coeffs to projector);
                        // note we compare error2, the error of the refit whose cameraMatrix we are saving
                        if (error2 < minError)
                        {
                            minError = error2;
                            projector.cameraMatrix = cameraMatrix;
                            projector.lensDistortion = distCoeffs;
                            setIndex = 0;

                            foreach (var pointSet in projector.calibrationPointSets.Values)
                            {
                                // convert to 4x4 transform
                                var R = RotationMatrixFromRotationVector(rotations[setIndex]);
                                var t = translations[setIndex];

                                var T = new Matrix(4, 4);
                                T.Identity();
                                for (int ii = 0; ii < 3; ii++)
                                {
                                    for (int jj = 0; jj < 3; jj++)
                                        T[ii, jj] = R[ii, jj];
                                    T[ii, 3] = t[ii];
                                }
                                pointSet.pose = T;
                                pointSet.worldPointInliers = worldPointInlierSets[setIndex];
                                pointSet.imagePointInliers = imagePointInlierSets[setIndex];

                                setIndex++;
                            }
                        }
                    }

                }

                if (numCompletedFits == 0)
                    throw new CalibrationFailedException("Unable to successfully calibrate projector: " + projector.name);

                Console.WriteLine("final calibration:");
                Console.Write("camera matrix = \n" + projector.cameraMatrix);
                Console.Write("distortion = \n" + projector.lensDistortion);
                Console.WriteLine("error = " + minError);

                foreach (var camera in projector.calibrationPointSets.Keys)
                {
                    Console.WriteLine("camera " + camera.name + " pose:");
                    Console.Write(projector.calibrationPointSets[camera].pose);
                }
            }

            Console.WriteLine("elapsed time " + stopWatch.ElapsedMilliseconds);
        }
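Because the intermediate point lists are never persisted (see the note above), the capture, decode, and calibration steps must run against the same directory in one session. A hedged sketch of that order; the construction and the decode step are assumptions, not the toolkit's confirmed API:

            // hypothetical end-to-end driver
            var ensemble = new ProjectorCameraEnsemble();     // assumed: cameras/projectors configured beforehand
            ensemble.CaptureDepthAndColor("calibration");     // writes mean.bin / variance.bin per camera
            // ... project and capture Gray codes, producing decodedColumns.tiff / decodedRows.tiff / mask.tiff ...
            ensemble.CalibrateProjectorGroups("calibration"); // re-derives colorImagePoints/depthCameraPoints in memory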
Code example #28
File: ByteImage.cs Project: dmak78/RoomAliveToolkit
 public void Threshold(ShortImage a, ushort threshold)
 {
     ushort* pa = a.Data(0, 0);
     byte* p = data;
     for (int i = 0; i < width * height; i++)
     {
         if (*pa++ > threshold)
             *p++ = 255;
         else
             *p++ = 0;
     }
 }
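A small usage sketch, e.g. turning a per-pixel absolute-difference image into a validity mask; the image names and the threshold value are assumptions:

     var diff = new ShortImage(width, height); // e.g., |white capture - black capture| per pixel
     var mask = new ByteImage(width, height);
     mask.Threshold(diff, 10); // 255 where diff > 10, else 0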
Code example #29
File: ByteImage.cs Project: dmak78/RoomAliveToolkit
        public void Copy(ShortImage a, int shift)
        {
            ushort* pa = a.Data(0, 0);
            byte* p = data;

            for (int i = 0; i < width * height; i++)
                *p++ = (byte)(*pa++ >> shift);
        }
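The shift picks which 8 of the 16 bits survive. For Kinect depth in millimeters (roughly 500-8000 mm), a shift of 5 maps 0..8191 onto 0..255; a sketch, where the shift choice is an assumption rather than a toolkit convention:

            var depth8 = new ByteImage(512, 424);
            depth8.Copy(depth16, 5); // *pa >> 5: 0..8191 mm compresses to 0..255 for display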
Code example #30
File: ShortImage.cs Project: perepichka/RoomAlive
        public void Blur5x5NonZero(ShortImage a)
        {
            ushort *input;
            ushort *output;

            ushort *pb04;
            ushort *pb14;
            ushort *pb24;
            ushort *pb34;
            ushort *pb44;

            int s0, s1, s2, s3, s4; //pixel values
            int c0, c1, c2, c3, c4; //valid pixel counts (where value > 0)
            int h, hc;

            for (int y = 0; y < height - 4; y++)
            {
                input  = a.Data(2, y + 2); // center pixel of the 5x5 window spanning rows y..y+4
                output = this.Data(2, y + 2);

                pb04 = a.Data(4, y);
                pb14 = a.Data(4, y + 1);
                pb24 = a.Data(4, y + 2);
                pb34 = a.Data(4, y + 3);
                pb44 = a.Data(4, y + 4);

                h  = 0;
                hc = 0;

                s0 = 0; s1 = 0; s2 = 0; s3 = 0; s4 = 0;
                c0 = 0; c1 = 0; c2 = 0; c3 = 0; c4 = 0;

                for (int x = 0; x < width - 4; x++)
                {
                    h  -= s0;
                    hc -= c0;

                    s0 = s1;
                    s1 = s2;
                    s2 = s3;
                    s3 = s4;

                    c0 = c1;
                    c1 = c2;
                    c2 = c3;
                    c3 = c4;

                    c4 = (((*pb04) > 0) ? 1 : 0) + (((*pb14) > 0) ? 1 : 0) + (((*pb24) > 0) ? 1 : 0) + (((*pb34) > 0) ? 1 : 0) + (((*pb44) > 0) ? 1 : 0);
                    s4 = *pb04++ + *pb14++ + *pb24++ + *pb34++ + *pb44++;

                    h  += s4;
                    hc += c4;

                    int g = 0;
                    if (hc > 0)
                    {
                        g = h / hc;
                    }

                    //if (g > ushort.MaxValue)
                    //    g = ushort.MaxValue;

                    if (*input++ != (ushort)0)
                    {
                        *output++ = (ushort)g;
                    }
                    else
                    {
                        *output++ = (ushort)0;
                    }
                }
            }
        }
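The five running column sums (s0..s4) and counts (c0..c4) make this a sliding 5x5 box filter at O(1) per pixel; zero pixels are excluded from the average, and zero inputs stay zero. Usage is a one-liner (image names are assumptions):

            var smoothed = new ShortImage(512, 424);
            smoothed.Blur5x5NonZero(depth); // average of nonzero neighbors; invalid (zero) pixels stay zero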
Code example #31
        // NOTE: the absdiff threshold and the mask update level below are hand-tuned; see the inline comments
        public unsafe void Decode(ByteImage[] capturedImages, ShortImage decoded, ByteImage mask, int nBits, int max)
        {
            decoded.Zero();

            int capturedWidth = decoded.Width;
            int capturedHeight = decoded.Height;

            // stores decoded bit from previous level
            var bits = new ByteImage(capturedWidth, capturedHeight);

            for (int i = 0; i < nBits; i++)
            {
                var capturedImage = capturedImages[2 * i];
                var invertedCapturedImage = capturedImages[2 * i + 1];

                int bitValue = 1 << (nBits - i - 1);

                ushort* decodedp = decoded.Data(0, 0);
                byte* capturedp = capturedImage.Data(0, 0);
                byte* invertedp = invertedCapturedImage.Data(0, 0);
                byte* maskp = mask.Data(0, 0);
                byte* bitsp = bits.Data(0, 0);

                for (int y = 0; y < capturedHeight; y++)
                    for (int x = 0; x < capturedWidth; x++)
                    {
                        // a bit is considered valid if the diff is greater than some threshold; this value is tricky to set given AGC
                        byte valid = (Math.Abs(*capturedp - *invertedp) > 10) ? (byte)255 : (byte)0;
                        byte bit = (*capturedp >= *invertedp) ? (byte)255 : (byte)0;
                        // Gray code bit
                        *bitsp = (byte)(bit ^ *bitsp);
                        if (*bitsp == 255)
                            *decodedp = (ushort)(*decodedp + bitValue);

                        // stop updating the mask for the least significant levels (but continue decoding)
                        // *FIX: this is pretty fragile, perhaps better to record how many bits of rows and column have been recorded and walk back from that
                        if (i < nBits - 4)
                            *maskp = (byte)(valid & (*maskp));

                        decodedp++;
                        capturedp++;
                        invertedp++;
                        maskp++;
                        bitsp++;
                    }
            }
            bits.Dispose();

            // check that decoded values are within range
            for (int y = 0; y < capturedHeight; y++)
                for (int x = 0; x < capturedWidth; x++)
                {
                    int d = decoded[x, y]; // ushort value, never negative
                    if (d >= max)
                        mask[x, y] = 0;
                }
        }
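The inner loop relies on the Gray code identity that binary bit i is the XOR of all Gray bits at level i and above; the bits image carries that running XOR across levels. The same idea on a single value, as a standalone sketch (not part of the toolkit):

        static int GrayToBinary(int gray, int nBits)
        {
            int binary = 0, bit = 0;
            for (int i = nBits - 1; i >= 0; i--)
            {
                bit ^= (gray >> i) & 1; // running XOR of Gray bits = binary bit i
                binary |= bit << i;
            }
            return binary;
        }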
Code example #32
            // encapsulates d3d resources for a camera
            public CameraDeviceResource(SharpDX.Direct3D11.Device device, ProjectorCameraEnsemble.Camera camera, Object renderLock, string directory)
            {
                this.device     = device;
                this.camera     = camera;
                this.renderLock = renderLock;

                // Kinect depth image
                var depthImageTextureDesc = new Texture2DDescription()
                {
                    Width             = 512,
                    Height            = 424,
                    MipLevels         = 1,
                    ArraySize         = 1,
                    Format            = SharpDX.DXGI.Format.R16_UInt,
                    SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
                    Usage             = ResourceUsage.Dynamic,
                    BindFlags         = BindFlags.ShaderResource,
                    CpuAccessFlags    = CpuAccessFlags.Write,
                };

                depthImageTexture   = new Texture2D(device, depthImageTextureDesc);
                depthImageTextureRV = new ShaderResourceView(device, depthImageTexture);

                var floatDepthImageTextureDesc = new Texture2DDescription()
                {
                    Width             = 512,
                    Height            = 424,
                    MipLevels         = 1,
                    ArraySize         = 1,
                    Format            = SharpDX.DXGI.Format.R32_Float,
                    SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
                    Usage             = ResourceUsage.Default,
                    BindFlags         = BindFlags.RenderTarget | BindFlags.ShaderResource,
                    CpuAccessFlags    = CpuAccessFlags.None,
                };

                floatDepthImageTexture          = new Texture2D(device, floatDepthImageTextureDesc);
                floatDepthImageRV               = new ShaderResourceView(device, floatDepthImageTexture);
                floatDepthImageRenderTargetView = new RenderTargetView(device, floatDepthImageTexture);

                floatDepthImageTexture2          = new Texture2D(device, floatDepthImageTextureDesc);
                floatDepthImageRV2               = new ShaderResourceView(device, floatDepthImageTexture2);
                floatDepthImageRenderTargetView2 = new RenderTargetView(device, floatDepthImageTexture2);

                // Kinect color image
                var colorImageStagingTextureDesc = new Texture2DDescription()
                {
                    Width             = colorImageWidth,
                    Height            = colorImageHeight,
                    MipLevels         = 1,
                    ArraySize         = 1,
                    Format            = SharpDX.DXGI.Format.B8G8R8A8_UNorm,
                    SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
                    Usage             = ResourceUsage.Dynamic,
                    BindFlags         = BindFlags.ShaderResource,
                    CpuAccessFlags    = CpuAccessFlags.Write
                };

                colorImageStagingTexture = new Texture2D(device, colorImageStagingTextureDesc);

                var colorImageTextureDesc = new Texture2DDescription()
                {
                    Width             = colorImageWidth,
                    Height            = colorImageHeight,
                    MipLevels         = 0,
                    ArraySize         = 1,
                    Format            = SharpDX.DXGI.Format.B8G8R8A8_UNorm,
                    SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
                    Usage             = ResourceUsage.Default,
                    BindFlags         = BindFlags.ShaderResource | BindFlags.RenderTarget,
                    CpuAccessFlags    = CpuAccessFlags.None,
                    OptionFlags       = ResourceOptionFlags.GenerateMipMaps
                };

                colorImageTexture   = new Texture2D(device, colorImageTextureDesc);
                colorImageTextureRV = new ShaderResourceView(device, colorImageTexture);

                // vertex buffer
                var table       = camera.calibration.ComputeDepthFrameToCameraSpaceTable();
                int numVertices = 6 * (depthImageWidth - 1) * (depthImageHeight - 1);
                var vertices    = new VertexPosition[numVertices];

                Int3[] quadOffsets = new Int3[]
                {
                    new Int3(0, 0, 0),
                    new Int3(1, 0, 0),
                    new Int3(0, 1, 0),
                    new Int3(1, 0, 0),
                    new Int3(1, 1, 0),
                    new Int3(0, 1, 0),
                };

                int vertexIndex = 0;

                for (int y = 0; y < depthImageHeight - 1; y++)
                {
                    for (int x = 0; x < depthImageWidth - 1; x++)
                    {
                        for (int i = 0; i < 6; i++)
                        {
                            int vertexX = x + quadOffsets[i].X;
                            int vertexY = y + quadOffsets[i].Y;

                            var point = table[depthImageWidth * vertexY + vertexX];

                            var vertex = new VertexPosition();
                            vertex.position         = new SharpDX.Vector4(point.X, point.Y, vertexX, vertexY);
                            vertices[vertexIndex++] = vertex;
                        }
                    }
                }

                var stream = new DataStream(numVertices * VertexPosition.SizeInBytes, true, true);

                stream.WriteRange(vertices);
                stream.Position = 0;

                var vertexBufferDesc = new BufferDescription()
                {
                    BindFlags      = BindFlags.VertexBuffer,
                    CpuAccessFlags = CpuAccessFlags.None,
                    Usage          = ResourceUsage.Default,
                    SizeInBytes    = numVertices * VertexPosition.SizeInBytes,
                };

                vertexBuffer = new SharpDX.Direct3D11.Buffer(device, stream, vertexBufferDesc);

                vertexBufferBinding = new VertexBufferBinding(vertexBuffer, VertexPosition.SizeInBytes, 0);

                stream.Dispose();

                var colorImage = new RoomAliveToolkit.ARGBImage(colorImageWidth, colorImageHeight);

                ProjectorCameraEnsemble.LoadFromTiff(imagingFactory, colorImage, directory + "/camera" + camera.name + "/colorDark.tiff");

                var depthImage = new RoomAliveToolkit.ShortImage(depthImageWidth, depthImageHeight);

                ProjectorCameraEnsemble.LoadFromTiff(imagingFactory, depthImage, directory + "/camera" + camera.name + "/mean.tiff");

                lock (renderLock) // defensive: rendering may already be in progress on another thread
                {
                    UpdateColorImage(device.ImmediateContext, colorImage.DataIntPtr);
                    UpdateDepthImage(device.ImmediateContext, depthImage.DataIntPtr);
                }

                colorImage.Dispose();
                depthImage.Dispose();
            }
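A hedged construction sketch; the device creation is standard SharpDX, but the ensemble/camera loading and the resource dictionary are assumptions:

                // requires System.Collections.Generic; ensemble loading is assumed to happen elsewhere
                var device = new SharpDX.Direct3D11.Device(SharpDX.Direct3D.DriverType.Hardware);
                var renderLock = new object();
                var resources = new Dictionary<ProjectorCameraEnsemble.Camera, CameraDeviceResource>();
                foreach (var camera in ensemble.cameras)
                    resources[camera] = new CameraDeviceResource(device, camera, renderLock, "calibration");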
Code example #33
        public void CaptureDepthAndColor(string directory)
        {
            // foreach camera:
            // average a bunch of frames to find a good depth image
            // get calibration
            // TODO: parallelize

            foreach (var camera in cameras)
            {
                string cameraDirectory = directory + "/camera" + camera.name;
                if (!Directory.Exists(cameraDirectory))
                    Directory.CreateDirectory(cameraDirectory);

                // compute mean and variance of depth image
                var sum = new FloatImage(depthWidth, depthHeight);
                sum.Zero();
                var sumSquared = new FloatImage(depthWidth, depthHeight);
                sumSquared.Zero();
                var count = new ShortImage(depthWidth, depthHeight);
                count.Zero();
                var depth = new ShortImage(depthWidth, depthHeight);
                for (int i = 0; i < 100; i++)
                {
                    var depthBytes = camera.Client.LatestDepthImage();
                    Marshal.Copy(depthBytes, 0, depth.DataIntPtr, depthWidth * depthHeight * 2);
                    Console.WriteLine("acquired depth image " + i);
                    for (int y = 0; y < depthHeight; y++)
                        for (int x = 0; x < depthWidth; x++)
                            if (depth[x, y] != 0)
                            {
                                ushort d = depth[x, y];
                                count[x, y]++;
                                sum[x, y] += d;
                                sumSquared[x, y] += d * d;
                            }
                }

                var meanImage = new FloatImage(depthWidth, depthHeight);
                meanImage.Zero(); // not all pixels will be assigned
                var varianceImage = new FloatImage(depthWidth, depthHeight);
                varianceImage.Zero(); // not all pixels will be assigned

                for (int y = 0; y < depthHeight; y++)
                    for (int x = 0; x < depthWidth; x++)
                    {
                        if (count[x, y] > 50) // pixel must be valid in a majority of the 100 frames
                        {
                            float mean = sum[x, y] / count[x, y];
                            meanImage[x, y] = mean;
                            float variance = sumSquared[x, y] / count[x, y] - mean * mean;
                            varianceImage[x, y] = variance;
                        }
                    }

                // WIC doesn't support encoding float tiff images, so for now we write to a binary file
                meanImage.SaveToFile(cameraDirectory + "/mean.bin");
                varianceImage.SaveToFile(cameraDirectory + "/variance.bin");

                // create a short version that we can write, used only for debugging
                var meanDepthShortImage = new ShortImage(depthWidth, depthHeight);
                for (int y = 0; y < depthHeight; y++)
                    for (int x = 0; x < depthWidth; x++)
                        meanDepthShortImage[x, y] = (ushort)meanImage[x, y];
                SaveToTiff(imagingFactory, meanDepthShortImage, cameraDirectory + "/mean.tiff");

                // convert to world coordinates and save to ply file
                camera.calibration = camera.Client.GetCalibration();
                var depthFrameToCameraSpaceTable = camera.calibration.ComputeDepthFrameToCameraSpaceTable();
                var world = new Float3Image(depthWidth, depthHeight); // TODO: move out/reuse
                for (int y = 0; y < depthHeight; y++)
                    for (int x = 0; x < depthWidth; x++)
                    {
                        var pointF = depthFrameToCameraSpaceTable[y * depthWidth + x];
                        Float3 worldPoint;
                        worldPoint.x = pointF.X * meanImage[x, y];
                        worldPoint.y = pointF.Y * meanImage[x, y];
                        worldPoint.z = meanImage[x, y];
                        world[x, y] = worldPoint;
                    }
                SaveToPly(cameraDirectory + "/mean.ply", world);

                // TODO: consider writing OBJ instead
            }

            // connect to projectors
            foreach (var projector in projectors)
            {
                //var binding = new NetTcpBinding();
                //binding.Security.Mode = SecurityMode.None;
                //var uri = "net.tcp://" + projector.hostNameOrAddress + ":9001/ProjectorServer/service";
                //var address = new EndpointAddress(uri);
                //projector.client = new ProjectorServerClient(binding, address);
                projector.Client.OpenDisplay(projector.displayIndex);
            }

            // collect color images when projecting all white and all black
            // set projectors to white
            foreach (var projector in projectors)
                projector.Client.SetColor(projector.displayIndex, 1f, 1f, 1f);
            System.Threading.Thread.Sleep(5000); // let projector output and camera auto-exposure settle
            foreach (var camera in cameras)
            {
                // save color image
                string cameraDirectory = directory + "/camera" + camera.name;
                var jpegBytes = camera.Client.LatestJPEGImage();
                File.WriteAllBytes(cameraDirectory + "/color.jpg", jpegBytes);
                var colorBytes = camera.Client.LatestRGBImage();
                var image = new ARGBImage(colorWidth, colorHeight);
                Marshal.Copy(colorBytes, 0, image.DataIntPtr, colorWidth * colorHeight * 4);
                SaveToTiff(imagingFactory, image, cameraDirectory + "/color.tiff");
                image.Dispose();

            }
            foreach (var projector in projectors)
                projector.Client.SetColor(projector.displayIndex, 0f, 0f, 0f);
            System.Threading.Thread.Sleep(5000); // settle again before capturing the dark images
            foreach (var camera in cameras)
            {
                // save color image
                string cameraDirectory = directory + "/camera" + camera.name;
                var jpegBytes = camera.Client.LatestJPEGImage();
                File.WriteAllBytes(cameraDirectory + "/colorDark.jpg", jpegBytes);
                var colorBytes = camera.Client.LatestRGBImage();
                var image = new ARGBImage(colorWidth, colorHeight);
                Marshal.Copy(colorBytes, 0, image.DataIntPtr, colorWidth * colorHeight * 4);
                SaveToTiff(imagingFactory, image, cameraDirectory + "/colorDark.tiff");
                image.Dispose();

            }

            // close all displays
            foreach (var projector in projectors)
            {
                projector.Client.CloseDisplay(projector.displayIndex);
            }
        }
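The mean/variance pass uses Var[x] = E[x²] - E[x]²; with float accumulators and millimeter-scale samples, the sumSquared term can lose precision. A numerically stabler per-pixel alternative is Welford's online update, sketched below for a single pixel; this is an alternative suggestion, not what the toolkit does:

            double mean = 0, m2 = 0; int n = 0;
            foreach (ushort d in samples) // samples: this pixel's depth readings across frames
            {
                if (d == 0) continue;     // skip invalid readings, as above
                n++;
                double delta = d - mean;
                mean += delta / n;
                m2 += delta * (d - mean); // second factor uses the updated mean
            }
            double variance = n > 1 ? m2 / n : 0; // population variance, comparable to sumSquared/count - mean*mean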