/// <summary>
/// Builds the full structured-light sequence: for each X bit, then each Y bit
/// (most significant bit first), the Gray-code pattern image followed by its
/// RGB-inverted counterpart.
/// </summary>
/// <returns>An array of 2 * (numXBits + numYBits) pattern images.</returns>
public ARGBImage[] Generate()
{
    var images = new ARGBImage[2 * (numXBits + numYBits)];
    int next = 0;

    // present most significant bit first
    for (int bit = numXBits - 1; bit >= 0; bit--)
    {
        var pattern = new ARGBImage(width, height);
        images[next++] = GenerateX(pattern, bit);

        // inverted copy, used to threshold robustly against ambient light
        var inverse = new ARGBImage(width, height);
        inverse.Copy(pattern);
        inverse.InverseRGB();
        images[next++] = inverse;
    }

    for (int bit = numYBits - 1; bit >= 0; bit--)
    {
        var pattern = new ARGBImage(width, height);
        images[next++] = GenerateY(pattern, bit);

        var inverse = new ARGBImage(width, height);
        inverse.Copy(pattern);
        inverse.InverseRGB();
        images[next++] = inverse;
    }

    return images;
}
/// <summary>
/// Produces the complete pattern set: X-axis Gray-code images first, then
/// Y-axis images, most significant bit first, each immediately followed by
/// its RGB inverse.
/// </summary>
/// <returns>2 * (numXBits + numYBits) images in display order.</returns>
public ARGBImage[] Generate()
{
    var images = new ARGBImage[2 * (numXBits + numYBits)];
    int outIndex = 0;

    for (int i = 0; i < numXBits; i++)
    {
        // present most significant bit first
        int bitIndex = numXBits - i - 1;
        var pattern = new ARGBImage(width, height);
        images[outIndex++] = GenerateX(pattern, bitIndex);

        var inverted = new ARGBImage(width, height);
        inverted.Copy(pattern);
        inverted.InverseRGB();
        images[outIndex++] = inverted;
    }

    for (int i = 0; i < numYBits; i++)
    {
        // present most significant bit first
        int bitIndex = numYBits - i - 1;
        var pattern = new ARGBImage(width, height);
        images[outIndex++] = GenerateY(pattern, bitIndex);

        var inverted = new ARGBImage(width, height);
        inverted.Copy(pattern);
        inverted.InverseRGB();
        images[outIndex++] = inverted;
    }

    return images;
}
/// <summary>
/// Fills this byte image from <paramref name="a"/>, keeping only the red
/// channel and sampling every <paramref name="factor"/>-th pixel in both
/// dimensions. Assumes a's dimensions are at least factor times this image's.
/// </summary>
public void DecimateAndReduce(ARGBImage a, int factor)
{
    byte* dst = data;
    for (int row = 0; row < height; row++)
    {
        // start of the corresponding (undecimated) row in the source
        ARGB32* src = a.Data(0, row * factor);
        for (int col = 0; col < width; col++)
        {
            *dst++ = src->R;
            src += factor;
        }
    }
}
/// <summary>
/// Fills this image with a horizontally mirrored copy of <paramref name="a"/>.
/// Both images are assumed to have identical dimensions.
/// </summary>
public void XMirror(ARGBImage a)
{
    ARGB32* dst = data;
    for (int row = 0; row < height; row++)
    {
        // walk the source row right-to-left while writing left-to-right
        ARGB32* src = a.Data(width - 1, row);
        for (int col = 0; col < width; col++)
            *dst++ = *src--;
    }
}
/// <summary>
/// Fills this image with a vertically mirrored copy of <paramref name="a"/>.
/// Both images are assumed to have identical dimensions.
/// </summary>
public void YMirror(ARGBImage a)
{
    ARGB32* dst = data;
    for (int row = 0; row < height; row++)
    {
        // copy source rows in reverse vertical order
        ARGB32* src = a.Data(0, height - row - 1);
        for (int col = 0; col < width; col++)
            *dst++ = *src++;
    }
}
// Copies pixels from argbImage into the same coordinates of this image over
// the region x in [startX, w), y in [startY, h). Row strides are
// argbImage.Width (source) and this.width (destination).
// NOTE(review): the loop bounds treat w and h as EXCLUSIVE END COORDINATES,
// not as the rectangle's width/height — with startX/startY > 0 the copied
// region shrinks. Verify callers pass end coordinates; if they pass
// dimensions, the bounds should be startX + w / startY + h.
public void CopyRectangle(ARGBImage argbImage, int startX, int startY, int w, int h)
{
    ARGB32 *pOrig = argbImage.Data(0, 0);
    ARGB32 *pOutOrig = data;
    ARGB32 *p;
    ARGB32 *pOut;
    for (int j = startY; j < h; j++)
    {
        for (int i = startX; i < w; i++)
        {
            // recompute addresses from (i, j) each iteration; strides may differ
            p = pOrig + j * argbImage.Width + i;
            pOut = pOutOrig + j * width + i;
            *pOut = *p;
        }
    }
}
// Copies pixels from argbImage into the same coordinates of this image over
// the region x in [startX, w), y in [startY, h). Source rows are strided by
// argbImage.Width, destination rows by this.width.
// NOTE(review): w and h act as exclusive end coordinates here, not as the
// rectangle's dimensions — confirm this matches caller expectations.
public void CopyRectangle(ARGBImage argbImage, int startX, int startY, int w, int h)
{
    ARGB32* pOrig = argbImage.Data(0, 0);
    ARGB32* pOutOrig = data;
    ARGB32* p;
    ARGB32* pOut;
    for (int j = startY; j < h; j++)
    {
        for (int i = startX; i < w; i++)
        {
            // per-pixel address computation handles the differing strides
            p = pOrig + j * argbImage.Width + i;
            pOut = pOutOrig + j * width + i;
            *pOut = *p;
        }
    }
}
/// <summary>
/// Renders the i-th Gray-code bit pattern along the vertical axis: every pixel
/// in row y is white when bit i of the Gray code of y is set, black otherwise.
/// Alpha is fully opaque. Returns the same image instance for chaining.
/// </summary>
/// <param name="image">Destination image; assumed to be width x height.</param>
/// <param name="i">Bit index of the Gray code to render.</param>
public unsafe ARGBImage GenerateY(ARGBImage image, int i)
{
    ARGB32* p = image.Data(0, 0);
    for (int y = 0; y < height; y++)
    {
        // Gray code changes only one bit from one row to the next
        int grayCode = y ^ (y >> 1);
        // the whole row shares one value, so compute it once
        byte luminance = (byte)(((grayCode >> i) & 1) * 255);
        for (int x = 0; x < width; x++)
        {
            p->R = luminance;
            p->G = luminance;
            p->B = luminance;
            p->A = 255;
            p++;
        }
    }
    return image;
}
/// <summary>
/// Writes the vertical Gray-code pattern for bit <paramref name="i"/> into
/// <paramref name="image"/>: rows whose Gray code has that bit set are white,
/// the rest black; alpha is opaque. Returns the image for chaining.
/// </summary>
public unsafe ARGBImage GenerateY(ARGBImage image, int i)
{
    ARGB32* pixel = image.Data(0, 0);
    int mask = 1 << i;
    for (int row = 0; row < height; row++)
    {
        // Gray code: adjacent rows differ in exactly one bit
        int gray = row ^ (row >> 1);
        byte v = (gray & mask) != 0 ? (byte)255 : (byte)0;
        for (int col = 0; col < width; col++)
        {
            pixel->R = v;
            pixel->G = v;
            pixel->B = v;
            pixel->A = 255;
            pixel++;
        }
    }
    return image;
}
// Convenience overload: loads a TIFF into an ARGB image.
// The trailing 4 is forwarded to the full overload — presumably bytes per
// pixel for 32bpp ARGB; confirm against that overload's signature.
public static void LoadFromTiff(SharpDX.WIC.ImagingFactory imagingFactory, ARGBImage image, string filename)
{
    LoadFromTiff(imagingFactory, image, filename, 4);
}
// Acquires per-camera depth statistics and color images and writes them under
// the given directory (one "cameraN" subdirectory per camera):
//   - mean.bin / variance.bin: per-pixel mean and variance of 100 depth frames
//   - mean.tiff: mean depth truncated to ushort (debugging only)
//   - mean.ply: mean depth back-projected to camera-space points, in meters
//   - color.jpg / color.tiff: latest color image
// TODO: parallelize
public void CaptureDepthAndColor(string directory)
{
    // foreach camera:
    //   average a bunch of frames to find a good depth image
    //   get calibration
    foreach (var camera in cameras)
    {
        string cameraDirectory = directory + "/camera" + camera.name;
        if (!Directory.Exists(cameraDirectory))
            Directory.CreateDirectory(cameraDirectory);

        // compute mean and variance of depth image over 100 frames,
        // accumulating sum, sum of squares, and a per-pixel valid-sample count
        var sum = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
        sum.Zero();
        var sumSquared = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
        sumSquared.Zero();
        var count = new ShortImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
        count.Zero();
        var depth = new ShortImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
        for (int i = 0; i < 100; i++)
        {
            var depthBytes = camera.Client.LatestDepthImage();
            // 2 bytes per pixel (16-bit depth)
            Marshal.Copy(depthBytes, 0, depth.DataIntPtr, Kinect2Calibration.depthImageWidth * Kinect2Calibration.depthImageHeight * 2);
            Console.WriteLine("acquired depth image " + i);
            for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
                for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                    if (depth[x, y] != 0)  // 0 means no depth reading at this pixel
                    {
                        ushort d = depth[x, y];
                        count[x, y]++;
                        sum[x, y] += d;
                        sumSquared[x, y] += d * d;
                    }
        }
        var meanImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
        meanImage.Zero(); // not all pixels will be assigned
        var varianceImage = new FloatImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
        varianceImage.Zero(); // not all pixels will be assigned
        for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
            for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
            {
                // require a majority of valid samples (>50 of 100) before trusting the pixel
                if (count[x, y] > 50)
                {
                    float mean = sum[x, y] / count[x, y];
                    meanImage[x, y] = mean;
                    // variance = E[d^2] - E[d]^2
                    float variance = sumSquared[x, y] / count[x, y] - mean * mean;
                    varianceImage[x, y] = variance;
                }
            }
        // WIC doesn't support encoding float tiff images, so for now we write to a binary file
        meanImage.SaveToFile(cameraDirectory + "/mean.bin");
        varianceImage.SaveToFile(cameraDirectory + "/variance.bin");

        // create a short version that we can write, used only for debugging
        var meanDepthShortImage = new ShortImage(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight);
        for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
            for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
                meanDepthShortImage[x, y] = (ushort)meanImage[x, y];
        SaveToTiff(imagingFactory, meanDepthShortImage, cameraDirectory + "/mean.tiff");

        // convert to world coordinates and save to ply file
        camera.calibration = camera.Client.GetCalibration();
        var depthFrameToCameraSpaceTable = camera.calibration.ComputeDepthFrameToCameraSpaceTable();
        var world = new Float3Image(Kinect2Calibration.depthImageWidth, Kinect2Calibration.depthImageHeight); // TODO: move out/reuse
        for (int y = 0; y < Kinect2Calibration.depthImageHeight; y++)
            for (int x = 0; x < Kinect2Calibration.depthImageWidth; x++)
            {
                var pointF = depthFrameToCameraSpaceTable[y * Kinect2Calibration.depthImageWidth + x];
                // depth is in millimeters; convert to meters for the point cloud
                float meanDepthMeters = meanImage[x, y] / 1000.0f;
                Float3 worldPoint;
                worldPoint.x = pointF.X * meanDepthMeters;
                worldPoint.y = pointF.Y * meanDepthMeters;
                worldPoint.z = meanDepthMeters;
                world[x, y] = worldPoint;
            }
        SaveToPly(cameraDirectory + "/mean.ply", world); // TODO: consider writing OBJ instead
    }

    //// connect to projectors
    //foreach (var projector in projectors)
    //{
    //    projector.Client.OpenDisplay(projector.displayIndex);
    //}

    // collect color images; this is not necessary for calibration, but is nice to have for visualization
    //foreach (var projector in projectors)
    //    projector.Client.SetColor(projector.displayIndex, 0f, 0f, 0f);
    //System.Threading.Thread.Sleep(5000);
    foreach (var camera in cameras)
    {
        // save color image (both compressed JPEG and lossless TIFF)
        string cameraDirectory = directory + "/camera" + camera.name;
        var jpegBytes = camera.Client.LatestJPEGImage();
        File.WriteAllBytes(cameraDirectory + "/color.jpg", jpegBytes);
        var colorBytes = camera.Client.LatestRGBImage();
        var image = new ARGBImage(Kinect2Calibration.colorImageWidth, Kinect2Calibration.colorImageHeight);
        // 4 bytes per pixel (32bpp color)
        Marshal.Copy(colorBytes, 0, image.DataIntPtr, Kinect2Calibration.colorImageWidth * Kinect2Calibration.colorImageHeight * 4);
        SaveToTiff(imagingFactory, image, cameraDirectory + "/color.tiff");
        image.Dispose();
    }

    //// close all displays
    //foreach (var projector in projectors)
    //{
    //    projector.Client.CloseDisplay(projector.displayIndex);
    //}
}
// Convenience overload: saves an ARGB image as a 32bpp RGBA TIFF.
// The trailing 4 is forwarded to the full overload — presumably bytes per
// pixel for the 32bpp format; confirm against that overload's signature.
public static void SaveToTiff(SharpDX.WIC.ImagingFactory imagingFactory, ARGBImage image, string filename)
{
    SaveToTiff(imagingFactory, image, filename, SharpDX.WIC.PixelFormat.Format32bppRGBA, 4);
}
// encapsulates d3d resources for a camera
// Creates the GPU textures, vertex buffer, and shader resource views needed to
// render one camera's depth + color data, then uploads the camera's saved
// colorDark.tiff and mean.tiff images from the given directory.
public CameraDeviceResource(SharpDX.Direct3D11.Device device, ProjectorCameraEnsemble.Camera camera, Object renderLock, string directory)
{
    this.device = device;
    this.camera = camera;
    this.renderLock = renderLock;

    // Kinect depth image: 512x424, 16-bit, CPU-writable (Dynamic/Write) so it
    // can be updated each frame from the depth stream
    var depthImageTextureDesc = new Texture2DDescription()
    {
        Width = 512,
        Height = 424,
        MipLevels = 1,
        ArraySize = 1,
        Format = SharpDX.DXGI.Format.R16_UInt,
        SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
        Usage = ResourceUsage.Dynamic,
        BindFlags = BindFlags.ShaderResource,
        CpuAccessFlags = CpuAccessFlags.Write,
    };
    depthImageTexture = new Texture2D(device, depthImageTextureDesc);
    depthImageTextureRV = new ShaderResourceView(device, depthImageTexture);

    // float depth render targets (two, presumably for ping-pong passes —
    // confirm against the render loop); GPU-only, both sampled and rendered to
    var floatDepthImageTextureDesc = new Texture2DDescription()
    {
        Width = 512,
        Height = 424,
        MipLevels = 1,
        ArraySize = 1,
        Format = SharpDX.DXGI.Format.R32_Float,
        SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
        Usage = ResourceUsage.Default,
        BindFlags = BindFlags.RenderTarget | BindFlags.ShaderResource,
        CpuAccessFlags = CpuAccessFlags.None,
    };
    floatDepthImageTexture = new Texture2D(device, floatDepthImageTextureDesc);
    floatDepthImageRV = new ShaderResourceView(device, floatDepthImageTexture);
    floatDepthImageRenderTargetView = new RenderTargetView(device, floatDepthImageTexture);
    floatDepthImageTexture2 = new Texture2D(device, floatDepthImageTextureDesc);
    floatDepthImageRV2 = new ShaderResourceView(device, floatDepthImageTexture2);
    floatDepthImageRenderTargetView2 = new RenderTargetView(device, floatDepthImageTexture2);

    // Kinect color image: CPU-writable staging texture for uploads...
    var colorImageStagingTextureDesc = new Texture2DDescription()
    {
        Width = colorImageWidth,
        Height = colorImageHeight,
        MipLevels = 1,
        ArraySize = 1,
        Format = SharpDX.DXGI.Format.B8G8R8A8_UNorm,
        SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
        Usage = ResourceUsage.Dynamic,
        BindFlags = BindFlags.ShaderResource,
        CpuAccessFlags = CpuAccessFlags.Write
    };
    colorImageStagingTexture = new Texture2D(device, colorImageStagingTextureDesc);

    // ...and a GPU-side texture with a full mip chain (MipLevels = 0 means
    // all levels; GenerateMipMaps enables GenerateMips on it)
    var colorImageTextureDesc = new Texture2DDescription()
    {
        Width = colorImageWidth,
        Height = colorImageHeight,
        MipLevels = 0,
        ArraySize = 1,
        Format = SharpDX.DXGI.Format.B8G8R8A8_UNorm,
        SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
        Usage = ResourceUsage.Default,
        BindFlags = BindFlags.ShaderResource | BindFlags.RenderTarget,
        CpuAccessFlags = CpuAccessFlags.None,
        OptionFlags = ResourceOptionFlags.GenerateMipMaps
    };
    colorImageTexture = new Texture2D(device, colorImageTextureDesc);
    colorImageTextureRV = new ShaderResourceView(device, colorImageTexture);

    // vertex buffer: two triangles (6 vertices) per depth-pixel quad; each
    // vertex stores the calibration table's camera-space XY factors plus the
    // integer depth-image coordinates
    var table = camera.calibration.ComputeDepthFrameToCameraSpaceTable();
    int numVertices = 6 * (depthImageWidth - 1) * (depthImageHeight - 1);
    var vertices = new VertexPosition[numVertices];
    // corner offsets for the two triangles of a quad
    Int3[] quadOffsets = new Int3[]
    {
        new Int3(0, 0, 0),
        new Int3(1, 0, 0),
        new Int3(0, 1, 0),
        new Int3(1, 0, 0),
        new Int3(1, 1, 0),
        new Int3(0, 1, 0),
    };
    int vertexIndex = 0;
    for (int y = 0; y < depthImageHeight - 1; y++)
    {
        for (int x = 0; x < depthImageWidth - 1; x++)
        {
            for (int i = 0; i < 6; i++)
            {
                int vertexX = x + quadOffsets[i].X;
                int vertexY = y + quadOffsets[i].Y;
                var point = table[depthImageWidth * vertexY + vertexX];
                var vertex = new VertexPosition();
                vertex.position = new SharpDX.Vector4(point.X, point.Y, vertexX, vertexY);
                vertices[vertexIndex++] = vertex;
            }
        }
    }
    var stream = new DataStream(numVertices * VertexPosition.SizeInBytes, true, true);
    stream.WriteRange(vertices);
    stream.Position = 0;
    var vertexBufferDesc = new BufferDescription()
    {
        BindFlags = BindFlags.VertexBuffer,
        CpuAccessFlags = CpuAccessFlags.None,
        Usage = ResourceUsage.Default,
        SizeInBytes = numVertices * VertexPosition.SizeInBytes,
    };
    vertexBuffer = new SharpDX.Direct3D11.Buffer(device, stream, vertexBufferDesc);
    vertexBufferBinding = new VertexBufferBinding(vertexBuffer, VertexPosition.SizeInBytes, 0);
    stream.Dispose();

    // upload the previously captured color and mean-depth images for this camera
    var colorImage = new RoomAliveToolkit.ARGBImage(colorImageWidth, colorImageHeight);
    ProjectorCameraEnsemble.LoadFromTiff(imagingFactory, colorImage, directory + "/camera" + camera.name + "/colorDark.tiff");
    var depthImage = new RoomAliveToolkit.ShortImage(depthImageWidth, depthImageHeight);
    ProjectorCameraEnsemble.LoadFromTiff(imagingFactory, depthImage, directory + "/camera" + camera.name + "/mean.tiff");
    lock (renderLock) // necessary?
    {
        UpdateColorImage(device.ImmediateContext, colorImage.DataIntPtr);
        UpdateDepthImage(device.ImmediateContext, depthImage.DataIntPtr);
    }
    colorImage.Dispose();
    depthImage.Dispose();
}
/// <summary>
/// Downsamples <paramref name="a"/> by <paramref name="factor"/> in both
/// dimensions while reducing each sampled pixel to its red channel, writing
/// the result into this byte image.
/// </summary>
public void DecimateAndReduce(ARGBImage a, int factor)
{
    byte* outPtr = data;
    for (int y = 0; y < height; y++)
    {
        // beginning of the matching (full-resolution) source row
        ARGB32* inPtr = a.Data(0, y * factor);
        for (int x = 0; x < width; x++)
        {
            *outPtr = (*inPtr).R;
            outPtr++;
            inPtr += factor;
        }
    }
}
// Displays each projector's Gray-code pattern sequence and captures a grayscale
// camera image of every pattern, saved as
// directory/projectorN/cameraM/grayCode#.tiff. Also records per-camera
// "projectorLabels" images with the projector names on screen, and stores each
// projector's display dimensions in the configuration.
public void CaptureGrayCodes(string directory)
{
    // for each projector
    //   for each gray code
    //     display gray code
    //     for each camera (fork?)
    //       capture color image; save to file
    //       store as projector#/camera#/grayCode#
    // foreach camera
    //   get calibration
    //   save depth map to file
    var grayImage = new ByteImage(colorWidth, colorHeight);

    // pick up projector image dimensions from server and save them in configuration
    // put up projector's name on each
    foreach (var projector in projectors)
    {
        var size = projector.Client.Size(projector.displayIndex);
        projector.width = size.Width;
        projector.height = size.Height;
        projector.Client.OpenDisplay(projector.displayIndex);
        projector.Client.DisplayName(projector.displayIndex, projector.name);
    }

    // let AGC settle
    System.Threading.Thread.Sleep(2000);

    // save an image with projector name displayed, useful for later for visualization of results
    foreach (var camera in cameras)
    {
        string cameraDirectory = directory + "/camera" + camera.name;
        if (!Directory.Exists(cameraDirectory))
            Directory.CreateDirectory(cameraDirectory);
        //var jpegBytes = camera.client.LatestJPEGImage();
        //File.WriteAllBytes(cameraDirectory + "/projectorLabels.jpg", jpegBytes);
        var colorBytes = camera.Client.LatestRGBImage();
        var image = new ARGBImage(colorWidth, colorHeight);
        // 4 bytes per pixel (32bpp color)
        Marshal.Copy(colorBytes, 0, image.DataIntPtr, colorWidth * colorHeight * 4);
        SaveToTiff(imagingFactory, image, cameraDirectory + "/projectorLabels.tiff");
        image.Dispose();
    }

    // TODO: consider combining with later aquiring color and depth

    // set all projectors to black
    foreach (var projector in projectors)
        projector.Client.SetColor(projector.displayIndex, 0, 0, 0);

    foreach (var projector in projectors)
    {
        string projectorDirectory = directory + "/projector" + projector.name;
        if (!Directory.Exists(projectorDirectory))
            Directory.CreateDirectory(projectorDirectory);
        int numberOfGrayCodeImages = projector.Client.NumberOfGrayCodeImages(projector.displayIndex);

        // set display to gray, to give AGC a chance to settle
        projector.Client.SetColor(projector.displayIndex, 0.5f, 0.5f, 0.5f);
        System.Threading.Thread.Sleep(1500);

        for (int i = 0; i < numberOfGrayCodeImages; i++)
        {
            projector.Client.DisplayGrayCode(projector.displayIndex, i);
            // wait for the image to be displayed and give camera AGC time to settle
            System.Threading.Thread.Sleep(500);
            // TODO: parallelize?
            foreach (var camera in cameras)
            {
                string cameraDirectory = projectorDirectory + "/camera" + camera.name;
                if (!Directory.Exists(cameraDirectory))
                    Directory.CreateDirectory(cameraDirectory);
                //// acquire color frames until exposure and gain have settled to a stable value
                //int numUnchanging = 0;
                //long lastExposureTime = 0;
                //float lastGain = 0;
                //const int numUnchangingThreshold = 5;
                //byte[] colorImageBytes = null;
                //while (numUnchanging < numUnchangingThreshold)
                //{
                //    colorImageBytes = camera.client.NextColorImage(); // consider providing a way of getting color exposure etc. w/o calling NextColorImage
                //    long exposureTime = camera.client.LastColorExposureTimeTicks();
                //    float gain = camera.client.LastColorGain();
                //    if ((gain == lastGain) && (exposureTime == lastExposureTime))
                //        numUnchanging++;
                //    lastGain = gain;
                //    lastExposureTime = exposureTime;
                //}
                var colorImageBytes = camera.Client.LatestYUVImage();
                // convert YUY2 to grayscale: keep the Y byte of each 2-byte pixel pair
                for (int y = 0; y < colorHeight; y++)
                    for (int x = 0; x < colorWidth; x++)
                        grayImage[x, y] = colorImageBytes[2 * (colorWidth * y + x)];
                // save to file
                SaveToTiff(imagingFactory, grayImage, cameraDirectory + "/grayCode" + i + ".tiff");
            }
        }
        // return this projector to black before moving on to the next
        projector.Client.SetColor(projector.displayIndex, 0, 0, 0);
    }

    // close all displays
    foreach (var projector in projectors)
    {
        projector.Client.CloseDisplay(projector.displayIndex);
    }
}
// Acquires per-camera depth statistics (mean/variance over 100 frames, written
// as mean.bin / variance.bin / mean.tiff / mean.ply) and captures color images
// with all projectors at full white (color.jpg/.tiff) and all black
// (colorDark.jpg/.tiff) into directory/cameraN.
// TODO: parallelize
public void CaptureDepthAndColor(string directory)
{
    // foreach camera:
    //   average a bunch of frames to find a good depth image
    //   get calibration
    foreach (var camera in cameras)
    {
        string cameraDirectory = directory + "/camera" + camera.name;
        if (!Directory.Exists(cameraDirectory))
            Directory.CreateDirectory(cameraDirectory);

        // compute mean and variance of depth image over 100 frames
        var sum = new FloatImage(depthWidth, depthHeight);
        sum.Zero();
        var sumSquared = new FloatImage(depthWidth, depthHeight);
        sumSquared.Zero();
        var count = new ShortImage(depthWidth, depthHeight);
        count.Zero();
        var depth = new ShortImage(depthWidth, depthHeight);
        for (int i = 0; i < 100; i++)
        {
            var depthBytes = camera.Client.LatestDepthImage();
            // 2 bytes per pixel (16-bit depth)
            Marshal.Copy(depthBytes, 0, depth.DataIntPtr, depthWidth * depthHeight * 2);
            Console.WriteLine("acquired depth image " + i);
            for (int y = 0; y < depthHeight; y++)
                for (int x = 0; x < depthWidth; x++)
                    if (depth[x, y] != 0)  // 0 means no depth reading at this pixel
                    {
                        ushort d = depth[x, y];
                        count[x, y]++;
                        sum[x, y] += d;
                        sumSquared[x, y] += d * d;
                    }
        }
        var meanImage = new FloatImage(depthWidth, depthHeight);
        meanImage.Zero(); // not all pixels will be assigned
        var varianceImage = new FloatImage(depthWidth, depthHeight);
        varianceImage.Zero(); // not all pixels will be assigned
        for (int y = 0; y < depthHeight; y++)
            for (int x = 0; x < depthWidth; x++)
            {
                // require a majority of valid samples (>50 of 100) before trusting the pixel
                if (count[x, y] > 50)
                {
                    float mean = sum[x, y] / count[x, y];
                    meanImage[x, y] = mean;
                    // variance = E[d^2] - E[d]^2
                    float variance = sumSquared[x, y] / count[x, y] - mean * mean;
                    varianceImage[x, y] = variance;
                }
            }
        // WIC doesn't support encoding float tiff images, so for now we write to a binary file
        meanImage.SaveToFile(cameraDirectory + "/mean.bin");
        varianceImage.SaveToFile(cameraDirectory + "/variance.bin");

        // create a short version that we can write, used only for debugging
        var meanDepthShortImage = new ShortImage(depthWidth, depthHeight);
        for (int y = 0; y < depthHeight; y++)
            for (int x = 0; x < depthWidth; x++)
                meanDepthShortImage[x, y] = (ushort)meanImage[x, y];
        SaveToTiff(imagingFactory, meanDepthShortImage, cameraDirectory + "/mean.tiff");

        // convert to world coordinates and save to ply file
        camera.calibration = camera.Client.GetCalibration();
        var depthFrameToCameraSpaceTable = camera.calibration.ComputeDepthFrameToCameraSpaceTable();
        var world = new Float3Image(depthWidth, depthHeight); // TODO: move out/reuse
        for (int y = 0; y < depthHeight; y++)
            for (int x = 0; x < depthWidth; x++)
            {
                var pointF = depthFrameToCameraSpaceTable[y * depthWidth + x];
                // NOTE(review): no mm-to-m conversion here, so the .ply points
                // stay in depth-sensor units (millimeters), unlike the sibling
                // variant of this method which divides by 1000 — confirm intended.
                Float3 worldPoint;
                worldPoint.x = pointF.X * meanImage[x, y];
                worldPoint.y = pointF.Y * meanImage[x, y];
                worldPoint.z = meanImage[x, y];
                world[x, y] = worldPoint;
            }
        SaveToPly(cameraDirectory + "/mean.ply", world); // TODO: consider writing OBJ instead
    }

    // connect to projectors
    foreach (var projector in projectors)
    {
        //var binding = new NetTcpBinding();
        //binding.Security.Mode = SecurityMode.None;
        //var uri = "net.tcp://" + projector.hostNameOrAddress + ":9001/ProjectorServer/service";
        //var address = new EndpointAddress(uri);
        //projector.client = new ProjectorServerClient(binding, address);
        projector.Client.OpenDisplay(projector.displayIndex);
    }

    // collect color images when projecting all white and all black

    // set projectors to white
    foreach (var projector in projectors)
        projector.Client.SetColor(projector.displayIndex, 1f, 1f, 1f);
    // give displays and camera AGC time to settle
    System.Threading.Thread.Sleep(5000);
    foreach (var camera in cameras)
    {
        // save color image (both compressed JPEG and lossless TIFF)
        string cameraDirectory = directory + "/camera" + camera.name;
        var jpegBytes = camera.Client.LatestJPEGImage();
        File.WriteAllBytes(cameraDirectory + "/color.jpg", jpegBytes);
        var colorBytes = camera.Client.LatestRGBImage();
        var image = new ARGBImage(colorWidth, colorHeight);
        // 4 bytes per pixel (32bpp color)
        Marshal.Copy(colorBytes, 0, image.DataIntPtr, colorWidth * colorHeight * 4);
        SaveToTiff(imagingFactory, image, cameraDirectory + "/color.tiff");
        image.Dispose();
    }

    // set projectors to black and capture the dark images
    foreach (var projector in projectors)
        projector.Client.SetColor(projector.displayIndex, 0f, 0f, 0f);
    System.Threading.Thread.Sleep(5000);
    foreach (var camera in cameras)
    {
        // save color image
        string cameraDirectory = directory + "/camera" + camera.name;
        var jpegBytes = camera.Client.LatestJPEGImage();
        File.WriteAllBytes(cameraDirectory + "/colorDark.jpg", jpegBytes);
        var colorBytes = camera.Client.LatestRGBImage();
        var image = new ARGBImage(colorWidth, colorHeight);
        Marshal.Copy(colorBytes, 0, image.DataIntPtr, colorWidth * colorHeight * 4);
        SaveToTiff(imagingFactory, image, cameraDirectory + "/colorDark.tiff");
        image.Dispose();
    }

    // close all displays
    foreach (var projector in projectors)
    {
        projector.Client.CloseDisplay(projector.displayIndex);
    }
}