public void TestCopy()
{
    using (BodyIndexFrameData frame = new BodyIndexFrameData())
    {
        using (DynamicBodyIndexTexture texture = new DynamicBodyIndexTexture(device))
        {
            texture.Copy(device.ImmediateContext, frame);
        }
    }
}
static void Main()
{
    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);

    RenderForm form = new RenderForm("Kinect body index sample");
    RenderDevice device = new RenderDevice(SharpDX.Direct3D11.DeviceCreationFlags.BgraSupport);
    RenderContext context = new RenderContext(device);
    DX11SwapChain swapChain = DX11SwapChain.FromHandle(device, form.Handle);

    PixelShader pixelShader = ShaderCompiler.CompileFromFile<PixelShader>(device, "BodyIndexView.fx", "PS_NormalizedView");

    KinectSensor sensor = KinectSensor.GetDefault();
    sensor.Open();

    bool doQuit = false;
    bool doUpload = false;
    BodyIndexFrameData currentData = null;
    DynamicBodyIndexTexture texture = new DynamicBodyIndexTexture(device);

    // Latch the newest body index frame; the render loop uploads it on the next tick
    KinectSensorBodyIndexFrameProvider provider = new KinectSensorBodyIndexFrameProvider(sensor);
    provider.FrameReceived += (sender, args) => { currentData = args.FrameData; doUpload = true; };

    form.KeyDown += (sender, args) => { if (args.KeyCode == Keys.Escape) { doQuit = true; } };

    RenderLoop.Run(form, () =>
    {
        if (doQuit)
        {
            form.Dispose();
            return;
        }

        if (doUpload)
        {
            texture.Copy(context, currentData);
            doUpload = false;
        }

        // Draw the normalized body index view with a full screen triangle
        context.RenderTargetStack.Push(swapChain);

        device.Primitives.ApplyFullTriVS(context);
        context.Context.PixelShader.Set(pixelShader);
        context.Context.PixelShader.SetSampler(0, device.SamplerStates.LinearClamp);
        context.Context.PixelShader.SetShaderResource(0, texture.NormalizedView);
        device.Primitives.FullScreenTriangle.Draw(context);

        context.RenderTargetStack.Pop();
        swapChain.Present(0, SharpDX.DXGI.PresentFlags.None);
    });

    swapChain.Dispose();
    context.Dispose();
    device.Dispose();
    texture.Dispose();
    provider.Dispose();
    pixelShader.Dispose();
    sensor.Close();
}
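The FrameReceived handler and the render loop above communicate through the two plain fields currentData and doUpload. If the provider raises its event on a thread other than the one running RenderLoop, a small latch such as the hypothetical FrameLatch<T> below (a sketch, not part of the sample library) makes the hand-off explicit and race-free.

// Hypothetical helper, not part of the sample library: hands the most recent
// frame from the sensor's event handler to the render loop exactly once.
sealed class FrameLatch<T> where T : class
{
    private readonly object gate = new object();
    private T latest;

    // Called from the FrameReceived handler.
    public void Set(T frame)
    {
        lock (gate) { latest = frame; }
    }

    // Called from the render loop; returns null when nothing new has arrived.
    public T Take()
    {
        lock (gate)
        {
            T frame = latest;
            latest = null;
            return frame;
        }
    }
}

With such a latch, the upload step in the render loop would read: var frame = latch.Take(); if (frame != null) { texture.Copy(context, frame); } and the doUpload flag would no longer be needed.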
static void Main()
{
    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);

    RenderForm form = new RenderForm("Kinect Simple filtered point cloud view sample");
    RenderDevice device = new RenderDevice(SharpDX.Direct3D11.DeviceCreationFlags.BgraSupport | DeviceCreationFlags.Debug);
    RenderContext context = new RenderContext(device);
    DX11SwapChain swapChain = DX11SwapChain.FromHandle(device, form.Handle);

    ComputeShader computeShader = ShaderCompiler.CompileFromFile<ComputeShader>(device, "PointCloudFilter.fx", "CS_Filter");
    VertexShader vertexShader = ShaderCompiler.CompileFromFile<VertexShader>(device, "PointCloudJointView.fx", "VS");
    PixelShader pixelShader = ShaderCompiler.CompileFromFile<PixelShader>(device, "PointCloudJointView.fx", "PS");

    BodyCameraPositionBuffer positionBuffer = new BodyCameraPositionBuffer(device);
    DX11StructuredBuffer colorTableBuffer = DX11StructuredBuffer.CreateImmutable<Color4>(device, ColorTable);

    DX11NullGeometry nullGeom = new DX11NullGeometry(device);
    nullGeom.Topology = SharpDX.Direct3D.PrimitiveTopology.PointList;
    InstancedIndirectBuffer indirectDrawBuffer = new InstancedIndirectBuffer(device);

    KinectSensor sensor = KinectSensor.GetDefault();
    sensor.Open();

    cbCamera camera = new cbCamera();
    camera.Projection = Matrix.PerspectiveFovLH(1.57f * 0.5f, 1.3f, 0.01f, 100.0f);
    camera.View = Matrix.Translation(0.0f, 0.0f, 2.0f);
    camera.Projection.Transpose();
    camera.View.Transpose();

    ConstantBuffer<cbCamera> cameraBuffer = new ConstantBuffer<cbCamera>(device);
    cameraBuffer.Update(context, ref camera);

    bool doQuit = false;
    bool uploadCamera = false;
    bool uploadBodyIndex = false;
    bool uploadBody = false;

    CameraRGBFrameData rgbFrame = new CameraRGBFrameData();
    DynamicCameraRGBTexture cameraTexture = new DynamicCameraRGBTexture(device);
    KinectSensorDepthFrameProvider provider = new KinectSensorDepthFrameProvider(sensor);
    provider.FrameReceived += (sender, args) => { rgbFrame.Update(sensor.CoordinateMapper, args.DepthData); uploadCamera = true; };

    BodyIndexFrameData bodyIndexFrame = null;
    DynamicBodyIndexTexture bodyIndexTexture = new DynamicBodyIndexTexture(device);
    KinectSensorBodyIndexFrameProvider bodyIndexProvider = new KinectSensorBodyIndexFrameProvider(sensor);
    bodyIndexProvider.FrameReceived += (sender, args) => { bodyIndexFrame = args.FrameData; uploadBodyIndex = true; };

    AppendPointCloudBuffer pointCloudBuffer = new AppendPointCloudBuffer(device);

    KinectBody[] bodyFrame = null;
    KinectSensorBodyFrameProvider bodyFrameProvider = new KinectSensorBodyFrameProvider(sensor);
    bodyFrameProvider.FrameReceived += (sender, args) => { bodyFrame = args.FrameData; uploadBody = true; };

    form.KeyDown += (sender, args) => { if (args.KeyCode == Keys.Escape) { doQuit = true; } };

    RenderLoop.Run(form, () =>
    {
        if (doQuit)
        {
            form.Dispose();
            return;
        }

        if (uploadCamera)
        {
            cameraTexture.Copy(context.Context, rgbFrame);
            uploadCamera = false;
        }

        if (uploadBodyIndex)
        {
            bodyIndexTexture.Copy(context.Context, bodyIndexFrame);
            uploadBodyIndex = false;
        }

        if (uploadBody)
        {
            positionBuffer.Copy(context, bodyFrame.TrackedOnly().ClosestBodies());
            uploadBody = false;
        }

        // Prepare the compute shader that filters the point cloud
        context.Context.ComputeShader.Set(computeShader);
        context.Context.ComputeShader.SetShaderResource(0, cameraTexture.ShaderView);
        context.Context.ComputeShader.SetShaderResource(1, bodyIndexTexture.RawView); // Raw view here, we do not sample
        context.Context.ComputeShader.SetUnorderedAccessView(0, pointCloudBuffer.UnorderedView, 0); // The trailing 0 resets the append buffer counter

        context.Context.Dispatch(Consts.DepthWidth / 8, Consts.DepthHeight / 8, 1); // No iDivUp needed, the depth resolution divides evenly

        context.Context.ComputeShader.SetUnorderedAccessView(0, null); // Unbind to keep the runtime happy; a bound UAV cannot be set as an SRV
        context.Context.CopyStructureCount(indirectDrawBuffer.ArgumentBuffer, 0, pointCloudBuffer.UnorderedView);

        // Draw the filtered point cloud buffer
        context.RenderTargetStack.Push(swapChain);
        context.Context.ClearRenderTargetView(swapChain.RenderView, SharpDX.Color.Black);

        context.Context.VertexShader.Set(vertexShader);
        context.Context.PixelShader.Set(pixelShader);
        context.Context.VertexShader.SetShaderResource(0, pointCloudBuffer.ShaderView);
        context.Context.VertexShader.SetShaderResource(1, positionBuffer.ShaderView);
        context.Context.VertexShader.SetShaderResource(2, colorTableBuffer.ShaderView);
        context.Context.VertexShader.SetConstantBuffer(0, cameraBuffer.Buffer);

        nullGeom.Bind(context, null);
        context.Context.DrawInstancedIndirect(indirectDrawBuffer.ArgumentBuffer, 0);

        context.Context.VertexShader.SetShaderResource(0, null); // Unbind to keep the runtime happy

        context.RenderTargetStack.Pop();
        swapChain.Present(0, SharpDX.DXGI.PresentFlags.None);
    });

    cameraBuffer.Dispose();
    cameraTexture.Dispose();
    bodyIndexTexture.Dispose();
    provider.Dispose();
    bodyIndexProvider.Dispose();
    pixelShader.Dispose();
    vertexShader.Dispose();
    sensor.Close();
    positionBuffer.Dispose();
    colorTableBuffer.Dispose();
    swapChain.Dispose();
    context.Dispose();
    device.Dispose();
}
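A note on the dispatch size above: a rounded-up integer division (often called iDivUp) is the usual way to size a Dispatch call when the image dimensions are not multiples of the thread-group size. It is unnecessary here because the Kinect v2 depth frame is 512 x 424, which divides evenly by the 8 x 8 group size the compute shader is assumed to use. A minimal sketch of such a helper, for reference only:

// Hypothetical helper, shown only to illustrate the iDivUp comment above.
static class DispatchMath
{
    // Rounded-up integer division: the number of thread groups needed to
    // cover 'size' pixels with groups of 'groupSize' threads.
    public static int DivUp(int size, int groupSize)
    {
        return (size + groupSize - 1) / groupSize;
    }
}

// DivUp(512, 8) == 64 and DivUp(424, 8) == 53, so plain division suffices here;
// DivUp(425, 8) == 54 shows where the rounding would start to matter.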
static void Main()
{
    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);

    RenderForm form = new RenderForm("Kinect background subtraction sample");
    RenderDevice device = new RenderDevice(SharpDX.Direct3D11.DeviceCreationFlags.BgraSupport);
    RenderContext context = new RenderContext(device);
    DX11SwapChain swapChain = DX11SwapChain.FromHandle(device, form.Handle);

    PixelShader depthPixelShader = ShaderCompiler.CompileFromFile<PixelShader>(device, "FilterDepthView.fx", "PS_Sample");
    PixelShader rgbPixelShader = ShaderCompiler.CompileFromFile<PixelShader>(device, "FilterRGBView.fx", "PS_Sample");

    KinectSensor sensor = KinectSensor.GetDefault();
    sensor.Open();

    bool doQuit = false;
    bool swapMode = false;
    bool uploadColor = false;
    bool uploadBodyIndex = false;

    // We need both color and body index frames for the subtraction
    ColorRGBAFrameData colorData = null;
    DynamicColorRGBATexture colorTexture = new DynamicColorRGBATexture(device);
    KinectSensorColorRGBAFrameProvider colorProvider = new KinectSensorColorRGBAFrameProvider(sensor);
    colorProvider.FrameReceived += (sender, args) => { colorData = args.FrameData; uploadColor = true; };

    BodyIndexFrameData bodyIndexData = null;
    DynamicBodyIndexTexture bodyIndexTexture = new DynamicBodyIndexTexture(device);
    KinectSensorBodyIndexFrameProvider bodyIndexProvider = new KinectSensorBodyIndexFrameProvider(sensor);
    bodyIndexProvider.FrameReceived += (sender, args) => { bodyIndexData = args.FrameData; uploadBodyIndex = true; };

    bool uploadColorToDepth = false;
    bool uploadDepthToColor = false;
    ColorToDepthFrameData colorToDepthData = new ColorToDepthFrameData();
    DepthToColorFrameData depthToColorData = new DepthToColorFrameData();
    KinectSensorDepthFrameProvider depthProvider = new KinectSensorDepthFrameProvider(sensor);
    depthProvider.FrameReceived += (sender, args) =>
    {
        if (!swapMode)
        {
            colorToDepthData.Update(sensor.CoordinateMapper, args.DepthData);
            uploadColorToDepth = true;
        }
        else
        {
            depthToColorData.Update(sensor.CoordinateMapper, args.DepthData);
            uploadDepthToColor = true;
        }
    };

    DynamicColorToDepthTexture colorToDepthTexture = new DynamicColorToDepthTexture(device);
    DynamicDepthToColorTexture depthToColorTexture = new DynamicDepthToColorTexture(device);

    form.KeyDown += (sender, args) =>
    {
        if (args.KeyCode == Keys.Escape) { doQuit = true; }
        if (args.KeyCode == Keys.Space) { swapMode = !swapMode; }
    };

    RenderLoop.Run(form, () =>
    {
        if (doQuit)
        {
            form.Dispose();
            return;
        }

        if (uploadColor)
        {
            colorTexture.Copy(context, colorData);
            uploadColor = false;
        }

        if (uploadBodyIndex)
        {
            bodyIndexTexture.Copy(context, bodyIndexData);
            uploadBodyIndex = false;
        }

        if (uploadColorToDepth)
        {
            colorToDepthTexture.Copy(context, colorToDepthData);
            uploadColorToDepth = false;
        }

        if (uploadDepthToColor)
        {
            depthToColorTexture.Copy(context, depthToColorData);
            uploadDepthToColor = false;
        }

        // Space toggles between the two coordinate mapping directions
        ShaderResourceView view = swapMode ? depthToColorTexture.ShaderView : colorToDepthTexture.ShaderView;
        PixelShader shader = swapMode ? depthPixelShader : rgbPixelShader;

        context.RenderTargetStack.Push(swapChain);

        device.Primitives.ApplyFullTriVS(context);
        context.Context.PixelShader.Set(shader);
        context.Context.PixelShader.SetShaderResource(0, colorTexture.ShaderView);
        // Use the normalized view here, since the underlying body index resource is untyped
        context.Context.PixelShader.SetShaderResource(1, bodyIndexTexture.NormalizedView);
        context.Context.PixelShader.SetShaderResource(2, view);
        context.Context.PixelShader.SetSampler(0, device.SamplerStates.LinearClamp);
        device.Primitives.FullScreenTriangle.Draw(context);

        context.RenderTargetStack.Pop();
        swapChain.Present(0, SharpDX.DXGI.PresentFlags.None);
    });

    swapChain.Dispose();
    context.Dispose();
    device.Dispose();
    depthProvider.Dispose();
    colorToDepthData.Dispose();
    depthToColorData.Dispose();
    colorToDepthTexture.Dispose();
    depthToColorTexture.Dispose();
    colorTexture.Dispose();
    colorProvider.Dispose();
    bodyIndexTexture.Dispose();
    bodyIndexProvider.Dispose();
    depthPixelShader.Dispose();
    rgbPixelShader.Dispose();
    sensor.Close();
}
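The FilterRGBView.fx and FilterDepthView.fx shaders are not listed here, but the subtraction rule they presumably implement is simple: a pixel is kept only when the body index sample it maps to belongs to a tracked player. In Kinect v2 body index data, values 0 through 5 identify one of the six tracked bodies and 255 means background. A purely illustrative CPU sketch of that per-pixel decision, not taken from the actual shader code:

// Illustrative sketch only; the shader source is not part of this listing.
static bool IsForegroundPixel(byte bodyIndexValue)
{
    const byte NoBody = 255;         // Kinect v2 reports 255 where no body was detected
    return bodyIndexValue != NoBody; // 0..5 identify one of the six tracked bodies
}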
static void Main()
{
    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);

    RenderForm form = new RenderForm("Kinect depth local stream sample");
    RenderDevice device = new RenderDevice(SharpDX.Direct3D11.DeviceCreationFlags.BgraSupport);
    RenderContext context = new RenderContext(device);
    DX11SwapChain swapChain = DX11SwapChain.FromHandle(device, form.Handle);

    PixelShader pixelShaderRaw = ShaderCompiler.CompileFromFile<PixelShader>(device, "DepthToWorld.fx", "PS_Raw");

    KinectSensor sensor = KinectSensor.GetDefault();
    sensor.Open();

    KinectFrameServer frameServer = new KinectFrameServer(32000, sensor);
    KinectFrameClient frameClient = new KinectFrameClient(IPAddress.Parse("127.0.0.1"), 32000);

    RayTableTexture rayTable = RayTableTexture.FromCoordinateMapper(device, sensor.CoordinateMapper);
    RenderCameraTexture renderCamera = new RenderCameraTexture(device);

    frameClient.Connect();

    bool doQuit = false;
    bool uploadDepth = false;
    bool uploadBody = false;
    int mode = 0; //0 = body index, 1 = depth, 2 = world

    DepthFrameData depthData = null;
    DynamicDepthTexture depth = new DynamicDepthTexture(device);
    IDepthFrameProvider networkDepth = (IDepthFrameProvider)frameClient;
    networkDepth.FrameReceived += (sender, args) => { depthData = args.DepthData; uploadDepth = true; };

    BodyIndexFrameData bodyIndexData = null;
    DynamicBodyIndexTexture bodyIndexTexture = new DynamicBodyIndexTexture(device);
    IBodyIndexFrameProvider networkBody = (IBodyIndexFrameProvider)frameClient;
    networkBody.FrameReceived += (sender, args) => { bodyIndexData = args.FrameData; uploadBody = true; };

    form.KeyDown += (sender, args) =>
    {
        if (args.KeyCode == Keys.Escape) { doQuit = true; }
        if (args.KeyCode == Keys.Space)
        {
            mode++;
            if (mode > 2) { mode = 0; }
        }
    };

    RenderLoop.Run(form, () =>
    {
        if (doQuit)
        {
            form.Dispose();
            return;
        }

        if (uploadDepth)
        {
            depth.Copy(context, depthData);
            uploadDepth = false;

            if (mode == 2)
            {
                //Convert depth to world
                context.Context.OutputMerger.SetRenderTargets(renderCamera.RenderView);
                device.Primitives.ApplyFullTriVS(context);
                context.Context.PixelShader.Set(pixelShaderRaw);
                context.Context.PixelShader.SetShaderResource(0, depth.RawView);
                context.Context.PixelShader.SetShaderResource(1, rayTable.ShaderView);
                device.Primitives.FullScreenTriangle.Draw(context);
                context.RenderTargetStack.Apply();
            }
        }

        if (uploadBody)
        {
            bodyIndexTexture.Copy(context, bodyIndexData);
            uploadBody = false;
        }

        context.RenderTargetStack.Push(swapChain);

        if (mode == 0)
        {
            device.Primitives.ApplyFullTri(context, bodyIndexTexture.NormalizedView);
        }
        else if (mode == 1)
        {
            device.Primitives.ApplyFullTri(context, depth.NormalizedView);
        }
        else
        {
            device.Primitives.ApplyFullTri(context, renderCamera.ShaderView);
        }

        device.Primitives.FullScreenTriangle.Draw(context);

        context.RenderTargetStack.Pop();
        swapChain.Present(0, SharpDX.DXGI.PresentFlags.None);
    });

    swapChain.Dispose();
    context.Dispose();
    device.Dispose();
    depth.Dispose();
    bodyIndexTexture.Dispose();
    frameClient.Stop();
    frameServer.Dispose();
    rayTable.Dispose();
    renderCamera.Dispose();
    pixelShaderRaw.Dispose();
    sensor.Close();
}
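In world mode, the PS_Raw pass combines each raw depth value with the matching entry of the ray table, which is presumably built from CoordinateMapper.GetDepthFrameToCameraSpaceTable(). Assuming that convention, the per-pixel math amounts to the CPU sketch below; the class and method names are illustrative and not part of the sample.

using Microsoft.Kinect;

static class DepthToWorldSketch
{
    // Illustrative only: ray table entries are unit X/Y factors per depth pixel;
    // multiplying them by the depth in meters yields the camera space position.
    public static CameraSpacePoint ToCameraSpace(PointF rayEntry, ushort depthMillimeters)
    {
        float depthMeters = depthMillimeters / 1000.0f;
        return new CameraSpacePoint
        {
            X = rayEntry.X * depthMeters,
            Y = rayEntry.Y * depthMeters,
            Z = depthMeters
        };
    }
}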