public void Dispose()
{
    if (!disposed)
    {
        disposed = true;
        Background.Dispose();
        CompressToSwap.Dispose();

        SphereRenderer.Dispose();
        CapsuleRenderer.Dispose();
        UILineRenderer.Dispose();
        GlyphRenderer.Dispose();

        depthBuffer.Dispose();
        dsv.Dispose();
        colorBuffer.Dispose();
        rtv.Dispose();

        rasterizerState.Dispose();
        opaqueDepthState.Dispose();
        opaqueBlendState.Dispose();
        uiDepthState.Dispose();
        uiBlendState.Dispose();
    }
}
public void Dispose()
{
    if (!disposed)
    {
        disposed = true;
        Background.Dispose();
        CompressToSwap.Dispose();

        SphereRenderer.Dispose();
        CapsuleRenderer.Dispose();
        BoxRenderer.Dispose();
        TriangleRenderer.Dispose();
        MeshRenderer.Dispose();
        UILineRenderer.Dispose();
        GlyphRenderer.Dispose();

        dsv.Dispose();
        depthBuffer.Dispose();
        rtv.Dispose();
        colorBuffer.Dispose();
        resolvedSRV.Dispose();
        resolvedRTV.Dispose();
        resolvedColorBuffer.Dispose();

        rasterizerState.Dispose();
        opaqueDepthState.Dispose();
        opaqueBlendState.Dispose();
        a2cBlendState.Dispose();
        uiDepthState.Dispose();
        uiBlendState.Dispose();

        Shapes.Dispose();
    }
}
protected override void DoDispose()
{
    Background.Dispose();
    CompressToSwap.Dispose();
    Lines.Dispose();

    SphereRenderer.Dispose();
    CapsuleRenderer.Dispose();
    CylinderRenderer.Dispose();
    BoxRenderer.Dispose();
    TriangleRenderer.Dispose();
    MeshRenderer.Dispose();
    UILineRenderer.Dispose();
    GlyphRenderer.Dispose();

    GL.DeleteFramebuffer(framebuffer);
    GL.DeleteTexture(depthBuffer);
    GL.DeleteTexture(colorBuffer);
    GL.DeleteFramebuffer(resolvedFramebuffer);
    GL.DeleteTexture(resolvedColorBuffer);

    Shapes.Dispose();
}
public void Render(Camera camera)
{
    if (Surface.Resolution.X != depthBuffer.Description.Width || Surface.Resolution.Y != depthBuffer.Description.Height)
    {
        OnResize();
    }
    var context = Surface.Context;
    Shapes.MeshCache.FlushPendingUploads(context);

    context.Rasterizer.SetViewport(0, 0, Surface.Resolution.X, Surface.Resolution.Y, 0.0f, 1.0f);

    //Note reversed depth.
    context.ClearDepthStencilView(dsv, DepthStencilClearFlags.Depth, 0, 0);
    context.ClearRenderTargetView(rtv, new SharpDX.Mathematics.Interop.RawColor4());
    context.OutputMerger.SetRenderTargets(dsv, rtv);
    context.Rasterizer.State = rasterizerState;
    context.OutputMerger.SetDepthStencilState(opaqueDepthState);
    //All ray traced shapes use analytic coverage writes to get antialiasing.
    context.OutputMerger.SetBlendState(a2cBlendState);
    SphereRenderer.Render(context, camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.spheres.Span), 0, Shapes.spheres.Count);
    CapsuleRenderer.Render(context, camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.capsules.Span), 0, Shapes.capsules.Count);
    CylinderRenderer.Render(context, camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.cylinders.Span), 0, Shapes.cylinders.Count);
    //Non-raytraced shapes just use regular opaque rendering.
    context.OutputMerger.SetBlendState(opaqueBlendState);
    BoxRenderer.Render(context, camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.boxes.Span), 0, Shapes.boxes.Count);
    TriangleRenderer.Render(context, camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.triangles.Span), 0, Shapes.triangles.Count);
    MeshRenderer.Render(context, camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.meshes.Span), 0, Shapes.meshes.Count);
    LineRenderer.Render(context, camera, Surface.Resolution, SpanConverter.AsSpan(Lines.lines.Span), 0, Lines.lines.Count);
    Background.Render(context, camera);

    //Resolve MSAA rendering down to a single sample buffer for screenspace work.
    //Note that we're not bothering to properly handle tonemapping during the resolve. That's going to hurt quality a little, but the demos don't make use of very wide ranges.
    //(If for some reason you end up expanding the demos to make use of wider HDR, you can make this a custom resolve pretty easily.)
    context.ResolveSubresource(colorBuffer, 0, resolvedColorBuffer, 0, Format.R16G16B16A16_Float);
    context.OutputMerger.SetRenderTargets(resolvedRTV);

    //Glyph and screenspace line drawing rely on the same premultiplied alpha blending transparency. We'll handle their state out here.
    context.OutputMerger.SetBlendState(uiBlendState);
    context.OutputMerger.SetDepthStencilState(uiDepthState);
    ImageRenderer.PreparePipeline(context);
    ImageBatcher.Flush(context, Surface.Resolution, ImageRenderer);
    UILineBatcher.Flush(context, Surface.Resolution, UILineRenderer);
    GlyphRenderer.PreparePipeline(context);
    TextBatcher.Flush(context, Surface.Resolution, GlyphRenderer);

    //Note that, for now, the compress to swap handles its own depth state since it's the only post processing stage.
    context.OutputMerger.SetBlendState(opaqueBlendState);
    context.Rasterizer.State = rasterizerState;
    CompressToSwap.Render(context, resolvedSRV, Surface.RTV);
}
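//The a2cBlendState and opaqueDepthState set in the method above are created during initialization, outside this excerpt.
//A minimal SharpDX.Direct3D11 sketch of what equivalent state objects could look like; the 'device' variable and the
//use of the Default() descriptions are assumptions, not the demo's exact setup code.
var a2cDescription = BlendStateDescription.Default();
//Let the analytic coverage written by the ray traced shape shaders drive MSAA coverage.
a2cDescription.AlphaToCoverageEnable = true;
a2cBlendState = new BlendState(device, a2cDescription);

var opaqueDepthDescription = DepthStencilStateDescription.Default();
//Reversed depth: depth clears to 0 and closer fragments map to larger depth values, so the test is 'greater'.
opaqueDepthDescription.DepthComparison = Comparison.Greater;
opaqueDepthState = new DepthStencilState(device, opaqueDepthDescription);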
public void Render(Camera camera)
{
    if (Surface.Resolution.X != width || Surface.Resolution.Y != height)
    {
        OnResize();
    }
    Shapes.MeshCache.FlushPendingUploads();

    GL.BindFramebuffer(FramebufferTarget.Framebuffer, framebuffer);
    //Note reversed depth.
    GL.ClearDepth(0.0f);
    GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
    GL.Enable(EnableCap.CullFace);
    GL.Enable(EnableCap.DepthTest);
    GL.DepthFunc(DepthFunction.Greater);
    //All ray traced shapes use analytic coverage writes to get antialiasing.
    GL.Enable(EnableCap.SampleAlphaToCoverage);
    SphereRenderer.Render(camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.spheres.Span), 0, Shapes.spheres.Count);
    CapsuleRenderer.Render(camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.capsules.Span), 0, Shapes.capsules.Count);
    CylinderRenderer.Render(camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.cylinders.Span), 0, Shapes.cylinders.Count);
    //Non-raytraced shapes just use regular opaque rendering.
    GL.Disable(EnableCap.SampleAlphaToCoverage);
    BoxRenderer.Render(camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.boxes.Span), 0, Shapes.boxes.Count);
    TriangleRenderer.Render(camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.triangles.Span), 0, Shapes.triangles.Count);
    MeshRenderer.Render(camera, Surface.Resolution, SpanConverter.AsSpan(Shapes.meshes.Span), 0, Shapes.meshes.Count);
    LineRenderer.Render(camera, Surface.Resolution, SpanConverter.AsSpan(Lines.lines.Span), 0, Lines.lines.Count);
    Background.Render(camera);
    GL.Disable(EnableCap.CullFace);
    GL.Disable(EnableCap.DepthTest);

    //Resolve MSAA rendering down to a single sample buffer for screenspace work.
    //Note that we're not bothering to properly handle tonemapping during the resolve. That's going to hurt quality a little, but the demos don't make use of very wide ranges.
    //(If for some reason you end up expanding the demos to make use of wider HDR, you can make this a custom resolve pretty easily.)
    GL.BlitNamedFramebuffer(framebuffer, resolvedFramebuffer, 0, 0, width, height, 0, 0, width, height, ClearBufferMask.ColorBufferBit, BlitFramebufferFilter.Nearest);
    GL.BindFramebuffer(FramebufferTarget.Framebuffer, resolvedFramebuffer);

    //Glyph and screenspace line drawing rely on the same premultiplied alpha blending transparency. We'll handle their state out here.
    GL.Enable(EnableCap.Blend);
    GL.BlendFunc(BlendingFactorSrc.One, BlendingFactorDest.OneMinusSrcAlpha);
    GL.BlendEquation(BlendEquationMode.FuncAdd);
    ImageRenderer.PreparePipeline();
    ImageBatcher.Flush(Surface.Resolution, ImageRenderer);
    UILineBatcher.Flush(Surface.Resolution, UILineRenderer);
    GlyphRenderer.PreparePipeline();
    TextBatcher.Flush(Surface.Resolution, GlyphRenderer);
    GL.Disable(EnableCap.Blend);

    GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
    CompressToSwap.Render(resolvedColorBuffer);
}
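//The multisampled framebuffer and its resolve target used by the GL path above are created during initialization/OnResize,
//which isn't shown here. A minimal non-DSA sketch of an equivalent setup with OpenTK; the sample count (4) and the
//Rgba16f/DepthComponent32f formats are assumptions.
colorBuffer = GL.GenTexture();
GL.BindTexture(TextureTarget.Texture2DMultisample, colorBuffer);
GL.TexImage2DMultisample(TextureTargetMultisample.Texture2DMultisample, 4, PixelInternalFormat.Rgba16f, width, height, true);
depthBuffer = GL.GenTexture();
GL.BindTexture(TextureTarget.Texture2DMultisample, depthBuffer);
GL.TexImage2DMultisample(TextureTargetMultisample.Texture2DMultisample, 4, PixelInternalFormat.DepthComponent32f, width, height, true);
framebuffer = GL.GenFramebuffer();
GL.BindFramebuffer(FramebufferTarget.Framebuffer, framebuffer);
GL.FramebufferTexture2D(FramebufferTarget.Framebuffer, FramebufferAttachment.ColorAttachment0, TextureTarget.Texture2DMultisample, colorBuffer, 0);
GL.FramebufferTexture2D(FramebufferTarget.Framebuffer, FramebufferAttachment.DepthAttachment, TextureTarget.Texture2DMultisample, depthBuffer, 0);
//resolvedFramebuffer wraps a regular single sample texture; GL.BlitNamedFramebuffer in the render path performs the MSAA resolve into it.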
public void Render(Camera camera)
{
    if (Surface.Resolution.X != depthBuffer.Description.Width || Surface.Resolution.Y != depthBuffer.Description.Height)
    {
        OnResize();
    }
    var context = Surface.Context;
    context.Rasterizer.SetViewport(0, 0, Surface.Resolution.X, Surface.Resolution.Y, 0.0f, 1.0f);

    //Note reversed depth.
    context.ClearDepthStencilView(dsv, DepthStencilClearFlags.Depth, 0, 0);
    //The background render is going to fill out the entire color buffer, but having a clear can be useful- e.g. clearing out MSAA history.
    //We don't use MSAA right now, but the cost of doing this clear is negligible and it avoids a surprise later.
    context.ClearRenderTargetView(rtv, new SharpDX.Mathematics.Interop.RawColor4());
    context.OutputMerger.SetRenderTargets(dsv, rtv);
    context.Rasterizer.State = rasterizerState;
    context.OutputMerger.SetBlendState(opaqueBlendState);
    context.OutputMerger.SetDepthStencilState(opaqueDepthState);
    SphereRenderer.Render(context, camera, Surface.Resolution, Shapes.spheres.Span.Memory, 0, Shapes.spheres.Count);
    CapsuleRenderer.Render(context, camera, Surface.Resolution, Shapes.capsules.Span.Memory, 0, Shapes.capsules.Count);
    BoxRenderer.Render(context, camera, Surface.Resolution, Shapes.boxes.Span.Memory, 0, Shapes.boxes.Count);
    LineRenderer.Render(context, camera, Surface.Resolution, Lines.lines.Span.Memory, 0, Lines.lines.Count);
    Background.Render(context, camera);

    //Glyph and screenspace line drawing rely on the same premultiplied alpha blending transparency. We'll handle their state out here.
    context.OutputMerger.SetBlendState(uiBlendState);
    context.OutputMerger.SetDepthStencilState(uiDepthState);
    UILineBatcher.Flush(context, Surface.Resolution, UILineRenderer);
    GlyphRenderer.PreparePipeline(context);
    TextBatcher.Flush(context, Surface.Resolution, GlyphRenderer);

    //Note that, for now, the compress to swap handles its own depth state since it's the only post processing stage.
    context.OutputMerger.SetBlendState(opaqueBlendState);
    context.Rasterizer.State = rasterizerState;
    CompressToSwap.Render(context, srv, Surface.RTV);
}
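//The uiBlendState set above implements the premultiplied alpha blending mentioned in the comment (it matches the
//GL.BlendFunc(One, OneMinusSrcAlpha) call in the GL path). A minimal SharpDX sketch, assuming a single render target
//and a 'device' variable that isn't part of this excerpt.
var uiBlendDescription = BlendStateDescription.Default();
uiBlendDescription.RenderTarget[0].IsBlendEnabled = true;
//Source color is already multiplied by alpha, so it's added at full strength while the destination is attenuated by coverage.
uiBlendDescription.RenderTarget[0].SourceBlend = BlendOption.One;
uiBlendDescription.RenderTarget[0].DestinationBlend = BlendOption.InverseSourceAlpha;
uiBlendDescription.RenderTarget[0].BlendOperation = BlendOperation.Add;
uiBlendDescription.RenderTarget[0].SourceAlphaBlend = BlendOption.One;
uiBlendDescription.RenderTarget[0].DestinationAlphaBlend = BlendOption.InverseSourceAlpha;
uiBlendDescription.RenderTarget[0].AlphaBlendOperation = BlendOperation.Add;
uiBlendState = new BlendState(device, uiBlendDescription);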