// Runs the post process chain for the camera and returns the texture holding the final image.
// If this is the final pass for the camera, the result is written directly to the back buffer;
// otherwise an intermediate texture is allocated as the destination.
// When the AfterPostprocess frame setting is enabled, "after post process" objects are first
// rendered into a dedicated off-screen buffer which the post process system composites at the end.
TextureHandle RenderPostProcess(RenderGraph renderGraph, PrepassOutput prepassOutput, TextureHandle inputColor, TextureHandle backBuffer, CullingResults cullResults, HDCamera hdCamera)
{
    PostProcessParameters parameters = PreparePostProcess(cullResults, hdCamera);

    // Default to a black texture so the composite is a no-op when the after-post-process pass is skipped.
    TextureHandle afterPostProcessBuffer = renderGraph.defaultResources.blackTextureXR;

    // Final pass writes straight to the back buffer; otherwise allocate an intermediate target.
    TextureHandle dest = HDUtils.PostProcessIsFinalPass(parameters.hdCamera) ? backBuffer :
        renderGraph.CreateTexture(new TextureDesc(Vector2.one, true, true) { colorFormat = GetColorBufferFormat(), name = "Intermediate Postprocess buffer" });

    if (hdCamera.frameSettings.IsEnabled(FrameSettingsField.AfterPostprocess))
    {
        // We render AfterPostProcess objects first into a separate buffer that will be composited in the final post process pass
        using (var builder = renderGraph.AddRenderPass<AfterPostProcessPassData>("After Post-Process", out var passData, ProfilingSampler.Get(HDProfileId.AfterPostProcessing)))
        {
            passData.parameters = parameters;
            // Cleared to black so untouched texels composite as "no contribution".
            passData.afterPostProcessBuffer = builder.UseColorBuffer(renderGraph.CreateTexture(
                new TextureDesc(Vector2.one, true, true) { colorFormat = GraphicsFormat.R8G8B8A8_SRGB, clearBuffer = true, clearColor = Color.black, name = "OffScreen AfterPostProcess" }), 0);

            // Depth testing against the scene depth is optional for after-post-process objects.
            if (passData.parameters.useDepthBuffer)
            {
                passData.depthStencilBuffer = builder.UseDepthBuffer(prepassOutput.depthBuffer, DepthAccess.ReadWrite);
            }

            passData.opaqueAfterPostprocessRL = builder.UseRendererList(renderGraph.CreateRendererList(passData.parameters.opaqueAfterPPDesc));
            passData.transparentAfterPostprocessRL = builder.UseRendererList(renderGraph.CreateRendererList(passData.parameters.transparentAfterPPDesc));

            builder.SetRenderFunc(
                (AfterPostProcessPassData data, RenderGraphContext ctx) =>
                {
                    RenderAfterPostProcess(data.parameters, data.opaqueAfterPostprocessRL, data.transparentAfterPostprocessRL, ctx.renderContext, ctx.cmd);
                });

            afterPostProcessBuffer = passData.afterPostProcessBuffer;
        }
    }

    // Execute the post process stack itself; it reads the resolved prepass buffers and
    // composites afterPostProcessBuffer on top of the final result.
    m_PostProcessSystem.Render(
        renderGraph, parameters.hdCamera, parameters.blueNoise, inputColor, afterPostProcessBuffer, prepassOutput.resolvedDepthBuffer, prepassOutput.depthPyramidTexture, prepassOutput.resolvedNormalBuffer, prepassOutput.resolvedMotionVectorsBuffer, dest, parameters.flipYInPostProcess
    );

    return(dest);
}
// Evaluates every screen space shadow for the camera and returns the texture array holding them.
// When screen space shadows are disabled or not requested, a black texture array is returned and
// the debug view is still fed so the debug mode has something to display.
TextureHandle RenderScreenSpaceShadows(RenderGraph renderGraph, HDCamera hdCamera, PrepassOutput prepassOutput, TextureHandle depthBuffer, TextureHandle normalBuffer, TextureHandle motionVectorsBuffer, TextureHandle historyValidityBuffer, TextureHandle rayCountTexture)
{
    // Early out when this camera does not evaluate any screen space shadows.
    if (!hdCamera.frameSettings.IsEnabled(FrameSettingsField.ScreenSpaceShadows) || !RequestedScreenSpaceShadows())
    {
        // We push the debug texture anyway if we are not evaluating any screen space shadows.
        PushFullScreenDebugTexture(m_RenderGraph, m_RenderGraph.defaultResources.whiteTextureXR, FullScreenDebugMode.ScreenSpaceShadows);
        return m_RenderGraph.defaultResources.blackTextureArrayXR;
    }

    using (new RenderGraphProfilingScope(renderGraph, ProfilingSampler.Get(HDProfileId.ScreenSpaceShadows)))
    {
        // Allocate the texture array that receives every screen space shadow slot.
        TextureHandle shadowArray = CreateScreenSpaceShadowTextureArray(renderGraph);

        // The directional light is always handled first.
        RenderDirectionalLightScreenSpaceShadow(renderGraph, hdCamera, depthBuffer, normalBuffer, motionVectorsBuffer, historyValidityBuffer, rayCountTexture, shadowArray);

        // The remaining lights need ray tracing to be both enabled and in a usable state.
        if (hdCamera.frameSettings.IsEnabled(FrameSettingsField.RayTracing) && GetRayTracingState())
        {
            RenderLightScreenSpaceShadows(renderGraph, hdCamera, prepassOutput, depthBuffer, normalBuffer, motionVectorsBuffer, historyValidityBuffer, rayCountTexture, shadowArray);
        }

        // The debug view is lazy: if the texture is never read it is never evaluated.
        TextureHandle debugView = EvaluateShadowDebugView(renderGraph, hdCamera, shadowArray);
        PushFullScreenDebugTexture(m_RenderGraph, debugView, FullScreenDebugMode.ScreenSpaceShadows);

        return shadowArray;
    }
}
// Renders the whole prepass sequence for the frame: depth prepass, object and camera motion
// vectors, MSAA resolves, decals, GBuffer and the depth pyramid. Returns the PrepassOutput
// aggregating every buffer produced for later lighting/post process consumption.
// NOTE: the order of the calls below is load-bearing — each pass reads buffers written by
// the previous ones.
PrepassOutput RenderPrepass(RenderGraph renderGraph, RenderGraphMutableResource sssBuffer, CullingResults cullingResults, HDCamera hdCamera)
{
    // Any previously copied depth is stale once the prepass is re-rendered.
    m_IsDepthBufferCopyValid = false;

    var result = new PrepassOutput();
    result.gbuffer = m_GBufferOutput;
    result.dbuffer = m_DBufferOutput;

    bool msaa = hdCamera.frameSettings.IsEnabled(FrameSettingsField.MSAA);
    // Scene view cameras with animated materials disabled keep motion vectors cleared.
    bool clearMotionVectors = hdCamera.camera.cameraType == CameraType.SceneView && !CoreUtils.AreAnimatedMaterialsEnabled(hdCamera.camera);

    // TODO: See how to clean this. Some buffers are created outside, some inside functions...
    result.motionVectorsBuffer = CreateMotionVectorBuffer(renderGraph, msaa, clearMotionVectors);
    result.depthBuffer = CreateDepthBuffer(renderGraph, msaa);

    RenderOcclusionMeshes(renderGraph, hdCamera, result.depthBuffer);

    StartSinglePass(renderGraph, hdCamera);

    // Returns true when motion vectors must be rendered after the GBuffer pass (deferred path).
    bool renderMotionVectorAfterGBuffer = RenderDepthPrepass(renderGraph, cullingResults, hdCamera, ref result);

    if (!renderMotionVectorAfterGBuffer)
    {
        // If objects motion vectors are enabled, this will render the objects with motion vector into the target buffers (in addition to the depth)
        // Note: An object with motion vector must not be render in the prepass otherwise we can have motion vector write that should have been rejected
        RenderObjectsMotionVectors(renderGraph, cullingResults, hdCamera, result);
    }

    ResolveStencilBufferIfNeeded(renderGraph, hdCamera, ref result);

    // At this point in forward all objects have been rendered to the prepass (depth/normal/motion vectors) so we can resolve them
    ResolvePrepassBuffers(renderGraph, hdCamera, ref result);

    RenderDecals(renderGraph, hdCamera, ref result, cullingResults);

    RenderGBuffer(renderGraph, sssBuffer, ref result, cullingResults, hdCamera);

    // In both forward and deferred, everything opaque should have been rendered at this point so we can safely copy the depth buffer for later processing.
    GenerateDepthPyramid(renderGraph, hdCamera, ref result);

    if (renderMotionVectorAfterGBuffer)
    {
        // See the call RenderObjectsMotionVectors() above and comment
        RenderObjectsMotionVectors(renderGraph, cullingResults, hdCamera, result);
    }

    RenderCameraMotionVectors(renderGraph, hdCamera, result.depthPyramidTexture, result.resolvedMotionVectorsBuffer);

    // With MSAA the stencil is only usable through its resolved copy; otherwise the depth buffer carries it.
    result.stencilBuffer = msaa ? result.resolvedStencilBuffer : result.depthBuffer;

    StopSinglePass(renderGraph, hdCamera);

    return(result);
}
// Evaluates the screen space shadow of every valid non-directional light slot and writes the
// results into screenSpaceShadowArray. Rectangle (area) lights and punctual (point/spot) lights
// are dispatched to their dedicated algorithms; other light types are ignored. Always returns true.
bool RenderLightScreenSpaceShadows(RenderGraph renderGraph, HDCamera hdCamera, PrepassOutput prepassOutput, TextureHandle depthBuffer, TextureHandle normalBuffer, TextureHandle motionVectorsBuffer, TextureHandle historyValidityBuffer, TextureHandle rayCountTexture, TextureHandle screenSpaceShadowArray)
{
    // Walk every allocated screen space shadow slot.
    for (int slot = 0; slot < m_ScreenSpaceShadowIndex; ++slot)
    {
        // Invalid slots (this matches the directional light) are skipped.
        if (!m_CurrentScreenSpaceShadowData[slot].valid)
            continue;

        // Resolve the GPU light data and the authoring-side additional data for this slot.
        int lightDataIndex = m_CurrentScreenSpaceShadowData[slot].lightDataIndex;
        LightData gpuLight = m_GpuLightsBuilder.lights[lightDataIndex];
        HDAdditionalLightData additionalData = m_CurrentScreenSpaceShadowData[slot].additionalLightData;

        // Dispatch to the algorithm matching the light's type.
        if (gpuLight.lightType == GPULightType.Rectangle)
        {
            RenderAreaScreenSpaceShadow(renderGraph, hdCamera, gpuLight, additionalData, lightDataIndex, prepassOutput, depthBuffer, normalBuffer, motionVectorsBuffer, rayCountTexture, screenSpaceShadowArray);
        }
        else if (gpuLight.lightType == GPULightType.Point || gpuLight.lightType == GPULightType.Spot)
        {
            RenderPunctualScreenSpaceShadow(renderGraph, hdCamera, gpuLight, additionalData, lightDataIndex, prepassOutput, depthBuffer, normalBuffer, motionVectorsBuffer, historyValidityBuffer, rayCountTexture, screenSpaceShadowArray);
        }
    }

    return true;
}
// RenderDepthPrepass renders both opaque and opaque alpha-tested objects, depending on engine configuration:
// - Lit Forward only: we always render all materials.
// - Lit Deferred: we always render the depth prepass for alpha-tested materials (optimization); other deferred materials are rendered based on engine configuration.
// - Forward opaque with deferred renderer (DepthForwardOnly pass): we always render all materials.
// Returns true if motion vectors must be rendered after the GBuffer pass.
bool RenderDepthPrepass(RenderGraph renderGraph, CullingResults cull, HDCamera hdCamera, ref PrepassOutput output)
{
    var depthPrepassParameters = PrepareDepthPrepass(cull, hdCamera);
    bool msaa = hdCamera.frameSettings.IsEnabled(FrameSettingsField.MSAA);

    using (var builder = renderGraph.AddRenderPass<DepthPrepassData>(depthPrepassParameters.passName, out var passData, ProfilingSampler.Get(depthPrepassParameters.profilingId)))
    {
        passData.frameSettings = hdCamera.frameSettings;
        passData.msaaEnabled = msaa;
        passData.hasDepthOnlyPrepass = depthPrepassParameters.hasDepthOnlyPass;
        passData.renderRayTracingPrepass = depthPrepassParameters.renderRayTracingPrepass;

        passData.depthBuffer = builder.UseDepthBuffer(output.depthBuffer, DepthAccess.ReadWrite);
        passData.normalBuffer = builder.WriteTexture(CreateNormalBuffer(renderGraph, msaa));

        // This texture must be used because reading directly from an MSAA Depth buffer is way too expensive.
        // The solution that we went for is writing the depth in an additional color buffer (10x cheaper to resolve on PS4).
        if (msaa)
        {
            passData.depthAsColorBuffer = builder.WriteTexture(renderGraph.CreateTexture(new TextureDesc(Vector2.one, true, true) { colorFormat = GraphicsFormat.R32_SFloat, clearBuffer = true, clearColor = Color.black, bindTextureMS = true, enableMSAA = true, name = "DepthAsColorMSAA" }, HDShaderIDs._DepthTextureMS));
        }

        // The depth-only renderer list only exists for configurations that do a depth-only pass
        // before the MRT pass; otherwise data.rendererListDepthOnly stays at its default value.
        if (passData.hasDepthOnlyPrepass)
        {
            passData.rendererListDepthOnly = builder.UseRendererList(renderGraph.CreateRendererList(depthPrepassParameters.depthOnlyRendererListDesc));
        }

        passData.rendererListMRT = builder.UseRendererList(renderGraph.CreateRendererList(depthPrepassParameters.mrtRendererListDesc));

        if (hdCamera.frameSettings.IsEnabled(FrameSettingsField.RayTracing))
        {
            passData.renderListRayTracingOpaque = builder.UseRendererList(renderGraph.CreateRendererList(depthPrepassParameters.rayTracingOpaqueRLDesc));
            passData.renderListRayTracingTransparent = builder.UseRendererList(renderGraph.CreateRendererList(depthPrepassParameters.rayTracingTransparentRLDesc));
        }

        // Publish the pass outputs so downstream passes can consume them.
        output.depthBuffer = passData.depthBuffer;
        output.depthAsColor = passData.depthAsColorBuffer;
        output.normalBuffer = passData.normalBuffer;

        builder.SetRenderFunc(
            (DepthPrepassData data, RenderGraphContext context) =>
            {
                // MRT 0 is always the normal buffer; MRT 1 is the depth-as-color target when MSAA is on.
                var mrt = context.renderGraphPool.GetTempArray<RenderTargetIdentifier>(data.msaaEnabled ? 2 : 1);
                mrt[0] = context.resources.GetTexture(data.normalBuffer);
                if (data.msaaEnabled)
                {
                    mrt[1] = context.resources.GetTexture(data.depthAsColorBuffer);
                }
                // NOTE(review): this local is declared but not passed to the call below — confirm whether it is dead code.
                bool useRayTracing = data.frameSettings.IsEnabled(FrameSettingsField.RayTracing);
                RenderDepthPrepass(context.renderContext, context.cmd, data.frameSettings
                    , mrt
                    , context.resources.GetTexture(data.depthBuffer)
                    , context.resources.GetRendererList(data.rendererListDepthOnly)
                    , context.resources.GetRendererList(data.rendererListMRT)
                    , data.hasDepthOnlyPrepass
                    , context.resources.GetRendererList(data.renderListRayTracingOpaque)
                    , context.resources.GetRendererList(data.renderListRayTracingTransparent)
                    , data.renderRayTracingPrepass
                );
            });
    }

    return(depthPrepassParameters.shouldRenderMotionVectorAfterGBuffer);
}
// Renders the whole prepass sequence for the frame (XR single-pass scope): depth prepass,
// object/camera motion vectors, MSAA resolves, DBuffer decals, GBuffer, depth pyramid and
// coarse stencil. Returns the PrepassOutput aggregating every buffer produced.
// NOTE: the order of the calls below is load-bearing — each pass reads buffers written by
// the previous ones.
PrepassOutput RenderPrepass(RenderGraph renderGraph, TextureHandle sssBuffer, CullingResults cullingResults, HDCamera hdCamera)
{
    // Any previously copied depth is stale once the prepass is re-rendered.
    m_IsDepthBufferCopyValid = false;

    var result = new PrepassOutput();
    result.gbuffer = m_GBufferOutput;
    result.dbuffer = m_DBufferOutput;

    bool msaa = hdCamera.frameSettings.IsEnabled(FrameSettingsField.MSAA);
    // Scene view cameras with animated materials disabled keep motion vectors cleared.
    bool clearMotionVectors = hdCamera.camera.cameraType == CameraType.SceneView && !hdCamera.animateMaterials;

    // TODO: See how to clean this. Some buffers are created outside, some inside functions...
    result.motionVectorsBuffer = CreateMotionVectorBuffer(renderGraph, msaa, clearMotionVectors);
    result.depthBuffer = CreateDepthBuffer(renderGraph, msaa);

    RenderXROcclusionMeshes(renderGraph, hdCamera, result.depthBuffer);

    using (new XRSinglePassScope(renderGraph, hdCamera))
    {
        // TODO RENDERGRAPH
        //// Bind the custom color/depth before the first custom pass
        //if (hdCamera.frameSettings.IsEnabled(FrameSettingsField.CustomPass))
        //{
        //    if (m_CustomPassColorBuffer.IsValueCreated)
        //        cmd.SetGlobalTexture(HDShaderIDs._CustomColorTexture, m_CustomPassColorBuffer.Value);
        //    if (m_CustomPassDepthBuffer.IsValueCreated)
        //        cmd.SetGlobalTexture(HDShaderIDs._CustomDepthTexture, m_CustomPassDepthBuffer.Value);
        //}
        //RenderCustomPass(renderContext, cmd, hdCamera, customPassCullingResults, CustomPassInjectionPoint.BeforeRendering);

        // Returns true when motion vectors must be rendered after the GBuffer pass (deferred path).
        bool shouldRenderMotionVectorAfterGBuffer = RenderDepthPrepass(renderGraph, cullingResults, hdCamera, ref result);
        if (!shouldRenderMotionVectorAfterGBuffer)
        {
            // If objects motion vectors are enabled, this will render the objects with motion vector into the target buffers (in addition to the depth)
            // Note: An object with motion vector must not be render in the prepass otherwise we can have motion vector write that should have been rejected
            RenderObjectsMotionVectors(renderGraph, cullingResults, hdCamera, result);
        }

        // If we have MSAA, we need to complete the motion vector buffer before buffer resolves, hence we need to run camera mv first.
        // This is always fine since shouldRenderMotionVectorAfterGBuffer is always false for forward.
        bool needCameraMVBeforeResolve = hdCamera.frameSettings.IsEnabled(FrameSettingsField.MSAA);
        if (needCameraMVBeforeResolve)
        {
            // NOTE(review): at this point GenerateDepthPyramid and ResolvePrepassBuffers have not run yet,
            // so result.depthPyramidTexture / result.resolvedMotionVectorsBuffer look uninitialized here — confirm intended.
            RenderCameraMotionVectors(renderGraph, hdCamera, result.depthPyramidTexture, result.resolvedMotionVectorsBuffer);
        }

        // TODO RENDERGRAPH
        //PreRenderSky(hdCamera, cmd);

        // At this point in forward all objects have been rendered to the prepass (depth/normal/motion vectors) so we can resolve them
        ResolvePrepassBuffers(renderGraph, hdCamera, ref result);

        RenderDBuffer(renderGraph, hdCamera, ref result, cullingResults);

        RenderGBuffer(renderGraph, sssBuffer, ref result, cullingResults, hdCamera);

        DecalNormalPatch(renderGraph, hdCamera, ref result);

        // TODO RENDERGRAPH
        //// After Depth and Normals/roughness including decals
        //RenderCustomPass(renderContext, cmd, hdCamera, customPassCullingResults, CustomPassInjectionPoint.AfterOpaqueDepthAndNormal);

        // In both forward and deferred, everything opaque should have been rendered at this point so we can safely copy the depth buffer for later processing.
        GenerateDepthPyramid(renderGraph, hdCamera, ref result);

        // TODO RENDERGRAPH
        //// Send all the geometry graphics buffer to client systems if required (must be done after the pyramid and before the transparent depth pre-pass)
        //SendGeometryGraphicsBuffers(cmd, hdCamera);

        if (shouldRenderMotionVectorAfterGBuffer)
        {
            // See the call RenderObjectsMotionVectors() above and comment
            RenderObjectsMotionVectors(renderGraph, cullingResults, hdCamera, result);
        }

        // In case we don't have MSAA, we always run camera motion vectors when it is safe to assume object MVs are rendered.
        if (!needCameraMVBeforeResolve)
        {
            RenderCameraMotionVectors(renderGraph, hdCamera, result.depthPyramidTexture, result.resolvedMotionVectorsBuffer);
        }

        // TODO RENDERGRAPH / Probably need to move this somewhere else.
        //RenderTransparencyOverdraw(cullingResults, hdCamera, renderContext, cmd);

        BuildCoarseStencilAndResolveIfNeeded(renderGraph, hdCamera, ref result);
    }

    return(result);
}
// Evaluates every screen space shadow for the camera and returns the texture array holding them.
// Returns a black texture array when screen space shadows are unsupported or not requested.
TextureHandle RenderScreenSpaceShadows(RenderGraph renderGraph, HDCamera hdCamera, PrepassOutput prepassOutput, TextureHandle depthBuffer, TextureHandle normalBuffer, TextureHandle motionVectorsBuffer, TextureHandle rayCountTexture)
{
    // If screen space shadows are not supported for this camera, we are done.
    bool screenSpaceShadowsActive = hdCamera.frameSettings.IsEnabled(FrameSettingsField.ScreenSpaceShadows) && RequestedScreenSpaceShadows();
    if (!screenSpaceShadowsActive)
    {
        return m_RenderGraph.defaultResources.blackTextureArrayXR;
    }

    using (new RenderGraphProfilingScope(renderGraph, ProfilingSampler.Get(HDProfileId.ScreenSpaceShadows)))
    {
        // Allocate the texture array that receives every screen space shadow slot.
        TextureHandle shadowArray = CreateScreenSpaceShadowTextureArray(renderGraph);

        // The directional light is always handled first.
        RenderDirectionalLightScreenSpaceShadow(renderGraph, hdCamera, depthBuffer, normalBuffer, motionVectorsBuffer, rayCountTexture, shadowArray);

        // The remaining lights require ray tracing to be enabled.
        if (hdCamera.frameSettings.IsEnabled(FrameSettingsField.RayTracing))
        {
            RenderLightScreenSpaceShadows(renderGraph, hdCamera, prepassOutput, depthBuffer, normalBuffer, motionVectorsBuffer, rayCountTexture, shadowArray);
        }

        // We render the debug view
        // TODO: The texture is currently unused, make usage of it
        EvaluateShadowDebugView(renderGraph, hdCamera, shadowArray);

        return shadowArray;
    }
}