/// <summary>
/// Allocates the screen-space shadowmap render target and binds it as this pass's target.
/// </summary>
public override void Configure(CommandBuffer cmd, RenderTextureDescriptor cameraTextureDescriptor)
{
    // Allocate the temporary RT from the descriptor prepared during setup; bilinear
    // filtering so the result can be sampled smoothly when read later.
    cmd.GetTemporaryRT(m_ScreenSpaceShadowmap.id, m_RenderTextureDescriptor, FilterMode.Bilinear);

    RenderTargetIdentifier target = m_ScreenSpaceShadowmap.Identifier();
    ConfigureTarget(target);
    // Clear to white — presumably "fully lit / no shadow" is the neutral value here.
    ConfigureClear(ClearFlag.All, Color.white);
}
/// <inheritdoc/>
public override void OnCameraSetup(CommandBuffer cmd, ref RenderingData renderingData)
{
    // Start from the camera's target descriptor, then strip depth and MSAA:
    // this pass renders a plain single-sample color target.
    m_RenderTextureDescriptor = renderingData.cameraData.cameraTargetDescriptor;
    m_RenderTextureDescriptor.depthBufferBits = 0;
    m_RenderTextureDescriptor.msaaSamples = 1;

    // Prefer a single-channel 8-bit format when the platform can render to it;
    // otherwise fall back to a full BGRA8 target.
    bool r8Supported = RenderingUtils.SupportsGraphicsFormat(
        GraphicsFormat.R8_UNorm, FormatUsage.Linear | FormatUsage.Render);
    m_RenderTextureDescriptor.graphicsFormat =
        r8Supported ? GraphicsFormat.R8_UNorm : GraphicsFormat.B8G8R8A8_UNorm;

    cmd.GetTemporaryRT(m_RenderTarget.id, m_RenderTextureDescriptor, FilterMode.Point);

    RenderTargetIdentifier target = m_RenderTarget.Identifier();
    ConfigureTarget(target);
    ConfigureClear(ClearFlag.None, Color.white);
}
/// <inheritdoc/>
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    CommandBuffer cmd = CommandBufferPool.Get(m_ProfilerTag);

    // Color-only, single-sample descriptor matching the camera target.
    RenderTextureDescriptor opaqueDesc = renderingData.cameraData.cameraTargetDescriptor;
    opaqueDesc.depthBufferBits = 0;
    opaqueDesc.msaaSamples = 1;

    // A target cannot be read and written in the same blit, so ping-pong through
    // a temporary RT: source -> temp (applying blitMaterial pass 0), then temp -> source.
    cmd.GetTemporaryRT(m_TemporaryColorTexture.id, opaqueDesc, filterMode);
    Blit(cmd, source, m_TemporaryColorTexture.Identifier(), blitMaterial, 0);
    Blit(cmd, m_TemporaryColorTexture.Identifier(), source);
    // Fix: explicitly release the temporary RT instead of relying on Unity's
    // implicit end-of-camera cleanup.
    cmd.ReleaseTemporaryRT(m_TemporaryColorTexture.id);

    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
/// <summary>
/// Handles compare equal when their ids match; when either side carries the
/// sentinel id -2 (appears to mark a handle backed by an explicit
/// RenderTargetIdentifier), the identifiers are compared instead.
/// </summary>
public bool Equals(RenderTargetHandle other)
{
    bool eitherIsIdentifierBased = id == -2 || other.id == -2;
    return eitherIsIdentifierBased
        ? Identifier() == other.Identifier()
        : id == other.id;
}
/// <summary>
/// Enqueues the depth-copy and opaque-color-copy passes for this renderer.
/// </summary>
public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
{
    // Copy the camera depth attachment into the readable depth texture.
    m_CopyDepthPass.Setup(m_CameraDepthAttachment, m_DepthTexture);
    renderer.EnqueuePass(m_CopyDepthPass);

    // Copy the camera color attachment into the opaque color texture, using the
    // downsampling mode configured on the active pipeline asset.
    Downsampling downsampling = UniversalRenderPipeline.asset.opaqueDownsampling;
    m_CopyColorPass.Setup(m_CameraColorAttachment.Identifier(), m_ColorTexture, downsampling);
    renderer.EnqueuePass(m_CopyColorPass);
}
/// <summary>
/// Grabs the current screen, downsamples it, runs the blur chain, and publishes
/// the result as the global texture "_GrabBlurTexture".
/// </summary>
public override void Execute(ScriptableRenderContext context, ref UnityEngine.Rendering.Universal.RenderingData renderingData)
{
    CommandBuffer buffer = CommandBufferPool.Get(k_RenderGrabPassTag);
    using (new ProfilingSample(buffer, k_RenderGrabPassTag))
    {
        RenderTargetIdentifier screenCopy = m_ScreenCopyId.Identifier();

        // Grab the current screen contents into a full-resolution copy.
        Blit(buffer, m_ColorSource, screenCopy);
        // Downsample the copy into the first blur target.
        Blit(buffer, screenCopy, m_BlurTemp1.Identifier());
        // Record the blur chain, ping-ponging between the two temp targets.
        m_Blur.SetupCommandBuffer(buffer, m_BlurTemp1.id, m_BlurTemp2.id);
        // Publish the blurred result so shaders can sample it by name later.
        buffer.SetGlobalTexture("_GrabBlurTexture", m_BlurTemp1.id);
    }
    context.ExecuteCommandBuffer(buffer);
    CommandBufferPool.Release(buffer);
}
/// <inheritdoc/>
public override void Execute(ScriptableRenderContext context, ref UnityEngine.Rendering.Universal.RenderingData renderingData)
{
    CommandBuffer cmd = CommandBufferPool.Get(m_ProfilerTag);

    // Color-only descriptor matching the camera target.
    RenderTextureDescriptor opaqueDesc = renderingData.cameraData.cameraTargetDescriptor;
    opaqueDesc.depthBufferBits = 0;

    // Can't read and write to same color target, create a temp render target to blit.
    if (destination == UnityEngine.Rendering.Universal.RenderTargetHandle.CameraTarget)
    {
        cmd.GetTemporaryRT(m_TemporaryColorTexture.id, opaqueDesc, filterMode);
        Blit(cmd, source, m_TemporaryColorTexture.Identifier(), blitMaterial, blitShaderPassIndex);
        Blit(cmd, m_TemporaryColorTexture.Identifier(), source);
        // Fix: release the temporary RT once its contents have been copied back,
        // instead of relying on Unity's implicit end-of-camera cleanup.
        cmd.ReleaseTemporaryRT(m_TemporaryColorTexture.id);
    }
    else
    {
        Blit(cmd, source, destination.Identifier(), blitMaterial, blitShaderPassIndex);
    }

    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
// Here you can implement the rendering logic.
// Use <c>ScriptableRenderContext</c> to issue drawing commands or execute command buffers
// https://docs.unity3d.com/ScriptReference/Rendering.ScriptableRenderContext.html
// You don't have to call ScriptableRenderContext.submit, the render pipeline will call it at specific points in the pipeline.
public override void Execute(ScriptableRenderContext context, ref UnityEngine.Rendering.Universal.RenderingData renderingData)
{
    CommandBuffer cmd = CommandBufferPool.Get("WorldSpaceRipple Pass");

    // Color-only descriptor matching the camera target.
    RenderTextureDescriptor opaqueDescriptor = renderingData.cameraData.cameraTargetDescriptor;
    opaqueDescriptor.depthBufferBits = 0;

    if (destination == UnityEngine.Rendering.Universal.RenderTargetHandle.CameraTarget)
    {
        // Can't read and write the same color target: apply the ripple material into a
        // temporary RT, then copy the result back to the source.
        cmd.GetTemporaryRT(temporaryColorTexture.id, opaqueDescriptor, FilterMode.Point);
        Blit(cmd, source, temporaryColorTexture.Identifier(), rippleMaterial, 0);
        Blit(cmd, temporaryColorTexture.Identifier(), source);
        // Fix: release the temporary RT once its contents have been copied back.
        cmd.ReleaseTemporaryRT(temporaryColorTexture.id);
    }
    else
    {
        Blit(cmd, source, destination.Identifier(), rippleMaterial, 0);
    }

    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
/// <inheritdoc />
/// <summary>
/// Renders into MRT outputs, copies each output into its own viewport of the camera
/// color target, then blits to the backbuffer. NOTE: m_Viewport is a reused field that
/// is mutated between the two CopyToViewport setups — statement order matters here.
/// </summary>
public override void Setup(ScriptableRenderContext context, ref RenderingData renderingData)
{
    CommandBuffer cmd = CommandBufferPool.Get(m_profilerTag);

    // Allocate intermediate color + 16-bit depth targets at the camera's resolution.
    int width = renderingData.cameraData.cameraTargetDescriptor.width;
    int height = renderingData.cameraData.cameraTargetDescriptor.height;
    cmd.GetTemporaryRT(m_CameraColor.id, width, height);
    cmd.GetTemporaryRT(m_CameraDepth.id, width, height, 16);
    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);

    ConfigureCameraTarget(m_CameraColor.Identifier(), m_CameraDepth.Identifier());

    // 1) Render different colors to the MRT outputs (render a blue quad to output#0 and a red quad to output#1)
    //m_ColorToMrtOutputs[0] = m_CameraColor;
    m_ColorsToMrtsPass.Setup(ref renderingData, cameraColorTarget, m_ColorToMrtOutputs);
    EnqueuePass(m_ColorsToMrtsPass);
    // Notice that the renderPass clearColor (yellow) is applied.

    // 2) Copy results to the camera target
    // layout (margin/blit/margin/..)
    // x: <-0.04-><-0.44-><-0.04-><-0.44-><-0.04->
    // y: <-0.25-><-0.50-><-0.25->

    // Left viewport: fractions of the camera size per the layout above.
    m_Viewport.x = 0.04f * width;
    m_Viewport.width = 0.44f * width;
    m_Viewport.y = 0.25f * height;
    m_Viewport.height = 0.50f * height;
    m_CopyToViewportPasses[0].Setup(m_ColorToMrtOutputs[0].Identifier(), m_CameraColor, m_Viewport);
    EnqueuePass(m_CopyToViewportPasses[0]);

    // Right viewport: only x shifts; width/y/height carry over from the assignment above.
    m_Viewport.x = (0.04f + 0.44f + 0.04f) * width;
    m_CopyToViewportPasses[1].Setup(m_ColorToMrtOutputs[1].Identifier(), m_CameraColor, m_Viewport);
    EnqueuePass(m_CopyToViewportPasses[1]);

    // 3) Final blit to the backbuffer
    m_FinalBlitPass.Setup(renderingData.cameraData.cameraTargetDescriptor, m_CameraColor);
    EnqueuePass(m_FinalBlitPass);
}
/// <summary>
/// Copies the screen, downsamples to half resolution, applies two separable blur
/// iterations, and publishes the result as the global texture "_GrabBlurTexture".
/// </summary>
public override void Execute(ScriptableRenderContext context, ref UnityEngine.Rendering.Universal.RenderingData renderingData)
{
    CommandBuffer buf = CommandBufferPool.Get();
    using (new ProfilingScope(buf, m_ProfilingSampler))
    {
        // Fix: work on a LOCAL copy of the descriptor. The original code halved
        // m_OpaqueDesc.width/height in place, so the cached field shrank further
        // on every frame this pass executed.
        RenderTextureDescriptor desc = m_OpaqueDesc;

        // copy screen into temporary RT
        int screenCopyID = Shader.PropertyToID("_ScreenCopyTexture");
        buf.GetTemporaryRT(screenCopyID, desc, FilterMode.Bilinear);
        buf.Blit(m_ColorHandle.Identifier(), screenCopyID);

        // blur at half resolution
        desc.width /= 2;
        desc.height /= 2;

        // get two smaller RTs
        int blurredID = Shader.PropertyToID("_BlurRT1");
        int blurredID2 = Shader.PropertyToID("_BlurRT2");
        buf.GetTemporaryRT(blurredID, desc, FilterMode.Bilinear);
        buf.GetTemporaryRT(blurredID2, desc, FilterMode.Bilinear);

        // downsample screen copy into smaller RT, release screen RT
        buf.Blit(screenCopyID, blurredID);
        buf.ReleaseTemporaryRT(screenCopyID);

        // Two horizontal+vertical blur iterations; the second widens the offsets x2.
        buf.SetGlobalVector("offsets", new Vector4(m_BlurAmount.x / Screen.width, 0, 0, 0));
        buf.Blit(blurredID, blurredID2, m_BlurMaterial);
        buf.SetGlobalVector("offsets", new Vector4(0, m_BlurAmount.y / Screen.height, 0, 0));
        buf.Blit(blurredID2, blurredID, m_BlurMaterial);
        buf.SetGlobalVector("offsets", new Vector4(m_BlurAmount.x * 2 / Screen.width, 0, 0, 0));
        buf.Blit(blurredID, blurredID2, m_BlurMaterial);
        buf.SetGlobalVector("offsets", new Vector4(0, m_BlurAmount.y * 2 / Screen.height, 0, 0));
        buf.Blit(blurredID2, blurredID, m_BlurMaterial);

        // Set Texture for Shader Graph; _BlurRT2 is no longer needed after the last blit.
        buf.SetGlobalTexture("_GrabBlurTexture", blurredID);
        buf.ReleaseTemporaryRT(blurredID2);
    }
    context.ExecuteCommandBuffer(buf);
    CommandBufferPool.Release(buf);
}
/// <inheritdoc />
/// <summary>
/// Allocates intermediate color/depth targets, then enqueues depth pre-pass,
/// forward opaque, and a final blit to the backbuffer.
/// </summary>
public override void Setup(ScriptableRenderContext context, ref RenderingData renderingData)
{
    CommandBuffer cmd = CommandBufferPool.Get(m_profilerTag);

    // Generalized: size the intermediate targets from the camera descriptor instead
    // of the previous hard-coded 1280x720, matching the other Setup in this file.
    int width = renderingData.cameraData.cameraTargetDescriptor.width;
    int height = renderingData.cameraData.cameraTargetDescriptor.height;
    cmd.GetTemporaryRT(m_CameraColor.id, width, height);
    cmd.GetTemporaryRT(m_CameraDepth.id, width, height, 16);
    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);

    ConfigureCameraTarget(m_CameraColor.Identifier(), m_CameraDepth.Identifier());

    // 1) Depth pre-pass
    m_DepthPrepass.Setup(renderingData.cameraData.cameraTargetDescriptor, m_CameraDepth);
    EnqueuePass(m_DepthPrepass);

    // 2) Forward opaque
    EnqueuePass(m_RenderOpaqueForwardPass); // will render to renderingData.cameraData.camera

    // 3) Final blit to the backbuffer
    m_FinalBlitPass.Setup(renderingData.cameraData.cameraTargetDescriptor, m_CameraColor);
    EnqueuePass(m_FinalBlitPass);
}
/// <summary>
/// Set RenderBuffer for camera: decides whether a depth pre-pass is needed and
/// whether intermediate color/depth textures must be created, then configures the
/// active camera color/depth attachments accordingly.
/// </summary>
/// <param name="requiresDepthPrepass">Set true when depth must be rendered in a dedicated pre-pass.</param>
/// <param name="createDepthTexture">Set true when an intermediate depth texture must be created.</param>
private void RefreshRenderBufferForSingleCamera(ScriptableRenderContext context, ref RenderingData renderingData,
    ref CameraData cameraData, out bool requiresDepthPrepass, out bool createDepthTexture)
{
    Camera camera = renderingData.cameraData.camera;
    RenderTextureDescriptor cameraTargetDescriptor = renderingData.cameraData.cameraTargetDescriptor;
    bool applyPostProcessing = cameraData.postProcessEnabled;
    bool isSceneViewCamera = cameraData.isSceneViewCamera;
    bool isPreviewCamera = cameraData.isPreviewCamera;
    bool requiresDepthTexture = cameraData.requiresDepthTexture;
    bool isStereoEnabled = cameraData.isStereoEnabled;

    // Depth prepass is generated in the following cases:
    // - If game or offscreen camera requires it we check if we can copy the depth from the rendering opaques pass and use that instead.
    // - Scene or preview cameras always require a depth texture. We do a depth pre-pass to simplify it and it shouldn't matter much for editor.
    requiresDepthPrepass = requiresDepthTexture && !CanCopyDepth(ref renderingData.cameraData);
    requiresDepthPrepass |= isSceneViewCamera;
    requiresDepthPrepass |= isPreviewCamera;

    // The copying of depth should normally happen after rendering opaques.
    // But if we only require it for post processing or the scene camera then we do it after rendering transparent objects
    m_CopyDepthPass.renderPassEvent = (!requiresDepthTexture && (applyPostProcessing || isSceneViewCamera))
        ? RenderPassEvent.AfterRenderingTransparents
        : RenderPassEvent.AfterRenderingOpaques;

    // TODO: CopyDepth pass is disabled in XR due to required work to handle camera matrices in URP.
    // IF this condition is removed make sure the CopyDepthPass.cs is working properly on all XR modes. This requires PureXR SDK integration.
    if (isStereoEnabled && requiresDepthTexture)
    {
        requiresDepthPrepass = true;
    }

    bool isRunningHololens = false;
#if ENABLE_VR && ENABLE_VR_MODULE
    isRunningHololens = UniversalRenderPipeline.IsRunningHololens(camera);
#endif
    // Intermediate color texture is needed either by the pipeline itself or by any
    // renderer feature (except on HoloLens); preview cameras never get one.
    bool createColorTexture = RequiresIntermediateColorTexture(ref cameraData);
    createColorTexture |= (rendererFeatures.Count != 0 && !isRunningHololens);
    createColorTexture &= !isPreviewCamera;

    // If camera requires depth and there's no depth pre-pass we create a depth texture that can be read later by effect requiring it.
    createDepthTexture = cameraData.requiresDepthTexture && !requiresDepthPrepass;
    // A base camera that doesn't resolve to the final target also needs the depth texture.
    createDepthTexture |= (cameraData.renderType == CameraRenderType.Base && !cameraData.resolveFinalTarget);

#if UNITY_ANDROID || UNITY_WEBGL
    if (SystemInfo.graphicsDeviceType != GraphicsDeviceType.Vulkan)
    {
        // GLES can not use render texture's depth buffer with the color buffer of the backbuffer
        // in such case we create a color texture for it too.
        createColorTexture |= createDepthTexture;
    }
#endif

    // Configure all settings require to start a new camera stack (base camera only)
    if (cameraData.renderType == CameraRenderType.Base)
    {
        m_ActiveCameraColorAttachment = (createColorTexture) ? m_CameraColorAttachment : RenderTargetHandle.CameraTarget;
        m_ActiveCameraDepthAttachment = (createDepthTexture) ? m_CameraDepthAttachment : RenderTargetHandle.CameraTarget;

        bool intermediateRenderTexture = createColorTexture || createDepthTexture;

        // Doesn't create texture for Overlay cameras as they are already overlaying on top of created textures.
        bool createTextures = intermediateRenderTexture;
        if (createTextures)
        {
            CreateCameraRenderTarget(context, ref renderingData.cameraData);
        }

        // if rendering to intermediate render texture we don't have to create msaa backbuffer
        int backbufferMsaaSamples = (intermediateRenderTexture) ? 1 : cameraTargetDescriptor.msaaSamples;

        // Only the main game camera rendering directly to the screen configures the backbuffer format.
        if (Camera.main == camera && camera.cameraType == CameraType.Game && cameraData.targetTexture == null)
        {
            SetupBackbufferFormat(backbufferMsaaSamples, isStereoEnabled);
        }
    }
    else
    {
        // Overlay camera: either refresh the color attachment (split UI/scene renderer
        // mode) or reuse the base camera's attachments directly.
        if (m_SplitUICameraAndSceneCameraRenderer)
        {
            RefreshCameraColorAttachment(context, ref renderingData.cameraData);
        }
        else
        {
            m_ActiveCameraColorAttachment = m_CameraColorAttachment;
            m_ActiveCameraDepthAttachment = m_CameraDepthAttachment;
        }
    }

    ConfigureCameraTarget(m_ActiveCameraColorAttachment.Identifier(), m_ActiveCameraDepthAttachment.Identifier());
}