// Renders 2D shadow geometry for a single Light2D into the renderer's temporary shadow
// render target, then restores the caller's color/depth targets.
// NOTE(review): the global intensity uniforms come from light.shadowIntensity /
// light.shadowVolumeIntensity, while the early-out tests the shadowIntensity parameter —
// presumably callers pass light.shadowIntensity here; confirm at the call sites.
public static void RenderShadows(IRenderPass2D pass, RenderingData renderingData, CommandBuffer cmdBuffer, int layerToRender, Light2D light, float shadowIntensity, RenderTargetIdentifier renderTexture, RenderTargetIdentifier depthTexture)
{
    // Shaders consume "1 - intensity": an intensity of 1 makes shadowed areas fully dark.
    cmdBuffer.SetGlobalFloat(k_ShadowIntensityID, 1 - light.shadowIntensity);
    cmdBuffer.SetGlobalFloat(k_ShadowVolumeIntensityID, 1 - light.shadowVolumeIntensity);
    if (shadowIntensity > 0)
    {
        // Allocate and bind the temporary shadow target for this light's blend style.
        CreateShadowRenderTexture(pass, renderingData, cmdBuffer, light.blendStyleIndex);
        cmdBuffer.SetRenderTarget(pass.rendererData.shadowsRenderTarget.Identifier(), RenderBufferLoadAction.Load, RenderBufferStoreAction.Store, RenderBufferLoadAction.DontCare, RenderBufferStoreAction.DontCare);
        cmdBuffer.ClearRenderTarget(true, true, Color.black);

        // 1.42 ~= sqrt(2): pad the bounding-sphere radius so projected shadow geometry
        // still covers the sphere's diagonal extents.
        var shadowRadius = 1.42f * light.boundingSphere.radius;

        cmdBuffer.SetGlobalVector(k_LightPosID, light.transform.position);
        cmdBuffer.SetGlobalFloat(k_ShadowRadiusID, shadowRadius);

        var shadowMaterial = pass.rendererData.GetShadowMaterial(1);
        var removeSelfShadowMaterial = pass.rendererData.GetRemoveSelfShadowMaterial(1);
        var shadowCasterGroups = ShadowCasterGroup2DManager.shadowCasterGroups;
        if (shadowCasterGroups != null && shadowCasterGroups.Count > 0)
        {
            var previousShadowGroupIndex = -1;
            var incrementingGroupIndex = 0;
            for (var group = 0; group < shadowCasterGroups.Count; group++)
            {
                var shadowCasterGroup = shadowCasterGroups[group];
                var shadowCasters = shadowCasterGroup.GetShadowCasters();
                var shadowGroupIndex = shadowCasterGroup.GetShadowGroup();
                // Each distinct shadow group gets its own material instance; group
                // index 0 always advances to a new material.
                if (LightUtility.CheckForChange(shadowGroupIndex, ref previousShadowGroupIndex) || shadowGroupIndex == 0)
                {
                    incrementingGroupIndex++;
                    shadowMaterial = pass.rendererData.GetShadowMaterial(incrementingGroupIndex);
                    removeSelfShadowMaterial = pass.rendererData.GetRemoveSelfShadowMaterial(incrementingGroupIndex);
                }
                if (shadowCasters != null)
                {
                    // Draw the shadow casting group first, then draw the silhouettes.
                    for (var i = 0; i < shadowCasters.Count; i++)
                    {
                        var shadowCaster = shadowCasters[i];
                        if (shadowCaster != null && shadowMaterial != null && shadowCaster.IsShadowedLayer(layerToRender))
                        {
                            if (shadowCaster.castsShadows)
                            {
                                cmdBuffer.DrawMesh(shadowCaster.mesh, shadowCaster.transform.localToWorldMatrix, shadowMaterial);
                            }
                        }
                    }
                    // Second pass: remove (or keep) each caster's own silhouette depending
                    // on selfShadows / useRendererSilhouette.
                    for (var i = 0; i < shadowCasters.Count; i++)
                    {
                        var shadowCaster = shadowCasters[i];
                        if (shadowCaster != null && shadowMaterial != null && shadowCaster.IsShadowedLayer(layerToRender))
                        {
                            if (shadowCaster.useRendererSilhouette)
                            {
                                // Use the attached Renderer's geometry for the silhouette.
                                var renderer = shadowCaster.GetComponent<Renderer>();
                                if (renderer != null)
                                {
                                    if (!shadowCaster.selfShadows)
                                    {
                                        cmdBuffer.DrawRenderer(renderer, removeSelfShadowMaterial);
                                    }
                                    else
                                    {
                                        cmdBuffer.DrawRenderer(renderer, shadowMaterial, 0, 1);
                                    }
                                }
                            }
                            else
                            {
                                if (!shadowCaster.selfShadows)
                                {
                                    var meshMat = shadowCaster.transform.localToWorldMatrix;
                                    cmdBuffer.DrawMesh(shadowCaster.mesh, meshMat, removeSelfShadowMaterial);
                                }
                            }
                        }
                    }
                }
            }
        }
        // Done rendering into the shadow target: release it and restore the caller's targets.
        cmdBuffer.ReleaseTemporaryRT(pass.rendererData.shadowsRenderTarget.id);
        cmdBuffer.SetRenderTarget(renderTexture, depthTexture);
    }
}
/// <summary>Per-frame renderer setup: delegates to the light setup routine.</summary>
public override void Setup(ScriptableRenderContext context, ref RenderingData renderingData) => SetupLights(context, ref renderingData);
bool RequiresIntermediateColorTexture(ref RenderingData renderingData, RenderTextureDescriptor baseDescriptor) { ref CameraData cameraData = ref renderingData.cameraData;
/// <inheritdoc/>
/// <summary>
/// Copies the camera depth attachment into the destination depth texture, selecting the
/// matching MSAA resolve keyword and handling y-flip for both the XR and non-XR paths.
/// </summary>
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    if (m_CopyDepthMaterial == null)
    {
        // Fix: the original passed the null material itself as {0}, which formatted as an
        // empty string. Name the missing resource so the error message is actionable.
        Debug.LogErrorFormat("Missing {0}. {1} render pass will not execute. Check for missing reference in the renderer resources.", nameof(m_CopyDepthMaterial), GetType().Name);
        return;
    }
    CommandBuffer cmd = CommandBufferPool.Get();
    using (new ProfilingScope(cmd, ProfilingSampler.Get(URPProfileId.CopyDepth)))
    {
        // Resolve the sample count: -1 means "use the camera target's own MSAA count".
        int cameraSamples = 0;
        if (MssaSamples == -1)
        {
            RenderTextureDescriptor descriptor = renderingData.cameraData.cameraTargetDescriptor;
            cameraSamples = descriptor.msaaSamples;
        }
        else
        {
            cameraSamples = MssaSamples;
        }
        CameraData cameraData = renderingData.cameraData;
        // Enable exactly one DepthMsaaN keyword (or none when MSAA is disabled) so the
        // copy shader resolves the correct number of samples.
        switch (cameraSamples)
        {
            case 8:
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa2);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa4);
                cmd.EnableShaderKeyword(ShaderKeywordStrings.DepthMsaa8);
                break;
            case 4:
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa2);
                cmd.EnableShaderKeyword(ShaderKeywordStrings.DepthMsaa4);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa8);
                break;
            case 2:
                cmd.EnableShaderKeyword(ShaderKeywordStrings.DepthMsaa2);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa4);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa8);
                break;
            // MSAA disabled
            default:
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa2);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa4);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa8);
                break;
        }
        cmd.SetGlobalTexture("_CameraDepthAttachment", source.Identifier());
#if ENABLE_VR && ENABLE_XR_MODULE
        // XR uses procedural draw instead of cmd.blit or cmd.DrawFullScreenMesh
        if (renderingData.cameraData.xr.enabled)
        {
            // XR flip logic is not the same as non-XR case because XR uses draw procedure
            // and draw procedure does not need to take projection matrix yflip into account
            // We y-flip if
            // 1) we are blitting from render texture to back buffer and
            // 2) renderTexture starts UV at top
            // XRTODO: handle scalebias and scalebiasRt for src and dst separately
            bool isRenderToBackBufferTarget = destination.Identifier() == cameraData.xr.renderTarget && !cameraData.xr.renderTargetIsRenderTexture;
            bool yflip = isRenderToBackBufferTarget && SystemInfo.graphicsUVStartsAtTop;
            float flipSign = (yflip) ? -1.0f : 1.0f;
            Vector4 scaleBiasRt = (flipSign < 0.0f) ? new Vector4(flipSign, 1.0f, -1.0f, 1.0f) : new Vector4(flipSign, 0.0f, 1.0f, 1.0f);
            cmd.SetGlobalVector(ShaderPropertyId.scaleBiasRt, scaleBiasRt);
            cmd.DrawProcedural(Matrix4x4.identity, m_CopyDepthMaterial, 0, MeshTopology.Quads, 4);
        }
        else
#endif
        {
            // Blit has logic to flip projection matrix when rendering to render texture.
            // Currently the y-flip is handled in CopyDepthPass.hlsl by checking _ProjectionParams.x
            // If you replace this Blit with a Draw* that sets projection matrix double check
            // to also update shader.
            // scaleBias.x = flipSign
            // scaleBias.y = scale
            // scaleBias.z = bias
            // scaleBias.w = unused
            // In game view final target acts as back buffer where target is not flipped
            bool isGameViewFinalTarget = (cameraData.cameraType == CameraType.Game && destination == RenderTargetHandle.CameraTarget);
            bool yflip = (cameraData.IsCameraProjectionMatrixFlipped()) && !isGameViewFinalTarget;
            float flipSign = yflip ? -1.0f : 1.0f;
            Vector4 scaleBiasRt = (flipSign < 0.0f) ? new Vector4(flipSign, 1.0f, -1.0f, 1.0f) : new Vector4(flipSign, 0.0f, 1.0f, 1.0f);
            cmd.SetGlobalVector(ShaderPropertyId.scaleBiasRt, scaleBiasRt);
            cmd.DrawMesh(RenderingUtils.fullscreenMesh, Matrix4x4.identity, m_CopyDepthMaterial);
        }
    }
    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
// Executes the volumetric lighting pass: ray-march the light shafts (pass 0), blur the
// result with a dual-Kawase down/up-sample chain (passes 1/2), down-sample depth (pass 3),
// then depth-aware upsample + composite back onto the camera target (pass 4).
// Use <c>ScriptableRenderContext</c> to issue drawing commands or execute command buffers.
// https://docs.unity3d.com/ScriptReference/Rendering.ScriptableRenderContext.html
// You don't have to call ScriptableRenderContext.submit; the render pipeline calls it.
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    // Both a valid material and a camera depth texture are required.
    if (!volumetricLightMaterial || !renderingData.cameraData.requiresDepthTexture)
    {
        return;
    }
    CommandBuffer cmd = CommandBufferPool.Get(k_RenderTag);
    using (new ProfilingScope(cmd, new ProfilingSampler(k_RenderTag)))
    {
        context.ExecuteCommandBuffer(cmd);
        cmd.Clear();
        try
        {
            // Push the volume/pass settings to the material each frame so live tweaks apply.
            volumetricLightMaterial.SetFloat("_Scattering", passSetting.scatter);
            volumetricLightMaterial.SetFloat("_Steps", passSetting.step);
            volumetricLightMaterial.SetFloat("_MaxDistance", passSetting.maxDistance);
            volumetricLightMaterial.SetFloat("_JitterVolumetric", passSetting.jitter);
            volumetricLightMaterial.SetFloat("_Intensity", passSetting.intensity);
            volumetricLightMaterial.SetFloat("_Extinction", passSetting.extinction * passSetting.extinction);
            volumetricLightMaterial.SetFloat("_Absorbtion", passSetting.absorbtion);
            volumetricLightMaterial.SetFloat("_Offset", passSetting.dualKawase.offset);

            // Ray marching into the volumetric target.
            cmd.Blit(currentTarget, volumetricHandle.Identifier(), volumetricLightMaterial, 0);

            // Dual-Kawase blur. Fix: also guard against a zero-length chain — the final
            // Blit below indexes dualKawaseIds[0] and would throw on an empty array.
            if (dualKawaseIds != null && dualKawaseIds.Length > 0)
            {
                RenderTargetIdentifier cur = volumetricHandle.Identifier();
                // Down-sample chain (shader pass 1).
                for (int i = 0; i < dualKawaseIds.Length; ++i)
                {
                    cmd.Blit(cur, dualKawaseIds[i], volumetricLightMaterial, 1);
                    cur = dualKawaseIds[i];
                }
                // Up-sample chain (shader pass 2), ending back in the volumetric target.
                for (int i = dualKawaseIds.Length - 2; 0 <= i; --i)
                {
                    cmd.Blit(cur, dualKawaseIds[i], volumetricLightMaterial, 2);
                    cur = dualKawaseIds[i];
                }
                cmd.Blit(dualKawaseIds[0], volumetricHandle.Identifier(), volumetricLightMaterial, 2);
            }
            cmd.SetGlobalTexture(volumetricHandle.id, volumetricHandle.Identifier());

            // Down-sample depth for the depth-aware upsample.
            cmd.Blit(currentTarget, lowResDepthHandle.Identifier(), volumetricLightMaterial, 3);
            cmd.SetGlobalTexture(lowResDepthHandle.id, lowResDepthHandle.Identifier());

            // Upsample and composite, then copy the result back to the camera target.
            cmd.Blit(currentTarget, compositingHandle.Identifier(), volumetricLightMaterial, 4);
            cmd.Blit(compositingHandle.Identifier(), currentTarget);
        }
        catch (System.Exception e)
        {
            // Fix: the original empty catch swallowed every failure silently. Keep the
            // pass best-effort (no rethrow mid-frame) but surface the error.
            Debug.LogException(e);
        }
    }
    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
// Prepares the additional-lights shadow atlas for this frame: collects valid shadow
// casting lights, extracts their shadow matrices, then packs one slice per light into
// the atlas and bakes each slice's scale/offset into its world-to-shadow matrix.
// Returns false when there is nothing to render into the atlas.
public bool Setup(ref RenderingData renderingData)
{
    if (!renderingData.shadowData.supportsAdditionalLightShadows)
    {
        return(false);
    }
    Clear();
    m_ShadowmapWidth = renderingData.shadowData.additionalLightsShadowmapWidth;
    m_ShadowmapHeight = renderingData.shadowData.additionalLightsShadowmapHeight;
    var visibleLights = renderingData.lightData.visibleLights;
    int additionalLightsCount = renderingData.lightData.additionalLightsCount;
    // Grow (never shrink) the per-light scratch arrays.
    if (m_AdditionalLightSlices == null || m_AdditionalLightSlices.Length < additionalLightsCount)
    {
        m_AdditionalLightSlices = new ShadowSliceData[additionalLightsCount];
    }
    if (m_AdditionalLightsShadowData == null || m_AdditionalLightsShadowData.Length < additionalLightsCount)
    {
        m_AdditionalLightsShadowData = new ShaderInput.ShadowData[additionalLightsCount];
    }
    int validShadowCastingLights = 0;
    bool supportsSoftShadows = renderingData.shadowData.supportsSoftShadows;
    for (int i = 0; i < visibleLights.Length && m_AdditionalShadowCastingLightIndices.Count < additionalLightsCount; ++i)
    {
        VisibleLight shadowLight = visibleLights[i];
        // Skip all directional lights as they are not baked into the additional
        // shadowmap atlas.
        if (shadowLight.lightType == LightType.Directional)
        {
            continue;
        }
        int shadowCastingLightIndex = m_AdditionalShadowCastingLightIndices.Count;
        bool isValidShadowSlice = false;
        if (IsValidShadowCastingLight(ref renderingData.lightData, i))
        {
            if (renderingData.cullResults.GetShadowCasterBounds(i, out var bounds))
            {
                // NOTE(review): only spot-light matrices are extracted here — point
                // lights appear not to be handled by this pass; confirm upstream.
                bool success = ShadowUtils.ExtractSpotLightMatrix(ref renderingData.cullResults, ref renderingData.shadowData, i, out var shadowTransform, out m_AdditionalLightSlices[shadowCastingLightIndex].viewMatrix, out m_AdditionalLightSlices[shadowCastingLightIndex].projectionMatrix);
                if (success)
                {
                    m_AdditionalShadowCastingLightIndices.Add(i);
                    var light = shadowLight.light;
                    float shadowStrength = light.shadowStrength;
                    // shadowParams: x = shadow strength, y = 1 when soft shadows apply.
                    float softShadows = (supportsSoftShadows && light.shadows == LightShadows.Soft) ? 1.0f : 0.0f;
                    Vector4 shadowParams = new Vector4(shadowStrength, softShadows, 0.0f, 0.0f);
                    if (m_UseStructuredBuffer)
                    {
                        m_AdditionalLightsShadowData[shadowCastingLightIndex].worldToShadowMatrix = shadowTransform;
                        m_AdditionalLightsShadowData[shadowCastingLightIndex].shadowParams = shadowParams;
                    }
                    else
                    {
                        m_AdditionalLightsWorldToShadow[shadowCastingLightIndex] = shadowTransform;
                        m_AdditionalLightsShadowParams[shadowCastingLightIndex] = shadowParams;
                    }
                    isValidShadowSlice = true;
                    validShadowCastingLights++;
                }
            }
        }
        if (m_UseStructuredBuffer)
        {
            // When using StructuredBuffers all the valid shadow casting slices data
            // are stored in the ShadowData buffer and then we setup an index map to
            // map from light indices to shadow buffer index. An index map of -1 means
            // the light is not a valid shadow casting light and there's no data for it
            // in the shadow buffer.
            int indexMap = (isValidShadowSlice) ? shadowCastingLightIndex : -1;
            m_AdditionalShadowCastingLightIndicesMap.Add(indexMap);
        }
        else if (!isValidShadowSlice)
        {
            // When NOT using structured buffers we have no performant way to sample the
            // index map as int[]. Unity shader compiler converts int[] to float4[] to force memory alignment.
            // This makes indexing int[] arrays very slow. So, in order to avoid indexing shadow lights we
            // setup slice data and reserve shadow map space even for invalid shadow slices.
            // The data is setup with zero shadow strength. This has the same visual effect of no shadow
            // attenuation contribution from this light.
            // This makes sampling shadow faster but introduces waste in shadow map atlas.
            // The waste increases with the amount of additional lights to shade.
            // Therefore Universal RP try to keep the limit at sane levels when using uniform buffers.
            Matrix4x4 identity = Matrix4x4.identity;
            m_AdditionalShadowCastingLightIndices.Add(i);
            m_AdditionalLightsWorldToShadow[shadowCastingLightIndex] = identity;
            m_AdditionalLightsShadowParams[shadowCastingLightIndex] = Vector4.zero;
            m_AdditionalLightSlices[shadowCastingLightIndex].viewMatrix = identity;
            m_AdditionalLightSlices[shadowCastingLightIndex].projectionMatrix = identity;
        }
    }
    // Lights that need to be rendered in the shadow map atlas
    if (validShadowCastingLights == 0)
    {
        return(false);
    }
    int atlasWidth = renderingData.shadowData.additionalLightsShadowmapWidth;
    int atlasHeight = renderingData.shadowData.additionalLightsShadowmapHeight;
    int sliceResolution = ShadowUtils.GetMaxTileResolutionInAtlas(atlasWidth, atlasHeight, validShadowCastingLights);
    // In the UI we only allow for square shadow map atlas. Here we check if we can fit
    // all shadow slices into half resolution of the atlas and adjust height to have tighter packing.
    int maximumSlices = (m_ShadowmapWidth / sliceResolution) * (m_ShadowmapHeight / sliceResolution);
    if (validShadowCastingLights <= (maximumSlices / 2))
    {
        m_ShadowmapHeight /= 2;
    }
    int shadowSlicesPerRow = (atlasWidth / sliceResolution);
    float oneOverAtlasWidth = 1.0f / m_ShadowmapWidth;
    float oneOverAtlasHeight = 1.0f / m_ShadowmapHeight;
    int sliceIndex = 0;
    int shadowCastingLightsBufferCount = m_AdditionalShadowCastingLightIndices.Count;
    // Bake the per-slice scale into the transform once; only the offsets (m03/m13)
    // change per slice in the loop below.
    Matrix4x4 sliceTransform = Matrix4x4.identity;
    sliceTransform.m00 = sliceResolution * oneOverAtlasWidth;
    sliceTransform.m11 = sliceResolution * oneOverAtlasHeight;
    for (int i = 0; i < shadowCastingLightsBufferCount; ++i)
    {
        // we can skip the slice if strength is zero. Some slices with zero
        // strength exist when using the uniform array path.
        if (!m_UseStructuredBuffer && Mathf.Approximately(m_AdditionalLightsShadowParams[i].x, 0.0f))
        {
            continue;
        }
        m_AdditionalLightSlices[i].offsetX = (sliceIndex % shadowSlicesPerRow) * sliceResolution;
        m_AdditionalLightSlices[i].offsetY = (sliceIndex / shadowSlicesPerRow) * sliceResolution;
        m_AdditionalLightSlices[i].resolution = sliceResolution;
        sliceTransform.m03 = m_AdditionalLightSlices[i].offsetX * oneOverAtlasWidth;
        sliceTransform.m13 = m_AdditionalLightSlices[i].offsetY * oneOverAtlasHeight;
        // We bake scale and bias to each shadow map in the atlas in the matrix.
        // saves some instructions in shader.
        if (m_UseStructuredBuffer)
        {
            m_AdditionalLightsShadowData[i].worldToShadowMatrix = sliceTransform * m_AdditionalLightsShadowData[i].worldToShadowMatrix;
        }
        else
        {
            m_AdditionalLightsWorldToShadow[i] = sliceTransform * m_AdditionalLightsWorldToShadow[i];
        }
        sliceIndex++;
    }
    return(true);
}
/// <summary>Injects the configured render-objects pass into the renderer's queue each frame.</summary>
public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData) => renderer.EnqueuePass(renderObjectsPass);
// Builds the frame's render-pass queue for the forward renderer: shadow passes, optional
// depth prepass, opaque/transparent passes, post processing, the user-injected "after X"
// passes, and the final blit — enqueued in frame execution order.
public void Setup(ScriptableRenderer renderer, ref RenderingData renderingData)
{
    Init();
    Camera camera = renderingData.cameraData.camera;
    renderer.SetupPerObjectLightIndices(ref renderingData.cullResults, ref renderingData.lightData);
    RenderTextureDescriptor baseDescriptor = ScriptableRenderer.CreateRenderTextureDescriptor(ref renderingData.cameraData);
    // NOTE(review): shadowDescriptor is prepared but not used later in this method.
    RenderTextureDescriptor shadowDescriptor = baseDescriptor;
    shadowDescriptor.dimension = TextureDimension.Tex2D;

    // Shadow passes run first so their results are available to the forward passes.
    bool mainLightShadows = false;
    if (renderingData.shadowData.supportsMainLightShadows)
    {
        mainLightShadows = m_MainLightShadowCasterPass.Setup(m_MainLightShadowmap, ref renderingData);
        if (mainLightShadows)
        {
            renderer.EnqueuePass(m_MainLightShadowCasterPass);
        }
    }
    if (renderingData.shadowData.supportsAdditionalLightShadows)
    {
        bool additionalLightShadows = m_AdditionalLightsShadowCasterPass.Setup(m_AdditionalLightsShadowmap, ref renderingData, renderer.maxVisibleAdditionalLights);
        if (additionalLightShadows)
        {
            renderer.EnqueuePass(m_AdditionalLightsShadowCasterPass);
        }
    }

    // A depth prepass is needed for screen-space shadow resolve, the scene view camera,
    // or when a depth texture is requested but a depth copy is not supported.
    bool resolveShadowsInScreenSpace = mainLightShadows && renderingData.shadowData.requiresScreenSpaceShadowResolve;
    bool requiresDepthPrepass = resolveShadowsInScreenSpace || renderingData.cameraData.isSceneViewCamera || (renderingData.cameraData.requiresDepthTexture && (!CanCopyDepth(ref renderingData.cameraData)));
    // For now VR requires a depth prepass until we figure out how to properly resolve texture2DMS in stereo
    requiresDepthPrepass |= renderingData.cameraData.isStereoEnabled;
    renderer.EnqueuePass(m_SetupForwardRenderingPass);

    // Collect the user-injected "after X" pass components attached to the camera.
    camera.GetComponents(m_AfterDepthpasses);
    camera.GetComponents(m_AfterOpaquePasses);
    camera.GetComponents(m_AfterOpaquePostProcessPasses);
    camera.GetComponents(m_AfterSkyboxPasses);
    camera.GetComponents(m_AfterTransparentPasses);
    camera.GetComponents(m_AfterRenderPasses);

    if (requiresDepthPrepass)
    {
        m_DepthOnlyPass.Setup(baseDescriptor, m_DepthTexture, SampleCount.One);
        renderer.EnqueuePass(m_DepthOnlyPass);
        foreach (var pass in m_AfterDepthpasses)
        {
            renderer.EnqueuePass(pass.GetPassToEnqueue(m_DepthOnlyPass.descriptor, m_DepthTexture));
        }
    }
    if (resolveShadowsInScreenSpace)
    {
        m_ScreenSpaceShadowResolvePass.Setup(baseDescriptor, m_ScreenSpaceShadowmap);
        renderer.EnqueuePass(m_ScreenSpaceShadowResolvePass);
    }

    // Render to an intermediate texture when any feature needs to read or post-process
    // camera color before the final blit.
    bool requiresRenderToTexture = RequiresIntermediateColorTexture(ref renderingData.cameraData, baseDescriptor) || m_AfterDepthpasses.Count != 0 || m_AfterOpaquePasses.Count != 0 || m_AfterOpaquePostProcessPasses.Count != 0 || m_AfterSkyboxPasses.Count != 0 || m_AfterTransparentPasses.Count != 0 || m_AfterRenderPasses.Count != 0 || Display.main.requiresBlitToBackbuffer || renderingData.killAlphaInFinalBlit;
    RenderTargetHandle colorHandle = RenderTargetHandle.CameraTarget;
    RenderTargetHandle depthHandle = RenderTargetHandle.CameraTarget;
    var sampleCount = (SampleCount)renderingData.cameraData.msaaSamples;
    if (requiresRenderToTexture)
    {
        colorHandle = m_ColorAttachment;
        depthHandle = m_DepthAttachment;
        m_CreateLightweightRenderTexturesPass.Setup(baseDescriptor, colorHandle, depthHandle, sampleCount);
        renderer.EnqueuePass(m_CreateLightweightRenderTexturesPass);
    }
    if (renderingData.cameraData.isStereoEnabled)
    {
        renderer.EnqueuePass(m_BeginXrRenderingPass);
    }
    var perObjectFlags = ScriptableRenderer.GetPerObjectLightFlags(renderingData.lightData.mainLightIndex, renderingData.lightData.additionalLightsCount);
    m_SetupLightweightConstants.Setup(renderer.maxVisibleAdditionalLights, renderer.perObjectLightIndices);
    renderer.EnqueuePass(m_SetupLightweightConstants);

    // Opaque geometry, then opaque-only post processing (if any).
    m_RenderOpaqueForwardPass.Setup(baseDescriptor, colorHandle, depthHandle, ScriptableRenderer.GetCameraClearFlag(camera), camera.backgroundColor, perObjectFlags);
    renderer.EnqueuePass(m_RenderOpaqueForwardPass);
    foreach (var pass in m_AfterOpaquePasses)
    {
        renderer.EnqueuePass(pass.GetPassToEnqueue(baseDescriptor, colorHandle, depthHandle));
    }
    if (renderingData.cameraData.postProcessEnabled && renderingData.cameraData.postProcessLayer.HasOpaqueOnlyEffects(renderer.postProcessingContext))
    {
        m_CreatePostOpaqueColorPass.Setup(baseDescriptor, m_ColorAttachmentAfterOpaquePost, sampleCount);
        renderer.EnqueuePass(m_CreatePostOpaqueColorPass);
        m_OpaquePostProcessPass.Setup(baseDescriptor, colorHandle, m_ColorAttachmentAfterOpaquePost, true, false);
        renderer.EnqueuePass(m_OpaquePostProcessPass);
        // Subsequent passes read from the post-processed color attachment.
        colorHandle = m_ColorAttachmentAfterOpaquePost;
        foreach (var pass in m_AfterOpaquePostProcessPasses)
        {
            renderer.EnqueuePass(pass.GetPassToEnqueue(baseDescriptor, colorHandle, depthHandle));
        }
    }
    if (camera.clearFlags == CameraClearFlags.Skybox)
    {
        m_DrawSkyboxPass.Setup(colorHandle, depthHandle);
        renderer.EnqueuePass(m_DrawSkyboxPass);
    }
    foreach (var pass in m_AfterSkyboxPasses)
    {
        renderer.EnqueuePass(pass.GetPassToEnqueue(baseDescriptor, colorHandle, depthHandle));
    }
    // Depth/opaque copies for features that sample them during the transparent pass.
    if (renderingData.cameraData.requiresDepthTexture && !requiresDepthPrepass)
    {
        m_CopyDepthPass.Setup(depthHandle, m_DepthTexture);
        renderer.EnqueuePass(m_CopyDepthPass);
    }
    if (renderingData.cameraData.requiresOpaqueTexture)
    {
        m_CopyColorPass.Setup(colorHandle, m_OpaqueColor);
        renderer.EnqueuePass(m_CopyColorPass);
    }
    m_RenderTransparentForwardPass.Setup(baseDescriptor, colorHandle, depthHandle, perObjectFlags);
    renderer.EnqueuePass(m_RenderTransparentForwardPass);
    foreach (var pass in m_AfterTransparentPasses)
    {
        renderer.EnqueuePass(pass.GetPassToEnqueue(baseDescriptor, colorHandle, depthHandle));
    }
#if UNITY_EDITOR
    m_LitGizmoRenderingPass.Setup(true);
    renderer.EnqueuePass(m_LitGizmoRenderingPass);
#endif
    bool afterRenderExists = m_AfterRenderPasses.Count != 0;
    // if we have additional filters
    // we need to stay in a RT
    if (afterRenderExists)
    {
        // perform post with src / dest the same
        if (renderingData.cameraData.postProcessEnabled)
        {
            m_CreatePostTransparentColorPass.Setup(baseDescriptor, m_ColorAttachmentAfterTransparentPost, sampleCount);
            renderer.EnqueuePass(m_CreatePostTransparentColorPass);
            m_PostProcessPass.Setup(baseDescriptor, colorHandle, m_ColorAttachmentAfterTransparentPost, false, false);
            renderer.EnqueuePass(m_PostProcessPass);
            colorHandle = m_ColorAttachmentAfterTransparentPost;
        }
        // execute after passes
        foreach (var pass in m_AfterRenderPasses)
        {
            renderer.EnqueuePass(pass.GetPassToEnqueue(baseDescriptor, colorHandle, depthHandle));
        }
        // now blit into the final target
        if (colorHandle != RenderTargetHandle.CameraTarget)
        {
            m_FinalBlitPass.Setup(baseDescriptor, colorHandle, Display.main.requiresSrgbBlitToBackbuffer, renderingData.killAlphaInFinalBlit);
            renderer.EnqueuePass(m_FinalBlitPass);
        }
    }
    else
    {
        if (renderingData.cameraData.postProcessEnabled)
        {
            m_PostProcessPass.Setup(baseDescriptor, colorHandle, RenderTargetHandle.CameraTarget, false, renderingData.cameraData.camera.targetTexture == null);
            renderer.EnqueuePass(m_PostProcessPass);
        }
        else if (colorHandle != RenderTargetHandle.CameraTarget)
        {
            m_FinalBlitPass.Setup(baseDescriptor, colorHandle, Display.main.requiresSrgbBlitToBackbuffer, renderingData.killAlphaInFinalBlit);
            renderer.EnqueuePass(m_FinalBlitPass);
        }
    }
    if (renderingData.cameraData.isStereoEnabled)
    {
        renderer.EnqueuePass(m_EndXrRenderingPass);
    }
#if UNITY_EDITOR
    m_UnlitGizmoRenderingPass.Setup(false);
    renderer.EnqueuePass(m_UnlitGizmoRenderingPass);
    if (renderingData.cameraData.isSceneViewCamera)
    {
        m_SceneViewDepthCopyPass.Setup(m_DepthTexture);
        renderer.EnqueuePass(m_SceneViewDepthCopyPass);
    }
#endif
}
// Per-frame execution: pushes the outline volume parameters to the material and draws a
// fullscreen triangle from source to destination at the configured injection point.
public override void Render(CommandBuffer cmd, RenderTargetIdentifier source, RenderTargetIdentifier destination, ref RenderingData renderingData, CustomPostProcessInjectionPoint injectionPoint)
{
    if (m_Material != null)
    {
        // Scalar parameters straight from the volume component.
        m_Material.SetFloat(ShaderIDs.Intensity, m_VolumeComponent.intensity.value);
        m_Material.SetFloat(ShaderIDs.Thickness, m_VolumeComponent.thickness.value);
        // Pack the thresholds: x/y carry the cosines of the normal-angle bounds (degrees),
        // z/w carry the raw depth thresholds.
        Vector2 normalDegrees = m_VolumeComponent.normalThreshold.value;
        Vector2 depthRange = m_VolumeComponent.depthThreshold.value;
        float cosUpper = Mathf.Cos(normalDegrees.y * Mathf.Deg2Rad);
        float cosLower = Mathf.Cos(normalDegrees.x * Mathf.Deg2Rad);
        m_Material.SetVector(ShaderIDs.Threshold, new Vector4(cosUpper, cosLower, depthRange.x, depthRange.y));
        m_Material.SetColor(ShaderIDs.Color, m_VolumeComponent.color.value);
    }
    // Bind the source and blit a fullscreen triangle into the destination.
    cmd.SetGlobalTexture(ShaderIDs.Input, source);
    CoreUtils.DrawFullScreen(cmd, m_Material, destination);
}
/// <inheritdoc/>
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    // Delegate to the built-in skybox pass for the current camera.
    var skyboxCamera = renderingData.cameraData.camera;
    context.DrawSkybox(skyboxCamera);
}
/// <inheritdoc/>
// Draws the skybox. In XR single-pass mode the legacy stereo matrices and stereo mode
// must be configured around the built-in skybox draw; otherwise per-eye matrices are
// pushed onto the camera directly. The non-XR path is a plain DrawSkybox call.
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    CameraData cameraData = renderingData.cameraData;
    Camera camera = cameraData.camera;
    var activeDebugHandler = GetActiveDebugHandler(renderingData);
    if (activeDebugHandler != null)
    {
        // TODO: The skybox needs to work the same as the other shaders, but until it does we'll not render it
        // when certain debug modes are active (e.g. wireframe/overdraw modes)
        if (activeDebugHandler.IsScreenClearNeeded)
        {
            return;
        }
    }
#if ENABLE_VR && ENABLE_XR_MODULE
    // XRTODO: Remove this code once Skybox pass is moved to SRP land.
    if (cameraData.xr.enabled)
    {
        // Setup Legacy XR buffer states
        if (cameraData.xr.singlePassEnabled)
        {
            // Setup legacy skybox stereo buffer: push the per-eye view/projection
            // matrices onto the camera so the built-in pass renders both eyes.
            camera.SetStereoProjectionMatrix(Camera.StereoscopicEye.Left, cameraData.GetProjectionMatrix(0));
            camera.SetStereoViewMatrix(Camera.StereoscopicEye.Left, cameraData.GetViewMatrix(0));
            camera.SetStereoProjectionMatrix(Camera.StereoscopicEye.Right, cameraData.GetProjectionMatrix(1));
            camera.SetStereoViewMatrix(Camera.StereoscopicEye.Right, cameraData.GetViewMatrix(1));
            CommandBuffer cmd = CommandBufferPool.Get();
            // Use legacy stereo instancing mode to have legacy XR code path configured
            cmd.SetSinglePassStereo(SystemInfo.supportsMultiview ? SinglePassStereoMode.Multiview : SinglePassStereoMode.Instancing);
            context.ExecuteCommandBuffer(cmd);
            cmd.Clear();
            // Calling into built-in skybox pass
            context.DrawSkybox(camera);
            // Disable Legacy XR path
            cmd.SetSinglePassStereo(SinglePassStereoMode.None);
            context.ExecuteCommandBuffer(cmd);
            // We do not need to submit here due to special handling of stereo matrices in core.
            // context.Submit();
            CommandBufferPool.Release(cmd);
            camera.ResetStereoProjectionMatrices();
            camera.ResetStereoViewMatrices();
        }
        else
        {
            // Multi-pass XR: apply the single eye's matrices directly to the camera.
            camera.projectionMatrix = cameraData.GetProjectionMatrix(0);
            camera.worldToCameraMatrix = cameraData.GetViewMatrix(0);
            context.DrawSkybox(camera);
            // XRTODO: remove this call because it creates issues with nested profiling scopes
            // See examples in UniversalRenderPipeline.RenderSingleCamera() and in ScriptableRenderer.Execute()
            context.Submit(); // Submit and execute the skybox pass before resetting the matrices
            camera.ResetProjectionMatrix();
            camera.ResetWorldToCameraMatrix();
        }
    }
    else
#endif
    {
        context.DrawSkybox(camera);
    }
}
// Copies the source target into a freshly allocated temporary RT so the captured image
// can be sampled by later passes.
public override void Execute(ScriptableRenderer renderer, ScriptableRenderContext context, ref RenderingData renderingData)
{
    var descriptor = ScriptableRenderer.CreateRenderTextureDescriptor(ref renderingData.cameraData);
    CommandBuffer captureCmd = CommandBufferPool.Get("Capture Pass");
    // Allocate the destination, then copy the current source contents into it.
    captureCmd.GetTemporaryRT(m_Target.id, descriptor);
    captureCmd.Blit(m_Source.Identifier(), m_Target.Identifier());
    context.ExecuteCommandBuffer(captureCmd);
    CommandBufferPool.Release(captureCmd);
}
// Debug visualization: copies the final image, then tiles the intermediate capture
// targets (after-depth, after-opaque, after-opaque-post, after-skybox, after-transparent)
// into a 3x2 grid on the camera target.
public override void Execute(ScriptableRenderer renderer, ScriptableRenderContext context, ref RenderingData renderingData)
{
    m_CopyResult.Setup(RenderTargetHandle.CameraTarget, afterAll);
    m_CopyResult.Execute(renderer, context, ref renderingData);

    Material material = renderer.GetMaterial(MaterialHandles.Blit);
    CommandBuffer cmd = CommandBufferPool.Get("Blit Pass");
    cmd.SetRenderTarget(BuiltinRenderTextureType.CameraTarget);
    cmd.SetViewProjectionMatrices(Matrix4x4.identity, Matrix4x4.identity);

    // Each grid cell is a third of the camera width by half of the camera height.
    float cellWidth = renderingData.cameraData.camera.pixelRect.width / 3.0f;
    float cellHeight = renderingData.cameraData.camera.pixelRect.height / 2.0f;

    // Draws one captured texture into the grid cell anchored at (x, y).
    void DrawCell(float x, float y, RenderTargetIdentifier texture)
    {
        cmd.SetViewport(new Rect(x, y, cellWidth, cellHeight));
        cmd.SetGlobalTexture("_BlitTex", texture);
        ScriptableRenderer.RenderFullscreenQuad(cmd, material);
    }

    // Top row, then bottom row, left to right.
    DrawCell(0f, cellHeight, afterDepth.Identifier());
    DrawCell(cellWidth, cellHeight, afterOpaque.Identifier());
    DrawCell(cellWidth * 2.0f, cellHeight, afterOpaquePost.Identifier());
    DrawCell(0f, 0f, afterSkybox.Identifier());
    DrawCell(cellWidth, 0f, afterTransparent.Identifier());

    // The bottom-right cell (afterAll) is intentionally not drawn: that capture comes
    // from a framebuffer and would need a vertically flipped UV (FLIP_VERTICAL_UV) —
    // upside-down UV handling is deferred, matching the original TODO.
    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    // Sun-shaft post effect: mask the area around the sun at reduced
    // resolution, radially blur the mask, then composite it over the frame.
    if (!renderingData.cameraData.postProcessEnabled) { return; }

    var stack = VolumeManager.instance.stack;
    m_SunShafts = stack.GetComponent<SunShafts>();
    if (m_SunShafts == null) { return; }
    if (!m_SunShafts.IsActive()) { return; }
    if (sunShaftsMaterial == null) { return; }
    if (RenderSettings.sun == null) { return; }

    Vector3 vSun;
    bool positiveZ = UpdateSun(renderingData.cameraData.camera, out vSun);
    // Shafts are only rendered while the sun is in front of the camera.
    if (!positiveZ) { return; }

    // BUGFIX: the command buffer is acquired only after every early-out above;
    // previously the sun==null and !positiveZ returns leaked the pooled buffer.
    var cmd = CommandBufferPool.Get(k_RenderTag);

    float sradius = m_SunShafts.sunShaftBlurRadius.value;
    RenderTextureDescriptor desc = renderingData.cameraData.cameraTargetDescriptor;
    var material = sunShaftsMaterial;
    var destination = currentTarget;
    cmd.SetGlobalTexture(MainTexId, destination);
    cmd.GetTemporaryRT(TempTargetId, desc.width, desc.height, 0, FilterMode.Bilinear, desc.colorFormat);
    cmd.Blit(destination, TempTargetId);

    // Work at half resolution for Normal quality, quarter resolution otherwise.
    int divider = 4;
    if (m_SunShafts.resolution == SunShaftsResolution.Normal)
    {
        divider = 2;
    }
    int rtW = desc.width / divider;
    int rtH = desc.height / divider;

    RenderTexture lrColorB;
    RenderTexture lrDepthBuffer = RenderTexture.GetTemporary(rtW, rtH, 0, desc.colorFormat);

    // Mask out everything except the skybox.
    // We have 2 methods, one of which requires depth buffer support, the other one is just comparing images.
    material.SetVector("_SunPosition", new Vector4(vSun.x, vSun.y, vSun.z, m_SunShafts.maxRadius.value));
    cmd.Blit(TempTargetId, lrDepthBuffer, material, 2);

    // Paint a small black border to get rid of clamping problems.
    //DrawBorder(lrDepthBuffer, Color.clear);

    // Radial blur. FIX: the clamped iteration count is kept in a local instead
    // of being written back into the shared volume component during rendering.
    int iter = Mathf.Clamp(m_SunShafts.radialBlurIterations.value, 1, 4);
    float ofs = sradius * (1.0f / 768.0f);
    material.SetVector("_BlurRadius4", new Vector4(ofs, ofs, 0.0f, 0.0f));
    material.SetVector("_SunPosition", new Vector4(vSun.x, vSun.y, vSun.z, m_SunShafts.maxRadius.value));

    for (int it2 = 0; it2 < iter; it2++)
    {
        // Each iteration takes 2 * 6 samples; _BlurRadius4 is updated each
        // time to cheaply get a very smooth look.
        lrColorB = RenderTexture.GetTemporary(rtW, rtH, 0, desc.colorFormat);
        cmd.Blit(lrDepthBuffer, lrColorB, material, 1);
        RenderTexture.ReleaseTemporary(lrDepthBuffer);
        ofs = sradius * (((it2 * 2.0f + 1.0f) * 6.0f)) / 768.0f;
        material.SetVector("_BlurRadius4", new Vector4(ofs, ofs, 0.0f, 0.0f));

        lrDepthBuffer = RenderTexture.GetTemporary(rtW, rtH, 0, desc.colorFormat);
        cmd.Blit(lrColorB, lrDepthBuffer, material, 1);
        RenderTexture.ReleaseTemporary(lrColorB);
        ofs = sradius * (((it2 * 2.0f + 2.0f) * 6.0f)) / 768.0f;
        material.SetVector("_BlurRadius4", new Vector4(ofs, ofs, 0.0f, 0.0f));
    }

    // Optional extra blur passes (shader passes 5 and 6).
    if (m_SunShafts.lastBlur.value)
    {
        lrColorB = RenderTexture.GetTemporary(rtW, rtH, 0, desc.colorFormat);
        cmd.Blit(lrDepthBuffer, lrColorB, material, 5);
        RenderTexture.ReleaseTemporary(lrDepthBuffer);
        lrDepthBuffer = RenderTexture.GetTemporary(rtW, rtH, 0, desc.colorFormat);
        cmd.Blit(lrColorB, lrDepthBuffer, material, 6);
        RenderTexture.ReleaseTemporary(lrColorB);
    }

    // Put together: composite the blurred shaft buffer over the original frame.
    material.SetTexture("_ColorBuffer", lrDepthBuffer);
    cmd.Blit(TempTargetId, destination, material, (m_SunShafts.screenBlendMode == ShaftsScreenBlendMode.Screen) ? 0 : 4);
    RenderTexture.ReleaseTemporary(lrDepthBuffer);
    cmd.ReleaseTemporaryRT(TempTargetId);
    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
public override void Render(CommandBuffer cmd, ref RenderingData renderingData, PostProcessingRenderContext context) { _blurBlitter.SetSource(context.activeRenderTarget, context.sourceRenderTextureDescriptor); _blurBlitter.Render(cmd); }
// Records draw calls for every registered grass bender (trails, particle
// systems and meshes) into the bend-vector map.
// Use <c>ScriptableRenderContext</c> to issue drawing commands or execute command buffers
// https://docs.unity3d.com/ScriptReference/Rendering.ScriptableRenderContext.html
// You don't have to call ScriptableRenderContext.submit, the render pipeline will call it at specific points in the pipeline.
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    var cmd = CommandBufferPool.Get(profilerSampler.name);

    // Cache per-frame state: render bounds and the bend camera's frustum
    // planes, used below to AABB-cull each bender.
    renderBounds = StylizedGrassRenderer.Instance.bounds;
    frustrumPlanes = GeometryUtility.CalculateFrustumPlanes(StylizedGrassRenderer.Instance.renderCam);

    using (new ProfilingScope(cmd, profilerSampler))
    {
        // Benders are grouped per layer; each bender passes its bend
        // parameters through the shared MaterialPropertyBlock 'props'.
        foreach (KeyValuePair<int, List<GrassBender>> layer in StylizedGrassRenderer.GrassBenders)
        {
            foreach (GrassBender b in layer.Value)
            {
                if (b.enabled == false) { continue; }

                // _Params: x = strength, y = height offset, z = push strength, w = scale multiplier.
                props.SetVector("_Params", new Vector4(b.strength, b.heightOffset, b.pushStrength, b.scaleMultiplier));

                if (b.benderType == GrassBenderBase.BenderType.Trail)
                {
                    // Skip missing, non-emitting or off-screen trails.
                    if (!b.trailRenderer) { continue; }
                    if (!b.trailRenderer.emitting) { continue; }
                    if (!GeometryUtility.TestPlanesAABB(frustrumPlanes, b.trailRenderer.bounds)) { continue; }

                    m_TrailRenderer = b.trailRenderer;
                    m_TrailRenderer.SetPropertyBlock(props);

                    // Push the bender's trail settings onto the TrailRenderer.
                    m_TrailRenderer.emitting = b.gameObject.activeInHierarchy;
                    m_TrailRenderer.generateLightingData = true;
                    m_TrailRenderer.widthMultiplier = b.trailRadius;
                    m_TrailRenderer.time = b.trailLifetime;
                    m_TrailRenderer.minVertexDistance = b.trailAccuracy;
                    m_TrailRenderer.widthCurve = b.widthOverLifetime;
                    m_TrailRenderer.colorGradient = GrassBenderBase.GetGradient(b.strengthOverLifetime);

                    // If disabled, temporarily enable in order to bake the mesh.
                    trailEnabled = m_TrailRenderer.enabled ? true : false;
                    if (!trailEnabled) { m_TrailRenderer.enabled = true; }
                    if (b.bakedMesh == null) { b.bakedMesh = new Mesh(); }
                    m_TrailRenderer.BakeMesh(b.bakedMesh, renderingData.cameraData.camera, false);
                    // Identity matrix — trail vertices are presumably baked in world space; TODO confirm.
                    cmd.DrawMesh(b.bakedMesh, Matrix4x4.identity, GrassBenderBase.TrailMaterial, 0, 0, props);
                    //Note: Faster, but crashed when trails are disabled (Case 1200430)
                    //cmd.DrawRenderer(m_TrailRenderer, GrassBenderBase.TrailMaterial, 0, 0);
                    if (!trailEnabled) { m_TrailRenderer.enabled = false; }
                    //trailMesh.Clear();
                }

                if (b.benderType == GrassBenderBase.BenderType.ParticleSystem)
                {
                    if (!b.particleSystem) { continue; }
                    if (!GeometryUtility.TestPlanesAABB(frustrumPlanes, b.particleRenderer.bounds)) { continue; }

                    m_ParticleRenderer = b.particleRenderer;
                    m_ParticleRenderer.SetPropertyBlock(props);

                    // Bend strength over the particle lifetime is driven through
                    // the system's color-over-lifetime gradient.
                    var grad = b.particleSystem.colorOverLifetime;
                    grad.enabled = true;
                    grad.color = GrassBenderBase.GetGradient(b.strengthOverLifetime);

                    bool localSpace = b.particleSystem.main.simulationSpace == ParticleSystemSimulationSpace.Local;

                    //Note: DrawRenderer with particle systems appears to be broken. Only renders to scene cam when it redraws. Bake the mesh down and render it instead.
                    //Todo: Create repro project and file bug report.
                    //cmd.DrawRenderer(m_ParticleRenderer, m_Material, 0, 0);
                    if (!b.bakedMesh) { b.bakedMesh = new Mesh(); }
                    m_ParticleRenderer.BakeMesh(b.bakedMesh, renderingData.cameraData.camera);
                    // Local-space systems need the renderer's matrix; otherwise identity.
                    // Submesh 0; material pass 1 when alpha blending, else pass 0.
                    cmd.DrawMesh(b.bakedMesh, localSpace ? m_ParticleRenderer.localToWorldMatrix : Matrix4x4.identity, GrassBenderBase.MeshMaterial, 0, b.alphaBlending ? 1 : 0, props);

                    // Also draw particle trails, when the system has them.
                    if (b.hasParticleTrails)
                    {
                        if (!b.particleTrailMesh) { b.particleTrailMesh = new Mesh(); }
                        m_ParticleRenderer.BakeTrailsMesh(b.particleTrailMesh, renderingData.cameraData.camera);
                        cmd.DrawMesh(b.particleTrailMesh, m_ParticleRenderer.localToWorldMatrix, GrassBenderBase.TrailMaterial, 1, 0, props);
                        //cmd.DrawRenderer(m_ParticleRenderer, GrassBenderBase.TrailMaterial, 1, 0);
                    }
                }

                if (b.benderType == GrassBenderBase.BenderType.Mesh)
                {
                    if (!b.meshRenderer) { continue; }
                    if (!GeometryUtility.TestPlanesAABB(frustrumPlanes, b.meshRenderer.bounds)) { continue; }

                    m_MeshRenderer = b.meshRenderer;
                    m_MeshRenderer.SetPropertyBlock(props);
                    // Submesh 0; material pass 1 when alpha blending, else pass 0.
                    cmd.DrawRenderer(m_MeshRenderer, GrassBenderBase.MeshMaterial, 0, b.alphaBlending ? 1 : 0);
                }
            }
        }

        // Mask edges of bend area, avoids streaking at edges.
        if (enableEdgeMasking)
        {
            cmd.SetGlobalTexture("_BendMapInput", BuiltinRenderTextureType.CurrentActive);
            cmd.Blit(BuiltinRenderTextureType.CurrentActive, bendVectorID, m_MaskMat);
            cmd.SetGlobalTexture(StylizedGrassRenderer.VECTOR_MAP_PARAM, bendVectorID);
        }
    }

    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData) { pass.Setup(renderer.cameraColorTarget); renderer.EnqueuePass(pass); }
// The actual rendering execution is done here public override void Render(CommandBuffer cmd, RenderTargetIdentifier source, RenderTargetIdentifier destination, ref RenderingData renderingData, CustomPostProcessInjectionPoint injectionPoint) { // set material properties if (m_Material != null) { m_Material.SetFloat(ShaderIDs.Blend, m_VolumeComponent.blend.value); } // set source texture cmd.SetGlobalTexture(ShaderIDs.Input, source); // draw a fullscreen triangle to the destination CoreUtils.DrawFullScreen(cmd, m_Material, destination); }
public bool Setup(ref RenderingData renderingData) { // Currently we only need to enqueue this pass when the user // doesn't want transparent objects to receive shadows return(!m_shouldReceiveShadows); }
// Builds this camera's render-pass queue: shadow maps, optional depth
// prepass, opaques, skybox, depth/color copies, the blend-weighted
// (OIT-style) accumulate/revealage passes, post-processing and the final blit.
public override void Setup(ScriptableRenderContext context, ref RenderingData renderingData)
{
    Camera camera = renderingData.cameraData.camera;
    RenderTextureDescriptor cameraTargetDescriptor = renderingData.cameraData.cameraTargetDescriptor;

    bool mainLightShadows = m_MainLightShadowCasterPass.Setup(ref renderingData);
    bool additionalLightShadows = m_AdditionalLightsShadowCasterPass.Setup(ref renderingData);
    bool resolveShadowsInScreenSpace = mainLightShadows && renderingData.shadowData.requiresScreenSpaceShadowResolve;

    // Depth prepass is generated in the following cases:
    // - We resolve shadows in screen space
    // - Scene view camera always requires a depth texture. We do a depth pre-pass to simplify it and it shouldn't matter much for editor.
    // - If game or offscreen camera requires it we check if we can copy the depth from the rendering opaques pass and use that instead.
    bool requiresDepthPrepass = renderingData.cameraData.isSceneViewCamera || (renderingData.cameraData.requiresDepthTexture && (!CanCopyDepth(ref renderingData.cameraData)));
    requiresDepthPrepass |= resolveShadowsInScreenSpace;

    // Any renderer feature forces an intermediate color texture so features can read/write it.
    bool createColorTexture = RequiresIntermediateColorTexture(ref renderingData, cameraTargetDescriptor) || rendererFeatures.Count != 0;

    // If camera requires depth and there's no depth pre-pass we create a depth texture that can be read
    // later by effects requiring it.
    bool createDepthTexture = renderingData.cameraData.requiresDepthTexture && !requiresDepthPrepass;

    bool postProcessEnabled = renderingData.cameraData.postProcessEnabled;
    bool hasOpaquePostProcess = postProcessEnabled && renderingData.cameraData.postProcessLayer.HasOpaqueOnlyEffects(RenderingUtils.postProcessRenderContext);

    m_ActiveCameraColorAttachment = (createColorTexture) ? m_CameraColorAttachment : RenderTargetHandle.CameraTarget;
    m_ActiveCameraDepthAttachment = (createDepthTexture) ? m_CameraDepthAttachment : RenderTargetHandle.CameraTarget;
    if (createColorTexture || createDepthTexture)
    {
        CreateCameraRenderTarget(context, ref renderingData.cameraData);
    }
    ConfigureCameraTarget(m_ActiveCameraColorAttachment.Identifier(), m_ActiveCameraDepthAttachment.Identifier());

    // Let renderer features enqueue their passes, then drop any null entries left in the queue.
    for (int i = 0; i < rendererFeatures.Count; ++i)
    {
        rendererFeatures[i].AddRenderPasses(this, ref renderingData);
    }

    int count = activeRenderPassQueue.Count;
    for (int i = count - 1; i >= 0; i--)
    {
        if (activeRenderPassQueue[i] == null)
        {
            activeRenderPassQueue.RemoveAt(i);
        }
    }
    bool hasAfterRendering = activeRenderPassQueue.Find(x => x.renderPassEvent == RenderPassEvent.AfterRendering) != null;

    if (mainLightShadows)
    {
        EnqueuePass(m_MainLightShadowCasterPass);
    }
    if (additionalLightShadows)
    {
        EnqueuePass(m_AdditionalLightsShadowCasterPass);
    }
    if (requiresDepthPrepass)
    {
        m_DepthPrepass.Setup(cameraTargetDescriptor, m_DepthTexture);
        EnqueuePass(m_DepthPrepass);
    }
    if (resolveShadowsInScreenSpace)
    {
        m_ScreenSpaceShadowResolvePass.Setup(cameraTargetDescriptor);
        EnqueuePass(m_ScreenSpaceShadowResolvePass);
        // Optionally downsample the screen-space shadow texture.
        if (renderingData.shadowData.ssShadowDownSampleSize > 1)
        {
            m_SSSDownsamplePass.Setup(cameraTargetDescriptor, renderingData);
            EnqueuePass(m_SSSDownsamplePass);
        }
    }

    EnqueuePass(m_RenderOpaqueForwardPass);
    if (hasOpaquePostProcess)
    {
        m_OpaquePostProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, m_ActiveCameraColorAttachment);
    }
    //EnqueuePass(m_RenderOpaqueDiscardAndBlendPass);
    if (camera.clearFlags == CameraClearFlags.Skybox && RenderSettings.skybox != null)
    {
        EnqueuePass(m_DrawSkyboxPass);
    }

    // If a depth texture was created we necessarily need to copy it, otherwise we could have rendered it to a renderbuffer.
    if (createDepthTexture)
    {
        m_CopyDepthPass.Setup(m_ActiveCameraDepthAttachment, m_DepthTexture);
        EnqueuePass(m_CopyDepthPass);
    }
    if (renderingData.cameraData.requiresOpaqueTexture)
    {
        m_CopyColorPass.Setup(m_ActiveCameraColorAttachment.Identifier(), m_OpaqueColor);
        EnqueuePass(m_CopyColorPass);
    }

    // OIT passes: blend-weighted accumulate, then revealage.
    // NOTE(review): no m_RenderTransparentForwardPass is enqueued in this
    // Setup — transparents appear to go exclusively through these passes;
    // confirm that is intentional.
    m_BlendWeightedAccumulatePass.Setup(cameraTargetDescriptor, accumulateHandle, m_DepthTexture);
    //m_BlendWeightedAccumulatePass.Setup(new RenderTextureDescriptor(cameraTargetDescriptor.width, cameraTargetDescriptor.height), accumulateHandle, m_DepthTexture);
    EnqueuePass(m_BlendWeightedAccumulatePass);
    m_BlendWeightedRevealagePass.Setup(cameraTargetDescriptor, revealageHandle, m_DepthTexture);
    //m_BlendWeightedRevealagePass.Setup(new RenderTextureDescriptor(cameraTargetDescriptor.width, cameraTargetDescriptor.height), revealageHandle, m_DepthTexture);
    EnqueuePass(m_BlendWeightedRevealagePass);
    //cameraTargetDescriptor.msaaSamples = msaaSamples;
    //cameraTargetDescriptor.bindMS = bindMS;

    bool afterRenderExists = renderingData.cameraData.captureActions != null || hasAfterRendering;
    // If we have additional passes after rendering (or capture actions), we need to stay in an intermediate RT.
    if (afterRenderExists)
    {
        // Perform post-processing with source and destination the same.
        if (postProcessEnabled)
        {
            m_PostProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, m_ActiveCameraColorAttachment);
            EnqueuePass(m_PostProcessPass);
        }
        // Now blit into the final target.
        if (m_ActiveCameraColorAttachment != RenderTargetHandle.CameraTarget)
        {
            if (renderingData.cameraData.captureActions != null)
            {
                m_CapturePass.Setup(m_ActiveCameraColorAttachment);
                EnqueuePass(m_CapturePass);
            }
            m_FinalBlitPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment);
            EnqueuePass(m_FinalBlitPass);
        }
    }
    else
    {
        if (postProcessEnabled)
        {
            m_PostProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, RenderTargetHandle.CameraTarget);
            EnqueuePass(m_PostProcessPass);
        }
        else if (m_ActiveCameraColorAttachment != RenderTargetHandle.CameraTarget)
        {
            m_FinalBlitPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment);
            EnqueuePass(m_FinalBlitPass);
        }
    }
#if UNITY_EDITOR
    // In the editor, copy depth so the scene view can display it.
    if (renderingData.cameraData.isSceneViewCamera)
    {
        m_SceneViewDepthCopyPass.Setup(m_DepthTexture);
        EnqueuePass(m_SceneViewDepthCopyPass);
    }
#endif
}
void SetupAdditionalLightConstants(CommandBuffer cmd, ref RenderingData renderingData) { ref LightData lightData = ref renderingData.lightData;
/// <inheritdoc/> public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData) { RenderMainLightCascadeShadowmap(ref context, ref renderingData.cullResults, ref renderingData.lightData, ref renderingData.shadowData); }
// This method is called before executing the render pass. // It can be used to configure render targets and their clear state. Also to create temporary render target textures. // When empty this render pass will render to the active camera render target. // You should never call CommandBuffer.SetRenderTarget. Instead call <c>ConfigureTarget</c> and <c>ConfigureClear</c>. // The render pipeline will ensure target setup and clearing happens in a performant manner. public override void OnCameraSetup(CommandBuffer cmd, ref RenderingData renderingData) { }
public bool Setup(ref RenderingData renderingData) { using var profScope = new ProfilingScope(null, m_ProfilingSetupSampler); if (!renderingData.shadowData.supportsMainLightShadows) { return(false); } Clear(); int shadowLightIndex = renderingData.lightData.mainLightIndex; if (shadowLightIndex == -1) { return(false); } VisibleLight shadowLight = renderingData.lightData.visibleLights[shadowLightIndex]; Light light = shadowLight.light; if (light.shadows == LightShadows.None) { return(false); } if (shadowLight.lightType != LightType.Directional) { Debug.LogWarning("Only directional lights are supported as main light."); } Bounds bounds; if (!renderingData.cullResults.GetShadowCasterBounds(shadowLightIndex, out bounds)) { return(false); } m_ShadowCasterCascadesCount = renderingData.shadowData.mainLightShadowCascadesCount; int shadowResolution = ShadowUtils.GetMaxTileResolutionInAtlas(renderingData.shadowData.mainLightShadowmapWidth, renderingData.shadowData.mainLightShadowmapHeight, m_ShadowCasterCascadesCount); m_ShadowmapWidth = renderingData.shadowData.mainLightShadowmapWidth; m_ShadowmapHeight = (m_ShadowCasterCascadesCount == 2) ? renderingData.shadowData.mainLightShadowmapHeight >> 1 : renderingData.shadowData.mainLightShadowmapHeight; for (int cascadeIndex = 0; cascadeIndex < m_ShadowCasterCascadesCount; ++cascadeIndex) { bool success = ShadowUtils.ExtractDirectionalLightMatrix(ref renderingData.cullResults, ref renderingData.shadowData, shadowLightIndex, cascadeIndex, m_ShadowmapWidth, m_ShadowmapHeight, shadowResolution, light.shadowNearPlane, out m_CascadeSplitDistances[cascadeIndex], out m_CascadeSlices[cascadeIndex]); if (!success) { return(false); } } m_MainLightShadowParams = ShadowUtils.GetMainLightShadowParams(ref renderingData); return(true); }
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    // Renders proxy marks through a clip-sphere shader: for every active
    // proxy mark, set the per-object clip uniforms, cull against an axis-aligned
    // box around the mark, and draw the matching renderers.
    var camera = renderingData.cameraData.camera;

    // Opaque queues keep the camera's default sort; everything else sorts as common transparent.
    SortingCriteria sortFlags;
    if (filterSettings.renderQueueRange == RenderQueueRange.opaque)
    {
        sortFlags = renderingData.cameraData.defaultOpaqueSortFlags;
    }
    else
    {
        sortFlags = SortingCriteria.CommonTransparent;
    }
    var drawSettings = CreateDrawingSettings(shaderTagIds, ref renderingData, sortFlags);

    CommandBuffer cmd = CommandBufferPool.Get("ClipSphere");
    cmd.EnableShaderKeyword("CLIP_SPHERE_ON");
    cmd.SetGlobalFloat("_ClipObjEdgeThickness", 0.01f);
    context.ExecuteCommandBuffer(cmd);
    cmd.Clear();

    // FIX: removed unused local 'baseCameraMatrix' (camera.worldToCameraMatrix
    // was read but never used).
    foreach (var proxy in ProxyNode.Instances)
    {
        if (!proxy.isActiveAndEnabled) { continue; }
        if (proxy.State == ProxyNode.ProxyState.Minimized) { continue; }

        foreach (var mark in proxy.Marks)
        {
            if (mark == null || !mark.isActiveAndEnabled) { continue; }

            // Per-mark clip uniforms: edge color, position, scale and warp transform.
            cmd.SetGlobalColor("_ClipObjEdgeColor", proxy.Color);
            cmd.SetGlobalVector("_ClipObjPosition", mark.transform.position);
            cmd.SetGlobalVector("_ClipObjScale", 0.5f * proxy.ProxyScaleFactor * mark.transform.localScale);
            var clipTransform = GetWarpTransform(proxy.transform, mark.transform);
            cmd.SetGlobalMatrix("_ClipTransform", clipTransform);
            cmd.SetGlobalMatrix("_ClipTransformInv", clipTransform.inverse);
            context.ExecuteCommandBuffer(cmd);
            OnProxyPass?.Invoke(this, GetProxyCameraPosition(proxy.transform, mark.transform));

            // Workaround ("stupid hack"): start from the camera's culling
            // parameters and replace the planes with a box around the mark —
            // the other approaches crashed or didn't work.
            // BUGFIX: the return value of TryGetCullingParameters was previously
            // ignored; on failure we now skip the mark instead of culling with
            // default-initialized parameters.
            if (!camera.TryGetCullingParameters(true, out var scp)) { continue; }

            var markPos = mark.transform.position;
            var markExt = mark.transform.localScale;
            scp.cullingPlaneCount = 6;
            scp.SetCullingPlane(0, new Plane(new Vector3(1, 0, 0), markPos - new Vector3(1, 0, 0) * markExt.x));
            scp.SetCullingPlane(1, new Plane(new Vector3(-1, 0, 0), markPos + new Vector3(1, 0, 0) * markExt.x));
            scp.SetCullingPlane(2, new Plane(new Vector3(0, 1, 0), markPos - new Vector3(0, 1, 0) * markExt.y));
            scp.SetCullingPlane(3, new Plane(new Vector3(0, -1, 0), markPos + new Vector3(0, 1, 0) * markExt.y));
            scp.SetCullingPlane(4, new Plane(new Vector3(0, 0, 1), markPos - new Vector3(0, 0, 1) * markExt.z));
            scp.SetCullingPlane(5, new Plane(new Vector3(0, 0, -1), markPos + new Vector3(0, 0, 1) * markExt.z));

            var cullResults = context.Cull(ref scp);
            context.DrawRenderers(cullResults, ref drawSettings, ref filterSettings, ref renderStateBlock);
        }
    }

    // Restore global state for subsequent passes.
    cmd.SetGlobalMatrix("_ClipTransform", Matrix4x4.identity);
    cmd.DisableShaderKeyword("CLIP_SPHERE_ON");
    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
public virtual void Cleanup(ScriptableRenderContext context, ref RenderingData renderingData) { }
public override void SetupLights(ScriptableRenderContext context, ref RenderingData renderingData) { m_ForwardLights.Setup(context, ref renderingData); }
public override void Execute(ScriptableRenderer renderer, ScriptableRenderContext context, ref RenderingData renderingData)
{
    // Copies the current color buffer into 'afterAll', then tiles six captured
    // render targets into a 3x2 debug grid on the color/depth handles.
    if (renderer == null)
    {
        // FIX: use nameof for a refactor-safe parameter name.
        throw new ArgumentNullException(nameof(renderer));
    }

    // NOTE(review): a new CopyColorPass and engine material are created on
    // every Execute; consider caching both in fields to avoid per-frame
    // allocations and accumulating engine materials.
    var pass = new CopyColorPass(CoreUtils.CreateEngineMaterial(Shader.Find("Hidden/Lightweight Render Pipeline/Sampling")));
    pass.Setup(colorHandle, afterAll);
    pass.Execute(renderer, context, ref renderingData);

    CommandBuffer cmd = CommandBufferPool.Get("Blit Pass");
    cmd.SetRenderTarget(colorHandle.id, depthHandle.id);
    cmd.SetViewProjectionMatrices(Matrix4x4.identity, Matrix4x4.identity);

    // One tile is a third of the width and half of the height of the camera's pixel rect.
    Rect pixelRect = renderingData.cameraData.camera.pixelRect;
    float tileW = pixelRect.width / 3.0f;
    float tileH = pixelRect.height / 2.0f;

    // Top row: before-all, after-opaque, after-opaque-post.
    cmd.SetViewport(new Rect(0f, tileH, tileW, tileH));
    cmd.SetGlobalTexture("_BlitTex", beforeAll.Identifier());
    ScriptableRenderer.RenderFullscreenQuad(cmd, m_BlitMaterial);

    cmd.SetViewport(new Rect(tileW, tileH, tileW, tileH));
    cmd.SetGlobalTexture("_BlitTex", afterOpaque.Identifier());
    ScriptableRenderer.RenderFullscreenQuad(cmd, m_BlitMaterial);

    cmd.SetViewport(new Rect(tileW * 2.0f, tileH, tileW, tileH));
    cmd.SetGlobalTexture("_BlitTex", afterOpaquePost.Identifier());
    ScriptableRenderer.RenderFullscreenQuad(cmd, m_BlitMaterial);

    // Bottom row: after-skybox, after-transparent, after-all.
    cmd.SetViewport(new Rect(0f, 0f, tileW, tileH));
    cmd.SetGlobalTexture("_BlitTex", afterSkybox.Identifier());
    ScriptableRenderer.RenderFullscreenQuad(cmd, m_BlitMaterial);

    cmd.SetViewport(new Rect(tileW, 0f, tileW, tileH));
    cmd.SetGlobalTexture("_BlitTex", afterTransparent.Identifier());
    ScriptableRenderer.RenderFullscreenQuad(cmd, m_BlitMaterial);

    cmd.SetViewport(new Rect(tileW * 2.0f, 0f, tileW, tileH));
    cmd.SetGlobalTexture("_BlitTex", afterAll.Identifier());
    ScriptableRenderer.RenderFullscreenQuad(cmd, m_BlitMaterial);

    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
// Builds the forward renderer's pass queue for this camera: shadow maps,
// optional depth prepass, opaques, skybox, depth/color copies, transparents,
// post-processing and the final blit.
public override void Setup(ScriptableRenderContext context, ref RenderingData renderingData)
{
    Camera camera = renderingData.cameraData.camera;
    RenderTextureDescriptor cameraTargetDescriptor = renderingData.cameraData.cameraTargetDescriptor;

    // Special path for depth only offscreen cameras. Only write opaques + transparents.
    bool isOffscreenDepthTexture = camera.targetTexture != null && camera.targetTexture.format == RenderTextureFormat.Depth;
    if (isOffscreenDepthTexture)
    {
        ConfigureCameraTarget(BuiltinRenderTextureType.CameraTarget, BuiltinRenderTextureType.CameraTarget);
        for (int i = 0; i < rendererFeatures.Count; ++i)
        {
            rendererFeatures[i].AddRenderPasses(this, ref renderingData);
        }
        EnqueuePass(m_RenderOpaqueForwardPass);
        EnqueuePass(m_DrawSkyboxPass);
        EnqueuePass(m_RenderTransparentForwardPass);
        return;
    }

    bool mainLightShadows = m_MainLightShadowCasterPass.Setup(ref renderingData);
    bool additionalLightShadows = m_AdditionalLightsShadowCasterPass.Setup(ref renderingData);
    bool resolveShadowsInScreenSpace = mainLightShadows && renderingData.shadowData.requiresScreenSpaceShadowResolve;

    // Depth prepass is generated in the following cases:
    // - We resolve shadows in screen space
    // - Scene view camera always requires a depth texture. We do a depth pre-pass to simplify it and it shouldn't matter much for editor.
    // - If game or offscreen camera requires it we check if we can copy the depth from the rendering opaques pass and use that instead.
    bool requiresDepthPrepass = renderingData.cameraData.isSceneViewCamera || (renderingData.cameraData.requiresDepthTexture && (!CanCopyDepth(ref renderingData.cameraData)));
    requiresDepthPrepass |= resolveShadowsInScreenSpace;

    // TODO: There's an issue in multiview and depth copy pass. Atm forcing a depth prepass on XR until
    // we have a proper fix.
    if (renderingData.cameraData.isStereoEnabled && renderingData.cameraData.requiresDepthTexture)
    {
        requiresDepthPrepass = true;
    }
    // NOTE(review): this unconditionally discards all of the logic above,
    // including the XR workaround — looks like a leftover debug override; confirm.
    requiresDepthPrepass = false;

    bool createColorTexture = RequiresIntermediateColorTexture(ref renderingData, cameraTargetDescriptor) || rendererFeatures.Count != 0;
    // NOTE(review): forces rendering directly to the camera target regardless
    // of the computed requirement — also looks like a leftover debug override; confirm.
    createColorTexture = false;

    // If camera requires depth and there's no depth pre-pass we create a depth texture that can be read
    // later by effects requiring it.
    bool createDepthTexture = renderingData.cameraData.requiresDepthTexture && !requiresDepthPrepass;

    bool postProcessEnabled = renderingData.cameraData.postProcessEnabled;
    bool hasOpaquePostProcess = postProcessEnabled && renderingData.cameraData.postProcessLayer.HasOpaqueOnlyEffects(RenderingUtils.postProcessRenderContext);

    m_ActiveCameraColorAttachment = (createColorTexture) ? m_CameraColorAttachment : RenderTargetHandle.CameraTarget;
    m_ActiveCameraDepthAttachment = (createDepthTexture) ? m_CameraDepthAttachment : RenderTargetHandle.CameraTarget;
    if (createColorTexture || createDepthTexture)
    {
        CreateCameraRenderTarget(context, ref renderingData.cameraData);
    }
    ConfigureCameraTarget(m_ActiveCameraColorAttachment.Identifier(), m_ActiveCameraDepthAttachment.Identifier());

    // Let renderer features enqueue their passes, then drop any null entries left in the queue.
    for (int i = 0; i < rendererFeatures.Count; ++i)
    {
        rendererFeatures[i].AddRenderPasses(this, ref renderingData);
    }

    int count = activeRenderPassQueue.Count;
    for (int i = count - 1; i >= 0; i--)
    {
        if (activeRenderPassQueue[i] == null)
        {
            activeRenderPassQueue.RemoveAt(i);
        }
    }
    bool hasAfterRendering = activeRenderPassQueue.Find(x => x.renderPassEvent == RenderPassEvent.AfterRendering) != null;

    if (mainLightShadows)
    {
        EnqueuePass(m_MainLightShadowCasterPass);
    }
    if (additionalLightShadows)
    {
        EnqueuePass(m_AdditionalLightsShadowCasterPass);
    }
    if (requiresDepthPrepass)
    {
        m_DepthPrepass.Setup(cameraTargetDescriptor, m_DepthTexture);
        EnqueuePass(m_DepthPrepass);
    }
    if (resolveShadowsInScreenSpace)
    {
        m_ScreenSpaceShadowResolvePass.Setup(cameraTargetDescriptor);
        EnqueuePass(m_ScreenSpaceShadowResolvePass);
    }

    EnqueuePass(m_RenderOpaqueForwardPass);
    if (hasOpaquePostProcess)
    {
        m_OpaquePostProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, m_ActiveCameraColorAttachment);
    }
    if (camera.clearFlags == CameraClearFlags.Skybox && RenderSettings.skybox != null)
    {
        EnqueuePass(m_DrawSkyboxPass);
    }

    // If a depth texture was created we necessarily need to copy it, otherwise we could have rendered it to a renderbuffer.
    if (createDepthTexture)
    {
        m_CopyDepthPass.Setup(m_ActiveCameraDepthAttachment, m_DepthTexture);
        EnqueuePass(m_CopyDepthPass);
    }
    if (renderingData.cameraData.requiresOpaqueTexture)
    {
        // TODO: Downsampling method should be stored in the renderer instead of in the asset.
        // We need to migrate this data to renderer. For now, we query the method in the active asset.
        Downsampling downsamplingMethod = LightweightRenderPipeline.asset.opaqueDownsampling;
        m_CopyColorPass.Setup(m_ActiveCameraColorAttachment.Identifier(), m_OpaqueColor, downsamplingMethod);
        EnqueuePass(m_CopyColorPass);
    }

    EnqueuePass(m_RenderTransparentForwardPass);

    bool afterRenderExists = renderingData.cameraData.captureActions != null || hasAfterRendering;
    // If we have additional passes after rendering (or capture actions), we need to stay in an intermediate RT.
    if (afterRenderExists)
    {
        // Perform post-processing with source and destination the same.
        if (postProcessEnabled)
        {
            m_PostProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, m_ActiveCameraColorAttachment);
            EnqueuePass(m_PostProcessPass);
        }
        // Now blit into the final target.
        if (m_ActiveCameraColorAttachment != RenderTargetHandle.CameraTarget)
        {
            if (renderingData.cameraData.captureActions != null)
            {
                m_CapturePass.Setup(m_ActiveCameraColorAttachment);
                EnqueuePass(m_CapturePass);
            }
            m_FinalBlitPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment);
            EnqueuePass(m_FinalBlitPass);
        }
    }
    else
    {
        if (postProcessEnabled)
        {
            m_PostProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, RenderTargetHandle.CameraTarget);
            EnqueuePass(m_PostProcessPass);
        }
        else if (m_ActiveCameraColorAttachment != RenderTargetHandle.CameraTarget)
        {
            m_FinalBlitPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment);
            EnqueuePass(m_FinalBlitPass);
        }
    }
#if UNITY_EDITOR
    // In the editor, copy depth so the scene view can display it.
    if (renderingData.cameraData.isSceneViewCamera)
    {
        m_SceneViewDepthCopyPass.Setup(m_DepthTexture);
        EnqueuePass(m_SceneViewDepthCopyPass);
    }
#endif
}
public override void Execute(ScriptableRenderer renderer, ScriptableRenderContext context, ref RenderingData renderingData) { var cmd = CommandBufferPool.Get("Clear Color"); cmd.SetRenderTarget(m_ColorHandle.Identifier()); cmd.ClearRenderTarget(CoreUtils.HasFlag(m_ClearFlag, ClearFlag.Color), CoreUtils.HasFlag(m_ClearFlag, ClearFlag.Depth), Color.yellow); RenderTextureDescriptor opaqueDesc = ScriptableRenderer.CreateRenderTextureDescriptor(ref renderingData.cameraData); cmd.GetTemporaryRT(beforeAll.id, opaqueDesc, FilterMode.Point); cmd.Blit(m_ColorHandle.Identifier(), beforeAll.Identifier()); context.ExecuteCommandBuffer(cmd); CommandBufferPool.Release(cmd); }
public void LerpShouldReturnARenderingDataObject() { var data = new RenderingData(); var newRenderingDataObject = data.Lerp(data, 0.5f); Assert.IsNotNull(newRenderingDataObject); }
private static RenderingData LoadRenderingData(BinaryReader reader) { var data = new RenderingData(); data.RequestedUserUV = LoadRectangle(reader); data.RequestedDrawArea = LoadRectangle(reader); data.FlipMode = (FlipMode)reader.ReadInt32(); data.AtlasUV = LoadRectangle(reader); data.DrawArea = LoadRectangle(reader); data.HasSomethingToRender = reader.ReadBoolean(); data.IsAtlasRotated = reader.ReadBoolean(); return data; }