public static void RegisterDebug(string menuName, FrameSettings frameSettings)
{
    List<DebugUI.Widget> widgets = new List<DebugUI.Widget>();
    widgets.AddRange(new DebugUI.Widget[]
    {
        new DebugUI.Container
        {
            displayName = "Rendering Passes",
            children =
            {
                new DebugUI.BoolField { displayName = "Enable Transparent Prepass", getter = () => frameSettings.enableTransparentPrepass, setter = value => frameSettings.enableTransparentPrepass = value },
                new DebugUI.BoolField { displayName = "Enable Transparent Postpass", getter = () => frameSettings.enableTransparentPostpass, setter = value => frameSettings.enableTransparentPostpass = value },
                new DebugUI.BoolField { displayName = "Enable Motion Vectors", getter = () => frameSettings.enableMotionVectors, setter = value => frameSettings.enableMotionVectors = value },
                new DebugUI.BoolField { displayName = "Enable Object Motion Vectors", getter = () => frameSettings.enableObjectMotionVectors, setter = value => frameSettings.enableObjectMotionVectors = value },
                new DebugUI.BoolField { displayName = "Enable DBuffer", getter = () => frameSettings.enableDBuffer, setter = value => frameSettings.enableDBuffer = value },
                new DebugUI.BoolField { displayName = "Enable Atmospheric Scattering", getter = () => frameSettings.enableAtmosphericScattering, setter = value => frameSettings.enableAtmosphericScattering = value },
                new DebugUI.BoolField { displayName = "Enable Rough Refraction", getter = () => frameSettings.enableRoughRefraction, setter = value => frameSettings.enableRoughRefraction = value },
                new DebugUI.BoolField { displayName = "Enable Distortion", getter = () => frameSettings.enableDistortion, setter = value => frameSettings.enableDistortion = value },
                new DebugUI.BoolField { displayName = "Enable Postprocess", getter = () => frameSettings.enablePostprocess, setter = value => frameSettings.enablePostprocess = value },
            }
        },
        new DebugUI.Container
        {
            displayName = "Rendering Settings",
            children =
            {
                new DebugUI.BoolField { displayName = "Forward Only", getter = () => frameSettings.enableForwardRenderingOnly, setter = value => frameSettings.enableForwardRenderingOnly = value },
                new DebugUI.BoolField { displayName = "Deferred Depth Prepass", getter = () => frameSettings.enableDepthPrepassWithDeferredRendering, setter = value => frameSettings.enableDepthPrepassWithDeferredRendering = value },
                new DebugUI.BoolField { displayName = "Enable Async Compute", getter = () => frameSettings.enableAsyncCompute, setter = value => frameSettings.enableAsyncCompute = value },
                new DebugUI.BoolField { displayName = "Enable Opaque Objects", getter = () => frameSettings.enableOpaqueObjects, setter = value => frameSettings.enableOpaqueObjects = value },
                new DebugUI.BoolField { displayName = "Enable Transparent Objects", getter = () => frameSettings.enableTransparentObjects, setter = value => frameSettings.enableTransparentObjects = value },
                new DebugUI.BoolField { displayName = "Enable MSAA", getter = () => frameSettings.enableMSAA, setter = value => frameSettings.enableMSAA = value },
            }
        },
        new DebugUI.Container
        {
            displayName = "XR Settings",
            children =
            {
                new DebugUI.BoolField { displayName = "Enable Stereo Rendering", getter = () => frameSettings.enableStereo, setter = value => frameSettings.enableStereo = value }
            }
        },
        new DebugUI.Container
        {
            displayName = "Lighting Settings",
            children =
            {
                new DebugUI.BoolField { displayName = "Enable SSR", getter = () => frameSettings.enableSSR, setter = value => frameSettings.enableSSR = value },
                new DebugUI.BoolField { displayName = "Enable SSAO", getter = () => frameSettings.enableSSAO, setter = value => frameSettings.enableSSAO = value },
                new DebugUI.BoolField { displayName = "Enable SubsurfaceScattering", getter = () => frameSettings.enableSubsurfaceScattering, setter = value => frameSettings.enableSubsurfaceScattering = value },
                new DebugUI.BoolField { displayName = "Enable Transmission", getter = () => frameSettings.enableTransmission, setter = value => frameSettings.enableTransmission = value },
                new DebugUI.BoolField { displayName = "Enable Shadows", getter = () => frameSettings.enableShadow, setter = value => frameSettings.enableShadow = value },
                new DebugUI.BoolField { displayName = "Enable Contact Shadows", getter = () => frameSettings.enableContactShadows, setter = value => frameSettings.enableContactShadows = value },
                new DebugUI.BoolField { displayName = "Enable ShadowMask", getter = () => frameSettings.enableShadowMask, setter = value => frameSettings.enableShadowMask = value },
            }
        }
    });

    LightLoopSettings.RegisterDebug(frameSettings.lightLoopSettings, widgets);

    var panel = DebugManager.instance.GetPanel(menuName, true);
    panel.children.Add(widgets.ToArray());
}
public void VolumeVoxelizationPass(DensityVolumeList densityVolumes, HDCamera camera, CommandBuffer cmd, FrameSettings settings, uint frameIndex)
{
    if (preset == VolumetricLightingPreset.Off) { return; }

    var visualEnvironment = VolumeManager.instance.stack.GetComponent<VisualEnvironment>();
    if (visualEnvironment.fogType != FogType.Volumetric) { return; }

    VBuffer vBuffer = FindVBuffer(camera.GetViewID());
    if (vBuffer == null) { return; }

    using (new ProfilingSample(cmd, "Volume Voxelization"))
    {
        int numVisibleVolumes = m_VisibleVolumeBounds.Count;

        if (numVisibleVolumes == 0)
        {
            // Clear the render target instead of running the shader.
            // Note: the clear must take the global fog into account!
            // CoreUtils.SetRenderTarget(cmd, vBuffer.GetDensityBuffer(), ClearFlag.Color, CoreUtils.clearColorAllBlack);
            // return;

            // Clearing 3D textures does not seem to work!
            // Use the workaround of running the full shader with 0 density.
        }

        bool enableClustered = settings.lightLoopSettings.enableTileAndCluster;

        int kernel = m_VolumeVoxelizationCS.FindKernel(enableClustered ? "VolumeVoxelizationClustered"
                                                                       : "VolumeVoxelizationBruteforce");

        var frameParams = vBuffer.GetParameters(frameIndex);
        Vector4 resolution = frameParams.resolution;
        float vFoV = camera.camera.fieldOfView * Mathf.Deg2Rad;

        // Compose the matrix which allows us to compute the world space view direction.
        Matrix4x4 transform = HDUtils.ComputePixelCoordToWorldSpaceViewDirectionMatrix(vFoV, resolution, camera.viewMatrix, false);

        cmd.SetComputeTextureParam(m_VolumeVoxelizationCS, kernel, HDShaderIDs._VBufferDensity, vBuffer.GetDensityBuffer());
        cmd.SetComputeBufferParam(m_VolumeVoxelizationCS, kernel, HDShaderIDs._VolumeBounds, s_VisibleVolumeBoundsBuffer);
        cmd.SetComputeBufferParam(m_VolumeVoxelizationCS, kernel, HDShaderIDs._VolumeData, s_VisibleVolumeDataBuffer);

        // TODO: set the constant buffer data only once.
        cmd.SetComputeMatrixParam(m_VolumeVoxelizationCS, HDShaderIDs._VBufferCoordToViewDirWS, transform);
        cmd.SetComputeIntParam(m_VolumeVoxelizationCS, HDShaderIDs._NumVisibleDensityVolumes, numVisibleVolumes);

        int w = (int)resolution.x;
        int h = (int)resolution.y;

        // The shader defines GROUP_SIZE_1D = 8.
        cmd.DispatchCompute(m_VolumeVoxelizationCS, kernel, (w + 7) / 8, (h + 7) / 8, 1);
    }
}
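// The dispatch above rounds the viewport up to whole 8x8 thread groups; (w + 7) / 8 is
// integer division rounding up. A sketch of the same arithmetic as a standalone helper:
static int DivRoundUp(int count, int groupSize)
{
    // e.g. DivRoundUp(1920, 8) == 240, DivRoundUp(1921, 8) == 241
    return (count + groupSize - 1) / groupSize;
}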
public void VolumetricLightingPass(HDCamera camera, CommandBuffer cmd, FrameSettings settings, uint frameIndex)
{
    if (preset == VolumetricLightingPreset.Off) { return; }

    var visualEnvironment = VolumeManager.instance.stack.GetComponent<VisualEnvironment>();
    if (visualEnvironment.fogType != FogType.Volumetric) { return; }

    VBuffer vBuffer = FindVBuffer(camera.GetViewID());
    if (vBuffer == null) { return; }

    using (new ProfilingSample(cmd, "Volumetric Lighting"))
    {
        bool enableClustered = settings.lightLoopSettings.enableTileAndCluster;
        // Reprojection is only available in Play Mode because all the frame counters in Edit Mode are broken.
        bool enableReprojection = Application.isPlaying && camera.camera.cameraType == CameraType.Game;

        int kernel;
        if (enableReprojection)
        {
            kernel = m_VolumetricLightingCS.FindKernel(enableClustered ? "VolumetricLightingClusteredReproj"
                                                                       : "VolumetricLightingBruteforceReproj");
        }
        else
        {
            kernel = m_VolumetricLightingCS.FindKernel(enableClustered ? "VolumetricLightingClustered"
                                                                       : "VolumetricLightingBruteforce");
        }

        var frameParams = vBuffer.GetParameters(frameIndex);
        Vector4 resolution = frameParams.resolution;
        float vFoV = camera.camera.fieldOfView * Mathf.Deg2Rad;

        // Compose the matrix which allows us to compute the world space view direction.
        Matrix4x4 transform = HDUtils.ComputePixelCoordToWorldSpaceViewDirectionMatrix(vFoV, resolution, camera.viewMatrix, false);

        Vector2[] xySeq = GetHexagonalClosePackedSpheres7();

        // This is a sequence of 7 equidistant numbers from 1/14 to 13/14.
        // Each of them is the centroid of the interval of length 2/14.
        // They've been rearranged in a sequence of pairs {small, large}, s.t. (small + large) = 1.
        // That way, the running average position is close to 0.5.
        // | 6 | 2 | 4 | 1 | 5 | 3 | 7 |
        // |   |   |   | o |   |   |   |
        // |   | o |   | x |   |   |   |
        // |   | x |   | x |   | o |   |
        // |   | x | o | x |   | x |   |
        // |   | x | x | x | o | x |   |
        // | o | x | x | x | x | x |   |
        // | x | x | x | x | x | x | o |
        // | x | x | x | x | x | x | x |
        float[] zSeq = { 7.0f / 14.0f, 3.0f / 14.0f, 11.0f / 14.0f, 5.0f / 14.0f, 9.0f / 14.0f, 1.0f / 14.0f, 13.0f / 14.0f };

        int sampleIndex = (int)frameIndex % 7;

        // TODO: should we somehow reorder offsets in Z based on the offset in XY? S.t. the samples more evenly cover the domain.
        // Currently, we assume that they are completely uncorrelated, but maybe we should correlate them somehow.
        Vector4 offset = new Vector4(xySeq[sampleIndex].x, xySeq[sampleIndex].y, zSeq[sampleIndex], frameIndex);

        // Get the interpolated asymmetry value.
        var fog = VolumeManager.instance.stack.GetComponent<VolumetricFog>();

        // TODO: set 'm_VolumetricLightingPreset'.
        // TODO: set the constant buffer data only once.
        cmd.SetComputeMatrixParam(m_VolumetricLightingCS, HDShaderIDs._VBufferCoordToViewDirWS, transform);
        cmd.SetComputeVectorParam(m_VolumetricLightingCS, HDShaderIDs._VBufferSampleOffset, offset);
        cmd.SetComputeFloatParam(m_VolumetricLightingCS, HDShaderIDs._CornetteShanksConstant, CornetteShanksPhasePartConstant(fog.asymmetry));
        cmd.SetComputeTextureParam(m_VolumetricLightingCS, kernel, HDShaderIDs._VBufferDensity, vBuffer.GetDensityBuffer());                   // Read
        cmd.SetComputeTextureParam(m_VolumetricLightingCS, kernel, HDShaderIDs._VBufferLightingIntegral, vBuffer.GetLightingIntegralBuffer()); // Write

        if (enableReprojection)
        {
            cmd.SetComputeTextureParam(m_VolumetricLightingCS, kernel, HDShaderIDs._VBufferLightingFeedback, vBuffer.GetLightingFeedbackBuffer(frameIndex)); // Write
            cmd.SetComputeTextureParam(m_VolumetricLightingCS, kernel, HDShaderIDs._VBufferLightingHistory, vBuffer.GetLightingHistoryBuffer(frameIndex));   // Read
        }

        int w = (int)resolution.x;
        int h = (int)resolution.y;

        // The shader defines GROUP_SIZE_1D = 8.
        cmd.DispatchCompute(m_VolumetricLightingCS, kernel, (w + 7) / 8, (h + 7) / 8, 1);
    }
}
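// Sanity-check sketch for the Z sequence comment above: consecutive pairs {small, large}
// sum to 1, so every prefix average stays within 1/7 of 0.5 and returns to exactly 0.5
// after each completed pair:
static void CheckZSeqRunningAverage()
{
    float[] zSeq = { 7.0f / 14.0f, 3.0f / 14.0f, 11.0f / 14.0f, 5.0f / 14.0f, 9.0f / 14.0f, 1.0f / 14.0f, 13.0f / 14.0f };
    float sum = 0.0f;
    for (int i = 0; i < zSeq.Length; i++)
    {
        sum += zSeq[i];
        Debug.Log("Running average after " + (i + 1) + " samples: " + (sum / (i + 1))); // 0.5, 0.357, 0.5, 0.464, 0.5, 0.429, 0.5
    }
}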
// Pass all the systems that may want to update per-camera data here.
// That way you will never update an HDCamera and forget to update the dependent system.
public void Update(FrameSettings currentFrameSettings, VolumetricLightingSystem vlSys, MSAASamples msaaSamples)
{
    // Store a shortcut on HDAdditionalCameraData (done here and not in the constructor as
    // we don't create an HDCamera every frame and users can change the HDAdditionalData later,
    // e.g. when they create a new scene).
    m_AdditionalCameraData = camera.GetComponent<HDAdditionalCameraData>();

    m_frameSettings = currentFrameSettings;

    UpdateAntialiasing();

    // Handle memory allocation.
    {
        bool isColorPyramidHistoryRequired = m_frameSettings.IsEnabled(FrameSettingsField.SSR); // TODO: TAA as well
        bool isVolumetricHistoryRequired = m_frameSettings.IsEnabled(FrameSettingsField.Volumetrics) && m_frameSettings.IsEnabled(FrameSettingsField.ReprojectionForVolumetrics);

        int numColorPyramidBuffersRequired = isColorPyramidHistoryRequired ? 2 : 1; // TODO: 1 -> 0
        int numVolumetricBuffersRequired = isVolumetricHistoryRequired ? 2 : 0;     // History + feedback

        if ((numColorPyramidBuffersAllocated != numColorPyramidBuffersRequired) ||
            (numVolumetricBuffersAllocated != numVolumetricBuffersRequired))
        {
            // Reinit the system.
            colorPyramidHistoryIsValid = false;
            vlSys.DeinitializePerCameraData(this);

            // The history system only supports the "nuke all" option.
            m_HistoryRTSystem.Dispose();
            m_HistoryRTSystem = new BufferedRTHandleSystem();

            if (numColorPyramidBuffersRequired != 0)
            {
                AllocHistoryFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain, HistoryBufferAllocatorFunction, numColorPyramidBuffersRequired);
                colorPyramidHistoryIsValid = false;
            }

            vlSys.InitializePerCameraData(this, numVolumetricBuffersRequired);

            // Mark as init.
            numColorPyramidBuffersAllocated = numColorPyramidBuffersRequired;
            numVolumetricBuffersAllocated = numVolumetricBuffersRequired;
        }
    }

    UpdateViewConstants(IsTAAEnabled());

    // Update viewport sizes.
    m_ViewportSizePrevFrame = new Vector2Int(m_ActualWidth, m_ActualHeight);
    m_ActualWidth = Math.Max(camera.pixelWidth, 1);
    m_ActualHeight = Math.Max(camera.pixelHeight, 1);

    Vector2Int nonScaledSize = new Vector2Int(m_ActualWidth, m_ActualHeight);
    if (isMainGameView)
    {
        Vector2Int scaledSize = HDDynamicResolutionHandler.instance.GetRTHandleScale(new Vector2Int(camera.pixelWidth, camera.pixelHeight));
        nonScaledSize = HDDynamicResolutionHandler.instance.cachedOriginalSize;
        m_ActualWidth = scaledSize.x;
        m_ActualHeight = scaledSize.y;
    }

    var screenWidth = m_ActualWidth;
    var screenHeight = m_ActualHeight;

    // XRTODO: double-wide cleanup
    textureWidthScaling = new Vector4(1.0f, 1.0f, 0.0f, 0.0f);
    if (camera.stereoEnabled && XRGraphics.stereoRenderingMode == XRGraphics.StereoRenderingMode.SinglePass)
    {
        Debug.Assert(HDDynamicResolutionHandler.instance.SoftwareDynamicResIsEnabled() == false);

        var xrDesc = XRGraphics.eyeTextureDesc;
        nonScaledSize.x = screenWidth = m_ActualWidth = xrDesc.width;
        nonScaledSize.y = screenHeight = m_ActualHeight = xrDesc.height;

        textureWidthScaling = new Vector4(2.0f, 0.5f, 0.0f, 0.0f);
    }

    m_LastFrameActive = Time.frameCount;

    // TODO: cache this, or make the history system spill the beans...
    Vector2Int prevColorPyramidBufferSize = Vector2Int.zero;
    if (numColorPyramidBuffersAllocated > 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;
        prevColorPyramidBufferSize.x = rt.width;
        prevColorPyramidBufferSize.y = rt.height;
    }

    // TODO: cache this, or make the history system spill the beans...
    Vector3Int prevVolumetricBufferSize = Vector3Int.zero;
    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;
        prevVolumetricBufferSize.x = rt.width;
        prevVolumetricBufferSize.y = rt.height;
        prevVolumetricBufferSize.z = rt.volumeDepth;
    }

    m_msaaSamples = msaaSamples;

    // Here we use the non scaled resolution for the RTHandleSystem ref size because we assume that at some point we will need full resolution anyway.
    // This is also useful because we have some RT after final up-rez that will need the full size.
    RTHandles.SetReferenceSize(nonScaledSize.x, nonScaledSize.y, m_msaaSamples);
    m_HistoryRTSystem.SetReferenceSize(nonScaledSize.x, nonScaledSize.y, m_msaaSamples);
    m_HistoryRTSystem.Swap();

    Vector3Int currColorPyramidBufferSize = Vector3Int.zero;
    if (numColorPyramidBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;
        currColorPyramidBufferSize.x = rt.width;
        currColorPyramidBufferSize.y = rt.height;

        if ((currColorPyramidBufferSize.x != prevColorPyramidBufferSize.x) ||
            (currColorPyramidBufferSize.y != prevColorPyramidBufferSize.y))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            colorPyramidHistoryIsValid = false;
        }
    }

    Vector3Int currVolumetricBufferSize = Vector3Int.zero;
    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;
        currVolumetricBufferSize.x = rt.width;
        currVolumetricBufferSize.y = rt.height;
        currVolumetricBufferSize.z = rt.volumeDepth;

        if ((currVolumetricBufferSize.x != prevVolumetricBufferSize.x) ||
            (currVolumetricBufferSize.y != prevVolumetricBufferSize.y) ||
            (currVolumetricBufferSize.z != prevVolumetricBufferSize.z))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            volumetricHistoryIsValid = false;
        }
    }

    Vector2 rcpTextureSize = Vector2.one / new Vector2(RTHandles.maxWidth, RTHandles.maxHeight);
    Vector2 rcpTextureSizeHistory = Vector2.one / new Vector2(m_HistoryRTSystem.maxWidth, m_HistoryRTSystem.maxHeight);

    m_ViewportScalePreviousFrame = m_ViewportSizePrevFrame * rcpTextureSize;
    m_ViewportScalePreviousFrameHistory = m_ViewportSizePrevFrame * rcpTextureSizeHistory;
    m_ViewportScaleCurrentFrame = new Vector2Int(m_ActualWidth, m_ActualHeight) * rcpTextureSize;
    m_ViewportScaleCurrentFrameHistory = m_ViewportSizePrevFrame * rcpTextureSizeHistory;

    screenSize = new Vector4(screenWidth, screenHeight, 1.0f / screenWidth, 1.0f / screenHeight);
    screenParams = new Vector4(screenSize.x, screenSize.y, 1 + screenSize.z, 1 + screenSize.w);

    finalViewport = new Rect(camera.pixelRect.x, camera.pixelRect.y, nonScaledSize.x, nonScaledSize.y);

    if (vlSys != null)
    {
        vlSys.UpdatePerCameraData(this);
    }

    UpdateVolumeParameters();

    m_RecorderCaptureActions = CameraCaptureBridge.GetCaptureActions(camera);
}
public void PushGlobalParams(CommandBuffer cmd, DiffusionProfileSettings sssParameters, FrameSettings frameSettings)
{
    // Broadcast SSS parameters to all shaders.
    cmd.SetGlobalInt(HDShaderIDs._EnableSubsurfaceScattering, frameSettings.enableSubsurfaceScattering ? 1 : 0);

    unsafe
    {
        // Warning: Unity is not able to losslessly transfer integers larger than 2^24 to the shader system.
        // Therefore, we bitcast uint to float in C#, and bitcast back to uint in the shader.
        uint texturingModeFlags = sssParameters.texturingModeFlags;
        uint transmissionFlags = sssParameters.transmissionFlags;
        cmd.SetGlobalFloat(HDShaderIDs._TexturingModeFlags, *(float*)&texturingModeFlags);
        cmd.SetGlobalFloat(HDShaderIDs._TransmissionFlags, *(float*)&transmissionFlags);
    }

    cmd.SetGlobalVectorArray(HDShaderIDs._ThicknessRemaps, sssParameters.thicknessRemaps);
    cmd.SetGlobalVectorArray(HDShaderIDs._ShapeParams, sssParameters.shapeParams);
    cmd.SetGlobalVectorArray(HDShaderIDs._HalfRcpVariancesAndWeights, sssParameters.halfRcpVariancesAndWeights);

    // To disable transmission, we simply nullify the transmissionTint.
    cmd.SetGlobalVectorArray(HDShaderIDs._TransmissionTintsAndFresnel0, frameSettings.enableTransmission ? sssParameters.transmissionTintsAndFresnel0
                                                                                                         : sssParameters.disabledTransmissionTintsAndFresnel0);

    cmd.SetGlobalVectorArray(HDShaderIDs._WorldScales, sssParameters.worldScales);
}
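// Minimal sketch of the bit-cast round trip described above: the C# side reinterprets
// the 32 bits of the uint as a float with no numeric conversion, and the shader side is
// assumed to recover them with HLSL's asuint(). Requires 'unsafe' compilation.
static float PackUIntBitsAsFloat(uint bits)
{
    unsafe { return *(float*)&bits; } // reinterpretation only; SetGlobalFloat then transfers the bits losslessly
}
// HLSL side (assumption): uint flags = asuint(_TexturingModeFlags);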
// Pass all the systems that may want to update per-camera data here.
// That way you will never update an HDCamera and forget to update the dependent system.
public void Update(FrameSettings currentFrameSettings, PostProcessLayer postProcessLayer, VolumetricLightingSystem vlSys)
{
    // Store a shortcut on HDAdditionalCameraData (done here and not in the constructor as
    // we don't create an HDCamera every frame and users can change the HDAdditionalData later,
    // e.g. when they create a new scene).
    m_AdditionalCameraData = camera.GetComponent<HDAdditionalCameraData>();

    m_frameSettings = currentFrameSettings;

    // If TAA is enabled projMatrix will hold a jittered projection matrix. The original,
    // non-jittered projection matrix can be accessed via nonJitteredProjMatrix.
    bool taaEnabled = camera.cameraType == CameraType.Game &&
        HDUtils.IsTemporalAntialiasingActive(postProcessLayer) &&
        m_frameSettings.enablePostprocess;

    var nonJitteredCameraProj = camera.projectionMatrix;
    var cameraProj = taaEnabled
        ? postProcessLayer.temporalAntialiasing.GetJitteredProjectionMatrix(camera)
        : nonJitteredCameraProj;

    // The actual projection matrix used in shaders is actually massaged a bit to work across all platforms
    // (different Z value ranges etc.)
    var gpuProj = GL.GetGPUProjectionMatrix(cameraProj, true); // Had to change this from 'false'
    var gpuView = camera.worldToCameraMatrix;
    var gpuNonJitteredProj = GL.GetGPUProjectionMatrix(nonJitteredCameraProj, true);

    // In stereo, this corresponds to the center eye position.
    var pos = camera.transform.position;
    worldSpaceCameraPos = pos;

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        // Zero out the translation component.
        gpuView.SetColumn(3, new Vector4(0, 0, 0, 1));
    }

    var gpuVP = gpuNonJitteredProj * gpuView;

    // A camera can be rendered multiple times per frame; only update the previous view-proj & pos if needed.
    if (m_LastFrameActive != Time.frameCount)
    {
        if (isFirstFrame)
        {
            prevCameraPos = pos;
            prevViewProjMatrix = gpuVP;
        }
        else
        {
            prevCameraPos = cameraPos;
            prevViewProjMatrix = nonJitteredViewProjMatrix;
        }

        isFirstFrame = false;
    }

    taaFrameIndex = taaEnabled ? (uint)postProcessLayer.temporalAntialiasing.sampleIndex : 0;
    taaFrameRotation = new Vector2(Mathf.Sin(taaFrameIndex * (0.5f * Mathf.PI)), Mathf.Cos(taaFrameIndex * (0.5f * Mathf.PI)));

    viewMatrix = gpuView;
    projMatrix = gpuProj;
    nonJitteredProjMatrix = gpuNonJitteredProj;
    cameraPos = pos;
    detViewMatrix = viewMatrix.determinant;

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        Matrix4x4 cameraDisplacement = Matrix4x4.Translate(cameraPos - prevCameraPos); // Non-camera-relative positions
        prevViewProjMatrix *= cameraDisplacement; // Now prevViewProjMatrix correctly transforms this frame's camera-relative positionWS
    }

    float n = camera.nearClipPlane;
    float f = camera.farClipPlane;

    // Analyze the projection matrix.
    // p[2][3] = (reverseZ ? 1 : -1) * (depth_0_1 ? 1 : 2) * (f * n) / (f - n)
    float scale = projMatrix[2, 3] / (f * n) * (f - n);
    bool depth_0_1 = Mathf.Abs(scale) < 1.5f;
    bool reverseZ = scale > 0;
    bool flipProj = projMatrix.inverse.MultiplyPoint(new Vector3(0, 1, 0)).y < 0;

    // http://www.humus.name/temp/Linearize%20depth.txt
    if (reverseZ)
    {
        zBufferParams = new Vector4(-1 + f / n, 1, -1 / f + 1 / n, 1 / f);
    }
    else
    {
        zBufferParams = new Vector4(1 - f / n, f / n, 1 / f - 1 / n, 1 / n);
    }

    projectionParams = new Vector4(flipProj ? -1 : 1, n, f, 1.0f / f);

    float orthoHeight = camera.orthographic ? 2 * camera.orthographicSize : 0;
    float orthoWidth = orthoHeight * camera.aspect;
    unity_OrthoParams = new Vector4(orthoWidth, orthoHeight, 0, camera.orthographic ? 1 : 0);

    frustum = Frustum.Create(viewProjMatrix, depth_0_1, reverseZ);

    // Left, right, top, bottom, near, far.
    for (int i = 0; i < 6; i++)
    {
        frustumPlaneEquations[i] = new Vector4(frustum.planes[i].normal.x, frustum.planes[i].normal.y, frustum.planes[i].normal.z, frustum.planes[i].distance);
    }

    m_LastFrameActive = Time.frameCount;

    m_ActualWidth = camera.pixelWidth;
    m_ActualHeight = camera.pixelHeight;
    var screenWidth = m_ActualWidth;
    var screenHeight = m_ActualHeight;

#if !UNITY_SWITCH
    if (m_frameSettings.enableStereo)
    {
        screenWidth = XRSettings.eyeTextureWidth;
        screenHeight = XRSettings.eyeTextureHeight;

        var xrDesc = XRSettings.eyeTextureDesc;
        m_ActualWidth = xrDesc.width;
        m_ActualHeight = xrDesc.height;

        ConfigureStereoMatrices();
    }
#endif

    // Unfortunately sometimes (like in the HDCameraEditor) HDUtils.hdrpSettings can be null because of scripts that change the current pipeline...
    m_msaaSamples = HDUtils.hdrpSettings != null ? HDUtils.hdrpSettings.msaaSampleCount : MSAASamples.None;

    RTHandles.SetReferenceSize(m_ActualWidth, m_ActualHeight, m_frameSettings.enableMSAA, m_msaaSamples);
    m_HistoryRTSystem.SetReferenceSize(m_ActualWidth, m_ActualHeight, m_frameSettings.enableMSAA, m_msaaSamples);
    m_HistoryRTSystem.Swap();

    int maxWidth = RTHandles.maxWidth;
    int maxHeight = RTHandles.maxHeight;

    m_ViewportScalePreviousFrame = m_ViewportScaleCurrentFrame; // Double-buffer
    m_ViewportScaleCurrentFrame.x = (float)m_ActualWidth / maxWidth;
    m_ViewportScaleCurrentFrame.y = (float)m_ActualHeight / maxHeight;

    screenSize = new Vector4(screenWidth, screenHeight, 1.0f / screenWidth, 1.0f / screenHeight);
    screenParams = new Vector4(screenSize.x, screenSize.y, 1 + screenSize.z, 1 + screenSize.w);

    if (vlSys != null)
    {
        vlSys.UpdatePerCameraData(this);
    }
}
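// Hedged check of the zBufferParams packing above, using the usual Unity convention
// linearEyeDepth(z) = 1 / (zBufferParams.z * z + zBufferParams.w):
static float LinearEyeDepth(float rawDepth, Vector4 zBufferParams)
{
    return 1.0f / (zBufferParams.z * rawDepth + zBufferParams.w);
}
// With the reverse-Z packing (-1 + f/n, 1, -1/f + 1/n, 1/f): rawDepth = 1 yields n, rawDepth = 0 yields f.
// With the forward packing (1 - f/n, f/n, 1/f - 1/n, 1/n): rawDepth = 0 yields n, rawDepth = 1 yields f.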
// aggregateFrameSettings already contains the aggregation of RenderPipelineSettings and FrameSettings (regular and/or debug).
public static void InitializeLightLoopSettings(Camera camera, FrameSettings aggregateFrameSettings,
    RenderPipelineSettings renderPipelineSettings, FrameSettings frameSettings, ref LightLoopSettings aggregate)
{
    if (aggregate == null)
    {
        aggregate = new LightLoopSettings();
    }

    aggregate.enableTileAndCluster = frameSettings.lightLoopSettings.enableTileAndCluster;
    aggregate.enableComputeLightEvaluation = frameSettings.lightLoopSettings.enableComputeLightEvaluation;
    aggregate.enableComputeLightVariants = frameSettings.lightLoopSettings.enableComputeLightVariants;
    aggregate.enableComputeMaterialVariants = frameSettings.lightLoopSettings.enableComputeMaterialVariants;
    aggregate.enableFptlForForwardOpaque = frameSettings.lightLoopSettings.enableFptlForForwardOpaque;
    aggregate.enableBigTilePrepass = frameSettings.lightLoopSettings.enableBigTilePrepass;

    // Deferred opaque always uses FPTL. Forward opaque can use FPTL or cluster; transparent uses cluster.
    // When MSAA is enabled we disable FPTL, as it becomes expensive compared to cluster.
    // In HD, MSAA is only supported for forward-only rendering; there is no MSAA in deferred mode (for code complexity reasons).
    aggregate.enableFptlForForwardOpaque = aggregate.enableFptlForForwardOpaque && !aggregateFrameSettings.enableMSAA;

    // Disable FPTL for stereo for now.
    aggregate.enableFptlForForwardOpaque = aggregate.enableFptlForForwardOpaque && !XRGraphics.enabled;

    // If deferred, enable FPTL. If we are a forward-only renderer and not using FPTL for forward opaque, disable FPTL.
    aggregate.isFptlEnabled = aggregateFrameSettings.shaderLitMode == LitShaderMode.Deferred || aggregate.enableFptlForForwardOpaque;
}
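// The net effect of the three FPTL statements above, condensed into one predicate (sketch):
static bool ComputeIsFptlEnabled(LitShaderMode litShaderMode, bool fptlForForwardOpaque, bool msaaEnabled, bool xrEnabled)
{
    bool forwardOpaqueFptl = fptlForForwardOpaque && !msaaEnabled && !xrEnabled;
    // Deferred always uses FPTL for opaque; forward-only renderers use it only when allowed above.
    return litShaderMode == LitShaderMode.Deferred || forwardOpaqueFptl;
}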
#pragma warning disable 618 // Type or member is obsolete

internal static void MigrateFromClassVersion(ref ObsoleteFrameSettings oldFrameSettingsFormat, ref FrameSettings newFrameSettingsFormat, ref FrameSettingsOverrideMask newFrameSettingsOverrideMask)
{
    if (oldFrameSettingsFormat == null)
    {
        return;
    }

    // No need to migrate the values computed at frame time:
    //newFrameSettingsFormat.diffuseGlobalDimmer = oldFrameSettingsFormat.diffuseGlobalDimmer;
    //newFrameSettingsFormat.specularGlobalDimmer = oldFrameSettingsFormat.specularGlobalDimmer;

    // Data
    switch (oldFrameSettingsFormat.shaderLitMode)
    {
        case ObsoleteLitShaderMode.Forward:
            newFrameSettingsFormat.litShaderMode = LitShaderMode.Forward;
            break;
        case ObsoleteLitShaderMode.Deferred:
            newFrameSettingsFormat.litShaderMode = LitShaderMode.Deferred;
            break;
        default:
            throw new ArgumentException("Unknown ObsoleteLitShaderMode");
    }

    newFrameSettingsFormat.SetEnabled(FrameSettingsField.Shadow, oldFrameSettingsFormat.enableShadow);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.ContactShadows, oldFrameSettingsFormat.enableContactShadows);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.ShadowMask, oldFrameSettingsFormat.enableShadowMask);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.SSR, oldFrameSettingsFormat.enableSSR);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.SSAO, oldFrameSettingsFormat.enableSSAO);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.SubsurfaceScattering, oldFrameSettingsFormat.enableSubsurfaceScattering);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.Transmission, oldFrameSettingsFormat.enableTransmission);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.AtmosphericScattering, oldFrameSettingsFormat.enableAtmosphericScattering);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.Volumetrics, oldFrameSettingsFormat.enableVolumetrics);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.ReprojectionForVolumetrics, oldFrameSettingsFormat.enableReprojectionForVolumetrics);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.LightLayers, oldFrameSettingsFormat.enableLightLayers);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.DepthPrepassWithDeferredRendering, oldFrameSettingsFormat.enableDepthPrepassWithDeferredRendering);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.TransparentPrepass, oldFrameSettingsFormat.enableTransparentPrepass);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.MotionVectors, oldFrameSettingsFormat.enableMotionVectors);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.ObjectMotionVectors, oldFrameSettingsFormat.enableObjectMotionVectors);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.Decals, oldFrameSettingsFormat.enableDecals);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.RoughRefraction, oldFrameSettingsFormat.enableRoughRefraction);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.TransparentPostpass, oldFrameSettingsFormat.enableTransparentPostpass);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.Distortion, oldFrameSettingsFormat.enableDistortion);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.Postprocess, oldFrameSettingsFormat.enablePostprocess);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.OpaqueObjects, oldFrameSettingsFormat.enableOpaqueObjects);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.TransparentObjects, oldFrameSettingsFormat.enableTransparentObjects);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.RealtimePlanarReflection, oldFrameSettingsFormat.enableRealtimePlanarReflection);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.MSAA, oldFrameSettingsFormat.enableMSAA);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.ExposureControl, oldFrameSettingsFormat.enableExposureControl);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.AsyncCompute, oldFrameSettingsFormat.enableAsyncCompute);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.LightListAsync, oldFrameSettingsFormat.runLightListAsync);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.SSRAsync, oldFrameSettingsFormat.runSSRAsync);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.SSAOAsync, oldFrameSettingsFormat.runSSAOAsync);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.ContactShadowsAsync, oldFrameSettingsFormat.runContactShadowsAsync);
    newFrameSettingsFormat.SetEnabled(FrameSettingsField.VolumeVoxelizationsAsync, oldFrameSettingsFormat.runVolumeVoxelizationAsync);

    if (oldFrameSettingsFormat.lightLoopSettings != null)
    {
        newFrameSettingsFormat.SetEnabled(FrameSettingsField.DeferredTile, oldFrameSettingsFormat.lightLoopSettings.enableDeferredTileAndCluster);
        newFrameSettingsFormat.SetEnabled(FrameSettingsField.ComputeLightEvaluation, oldFrameSettingsFormat.lightLoopSettings.enableComputeLightEvaluation);
        newFrameSettingsFormat.SetEnabled(FrameSettingsField.ComputeLightVariants, oldFrameSettingsFormat.lightLoopSettings.enableComputeLightVariants);
        newFrameSettingsFormat.SetEnabled(FrameSettingsField.ComputeMaterialVariants, oldFrameSettingsFormat.lightLoopSettings.enableComputeMaterialVariants);
        newFrameSettingsFormat.SetEnabled(FrameSettingsField.FPTLForForwardOpaque, oldFrameSettingsFormat.lightLoopSettings.enableFptlForForwardOpaque);
        newFrameSettingsFormat.SetEnabled(FrameSettingsField.BigTilePrepass, oldFrameSettingsFormat.lightLoopSettings.enableBigTilePrepass);
    }

    // OverrideMask
    newFrameSettingsOverrideMask.mask = new BitArray128();
    Array values = Enum.GetValues(typeof(ObsoleteFrameSettingsOverrides));
    foreach (ObsoleteFrameSettingsOverrides val in values)
    {
        if ((val & oldFrameSettingsFormat.overrides) > 0)
        {
            switch (val)
            {
                case ObsoleteFrameSettingsOverrides.Shadow: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.Shadow] = true; break;
                case ObsoleteFrameSettingsOverrides.ContactShadow: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.ContactShadows] = true; break;
                case ObsoleteFrameSettingsOverrides.ShadowMask: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.ShadowMask] = true; break;
                case ObsoleteFrameSettingsOverrides.SSR: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.SSR] = true; break;
                case ObsoleteFrameSettingsOverrides.SSAO: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.SSAO] = true; break;
                case ObsoleteFrameSettingsOverrides.SubsurfaceScattering: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.SubsurfaceScattering] = true; break;
                case ObsoleteFrameSettingsOverrides.Transmission: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.Transmission] = true; break;
                case ObsoleteFrameSettingsOverrides.AtmosphericScaterring: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.AtmosphericScattering] = true; break;
                case ObsoleteFrameSettingsOverrides.Volumetrics: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.Volumetrics] = true; break;
                case ObsoleteFrameSettingsOverrides.ReprojectionForVolumetrics: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.ReprojectionForVolumetrics] = true; break;
                case ObsoleteFrameSettingsOverrides.LightLayers: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.LightLayers] = true; break;
                case ObsoleteFrameSettingsOverrides.ShaderLitMode: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.LitShaderMode] = true; break;
                case ObsoleteFrameSettingsOverrides.DepthPrepassWithDeferredRendering: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.DepthPrepassWithDeferredRendering] = true; break;
                case ObsoleteFrameSettingsOverrides.TransparentPrepass: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.TransparentPrepass] = true; break;
                case ObsoleteFrameSettingsOverrides.MotionVectors: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.MotionVectors] = true; break;
                case ObsoleteFrameSettingsOverrides.ObjectMotionVectors: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.ObjectMotionVectors] = true; break;
                case ObsoleteFrameSettingsOverrides.Decals: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.Decals] = true; break;
                case ObsoleteFrameSettingsOverrides.RoughRefraction: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.RoughRefraction] = true; break;
                case ObsoleteFrameSettingsOverrides.TransparentPostpass: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.TransparentPostpass] = true; break;
                case ObsoleteFrameSettingsOverrides.Distortion: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.Distortion] = true; break;
                case ObsoleteFrameSettingsOverrides.Postprocess: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.Postprocess] = true; break;
                case ObsoleteFrameSettingsOverrides.OpaqueObjects: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.OpaqueObjects] = true; break;
                case ObsoleteFrameSettingsOverrides.TransparentObjects: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.TransparentObjects] = true; break;
                case ObsoleteFrameSettingsOverrides.RealtimePlanarReflection: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.RealtimePlanarReflection] = true; break;
                case ObsoleteFrameSettingsOverrides.MSAA: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.MSAA] = true; break;
                case ObsoleteFrameSettingsOverrides.ExposureControl: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.ExposureControl] = true; break;
                case ObsoleteFrameSettingsOverrides.AsyncCompute: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.AsyncCompute] = true; break;
                case ObsoleteFrameSettingsOverrides.LightListAsync: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.LightListAsync] = true; break;
                case ObsoleteFrameSettingsOverrides.SSRAsync: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.SSRAsync] = true; break;
                case ObsoleteFrameSettingsOverrides.SSAOAsync: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.SSAOAsync] = true; break;
                case ObsoleteFrameSettingsOverrides.ContactShadowsAsync: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.ContactShadowsAsync] = true; break;
                case ObsoleteFrameSettingsOverrides.VolumeVoxelizationsAsync: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.VolumeVoxelizationsAsync] = true; break;
                default: throw new ArgumentException("Unknown ObsoleteFrameSettingsOverride, was " + val);
            }
        }
    }

    if (oldFrameSettingsFormat.lightLoopSettings != null)
    {
        values = Enum.GetValues(typeof(ObsoleteLightLoopSettingsOverrides));
        foreach (ObsoleteLightLoopSettingsOverrides val in values)
        {
            if ((val & oldFrameSettingsFormat.lightLoopSettings.overrides) > 0)
            {
                switch (val)
                {
                    case ObsoleteLightLoopSettingsOverrides.TileAndCluster: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.DeferredTile] = true; break;
                    case ObsoleteLightLoopSettingsOverrides.BigTilePrepass: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.BigTilePrepass] = true; break;
                    case ObsoleteLightLoopSettingsOverrides.ComputeLightEvaluation: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.ComputeLightEvaluation] = true; break;
                    case ObsoleteLightLoopSettingsOverrides.ComputeLightVariants: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.ComputeLightVariants] = true; break;
                    case ObsoleteLightLoopSettingsOverrides.ComputeMaterialVariants: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.ComputeMaterialVariants] = true; break;
                    case ObsoleteLightLoopSettingsOverrides.FptlForForwardOpaque: newFrameSettingsOverrideMask.mask[(int)FrameSettingsField.FPTLForForwardOpaque] = true; break;
                    default: throw new ArgumentException("Unknown ObsoleteLightLoopSettingsOverrides");
                }
            }
        }
    }

    // Free the obsolete data:
    oldFrameSettingsFormat = null;
}
#pragma warning restore 618 // Type or member is obsolete

internal static void MigrateToAfterPostprocess(ref FrameSettings cameraFrameSettings)
{
    cameraFrameSettings.SetEnabled(FrameSettingsField.AfterPostprocess, true);
}
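// The override migration above is a flag-by-flag rename. A compact sketch of the same
// mapping expressed as a lookup table (a hypothetical alternative, not the shipped code);
// assumes 'using System.Collections.Generic;':
static readonly Dictionary<ObsoleteFrameSettingsOverrides, FrameSettingsField> k_OverrideRenames =
    new Dictionary<ObsoleteFrameSettingsOverrides, FrameSettingsField>
    {
        { ObsoleteFrameSettingsOverrides.Shadow, FrameSettingsField.Shadow },
        { ObsoleteFrameSettingsOverrides.ContactShadow, FrameSettingsField.ContactShadows },
        { ObsoleteFrameSettingsOverrides.AtmosphericScaterring, FrameSettingsField.AtmosphericScattering },
        // The remaining entries mirror the switch above one-to-one.
    };
// Migration then reduces to: for each pair, if (oldOverrides & pair.Key) != 0, set mask[(int)pair.Value] = true.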
// Pass all the systems that may want to update per-camera data here.
// That way you will never update an HDCamera and forget to update the dependent system.
public void Update(FrameSettings currentFrameSettings, VolumetricLightingSystem vlSys, MSAASamples msaaSamples)
{
    // Store a shortcut on HDAdditionalCameraData (done here and not in the constructor as
    // we don't create an HDCamera every frame and users can change the HDAdditionalData later,
    // e.g. when they create a new scene).
    m_AdditionalCameraData = camera.GetComponent<HDAdditionalCameraData>();

    m_frameSettings = currentFrameSettings;

    // Handle post-process AA:
    // - If post-processing is disabled altogether, no AA
    // - In scene view, only enable TAA if animated materials are enabled
    // - Else just use the currently set AA mode on the camera
    {
        if (!m_frameSettings.IsEnabled(FrameSettingsField.Postprocess) || !CoreUtils.ArePostProcessesEnabled(camera))
        {
            antialiasing = AntialiasingMode.None;
        }
#if UNITY_EDITOR
        else if (camera.cameraType == CameraType.SceneView)
        {
            var mode = HDRenderPipelinePreferences.sceneViewAntialiasing;

            if (mode == AntialiasingMode.TemporalAntialiasing && !CoreUtils.AreAnimatedMaterialsEnabled(camera))
            {
                antialiasing = AntialiasingMode.None;
            }
            else
            {
                antialiasing = mode;
            }
        }
#endif
        else if (m_AdditionalCameraData != null)
        {
            antialiasing = m_AdditionalCameraData.antialiasing;
        }
        else
        {
            antialiasing = AntialiasingMode.None;
        }
    }

    // Handle memory allocation.
    {
        bool isColorPyramidHistoryRequired = m_frameSettings.IsEnabled(FrameSettingsField.SSR); // TODO: TAA as well
        bool isVolumetricHistoryRequired = m_frameSettings.IsEnabled(FrameSettingsField.Volumetrics) && m_frameSettings.IsEnabled(FrameSettingsField.ReprojectionForVolumetrics);

        int numColorPyramidBuffersRequired = isColorPyramidHistoryRequired ? 2 : 1; // TODO: 1 -> 0
        int numVolumetricBuffersRequired = isVolumetricHistoryRequired ? 2 : 0;     // History + feedback

        if ((numColorPyramidBuffersAllocated != numColorPyramidBuffersRequired) ||
            (numVolumetricBuffersAllocated != numVolumetricBuffersRequired))
        {
            // Reinit the system.
            colorPyramidHistoryIsValid = false;
            vlSys.DeinitializePerCameraData(this);

            // The history system only supports the "nuke all" option.
            m_HistoryRTSystem.Dispose();
            m_HistoryRTSystem = new BufferedRTHandleSystem();

            if (numColorPyramidBuffersRequired != 0)
            {
                AllocHistoryFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain, HistoryBufferAllocatorFunction, numColorPyramidBuffersRequired);
                colorPyramidHistoryIsValid = false;
            }

            vlSys.InitializePerCameraData(this, numVolumetricBuffersRequired);

            // Mark as init.
            numColorPyramidBuffersAllocated = numColorPyramidBuffersRequired;
            numVolumetricBuffersAllocated = numVolumetricBuffersRequired;
        }
    }

    // If TAA is enabled projMatrix will hold a jittered projection matrix. The original,
    // non-jittered projection matrix can be accessed via nonJitteredProjMatrix.
    bool taaEnabled = antialiasing == AntialiasingMode.TemporalAntialiasing;

    if (!taaEnabled)
    {
        taaFrameIndex = 0;
        taaJitter = Vector4.zero;
    }

    var nonJitteredCameraProj = camera.projectionMatrix;
    var cameraProj = taaEnabled
        ? GetJitteredProjectionMatrix(nonJitteredCameraProj)
        : nonJitteredCameraProj;

    // The actual projection matrix used in shaders is actually massaged a bit to work across all platforms
    // (different Z value ranges etc.)
    var gpuProj = GL.GetGPUProjectionMatrix(cameraProj, true); // Had to change this from 'false'
    var gpuView = camera.worldToCameraMatrix;
    var gpuNonJitteredProj = GL.GetGPUProjectionMatrix(nonJitteredCameraProj, true);

    // Update viewport sizes.
    m_ViewportSizePrevFrame = new Vector2Int(m_ActualWidth, m_ActualHeight);
    m_ActualWidth = Math.Max(camera.pixelWidth, 1);
    m_ActualHeight = Math.Max(camera.pixelHeight, 1);

    Vector2Int nonScaledSize = new Vector2Int(m_ActualWidth, m_ActualHeight);
    if (isMainGameView)
    {
        Vector2Int scaledSize = HDDynamicResolutionHandler.instance.GetRTHandleScale(new Vector2Int(camera.pixelWidth, camera.pixelHeight));
        nonScaledSize = HDDynamicResolutionHandler.instance.cachedOriginalSize;
        m_ActualWidth = scaledSize.x;
        m_ActualHeight = scaledSize.y;
    }

    var screenWidth = m_ActualWidth;
    var screenHeight = m_ActualHeight;

    textureWidthScaling = new Vector4(1.0f, 1.0f, 0.0f, 0.0f);

    numEyes = camera.stereoEnabled ? (uint)2 : (uint)1; // TODO VR: Generalize this when support for >2 eyes comes out with XR SDK

    if (camera.stereoEnabled)
    {
        if (XRGraphics.stereoRenderingMode == XRGraphics.StereoRenderingMode.SinglePass)
        {
            textureWidthScaling = new Vector4(2.0f, 0.5f, 0.0f, 0.0f);
        }

        for (uint eyeIndex = 0; eyeIndex < 2; eyeIndex++)
        {
            // For VR, TAA proj matrices don't need to be jittered.
            var currProjStereo = camera.GetStereoProjectionMatrix((Camera.StereoscopicEye)eyeIndex);
            var gpuCurrProjStereo = GL.GetGPUProjectionMatrix(currProjStereo, true);
            var gpuCurrViewStereo = camera.GetStereoViewMatrix((Camera.StereoscopicEye)eyeIndex);

            if (ShaderConfig.s_CameraRelativeRendering != 0)
            {
                // Zero out the translation component.
                gpuCurrViewStereo.SetColumn(3, new Vector4(0, 0, 0, 1));
            }

            var gpuCurrVPStereo = gpuCurrProjStereo * gpuCurrViewStereo;

            // A camera can be rendered multiple times per frame; only update the previous view-proj & pos if needed.
            if (m_LastFrameActive != Time.frameCount)
            {
                if (isFirstFrame)
                {
                    prevWorldSpaceCameraPosStereo[eyeIndex] = gpuCurrViewStereo.inverse.GetColumn(3);
                    prevViewProjMatrixStereo[eyeIndex] = gpuCurrVPStereo;
                }
                else
                {
                    prevWorldSpaceCameraPosStereo[eyeIndex] = worldSpaceCameraPosStereo[eyeIndex];
                    prevViewProjMatrixStereo[eyeIndex] = GetViewProjMatrixStereo(eyeIndex); // Grabbing this before ConfigureStereoMatrices updates view/proj
                }

                isFirstFrame = false;
            }
        }

        // XRTODO: fix this
        isFirstFrame = true; // So that mono vars can still update when stereo is active

        // XRTODO: remove once SPI is working
        if (XRGraphics.stereoRenderingMode == XRGraphics.StereoRenderingMode.SinglePass)
        {
            Debug.Assert(HDDynamicResolutionHandler.instance.SoftwareDynamicResIsEnabled() == false);

            var xrDesc = XRGraphics.eyeTextureDesc;
            nonScaledSize.x = screenWidth = m_ActualWidth = xrDesc.width;
            nonScaledSize.y = screenHeight = m_ActualHeight = xrDesc.height;
        }
    }

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        // Zero out the translation component.
        gpuView.SetColumn(3, new Vector4(0, 0, 0, 1));
    }

    var gpuVP = gpuNonJitteredProj * gpuView;

    // A camera can be rendered multiple times per frame; only update the previous view-proj & pos if needed.
    // Note: if your first rendered view during the frame is not the Game view, everything breaks.
    if (m_LastFrameActive != Time.frameCount)
    {
        if (isFirstFrame)
        {
            prevWorldSpaceCameraPos = camera.transform.position;
            prevViewProjMatrix = gpuVP;
        }
        else
        {
            prevWorldSpaceCameraPos = worldSpaceCameraPos;
            prevViewProjMatrix = nonJitteredViewProjMatrix;
            prevViewProjMatrixNoCameraTrans = prevViewProjMatrix;
        }

        isFirstFrame = false;
    }

    // In stereo, this corresponds to the center eye position.
    worldSpaceCameraPos = camera.transform.position;

    taaFrameRotation = new Vector2(Mathf.Sin(taaFrameIndex * (0.5f * Mathf.PI)), Mathf.Cos(taaFrameIndex * (0.5f * Mathf.PI)));

    viewMatrix = gpuView;
    projMatrix = gpuProj;
    nonJitteredProjMatrix = gpuNonJitteredProj;

    ConfigureStereoMatrices();

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        prevWorldSpaceCameraPos = worldSpaceCameraPos - prevWorldSpaceCameraPos;

        // This fixes an issue with cameraDisplacement stacking in prevViewProjMatrix when the same camera
        // renders multiple times each logical frame, causing glitchy motion blur when the editor is paused.
        if (m_LastFrameActive != Time.frameCount)
        {
            Matrix4x4 cameraDisplacement = Matrix4x4.Translate(prevWorldSpaceCameraPos);
            prevViewProjMatrix *= cameraDisplacement; // Now prevViewProjMatrix correctly transforms this frame's camera-relative positionWS
        }
    }
    else
    {
        Matrix4x4 noTransViewMatrix = camera.worldToCameraMatrix;
        noTransViewMatrix.SetColumn(3, new Vector4(0, 0, 0, 1));
        prevViewProjMatrixNoCameraTrans = nonJitteredProjMatrix * noTransViewMatrix;
    }

    float n = camera.nearClipPlane;
    float f = camera.farClipPlane;

    // Analyze the projection matrix.
    // p[2][3] = (reverseZ ? 1 : -1) * (depth_0_1 ? 1 : 2) * (f * n) / (f - n)
    float scale = projMatrix[2, 3] / (f * n) * (f - n);
    bool depth_0_1 = Mathf.Abs(scale) < 1.5f;
    bool reverseZ = scale > 0;
    bool flipProj = projMatrix.inverse.MultiplyPoint(new Vector3(0, 1, 0)).y < 0;

    // http://www.humus.name/temp/Linearize%20depth.txt
    if (reverseZ)
    {
        zBufferParams = new Vector4(-1 + f / n, 1, -1 / f + 1 / n, 1 / f);
    }
    else
    {
        zBufferParams = new Vector4(1 - f / n, f / n, 1 / f - 1 / n, 1 / n);
    }

    projectionParams = new Vector4(flipProj ? -1 : 1, n, f, 1.0f / f);

    float orthoHeight = camera.orthographic ? 2 * camera.orthographicSize : 0;
    float orthoWidth = orthoHeight * camera.aspect;
    unity_OrthoParams = new Vector4(orthoWidth, orthoHeight, 0, camera.orthographic ? 1 : 0);

    Frustum.Create(frustum, viewProjMatrix, depth_0_1, reverseZ);

    // Left, right, top, bottom, near, far.
    for (int i = 0; i < 6; i++)
    {
        frustumPlaneEquations[i] = new Vector4(frustum.planes[i].normal.x, frustum.planes[i].normal.y, frustum.planes[i].normal.z, frustum.planes[i].distance);
    }

    m_LastFrameActive = Time.frameCount;

    // TODO: cache this, or make the history system spill the beans...
    Vector2Int prevColorPyramidBufferSize = Vector2Int.zero;
    if (numColorPyramidBuffersAllocated > 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;
        prevColorPyramidBufferSize.x = rt.width;
        prevColorPyramidBufferSize.y = rt.height;
    }

    // TODO: cache this, or make the history system spill the beans...
    Vector3Int prevVolumetricBufferSize = Vector3Int.zero;
    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;
        prevVolumetricBufferSize.x = rt.width;
        prevVolumetricBufferSize.y = rt.height;
        prevVolumetricBufferSize.z = rt.volumeDepth;
    }

    m_msaaSamples = msaaSamples;

    // Here we use the non scaled resolution for the RTHandleSystem ref size because we assume that at some point we will need full resolution anyway.
    // This is also useful because we have some RT after final up-rez that will need the full size.
    RTHandles.SetReferenceSize(nonScaledSize.x, nonScaledSize.y, m_msaaSamples);
    m_HistoryRTSystem.SetReferenceSize(nonScaledSize.x, nonScaledSize.y, m_msaaSamples);
    m_HistoryRTSystem.Swap();

    Vector3Int currColorPyramidBufferSize = Vector3Int.zero;
    if (numColorPyramidBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;
        currColorPyramidBufferSize.x = rt.width;
        currColorPyramidBufferSize.y = rt.height;

        if ((currColorPyramidBufferSize.x != prevColorPyramidBufferSize.x) ||
            (currColorPyramidBufferSize.y != prevColorPyramidBufferSize.y))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            colorPyramidHistoryIsValid = false;
        }
    }

    Vector3Int currVolumetricBufferSize = Vector3Int.zero;
    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;
        currVolumetricBufferSize.x = rt.width;
        currVolumetricBufferSize.y = rt.height;
        currVolumetricBufferSize.z = rt.volumeDepth;

        if ((currVolumetricBufferSize.x != prevVolumetricBufferSize.x) ||
            (currVolumetricBufferSize.y != prevVolumetricBufferSize.y) ||
            (currVolumetricBufferSize.z != prevVolumetricBufferSize.z))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            volumetricHistoryIsValid = false;
        }
    }

    int maxWidth = RTHandles.maxWidth;
    int maxHeight = RTHandles.maxHeight;

    Vector2 rcpTextureSize = Vector2.one / new Vector2(maxWidth, maxHeight);

    m_ViewportScalePreviousFrame = m_ViewportSizePrevFrame * rcpTextureSize;
    m_ViewportScaleCurrentFrame = new Vector2Int(m_ActualWidth, m_ActualHeight) * rcpTextureSize;

    screenSize = new Vector4(screenWidth, screenHeight, 1.0f / screenWidth, 1.0f / screenHeight);
    screenParams = new Vector4(screenSize.x, screenSize.y, 1 + screenSize.z, 1 + screenSize.w);

    finalViewport = new Rect(camera.pixelRect.x, camera.pixelRect.y, nonScaledSize.x, nonScaledSize.y);

    if (vlSys != null)
    {
        vlSys.UpdatePerCameraData(this);
    }

    UpdateVolumeParameters();
}
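// Sketch of the viewport-scale arithmetic at the end of Update above: RTHandles are
// allocated at the largest reference size seen so far, so per-camera UVs must be
// rescaled by actualSize / maxSize.
static Vector2 ComputeViewportScale(Vector2Int actualSize, int maxWidth, int maxHeight)
{
    return new Vector2((float)actualSize.x / maxWidth, (float)actualSize.y / maxHeight);
}
// e.g. a 1280x720 camera on 1920x1080 RTHandles samples UVs in [0, 0.667] x [0, 0.667].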
public void Update(PostProcessLayer postProcessLayer, FrameSettings frameSettings)
{
    // If TAA is enabled projMatrix will hold a jittered projection matrix. The original,
    // non-jittered projection matrix can be accessed via nonJitteredProjMatrix.
    bool taaEnabled = Application.isPlaying &&
        camera.cameraType == CameraType.Game &&
        CoreUtils.IsTemporalAntialiasingActive(postProcessLayer);

    var nonJitteredCameraProj = camera.projectionMatrix;
    var cameraProj = taaEnabled
        ? postProcessLayer.temporalAntialiasing.GetJitteredProjectionMatrix(camera)
        : nonJitteredCameraProj;

    // The actual projection matrix used in shaders is actually massaged a bit to work across all platforms
    // (different Z value ranges etc.)
    var gpuProj = GL.GetGPUProjectionMatrix(cameraProj, true); // Had to change this from 'false'
    var gpuView = camera.worldToCameraMatrix;
    var gpuNonJitteredProj = GL.GetGPUProjectionMatrix(nonJitteredCameraProj, true);

    var pos = camera.transform.position;
    var relPos = pos; // World-origin-relative

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        // Zero out the translation component.
        gpuView.SetColumn(3, new Vector4(0, 0, 0, 1));
        relPos = Vector3.zero; // Camera-relative
    }

    var gpuVP = gpuNonJitteredProj * gpuView;

    // A camera can be rendered multiple times per frame; only update the previous view-proj & pos if needed.
    if (m_LastFrameActive != Time.frameCount)
    {
        if (isFirstFrame)
        {
            prevCameraPos = pos;
            prevViewProjMatrix = gpuVP;
        }
        else
        {
            prevCameraPos = cameraPos;
            prevViewProjMatrix = nonJitteredViewProjMatrix;
        }

        isFirstFrame = false;
    }

    const uint taaFrameCount = 8;
    taaFrameIndex = taaEnabled ? (uint)Time.renderedFrameCount % taaFrameCount : 0;
    taaFrameRotation = new Vector2(Mathf.Sin(taaFrameIndex * (0.5f * Mathf.PI)), Mathf.Cos(taaFrameIndex * (0.5f * Mathf.PI)));

    viewMatrix = gpuView;
    projMatrix = gpuProj;
    nonJitteredProjMatrix = gpuNonJitteredProj;
    cameraPos = pos;
    viewParam = new Vector4(viewMatrix.determinant, 0.0f, 0.0f, 0.0f);

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        Matrix4x4 cameraDisplacement = Matrix4x4.Translate(cameraPos - prevCameraPos); // Non-camera-relative positions
        prevViewProjMatrix *= cameraDisplacement; // Now prevViewProjMatrix correctly transforms this frame's camera-relative positionWS
    }

    // Warning: near and far planes appear to be broken.
    GeometryUtility.CalculateFrustumPlanes(viewProjMatrix, frustumPlanes);

    for (int i = 0; i < 4; i++)
    {
        // Left, right, top, bottom.
        frustumPlaneEquations[i] = new Vector4(frustumPlanes[i].normal.x, frustumPlanes[i].normal.y, frustumPlanes[i].normal.z, frustumPlanes[i].distance);
    }

    // Near, far.
    // We need to switch the forward direction based on handedness (reminder: regular cameras have a negative
    // determinant in Unity, while reflection probes follow the DX convention and have a positive determinant).
    Vector3 forward = viewParam.x < 0.0f ? camera.transform.forward : -camera.transform.forward;

    frustumPlaneEquations[4] = new Vector4(forward.x, forward.y, forward.z, -Vector3.Dot(forward, relPos) - camera.nearClipPlane);
    frustumPlaneEquations[5] = new Vector4(-forward.x, -forward.y, -forward.z, Vector3.Dot(forward, relPos) + camera.farClipPlane);

    m_LastFrameActive = Time.frameCount;

    RenderTextureDescriptor tempDesc;
    if (frameSettings.enableStereo)
    {
        screenSize = new Vector4(XRSettings.eyeTextureWidth, XRSettings.eyeTextureHeight, 1.0f / XRSettings.eyeTextureWidth, 1.0f / XRSettings.eyeTextureHeight);
        tempDesc = XRSettings.eyeTextureDesc;
    }
    else
    {
        screenSize = new Vector4(camera.pixelWidth, camera.pixelHeight, 1.0f / camera.pixelWidth, 1.0f / camera.pixelHeight);
        tempDesc = new RenderTextureDescriptor(camera.pixelWidth, camera.pixelHeight);
    }

    tempDesc.msaaSamples = 1; // Will be updated later; deferred will always set this to 1.
    tempDesc.depthBufferBits = 0;
    tempDesc.autoGenerateMips = false;
    tempDesc.useMipMap = false;
    tempDesc.enableRandomWrite = false;
    tempDesc.memoryless = RenderTextureMemoryless.None;

    renderTextureDesc = tempDesc;
}
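// Quick check of the near/far plane packing above: planes are stored as (n.xyz, d) with
// signed distance n.x*x + n.y*y + n.z*z + d. For the near plane, a point at
// 'relPos + forward * nearClipPlane' should evaluate to 0 (sketch):
static float SignedDistance(Vector4 plane, Vector3 p)
{
    return plane.x * p.x + plane.y * p.y + plane.z * p.z + plane.w;
}
// SignedDistance(frustumPlaneEquations[4], relPos + forward * camera.nearClipPlane) == 0 (up to float precision)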
public abstract void PushShaderParameters(CommandBuffer cmd, FrameSettings frameSettings);
public void Update(PostProcessLayer postProcessLayer, FrameSettings frameSettings)
{
    // If TAA is enabled projMatrix will hold a jittered projection matrix. The original,
    // non-jittered projection matrix can be accessed via nonJitteredProjMatrix.
    bool taaEnabled = Application.isPlaying &&
        camera.cameraType == CameraType.Game &&
        CoreUtils.IsTemporalAntialiasingActive(postProcessLayer);

    var nonJitteredCameraProj = camera.projectionMatrix;
    var cameraProj = taaEnabled
        ? postProcessLayer.temporalAntialiasing.GetJitteredProjectionMatrix(camera)
        : nonJitteredCameraProj;

    // The actual projection matrix used in shaders is actually massaged a bit to work across all platforms
    // (different Z value ranges etc.)
    var gpuProj = GL.GetGPUProjectionMatrix(cameraProj, true); // Had to change this from 'false'
    var gpuView = camera.worldToCameraMatrix;
    var gpuNonJitteredProj = GL.GetGPUProjectionMatrix(nonJitteredCameraProj, true);

    var pos = camera.transform.position;
    var relPos = pos; // World-origin-relative

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        // Zero out the translation component.
        gpuView.SetColumn(3, new Vector4(0, 0, 0, 1));
        relPos = Vector3.zero; // Camera-relative
    }

    var gpuVP = gpuNonJitteredProj * gpuView;

    // A camera can be rendered multiple times per frame; only update the previous view-proj & pos if needed.
    if (m_LastFrameActive != Time.frameCount)
    {
        if (isFirstFrame)
        {
            prevCameraPos = pos;
            prevViewProjMatrix = gpuVP;
        }
        else
        {
            prevCameraPos = cameraPos;
            prevViewProjMatrix = nonJitteredViewProjMatrix;
        }

        isFirstFrame = false;
    }

    const uint taaFrameCount = 8;
    taaFrameIndex = taaEnabled ? (uint)Time.renderedFrameCount % taaFrameCount : 0;
    taaFrameRotation = new Vector2(Mathf.Sin(taaFrameIndex * (0.5f * Mathf.PI)), Mathf.Cos(taaFrameIndex * (0.5f * Mathf.PI)));

    viewMatrix = gpuView;
    projMatrix = gpuProj;
    nonJitteredProjMatrix = gpuNonJitteredProj;
    cameraPos = pos;
    viewParam = new Vector4(viewMatrix.determinant, 0.0f, 0.0f, 0.0f);

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        Matrix4x4 cameraDisplacement = Matrix4x4.Translate(cameraPos - prevCameraPos); // Non-camera-relative positions
        prevViewProjMatrix *= cameraDisplacement; // Now prevViewProjMatrix correctly transforms this frame's camera-relative positionWS
    }

    // Warning: near and far planes appear to be broken (or rather, the far plane seems broken).
    GeometryUtility.CalculateFrustumPlanes(viewProjMatrix, frustumPlanes);

    for (int i = 0; i < 4; i++)
    {
        // Left, right, top, bottom.
        frustumPlaneEquations[i] = new Vector4(frustumPlanes[i].normal.x, frustumPlanes[i].normal.y, frustumPlanes[i].normal.z, frustumPlanes[i].distance);
    }

    // Near, far.
    Vector4 forward = (camera.cameraType == CameraType.Reflection)
        ? camera.worldToCameraMatrix.GetRow(2)
        : new Vector4(camera.transform.forward.x, camera.transform.forward.y, camera.transform.forward.z, 0.0f);

    // We need to switch the forward direction based on handedness (reminder: regular cameras have a negative
    // determinant in Unity, while reflection probes follow the DX convention and have a positive determinant).
    forward = viewParam.x < 0.0f ? forward : -forward;

    frustumPlaneEquations[4] = new Vector4(forward.x, forward.y, forward.z, -Vector3.Dot(forward, relPos) - camera.nearClipPlane);
    frustumPlaneEquations[5] = new Vector4(-forward.x, -forward.y, -forward.z, Vector3.Dot(forward, relPos) + camera.farClipPlane);

    m_LastFrameActive = Time.frameCount;

    m_ActualWidth = camera.pixelWidth;
    m_ActualHeight = camera.pixelHeight;
    var screenWidth = m_ActualWidth;
    var screenHeight = m_ActualHeight;

    if (frameSettings.enableStereo)
    {
        screenWidth = XRSettings.eyeTextureWidth;
        screenHeight = XRSettings.eyeTextureHeight;

        var xrDesc = XRSettings.eyeTextureDesc;
        m_ActualWidth = xrDesc.width;
        m_ActualHeight = xrDesc.height;
    }

    // Unfortunately sometimes (like in the HDCameraEditor) HDUtils.hdrpSettings can be null because of scripts that change the current pipeline...
    m_msaaSamples = HDUtils.hdrpSettings != null ? HDUtils.hdrpSettings.msaaSampleCount : MSAASamples.None;

    RTHandle.SetReferenceSize(m_ActualWidth, m_ActualHeight, frameSettings.enableMSAA, m_msaaSamples);

    int maxWidth = RTHandle.maxWidth;
    int maxHeight = RTHandle.maxHeight;
    m_CameraScaleBias.x = (float)m_ActualWidth / maxWidth;
    m_CameraScaleBias.y = (float)m_ActualHeight / maxHeight;

    screenSize = new Vector4(screenWidth, screenHeight, 1.0f / screenWidth, 1.0f / screenHeight);
}
// Combines specular lighting and diffuse lighting with subsurface scattering.
public void SubsurfaceScatteringPass(HDCamera hdCamera, CommandBuffer cmd, DiffusionProfileSettings sssParameters, FrameSettings frameSettings, RenderTargetIdentifier colorBufferRT, RenderTargetIdentifier diffuseBufferRT, RenderTargetIdentifier depthStencilBufferRT, RenderTargetIdentifier depthTextureRT)
{
    if (sssParameters == null || !frameSettings.enableSubsurfaceScattering)
    {
        return;
    }

    // TODO: For MSAA, at least initially, we can only support Jimenez, because we can't
    // create MSAA + UAV render targets.
    using (new ProfilingSample(cmd, "Subsurface Scattering", CustomSamplerId.SubsurfaceScattering.GetSampler()))
    {
        // For Jimenez we always need an extra buffer; for Disney it depends on the platform.
        if (ShaderConfig.k_UseDisneySSS == 0 || NeedTemporarySubsurfaceBuffer())
        {
            // Caution: must be the same format as m_CameraSssDiffuseLightingBuffer.
            cmd.ReleaseTemporaryRT(m_CameraFilteringBuffer);
            if (frameSettings.enableMSAA)
            {
                CoreUtils.CreateCmdTemporaryRT(cmd, m_CameraFilteringBuffer, hdCamera.renderTextureDesc, 0, FilterMode.Point, RenderTextureFormat.RGB111110Float, RenderTextureReadWrite.Linear); // No UAV
            }
            else
            {
                CoreUtils.CreateCmdTemporaryRT(cmd, m_CameraFilteringBuffer, hdCamera.renderTextureDesc, 0, FilterMode.Point, RenderTextureFormat.RGB111110Float, RenderTextureReadWrite.Linear, 1, true); // Enable UAV
            }

            // Clear the SSS filtering target
            using (new ProfilingSample(cmd, "Clear SSS filtering target", CustomSamplerId.ClearSSSFilteringTarget.GetSampler()))
            {
                CoreUtils.SetRenderTarget(cmd, m_CameraFilteringBufferRT, ClearFlag.Color, CoreUtils.clearColorAllBlack);
            }
        }

        if (ShaderConfig.s_UseDisneySSS == 1) // Use the static member here to quiet the compiler warning.
        {
            using (new ProfilingSample(cmd, "HTile for SSS", CustomSamplerId.HTileForSSS.GetSampler()))
            {
                // Currently, Unity does not offer a way to access the GCN HTile even on PS4 and Xbox One.
                // Therefore, it's computed in a pixel shader, and optimized to only contain the SSS bit.
                CoreUtils.SetRenderTarget(cmd, m_HTileRT, ClearFlag.Color, CoreUtils.clearColorAllBlack);

                cmd.SetRandomWriteTarget(1, m_HTile);
                // Generate HTile for the split lighting stencil usage. Don't write into the stencil texture (shaderPassId = 2).
                // Use shaderPassId 2 => "Pass 2 - Export HTILE for stencilRef to output".
                CoreUtils.SetRenderTarget(cmd, depthStencilBufferRT); // No need for a color buffer here
                CoreUtils.DrawFullScreen(cmd, m_CopyStencilForSplitLighting, null, 2);
                cmd.ClearRandomWriteTargets();
            }

            // TODO: Remove this once fixed; see the comment inside the function.
            hdCamera.SetupComputeShader(m_SubsurfaceScatteringCS, cmd);

            unsafe
            {
                // Warning: Unity is not able to losslessly transfer integers larger than 2^24 to the shader system.
                // Therefore, we bitcast the uint to a float in C#, and bitcast it back to a uint in the shader.
                uint texturingModeFlags = sssParameters.texturingModeFlags;
                cmd.SetComputeFloatParam(m_SubsurfaceScatteringCS, HDShaderIDs._TexturingModeFlags, *(float *)&texturingModeFlags);
            }

            cmd.SetComputeVectorArrayParam(m_SubsurfaceScatteringCS, HDShaderIDs._WorldScales, sssParameters.worldScales);
            cmd.SetComputeVectorArrayParam(m_SubsurfaceScatteringCS, HDShaderIDs._FilterKernels, sssParameters.filterKernels);
            cmd.SetComputeVectorArrayParam(m_SubsurfaceScatteringCS, HDShaderIDs._ShapeParams, sssParameters.shapeParams);

            cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._DepthTexture, depthTextureRT);
            cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._SSSHTile, m_HTileRT);
            cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._IrradianceSource, diffuseBufferRT);

            for (int i = 0; i < sssBufferCount; ++i)
            {
                cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._SSSBufferTexture[i], GetSSSBuffers(i));
            }

            if (NeedTemporarySubsurfaceBuffer())
            {
                cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._CameraFilteringBuffer, m_CameraFilteringBufferRT);

                // Perform the SSS filtering pass which fills 'm_CameraFilteringBufferRT'.
                // We dispatch 4x swizzled 16x16 groups per 32x32 macro tile.
                cmd.DispatchCompute(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, 4, ((int)hdCamera.screenSize.x + 31) / 32, ((int)hdCamera.screenSize.y + 31) / 32);

                cmd.SetGlobalTexture(HDShaderIDs._IrradianceSource, m_CameraFilteringBufferRT); // Cannot set an RT on a material

                // Additively blend diffuse and specular lighting into 'm_CameraColorBufferRT'.
                CoreUtils.DrawFullScreen(cmd, m_CombineLightingPass, colorBufferRT, depthStencilBufferRT);
            }
            else
            {
                cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._CameraColorTexture, colorBufferRT);

                // Perform the SSS filtering pass which performs an in-place update of 'colorBuffer'.
                // We dispatch 4x swizzled 16x16 groups per 32x32 macro tile.
                cmd.DispatchCompute(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, 4, ((int)hdCamera.screenSize.x + 31) / 32, ((int)hdCamera.screenSize.y + 31) / 32);
            }
        }
        else
        {
            for (int i = 0; i < sssBufferCount; ++i)
            {
                cmd.SetGlobalTexture(HDShaderIDs._SSSBufferTexture[i], GetSSSBuffers(i));
            }

            cmd.SetGlobalTexture(HDShaderIDs._IrradianceSource, diffuseBufferRT); // Cannot set an RT on a material
            m_SssVerticalFilterPass.SetVectorArray(HDShaderIDs._FilterKernelsBasic, sssParameters.filterKernelsBasic);
            m_SssVerticalFilterPass.SetVectorArray(HDShaderIDs._HalfRcpWeightedVariances, sssParameters.halfRcpWeightedVariances);
            // Perform the vertical SSS filtering pass which fills 'm_CameraFilteringBufferRT'.
            CoreUtils.DrawFullScreen(cmd, m_SssVerticalFilterPass, m_CameraFilteringBufferRT, depthStencilBufferRT);

            cmd.SetGlobalTexture(HDShaderIDs._IrradianceSource, m_CameraFilteringBufferRT); // Cannot set an RT on a material
            m_SssHorizontalFilterAndCombinePass.SetVectorArray(HDShaderIDs._FilterKernelsBasic, sssParameters.filterKernelsBasic);
            m_SssHorizontalFilterAndCombinePass.SetVectorArray(HDShaderIDs._HalfRcpWeightedVariances, sssParameters.halfRcpWeightedVariances);
            // Perform the horizontal SSS filtering pass, and combine diffuse and specular lighting into 'm_CameraColorBufferRT'.
            CoreUtils.DrawFullScreen(cmd, m_SssHorizontalFilterAndCombinePass, colorBufferRT, depthStencilBufferRT);
        }
    }
}
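A quick illustration of the bitcast trick used for `_TexturingModeFlags` above, as a standalone sketch (not HDRP code; requires "Allow unsafe code"): the float carries the exact bit pattern through the constant buffer unchanged, and HLSL's `asuint()` recovers it on the other side.

using UnityEngine;

static class BitcastSketch
{
    public static unsafe void RoundTrip()
    {
        uint flags = 0x00FF00FFu;          // some bitfield larger than 2^24
        float asFloat = *(float*)&flags;   // reinterpret; no numeric conversion occurs
        uint recovered = *(uint*)&asFloat; // the exact bit pattern comes back
        Debug.Assert(recovered == flags);
        // HLSL side: uint flags = asuint(_TexturingModeFlags);
    }
}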
// Initialize a FrameSettings from render pipeline settings, frame settings and debug settings (if any).
// This aggregates the various options.
public static void InitializeFrameSettings(Camera camera, RenderPipelineSettings renderPipelineSettings, FrameSettings srcFrameSettings, ref FrameSettings aggregate)
{
    if (aggregate == null)
    {
        aggregate = new FrameSettings();
    }

    // When rendering a reflection probe we disable specular as it is view-dependent.
    if (camera.cameraType == CameraType.Reflection)
    {
        aggregate.diffuseGlobalDimmer = 1.0f;
        aggregate.specularGlobalDimmer = 0.0f;
    }
    else
    {
        aggregate.diffuseGlobalDimmer = 1.0f;
        aggregate.specularGlobalDimmer = 1.0f;
    }

    aggregate.enableShadow = srcFrameSettings.enableShadow;
    aggregate.enableContactShadows = srcFrameSettings.enableContactShadows;
    aggregate.enableSSR = camera.cameraType != CameraType.Reflection && srcFrameSettings.enableSSR && renderPipelineSettings.supportSSR;
    aggregate.enableSSAO = srcFrameSettings.enableSSAO && renderPipelineSettings.supportSSAO;
    aggregate.enableSubsurfaceScattering = camera.cameraType != CameraType.Reflection && srcFrameSettings.enableSubsurfaceScattering && renderPipelineSettings.supportSubsurfaceScattering;
    aggregate.enableTransmission = srcFrameSettings.enableTransmission;
    aggregate.enableVolumetric = srcFrameSettings.enableVolumetric && renderPipelineSettings.supportVolumetric;

    // TODO: Add support for volumetrics in planar reflections.
    if (camera.cameraType == CameraType.Reflection)
    {
        aggregate.enableVolumetric = false;
    }

    // We have to fall back to forward-only rendering when the scene view is using the wireframe rendering mode,
    // as rendering everything in wireframe and deferred do not play well together.
    aggregate.enableForwardRenderingOnly = srcFrameSettings.enableForwardRenderingOnly || GL.wireframe || renderPipelineSettings.supportForwardOnly;
    aggregate.enableDepthPrepassWithDeferredRendering = srcFrameSettings.enableDepthPrepassWithDeferredRendering;

    aggregate.enableTransparentPrepass = srcFrameSettings.enableTransparentPrepass;
    aggregate.enableMotionVectors = camera.cameraType != CameraType.Reflection && srcFrameSettings.enableMotionVectors && renderPipelineSettings.supportMotionVectors;
    aggregate.enableObjectMotionVectors = camera.cameraType != CameraType.Reflection && srcFrameSettings.enableObjectMotionVectors && renderPipelineSettings.supportMotionVectors;
    aggregate.enableDBuffer = srcFrameSettings.enableDBuffer && renderPipelineSettings.supportDBuffer;
    aggregate.enableAtmosphericScattering = srcFrameSettings.enableAtmosphericScattering;

    // We must take care of the scene view fog flags in the editor.
    if (!CoreUtils.IsSceneViewFogEnabled(camera))
    {
        aggregate.enableAtmosphericScattering = false;
    }

    aggregate.enableRoughRefraction = srcFrameSettings.enableRoughRefraction;
    aggregate.enableTransparentPostpass = srcFrameSettings.enableTransparentPostpass;
    aggregate.enableDistortion = camera.cameraType != CameraType.Reflection && srcFrameSettings.enableDistortion;

    // Planar and real-time cubemap reflections don't need post-processing and render in FP16.
    aggregate.enablePostprocess = camera.cameraType != CameraType.Reflection && srcFrameSettings.enablePostprocess;

#if UNITY_SWITCH
    aggregate.enableStereo = false;
#else
    aggregate.enableStereo = camera.cameraType != CameraType.Reflection && srcFrameSettings.enableStereo && XRSettings.isDeviceActive && (camera.stereoTargetEye == StereoTargetEyeMask.Both) && renderPipelineSettings.supportStereo;
#endif

    aggregate.enableAsyncCompute = srcFrameSettings.enableAsyncCompute && SystemInfo.supportsAsyncCompute;

    aggregate.enableOpaqueObjects = srcFrameSettings.enableOpaqueObjects;
    aggregate.enableTransparentObjects = srcFrameSettings.enableTransparentObjects;

    aggregate.enableMSAA = srcFrameSettings.enableMSAA && renderPipelineSettings.supportMSAA;
    aggregate.enableShadowMask = srcFrameSettings.enableShadowMask && renderPipelineSettings.supportShadowMask;

    aggregate.ConfigureMSAADependentSettings();
    aggregate.ConfigureStereoDependentSettings();

    if (camera.cameraType == CameraType.Preview)
    {
        // Remove undesired features in the preview.
        aggregate.enableShadow = false;
        aggregate.enableContactShadows = false;
        aggregate.enableSSR = false;
        aggregate.enableSSAO = false;
        aggregate.enableTransparentPrepass = false;
        aggregate.enableMotionVectors = false;
        aggregate.enableObjectMotionVectors = false;
        aggregate.enableDBuffer = false;
        aggregate.enableAtmosphericScattering = false;
        aggregate.enableTransparentPostpass = false;
        aggregate.enableDistortion = false;
        aggregate.enablePostprocess = false;
        aggregate.enableStereo = false;
        aggregate.enableShadowMask = false;
        aggregate.enableVolumetric = false;
    }

    LightLoopSettings.InitializeLightLoopSettings(camera, aggregate, renderPipelineSettings, srcFrameSettings, ref aggregate.lightLoopSettings);
}
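A hypothetical call site for the aggregation above (the `hdrpAsset` and `cameraSettings` names are assumptions, not part of the snippet). The net effect is that every feature flag ends up as the AND of the requested value and the pipeline's support flag:

FrameSettings aggregate = null;
FrameSettings.InitializeFrameSettings(camera, hdrpAsset.renderPipelineSettings, cameraSettings, ref aggregate);
// e.g. aggregate.enableSSAO == (cameraSettings.enableSSAO && hdrpAsset.renderPipelineSettings.supportSSAO)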
// Initialize a FrameSettings from render pipeline settings, frame settings and debug settings (if any).
// This aggregates the various options.
public static FrameSettings InitializeFrameSettings(Camera camera, RenderPipelineSettings renderPipelineSettings, FrameSettings frameSettings)
{
    FrameSettings aggregate = new FrameSettings();

    // When rendering a reflection probe we disable specular as it is view-dependent.
    if (camera.cameraType == CameraType.Reflection)
    {
        aggregate.diffuseGlobalDimmer = 1.0f;
        aggregate.specularGlobalDimmer = 0.0f;
    }
    else
    {
        aggregate.diffuseGlobalDimmer = 1.0f;
        aggregate.specularGlobalDimmer = 1.0f;
    }

    aggregate.enableShadow = frameSettings.enableShadow;
    aggregate.enableSSR = camera.cameraType == CameraType.Reflection ? false : frameSettings.enableSSR && renderPipelineSettings.supportSSR;
    aggregate.enableSSAO = frameSettings.enableSSAO && renderPipelineSettings.supportSSAO;
    aggregate.enableSSSAndTransmission = camera.cameraType == CameraType.Reflection ? false : frameSettings.enableSSSAndTransmission && renderPipelineSettings.supportSSSAndTransmission;

    // We have to fall back to forward-only rendering when the scene view is using the wireframe rendering mode,
    // as rendering everything in wireframe and deferred do not play well together.
    aggregate.enableForwardRenderingOnly = frameSettings.enableForwardRenderingOnly || GL.wireframe;
    aggregate.enableDepthPrepassWithDeferredRendering = frameSettings.enableDepthPrepassWithDeferredRendering;
    aggregate.enableAlphaTestOnlyInDeferredPrepass = frameSettings.enableAlphaTestOnlyInDeferredPrepass;

    aggregate.enableTransparentPrePass = frameSettings.enableTransparentPrePass;
    aggregate.enableMotionVectors = camera.cameraType == CameraType.Reflection ? false : frameSettings.enableMotionVectors;
    aggregate.enableObjectMotionVectors = camera.cameraType == CameraType.Reflection ? false : frameSettings.enableObjectMotionVectors;
    aggregate.enableDBuffer = frameSettings.enableDBuffer && renderPipelineSettings.supportDBuffer;
    aggregate.enableAtmosphericScattering = frameSettings.enableAtmosphericScattering;
    aggregate.enableRoughRefraction = frameSettings.enableRoughRefraction;
    aggregate.enableTransparentPostPass = frameSettings.enableTransparentPostPass;
    aggregate.enableDistortion = camera.cameraType == CameraType.Reflection ? false : frameSettings.enableDistortion;

    // Planar and real-time cubemap reflections don't need post-processing and render in FP16.
    aggregate.enablePostprocess = camera.cameraType == CameraType.Reflection ? false : frameSettings.enablePostprocess;

    aggregate.enableStereo = camera.cameraType == CameraType.Reflection ? false : frameSettings.enableStereo && UnityEngine.XR.XRSettings.isDeviceActive && (camera.stereoTargetEye == StereoTargetEyeMask.Both);
    // Force forward if we request stereo. TODO: We should not enforce that; users should be able to choose deferred.
    aggregate.enableForwardRenderingOnly = aggregate.enableForwardRenderingOnly || aggregate.enableStereo;

    aggregate.enableAsyncCompute = frameSettings.enableAsyncCompute && renderPipelineSettings.supportAsyncCompute;

    aggregate.enableOpaqueObjects = frameSettings.enableOpaqueObjects;
    aggregate.enableTransparentObjects = frameSettings.enableTransparentObjects;

    aggregate.enableMSAA = frameSettings.enableMSAA && renderPipelineSettings.supportMSAA;
    aggregate.enableShadowMask = renderPipelineSettings.supportShadowMask;

    aggregate.lightLoopSettings = LightLoopSettings.InitializeLightLoopSettings(camera, aggregate, renderPipelineSettings, frameSettings);

    return aggregate;
}
// Note: this version is the one tested, as there is currently an issue getting the HDRenderPipelineAsset in batch mode in the unit test framework.
/// <summary>Same as FrameSettings.AggregateFrameSettings, but keeps a history of the aggregation in a collection for the DebugMenu.
/// Aggregation starts from the default, applies the renderer's override, and is then sanitized depending on the supported features of the HDRP asset. Then the DebugMenu override occurs.</summary>
/// <param name="aggregatedFrameSettings">The aggregated FrameSettings result.</param>
/// <param name="camera">The camera rendering.</param>
/// <param name="additionalData">Additional data of the camera rendering.</param>
/// <param name="defaultFrameSettings">Base FrameSettings to copy prior to any override.</param>
/// <param name="supportedFeatures">Currently supported features for the sanitization pass.</param>
public static void AggregateFrameSettings(ref FrameSettings aggregatedFrameSettings, Camera camera, HDAdditionalCameraData additionalData, ref FrameSettings defaultFrameSettings, RenderPipelineSettings supportedFeatures)
{
    FrameSettingsHistory history = new FrameSettingsHistory
    {
        camera = camera,
        defaultType = additionalData ? additionalData.defaultFrameSettings : FrameSettingsRenderType.Camera
    };
    aggregatedFrameSettings = defaultFrameSettings;
    if (additionalData && additionalData.customRenderingSettings)
    {
        FrameSettings.Override(ref aggregatedFrameSettings, additionalData.renderingPathCustomFrameSettings, additionalData.renderingPathCustomFrameSettingsOverrideMask);
        history.customMask = additionalData.renderingPathCustomFrameSettingsOverrideMask;
    }
    history.overridden = aggregatedFrameSettings;
    FrameSettings.Sanitize(ref aggregatedFrameSettings, camera, supportedFeatures);

    bool noHistory = !frameSettingsHistory.ContainsKey(camera);
    bool updatedComponent = !noHistory && frameSettingsHistory[camera].sanitazed != aggregatedFrameSettings;
    bool dirty = noHistory || updatedComponent;

    history.sanitazed = aggregatedFrameSettings;
    if (dirty)
    {
        history.debug = history.sanitazed;
    }
    else
    {
        history.debug = frameSettingsHistory[camera].debug;

        // Ensure the user is not trying to activate unsupported settings in the DebugMenu.
        FrameSettings.Sanitize(ref history.debug, camera, supportedFeatures);
    }

    aggregatedFrameSettings = history.debug;
    frameSettingsHistory[camera] = history;
}
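To make the order of operations explicit, here is the same flow as a hedged outline (comment form only; the step names mirror the calls above):

// 1. Default:   aggregated = defaultFrameSettings
// 2. Override:  FrameSettings.Override(ref aggregated, customSettings, overrideMask)  (only if customRenderingSettings)
// 3. Sanitize:  FrameSettings.Sanitize(ref aggregated, camera, supportedFeatures)     (clamp to supported features)
// 4. Debug:     aggregated = history.debug                                            (DebugMenu values win last)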
public FrameSettings(FrameSettings toCopy) { toCopy.CopyTo(this); }
// Pass all the systems that may want to update per-camera data here.
// That way you will never update an HDCamera and forget to update a dependent system.
public void Update(FrameSettings currentFrameSettings, PostProcessLayer postProcessLayer, VolumetricLightingSystem vlSys, MSAASamples msaaSamples)
{
    // Store a shortcut on HDAdditionalCameraData. (This is done here and not in the constructor because we don't create an HDCamera
    // every frame, and the user can change the HDAdditionalCameraData later, e.g. when creating a new scene.)
    m_AdditionalCameraData = camera.GetComponent<HDAdditionalCameraData>();

    m_frameSettings = currentFrameSettings;

    // Handle memory allocation.
    {
        bool isColorPyramidHistoryRequired = m_frameSettings.enableSSR; // TODO: TAA as well
        bool isVolumetricHistoryRequired = m_frameSettings.enableVolumetrics && m_frameSettings.enableReprojectionForVolumetrics;

        int numColorPyramidBuffersRequired = isColorPyramidHistoryRequired ? 2 : 1; // TODO: 1 -> 0
        int numVolumetricBuffersRequired = isVolumetricHistoryRequired ? 2 : 0;     // History + feedback

        if ((numColorPyramidBuffersAllocated != numColorPyramidBuffersRequired) || (numVolumetricBuffersAllocated != numVolumetricBuffersRequired))
        {
            // Reinit the system.
            colorPyramidHistoryIsValid = false;
            vlSys.DeinitializePerCameraData(this);

            // The history system only supports the "nuke all" option.
            m_HistoryRTSystem.Dispose();
            m_HistoryRTSystem = new BufferedRTHandleSystem();

            if (numColorPyramidBuffersRequired != 0)
            {
                AllocHistoryFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain, HistoryBufferAllocatorFunction, numColorPyramidBuffersRequired);
                colorPyramidHistoryIsValid = false;
            }

            vlSys.InitializePerCameraData(this, numVolumetricBuffersRequired);

            // Mark as init.
            numColorPyramidBuffersAllocated = numColorPyramidBuffersRequired;
            numVolumetricBuffersAllocated = numVolumetricBuffersRequired;
        }
    }

    // If TAA is enabled, projMatrix will hold a jittered projection matrix. The original,
    // non-jittered projection matrix can be accessed via nonJitteredProjMatrix.
    bool taaEnabled = camera.cameraType == CameraType.Game && HDUtils.IsTemporalAntialiasingActive(postProcessLayer) && m_frameSettings.enablePostprocess;

    var nonJitteredCameraProj = camera.projectionMatrix;
    var cameraProj = taaEnabled ? postProcessLayer.temporalAntialiasing.GetJitteredProjectionMatrix(camera) : nonJitteredCameraProj;

    // The projection matrix used in shaders is actually massaged a bit to work across all platforms
    // (different Z value ranges etc.)
    var gpuProj = GL.GetGPUProjectionMatrix(cameraProj, true); // Had to change this from 'false'
    var gpuView = camera.worldToCameraMatrix;
    var gpuNonJitteredProj = GL.GetGPUProjectionMatrix(nonJitteredCameraProj, true);

    // Update viewport sizes.
    m_ViewportSizePrevFrame = new Vector2Int(m_ActualWidth, m_ActualHeight);
    m_ActualWidth = Math.Max(camera.pixelWidth, 1);
    m_ActualHeight = Math.Max(camera.pixelHeight, 1);

    var screenWidth = m_ActualWidth;
    var screenHeight = m_ActualHeight;
    textureWidthScaling = new Vector4(1.0f, 1.0f, 0.0f, 0.0f);

    numEyes = camera.stereoEnabled ? (uint)2 : (uint)1; // TODO VR: Generalize this when support for >2 eyes comes out with the XR SDK.

    if (camera.stereoEnabled)
    {
        textureWidthScaling = new Vector4(2.0f, 0.5f, 0.0f, 0.0f);

        for (uint eyeIndex = 0; eyeIndex < 2; eyeIndex++)
        {
            // For VR, TAA proj matrices don't need to be jittered.
            var currProjStereo = camera.GetStereoProjectionMatrix((Camera.StereoscopicEye)eyeIndex);
            var gpuCurrProjStereo = GL.GetGPUProjectionMatrix(currProjStereo, true);
            var gpuCurrViewStereo = camera.GetStereoViewMatrix((Camera.StereoscopicEye)eyeIndex);

            if (ShaderConfig.s_CameraRelativeRendering != 0)
            {
                // Zero out the translation component.
                gpuCurrViewStereo.SetColumn(3, new Vector4(0, 0, 0, 1));
            }
            var gpuCurrVPStereo = gpuCurrProjStereo * gpuCurrViewStereo;

            // A camera can be rendered multiple times per frame, so only update the previous view-projection matrices when needed.
            if (m_LastFrameActive != Time.frameCount)
            {
                if (isFirstFrame)
                {
                    prevViewMatrixStereo[eyeIndex] = gpuCurrViewStereo;
                    prevViewProjMatrixStereo[eyeIndex] = gpuCurrVPStereo;
                }
                else
                {
                    prevViewMatrixStereo[eyeIndex] = viewMatrixStereo[eyeIndex];
                    prevViewProjMatrixStereo[eyeIndex] = GetViewProjMatrixStereo(eyeIndex); // Grabbing this before ConfigureStereoMatrices updates view/proj
                }

                isFirstFrame = false;
            }
        }
        isFirstFrame = true; // So that the mono variables can still update when stereo is active.

        screenWidth = XRGraphics.eyeTextureWidth;
        screenHeight = XRGraphics.eyeTextureHeight;

        var xrDesc = XRGraphics.eyeTextureDesc;
        m_ActualWidth = xrDesc.width;
        m_ActualHeight = xrDesc.height;
    }

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        // Zero out the translation component.
        gpuView.SetColumn(3, new Vector4(0, 0, 0, 1));
    }

    var gpuVP = gpuNonJitteredProj * gpuView;

    // A camera can be rendered multiple times per frame, so only update the previous view-projection matrix and position when needed.
    // Note: if your first rendered view during the frame is not the Game view, everything breaks.
    if (m_LastFrameActive != Time.frameCount)
    {
        if (isFirstFrame)
        {
            prevWorldSpaceCameraPos = camera.transform.position;
            prevViewProjMatrix = gpuVP;
        }
        else
        {
            prevWorldSpaceCameraPos = worldSpaceCameraPos;
            prevViewProjMatrix = nonJitteredViewProjMatrix;
        }

        isFirstFrame = false;
    }

    // In stereo, this corresponds to the center eye position.
    worldSpaceCameraPos = camera.transform.position;

    taaFrameIndex = taaEnabled ? (uint)postProcessLayer.temporalAntialiasing.sampleIndex : 0;
    taaFrameRotation = new Vector2(Mathf.Sin(taaFrameIndex * (0.5f * Mathf.PI)), Mathf.Cos(taaFrameIndex * (0.5f * Mathf.PI)));

    viewMatrix = gpuView;
    projMatrix = gpuProj;
    nonJitteredProjMatrix = gpuNonJitteredProj;

    ConfigureStereoMatrices();

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        Matrix4x4 cameraDisplacement = Matrix4x4.Translate(worldSpaceCameraPos - prevWorldSpaceCameraPos);
        prevViewProjMatrix *= cameraDisplacement; // Now prevViewProjMatrix correctly transforms this frame's camera-relative positionWS
    }

    float n = camera.nearClipPlane;
    float f = camera.farClipPlane;

    // Analyze the projection matrix.
    // p[2][3] = (reverseZ ? 1 : -1) * (depth_0_1 ? 1 : 2) * (f * n) / (f - n)
    float scale = projMatrix[2, 3] / (f * n) * (f - n);
    bool depth_0_1 = Mathf.Abs(scale) < 1.5f;
    bool reverseZ = scale > 0;
    bool flipProj = projMatrix.inverse.MultiplyPoint(new Vector3(0, 1, 0)).y < 0;

    // http://www.humus.name/temp/Linearize%20depth.txt
    if (reverseZ)
    {
        zBufferParams = new Vector4(-1 + f / n, 1, -1 / f + 1 / n, 1 / f);
    }
    else
    {
        zBufferParams = new Vector4(1 - f / n, f / n, 1 / f - 1 / n, 1 / n);
    }

    projectionParams = new Vector4(flipProj ? -1 : 1, n, f, 1.0f / f);

    float orthoHeight = camera.orthographic ? 2 * camera.orthographicSize : 0;
    float orthoWidth = orthoHeight * camera.aspect;
    unity_OrthoParams = new Vector4(orthoWidth, orthoHeight, 0, camera.orthographic ? 1 : 0);

    Frustum.Create(frustum, viewProjMatrix, depth_0_1, reverseZ);

    // Left, right, top, bottom, near, far.
    for (int i = 0; i < 6; i++)
    {
        frustumPlaneEquations[i] = new Vector4(frustum.planes[i].normal.x, frustum.planes[i].normal.y, frustum.planes[i].normal.z, frustum.planes[i].distance);
    }

    m_LastFrameActive = Time.frameCount;

    // TODO: cache this, or make the history system spill the beans...
    Vector2Int prevColorPyramidBufferSize = Vector2Int.zero;

    if (numColorPyramidBuffersAllocated > 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;

        prevColorPyramidBufferSize.x = rt.width;
        prevColorPyramidBufferSize.y = rt.height;
    }

    // TODO: cache this, or make the history system spill the beans...
    Vector3Int prevVolumetricBufferSize = Vector3Int.zero;

    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;

        prevVolumetricBufferSize.x = rt.width;
        prevVolumetricBufferSize.y = rt.height;
        prevVolumetricBufferSize.z = rt.volumeDepth;
    }

    // Unfortunately, sometimes (like in the HDCameraEditor) HDUtils.hdrpSettings can be null because of scripts that change the current pipeline...
    m_msaaSamples = msaaSamples;
    RTHandles.SetReferenceSize(m_ActualWidth, m_ActualHeight, m_msaaSamples);
    m_HistoryRTSystem.SetReferenceSize(m_ActualWidth, m_ActualHeight, m_msaaSamples);
    m_HistoryRTSystem.Swap();

    Vector3Int currColorPyramidBufferSize = Vector3Int.zero;

    if (numColorPyramidBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;

        currColorPyramidBufferSize.x = rt.width;
        currColorPyramidBufferSize.y = rt.height;

        if ((currColorPyramidBufferSize.x != prevColorPyramidBufferSize.x) || (currColorPyramidBufferSize.y != prevColorPyramidBufferSize.y))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            colorPyramidHistoryIsValid = false;
        }
    }

    Vector3Int currVolumetricBufferSize = Vector3Int.zero;

    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;

        currVolumetricBufferSize.x = rt.width;
        currVolumetricBufferSize.y = rt.height;
        currVolumetricBufferSize.z = rt.volumeDepth;

        if ((currVolumetricBufferSize.x != prevVolumetricBufferSize.x) || (currVolumetricBufferSize.y != prevVolumetricBufferSize.y) || (currVolumetricBufferSize.z != prevVolumetricBufferSize.z))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            volumetricHistoryIsValid = false;
        }
    }

    int maxWidth = RTHandles.maxWidth;
    int maxHeight = RTHandles.maxHeight;
    Vector2 rcpTextureSize = Vector2.one / new Vector2(maxWidth, maxHeight);

    m_ViewportScalePreviousFrame = m_ViewportSizePrevFrame * rcpTextureSize;
    m_ViewportScaleCurrentFrame = new Vector2Int(m_ActualWidth, m_ActualHeight) * rcpTextureSize;

    screenSize = new Vector4(screenWidth, screenHeight, 1.0f / screenWidth, 1.0f / screenHeight);
    screenParams = new Vector4(screenSize.x, screenSize.y, 1 + screenSize.z, 1 + screenSize.w);

    if (vlSys != null)
    {
        vlSys.UpdatePerCameraData(this);
    }

    UpdateVolumeParameters();
}
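A small sanity check of the zBufferParams packing above, as a standalone sketch (not HDRP code; the clip-plane values are hypothetical). With Unity's convention LinearEyeDepth(d) = 1 / (z.z * d + z.w), the reversed-Z constants map d = 1 to the near plane and d = 0 to the far plane:

using UnityEngine;

static class ZBufferParamsSketch
{
    public static void Check()
    {
        float n = 0.3f, f = 1000.0f; // hypothetical near/far clip planes
        var zbp = new Vector4(-1 + f / n, 1, -1 / f + 1 / n, 1 / f); // reversed-Z case from Update()

        float eyeDepthNear = 1.0f / (zbp.z * 1.0f + zbp.w); // d = 1 (near plane) -> n
        float eyeDepthFar  = 1.0f / (zbp.z * 0.0f + zbp.w); // d = 0 (far plane)  -> f

        Debug.Assert(Mathf.Approximately(eyeDepthNear, n) && Mathf.Approximately(eyeDepthFar, f));
    }
}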
// Initialize a FrameSettings from render pipeline settings, frame settings and debug settings (if any).
// This aggregates the various options.
public static void InitializeFrameSettings(Camera camera, RenderPipelineSettings renderPipelineSettings, FrameSettings srcFrameSettings, ref FrameSettings aggregate)
{
    if (aggregate == null)
    {
        aggregate = new FrameSettings();
    }

    // When rendering a reflection probe we disable specular as it is view-dependent.
    if (camera.cameraType == CameraType.Reflection)
    {
        aggregate.diffuseGlobalDimmer = 1.0f;
        aggregate.specularGlobalDimmer = 0.0f;
    }
    else
    {
        aggregate.diffuseGlobalDimmer = 1.0f;
        aggregate.specularGlobalDimmer = 1.0f;
    }

    aggregate.enableShadow = srcFrameSettings.enableShadow;
    aggregate.enableContactShadows = srcFrameSettings.enableContactShadows;
    aggregate.enableShadowMask = srcFrameSettings.enableShadowMask && renderPipelineSettings.supportShadowMask;
    aggregate.enableSSR = camera.cameraType != CameraType.Reflection && srcFrameSettings.enableSSR && renderPipelineSettings.supportSSR; // No recursive reflections
    aggregate.enableSSAO = srcFrameSettings.enableSSAO && renderPipelineSettings.supportSSAO;
    aggregate.enableSubsurfaceScattering = camera.cameraType != CameraType.Reflection && srcFrameSettings.enableSubsurfaceScattering && renderPipelineSettings.supportSubsurfaceScattering;
    aggregate.enableTransmission = srcFrameSettings.enableTransmission;
    aggregate.enableAtmosphericScattering = srcFrameSettings.enableAtmosphericScattering;

    // We must take care of the scene view fog flags in the editor.
    if (!CoreUtils.IsSceneViewFogEnabled(camera))
    {
        aggregate.enableAtmosphericScattering = false;
    }

    // Volumetrics are disabled if there is no atmospheric scattering.
    aggregate.enableVolumetrics = srcFrameSettings.enableVolumetrics && renderPipelineSettings.supportVolumetrics && aggregate.enableAtmosphericScattering;
    aggregate.enableReprojectionForVolumetrics = srcFrameSettings.enableReprojectionForVolumetrics;

    aggregate.enableLightLayers = srcFrameSettings.enableLightLayers && renderPipelineSettings.supportLightLayers;

    // We have to fall back to forward-only rendering when the scene view is using the wireframe rendering mode,
    // as rendering everything in wireframe and deferred do not play well together.
    if (GL.wireframe) // Force forward mode for wireframe.
    {
        aggregate.shaderLitMode = LitShaderMode.Forward;
    }
    else
    {
        switch (renderPipelineSettings.supportedLitShaderMode)
        {
            case RenderPipelineSettings.SupportedLitShaderMode.ForwardOnly:
                aggregate.shaderLitMode = LitShaderMode.Forward;
                break;
            case RenderPipelineSettings.SupportedLitShaderMode.DeferredOnly:
                aggregate.shaderLitMode = LitShaderMode.Deferred;
                break;
            case RenderPipelineSettings.SupportedLitShaderMode.Both:
                aggregate.shaderLitMode = srcFrameSettings.shaderLitMode;
                break;
        }
    }

    aggregate.enableDepthPrepassWithDeferredRendering = srcFrameSettings.enableDepthPrepassWithDeferredRendering;

    aggregate.enableTransparentPrepass = srcFrameSettings.enableTransparentPrepass && renderPipelineSettings.supportTransparentDepthPrepass;
    aggregate.enableMotionVectors = camera.cameraType != CameraType.Reflection && srcFrameSettings.enableMotionVectors && renderPipelineSettings.supportMotionVectors;
    // Object motion vectors are disabled if motion vectors are disabled.
    aggregate.enableObjectMotionVectors = srcFrameSettings.enableObjectMotionVectors && aggregate.enableMotionVectors;
    aggregate.enableDecals = srcFrameSettings.enableDecals && renderPipelineSettings.supportDecals;
    aggregate.enableRoughRefraction = srcFrameSettings.enableRoughRefraction;
    aggregate.enableTransparentPostpass = srcFrameSettings.enableTransparentPostpass && renderPipelineSettings.supportTransparentDepthPostpass;
    aggregate.enableDistortion = camera.cameraType != CameraType.Reflection && srcFrameSettings.enableDistortion && renderPipelineSettings.supportDistortion;

    // Planar and real-time cubemap reflections don't need post-processing and render in FP16.
    aggregate.enablePostprocess = camera.cameraType != CameraType.Reflection && srcFrameSettings.enablePostprocess;

    aggregate.enableAsyncCompute = srcFrameSettings.enableAsyncCompute && SystemInfo.supportsAsyncCompute;
    aggregate.runLightListAsync = aggregate.enableAsyncCompute && srcFrameSettings.runLightListAsync;
    aggregate.runSSRAsync = aggregate.enableAsyncCompute && srcFrameSettings.runSSRAsync;
    aggregate.runSSAOAsync = aggregate.enableAsyncCompute && srcFrameSettings.runSSAOAsync;
    aggregate.runContactShadowsAsync = aggregate.enableAsyncCompute && srcFrameSettings.runContactShadowsAsync;
    aggregate.runVolumeVoxelizationAsync = aggregate.enableAsyncCompute && srcFrameSettings.runVolumeVoxelizationAsync;

    aggregate.enableOpaqueObjects = srcFrameSettings.enableOpaqueObjects;
    aggregate.enableTransparentObjects = srcFrameSettings.enableTransparentObjects;
    aggregate.enableRealtimePlanarReflection = srcFrameSettings.enableRealtimePlanarReflection;

    // MSAA is only supported in forward.
    aggregate.enableMSAA = srcFrameSettings.enableMSAA && renderPipelineSettings.supportMSAA && aggregate.shaderLitMode == LitShaderMode.Forward;
    aggregate.ConfigureMSAADependentSettings();
    aggregate.ConfigureStereoDependentSettings(camera);

    // Disable various options for the preview, except if we are a Camera Editor preview.
    if (HDUtils.IsRegularPreviewCamera(camera))
    {
        aggregate.enableShadow = false;
        aggregate.enableContactShadows = false;
        aggregate.enableShadowMask = false;
        aggregate.enableSSR = false;
        aggregate.enableSSAO = false;
        aggregate.enableAtmosphericScattering = false;
        aggregate.enableVolumetrics = false;
        aggregate.enableReprojectionForVolumetrics = false;
        aggregate.enableLightLayers = false;
        aggregate.enableTransparentPrepass = false;
        aggregate.enableMotionVectors = false;
        aggregate.enableObjectMotionVectors = false;
        aggregate.enableDecals = false;
        aggregate.enableTransparentPostpass = false;
        aggregate.enableDistortion = false;
        aggregate.enablePostprocess = false;
    }

    LightLoopSettings.InitializeLightLoopSettings(camera, aggregate, renderPipelineSettings, srcFrameSettings, ref aggregate.lightLoopSettings);

    aggregate.m_LitShaderModeEnumIndex = srcFrameSettings.m_LitShaderModeEnumIndex;
}
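The recurring pattern in all three initializer variants above is "requested AND supported". A hypothetical one-line helper makes the rule explicit (the helper name is an assumption; the HDRP code inlines the expression instead):

static bool SanitizeFlag(bool requested, bool supported) => requested && supported;
// e.g. aggregate.enableSSAO = SanitizeFlag(srcFrameSettings.enableSSAO, renderPipelineSettings.supportSSAO);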
public void ReserveShadows(Camera camera, HDShadowManager shadowManager, HDShadowInitParameters initParameters, CullingResults cullResults, FrameSettings frameSettings, int lightIndex)
{
    Bounds bounds;
    float cameraDistance = Vector3.Distance(camera.transform.position, transform.position);

    m_WillRenderShadows = m_Light.shadows != LightShadows.None && frameSettings.enableShadow;

    m_WillRenderShadows &= cullResults.GetShadowCasterBounds(lightIndex, out bounds);
    // When creating a new light, during the first frame, there is no AdditionalShadowData, so we can't really render shadows.
    m_WillRenderShadows &= m_ShadowData != null && m_ShadowData.shadowDimmer > 0;
    // If the shadow is too far away, we don't render it.
    if (m_ShadowData != null)
    {
        m_WillRenderShadows &= m_Light.type == LightType.Directional || cameraDistance < (m_ShadowData.shadowFadeDistance);
    }

    if (!m_WillRenderShadows)
    {
        return;
    }

    // Create the shadow requests array using the light type.
    if (shadowRequests == null || m_ShadowRequestIndices == null)
    {
        const int maxLightShadowRequestsCount = 6;
        shadowRequests = new HDShadowRequest[maxLightShadowRequestsCount];
        m_ShadowRequestIndices = new int[maxLightShadowRequestsCount];

        for (int i = 0; i < maxLightShadowRequestsCount; i++)
        {
            shadowRequests[i] = new HDShadowRequest();
        }
    }

    Vector2 viewportSize = new Vector2(m_ShadowData.shadowResolution, m_ShadowData.shadowResolution);

    // Compute the dynamic shadow resolution.
    if (initParameters.useDynamicViewportRescale && m_Light.type != LightType.Directional)
    {
        // Resize the viewport by the normalized size of the light on screen.
        // Once we have access to the non-screen-clamped bounding-sphere light size, we could use it to scale the shadow map resolution.
        // For the moment, this will be enough.
        viewportSize *= Mathf.Lerp(64f / viewportSize.x, 1f, m_Light.range / (camera.transform.position - transform.position).magnitude);
        viewportSize = Vector2.Max(new Vector2(64f, 64f) / viewportSize, viewportSize);

        // Prevent flickering caused by the floating size of the viewport.
        viewportSize.x = Mathf.Round(viewportSize.x);
        viewportSize.y = Mathf.Round(viewportSize.y);
    }

    viewportSize = Vector2.Max(viewportSize, new Vector2(16, 16));

    // Update the directional shadow atlas size.
    if (m_Light.type == LightType.Directional)
    {
        shadowManager.UpdateDirectionalShadowResolution((int)viewportSize.x, m_ShadowSettings.cascadeShadowSplitCount);
    }

    // Reserve the wanted resolution in the shadow atlas.
    bool allowResize = m_Light.type != LightType.Directional;
    int count = GetShadowRequestCount();
    for (int index = 0; index < count; index++)
    {
        m_ShadowRequestIndices[index] = shadowManager.ReserveShadowResolutions(viewportSize, allowResize);
    }
}
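A worked example of the dynamic rescale above, with hypothetical numbers: a 512-pixel shadow map for a punctual light whose range-to-distance ratio is 0.5 shrinks to 288 pixels.

Vector2 viewportSize = new Vector2(512, 512);
float coverage = 0.5f; // stands in for m_Light.range / distance-to-camera
viewportSize *= Mathf.Lerp(64f / viewportSize.x, 1f, coverage); // Lerp(0.125, 1, 0.5) = 0.5625 -> 288 pixels
viewportSize.x = Mathf.Round(viewportSize.x); // rounding avoids per-frame viewport-size flicker
viewportSize.y = Mathf.Round(viewportSize.y);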
public static void RegisterDebug(string menuName, FrameSettings frameSettings)
{
    List<DebugUI.Widget> widgets = new List<DebugUI.Widget>();
    widgets.AddRange(
        new DebugUI.Widget[]
        {
            new DebugUI.Foldout
            {
                displayName = "Rendering Passes",
                children =
                {
                    new DebugUI.BoolField { displayName = "Enable Transparent Prepass", getter = () => frameSettings.enableTransparentPrepass, setter = value => frameSettings.enableTransparentPrepass = value },
                    new DebugUI.BoolField { displayName = "Enable Transparent Postpass", getter = () => frameSettings.enableTransparentPostpass, setter = value => frameSettings.enableTransparentPostpass = value },
                    new DebugUI.BoolField { displayName = "Enable Motion Vectors", getter = () => frameSettings.enableMotionVectors, setter = value => frameSettings.enableMotionVectors = value },
                    new DebugUI.BoolField { displayName = "Enable Object Motion Vectors", getter = () => frameSettings.enableObjectMotionVectors, setter = value => frameSettings.enableObjectMotionVectors = value },
                    new DebugUI.BoolField { displayName = "Enable DBuffer", getter = () => frameSettings.enableDecals, setter = value => frameSettings.enableDecals = value },
                    new DebugUI.BoolField { displayName = "Enable Rough Refraction", getter = () => frameSettings.enableRoughRefraction, setter = value => frameSettings.enableRoughRefraction = value },
                    new DebugUI.BoolField { displayName = "Enable Distortion", getter = () => frameSettings.enableDistortion, setter = value => frameSettings.enableDistortion = value },
                    new DebugUI.BoolField { displayName = "Enable Postprocess", getter = () => frameSettings.enablePostprocess, setter = value => frameSettings.enablePostprocess = value },
                }
            },
            new DebugUI.Foldout
            {
                displayName = "Rendering Settings",
                children =
                {
                    new DebugUI.EnumField { displayName = "Lit Shader Mode", getter = () => (int)frameSettings.shaderLitMode, setter = value => frameSettings.shaderLitMode = (LitShaderMode)value, autoEnum = typeof(LitShaderMode), getIndex = () => frameSettings.m_LitShaderModeEnumIndex, setIndex = value => frameSettings.m_LitShaderModeEnumIndex = value },
                    new DebugUI.BoolField { displayName = "Deferred Depth Prepass", getter = () => frameSettings.enableDepthPrepassWithDeferredRendering, setter = value => frameSettings.enableDepthPrepassWithDeferredRendering = value },
                    new DebugUI.BoolField { displayName = "Enable Opaque Objects", getter = () => frameSettings.enableOpaqueObjects, setter = value => frameSettings.enableOpaqueObjects = value },
                    new DebugUI.BoolField { displayName = "Enable Transparent Objects", getter = () => frameSettings.enableTransparentObjects, setter = value => frameSettings.enableTransparentObjects = value },
                    new DebugUI.BoolField { displayName = "Enable Realtime Planar Reflection", getter = () => frameSettings.enableRealtimePlanarReflection, setter = value => frameSettings.enableRealtimePlanarReflection = value },
                    new DebugUI.BoolField { displayName = "Enable MSAA", getter = () => frameSettings.enableMSAA, setter = value => frameSettings.enableMSAA = value },
                }
            },
            new DebugUI.Foldout
            {
                displayName = "Lighting Settings",
                children =
                {
                    new DebugUI.BoolField { displayName = "Enable SSR", getter = () => frameSettings.enableSSR, setter = value => frameSettings.enableSSR = value },
                    new DebugUI.BoolField { displayName = "Enable SSAO", getter = () => frameSettings.enableSSAO, setter = value => frameSettings.enableSSAO = value },
                    new DebugUI.BoolField { displayName = "Enable SubsurfaceScattering", getter = () => frameSettings.enableSubsurfaceScattering, setter = value => frameSettings.enableSubsurfaceScattering = value },
                    new DebugUI.BoolField { displayName = "Enable Transmission", getter = () => frameSettings.enableTransmission, setter = value => frameSettings.enableTransmission = value },
                    new DebugUI.BoolField { displayName = "Enable Shadows", getter = () => frameSettings.enableShadow, setter = value => frameSettings.enableShadow = value },
                    new DebugUI.BoolField { displayName = "Enable Contact Shadows", getter = () => frameSettings.enableContactShadows, setter = value => frameSettings.enableContactShadows = value },
                    new DebugUI.BoolField { displayName = "Enable ShadowMask", getter = () => frameSettings.enableShadowMask, setter = value => frameSettings.enableShadowMask = value },
                    new DebugUI.BoolField { displayName = "Enable Atmospheric Scattering", getter = () => frameSettings.enableAtmosphericScattering, setter = value => frameSettings.enableAtmosphericScattering = value },
                    new DebugUI.BoolField { displayName = "Enable Volumetrics", getter = () => frameSettings.enableVolumetrics, setter = value => frameSettings.enableVolumetrics = value },
                    new DebugUI.BoolField { displayName = "Enable Reprojection For Volumetrics", getter = () => frameSettings.enableReprojectionForVolumetrics, setter = value => frameSettings.enableReprojectionForVolumetrics = value },
                    new DebugUI.BoolField { displayName = "Enable LightLayers", getter = () => frameSettings.enableLightLayers, setter = value => frameSettings.enableLightLayers = value },
                }
            },
            new DebugUI.Foldout
            {
                displayName = "Async Compute Settings",
                children =
                {
                    new DebugUI.BoolField { displayName = "Enable Async Compute", getter = () => frameSettings.enableAsyncCompute, setter = value => frameSettings.enableAsyncCompute = value },
                    new DebugUI.BoolField { displayName = "Run Build Light List Async", getter = () => frameSettings.runLightListAsync, setter = value => frameSettings.runLightListAsync = value },
                    new DebugUI.BoolField { displayName = "Run SSR Async", getter = () => frameSettings.runSSRAsync, setter = value => frameSettings.runSSRAsync = value },
                    new DebugUI.BoolField { displayName = "Run SSAO Async", getter = () => frameSettings.runSSAOAsync, setter = value => frameSettings.runSSAOAsync = value },
                    new DebugUI.BoolField { displayName = "Run Contact Shadows Async", getter = () => frameSettings.runContactShadowsAsync, setter = value => frameSettings.runContactShadowsAsync = value },
                    new DebugUI.BoolField { displayName = "Run Volume Voxelization Async", getter = () => frameSettings.runVolumeVoxelizationAsync, setter = value => frameSettings.runVolumeVoxelizationAsync = value },
                }
            }
        });

    LightLoopSettings.RegisterDebug(frameSettings.lightLoopSettings, widgets);

    var panel = DebugManager.instance.GetPanel(menuName, true);
    panel.children.Add(widgets.ToArray());
}
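A hypothetical call site, sketching how the registration above is typically driven (the menu name is an assumption):

// Expose a camera's FrameSettings in the runtime debug window:
FrameSettings.RegisterDebug("Camera: Main", mainCameraFrameSettings);
// The panel is created on demand by GetPanel(menuName, true); calling RegisterDebug
// again with the same name appends widgets to the existing panel rather than creating a new one.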
public void VolumetricLightingPass(HDCamera camera, CommandBuffer cmd, FrameSettings frameSettings)
{
    if (preset == VolumetricLightingPreset.Off)
    {
        return;
    }

    using (new ProfilingSample(cmd, "Volumetric Lighting"))
    {
        VBuffer vBuffer = FindVBuffer(camera.GetViewID());
        Debug.Assert(vBuffer != null);

        if (HomogeneousFog.GetGlobalFogComponent() == null)
        {
            // Clear the render target instead of running the shader.
            // CoreUtils.SetRenderTarget(cmd, GetVBufferLightingIntegral(viewOffset), ClearFlag.Color, CoreUtils.clearColorAllBlack);
            // return;

            // Clearing 3D textures does not seem to work!
            // Use the workaround of running the full shader with no volume.
        }

        bool enableClustered = frameSettings.lightLoopSettings.enableTileAndCluster;
        bool enableReprojection = Application.isPlaying && camera.camera.cameraType == CameraType.Game;

        int kernel;

        if (enableReprojection)
        {
            // Only available in Play Mode because all the frame counters in Edit Mode are broken.
            kernel = m_VolumetricLightingCS.FindKernel(enableClustered ? "VolumetricLightingClusteredReproj" : "VolumetricLightingAllLightsReproj");
        }
        else
        {
            kernel = m_VolumetricLightingCS.FindKernel(enableClustered ? "VolumetricLightingClustered" : "VolumetricLightingAllLights");
        }

        int w = 0, h = 0, d = 0;
        Vector2 scale = ComputeVBufferResolutionAndScale(preset, (int)camera.screenSize.x, (int)camera.screenSize.y, ref w, ref h, ref d);
        float vFoV = camera.camera.fieldOfView * Mathf.Deg2Rad;

        // Compose the matrix which allows us to compute the world-space view direction.
        // Compute it using the scaled resolution to account for the visible area of the VBuffer.
        Vector4 scaledRes = new Vector4(w * scale.x, h * scale.y, 1.0f / (w * scale.x), 1.0f / (h * scale.y));
        Matrix4x4 transform = HDUtils.ComputePixelCoordToWorldSpaceViewDirectionMatrix(vFoV, scaledRes, camera.viewMatrix, false);

        camera.SetupComputeShader(m_VolumetricLightingCS, cmd);

        Vector2[] xySeq = GetHexagonalClosePackedSpheres7();

        // This is a sequence of 7 equidistant numbers from 1/14 to 13/14.
        // Each of them is the centroid of the interval of length 2/14.
        // They've been rearranged in a sequence of pairs {small, large}, s.t. (small + large) = 1.
        // That way, the running average position is close to 0.5.
        // | 6 | 2 | 4 | 1 | 5 | 3 | 7 |
        // |   |   |   | o |   |   |   |
        // |   | o |   | x |   |   |   |
        // |   | x |   | x |   | o |   |
        // |   | x | o | x |   | x |   |
        // |   | x | x | x | o | x |   |
        // | o | x | x | x | x | x |   |
        // | x | x | x | x | x | x | o |
        // | x | x | x | x | x | x | x |
        float[] zSeq = { 7.0f / 14.0f, 3.0f / 14.0f, 11.0f / 14.0f, 5.0f / 14.0f, 9.0f / 14.0f, 1.0f / 14.0f, 13.0f / 14.0f };

        int rfc = Time.renderedFrameCount;
        int sampleIndex = rfc % 7;
        Vector4 offset = new Vector4(xySeq[sampleIndex].x, xySeq[sampleIndex].y, zSeq[sampleIndex], rfc);

        // TODO: set 'm_VolumetricLightingPreset'.
        cmd.SetComputeVectorParam(m_VolumetricLightingCS, HDShaderIDs._VBufferSampleOffset, offset);
        cmd.SetComputeMatrixParam(m_VolumetricLightingCS, HDShaderIDs._VBufferCoordToViewDirWS, transform);
        cmd.SetComputeTextureParam(m_VolumetricLightingCS, kernel, HDShaderIDs._VBufferLightingIntegral, vBuffer.GetLightingIntegralBuffer()); // Write

        if (enableReprojection)
        {
            cmd.SetComputeTextureParam(m_VolumetricLightingCS, kernel, HDShaderIDs._VBufferLightingFeedback, vBuffer.GetLightingFeedbackBuffer()); // Write
            cmd.SetComputeTextureParam(m_VolumetricLightingCS, kernel, HDShaderIDs._VBufferLightingHistory, vBuffer.GetLightingHistoryBuffer()); // Read
        }

        // The shader defines GROUP_SIZE_1D = 16.
        cmd.DispatchCompute(m_VolumetricLightingCS, kernel, (w + 15) / 16, (h + 15) / 16, 1);
    }
}
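The `(w + 15) / 16` pattern above is an integer ceiling division, sized so the 16x16 thread groups cover the whole VBuffer even when its dimensions are not multiples of 16. As a hypothetical helper (the name is an assumption):

static int DivRoundUp(int size, int groupSize) => (size + groupSize - 1) / groupSize;
// cmd.DispatchCompute(m_VolumetricLightingCS, kernel, DivRoundUp(w, 16), DivRoundUp(h, 16), 1);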
public override void PushShaderParameters(CommandBuffer cmd, FrameSettings frameSettings)
{
    PushShaderParametersCommon(cmd, FogType.Linear, frameSettings);
    cmd.SetGlobalVector(m_LinearFogParam, new Vector4(fogStart, fogEnd, 1.0f / (fogEnd - fogStart), 0.0f));
}
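The packed vector is presumably consumed on the shader side as a standard linear fog ramp. A sketch of that consumption (shown in C# for illustration, with the HLSL equivalent in the comment; not taken from the HDRP shader source):

static float LinearFogFactor(float distance, Vector4 linearFogParam)
{
    // linearFogParam = (fogStart, fogEnd, 1 / (fogEnd - fogStart), unused)
    // HLSL equivalent: saturate((_LinearFogParam.y - dist) * _LinearFogParam.z)
    return Mathf.Clamp01((linearFogParam.y - distance) * linearFogParam.z);
}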