// Builds the per-camera container state: frustum storage, per-eye stereo arrays,
// and the shader-facing plane equations. m_AdditionalCameraData is resolved later,
// in Update(), because the component can be added/changed after construction.
public HDCamera(Camera cam)
{
    camera = cam;

    // Frustum storage: 6 planes and 8 corners, filled in during Update().
    frustum = new Frustum
    {
        planes = new Plane[6],
        corners = new Vector3[8]
    };
    frustumPlaneEquations = new Vector4[6];

    // One slot per eye for the stereo rendering path.
    viewMatrixStereo = new Matrix4x4[2];
    projMatrixStereo = new Matrix4x4[2];
    viewProjStereo = new Matrix4x4[2];
    invViewStereo = new Matrix4x4[2];
    invProjStereo = new Matrix4x4[2];
    invViewProjStereo = new Matrix4x4[2];

    // Per-eye camera positions (current, eye-offset, and previous frame).
    worldSpaceCameraPosStereo = new Vector4[2];
    worldSpaceCameraPosStereoEyeOffset = new Vector4[2];
    prevWorldSpaceCameraPosStereo = new Vector4[2];

    m_AdditionalCameraData = null; // Init in Update

    Reset();
}
// Initializes the per-camera containers (frustum storage, stereo matrix arrays,
// post-process context) and resets transient state. m_AdditionalCameraData is
// resolved later, in Update().
public HDCamera(Camera cam)
{
    camera = cam;

    // Frustum storage: 6 planes, 8 corners, plus the shader-facing plane equations.
    frustum = new Frustum
    {
        planes = new Plane[6],
        corners = new Vector3[8]
    };
    frustumPlaneEquations = new Vector4[6];

    // One slot per eye for the stereo rendering path.
    viewMatrixStereo = new Matrix4x4[2];
    projMatrixStereo = new Matrix4x4[2];
    viewProjStereo = new Matrix4x4[2];
    invViewStereo = new Matrix4x4[2];
    invProjStereo = new Matrix4x4[2];
    invViewProjStereo = new Matrix4x4[2];

    postprocessRenderContext = new PostProcessRenderContext();

    m_AdditionalCameraData = null; // Init in Update

    // No color pyramid has been rendered yet for this camera.
    colorPyramidIsValid = false;

    Reset();
}
// Initializes frustum plane storage and the post-process context, and grabs the
// HDAdditionalCameraData component attached to the camera (may be null).
public HDCamera(Camera cam)
{
    camera = cam;

    // 6 frustum planes plus their shader-facing Vector4 equations.
    frustumPlanes = new Plane[6];
    frustumPlaneEquations = new Vector4[6];

    postprocessRenderContext = new PostProcessRenderContext();

    // Optional per-camera HD settings; GetComponent returns null when absent.
    m_AdditionalCameraData = cam.GetComponent<HDAdditionalCameraData>();

    Reset();
}
// Initializes frustum storage, per-eye stereo matrices and the post-process
// context, then resets transient state.
public HDCamera(Camera cam)
{
    camera = cam;

    // Frustum object plus the shader-facing plane equations (filled in Update()).
    frustum = new Frustum();
    frustumPlaneEquations = new Vector4[6];

    // One slot per eye for the stereo rendering path.
    viewMatrixStereo = new Matrix4x4[2];
    projMatrixStereo = new Matrix4x4[2];

    postprocessRenderContext = new PostProcessRenderContext();

    // Optional per-camera HD settings; GetComponent returns null when absent.
    m_AdditionalCameraData = cam.GetComponent<HDAdditionalCameraData>();

    Reset();
}
// Minimal construction: frustum storage only. m_AdditionalCameraData is
// resolved later, in Update().
public HDCamera(Camera cam)
{
    camera = cam;

    // Frustum storage: 6 planes, 8 corners, plus the shader-facing plane equations.
    frustum = new Frustum
    {
        planes = new Plane[6],
        corners = new Vector3[8]
    };
    frustumPlaneEquations = new Vector4[6];

    m_AdditionalCameraData = null; // Init in Update

    Reset();
}
// Initializes frustum storage, the two per-eye stereo matrices and the
// post-process context. m_AdditionalCameraData is resolved later, in Update().
public HDCamera(Camera cam)
{
    camera = cam;

    // Frustum object plus the shader-facing plane equations (filled in Update()).
    frustum = new Frustum();
    frustumPlaneEquations = new Vector4[6];

    // One slot per eye for the stereo rendering path.
    viewMatrixStereo = new Matrix4x4[2];
    projMatrixStereo = new Matrix4x4[2];

    postprocessRenderContext = new PostProcessRenderContext();

    m_AdditionalCameraData = null; // Init in Update

    Reset();
}
// This is called at the creation of the HD Additional Camera Data, to convert
// the legacy camera settings to HD: the built-in clearFlags value is mapped to
// the HD clearDepth / clearColorMode pair.
public static void InitDefaultHDAdditionalCameraData(HDAdditionalCameraData cameraData)
{
    var camera = cameraData.gameObject.GetComponent<Camera>();

    // Every legacy mode except Nothing clears depth.
    cameraData.clearDepth = camera.clearFlags != CameraClearFlags.Nothing;

    switch (camera.clearFlags)
    {
        case CameraClearFlags.Skybox:
            cameraData.clearColorMode = ClearColorMode.Sky;
            break;
        case CameraClearFlags.SolidColor:
            cameraData.clearColorMode = ClearColorMode.BackgroundColor;
            break;
        default: // Depth-only or Nothing: no color clear.
            cameraData.clearColorMode = ClearColorMode.None;
            break;
    }
}
/// <summary>Register FrameSettingsHistory for DebugMenu.</summary>
/// <param name="camera">Camera whose frame settings history is registered.</param>
/// <param name="additionalCameraData">Optional HD camera data used when aggregating the settings.</param>
/// <returns>The debug data entry stored in <c>frameSettingsHistory</c> for this camera.</returns>
public static IDebugData RegisterDebug(Camera camera, HDAdditionalCameraData additionalCameraData)
{
    HDRenderPipelineAsset hdrpAsset = GraphicsSettings.renderPipelineAsset as HDRenderPipelineAsset;
    Assertions.Assert.IsNotNull(hdrpAsset);

    // complete frame settings history is required for displaying debug menu.
    // AggregateFrameSettings will finish the registration if it is not yet registered
    FrameSettings registering = new FrameSettings();
    AggregateFrameSettings(ref registering, camera, additionalCameraData, hdrpAsset);

    // Look up the history entry once instead of indexing the dictionary twice
    // (once for the panel, once for the return value).
    var history = frameSettingsHistory[camera];
    GenerateFrameSettingsPanel(camera.name, history);
#if UNITY_EDITOR
    if (sceneViewCamera == null && camera.cameraType == CameraType.SceneView)
    {
        sceneViewCamera = camera;
    }
#endif
    return history;
}
// Copies the user-facing settings onto 'data'; used by the camera editor's
// Reset() workflow so a fresh component picks up this component's values.
public void CopyTo(HDAdditionalCameraData data)
{
    // Clear / background settings.
    data.clearColorMode = clearColorMode;
    data.backgroundColorHDR = backgroundColorHDR;
    data.clearDepth = clearDepth;

    // Rendering and volume settings.
    data.customRenderingSettings = customRenderingSettings;
    data.volumeLayerMask = volumeLayerMask;
    data.volumeAnchorOverride = volumeAnchorOverride;

    // Physical camera parameters.
    data.aperture = aperture;
    data.shutterSpeed = shutterSpeed;
    data.iso = iso;

    // Frame settings are deep-copied, then flagged dirty so the runtime copy
    // gets rebuilt on the next update.
    m_FrameSettings.CopyTo(data.m_FrameSettings);
    m_FrameSettingsRuntime.CopyTo(data.m_FrameSettingsRuntime);
    data.m_frameSettingsIsDirty = true; // Let's be sure it is dirty for update

    // We must not copy the following (registration / editor-preview state is per-instance):
    //data.m_IsDebugRegistered = m_IsDebugRegistered;
    //data.m_CameraRegisterName = m_CameraRegisterName;
    //data.isEditorCameraPreview = isEditorCameraPreview;
}
// Pass all the systems that may want to update per-camera data here.
// That way you will never update an HDCamera and forget to update the dependent system.
/// <summary>
/// Per-frame refresh of camera-dependent state: frame settings, anti-aliasing,
/// history buffer (re)allocation, view constants, viewport/RTHandle sizes,
/// volume parameters and recorder capture actions.
/// NOTE(review): statement order matters here — e.g. history sizes are sampled
/// both before and after m_HistoryRTSystem.Swap() to detect reallocation.
/// </summary>
/// <param name="currentFrameSettings">Frame settings to apply to this camera this frame.</param>
/// <param name="vlSys">Volumetric lighting system whose per-camera data is kept in sync.</param>
/// <param name="msaaSamples">MSAA sample count used as the RTHandle reference.</param>
public void Update(FrameSettings currentFrameSettings, VolumetricLightingSystem vlSys, MSAASamples msaaSamples)
{
    // store a shortcut on HDAdditionalCameraData (done here and not in the constructor as
    // we don't create HDCamera at every frame and user can change the HDAdditionalData later (Like when they create a new scene).
    m_AdditionalCameraData = camera.GetComponent<HDAdditionalCameraData>();

    m_frameSettings = currentFrameSettings;

    UpdateAntialiasing();

    // Handle memory allocation.
    {
        bool isColorPyramidHistoryRequired = m_frameSettings.IsEnabled(FrameSettingsField.SSR); // TODO: TAA as well
        bool isVolumetricHistoryRequired = m_frameSettings.IsEnabled(FrameSettingsField.Volumetrics) && m_frameSettings.IsEnabled(FrameSettingsField.ReprojectionForVolumetrics);

        int numColorPyramidBuffersRequired = isColorPyramidHistoryRequired ? 2 : 1; // TODO: 1 -> 0
        int numVolumetricBuffersRequired = isVolumetricHistoryRequired ? 2 : 0; // History + feedback

        // Reallocate only when the requirement changed; this nukes ALL history.
        if ((numColorPyramidBuffersAllocated != numColorPyramidBuffersRequired) || (numVolumetricBuffersAllocated != numVolumetricBuffersRequired))
        {
            // Reinit the system.
            colorPyramidHistoryIsValid = false;
            vlSys.DeinitializePerCameraData(this);

            // The history system only supports the "nuke all" option.
            m_HistoryRTSystem.Dispose();
            m_HistoryRTSystem = new BufferedRTHandleSystem();

            if (numColorPyramidBuffersRequired != 0)
            {
                AllocHistoryFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain, HistoryBufferAllocatorFunction, numColorPyramidBuffersRequired);
                colorPyramidHistoryIsValid = false;
            }

            vlSys.InitializePerCameraData(this, numVolumetricBuffersRequired);

            // Mark as init.
            numColorPyramidBuffersAllocated = numColorPyramidBuffersRequired;
            numVolumetricBuffersAllocated = numVolumetricBuffersRequired;
        }
    }

    UpdateViewConstants();

    // Update viewport sizes.
    m_ViewportSizePrevFrame = new Vector2Int(m_ActualWidth, m_ActualHeight);
    // Clamp to 1 so downstream 1/width, 1/height computations never divide by zero.
    m_ActualWidth = Math.Max(camera.pixelWidth, 1);
    m_ActualHeight = Math.Max(camera.pixelHeight, 1);

    Vector2Int nonScaledSize = new Vector2Int(m_ActualWidth, m_ActualHeight);
    if (isMainGameView)
    {
        // Dynamic resolution: render at the scaled size, but keep the original
        // size around for RTHandle reference sizing below.
        Vector2Int scaledSize = HDDynamicResolutionHandler.instance.GetRTHandleScale(new Vector2Int(camera.pixelWidth, camera.pixelHeight));
        nonScaledSize = HDDynamicResolutionHandler.instance.cachedOriginalSize;
        m_ActualWidth = scaledSize.x;
        m_ActualHeight = scaledSize.y;
    }

    var screenWidth = m_ActualWidth;
    var screenHeight = m_ActualHeight;

    // XRTODO: double-wide cleanup
    textureWidthScaling = new Vector4(1.0f, 1.0f, 0.0f, 0.0f);
    if (camera.stereoEnabled && XRGraphics.stereoRenderingMode == XRGraphics.StereoRenderingMode.SinglePass)
    {
        // Single-pass stereo uses the XR eye texture size instead of the camera's.
        Debug.Assert(HDDynamicResolutionHandler.instance.SoftwareDynamicResIsEnabled() == false);

        var xrDesc = XRGraphics.eyeTextureDesc;
        nonScaledSize.x = screenWidth = m_ActualWidth = xrDesc.width;
        nonScaledSize.y = screenHeight = m_ActualHeight = xrDesc.height;

        textureWidthScaling = new Vector4(2.0f, 0.5f, 0.0f, 0.0f);
    }

    m_LastFrameActive = Time.frameCount;

    // Capture the pre-Swap() history sizes so reallocation can be detected below.
    // TODO: cache this, or make the history system spill the beans...
    Vector2Int prevColorPyramidBufferSize = Vector2Int.zero;

    if (numColorPyramidBuffersAllocated > 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;
        prevColorPyramidBufferSize.x = rt.width;
        prevColorPyramidBufferSize.y = rt.height;
    }

    // TODO: cache this, or make the history system spill the beans...
    Vector3Int prevVolumetricBufferSize = Vector3Int.zero;

    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;
        prevVolumetricBufferSize.x = rt.width;
        prevVolumetricBufferSize.y = rt.height;
        prevVolumetricBufferSize.z = rt.volumeDepth;
    }

    m_msaaSamples = msaaSamples;
    // Here we use the non scaled resolution for the RTHandleSystem ref size because we assume that at some point we will need full resolution anyway.
    // This is also useful because we have some RT after final up-rez that will need the full size.
    RTHandles.SetReferenceSize(nonScaledSize.x, nonScaledSize.y, m_msaaSamples);
    m_HistoryRTSystem.SetReferenceSize(nonScaledSize.x, nonScaledSize.y, m_msaaSamples);
    m_HistoryRTSystem.Swap();

    // Compare post-Swap() sizes with the ones captured above; a mismatch means
    // the history RT was reallocated and its content is garbage.
    Vector3Int currColorPyramidBufferSize = Vector3Int.zero;

    if (numColorPyramidBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;
        currColorPyramidBufferSize.x = rt.width;
        currColorPyramidBufferSize.y = rt.height;

        if ((currColorPyramidBufferSize.x != prevColorPyramidBufferSize.x) || (currColorPyramidBufferSize.y != prevColorPyramidBufferSize.y))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            colorPyramidHistoryIsValid = false;
        }
    }

    Vector3Int currVolumetricBufferSize = Vector3Int.zero;

    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;
        currVolumetricBufferSize.x = rt.width;
        currVolumetricBufferSize.y = rt.height;
        currVolumetricBufferSize.z = rt.volumeDepth;

        if ((currVolumetricBufferSize.x != prevVolumetricBufferSize.x) || (currVolumetricBufferSize.y != prevVolumetricBufferSize.y) || (currVolumetricBufferSize.z != prevVolumetricBufferSize.z))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            volumetricHistoryIsValid = false;
        }
    }

    // Viewport scales are expressed relative to the (possibly larger) RTHandle max size.
    int maxWidth = RTHandles.maxWidth;
    int maxHeight = RTHandles.maxHeight;

    Vector2 rcpTextureSize = Vector2.one / new Vector2(maxWidth, maxHeight);

    m_ViewportScalePreviousFrame = m_ViewportSizePrevFrame * rcpTextureSize;
    m_ViewportScaleCurrentFrame = new Vector2Int(m_ActualWidth, m_ActualHeight) * rcpTextureSize;

    screenSize = new Vector4(screenWidth, screenHeight, 1.0f / screenWidth, 1.0f / screenHeight);
    screenParams = new Vector4(screenSize.x, screenSize.y, 1 + screenSize.z, 1 + screenSize.w);

    finalViewport = new Rect(camera.pixelRect.x, camera.pixelRect.y, nonScaledSize.x, nonScaledSize.y);

    if (vlSys != null)
    {
        vlSys.UpdatePerCameraData(this);
    }

    UpdateVolumeParameters();

    m_RecorderCaptureActions = CameraCaptureBridge.GetCaptureActions(camera);
}
// Pass all the systems that may want to update per-camera data here.
// That way you will never update an HDCamera and forget to update the dependent system.
/// <summary>
/// Per-frame refresh of camera-dependent state: view/projection matrices (with
/// optional TAA jitter), camera-relative rendering adjustments, z-buffer and
/// projection parameters, frustum planes, viewport/RTHandle sizes and the
/// volumetric lighting system's per-camera data.
/// NOTE(review): statement order matters — the previous-frame matrices are
/// captured before the current ones are written, once per frame.
/// </summary>
/// <param name="currentFrameSettings">Frame settings to apply to this camera this frame.</param>
/// <param name="postProcessLayer">Post-process layer used to query TAA state and the jittered projection.</param>
/// <param name="vlSys">Volumetric lighting system whose per-camera data is kept in sync.</param>
public void Update(FrameSettings currentFrameSettings, PostProcessLayer postProcessLayer, VolumetricLightingSystem vlSys)
{
    // store a shortcut on HDAdditionalCameraData (done here and not in the constructor as
    // we don't create HDCamera at every frame and user can change the HDAdditionalData later (Like when they create a new scene).
    m_AdditionalCameraData = camera.GetComponent<HDAdditionalCameraData>();

    m_frameSettings = currentFrameSettings;

    // If TAA is enabled projMatrix will hold a jittered projection matrix. The original,
    // non-jittered projection matrix can be accessed via nonJitteredProjMatrix.
    bool taaEnabled = camera.cameraType == CameraType.Game && HDUtils.IsTemporalAntialiasingActive(postProcessLayer) && m_frameSettings.enablePostprocess;

    var nonJitteredCameraProj = camera.projectionMatrix;
    var cameraProj = taaEnabled ? postProcessLayer.temporalAntialiasing.GetJitteredProjectionMatrix(camera) : nonJitteredCameraProj;

    // The actual projection matrix used in shaders is actually massaged a bit to work across all platforms
    // (different Z value ranges etc.)
    var gpuProj = GL.GetGPUProjectionMatrix(cameraProj, true); // Had to change this from 'false'
    var gpuView = camera.worldToCameraMatrix;
    var gpuNonJitteredProj = GL.GetGPUProjectionMatrix(nonJitteredCameraProj, true);

    // In stereo, this corresponds to the center eye position
    var pos = camera.transform.position;
    worldSpaceCameraPos = pos;

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        // Zero out the translation component.
        gpuView.SetColumn(3, new Vector4(0, 0, 0, 1));
    }

    var gpuVP = gpuNonJitteredProj * gpuView;

    // A camera could be rendered multiple times per frame, only updates the previous view proj & pos if needed
    if (m_LastFrameActive != Time.frameCount)
    {
        if (isFirstFrame)
        {
            // No history yet: seed the previous-frame data with the current frame.
            prevCameraPos = pos;
            prevViewProjMatrix = gpuVP;
        }
        else
        {
            prevCameraPos = cameraPos;
            prevViewProjMatrix = nonJitteredViewProjMatrix;
        }

        isFirstFrame = false;
    }

    taaFrameIndex = taaEnabled ? (uint)postProcessLayer.temporalAntialiasing.sampleIndex : 0;
    // Rotation derived from the frame index in quarter-turn (pi/2) steps.
    taaFrameRotation = new Vector2(Mathf.Sin(taaFrameIndex * (0.5f * Mathf.PI)), Mathf.Cos(taaFrameIndex * (0.5f * Mathf.PI)));

    viewMatrix = gpuView;
    projMatrix = gpuProj;
    nonJitteredProjMatrix = gpuNonJitteredProj;
    cameraPos = pos;
    detViewMatrix = viewMatrix.determinant;

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        Matrix4x4 cameraDisplacement = Matrix4x4.Translate(cameraPos - prevCameraPos); // Non-camera-relative positions
        prevViewProjMatrix *= cameraDisplacement; // Now prevViewProjMatrix correctly transforms this frame's camera-relative positionWS
    }

    float n = camera.nearClipPlane;
    float f = camera.farClipPlane;

    // Analyze the projection matrix.
    // p[2][3] = (reverseZ ? 1 : -1) * (depth_0_1 ? 1 : 2) * (f * n) / (f - n)
    float scale = projMatrix[2, 3] / (f * n) * (f - n);
    bool depth_0_1 = Mathf.Abs(scale) < 1.5f;
    bool reverseZ = scale > 0;
    bool flipProj = projMatrix.inverse.MultiplyPoint(new Vector3(0, 1, 0)).y < 0;

    // http://www.humus.name/temp/Linearize%20depth.txt
    if (reverseZ)
    {
        zBufferParams = new Vector4(-1 + f / n, 1, -1 / f + 1 / n, 1 / f);
    }
    else
    {
        zBufferParams = new Vector4(1 - f / n, f / n, 1 / f - 1 / n, 1 / n);
    }

    projectionParams = new Vector4(flipProj ? -1 : 1, n, f, 1.0f / f);

    float orthoHeight = camera.orthographic ? 2 * camera.orthographicSize : 0;
    float orthoWidth = orthoHeight * camera.aspect;
    unity_OrthoParams = new Vector4(orthoWidth, orthoHeight, 0, camera.orthographic ? 1 : 0);

    frustum = Frustum.Create(viewProjMatrix, depth_0_1, reverseZ);

    // Left, right, top, bottom, near, far.
    for (int i = 0; i < 6; i++)
    {
        frustumPlaneEquations[i] = new Vector4(frustum.planes[i].normal.x, frustum.planes[i].normal.y, frustum.planes[i].normal.z, frustum.planes[i].distance);
    }

    m_LastFrameActive = Time.frameCount;

    m_ActualWidth = camera.pixelWidth;
    m_ActualHeight = camera.pixelHeight;
    var screenWidth = m_ActualWidth;
    var screenHeight = m_ActualHeight;
//forest-begin: Added XboxOne to define around XR code
#if !UNITY_SWITCH && !UNITY_XBOXONE
//forest-end:
    if (m_frameSettings.enableStereo)
    {
        // Stereo renders into the XR eye texture, not the camera's pixel rect.
        screenWidth = XRSettings.eyeTextureWidth;
        screenHeight = XRSettings.eyeTextureHeight;

        var xrDesc = XRSettings.eyeTextureDesc;
        m_ActualWidth = xrDesc.width;
        m_ActualHeight = xrDesc.height;

        ConfigureStereoMatrices();
    }
#endif

    // Unfortunately sometimes (like in the HDCameraEditor) HDUtils.hdrpSettings can be null because of scripts that change the current pipeline...
    m_msaaSamples = HDUtils.hdrpSettings != null ? HDUtils.hdrpSettings.msaaSampleCount : MSAASamples.None;
    RTHandles.SetReferenceSize(m_ActualWidth, m_ActualHeight, m_frameSettings.enableMSAA, m_msaaSamples);
    m_HistoryRTSystem.SetReferenceSize(m_ActualWidth, m_ActualHeight, m_frameSettings.enableMSAA, m_msaaSamples);
    m_HistoryRTSystem.Swap();

    // Viewport scales are expressed relative to the (possibly larger) RTHandle max size.
    int maxWidth = RTHandles.maxWidth;
    int maxHeight = RTHandles.maxHeight;
    m_ViewportScalePreviousFrame = m_ViewportScaleCurrentFrame; // Double-buffer
    m_ViewportScaleCurrentFrame.x = (float)m_ActualWidth / maxWidth;
    m_ViewportScaleCurrentFrame.y = (float)m_ActualHeight / maxHeight;

    screenSize = new Vector4(screenWidth, screenHeight, 1.0f / screenWidth, 1.0f / screenHeight);
    screenParams = new Vector4(screenSize.x, screenSize.y, 1 + screenSize.z, 1 + screenSize.w);

    if (vlSys != null)
    {
        vlSys.UpdatePerCameraData(this);
    }
}
// Pass all the systems that may want to update per-camera data here.
// That way you will never update an HDCamera and forget to update the dependent system.
/// <summary>
/// Per-frame refresh of camera-dependent state: anti-aliasing mode selection,
/// history buffer (re)allocation, TAA jitter, mono and per-eye stereo matrices,
/// camera-relative rendering adjustments, z-buffer/projection parameters,
/// frustum planes, viewport/RTHandle sizes and volume parameters.
/// NOTE(review): statement order matters throughout — previous-frame data is
/// captured before current-frame data is written, and history sizes are
/// sampled both before and after m_HistoryRTSystem.Swap().
/// </summary>
/// <param name="currentFrameSettings">Frame settings to apply to this camera this frame.</param>
/// <param name="vlSys">Volumetric lighting system whose per-camera data is kept in sync.</param>
/// <param name="msaaSamples">MSAA sample count used as the RTHandle reference.</param>
public void Update(FrameSettings currentFrameSettings, VolumetricLightingSystem vlSys, MSAASamples msaaSamples)
{
    // store a shortcut on HDAdditionalCameraData (done here and not in the constructor as
    // we don't create HDCamera at every frame and user can change the HDAdditionalData later (Like when they create a new scene).
    m_AdditionalCameraData = camera.GetComponent<HDAdditionalCameraData>();

    m_frameSettings = currentFrameSettings;

    // Handle post-process AA
    //  - If post-processing is disabled all together, no AA
    //  - In scene view, only enable TAA if animated materials are enabled
    //  - Else just use the currently set AA mode on the camera
    {
        if (!m_frameSettings.IsEnabled(FrameSettingsField.Postprocess) || !CoreUtils.ArePostProcessesEnabled(camera))
        {
            antialiasing = AntialiasingMode.None;
        }
#if UNITY_EDITOR
        else if (camera.cameraType == CameraType.SceneView)
        {
            var mode = HDRenderPipelinePreferences.sceneViewAntialiasing;

            // TAA needs animated materials to resolve correctly in the scene view.
            if (mode == AntialiasingMode.TemporalAntialiasing && !CoreUtils.AreAnimatedMaterialsEnabled(camera))
            {
                antialiasing = AntialiasingMode.None;
            }
            else
            {
                antialiasing = mode;
            }
        }
#endif
        else if (m_AdditionalCameraData != null)
        {
            antialiasing = m_AdditionalCameraData.antialiasing;
        }
        else
        {
            antialiasing = AntialiasingMode.None;
        }
    }

    // Handle memory allocation.
    {
        bool isColorPyramidHistoryRequired = m_frameSettings.IsEnabled(FrameSettingsField.SSR); // TODO: TAA as well
        bool isVolumetricHistoryRequired = m_frameSettings.IsEnabled(FrameSettingsField.Volumetrics) && m_frameSettings.IsEnabled(FrameSettingsField.ReprojectionForVolumetrics);

        int numColorPyramidBuffersRequired = isColorPyramidHistoryRequired ? 2 : 1; // TODO: 1 -> 0
        int numVolumetricBuffersRequired = isVolumetricHistoryRequired ? 2 : 0; // History + feedback

        // Reallocate only when the requirement changed; this nukes ALL history.
        if ((numColorPyramidBuffersAllocated != numColorPyramidBuffersRequired) || (numVolumetricBuffersAllocated != numVolumetricBuffersRequired))
        {
            // Reinit the system.
            colorPyramidHistoryIsValid = false;
            vlSys.DeinitializePerCameraData(this);

            // The history system only supports the "nuke all" option.
            m_HistoryRTSystem.Dispose();
            m_HistoryRTSystem = new BufferedRTHandleSystem();

            if (numColorPyramidBuffersRequired != 0)
            {
                AllocHistoryFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain, HistoryBufferAllocatorFunction, numColorPyramidBuffersRequired);
                colorPyramidHistoryIsValid = false;
            }

            vlSys.InitializePerCameraData(this, numVolumetricBuffersRequired);

            // Mark as init.
            numColorPyramidBuffersAllocated = numColorPyramidBuffersRequired;
            numVolumetricBuffersAllocated = numVolumetricBuffersRequired;
        }
    }

    // If TAA is enabled projMatrix will hold a jittered projection matrix. The original,
    // non-jittered projection matrix can be accessed via nonJitteredProjMatrix.
    bool taaEnabled = antialiasing == AntialiasingMode.TemporalAntialiasing;

    if (!taaEnabled)
    {
        taaFrameIndex = 0;
        taaJitter = Vector4.zero;
    }

    var nonJitteredCameraProj = camera.projectionMatrix;
    var cameraProj = taaEnabled ? GetJitteredProjectionMatrix(nonJitteredCameraProj) : nonJitteredCameraProj;

    // The actual projection matrix used in shaders is actually massaged a bit to work across all platforms
    // (different Z value ranges etc.)
    var gpuProj = GL.GetGPUProjectionMatrix(cameraProj, true); // Had to change this from 'false'
    var gpuView = camera.worldToCameraMatrix;
    var gpuNonJitteredProj = GL.GetGPUProjectionMatrix(nonJitteredCameraProj, true);

    // Update viewport sizes.
    m_ViewportSizePrevFrame = new Vector2Int(m_ActualWidth, m_ActualHeight);
    // Clamp to 1 so downstream 1/width, 1/height computations never divide by zero.
    m_ActualWidth = Math.Max(camera.pixelWidth, 1);
    m_ActualHeight = Math.Max(camera.pixelHeight, 1);

    Vector2Int nonScaledSize = new Vector2Int(m_ActualWidth, m_ActualHeight);
    if (isMainGameView)
    {
        // Dynamic resolution: render at the scaled size, but keep the original
        // size around for RTHandle reference sizing below.
        Vector2Int scaledSize = HDDynamicResolutionHandler.instance.GetRTHandleScale(new Vector2Int(camera.pixelWidth, camera.pixelHeight));
        nonScaledSize = HDDynamicResolutionHandler.instance.cachedOriginalSize;
        m_ActualWidth = scaledSize.x;
        m_ActualHeight = scaledSize.y;
    }

    var screenWidth = m_ActualWidth;
    var screenHeight = m_ActualHeight;

    textureWidthScaling = new Vector4(1.0f, 1.0f, 0.0f, 0.0f);

    numEyes = camera.stereoEnabled ? (uint)2 : (uint)1; // TODO VR: Generalize this when support for >2 eyes comes out with XR SDK

    if (camera.stereoEnabled)
    {
        if (XRGraphics.stereoRenderingMode == XRGraphics.StereoRenderingMode.SinglePass)
        {
            textureWidthScaling = new Vector4(2.0f, 0.5f, 0.0f, 0.0f);
        }

        // Capture each eye's previous-frame view-proj & position before
        // ConfigureStereoMatrices() overwrites the current ones below.
        for (uint eyeIndex = 0; eyeIndex < 2; eyeIndex++)
        {
            // For VR, TAA proj matrices don't need to be jittered
            var currProjStereo = camera.GetStereoProjectionMatrix((Camera.StereoscopicEye)eyeIndex);
            var gpuCurrProjStereo = GL.GetGPUProjectionMatrix(currProjStereo, true);
            var gpuCurrViewStereo = camera.GetStereoViewMatrix((Camera.StereoscopicEye)eyeIndex);

            if (ShaderConfig.s_CameraRelativeRendering != 0)
            {
                // Zero out the translation component.
                gpuCurrViewStereo.SetColumn(3, new Vector4(0, 0, 0, 1));
            }
            var gpuCurrVPStereo = gpuCurrProjStereo * gpuCurrViewStereo;

            // A camera could be rendered multiple times per frame, only updates the previous view proj & pos if needed
            if (m_LastFrameActive != Time.frameCount)
            {
                if (isFirstFrame)
                {
                    // No history yet: seed the previous-frame data with the current frame.
                    prevWorldSpaceCameraPosStereo[eyeIndex] = gpuCurrViewStereo.inverse.GetColumn(3);
                    prevViewProjMatrixStereo[eyeIndex] = gpuCurrVPStereo;
                }
                else
                {
                    prevWorldSpaceCameraPosStereo[eyeIndex] = worldSpaceCameraPosStereo[eyeIndex];
                    prevViewProjMatrixStereo[eyeIndex] = GetViewProjMatrixStereo(eyeIndex); // Grabbing this before ConfigureStereoMatrices updates view/proj
                }

                isFirstFrame = false;
            }
        }
        // XRTODO: fix this
        isFirstFrame = true; // So that mono vars can still update when stereo active

        // XRTODO: remove once SPI is working
        if (XRGraphics.stereoRenderingMode == XRGraphics.StereoRenderingMode.SinglePass)
        {
            // Single-pass stereo uses the XR eye texture size instead of the camera's.
            Debug.Assert(HDDynamicResolutionHandler.instance.SoftwareDynamicResIsEnabled() == false);

            var xrDesc = XRGraphics.eyeTextureDesc;
            nonScaledSize.x = screenWidth = m_ActualWidth = xrDesc.width;
            nonScaledSize.y = screenHeight = m_ActualHeight = xrDesc.height;
        }
    }

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        // Zero out the translation component.
        gpuView.SetColumn(3, new Vector4(0, 0, 0, 1));
    }

    var gpuVP = gpuNonJitteredProj * gpuView;

    // A camera could be rendered multiple times per frame, only updates the previous view proj & pos if needed
    // Note: if your first rendered view during the frame is not the Game view, everything breaks.
    if (m_LastFrameActive != Time.frameCount)
    {
        if (isFirstFrame)
        {
            // No history yet: seed the previous-frame data with the current frame.
            prevWorldSpaceCameraPos = camera.transform.position;
            prevViewProjMatrix = gpuVP;
        }
        else
        {
            prevWorldSpaceCameraPos = worldSpaceCameraPos;
            prevViewProjMatrix = nonJitteredViewProjMatrix;
            prevViewProjMatrixNoCameraTrans = prevViewProjMatrix;
        }

        isFirstFrame = false;
    }

    // In stereo, this corresponds to the center eye position
    worldSpaceCameraPos = camera.transform.position;

    // Rotation derived from the frame index in quarter-turn (pi/2) steps.
    taaFrameRotation = new Vector2(Mathf.Sin(taaFrameIndex * (0.5f * Mathf.PI)), Mathf.Cos(taaFrameIndex * (0.5f * Mathf.PI)));

    viewMatrix = gpuView;
    projMatrix = gpuProj;
    nonJitteredProjMatrix = gpuNonJitteredProj;

    ConfigureStereoMatrices();

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        // prevWorldSpaceCameraPos is repurposed here as the camera displacement since last frame.
        prevWorldSpaceCameraPos = worldSpaceCameraPos - prevWorldSpaceCameraPos;
        // This fixes issue with cameraDisplacement stacking in prevViewProjMatrix when same camera renders multiple times each logical frame
        // causing glitchy motion blur when editor paused.
        if (m_LastFrameActive != Time.frameCount)
        {
            Matrix4x4 cameraDisplacement = Matrix4x4.Translate(prevWorldSpaceCameraPos);
            prevViewProjMatrix *= cameraDisplacement; // Now prevViewProjMatrix correctly transforms this frame's camera-relative positionWS
        }
    }
    else
    {
        Matrix4x4 noTransViewMatrix = camera.worldToCameraMatrix;
        noTransViewMatrix.SetColumn(3, new Vector4(0, 0, 0, 1));
        prevViewProjMatrixNoCameraTrans = nonJitteredProjMatrix * noTransViewMatrix;
    }

    float n = camera.nearClipPlane;
    float f = camera.farClipPlane;

    // Analyze the projection matrix.
    // p[2][3] = (reverseZ ? 1 : -1) * (depth_0_1 ? 1 : 2) * (f * n) / (f - n)
    float scale = projMatrix[2, 3] / (f * n) * (f - n);
    bool depth_0_1 = Mathf.Abs(scale) < 1.5f;
    bool reverseZ = scale > 0;
    bool flipProj = projMatrix.inverse.MultiplyPoint(new Vector3(0, 1, 0)).y < 0;

    // http://www.humus.name/temp/Linearize%20depth.txt
    if (reverseZ)
    {
        zBufferParams = new Vector4(-1 + f / n, 1, -1 / f + 1 / n, 1 / f);
    }
    else
    {
        zBufferParams = new Vector4(1 - f / n, f / n, 1 / f - 1 / n, 1 / n);
    }

    projectionParams = new Vector4(flipProj ? -1 : 1, n, f, 1.0f / f);

    float orthoHeight = camera.orthographic ? 2 * camera.orthographicSize : 0;
    float orthoWidth = orthoHeight * camera.aspect;
    unity_OrthoParams = new Vector4(orthoWidth, orthoHeight, 0, camera.orthographic ? 1 : 0);

    Frustum.Create(frustum, viewProjMatrix, depth_0_1, reverseZ);

    // Left, right, top, bottom, near, far.
    for (int i = 0; i < 6; i++)
    {
        frustumPlaneEquations[i] = new Vector4(frustum.planes[i].normal.x, frustum.planes[i].normal.y, frustum.planes[i].normal.z, frustum.planes[i].distance);
    }

    m_LastFrameActive = Time.frameCount;

    // Capture the pre-Swap() history sizes so reallocation can be detected below.
    // TODO: cache this, or make the history system spill the beans...
    Vector2Int prevColorPyramidBufferSize = Vector2Int.zero;

    if (numColorPyramidBuffersAllocated > 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;
        prevColorPyramidBufferSize.x = rt.width;
        prevColorPyramidBufferSize.y = rt.height;
    }

    // TODO: cache this, or make the history system spill the beans...
    Vector3Int prevVolumetricBufferSize = Vector3Int.zero;

    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;
        prevVolumetricBufferSize.x = rt.width;
        prevVolumetricBufferSize.y = rt.height;
        prevVolumetricBufferSize.z = rt.volumeDepth;
    }

    m_msaaSamples = msaaSamples;
    // Here we use the non scaled resolution for the RTHandleSystem ref size because we assume that at some point we will need full resolution anyway.
    // This is also useful because we have some RT after final up-rez that will need the full size.
    RTHandles.SetReferenceSize(nonScaledSize.x, nonScaledSize.y, m_msaaSamples);
    m_HistoryRTSystem.SetReferenceSize(nonScaledSize.x, nonScaledSize.y, m_msaaSamples);
    m_HistoryRTSystem.Swap();

    // Compare post-Swap() sizes with the ones captured above; a mismatch means
    // the history RT was reallocated and its content is garbage.
    Vector3Int currColorPyramidBufferSize = Vector3Int.zero;

    if (numColorPyramidBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;
        currColorPyramidBufferSize.x = rt.width;
        currColorPyramidBufferSize.y = rt.height;

        if ((currColorPyramidBufferSize.x != prevColorPyramidBufferSize.x) || (currColorPyramidBufferSize.y != prevColorPyramidBufferSize.y))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            colorPyramidHistoryIsValid = false;
        }
    }

    Vector3Int currVolumetricBufferSize = Vector3Int.zero;

    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;
        currVolumetricBufferSize.x = rt.width;
        currVolumetricBufferSize.y = rt.height;
        currVolumetricBufferSize.z = rt.volumeDepth;

        if ((currVolumetricBufferSize.x != prevVolumetricBufferSize.x) || (currVolumetricBufferSize.y != prevVolumetricBufferSize.y) || (currVolumetricBufferSize.z != prevVolumetricBufferSize.z))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            volumetricHistoryIsValid = false;
        }
    }

    // Viewport scales are expressed relative to the (possibly larger) RTHandle max size.
    int maxWidth = RTHandles.maxWidth;
    int maxHeight = RTHandles.maxHeight;

    Vector2 rcpTextureSize = Vector2.one / new Vector2(maxWidth, maxHeight);

    m_ViewportScalePreviousFrame = m_ViewportSizePrevFrame * rcpTextureSize;
    m_ViewportScaleCurrentFrame = new Vector2Int(m_ActualWidth, m_ActualHeight) * rcpTextureSize;

    screenSize = new Vector4(screenWidth, screenHeight, 1.0f / screenWidth, 1.0f / screenHeight);
    screenParams = new Vector4(screenSize.x, screenSize.y, 1 + screenSize.z, 1 + screenSize.w);

    finalViewport = new Rect(camera.pixelRect.x, camera.pixelRect.y, nonScaledSize.x, nonScaledSize.y);

    if (vlSys != null)
    {
        vlSys.UpdatePerCameraData(this);
    }

    UpdateVolumeParameters();
}
// Pass all the systems that may want to update per-camera data here.
// That way you will never update an HDCamera and forget to update the dependent system.
/// <summary>
/// Per-frame refresh of all camera-derived state: history buffer (re)allocation, view/projection
/// matrices (mono and stereo), TAA jitter, depth/ortho shader parameters, frustum planes and
/// viewport/RT-scale bookkeeping. Must be called once per rendered frame before rendering.
/// </summary>
/// <param name="currentFrameSettings">Aggregated frame settings driving feature toggles (SSR, volumetrics, postprocess).</param>
/// <param name="postProcessLayer">Post-process layer; queried for TAA state and jittered projection.</param>
/// <param name="vlSys">Volumetric lighting system whose per-camera data is kept in sync here.</param>
/// <param name="msaaSamples">MSAA sample count forwarded to the RT handle systems.</param>
public void Update(FrameSettings currentFrameSettings, PostProcessLayer postProcessLayer, VolumetricLightingSystem vlSys, MSAASamples msaaSamples)
{
    // store a shortcut on HDAdditionalCameraData (done here and not in the constructor as
    // we don't create HDCamera at every frame and user can change the HDAdditionalData later (Like when they create a new scene).
    m_AdditionalCameraData = camera.GetComponent <HDAdditionalCameraData>();

    m_frameSettings = currentFrameSettings;

    // Handle memory allocation.
    {
        bool isColorPyramidHistoryRequired = m_frameSettings.enableSSR; // TODO: TAA as well
        bool isVolumetricHistoryRequired = m_frameSettings.enableVolumetrics && m_frameSettings.enableReprojectionForVolumetrics;

        int numColorPyramidBuffersRequired = isColorPyramidHistoryRequired ? 2 : 1; // TODO: 1 -> 0
        int numVolumetricBuffersRequired = isVolumetricHistoryRequired ? 2 : 0; // History + feedback

        // Reallocate only when the required buffer counts changed since last frame.
        if ((numColorPyramidBuffersAllocated != numColorPyramidBuffersRequired) || (numVolumetricBuffersAllocated != numVolumetricBuffersRequired))
        {
            // Reinit the system.
            colorPyramidHistoryIsValid = false;
            vlSys.DeinitializePerCameraData(this);

            // The history system only supports the "nuke all" option.
            m_HistoryRTSystem.Dispose();
            m_HistoryRTSystem = new BufferedRTHandleSystem();

            if (numColorPyramidBuffersRequired != 0)
            {
                AllocHistoryFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain, HistoryBufferAllocatorFunction, numColorPyramidBuffersRequired);
                colorPyramidHistoryIsValid = false;
            }

            vlSys.InitializePerCameraData(this, numVolumetricBuffersRequired);

            // Mark as init.
            numColorPyramidBuffersAllocated = numColorPyramidBuffersRequired;
            numVolumetricBuffersAllocated = numVolumetricBuffersRequired;
        }
    }

    // In stereo, this corresponds to the center eye position
    var pos = camera.transform.position;
    worldSpaceCameraPos = pos;

    // If TAA is enabled projMatrix will hold a jittered projection matrix. The original,
    // non-jittered projection matrix can be accessed via nonJitteredProjMatrix.
    bool taaEnabled = camera.cameraType == CameraType.Game && HDUtils.IsTemporalAntialiasingActive(postProcessLayer) && m_frameSettings.enablePostprocess;

    var nonJitteredCameraProj = camera.projectionMatrix;
    var cameraProj = taaEnabled ? postProcessLayer.temporalAntialiasing.GetJitteredProjectionMatrix(camera) : nonJitteredCameraProj;

    // The actual projection matrix used in shaders is actually massaged a bit to work across all platforms
    // (different Z value ranges etc.)
    var gpuProj = GL.GetGPUProjectionMatrix(cameraProj, true); // Had to change this from 'false'
    var gpuView = camera.worldToCameraMatrix;
    var gpuNonJitteredProj = GL.GetGPUProjectionMatrix(nonJitteredCameraProj, true);

    // Update viewport sizes.
    m_ViewportSizePrevFrame = new Vector2Int(m_ActualWidth, m_ActualHeight);
    m_ActualWidth = camera.pixelWidth;
    m_ActualHeight = camera.pixelHeight;
    var screenWidth = m_ActualWidth;
    var screenHeight = m_ActualHeight;

    textureWidthScaling = new Vector4(1.0f, 1.0f, 0.0f, 0.0f);
    numEyes = camera.stereoEnabled ? (uint)2 : (uint)1; // TODO VR: Generalize this when support for >2 eyes comes out with XR SDK

    if (camera.stereoEnabled)
    {
        textureWidthScaling = new Vector4(2.0f, 0.5f, 0.0f, 0.0f);
        for (uint eyeIndex = 0; eyeIndex < 2; eyeIndex++)
        {
            // For VR, TAA proj matrices don't need to be jittered
            var currProjStereo = camera.GetStereoProjectionMatrix((Camera.StereoscopicEye)eyeIndex);
            var gpuCurrProjStereo = GL.GetGPUProjectionMatrix(currProjStereo, true);
            var gpuCurrViewStereo = camera.GetStereoViewMatrix((Camera.StereoscopicEye)eyeIndex);
            if (ShaderConfig.s_CameraRelativeRendering != 0)
            {
                // Zero out the translation component.
                gpuCurrViewStereo.SetColumn(3, new Vector4(0, 0, 0, 1));
            }
            var gpuCurrVPStereo = gpuCurrProjStereo * gpuCurrViewStereo;

            // A camera could be rendered multiple times per frame, only updates the previous view proj & pos if needed
            if (m_LastFrameActive != Time.frameCount)
            {
                if (isFirstFrame)
                {
                    prevViewProjMatrixStereo[eyeIndex] = gpuCurrVPStereo;
                }
                else
                {
                    prevViewProjMatrixStereo[eyeIndex] = GetViewProjMatrixStereo(eyeIndex); // Grabbing this before ConfigureStereoMatrices updates view/proj
                }

                isFirstFrame = false;
            }
        }
        isFirstFrame = true; // So that mono vars can still update when stereo active

        // In stereo the render size comes from the XR eye texture, not from camera.pixelWidth/Height.
        screenWidth = XRGraphics.eyeTextureWidth;
        screenHeight = XRGraphics.eyeTextureHeight;

        var xrDesc = XRGraphics.eyeTextureDesc;
        m_ActualWidth = xrDesc.width;
        m_ActualHeight = xrDesc.height;
    }

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        // Zero out the translation component.
        gpuView.SetColumn(3, new Vector4(0, 0, 0, 1));
    }
    var gpuVP = gpuNonJitteredProj * gpuView;

    // A camera could be rendered multiple times per frame, only updates the previous view proj & pos if needed
    if (m_LastFrameActive != Time.frameCount)
    {
        if (isFirstFrame)
        {
            // First frame ever: no valid previous frame data, seed it with the current values.
            prevCameraPos = pos;
            prevViewProjMatrix = gpuVP;
        }
        else
        {
            prevCameraPos = cameraPos;
            prevViewProjMatrix = nonJitteredViewProjMatrix;
        }

        isFirstFrame = false;
    }

    taaFrameIndex = taaEnabled ? (uint)postProcessLayer.temporalAntialiasing.sampleIndex : 0;
    taaFrameRotation = new Vector2(Mathf.Sin(taaFrameIndex * (0.5f * Mathf.PI)), Mathf.Cos(taaFrameIndex * (0.5f * Mathf.PI)));

    viewMatrix = gpuView;
    projMatrix = gpuProj;
    nonJitteredProjMatrix = gpuNonJitteredProj;
    cameraPos = pos;

    ConfigureStereoMatrices();

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        Matrix4x4 cameraDisplacement = Matrix4x4.Translate(cameraPos - prevCameraPos); // Non-camera-relative positions
        prevViewProjMatrix *= cameraDisplacement; // Now prevViewProjMatrix correctly transforms this frame's camera-relative positionWS
    }

    float n = camera.nearClipPlane;
    float f = camera.farClipPlane;

    // Analyze the projection matrix.
    // p[2][3] = (reverseZ ? 1 : -1) * (depth_0_1 ? 1 : 2) * (f * n) / (f - n)
    float scale = projMatrix[2, 3] / (f * n) * (f - n);
    bool depth_0_1 = Mathf.Abs(scale) < 1.5f;
    bool reverseZ = scale > 0;
    bool flipProj = projMatrix.inverse.MultiplyPoint(new Vector3(0, 1, 0)).y < 0;

    // http://www.humus.name/temp/Linearize%20depth.txt
    if (reverseZ)
    {
        zBufferParams = new Vector4(-1 + f / n, 1, -1 / f + 1 / n, 1 / f);
    }
    else
    {
        zBufferParams = new Vector4(1 - f / n, f / n, 1 / f - 1 / n, 1 / n);
    }

    projectionParams = new Vector4(flipProj ? -1 : 1, n, f, 1.0f / f);

    float orthoHeight = camera.orthographic ? 2 * camera.orthographicSize : 0;
    float orthoWidth = orthoHeight * camera.aspect;
    unity_OrthoParams = new Vector4(orthoWidth, orthoHeight, 0, camera.orthographic ? 1 : 0);

    Frustum.Create(frustum, viewProjMatrix, depth_0_1, reverseZ);

    // Left, right, top, bottom, near, far.
    for (int i = 0; i < 6; i++)
    {
        frustumPlaneEquations[i] = new Vector4(frustum.planes[i].normal.x, frustum.planes[i].normal.y, frustum.planes[i].normal.z, frustum.planes[i].distance);
    }

    m_LastFrameActive = Time.frameCount;

    // Capture history RT sizes BEFORE the reference-size update / swap below, so that a
    // reallocation can be detected by comparing sizes afterwards.
    // TODO: cache this, or make the history system spill the beans...
    Vector2Int prevColorPyramidBufferSize = Vector2Int.zero;

    if (numColorPyramidBuffersAllocated > 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;

        prevColorPyramidBufferSize.x = rt.width;
        prevColorPyramidBufferSize.y = rt.height;
    }

    // TODO: cache this, or make the history system spill the beans...
    Vector3Int prevVolumetricBufferSize = Vector3Int.zero;

    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;

        prevVolumetricBufferSize.x = rt.width;
        prevVolumetricBufferSize.y = rt.height;
        prevVolumetricBufferSize.z = rt.volumeDepth;
    }

    // Unfortunately sometime (like in the HDCameraEditor) HDUtils.hdrpSettings can be null because of scripts that change the current pipeline...
    m_msaaSamples = msaaSamples;
    RTHandles.SetReferenceSize(m_ActualWidth, m_ActualHeight, m_msaaSamples);
    m_HistoryRTSystem.SetReferenceSize(m_ActualWidth, m_ActualHeight, m_msaaSamples);
    m_HistoryRTSystem.Swap();

    Vector3Int currColorPyramidBufferSize = Vector3Int.zero;

    if (numColorPyramidBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.ColorBufferMipChain).rt;

        currColorPyramidBufferSize.x = rt.width;
        currColorPyramidBufferSize.y = rt.height;

        if ((currColorPyramidBufferSize.x != prevColorPyramidBufferSize.x) ||
            (currColorPyramidBufferSize.y != prevColorPyramidBufferSize.y))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            colorPyramidHistoryIsValid = false;
        }
    }

    Vector3Int currVolumetricBufferSize = Vector3Int.zero;

    if (numVolumetricBuffersAllocated != 0)
    {
        var rt = GetCurrentFrameRT((int)HDCameraFrameHistoryType.VolumetricLighting).rt;

        currVolumetricBufferSize.x = rt.width;
        currVolumetricBufferSize.y = rt.height;
        currVolumetricBufferSize.z = rt.volumeDepth;

        if ((currVolumetricBufferSize.x != prevVolumetricBufferSize.x) ||
            (currVolumetricBufferSize.y != prevVolumetricBufferSize.y) ||
            (currVolumetricBufferSize.z != prevVolumetricBufferSize.z))
        {
            // A reallocation has happened, so the new texture likely contains garbage.
            volumetricHistoryIsValid = false;
        }
    }

    // Viewport scales express (viewport size / RT size) because RT handles can be larger than the viewport.
    int maxWidth = RTHandles.maxWidth;
    int maxHeight = RTHandles.maxHeight;

    Vector2 rcpTextureSize = Vector2.one / new Vector2(maxWidth, maxHeight);

    m_ViewportScalePreviousFrame = m_ViewportSizePrevFrame * rcpTextureSize;
    m_ViewportScaleCurrentFrame = new Vector2Int(m_ActualWidth, m_ActualHeight) * rcpTextureSize;

    screenSize = new Vector4(screenWidth, screenHeight, 1.0f / screenWidth, 1.0f / screenHeight);
    screenParams = new Vector4(screenSize.x, screenSize.y, 1 + screenSize.z, 1 + screenSize.w);

    finalViewport = new Rect(camera.pixelRect.x, camera.pixelRect.y, nonScaledSize.x, nonScaledSize.y);

    if (vlSys != null)
    {
        vlSys.UpdatePerCameraData(this);
    }

    UpdateVolumeParameters();
}
// Pass all the systems that may want to update per-camera data here.
// That way you will never update an HDCamera and forget to update the dependent system.
/// <summary>
/// Per-frame refresh of camera-derived state: view/projection matrices (mono and stereo),
/// TAA jitter, depth/ortho shader parameters, frustum planes and viewport/RT-scale bookkeeping.
/// Must be called once per rendered frame before rendering.
/// </summary>
/// <param name="currentFrameSettings">Aggregated frame settings driving feature toggles (stereo, postprocess).</param>
/// <param name="postProcessLayer">Post-process layer; queried for TAA state and jittered projection.</param>
/// <param name="vlSys">Volumetric lighting system whose per-camera data is kept in sync here.</param>
/// <param name="msaaSamples">MSAA sample count forwarded to the RT handle systems.</param>
public void Update(FrameSettings currentFrameSettings, PostProcessLayer postProcessLayer, VolumetricLightingSystem vlSys, MSAASamples msaaSamples)
{
    // store a shortcut on HDAdditionalCameraData (done here and not in the constructor as
    // we don't create HDCamera at every frame and user can change the HDAdditionalData later (Like when they create a new scene).
    m_AdditionalCameraData = camera.GetComponent <HDAdditionalCameraData>();

    m_frameSettings = currentFrameSettings;

    // In stereo, this corresponds to the center eye position
    var pos = camera.transform.position;
    worldSpaceCameraPos = pos;

    // If TAA is enabled projMatrix will hold a jittered projection matrix. The original,
    // non-jittered projection matrix can be accessed via nonJitteredProjMatrix.
    bool taaEnabled = camera.cameraType == CameraType.Game && HDUtils.IsTemporalAntialiasingActive(postProcessLayer) && m_frameSettings.enablePostprocess;

    var nonJitteredCameraProj = camera.projectionMatrix;
    var cameraProj = taaEnabled ? postProcessLayer.temporalAntialiasing.GetJitteredProjectionMatrix(camera) : nonJitteredCameraProj;

    // The actual projection matrix used in shaders is actually massaged a bit to work across all platforms
    // (different Z value ranges etc.)
    var gpuProj = GL.GetGPUProjectionMatrix(cameraProj, true); // Had to change this from 'false'
    var gpuView = camera.worldToCameraMatrix;
    var gpuNonJitteredProj = GL.GetGPUProjectionMatrix(nonJitteredCameraProj, true);

    m_ActualWidth = camera.pixelWidth;
    m_ActualHeight = camera.pixelHeight;
    var screenWidth = m_ActualWidth;
    var screenHeight = m_ActualHeight;

    textureWidthScaling = new Vector4(1.0f, 1.0f, 0.0f, 0.0f);
    numEyes = m_frameSettings.enableStereo ? (uint)2 : (uint)1; // TODO VR: Generalize this when support for >2 eyes comes out with XR SDK

    if (m_frameSettings.enableStereo)
    {
        textureWidthScaling = new Vector4(2.0f, 0.5f, 0.0f, 0.0f);
        for (uint eyeIndex = 0; eyeIndex < 2; eyeIndex++)
        {
            // For VR, TAA proj matrices don't need to be jittered
            var currProjStereo = camera.GetStereoProjectionMatrix((Camera.StereoscopicEye)eyeIndex);
            var gpuCurrProjStereo = GL.GetGPUProjectionMatrix(currProjStereo, true);
            var gpuCurrViewStereo = camera.GetStereoViewMatrix((Camera.StereoscopicEye)eyeIndex);
            if (ShaderConfig.s_CameraRelativeRendering != 0)
            {
                // Zero out the translation component.
                gpuCurrViewStereo.SetColumn(3, new Vector4(0, 0, 0, 1));
            }
            var gpuCurrVPStereo = gpuCurrProjStereo * gpuCurrViewStereo;

            // A camera could be rendered multiple times per frame, only updates the previous view proj & pos if needed
            if (m_LastFrameActive != Time.frameCount)
            {
                if (isFirstFrame)
                {
                    prevViewProjMatrixStereo[eyeIndex] = gpuCurrVPStereo;
                }
                else
                {
                    prevViewProjMatrixStereo[eyeIndex] = GetViewProjMatrixStereo(eyeIndex); // Grabbing this before ConfigureStereoMatrices updates view/proj
                }

                isFirstFrame = false;
            }
        }
        isFirstFrame = true; // So that mono vars can still update when stereo active

        // In stereo the render size comes from the XR eye texture, not from camera.pixelWidth/Height.
        screenWidth = XRGraphicsConfig.eyeTextureWidth;
        screenHeight = XRGraphicsConfig.eyeTextureHeight;

        var xrDesc = XRGraphicsConfig.eyeTextureDesc;
        m_ActualWidth = xrDesc.width;
        m_ActualHeight = xrDesc.height;

        ConfigureStereoMatrices();
    }

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        // Zero out the translation component.
        gpuView.SetColumn(3, new Vector4(0, 0, 0, 1));
    }
    var gpuVP = gpuNonJitteredProj * gpuView;

    // A camera could be rendered multiple times per frame, only updates the previous view proj & pos if needed
    if (m_LastFrameActive != Time.frameCount)
    {
        if (isFirstFrame)
        {
            // First frame ever: no valid previous frame data, seed it with the current values.
            prevCameraPos = pos;
            prevViewProjMatrix = gpuVP;
        }
        else
        {
            prevCameraPos = cameraPos;
            prevViewProjMatrix = nonJitteredViewProjMatrix;
        }

        isFirstFrame = false;
    }

    taaFrameIndex = taaEnabled ? (uint)postProcessLayer.temporalAntialiasing.sampleIndex : 0;
    taaFrameRotation = new Vector2(Mathf.Sin(taaFrameIndex * (0.5f * Mathf.PI)), Mathf.Cos(taaFrameIndex * (0.5f * Mathf.PI)));

    viewMatrix = gpuView;
    projMatrix = gpuProj;
    nonJitteredProjMatrix = gpuNonJitteredProj;
    cameraPos = pos;

    if (!m_frameSettings.enableStereo)
    {
        // TODO VR: Current solution for compute shaders grabs matrices from
        // stereo matrices even when not rendering stereo in order to reduce shader variants.
        // After native fix for compute shader keywords is completed, qualify this with stereoEnabled.
        viewMatrixStereo[0] = viewMatrix;
        projMatrixStereo[0] = projMatrix;
    }

    if (ShaderConfig.s_CameraRelativeRendering != 0)
    {
        Matrix4x4 cameraDisplacement = Matrix4x4.Translate(cameraPos - prevCameraPos); // Non-camera-relative positions
        prevViewProjMatrix *= cameraDisplacement; // Now prevViewProjMatrix correctly transforms this frame's camera-relative positionWS
    }

    float n = camera.nearClipPlane;
    float f = camera.farClipPlane;

    // Analyze the projection matrix.
    // p[2][3] = (reverseZ ? 1 : -1) * (depth_0_1 ? 1 : 2) * (f * n) / (f - n)
    float scale = projMatrix[2, 3] / (f * n) * (f - n);
    bool depth_0_1 = Mathf.Abs(scale) < 1.5f;
    bool reverseZ = scale > 0;
    bool flipProj = projMatrix.inverse.MultiplyPoint(new Vector3(0, 1, 0)).y < 0;

    // http://www.humus.name/temp/Linearize%20depth.txt
    if (reverseZ)
    {
        zBufferParams = new Vector4(-1 + f / n, 1, -1 / f + 1 / n, 1 / f);
    }
    else
    {
        zBufferParams = new Vector4(1 - f / n, f / n, 1 / f - 1 / n, 1 / n);
    }

    projectionParams = new Vector4(flipProj ? -1 : 1, n, f, 1.0f / f);

    float orthoHeight = camera.orthographic ? 2 * camera.orthographicSize : 0;
    float orthoWidth = orthoHeight * camera.aspect;
    unity_OrthoParams = new Vector4(orthoWidth, orthoHeight, 0, camera.orthographic ? 1 : 0);

    Frustum.Create(frustum, viewProjMatrix, depth_0_1, reverseZ);

    // Left, right, top, bottom, near, far.
    for (int i = 0; i < 6; i++)
    {
        frustumPlaneEquations[i] = new Vector4(frustum.planes[i].normal.x, frustum.planes[i].normal.y, frustum.planes[i].normal.z, frustum.planes[i].distance);
    }

    m_LastFrameActive = Time.frameCount;

    // Record the RT size before the reference-size update so the previous-frame viewport
    // scale can be expressed relative to the (possibly grown) current texture size.
    Vector2 lastTextureSize = new Vector2(RTHandles.maxWidth, RTHandles.maxHeight);

    // Unfortunately sometime (like in the HDCameraEditor) HDUtils.hdrpSettings can be null because of scripts that change the current pipeline...
    m_msaaSamples = msaaSamples;
    RTHandles.SetReferenceSize(m_ActualWidth, m_ActualHeight, m_msaaSamples);
    m_HistoryRTSystem.SetReferenceSize(m_ActualWidth, m_ActualHeight, m_msaaSamples);
    m_HistoryRTSystem.Swap();

    int maxWidth = RTHandles.maxWidth;
    int maxHeight = RTHandles.maxHeight;

    Vector2 lastByCurrentTextureSizeRatio = lastTextureSize / new Vector2(maxWidth, maxHeight);

    // Double-buffer. Note: this should be (LastViewportSize / CurrentTextureSize).
    m_ViewportScalePreviousFrame = m_ViewportScaleCurrentFrame * lastByCurrentTextureSizeRatio;
    m_ViewportScaleCurrentFrame.x = (float)m_ActualWidth / maxWidth;
    m_ViewportScaleCurrentFrame.y = (float)m_ActualHeight / maxHeight;

    screenSize = new Vector4(screenWidth, screenHeight, 1.0f / screenWidth, 1.0f / screenHeight);
    screenParams = new Vector4(screenSize.x, screenSize.y, 1 + screenSize.z, 1 + screenSize.w);

    if (vlSys != null)
    {
        vlSys.UpdatePerCameraData(this);
    }

    UpdateVolumeParameters();
}
// Note: this version is the one tested as there is issue getting HDRenderPipelineAsset in batchmode in unit test framework currently.
/// <summary>Same as FrameSettings.AggregateFrameSettings but keeps a history of the aggregation in a collection for DebugMenu.
/// Aggregation is default with override of the renderer, then sanitized depending on supported features of the hdrp asset. Then the DebugMenu override occurs.</summary>
/// <param name="aggregatedFrameSettings">The aggregated FrameSettings result.</param>
/// <param name="camera">The camera rendering.</param>
/// <param name="additionalData">Additional data of the camera rendering.</param>
/// <param name="defaultFrameSettings">Base framesettings to copy prior to any override.</param>
/// <param name="supportedFeatures">Currently supported features for the sanitization pass.</param>
public static void AggregateFrameSettings(ref FrameSettings aggregatedFrameSettings, Camera camera, HDAdditionalCameraData additionalData, ref FrameSettings defaultFrameSettings, RenderPipelineSettings supportedFeatures)
{
    FrameSettingsHistory history = new FrameSettingsHistory
    {
        camera = camera,
        defaultType = additionalData ? additionalData.defaultFrameSettings : FrameSettingsRenderType.Camera
    };

    // Start from the default settings, then apply the camera's custom overrides (if any).
    aggregatedFrameSettings = defaultFrameSettings;
    if (additionalData && additionalData.customRenderingSettings)
    {
        FrameSettings.Override(ref aggregatedFrameSettings, additionalData.renderingPathCustomFrameSettings, additionalData.renderingPathCustomFrameSettingsOverrideMask);
        history.customMask = additionalData.renderingPathCustomFrameSettingsOverrideMask;
    }
    history.overridden = aggregatedFrameSettings;
    FrameSettings.Sanitize(ref aggregatedFrameSettings, camera, supportedFeatures);

    // Single dictionary lookup (TryGetValue) instead of ContainsKey + repeated indexer accesses.
    bool hasHistory = frameSettingsHistory.TryGetValue(camera, out FrameSettingsHistory previousHistory);
    // The DebugMenu state must be reset whenever the sanitized settings changed (or on first sight of this camera).
    bool dirty = !hasHistory || previousHistory.sanitazed != aggregatedFrameSettings;

    history.sanitazed = aggregatedFrameSettings;
    if (dirty)
    {
        history.debug = history.sanitazed;
    }
    else
    {
        history.debug = previousHistory.debug;

        // Ensure user is not trying to activate unsupported settings in DebugMenu
        FrameSettings.Sanitize(ref history.debug, camera, supportedFeatures);
    }

    aggregatedFrameSettings = history.debug;
    frameSettingsHistory[camera] = history;
}
/// <summary>Same than FrameSettings.AggregateFrameSettings but keep history of agregation in a collection for DebugMenu. /// Aggregation is default with override of the renderer then sanitazed depending on supported features of hdrpasset. Then the DebugMenu override occurs.</summary> /// <param name="aggregatedFrameSettings">The aggregated FrameSettings result.</param> /// <param name="camera">The camera rendering.</param> /// <param name="additionalData">Additional data of the camera rendering.</param> /// <param name="hdrpAsset">HDRenderPipelineAsset contening default FrameSettings.</param> public static void AggregateFrameSettings(ref FrameSettings aggregatedFrameSettings, Camera camera, HDAdditionalCameraData additionalData, HDRenderPipelineAsset hdrpAsset) => AggregateFrameSettings( ref aggregatedFrameSettings, camera, additionalData, ref hdrpAsset.GetDefaultFrameSettings(additionalData?.defaultFrameSettings ?? FrameSettingsRenderType.Camera), //fallback on Camera for SceneCamera and PreviewCamera hdrpAsset.currentPlatformRenderPipelineSettings );