/// <summary> /// Uploads the attachment described by <paramref name="descriptor"/> to <c>Azure Blob Storage</c> without requiring a <see cref="CancellationToken"/>. /// </summary> /// <param name="container">A <see cref="IBlobContainer"/> instance.</param> /// <param name="descriptor">An attachment descriptor instance.</param> public static Task UploadAsync(this IBlobContainer container, AttachmentDescriptor descriptor) { if (container == null) { throw new ArgumentNullException(nameof(container)); } return(container.UploadAsync(descriptor, CancellationToken.None)); }
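For context, a minimal usage sketch of the extension above; the descriptor values are placeholders, and the assumption that AttachmentDescriptor.Value is settable is inferred from the other snippets in this listing, not confirmed by the library.

using System.Threading.Tasks;

internal static class UploadUsageSketch
{
    // Sketch only: assumes an IBlobContainer instance is supplied by the caller
    // (for example a BlobContainer wrapping a CloudBlobContainer, as in the tests below).
    public static Task UploadSampleAsync(IBlobContainer container)
    {
        var descriptor = new AttachmentDescriptor
        {
            Id = "UNIQUE",                     // placeholder id
            Name = "error.log",                // placeholder name
            Value = new byte[] { 0x01, 0x02 }  // placeholder payload; a settable Value is an assumption
        };

        // Forwards to UploadAsync(descriptor, CancellationToken.None).
        return container.UploadAsync(descriptor);
    }
}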
Task <Stream> OpenInlineData() { var base64String = AttachmentDescriptor.GetPrimitiveProperty <string>(DataPropertyName); return(Task.Factory.StartNew( () => { var inlineData = base64String.HasNoValue()? EmptyBuffer: Convert.FromBase64String(base64String); return new MemoryStream(inlineData) as Stream; })); }
public void GetFilepath_DescriptorNull() { // arrange AttachmentDescriptor descriptor = null; // act Action validate = () => descriptor.GetFilepath(); // assert Assert.Throws <ArgumentNullException>("descriptor", validate); }
/// <inheritdoc/> public void Enqueue(AttachmentDescriptor data) { if (data == null) { throw new ArgumentNullException(nameof(data)); } if (!_disposeToken.IsCancellationRequested) { _buffer.Enqueue(data); } }
/// <inheritdoc/> public void Enqueue(AttachmentDescriptor data) { if (data == null) { throw new ArgumentNullException(nameof(data)); } if (!_disposeToken.IsCancellationRequested) { Task.Run(() => _storage.EnqueueAsync(new[] { data })); } }
public async Task UploadAsync_ContainerNull() { // arrange IBlobContainer container = null; AttachmentDescriptor descriptor = new AttachmentDescriptor(); // act Func <Task> verify = () => container.UploadAsync(descriptor); // assert await Assert.ThrowsAsync <ArgumentNullException>("container", verify).ConfigureAwait(false); }
internal static int FindAttachmentDescriptorIndexInList(RenderTargetIdentifier target, AttachmentDescriptor[] attachmentDescriptors) { for (int i = 0; i < attachmentDescriptors.Length; i++) { AttachmentDescriptor att = attachmentDescriptors[i]; if (att.loadStoreTarget == target) { return(i); } } return(-1); }
public void Enqueue_DataNull() { // arrange ITransmissionStorage <AttachmentDescriptor> storage = new Mock <ITransmissionStorage <AttachmentDescriptor> >().Object; ITransmissionSender <AttachmentDescriptor> sender = new Mock <ITransmissionSender <AttachmentDescriptor> >().Object; ITelemetryAttachmentTransmitter transmitter = new BlobStorageTransmitter(storage, sender); AttachmentDescriptor data = null; // act Action verify = () => transmitter.Enqueue(data); // assert Assert.Throws <ArgumentNullException>("data", verify); }
public async Task EnqueueAsync_BatchOutOfRange() { // arrange string storagePath = "C:\\EnqueueAsync_BatchOutOfRange_Test"; BlobStorageTransmissionStorage storage = new BlobStorageTransmissionStorage(storagePath); AttachmentDescriptor[] batch = new AttachmentDescriptor[0]; // act Func <Task> verify = () => storage.EnqueueAsync(batch); // assert await Assert.ThrowsAsync <ArgumentOutOfRangeException>("batch", verify).ConfigureAwait(false); }
public void GetFilepath_Success() { // arrange AttachmentDescriptor descriptor = new AttachmentDescriptor { Id = "UNIQUE", Name = "561" }; // act string filepath = descriptor.GetFilepath(); // assert Assert.Equal("UNIQUE\\561", filepath); }
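The GetFilepath helper itself is not part of this listing; judging from the two tests above, it presumably combines Id and Name into a relative path, roughly as in the hypothetical reconstruction below.

using System;
using System.IO;

internal static class AttachmentDescriptorPathSketch
{
    // Hypothetical reconstruction of GetFilepath, inferred only from the tests above:
    // a null descriptor throws ArgumentNullException("descriptor"),
    // and Id = "UNIQUE", Name = "561" yields "UNIQUE\561" on Windows.
    public static string GetFilepath(this AttachmentDescriptor descriptor)
    {
        if (descriptor == null)
        {
            throw new ArgumentNullException(nameof(descriptor));
        }

        return Path.Combine(descriptor.Id, descriptor.Name);
    }
}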
public void Enqueue_DataNull() { // arrange IMemoryBuffer <AttachmentDescriptor> buffer = new Mock <IMemoryBuffer <AttachmentDescriptor> >().Object; Mock <ITransmissionStorage <AttachmentDescriptor> > storage = CreateEmptyStorage(); ITransmissionSender <AttachmentDescriptor> sender = new Mock <ITransmissionSender <AttachmentDescriptor> >().Object; ITelemetryAttachmentTransmitter transmitter = new BlobStorageTransmitter(buffer, storage.Object, sender, default); AttachmentDescriptor data = null; // act Action verify = () => transmitter.Enqueue(data); // assert Assert.Throws <ArgumentNullException>("data", verify); }
public async Task UploadAsync_DescriptorNull() { // arrange CloudBlobContainer reference = CloudStorageAccount .Parse(Constants.FakeConnectionString) .CreateCloudBlobClient() .GetContainerReference("test-456"); BlobContainer container = new BlobContainer(reference); AttachmentDescriptor descriptor = null; // act Func <Task> verify = () => container.UploadAsync(descriptor); // assert await Assert.ThrowsAsync <ArgumentNullException>("descriptor", verify).ConfigureAwait(false); }
internal static int FindAttachmentDescriptorIndexInList(int attachmentIdx, AttachmentDescriptor attachmentDescriptor, AttachmentDescriptor[] attachmentDescriptors) { int existingAttachmentIndex = -1; for (int i = 0; i <= attachmentIdx; ++i) { AttachmentDescriptor att = attachmentDescriptors[i]; if (att.loadStoreTarget == attachmentDescriptor.loadStoreTarget && att.graphicsFormat == attachmentDescriptor.graphicsFormat) { existingAttachmentIndex = i; break; } } return(existingAttachmentIndex); }
public async Task UploadAsync_NoException() { // arrange Mock <IBlobContainer> container = new Mock <IBlobContainer>(); container .Setup(t => t.UploadAsync(It.IsAny <AttachmentDescriptor>(), It.IsAny <CancellationToken>())) .Returns(Task.FromResult(0)); AttachmentDescriptor descriptor = new AttachmentDescriptor(); // act Func <Task> verify = () => container.Object.UploadAsync(descriptor); // assert Assert.Null(await Record.ExceptionAsync(verify).ConfigureAwait(false)); }
public async Task SendAsync_BatchOutOfRange() { // arrange Mock <IBlobContainer> container = new Mock <IBlobContainer>(); container .Setup(t => t.UploadAsync(It.IsAny <AttachmentDescriptor>(), It.IsAny <CancellationToken>())) .Returns(Task.FromResult(0)); BlobStorageTransmissionSender sender = new BlobStorageTransmissionSender(container.Object, default); IAsyncEnumerable <AttachmentDescriptor> batch = new AttachmentDescriptor[0].ToAsyncEnumerable(); // act await sender.SendAsync(batch, default); // assert container.Verify(c => c.UploadAsync(It.IsAny <AttachmentDescriptor>(), default), Times.Never); }
/// <inheritdoc/> public async Task UploadAsync(AttachmentDescriptor descriptor, CancellationToken cancellationToken) { if (descriptor == null) { throw new ArgumentNullException(nameof(descriptor)); } try { await _container.GetBlockBlobReference(descriptor.GetFilepath()) .UploadFromByteArrayAsync(descriptor.Value, 0, descriptor.Value.Length, cancellationToken) .ConfigureAwait(false); } catch (Exception) { // todo: log via event provider } }
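The catch block above only carries a TODO; one possible way to surface the failure is an EventSource provider, sketched below (the provider name, event id, and call site are hypothetical and not part of the library).

using System.Diagnostics.Tracing;

// Hypothetical ETW provider; the library's real event provider is not shown in this listing.
[EventSource(Name = "Example-BlobStorage-Transmission")]
internal sealed class TransmissionEventSource : EventSource
{
    public static readonly TransmissionEventSource Log = new TransmissionEventSource();

    [Event(1, Level = EventLevel.Error)]
    public void AttachmentUploadFailed(string filepath, string message) => WriteEvent(1, filepath, message);
}

// The catch block could then become, for example:
// catch (Exception ex)
// {
//     TransmissionEventSource.Log.AttachmentUploadFailed(descriptor.GetFilepath(), ex.Message);
// }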
public async Task SendAsync_BatchOutOfRange() { // arrange Mock <IBlobContainer> container = new Mock <IBlobContainer>(); container .Setup(t => t.UploadAsync(It.IsAny <AttachmentDescriptor>(), It.IsAny <CancellationToken>())) .Returns(Task.FromResult(0)); BlobStorageTransmissionSender sender = new BlobStorageTransmissionSender(container.Object); AttachmentDescriptor[] batch = new AttachmentDescriptor[0]; // act Func <Task> verify = () => sender.SendAsync(batch); // assert await Assert.ThrowsAsync <ArgumentOutOfRangeException>("batch", verify).ConfigureAwait(false); }
internal void SetNativeRenderPassAttachmentList(ScriptableRenderPass renderPass, ref CameraData cameraData, RenderTargetIdentifier passColorAttachment, RenderTargetIdentifier passDepthAttachment, ClearFlag finalClearFlag, Color finalClearColor) { using (new ProfilingScope(null, Profiling.setAttachmentList)) { int currentPassIndex = renderPass.renderPassQueueIndex; Hash128 currentPassHash = m_PassIndexToPassHash[currentPassIndex]; int[] currentMergeablePasses = m_MergeableRenderPassesMap[currentPassHash]; // Skip if not the first pass if (currentMergeablePasses.First() != currentPassIndex) { return; } m_RenderPassesAttachmentCount[currentPassHash] = 0; int currentAttachmentIdx = 0; foreach (var passIdx in currentMergeablePasses) { if (passIdx == -1) { break; } ScriptableRenderPass pass = m_ActiveRenderPassQueue[passIdx]; for (int i = 0; i < pass.m_InputAttachmentIndices.Length; ++i) { pass.m_InputAttachmentIndices[i] = -1; } AttachmentDescriptor currentAttachmentDescriptor; var usesTargetTexture = cameraData.targetTexture != null; var depthOnly = renderPass.depthOnly || (usesTargetTexture && cameraData.targetTexture.graphicsFormat == GraphicsFormat.DepthAuto); // Offscreen depth-only cameras need this set explicitly if (depthOnly && usesTargetTexture) { if (cameraData.targetTexture.graphicsFormat == GraphicsFormat.DepthAuto && !pass.overrideCameraTarget) { passColorAttachment = new RenderTargetIdentifier(cameraData.targetTexture); } else { passColorAttachment = renderPass.colorAttachment; } currentAttachmentDescriptor = new AttachmentDescriptor(GraphicsFormat.DepthAuto); } else { currentAttachmentDescriptor = new AttachmentDescriptor(cameraData.cameraTargetDescriptor.graphicsFormat); } if (pass.overrideCameraTarget) { currentAttachmentDescriptor = new AttachmentDescriptor(pass.renderTargetFormat[0] != GraphicsFormat.None ? pass.renderTargetFormat[0] : GetDefaultGraphicsFormat(cameraData)); } var samples = pass.renderTargetSampleCount != -1 ? pass.renderTargetSampleCount : cameraData.cameraTargetDescriptor.msaaSamples; var colorAttachmentTarget = (depthOnly || passColorAttachment != BuiltinRenderTextureType.CameraTarget) ? passColorAttachment : (usesTargetTexture ? new RenderTargetIdentifier(cameraData.targetTexture.colorBuffer) : BuiltinRenderTextureType.CameraTarget); var depthAttachmentTarget = (passDepthAttachment != BuiltinRenderTextureType.CameraTarget) ? passDepthAttachment : (usesTargetTexture ? new RenderTargetIdentifier(cameraData.targetTexture.depthBuffer) : BuiltinRenderTextureType.Depth); // TODO: review the lastPassToBB logic to mak it work with merged passes // keep track if this is the current camera's last pass and the RT is the backbuffer (BuiltinRenderTextureType.CameraTarget) // knowing isLastPassToBB can help decide the optimal store action as it gives us additional information about the current frame bool isLastPassToBB = pass.isLastPass && (colorAttachmentTarget == BuiltinRenderTextureType.CameraTarget); currentAttachmentDescriptor.ConfigureTarget(colorAttachmentTarget, ((uint)finalClearFlag & (uint)ClearFlag.Color) == 0, !(samples > 1 && isLastPassToBB)); // TODO: this is redundant and is being setup for each attachment. Needs to be done only once per mergeable pass list (we need to make sure mergeable passes use the same depth!) 
m_ActiveDepthAttachmentDescriptor = new AttachmentDescriptor(GraphicsFormat.DepthAuto); m_ActiveDepthAttachmentDescriptor.ConfigureTarget(depthAttachmentTarget, ((uint)finalClearFlag & (uint)ClearFlag.Depth) == 0, !isLastPassToBB); if (finalClearFlag != ClearFlag.None) { // We don't clear color for Overlay render targets, however the pipeline sets up depth-only render passes as color attachments, which we do need to clear if ((cameraData.renderType != CameraRenderType.Overlay || depthOnly && ((uint)finalClearFlag & (uint)ClearFlag.Color) != 0)) { currentAttachmentDescriptor.ConfigureClear(finalClearColor, 1.0f, 0); } if (((uint)finalClearFlag & (uint)ClearFlag.Depth) != 0) { m_ActiveDepthAttachmentDescriptor.ConfigureClear(Color.black, 1.0f, 0); } } // resolving to the implicit color target's resolve surface TODO: handle m_CameraResolveTarget if present? if (samples > 1) { currentAttachmentDescriptor.ConfigureResolveTarget(colorAttachmentTarget); } int existingAttachmentIndex = FindAttachmentDescriptorIndexInList(currentAttachmentIdx, currentAttachmentDescriptor, m_ActiveColorAttachmentDescriptors); if (existingAttachmentIndex == -1) { // add a new attachment pass.m_InputAttachmentIndices[0] = currentAttachmentIdx; m_ActiveColorAttachmentDescriptors[currentAttachmentIdx] = currentAttachmentDescriptor; currentAttachmentIdx++; m_RenderPassesAttachmentCount[currentPassHash]++; } else { // attachment was already present pass.m_InputAttachmentIndices[0] = existingAttachmentIndex; } } } }
public static void Render(ScriptableRenderContext context, IEnumerable <Camera> cameras, SimpleRenderPipeline.Mode mode) { foreach (var camera in cameras) { // Culling ScriptableCullingParameters cullingParams; if (!camera.TryGetCullingParameters(out cullingParams)) { continue; } CullingResults cull = context.Cull(ref cullingParams); context.SetupCameraProperties(camera); AttachmentDescriptor color = new AttachmentDescriptor(RenderTextureFormat.ARGB32); AttachmentDescriptor depth = new AttachmentDescriptor(RenderTextureFormat.Depth); bool needsFinalBlit = camera.cameraType == CameraType.SceneView; RenderTargetIdentifier tmpBuf = new RenderTargetIdentifier("TempSurface"); if (needsFinalBlit) { using (var cmd = new CommandBuffer()) { cmd.GetTemporaryRT(Shader.PropertyToID("TempSurface"), camera.pixelWidth, camera.pixelHeight, 24, FilterMode.Bilinear, RenderTextureFormat.ARGB32); context.ExecuteCommandBuffer(cmd); } color.ConfigureTarget(tmpBuf, false, true); } else { color.ConfigureTarget(BuiltinRenderTextureType.CameraTarget, false, true); } // No configure target for depth means depth will be memoryless color.ConfigureClear(Color.blue / 3 + Color.red / 2); depth.ConfigureClear(Color.black, 1.0f, 0); using (var attachmentsDisposable = new NativeArray <AttachmentDescriptor>(2, Allocator.Temp)) { var attachments = attachmentsDisposable; const int depthIndex = 0, colorIndex = 1; attachments[depthIndex] = depth; attachments[colorIndex] = color; using (context.BeginScopedRenderPass(camera.pixelWidth, camera.pixelHeight, 1, attachments, depthIndex)) { var fs = new FilteringSettings(RenderQueueRange.opaque); if (mode == SimpleRenderPipeline.Mode.DepthPrepass) { var depthPrePasssettings = new DrawingSettings(new ShaderTagId("DepthPrepass"), new SortingSettings(camera)); using (var depthOnlyDisposable = new NativeArray <int>(0, Allocator.Temp)) { var depthArray = depthOnlyDisposable; using (context.BeginScopedSubPass(depthArray)) { context.DrawRenderers(cull, ref depthPrePasssettings, ref fs); } } var mainPasssettings = new DrawingSettings(new ShaderTagId("AfterZPrepass"), new SortingSettings(camera)); using (var colorsDisposable = new NativeArray <int>(1, Allocator.Temp)) { var colors = colorsDisposable; colors[0] = colorIndex; using (context.BeginScopedSubPass(colors)) { context.DrawRenderers(cull, ref mainPasssettings, ref fs); } } } else if (mode == SimpleRenderPipeline.Mode.OnePassAlphaTest) { var mainPasssettings = new DrawingSettings(new ShaderTagId("OnePassAlphaClip"), new SortingSettings(camera)); using (var colorsDisposable = new NativeArray <int>(1, Allocator.Temp)) { var colors = colorsDisposable; colors[0] = colorIndex; using (context.BeginScopedSubPass(colors)) { context.DrawRenderers(cull, ref mainPasssettings, ref fs); } } } else if (mode == SimpleRenderPipeline.Mode.OnePassAlphaBlend) { var sortingSettings = new SortingSettings(camera); sortingSettings.criteria = SortingCriteria.BackToFront; var mainPasssettings = new DrawingSettings(new ShaderTagId("OnePassAlphaBlend"), sortingSettings); using (var colorsDisposable = new NativeArray <int>(1, Allocator.Temp)) { var colors = colorsDisposable; colors[0] = colorIndex; using (context.BeginScopedSubPass(colors)) { context.DrawRenderers(cull, ref mainPasssettings, ref fs); } } } } } if (needsFinalBlit) { using (var cmd = new CommandBuffer()) { cmd.Blit(tmpBuf, new RenderTargetIdentifier(BuiltinRenderTextureType.CameraTarget)); context.ExecuteCommandBuffer(cmd); } } context.Submit(); } }
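For comparison, a stripped-down sketch of the same scoped render pass pattern used above, with a single color attachment and one subpass; the shader tag and drawing settings are placeholders, and culling plus SetupCameraProperties are assumed to have been done by the caller as in the example above.

using Unity.Collections;
using UnityEngine;
using UnityEngine.Rendering;

internal static class MinimalRenderPassSketch
{
    // Sketch only: one color attachment rendered straight to the camera target, no depth attachment.
    public static void Render(ScriptableRenderContext context, Camera camera, CullingResults cull)
    {
        var color = new AttachmentDescriptor(RenderTextureFormat.ARGB32);
        color.ConfigureTarget(BuiltinRenderTextureType.CameraTarget, false, true);
        color.ConfigureClear(Color.black);

        using (var attachmentsDisposable = new NativeArray<AttachmentDescriptor>(1, Allocator.Temp))
        {
            var attachments = attachmentsDisposable;
            attachments[0] = color;

            // A depth attachment index of -1 means the pass has no depth/stencil attachment.
            using (context.BeginScopedRenderPass(camera.pixelWidth, camera.pixelHeight, 1, attachments, -1))
            using (var colors = new NativeArray<int>(new[] { 0 }, Allocator.Temp))
            using (context.BeginScopedSubPass(colors))
            {
                var drawingSettings = new DrawingSettings(new ShaderTagId("SRPDefaultUnlit"), new SortingSettings(camera)); // placeholder shader tag
                var filteringSettings = new FilteringSettings(RenderQueueRange.opaque);
                context.DrawRenderers(cull, ref drawingSettings, ref filteringSettings);
            }
        }
    }
}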
void DeferredLightPass(ScriptableRenderContext context, CullingResults cullingResults, Camera camera) { var cmd = CommandBufferPool.Get("SetupGlobalLights"); SortingSettings opaqueSortingSettings = new SortingSettings(camera); opaqueSortingSettings.criteria = SortingCriteria.CommonOpaque; FilteringSettings opaqueFilteringSettings = new FilteringSettings(RenderQueueRange.opaque); if (cullingResults.visibleLights.Length > 0) { VisibleLight lightData = cullingResults.visibleLights[0]; Vector4 dir = -lightData.localToWorldMatrix.GetColumn(2); Vector4 lightPos = new Vector4(dir.x, dir.y, dir.z, 0.0f); cmd.SetGlobalVector("_MainLightPosition", lightPos); cmd.SetGlobalVector("_MainLightColor", lightData.finalColor); } if (cullingResults.visibleReflectionProbes.Length > 0) { cmd.SetGlobalTexture("unity_SpecCube0", cullingResults.visibleReflectionProbes[0].texture); } context.ExecuteCommandBuffer(cmd); CommandBufferPool.Release(cmd); bool useRenderPass = false; if (!useRenderPass) { bool enableDynamicBatching = false; bool enableInstancing = false; PerObjectData perObjectData = PerObjectData.None; // DrawGBuffers DrawingSettings gBufferDrawingSettings = new DrawingSettings(ShaderPassTag.GBuffer, opaqueSortingSettings); gBufferDrawingSettings.enableDynamicBatching = enableDynamicBatching; gBufferDrawingSettings.enableInstancing = enableInstancing; gBufferDrawingSettings.perObjectData = perObjectData; cmd = CommandBufferPool.Get("Gbuffer"); cmd.SetRenderTarget(m_GBufferRTIDs, m_DepthBufferRTID); cmd.ClearRenderTarget(true, true, camera.backgroundColor); //CoreUtils.SetRenderTarget(cmd, m_GBufferRTIDs, m_DepthBufferRTID, ClearFlag.All); context.ExecuteCommandBuffer(cmd); CommandBufferPool.Release(cmd); context.DrawRenderers(cullingResults, ref gBufferDrawingSettings, ref opaqueFilteringSettings); cmd = CommandBufferPool.Get("DeferredLightingPass"); cmd.SetGlobalVector("unity_SpecCube0_HDR", new Vector4(1, 1, 0, 0)); // Bind buffers cmd.SetGlobalTexture("_GBufferAlbedo", m_GBufferRTIDs[0]); cmd.SetGlobalTexture("_GBufferNormal", m_GBufferRTIDs[1]); cmd.SetGlobalTexture("_GBufferMetallicOcclusionSmoothness", m_GBufferRTIDs[2]); cmd.SetGlobalTexture("_GBufferDepth", m_DepthBufferRTID); cmd.SetGlobalInt("_TileCountX", (camera.scaledPixelWidth + 64 - 1) / 64); cmd.SetGlobalInt("_TileCountY", (camera.scaledPixelHeight + 64 - 1) / 64); cmd.SetGlobalVector("unity_LightData", new Vector4(6, 0, 1, 0)); //Set RenderTarget cmd.SetRenderTarget(m_ColorBuffer, RenderBufferLoadAction.DontCare, RenderBufferStoreAction.Store); cmd.ClearRenderTarget(true, true, Color.black, 0.0f); //cmd.SetViewProjectionMatrices(Matrix4x4.identity, Matrix4x4.identity); //cmd.SetViewport(camera.pixelRect); cmd.DrawMesh(CustomRenderPipeline.fullscreenMesh, Matrix4x4.identity, m_DeferredLightingMat, 0, 0); context.ExecuteCommandBuffer(cmd); CommandBufferPool.Release(cmd); //cmd.SetViewport(new Rect(0,0,camera.scaledPixelWidth, camera.scaledPixelHeight)); } else { // var colorBuffer = new AttachmentDescriptor(RenderTextureFormat.ARGBHalf); // 64 bit // var depthBuffer = new AttachmentDescriptor(RenderTextureFormat.Depth); // 32 bit // var albedoGBuffer = new AttachmentDescriptor(RenderTextureFormat.ARGB32); // 32 bit // var normalGBuffer = new AttachmentDescriptor(RenderTextureFormat.ARGBHalf); // 64 bit // var pbrGBuffer = new AttachmentDescriptor(RenderTextureFormat.ARGB32); // 32 // var depthGBuffer = new AttachmentDescriptor(RenderTextureFormat.RFloat); // 32 bit // // colorBuffer.ConfigureClear(new Color(0.0f, 0.0f, 0.0f, 0.0f), 1.0f, 
0); // depthBuffer.ConfigureClear(new Color(), 1.0f, 0); // colorBuffer.ConfigureTarget(BuiltinRenderTextureType.CameraTarget, true, true); // // var attachments = new NativeArray<AttachmentDescriptor>(6, Allocator.Temp); // const int colorBufferId = 0, depthBufferId = 1, albedoGBufferId = 2, normalGBufferId = 3, pbrGbufferId = 4, depthGBufferId = 5; // //const int colorBufferId = 0, albedoGBufferId = 2, normalGBufferId = 3, pbrGbufferId = 4, depthGBufferId = 5; // attachments[colorBufferId] = colorBuffer; // attachments[depthBufferId] = depthBuffer; // attachments[albedoGBufferId] = albedoGBuffer; // attachments[normalGBufferId] = normalGBuffer; // attachments[pbrGbufferId] = pbrGBuffer; // attachments[depthGBufferId] = depthGBuffer; // // using (context.BeginScopedRenderPass(camera.scaledPixelWidth, camera.scaledPixelHeight, 1, attachments, depthBufferId)) // { // attachments.Dispose(); // var gBuffers = new NativeArray<int>(4, Allocator.Temp); // // //gBuffers[0] = colorBufferId; // gBuffers[0] = albedoGBufferId; // gBuffers[1] = normalGBufferId; // gBuffers[2] = pbrGbufferId; // gBuffers[3] = depthGBufferId; // using (context.BeginScopedSubPass(gBuffers)) // { // RenderGbufferSubPass(context, cullingResults, camera); // } // var lightingColors = new NativeArray<int>(1, Allocator.Temp); // lightingColors[0] = colorBufferId; // var lightingInputs = new NativeArray<int>(4, Allocator.Temp); // lightingInputs[0] = albedoGBufferId; // //lightingInputs[1] = albedoGBufferId; // lightingInputs[1] = normalGBufferId; // lightingInputs[2] = pbrGbufferId; // lightingInputs[3] = depthGBufferId; // using (context.BeginScopedSubPass(lightingColors, lightingInputs, true)) // { // lightingColors.Dispose(); // lightingInputs.Dispose(); // // // RenderLighting(camera, cullResults, context); // RenderDeferredLightingSubPass(context, cullingResults, camera); // } // // // } var albedo = new AttachmentDescriptor(RenderTextureFormat.ARGB32); var specRough = new AttachmentDescriptor(RenderTextureFormat.ARGB32); var normal = new AttachmentDescriptor(RenderTextureFormat.ARGB2101010); var emission = new AttachmentDescriptor(RenderTextureFormat.ARGBHalf); var depth = new AttachmentDescriptor(RenderTextureFormat.Depth); emission.ConfigureClear(new Color(0.0f, 0.0f, 0.0f, 0.0f), 1.0f, 0); depth.ConfigureClear(new Color(), 1.0f, 0); albedo.ConfigureTarget(BuiltinRenderTextureType.CameraTarget, false, true); var attachments = new NativeArray <AttachmentDescriptor>(5, Allocator.Temp); const int depthIndex = 0, albedoIndex = 1, specRoughIndex = 2, normalIndex = 3, emissionIndex = 4; attachments[depthIndex] = depth; attachments[albedoIndex] = albedo; attachments[specRoughIndex] = specRough; attachments[normalIndex] = normal; attachments[emissionIndex] = emission; using (context.BeginScopedRenderPass(camera.pixelWidth, camera.pixelHeight, 1, attachments, depthIndex)) { attachments.Dispose(); // Start the first subpass, GBuffer creation: render to albedo, specRough, normal and emission, no need to read any input attachments var gbufferColors = new NativeArray <int>(4, Allocator.Temp); gbufferColors[0] = albedoIndex; gbufferColors[1] = specRoughIndex; gbufferColors[2] = normalIndex; gbufferColors[3] = emissionIndex; using (context.BeginScopedSubPass(gbufferColors)) { gbufferColors.Dispose(); // Render the deferred G-Buffer RenderGbufferSubPass(context, cullingResults, camera); } // Second subpass, lighting: Render to the emission buffer, read from albedo, specRough, normal and depth. 
// The last parameter indicates whether the depth buffer can be bound as read-only. // Note that some renderers (notably iOS Metal) won't allow reading from the depth buffer while it's bound as Z-buffer, // so those renderers should write the Z into an additional FP32 render target manually in the pixel shader and read from it instead var lightingColors = new NativeArray <int>(1, Allocator.Temp); lightingColors[0] = emissionIndex; var lightingInputs = new NativeArray <int>(4, Allocator.Temp); lightingInputs[0] = albedoIndex; lightingInputs[1] = specRoughIndex; lightingInputs[2] = normalIndex; lightingInputs[3] = depthIndex; using (context.BeginScopedSubPass(lightingColors, lightingInputs, true)) { lightingColors.Dispose(); lightingInputs.Dispose(); // PushGlobalShadowParams(context); RenderDeferredLightingSubPass(context, cullingResults, camera); } // Third subpass, tonemapping: Render to albedo (which is bound to the camera target), read from emission. // var tonemappingColors = new NativeArray<int>(1, Allocator.Temp); // tonemappingColors[0] = albedoIndex; // var tonemappingInputs = new NativeArray<int>(1, Allocator.Temp); // tonemappingInputs[0] = emissionIndex; // using (context.BeginScopedSubPass(tonemappingColors, tonemappingInputs, true)) // { // tonemappingColors.Dispose(); // tonemappingInputs.Dispose(); // // // present frame buffer. // // FinalPass(context); // } } } }
internal void ExecuteNativeRenderPass(ScriptableRenderContext context, ScriptableRenderPass renderPass, CameraData cameraData, ref RenderingData renderingData) { using (new ProfilingScope(null, Profiling.execute)) { int currentPassIndex = renderPass.renderPassQueueIndex; Hash128 currentPassHash = m_PassIndexToPassHash[currentPassIndex]; int[] currentMergeablePasses = m_MergeableRenderPassesMap[currentPassHash]; int validColorBuffersCount = m_RenderPassesAttachmentCount[currentPassHash]; var depthOnly = (renderPass.colorAttachmentHandle.rt != null && IsDepthOnlyRenderTexture(renderPass.colorAttachmentHandle.rt)) || (cameraData.targetTexture != null && IsDepthOnlyRenderTexture(cameraData.targetTexture)); bool useDepth = depthOnly || (!renderPass.overrideCameraTarget || (renderPass.overrideCameraTarget && renderPass.depthAttachmentHandle.nameID != BuiltinRenderTextureType.CameraTarget));// && var attachments = new NativeArray <AttachmentDescriptor>(useDepth && !depthOnly ? validColorBuffersCount + 1 : 1, Allocator.Temp); for (int i = 0; i < validColorBuffersCount; ++i) { attachments[i] = m_ActiveColorAttachmentDescriptors[i]; } if (useDepth && !depthOnly) { attachments[validColorBuffersCount] = m_ActiveDepthAttachmentDescriptor; } var rpDesc = InitializeRenderPassDescriptor(cameraData, renderPass); int validPassCount = GetValidPassIndexCount(currentMergeablePasses); var attachmentIndicesCount = GetSubPassAttachmentIndicesCount(renderPass); var attachmentIndices = new NativeArray <int>(!depthOnly ? (int)attachmentIndicesCount : 0, Allocator.Temp); if (!depthOnly) { for (int i = 0; i < attachmentIndicesCount; ++i) { attachmentIndices[i] = renderPass.m_ColorAttachmentIndices[i]; } } if (validPassCount == 1 || currentMergeablePasses[0] == currentPassIndex) // Check if it's the first pass { if (PassHasInputAttachments(renderPass)) { Debug.LogWarning("First pass in a RenderPass should not have input attachments."); } context.BeginRenderPass(rpDesc.w, rpDesc.h, Math.Max(rpDesc.samples, 1), attachments, useDepth ? (!depthOnly ? 
validColorBuffersCount : 0) : -1); attachments.Dispose(); context.BeginSubPass(attachmentIndices); m_LastBeginSubpassPassIndex = currentPassIndex; } else { // Regarding input attachments, currently we always recreate a new subpass if it contains input attachments // This might not the most optimal way though and it should be investigated in the future // Whether merging subpasses with matching input attachments is a more viable option if (!AreAttachmentIndicesCompatible(m_ActiveRenderPassQueue[m_LastBeginSubpassPassIndex], m_ActiveRenderPassQueue[currentPassIndex])) { context.EndSubPass(); if (PassHasInputAttachments(m_ActiveRenderPassQueue[currentPassIndex])) { context.BeginSubPass(attachmentIndices, m_ActiveRenderPassQueue[currentPassIndex].m_InputAttachmentIndices); } else { context.BeginSubPass(attachmentIndices); } m_LastBeginSubpassPassIndex = currentPassIndex; } else if (PassHasInputAttachments(m_ActiveRenderPassQueue[currentPassIndex])) { context.EndSubPass(); context.BeginSubPass(attachmentIndices, m_ActiveRenderPassQueue[currentPassIndex].m_InputAttachmentIndices); m_LastBeginSubpassPassIndex = currentPassIndex; } } attachmentIndices.Dispose(); renderPass.Execute(context, ref renderingData); if (validPassCount == 1 || currentMergeablePasses[validPassCount - 1] == currentPassIndex) // Check if it's the last pass { context.EndSubPass(); context.EndRenderPass(); m_LastBeginSubpassPassIndex = 0; } for (int i = 0; i < m_ActiveColorAttachmentDescriptors.Length; ++i) { m_ActiveColorAttachmentDescriptors[i] = RenderingUtils.emptyAttachment; m_IsActiveColorAttachmentTransient[i] = false; } m_ActiveDepthAttachmentDescriptor = RenderingUtils.emptyAttachment; } }
internal void SetNativeRenderPassAttachmentList(ScriptableRenderPass renderPass, ref CameraData cameraData, RTHandle passColorAttachment, RTHandle passDepthAttachment, ClearFlag finalClearFlag, Color finalClearColor) { using (new ProfilingScope(null, Profiling.setAttachmentList)) { int currentPassIndex = renderPass.renderPassQueueIndex; Hash128 currentPassHash = m_PassIndexToPassHash[currentPassIndex]; int[] currentMergeablePasses = m_MergeableRenderPassesMap[currentPassHash]; // Skip if not the first pass if (currentMergeablePasses.First() != currentPassIndex) { return; } m_RenderPassesAttachmentCount[currentPassHash] = 0; UpdateFinalStoreActions(currentMergeablePasses, cameraData); int currentAttachmentIdx = 0; foreach (var passIdx in currentMergeablePasses) { if (passIdx == -1) { break; } ScriptableRenderPass pass = m_ActiveRenderPassQueue[passIdx]; for (int i = 0; i < pass.m_ColorAttachmentIndices.Length; ++i) { pass.m_ColorAttachmentIndices[i] = -1; } AttachmentDescriptor currentAttachmentDescriptor; var usesTargetTexture = cameraData.targetTexture != null; var depthOnly = (pass.colorAttachmentHandle.rt != null && IsDepthOnlyRenderTexture(pass.colorAttachmentHandle.rt)) || (usesTargetTexture && IsDepthOnlyRenderTexture(cameraData.targetTexture)); int samples; RenderTargetIdentifier colorAttachmentTarget; // We are not rendering to Backbuffer so we have the RT and the information with it // while also creating a new RenderTargetIdentifier to ignore the current depth slice (which might get bypassed in XR setup eventually) if (new RenderTargetIdentifier(passColorAttachment.nameID, 0, depthSlice: 0) != BuiltinRenderTextureType.CameraTarget) { currentAttachmentDescriptor = new AttachmentDescriptor(depthOnly ? passColorAttachment.rt.descriptor.depthStencilFormat : passColorAttachment.rt.descriptor.graphicsFormat); samples = passColorAttachment.rt.descriptor.msaaSamples; colorAttachmentTarget = passColorAttachment.nameID; } else // In this case we might be rendering the the targetTexture or the Backbuffer, so less information is available { currentAttachmentDescriptor = new AttachmentDescriptor(GetDefaultGraphicsFormat(cameraData, depthOnly)); samples = cameraData.cameraTargetDescriptor.msaaSamples; colorAttachmentTarget = usesTargetTexture ? new RenderTargetIdentifier(cameraData.targetTexture) : BuiltinRenderTextureType.CameraTarget; } currentAttachmentDescriptor.ConfigureTarget(colorAttachmentTarget, ((uint)finalClearFlag & (uint)ClearFlag.Color) == 0, true); if (PassHasInputAttachments(pass)) { SetupInputAttachmentIndices(pass); } // TODO: this is redundant and is being setup for each attachment. Needs to be done only once per mergeable pass list (we need to make sure mergeable passes use the same depth!) m_ActiveDepthAttachmentDescriptor = new AttachmentDescriptor(SystemInfo.GetGraphicsFormat(DefaultFormat.DepthStencil)); m_ActiveDepthAttachmentDescriptor.ConfigureTarget(passDepthAttachment.nameID != BuiltinRenderTextureType.CameraTarget ? passDepthAttachment.nameID : (usesTargetTexture ? 
new RenderTargetIdentifier(cameraData.targetTexture.depthBuffer) : BuiltinRenderTextureType.Depth), ((uint)finalClearFlag & (uint)ClearFlag.Depth) == 0, true); if (finalClearFlag != ClearFlag.None) { // We don't clear color for Overlay render targets, however the pipeline sets up depth-only render passes as color attachments, which we do need to clear if ((cameraData.renderType != CameraRenderType.Overlay || depthOnly && ((uint)finalClearFlag & (uint)ClearFlag.Color) != 0)) { currentAttachmentDescriptor.ConfigureClear(finalClearColor, 1.0f, 0); } if (((uint)finalClearFlag & (uint)ClearFlag.Depth) != 0) { m_ActiveDepthAttachmentDescriptor.ConfigureClear(Color.black, 1.0f, 0); } } // resolving to the implicit color target's resolve surface TODO: handle m_CameraResolveTarget if present? if (samples > 1) { currentAttachmentDescriptor.ConfigureResolveTarget(colorAttachmentTarget); if (RenderingUtils.MultisampleDepthResolveSupported()) { m_ActiveDepthAttachmentDescriptor.ConfigureResolveTarget(m_ActiveDepthAttachmentDescriptor.loadStoreTarget); } } if (m_UseOptimizedStoreActions) { currentAttachmentDescriptor.storeAction = m_FinalColorStoreAction[0]; m_ActiveDepthAttachmentDescriptor.storeAction = m_FinalDepthStoreAction; } int existingAttachmentIndex = FindAttachmentDescriptorIndexInList(currentAttachmentIdx, currentAttachmentDescriptor, m_ActiveColorAttachmentDescriptors); if (existingAttachmentIndex == -1) { // add a new attachment pass.m_ColorAttachmentIndices[0] = currentAttachmentIdx; m_ActiveColorAttachmentDescriptors[currentAttachmentIdx] = currentAttachmentDescriptor; currentAttachmentIdx++; m_RenderPassesAttachmentCount[currentPassHash]++; } else { // attachment was already present pass.m_ColorAttachmentIndices[0] = existingAttachmentIndex; } } } }
internal void SetNativeRenderPassMRTAttachmentList(ScriptableRenderPass renderPass, ref CameraData cameraData, bool needCustomCameraColorClear, ClearFlag cameraClearFlag) { using (new ProfilingScope(null, Profiling.setMRTAttachmentsList)) { int currentPassIndex = renderPass.renderPassQueueIndex; Hash128 currentPassHash = m_PassIndexToPassHash[currentPassIndex]; int[] currentMergeablePasses = m_MergeableRenderPassesMap[currentPassHash]; // Not the first pass if (currentMergeablePasses.First() != currentPassIndex) { return; } m_RenderPassesAttachmentCount[currentPassHash] = 0; UpdateFinalStoreActions(currentMergeablePasses, cameraData); int currentAttachmentIdx = 0; bool hasInput = false; foreach (var passIdx in currentMergeablePasses) { if (passIdx == -1) { break; } ScriptableRenderPass pass = m_ActiveRenderPassQueue[passIdx]; for (int i = 0; i < pass.m_ColorAttachmentIndices.Length; ++i) { pass.m_ColorAttachmentIndices[i] = -1; } for (int i = 0; i < pass.m_InputAttachmentIndices.Length; ++i) { pass.m_InputAttachmentIndices[i] = -1; } uint validColorBuffersCount = RenderingUtils.GetValidColorBufferCount(pass.colorAttachmentHandles); for (int i = 0; i < validColorBuffersCount; ++i) { AttachmentDescriptor currentAttachmentDescriptor = new AttachmentDescriptor(pass.renderTargetFormat[i] != GraphicsFormat.None ? pass.renderTargetFormat[i] : GetDefaultGraphicsFormat(cameraData)); var colorHandle = pass.overrideCameraTarget ? pass.colorAttachmentHandles[i] : m_CameraColorTarget.handle; int existingAttachmentIndex = FindAttachmentDescriptorIndexInList(colorHandle.nameID, m_ActiveColorAttachmentDescriptors); if (m_UseOptimizedStoreActions) { currentAttachmentDescriptor.storeAction = m_FinalColorStoreAction[i]; } if (existingAttachmentIndex == -1) { // add a new attachment m_ActiveColorAttachmentDescriptors[currentAttachmentIdx] = currentAttachmentDescriptor; bool passHasClearColor = (pass.clearFlag & ClearFlag.Color) != 0; m_ActiveColorAttachmentDescriptors[currentAttachmentIdx].ConfigureTarget(colorHandle.nameID, !passHasClearColor, true); if (pass.colorAttachmentHandles[i] == m_CameraColorTarget.handle && needCustomCameraColorClear && (cameraClearFlag & ClearFlag.Color) != 0) { m_ActiveColorAttachmentDescriptors[currentAttachmentIdx].ConfigureClear(cameraData.backgroundColor, 1.0f, 0); } else if (passHasClearColor) { m_ActiveColorAttachmentDescriptors[currentAttachmentIdx].ConfigureClear(CoreUtils.ConvertSRGBToActiveColorSpace(pass.clearColor), 1.0f, 0); } pass.m_ColorAttachmentIndices[i] = currentAttachmentIdx; currentAttachmentIdx++; m_RenderPassesAttachmentCount[currentPassHash]++; } else { // attachment was already present pass.m_ColorAttachmentIndices[i] = existingAttachmentIndex; } } if (PassHasInputAttachments(pass)) { hasInput = true; SetupInputAttachmentIndices(pass); } // TODO: this is redundant and is being setup for each attachment. Needs to be done only once per mergeable pass list (we need to make sure mergeable passes use the same depth!) m_ActiveDepthAttachmentDescriptor = new AttachmentDescriptor(SystemInfo.GetGraphicsFormat(DefaultFormat.DepthStencil)); bool passHasClearDepth = (cameraClearFlag & ClearFlag.DepthStencil) != 0; m_ActiveDepthAttachmentDescriptor.ConfigureTarget(pass.overrideCameraTarget ? 
pass.depthAttachmentHandle.nameID : m_CameraDepthTarget.nameID, !passHasClearDepth, true); if (passHasClearDepth) { m_ActiveDepthAttachmentDescriptor.ConfigureClear(Color.black, 1.0f, 0); } if (m_UseOptimizedStoreActions) { m_ActiveDepthAttachmentDescriptor.storeAction = m_FinalDepthStoreAction; } } if (hasInput) { SetupTransientInputAttachments(m_RenderPassesAttachmentCount[currentPassHash]); } } }
private void DeferedRender(ScriptableRenderContext context, Camera camera, ref CullingResults cullingResult) { var albedo = new AttachmentDescriptor(RenderTextureFormat.ARGB32); var specRough = new AttachmentDescriptor(RenderTextureFormat.ARGB32); var normal = new AttachmentDescriptor(RenderTextureFormat.ARGB2101010); var emission = new AttachmentDescriptor(RenderTextureFormat.ARGBHalf); var depth = new AttachmentDescriptor(RenderTextureFormat.Depth); emission.ConfigureClear(new Color(0.0f, 0.0f, 0.0f, 0.0f), 1.0f, 0); depth.ConfigureClear(new Color(), 1.0f, 0); // AttachmentDescriptor is a struct, so the albedo target must be configured before it is copied into the NativeArray albedo.ConfigureTarget(BuiltinRenderTextureType.CameraTarget, false, true); NativeArray <AttachmentDescriptor> colorAttachments = new NativeArray <AttachmentDescriptor>(5, Allocator.Temp); colorAttachments[depthIndex] = depth; colorAttachments[specRoughIndex] = specRough; colorAttachments[normalIndex] = normal; colorAttachments[emissionIndex] = emission; colorAttachments[albedoIndex] = albedo; context.BeginRenderPass(camera.pixelWidth, camera.pixelHeight, 1, colorAttachments, depthIndex); colorAttachments.Dispose(); var gbufferColors = new NativeArray <int>(4, Allocator.Temp); gbufferColors[0] = albedoIndex; gbufferColors[1] = specRoughIndex; gbufferColors[2] = normalIndex; gbufferColors[3] = emissionIndex; //G-Buffer Pass context.BeginSubPass(gbufferColors); gbufferColors.Dispose(); RenderGBuffer(context, camera, ref cullingResult); context.EndSubPass(); var lightingColors = new NativeArray <int>(1, Allocator.Temp); lightingColors[0] = albedoIndex; var lightingInputs = new NativeArray <int>(4, Allocator.Temp); lightingInputs[0] = emissionIndex; lightingInputs[1] = specRoughIndex; lightingInputs[2] = normalIndex; lightingInputs[3] = depthIndex; //Lighting Pass context.BeginSubPass(lightingColors, lightingInputs, true); lightingInputs.Dispose(); lightingColors.Dispose(); RenderLights(context, camera, ref cullingResult); context.EndSubPass(); context.EndRenderPass(); }
internal void ExecuteNativeRenderPass(ScriptableRenderContext context, ScriptableRenderPass renderPass, CameraData cameraData, ref RenderingData renderingData) { using (new ProfilingScope(null, Profiling.execute)) { int currentPassIndex = renderPass.renderPassQueueIndex; Hash128 currentPassHash = m_PassIndexToPassHash[currentPassIndex]; int[] currentMergeablePasses = m_MergeableRenderPassesMap[currentPassHash]; int validColorBuffersCount = m_RenderPassesAttachmentCount[currentPassHash]; bool isLastPass = renderPass.isLastPass; // TODO: review the lastPassToBB logic to mak it work with merged passes // keep track if this is the current camera's last pass and the RT is the backbuffer (BuiltinRenderTextureType.CameraTarget) bool isLastPassToBB = isLastPass && (m_ActiveColorAttachmentDescriptors[0].loadStoreTarget == BuiltinRenderTextureType.CameraTarget); var depthOnly = renderPass.depthOnly || (cameraData.targetTexture != null && cameraData.targetTexture.graphicsFormat == GraphicsFormat.DepthAuto); bool useDepth = depthOnly || (!renderPass.overrideCameraTarget || (renderPass.overrideCameraTarget && renderPass.depthAttachment != BuiltinRenderTextureType.CameraTarget)) && (!(isLastPassToBB || (isLastPass && cameraData.camera.targetTexture != null))); var attachments = new NativeArray <AttachmentDescriptor>(useDepth && !depthOnly ? validColorBuffersCount + 1 : 1, Allocator.Temp); for (int i = 0; i < validColorBuffersCount; ++i) { attachments[i] = m_ActiveColorAttachmentDescriptors[i]; } if (useDepth && !depthOnly) { attachments[validColorBuffersCount] = m_ActiveDepthAttachmentDescriptor; } var rpDesc = InitializeRenderPassDescriptor(cameraData, renderPass); int validPassCount = GetValidPassIndexCount(currentMergeablePasses); var attachmentIndicesCount = GetSubPassAttachmentIndicesCount(renderPass); var attachmentIndices = new NativeArray <int>(!depthOnly ? (int)attachmentIndicesCount : 0, Allocator.Temp); if (!depthOnly) { for (int i = 0; i < attachmentIndicesCount; ++i) { attachmentIndices[i] = renderPass.m_InputAttachmentIndices[i]; } } if (validPassCount == 1 || currentMergeablePasses[0] == currentPassIndex) // Check if it's the first pass { context.BeginRenderPass(rpDesc.w, rpDesc.h, Math.Max(rpDesc.samples, 1), attachments, useDepth ? (!depthOnly ? validColorBuffersCount : 0) : -1); attachments.Dispose(); context.BeginSubPass(attachmentIndices); m_LastBeginSubpassPassIndex = currentPassIndex; } else { if (!AreAttachmentIndicesCompatible(m_ActiveRenderPassQueue[m_LastBeginSubpassPassIndex], m_ActiveRenderPassQueue[currentPassIndex])) { context.EndSubPass(); context.BeginSubPass(attachmentIndices); m_LastBeginSubpassPassIndex = currentPassIndex; } } attachmentIndices.Dispose(); renderPass.Execute(context, ref renderingData); if (validPassCount == 1 || currentMergeablePasses[validPassCount - 1] == currentPassIndex) // Check if it's the last pass { context.EndSubPass(); context.EndRenderPass(); m_LastBeginSubpassPassIndex = 0; } for (int i = 0; i < m_ActiveColorAttachmentDescriptors.Length; ++i) { m_ActiveColorAttachmentDescriptors[i] = RenderingUtils.emptyAttachment; } m_ActiveDepthAttachmentDescriptor = RenderingUtils.emptyAttachment; } }
internal void SetNativeRenderPassMRTAttachmentList(ScriptableRenderPass renderPass, ref CameraData cameraData, uint validColorBuffersCount, bool needCustomCameraColorClear, ClearFlag clearFlag) { using (new ProfilingScope(null, Profiling.setMRTAttachmentsList)) { int currentPassIndex = renderPass.renderPassQueueIndex; Hash128 currentPassHash = m_PassIndexToPassHash[currentPassIndex]; int[] currentMergeablePasses = m_MergeableRenderPassesMap[currentPassHash]; // Not the first pass if (currentMergeablePasses.First() != currentPassIndex) { return; } m_RenderPassesAttachmentCount[currentPassHash] = 0; int currentAttachmentIdx = 0; foreach (var passIdx in currentMergeablePasses) { if (passIdx == -1) { break; } ScriptableRenderPass pass = m_ActiveRenderPassQueue[passIdx]; for (int i = 0; i < pass.m_InputAttachmentIndices.Length; ++i) { pass.m_InputAttachmentIndices[i] = -1; } // TODO: review the lastPassToBB logic to mak it work with merged passes bool isLastPassToBB = false; for (int i = 0; i < validColorBuffersCount; ++i) { AttachmentDescriptor currentAttachmentDescriptor = new AttachmentDescriptor(pass.renderTargetFormat[i] != GraphicsFormat.None ? pass.renderTargetFormat[i] : GetDefaultGraphicsFormat(cameraData)); // if this is the current camera's last pass, also check if one of the RTs is the backbuffer (BuiltinRenderTextureType.CameraTarget) isLastPassToBB |= pass.isLastPass && (pass.colorAttachments[i] == BuiltinRenderTextureType.CameraTarget); int existingAttachmentIndex = FindAttachmentDescriptorIndexInList(currentAttachmentIdx, currentAttachmentDescriptor, m_ActiveColorAttachmentDescriptors); if (existingAttachmentIndex == -1) { // add a new attachment m_ActiveColorAttachmentDescriptors[currentAttachmentIdx] = currentAttachmentDescriptor; m_ActiveColorAttachmentDescriptors[currentAttachmentIdx].ConfigureTarget(pass.colorAttachments[i], (clearFlag & ClearFlag.Color) == 0, true); if ((clearFlag & ClearFlag.Color) != 0) { var clearColor = (needCustomCameraColorClear && pass.colorAttachments[i] == m_CameraColorTarget) ? cameraData.camera.backgroundColor : renderPass.clearColor; m_ActiveColorAttachmentDescriptors[currentAttachmentIdx].ConfigureClear(CoreUtils.ConvertSRGBToActiveColorSpace(clearColor), 1.0f, 0); } pass.m_InputAttachmentIndices[i] = currentAttachmentIdx; currentAttachmentIdx++; m_RenderPassesAttachmentCount[currentPassHash]++; } else { // attachment was already present pass.m_InputAttachmentIndices[i] = existingAttachmentIndex; } } // TODO: this is redundant and is being setup for each attachment. Needs to be done only once per mergeable pass list (we need to make sure mergeable passes use the same depth!) m_ActiveDepthAttachmentDescriptor = new AttachmentDescriptor(GraphicsFormat.DepthAuto); m_ActiveDepthAttachmentDescriptor.ConfigureTarget(pass.depthAttachment, (clearFlag & ClearFlag.DepthStencil) == 0, !isLastPassToBB); if ((clearFlag & ClearFlag.DepthStencil) != 0) { m_ActiveDepthAttachmentDescriptor.ConfigureClear(Color.black, 1.0f, 0); } } } }
void DrawDeferred(Camera camera, CullingResults cullingResults, ScriptableRenderContext context) { var albedo = new AttachmentDescriptor(RenderTextureFormat.ARGB32); var specRough = new AttachmentDescriptor(RenderTextureFormat.ARGB32); var normal = new AttachmentDescriptor(RenderTextureFormat.ARGB2101010); var emission = new AttachmentDescriptor(RenderTextureFormat.ARGBHalf); var depth = new AttachmentDescriptor(RenderTextureFormat.Depth); var depthSRV = new AttachmentDescriptor(RenderTextureFormat.ARGBHalf); // At the beginning of the render pass, clear the emission buffer to all black, and the depth buffer to 1.0f emission.ConfigureClear(new Color(0.0f, 0.0f, 0.0f, 0.0f), 1.0f, 0); depth.ConfigureClear(new Color(), 1.0f, 0); //depthSRV.ConfigureClear(new Color(), 1.0f, 0); albedo.ConfigureTarget(BuiltinRenderTextureType.CameraTarget, false, true); var attachments = new NativeArray <AttachmentDescriptor>(5, Allocator.Temp); const int depthIndex = 0, albedoIndex = 1, specRoughIndex = 2, normalIndex = 3, emissionIndex = 4, depthSRVIndex = 5; attachments[depthIndex] = depth; attachments[albedoIndex] = albedo; attachments[specRoughIndex] = specRough; attachments[normalIndex] = normal; attachments[emissionIndex] = emission; //attachments[depthSRVIndex] = depthSRV; using (context.BeginScopedRenderPass(camera.pixelWidth, camera.pixelHeight, 1, attachments, depthIndex)) { attachments.Dispose(); // Start the first subpass, GBuffer creation: render to albedo, specRough, normal and emission, no need to read any input attachments var gbufferColors = new NativeArray <int>(4, Allocator.Temp); gbufferColors[0] = albedoIndex; gbufferColors[1] = specRoughIndex; gbufferColors[2] = normalIndex; gbufferColors[3] = emissionIndex; //gbufferColors[4] = depthSRVIndex; using (context.BeginScopedSubPass(gbufferColors)) { gbufferColors.Dispose(); // Render the deferred G-Buffer RenderGBuffer(camera, cullingResults, context); } // Second subpass, lighting: Render to the emission buffer, read from albedo, specRough, normal and depth. // The last parameter indicates whether the depth buffer can be bound as read-only. // Note that some renderers (notably iOS Metal) won't allow reading from the depth buffer while it's bound as Z-buffer, // so those renderers should write the Z into an additional FP32 render target manually in the pixel shader and read from it instead var lightingColors = new NativeArray <int>(1, Allocator.Temp); lightingColors[0] = albedoIndex; var lightingInputs = new NativeArray <int>(1, Allocator.Temp); lightingInputs[0] = albedoIndex; // lightingInputs[1] = specRoughIndex; // lightingInputs[2] = normalIndex; // //lightingInputs[3] = depthSRVIndex; using (context.BeginScopedSubPass(lightingColors, lightingInputs, true)) { lightingColors.Dispose(); lightingInputs.Dispose(); // PushGlobalShadowParams(context); RenderDeferredLighting(camera, cullingResults, context); } // Third subpass, tonemapping: Render to albedo (which is bound to the camera target), read from emission. var tonemappingColors = new NativeArray <int>(1, Allocator.Temp); tonemappingColors[0] = albedoIndex; var tonemappingInputs = new NativeArray <int>(1, Allocator.Temp); tonemappingInputs[0] = albedoIndex; using (context.BeginScopedSubPass(tonemappingColors, tonemappingInputs, true)) { tonemappingColors.Dispose(); tonemappingInputs.Dispose(); // present frame buffer. RenderDeferredFinalPass(context); } } }