protected override void OnProcess(RenderContext context)
{
  context.ThrowIfCameraMissing();

  var graphicsDevice = GraphicsService.GraphicsDevice;
  var source = context.SourceTexture;
  var target = context.RenderTarget;
  var viewport = context.Viewport;

  var tempFormat = new RenderTargetFormat(source.Width, source.Height, false, source.Format, DepthFormat.None);
  RenderTarget2D blurredScene = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);

  if (TextureHelper.IsFloatingPointFormat(source.Format))
  {
    graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
    graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
  }
  else
  {
    graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
    graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;
  }

  context.RenderTarget = blurredScene;
  context.Viewport = new Viewport(0, 0, blurredScene.Width, blurredScene.Height);

  // Get view-dependent information stored in camera node.
  var cameraNode = context.CameraNode;
  object dummy;
  cameraNode.ViewDependentData.TryGetValue(this, out dummy);
  var data = dummy as ViewDependentData;
  if (data == null)
  {
    data = new ViewDependentData(GraphicsService);
    cameraNode.ViewDependentData[this] = data;
  }

  if (data.LastBlurredScene == null)
  {
    // This is the first frame. Simply remember the current source for the next frame.
    _copyFilter.Process(context);
  }
  else
  {
    // Create new blurred scene.
    graphicsDevice.SetRenderTarget(blurredScene);

    _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
    _strengthParameter.SetValue(Strength);
    _sourceTextureParameter.SetValue(source);
    _lastSourceTextureParameter.SetValue(data.LastBlurredScene);
    _effect.CurrentTechnique.Passes[0].Apply();
    graphicsDevice.DrawFullScreenQuad();
  }

  // Copy blurredScene to target.
  context.SourceTexture = blurredScene;
  context.RenderTarget = target;
  context.Viewport = viewport;
  _copyFilter.Process(context);

  // Recycle old blurred scene and store new scene (switch render targets).
  GraphicsService.RenderTargetPool.Recycle(data.LastBlurredScene);
  data.LastBlurredScene = blurredScene;

  _sourceTextureParameter.SetValue((Texture2D)null);
  _lastSourceTextureParameter.SetValue((Texture2D)null);

  // Restore original context.
  context.SourceTexture = source;
}
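// A minimal sketch of the per-camera cache used above. This is an assumption for
// illustration, not the library's actual nested type: the method only requires that
// ViewDependentData keeps the previous frame's render target alive and returns it
// to the pool on disposal. (The luminance processor below uses an analogous class
// with a LastLuminance field instead.)
private sealed class ViewDependentData : IDisposable
{
  private readonly IGraphicsService _graphicsService;

  // The blurred scene of the last frame.
  public RenderTarget2D LastBlurredScene;

  public ViewDependentData(IGraphicsService graphicsService)
  {
    _graphicsService = graphicsService;
  }

  public void Dispose()
  {
    // Return the cached render target to the pool and reset the reference.
    // (RenderTargetPool.Recycle accepts null, as the calling code relies on.)
    _graphicsService.RenderTargetPool.Recycle(LastBlurredScene);
    LastBlurredScene = null;
  }
}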
protected override void OnProcess(RenderContext context)
{
  context.ThrowIfCameraMissing();

  var graphicsDevice = GraphicsService.GraphicsDevice;
  var renderTargetPool = GraphicsService.RenderTargetPool;

  var source = context.SourceTexture;
  var target = context.RenderTarget;
  var viewport = context.Viewport;

  RenderTarget2D temp128x128 = renderTargetPool.Obtain2D(
    new RenderTargetFormat(128, 128, false, SurfaceFormat.HalfVector4, DepthFormat.None));
  RenderTarget2D temp64x64 = renderTargetPool.Obtain2D(
    new RenderTargetFormat(64, 64, false, SurfaceFormat.HalfVector4, DepthFormat.None));
  RenderTarget2D luminance = renderTargetPool.Obtain2D(
    new RenderTargetFormat(1, 1, false, SurfaceFormat.HalfVector4, DepthFormat.None));

  // ----- Downsample scene into temp128x128.
  context.RenderTarget = temp128x128;
  context.Viewport = new Viewport(0, 0, temp128x128.Width, temp128x128.Height);
  _downsampleFilter.Process(context);

  _useGeometricMeanParameter.SetValue(UseGeometricMean);

  // Get view-dependent information stored in camera node.
  var cameraNode = context.CameraNode;
  object dummy;
  cameraNode.ViewDependentData.TryGetValue(this, out dummy);
  var data = dummy as ViewDependentData;
  if (data == null)
  {
    data = new ViewDependentData(GraphicsService);
    cameraNode.ViewDependentData[this] = data;
  }

  if (UseAdaption)
  {
    // Use adaption if required by user and if we already have luminance info.
    _useAdaptionParameter.SetValue(data.LastLuminance != null);
    _deltaTimeParameter.SetValue((float)context.DeltaTime.TotalSeconds);
    _adaptionSpeedParameter.SetValue(AdaptionSpeed);
    _lastLuminanceTextureParameter.SetValue(data.LastLuminance);
  }
  else
  {
    _useAdaptionParameter.SetValue(false);
    _lastLuminanceTextureParameter.SetValue((Texture2D)null);

    // Reset old luminance info.
    data.Dispose();
  }

  // ----- First downsample temp128x128 into temp64x64 and create luminance info.
  graphicsDevice.SetRenderTarget(temp64x64);
  _textureParameter.SetValue(temp128x128);
  _sourceSizeParameter.SetValue(new Vector2(temp128x128.Width, temp128x128.Height));
  _targetSizeParameter.SetValue(new Vector2(temp64x64.Width, temp64x64.Height));
  _createPass.Apply();
  graphicsDevice.DrawFullScreenQuad();

  // temp128x128 is not needed anymore.
  renderTargetPool.Recycle(temp128x128);

  // ----- Downsample luminance info.
  RenderTarget2D last = temp64x64;
  while (last.Width > 2)
  {
    Debug.Assert(last.Width == last.Height, "The render target must be square.");

    RenderTarget2D temp = renderTargetPool.Obtain2D(
      new RenderTargetFormat(last.Width / 2, last.Height / 2, false, last.Format, DepthFormat.None));

    graphicsDevice.SetRenderTarget(temp);
    _textureParameter.SetValue(last);
    _sourceSizeParameter.SetValue(new Vector2(last.Width, last.Height));
    _targetSizeParameter.SetValue(new Vector2(temp.Width, temp.Height));
    _downsamplePass.Apply();
    graphicsDevice.DrawFullScreenQuad();

    renderTargetPool.Recycle(last);
    last = temp;
  }

  // ----- Sample 'last' and store final info in 'luminance'.
  graphicsDevice.SetRenderTarget(luminance);
  _textureParameter.SetValue(last);
  _sourceSizeParameter.SetValue(new Vector2(last.Width, last.Height));
  _targetSizeParameter.SetValue(new Vector2(luminance.Width, luminance.Height));
  _finalPass.Apply();
  graphicsDevice.DrawFullScreenQuad();

  renderTargetPool.Recycle(last);

  // ----- Copy luminance to original context.RenderTarget.
  context.SourceTexture = luminance;
  context.RenderTarget = target;
  context.Viewport = viewport;
  _copyFilter.Process(context);

  // ----- Store luminance for next frame.
  renderTargetPool.Recycle(data.LastLuminance);
  data.LastLuminance = luminance;

  // Restore original context.
  context.SourceTexture = source;
  _textureParameter.SetValue((Texture2D)null);
}
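// Sketch of the luminance adaption that _finalPass is expected to perform.
// Assumption: a Pattanaik-style exponential decay, which is the common choice for
// eye adaption; shown on the CPU for illustration only, the real work happens in
// the shader. (With UseGeometricMean, the average itself is typically computed as
// exp(mean(log(epsilon + L))) instead of mean(L) to reduce the influence of a few
// very bright pixels.)
static float AdaptLuminance(float lastLuminance, float currentLuminance, float deltaTime, float adaptionSpeed)
{
  // Move from the old value toward the new value. The step size shrinks
  // exponentially with the elapsed time, so adaption is frame-rate independent.
  return lastLuminance + (currentLuminance - lastLuminance)
         * (1.0f - (float)System.Math.Exp(-deltaTime * adaptionSpeed));
}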
protected override void OnProcess(RenderContext context)
{
  context.ThrowIfCameraMissing();

  var graphicsDevice = GraphicsService.GraphicsDevice;
  var renderTargetPool = GraphicsService.RenderTargetPool;
  var cameraNode = context.CameraNode;

  var source = context.SourceTexture;
  var target = context.RenderTarget;
  var viewport = context.Viewport;

  if (Quality == 0)
  {
    // No ambient occlusion.
    if (!CombineWithSource)
    {
      // CombineWithSource is not set. --> Simply clear the render target to white.
      graphicsDevice.SetRenderTarget(target);
      graphicsDevice.Viewport = viewport;
      graphicsDevice.Clear(Color.White);
    }
    else
    {
      // Copy source image to target.
      _copyFilter.Process(context);
    }
    return;
  }

  // Try to get the downsampled depth buffer from the render context.
  // If we cannot find it in the render context, we downsample it manually.
  Texture2D downsampledDepthTexture = null;
  RenderTarget2D downsampledDepthTarget = null;
  if (DownsampleFactor == 2)
  {
    object dummy;
    if (context.Data.TryGetValue(RenderContextKeys.DepthBufferHalf, out dummy))
      downsampledDepthTexture = dummy as Texture2D;
  }

  if (downsampledDepthTexture == null)
  {
    context.ThrowIfGBuffer0Missing();

    if (DownsampleFactor == 1)
    {
      downsampledDepthTexture = context.GBuffer0;
    }
    else
    {
      // Downsample manually.
      // If we do not downsample the depth target, we get artifacts (strange horizontal
      // and vertical lines). TODO: Check what causes the artifacts and try to remove
      // the downsampling.
      downsampledDepthTarget = renderTargetPool.Obtain2D(new RenderTargetFormat(
        context.GBuffer0.Width / DownsampleFactor,
        context.GBuffer0.Height / DownsampleFactor,
        false,
        context.GBuffer0.Format,
        DepthFormat.None));
      context.SourceTexture = context.GBuffer0;
      context.RenderTarget = downsampledDepthTarget;
      context.Viewport = new Viewport(0, 0, downsampledDepthTarget.Width, downsampledDepthTarget.Height);
      _downsampleFilter.Process(context);
      downsampledDepthTexture = downsampledDepthTarget;
    }
  }

  // We use two temporary render targets.
  // We do not use a floating-point format because float textures cannot use hardware filtering.
  RenderTarget2D temp0;
  if (!CombineWithSource
      && target != null
      && target.Width == context.GBuffer0.Width / DownsampleFactor
      && target.Height == context.GBuffer0.Height / DownsampleFactor
      && Strength < 1)
  {
    // If we do not have to combine the AO result with the source image, and if the
    // target image has the downsampled resolution, then we can use the target image
    // directly and do not need a temporary render target.
    // Also, a Strength > 1 is always applied in a separate pass because applying a
    // Strength > 1 before the blur has no effect.
    temp0 = target;
  }
  else
  {
    temp0 = renderTargetPool.Obtain2D(new RenderTargetFormat(
      context.GBuffer0.Width / DownsampleFactor,
      context.GBuffer0.Height / DownsampleFactor,
      false,
      SurfaceFormat.Color,
      DepthFormat.None));
  }

  // Create SSAO.
  graphicsDevice.SetRenderTarget(temp0);
  _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
  _farParameter.SetValue(cameraNode.Camera.Projection.Far);
  _radiusParameter.SetValue((Vector2)Radii);
  _maxDistancesParameter.SetValue((Vector2)MaxDistances);
  _strengthParameter.SetValue(Strength < 1 ? Strength : 1);
  _gBuffer0Parameter.SetValue(downsampledDepthTexture);

  if (Quality == 1)
    _createLinesAPass.Apply();
  else
    _createLinesBPass.Apply();

  graphicsDevice.DrawFullScreenQuad();

  if (UseEdgeAwareBlur)
  {
    RenderTarget2D temp1 = renderTargetPool.Obtain2D(new RenderTargetFormat(
      context.GBuffer0.Width / DownsampleFactor,
      context.GBuffer0.Height / DownsampleFactor,
      false,
      SurfaceFormat.Color,
      DepthFormat.None));

    for (int i = 0; i < NumberOfBlurPasses; i++)
    {
      // Blur horizontally.
      // Note: We use a bilateral filter, which is not separable - but the results
      // are still OK if we separate the horizontal and vertical blur.
      graphicsDevice.SetRenderTarget(temp1);
      _occlusionTextureParameter.SetValue(temp0);
      _blurHorizontalPass.Apply();
      graphicsDevice.DrawFullScreenQuad();

      // Blur vertically.
      graphicsDevice.SetRenderTarget(temp0);
      _occlusionTextureParameter.SetValue(temp1);
      _blurVerticalPass.Apply();
      graphicsDevice.DrawFullScreenQuad();
    }

    // A few render targets are not needed anymore.
    renderTargetPool.Recycle(downsampledDepthTarget);
    renderTargetPool.Recycle(temp1);
  }
  else
  {
    // A few render targets are not needed anymore.
    renderTargetPool.Recycle(downsampledDepthTarget);

    context.SourceTexture = temp0;
    context.RenderTarget = temp0;
    context.Viewport = new Viewport(0, 0, temp0.Width, temp0.Height);
    for (int i = 0; i < NumberOfBlurPasses; i++)
      _blur.Process(context);
  }

  _strengthParameter.SetValue(Strength > 1 ? Strength : 1);

  if (CombineWithSource)
  {
    if (TextureHelper.IsFloatingPointFormat(source.Format))
      graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
    else
      graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;

    // Combine with scene.
    graphicsDevice.SetRenderTarget(target);
    graphicsDevice.Viewport = viewport;
    _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
    _occlusionTextureParameter.SetValue(temp0);
    _sourceTextureParameter.SetValue(source);
    _combinePass.Apply();
    graphicsDevice.DrawFullScreenQuad();
  }
  else
  {
    if (temp0 != target)
    {
      graphicsDevice.SetRenderTarget(target);
      graphicsDevice.Viewport = viewport;
      _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
      _occlusionTextureParameter.SetValue(temp0);
      _copyPass.Apply();
      graphicsDevice.DrawFullScreenQuad();
    }
  }

  // Clean up.
  if (temp0 != target)
    renderTargetPool.Recycle(temp0);

  _sourceTextureParameter.SetValue((Texture2D)null);
  _gBuffer0Parameter.SetValue((Texture2D)null);
  _occlusionTextureParameter.SetValue((Texture2D)null);

  // Restore original context.
  context.SourceTexture = source;
  context.RenderTarget = target;
  context.Viewport = viewport;
}
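// Usage sketch: if an earlier render step already produced a half-resolution copy of
// the depth buffer, publishing it in the render context lets this filter skip its
// manual downsample branch when DownsampleFactor == 2. RenderContextKeys.DepthBufferHalf
// is the key the filter checks above; the indexer-style write into context.Data is an
// assumption for illustration, matching the TryGetValue read used in the method.
private static void PublishHalfResDepthBuffer(RenderContext context, Texture2D depthBufferHalf)
{
  // Store the half-resolution depth texture so downstream processors can reuse it.
  context.Data[RenderContextKeys.DepthBufferHalf] = depthBufferHalf;
}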