示例#1
0
        //--------------------------------------------------------------
        #region Creation & Cleanup
        //--------------------------------------------------------------

        /// <summary>
        /// Initializes a new instance of the <see cref="LuminanceFilter"/> class.
        /// </summary>
        /// <param name="graphicsService">The graphics service.</param>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="graphicsService"/> is <see langword="null"/>.
        /// </exception>
        public LuminanceFilter(IGraphicsService graphicsService)
            : base(graphicsService)
        {
            // Load the effect once and cache handles to its parameters and passes.
            _effect = GraphicsService.Content.Load<Effect>("DigitalRune/PostProcessing/LuminanceFilter");
            _useGeometricMeanParameter = _effect.Parameters["UseGeometricMean"];
            _useAdaptionParameter = _effect.Parameters["UseAdaption"];
            _deltaTimeParameter = _effect.Parameters["DeltaTime"];
            _adaptionSpeedParameter = _effect.Parameters["AdaptionSpeed"];
            _lastLuminanceTextureParameter = _effect.Parameters["LastLuminanceTexture"];
            _textureParameter = _effect.Parameters["SourceTexture"];
            _sourceSizeParameter = _effect.Parameters["SourceSize"];
            _targetSizeParameter = _effect.Parameters["TargetSize"];
            _createPass = _effect.CurrentTechnique.Passes["Create"];
            _downsamplePass = _effect.CurrentTechnique.Passes["Downsample"];
            _finalPass = _effect.CurrentTechnique.Passes["Final"];

            // Shared helper post-processors.
            _downsampleFilter = PostProcessHelper.GetDownsampleFilter(graphicsService);
            _copyFilter = PostProcessHelper.GetCopyFilter(graphicsService);

            // Default settings.
            UseGeometricMean = true;
            UseAdaption = true;
            AdaptionSpeed = 0.02f;

            // The result is a single HDR pixel holding the scene luminance.
            DefaultTargetFormat = new RenderTargetFormat(1, 1, false, SurfaceFormat.HalfVector4, DepthFormat.None);
        }
示例#2
0
        //--------------------------------------------------------------
        /// <summary>
        /// Initializes a new instance of the <see cref="PostProcessor"/> class.
        /// </summary>
        /// <param name="graphicsService">The graphics service.</param>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="graphicsService"/> is <see langword="null"/>.
        /// </exception>
        protected PostProcessor(IGraphicsService graphicsService)
        {
            if (graphicsService == null)
            {
                throw new ArgumentNullException("graphicsService");
            }

            GraphicsService = graphicsService;

            // Set the backing field directly - the virtual OnEnabled callback
            // must not be invoked from within the constructor.
            _enabled = true;

            DefaultTargetFormat = new RenderTargetFormat(null, null, false, null, DepthFormat.None);
        }
示例#3
0
        /// <summary>
        /// Runs the three anti-aliasing passes over the source texture - edge
        /// detection, blend-weight calculation and neighborhood blending (the
        /// classic SMAA/MLAA pass sequence, judging by the pass and lookup
        /// texture names) - and writes the result into the context's render target.
        /// </summary>
        /// <param name="context">
        /// The render context providing the source texture, render target and viewport.
        /// </param>
        /// <exception cref="GraphicsException">
        /// The source texture has a floating-point format.
        /// </exception>
        protected override void OnProcess(RenderContext context)
        {
            // Floating-point source textures are rejected up front.
            if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
            {
                throw new GraphicsException("Source texture format must not be a floating-point format.");
            }

            var graphicsDevice = GraphicsService.GraphicsDevice;

            // The target width/height.
            int targetWidth  = context.Viewport.Width;
            int targetHeight = context.Viewport.Height;

            _pixelSizeParameter.SetValue(new Vector2(1.0f / targetWidth, 1.0f / targetHeight));
            _viewportSizeParameter.SetValue(new Vector2(targetWidth, targetHeight));

            // Cannot use render target from pool because we need a stencil buffer.
            // NOTE(review): The comment above looks stale - the code below obtains
            // pooled targets with DepthFormat.None; the stencil variant is commented
            // out. Confirm and update/remove the old comment.
            //var edgeRenderTarget = graphicsService.RenderTargetPool.Obtain2D(targetWidth, targetHeight, false, SurfaceFormat.Color, DepthFormat.Depth24Stencil8);
            //var blendRenderTarget = graphicsService.RenderTargetPool.Obtain2D(targetWidth, targetHeight, false, SurfaceFormat.Color, DepthFormat.Depth24Stencil8);
            var tempFormat        = new RenderTargetFormat(targetWidth, targetHeight, false, SurfaceFormat.Color, DepthFormat.None);
            var edgeRenderTarget  = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);
            var blendRenderTarget = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);

            // Pass 1: Detect edges (luma-based) into edgeRenderTarget.
            //graphicsDevice.DepthStencilState = _stencilStateReplace;
            graphicsDevice.SetRenderTarget(edgeRenderTarget);
            // Clear color + stencil buffer.
            //graphicsDevice.Clear(ClearOptions.Target | ClearOptions.Stencil, new Color(0, 0, 0, 0), 1, 0);
            graphicsDevice.Clear(ClearOptions.Target, new Color(0, 0, 0, 0), 1, 0);
            _sourceTextureParameter.SetValue(context.SourceTexture);
            _lumaEdgeDetectionPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Pass 2: Compute blend weights from the detected edges using the
            // area and search lookup textures.
            //graphicsDevice.DepthStencilState = _stencilStateKeep;
            graphicsDevice.SetRenderTarget(blendRenderTarget);
            //graphicsDevice.Clear(ClearOptions.Target, new Color(0, 0, 0, 0), 1, 1);
            _edgesTextureParameter.SetValue(edgeRenderTarget);
            _areaLookupTextureParameter.SetValue(_areaLookupTexture);
            _searchLookupTextureParameter.SetValue(_searchLookupTexture);
            _blendWeightCalculationPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Pass 3: Blend neighborhood pixels into the final render target.
            //graphicsDevice.DepthStencilState = DepthStencilState.None;
            graphicsDevice.SetRenderTarget(context.RenderTarget);
            graphicsDevice.Viewport = context.Viewport;
            _blendTextureParameter.SetValue(blendRenderTarget);
            _neighborhoodBlendingPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Release the texture references held by the effect parameters.
            _sourceTextureParameter.SetValue((Texture2D)null);
            _edgesTextureParameter.SetValue((Texture2D)null);
            _blendTextureParameter.SetValue((Texture2D)null);

            // Return the temporary targets to the pool.
            GraphicsService.RenderTargetPool.Recycle(blendRenderTarget);
            GraphicsService.RenderTargetPool.Recycle(edgeRenderTarget);
        }
示例#4
0
        //--------------------------------------------------------------
        #region Creation & Cleanup
        //--------------------------------------------------------------

        /// <summary>
        /// Initializes a new instance of the <see cref="PostProcessor"/> class.
        /// </summary>
        /// <param name="graphicsService">The graphics service.</param>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="graphicsService"/> is <see langword="null"/>.
        /// </exception>
        protected PostProcessor(IGraphicsService graphicsService)
        {
            if (graphicsService == null)
            {
                throw new ArgumentNullException("graphicsService");
            }

            GraphicsService = graphicsService;

            // Assign the backing field directly; the virtual OnEnabled callback
            // must not run from inside the constructor.
            _enabled = true;

            DefaultTargetFormat = new RenderTargetFormat(null, null, false, null, DepthFormat.None);
        }
        /// <summary>
        /// Applies unsharp masking: the source image is blurred via <c>Blur</c>,
        /// then the shader combines the original and the blurred image
        /// (controlled by <c>Sharpness</c>) into the final render target.
        /// </summary>
        /// <param name="context">The render context.</param>
        protected override void OnProcess(RenderContext context)
        {
            var graphicsDevice = GraphicsService.GraphicsDevice;

            // Remember the original context values; they are temporarily
            // redirected for the blur pass and restored afterwards.
            var source   = context.SourceTexture;
            var target   = context.RenderTarget;
            var viewport = context.Viewport;

            // Point filtering for floating-point textures, linear otherwise.
            // (Samplers 0 and 1 are used: source and blurred image.)
            if (TextureHelper.IsFloatingPointFormat(source.Format))
            {
                graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
                graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
            }
            else
            {
                graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
                graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;
            }

            // Blur source texture.
            var tempFormat   = new RenderTargetFormat(source.Width, source.Height, false, source.Format, DepthFormat.None);
            var blurredImage = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);

            // Redirect the blur output into the temporary target.
            context.RenderTarget = blurredImage;
            context.Viewport     = new Viewport(0, 0, blurredImage.Width, blurredImage.Height);
            Blur.Process(context);

            // Unsharp masking. Restore the original target/viewport first.
            context.RenderTarget = target;
            context.Viewport     = viewport;
            graphicsDevice.SetRenderTarget(context.RenderTarget);
            graphicsDevice.Viewport = context.Viewport;

            _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _sharpnessParameter.SetValue(Sharpness);
            _sourceTextureParameter.SetValue(source);
            _blurredTextureParameter.SetValue(blurredImage);
            _effect.CurrentTechnique.Passes[0].Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Clean-up: release texture references and recycle the temp target.
            _sourceTextureParameter.SetValue((Texture2D)null);
            _blurredTextureParameter.SetValue((Texture2D)null);
            GraphicsService.RenderTargetPool.Recycle(blurredImage);
        }
示例#6
0
        /// <summary>
        /// Performs the FXAA pass. If <c>ComputeLuminance</c> is set, a
        /// preprocessing pass first writes the image luminance into the alpha
        /// channel of an intermediate target.
        /// </summary>
        /// <param name="context">The render context.</param>
        protected override void OnProcess(RenderContext context)
        {
            var device = GraphicsService.GraphicsDevice;
            var pool = GraphicsService.RenderTargetPool;

            // Point filtering for floating-point source textures, linear otherwise.
            bool isFloatingPoint = TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format);
            device.SamplerStates[0] = isFloatingPoint ? SamplerState.PointClamp : SamplerState.LinearClamp;

            RenderTarget2D lumaTarget = null;
            if (ComputeLuminance)
            {
                // Render luminance of the source image into the alpha channel
                // of an intermediate target with the same size/format.
                var lumaFormat = new RenderTargetFormat(
                    context.SourceTexture.Width,
                    context.SourceTexture.Height,
                    false,
                    context.SourceTexture.Format,
                    DepthFormat.None);
                lumaTarget = pool.Obtain2D(lumaFormat);

                device.SetRenderTarget(lumaTarget);
                _viewportSizeParameter.SetValue(new Vector2(device.Viewport.Width, device.Viewport.Height));
                _sourceTextureParameter.SetValue(context.SourceTexture);
                _luminanceToAlphaPass.Apply();
                device.DrawFullScreenQuad();
            }

            // The actual FXAA pass renders into the final render target.
            device.SetRenderTarget(context.RenderTarget);
            device.Viewport = context.Viewport;
            _viewportSizeParameter.SetValue(new Vector2(device.Viewport.Width, device.Viewport.Height));
            _sourceTextureParameter.SetValue(ComputeLuminance ? lumaTarget : context.SourceTexture);
            _fxaaPass.Apply();
            device.DrawFullScreenQuad();

            // Release the texture reference and recycle the intermediate target
            // (null is passed when ComputeLuminance is off).
            _sourceTextureParameter.SetValue((Texture2D)null);
            pool.Recycle(lumaTarget);
        }
示例#7
0
        // Renders the sky. This method is the RenderCallback of the SceneCaptureNode.
        private void RenderSky(RenderContext context)
        {
            var device = _graphicsService.GraphicsDevice;
            var pool = _graphicsService.RenderTargetPool;

            // The callback must produce its result in this render target.
            var finalTarget = context.RenderTarget;

            // Reset render states.
            device.BlendState = BlendState.Opaque;
            device.DepthStencilState = DepthStencilState.Default;
            device.RasterizerState = RasterizerState.CullCounterClockwise;

            // Render into an intermediate HDR target with the same resolution
            // as the final target.
            var hdrFormat = new RenderTargetFormat(finalTarget)
            {
                SurfaceFormat = SurfaceFormat.HdrBlendable,
                DepthStencilFormat = DepthFormat.Depth24Stencil8
            };
            var hdrBuffer = pool.Obtain2D(hdrFormat);

            device.SetRenderTarget(hdrBuffer);
            context.RenderTarget = hdrBuffer;

            device.Clear(Color.Black);

            // Draw the sky nodes.
            _skyRenderer.Render(_skyGroupNode.Children, context);

            // Encode the HDR image as RGBM into the final (LDR) target.
            context.SourceTexture = hdrBuffer;
            context.RenderTarget = finalTarget;
            _colorEncoder.Process(context);
            context.SourceTexture = null;

            // Clean up.
            pool.Recycle(hdrBuffer);
            context.RenderTarget = finalTarget;
        }
        /// <summary>
        /// Renders the environment maps for the image-based lights.
        /// </summary>
        /// <remarks>
        /// This method uses the current DeferredGraphicsScreen to render new environment maps at
        /// runtime. The DeferredGraphicsScreen has a SceneCaptureRenderer which we can use to
        /// capture environment maps of the current scene.
        /// To capture new environment maps the flag _updateEnvironmentMaps must be set to true.
        /// When this flag is set, SceneCaptureNodes are added to the scene. When the graphics
        /// screen calls the SceneCaptureRenderer the next time, the new environment maps will be
        /// captured.
        /// The flag _updateEnvironmentMaps remains true until the new environment maps are available.
        /// This method checks the SceneCaptureNode.LastFrame property to check if new environment maps
        /// have been computed. Usually, the environment maps will be available in the next frame.
        /// (However, the XNA Game class can skip graphics rendering if the game is running slowly.
        /// Then we would have to wait more than 1 frame.)
        /// When environment maps are being rendered, the image-based lights are disabled to capture
        /// only the scene with ambient and directional lights. Dynamic objects are also disabled
        /// to capture only the static scene.
        /// </remarks>
        private void UpdateEnvironmentMaps()
        {
            // Nothing to do unless a capture has been requested.
            if (!_updateEnvironmentMaps)
            {
                return;
            }

            // One-time initializations:
            if (_sceneCaptureNodes[0] == null)
            {
                // Create cube maps and scene capture nodes.
                // (Note: A cube map size of 256 is enough for surfaces with a specular power
                // in the range [0, 200000].)
                for (int i = 0; i < _sceneCaptureNodes.Length; i++)
                {
                    var renderTargetCube = new RenderTargetCube(
                        GraphicsService.GraphicsDevice,
                        256,
                        true,
                        SurfaceFormat.Color,
                        DepthFormat.None);

                    var renderToTexture = new RenderToTexture {
                        Texture = renderTargetCube
                    };
                    // Perspective camera with 90° field of view and aspect ratio 1
                    // (one cube map face per capture direction).
                    var projection = new PerspectiveProjection();
                    projection.SetFieldOfView(ConstantsF.PiOver2, 1, 1, 100);
                    _sceneCaptureNodes[i] = new SceneCaptureNode(renderToTexture)
                    {
                        CameraNode = new CameraNode(new Camera(projection))
                        {
                            // Capture from the position of the associated light node.
                            PoseWorld = _lightNodes[i].PoseWorld,
                        },
                    };

                    _imageBasedLights[i].Texture = renderTargetCube;
                }

                // We use a ColorEncoder to encode a HDR image in a normal Color texture.
                _colorEncoder = new ColorEncoder(GraphicsService)
                {
                    SourceEncoding = ColorEncoding.Rgb,
                    TargetEncoding = ColorEncoding.Rgbm,
                };

                // The SceneCaptureRenderer has a render callback which defines what is rendered
                // into the scene capture render targets.
                _graphicsScreen.SceneCaptureRenderer.RenderCallback = context =>
                {
                    var graphicsDevice   = GraphicsService.GraphicsDevice;
                    var renderTargetPool = GraphicsService.RenderTargetPool;

                    // Get scene nodes which are visible by the current camera.
                    CustomSceneQuery sceneQuery = context.Scene.Query <CustomSceneQuery>(context.CameraNode, context);

                    // The final image has to be rendered into this render target.
                    var ldrTarget = context.RenderTarget;

                    // Use an intermediate HDR render target with the same resolution as the final target.
                    var format = new RenderTargetFormat(ldrTarget)
                    {
                        SurfaceFormat      = SurfaceFormat.HdrBlendable,
                        DepthStencilFormat = DepthFormat.Depth24Stencil8
                    };
                    var hdrTarget = renderTargetPool.Obtain2D(format);

                    graphicsDevice.SetRenderTarget(hdrTarget);
                    context.RenderTarget = hdrTarget;

                    // Render scene (without post-processing, without lens flares, no debug rendering, no reticle).
                    _graphicsScreen.RenderScene(sceneQuery, context, false, false, false, false);

                    // Convert the HDR image to RGBM image.
                    context.SourceTexture = hdrTarget;
                    context.RenderTarget  = ldrTarget;
                    _colorEncoder.Process(context);
                    context.SourceTexture = null;

                    // Clean up.
                    renderTargetPool.Recycle(hdrTarget);
                    context.RenderTarget = ldrTarget;
                };
            }

            if (_sceneCaptureNodes[0].Parent == null)
            {
                // Add the scene capture nodes to the scene.
                for (int i = 0; i < _sceneCaptureNodes.Length; i++)
                {
                    _graphicsScreen.Scene.Children.Add(_sceneCaptureNodes[i]);
                }

                // Remember the old time stamp of the nodes.
                _oldEnvironmentMapTimeStamp = _sceneCaptureNodes[0].LastFrame;

                // Disable all lights except ambient and directional lights.
                // We do not capture the image-based lights or any other lights (e.g. point lights)
                // in the cube map.
                foreach (var lightNode in _graphicsScreen.Scene.GetDescendants().OfType <LightNode>())
                {
                    lightNode.IsEnabled = (lightNode.Light is AmbientLight) || (lightNode.Light is DirectionalLight);
                }

                // Disable dynamic objects.
                foreach (var node in _graphicsScreen.Scene.GetDescendants())
                {
                    if (node is MeshNode || node is LodGroupNode)
                    {
                        if (!node.IsStatic)
                        {
                            node.IsEnabled = false;
                        }
                    }
                }
            }
            else
            {
                // The scene capture nodes are part of the scene. Check if they have been
                // updated.
                if (_sceneCaptureNodes[0].LastFrame != _oldEnvironmentMapTimeStamp)
                {
                    // We have new environment maps. Restore the normal scene.
                    for (int i = 0; i < _sceneCaptureNodes.Length; i++)
                    {
                        _graphicsScreen.Scene.Children.Remove(_sceneCaptureNodes[i]);
                    }

                    _updateEnvironmentMaps = false;

                    // Re-enable all lights ...
                    foreach (var lightNode in _graphicsScreen.Scene.GetDescendants().OfType <LightNode>())
                    {
                        lightNode.IsEnabled = true;
                    }

                    // ... and re-enable the dynamic mesh/LOD-group nodes that were
                    // disabled for the capture.
                    // NOTE(review): this re-enables every non-static mesh/LOD node,
                    // including any that were disabled for other reasons before the
                    // capture started - confirm this is acceptable.
                    foreach (var node in _graphicsScreen.Scene.GetDescendants())
                    {
                        if (node is MeshNode || node is LodGroupNode)
                        {
                            if (!node.IsStatic)
                            {
                                node.IsEnabled = true;
                            }
                        }
                    }
                }
            }
        }
示例#9
0
        //--------------------------------------------------------------
        /// <summary>
        /// Initializes a new instance of the <see cref="LuminanceFilter"/> class.
        /// </summary>
        /// <param name="graphicsService">The graphics service.</param>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="graphicsService"/> is <see langword="null"/>.
        /// </exception>
        public LuminanceFilter(IGraphicsService graphicsService)
            : base(graphicsService)
        {
            // Load the effect and look up parameter/pass handles once.
            _effect = GraphicsService.Content.Load<Effect>("DigitalRune/PostProcessing/LuminanceFilter");
            _useGeometricMeanParameter = _effect.Parameters["UseGeometricMean"];
            _useAdaptionParameter = _effect.Parameters["UseAdaption"];
            _deltaTimeParameter = _effect.Parameters["DeltaTime"];
            _adaptionSpeedParameter = _effect.Parameters["AdaptionSpeed"];
            _lastLuminanceTextureParameter = _effect.Parameters["LastLuminanceTexture"];
            _textureParameter = _effect.Parameters["SourceTexture"];
            _sourceSizeParameter = _effect.Parameters["SourceSize"];
            _targetSizeParameter = _effect.Parameters["TargetSize"];
            _createPass = _effect.CurrentTechnique.Passes["Create"];
            _downsamplePass = _effect.CurrentTechnique.Passes["Downsample"];
            _finalPass = _effect.CurrentTechnique.Passes["Final"];

            // Shared helper post-processors.
            _downsampleFilter = PostProcessHelper.GetDownsampleFilter(graphicsService);
            _copyFilter = PostProcessHelper.GetCopyFilter(graphicsService);

            // Default settings.
            UseGeometricMean = true;
            UseAdaption = true;
            AdaptionSpeed = 0.02f;

            // Result is a single HDR pixel (the average scene luminance).
            DefaultTargetFormat = new RenderTargetFormat(1, 1, false, SurfaceFormat.HalfVector4, DepthFormat.None);
        }
示例#10
0
        /// <summary>
        /// Executes the blur: computes the sample offsets for the current
        /// viewport, configures the effect and runs the required number of
        /// full-screen passes, ping-ponging between temporary render targets.
        /// </summary>
        /// <param name="context">The render context.</param>
        protected override void OnProcess(RenderContext context)
        {
            var graphicsDevice = GraphicsService.GraphicsDevice;
            var renderTargetPool = GraphicsService.RenderTargetPool;

            var viewport = context.Viewport;
            Vector2 size = new Vector2(viewport.Width, viewport.Height);

            // Choose suitable technique. We do not have a shader for each
            // sample count, so numberOfSamples may be rounded up.
            int numberOfSamples = NumberOfSamples;
            SetCurrentTechnique(ref numberOfSamples);

            // Apply current scale and texture size to offsets.
            for (int i = 0; i < NumberOfSamples; i++)
            {
                _horizontalOffsets[i].X = Offsets[i].X * Scale / size.X;
                _horizontalOffsets[i].Y = Offsets[i].Y * Scale / size.Y;
            }

            // Zero out the unused samples (e.g. if we want 11 samples but the
            // next best shader supports only 15 samples).
            for (int i = NumberOfSamples; i < numberOfSamples; i++)
            {
                _horizontalOffsets[i].X = 0;
                _horizontalOffsets[i].Y = 0;
                Weights[i] = 0;
            }

            // For a separable filter, initialize _verticalOffsets as well.
            if (IsSeparable)
            {
                if (_verticalOffsets == null)
                {
                    _verticalOffsets = new Vector2[MaxNumberOfSamples];
                }

                float aspectRatio = size.X / size.Y;
                for (int i = 0; i < NumberOfSamples; i++)
                {
                    _verticalOffsets[i].X = _horizontalOffsets[i].Y * aspectRatio;
                    _verticalOffsets[i].Y = _horizontalOffsets[i].X * aspectRatio;
                }
                for (int i = NumberOfSamples; i < numberOfSamples; i++)
                {
                    _verticalOffsets[i].X = 0;
                    _verticalOffsets[i].Y = 0;
                }
            }

            // Use hardware filtering if possible.
            if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
            {
                graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
            }
            else
            {
                graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
            }

            bool isAnisotropic = IsAnisotropic;
            bool isBilateral = IsBilateral;
            if (FilterInLogSpace)
            {
                // Anisotropic and bilateral filtering in log-space is not implemented.
                isAnisotropic = false;
                isBilateral = false;
            }
            else
            {
                if (isAnisotropic || isBilateral)
                {
                    // Depth-aware filtering needs the camera and the depth buffer.
                    context.ThrowIfCameraMissing();

                    var cameraNode = context.CameraNode;
                    var projection = cameraNode.Camera.Projection;
                    float far = projection.Far;

                    GraphicsHelper.GetFrustumFarCorners(cameraNode.Camera.Projection, _frustumFarCorners);
                    _parameterFrustumCorners.SetValue(_frustumFarCorners);

                    _parameterBlurParameters0.SetValue(new Vector4(
                        far,
                        viewport.AspectRatio,
                        1.0f / (EdgeSoftness + 0.001f) * far,
                        DepthScaling));

                    context.ThrowIfGBuffer0Missing();
                    Texture2D depthBuffer = context.GBuffer0;
                    if (viewport.Width < depthBuffer.Width && viewport.Height < depthBuffer.Height)
                    {
                        // Use half-resolution depth buffer if one is available.
                        object obj;
                        if (context.Data.TryGetValue(RenderContextKeys.DepthBufferHalf, out obj))
                        {
                            var depthBufferHalf = obj as Texture2D;
                            if (depthBufferHalf != null)
                            {
                                depthBuffer = depthBufferHalf;
                            }
                        }
                    }

                    _parameterGBuffer0.SetValue(depthBuffer);
                }
            }

            _parameterViewportSize.SetValue(size);
            _parameterWeights.SetValue(Weights);

            // A separable filter needs two passes (horizontal + vertical) per pass.
            int effectiveNumberOfPasses = IsSeparable ? NumberOfPasses * 2 : NumberOfPasses;

            // Up to two temporary render targets for ping-ponging.
            var tempFormat = new RenderTargetFormat((int)size.X, (int)size.Y, false, context.SourceTexture.Format, DepthFormat.None);
            var tempTarget0 = (effectiveNumberOfPasses > 1)
                              ? renderTargetPool.Obtain2D(tempFormat)
                              : null;
            var tempTarget1 = (effectiveNumberOfPasses > 2)
                              ? renderTargetPool.Obtain2D(tempFormat)
                              : null;

            for (int i = 0; i < effectiveNumberOfPasses; i++)
            {
                // The last pass renders into the final render target; all other
                // passes ping-pong between the two temporary targets.
                if (i == effectiveNumberOfPasses - 1)
                {
                    graphicsDevice.SetRenderTarget(context.RenderTarget);
                    graphicsDevice.Viewport = viewport;
                }
                else if (i % 2 == 0)
                {
                    graphicsDevice.SetRenderTarget(tempTarget0);
                }
                else
                {
                    graphicsDevice.SetRenderTarget(tempTarget1);
                }

                // The first pass reads the source texture; later passes read the
                // output of the previous pass.
                if (i == 0)
                {
                    _parameterSourceTexture.SetValue(context.SourceTexture);
                }
                else if (i % 2 == 0)
                {
                    _parameterSourceTexture.SetValue(tempTarget1);
                }
                else
                {
                    _parameterSourceTexture.SetValue(tempTarget0);
                }

                // Odd separable passes use the vertical offsets.
                // (The anisotropic filter only reads Offsets[i].x.)
                Vector2[] offsets;
                if (IsSeparable && i % 2 != 0 && !isAnisotropic)
                {
                    offsets = _verticalOffsets;
                }
                else
                {
                    offsets = _horizontalOffsets;
                }

                _parameterOffsets.SetValue(offsets);

                int passIndex = 0;
                if (isAnisotropic)
                {
                    passIndex = i % 2;
                }

                _effect.CurrentTechnique.Passes[passIndex].Apply();
                graphicsDevice.DrawFullScreenQuad();
            }

            _parameterSourceTexture.SetValue((Texture2D)null);

            renderTargetPool.Recycle(tempTarget0);
            renderTargetPool.Recycle(tempTarget1);
        }
示例#11
0
        /// <summary>
        /// Renders screen-space ambient occlusion: an AO creation pass followed
        /// by a horizontal and a vertical blur pass. Optionally combines the
        /// occlusion with the source image in the final pass.
        /// </summary>
        /// <param name="context">The render context (camera and GBuffer0 required).</param>
        protected override void OnProcess(RenderContext context)
        {
            // The AO shader needs the camera projection.
            context.ThrowIfCameraMissing();

            var graphicsDevice   = GraphicsService.GraphicsDevice;
            var renderTargetPool = GraphicsService.RenderTargetPool;
            var cameraNode       = context.CameraNode;

            var source   = context.SourceTexture;
            var target   = context.RenderTarget;
            var viewport = context.Viewport;

            Projection projection = cameraNode.Camera.Projection;
            Matrix     projMatrix = projection;
            float      near       = projection.Near;
            float      far        = projection.Far;

            // Frustum extents at the near plane, normalized by the near distance.
            _frustumInfoParameter.SetValue(new Vector4(
                                               projection.Left / near,
                                               projection.Top / near,
                                               (projection.Right - projection.Left) / near,
                                               (projection.Bottom - projection.Top) / near));

            _numberOfAOSamplesParameter.SetValue(NumberOfSamples);

            // The height of a 1 unit object 1 unit in front of the camera.
            // (Compute 0.5 unit multiply by 2 and divide by 2 to convert from [-1, 1] to [0, 1] range.)
            float projectionScale =
                projMatrix.TransformPosition(new Vector3(0, 0.5f, -1)).Y
                - projMatrix.TransformPosition(new Vector3(0, 0, -1)).Y;

            // Pack the AO settings into three Vector4 effect parameters.
            _aoParameters0.SetValue(new Vector4(
                                        projectionScale,
                                        Radius,
                                        Strength / (float)Math.Pow(Radius, 6),
                                        Bias));

            _aoParameters1.SetValue(new Vector4(
                                        viewport.Width,
                                        viewport.Height,
                                        far,
                                        MaxOcclusion));

            _aoParameters2.SetValue(new Vector4(
                                        SampleDistribution,
                                        1.0f / (EdgeSoftness + 0.001f) * far,
                                        BlurScale,
                                        MinBias));

            // GBuffer0 provides the depth information.
            context.ThrowIfGBuffer0Missing();
            _gBuffer0Parameter.SetValue(context.GBuffer0);

            //var view = cameraNode.View;
            //_viewParameter.SetValue((Matrix)view);
            //_gBuffer1Parameter.SetValue(context.GBuffer1);

            // We use two temporary render targets.
            var format = new RenderTargetFormat(
                context.Viewport.Width,
                context.Viewport.Height,
                false,
                SurfaceFormat.Color,
                DepthFormat.None);

            var tempTarget0 = renderTargetPool.Obtain2D(format);
            var tempTarget1 = renderTargetPool.Obtain2D(format);

            // Create SSAO.
            graphicsDevice.SetRenderTarget(tempTarget0);
            _createAOPass.Apply();

            // Clear to white (= no occlusion) before drawing.
            graphicsDevice.Clear(new Color(1.0f, 1.0f, 1.0f, 1.0f));
            graphicsDevice.DrawFullScreenQuad();

            // Horizontal blur.
            graphicsDevice.SetRenderTarget(tempTarget1);
            _occlusionTextureParameter.SetValue(tempTarget0);
            _blurHorizontalPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Vertical blur - rendered directly into the final target, either as
            // plain occlusion or combined with the source image.
            graphicsDevice.SetRenderTarget(target);
            graphicsDevice.Viewport = viewport;
            _occlusionTextureParameter.SetValue(tempTarget1);
            if (!CombineWithSource)
            {
                _blurVerticalPass.Apply();
            }
            else
            {
                // _sourceTextureParameter can be null, e.g. if the effect variant
                // has no such parameter - hence the null checks.
                if (_sourceTextureParameter != null)
                {
                    _sourceTextureParameter.SetValue(source);
                }

                // Point filtering for floating-point sources, linear otherwise.
                if (TextureHelper.IsFloatingPointFormat(source.Format))
                {
                    graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
                }
                else
                {
                    graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
                }

                _blurVerticalAndCombinePass.Apply();
            }

            graphicsDevice.DrawFullScreenQuad();

            // Clean up: recycle temp targets, release texture references, and
            // restore the context values.
            renderTargetPool.Recycle(tempTarget0);
            renderTargetPool.Recycle(tempTarget1);
            if (_sourceTextureParameter != null)
            {
                _sourceTextureParameter.SetValue((Texture2D)null);
            }
            _occlusionTextureParameter.SetValue((Texture2D)null);
            _gBuffer0Parameter.SetValue((Texture2D)null);
            //_gBuffer1Parameter.SetValue((Texture2D)null);
            context.SourceTexture = source;
            context.RenderTarget  = target;
            context.Viewport      = viewport;
        }
示例#12
0
        /// <summary>
        /// Renders all queued shadow-mask jobs. Jobs are batched by shadow mask index
        /// (stored in the upper 16 bits of the sort key) and by renderer, so each shadow
        /// mask is filled completely before the blur/upsample filter is applied to it.
        /// </summary>
        /// <param name="context">The render context.</param>
        /// <param name="order">The render order (passed through to the job renderers).</param>
        internal override void ProcessJobs(RenderContext context, RenderOrder order)
        {
            var graphicsDevice = _graphicsService.GraphicsDevice;
            var savedRenderState = new RenderStateSnapshot(graphicsDevice);
            var target = context.RenderTarget;
            var viewport = context.Viewport;

            Debug.Assert(_shadowMasks.Length > 0);
            Debug.Assert(_shadowMasks[0] != null);

            RenderTarget2D lowResTarget = null;
            if (UseHalfResolution && Numeric.IsGreater(UpsampleDepthSensitivity, 0))
            {
                // Half-res rendering with upsampling.
                var format = new RenderTargetFormat(_shadowMasks[0]);
                format.Width /= 2;
                format.Height /= 2;
                lowResTarget = _graphicsService.RenderTargetPool.Obtain2D(format);
            }

            int index = 0;
            var jobs = Jobs.Array;
            int jobCount = Jobs.Count;
            int lastShadowMaskIndex = -1;
            while (index < jobCount)
            {
                int shadowMaskIndex = (int)(jobs[index].SortKey >> 16);
                var renderer = jobs[index].Renderer;

                // Find end of current batch: all consecutive jobs with the same shadow
                // mask index and the same renderer.
                // BUGFIX: Compare against the CURRENT batch's shadowMaskIndex, not
                // lastShadowMaskIndex (the previous mask); comparing against the previous
                // mask split/merged batches incorrectly.
                int endIndexExclusive = index + 1;
                while (endIndexExclusive < jobCount)
                {
                    if ((int)(jobs[endIndexExclusive].SortKey >> 16) != shadowMaskIndex
                        || jobs[endIndexExclusive].Renderer != renderer)
                    {
                        break;
                    }

                    endIndexExclusive++;
                }

                // Restore the render state. (The integrated scene node renderers properly
                // restore the render state, but third-party renderers might mess it up.)
                if (index > 0)
                    savedRenderState.Restore();

                if (shadowMaskIndex != lastShadowMaskIndex)
                {
                    // Done with current shadow mask. Apply filter.
                    if (lastShadowMaskIndex >= 0)
                        PostProcess(context, context.RenderTarget, _shadowMasks[lastShadowMaskIndex]);

                    // Switch to next shadow mask.
                    lastShadowMaskIndex = shadowMaskIndex;

                    var shadowMask = lowResTarget ?? _shadowMasks[shadowMaskIndex];

                    // Set device render target and clear it to white (= no shadow).
                    graphicsDevice.SetRenderTarget(shadowMask);
                    context.RenderTarget = shadowMask;
                    context.Viewport = graphicsDevice.Viewport;
                    graphicsDevice.Clear(Color.White);
                }

                // Submit batch to renderer.
                // (Use Accessor to expose current batch as IList<SceneNode>.)
                JobsAccessor.Set(Jobs, index, endIndexExclusive);
                renderer.Render(JobsAccessor, context, order);
                JobsAccessor.Reset();

                index = endIndexExclusive;
            }

            // Done with last shadow mask. Apply filter.
            // (Guard: if the job list was empty, no shadow mask was rendered and
            // lastShadowMaskIndex is still -1.)
            if (lastShadowMaskIndex >= 0)
                PostProcess(context, context.RenderTarget, _shadowMasks[lastShadowMaskIndex]);

            savedRenderState.Restore();
            graphicsDevice.ResetTextures();
            graphicsDevice.SetRenderTarget(null);
            context.RenderTarget = target;
            context.Viewport = viewport;

            // Recycle accepts null, so this is safe when full-res rendering was used.
            _graphicsService.RenderTargetPool.Recycle(lowResTarget);
        }
示例#13
0
        /// <summary>
        /// Renders volumetric light shafts ("god rays") in four steps:
        /// mask occluders, downsample, radial blur towards the light, combine with scene.
        /// </summary>
        /// <param name="context">The render context.</param>
        protected override void OnProcess(RenderContext context)
        {
            context.ThrowIfCameraMissing();
            context.ThrowIfGBuffer0Missing();

            var graphicsDevice = GraphicsService.GraphicsDevice;
            var renderTargetPool = GraphicsService.RenderTargetPool;

            var sourceTexture = context.SourceTexture;
            var originalTarget = context.RenderTarget;
            var originalViewport = context.Viewport;

            // Get temporary render targets.
            var sourceSize = new Vector2F(sourceTexture.Width, sourceTexture.Height);
            // Floating-point formats cannot be linearly filtered on all platforms.
            bool isFloatingPoint = TextureHelper.IsFloatingPointFormat(sourceTexture.Format);
            var sourceSampler = isFloatingPoint ? SamplerState.PointClamp : SamplerState.LinearClamp;

            var sceneFormat = new RenderTargetFormat(sourceTexture.Width, sourceTexture.Height, false, sourceTexture.Format, DepthFormat.None);
            var maskedScene = renderTargetPool.Obtain2D(sceneFormat);

            var rayFormat = new RenderTargetFormat(
                Math.Max(1, (int)(sourceSize.X / DownsampleFactor)),
                Math.Max(1, (int)(sourceSize.Y / DownsampleFactor)),
                false,
                sourceTexture.Format,
                DepthFormat.None);
            var rayImage0 = renderTargetPool.Obtain2D(rayFormat);
            var rayImage1 = renderTargetPool.Obtain2D(rayFormat);

            // Get view and view-projection transforms.
            var cameraNode = context.CameraNode;
            Matrix44F projection = cameraNode.Camera.Projection.ToMatrix44F();
            Matrix44F view = cameraNode.View;
            Matrix44F viewProjection = projection * view;

            // Simply place the light source "far away" in the opposite light ray direction.
            Vector4F lightPositionWorld = new Vector4F(-LightDirection * 10000, 1);

            // World space --> clip space.
            Vector4F lightPositionProj = viewProjection * lightPositionWorld;
            Vector3F lightPositionClip = Vector4F.HomogeneousDivide(lightPositionProj);

            // Clip space [-1, 1] --> texture space [0, 1] (y is flipped).
            Vector2 lightPosition = new Vector2(lightPositionClip.X * 0.5f + 0.5f, -lightPositionClip.Y * 0.5f + 0.5f);

            // dot²(forward, -LightDirection) acts as a smooth S-shaped attenuation curve
            // which fades out the god rays when looking orthogonal to or away from the sun.
            var lightDirectionView = view.TransformDirection(LightDirection);
            float z = Math.Max(0, lightDirectionView.Z);
            float attenuation = z * z;

            // Common effect parameters.
            _parameters0Parameter.SetValue(new Vector4(lightPosition.X, lightPosition.Y, LightRadius * LightRadius, Scale));
            _parameters1Parameter.SetValue(new Vector2(Softness, graphicsDevice.Viewport.AspectRatio));
            _intensityParameter.SetValue((Vector3)Intensity * attenuation);
            _numberOfSamplesParameter.SetValue(NumberOfSamples);
            _gBuffer0Parameter.SetValue(context.GBuffer0);

            // Step 1: Create a scene image where occluders are black.
            graphicsDevice.SetRenderTarget(maskedScene);
            _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _sourceTextureParameter.SetValue(sourceTexture);
            graphicsDevice.SamplerStates[0] = sourceSampler;
            graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;   // G-Buffer 0.
            _createMaskPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Step 2: Downsample the masked image.
            context.SourceTexture = maskedScene;
            context.RenderTarget = rayImage0;
            context.Viewport = new Viewport(0, 0, rayImage0.Width, rayImage0.Height);
            _downsampleFilter.Process(context);

            // Step 3: Blur towards the light position, ping-ponging between ray images.
            _viewportSizeParameter.SetValue(new Vector2(context.Viewport.Width, context.Viewport.Height));
            graphicsDevice.SamplerStates[0] = sourceSampler;
            for (int pass = 0; pass < NumberOfPasses; pass++)
            {
                graphicsDevice.SetRenderTarget(rayImage1);
                _sourceTextureParameter.SetValue(rayImage0);
                _blurPass.Apply();
                graphicsDevice.DrawFullScreenQuad();

                // After the swap, the current result is in rayImage0 again.
                MathHelper.Swap(ref rayImage0, ref rayImage1);
            }

            // Step 4: Combine light shaft image with the scene.
            graphicsDevice.SetRenderTarget(originalTarget);
            graphicsDevice.Viewport = originalViewport;
            _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _sourceTextureParameter.SetValue(sourceTexture);
            _rayTextureParameter.SetValue(rayImage0);
            graphicsDevice.SamplerStates[0] = sourceSampler;
            graphicsDevice.SamplerStates[1] = sourceSampler;
            _combinePass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Clean-up: unbind textures, recycle temporaries, restore the context.
            _sourceTextureParameter.SetValue((Texture2D)null);
            _gBuffer0Parameter.SetValue((Texture2D)null);
            _rayTextureParameter.SetValue((Texture2D)null);
            renderTargetPool.Recycle(maskedScene);
            renderTargetPool.Recycle(rayImage0);
            renderTargetPool.Recycle(rayImage1);
            context.SourceTexture = sourceTexture;
            context.RenderTarget = originalTarget;
            context.Viewport = originalViewport;
        }
示例#14
0
        /// <summary>
        /// Applies the effect iteratively: the first pass reads the source texture, later
        /// passes ping-pong between two temporary targets, and the final pass writes
        /// directly into the context's render target.
        /// </summary>
        /// <param name="context">The render context.</param>
        protected override void OnProcess(RenderContext context)
        {
            var graphicsDevice = GraphicsService.GraphicsDevice;
            var renderTargetPool = GraphicsService.RenderTargetPool;

            // The target width/height.
            int targetWidth = context.Viewport.Width;
            int targetHeight = context.Viewport.Height;

            var tempFormat = new RenderTargetFormat(targetWidth, targetHeight, false, context.SourceTexture.Format, DepthFormat.None);
            RenderTarget2D pingTarget = renderTargetPool.Obtain2D(tempFormat);
            RenderTarget2D pongTarget = renderTargetPool.Obtain2D(tempFormat);

            // Floating-point formats cannot be linearly filtered on all platforms.
            graphicsDevice.SamplerStates[0] = TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format)
                ? SamplerState.PointClamp
                : SamplerState.LinearClamp;

            _viewportSizeParameter.SetValue(new Vector2(targetWidth, targetHeight));
            _numberOfSamplesParameter.SetValue(NumberOfSamples / 2);

            for (int i = 0; i < NumberOfPasses; i++)
            {
                // Output: final target on the last pass, otherwise alternate ping/pong.
                if (i == NumberOfPasses - 1)
                {
                    graphicsDevice.SetRenderTarget(context.RenderTarget);
                    graphicsDevice.Viewport = context.Viewport;
                }
                else
                {
                    graphicsDevice.SetRenderTarget((i % 2 == 0) ? pingTarget : pongTarget);
                }

                // Input: original source on the first pass, otherwise the output of
                // the previous pass (the opposite ping/pong target).
                if (i == 0)
                    _sourceTextureParameter.SetValue(context.SourceTexture);
                else
                    _sourceTextureParameter.SetValue((i % 2 == 0) ? pongTarget : pingTarget);

                _iterationParameter.SetValue(i);
                _effect.CurrentTechnique.Passes[0].Apply();
                graphicsDevice.DrawFullScreenQuad();
            }

            // Clean-up
            _sourceTextureParameter.SetValue((Texture2D)null);

            renderTargetPool.Recycle(pingTarget);
            renderTargetPool.Recycle(pongTarget);
        }
示例#15
0
        /// <summary>
        /// Renders the scene: lazily (re)builds the skybox cube map from the procedural
        /// sky nodes, then renders the skybox into an HDR target and tone-maps it to the
        /// back buffer.
        /// </summary>
        /// <param name="context">The render context.</param>
        private void Render(RenderContext context)
        {
            var originalRenderTarget = context.RenderTarget;
            var originalViewport     = context.Viewport;

            var graphicsDevice = context.GraphicsService.GraphicsDevice;

            if (_updateCubeMap)
            {
                _updateCubeMap = false;

                _cloudMapRenderer.Render(_skyNodes, context);

                // Create a camera with 90° FOV (ConstantsF.PiOver2) for a single cube map
                // face. (Each of the 6 faces covers exactly a quarter turn; the earlier
                // comment claiming 45° was wrong.)
                var perspectiveProjection = new PerspectiveProjection();
                perspectiveProjection.SetFieldOfView(ConstantsF.PiOver2, 1, 1, 100);
                context.CameraNode = new CameraNode(new Camera(perspectiveProjection));

                var size      = _skybox.Texture.Size;
                var hdrFormat = new RenderTargetFormat(size, size, false, SurfaceFormat.HdrBlendable, DepthFormat.None);
                var hdrTarget = context.GraphicsService.RenderTargetPool.Obtain2D(hdrFormat);
                var ldrFormat = new RenderTargetFormat(size, size, false, SurfaceFormat.Color, DepthFormat.None);
                var ldrTarget = context.GraphicsService.RenderTargetPool.Obtain2D(ldrFormat);

                var spriteBatch = GraphicsService.GetSpriteBatch();
                for (int side = 0; side < 6; side++)
                {
                    // Rotate camera to face the current cube map face.
                    var cubeMapFace = (CubeMapFace)side;
                    context.CameraNode.View = Matrix44F.CreateLookAt(
                        new Vector3F(),
                        GraphicsHelper.GetCubeMapForwardDirection(cubeMapFace),
                        GraphicsHelper.GetCubeMapUpDirection(cubeMapFace));

                    // Render sky into HDR render target.
                    graphicsDevice.SetRenderTarget(hdrTarget);
                    context.RenderTarget = hdrTarget;
                    context.Viewport     = graphicsDevice.Viewport;
                    graphicsDevice.Clear(Color.Black);
                    _skyRenderer.Render(_skyNodes, context);

                    graphicsDevice.BlendState = BlendState.Opaque;

                    // Convert HDR to RGBM (LDR encoding that fits into a Color target).
                    context.SourceTexture = hdrTarget;
                    context.RenderTarget  = ldrTarget;
                    _colorEncoder.Process(context);
                    context.SourceTexture = null;

                    // Copy RGBM texture into cube map face.
                    graphicsDevice.SetRenderTarget((RenderTargetCube)_skybox.Texture, cubeMapFace);
                    spriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque, null, null, null);
                    spriteBatch.Draw(ldrTarget, new Vector2(0, 0), Color.White);
                    spriteBatch.End();
                }

                context.GraphicsService.RenderTargetPool.Recycle(ldrTarget);
                context.GraphicsService.RenderTargetPool.Recycle(hdrTarget);
            }

            // Reset common render state before drawing the skybox.
            graphicsDevice.BlendState        = BlendState.Opaque;
            graphicsDevice.DepthStencilState = DepthStencilState.Default;
            graphicsDevice.RasterizerState   = RasterizerState.CullCounterClockwise;

            context.CameraNode = _cameraObject.CameraNode;

            // Render the skybox into an HDR target matching the back buffer size.
            var tempFormat = new RenderTargetFormat(originalRenderTarget);

            tempFormat.SurfaceFormat = SurfaceFormat.HdrBlendable;
            var tempTarget = context.GraphicsService.RenderTargetPool.Obtain2D(tempFormat);

            graphicsDevice.SetRenderTarget(tempTarget);
            graphicsDevice.Viewport = originalViewport;
            context.RenderTarget    = tempTarget;
            context.Viewport        = originalViewport;

            _skyRenderer.Render(_skybox, context);

            // Tone-map the HDR image into the original render target.
            context.SourceTexture = tempTarget;
            context.RenderTarget  = originalRenderTarget;
            _hdrFilter.Process(context);
            context.SourceTexture = null;

            context.GraphicsService.RenderTargetPool.Recycle(tempTarget);

            RenderDebugInfo(context);

            context.CameraNode = null;
        }
示例#16
0
        /// <summary>
        /// Renders mesh nodes, making the current back buffer content available as
        /// <see cref="RenderContext.SourceTexture"/> for nodes whose effects sample it.
        /// Nodes are processed in batches: each batch starts at a node that requires the
        /// source texture (which triggers a back-buffer copy) and extends up to, but not
        /// including, the next such node.
        /// </summary>
        public override void Render(IList <SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            var graphicsService  = context.GraphicsService;
            var renderTargetPool = graphicsService.RenderTargetPool;
            var graphicsDevice   = graphicsService.GraphicsDevice;

            // Get a shared RebuildZBufferRenderer which was added by the graphics screen.
            var rebuildZBufferRenderer = (RebuildZBufferRenderer)context.Data[RenderContextKeys.RebuildZBufferRenderer];

            // We only support a render order of "user defined". This is always the case
            // if this renderer is added to a SceneRenderer. The SceneRenderer does the sorting.
            Debug.Assert(order == RenderOrder.UserDefined);

            // This renderer assumes that the current render target is an off-screen render target.
            Debug.Assert(context.RenderTarget != null);

            graphicsDevice.ResetTextures();

            // Remember the format of the current render target.
            var backBufferFormat = new RenderTargetFormat(context.RenderTarget);

            // In the loop below we will use the context.SourceTexture property.
            // Remember the original source texture.
            var originalSourceTexture = context.SourceTexture;

            context.SourceTexture = null;
            for (int i = 0; i < nodes.Count; i++)
            {
                var node = (MeshNode)nodes[i];

                // Check if the next node wants to sample from the back buffer.
                if (RequiresSourceTexture(node, context))
                {
                    // The effect of the node wants to sample from the "SourceTexture".
                    // Per default, DigitalRune Graphics uses a delegate effect parameter
                    // binding to set the "SourceTexture" parameters to the
                    // RenderContext.SourceTexture value. However, this property is usually
                    // null. We need to manually set RenderContext.SourceTexture to the
                    // current back buffer render target. Since, we cannot read from this
                    // render target and write to this render target at the same time,
                    // we have to copy it.

                    context.SourceTexture = context.RenderTarget;

                    // Set a new render target and copy the content of the lastBackBuffer
                    // and the depth buffer.
                    context.RenderTarget = renderTargetPool.Obtain2D(backBufferFormat);
                    graphicsDevice.SetRenderTarget(context.RenderTarget);
                    graphicsDevice.Viewport = context.Viewport;
                    rebuildZBufferRenderer.Render(context, context.SourceTexture);
                }

                // Add current node to a temporary list.
                _tempList.Add(node);

                // Add all following nodes until another node wants to sample from the
                // back buffer. (Note: i is advanced here too, so the outer loop's i++
                // lands exactly on the node that stopped this inner loop.)
                for (int j = i + 1; j < nodes.Count; j++)
                {
                    node = (MeshNode)nodes[j];

                    if (RequiresSourceTexture(node, context))
                    {
                        break;
                    }

                    _tempList.Add(node);
                    i++;
                }

                // Render nodes.
                _meshRenderer.Render(_tempList, context);

                // Return the back-buffer copy to the pool. (SourceTexture is null here
                // if no node in this batch required it; Recycle is assumed to accept
                // that — NOTE(review): confirm the Recycle overload tolerates the
                // SourceTexture's static type.)
                renderTargetPool.Recycle(context.SourceTexture);
                context.SourceTexture = null;

                _tempList.Clear();
            }

            // Restore original render context.
            context.SourceTexture = originalSourceTexture;
        }
示例#17
0
        /// <summary>
        /// Temporal blur: blends the current frame with the blurred result of the
        /// previous frame. The blurred result is stored per camera in
        /// <c>CameraNode.ViewDependentData</c> and reused next frame.
        /// </summary>
        /// <param name="context">The render context.</param>
        protected override void OnProcess(RenderContext context)
        {
            context.ThrowIfCameraMissing();

            var graphicsDevice = GraphicsService.GraphicsDevice;
            var sourceTexture = context.SourceTexture;
            var originalTarget = context.RenderTarget;
            var originalViewport = context.Viewport;

            // Get a temporary render target for the new blurred scene.
            var blurredFormat = new RenderTargetFormat(sourceTexture.Width, sourceTexture.Height, false, sourceTexture.Format, DepthFormat.None);
            RenderTarget2D blurredScene = GraphicsService.RenderTargetPool.Obtain2D(blurredFormat);

            // Floating-point formats cannot be linearly filtered on all platforms.
            var samplerState = TextureHelper.IsFloatingPointFormat(sourceTexture.Format)
                ? SamplerState.PointClamp
                : SamplerState.LinearClamp;
            graphicsDevice.SamplerStates[0] = samplerState;
            graphicsDevice.SamplerStates[1] = samplerState;

            context.RenderTarget = blurredScene;
            context.Viewport     = new Viewport(0, 0, blurredScene.Width, blurredScene.Height);

            // Get (or lazily create) the view-dependent data stored in the camera node.
            var cameraNode = context.CameraNode;
            object cached;
            cameraNode.ViewDependentData.TryGetValue(this, out cached);
            var data = cached as ViewDependentData;
            if (data == null)
            {
                data = new ViewDependentData(GraphicsService);
                cameraNode.ViewDependentData[this] = data;
            }

            if (data.LastBlurredScene == null)
            {
                // First frame: simply remember the current source for the next frame.
                _copyFilter.Process(context);
            }
            else
            {
                // Blend the current source with the last blurred scene.
                graphicsDevice.SetRenderTarget(blurredScene);

                _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
                _strengthParameter.SetValue(Strength);
                _sourceTextureParameter.SetValue(sourceTexture);
                _lastSourceTextureParameter.SetValue(data.LastBlurredScene);
                _effect.CurrentTechnique.Passes[0].Apply();
                graphicsDevice.DrawFullScreenQuad();
            }

            // Copy the blurred scene into the actual target.
            context.SourceTexture = blurredScene;
            context.RenderTarget  = originalTarget;
            context.Viewport      = originalViewport;
            _copyFilter.Process(context);

            // Recycle the old blurred scene and keep the new one (switch render targets).
            GraphicsService.RenderTargetPool.Recycle(data.LastBlurredScene);
            data.LastBlurredScene = blurredScene;

            _sourceTextureParameter.SetValue((Texture2D)null);
            _lastSourceTextureParameter.SetValue((Texture2D)null);

            // Restore original context.
            context.SourceTexture = sourceTexture;
        }
示例#18
0
        //--------------------------------------------------------------
        /// <summary>
        /// Renders all lights into the two light buffers (accumulated diffuse and
        /// specular intensity). Optionally renders an SSAO texture first and darkens the
        /// light buffers with it.
        /// </summary>
        /// <param name="lights">The light nodes to render.</param>
        /// <param name="context">The render context.</param>
        public void Render(IList<SceneNode> lights, RenderContext context)
        {
            var graphicsService = context.GraphicsService;
            var graphicsDevice = graphicsService.GraphicsDevice;
            var renderTargetPool = graphicsService.RenderTargetPool;

            var originalTarget = context.RenderTarget;
            var originalViewport = context.Viewport;
            var width = originalViewport.Width;
            var height = originalViewport.Height;

            RenderTarget2D aoRenderTarget = null;
            if (_ssaoFilter != null)
            {
                // Render ambient occlusion info into a (possibly downsampled) render target.
                var aoFormat = new RenderTargetFormat(
                    width / _ssaoDownsampleFactor,
                    height / _ssaoDownsampleFactor,
                    false,
                    SurfaceFormat.Color,
                    DepthFormat.None);
                aoRenderTarget = renderTargetPool.Obtain2D(aoFormat);

                // PostProcessors require that context.SourceTexture is set. But since
                // _ssaoFilter.CombineWithSource is set to false, the SourceTexture is not
                // used and we can set it to anything except null.
                context.SourceTexture = aoRenderTarget;
                context.RenderTarget = aoRenderTarget;
                context.Viewport = new Viewport(0, 0, aoRenderTarget.Width, aoRenderTarget.Height);
                _ssaoFilter.Process(context);
                context.SourceTexture = null;
            }

            // The light buffer consists of two full-screen render targets into which we
            // render the accumulated diffuse and specular light intensities.
            var lightBufferFormat = new RenderTargetFormat(width, height, false, SurfaceFormat.HdrBlendable, DepthFormat.Depth24Stencil8);
            context.LightBuffer0 = renderTargetPool.Obtain2D(lightBufferFormat);
            context.LightBuffer1 = renderTargetPool.Obtain2D(lightBufferFormat);

            // Bind both light buffers as simultaneous render targets.
            _renderTargetBindings[0] = new RenderTargetBinding(context.LightBuffer0); // Diffuse light accumulation
            _renderTargetBindings[1] = new RenderTargetBinding(context.LightBuffer1); // Specular light accumulation
            graphicsDevice.SetRenderTargets(_renderTargetBindings);
            context.RenderTarget = context.LightBuffer0;
            context.Viewport = graphicsDevice.Viewport;

            // Clear the light buffer. (The alpha channel is not used. We can set it to anything.)
            graphicsDevice.Clear(new Color(0, 0, 0, 255));

            // Restore the depth buffer (which XNA destroys in SetRenderTarget).
            // (This is only needed if lights can use a clip geometry (LightNode.Clip).)
            var rebuildZBufferRenderer = (RebuildZBufferRenderer)context.Data[RenderContextKeys.RebuildZBufferRenderer];
            rebuildZBufferRenderer.Render(context, true);

            // Render all lights into the light buffers.
            LightRenderer.Render(lights, context);

            if (aoRenderTarget != null)
            {
                // Render the ambient occlusion texture using multiplicative blending.
                // This darkens the light buffers depending on the ambient occlusion term.
                // Note: Theoretically, this should be done after the ambient light renderer
                // and before the directional light renderer because AO should not affect
                // directional lights. But doing this here has more impact.
                context.SourceTexture = aoRenderTarget;
                graphicsDevice.BlendState = GraphicsHelper.BlendStateMultiply;
                _copyFilter.Process(context);
            }

            // Clean up. (Recycle accepts null when SSAO was disabled.)
            graphicsService.RenderTargetPool.Recycle(aoRenderTarget);
            context.SourceTexture = null;
            context.RenderTarget = originalTarget;
            context.Viewport = originalViewport;

            _renderTargetBindings[0] = new RenderTargetBinding();
            _renderTargetBindings[1] = new RenderTargetBinding();
        }
示例#19
0
    //--------------------------------------------------------------
    #region Methods
    //--------------------------------------------------------------

    /// <summary>
    /// Computes the intersection of <see cref="MeshNode"/>s.
    /// </summary>
    /// <param name="meshNodePairs">
    /// A collection of <see cref="MeshNode"/> pairs. The renderer computes the intersection volume 
    /// of each pair.
    /// </param>
    /// <param name="color">The diffuse color used for the intersection.</param>
    /// <param name="alpha">The opacity of the intersection.</param>
    /// <param name="maxConvexity">
    /// The maximum convexity of the submeshes. A convex mesh has a convexity of 1. A concave mesh
    /// has a convexity greater than 1. Convexity is the number of layers required for depth peeling 
    /// (= the number of front face layers when looking at the object).
    /// </param>
    /// <param name="context">The render context.</param>
    /// <remarks>
    /// <para>
    /// This method renders an off-screen image (color and depth) of the intersection volume. This 
    /// operation destroys the currently set render target and depth/stencil buffer.
    /// </para>
    /// </remarks>
    /// <exception cref="ObjectDisposedException">
    /// The <see cref="IntersectionRenderer"/> has already been disposed.
    /// </exception>
    /// <exception cref="ArgumentNullException">
    /// <paramref name="meshNodePairs"/> or <paramref name="context"/> is
    /// <see langword="null"/>.
    /// </exception>
    /// <exception cref="ArgumentOutOfRangeException">
    /// The convexity must be greater than 0.
    /// </exception>
    /// <exception cref="GraphicsException">
    /// Invalid render context: Graphics service is not set.
    /// </exception>
    /// <exception cref="GraphicsException">
    /// Invalid render context: Wrong graphics service.
    /// </exception>
    /// <exception cref="GraphicsException">
    /// Invalid render context: Scene is not set.
    /// </exception>
    /// <exception cref="GraphicsException">
    /// Invalid render context: Camera node needs to be set in render context.
    /// </exception>
    public void ComputeIntersection(IEnumerable<Pair<MeshNode>> meshNodePairs,
      Vector3F color, float alpha, float maxConvexity, RenderContext context)
    {
      // Validate object state, arguments and the render context.
      if (_isDisposed)
        throw new ObjectDisposedException("IntersectionRenderer has already been disposed.");
      if (meshNodePairs == null)
        throw new ArgumentNullException("meshNodePairs");
      if (maxConvexity < 1)
        throw new ArgumentOutOfRangeException("maxConvexity", "The max convexity must be greater than 0.");
      if (context == null)
        throw new ArgumentNullException("context");
      if (context.GraphicsService == null)
        throw new GraphicsException("Invalid render context: Graphics service is not set.");
      if (_graphicsService != context.GraphicsService)
        throw new GraphicsException("Invalid render context: Wrong graphics service.");
      if (context.CameraNode == null)
        throw new GraphicsException("Camera node needs to be set in render context.");
      if (context.Scene == null)
        throw new GraphicsException("A scene needs to be set in the render context.");

      // Create 2 ordered pairs for each unordered pair: (A, B) and (B, A).
      _pairs.Clear();
      foreach (var pair in meshNodePairs)
      {
        if (pair.First == null || pair.Second == null)
          continue;

        // Frustum culling: ignore pairs where either mesh has no contact with the camera.
        if (!context.Scene.HaveContact(pair.First, context.CameraNode))
          continue;
        if (!context.Scene.HaveContact(pair.Second, context.CameraNode))
          continue;

        _pairs.Add(new Pair<MeshNode, MeshNode>(pair.First, pair.Second));
        _pairs.Add(new Pair<MeshNode, MeshNode>(pair.Second, pair.First));
      }
      
      var renderTargetPool = _graphicsService.RenderTargetPool;

      // Nothing visible to intersect: release the old intersection image and abort.
      if (_pairs.Count == 0)
      {
        renderTargetPool.Recycle(_intersectionImage);
        _intersectionImage = null;
        return;
      }

      // Color and alpha are applied in RenderIntersection().
      _color = color;
      _alpha = alpha;

      var graphicsDevice = _graphicsService.GraphicsDevice;

      // Save original render states (restored at the end of this method).
      var originalBlendState = graphicsDevice.BlendState;
      var originalDepthStencilState = graphicsDevice.DepthStencilState;
      var originalRasterizerState = graphicsDevice.RasterizerState;
      var originalScissorRectangle = graphicsDevice.ScissorRectangle;

      // Get offscreen render targets. The intersection image is rendered at a
      // resolution derived from the current viewport divided by DownsampleFactor.
      var viewport = context.Viewport;
      viewport.X = 0;
      viewport.Y = 0;
      viewport.Width = (int)(viewport.Width / DownsampleFactor);
      viewport.Height = (int)(viewport.Height / DownsampleFactor);
      var renderTargetFormat = new RenderTargetFormat(viewport.Width, viewport.Height, false, SurfaceFormat.Color, DepthFormat.Depth24Stencil8);

      // Try to reuse any existing render targets.
      // (Usually they are recycled in RenderIntersection()).
      var currentScene = _intersectionImage;
      if (currentScene == null || !renderTargetFormat.IsCompatibleWith(currentScene))
      {
        currentScene.SafeDispose();  // NOTE(review): presumably a null-safe dispose extension — confirm.
        currentScene = renderTargetPool.Obtain2D(renderTargetFormat);
      }
      var lastScene = renderTargetPool.Obtain2D(renderTargetFormat);

      // Set shared effect parameters.
      var cameraNode = context.CameraNode;
      var view = (Matrix)cameraNode.View;
      var projection = cameraNode.Camera.Projection;
      var near = projection.Near;
      var far = projection.Far;
      _parameterViewportSize.SetValue(new Vector2(viewport.Width, viewport.Height));

      // The depth epsilon (z component) has to be tuned if depth peeling does not
      // work because of numerical problems in z equality comparisons.
      _parameterCameraParameters.SetValue(new Vector3(near, far - near, 0.0000001f));
      _parameterView.SetValue(view);
      _parameterProjection.SetValue((Matrix)projection);

      var defaultTexture = _graphicsService.GetDefaultTexture2DBlack();

      // Handle all pairs.
      bool isFirstPass = true;
      while (true)
      {
        // Find a mesh node A and all mesh nodes to which it needs to be clipped.
        MeshNode meshNodeA = null;
        _partners.Clear();
        for (int i = 0; i < _pairs.Count; i++)
        {
          var pair = _pairs[i];

          // Skip pairs that have already been consumed (overwritten with an empty pair below).
          if (pair.First == null)
            continue;

          if (meshNodeA == null)
            meshNodeA = pair.First;

          if (pair.First == meshNodeA)
          {
            _partners.Add(pair.Second);

            // Remove this pair by overwriting it with an empty pair.
            _pairs[i] = new Pair<MeshNode, MeshNode>();
          }
        }

        // Abort if we have handled all pairs.
        if (meshNodeA == null)
          break;

        var worldTransformA = (Matrix)(meshNodeA.PoseWorld * Matrix44F.CreateScale(meshNodeA.ScaleWorld));

        if (EnableScissorTest)
        {
          // Scissor rectangle of A.
          var scissorA = GraphicsHelper.GetScissorRectangle(context.CameraNode, viewport, meshNodeA);

          // Union of scissor rectangles of partners.
          Rectangle partnerRectangle = GraphicsHelper.GetScissorRectangle(context.CameraNode, viewport, _partners[0]);
          for (int i = 1; i < _partners.Count; i++)
          {
            var a = GraphicsHelper.GetScissorRectangle(context.CameraNode, viewport, _partners[i]);
            partnerRectangle = Rectangle.Union(partnerRectangle, a);
          }

          // Use intersection of A and partners.
          graphicsDevice.ScissorRectangle = Rectangle.Intersect(scissorA, partnerRectangle);
          
          // We store the union of all scissor rectangles for use in RenderIntersection().
          if (isFirstPass)
            _totalScissorRectangle = graphicsDevice.ScissorRectangle;
          else
            _totalScissorRectangle = Rectangle.Union(_totalScissorRectangle, graphicsDevice.ScissorRectangle);
        }

        // Depth peeling of A: render up to maxConvexity depth layers.
        for (int layer = 0; layer < maxConvexity; layer++)
        {
          // Set and clear render target.
          graphicsDevice.SetRenderTarget(currentScene);
          graphicsDevice.Clear(new Color(1, 1, 1, 0));  // RGB = "a large depth", A = "empty area"

          // Render a depth layer of A. (The first layer peels against the default
          // black texture; subsequent layers peel against the previous result.)
          graphicsDevice.DepthStencilState = DepthStencilStateWriteLess;
          graphicsDevice.BlendState = BlendState.Opaque;
          graphicsDevice.RasterizerState = EnableScissorTest ? CullCounterClockwiseScissor : RasterizerState.CullCounterClockwise;
          _parameterWorld.SetValue(worldTransformA);
          _parameterTexture.SetValue((layer == 0) ? defaultTexture : lastScene);
          _passPeel.Apply();
          foreach (var submesh in meshNodeA.Mesh.Submeshes)
            submesh.Draw();

          // Render partners to set stencil.
          graphicsDevice.DepthStencilState = DepthStencilStateOnePassStencilFail;
          graphicsDevice.BlendState = BlendStateNoWrite;
          graphicsDevice.RasterizerState = EnableScissorTest ? CullNoneScissor : RasterizerState.CullNone;
          foreach (var partner in _partners)
          {
            _parameterWorld.SetValue((Matrix)(partner.PoseWorld * Matrix44F.CreateScale(partner.ScaleWorld)));
            _passMark.Apply();
            foreach (var submesh in partner.Mesh.Submeshes)
              submesh.Draw();
          }

          // Clear depth buffer. Leave stencil buffer unchanged.
          graphicsDevice.Clear(ClearOptions.DepthBuffer, new Color(0, 1, 0), 1, 0);

          // Render A to compute lighting where the stencil marks an intersection.
          graphicsDevice.DepthStencilState = DepthStencilStateStencilNotEqual0;
          graphicsDevice.BlendState = BlendState.Opaque;
          graphicsDevice.RasterizerState = EnableScissorTest ? CullCounterClockwiseScissor :  RasterizerState.CullCounterClockwise;
          _parameterWorld.SetValue(worldTransformA);
          _passDraw.Apply();
          foreach (var submesh in meshNodeA.Mesh.Submeshes)
            submesh.Draw();

          // Combine last intersection image with current.
          if (!isFirstPass)
          {
            graphicsDevice.DepthStencilState = DepthStencilState.DepthRead;
            graphicsDevice.BlendState = BlendState.Opaque;
            graphicsDevice.RasterizerState = EnableScissorTest ? CullNoneScissor : RasterizerState.CullNone;
            _parameterTexture.SetValue(lastScene);
            _passCombine.Apply();
            graphicsDevice.DrawFullScreenQuad();
          }

          isFirstPass = false;

          // ----- Swap render targets. (After the swap, lastScene holds the image
          // rendered in this iteration.)
          MathHelper.Swap(ref lastScene, ref currentScene);
        }
      }

      // Store final images for RenderIntersection.
      _intersectionImage = lastScene;

      // Scale scissor rectangle back to full-screen resolution.
      if (DownsampleFactor > 1)
      {
        _totalScissorRectangle.X = (int)(_totalScissorRectangle.X * DownsampleFactor);
        _totalScissorRectangle.Y = (int)(_totalScissorRectangle.Y * DownsampleFactor);
        _totalScissorRectangle.Width = (int)(_totalScissorRectangle.Width * DownsampleFactor);
        _totalScissorRectangle.Height = (int)(_totalScissorRectangle.Height * DownsampleFactor);
      }


      // Restore original render state.
      graphicsDevice.BlendState = originalBlendState ?? BlendState.Opaque;
      graphicsDevice.DepthStencilState = originalDepthStencilState ?? DepthStencilState.Default;
      graphicsDevice.RasterizerState = originalRasterizerState ?? RasterizerState.CullCounterClockwise;
      graphicsDevice.ScissorRectangle = originalScissorRectangle;

      renderTargetPool.Recycle(currentScene);
      _partners.Clear();
      _pairs.Clear();
    }
示例#20
0
        /// <summary>
        /// Applies the effect in <c>NumberOfPasses</c> iterations, ping-ponging between
        /// two temporary render targets; the final pass writes into the context's
        /// render target and viewport.
        /// </summary>
        /// <param name="context">The render context (source texture, target, viewport).</param>
        protected override void OnProcess(RenderContext context)
        {
            var device = GraphicsService.GraphicsDevice;
            var pool = GraphicsService.RenderTargetPool;

            // Target resolution is taken from the context viewport.
            int width = context.Viewport.Width;
            int height = context.Viewport.Height;

            // Two intermediate buffers for ping-ponging. The second buffer is only
            // needed when there are more than two passes.
            var intermediateFormat = new RenderTargetFormat(width, height, false, context.SourceTexture.Format, DepthFormat.None);
            RenderTarget2D pingTarget = (NumberOfPasses > 1) ? pool.Obtain2D(intermediateFormat) : null;
            RenderTarget2D pongTarget = (NumberOfPasses > 2) ? pool.Obtain2D(intermediateFormat) : null;

            // Half-pixel offsets (with bilinear filtering) are only usable for
            // non-floating-point formats; otherwise sample point-clamped.
            bool useHalfPixelOffset = !TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format);
            device.SamplerStates[0] = useHalfPixelOffset ? SamplerState.LinearClamp : SamplerState.PointClamp;
            _useHalfPixelOffsetParameter.SetValue(useHalfPixelOffset ? 1.0f : 0.0f);

            _viewportSizeParameter.SetValue(new Vector2(width, height));

            for (int pass = 0; pass < NumberOfPasses; pass++)
            {
                // Choose the output: final pass renders into the real target,
                // intermediate passes alternate between the two buffers.
                if (pass == NumberOfPasses - 1)
                {
                    device.SetRenderTarget(context.RenderTarget);
                    device.Viewport = context.Viewport;
                }
                else
                {
                    device.SetRenderTarget((pass % 2 == 0) ? pingTarget : pongTarget);
                }

                // Choose the input: first pass reads the source texture, later passes
                // read the buffer that the previous pass wrote.
                if (pass == 0)
                    _sourceTextureParameter.SetValue(context.SourceTexture);
                else
                    _sourceTextureParameter.SetValue((pass % 2 == 0) ? pongTarget : pingTarget);

                // The iteration value goes from 0 ... (n - 1) or 1 ... n depending on
                // whether a half-pixel offset is used.
                _iterationParameter.SetValue((float)(useHalfPixelOffset ? pass : pass + 1));

                _effect.CurrentTechnique.Passes[0].Apply();
                device.DrawFullScreenQuad();
            }

            // Clean-up: unbind the texture and return the temporary buffers.
            _sourceTextureParameter.SetValue((Texture2D)null);

            pool.Recycle(pingTarget);
            pool.Recycle(pongTarget);
        }
示例#21
0
        /// <summary>
        /// Renders each <see cref="SceneCaptureNode"/> in <paramref name="nodes"/> into its
        /// <c>RenderToTexture</c> target (a <see cref="RenderTarget2D"/> or a
        /// <see cref="RenderTargetCube"/>) by invoking <c>RenderCallback</c>.
        /// </summary>
        /// <param name="nodes">The scene nodes; non-<see cref="SceneCaptureNode"/>s are ignored.</param>
        /// <param name="context">The render context.</param>
        /// <param name="order">The render order (not used by this renderer).</param>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="nodes"/> or <paramref name="context"/> is <see langword="null"/>.
        /// </exception>
        /// <exception cref="GraphicsException">
        /// A node's texture is neither a <see cref="RenderTarget2D"/> nor a
        /// <see cref="RenderTargetCube"/>, the camera does not use a perspective projection
        /// (cube maps only), or the render callback raised an
        /// <see cref="InvalidOperationException"/>.
        /// </exception>
        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            if (nodes == null)
                throw new ArgumentNullException("nodes");
            if (context == null)
                throw new ArgumentNullException("context");

            int numberOfNodes = nodes.Count;
            if (numberOfNodes == 0)
                return;

            var graphicsService = context.GraphicsService;
            var graphicsDevice = graphicsService.GraphicsDevice;
            var renderTargetPool = graphicsService.RenderTargetPool;
            int frame = context.Frame;

            var savedRenderState = new RenderStateSnapshot(graphicsDevice);

            // Remember the context state that is overwritten below; it is restored in
            // the finally block even if the render callback throws.
            var originalRenderTarget = context.RenderTarget;
            var originalViewport = context.Viewport;
            var originalCameraNode = context.CameraNode;
            var originalLodCameraNode = context.LodCameraNode;
            var originalReferenceNode = context.ReferenceNode;

            try
            {
                for (int i = 0; i < numberOfNodes; i++)
                {
                    var node = nodes[i] as SceneCaptureNode;
                    if (node == null)
                        continue;

                    // Update each node only once per frame.
                    if (node.LastFrame == frame)
                        continue;

                    node.LastFrame = frame;

                    var cameraNode = node.CameraNode;
                    if (cameraNode == null)
                        continue;

                    var texture = node.RenderToTexture.Texture;
                    if (texture == null)
                        continue;

                    // RenderToTexture instances can be shared. --> Update them only once per frame.
                    if (node.RenderToTexture.LastFrame == frame)
                        continue;

                    context.CameraNode = cameraNode;
                    context.LodCameraNode = cameraNode;
                    context.ReferenceNode = node;

                    var renderTarget2D = texture as RenderTarget2D;
                    var projection = cameraNode.Camera.Projection;
                    if (renderTarget2D != null)
                    {
                        // ----- 2D capture: render directly into the target.
                        context.RenderTarget = renderTarget2D;
                        context.Viewport = new Viewport(0, 0, renderTarget2D.Width, renderTarget2D.Height);

                        RenderCallback(context);

                        // Update other properties of RenderToTexture.
                        node.RenderToTexture.LastFrame = frame;
                        node.RenderToTexture.TextureMatrix = GraphicsHelper.ProjectorBiasMatrix
                                                             * projection
                                                             * cameraNode.PoseWorld.Inverse;

                        continue;
                    }

                    var renderTargetCube = texture as RenderTargetCube;
                    if (renderTargetCube != null)
                    {
                        // ----- Cube-map capture: render each face into a pooled 2D
                        // target and copy it into the cube face with a sprite batch.
                        var format = new RenderTargetFormat(renderTargetCube) { Mipmap = false };

                        renderTarget2D = renderTargetPool.Obtain2D(format);

                        context.RenderTarget = renderTarget2D;
                        context.Viewport = new Viewport(0, 0, renderTarget2D.Width, renderTarget2D.Height);

                        if (_spriteBatch == null)
                            _spriteBatch = graphicsService.GetSpriteBatch();

                        var perspectiveProjection = projection as PerspectiveProjection;
                        if (perspectiveProjection == null)
                            throw new GraphicsException("The camera of the SceneCaptureNode must use a perspective projection.");

                        // Cube-map faces require a 90° field of view with aspect ratio 1.
                        // ReSharper disable CompareOfFloatsByEqualityOperator
                        if (perspectiveProjection.FieldOfViewX != ConstantsF.PiOver2
                            || perspectiveProjection.FieldOfViewY != ConstantsF.PiOver2)
                            perspectiveProjection.SetFieldOfView(ConstantsF.PiOver2, 1, projection.Near, projection.Far);
                        // ReSharper restore CompareOfFloatsByEqualityOperator

                        var originalCameraPose = cameraNode.PoseWorld;
                        for (int side = 0; side < 6; side++)
                        {
                            // Rotate camera to face the current cube map face.
                            //var cubeMapFace = (CubeMapFace)side;
                            // AMD problem: If we generate in normal order, the last cube map face contains
                            // garbage when mipmaps are created.
                            var cubeMapFace = (CubeMapFace)(5 - side);
                            var position = cameraNode.PoseWorld.Position;
                            cameraNode.View = Matrix44F.CreateLookAt(
                                position,
                                position + originalCameraPose.ToWorldDirection(GraphicsHelper.GetCubeMapForwardDirection(cubeMapFace)),
                                originalCameraPose.ToWorldDirection(GraphicsHelper.GetCubeMapUpDirection(cubeMapFace)));

                            RenderCallback(context);

                            // Copy RGBM texture into cube map face.
                            graphicsDevice.SetRenderTarget(renderTargetCube, cubeMapFace);
                            _spriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque, null, null, null);
                            _spriteBatch.Draw(renderTarget2D, new Vector2(0, 0), Color.White);
                            _spriteBatch.End();
                        }
                        cameraNode.PoseWorld = originalCameraPose;

                        renderTargetPool.Recycle(renderTarget2D);

                        // Update other properties of RenderToTexture.
                        node.RenderToTexture.LastFrame = frame;
                        node.RenderToTexture.TextureMatrix = GraphicsHelper.ProjectorBiasMatrix
                                                             * projection
                                                             * cameraNode.PoseWorld.Inverse;

                        continue;
                    }

                    throw new GraphicsException(
                        "SceneCaptureNode.RenderToTexture.Texture is invalid. The texture must be a RenderTarget2D or RenderTargetCube.");
                }
            }
            catch (InvalidOperationException exception)
            {
                throw new GraphicsException(
                    "InvalidOperationException was raised in SceneCaptureRenderer.Render(). "
                    + "This can happen if a SceneQuery instance that is currently in use is modified in the "
                    + "RenderCallback. --> Use different SceneQuery types in the method which calls "
                    + "SceneCaptureRenderer.Render() and in the RenderCallback method.",
                    exception);
            }
            finally
            {
                // Restore the graphics device and the render context even when an
                // exception escapes; otherwise subsequent renderers would run with a
                // stale render target, camera and reference node.
                graphicsDevice.SetRenderTarget(null);
                savedRenderState.Restore();

                context.RenderTarget = originalRenderTarget;
                context.Viewport = originalViewport;
                context.CameraNode = originalCameraNode;
                context.LodCameraNode = originalLodCameraNode;
                context.ReferenceNode = originalReferenceNode;
            }
        }
示例#22
0
        // Perform FFTs.
        // 4 complex input images: source0.xy, source0.zw, source1.xy, source1.zw
        // 2 targets: target0 = displacement map, target1 = normal map using Color format.
        // Only the inverse FFT (forward == false) is implemented; choppiness scales
        // the horizontal displacement in the shader.
        public void Process(RenderContext context, bool forward, Texture2D source0, Texture2D source1, RenderTarget2D target0, RenderTarget2D target1, float choppiness)
        {
            // ----- Validate arguments.
            if (context == null)
                throw new ArgumentNullException("context");
            if (source0 == null)
                throw new ArgumentNullException("source0");
            if (source1 == null)
                throw new ArgumentNullException("source1");

            if (forward)
            {
                // For forward FFT, uncomment the LastPassScale stuff!
                throw new NotImplementedException("Forward FFT not implemented.");
            }

            var graphicsService = context.GraphicsService;
            var graphicsDevice = graphicsService.GraphicsDevice;
            var renderTargetPool = graphicsService.RenderTargetPool;

            // Save device state; restored at the end of the method.
            var savedRenderState = new RenderStateSnapshot(graphicsDevice);

            graphicsDevice.BlendState = BlendState.Opaque;
            graphicsDevice.RasterizerState = RasterizerState.CullNone;
            graphicsDevice.DepthStencilState = DepthStencilState.None;

            // FFT size is taken from the source width (assumes a square texture — TODO confirm).
            int size = source0.Width;
            _parameterSize.SetValue((float)size);

            _parameterChoppiness.SetValue(choppiness);

            // Number of butterfly passes = ceil(log2(size)).
            int numberOfButterflyPasses = (int)MathHelper.Log2GreaterOrEqual((uint)source0.Width);
            // ReSharper disable once ConditionIsAlwaysTrueOrFalse
            _parameterButterflyTexture.SetValue(GetButterflyTexture(forward, numberOfButterflyPasses));

            // Temporary ping/pong target pairs (two simultaneous MRT outputs per pass).
            var format = new RenderTargetFormat(size, size, false, source0.Format, DepthFormat.None);
            var tempPing0 = renderTargetPool.Obtain2D(format);
            var tempPing1 = renderTargetPool.Obtain2D(format);
            var tempPong0 = renderTargetPool.Obtain2D(format);
            var tempPong1 = renderTargetPool.Obtain2D(format);

            //_parameterIsLastPass.SetValue(false);

            // Perform horizontal (i = 0) and vertical (i = 1) FFT passes.
            for (int i = 0; i < 2; i++)
            {
                //_parameterLastPassScale.SetValue(1);

                // Perform butterfly passes. We ping-pong between two temp targets.
                for (int pass = 0; pass < numberOfButterflyPasses; pass++)
                {
                    // Sample the butterfly texture at the row center for this pass.
                    _parameterButterflyIndex.SetValue(0.5f / numberOfButterflyPasses + (float)pass / numberOfButterflyPasses);

                    if (i == 0 && pass == 0)
                    {
                        // First pass: read from the user-supplied sources, write into ping.
                        _renderTargetBindings[0] = new RenderTargetBinding(tempPing0);
                        _renderTargetBindings[1] = new RenderTargetBinding(tempPing1);
                        graphicsDevice.SetRenderTargets(_renderTargetBindings);
                        _parameterSourceTexture0.SetValue(source0);
                        _parameterSourceTexture1.SetValue(source1);
                    }
                    else if (i == 1 && pass == numberOfButterflyPasses - 1)
                    {
                        // Last pass.
                        // We have explicit shader passes for the last FFT pass.
                        break;

                        //_parameterIsLastPass.SetValue(true);
                        //if (forward)
                        //  _parameterLastPassScale.SetValue(1.0f / size / size);

                        //if (_renderTargetBindings[0].RenderTarget == tempPing0)
                        //{
                        //  _renderTargetBindings[0] = new RenderTargetBinding(target0);
                        //  _renderTargetBindings[1] = new RenderTargetBinding(target1);
                        //  graphicsDevice.SetRenderTargets(_renderTargetBindings);
                        //  _parameterSourceTexture0.SetValue(tempPing0);
                        //  _parameterSourceTexture1.SetValue(tempPing1);
                        //}
                        //else
                        //{
                        //  _renderTargetBindings[0] = new RenderTargetBinding(target0);
                        //  _renderTargetBindings[1] = new RenderTargetBinding(target1);
                        //  graphicsDevice.SetRenderTargets(_renderTargetBindings);
                        //  _parameterSourceTexture0.SetValue(tempPong0);
                        //  _parameterSourceTexture1.SetValue(tempPong1);
                        //}
                    }
                    else
                    {
                        // Intermediate pass: the previous output becomes the new input.
                        if (_renderTargetBindings[0].RenderTarget == tempPing0)
                        {
                            _renderTargetBindings[0] = new RenderTargetBinding(tempPong0);
                            _renderTargetBindings[1] = new RenderTargetBinding(tempPong1);
                            graphicsDevice.SetRenderTargets(_renderTargetBindings);
                            _parameterSourceTexture0.SetValue(tempPing0);
                            _parameterSourceTexture1.SetValue(tempPing1);
                        }
                        else
                        {
                            _renderTargetBindings[0] = new RenderTargetBinding(tempPing0);
                            _renderTargetBindings[1] = new RenderTargetBinding(tempPing1);
                            graphicsDevice.SetRenderTargets(_renderTargetBindings);
                            _parameterSourceTexture0.SetValue(tempPong0);
                            _parameterSourceTexture1.SetValue(tempPong1);
                        }
                    }

                    if (i == 0)
                        _passFftHorizontal.Apply();
                    else
                        _passFftVertical.Apply();

                    graphicsDevice.DrawFullScreenQuad();
                }
            }

            // Perform final vertical FFT passes. We have to perform them separately
            // because displacement map and normal map usually have different bit depth.
            // Final pass for displacement.
            graphicsDevice.SetRenderTarget(target0);
            if (_renderTargetBindings[1].RenderTarget == tempPing1)
                _parameterSourceTexture0.SetValue(tempPing0);
            else
                _parameterSourceTexture0.SetValue(tempPong0);

            _passFftDisplacement.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Final pass for normals.
            graphicsDevice.SetRenderTarget(target1);
            if (_renderTargetBindings[1].RenderTarget == tempPing1)
                _parameterSourceTexture0.SetValue(tempPing1);
            else
                _parameterSourceTexture0.SetValue(tempPong1);

            _passFftNormal.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Clean up: unbind textures and return pooled targets.
            _renderTargetBindings[0] = default(RenderTargetBinding);
            _renderTargetBindings[1] = default(RenderTargetBinding);
            _parameterButterflyTexture.SetValue((Texture2D)null);
            _parameterSourceTexture0.SetValue((Texture2D)null);
            _parameterSourceTexture1.SetValue((Texture2D)null);

            renderTargetPool.Recycle(tempPing0);
            renderTargetPool.Recycle(tempPing1);
            renderTargetPool.Recycle(tempPong0);
            renderTargetPool.Recycle(tempPong1);

            savedRenderState.Restore();

            // Reset the texture stages. If a floating point texture is set, we get exceptions
            // when a sampler with bilinear filtering is set.
            #if !MONOGAME
            graphicsDevice.ResetTextures();
            #endif
        }
示例#23
0
        /// <summary>
        /// Renders a depth-of-field image: builds a circle-of-confusion (CoC) map from
        /// the depth buffer (G-buffer 0), downsamples and blurs the scene, depth and
        /// CoC, and blends sharp and blurred scene in a final full-screen pass.
        /// </summary>
        /// <param name="context">
        /// The render context. Requires a camera node and G-buffer 0 (checked below).
        /// </param>
        protected override void OnProcess(RenderContext context)
        {
            context.ThrowIfCameraMissing();
            context.ThrowIfGBuffer0Missing();

            var graphicsDevice   = GraphicsService.GraphicsDevice;
            var cameraNode       = context.CameraNode;
            var renderTargetPool = GraphicsService.RenderTargetPool;

            // Remember the original context state; it is restored at the end.
            var source   = context.SourceTexture;
            var target   = context.RenderTarget;
            var viewport = context.Viewport;

            var sourceSize        = new Vector2F(source.Width, source.Height);
            int width             = (int)sourceSize.X;
            int height            = (int)sourceSize.Y;
            int downsampledWidth  = Math.Max(1, width / DownsampleFactor);
            int downsampledHeight = Math.Max(1, height / DownsampleFactor);

            // Floating-point formats cannot use bilinear filtering here; choose the
            // sampler and the matching Gaussian blur mode accordingly.
            if (TextureHelper.IsFloatingPointFormat(source.Format))
            {
                graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
                InitializeGaussianBlur(new Vector2F(downsampledWidth, downsampledHeight), false);
            }
            else
            {
                graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
                InitializeGaussianBlur(new Vector2F(downsampledWidth, downsampledHeight), true);
            }

            // Get temporary render targets.
            var            downsampleFormat = new RenderTargetFormat(downsampledWidth, downsampledHeight, false, source.Format, DepthFormat.None);
            RenderTarget2D blurredScene0    = renderTargetPool.Obtain2D(downsampleFormat);
            RenderTarget2D blurredScene1    = renderTargetPool.Obtain2D(downsampleFormat);

            var            blurredDepthFormat = new RenderTargetFormat(downsampledWidth, downsampledHeight, false, context.GBuffer0.Format, DepthFormat.None);
            RenderTarget2D blurredDepth0      = renderTargetPool.Obtain2D(blurredDepthFormat);

            var            cocFormat = new RenderTargetFormat(width, height, false, SurfaceFormat.Single, DepthFormat.None);
            RenderTarget2D cocImage  = renderTargetPool.Obtain2D(cocFormat);

            var            downSampledCocFormat = new RenderTargetFormat(downsampledWidth, downsampledHeight, false, cocFormat.SurfaceFormat, DepthFormat.None);
            RenderTarget2D cocImageBlurred      = renderTargetPool.Obtain2D(downSampledCocFormat);

            // ----- Create CoC map (per-pixel blur factor from depth and the near/far distances).
            _effect.CurrentTechnique = _effect.Techniques[0];
            graphicsDevice.SetRenderTarget(cocImage);
            _screenSizeParameter.SetValue(new Vector2(cocImage.Width, cocImage.Height));
            _depthTextureParameter.SetValue(context.GBuffer0);
            _nearBlurDistanceParameter.SetValue(NearBlurDistance);
            _nearFocusDistanceParameter.SetValue(NearFocusDistance);
            _farFocusDistanceParameter.SetValue(FarFocusDistance);
            _farBlurDistanceParameter.SetValue(FarBlurDistance);
            _farParameter.SetValue(cameraNode.Camera.Projection.Far);
            _circleOfConfusionPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // ----- Downsample cocImage to cocImageBlurred.
            context.SourceTexture = cocImage;
            context.RenderTarget  = cocImageBlurred;
            context.Viewport      = new Viewport(0, 0, cocImageBlurred.Width, cocImageBlurred.Height);
            _downsampleFilter.Process(context);

            renderTargetPool.Recycle(cocImage);

            // ----- Downsample source to blurredScene0.
            context.SourceTexture = source;
            context.RenderTarget  = blurredScene0;
            context.Viewport      = new Viewport(0, 0, blurredScene0.Width, blurredScene0.Height);
            _downsampleFilter.Process(context);

            // ----- Downsample depth texture to blurredDepth0.
            context.SourceTexture = context.GBuffer0;
            context.RenderTarget  = blurredDepth0;
            context.Viewport      = new Viewport(0, 0, blurredDepth0.Width, blurredDepth0.Height);
            _downsampleFilter.Process(context);

            // ----- Blur scene.
            // Horizontal blur
            graphicsDevice.SetRenderTarget(blurredScene1);
            _screenSizeParameter.SetValue(new Vector2(blurredScene0.Width, blurredScene0.Height));
            _blurTextureParameter.SetValue(blurredScene0);
            _downsampledDepthTextureParameter.SetValue(blurredDepth0);
            _downsampledCocTextureParameter.SetValue(cocImageBlurred);
            _offsetsParameter.SetValue(_horizontalOffsets);
            _weightsParameter.SetValue(_weights);
            _blurPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Vertical blur.
            graphicsDevice.SetRenderTarget(blurredScene0);
            _blurTextureParameter.SetValue(blurredScene1);
            _offsetsParameter.SetValue(_verticalOffsets);
            _blurPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            renderTargetPool.Recycle(blurredScene1);

            // ----- Blur cocImageBlurred.
            context.SourceTexture = cocImageBlurred;
            context.RenderTarget  = cocImageBlurred;
            context.Viewport      = new Viewport(0, 0, cocImageBlurred.Width, cocImageBlurred.Height);
            _cocBlur.Process(context); // We make a two pass blur, so context.SourceTexture can be equal to context.RenderTarget.

            // ----- Blur depth.
            context.SourceTexture = blurredDepth0;
            context.RenderTarget  = blurredDepth0;
            context.Viewport      = new Viewport(0, 0, blurredDepth0.Width, blurredDepth0.Height);
            _cocBlur.Process(context);

            // ----- Create final DoF image by blending sharp and blurred scene per pixel.
            _effect.CurrentTechnique = _effect.Techniques[0];
            graphicsDevice.SetRenderTarget(target);
            graphicsDevice.Viewport = viewport;
            _screenSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _sceneTextureParameter.SetValue(source);
            _blurTextureParameter.SetValue(blurredScene0);
            _depthTextureParameter.SetValue(context.GBuffer0);
            _downsampledDepthTextureParameter.SetValue(blurredDepth0);
            _downsampledCocTextureParameter.SetValue(cocImageBlurred);
            _depthOfFieldPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // ----- Clean-up: unbind textures, return pooled targets, restore context.
            _depthTextureParameter.SetValue((Texture2D)null);
            _blurTextureParameter.SetValue((Texture2D)null);
            _downsampledDepthTextureParameter.SetValue((Texture2D)null);
            _downsampledCocTextureParameter.SetValue((Texture2D)null);
            _sceneTextureParameter.SetValue((Texture2D)null);
            renderTargetPool.Recycle(blurredScene0);
            renderTargetPool.Recycle(blurredDepth0);
            renderTargetPool.Recycle(cocImageBlurred);
            context.SourceTexture = source;
            context.RenderTarget  = target;
            context.Viewport      = viewport;
        }
        /// <summary>
        /// Renders variance shadow maps (VSMs) for all <see cref="LightNode"/>s in
        /// <paramref name="nodes"/> whose shadow is a <see cref="VarianceShadow"/>.
        /// </summary>
        /// <param name="nodes">The scene nodes. Entries that are not light nodes with a
        /// <see cref="VarianceShadow"/> are ignored.</param>
        /// <param name="context">The render context. Requires a camera node and a scene.</param>
        /// <param name="order">Not used; nodes are processed in list order.</param>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="nodes"/> or <paramref name="context"/> is <see langword="null"/>.
        /// </exception>
        /// <exception cref="NotImplementedException">
        /// The camera in the render context does not use a perspective projection.
        /// </exception>
        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            if (nodes == null)
            {
                throw new ArgumentNullException("nodes");
            }
            if (context == null)
            {
                throw new ArgumentNullException("context");
            }

            int numberOfNodes = nodes.Count;

            if (numberOfNodes == 0)
            {
                return;
            }

            Debug.Assert(context.CameraNode != null, "A camera node has to be set in the render context.");
            Debug.Assert(context.Scene != null, "A scene has to be set in the render context.");

            var originalRenderTarget  = context.RenderTarget;
            var originalViewport      = context.Viewport;
            var originalReferenceNode = context.ReferenceNode;

            // Camera properties
            var cameraNode = context.CameraNode;
            var cameraPose = cameraNode.PoseWorld;
            var projection = cameraNode.Camera.Projection;

            if (!(projection is PerspectiveProjection))
            {
                // BUGFIX: The original message claimed perspective cameras were unsupported,
                // but the guard rejects everything EXCEPT perspective projections.
                throw new NotImplementedException("VSM shadow maps are only implemented for scenes with a perspective camera.");
            }

            float fieldOfViewY = projection.FieldOfViewY;
            float aspectRatio  = projection.AspectRatio;

            // Update SceneNode.LastFrame for all rendered nodes.
            int frame = context.Frame;

            cameraNode.LastFrame = frame;

            // The scene node renderer should use the light camera instead of the player camera.
            context.CameraNode = _orthographicCameraNode;

            // The shadow map is rendered using the technique "DirectionalVsm".
            // See ShadowMap.fx in the DigitalRune source code folder.
            context.Technique = "DirectionalVsm";

            var graphicsService           = context.GraphicsService;
            var graphicsDevice            = graphicsService.GraphicsDevice;
            var originalBlendState        = graphicsDevice.BlendState;
            var originalDepthStencilState = graphicsDevice.DepthStencilState;
            var originalRasterizerState   = graphicsDevice.RasterizerState;

            for (int i = 0; i < numberOfNodes; i++)
            {
                var lightNode = nodes[i] as LightNode;
                if (lightNode == null)
                {
                    continue;
                }

                var shadow = lightNode.Shadow as VarianceShadow;
                if (shadow == null)
                {
                    continue;
                }

                // LightNode is visible in current frame.
                lightNode.LastFrame = frame;

                // A locked shadow map is not updated.
                if (shadow.ShadowMap != null && shadow.IsLocked)
                {
                    continue;
                }

                if (shadow.ShadowMap == null)
                {
                    // Create the shadow map lazily. (The format is only needed here, so it
                    // is no longer built before the IsLocked/non-null checks above.)
                    var format = new RenderTargetFormat(
                        shadow.PreferredSize,
                        shadow.PreferredSize,
                        false,
                        shadow.Prefer16Bit ? SurfaceFormat.HalfVector2 : SurfaceFormat.Vector2, // VSM needs two channels!
                        DepthFormat.Depth24);

                    shadow.ShadowMap = graphicsService.RenderTargetPool.Obtain2D(format);
                }

                graphicsDevice.DepthStencilState = DepthStencilState.Default;
                graphicsDevice.BlendState        = BlendState.Opaque;

                // Render front and back faces for VSM due to low shadow map texel density.
                // (VSM is usually used for distant geometry.)
                graphicsDevice.RasterizerState = RasterizerState.CullNone;

                graphicsDevice.SetRenderTarget(shadow.ShadowMap);
                context.RenderTarget = shadow.ShadowMap;
                context.Viewport     = graphicsDevice.Viewport;

                graphicsDevice.Clear(Color.White);

                // Compute an orthographic camera for the light.
                // If Shadow.TargetArea is null, the shadow map should cover the area in front of the player camera.
                // If Shadow.TargetArea is set, the shadow map should cover this static area.
                if (shadow.TargetArea == null)
                {
                    // near/far of this shadowed area.
                    float near = projection.Near;
                    float far  = shadow.MaxDistance;

                    // Abort if near-far distances are invalid.
                    if (Numeric.IsGreaterOrEqual(near, far))
                    {
                        continue;
                    }

                    // Create a view volume for frustum part that is covered by the shadow map.
                    _cameraVolume.SetFieldOfView(fieldOfViewY, aspectRatio, near, far);

                    // Find the bounding sphere of the frustum part.
                    Vector3 center;
                    float   radius;
                    GetBoundingSphere(_cameraVolume, out center, out radius);

                    // Convert center to light space.
                    Pose lightPose = lightNode.PoseWorld;
                    center = cameraPose.ToWorldPosition(center);
                    center = lightPose.ToLocalPosition(center);

                    // Snap center to texel positions to avoid shadow swimming.
                    SnapPositionToTexels(ref center, 2 * radius, shadow.ShadowMap.Height);

                    // Convert center back to world space.
                    center = lightPose.ToWorldPosition(center);

                    SetUpOrthographicShadowCamera(shadow, center, radius, lightPose.Orientation);
                }
                else
                {
                    // Get bounding sphere of static target area.
                    Aabb    targetAabb = shadow.TargetArea.Value;
                    Vector3 center     = targetAabb.Center;
                    float   radius     = (targetAabb.Maximum - center).Length;

                    SetUpOrthographicShadowCamera(shadow, center, radius, lightNode.PoseWorld.Orientation);
                }

                context.ReferenceNode = lightNode;
                context.Object        = shadow;

                // Render objects into shadow map.
                bool shadowMapContainsSomething = RenderCallback(context);

                if (shadowMapContainsSomething)
                {
                    // Blur shadow map.
                    if (shadow.Filter != null && shadow.Filter.Scale > 0)
                    {
                        context.SourceTexture = shadow.ShadowMap;
                        shadow.Filter.Process(context);
                        context.SourceTexture = null;
                    }
                }
                else
                {
                    // Shadow map is empty. Recycle it.
                    graphicsService.RenderTargetPool.Recycle(shadow.ShadowMap);
                    shadow.ShadowMap = null;
                }
            }

            graphicsDevice.SetRenderTarget(null);

            graphicsDevice.BlendState        = originalBlendState;
            graphicsDevice.DepthStencilState = originalDepthStencilState;
            graphicsDevice.RasterizerState   = originalRasterizerState;

            context.CameraNode    = cameraNode;
            context.Technique     = null;
            context.RenderTarget  = originalRenderTarget;
            context.Viewport      = originalViewport;
            context.ReferenceNode = originalReferenceNode;
            context.Object        = null;
        }

        /// <summary>
        /// Points the orthographic light camera at the given bounding sphere and stores
        /// the resulting view-projection matrix in the shadow. (Shared code that was
        /// previously duplicated in both branches of the TargetArea check.)
        /// </summary>
        /// <param name="shadow">The variance shadow whose view-projection is updated.</param>
        /// <param name="center">The world-space center of the target bounding sphere.</param>
        /// <param name="radius">The radius of the target bounding sphere.</param>
        /// <param name="orientation">The orientation of the light.</param>
        private void SetUpOrthographicShadowCamera(VarianceShadow shadow, Vector3 center, float radius, Matrix orientation)
        {
            Vector3 backward               = orientation.GetColumn(2);
            var     orthographicProjection = (OrthographicProjection)_orthographicCameraNode.Camera.Projection;

            // Create a tight orthographic frustum around the target bounding sphere.
            orthographicProjection.SetOffCenter(-radius, radius, -radius, radius, 0, 2 * radius);
            Vector3 cameraPosition = center + radius * backward;
            Pose    frustumPose    = new Pose(cameraPosition, orientation);
            Pose    view           = frustumPose.Inverse;
            shadow.ViewProjection = (Matrix)view * orthographicProjection;

            // For rendering the shadow map, move near plane back by MinLightDistance
            // to catch occluders in front of the camera frustum.
            orthographicProjection.Near       = -shadow.MinLightDistance;
            _orthographicCameraNode.PoseWorld = frustumPose;
        }
示例#25
0
        /// <summary>
        /// Creates a screen-space ambient occlusion (SSAO) buffer from G-buffer 0, blurs
        /// it horizontally and vertically, and either outputs the occlusion term directly
        /// or combines it with the source image (see <c>CombineWithSource</c>).
        /// </summary>
        /// <param name="context">
        /// The render context. Requires a camera node and G-buffer 0.
        /// </param>
        protected override void OnProcess(RenderContext context)
        {
            // Fail fast before any effect parameters are modified or render targets
            // are obtained. (ThrowIfGBuffer0Missing was previously called only after
            // most parameters had already been set.)
            context.ThrowIfCameraMissing();
            context.ThrowIfGBuffer0Missing();

            var graphicsDevice = GraphicsService.GraphicsDevice;
            var renderTargetPool = GraphicsService.RenderTargetPool;
            var cameraNode = context.CameraNode;

            var source = context.SourceTexture;
            var target = context.RenderTarget;
            var viewport = context.Viewport;

            Projection projection = cameraNode.Camera.Projection;
            Matrix44F projMatrix = projection;
            float near = projection.Near;
            float far = projection.Far;

            // Frustum corner info used in the shader to reconstruct view-space positions.
            _frustumInfoParameter.SetValue(new Vector4(
                projection.Left / near,
                projection.Top / near,
                (projection.Right - projection.Left) / near,
                (projection.Bottom - projection.Top) / near));

            _numberOfAOSamplesParameter.SetValue(NumberOfSamples);

            // The height of a 1 unit object 1 unit in front of the camera.
            // (Compute 0.5 unit multiply by 2 and divide by 2 to convert from [-1, 1] to [0, 1] range.)
            float projectionScale =
                projMatrix.TransformPosition(new Vector3F(0, 0.5f, -1)).Y
                - projMatrix.TransformPosition(new Vector3F(0, 0, -1)).Y;

            _aoParameters0.SetValue(new Vector4(
                projectionScale,
                Radius,
                Strength / (float)Math.Pow(Radius, 6),
                Bias));

            _aoParameters1.SetValue(new Vector4(
                viewport.Width,
                viewport.Height,
                far,
                MaxOcclusion));

            _aoParameters2.SetValue(new Vector4(
                SampleDistribution,
                1.0f / (EdgeSoftness + 0.001f) * far,
                BlurScale,
                MinBias));

            _gBuffer0Parameter.SetValue(context.GBuffer0);

            // We use two temporary render targets: one for the raw AO term and one for
            // the horizontally blurred AO term.
            var format = new RenderTargetFormat(
                context.Viewport.Width,
                context.Viewport.Height,
                false,
                SurfaceFormat.Color,
                DepthFormat.None);

            var tempTarget0 = renderTargetPool.Obtain2D(format);
            var tempTarget1 = renderTargetPool.Obtain2D(format);

            // Create SSAO.
            graphicsDevice.SetRenderTarget(tempTarget0);
            _createAOPass.Apply();

            graphicsDevice.Clear(new Color(1.0f, 1.0f, 1.0f, 1.0f));
            graphicsDevice.DrawFullScreenQuad();

            // Horizontal blur.
            graphicsDevice.SetRenderTarget(tempTarget1);
            _occlusionTextureParameter.SetValue(tempTarget0);
            _blurHorizontalPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Vertical blur, optionally combined with the source image.
            graphicsDevice.SetRenderTarget(target);
            graphicsDevice.Viewport = viewport;
            _occlusionTextureParameter.SetValue(tempTarget1);
            if (!CombineWithSource)
            {
                _blurVerticalPass.Apply();
            }
            else
            {
                if (_sourceTextureParameter != null)
                    _sourceTextureParameter.SetValue(source);

                // Floating-point render targets must not use linear filtering.
                if (TextureHelper.IsFloatingPointFormat(source.Format))
                    graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
                else
                    graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;

                _blurVerticalAndCombinePass.Apply();
            }

            graphicsDevice.DrawFullScreenQuad();

            // Clean up: release the temporary targets and unset texture parameters so
            // the textures are not kept alive/locked by the effect.
            renderTargetPool.Recycle(tempTarget0);
            renderTargetPool.Recycle(tempTarget1);
            if (_sourceTextureParameter != null)
                _sourceTextureParameter.SetValue((Texture2D)null);
            _occlusionTextureParameter.SetValue((Texture2D)null);
            _gBuffer0Parameter.SetValue((Texture2D)null);
            context.SourceTexture = source;
            context.RenderTarget = target;
            context.Viewport = viewport;
        }
示例#26
0
        /// <summary>
        /// Renders the scene using occlusion culling and optionally visualizes the
        /// occlusion buffers, a debug object, and a top-down view of the scene.
        /// </summary>
        /// <param name="context">The render context.</param>
        protected override void OnRender(RenderContext context)
        {
            if (ActiveCameraNode == null)
            {
                return;
            }

            context.Scene         = Scene;
            context.CameraNode    = ActiveCameraNode;
            context.LodCameraNode = ActiveCameraNode;

            // Copy all scene nodes into a list.
            CopyNodesToList(Scene, _sceneNodes);

            // ----- Occlusion Culling
            // Usually, we would make a scene query to get all scene nodes within the
            // viewing frustum. But in this example we will use the new OcclusionBuffer.
            if (EnableCulling)
            {
                // Render all occluders into the occlusion buffer.
                // - "_sceneNodes" is a list of all scene nodes. The OcclusionBuffer will
                //   go through the list and render all occluders.
                // - "LightNode" is the main directional light that casts a cascaded shadow.
                //   Passing the light node to the OcclusionBuffer activates shadow caster
                //   culling.
                // - A custom scene node renderer can be passed to the OcclusionBuffer. In
                //   this example, the ground mesh "Gravel/Gravel.fbx" has a material with an
                //   "Occluder" render pass. When we pass the "MeshRenderer" to the OcclusionBuffer
                //   the ground mesh will be rendered directly into the occlusion buffer.
                Profiler.Start("Occlusion.Render");
                // BUGFIX: The render pass was the corrupted literal "******". Per the
                // comment above, the material's occluder render pass is named "Occluder".
                context.RenderPass = "Occluder";
                OcclusionBuffer.Render(_sceneNodes, LightNode, MeshRenderer, context);
                context.RenderPass = null;
                Profiler.Stop("Occlusion.Render");

                // Perform occlusion culling on the specified list of scene nodes.
                // - The scene nodes will be tested against the occluders. If a scene node
                //   is hidden, it will be replaced with a null entry in the list.
                // - When shadow caster culling is active, shadow casting scene nodes will
                //   also be tested against the occluders. If the shadow is not visible,
                //   the shadow caster will internally be marked as occluded. The ShadowMapRenderer
                //   will automatically skip occluded scene nodes.
                Profiler.Start("Occlusion.Query");
                OcclusionBuffer.Query(_sceneNodes, context);
                Profiler.Stop("Occlusion.Query");
            }

            // The base DeferredGraphicsScreen expects a CustomSceneQuery.
            // --> Copy the occlusion culling results to a CustomSceneQuery.
            _sceneQuery.Set(ActiveCameraNode, _sceneNodes, context);

            var renderTargetPool     = GraphicsService.RenderTargetPool;
            var graphicsDevice       = GraphicsService.GraphicsDevice;
            var originalRenderTarget = context.RenderTarget;
            var fullViewport         = context.Viewport;

            RenderTarget2D topDownRenderTarget = null;
            const int      topDownViewSize     = 384;

            if (ShowTopDownView)
            {
                // Render top-down scene into an offscreen render target.
                var format = new RenderTargetFormat(context.RenderTarget)
                {
                    Width  = topDownViewSize,
                    Height = topDownViewSize,
                };
                topDownRenderTarget = renderTargetPool.Obtain2D(format);

                context.Scene        = Scene;
                context.CameraNode   = _topDownCameraNode;
                context.Viewport     = new Viewport(0, 0, topDownViewSize, topDownViewSize);
                context.RenderTarget = topDownRenderTarget;
                RenderScene(_sceneQuery, context, true, false, false, false);

                // Mark the player camera in the top-down view.
                _debugRenderer.Clear();
                _debugRenderer.DrawObject(ActiveCameraNode, Color.Red, true, true);
                _debugRenderer.Render(context);

                context.RenderTarget = originalRenderTarget;
                context.Viewport     = fullViewport;
            }

            // Render regular 3D scene.
            context.Scene      = Scene;
            context.CameraNode = ActiveCameraNode;
            RenderScene(_sceneQuery, context, true, false, true, false);

            // Render debug visualization on top of scene.
            bool renderObject = false;

            switch (DebugVisualization)
            {
            case DebugVisualization.CameraHzb:
                OcclusionBuffer.VisualizeCameraBuffer(DebugLevel, context);
                break;

            case DebugVisualization.LightHzb:
                OcclusionBuffer.VisualizeLightBuffer(DebugLevel, context);
                break;

            case DebugVisualization.Object:
                OcclusionBuffer.VisualizeObject(DebugObject, context);
                renderObject = true;
                break;

            case DebugVisualization.ShadowCaster:
                OcclusionBuffer.VisualizeShadowCaster(DebugObject, context);
                break;

            case DebugVisualization.ShadowVolume:
                OcclusionBuffer.VisualizeShadowVolume(DebugObject, context);
                renderObject = true;
                break;
            }

            if (renderObject)
            {
                _debugRenderer.Clear();
                _debugRenderer.DrawObject(DebugObject, Color.Yellow, true, true);
                _debugRenderer.Render(context);
            }

            if (ShowTopDownView)
            {
                // Copy offscreen buffer to screen (bottom-right corner).
                context.Viewport        = fullViewport;
                graphicsDevice.Viewport = fullViewport;

                SpriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque, SamplerState.PointClamp, DepthStencilState.None, RasterizerState.CullNone);
                SpriteBatch.Draw(
                    topDownRenderTarget,
                    new Rectangle(fullViewport.Width - topDownViewSize, fullViewport.Height - topDownViewSize, topDownViewSize, topDownViewSize),
                    Color.White);
                SpriteBatch.End();

                renderTargetPool.Recycle(topDownRenderTarget);
            }

            // Clean-up
            _sceneNodes.Clear();
            _sceneQuery.Reset();

            context.Scene         = null;
            context.CameraNode    = null;
            context.LodCameraNode = null;
        }
示例#27
0
        /// <summary>
        /// Assigns a shadow mask and a channel within that mask to the given
        /// shadow-casting light node.
        /// </summary>
        /// <param name="lightNode">The shadow-casting light node.</param>
        /// <param name="context">The render context.</param>
        /// <returns>
        /// The index of the assigned shadow mask, or -1 if the light node does not fit
        /// into any bin.
        /// </returns>
        private int AssignShadowMask(LightNode lightNode, RenderContext context)
        {
            // Each shadow mask has 4 8-bit channels. We must assign a shadow mask channel to
            // each shadow-casting light. Non-overlapping lights can use the same channel.
            // Overlapping lights must use different channels. If we run out of channels,
            // we remove some lights from the list.
            var scene = context.Scene;

            var viewport   = context.Viewport;
            int maskWidth  = viewport.Width;
            int maskHeight = viewport.Height;

            if (UseHalfResolution && Numeric.IsLessOrEqual(UpsampleDepthSensitivity, 0))
            {
                // Half-res rendering with no upsampling.
                maskWidth  /= 2;
                maskHeight /= 2;
            }

            // Find the first bin whose lights do not overlap the new light node.
            int binIndex = -1;

            for (int i = 0; i < _shadowMaskBins.Length && binIndex < 0; i++)
            {
                var candidateBin = _shadowMaskBins[i];

                // Check if the light node touches any other light node in this bin.
                bool overlapsBin = false;
                foreach (var otherLightNode in candidateBin)
                {
                    if (scene.HaveContact(lightNode, otherLightNode))
                    {
                        overlapsBin = true;
                        break;
                    }
                }

                // No overlap. Use this bin.
                if (!overlapsBin)
                {
                    candidateBin.Add(lightNode);
                    binIndex = i;
                }
            }

            if (binIndex < 0)
            {
                return -1; // Light node does not fit into any bin.
            }

            // Four bins (channels) share one shadow mask texture.
            int shadowMaskIndex = binIndex / 4;

            if (_shadowMasks[shadowMaskIndex] == null)
            {
                // Lazily create the shadow mask render target.
                var shadowMaskFormat = new RenderTargetFormat(maskWidth, maskHeight, false, SurfaceFormat.Color, DepthFormat.None);
                _shadowMasks[shadowMaskIndex] = context.GraphicsService.RenderTargetPool.Obtain2D(shadowMaskFormat);
            }

            // Assign shadow mask and channel to the light node.
            lightNode.Shadow.ShadowMask        = _shadowMasks[shadowMaskIndex];
            lightNode.Shadow.ShadowMaskChannel = binIndex % 4;

            return shadowMaskIndex;
        }
示例#28
0
    /// <summary>
    /// Renders the scene four times into the four quadrants of the back buffer to
    /// compare LOD settings: the top row hides nodes with UserFlags == 1, the bottom
    /// row hides nodes with UserFlags == 2, and LOD blending is enabled only in the
    /// right column.
    /// </summary>
    /// <param name="context">The render context.</param>
    protected override void OnRender(RenderContext context)
    {
      if (ActiveCameraNode == null)
        return;

      var renderTargetPool = GraphicsService.RenderTargetPool;
      var graphicsDevice = GraphicsService.GraphicsDevice;
      var backBuffer = context.RenderTarget;
      var fullViewport = context.Viewport;

      // Each quadrant is half the width/height of the full viewport.
      int halfWidth = fullViewport.Width / 2;
      int halfHeight = fullViewport.Height / 2;
      var format = new RenderTargetFormat(context.RenderTarget)
      {
        Width = halfWidth,
        Height = halfHeight
      };

      // Offscreen targets for the first three quadrants; the last quadrant is rendered
      // directly into the original render target.
      var topLeftTarget = renderTargetPool.Obtain2D(format);
      var topRightTarget = renderTargetPool.Obtain2D(format);
      var bottomLeftTarget = renderTargetPool.Obtain2D(format);
      var topLeftViewport = new Viewport(0, 0, halfWidth, halfHeight);
      var topRightViewport = new Viewport(halfWidth, 0, halfWidth, halfHeight);
      var bottomLeftViewport = new Viewport(0, halfHeight, halfWidth, halfHeight);

      context.Scene = Scene;
      context.CameraNode = ActiveCameraNode;
      context.LodCameraNode = context.CameraNode;
      context.LodHysteresis = 0.5f;

      // Reduce detail level by increasing the LOD bias.
      context.LodBias = 2.0f;

      for (int quadrant = 0; quadrant < 4; quadrant++)
      {
        switch (quadrant)
        {
          case 0:
            // TOP, LEFT
            context.RenderTarget = topLeftTarget;
            context.Viewport = new Viewport(0, 0, topLeftViewport.Width, topLeftViewport.Height);
            context.LodBlendingEnabled = false;
            break;
          case 1:
            // TOP, RIGHT
            context.RenderTarget = topRightTarget;
            context.Viewport = new Viewport(0, 0, topRightViewport.Width, topRightViewport.Height);
            context.LodBlendingEnabled = true;
            break;
          case 2:
            // BOTTOM, LEFT
            context.RenderTarget = bottomLeftTarget;
            context.Viewport = new Viewport(0, 0, bottomLeftViewport.Width, bottomLeftViewport.Height);
            context.LodBlendingEnabled = false;
            break;
          default:
            // BOTTOM, RIGHT
            context.RenderTarget = backBuffer;
            context.Viewport = new Viewport(fullViewport.X + halfWidth, fullViewport.Y + halfHeight, halfWidth, halfHeight);
            context.LodBlendingEnabled = true;
            break;
        }

        var sceneQuery = Scene.Query<SceneQueryWithLodBlending>(context.CameraNode, context);

        // Top row hides nodes flagged with 1; bottom row hides nodes flagged with 2.
        int hiddenFlag = (quadrant <= 1) ? 1 : 2;
        var renderableNodes = sceneQuery.RenderableNodes;
        for (int j = 0; j < renderableNodes.Count; j++)
        {
          if (renderableNodes[j].UserFlags == hiddenFlag)
            renderableNodes[j] = null;
        }

        RenderScene(sceneQuery, context, true, true, true, true);

        sceneQuery.Reset();
      }

      // ----- Copy screens.
      // Copy the previous screens from the temporary render targets into the back buffer.
      context.Viewport = fullViewport;
      graphicsDevice.Viewport = fullViewport;

      SpriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque, SamplerState.PointClamp, DepthStencilState.None, RasterizerState.CullNone);
      SpriteBatch.Draw(topLeftTarget, topLeftViewport.Bounds, Color.White);
      SpriteBatch.Draw(topRightTarget, topRightViewport.Bounds, Color.White);
      SpriteBatch.Draw(bottomLeftTarget, bottomLeftViewport.Bounds, Color.White);
      SpriteBatch.End();

      renderTargetPool.Recycle(topLeftTarget);
      renderTargetPool.Recycle(topRightTarget);
      renderTargetPool.Recycle(bottomLeftTarget);

      context.Scene = null;
      context.CameraNode = null;
      context.LodCameraNode = null;
      context.RenderPass = null;
    }
示例#29
0
        /// <summary>
        /// Applies the effect iteratively (NumberOfPasses times), ping-ponging between
        /// two temporary render targets; the last pass writes into the context's render
        /// target.
        /// </summary>
        /// <param name="context">The render context with source texture and render target.</param>
        protected override void OnProcess(RenderContext context)
        {
            var device = GraphicsService.GraphicsDevice;
            var pool = GraphicsService.RenderTargetPool;

            // The target width/height.
            int targetWidth = context.Viewport.Width;
            int targetHeight = context.Viewport.Height;

            var tempFormat = new RenderTargetFormat(targetWidth, targetHeight, false, context.SourceTexture.Format, DepthFormat.None);
            RenderTarget2D pingTarget = pool.Obtain2D(tempFormat);
            RenderTarget2D pongTarget = pool.Obtain2D(tempFormat);

            // Floating-point textures must be sampled with point filtering.
            device.SamplerStates[0] = TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format)
                ? SamplerState.PointClamp
                : SamplerState.LinearClamp;

            _viewportSizeParameter.SetValue(new Vector2(targetWidth, targetHeight));
            _numberOfSamplesParameter.SetValue(NumberOfSamples / 2);

            for (int pass = 0; pass < NumberOfPasses; pass++)
            {
                bool isLastPass = (pass == NumberOfPasses - 1);
                bool isEvenPass = (pass % 2 == 0);

                // The last pass writes into the real target; the others ping-pong
                // between the two temporary targets.
                if (isLastPass)
                {
                    device.SetRenderTarget(context.RenderTarget);
                    device.Viewport = context.Viewport;
                }
                else if (isEvenPass)
                {
                    device.SetRenderTarget(pingTarget);
                }
                else
                {
                    device.SetRenderTarget(pongTarget);
                }

                // The first pass reads the source texture; later passes read the
                // temporary target written by the previous pass.
                if (pass == 0)
                    _sourceTextureParameter.SetValue(context.SourceTexture);
                else if (isEvenPass)
                    _sourceTextureParameter.SetValue(pongTarget);
                else
                    _sourceTextureParameter.SetValue(pingTarget);

                _iterationParameter.SetValue(pass);
                _effect.CurrentTechnique.Passes[0].Apply();
                device.DrawFullScreenQuad();
            }

            // Clean-up: unset the texture parameter and release the temporary targets.
            _sourceTextureParameter.SetValue((Texture2D)null);

            pool.Recycle(pingTarget);
            pool.Recycle(pongTarget);
        }
示例#30
0
    public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
    {
      if (nodes == null)
        throw new ArgumentNullException("nodes");
      if (context == null)
        throw new ArgumentNullException("context");

      int numberOfNodes = nodes.Count;
      if (numberOfNodes == 0)
        return;

      Debug.Assert(context.CameraNode != null, "A camera node has to be set in the render context.");
      Debug.Assert(context.Scene != null, "A scene has to be set in the render context.");

      var originalRenderTarget = context.RenderTarget;
      var originalViewport = context.Viewport;
      var originalReferenceNode = context.ReferenceNode;

      // Camera properties
      var cameraNode = context.CameraNode;
      var cameraPose = cameraNode.PoseWorld;
      var projection = cameraNode.Camera.Projection;
      if (!(projection is PerspectiveProjection))
        throw new NotImplementedException("VSM shadow maps not yet implemented for scenes with perspective camera.");

      float fieldOfViewY = projection.FieldOfViewY;
      float aspectRatio = projection.AspectRatio;

      // Update SceneNode.LastFrame for all rendered nodes.
      int frame = context.Frame;
      cameraNode.LastFrame = frame;

      // The scene node renderer should use the light camera instead of the player camera.
      context.CameraNode = _orthographicCameraNode;

      // The shadow map is rendered using the technique "DirectionalVsm".
      // See ShadowMap.fx in the DigitalRune source code folder.
      context.Technique = "DirectionalVsm";

      var graphicsService = context.GraphicsService;
      var graphicsDevice = graphicsService.GraphicsDevice;
      var originalBlendState = graphicsDevice.BlendState;
      var originalDepthStencilState = graphicsDevice.DepthStencilState;
      var originalRasterizerState = graphicsDevice.RasterizerState;

      for (int i = 0; i < numberOfNodes; i++)
      {
        var lightNode = nodes[i] as LightNode;
        if (lightNode == null)
          continue;

        var shadow = lightNode.Shadow as VarianceShadow;
        if (shadow == null)
          continue;

        // LightNode is visible in current frame.
        lightNode.LastFrame = frame;

        // The format of the shadow map:
        var format = new RenderTargetFormat(
          shadow.PreferredSize,
          shadow.PreferredSize,
          false,
          shadow.Prefer16Bit ? SurfaceFormat.HalfVector2 : SurfaceFormat.Vector2,  // VSM needs two channels!
          DepthFormat.Depth24);

        if (shadow.ShadowMap != null && shadow.IsLocked)
          continue;

        if (shadow.ShadowMap == null)
          shadow.ShadowMap = graphicsService.RenderTargetPool.Obtain2D(format);

        graphicsDevice.DepthStencilState = DepthStencilState.Default;
        graphicsDevice.BlendState = BlendState.Opaque;

        // Render front and back faces for VSM due to low shadow map texel density.
        // (VSM is usually used for distant geometry.)
        graphicsDevice.RasterizerState = RasterizerState.CullNone;

        graphicsDevice.SetRenderTarget(shadow.ShadowMap);
        context.RenderTarget = shadow.ShadowMap;
        context.Viewport = graphicsDevice.Viewport;

        graphicsDevice.Clear(Color.White);

        // Compute an orthographic camera for the light.
        // If Shadow.TargetArea is null, the shadow map should cover the area in front of the player camera.
        // If Shadow.TargetArea is set, the shadow map should cover this static area.
        if (shadow.TargetArea == null)
        {
          // near/far of this shadowed area.
          float near = projection.Near;
          float far = shadow.MaxDistance;

          // Abort if near-far distances are invalid.
          if (Numeric.IsGreaterOrEqual(near, far))
            continue;

          // Create a view volume for frustum part that is covered by the shadow map.
          _cameraVolume.SetFieldOfView(fieldOfViewY, aspectRatio, near, far);

          // Find the bounding sphere of the frustum part.
          Vector3F center;
          float radius;
          GetBoundingSphere(_cameraVolume, out center, out radius);

          // Convert center to light space.
          Pose lightPose = lightNode.PoseWorld;
          center = cameraPose.ToWorldPosition(center);
          center = lightPose.ToLocalPosition(center);

          // Snap center to texel positions to avoid shadow swimming.
          SnapPositionToTexels(ref center, 2 * radius, shadow.ShadowMap.Height);

          // Convert center back to world space.
          center = lightPose.ToWorldPosition(center);

          Matrix33F orientation = lightPose.Orientation;
          Vector3F backward = orientation.GetColumn(2);
          var orthographicProjection = (OrthographicProjection)_orthographicCameraNode.Camera.Projection;

          // Create a tight orthographic frustum around the target bounding sphere.
          orthographicProjection.SetOffCenter(-radius, radius, -radius, radius, 0, 2 * radius);
          Vector3F cameraPosition = center + radius * backward;
          Pose frustumPose = new Pose(cameraPosition, orientation);
          Pose view = frustumPose.Inverse;
          shadow.ViewProjection = (Matrix)view * orthographicProjection;

          // For rendering the shadow map, move near plane back by MinLightDistance
          // to catch occluders in front of the camera frustum.
          orthographicProjection.Near = -shadow.MinLightDistance;
          _orthographicCameraNode.PoseWorld = frustumPose;
        }
        else
        {
          // Get bounding sphere of static target area.
          Aabb targetAabb = shadow.TargetArea.Value;
          Vector3F center = targetAabb.Center;
          float radius = (targetAabb.Maximum - center).Length;

          // Convert center to light space.
          Matrix33F orientation = lightNode.PoseWorld.Orientation;
          Vector3F backward = orientation.GetColumn(2);
          var orthographicProjection = (OrthographicProjection)_orthographicCameraNode.Camera.Projection;

          // Create a tight orthographic frustum around the target bounding sphere.
          orthographicProjection.SetOffCenter(-radius, radius, -radius, radius, 0, 2 * radius);
          Vector3F cameraPosition = center + radius * backward;
          Pose frustumPose = new Pose(cameraPosition, orientation);
          Pose view = frustumPose.Inverse;
          shadow.ViewProjection = (Matrix)view * orthographicProjection;

          // For rendering the shadow map, move near plane back by MinLightDistance
          // to catch occluders in front of the camera frustum.
          orthographicProjection.Near = -shadow.MinLightDistance;
          _orthographicCameraNode.PoseWorld = frustumPose;
        }

        context.ReferenceNode = lightNode;
        context.Object = shadow;

        // Render objects into shadow map.
        bool shadowMapContainsSomething = RenderCallback(context);

        if (shadowMapContainsSomething)
        {
          // Blur shadow map.
          if (shadow.Filter != null && shadow.Filter.Scale > 0)
          {
            context.SourceTexture = shadow.ShadowMap;
            shadow.Filter.Process(context);
            context.SourceTexture = null;
          }
        }
        else
        {
          // Shadow map is empty. Recycle it.
          graphicsService.RenderTargetPool.Recycle(shadow.ShadowMap);
          shadow.ShadowMap = null;
        }
      }

      graphicsDevice.SetRenderTarget(null);

      graphicsDevice.BlendState = originalBlendState;
      graphicsDevice.DepthStencilState = originalDepthStencilState;
      graphicsDevice.RasterizerState = originalRasterizerState;

      context.CameraNode = cameraNode;
      context.Technique = null;
      context.RenderTarget = originalRenderTarget;
      context.Viewport = originalViewport;
      context.ReferenceNode = originalReferenceNode;
      context.Object = null;
    }
示例#31
0
        /// <summary>
        /// Performs post-processing using the specified collection of processors.
        /// </summary>
        /// <param name="processors">The post-processors.</param>
        /// <param name="context">The render context.</param>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="context"/> is <see langword="null"/>.
        /// </exception>
        private static void Process(IList<PostProcessor> processors, RenderContext context)
        {
            // Validate before dereferencing: previously the Debug.Asserts read
            // context.SourceTexture before this null check, so a null context raised
            // a NullReferenceException/assert failure in debug builds instead of the
            // documented ArgumentNullException.
            if (context == null)
            {
                throw new ArgumentNullException("context");
            }

            Debug.Assert(processors != null);
            Debug.Assert(context.SourceTexture != null);

            var graphicsService  = context.GraphicsService;
            var renderTargetPool = graphicsService.RenderTargetPool;

            // Remember the original context values; they are restored/checked at the end.
            var originalSourceTexture = context.SourceTexture;
            var originalRenderTarget  = context.RenderTarget;
            var originalViewport      = context.Viewport;

            // Some normal post-processors can be used with any blend state. In a chain
            // alpha blending does not make sense.
            graphicsService.GraphicsDevice.BlendState = BlendState.Opaque;

            // Intermediate render targets for ping-ponging.
            // TODO: Use the originalRenderTarget in the ping-ponging.
            // (Currently, we create up to 2 temp targets. If the originalRenderTarget is not null,
            // and if the viewport is the whole target, then we could use the originalRenderTarget
            // in the ping-ponging. But care must be taken that the originalRenderTarget is never
            // used as the output for the post-processor before the last post-processor...)
            RenderTarget2D tempSource = null;
            RenderTarget2D tempTarget = null;

            // The size and format for intermediate render targets is determined by the source image.
            var tempFormat = new RenderTargetFormat(originalSourceTexture)
            {
                Mipmap             = false,
                DepthStencilFormat = DepthFormat.None,
            };

            // Remember if any processor has written into the final target.
            bool targetWritten = false;

            // Execute all processors.
            var numberOfProcessors = processors.Count;

            for (int i = 0; i < numberOfProcessors; i++)
            {
                var processor = processors[i];
                if (!processor.Enabled)
                {
                    continue;
                }

                // Find effective output target:
                // If this processor is the last, then we render into the user-defined target.
                // If this is not the last processor, then we use an intermediate buffer.
                if (IsLastOutputProcessor(processors, i))
                {
                    context.RenderTarget = originalRenderTarget;
                    context.Viewport     = originalViewport;
                    targetWritten        = true;
                }
                else
                {
                    // This is an intermediate post-processor, so we need an intermediate target.
                    // If we have one, does it still have the correct format? If not, recycle it.
                    if (tempTarget != null && !processor.DefaultTargetFormat.IsCompatibleWith(tempFormat))
                    {
                        renderTargetPool.Recycle(tempTarget);
                        tempTarget = null;
                    }

                    if (tempTarget == null)
                    {
                        // Get a new render target.
                        // The format that the processor wants has priority. The current format
                        // is the fallback.
                        tempFormat = new RenderTargetFormat(
                            processor.DefaultTargetFormat.Width ?? tempFormat.Width,
                            processor.DefaultTargetFormat.Height ?? tempFormat.Height,
                            processor.DefaultTargetFormat.Mipmap ?? tempFormat.Mipmap,
                            processor.DefaultTargetFormat.SurfaceFormat ?? tempFormat.SurfaceFormat,
                            processor.DefaultTargetFormat.DepthStencilFormat ?? tempFormat.DepthStencilFormat);
                        tempTarget = renderTargetPool.Obtain2D(tempFormat);
                    }

                    context.RenderTarget = tempTarget;
                    context.Viewport     = new Viewport(0, 0, tempFormat.Width.Value, tempFormat.Height.Value);
                }

                processor.ProcessInternal(context);

                // The output of this processor becomes the input of the next one.
                context.SourceTexture = context.RenderTarget;

                // If we have rendered into tempTarget, then we remember it in tempSource
                // and reuse the render target in tempSource if any is set.
                if (context.RenderTarget == tempTarget)
                {
                    Mathematics.MathHelper.Swap(ref tempSource, ref tempTarget);
                }
            }

            // If there are no processors, or no processor is enabled, then we have to
            // copy the source to the target manually.
            if (!targetWritten)
            {
                graphicsService.GetCopyFilter().ProcessInternal(context);
            }

            context.SourceTexture = originalSourceTexture;

            // The last processor should have written into the original target.
            Debug.Assert(context.RenderTarget == originalRenderTarget);

            renderTargetPool.Recycle(tempSource);
            renderTargetPool.Recycle(tempTarget);
        }
示例#32
0
        private int AssignShadowMask(LightNode lightNode, RenderContext context)
        {
            // A shadow mask is a render target with four 8-bit channels, and each
            // shadow-casting light occupies one channel. Lights that do not overlap
            // may share a channel; overlapping lights need different channels. If no
            // channel is free, the light is rejected (return -1).

            var scene = context.Scene;

            // Derive the shadow mask resolution from the current viewport.
            var viewport = context.Viewport;
            int maskWidth = viewport.Width;
            int maskHeight = viewport.Height;
            if (UseHalfResolution && Numeric.IsLessOrEqual(UpsampleDepthSensitivity, 0))
            {
                // Half-res rendering with no upsampling.
                maskWidth /= 2;
                maskHeight /= 2;
            }

            // Find the first bin whose lights do not touch the new light node.
            int binIndex = 0;
            while (binIndex < _shadowMaskBins.Length)
            {
                var bin = _shadowMaskBins[binIndex];

                bool overlapsBin = false;
                foreach (var otherLightNode in bin)
                {
                    if (scene.HaveContact(lightNode, otherLightNode))
                    {
                        overlapsBin = true;
                        break;
                    }
                }

                if (!overlapsBin)
                {
                    // No overlap - this bin accepts the light node.
                    bin.Add(lightNode);
                    break;
                }

                binIndex++;
            }

            if (binIndex >= _shadowMaskBins.Length)
            {
                // Every bin contains an overlapping light; the node does not fit.
                return -1;
            }

            // Four bins (channels) share one shadow mask texture.
            int shadowMaskIndex = binIndex / 4;
            if (_shadowMasks[shadowMaskIndex] == null)
            {
                // Lazily create the shadow mask render target.
                var shadowMaskFormat = new RenderTargetFormat(maskWidth, maskHeight, false, SurfaceFormat.Color, DepthFormat.None);
                _shadowMasks[shadowMaskIndex] = context.GraphicsService.RenderTargetPool.Obtain2D(shadowMaskFormat);
            }

            // Assign shadow mask and channel to the light node.
            lightNode.Shadow.ShadowMask = _shadowMasks[shadowMaskIndex];
            lightNode.Shadow.ShadowMaskChannel = binIndex % 4;

            return shadowMaskIndex;
        }
        /// <summary>
        /// Renders the shadow maps of all light nodes in <paramref name="nodes"/> whose
        /// shadow is a <see cref="CascadedShadow"/>. All cascades of a light are rendered
        /// into one texture atlas (NumberOfCascades tiles side by side); locked cascades
        /// are copied from the previous shadow map instead of being re-rendered.
        /// </summary>
        /// <param name="nodes">The scene nodes. Entries that are not light nodes with a
        /// cascaded shadow are ignored.</param>
        /// <param name="context">The render context.</param>
        /// <param name="order">Not referenced by this method.</param>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="nodes"/> or <paramref name="context"/> is <see langword="null"/>.
        /// </exception>
        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            if (nodes == null)
            throw new ArgumentNullException("nodes");
              if (context == null)
            throw new ArgumentNullException("context");

              int numberOfNodes = nodes.Count;
              if (numberOfNodes == 0)
            return;

              context.ThrowIfCameraMissing();
              context.ThrowIfSceneMissing();

              // Remember original context values; they are restored at the end of the method.
              var originalRenderTarget = context.RenderTarget;
              var originalViewport = context.Viewport;
              var originalReferenceNode = context.ReferenceNode;

              // Camera properties
              var cameraNode = context.CameraNode;
              var cameraPose = cameraNode.PoseWorld;
              var projection = cameraNode.Camera.Projection;
              if (!(projection is PerspectiveProjection))
            throw new NotImplementedException(
              "Cascaded shadow maps not yet implemented for scenes with orthographic camera.");

              float fieldOfViewY = projection.FieldOfViewY;
              float aspectRatio = projection.AspectRatio;

              // Update SceneNode.LastFrame for all visible nodes.
              int frame = context.Frame;
              cameraNode.LastFrame = frame;

              // The scene node renderer should use the light camera instead of the player camera.
              context.CameraNode = _orthographicCameraNode;
              context.Technique = "Directional";

              var graphicsService = context.GraphicsService;
              var graphicsDevice = graphicsService.GraphicsDevice;
              var savedRenderState = new RenderStateSnapshot(graphicsDevice);

              for (int i = 0; i < numberOfNodes; i++)
              {
            // Skip nodes that are not light nodes with a cascaded shadow.
            var lightNode = nodes[i] as LightNode;
            if (lightNode == null)
              continue;

            var shadow = lightNode.Shadow as CascadedShadow;
            if (shadow == null)
              continue;

            // LightNode is visible in current frame.
            lightNode.LastFrame = frame;

            // Atlas format: all cascades side by side in one row.
            var format = new RenderTargetFormat(
              shadow.PreferredSize * shadow.NumberOfCascades,
              shadow.PreferredSize,
              false,
              shadow.Prefer16Bit ? SurfaceFormat.HalfSingle : SurfaceFormat.Single,
              DepthFormat.Depth24);

            // Note: the lock flags are checked for a fixed maximum of 4 cascades.
            bool allLocked = shadow.IsCascadeLocked[0] && shadow.IsCascadeLocked[1] && shadow.IsCascadeLocked[2] && shadow.IsCascadeLocked[3];

            if (shadow.ShadowMap == null)
            {
              shadow.ShadowMap = graphicsService.RenderTargetPool.Obtain2D(format);
              allLocked = false;   // Need to render shadow map.
            }

            // If we can reuse the whole shadow map texture, abort early.
            if (allLocked)
              continue;

            // Split distances: [0] = camera near plane, [1..4] = user-defined cascade distances.
            _csmSplitDistances[0] = projection.Near;
            _csmSplitDistances[1] = shadow.Distances.X;
            _csmSplitDistances[2] = shadow.Distances.Y;
            _csmSplitDistances[3] = shadow.Distances.Z;
            _csmSplitDistances[4] = shadow.Distances.W;

            // (Re-)Initialize the array for cached matrices in the CascadedShadow.
            if (shadow.ViewProjections == null || shadow.ViewProjections.Length < shadow.NumberOfCascades)
              shadow.ViewProjections = new Matrix[shadow.NumberOfCascades];

            // Initialize the projection matrices to an empty matrix.
            // The unused matrices should not contain valid projections because
            // CsmComputeSplitOptimized in CascadedShadowMask.fxh should not choose
            // the wrong cascade.
            for (int j = 0; j < shadow.ViewProjections.Length; j++)
            {
              if (!shadow.IsCascadeLocked[j])    // Do not delete cached info for cached cascade.
            shadow.ViewProjections[j] = new Matrix();
            }

            // If some cascades are cached, we have to create a new shadow map and copy
            // the old cascades into the new shadow map.
            if (shadow.IsCascadeLocked[0] || shadow.IsCascadeLocked[1] || shadow.IsCascadeLocked[2] || shadow.IsCascadeLocked[3])
            {
              var oldShadowMap = shadow.ShadowMap;
              shadow.ShadowMap = graphicsService.RenderTargetPool.Obtain2D(new RenderTargetFormat(oldShadowMap));

              graphicsDevice.SetRenderTarget(shadow.ShadowMap);
              graphicsDevice.Clear(Color.White);

              // Copy each locked tile from the old atlas into the new one.
              var spriteBatch = graphicsService.GetSpriteBatch();
              spriteBatch.Begin(SpriteSortMode.Deferred, BlendState.Opaque, SamplerState.PointClamp, DepthStencilState.None, RasterizerState.CullNone);
              for (int cascade = 0; cascade < shadow.NumberOfCascades; cascade++)
              {
            if (shadow.IsCascadeLocked[cascade])
            {
              var viewport = GetViewport(shadow, cascade);
              var rectangle = new Rectangle(viewport.X, viewport.Y, viewport.Width, viewport.Height);
              spriteBatch.Draw(oldShadowMap, rectangle, rectangle, Color.White);
            }
              }
              spriteBatch.End();

              graphicsService.RenderTargetPool.Recycle(oldShadowMap);
            }
            else
            {
              graphicsDevice.SetRenderTarget(shadow.ShadowMap);
              graphicsDevice.Clear(Color.White);
            }

            // Set up device and context for rendering into the shadow map atlas.
            context.RenderTarget = shadow.ShadowMap;
            graphicsDevice.DepthStencilState = DepthStencilState.Default;
            graphicsDevice.RasterizerState = RasterizerState.CullCounterClockwise;
            graphicsDevice.BlendState = BlendState.Opaque;

            context.ReferenceNode = lightNode;
            context.Object = shadow;
            context.ShadowNear = 0;           // Obsolete: Only kept for backward compatibility.

            bool shadowMapContainsSomething = false;
            for (int split = 0; split < shadow.NumberOfCascades; split++)
            {
              if (shadow.IsCascadeLocked[split])
            continue;

              context.Data[RenderContextKeys.ShadowTileIndex] = CubeMapShadowMapRenderer.BoxedIntegers[split];

              // near/far of this split.
              float near = _csmSplitDistances[split];
              float far = Math.Max(_csmSplitDistances[split + 1], near + Numeric.EpsilonF);

              // Create a view volume for this split.
              _splitVolume.SetFieldOfView(fieldOfViewY, aspectRatio, near, far);

              // Find the bounding sphere of the split camera frustum.
              Vector3F center;
              float radius;
              GetBoundingSphere(_splitVolume, out center, out radius);

              // Extend radius to get enough border for filtering.
              int shadowMapSize = shadow.ShadowMap.Height;

              // We could extend by (ShadowMapSize + BorderTexels) / ShadowMapSize;
              // Add at least 1 texel. (This way, shadow mask shader can clamp uv to
              // texture rect in without considering half texel border to avoid sampling outside..)
              radius *= (float)(shadowMapSize + 1) / shadowMapSize;

              // Convert center to light space.
              Pose lightPose = lightNode.PoseWorld;
              center = cameraPose.ToWorldPosition(center);
              center = lightPose.ToLocalPosition(center);

              // Snap center to texel positions to avoid shadow swimming.
              SnapPositionToTexels(ref center, 2 * radius, shadowMapSize);

              // Convert center back to world space.
              center = lightPose.ToWorldPosition(center);

              Matrix33F orientation = lightPose.Orientation;
              Vector3F backward = orientation.GetColumn(2);
              var orthographicProjection = (OrthographicProjection)_orthographicCameraNode.Camera.Projection;

              // Create a tight orthographic frustum around the cascade's bounding sphere.
              orthographicProjection.SetOffCenter(-radius, radius, -radius, radius, 0, 2 * radius);
              Vector3F cameraPosition = center + radius * backward;
              Pose frustumPose = new Pose(cameraPosition, orientation);
              Pose view = frustumPose.Inverse;
              shadow.ViewProjections[split] = (Matrix)view * orthographicProjection;

              // Convert depth bias from "texel" to light space [0, 1] depth.
              // Minus sign to move receiver depth closer to light. Divide by depth to normalize.
              float unitsPerTexel = orthographicProjection.Width / shadow.ShadowMap.Height;
              shadow.EffectiveDepthBias[split] = -shadow.DepthBias[split] * unitsPerTexel / orthographicProjection.Depth;

              // Convert normal offset from "texel" to world space.
              shadow.EffectiveNormalOffset[split] = shadow.NormalOffset[split] * unitsPerTexel;

              // For rendering the shadow map, move near plane back by MinLightDistance
              // to catch occluders in front of the cascade.
              orthographicProjection.Near = -shadow.MinLightDistance;
              _orthographicCameraNode.PoseWorld = frustumPose;

              // Set a viewport to render a tile in the texture atlas.
              graphicsDevice.Viewport = GetViewport(shadow, split);
              context.Viewport = graphicsDevice.Viewport;

              shadowMapContainsSomething |= RenderCallback(context);
            }

            // Recycle shadow map if empty.
            if (!shadowMapContainsSomething)
            {
              graphicsService.RenderTargetPool.Recycle(shadow.ShadowMap);
              shadow.ShadowMap = null;
            }
              }

              // Restore device render states and the original render context values.
              graphicsDevice.SetRenderTarget(null);
              savedRenderState.Restore();

              context.CameraNode = cameraNode;
              context.ShadowNear = float.NaN;
              context.Technique = null;
              context.RenderTarget = originalRenderTarget;
              context.Viewport = originalViewport;
              context.ReferenceNode = originalReferenceNode;
              context.Object = null;
              context.Data[RenderContextKeys.ShadowTileIndex] = null;
        }
示例#34
0
    /// <summary>
    /// Renders the scene for two cameras into a split screen: the first camera
    /// (<c>ActiveCameraNode</c>) fills the left half, the second camera
    /// (<c>ActiveCameraNodeB</c>) fills the right half of the render target.
    /// </summary>
    /// <param name="context">The render context.</param>
    protected override void OnRender(RenderContext context)
    {
      // This screen expects two cameras.
      if (ActiveCameraNode == null || ActiveCameraNodeB == null)
        return;

      var renderTargetPool = GraphicsService.RenderTargetPool;
      var graphicsDevice = GraphicsService.GraphicsDevice;
      var originalRenderTarget = context.RenderTarget;
      var fullViewport = context.Viewport;

      // Get a render target for the first camera. Use half the width because we split
      // the screen horizontally.
      var format = new RenderTargetFormat(context.RenderTarget)
      {
        Width = fullViewport.Width / 2
      };
      var renderTargetA = renderTargetPool.Obtain2D(format);

      // Set scene and LOD settings shared by both camera passes.
      context.Scene = Scene;
      context.LodHysteresis = 0.5f;
      context.LodBias = 1.0f;
      context.LodBlendingEnabled = true;

      for (int i = 0; i < 2; i++)
      {
        if (i == 0)
        {
          // The first camera renders into renderTargetA.
          context.CameraNode = ActiveCameraNode;
          context.Viewport = new Viewport(0, 0, fullViewport.Width / 2, fullViewport.Height);
          context.RenderTarget = renderTargetA;
        }
        else
        {
          // The second camera renders into the right half of the final render target.
          context.CameraNode = ActiveCameraNodeB;
          context.Viewport = new Viewport(fullViewport.X + fullViewport.Width / 2, fullViewport.Y, fullViewport.Width / 2, fullViewport.Height);
          context.RenderTarget = originalRenderTarget;
        }
        context.LodCameraNode = context.CameraNode;

        // Get all scene nodes which overlap the camera frustum.
        CustomSceneQuery sceneQuery = Scene.Query<CustomSceneQuery>(context.CameraNode, context);

        // Render the scene nodes of the sceneQuery.
        RenderScene(sceneQuery, context, true, true, true, true);

        // ----- Copy image of first camera.
        if (i == 1)
        {
          // Copy the first camera's image from the temporary render target into the
          // left half of the final render target.
          context.Viewport = fullViewport;
          graphicsDevice.Viewport = fullViewport;

          SpriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque, SamplerState.PointClamp, DepthStencilState.None, RasterizerState.CullNone);
          SpriteBatch.Draw(
            renderTargetA,
            new Rectangle(0, 0, fullViewport.Width / 2, fullViewport.Height),
            Color.White);
          SpriteBatch.End();

          renderTargetPool.Recycle(renderTargetA);
        }
      }

      // Clean-up
      // NOTE(review): RenderPass is reset here although it is not assigned in this
      // method - presumably it is set inside RenderScene; confirm.
      context.Scene = null;
      context.CameraNode = null;
      context.LodCameraNode = null;
      context.RenderPass = null;
    }
示例#35
0
        // Perform FFTs.
        // 4 complex input images: source0.xy, source0.zw, source1.xy, source1.zw
        // 2 targets: target0 = displacement map, target1 = normal map using Color format.
        public void Process(RenderContext context, bool forward, Texture2D source0, Texture2D source1, RenderTarget2D target0, RenderTarget2D target1, float choppiness)
        {
            if (context == null)
            {
                throw new ArgumentNullException("context");
            }
            if (source0 == null)
            {
                throw new ArgumentNullException("source0");
            }
            if (source1 == null)
            {
                throw new ArgumentNullException("source1");
            }

            if (forward)
            {
                // For forward FFT, uncomment the LastPassScale stuff!
                throw new NotImplementedException("Forward FFT not implemented.");
            }

            var graphicsService  = context.GraphicsService;
            var graphicsDevice   = graphicsService.GraphicsDevice;
            var renderTargetPool = graphicsService.RenderTargetPool;

            var savedRenderState = new RenderStateSnapshot(graphicsDevice);

            graphicsDevice.BlendState        = BlendState.Opaque;
            graphicsDevice.RasterizerState   = RasterizerState.CullNone;
            graphicsDevice.DepthStencilState = DepthStencilState.None;

            int size = source0.Width;

            _parameterSize.SetValue((float)size);

            _parameterChoppiness.SetValue(choppiness);

            int numberOfButterflyPasses = (int)MathHelper.Log2GreaterOrEqual((uint)source0.Width);

            // ReSharper disable once ConditionIsAlwaysTrueOrFalse
            _parameterButterflyTexture.SetValue(GetButterflyTexture(forward, numberOfButterflyPasses));

            var format    = new RenderTargetFormat(size, size, false, source0.Format, DepthFormat.None);
            var tempPing0 = renderTargetPool.Obtain2D(format);
            var tempPing1 = renderTargetPool.Obtain2D(format);
            var tempPong0 = renderTargetPool.Obtain2D(format);
            var tempPong1 = renderTargetPool.Obtain2D(format);

            //_parameterIsLastPass.SetValue(false);

            // Perform horizontal and vertical FFT pass.
            for (int i = 0; i < 2; i++)
            {
                //_parameterLastPassScale.SetValue(1);

                // Perform butterfly passes. We ping-pong between two temp targets.
                for (int pass = 0; pass < numberOfButterflyPasses; pass++)
                {
                    _parameterButterflyIndex.SetValue(0.5f / numberOfButterflyPasses + (float)pass / numberOfButterflyPasses);

                    if (i == 0 && pass == 0)
                    {
                        // First pass.
                        _renderTargetBindings[0] = new RenderTargetBinding(tempPing0);
                        _renderTargetBindings[1] = new RenderTargetBinding(tempPing1);
                        graphicsDevice.SetRenderTargets(_renderTargetBindings);
                        _parameterSourceTexture0.SetValue(source0);
                        _parameterSourceTexture1.SetValue(source1);
                    }
                    else if (i == 1 && pass == numberOfButterflyPasses - 1)
                    {
                        // Last pass.
                        // We have explicit shader passes for the last FFT pass.
                        break;

                        //_parameterIsLastPass.SetValue(true);
                        //if (forward)
                        //  _parameterLastPassScale.SetValue(1.0f / size / size);

                        //if (_renderTargetBindings[0].RenderTarget == tempPing0)
                        //{
                        //  _renderTargetBindings[0] = new RenderTargetBinding(target0);
                        //  _renderTargetBindings[1] = new RenderTargetBinding(target1);
                        //  graphicsDevice.SetRenderTargets(_renderTargetBindings);
                        //  _parameterSourceTexture0.SetValue(tempPing0);
                        //  _parameterSourceTexture1.SetValue(tempPing1);
                        //}
                        //else
                        //{
                        //  _renderTargetBindings[0] = new RenderTargetBinding(target0);
                        //  _renderTargetBindings[1] = new RenderTargetBinding(target1);
                        //  graphicsDevice.SetRenderTargets(_renderTargetBindings);
                        //  _parameterSourceTexture0.SetValue(tempPong0);
                        //  _parameterSourceTexture1.SetValue(tempPong1);
                        //}
                    }
                    else
                    {
                        // Intermediate pass.
                        if (_renderTargetBindings[0].RenderTarget == tempPing0)
                        {
                            _renderTargetBindings[0] = new RenderTargetBinding(tempPong0);
                            _renderTargetBindings[1] = new RenderTargetBinding(tempPong1);
                            graphicsDevice.SetRenderTargets(_renderTargetBindings);
                            _parameterSourceTexture0.SetValue(tempPing0);
                            _parameterSourceTexture1.SetValue(tempPing1);
                        }
                        else
                        {
                            _renderTargetBindings[0] = new RenderTargetBinding(tempPing0);
                            _renderTargetBindings[1] = new RenderTargetBinding(tempPing1);
                            graphicsDevice.SetRenderTargets(_renderTargetBindings);
                            _parameterSourceTexture0.SetValue(tempPong0);
                            _parameterSourceTexture1.SetValue(tempPong1);
                        }
                    }

                    if (i == 0)
                    {
                        _passFftHorizontal.Apply();
                    }
                    else
                    {
                        _passFftVertical.Apply();
                    }

                    graphicsDevice.DrawFullScreenQuad();
                }
            }

            // Perform final vertical FFT passes. We have to perform them separately
            // because displacement map and normal map usually have different bit depth.
            // Final pass for displacement.
            graphicsDevice.SetRenderTarget(target0);
            if (_renderTargetBindings[1].RenderTarget == tempPing1)
            {
                _parameterSourceTexture0.SetValue(tempPing0);
            }
            else
            {
                _parameterSourceTexture0.SetValue(tempPong0);
            }

            _passFftDisplacement.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Final pass for normals.
            graphicsDevice.SetRenderTarget(target1);
            if (_renderTargetBindings[1].RenderTarget == tempPing1)
            {
                _parameterSourceTexture0.SetValue(tempPing1);
            }
            else
            {
                _parameterSourceTexture0.SetValue(tempPong1);
            }

            _passFftNormal.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Clean up.
            _renderTargetBindings[0] = default(RenderTargetBinding);
            _renderTargetBindings[1] = default(RenderTargetBinding);
            _parameterButterflyTexture.SetValue((Texture2D)null);
            _parameterSourceTexture0.SetValue((Texture2D)null);
            _parameterSourceTexture1.SetValue((Texture2D)null);

            renderTargetPool.Recycle(tempPing0);
            renderTargetPool.Recycle(tempPing1);
            renderTargetPool.Recycle(tempPong0);
            renderTargetPool.Recycle(tempPong1);

            savedRenderState.Restore();

            // Reset the texture stages. If a floating point texture is set, we get exceptions
            // when a sampler with bilinear filtering is set.

            graphicsDevice.ResetTextures();
        }
示例#36
0
    /// <summary>
    /// Renders mesh nodes, providing any node that samples from the back buffer with a copy
    /// of the current render target via <see cref="RenderContext.SourceTexture"/>.
    /// Nodes are rendered in batches; a new batch (and a new back-buffer copy) starts
    /// whenever a node requires the source texture.
    /// </summary>
    /// <param name="nodes">The scene nodes to render. All entries must be MeshNodes.</param>
    /// <param name="context">The render context. RenderTarget must be an off-screen target.</param>
    /// <param name="order">The render order. Only RenderOrder.UserDefined is supported.</param>
    public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
    {
      var graphicsService = context.GraphicsService;
      var renderTargetPool = graphicsService.RenderTargetPool;
      var graphicsDevice = graphicsService.GraphicsDevice;

      // Get a shared RebuildZBufferRenderer which was added by the graphics screen.
      var rebuildZBufferRenderer = (RebuildZBufferRenderer)context.Data[RenderContextKeys.RebuildZBufferRenderer];

      // We only support a render order of "user defined". This is always the case
      // if this renderer is added to a SceneRenderer. The SceneRenderer does the sorting.
      Debug.Assert(order == RenderOrder.UserDefined);

      // This renderer assumes that the current render target is an off-screen render target.
      Debug.Assert(context.RenderTarget != null);

      graphicsDevice.ResetTextures();

      // Remember the format of the current render target so back-buffer copies match it.
      var backBufferFormat = new RenderTargetFormat(context.RenderTarget);

      // In the loop below we will use the context.SourceTexture property.
      // Remember the original source texture so it can be restored at the end.
      var originalSourceTexture = context.SourceTexture;

      context.SourceTexture = null;
      for (int i = 0; i < nodes.Count; i++)
      {
        var node = (MeshNode)nodes[i];

        // Check if the next node wants to sample from the back buffer.
        if (RequiresSourceTexture(node, context))
        {
          // The effect of the node wants to sample from the "SourceTexture".
          // Per default, DigitalRune Graphics uses a delegate effect parameter
          // binding to set the "SourceTexture" parameters to the
          // RenderContext.SourceTexture value. However, this property is usually
          // null. We need to manually set RenderContext.SourceTexture to the
          // current back buffer render target. Since we cannot read from this
          // render target and write to this render target at the same time,
          // we have to copy it.

          context.SourceTexture = context.RenderTarget;

          // Set a new render target and copy the content of the lastBackBuffer
          // and the depth buffer.
          context.RenderTarget = renderTargetPool.Obtain2D(backBufferFormat);
          graphicsDevice.SetRenderTarget(context.RenderTarget);
          graphicsDevice.Viewport = context.Viewport;
          rebuildZBufferRenderer.Render(context, context.SourceTexture);
        }

        // Add current node to a temporary list.
        _tempList.Add(node);

        // Add all following nodes until another node wants to sample from the
        // back buffer. (Note: The outer loop variable i is advanced here too, so
        // the batched nodes are not processed again by the outer loop.)
        for (int j = i + 1; j < nodes.Count; j++)
        {
          node = (MeshNode)nodes[j];

          if (RequiresSourceTexture(node, context))
            break;

          _tempList.Add(node);
          i++;
        }

        // Render the batch of nodes which share the same source texture.
        _meshRenderer.Render(_tempList, context);

        // Recycle the back-buffer copy (if one was made for this batch) and reset the property.
        renderTargetPool.Recycle(context.SourceTexture);
        context.SourceTexture = null;

        _tempList.Clear();
      }

      // Restore original render context.
      context.SourceTexture = originalSourceTexture;
    }
示例#37
0
        /// <summary>
        /// Applies the blur: computes per-pass sample offsets from the current viewport size,
        /// configures the effect (including optional anisotropic/bilateral depth-aware modes),
        /// and renders the configured number of passes, ping-ponging between temporary
        /// render targets. For separable filters, horizontal and vertical passes alternate.
        /// </summary>
        /// <param name="context">
        /// The render context. Provides the source texture, target render target, viewport,
        /// and (for anisotropic/bilateral filtering) the camera node and G-buffer 0.
        /// </param>
        protected override void OnProcess(RenderContext context)
        {
            var graphicsDevice   = GraphicsService.GraphicsDevice;
            var renderTargetPool = GraphicsService.RenderTargetPool;

            var     viewport = context.Viewport;
            Vector2 size     = new Vector2(viewport.Width, viewport.Height);

            // Choose suitable technique.
            // We do not have shader for each sample count.
            // (SetCurrentTechnique may raise numberOfSamples to the next supported count.)
            int numberOfSamples = NumberOfSamples;

            SetCurrentTechnique(ref numberOfSamples);

            // Apply current scale and texture size to offsets.
            // (Offsets are specified in pixels; the shader needs texture coordinates.)
            for (int i = 0; i < NumberOfSamples; i++)
            {
                _horizontalOffsets[i].X = Offsets[i].X * Scale / size.X;
                _horizontalOffsets[i].Y = Offsets[i].Y * Scale / size.Y;
            }

            // Make sure the other samples are 0 (e.g. if we want 11 samples but the
            // next best shader supports only 15 samples).
            for (int i = NumberOfSamples; i < numberOfSamples; i++)
            {
                _horizontalOffsets[i].X = 0;
                _horizontalOffsets[i].Y = 0;
                Weights[i] = 0;
            }

            // If we have a separable filter, we initialize _verticalOffsets too.
            if (IsSeparable)
            {
                if (_verticalOffsets == null)
                {
                    // Lazily allocated; MaxNumberOfSamples is the shader's upper limit.
                    _verticalOffsets = new Vector2[MaxNumberOfSamples];
                }

                // Vertical offsets are the horizontal offsets rotated 90°, corrected
                // for the non-square aspect ratio of the texture coordinates.
                float aspectRatio = size.X / size.Y;
                for (int i = 0; i < NumberOfSamples; i++)
                {
                    _verticalOffsets[i].X = _horizontalOffsets[i].Y * aspectRatio;
                    _verticalOffsets[i].Y = _horizontalOffsets[i].X * aspectRatio;
                }
                for (int i = NumberOfSamples; i < numberOfSamples; i++)
                {
                    _verticalOffsets[i].X = 0;
                    _verticalOffsets[i].Y = 0;
                }
            }

            // Use hardware filtering if possible.
            // (Floating-point formats do not support bilinear filtering on all hardware.)
            if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
            {
                graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
            }
            else
            {
                graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
            }

            bool isAnisotropic = IsAnisotropic;
            bool isBilateral   = IsBilateral;

            if (FilterInLogSpace)
            {
                // Anisotropic and bilateral filtering in log-space is not implemented.
                isAnisotropic = false;
                isBilateral   = false;
            }
            else
            {
                if (isAnisotropic || isBilateral)
                {
                    // Depth-aware modes need camera and depth information.
                    context.ThrowIfCameraMissing();

                    var   cameraNode = context.CameraNode;
                    var   projection = cameraNode.Camera.Projection;
                    float far        = projection.Far;

                    GraphicsHelper.GetFrustumFarCorners(cameraNode.Camera.Projection, _frustumFarCorners);
                    _parameterFrustumCorners.SetValue(_frustumFarCorners);

                    _parameterBlurParameters0.SetValue(new Vector4(
                                                           far,
                                                           viewport.AspectRatio,
                                                           1.0f / (EdgeSoftness + 0.001f) * far,
                                                           DepthScaling));

                    context.ThrowIfGBuffer0Missing();
                    Texture2D depthBuffer = context.GBuffer0;
                    if (viewport.Width < depthBuffer.Width && viewport.Height < depthBuffer.Height)
                    {
                        // Use half-resolution depth buffer.
                        object obj;
                        if (context.Data.TryGetValue(RenderContextKeys.DepthBufferHalf, out obj))
                        {
                            var depthBufferHalf = obj as Texture2D;
                            if (depthBufferHalf != null)
                            {
                                depthBuffer = depthBufferHalf;
                            }
                        }
                    }

                    _parameterGBuffer0.SetValue(depthBuffer);
                }
            }

            _parameterViewportSize.SetValue(size);
            _parameterWeights.SetValue(Weights);

            // A separable filter renders a horizontal and a vertical pass per iteration.
            int effectiveNumberOfPasses = IsSeparable ? NumberOfPasses * 2 : NumberOfPasses;

            // We use up to two temporary render targets for ping-ponging.
            // (The last pass writes directly to context.RenderTarget, so only
            // effectiveNumberOfPasses - 1 intermediate results are needed.)
            var tempFormat  = new RenderTargetFormat((int)size.X, (int)size.Y, false, context.SourceTexture.Format, DepthFormat.None);
            var tempTarget0 = (effectiveNumberOfPasses > 1)
                        ? renderTargetPool.Obtain2D(tempFormat)
                        : null;
            var tempTarget1 = (effectiveNumberOfPasses > 2)
                        ? renderTargetPool.Obtain2D(tempFormat)
                        : null;

            for (int i = 0; i < effectiveNumberOfPasses; i++)
            {
                // Select output: final target for the last pass, otherwise alternate
                // between the two temporary targets.
                if (i == effectiveNumberOfPasses - 1)
                {
                    graphicsDevice.SetRenderTarget(context.RenderTarget);
                    graphicsDevice.Viewport = viewport;
                }
                else if (i % 2 == 0)
                {
                    graphicsDevice.SetRenderTarget(tempTarget0);
                }
                else
                {
                    graphicsDevice.SetRenderTarget(tempTarget1);
                }

                // Select input: the original source for the first pass, otherwise the
                // temporary target written by the previous pass.
                if (i == 0)
                {
                    _parameterSourceTexture.SetValue(context.SourceTexture);
                }
                else if (i % 2 == 0)
                {
                    _parameterSourceTexture.SetValue(tempTarget1);
                }
                else
                {
                    _parameterSourceTexture.SetValue(tempTarget0);
                }

                Vector2[] offsets;
                if (IsSeparable && i % 2 != 0 &&
                    !isAnisotropic) // The anisotropic filter only reads Offsets[i].x
                {
                    offsets = _verticalOffsets;
                }
                else
                {
                    offsets = _horizontalOffsets;
                }

                _parameterOffsets.SetValue(offsets);

                int passIndex = 0;
                if (isAnisotropic)
                {
                    // The anisotropic technique has separate horizontal/vertical passes.
                    passIndex = i % 2;
                }

                _effect.CurrentTechnique.Passes[passIndex].Apply();
                graphicsDevice.DrawFullScreenQuad();
            }

            // Clean up: unset the texture so the render targets can be recycled safely.
            _parameterSourceTexture.SetValue((Texture2D)null);

            renderTargetPool.Recycle(tempTarget0);
            renderTargetPool.Recycle(tempTarget1);
        }
示例#38
0
    /// <summary>
    /// Renders the scene using occlusion culling (via the OcclusionBuffer) and optional
    /// debug visualizations. Optionally renders a top-down overview into the lower-right
    /// corner of the screen.
    /// </summary>
    /// <param name="context">The render context.</param>
    protected override void OnRender(RenderContext context)
    {
      if (ActiveCameraNode == null)
        return;

      context.Scene = Scene;
      context.CameraNode = ActiveCameraNode;
      context.LodCameraNode = ActiveCameraNode;

      // Copy all scene nodes into a list.
      CopyNodesToList(Scene, _sceneNodes);

      // ----- Occlusion Culling
      // Usually, we would make a scene query to get all scene nodes within the 
      // viewing frustum. But in this example we will use the new OcclusionBuffer.
      if (EnableCulling)
      {
        // Render all occluders into the occlusion buffer.
        // - "_sceneNodes" is a list of all scene nodes. The OcclusionBuffer will
        //   go through the list and render all occluders.
        // - "LightNode" is the main directional light that casts a cascaded shadow.
        //   Passing the light node to the OcclusionBuffer activates shadow caster
        //   culling.
        // - A custom scene node renderer can be passed to the OcclusionBuffer. In
        //   this example, the ground mesh "Gravel/Gravel.fbx" has a material with an
        //   "Occluder" render pass. When we pass the "MeshRenderer" to the OcclusionBuffer
        //   the ground mesh will be rendered directly into the occlusion buffer.
        Profiler.Start("Occlusion.Render");
        // BUGFIX: The render pass name was corrupted ("******"). It must match the
        // "Occluder" render pass of the material, as described in the comment above.
        context.RenderPass = "Occluder";
        OcclusionBuffer.Render(_sceneNodes, LightNode, MeshRenderer, context);
        context.RenderPass = null;
        Profiler.Stop("Occlusion.Render");

        // Perform occlusion culling on the specified list of scene nodes.
        // - The scene nodes will be tested against the occluders. If a scene node
        //   is hidden, it will be replaced with a null entry in the list.
        // - When shadow caster culling is active, shadow casting scene nodes will
        //   also be tested against the occluders. If the shadow is not visible,
        //   the shadow caster will internally be marked as occluded. The ShadowMapRenderer
        //   will automatically skip occluded scene nodes.
        Profiler.Start("Occlusion.Query");
        OcclusionBuffer.Query(_sceneNodes, context);
        Profiler.Stop("Occlusion.Query");
      }

      // The base DeferredGraphicsScreen expects a CustomSceneQuery.
      // --> Copy the occlusion culling results to a CustomSceneQuery.
      _sceneQuery.Set(ActiveCameraNode, _sceneNodes, context);

      var renderTargetPool = GraphicsService.RenderTargetPool;
      var graphicsDevice = GraphicsService.GraphicsDevice;
      var originalRenderTarget = context.RenderTarget;
      var fullViewport = context.Viewport;

      RenderTarget2D topDownRenderTarget = null;
      const int topDownViewSize = 384;
      if (ShowTopDownView)
      {
        // Render top-down scene into an offscreen render target.
        var format = new RenderTargetFormat(context.RenderTarget)
        {
          Width = topDownViewSize,
          Height = topDownViewSize,
        };
        topDownRenderTarget = renderTargetPool.Obtain2D(format);

        context.Scene = Scene;
        context.CameraNode = _topDownCameraNode;
        context.Viewport = new Viewport(0, 0, topDownViewSize, topDownViewSize);
        context.RenderTarget = topDownRenderTarget;
        RenderScene(_sceneQuery, context, true, false, false, false);

        // Visualize the player camera in the top-down view.
        _debugRenderer.Clear();
        _debugRenderer.DrawObject(ActiveCameraNode, Color.Red, true, true);
        _debugRenderer.Render(context);

        // Restore the original target/viewport for the main scene.
        context.RenderTarget = originalRenderTarget;
        context.Viewport = fullViewport;
      }

      // Render regular 3D scene.
      context.Scene = Scene;
      context.CameraNode = ActiveCameraNode;
      RenderScene(_sceneQuery, context, true, false, true, false);

      // Render debug visualization on top of scene.
      bool renderObject = false;
      switch (DebugVisualization)
      {
        case DebugVisualization.CameraHzb:
          OcclusionBuffer.VisualizeCameraBuffer(DebugLevel, context);
          break;
        case DebugVisualization.LightHzb:
          OcclusionBuffer.VisualizeLightBuffer(DebugLevel, context);
          break;
        case DebugVisualization.Object:
          OcclusionBuffer.VisualizeObject(DebugObject, context);
          renderObject = true;
          break;
        case DebugVisualization.ShadowCaster:
          OcclusionBuffer.VisualizeShadowCaster(DebugObject, context);
          break;
        case DebugVisualization.ShadowVolume:
          OcclusionBuffer.VisualizeShadowVolume(DebugObject, context);
          renderObject = true;
          break;
      }

      if (renderObject)
      {
        _debugRenderer.Clear();
        _debugRenderer.DrawObject(DebugObject, Color.Yellow, true, true);
        _debugRenderer.Render(context);
      }

      if (ShowTopDownView)
      {
        // Copy offscreen buffer to the lower-right corner of the screen.
        context.Viewport = fullViewport;
        graphicsDevice.Viewport = fullViewport;

        SpriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque, SamplerState.PointClamp, DepthStencilState.None, RasterizerState.CullNone);
        SpriteBatch.Draw(
          topDownRenderTarget,
          new Rectangle(fullViewport.Width - topDownViewSize, fullViewport.Height - topDownViewSize, topDownViewSize, topDownViewSize),
          Color.White);
        SpriteBatch.End();

        renderTargetPool.Recycle(topDownRenderTarget);
      }

      // Clean-up
      _sceneNodes.Clear();
      _sceneQuery.Reset();

      context.Scene = null;
      context.CameraNode = null;
      context.LodCameraNode = null;
    }
示例#39
0
        /// <summary>
        /// Applies the filter iteratively: NumberOfPasses full-screen passes are rendered,
        /// ping-ponging between two temporary render targets; the last pass writes directly
        /// into the final render target.
        /// </summary>
        /// <param name="context">The render context (source texture, target, viewport).</param>
        protected override void OnProcess(RenderContext context)
        {
            var device = GraphicsService.GraphicsDevice;
            var pool   = GraphicsService.RenderTargetPool;

            // The target width/height.
            int width  = context.Viewport.Width;
            int height = context.Viewport.Height;

            // Two temporary render targets for ping-ponging. Only the passes that do
            // not write the final target need them.
            var pingPongFormat = new RenderTargetFormat(width, height, false, context.SourceTexture.Format, DepthFormat.None);
            RenderTarget2D pingTarget = (NumberOfPasses > 1) ? pool.Obtain2D(pingPongFormat) : null;
            RenderTarget2D pongTarget = (NumberOfPasses > 2) ? pool.Obtain2D(pingPongFormat) : null;

            // Half-pixel offsets rely on bilinear hardware filtering, which is not
            // available for floating-point texture formats.
            bool useHalfPixelOffset = !TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format);
            device.SamplerStates[0] = useHalfPixelOffset ? SamplerState.LinearClamp : SamplerState.PointClamp;
            _useHalfPixelOffsetParameter.SetValue(useHalfPixelOffset ? 1.0f : 0.0f);

            _viewportSizeParameter.SetValue(new Vector2(width, height));

            for (int pass = 0; pass < NumberOfPasses; pass++)
            {
                bool isLastPass = (pass == NumberOfPasses - 1);
                bool isEvenPass = (pass % 2 == 0);

                // Output: final target for the last pass, otherwise alternate temp targets.
                if (isLastPass)
                {
                    device.SetRenderTarget(context.RenderTarget);
                    device.Viewport = context.Viewport;
                }
                else
                {
                    device.SetRenderTarget(isEvenPass ? pingTarget : pongTarget);
                }

                // Input: the original source for pass 0, otherwise the temp target
                // written by the previous pass.
                if (pass == 0)
                    _sourceTextureParameter.SetValue(context.SourceTexture);
                else
                    _sourceTextureParameter.SetValue(isEvenPass ? pongTarget : pingTarget);

                // The iteration value goes from 0 ... (n - 1) or 1 ... n depending on
                // whether a half-pixel offset is used.
                _iterationParameter.SetValue((float)(useHalfPixelOffset ? pass : pass + 1));

                _effect.CurrentTechnique.Passes[0].Apply();
                device.DrawFullScreenQuad();
            }

            // Clean-up: unset the texture and return the temp targets to the pool.
            _sourceTextureParameter.SetValue((Texture2D)null);

            pool.Recycle(pingTarget);
            pool.Recycle(pongTarget);
        }
示例#40
0
        /// <summary>
        /// Applies FXAA to the source texture. If <c>ComputeLuminance</c> is set, a first
        /// pass stores the source luminance in the alpha channel, which the FXAA pass reads.
        /// </summary>
        /// <param name="context">The render context (source texture, target, viewport).</param>
        protected override void OnProcess(RenderContext context)
        {
            var device = GraphicsService.GraphicsDevice;
            var pool   = GraphicsService.RenderTargetPool;

            // Bilinear filtering is not available for floating-point formats.
            device.SamplerStates[0] = TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format)
                ? SamplerState.PointClamp
                : SamplerState.LinearClamp;

            RenderTarget2D rgbLuma = null;
            if (ComputeLuminance)
            {
                // Pass 1: copy the source and write its luminance into the alpha channel.
                var rgbLumaFormat = new RenderTargetFormat(
                    context.SourceTexture.Width,
                    context.SourceTexture.Height,
                    false,
                    context.SourceTexture.Format,
                    DepthFormat.None);
                rgbLuma = pool.Obtain2D(rgbLumaFormat);

                device.SetRenderTarget(rgbLuma);
                _viewportSizeParameter.SetValue(new Vector2(device.Viewport.Width, device.Viewport.Height));
                _sourceTextureParameter.SetValue(context.SourceTexture);
                _luminanceToAlphaPass.Apply();
                device.DrawFullScreenQuad();
            }

            // Pass 2: FXAA into the final render target.
            device.SetRenderTarget(context.RenderTarget);
            device.Viewport = context.Viewport;
            _viewportSizeParameter.SetValue(new Vector2(device.Viewport.Width, device.Viewport.Height));
            _sourceTextureParameter.SetValue(ComputeLuminance ? rgbLuma : context.SourceTexture);
            _fxaaPass.Apply();
            device.DrawFullScreenQuad();

            // Clean-up: unset the texture and recycle the intermediate target (if any).
            _sourceTextureParameter.SetValue((Texture2D)null);
            pool.Recycle(rgbLuma);
        }
示例#41
0
        /// <summary>
        /// Renders a horizontally split screen: the left half shows the scene from
        /// <c>ActiveCameraNode</c>, the right half from <c>ActiveCameraNodeB</c>.
        /// </summary>
        /// <param name="context">The render context.</param>
        protected override void OnRender(RenderContext context)
        {
            // This screen expects two cameras.
            if (ActiveCameraNode == null || ActiveCameraNodeB == null)
                return;

            var pool              = GraphicsService.RenderTargetPool;
            var device            = GraphicsService.GraphicsDevice;
            var finalRenderTarget = context.RenderTarget;
            var finalViewport     = context.Viewport;

            // Get a render target for the first camera. Use half the width because we split
            // the screen horizontally.
            var halfWidthFormat = new RenderTargetFormat(context.RenderTarget)
            {
                Width = finalViewport.Width / 2
            };
            var leftTarget = pool.Obtain2D(halfWidthFormat);

            context.Scene              = Scene;
            context.LodHysteresis      = 0.5f;
            context.LodBias            = 1.0f;
            context.LodBlendingEnabled = true;

            for (int pass = 0; pass < 2; pass++)
            {
                bool isFirstCamera = (pass == 0);
                if (isFirstCamera)
                {
                    // The first camera renders into the off-screen leftTarget.
                    context.CameraNode   = ActiveCameraNode;
                    context.Viewport     = new Viewport(0, 0, finalViewport.Width / 2, finalViewport.Height);
                    context.RenderTarget = leftTarget;
                }
                else
                {
                    // The second camera renders into the right half of the final render target.
                    context.CameraNode   = ActiveCameraNodeB;
                    context.Viewport     = new Viewport(finalViewport.X + finalViewport.Width / 2, finalViewport.Y, finalViewport.Width / 2, finalViewport.Height);
                    context.RenderTarget = finalRenderTarget;
                }
                context.LodCameraNode = context.CameraNode;

                // Get all scene nodes which overlap the camera frustum and render them.
                CustomSceneQuery sceneQuery = Scene.Query<CustomSceneQuery>(context.CameraNode, context);
                RenderScene(sceneQuery, context, true, true, true, true);

                // ----- After the second camera: copy the image of the first camera.
                if (!isFirstCamera)
                {
                    // Blit the off-screen target into the left half of the back buffer.
                    context.Viewport = finalViewport;
                    device.Viewport  = finalViewport;

                    SpriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque, SamplerState.PointClamp, DepthStencilState.None, RasterizerState.CullNone);
                    SpriteBatch.Draw(
                        leftTarget,
                        new Rectangle(0, 0, finalViewport.Width / 2, finalViewport.Height),
                        Color.White);
                    SpriteBatch.End();

                    pool.Recycle(leftTarget);
                }
            }

            // Clean-up: reset the render context.
            context.Scene         = null;
            context.CameraNode    = null;
            context.LodCameraNode = null;
            context.RenderPass    = null;
        }
示例#42
0
        /// <summary>
        /// Blends the current source image with the blurred result of the previous frame
        /// (stored per camera in <c>ViewDependentData</c>) and writes the combined image
        /// to the render target. The new blended image is kept for the next frame.
        /// </summary>
        /// <param name="context">The render context (camera node required).</param>
        protected override void OnProcess(RenderContext context)
        {
            context.ThrowIfCameraMissing();

            var device           = GraphicsService.GraphicsDevice;
            var originalSource   = context.SourceTexture;
            var originalTarget   = context.RenderTarget;
            var originalViewport = context.Viewport;

            // Obtain a render target for the new blended image.
            var blurredFormat = new RenderTargetFormat(originalSource.Width, originalSource.Height, false, originalSource.Format, DepthFormat.None);
            RenderTarget2D blurredScene = GraphicsService.RenderTargetPool.Obtain2D(blurredFormat);

            // Bilinear filtering is not available for floating-point formats.
            SamplerState sampler = TextureHelper.IsFloatingPointFormat(originalSource.Format)
                ? SamplerState.PointClamp
                : SamplerState.LinearClamp;
            device.SamplerStates[0] = sampler;
            device.SamplerStates[1] = sampler;

            context.RenderTarget = blurredScene;
            context.Viewport     = new Viewport(0, 0, blurredScene.Width, blurredScene.Height);

            // Get the view-dependent information stored in the camera node; create it
            // on first use for this camera.
            var cameraNode = context.CameraNode;
            object cached;
            cameraNode.ViewDependentData.TryGetValue(this, out cached);
            var data = cached as ViewDependentData;
            if (data == null)
            {
                data = new ViewDependentData(GraphicsService);
                cameraNode.ViewDependentData[this] = data;
            }

            if (data.LastBlurredScene == null)
            {
                // First frame: nothing to blend with yet. Simply remember the current
                // source for the next frame.
                _copyFilter.Process(context);
            }
            else
            {
                // Blend the current source with the blurred scene of the last frame.
                device.SetRenderTarget(blurredScene);

                _viewportSizeParameter.SetValue(new Vector2(device.Viewport.Width, device.Viewport.Height));
                _strengthParameter.SetValue(Strength);
                _sourceTextureParameter.SetValue(originalSource);
                _lastSourceTextureParameter.SetValue(data.LastBlurredScene);
                _effect.CurrentTechnique.Passes[0].Apply();
                device.DrawFullScreenQuad();
            }

            // Copy blurredScene to the original target.
            context.SourceTexture = blurredScene;
            context.RenderTarget  = originalTarget;
            context.Viewport      = originalViewport;
            _copyFilter.Process(context);

            // Recycle the old blurred scene and keep the new one (switch render targets).
            GraphicsService.RenderTargetPool.Recycle(data.LastBlurredScene);
            data.LastBlurredScene = blurredScene;

            _sourceTextureParameter.SetValue((Texture2D)null);
            _lastSourceTextureParameter.SetValue((Texture2D)null);

            // Restore original context.
            context.SourceTexture = originalSource;
        }
示例#43
0
        public override void Render(IList <SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            if (nodes == null)
            {
                throw new ArgumentNullException("nodes");
            }
            if (context == null)
            {
                throw new ArgumentNullException("context");
            }

            int numberOfNodes = nodes.Count;

            if (numberOfNodes == 0)
            {
                return;
            }

            context.ThrowIfCameraMissing();
            context.ThrowIfSceneMissing();

            var originalRenderTarget  = context.RenderTarget;
            var originalViewport      = context.Viewport;
            var originalReferenceNode = context.ReferenceNode;

            // Camera properties
            var cameraNode = context.CameraNode;
            var cameraPose = cameraNode.PoseWorld;
            var projection = cameraNode.Camera.Projection;

            if (!(projection is PerspectiveProjection))
            {
                throw new NotImplementedException(
                          "Cascaded shadow maps not yet implemented for scenes with orthographic camera.");
            }

            float fieldOfViewY = projection.FieldOfViewY;
            float aspectRatio  = projection.AspectRatio;

            // Update SceneNode.LastFrame for all visible nodes.
            int frame = context.Frame;

            cameraNode.LastFrame = frame;

            // The scene node renderer should use the light camera instead of the player camera.
            context.CameraNode = _orthographicCameraNode;
            context.Technique  = "Directional";

            var graphicsService  = context.GraphicsService;
            var graphicsDevice   = graphicsService.GraphicsDevice;
            var savedRenderState = new RenderStateSnapshot(graphicsDevice);

            for (int i = 0; i < numberOfNodes; i++)
            {
                var lightNode = nodes[i] as LightNode;
                if (lightNode == null)
                {
                    continue;
                }

                var shadow = lightNode.Shadow as CascadedShadow;
                if (shadow == null)
                {
                    continue;
                }

                // LightNode is visible in current frame.
                lightNode.LastFrame = frame;

                var format = new RenderTargetFormat(
                    shadow.PreferredSize * shadow.NumberOfCascades,
                    shadow.PreferredSize,
                    false,
                    shadow.Prefer16Bit ? SurfaceFormat.HalfSingle : SurfaceFormat.Single,
                    DepthFormat.Depth24);

                bool allLocked = shadow.IsCascadeLocked[0] && shadow.IsCascadeLocked[1] && shadow.IsCascadeLocked[2] && shadow.IsCascadeLocked[3];

                if (shadow.ShadowMap == null)
                {
                    shadow.ShadowMap = graphicsService.RenderTargetPool.Obtain2D(format);
                    allLocked        = false; // Need to render shadow map.
                }

                // If we can reuse the whole shadow map texture, abort early.
                if (allLocked)
                {
                    continue;
                }

                _csmSplitDistances[0] = projection.Near;
                _csmSplitDistances[1] = shadow.Distances.X;
                _csmSplitDistances[2] = shadow.Distances.Y;
                _csmSplitDistances[3] = shadow.Distances.Z;
                _csmSplitDistances[4] = shadow.Distances.W;

                // (Re-)Initialize the array for cached matrices in the CascadedShadow.
                if (shadow.ViewProjections == null || shadow.ViewProjections.Length < shadow.NumberOfCascades)
                {
                    shadow.ViewProjections = new Matrix[shadow.NumberOfCascades];
                }

                // Initialize the projection matrices to an empty matrix.
                // The unused matrices should not contain valid projections because
                // CsmComputeSplitOptimized in CascadedShadowMask.fxh should not choose
                // the wrong cascade.
                for (int j = 0; j < shadow.ViewProjections.Length; j++)
                {
                    if (!shadow.IsCascadeLocked[j]) // Do not delete cached info for cached cascade.
                    {
                        shadow.ViewProjections[j] = new Matrix();
                    }
                }

                // If some cascades are cached, we have to create a new shadow map and copy
                // the old cascades into the new shadow map.
                if (shadow.IsCascadeLocked[0] || shadow.IsCascadeLocked[1] || shadow.IsCascadeLocked[2] || shadow.IsCascadeLocked[3])
                {
                    var oldShadowMap = shadow.ShadowMap;
                    shadow.ShadowMap = graphicsService.RenderTargetPool.Obtain2D(new RenderTargetFormat(oldShadowMap));

                    graphicsDevice.SetRenderTarget(shadow.ShadowMap);
                    graphicsDevice.Clear(Color.White);

                    var spriteBatch = graphicsService.GetSpriteBatch();
                    spriteBatch.Begin(SpriteSortMode.Deferred, BlendState.Opaque, SamplerState.PointClamp, DepthStencilState.None, RasterizerState.CullNone);
                    for (int cascade = 0; cascade < shadow.NumberOfCascades; cascade++)
                    {
                        if (shadow.IsCascadeLocked[cascade])
                        {
                            var viewport  = GetViewport(shadow, cascade);
                            var rectangle = new Rectangle(viewport.X, viewport.Y, viewport.Width, viewport.Height);
                            spriteBatch.Draw(oldShadowMap, rectangle, rectangle, Color.White);
                        }
                    }
                    spriteBatch.End();

                    graphicsService.RenderTargetPool.Recycle(oldShadowMap);
                }
                else
                {
                    graphicsDevice.SetRenderTarget(shadow.ShadowMap);
                    graphicsDevice.Clear(Color.White);
                }

                context.RenderTarget             = shadow.ShadowMap;
                graphicsDevice.DepthStencilState = DepthStencilState.Default;
                graphicsDevice.RasterizerState   = RasterizerState.CullCounterClockwise;
                graphicsDevice.BlendState        = BlendState.Opaque;

                context.ReferenceNode = lightNode;
                context.Object        = shadow;
                context.ShadowNear    = 0; // Obsolete: Only kept for backward compatibility.

                bool shadowMapContainsSomething = false;
                for (int split = 0; split < shadow.NumberOfCascades; split++)
                {
                    if (shadow.IsCascadeLocked[split])
                    {
                        continue;
                    }

                    context.Data[RenderContextKeys.ShadowTileIndex] = CubeMapShadowMapRenderer.BoxedIntegers[split];

                    // near/far of this split.
                    float near = _csmSplitDistances[split];
                    float far  = Math.Max(_csmSplitDistances[split + 1], near + Numeric.EpsilonF);

                    // Create a view volume for this split.
                    _splitVolume.SetFieldOfView(fieldOfViewY, aspectRatio, near, far);

                    // Find the bounding sphere of the split camera frustum.
                    Vector3 center;
                    float   radius;
                    GetBoundingSphere(_splitVolume, out center, out radius);

                    // Extend radius to get enough border for filtering.
                    int shadowMapSize = shadow.ShadowMap.Height;

                    // We could extend by (ShadowMapSize + BorderTexels) / ShadowMapSize;
                    // Add at least 1 texel. (This way, shadow mask shader can clamp uv to
                    // texture rect in without considering half texel border to avoid sampling outside..)
                    radius *= (float)(shadowMapSize + 1) / shadowMapSize;

                    // Convert center to light space.
                    Pose lightPose = lightNode.PoseWorld;
                    center = cameraPose.ToWorldPosition(center);
                    center = lightPose.ToLocalPosition(center);

                    // Snap center to texel positions to avoid shadow swimming.
                    SnapPositionToTexels(ref center, 2 * radius, shadowMapSize);

                    // Convert center back to world space.
                    center = lightPose.ToWorldPosition(center);

                    Matrix  orientation            = lightPose.Orientation;
                    Vector3 backward               = orientation.GetColumn(2);
                    var     orthographicProjection = (OrthographicProjection)_orthographicCameraNode.Camera.Projection;

                    // Create a tight orthographic frustum around the cascade's bounding sphere.
                    orthographicProjection.SetOffCenter(-radius, radius, -radius, radius, 0, 2 * radius);
                    Vector3 cameraPosition = center + radius * backward;
                    Pose    frustumPose    = new Pose(cameraPosition, orientation);
                    Pose    view           = frustumPose.Inverse;
                    shadow.ViewProjections[split] = (Matrix)view * orthographicProjection;

                    // Convert depth bias from "texel" to light space [0, 1] depth.
                    // Minus sign to move receiver depth closer to light. Divide by depth to normalize.
                    float unitsPerTexel = orthographicProjection.Width / shadow.ShadowMap.Height;
                    shadow.EffectiveDepthBias[split] = -shadow.DepthBias[split] * unitsPerTexel / orthographicProjection.Depth;

                    // Convert normal offset from "texel" to world space.
                    shadow.EffectiveNormalOffset[split] = shadow.NormalOffset[split] * unitsPerTexel;

                    // For rendering the shadow map, move near plane back by MinLightDistance
                    // to catch occluders in front of the cascade.
                    orthographicProjection.Near       = -shadow.MinLightDistance;
                    _orthographicCameraNode.PoseWorld = frustumPose;

                    // Set a viewport to render a tile in the texture atlas.
                    graphicsDevice.Viewport = GetViewport(shadow, split);
                    context.Viewport        = graphicsDevice.Viewport;

                    shadowMapContainsSomething |= RenderCallback(context);
                }

                // Recycle shadow map if empty.
                if (!shadowMapContainsSomething)
                {
                    graphicsService.RenderTargetPool.Recycle(shadow.ShadowMap);
                    shadow.ShadowMap = null;
                }
            }

            graphicsDevice.SetRenderTarget(null);
            savedRenderState.Restore();

            context.CameraNode    = cameraNode;
            context.ShadowNear    = float.NaN;
            context.Technique     = null;
            context.RenderTarget  = originalRenderTarget;
            context.Viewport      = originalViewport;
            context.ReferenceNode = originalReferenceNode;
            context.Object        = null;
            context.Data[RenderContextKeys.ShadowTileIndex] = null;
        }
// ----- Example #44 -----
        /// <summary>
        /// Performs unsharp masking: blurs the source image into a temporary render target and
        /// then combines source and blurred image in the effect to sharpen the result.
        /// </summary>
        /// <param name="context">The render context (provides source texture, target and viewport).</param>
        protected override void OnProcess(RenderContext context)
        {
            var graphicsDevice   = GraphicsService.GraphicsDevice;
            var sourceTexture    = context.SourceTexture;
            var originalTarget   = context.RenderTarget;
            var originalViewport = context.Viewport;

            // Floating-point formats cannot use linear filtering on all hardware.
            var samplerState = TextureHelper.IsFloatingPointFormat(sourceTexture.Format)
                               ? SamplerState.PointClamp
                               : SamplerState.LinearClamp;
            graphicsDevice.SamplerStates[0] = samplerState;
            graphicsDevice.SamplerStates[1] = samplerState;

            // Blur the source texture into a temporary render target of the same size/format.
            var blurFormat = new RenderTargetFormat(sourceTexture.Width, sourceTexture.Height, false, sourceTexture.Format, DepthFormat.None);
            var blurredTexture = GraphicsService.RenderTargetPool.Obtain2D(blurFormat);
            context.RenderTarget = blurredTexture;
            context.Viewport     = new Viewport(0, 0, blurredTexture.Width, blurredTexture.Height);
            Blur.Process(context);

            // Unsharp masking: render into the original target using source + blurred image.
            context.RenderTarget = originalTarget;
            context.Viewport     = originalViewport;
            graphicsDevice.SetRenderTarget(context.RenderTarget);
            graphicsDevice.Viewport = context.Viewport;

            _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _sharpnessParameter.SetValue(Sharpness);
            _sourceTextureParameter.SetValue(sourceTexture);
            _blurredTextureParameter.SetValue(blurredTexture);
            _effect.CurrentTechnique.Passes[0].Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Clean-up: release texture references and return the temporary target to the pool.
            _sourceTextureParameter.SetValue((Texture2D)null);
            _blurredTextureParameter.SetValue((Texture2D)null);
            GraphicsService.RenderTargetPool.Recycle(blurredTexture);
        }
        //--------------------------------------------------------------
        #region Methods
        //--------------------------------------------------------------

        /// <summary>
        /// Computes the intersection of <see cref="MeshNode"/>s.
        /// </summary>
        /// <param name="meshNodePairs">
        /// A collection of <see cref="MeshNode"/> pairs. The renderer computes the intersection volume
        /// of each pair.
        /// </param>
        /// <param name="color">The diffuse color used for the intersection.</param>
        /// <param name="alpha">The opacity of the intersection.</param>
        /// <param name="maxConvexity">
        /// The maximum convexity of the submeshes. A convex mesh has a convexity of 1. A concave mesh
        /// has a convexity greater than 1. Convexity is the number of layers required for depth peeling
        /// (= the number of front face layers when looking at the object).
        /// </param>
        /// <param name="context">The render context.</param>
        /// <remarks>
        /// <para>
        /// This method renders an off-screen image (color and depth) of the intersection volume. This
        /// operation destroys the currently set render target and depth/stencil buffer.
        /// </para>
        /// </remarks>
        /// <exception cref="ObjectDisposedException">
        /// The <see cref="IntersectionRenderer"/> has already been disposed.
        /// </exception>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="meshNodePairs"/> or <paramref name="context"/> is
        /// <see langword="null"/>.
        /// </exception>
        /// <exception cref="ArgumentOutOfRangeException">
        /// The convexity must be greater than 0.
        /// </exception>
        /// <exception cref="GraphicsException">
        /// Invalid render context: Graphics service is not set.
        /// </exception>
        /// <exception cref="GraphicsException">
        /// Invalid render context: Wrong graphics service.
        /// </exception>
        /// <exception cref="GraphicsException">
        /// Invalid render context: Scene is not set.
        /// </exception>
        /// <exception cref="GraphicsException">
        /// Invalid render context: Camera node needs to be set in render context.
        /// </exception>
        public void ComputeIntersection(IEnumerable <Pair <MeshNode> > meshNodePairs,
                                        Vector3F color, float alpha, float maxConvexity, RenderContext context)
        {
            // ----- Validate state and arguments.
            if (_isDisposed)
            {
                throw new ObjectDisposedException("IntersectionRenderer has already been disposed.");
            }
            if (meshNodePairs == null)
            {
                throw new ArgumentNullException("meshNodePairs");
            }
            if (maxConvexity < 1)
            {
                throw new ArgumentOutOfRangeException("maxConvexity", "The max convexity must be greater than 0.");
            }
            if (context == null)
            {
                throw new ArgumentNullException("context");
            }
            if (context.GraphicsService == null)
            {
                throw new GraphicsException("Invalid render context: Graphics service is not set.");
            }
            if (_graphicsService != context.GraphicsService)
            {
                throw new GraphicsException("Invalid render context: Wrong graphics service.");
            }
            if (context.CameraNode == null)
            {
                throw new GraphicsException("Camera node needs to be set in render context.");
            }
            if (context.Scene == null)
            {
                throw new GraphicsException("A scene needs to be set in the render context.");
            }

            // Create 2 ordered pairs for each unordered pair.
            // (A, B) and (B, A) are both needed because each mesh is clipped against the other.
            _pairs.Clear();
            foreach (var pair in meshNodePairs)
            {
                if (pair.First == null || pair.Second == null)
                {
                    continue;
                }

                // Frustum culling: skip pairs where either node is outside the camera frustum.
                if (!context.Scene.HaveContact(pair.First, context.CameraNode))
                {
                    continue;
                }
                if (!context.Scene.HaveContact(pair.Second, context.CameraNode))
                {
                    continue;
                }

                _pairs.Add(new Pair <MeshNode, MeshNode>(pair.First, pair.Second));
                _pairs.Add(new Pair <MeshNode, MeshNode>(pair.Second, pair.First));
            }

            var renderTargetPool = _graphicsService.RenderTargetPool;

            // Nothing visible to intersect: release any previous intersection image and exit.
            if (_pairs.Count == 0)
            {
                renderTargetPool.Recycle(_intersectionImage);
                _intersectionImage = null;
                return;
            }

            // Color and alpha are applied in RenderIntersection().
            _color = color;
            _alpha = alpha;

            var graphicsDevice = _graphicsService.GraphicsDevice;

            // Save original render states.
            var originalBlendState        = graphicsDevice.BlendState;
            var originalDepthStencilState = graphicsDevice.DepthStencilState;
            var originalRasterizerState   = graphicsDevice.RasterizerState;
            var originalScissorRectangle  = graphicsDevice.ScissorRectangle;

            // Get offscreen render targets.
            // The intersection is rendered at reduced resolution (DownsampleFactor).
            var viewport = context.Viewport;

            viewport.X      = 0;
            viewport.Y      = 0;
            viewport.Width  = (int)(viewport.Width / DownsampleFactor);
            viewport.Height = (int)(viewport.Height / DownsampleFactor);
            var renderTargetFormat = new RenderTargetFormat(viewport.Width, viewport.Height, false, SurfaceFormat.Color, DepthFormat.Depth24Stencil8);

            // Try to reuse any existing render targets.
            // (Usually they are recycled in RenderIntersection()).
            var currentScene = _intersectionImage;

            if (currentScene == null || !renderTargetFormat.IsCompatibleWith(currentScene))
            {
                // NOTE(review): currentScene can be null on this path; this assumes SafeDispose
                // is a null-safe extension method — TODO confirm.
                currentScene.SafeDispose();
                currentScene = renderTargetPool.Obtain2D(renderTargetFormat);
            }
            var lastScene = renderTargetPool.Obtain2D(renderTargetFormat);

            // Set shared effect parameters.
            var cameraNode = context.CameraNode;
            var view       = (Matrix)cameraNode.View;
            var projection = cameraNode.Camera.Projection;
            var near       = projection.Near;
            var far        = projection.Far;

            _parameterViewportSize.SetValue(new Vector2(viewport.Width, viewport.Height));

            // The DepthEpsilon has to be tuned if depth peeling does not work because
            // of numerical problems equality z comparisons.
            _parameterCameraParameters.SetValue(new Vector3(near, far - near, 0.0000001f));
            _parameterView.SetValue(view);
            _parameterProjection.SetValue((Matrix)projection);

            var defaultTexture = _graphicsService.GetDefaultTexture2DBlack();

            // Handle all pairs.
            // Each iteration picks one mesh node A and depth-peels it against all its partners.
            bool isFirstPass = true;

            while (true)
            {
                // Find a mesh node A and all mesh nodes to which it needs to be clipped.
                MeshNode meshNodeA = null;
                _partners.Clear();
                for (int i = 0; i < _pairs.Count; i++)
                {
                    var pair = _pairs[i];

                    // Pairs already handled were overwritten with an empty pair (First == null).
                    if (pair.First == null)
                    {
                        continue;
                    }

                    if (meshNodeA == null)
                    {
                        meshNodeA = pair.First;
                    }

                    if (pair.First == meshNodeA)
                    {
                        _partners.Add(pair.Second);

                        // Remove this pair by overwriting it with an empty pair.
                        _pairs[i] = new Pair <MeshNode, MeshNode>();
                    }
                }

                // Abort if we have handled all pairs.
                if (meshNodeA == null)
                {
                    break;
                }

                var worldTransformA = (Matrix)(meshNodeA.PoseWorld * Matrix44F.CreateScale(meshNodeA.ScaleWorld));

                if (EnableScissorTest)
                {
                    // Scissor rectangle of A.
                    var scissorA = GraphicsHelper.GetScissorRectangle(context.CameraNode, viewport, meshNodeA);

                    // Union of scissor rectangles of partners.
                    Rectangle partnerRectangle = GraphicsHelper.GetScissorRectangle(context.CameraNode, viewport, _partners[0]);
                    for (int i = 1; i < _partners.Count; i++)
                    {
                        var a = GraphicsHelper.GetScissorRectangle(context.CameraNode, viewport, _partners[i]);
                        partnerRectangle = Rectangle.Union(partnerRectangle, a);
                    }

                    // Use intersection of A and partners.
                    graphicsDevice.ScissorRectangle = Rectangle.Intersect(scissorA, partnerRectangle);

                    // We store the union of all scissor rectangles for use in RenderIntersection().
                    if (isFirstPass)
                    {
                        _totalScissorRectangle = graphicsDevice.ScissorRectangle;
                    }
                    else
                    {
                        _totalScissorRectangle = Rectangle.Union(_totalScissorRectangle, graphicsDevice.ScissorRectangle);
                    }
                }

                // Depth peeling of A: one iteration per front-face layer (up to maxConvexity).
                for (int layer = 0; layer < maxConvexity; layer++)
                {
                    // Set and clear render target.
                    graphicsDevice.SetRenderTarget(currentScene);
                    graphicsDevice.Clear(new Color(1, 1, 1, 0)); // RGB = "a large depth", A = "empty area"

                    // Render a depth layer of A.
                    // Layer 0 peels against the default (black) texture; later layers peel against
                    // the previous layer's depth stored in lastScene.
                    graphicsDevice.DepthStencilState = DepthStencilStateWriteLess;
                    graphicsDevice.BlendState        = BlendState.Opaque;
                    graphicsDevice.RasterizerState   = EnableScissorTest ? CullCounterClockwiseScissor : RasterizerState.CullCounterClockwise;
                    _parameterWorld.SetValue(worldTransformA);
                    _parameterTexture.SetValue((layer == 0) ? defaultTexture : lastScene);
                    _passPeel.Apply();
                    foreach (var submesh in meshNodeA.Mesh.Submeshes)
                    {
                        submesh.Draw();
                    }

                    // Render partners to set stencil.
                    // The stencil marks pixels where the current layer of A lies inside a partner.
                    graphicsDevice.DepthStencilState = DepthStencilStateOnePassStencilFail;
                    graphicsDevice.BlendState        = BlendStateNoWrite;
                    graphicsDevice.RasterizerState   = EnableScissorTest ? CullNoneScissor : RasterizerState.CullNone;
                    foreach (var partner in _partners)
                    {
                        _parameterWorld.SetValue((Matrix)(partner.PoseWorld * Matrix44F.CreateScale(partner.ScaleWorld)));
                        _passMark.Apply();
                        foreach (var submesh in partner.Mesh.Submeshes)
                        {
                            submesh.Draw();
                        }
                    }

                    // Clear depth buffer. Leave stencil buffer unchanged.
                    graphicsDevice.Clear(ClearOptions.DepthBuffer, new Color(0, 1, 0), 1, 0);

                    // Render A to compute lighting, but only where the stencil is set.
                    graphicsDevice.DepthStencilState = DepthStencilStateStencilNotEqual0;
                    graphicsDevice.BlendState        = BlendState.Opaque;
                    graphicsDevice.RasterizerState   = EnableScissorTest ? CullCounterClockwiseScissor :  RasterizerState.CullCounterClockwise;
                    _parameterWorld.SetValue(worldTransformA);
                    _passDraw.Apply();
                    foreach (var submesh in meshNodeA.Mesh.Submeshes)
                    {
                        submesh.Draw();
                    }

                    // Combine last intersection image with current.
                    if (!isFirstPass)
                    {
                        graphicsDevice.DepthStencilState = DepthStencilState.DepthRead;
                        graphicsDevice.BlendState        = BlendState.Opaque;
                        graphicsDevice.RasterizerState   = EnableScissorTest ? CullNoneScissor : RasterizerState.CullNone;
                        _parameterTexture.SetValue(lastScene);
                        _passCombine.Apply();
                        graphicsDevice.DrawFullScreenQuad();
                    }

                    isFirstPass = false;

                    // ----- Swap render targets.
                    // The result of this pass becomes the input of the next pass.
                    MathHelper.Swap(ref lastScene, ref currentScene);
                }
            }

            // Store final images for RenderIntersection.
            // (lastScene holds the result because of the final swap above.)
            _intersectionImage = lastScene;

            // Scale scissor rectangle back to full-screen resolution.
            if (DownsampleFactor > 1)
            {
                _totalScissorRectangle.X      = (int)(_totalScissorRectangle.X * DownsampleFactor);
                _totalScissorRectangle.Y      = (int)(_totalScissorRectangle.Y * DownsampleFactor);
                _totalScissorRectangle.Width  = (int)(_totalScissorRectangle.Width * DownsampleFactor);
                _totalScissorRectangle.Height = (int)(_totalScissorRectangle.Height * DownsampleFactor);
            }


            // Restore original render state.
            graphicsDevice.BlendState        = originalBlendState ?? BlendState.Opaque;
            graphicsDevice.DepthStencilState = originalDepthStencilState ?? DepthStencilState.Default;
            graphicsDevice.RasterizerState   = originalRasterizerState ?? RasterizerState.CullCounterClockwise;
            graphicsDevice.ScissorRectangle  = originalScissorRectangle;

            renderTargetPool.Recycle(currentScene);
            _partners.Clear();
            _pairs.Clear();
        }
        /// <summary>
        /// Renders the scene four times into a 2x2 split screen. Each quadrant uses different
        /// LOD blending settings and filters out scene nodes by <c>UserFlags</c> (top half
        /// hides flag 1, bottom half hides flag 2).
        /// </summary>
        /// <param name="context">The render context.</param>
        protected override void OnRender(RenderContext context)
        {
            if (ActiveCameraNode == null)
            {
                return;
            }

            var renderTargetPool     = GraphicsService.RenderTargetPool;
            var graphicsDevice       = GraphicsService.GraphicsDevice;
            var originalRenderTarget = context.RenderTarget;
            var fullViewport         = context.Viewport;

            // Get a render target for the first camera. Use half the width and height.
            int halfWidth  = fullViewport.Width / 2;
            int halfHeight = fullViewport.Height / 2;
            var format     = new RenderTargetFormat(context.RenderTarget)
            {
                Width  = halfWidth,
                Height = halfHeight
            };

            // Three quadrants are rendered into temporary targets; the fourth is rendered
            // directly into the original render target.
            var renderTarget0 = renderTargetPool.Obtain2D(format);
            var renderTarget1 = renderTargetPool.Obtain2D(format);
            var renderTarget2 = renderTargetPool.Obtain2D(format);
            var viewport0     = new Viewport(0, 0, halfWidth, halfHeight);
            var viewport1     = new Viewport(halfWidth, 0, halfWidth, halfHeight);
            var viewport2     = new Viewport(0, halfHeight, halfWidth, halfHeight);

            context.Scene         = Scene;
            context.CameraNode    = ActiveCameraNode;
            context.LodCameraNode = context.CameraNode;
            context.LodHysteresis = 0.5f;

            // Reduce detail level by increasing the LOD bias.
            context.LodBias = 2.0f;

            for (int i = 0; i < 4; i++)
            {
                if (i == 0)
                {
                    // TOP, LEFT
                    context.RenderTarget       = renderTarget0;
                    context.Viewport           = new Viewport(0, 0, viewport0.Width, viewport0.Height);
                    context.LodBlendingEnabled = false;
                }
                else if (i == 1)
                {
                    // TOP, RIGHT
                    context.RenderTarget       = renderTarget1;
                    context.Viewport           = new Viewport(0, 0, viewport1.Width, viewport1.Height);
                    context.LodBlendingEnabled = true;
                }
                else if (i == 2)
                {
                    // BOTTOM, LEFT
                    context.RenderTarget       = renderTarget2;
                    context.Viewport           = new Viewport(0, 0, viewport2.Width, viewport2.Height);
                    context.LodBlendingEnabled = false;
                }
                else
                {
                    // BOTTOM, RIGHT
                    // Rendered directly into the original render target at the final position.
                    context.RenderTarget       = originalRenderTarget;
                    context.Viewport           = new Viewport(fullViewport.X + halfWidth, fullViewport.Y + halfHeight, halfWidth, halfHeight);
                    context.LodBlendingEnabled = true;
                }

                var sceneQuery = Scene.Query <SceneQueryWithLodBlending>(context.CameraNode, context);

                if (i == 0 || i == 1)
                {
                    // TOP: remove nodes flagged with UserFlags == 1 from the query result.
                    for (int j = 0; j < sceneQuery.RenderableNodes.Count; j++)
                    {
                        if (sceneQuery.RenderableNodes[j].UserFlags == 1)
                        {
                            sceneQuery.RenderableNodes[j] = null;
                        }
                    }
                }
                else
                {
                    // BOTTOM: remove nodes flagged with UserFlags == 2 from the query result.
                    for (int j = 0; j < sceneQuery.RenderableNodes.Count; j++)
                    {
                        if (sceneQuery.RenderableNodes[j].UserFlags == 2)
                        {
                            sceneQuery.RenderableNodes[j] = null;
                        }
                    }
                }

                RenderScene(sceneQuery, context, true, true, true, true);

                sceneQuery.Reset();
            }

            // ----- Copy screens.
            // Copy the previous screens from the temporary render targets into the back buffer.
            context.Viewport        = fullViewport;
            graphicsDevice.Viewport = fullViewport;

            SpriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque, SamplerState.PointClamp, DepthStencilState.None, RasterizerState.CullNone);
            SpriteBatch.Draw(renderTarget0, viewport0.Bounds, Color.White);
            SpriteBatch.Draw(renderTarget1, viewport1.Bounds, Color.White);
            SpriteBatch.Draw(renderTarget2, viewport2.Bounds, Color.White);
            SpriteBatch.End();

            renderTargetPool.Recycle(renderTarget0);
            renderTargetPool.Recycle(renderTarget1);
            renderTargetPool.Recycle(renderTarget2);

            // NOTE(review): LodBias, LodHysteresis and LodBlendingEnabled set above are not
            // restored here — confirm that callers reset these context values.
            context.Scene         = null;
            context.CameraNode    = null;
            context.LodCameraNode = null;
            context.RenderPass    = null;
        }
// ----- Example #47 -----
        /// <summary>
        /// Performs motion blur on the source texture using the velocity buffer(s) stored in the
        /// render context.
        /// </summary>
        /// <param name="context">
        /// The render context. <c>context.Data</c> must contain a velocity buffer under
        /// <see cref="RenderContextKeys.VelocityBuffer"/>; a second buffer under
        /// <see cref="RenderContextKeys.LastVelocityBuffer"/> is optional.
        /// </param>
        /// <exception cref="GraphicsException">
        /// No velocity buffer is set in the render context.
        /// </exception>
        protected override void OnProcess(RenderContext context)
        {
            var graphicsDevice   = GraphicsService.GraphicsDevice;
            var renderTargetPool = GraphicsService.RenderTargetPool;

            // Get velocity buffers from render context.
            object value0, value1;

            context.Data.TryGetValue(RenderContextKeys.VelocityBuffer, out value0);
            context.Data.TryGetValue(RenderContextKeys.LastVelocityBuffer, out value1);
            var velocityBuffer0 = value0 as Texture2D;
            var velocityBuffer1 = value1 as Texture2D;

            if (velocityBuffer0 == null)
            {
                throw new GraphicsException("VelocityBuffer needs to be set in the render context (RenderContext.Data[\"VelocityBuffer\"]).");
            }

            // Floating-point textures are sampled with point filtering; other formats
            // can use linear filtering. (Same pattern is used for each texture below.)
            if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
            {
                graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
            }
            else
            {
                graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
            }

            if (!SoftenEdges)
            {
                // ----- Motion blur using one or two velocity buffers
                _effect.CurrentTechnique = _effect.Techniques[0];
                graphicsDevice.SetRenderTarget(context.RenderTarget);
                graphicsDevice.Viewport = context.Viewport;
                _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
                _sourceTextureParameter.SetValue(context.SourceTexture);
                _numberOfSamplesParameter.SetValue((int)NumberOfSamples);

                _velocityTextureParameter.SetValue(velocityBuffer0);
                if (TextureHelper.IsFloatingPointFormat(velocityBuffer0.Format))
                {
                    graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
                }
                else
                {
                    graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;
                }

                // Use the single-buffer pass unless a second (last-frame) velocity buffer
                // is available and its use is enabled.
                if (velocityBuffer1 == null || !UseLastVelocityBuffer)
                {
                    _singlePass.Apply();
                }
                else
                {
                    _velocityTexture2Parameter.SetValue(velocityBuffer1);
                    if (TextureHelper.IsFloatingPointFormat(velocityBuffer1.Format))
                    {
                        graphicsDevice.SamplerStates[2] = SamplerState.PointClamp;
                    }
                    else
                    {
                        graphicsDevice.SamplerStates[2] = SamplerState.LinearClamp;
                    }

                    _dualPass.Apply();
                }
                graphicsDevice.DrawFullScreenQuad();
            }
            else
            {
                // ----- Advanced motion blur (based on paper "A Reconstruction Filter for Plausible Motion Blur")
                context.ThrowIfCameraMissing();
                context.ThrowIfGBuffer0Missing();

                // The width/height of the current velocity input.
                // NOTE(review): when an off-screen render target is set, the velocity buffer
                // size is used; otherwise the viewport size. Confirm both match the actual
                // velocity buffer resolution (the condition looks asymmetric).
                int sourceWidth;
                int sourceHeight;
                if (context.RenderTarget != null)
                {
                    sourceWidth  = velocityBuffer0.Width;
                    sourceHeight = velocityBuffer0.Height;
                }
                else
                {
                    sourceWidth  = context.Viewport.Width;
                    sourceHeight = context.Viewport.Height;
                }

                // The downsampled target width/height.
                int targetWidth  = Math.Max(1, (int)(sourceWidth / MaxBlurRadius));
                int targetHeight = Math.Max(1, (int)(sourceHeight / MaxBlurRadius));

                var            tempFormat = new RenderTargetFormat(targetWidth, targetHeight, false, SurfaceFormat.Color, DepthFormat.None);
                RenderTarget2D temp0      = renderTargetPool.Obtain2D(tempFormat);
                RenderTarget2D temp1      = renderTargetPool.Obtain2D(tempFormat);

                // ----- Downsample max velocity buffer
                // Repeatedly halve the velocity buffer, keeping the max velocity per texel,
                // until the target size is reached. Intermediate targets come from the pool.
                _effect.CurrentTechnique = _effect.Techniques[0];
                _maxBlurRadiusParameter.SetValue(new Vector2(MaxBlurRadius / sourceWidth, MaxBlurRadius / sourceHeight));
                Texture2D currentVelocityBuffer = velocityBuffer0;
                bool      isFinalPass;
                do
                {
                    // Downsample to this target size.
                    sourceWidth  = Math.Max(targetWidth, sourceWidth / 2);
                    sourceHeight = Math.Max(targetHeight, sourceHeight / 2);

                    // Is this the final downsample pass?
                    isFinalPass = (sourceWidth <= targetWidth && sourceHeight <= targetHeight);

                    // Get temporary render target for intermediate steps.
                    RenderTarget2D temp = null;
                    if (!isFinalPass)
                    {
                        tempFormat.Width  = sourceWidth;
                        tempFormat.Height = sourceHeight;
                        temp = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);
                    }

                    // Final pass renders into temp0; intermediate passes into the pooled temp.
                    graphicsDevice.SetRenderTarget(isFinalPass ? temp0 : temp);

                    _sourceSizeParameter.SetValue(new Vector2(sourceWidth * 2, sourceHeight * 2));
                    _viewportSizeParameter.SetValue(new Vector2(sourceWidth, sourceHeight));
                    _velocityTextureParameter.SetValue(currentVelocityBuffer);
                    if (TextureHelper.IsFloatingPointFormat(currentVelocityBuffer.Format))
                    {
                        graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
                    }
                    else
                    {
                        graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;
                    }

                    // NOTE(review): these fields are named "*Parameter" but Apply() implies
                    // they are effect passes — consider renaming to "*Pass" for clarity.
                    if (currentVelocityBuffer == velocityBuffer0)
                    {
                        _downsampleMaxFromFloatBufferParameter.Apply();
                    }
                    else
                    {
                        _downsampleMaxParameter.Apply();
                    }

                    graphicsDevice.DrawFullScreenQuad();

                    // Recycle the previous intermediate target (never the caller's buffer).
                    if (currentVelocityBuffer != velocityBuffer0)
                    {
                        GraphicsService.RenderTargetPool.Recycle((RenderTarget2D)currentVelocityBuffer);
                    }

                    currentVelocityBuffer = temp;
                } while (!isFinalPass);

                // ----- Compute max velocity of neighborhood.
                graphicsDevice.SetRenderTarget(temp1);
                _velocityTextureParameter.SetValue(temp0);
                _neighborMaxPass.Apply();
                graphicsDevice.DrawFullScreenQuad();

                renderTargetPool.Recycle(temp0);

                // Compute motion blur.
                graphicsDevice.SetRenderTarget(context.RenderTarget);
                graphicsDevice.Viewport = context.Viewport;
                _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
                _sourceTextureParameter.SetValue(context.SourceTexture);
                _numberOfSamplesParameter.SetValue((int)NumberOfSamples);
                _velocityTextureParameter.SetValue(velocityBuffer0);
                if (TextureHelper.IsFloatingPointFormat(velocityBuffer0.Format))
                {
                    graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
                }
                else
                {
                    graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;
                }

                _velocityTexture2Parameter.SetValue(temp1);
                if (TextureHelper.IsFloatingPointFormat(temp1.Format))
                {
                    graphicsDevice.SamplerStates[2] = SamplerState.PointClamp;
                }
                else
                {
                    graphicsDevice.SamplerStates[2] = SamplerState.LinearClamp;
                }

                _gBuffer0Parameter.SetValue(context.GBuffer0);
                _jitterTextureParameter.SetValue(_jitterTexture);
                _softZExtentParameter.SetValue(0.01f / context.CameraNode.Camera.Projection.Far); // 1 mm to 10 cm.
                _softEdgePass.Apply();
                graphicsDevice.DrawFullScreenQuad();

                // Remove texture references from effect parameters so the render targets
                // can be recycled/garbage collected.
                _sourceTextureParameter.SetValue((Texture2D)null);
                _velocityTextureParameter.SetValue((Texture2D)null);
                _velocityTexture2Parameter.SetValue((Texture2D)null);
                _gBuffer0Parameter.SetValue((Texture2D)null);

                renderTargetPool.Recycle(temp1);
            }
        }
示例#48
0
        /// <summary>
        /// Creates a bloom image from the bright parts of the source texture and combines it
        /// with the source image.
        /// </summary>
        /// <param name="context">The render context.</param>
        /// <exception cref="GraphicsException">
        /// The source texture uses a floating-point surface format.
        /// </exception>
        protected override void OnProcess(RenderContext context)
        {
            if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
            {
                throw new GraphicsException("Source texture format must not be a floating-point format.");
            }

            var graphicsDevice = GraphicsService.GraphicsDevice;
            var renderTargetPool = GraphicsService.RenderTargetPool;

            // Remember the original context so it can be restored at the end.
            var originalSource = context.SourceTexture;
            var originalTarget = context.RenderTarget;
            var originalViewport = context.Viewport;

            // Low-resolution size for the bloom image (at least 1 x 1 texel).
            int scaledWidth = Math.Max(1, originalSource.Width / DownsampleFactor);
            int scaledHeight = Math.Max(1, originalSource.Height / DownsampleFactor);

            // ----- Fetch two temporary low-resolution render targets from the pool.
            var bloomTargetFormat = new RenderTargetFormat(scaledWidth, scaledHeight, false, SurfaceFormat.Color, DepthFormat.None);
            var downsampledScene = renderTargetPool.Obtain2D(bloomTargetFormat);
            var bloomImage = renderTargetPool.Obtain2D(bloomTargetFormat);

            // ----- Downsample the scene into the first temporary target.
            context.RenderTarget = downsampledScene;
            context.Viewport = new Viewport(0, 0, downsampledScene.Width, downsampledScene.Height);
            _downsampleFilter.Process(context);

            // ----- Extract the bright parts into the bloom image.
            graphicsDevice.SetRenderTarget(bloomImage);
            _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _bloomThresholdParameter.SetValue(Threshold);
            _bloomIntensityParameter.SetValue(Intensity);
            _bloomSaturationParameter.SetValue(Saturation);
            _sceneTextureParameter.SetValue(downsampledScene);
            _brightnessPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // The downsampled scene is no longer required.
            renderTargetPool.Recycle(downsampledScene);

            // ----- Blur the bloom image in place. (The blur is a two-pass filter,
            // therefore source and target may be the same render target.)
            context.SourceTexture = bloomImage;
            context.RenderTarget = bloomImage;
            _blur.Process(context);

            // ----- Combine the original scene with the blurred bloom image.
            graphicsDevice.SetRenderTarget(originalTarget);
            graphicsDevice.Viewport = originalViewport;
            _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _sceneTextureParameter.SetValue(originalSource);
            _bloomTextureParameter.SetValue(bloomImage);
            _combinePass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // ----- Clean-up: release texture references and the remaining temporary target.
            _sceneTextureParameter.SetValue((Texture2D)null);
            _bloomTextureParameter.SetValue((Texture2D)null);
            renderTargetPool.Recycle(bloomImage);

            // Restore the original render context.
            context.SourceTexture = originalSource;
            context.RenderTarget = originalTarget;
            context.Viewport = originalViewport;
        }
示例#49
0
        /// <summary>
        /// Performs SMAA antialiasing: edge detection, blend-weight calculation, and
        /// neighborhood blending.
        /// </summary>
        /// <param name="context">The render context.</param>
        /// <exception cref="GraphicsException">
        /// The source texture uses a floating-point surface format.
        /// </exception>
        protected override void OnProcess(RenderContext context)
        {
            if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
            {
                throw new GraphicsException("Source texture format must not be a floating-point format.");
            }

            var graphicsDevice = GraphicsService.GraphicsDevice;

            // The target width/height.
            int targetWidth  = context.Viewport.Width;
            int targetHeight = context.Viewport.Height;

            _pixelSizeParameter.SetValue(new Vector2(1.0f / targetWidth, 1.0f / targetHeight));
            _viewportSizeParameter.SetValue(new Vector2(targetWidth, targetHeight));

            // NOTE(review): The SMAA technique can use the stencil buffer to restrict the
            // blend-weight pass to edge pixels (see the commented code below). Render targets
            // from the pool have no stencil buffer, so that optimization is disabled here.
            //var edgeRenderTarget = graphicsService.RenderTargetPool.Obtain2D(targetWidth, targetHeight, false, SurfaceFormat.Color, DepthFormat.Depth24Stencil8);
            //var blendRenderTarget = graphicsService.RenderTargetPool.Obtain2D(targetWidth, targetHeight, false, SurfaceFormat.Color, DepthFormat.Depth24Stencil8);
            var tempFormat        = new RenderTargetFormat(targetWidth, targetHeight, false, SurfaceFormat.Color, DepthFormat.None);
            var edgeRenderTarget  = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);
            var blendRenderTarget = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);

            // ----- Pass 1: Detect edges (luma-based).
            //graphicsDevice.DepthStencilState = _stencilStateReplace;
            graphicsDevice.SetRenderTarget(edgeRenderTarget);
            // Clear color buffer. (Stencil clear is disabled, see note above.)
            //graphicsDevice.Clear(ClearOptions.Target | ClearOptions.Stencil, new Color(0, 0, 0, 0), 1, 0);
            graphicsDevice.Clear(ClearOptions.Target, new Color(0, 0, 0, 0), 1, 0);
            _sourceTextureParameter.SetValue(context.SourceTexture);
            _lumaEdgeDetectionPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // ----- Pass 2: Compute blend weights from the detected edges.
            //graphicsDevice.DepthStencilState = _stencilStateKeep;
            graphicsDevice.SetRenderTarget(blendRenderTarget);
            //graphicsDevice.Clear(ClearOptions.Target, new Color(0, 0, 0, 0), 1, 1);
            _edgesTextureParameter.SetValue(edgeRenderTarget);
            _areaLookupTextureParameter.SetValue(_areaLookupTexture);
            _searchLookupTextureParameter.SetValue(_searchLookupTexture);
            _blendWeightCalculationPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // ----- Pass 3: Blend neighborhood pixels into the final render target.
            //graphicsDevice.DepthStencilState = DepthStencilState.None;
            graphicsDevice.SetRenderTarget(context.RenderTarget);
            graphicsDevice.Viewport = context.Viewport;
            _blendTextureParameter.SetValue(blendRenderTarget);
            _neighborhoodBlendingPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Release texture references and recycle the temporary render targets.
            _sourceTextureParameter.SetValue((Texture2D)null);
            _edgesTextureParameter.SetValue((Texture2D)null);
            _blendTextureParameter.SetValue((Texture2D)null);

            GraphicsService.RenderTargetPool.Recycle(blendRenderTarget);
            GraphicsService.RenderTargetPool.Recycle(edgeRenderTarget);
        }
示例#50
0
    /// <summary>
    /// Renders the environment maps for the image-based lights.
    /// </summary>
    /// <remarks>
    /// This method uses the current DeferredGraphicsScreen to render new environment maps at
    /// runtime. The DeferredGraphicsScreen has a SceneCaptureRenderer which we can use to
    /// capture environment maps of the current scene.
    /// To capture new environment maps the flag _updateEnvironmentMaps must be set to true.
    /// When this flag is set, SceneCaptureNodes are added to the scene. When the graphics
    /// screen calls the SceneCaptureRenderer the next time, the new environment maps will be
    /// captured.
    /// The flag _updateEnvironmentMaps remains true until the new environment maps are available.
    /// This method checks the SceneCaptureNode.LastFrame property to check if new environment maps
    /// have been computed. Usually, the environment maps will be available in the next frame.
    /// (However, the XNA Game class can skip graphics rendering if the game is running slowly.
    /// Then we would have to wait more than 1 frame.)
    /// When environment maps are being rendered, the image-based lights are disabled to capture
    /// only the scene with ambient and directional lights. Dynamic objects are also disabled
    /// to capture only the static scene.
    /// </remarks>
    private void UpdateEnvironmentMaps()
    {
      if (!_updateEnvironmentMaps)
        return;

      // One-time initializations: 
      if (_sceneCaptureNodes[0] == null)
      {
        // Create cube maps and scene capture nodes.
        // (Note: A cube map size of 256 is enough for surfaces with a specular power
        // in the range [0, 200000].)
        for (int i = 0; i < _sceneCaptureNodes.Length; i++)
        {
          var renderTargetCube = new RenderTargetCube(
            GraphicsService.GraphicsDevice,
            256,
            true,
            SurfaceFormat.Color,
            DepthFormat.None);

          // Each capture node renders into its cube map using a 90° FOV camera placed
          // at the pose of the corresponding light node.
          var renderToTexture = new RenderToTexture { Texture = renderTargetCube };
          var projection = new PerspectiveProjection();
          projection.SetFieldOfView(ConstantsF.PiOver2, 1, 1, 100);
          _sceneCaptureNodes[i] = new SceneCaptureNode(renderToTexture)
          {
            CameraNode = new CameraNode(new Camera(projection))
            {
              PoseWorld = _lightNodes[i].PoseWorld,
            },
          };

          _imageBasedLights[i].Texture = renderTargetCube;
        }

        // We use a ColorEncoder to encode a HDR image in a normal Color texture.
        _colorEncoder = new ColorEncoder(GraphicsService)
        {
          SourceEncoding = ColorEncoding.Rgb,
          TargetEncoding = ColorEncoding.Rgbm,
        };

        // The SceneCaptureRenderer has a render callback which defines what is rendered
        // into the scene capture render targets.
        _graphicsScreen.SceneCaptureRenderer.RenderCallback = context =>
        {
          var graphicsDevice = GraphicsService.GraphicsDevice;
          var renderTargetPool = GraphicsService.RenderTargetPool;

          // Get scene nodes which are visible by the current camera.
          CustomSceneQuery sceneQuery = context.Scene.Query<CustomSceneQuery>(context.CameraNode, context);

          // The final image has to be rendered into this render target.
          var ldrTarget = context.RenderTarget;

          // Use an intermediate HDR render target with the same resolution as the final target.
          var format = new RenderTargetFormat(ldrTarget)
          {
            SurfaceFormat = SurfaceFormat.HdrBlendable,
            DepthStencilFormat = DepthFormat.Depth24Stencil8
          };
          var hdrTarget = renderTargetPool.Obtain2D(format);

          graphicsDevice.SetRenderTarget(hdrTarget);
          context.RenderTarget = hdrTarget;

          // Render scene (without post-processing, without lens flares, no debug rendering, no reticle).
          _graphicsScreen.RenderScene(sceneQuery, context, false, false, false, false);

          // Convert the HDR image to RGBM image.
          context.SourceTexture = hdrTarget;
          context.RenderTarget = ldrTarget;
          _colorEncoder.Process(context);
          context.SourceTexture = null;

          // Clean up.
          renderTargetPool.Recycle(hdrTarget);
          context.RenderTarget = ldrTarget;
        };
      }

      // If the capture nodes are not yet part of the scene, add them and prepare the
      // scene (disable IBL/other lights and dynamic objects) for capturing.
      if (_sceneCaptureNodes[0].Parent == null)
      {
        // Add the scene capture nodes to the scene.
        for (int i = 0; i < _sceneCaptureNodes.Length; i++)
          _graphicsScreen.Scene.Children.Add(_sceneCaptureNodes[i]);

        // Remember the old time stamp of the nodes.
        _oldEnvironmentMapTimeStamp = _sceneCaptureNodes[0].LastFrame;

        // Disable all lights except ambient and directional lights.
        // We do not capture the image-based lights or any other lights (e.g. point lights)
        // in the cube map.
        foreach (var lightNode in _graphicsScreen.Scene.GetDescendants().OfType<LightNode>())
          lightNode.IsEnabled = (lightNode.Light is AmbientLight) || (lightNode.Light is DirectionalLight);

        // Disable dynamic objects.
        foreach (var node in _graphicsScreen.Scene.GetDescendants())
          if (node is MeshNode || node is LodGroupNode)
            if (!node.IsStatic)
              node.IsEnabled = false;
      }
      else
      {
        // The scene capture nodes are part of the scene. Check if they have been
        // updated.
        if (_sceneCaptureNodes[0].LastFrame != _oldEnvironmentMapTimeStamp)
        {
          // We have new environment maps. Restore the normal scene.
          for (int i = 0; i < _sceneCaptureNodes.Length; i++)
            _graphicsScreen.Scene.Children.Remove(_sceneCaptureNodes[i]);

          _updateEnvironmentMaps = false;

          // NOTE(review): this re-enables ALL light nodes, including any that were
          // disabled before the capture started — confirm that is acceptable here.
          foreach (var lightNode in _graphicsScreen.Scene.GetDescendants().OfType<LightNode>())
            lightNode.IsEnabled = true;

          foreach (var node in _graphicsScreen.Scene.GetDescendants())
            if (node is MeshNode || node is LodGroupNode)
              if (!node.IsStatic)
                node.IsEnabled = true;
        }
      }
    }
示例#51
0
        /// <summary>
        /// Renders the light buffers (accumulated diffuse and specular light intensities)
        /// for the given lights, darkened by screen-space ambient occlusion.
        /// </summary>
        /// <param name="lights">The light nodes to render.</param>
        /// <param name="context">
        /// The render context. On return, <c>context.LightBuffer0</c> (diffuse) and
        /// <c>context.LightBuffer1</c> (specular) contain the accumulated light intensities.
        /// </param>
        public void Render(IList <SceneNode> lights, RenderContext context)
        {
            var graphicsService  = context.GraphicsService;
            var graphicsDevice   = graphicsService.GraphicsDevice;
            var renderTargetPool = graphicsService.RenderTargetPool;

            // Remember the original target/viewport so they can be restored below.
            var target   = context.RenderTarget;
            var viewport = context.Viewport;
            var width    = viewport.Width;
            var height   = viewport.Height;

            // Render ambient occlusion info into a render target.
            // NOTE(review): no Math.Max(1, ...) guard — a DownsampleFactor larger than the
            // viewport size would request a 0-sized target. Confirm the factor is bounded.
            var aoRenderTarget = renderTargetPool.Obtain2D(new RenderTargetFormat(
                                                               width / _ssaoFilter.DownsampleFactor,
                                                               height / _ssaoFilter.DownsampleFactor,
                                                               false,
                                                               SurfaceFormat.Color,
                                                               DepthFormat.None));

            // PostProcessors require that context.SourceTexture is set. But since
            // _ssaoFilter.CombineWithSource is set to false, the SourceTexture is not
            // used and we can set it to anything except null.
            context.SourceTexture = aoRenderTarget;
            context.RenderTarget  = aoRenderTarget;
            context.Viewport      = new Viewport(0, 0, aoRenderTarget.Width, aoRenderTarget.Height);
            _ssaoFilter.Process(context);
            context.SourceTexture = null;

            // The light buffer consists of two full-screen render targets into which we
            // render the accumulated diffuse and specular light intensities.
            var lightBufferFormat = new RenderTargetFormat(width, height, false, SurfaceFormat.HdrBlendable, DepthFormat.Depth24Stencil8);

            context.LightBuffer0 = renderTargetPool.Obtain2D(lightBufferFormat);
            context.LightBuffer1 = renderTargetPool.Obtain2D(lightBufferFormat);

            // Set the device render target to the light buffer.
            _renderTargetBindings[0] = new RenderTargetBinding(context.LightBuffer0); // Diffuse light accumulation
            _renderTargetBindings[1] = new RenderTargetBinding(context.LightBuffer1); // Specular light accumulation
            graphicsDevice.SetRenderTargets(_renderTargetBindings);
            context.RenderTarget = context.LightBuffer0;
            context.Viewport     = graphicsDevice.Viewport;

            // Clear the light buffer. (The alpha channel is not used. We can set it to anything.)
            graphicsDevice.Clear(new Color(0, 0, 0, 255));

            // Render all lights into the light buffers.
            LightRenderer.Render(lights, context);

            // Render the ambient occlusion texture using multiplicative blending.
            // This will darken the light buffers depending on the ambient occlusion term.
            // Note: Theoretically, this should be done after the ambient light renderer
            // and before the directional light renderer because AO should not affect
            // directional lights. But doing this here has more impact.
            context.SourceTexture     = aoRenderTarget;
            graphicsDevice.BlendState = GraphicsHelper.BlendStateMultiply;
            _copyFilter.Process(context);

            // Clean up.
            graphicsService.RenderTargetPool.Recycle(aoRenderTarget);
            context.RenderTarget = target;
            context.Viewport     = viewport;

#if MONOGAME
            graphicsDevice.SetRenderTarget(null); // Cannot clear _renderTargetbindings if it is still set in the MonoGame device.
#endif
            // Drop the bindings so the pooled light buffers are not kept alive here.
            _renderTargetBindings[0] = new RenderTargetBinding();
            _renderTargetBindings[1] = new RenderTargetBinding();
        }
        /// <summary>
        /// Renders the scene into the textures of the given <c>SceneCaptureNode</c>s
        /// (2D render targets or cube maps) using the registered render callback.
        /// </summary>
        /// <param name="nodes">The scene nodes; non-<c>SceneCaptureNode</c> entries are skipped.</param>
        /// <param name="context">The render context.</param>
        /// <param name="order">The render order. (Not used by this renderer.)</param>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="nodes"/> or <paramref name="context"/> is <see langword="null"/>.
        /// </exception>
        /// <exception cref="GraphicsException">
        /// A node's texture is neither a <c>RenderTarget2D</c> nor a <c>RenderTargetCube</c>,
        /// the camera does not use a perspective projection, or a scene query collection was
        /// modified inside the render callback.
        /// </exception>
        public override void Render(IList <SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            if (nodes == null)
            {
                throw new ArgumentNullException("nodes");
            }
            if (context == null)
            {
                throw new ArgumentNullException("context");
            }

            int numberOfNodes = nodes.Count;

            if (numberOfNodes == 0)
            {
                return;
            }

            var graphicsService  = context.GraphicsService;
            var graphicsDevice   = graphicsService.GraphicsDevice;
            var renderTargetPool = graphicsService.RenderTargetPool;
            int frame            = context.Frame;

            // Save device state and context properties; both are restored at the end.
            var savedRenderState = new RenderStateSnapshot(graphicsDevice);

            var originalRenderTarget  = context.RenderTarget;
            var originalViewport      = context.Viewport;
            var originalCameraNode    = context.CameraNode;
            var originalLodCameraNode = context.LodCameraNode;
            var originalReferenceNode = context.ReferenceNode;

            try
            {
                // InvalidOperationExceptions (e.g. a scene query collection modified inside
                // the RenderCallback) are translated into GraphicsExceptions in the catch
                // block below.
                for (int i = 0; i < numberOfNodes; i++)
                {
                    var node = nodes[i] as SceneCaptureNode;
                    if (node == null)
                    {
                        continue;
                    }

                    // Update each node only once per frame.
                    if (node.LastFrame == frame)
                    {
                        continue;
                    }

                    node.LastFrame = frame;

                    var cameraNode = node.CameraNode;
                    if (cameraNode == null)
                    {
                        continue;
                    }

                    var texture = node.RenderToTexture.Texture;
                    if (texture == null)
                    {
                        continue;
                    }

                    // RenderToTexture instances can be shared. --> Update them only once per frame.
                    if (node.RenderToTexture.LastFrame == frame)
                    {
                        continue;
                    }

                    context.CameraNode    = cameraNode;
                    context.LodCameraNode = cameraNode;
                    context.ReferenceNode = node;

                    // ----- Case 1: Render into a 2D render target.
                    var renderTarget2D = texture as RenderTarget2D;
                    var projection     = cameraNode.Camera.Projection;
                    if (renderTarget2D != null)
                    {
                        context.RenderTarget = renderTarget2D;
                        context.Viewport     = new Viewport(0, 0, renderTarget2D.Width, renderTarget2D.Height);

                        RenderCallback(context);

                        // Update other properties of RenderToTexture.
                        node.RenderToTexture.LastFrame     = frame;
                        node.RenderToTexture.TextureMatrix = GraphicsHelper.ProjectorBiasMatrix
                                                             * projection
                                                             * cameraNode.PoseWorld.Inverse;

                        continue;
                    }

                    // ----- Case 2: Render all 6 faces of a cube map.
                    var renderTargetCube = texture as RenderTargetCube;
                    if (renderTargetCube != null)
                    {
                        // Render each face into an intermediate 2D target from the pool.
                        var format = new RenderTargetFormat(renderTargetCube)
                        {
                            Mipmap = false
                        };

                        renderTarget2D = renderTargetPool.Obtain2D(format);

                        context.RenderTarget = renderTarget2D;
                        context.Viewport     = new Viewport(0, 0, renderTarget2D.Width, renderTarget2D.Height);

                        // The sprite batch is created lazily on first use.
                        if (_spriteBatch == null)
                        {
                            _spriteBatch = graphicsService.GetSpriteBatch();
                        }

                        var perspectiveProjection = projection as PerspectiveProjection;
                        if (perspectiveProjection == null)
                        {
                            throw new GraphicsException("The camera of the SceneCaptureNode must use a perspective projection.");
                        }

                        // Cube map faces require a square 90° field of view.
                        // ReSharper disable CompareOfFloatsByEqualityOperator
                        if (perspectiveProjection.FieldOfViewX != ConstantsF.PiOver2 ||
                            perspectiveProjection.FieldOfViewY != ConstantsF.PiOver2)
                        {
                            perspectiveProjection.SetFieldOfView(ConstantsF.PiOver2, 1, projection.Near, projection.Far);
                        }
                        // ReSharper restore CompareOfFloatsByEqualityOperator

                        var originalCameraPose = cameraNode.PoseWorld;
                        for (int side = 0; side < 6; side++)
                        {
                            // Rotate camera to face the current cube map face.
                            //var cubeMapFace = (CubeMapFace)side;
                            // AMD problem: If we generate in normal order, the last cube map face contains
                            // garbage when mipmaps are created.
                            var cubeMapFace = (CubeMapFace)(5 - side);
                            var position    = cameraNode.PoseWorld.Position;
                            cameraNode.View = Matrix.CreateLookAt(
                                position,
                                position + originalCameraPose.ToWorldDirection(GraphicsHelper.GetCubeMapForwardDirection(cubeMapFace)),
                                originalCameraPose.ToWorldDirection(GraphicsHelper.GetCubeMapUpDirection(cubeMapFace)));

                            RenderCallback(context);

                            // Copy RGBM texture into cube map face.
                            graphicsDevice.SetRenderTarget(renderTargetCube, cubeMapFace);
                            _spriteBatch.Begin(SpriteSortMode.Immediate, BlendState.Opaque, null, null, null);
                            _spriteBatch.Draw(renderTarget2D, new Vector2(0, 0), Color.White);
                            _spriteBatch.End();
                        }
                        cameraNode.PoseWorld = originalCameraPose;

                        renderTargetPool.Recycle(renderTarget2D);

                        // Update other properties of RenderToTexture.
                        node.RenderToTexture.LastFrame     = frame;
                        node.RenderToTexture.TextureMatrix = GraphicsHelper.ProjectorBiasMatrix
                                                             * projection
                                                             * cameraNode.PoseWorld.Inverse;

                        continue;
                    }

                    throw new GraphicsException(
                              "SceneCaptureNode.RenderToTexture.Texture is invalid. The texture must be a RenderTarget2D or RenderTargetCube.");
                }
            }
            catch (InvalidOperationException exception)
            {
                throw new GraphicsException(
                          "InvalidOperationException was raised in SceneCaptureRenderer.Render(). "
                          + "This can happen if a SceneQuery instance that is currently in use is modified in the "
                          + "RenderCallback. --> Use different SceneQuery types in the method which calls "
                          + "SceneCaptureRenderer.Render() and in the RenderCallback method.",
                          exception);
            }

            graphicsDevice.SetRenderTarget(null);
            savedRenderState.Restore();

            // Restore the original render context.
            context.RenderTarget  = originalRenderTarget;
            context.Viewport      = originalViewport;
            context.CameraNode    = originalCameraNode;
            context.LodCameraNode = originalLodCameraNode;
            context.ReferenceNode = originalReferenceNode;
        }
示例#53
0
        /// <summary>
        /// Creates a depth-of-field effect: the scene is blurred per pixel based on a
        /// circle-of-confusion (CoC) map that is computed from the depth buffer (G-buffer 0)
        /// and the near/far focus and blur distances.
        /// </summary>
        /// <param name="context">
        /// The render context. Requires <c>SourceTexture</c>, <c>CameraNode</c> and
        /// <c>GBuffer0</c>. <c>SourceTexture</c>, <c>RenderTarget</c> and <c>Viewport</c>
        /// are temporarily overwritten for the intermediate passes and restored at the end.
        /// </param>
        protected override void OnProcess(RenderContext context)
        {
            context.ThrowIfCameraMissing();
              context.ThrowIfGBuffer0Missing();

              var graphicsDevice = GraphicsService.GraphicsDevice;
              var cameraNode = context.CameraNode;
              var renderTargetPool = GraphicsService.RenderTargetPool;

              // Remember the original context values so they can be restored in the clean-up.
              var source = context.SourceTexture;
              var target = context.RenderTarget;
              var viewport = context.Viewport;

              var sourceSize = new Vector2F(source.Width, source.Height);
              int width = (int)sourceSize.X;
              int height = (int)sourceSize.Y;
              int downsampledWidth = Math.Max(1, width / DownsampleFactor);
              int downsampledHeight = Math.Max(1, height / DownsampleFactor);

              // Floating-point surface formats cannot be sampled with linear filtering,
              // so point sampling is used and the Gaussian blur is initialized accordingly.
              if (TextureHelper.IsFloatingPointFormat(source.Format))
              {
            graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
            InitializeGaussianBlur(new Vector2F(downsampledWidth, downsampledHeight), false);
              }
              else
              {
            graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
            InitializeGaussianBlur(new Vector2F(downsampledWidth, downsampledHeight), true);
              }

              // Get temporary render targets.
              var downsampleFormat = new RenderTargetFormat(downsampledWidth, downsampledHeight, false, source.Format, DepthFormat.None);
              RenderTarget2D blurredScene0 = renderTargetPool.Obtain2D(downsampleFormat);
              RenderTarget2D blurredScene1 = renderTargetPool.Obtain2D(downsampleFormat);

              var blurredDepthFormat = new RenderTargetFormat(downsampledWidth, downsampledHeight, false, context.GBuffer0.Format, DepthFormat.None);
              RenderTarget2D blurredDepth0 = renderTargetPool.Obtain2D(blurredDepthFormat);

              // The CoC map is a single-channel (Single) full-resolution image.
              var cocFormat = new RenderTargetFormat(width, height, false, SurfaceFormat.Single, DepthFormat.None);
              RenderTarget2D cocImage = renderTargetPool.Obtain2D(cocFormat);

              var downSampledCocFormat = new RenderTargetFormat(downsampledWidth, downsampledHeight, false, cocFormat.SurfaceFormat, DepthFormat.None);
              RenderTarget2D cocImageBlurred = renderTargetPool.Obtain2D(downSampledCocFormat);

              // ----- Create CoC map.
              _effect.CurrentTechnique = _effect.Techniques[0];
              graphicsDevice.SetRenderTarget(cocImage);
              _screenSizeParameter.SetValue(new Vector2(cocImage.Width, cocImage.Height));
              _depthTextureParameter.SetValue(context.GBuffer0);
              _nearBlurDistanceParameter.SetValue(NearBlurDistance);
              _nearFocusDistanceParameter.SetValue(NearFocusDistance);
              _farFocusDistanceParameter.SetValue(FarFocusDistance);
              _farBlurDistanceParameter.SetValue(FarBlurDistance);
              _farParameter.SetValue(cameraNode.Camera.Projection.Far);
              _circleOfConfusionPass.Apply();
              graphicsDevice.DrawFullScreenQuad();

              // ----- Downsample cocImage to cocImageBlurred.
              context.SourceTexture = cocImage;
              context.RenderTarget = cocImageBlurred;
              context.Viewport = new Viewport(0, 0, cocImageBlurred.Width, cocImageBlurred.Height);
              _downsampleFilter.Process(context);

              // The full-resolution CoC image is only needed again in the final pass via
              // the depth texture; the intermediate copy can be recycled now.
              renderTargetPool.Recycle(cocImage);

              // ----- Downsample source to blurredScene0.
              context.SourceTexture = source;
              context.RenderTarget = blurredScene0;
              context.Viewport = new Viewport(0, 0, blurredScene0.Width, blurredScene0.Height);
              _downsampleFilter.Process(context);

              // ----- Downsample depth texture to blurredDepth0.
              context.SourceTexture = context.GBuffer0;
              context.RenderTarget = blurredDepth0;
              context.Viewport = new Viewport(0, 0, blurredDepth0.Width, blurredDepth0.Height);
              _downsampleFilter.Process(context);

              // ----- Blur scene.
              // Horizontal blur
              graphicsDevice.SetRenderTarget(blurredScene1);
              _screenSizeParameter.SetValue(new Vector2(blurredScene0.Width, blurredScene0.Height));
              _blurTextureParameter.SetValue(blurredScene0);
              _downsampledDepthTextureParameter.SetValue(blurredDepth0);
            _downsampledCocTextureParameter.SetValue(cocImageBlurred);
              _offsetsParameter.SetValue(_horizontalOffsets);
              _weightsParameter.SetValue(_weights);
              _blurPass.Apply();
              graphicsDevice.DrawFullScreenQuad();

              // Vertical blur.
              // (Only the blur texture and the offsets change; all other parameters are kept.)
              graphicsDevice.SetRenderTarget(blurredScene0);
              _blurTextureParameter.SetValue(blurredScene1);
              _offsetsParameter.SetValue(_verticalOffsets);
              _blurPass.Apply();
              graphicsDevice.DrawFullScreenQuad();

              renderTargetPool.Recycle(blurredScene1);

              // ----- Blur cocImageBlurred.
              context.SourceTexture = cocImageBlurred;
              context.RenderTarget = cocImageBlurred;
              context.Viewport = new Viewport(0, 0, cocImageBlurred.Width, cocImageBlurred.Height);
              _cocBlur.Process(context);   // We make a two pass blur, so context.SourceTexture can be equal to context.RenderTarget.

              // ----- Blur depth.
              context.SourceTexture = blurredDepth0;
              context.RenderTarget = blurredDepth0;
              context.Viewport = new Viewport(0, 0, blurredDepth0.Width, blurredDepth0.Height);
              _cocBlur.Process(context);

              // ----- Create final DoF image.
              // Combines the sharp scene with the blurred scene using the (blurred) CoC/depth data.
              _effect.CurrentTechnique = _effect.Techniques[0];
              graphicsDevice.SetRenderTarget(target);
              graphicsDevice.Viewport = viewport;
              _screenSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
              _sceneTextureParameter.SetValue(source);
              _blurTextureParameter.SetValue(blurredScene0);
              _depthTextureParameter.SetValue(context.GBuffer0);
              _downsampledDepthTextureParameter.SetValue(blurredDepth0);
              _downsampledCocTextureParameter.SetValue(cocImageBlurred);
              _depthOfFieldPass.Apply();
              graphicsDevice.DrawFullScreenQuad();

              // ----- Clean-up
              // Release texture references held by the effect, return pooled targets,
              // and restore the original render context.
              _depthTextureParameter.SetValue((Texture2D)null);
              _blurTextureParameter.SetValue((Texture2D)null);
              _downsampledDepthTextureParameter.SetValue((Texture2D)null);
              _downsampledCocTextureParameter.SetValue((Texture2D)null);
              _sceneTextureParameter.SetValue((Texture2D)null);
              renderTargetPool.Recycle(blurredScene0);
              renderTargetPool.Recycle(blurredDepth0);
              renderTargetPool.Recycle(cocImageBlurred);
              context.SourceTexture = source;
              context.RenderTarget = target;
              context.Viewport = viewport;
        }
示例#54
0
        /// <summary>
        /// Creates a bloom effect: the bright parts of the source image are extracted,
        /// blurred, and combined with the original scene.
        /// </summary>
        /// <param name="context">
        /// The render context. <c>SourceTexture</c>, <c>RenderTarget</c> and <c>Viewport</c>
        /// are temporarily overwritten for the intermediate passes and restored at the end.
        /// </param>
        protected override void OnProcess(RenderContext context)
        {
            // This processor relies on linear filtering of the source image, which is not
            // available for floating-point surface formats.
            if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
                throw new GraphicsException("Source texture format must not be a floating-point format.");

            var device = GraphicsService.GraphicsDevice;
            var pool = GraphicsService.RenderTargetPool;

            // Remember the original context values so they can be restored at the end.
            var sourceTexture = context.SourceTexture;
            var originalTarget = context.RenderTarget;
            var originalViewport = context.Viewport;

            int lowResWidth = Math.Max(1, sourceTexture.Width / DownsampleFactor);
            int lowResHeight = Math.Max(1, sourceTexture.Height / DownsampleFactor);

            // ----- Get temporary low-resolution render targets.
            var lowResFormat = new RenderTargetFormat(lowResWidth, lowResHeight, false, SurfaceFormat.Color, DepthFormat.None);
            RenderTarget2D downsampledScene = pool.Obtain2D(lowResFormat);
            RenderTarget2D bloomImage = pool.Obtain2D(lowResFormat);

            // ----- Downsample the scene into downsampledScene.
            context.RenderTarget = downsampledScene;
            context.Viewport = new Viewport(0, 0, downsampledScene.Width, downsampledScene.Height);
            _downsampleFilter.Process(context);

            // ----- Extract the bright areas into bloomImage.
            device.SetRenderTarget(bloomImage);
            _viewportSizeParameter.SetValue(new Vector2(device.Viewport.Width, device.Viewport.Height));
            _bloomThresholdParameter.SetValue(Threshold);
            _bloomIntensityParameter.SetValue(Intensity);
            _bloomSaturationParameter.SetValue(Saturation);
            _sceneTextureParameter.SetValue(downsampledScene);
            _brightnessPass.Apply();
            device.DrawFullScreenQuad();

            // The downsampled scene is no longer needed.
            pool.Recycle(downsampledScene);

            // ----- Blur the bloom image in place.
            // The blur works in two passes, so source may equal the render target.
            context.SourceTexture = bloomImage;
            context.RenderTarget = bloomImage;
            _blur.Process(context);

            // ----- Combine the original scene with the blurred bloom image.
            device.SetRenderTarget(originalTarget);
            device.Viewport = originalViewport;
            _viewportSizeParameter.SetValue(new Vector2(device.Viewport.Width, device.Viewport.Height));
            _sceneTextureParameter.SetValue(sourceTexture);
            _bloomTextureParameter.SetValue(bloomImage);
            _combinePass.Apply();
            device.DrawFullScreenQuad();

            // ----- Clean-up: release effect texture references and the pooled target.
            _sceneTextureParameter.SetValue((Texture2D)null);
            _bloomTextureParameter.SetValue((Texture2D)null);
            pool.Recycle(bloomImage);

            // Restore the original render context.
            context.SourceTexture = sourceTexture;
            context.RenderTarget = originalTarget;
            context.Viewport = originalViewport;
        }
示例#55
0
        /// <summary>
        /// Renders all queued shadow jobs into their shadow masks, submitting consecutive
        /// jobs that share the same shadow mask and renderer as one batch, and applies the
        /// post-process filter to each finished mask.
        /// </summary>
        /// <param name="context">The render context. RenderTarget/Viewport are restored at the end.</param>
        /// <param name="order">The render order passed through to the scene node renderers.</param>
        internal override void ProcessJobs(RenderContext context, RenderOrder order)
        {
            var graphicsDevice   = _graphicsService.GraphicsDevice;
            var savedRenderState = new RenderStateSnapshot(graphicsDevice);
            var target           = context.RenderTarget;
            var viewport         = context.Viewport;

            Debug.Assert(_shadowMasks.Length > 0);
            Debug.Assert(_shadowMasks[0] != null);

            RenderTarget2D lowResTarget = null;

            if (UseHalfResolution && Numeric.IsGreater(UpsampleDepthSensitivity, 0))
            {
                // Half-res rendering with upsampling.
                var format = new RenderTargetFormat(_shadowMasks[0]);
                format.Width  /= 2;
                format.Height /= 2;
                lowResTarget   = _graphicsService.RenderTargetPool.Obtain2D(format);
            }

            int index               = 0;
            var jobs                = Jobs.Array;
            int jobCount            = Jobs.Count;
            int lastShadowMaskIndex = -1;

            while (index < jobCount)
            {
                // The shadow mask index is encoded in the upper bits of the sort key.
                int shadowMaskIndex = (int)(jobs[index].SortKey >> 16);
                var renderer        = jobs[index].Renderer;

                // Find end of current batch: consecutive jobs using the same shadow mask
                // and the same renderer.
                // BUGFIX: The batch scan must compare against the current job's
                // shadowMaskIndex. The previous code compared against lastShadowMaskIndex
                // (the previous batch's mask), which needlessly split batches and caused
                // one renderer call per job in most cases.
                int endIndexExclusive = index + 1;
                while (endIndexExclusive < jobCount)
                {
                    if ((int)(jobs[endIndexExclusive].SortKey >> 16) != shadowMaskIndex ||
                        jobs[endIndexExclusive].Renderer != renderer)
                    {
                        break;
                    }

                    endIndexExclusive++;
                }

                // Restore the render state. (The integrated scene node renderers properly
                // restore the render state, but third-party renderers might mess it up.)
                if (index > 0)
                {
                    savedRenderState.Restore();
                }

                if (shadowMaskIndex != lastShadowMaskIndex)
                {
                    // Done with current shadow mask. Apply filter.
                    if (lastShadowMaskIndex >= 0)
                    {
                        PostProcess(context, context.RenderTarget, _shadowMasks[lastShadowMaskIndex]);
                    }

                    // Switch to next shadow mask.
                    lastShadowMaskIndex = shadowMaskIndex;

                    // Render into the low-res target when half-res rendering is enabled.
                    var shadowMask = lowResTarget ?? _shadowMasks[shadowMaskIndex];

                    // Set device render target and clear it to white (= no shadow).
                    graphicsDevice.SetRenderTarget(shadowMask);
                    context.RenderTarget = shadowMask;
                    context.Viewport     = graphicsDevice.Viewport;
                    graphicsDevice.Clear(Color.White);
                }

                // Submit batch to renderer.
                // (Use Accessor to expose current batch as IList<SceneNode>.)
                JobsAccessor.Set(Jobs, index, endIndexExclusive);
                renderer.Render(JobsAccessor, context, order);
                JobsAccessor.Reset();

                index = endIndexExclusive;
            }

            // Done with last shadow mask. Apply filter.
            // ROBUSTNESS: Guard against an empty job list, in which case no mask was
            // rendered and lastShadowMaskIndex is still -1 (previously this indexed
            // _shadowMasks[-1] and threw).
            if (lastShadowMaskIndex >= 0)
            {
                PostProcess(context, context.RenderTarget, _shadowMasks[lastShadowMaskIndex]);
            }

            savedRenderState.Restore();
            graphicsDevice.ResetTextures();
            graphicsDevice.SetRenderTarget(null);
            context.RenderTarget = target;
            context.Viewport     = viewport;

            // NOTE(review): lowResTarget may be null here (full-res path); the original code
            // also passed null, so Recycle presumably tolerates it — verify in RenderTargetPool.
            _graphicsService.RenderTargetPool.Recycle(lowResTarget);
        }
示例#56
0
        /// <summary>
        /// Downsamples the source texture to the size of the target viewport, using several
        /// passes with the largest possible per-pass factor. Non-floating-point sources use
        /// bilinear hardware filtering (factors 2/4/6/8); floating-point sources fall back to
        /// point sampling (factors 2/3/4), with a special shader path when the source is the
        /// depth buffer (G-buffer 0).
        /// </summary>
        /// <param name="context">
        /// The render context. The final pass renders into <c>context.RenderTarget</c> using
        /// <c>context.Viewport</c>; the context itself is not modified.
        /// </param>
        protected override void OnProcess(RenderContext context)
        {
            var graphicsDevice = GraphicsService.GraphicsDevice;

            // The width/height of the current input.
            int sourceWidth = context.SourceTexture.Width;
            int sourceHeight = context.SourceTexture.Height;

            // The target width/height.
            int targetWidth = context.Viewport.Width;
            int targetHeight = context.Viewport.Height;

            // Surface format of input.
            bool isFloatingPointFormat = TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format);

            // Floating-point formats cannot use linear filtering, so we need two different paths.
            // 'last' holds the intermediate render target of the previous pass (null in the
            // first pass, where context.SourceTexture is sampled directly).
            RenderTarget2D last = null;
            if (!isFloatingPointFormat)
            {
                // ----- We can use bilinear hardware filtering.
                do
                {
                    // Determine downsample factor. Use the largest possible factor to minimize passes.
                    int factor;
                    if (sourceWidth / 2 <= targetWidth && sourceHeight / 2 <= targetHeight)
                        factor = 2;
                    else if (sourceWidth / 4 <= targetWidth && sourceHeight / 4 <= targetHeight)
                        factor = 4;
                    else if (sourceWidth / 6 <= targetWidth && sourceHeight / 6 <= targetHeight)
                        factor = 6;
                    else
                        factor = 8;

                    // Downsample to this target size.
                    int tempTargetWidth = Math.Max(targetWidth, sourceWidth / factor);
                    int tempTargetHeight = Math.Max(targetHeight, sourceHeight / factor);

                    // Is this the final pass that renders into context.RenderTarget?
                    bool isFinalPass = (tempTargetWidth <= targetWidth && tempTargetHeight <= targetHeight);
                    RenderTarget2D temp = null;
                    if (isFinalPass)
                    {
                        graphicsDevice.SetRenderTarget(context.RenderTarget);
                        graphicsDevice.Viewport = context.Viewport;
                    }
                    else
                    {
                        // Get temporary render target for intermediate steps.
                        var tempFormat = new RenderTargetFormat(tempTargetWidth, tempTargetHeight, false, context.SourceTexture.Format, DepthFormat.None);
                        temp = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);
                        graphicsDevice.SetRenderTarget(temp);
                    }

                    _sourceSizeParameter.SetValue(new Vector2(sourceWidth, sourceHeight));
                    _targetSizeParameter.SetValue(new Vector2(tempTargetWidth, tempTargetHeight));
                    _sourceTextureParameter.SetValue(last ?? context.SourceTexture);
                    if (factor == 2)
                        _linear2Pass.Apply();
                    else if (factor == 4)
                        _linear4Pass.Apply();
                    else if (factor == 6)
                        _linear6Pass.Apply();
                    else if (factor == 8)
                        _linear8Pass.Apply();

                    graphicsDevice.DrawFullScreenQuad();

                    // Recycle the previous intermediate target and advance to the next pass.
                    GraphicsService.RenderTargetPool.Recycle(last);
                    last = temp;
                    sourceWidth = tempTargetWidth;
                    sourceHeight = tempTargetHeight;
                } while (sourceWidth > targetWidth || sourceHeight > targetHeight);
            }
            else
            {
                // ----- We cannot use hardware filtering. :-(
                do
                {
                    // Determine downsample factor. Use the largest possible factor to minimize passes.
                    int factor;
                    if (sourceWidth / 2 <= targetWidth && sourceHeight / 2 <= targetHeight)
                        factor = 2;
                    else if (sourceWidth / 3 <= targetWidth && sourceHeight / 3 <= targetHeight)
                        factor = 3;
                    else
                        factor = 4;

                    // Downsample to this target size.
                    int tempTargetWidth = Math.Max(targetWidth, sourceWidth / factor);
                    int tempTargetHeight = Math.Max(targetHeight, sourceHeight / factor);

                    // Is this the final pass that renders into context.RenderTarget?
                    bool isFinalPass = (tempTargetWidth <= targetWidth && tempTargetHeight <= targetHeight);
                    RenderTarget2D temp = null;
                    if (isFinalPass)
                    {
                        graphicsDevice.SetRenderTarget(context.RenderTarget);
                        graphicsDevice.Viewport = context.Viewport;
                    }
                    else
                    {
                        // Get temporary render target for intermediate steps.
                        var tempFormat = new RenderTargetFormat(tempTargetWidth, tempTargetHeight, false, context.SourceTexture.Format, DepthFormat.None);
                        temp = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);
                        graphicsDevice.SetRenderTarget(temp);
                    }

                    _sourceSizeParameter.SetValue(new Vector2(sourceWidth, sourceHeight));
                    _targetSizeParameter.SetValue(new Vector2(tempTargetWidth, tempTargetHeight));
                    var source = last ?? context.SourceTexture;
                    _sourceTextureParameter.SetValue(source);
                    if (source != context.GBuffer0)
                    {
                        if (factor == 2)
                            _point2Pass.Apply();
                        else if (factor == 3)
                            _point3Pass.Apply();
                        else
                            _point4Pass.Apply();
                    }
                    else
                    {
                        // This is the depth buffer and it needs special handling.
                        if (factor == 2)
                            _point2DepthPass.Apply();
                        else if (factor == 3)
                            _point3DepthPass.Apply();
                        else
                            _point4DepthPass.Apply();
                    }

                    graphicsDevice.DrawFullScreenQuad();

                    // Recycle the previous intermediate target and advance to the next pass.
                    GraphicsService.RenderTargetPool.Recycle(last);
                    last = temp;
                    sourceWidth = tempTargetWidth;
                    sourceHeight = tempTargetHeight;
                } while (sourceWidth > targetWidth || sourceHeight > targetHeight);
            }

            // BUGFIX: The clean-up below was previously inside the floating-point branch only,
            // so the bilinear path left the source texture bound to the effect parameter.
            // It applies to both paths, therefore it now runs after the if/else.
            _sourceTextureParameter.SetValue((Texture2D)null);

            Debug.Assert(last == null, "Intermediate render target should have been recycled.");
        }
示例#57
0
        /// <summary>
        /// Creates a god-ray (light shaft) effect: a mask image where occluders are black is
        /// created from the scene, downsampled, blurred radially toward the projected light
        /// position over several passes, and finally combined with the original scene.
        /// </summary>
        /// <param name="context">
        /// The render context. Requires <c>SourceTexture</c>, <c>CameraNode</c> and
        /// <c>GBuffer0</c>. <c>SourceTexture</c>, <c>RenderTarget</c> and <c>Viewport</c>
        /// are temporarily overwritten for the intermediate passes and restored at the end.
        /// </param>
        protected override void OnProcess(RenderContext context)
        {
            context.ThrowIfCameraMissing();
            context.ThrowIfGBuffer0Missing();

            var graphicsDevice   = GraphicsService.GraphicsDevice;
            var renderTargetPool = GraphicsService.RenderTargetPool;

            // Remember the original context values so they can be restored in the clean-up.
            var source   = context.SourceTexture;
            var target   = context.RenderTarget;
            var viewport = context.Viewport;

            // Get temporary render targets.
            var sourceSize            = new Vector2F(source.Width, source.Height);
            var isFloatingPointFormat = TextureHelper.IsFloatingPointFormat(source.Format);

            var sceneFormat = new RenderTargetFormat(source.Width, source.Height, false, source.Format, DepthFormat.None);
            var maskedScene = renderTargetPool.Obtain2D(sceneFormat);

            // The ray images are low-resolution (source size divided by DownsampleFactor).
            var rayFormat = new RenderTargetFormat(
                Math.Max(1, (int)(sourceSize.X / DownsampleFactor)),
                Math.Max(1, (int)(sourceSize.Y / DownsampleFactor)),
                false,
                source.Format,
                DepthFormat.None);
            var rayImage0 = renderTargetPool.Obtain2D(rayFormat);
            var rayImage1 = renderTargetPool.Obtain2D(rayFormat);

            // Get view and view-projection transforms.
            var       cameraNode     = context.CameraNode;
            Matrix44F projection     = cameraNode.Camera.Projection.ToMatrix44F();
            Matrix44F view           = cameraNode.View;
            Matrix44F viewProjection = projection * view;

            // We simply place the light source "far away" in opposite light ray direction.
            Vector4F lightPositionWorld = new Vector4F(-LightDirection * 10000, 1);

            // Convert to clip space.
            Vector4F lightPositionProj = viewProjection * lightPositionWorld;
            Vector3F lightPositionClip = Vector4F.HomogeneousDivide(lightPositionProj);

            // Convert from clip space [-1, 1] to texture space [0, 1].
            // (The Y axis is flipped because texture space grows downward.)
            Vector2 lightPosition = new Vector2(lightPositionClip.X * 0.5f + 0.5f, -lightPositionClip.Y * 0.5f + 0.5f);

            // We use dot²(forward, -LightDirection) as a smooth S-shaped attenuation
            // curve to reduce the god ray effect when we look orthogonal or away from the sun.
            var   lightDirectionView = view.TransformDirection(LightDirection);
            float z           = Math.Max(0, lightDirectionView.Z);
            float attenuation = z * z;

            // Common effect parameters.
            _parameters0Parameter.SetValue(new Vector4(lightPosition.X, lightPosition.Y, LightRadius * LightRadius, Scale));
            _parameters1Parameter.SetValue(new Vector2(Softness, graphicsDevice.Viewport.AspectRatio));
            _intensityParameter.SetValue((Vector3)Intensity * attenuation);
            _numberOfSamplesParameter.SetValue(NumberOfSamples);
            _gBuffer0Parameter.SetValue(context.GBuffer0);

            // First, create a scene image where occluders are black.
            graphicsDevice.SetRenderTarget(maskedScene);
            _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _sourceTextureParameter.SetValue(source);
            graphicsDevice.SamplerStates[0] = isFloatingPointFormat ? SamplerState.PointClamp : SamplerState.LinearClamp;
            graphicsDevice.SamplerStates[1] = SamplerState.PointClamp; // G-Buffer 0.
            _createMaskPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Downsample image.
            context.SourceTexture = maskedScene;
            context.RenderTarget  = rayImage0;
            context.Viewport      = new Viewport(0, 0, rayImage0.Width, rayImage0.Height);
            _downsampleFilter.Process(context);

            // Compute light shafts by repeatedly blurring toward the light position,
            // ping-ponging between the two ray images.
            _viewportSizeParameter.SetValue(new Vector2(context.Viewport.Width, context.Viewport.Height));
            graphicsDevice.SamplerStates[0] = isFloatingPointFormat ? SamplerState.PointClamp : SamplerState.LinearClamp;
            for (int i = 0; i < NumberOfPasses; i++)
            {
                graphicsDevice.SetRenderTarget(rayImage1);
                _sourceTextureParameter.SetValue(rayImage0);
                _blurPass.Apply();
                graphicsDevice.DrawFullScreenQuad();

                // Put the current result in variable rayImage0.
                MathHelper.Swap(ref rayImage0, ref rayImage1);
            }

            // Combine light shaft image with scene.
            graphicsDevice.SetRenderTarget(target);
            graphicsDevice.Viewport = viewport;
            _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _sourceTextureParameter.SetValue(source);
            _rayTextureParameter.SetValue(rayImage0);
            graphicsDevice.SamplerStates[0] = isFloatingPointFormat ? SamplerState.PointClamp : SamplerState.LinearClamp;
            graphicsDevice.SamplerStates[1] = isFloatingPointFormat ? SamplerState.PointClamp : SamplerState.LinearClamp;
            _combinePass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Clean-up
            // Release effect texture references, return pooled targets, restore the context.
            _sourceTextureParameter.SetValue((Texture2D)null);
            _gBuffer0Parameter.SetValue((Texture2D)null);
            _rayTextureParameter.SetValue((Texture2D)null);
            renderTargetPool.Recycle(maskedScene);
            renderTargetPool.Recycle(rayImage0);
            renderTargetPool.Recycle(rayImage1);
            context.SourceTexture = source;
            context.RenderTarget  = target;
            context.Viewport      = viewport;
        }
示例#58
0
        /// <summary>
        /// Applies a motion blur effect using the velocity buffer(s) stored in
        /// <c>RenderContext.Data</c>. When <c>SoftenEdges</c> is not set, a simple one- or
        /// two-buffer velocity blur is used; otherwise an advanced reconstruction filter is
        /// used that first builds downsampled max-velocity and neighbor-max images.
        /// </summary>
        /// <param name="context">The render context. The result is rendered into
        /// <c>context.RenderTarget</c>; the context itself is not modified.</param>
        /// <exception cref="GraphicsException">
        /// The velocity buffer is not set in <c>context.Data</c>.
        /// </exception>
        protected override void OnProcess(RenderContext context)
        {
            var graphicsDevice = GraphicsService.GraphicsDevice;
              var renderTargetPool = GraphicsService.RenderTargetPool;

              // Get velocity buffers from render context.
              // (LastVelocityBuffer is optional; VelocityBuffer is required.)
              object value0, value1;
              context.Data.TryGetValue(RenderContextKeys.VelocityBuffer, out value0);
              context.Data.TryGetValue(RenderContextKeys.LastVelocityBuffer, out value1);
              var velocityBuffer0 = value0 as Texture2D;
              var velocityBuffer1 = value1 as Texture2D;

              if (velocityBuffer0 == null)
            throw new GraphicsException("VelocityBuffer needs to be set in the render context (RenderContext.Data[\"VelocityBuffer\"]).");

              // Floating-point source textures cannot be sampled with linear filtering.
              if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
            graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
              else
            graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;

              if (!SoftenEdges)
              {
            // ----- Motion blur using one or two velocity buffers
            _effect.CurrentTechnique = _effect.Techniques[0];
            graphicsDevice.SetRenderTarget(context.RenderTarget);
            graphicsDevice.Viewport = context.Viewport;
            _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _sourceTextureParameter.SetValue(context.SourceTexture);
            _numberOfSamplesParameter.SetValue((int)NumberOfSamples);

            _velocityTextureParameter.SetValue(velocityBuffer0);
            if (TextureHelper.IsFloatingPointFormat(velocityBuffer0.Format))
              graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
            else
              graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;

            // Use the dual-buffer pass only when a last velocity buffer is available
            // and its use is enabled.
            if (velocityBuffer1 == null || !UseLastVelocityBuffer)
            {
              _singlePass.Apply();
            }
            else
            {
              _velocityTexture2Parameter.SetValue(velocityBuffer1);
              if (TextureHelper.IsFloatingPointFormat(velocityBuffer1.Format))
            graphicsDevice.SamplerStates[2] = SamplerState.PointClamp;
              else
            graphicsDevice.SamplerStates[2] = SamplerState.LinearClamp;

              _dualPass.Apply();
            }
            graphicsDevice.DrawFullScreenQuad();
              }
              else
              {
            // ----- Advanced motion blur (based on paper "A Reconstruction Filter for Plausible Motion Blur")
            context.ThrowIfCameraMissing();
            context.ThrowIfGBuffer0Missing();

            // The width/height of the current velocity input.
            // NOTE(review): when rendering into an off-screen target the dimensions are taken
            // from the velocity buffer, otherwise from the viewport — confirm this matches
            // the caller's setup (it assumes velocity buffer size == render target size).
            int sourceWidth;
            int sourceHeight;
            if (context.RenderTarget != null)
            {
              sourceWidth = velocityBuffer0.Width;
              sourceHeight = velocityBuffer0.Height;
            }
            else
            {
              sourceWidth = context.Viewport.Width;
              sourceHeight = context.Viewport.Height;
            }

            // The downsampled target width/height.
            int targetWidth = Math.Max(1, (int)(sourceWidth / MaxBlurRadius));
            int targetHeight = Math.Max(1, (int)(sourceHeight / MaxBlurRadius));

            var tempFormat = new RenderTargetFormat(targetWidth, targetHeight, false, SurfaceFormat.Color, DepthFormat.None);
            RenderTarget2D temp0 = renderTargetPool.Obtain2D(tempFormat);
            RenderTarget2D temp1 = renderTargetPool.Obtain2D(tempFormat);

            // ----- Downsample max velocity buffer
            // Halve the size per pass until the downsampled target size is reached; the final
            // pass writes into temp0, intermediate passes into pooled temporaries.
            _effect.CurrentTechnique = _effect.Techniques[0];
            _maxBlurRadiusParameter.SetValue(new Vector2(MaxBlurRadius / sourceWidth, MaxBlurRadius / sourceHeight));
            Texture2D currentVelocityBuffer = velocityBuffer0;
            bool isFinalPass;
            do
            {
              // Downsample to this target size.
              sourceWidth = Math.Max(targetWidth, sourceWidth / 2);
              sourceHeight = Math.Max(targetHeight, sourceHeight / 2);

              // Is this the final downsample pass?
              isFinalPass = (sourceWidth <= targetWidth && sourceHeight <= targetHeight);

              // Get temporary render target for intermediate steps.
              // (tempFormat is mutated in place and reused for each intermediate size.)
              RenderTarget2D temp = null;
              if (!isFinalPass)
              {
            tempFormat.Width = sourceWidth;
            tempFormat.Height = sourceHeight;
            temp = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);
              }

              graphicsDevice.SetRenderTarget(isFinalPass ? temp0 : temp);

              _sourceSizeParameter.SetValue(new Vector2(sourceWidth * 2, sourceHeight * 2));
              _viewportSizeParameter.SetValue(new Vector2(sourceWidth, sourceHeight));
              _velocityTextureParameter.SetValue(currentVelocityBuffer);
              if (TextureHelper.IsFloatingPointFormat(currentVelocityBuffer.Format))
            graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
              else
            graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;

              // The first pass reads the original (float) velocity buffer; later passes read
              // the Color-format intermediates.
              // NOTE(review): these fields are named "…Parameter" but Apply() suggests they
              // are EffectPass instances — confirm the field declarations.
              if (currentVelocityBuffer == velocityBuffer0)
            _downsampleMaxFromFloatBufferParameter.Apply();
              else
            _downsampleMaxParameter.Apply();

              graphicsDevice.DrawFullScreenQuad();

              // Recycle intermediate targets; never recycle the caller-owned velocity buffer.
              if (currentVelocityBuffer != velocityBuffer0)
            GraphicsService.RenderTargetPool.Recycle((RenderTarget2D)currentVelocityBuffer);

              currentVelocityBuffer = temp;
            } while (!isFinalPass);

            // ----- Compute max velocity of neighborhood.
            graphicsDevice.SetRenderTarget(temp1);
            _velocityTextureParameter.SetValue(temp0);
            _neighborMaxPass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            renderTargetPool.Recycle(temp0);

            // Compute motion blur.
            graphicsDevice.SetRenderTarget(context.RenderTarget);
            graphicsDevice.Viewport = context.Viewport;
            _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
            _sourceTextureParameter.SetValue(context.SourceTexture);
            _numberOfSamplesParameter.SetValue((int)NumberOfSamples);
            _velocityTextureParameter.SetValue(velocityBuffer0);
            if (TextureHelper.IsFloatingPointFormat(velocityBuffer0.Format))
              graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
            else
              graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;

            _velocityTexture2Parameter.SetValue(temp1);
            if (TextureHelper.IsFloatingPointFormat(temp1.Format))
              graphicsDevice.SamplerStates[2] = SamplerState.PointClamp;
            else
              graphicsDevice.SamplerStates[2] = SamplerState.LinearClamp;

            _gBuffer0Parameter.SetValue(context.GBuffer0);
            _jitterTextureParameter.SetValue(_jitterTexture);
            _softZExtentParameter.SetValue(0.01f / context.CameraNode.Camera.Projection.Far);  // 1 mm to 10 cm.
            _softEdgePass.Apply();
            graphicsDevice.DrawFullScreenQuad();

            // Clean-up: release effect texture references and the remaining pooled target.
            _sourceTextureParameter.SetValue((Texture2D)null);
            _velocityTextureParameter.SetValue((Texture2D)null);
            _velocityTexture2Parameter.SetValue((Texture2D)null);
            _gBuffer0Parameter.SetValue((Texture2D)null);

            renderTargetPool.Recycle(temp1);
              }
        }