} // Lerp

#endregion

#region Generate Spherical Harmonic from CubeMap

/// <summary>
/// Generate a spherical harmonic from the faces of a cubemap, treating each pixel as a light source and averaging the result.
/// This method only accepts RGBM and color textures.
/// </summary>
/// <param name="cubeTexture">Cubemap with a Color surface format (optionally RGBM encoded).</param>
/// <returns>The second order spherical harmonic averaged over the sphere.</returns>
/// <exception cref="InvalidOperationException">Thrown when the texture's surface format is not Color.</exception>
public static SphericalHarmonicL2 GenerateSphericalHarmonicFromCubeTexture(TextureCube cubeTexture)
{
    if (cubeTexture.Resource.Format != SurfaceFormat.Color)
    {
        throw new InvalidOperationException("Spherical Harmonic: the texture has to have a color surface format. DXT and floating point formats are not supported for the moment.");
    }

    // Look-at directions and up vectors indexed by CubeMapFace value:
    // PositiveX, NegativeX, PositiveY, NegativeY, PositiveZ, NegativeZ.
    // Note: in XNA's right-handed convention the +Z face looks down -Z (and vice versa).
    Vector3[] faceDirections =
    {
        new Vector3( 1, 0,  0), new Vector3(-1,  0,  0),
        new Vector3( 0, 1,  0), new Vector3( 0, -1,  0),
        new Vector3( 0, 0, -1), new Vector3( 0,  0,  1),
    };
    Vector3[] faceUpVectors =
    {
        new Vector3(0, 1,  0), new Vector3(0, 1,  0),
        new Vector3(0, 0,  1), new Vector3(0, 0, -1),
        new Vector3(0, 1,  0), new Vector3(0, 1,  0),
    };

    SphericalHarmonicL2 accumulated = new SphericalHarmonicL2();

    // Process each of the six cubemap faces and accumulate its contribution.
    for (int faceIndex = 0; faceIndex < 6; faceIndex++)
    {
        CubeMapFace faceId = (CubeMapFace)faceIndex;

        // Orientation of this face, used to map texels to world-space directions.
        Matrix cubeFaceMatrix = Matrix.CreateLookAt(Vector3.Zero, faceDirections[faceIndex], faceUpVectors[faceIndex]);

        // Read back the face's texels.
        Color[] texels = new Color[cubeTexture.Size * cubeTexture.Size];
        cubeTexture.Resource.GetData(faceId, texels);

        accumulated += ExtractSphericalHarmonicForCubeFace(cubeFaceMatrix, texels, cubeTexture.Size, cubeTexture.IsRgbm, cubeTexture.RgbmMaxRange);
    }

    // Average out over the sphere.
    return accumulated.GetWeightedAverageLightInputFromSphere();
} // GenerateSphericalHarmonicFromCubeTexture
} // SampleDirection

#endregion

#region Generate Spherical Harmonic from CubeMap

/// <summary>
/// Generate a spherical harmonic from the faces of a cubemap, treating each pixel as a light source and averaging the result.
/// This method only accepts RGBM and color textures.
/// </summary>
/// <param name="cubeMap">Cubemap with a Color surface format (optionally RGBM encoded).</param>
/// <returns>The first order spherical harmonic averaged over the sphere.</returns>
/// <exception cref="InvalidOperationException">Thrown when the texture's surface format is not Color.</exception>
public static SphericalHarmonicL1 GenerateSphericalHarmonicFromCubeMap(TextureCube cubeMap)
{
    // Consistency/robustness fix: the L2 variant of this method validates the surface format before calling
    // GetData<Color>; without this guard a DXT or floating-point cubemap fails later with an obscure error.
    if (cubeMap.Resource.Format != SurfaceFormat.Color)
    {
        throw new InvalidOperationException("Spherical Harmonic: the texture has to have a color surface format. DXT and floating point formats are not supported for the moment.");
    }

    SphericalHarmonicL1 sh = new SphericalHarmonicL1();

    // Extract the 6 faces of the cubemap.
    for (int face = 0; face < 6; face++)
    {
        CubeMapFace faceId = (CubeMapFace)face;

        // Get the orientation for this face, used to map texels to world-space directions.
        // Note: in XNA's right-handed convention the +Z face looks down -Z (and vice versa).
        Matrix cubeFaceMatrix;
        switch (faceId)
        {
            case CubeMapFace.PositiveX:
                cubeFaceMatrix = Matrix.CreateLookAt(Vector3.Zero, new Vector3(1, 0, 0), new Vector3(0, 1, 0));
                break;
            case CubeMapFace.NegativeX:
                cubeFaceMatrix = Matrix.CreateLookAt(Vector3.Zero, new Vector3(-1, 0, 0), new Vector3(0, 1, 0));
                break;
            case CubeMapFace.PositiveY:
                cubeFaceMatrix = Matrix.CreateLookAt(Vector3.Zero, new Vector3(0, 1, 0), new Vector3(0, 0, 1));
                break;
            case CubeMapFace.NegativeY:
                cubeFaceMatrix = Matrix.CreateLookAt(Vector3.Zero, new Vector3(0, -1, 0), new Vector3(0, 0, -1));
                break;
            case CubeMapFace.PositiveZ:
                cubeFaceMatrix = Matrix.CreateLookAt(Vector3.Zero, new Vector3(0, 0, -1), new Vector3(0, 1, 0));
                break;
            case CubeMapFace.NegativeZ:
                cubeFaceMatrix = Matrix.CreateLookAt(Vector3.Zero, new Vector3(0, 0, 1), new Vector3(0, 1, 0));
                break;
            default:
                throw new ArgumentOutOfRangeException();
        }

        // Read back the face's texels.
        Color[] colorArray = new Color[cubeMap.Size * cubeMap.Size];
        cubeMap.Resource.GetData<Color>(faceId, colorArray);

        // Extract the spherical harmonic for this face and accumulate it.
        sh += ExtractSphericalHarmonicForCubeFace(cubeFaceMatrix, colorArray, cubeMap.Size, cubeMap.IsRgbm, cubeMap.RgbmMaxRange);
    }

    // Average out over the sphere.
    return sh.GetWeightedAverageLightInputFromSphere();
} // GenerateSphericalHarmonicFromCubeMap
} // Begin

#endregion

#region Render

/// <summary>
/// Render the point light as a deferred (light pre-pass) screen-space pass.
/// </summary>
/// <param name="diffuseColor">Light diffuse color.</param>
/// <param name="position">Light position in world space.</param>
/// <param name="intensity">Light intensity multiplier.</param>
/// <param name="radius">Light radius; also scales the default bounding sphere volume.</param>
/// <param name="shadowTexture">Cube shadow map, or null for an unshadowed light.</param>
/// <param name="worldMatrix">World matrix applied to a custom clip volume when it is rendered in world space.</param>
/// <param name="renderClipVolumeInLocalSpace">When true, a custom clip volume is rendered with the identity world matrix.</param>
/// <param name="clipVolume">Optional custom clip volume model; when null, a bounding sphere scaled by the radius is used.</param>
/// <exception cref="InvalidOperationException">Thrown when any part of the render pass fails; the original exception is preserved as the inner exception.</exception>
public void Render(Color diffuseColor, Vector3 position, float intensity, float radius, TextureCube shadowTexture,
                   Matrix worldMatrix, bool renderClipVolumeInLocalSpace, Model clipVolume = null)
{
    try
    {
        // It is possible to use the depth information and the stencil buffer to mark in a two-pass rendering exactly
        // which pixels are affected by the light. This helps to reduce pixel shader load and at the same time allows
        // implementing clip volumes. With clip volumes you can place, for example, a box and the light won’t bleed
        // outside this box even if the radius is bigger. I.e. you can place lights in a wall and the opposite side
        // of that wall won’t be illuminated.
        //
        // The problem is the Z-Buffer is not directly available because XNA 4 does not allow sharing depth buffers
        // between render targets. However, the Z-Buffer can be reconstructed with a shader and the G-Buffer.
        //
        // If no custom clip volume is used (i.e. the default sphere) and the light is far away, then vertex
        // processing could exceed pixel processing. Some games use glow planes (a colored mask) to show the light’s
        // bright spot from far away; this is good for open-environment games but not for interior games. Instead,
        // games like Killzone 2 skip the first (stencil) pass on these far lights and only run the second pass
        // (which still performs part of the filtering). The far-plane "problem" is also addressed by this optimization.
        //
        // Another optimization made here is the use of a Softimage sphere instead of procedural spheres. Models
        // exported from such tools are optimized for access patterns (e.g. a stress test went from 20/21 to 22 frames).
        // Not a big change, but a change nevertheless.
        //
        // Instancing some lights was also researched, but no article discusses this technique, and there are good
        // reasons it is not useful:
        //   1) It can only be used with spheres (not custom clip volumes).
        //   2) The dynamic buffers used for the instancing information could be too dynamic or difficult to maintain.
        //   3) The stencil optimization could be very important in interior games and cannot be mixed with instancing
        //      and custom clip volumes. Extra complexity would be added (including the use of vfetch for Xbox 360).

        // Fill the stencil buffer with 0s.
        EngineManager.Device.Clear(ClearOptions.Stencil, Color.White, 1.0f, 0);

        #region Set Parameters

        spLightColor.Value = diffuseColor;
        // Light position is transformed into view space before being handed to the shader.
        spLightPosition.Value = Vector3.Transform(position, viewMatrix);
        spLightIntensity.Value = intensity;
        // The shader takes the reciprocal so the per-pixel attenuation avoids a divide.
        spInvLightRadius.Value = 1 / radius;
        if (shadowTexture != null)
        {
            spShadowTexture.Value = shadowTexture;
            // NOTE(review): Invert(Transpose(Invert(V))) algebraically simplifies to Transpose(V), which equals the
            // true view inverse only when the view matrix is a pure rotation (no translation/scale). Confirm whether
            // the shader really wants the full inverse view here or just the transposed rotation part.
            spViewInverse.Value = Matrix.Invert(Matrix.Transpose(Matrix.Invert(viewMatrix)));
            spTextureSize.Value = new Vector3(shadowTexture.Size, shadowTexture.Size, shadowTexture.Size);
            spTextureSizeInv.Value = new Vector3(1.0f / shadowTexture.Size, 1.0f / shadowTexture.Size, 1.0f / shadowTexture.Size);
        }
        else
        {
            // No shadows: bind a black cube texture so the shadow sampler stays valid.
            spShadowTexture.Value = TextureCube.BlackTexture;
        }

        // Compute the light world matrix.
        Matrix boundingLightObjectWorldMatrix;
        if (clipVolume != null)
        {
            boundingLightObjectWorldMatrix = renderClipVolumeInLocalSpace ? Matrix.Identity : worldMatrix;
        }
        else
        {
            // Scale according to light radius, and translate it to light position.
            boundingLightObjectWorldMatrix = Matrix.CreateScale(radius) * Matrix.CreateTranslation(position);
        }
        spWorldViewProj.Value = boundingLightObjectWorldMatrix * viewMatrix * projectionMatrix;
        spWorldView.Value = boundingLightObjectWorldMatrix * viewMatrix;

        #endregion

        // http://en.wikipedia.org/wiki/Angular_diameter
        // The formula was inspired from Guerilla´s GDC 09 presentation.
        // Camera position is recovered from the inverted view matrix.
        float distanceToCamera = Vector3.Distance(Matrix.Invert(viewMatrix).Translation, position);
        float angularDiameter = (float)(2 * Math.Atan(radius / distanceToCamera));
        // Only lights that cover a significant part of the field of view get the two-pass stencil optimization.
        if (angularDiameter > 0.2f * (3.1416f * fieldOfView / 180.0f)) // 0.2f is the original value.
        {
            // This only works when the clip volume does not intercept the camera´s far plane.

            // First pass.
            // The stencil buffer was already filled with 0 and if the back of the clip volume
            // is in front of the geometry then it marks the pixel as useful.
            // Doing it this way means no special case is needed when the clip volume intercepts the camera’s
            // near plane, and custom volume support is retained.
            Resource.CurrentTechnique = pointLightStencilTechnique;
            EngineManager.Device.RasterizerState = RasterizerState.CullCounterClockwise;
            EngineManager.Device.BlendState = stencilBlendState;
            EngineManager.Device.DepthStencilState = stencilDepthStencilState;
            Resource.CurrentTechnique.Passes[0].Apply();
            if (clipVolume != null)
            {
                clipVolume.Render();
            }
            else
            {
                boundingLightObject.Render();
            }

            // Second pass.
            // Render the clip volume back faces with the light shader.
            // The pixels with stencil value of 1 that are in front of the geometry will be discarded.
            Resource.CurrentTechnique = shadowTexture != null ? pointLightWithShadowsTechnique : pointLightTechnique;
            EngineManager.Device.RasterizerState = RasterizerState.CullClockwise;
            EngineManager.Device.BlendState = lightBlendState;
            EngineManager.Device.DepthStencilState = lightDepthStencilState;
            Resource.CurrentTechnique.Passes[0].Apply();
            if (clipVolume != null)
            {
                clipVolume.Render();
            }
            else
            {
                boundingLightObject.Render();
            }
        }
        else // Far lights
        {
            // Single pass: render the clip volume front faces with the light shader.
            Resource.CurrentTechnique = shadowTexture != null ?
                pointLightWithShadowsTechnique : pointLightTechnique;
            EngineManager.Device.RasterizerState = RasterizerState.CullCounterClockwise;
            //EngineManager.Device.BlendState = lightBlendState; // No need to set it.
            EngineManager.Device.DepthStencilState = DepthStencilState.DepthRead;
            Resource.CurrentTechnique.Passes[0].Apply();
            if (clipVolume != null)
            {
                clipVolume.Render();
            }
            else
            {
                boundingLightObject.Render();
            }
        }
    }
    catch (Exception e)
    {
        throw new InvalidOperationException("Light Pre Pass Point Light: Unable to render.", e);
    }
} // Render