/// <summary>
/// Extracts perspective camera parameters from the frustum, doesn't work with orthographic frustums.
/// </summary>
/// <returns>Perspective camera parameters from the frustum</returns>
public FrustumCameraParams GetCameraParams()
{
    var corners = GetCorners();

    // The camera position is recovered as the single point shared by the
    // right, top and left side planes (the apex of the perspective frustum).
    Vector3 eye = Get3PlanesInterPoint(ref pRight, ref pTop, ref pLeft);

    // Vertical FOV: the angle between the look direction and the top plane
    // normal is (PI/2 - fov/2), so invert and double it.
    float fov = (float)((Math.PI / 2.0 - Math.Acos(Vector3.Dot(pNear.Normal, pTop.Normal))) * 2);

    var cameraParam = new FrustumCameraParams();
    cameraParam.Position = eye;
    cameraParam.LookAtDir = pNear.Normal;
    cameraParam.UpDir = Vector3.Normalize(Vector3.Cross(pRight.Normal, pNear.Normal));
    cameraParam.FOV = fov;
    // Ratio of two corner-edge lengths — presumably width/height of one
    // frustum face; depends on GetCorners() ordering (verify against it).
    cameraParam.AspectRatio = (corners[6] - corners[5]).Length() / (corners[4] - corners[5]).Length();
    // Near/far distances measured from the recovered eye position to the
    // respective plane (plane normal scaled by its D term).
    cameraParam.ZNear = (eye + (pNear.Normal * pNear.D)).Length();
    cameraParam.ZFar = (eye + (pFar.Normal * pFar.D)).Length();
    return cameraParam;
}
/// <summary>
/// Extracts perspective camera parameters from the frustum. Does not work
/// with orthographic frustums.
/// </summary>
/// <returns>Perspective camera parameters recovered from the frustum planes.</returns>
public FrustumCameraParams GetCameraParams()
{
    Vector3[] pts = this.GetCorners();
    FrustumCameraParams cam = new FrustumCameraParams();

    // Camera position = common intersection of the three side planes.
    cam.Position = BoundingFrustum.Get3PlanesInterPoint(ref this.pRight, ref this.pTop, ref this.pLeft);
    cam.LookAtDir = this.pNear.Normal;
    cam.UpDir = Vector3.Normalize(Vector3.Cross(this.pRight.Normal, this.pNear.Normal));

    // The angle between the look direction and the top plane normal is the
    // complement of half the vertical FOV; recover FOV by inverting that.
    double angleToTop = Math.Acos((double)Vector3.Dot(this.pNear.Normal, this.pTop.Normal));
    cam.FOV = (float)((Math.PI / 2.0 - angleToTop) * 2.0);

    // Aspect ratio from two corner-edge lengths — presumably width/height of
    // one frustum face; verify against GetCorners() ordering.
    cam.AspectRatio = (pts[6] - pts[5]).Length() / (pts[4] - pts[5]).Length();

    // Near/far plane distances measured from the recovered camera position.
    cam.ZNear = (cam.Position + this.pNear.Normal * this.pNear.D).Length();
    cam.ZFar = (cam.Position + this.pFar.Normal * this.pFar.D).Length();
    return cam;
}
/// <summary>
/// Creates a new frustum relying on perspective camera parameters.
/// </summary>
/// <param name="cameraParams">The camera params.</param>
/// <returns>The bounding frustum from camera params</returns>
public static BoundingFrustum FromCamera(FrustumCameraParams cameraParams)
{
    // Forward to the expanded overload, unpacking the parameter struct.
    return FromCamera(
        cameraParams.Position,
        cameraParams.LookAtDir,
        cameraParams.UpDir,
        cameraParams.FOV,
        cameraParams.ZNear,
        cameraParams.ZFar,
        cameraParams.AspectRatio);
}
/// <summary>
/// Renders the configured lights additively into the current render target
/// using the G-Buffer SRVs as input. Full-screen lights (ambient/directional,
/// or volumes spanning the whole depth range) use a screen-aligned quad;
/// point lights rasterize a scaled light-volume mesh. All device-context
/// state touched here is backed up first and restored before returning.
/// </summary>
protected override void DoRender()
{
    // Early exit
    if (Lights.Count == 0)
    {
        return;
    }

    // Retrieve device context
    var context = this.DeviceManager.Direct3DContext;

    // backup existing context state (restored at the end of the using block)
    int oldStencilRef = 0;
    RawColor4 oldBlendFactor;
    int oldSampleMaskRef;
    using (var oldVertexLayout = context.InputAssembler.InputLayout)
    using (var oldPixelShader = context.PixelShader.Get())
    using (var oldVertexShader = context.VertexShader.Get())
    using (var oldBlendState = context.OutputMerger.GetBlendState(out oldBlendFactor, out oldSampleMaskRef))
    using (var oldDepthState = context.OutputMerger.GetDepthStencilState(out oldStencilRef))
    using (var oldRSState = context.Rasterizer.State)
    {
        // Assign shader resources - TODO: create array in CreateDeviceDependentResources instead
        // (binds the G-Buffer SRVs plus the depth SRV appended as the last slot)
        context.PixelShader.SetShaderResources(0, gbuffer.SRVs.ToArray().Concat(new[] { gbuffer.DSSRV }).ToArray());

        // Assign the additive blend state (lights accumulate into the target)
        context.OutputMerger.BlendState = blendStateAdd;

        // Retrieve camera parameters (ZNear/ZFar used for volume-vs-camera checks below)
        SharpDX.FrustumCameraParams cameraParams = Frustum.GetCameraParams();

        // For each configured light
        for (var i = 0; i < Lights.Count; i++)
        {
            PerLight light = Lights[i];

            PixelShader shader = null;
            // Assign shader
            if (light.Type == LightType.Ambient)
            {
                shader = psAmbientLight;
            }
            else if (light.Type == LightType.Directional)
            {
                shader = psDirectionalLight;
            }
            else if (light.Type == LightType.Point)
            {
                shader = psPointLight;
            }
            //else if (light.Type == LightType.Spot)
            //    shader = psSpotLight;

            // Update the perLight constant buffer
            // Calculate view space position (for frustum checks)
            // NOTE(review): direction/position are transformed World -> View,
            // so the pixel shader presumably lights in view space — confirm
            // against the HLSL.
            Vector3 lightDir = Vector3.Normalize(Lights[i].Direction);
            Vector4 viewSpaceDir = Vector4.Transform(Vector3.Transform(lightDir, PerObject.World), PerObject.View);
            light.Direction = new Vector3(viewSpaceDir.X, viewSpaceDir.Y, viewSpaceDir.Z);
            Vector4 viewSpacePos = Vector4.Transform(Vector3.Transform(Lights[i].Position, PerObject.World), PerObject.View);
            light.Position = new Vector3(viewSpacePos.X, viewSpacePos.Y, viewSpacePos.Z);

            // Upload the view-space light and bind it to constant-buffer slot 4
            context.UpdateSubresource(ref light, perLightBuffer);
            context.PixelShader.SetConstantBuffer(4, perLightBuffer);

            // Restore the local copy to world-space values, which the
            // volume-placement code below relies on
            light.Position = Lights[i].Position;
            light.Direction = Lights[i].Direction;

            // Check if the light should be considered full screen:
            // ambient/directional always are; otherwise the volume must reach
            // beyond both the near and far planes along view-space Z
            bool isFullScreen = light.Type == LightType.Directional ||
                                light.Type == LightType.Ambient;
            if (!isFullScreen)
            {
                isFullScreen = (cameraParams.ZNear > viewSpacePos.Z - light.Range &&
                                cameraParams.ZFar < viewSpacePos.Z + light.Range);
            }
            if (isFullScreen)
            {
                // Every pixel is potentially lit — no depth test needed
                context.OutputMerger.DepthStencilState = depthDisabled;
                // Use SAQuad
                saQuad.ShaderResources = null;
                saQuad.Shader = shader;
                saQuad.Render();
            }
            else // Render volume
            {
                context.PixelShader.Set(shader);
                context.VertexShader.Set(vertexShader);
                Matrix world = Matrix.Identity;
                MeshRenderer volume = null;;
                switch (light.Type)
                {
                    case LightType.Point:
                        // Prepare world matrix
                        // Ensure no abrupt light edges with +50%
                        world.ScaleVector = Vector3.One * light.Range * 2f;
                        volume = pointLightVolume;
                        break;
                    /* TODO: Spot light support
                     * case LightType.Spot:
                     * // Determine rotation!
                     * var D = Vector3.Normalize(light.Direction);
                     * var s1 = Vector3.Cross(D, Vector3.UnitZ);
                     * var s2 = Vector3.Cross(D, Vector3.UnitY);
                     * Vector3 S;
                     * if (s1.LengthSquared() > s2.LengthSquared())
                     * S = s1;
                     * else
                     * S = s2;
                     * var U = Vector3.Cross(D, S);
                     * Matrix rotate = Matrix.Identity;
                     * rotate.Forward = D;
                     * rotate.Down = U;
                     * rotate.Left = S;
                     *
                     * float scaleZ = light.Range;
                     * // Need to Abs - if negative it will invert our model and result in incorrect normals
                     * float scaleXY = light.Range * Math.Abs((float)Math.Tan(Math.Acos(light.SpotOuterCosine*2)/2));
                     *
                     * world.ScaleVector = new Vector3(scaleXY, scaleXY, scaleZ);
                     * world *= rotate;
                     * volume = spotLightVolume;
                     * break;
                     * */
                    default:
                        // Unsupported light type: skip it entirely
                        continue;
                }
                world.TranslationVector = light.Position;
                volume.World = world;

                // Transpose the PerObject matrices (HLSL expects column-major)
                var transposed = PerObject;
                transposed.World = volume.World * PerObject.World;
                transposed.WorldViewProjection = transposed.World * PerObject.ViewProjection;
                transposed.Transpose();
                context.UpdateSubresource(ref transposed, PerObjectBuffer);

                if (cameraParams.ZFar < viewSpacePos.Z + light.Range)
                {
                    // Cull the back face and only render where there is something
                    // behind the front face.
                    context.Rasterizer.State = rsCullBack;
                    context.OutputMerger.DepthStencilState = depthLessThan;
                }
                else
                {
                    // Cull front faces and only render where there is something
                    // before the back face.
                    context.Rasterizer.State = rsCullFront;
                    context.OutputMerger.DepthStencilState = depthGreaterThan;
                }
                volume.Render();

                // Show the light volumes for debugging
                // (Debug == 1 presumably means "greater-than" depth view; confirm)
                if (Debug > 0)
                {
                    if (Debug == 1)
                    {
                        context.OutputMerger.SetDepthStencilState(depthGreaterThan);
                    }
                    else
                    {
                        context.OutputMerger.SetDepthStencilState(depthLessThan);
                    }
                    context.PixelShader.Set(psDebugLight);
                    context.Rasterizer.State = rsWireframe;
                    volume.Render();
                }
            }
        }

        // Reset pixel shader resources (all to null) so the G-Buffer can be
        // written again next frame without hazard warnings
        context.PixelShader.SetShaderResources(0, new ShaderResourceView[gbuffer.SRVs.Count + 1]);

        // Restore context states
        context.PixelShader.Set(oldPixelShader);
        context.VertexShader.Set(oldVertexShader);
        context.InputAssembler.InputLayout = oldVertexLayout;
        context.OutputMerger.SetBlendState(oldBlendState, oldBlendFactor, oldSampleMaskRef);
        context.OutputMerger.SetDepthStencilState(oldDepthState, oldStencilRef);
        context.Rasterizer.State = oldRSState;
    }
}