// Constructor: sets up the occlusion-culling buffer, a secondary top-down debug
// camera, the scene query, a debug renderer, and the profiler counters used by
// this screen.
public OcclusionCullingScreen(IServiceLocator services)
  : base(services)
{
  _sceneNodes = new List<SceneNode>();

  // Create new occlusion buffer with default settings.
  OcclusionBuffer = new OcclusionBuffer(GraphicsService);
  OcclusionBuffer.ProgressiveShadowCasterCulling = true;

  EnableCulling = true;

  // Create a second camera for rendering a top-down view of the scene.
  // (90° FOV, square aspect ratio, near = 1, far = 512.)
  var topDownPerspective = new PerspectiveProjection();
  topDownPerspective.SetFieldOfView(MathHelper.ToRadians(90), 1, 1, 512);
  _topDownCameraNode = new CameraNode(new Camera(topDownPerspective));
  // Place the camera high above the scene and aim it straight down.
  _topDownCameraNode.PoseWorld = new Pose(new Vector3F(-10, 120, -10));
  _topDownCameraNode.LookAt(new Vector3F(-10, 0, -10), Vector3F.UnitZ);

  _sceneQuery = new CustomSceneQuery();

  // No sprite font needed here; the debug renderer is only used for shapes.
  _debugRenderer = new DebugRenderer(GraphicsService, null);

  // The DigitalRune Profiler is used to measure execution times.
  // (Values are scaled by 1e3, i.e. reported in milliseconds.)
  Profiler.SetFormat("Occlusion.Render", 1e3f, "[ms]");
  Profiler.SetFormat("Occlusion.Query", 1e3f, "[ms]");
}
// Constructor: creates the mesh and debug renderers, an empty scene, and a
// default perspective camera looking at the scene origin.
public MyGraphicsScreen(IGraphicsService graphicsService)
  : base(graphicsService)
{
  _meshRenderer = new MeshRenderer();

  var contentManager = ServiceLocator.Current.GetInstance<ContentManager>();
  var spriteFont = contentManager.Load<SpriteFont>("SpriteFont1");
  _debugRenderer = new DebugRenderer(graphicsService, spriteFont);

  Scene = new Scene();

  // Add a camera with a perspective projection.
  var projection = new PerspectiveProjection();
  projection.SetFieldOfView(
    ConstantsF.PiOver4,
    graphicsService.GraphicsDevice.Viewport.AspectRatio,
    0.1f,
    100.0f);
  CameraNode = new CameraNode(new Camera(projection))
  {
    Name = "CameraPerspective",
    // PoseWorld is the inverse of the view matrix: camera at (10, 5, 10),
    // looking at (0, 1, 0), with +Y up.
    PoseWorld = Pose.FromMatrix(Matrix44F.CreateLookAt(new Vector3F(10, 5, 10), new Vector3F(0, 1, 0), new Vector3F(0, 1, 0)).Inverse),
  };
  Scene.Children.Add(CameraNode);
}
/// <summary>
/// Creates a perspective projection from a 4x4 transformation matrix.
/// </summary>
/// <param name="matrix">The projection matrix.</param>
/// <returns>The perspective projection.</returns>
public static PerspectiveProjection FromMatrix(Matrix44F matrix)
{
  var result = new PerspectiveProjection();
  result.Set(matrix);
  return result;
}
// Constructor: sets up the camera, the collision domain with a test level,
// a character controller, and a trigger volume.
public CharacterControllerSample(Microsoft.Xna.Framework.Game game)
  : base(game)
{
  SampleFramework.IsMouseVisible = false;
  GraphicsScreen.ClearBackground = true;
  GraphicsScreen.BackgroundColor = Color.CornflowerBlue;

  // Create a camera.
  var projection = new PerspectiveProjection();
  projection.SetFieldOfView(
    ConstantsF.PiOver4,
    GraphicsService.GraphicsDevice.Viewport.AspectRatio,
    0.1f,
    1000.0f);
  _cameraNode = new CameraNode(new Camera(projection));
  GraphicsScreen.CameraNode = _cameraNode;

  // We use one collision domain that computes collision info for all game objects.
  _domain = new CollisionDomain(new CollisionDetection());

  // Create collision objects for a test level.
  CharacterControllerLevel.Load(_domain);

  // Add collision filter:
  // The _domain contains a lot of collision objects for obstacles in the level.
  // We do not need to compute contacts between these static obstacles. To avoid
  // this, the CharacterControllerLevel puts all level collision objects into
  // the collision group 1. We add a broad phase collision filter which filters out
  // collision checks between objects of collision group 1.
  _domain.BroadPhase.Filter = new DelegatePairFilter<CollisionObject>(
    pair =>
    {
      // Reject pairs where both objects are static level geometry (group 1).
      if (pair.First.CollisionGroup == 1 && pair.Second.CollisionGroup == 1)
        return false;

      return true;
    });

  // Create character controller.
  _character = new CharacterController(_domain);
  _character.Position = new Vector3F(0, 0, 1);

  // Create the trigger volume.
  _triggerVolume = new CollisionObject(
    new GeometricObject(new SphereShape(3), new Pose(new Vector3F(-5, 0, 5))))
  {
    // We do not want to compute detailed contact information (contact points, contact
    // normal vectors, etc.). We are only interested if the object touches another object or not.
    // Therefore, we set the collision object type to "trigger". Trigger objects are better for
    // performance than normal collision objects. Additionally, the character controller should
    // be able to walk through the trigger volume. The character controller treats objects as
    // solids if it finds contact information (contact positions with contact normal vectors).
    Type = CollisionObjectType.Trigger
  };
  _domain.CollisionObjects.Add(_triggerVolume);
}
// Constructor: initializes the camera node from the screen's viewport and
// sets the starting position and yaw of the camera.
public CameraComponent(GameScreen sc)
{
  // Start position/orientation of the camera.
  _position = new Vector3F(50f, 120.0f, 50f);
  _yaw = MathHelper.ToRadians(45.0f);

  // 45° vertical FOV, viewport aspect ratio, near 0.1, far 1000.
  var perspective = new PerspectiveProjection();
  perspective.SetFieldOfView(MathHelper.PiOver4, sc._graphicsService.GraphicsDevice.Viewport.AspectRatio, 0.1f, 1000.0f);
  _cameraNode = new CameraNode(new Camera(perspective));
}
// Returns a default projection suitable for projector lights.
private static PerspectiveProjection GetDefaultProjection()
{
  // The default values of the PerspectiveProjection are too big for typical
  // projector lights. Therefore, we use our own default values.
  const float near = 0.25f;
  const float far = 5.0f;
  const float aspectRatio = 4.0f / 3.0f;
  const float fieldOfViewY = 45.0f * ConstantsF.Pi / 180.0f;  // 45°

  var projection = new PerspectiveProjection();
  projection.SetFieldOfView(fieldOfViewY, aspectRatio, near, far);
  // Was "return(projection);" — normalized to match the identical sibling
  // implementation of GetDefaultProjection elsewhere in this code base.
  return projection;
}
//--------------------------------------------------------------
#region Methods
//--------------------------------------------------------------

/// <summary>
/// Called when the game object is loaded: creates the camera node with a
/// perspective projection matching the current viewport aspect ratio.
/// </summary>
protected override void OnLoad()
{
  var graphicsService = _services.GetInstance<IGraphicsService>();

  // Define camera projection: 45° FOV, near 0.1, far 1000.
  var perspective = new PerspectiveProjection();
  perspective.SetFieldOfView(
    ConstantsF.PiOver4,
    graphicsService.GraphicsDevice.Viewport.AspectRatio,
    0.1f,
    1000.0f);

  // Create a camera node.
  CameraNode = new CameraNode(new Camera(perspective));
}
// Constructor: sets a fixed bird's-eye camera, a low-pass filter for the
// accelerometer input, the enabled touch gestures, and the physics objects.
public WindowsPhoneSample(Microsoft.Xna.Framework.Game game)
  : base(game)
{
  GraphicsScreen.ClearBackground = true;
  GraphicsScreen.BackgroundColor = Color.CornflowerBlue;

  // Set a fixed camera.
  var projection = new PerspectiveProjection();
  projection.SetFieldOfView(
    MathHelper.ToRadians(30),
    GraphicsService.GraphicsDevice.Viewport.AspectRatio,
    1f,
    1000.0f);
  // Camera 12 units above the target, looking straight down, -Z up on screen.
  Vector3F cameraTarget = new Vector3F(0, 1, 0);
  Vector3F cameraPosition = new Vector3F(0, 12, 0);
  Vector3F cameraUpVector = new Vector3F(0, 0, -1);
  GraphicsScreen.CameraNode = new CameraNode(new Camera(projection))
  {
    View = Matrix44F.CreateLookAt(cameraPosition, cameraTarget, cameraUpVector),
  };

  // We use the accelerometer to control the camera view. The accelerometer registers every
  // little change, but we do not want a shaky camera. We can use a low-pass filter to smooth
  // the sensor signal.
  _lowPassFilter = new LowPassFilter(new Vector3F(0, -1, 0))
  {
    TimeConstant = 0.15f, // Let's try a time constant of 0.15 seconds.
                          // When increasing the time constant the camera becomes more stable,
                          // but also slower.
  };

  // Enable touch gestures.
  _originalEnabledGestures = TouchPanel.EnabledGestures;  // Saved so the original set can be restored later.
  TouchPanel.EnabledGestures =
    GestureType.Tap       // Tap is used to drop new bodies.
    | GestureType.Flick   // Flick creates an explosion.
    | GestureType.Hold    // Hold to clear the scene.
    | GestureType.Pinch;  // Pinch can be used to zoom in or out.

  InitializePhysics();
}
// Visual test for PerspectiveProjection with a custom near clip plane:
// draws the view volume, the near clip plane, and random sample points
// that fall inside the clipped projection's clip space.
private void TestClippedProjection()
{
  // Fixed seed so the point cloud is reproducible across runs.
  RandomHelper.Random = new Random(1234567);

  var p = new PerspectiveProjection();
  p.SetOffCenter(-0.1f, 0.2f, -0.1f, 0.1f, 0.1f, 1);
  _debugRenderer.DrawViewVolume(p.ViewVolume, new Pose(new Vector3F(0, 2, 0)), Color.Red, true, false);

  // Tilted near clip plane (oblique clipping).
  p.NearClipPlane = new Plane(new Vector3F(-0.1f, +0.1f, 1).Normalized, -0.4f);

  PlaneShape.MeshSize = 2;
  _debugRenderer.DrawShape(new PlaneShape(p.NearClipPlane.Value), new Pose(new Vector3F(0, 2, 0)), Vector3F.One, Color.Green, false, false);

  Matrix44F m = p.ToMatrix44F();

  // The sample region is loop-invariant: compute the expanded AABB once.
  // (Previously this was recomputed in every one of the 100000 iterations.)
  Aabb aabb = p.ViewVolume.GetAabb(Pose.Identity);
  aabb.Minimum -= new Vector3F(1);
  aabb.Maximum += new Vector3F(1);

  for (int i = 0; i < 100000; i++)
  {
    // Uniform random sample inside the expanded AABB.
    float x = RandomHelper.Random.NextFloat(aabb.Minimum.X, aabb.Maximum.X);
    float y = RandomHelper.Random.NextFloat(aabb.Minimum.Y, aabb.Maximum.Y);
    float z = RandomHelper.Random.NextFloat(aabb.Minimum.Z, aabb.Maximum.Z);

    //if (RandomHelper.Random.NextBool())
    //  x = 0;
    //else
    //  y = 0;

    // Transform to clip space and apply the perspective divide.
    Vector4F c = m * new Vector4F(x, y, z, 1);
    c /= c.W;

    Color color = Color.Orange;
    // Skip points outside the clip-space box ([-1,1] in x/y, [0,1] in z).
    if (c.X < -1 || c.X > 1 || c.Y < -1 || c.Y > 1 || c.Z < 0 || c.Z > 1)
      continue;// color = Color.Gray;

    _debugRenderer.DrawPoint(new Vector3F(x, y + 2, z), color, false);
  }
}
// Constructor: sets a fixed top-down camera and initializes the physics objects.
public WpRagdollSample(Microsoft.Xna.Framework.Game game)
  : base(game)
{
  GraphicsScreen.ClearBackground = true;
  GraphicsScreen.BackgroundColor = Color.CornflowerBlue;

  // Set a fixed camera.
  var projection = new PerspectiveProjection();
  projection.SetFieldOfView(
    MathHelper.ToRadians(30),
    GraphicsService.GraphicsDevice.Viewport.AspectRatio,
    1f,
    100.0f);
  // Camera 12 units above the target, looking straight down, -Z up on screen.
  Vector3F cameraTarget = new Vector3F(0, 1, 0);
  Vector3F cameraPosition = new Vector3F(0, 12, 0);
  Vector3F cameraUpVector = new Vector3F(0, 0, -1);
  GraphicsScreen.CameraNode = new CameraNode(new Camera(projection))
  {
    View = Matrix44F.CreateLookAt(cameraPosition, cameraTarget, cameraUpVector),
  };

  InitializePhysics();
}
// Constructor: resolves required services, creates the camera node, registers
// it with the default screen and caches the XNA view/projection matrices.
public CameraObject()
{
  base.Name = "Camera";

  _inputService = ServiceLocator.Current.GetInstance<IInputService>();
  var graphicsService = ServiceLocator.Current.GetInstance<IGraphicsService>();
  // NOTE(review): gameObjectManager is resolved but never used below — confirm
  // whether the lookup is needed for a side effect or can be removed.
  var gameObjectManager = ServiceLocator.Current.GetInstance<IGameObjectService>();
  var screen = ((BasicScreen)graphicsService.Screens["Default"]);
  //_debugRenderer = screen.DebugRenderer;

  // 45° FOV perspective projection using the current viewport aspect ratio.
  PerspectiveProjection projection = new PerspectiveProjection();
  projection.SetFieldOfView(Microsoft.Xna.Framework.MathHelper.PiOver4, graphicsService.GraphicsDevice.Viewport.AspectRatio, 0.1f, 1000.0f);
  _cameraNode = new CameraNode(new Camera(projection));
  screen.Scene.Children.Add(_cameraNode);
  screen.ActiveCamera = _cameraNode;

  ResetCamera();

  // Cache the camera matrices converted to XNA types.
  View = _cameraNode.View.ToXna();
  Projection = _cameraNode.Camera.Projection.ToXna();
}
// Returns the default projection used for projector lights.
private static PerspectiveProjection GetDefaultProjection()
{
  // Typical projector lights need a much smaller frustum than the
  // PerspectiveProjection defaults, so we define our own values here.
  const float aspectRatio = 4.0f / 3.0f;
  const float fieldOfViewY = 45.0f * ConstantsF.Pi / 180.0f;  // 45° in radians
  const float near = 0.25f;
  const float far = 5.0f;

  var defaultProjection = new PerspectiveProjection();
  defaultProjection.SetFieldOfView(fieldOfViewY, aspectRatio, near, far);
  return defaultProjection;
}
// OnLoad() is called when the GameObject is added to the IGameObjectService.
// Creates a rigid body and a model for this object depending on _type (1-7),
// then adds both to the physics simulation and the scene at a random pose.
protected override void OnLoad()
{
  var contentManager = _services.GetInstance<ContentManager>();

  if (_type == 1)
  {
    // A simple cube.
    RigidBody = new RigidBody(new BoxShape(1, 1, 1));
    ModelNode = contentManager.Load<ModelNode>("RustyCube/RustyCube").Clone();
  }
  else if (_type == 2)
  {
    // Another simple cube.
    RigidBody = new RigidBody(new BoxShape(1, 1, 1));
    ModelNode = contentManager.Load<ModelNode>("MetalGrateBox/MetalGrateBox").Clone();
  }
  else if (_type == 3)
  {
    // A TV-like box.
    RigidBody = new RigidBody(new BoxShape(1, 0.6f, 0.8f)) { UserData = "TV" };
    ModelNode = contentManager.Load<ModelNode>("TVBox/TVBox");

    if (ModelNode.Children.OfType<LightNode>().Count() == 0)
    {
      // This is the first time the "TVBox" is loaded.

      // Add a projector light to the model that projects the TV screen. The
      // TV screen is the emissive part of the TV mesh.
      var meshNode = ModelNode.Children.OfType<MeshNode>().First();
      var material = meshNode.Mesh.Materials.First(m => m.Name == "TestCard");

      // Get texture from material.
      // Note: In XNA the effect parameter type is Texture. In MonoGame it is Texture2D.
      Texture2D texture;
      EffectParameterBinding parameterBinding = material["Material"].ParameterBindings["EmissiveTexture"];
      if (parameterBinding is EffectParameterBinding<Texture>)
        texture = (Texture2D)((EffectParameterBinding<Texture>)parameterBinding).Value;
      else
        texture = ((EffectParameterBinding<Texture2D>)parameterBinding).Value;

      var projection = new PerspectiveProjection();
      projection.Near = 0.55f;
      projection.Far = 3.0f;
      projection.SetFieldOfView(MathHelper.ToRadians(60), 0.76f / 0.56f);
      var projectorLight = new ProjectorLight(texture, projection);
      projectorLight.Attenuation = 4;
      var projectorLightNode = new LightNode(projectorLight);
      projectorLightNode.LookAt(new Vector3F(0, 0.2f, 0), Vector3F.Zero, Vector3F.UnitZ);

      // Attach the projector light to the model.
      ModelNode.Children.Add(projectorLightNode);
    }

    ModelNode = ModelNode.Clone();
  }
  else if (_type == 4)
  {
    // A "magic" sphere with a colored point light.
    RigidBody = new RigidBody(new SphereShape(0.25f));
    ModelNode = contentManager.Load<ModelNode>("MagicSphere/MagicSphere");

    if (ModelNode.Children.OfType<LightNode>().Count() == 0)
    {
      // This is the first time the "MagicSphere" is loaded.

      // Change the size of the sphere.
      var meshNode = ModelNode.Children.OfType<MeshNode>().First();
      meshNode.ScaleLocal = new Vector3F(0.5f);

      // Disable shadows. (The sphere acts as a light source.)
      meshNode.CastsShadows = false;

      // Add a point light.
      var pointLight = new PointLight
      {
        Color = new Vector3F(1, 1, 1),
        DiffuseIntensity = 4,
        SpecularIntensity = 4,
        Range = 3,
        Attenuation = 1,
        Texture = contentManager.Load<TextureCube>("MagicSphere/ColorCube"),
      };
      var pointLightNode = new LightNode(pointLight)
      {
        // The point light uses shadow mapping to cast an omnidirectional shadow.
        Shadow = new CubeMapShadow
        {
          PreferredSize = 64,
        }
      };

      ModelNode.Children.Add(pointLightNode);
    }

    ModelNode = ModelNode.Clone();
  }
  else if (_type == 5)
  {
    // A sphere of glass (or "bubble").
    RigidBody = new RigidBody(new SphereShape(0.3f));
    ModelNode = contentManager.Load<ModelNode>("Bubble/Bubble").Clone();
    ModelNode.GetDescendants().OfType<MeshNode>().First().ScaleLocal = new Vector3F(0.3f);
  }
  else if (_type == 6)
  {
    // A rusty barrel with multiple levels of detail (LODs).
    RigidBody = new RigidBody(new CylinderShape(0.35f, 1));
    ModelNode = contentManager.Load<ModelNode>("Barrel/Barrel").Clone();
  }
  else
  {
    // A cube consisting of a frame and transparent sides.
    RigidBody = new RigidBody(new BoxShape(1, 1, 1));
    ModelNode = contentManager.Load<ModelNode>("GlassBox/GlassBox").Clone();
  }

  SampleHelper.EnablePerPixelLighting(ModelNode);

  // Set a random pose.
  var randomPosition = new Vector3F(
    RandomHelper.Random.NextFloat(-10, 10),
    RandomHelper.Random.NextFloat(2, 5),
    RandomHelper.Random.NextFloat(-20, 0));
  RigidBody.Pose = new Pose(randomPosition, RandomHelper.Random.NextQuaternionF());
  ModelNode.PoseWorld = RigidBody.Pose;

  // Add rigid body to physics simulation and model to scene.
  var simulation = _services.GetInstance<Simulation>();
  simulation.RigidBodies.Add(RigidBody);

  var scene = _services.GetInstance<IScene>();
  scene.Children.Add(ModelNode);
}
// Constructor: builds a deferred-rendering scene with standard sample objects,
// then places a reflective "bubble" mesh whose environment is captured into a
// cube map at runtime via a SceneCaptureNode.
public SceneCaptureCubeSample(Microsoft.Xna.Framework.Game game)
  : base(game)
{
  SampleFramework.IsMouseVisible = false;

  // Create a graphics screen. This screen has to call the SceneCaptureRenderer
  // to handle the SceneCaptureNodes!
  _graphicsScreen = new DeferredGraphicsScreen(Services) { DrawReticle = true };
  GraphicsService.Screens.Insert(0, _graphicsScreen);
  GameObjectService.Objects.Add(new DeferredGraphicsOptionsObject(Services));

  Services.Register(typeof(DebugRenderer), null, _graphicsScreen.DebugRenderer);
  Services.Register(typeof(IScene), null, _graphicsScreen.Scene);

  // Add gravity and damping to the physics Simulation.
  Simulation.ForceEffects.Add(new Gravity());
  Simulation.ForceEffects.Add(new Damping());

  // Add a custom game object which controls the camera.
  var cameraGameObject = new CameraObject(Services);
  GameObjectService.Objects.Add(cameraGameObject);
  _graphicsScreen.ActiveCameraNode = cameraGameObject.CameraNode;

  // More standard objects.
  GameObjectService.Objects.Add(new GrabObject(Services));
  GameObjectService.Objects.Add(new ObjectCreatorObject(Services));
  //GameObjectService.Objects.Add(new StaticSkyObject(Services));
  GameObjectService.Objects.Add(new DynamicSkyObject(Services, true, false, true));
  GameObjectService.Objects.Add(new GroundObject(Services));
  GameObjectService.Objects.Add(new DudeObject(Services));
  GameObjectService.Objects.Add(new DynamicObject(Services, 1));
  GameObjectService.Objects.Add(new DynamicObject(Services, 2));
  GameObjectService.Objects.Add(new DynamicObject(Services, 5));
  GameObjectService.Objects.Add(new DynamicObject(Services, 6));
  GameObjectService.Objects.Add(new DynamicObject(Services, 7));
  GameObjectService.Objects.Add(new FogObject(Services) { AttachToCamera = true });
  GameObjectService.Objects.Add(new LavaBallsObject(Services));

  // Add a few palm trees at random (but deterministic, seed 12345) positions.
  Random random = new Random(12345);
  for (int i = 0; i < 10; i++)
  {
    Vector3F position = new Vector3F(random.NextFloat(-3, -8), 0, random.NextFloat(0, -5));
    Matrix33F orientation = Matrix33F.CreateRotationY(random.NextFloat(0, ConstantsF.TwoPi));
    float scale = random.NextFloat(0.5f, 1.2f);
    GameObjectService.Objects.Add(new StaticObject(Services, "PalmTree/palm_tree", scale, new Pose(position, orientation)));
  }

  // Load the "Bubble" mesh and place it at a fixed position in the scene.
  var modelNode = ContentManager.Load<ModelNode>("Bubble/Bubble");
  var meshNode = modelNode.GetDescendants().OfType<MeshNode>().First().Clone();
  meshNode.PoseWorld = new Pose(new Vector3F(0, 1, 0));
  _graphicsScreen.Scene.Children.Add(meshNode);

  // Surface of the mesh should reflect the scene in real-time. Reflections are
  // created using environment mapping: The scene is rendered into a cube map,
  // which is then applied to the mesh.
  // To render the scene into a cube map, we need to define a CameraNode and a
  // SceneCaptureNode: The CameraNode defines the point from where the scene is
  // captured. The SceneCaptureNode defines where and in which format the captured
  // image is needed.

  // Attach a camera to the center of the mesh.
  // (90° FOV and aspect ratio 1 are required for cube map faces.)
  var projection = new PerspectiveProjection();
  projection.SetFieldOfView(ConstantsF.PiOver2, 1, 0.1f, 20);
  var captureCameraNode = new CameraNode(new Camera(projection));
  meshNode.Children = new SceneNodeCollection { captureCameraNode };

  // Attach a SceneCaptureNode with a cube map render target to the mesh.
  var renderToTexture = new RenderToTexture
  {
    Texture = new RenderTargetCube(
      GraphicsService.GraphicsDevice,
      256,
      true,
      SurfaceFormat.Color,
      DepthFormat.None),
  };
  var sceneCaptureNode = new SceneCaptureNode(renderToTexture)
  {
    Shape = meshNode.Shape,
    CameraNode = captureCameraNode,
  };
  meshNode.Children.Add(sceneCaptureNode);

  // The bubble model uses a special effect and is rendered in the "AlphaBlend"
  // render pass. Let's modify the effect parameters to use the created cube map
  // as the reflection map of the bubble.
  var effectBinding = meshNode.Mesh.Materials[0]["AlphaBlend"];
  effectBinding.Set("ReflectionStrength", 0.5f);
  effectBinding.Set("RefractionStrength", 0.0f);
  effectBinding.Set("FresnelBias", 1.0f);
  effectBinding.Set("BlendMode", 1.0f);
  effectBinding.Set("Alpha", 1.0f);
  effectBinding.Set("CustomEnvironmentMap", (TextureCube)renderToTexture.Texture);
}
// (Re)creates the graphics screens for this document view: a deferred or basic
// screen (depending on UseDeferredLighting) plus a debug screen, with default
// lights, an optional ground plane, and a shared perspective camera.
private void InitializeGraphicsScreen()
{
  Debug.Assert(GraphicsScreens.Count == 0, "Reset graphics screens before calling InitializeGraphicsScreen().");

  var services = Document.Editor.Services;
  var graphicsService = services.GetInstance<IGraphicsService>().ThrowIfMissing();

  // Initialize graphics screens.
  var graphicsScreen = UseDeferredLighting
                         ? (GraphicsScreen)new DeferredGraphicsScreen(services)
                         : new BasicGraphicsScreen(services);
  GraphicsScreens.Add(graphicsScreen);
  GraphicsScreens.Add(new DebugGraphicsScreen(services));

  // Add default lighting.
  var scene = UseDeferredLighting
                ? ((DeferredGraphicsScreen)graphicsScreen).Scene
                : ((BasicGraphicsScreen)graphicsScreen).Scene;
  GameHelper.AddLights(scene);

  // Add a ground plane (useful for orientation and to check model shadows).
  // The model node is cached across calls; only loaded once.
  if (_groundModelNode == null)
  {
    var content = services.GetInstance<ContentManager>().ThrowIfMissing();
    _groundModelNode = content.Load<ModelNode>("DigitalRune.Editor.Game/Models/Misc/GroundPlane/GroundPlane").Clone();
  }
  _groundModelNode.IsEnabled = ShowGroundPlane;
  scene.Children.Add(_groundModelNode);

  // Add camera. (Created once and reused across screen re-initializations.)
  if (CameraNode == null)
  {
    var projection = new PerspectiveProjection();
    projection.SetFieldOfView(
      ConstantsF.PiOver4,
      graphicsService.GraphicsDevice.Viewport.AspectRatio,
      0.1f,
      10000.0f);
    CameraNode = new CameraNode(new Camera(projection))
    {
      Name = "CameraPerspective",
    };
  }
  if (UseDeferredLighting)
    ((DeferredGraphicsScreen)graphicsScreen).ActiveCameraNode = CameraNode;
  else
    ((BasicGraphicsScreen)graphicsScreen).CameraNode = CameraNode;
}
// True to use frustum culling. False to disable frustum culling.
private bool _cullingEnabled = true;

// Constructor: creates a top-down (orthographic) camera and a scene
// (perspective) camera, fills a collision domain with random boxes, and sets
// up a collision filter plus profiler counters for the culling comparison.
public FrustumCullingSample(Microsoft.Xna.Framework.Game game)
  : base(game)
{
  GraphicsScreen.ClearBackground = true;
  GraphicsScreen.BackgroundColor = Color.CornflowerBlue;

  // The top-down camera.
  var orthographicProjection = new OrthographicProjection();
  orthographicProjection.Set(
    LevelSize * 1.1f * GraphicsService.GraphicsDevice.Viewport.AspectRatio,
    LevelSize * 1.1f,
    1,
    10000f);
  var topDownCamera = new Camera(orthographicProjection);
  _topDownCameraNode = new CameraNode(topDownCamera)
  {
    View = Matrix44F.CreateLookAt(new Vector3F(0, 1000, 0), new Vector3F(0, 0, 0), -Vector3F.UnitZ),
  };

  // The perspective camera moving through the scene.
  var perspectiveProjection = new PerspectiveProjection();
  perspectiveProjection.SetFieldOfView(
    MathHelper.ToRadians(45),
    GraphicsService.GraphicsDevice.Viewport.AspectRatio,
    1,
    500);
  var sceneCamera = new Camera(perspectiveProjection);
  _sceneCameraNode = new CameraNode(sceneCamera);

  // Initialize collision detection.
  // We use one collision domain that manages all objects.
  _domain = new CollisionDomain(new CollisionDetection())
  {
    // We exchange the default broad phase with a DualPartition. The DualPartition
    // has special support for frustum culling.
    BroadPhase = new DualPartition<CollisionObject>(),
  };

  // Create a lot of random objects and add them to the collision domain.
  // (Fixed seed so the scene is reproducible.)
  RandomHelper.Random = new Random(12345);
  for (int i = 0; i < NumberOfObjects; i++)
  {
    // A real scene consists of a lot of complex objects such as characters, vehicles,
    // buildings, lights, etc. When doing frustum culling we need to test each objects against
    // the viewing frustum. If it intersects with the viewing frustum, the object is visible
    // from the camera's point of view. However, in practice we do not test the exact object
    // against the viewing frustum. Each objects is approximated by a simpler shape. In our
    // example, we assume that each object is approximated with an oriented bounding box.
    // (We could also use an other shape, such as a bounding sphere.)

    // Create a random box.
    Shape randomShape = new BoxShape(RandomHelper.Random.NextVector3F(1, 10));

    // Create a random position.
    Vector3F randomPosition;
    randomPosition.X = RandomHelper.Random.NextFloat(-LevelSize / 2, LevelSize / 2);
    randomPosition.Y = RandomHelper.Random.NextFloat(0, 2);
    randomPosition.Z = RandomHelper.Random.NextFloat(-LevelSize / 2, LevelSize / 2);

    // Create a random orientation.
    QuaternionF randomOrientation = RandomHelper.Random.NextQuaternionF();

    // Create object and add it to collision domain.
    var geometricObject = new GeometricObject(randomShape, new Pose(randomPosition, randomOrientation));
    var collisionObject = new CollisionObject(geometricObject)
    {
      CollisionGroup = 0,
    };
    _domain.CollisionObjects.Add(collisionObject);
  }

  // Per default, the collision domain computes collision between all objects.
  // In this sample we do not need this information and disable it with a collision
  // filter.
  // In a real application, we would use this collision information for rendering,
  // for example, to find out which lights overlap with which meshes, etc.
  var filter = new CollisionFilter();
  // Disable collision between objects in collision group 0.
  filter.Set(0, 0, false);
  _domain.CollisionDetection.CollisionFilter = filter;

  // Start with the scene camera.
  GraphicsScreen.CameraNode = _sceneCameraNode;

  // We will collect a few statistics for debugging.
  Profiler.SetFormat("NoCull", 1000, "Time in ms to submit DebugRenderer draw jobs without frustum culling.");
  Profiler.SetFormat("WithCull", 1000, "Time in ms to submit DebugRenderer draw jobs with frustum culling.");
}
/// <summary>
/// Creates a perspective projection from a 4x4 transformation matrix.
/// </summary>
/// <param name="matrix">The projection matrix.</param>
/// <returns>The perspective projection.</returns>
public static PerspectiveProjection FromMatrix(Matrix44F matrix)
{
  var perspectiveProjection = new PerspectiveProjection();
  perspectiveProjection.Set(matrix);
  return perspectiveProjection;
}
/// <summary>
/// Renders the environment maps for the image-based lights.
/// </summary>
/// <remarks>
/// This method uses the current DeferredGraphicsScreen to render new environment maps at
/// runtime. The DeferredGraphicsScreen has a SceneCaptureRenderer which we can use to
/// capture environment maps of the current scene.
/// To capture new environment maps the flag _updateEnvironmentMaps must be set to true.
/// When this flag is set, SceneCaptureNodes are added to the scene. When the graphics
/// screen calls the SceneCaptureRenderer the next time, the new environment maps will be
/// captured.
/// The flag _updateEnvironmentMaps remains true until the new environment maps are available.
/// This method checks the SceneCaptureNode.LastFrame property to check if new environment maps
/// have been computed. Usually, the environment maps will be available in the next frame.
/// (However, the XNA Game class can skip graphics rendering if the game is running slowly.
/// Then we would have to wait more than 1 frame.)
/// When environment maps are being rendered, the image-based lights are disabled to capture
/// only the scene with ambient and directional lights. Dynamic objects are also disabled
/// to capture only the static scene.
/// </remarks>
private void UpdateEnvironmentMaps()
{
  if (!_updateEnvironmentMaps)
    return;

  // One-time initializations:
  if (_sceneCaptureNodes[0] == null)
  {
    // Create cube maps and scene capture nodes.
    // (Note: A cube map size of 256 is enough for surfaces with a specular power
    // in the range [0, 200000].)
    for (int i = 0; i < _sceneCaptureNodes.Length; i++)
    {
      var renderTargetCube = new RenderTargetCube(
        GraphicsService.GraphicsDevice,
        256,
        true,
        SurfaceFormat.Color,
        DepthFormat.None);
      var renderToTexture = new RenderToTexture { Texture = renderTargetCube };
      // 90° FOV and aspect ratio 1 for cube map faces.
      var projection = new PerspectiveProjection();
      projection.SetFieldOfView(ConstantsF.PiOver2, 1, 1, 100);
      _sceneCaptureNodes[i] = new SceneCaptureNode(renderToTexture)
      {
        CameraNode = new CameraNode(new Camera(projection))
        {
          // Capture the scene from the position of the light node.
          PoseWorld = _lightNodes[i].PoseWorld,
        },
      };

      _imageBasedLights[i].Texture = renderTargetCube;
    }

    // We use a ColorEncoder to encode a HDR image in a normal Color texture.
    _colorEncoder = new ColorEncoder(GraphicsService)
    {
      SourceEncoding = ColorEncoding.Rgb,
      TargetEncoding = ColorEncoding.Rgbm,
    };

    // The SceneCaptureRenderer has a render callback which defines what is rendered
    // into the scene capture render targets.
    _graphicsScreen.SceneCaptureRenderer.RenderCallback = context =>
    {
      var graphicsDevice = GraphicsService.GraphicsDevice;
      var renderTargetPool = GraphicsService.RenderTargetPool;

      // Get scene nodes which are visible by the current camera.
      CustomSceneQuery sceneQuery = context.Scene.Query<CustomSceneQuery>(context.CameraNode, context);

      // The final image has to be rendered into this render target.
      var ldrTarget = context.RenderTarget;

      // Use an intermediate HDR render target with the same resolution as the final target.
      var format = new RenderTargetFormat(ldrTarget)
      {
        SurfaceFormat = SurfaceFormat.HdrBlendable,
        DepthStencilFormat = DepthFormat.Depth24Stencil8
      };
      var hdrTarget = renderTargetPool.Obtain2D(format);

      graphicsDevice.SetRenderTarget(hdrTarget);
      context.RenderTarget = hdrTarget;

      // Render scene (without post-processing, without lens flares, no debug rendering, no reticle).
      _graphicsScreen.RenderScene(sceneQuery, context, false, false, false, false);

      // Convert the HDR image to RGBM image.
      context.SourceTexture = hdrTarget;
      context.RenderTarget = ldrTarget;
      _colorEncoder.Process(context);
      context.SourceTexture = null;

      // Clean up.
      renderTargetPool.Recycle(hdrTarget);
      // NOTE(review): RenderTarget was already set to ldrTarget above — this
      // second assignment appears redundant; confirm before removing.
      context.RenderTarget = ldrTarget;
    };
  }

  if (_sceneCaptureNodes[0].Parent == null)
  {
    // Add the scene capture nodes to the scene.
    for (int i = 0; i < _sceneCaptureNodes.Length; i++)
      _graphicsScreen.Scene.Children.Add(_sceneCaptureNodes[i]);

    // Remember the old time stamp of the nodes.
    _oldEnvironmentMapTimeStamp = _sceneCaptureNodes[0].LastFrame;

    // Disable all lights except ambient and directional lights.
    // We do not capture the image-based lights or any other lights (e.g. point lights)
    // in the cube map.
    foreach (var lightNode in _graphicsScreen.Scene.GetDescendants().OfType<LightNode>())
      lightNode.IsEnabled = (lightNode.Light is AmbientLight) || (lightNode.Light is DirectionalLight);

    // Disable dynamic objects.
    foreach (var node in _graphicsScreen.Scene.GetDescendants())
      if (node is MeshNode || node is LodGroupNode)
        if (!node.IsStatic)
          node.IsEnabled = false;
  }
  else
  {
    // The scene capture nodes are part of the scene. Check if they have been
    // updated.
    if (_sceneCaptureNodes[0].LastFrame != _oldEnvironmentMapTimeStamp)
    {
      // We have new environment maps. Restore the normal scene.
      for (int i = 0; i < _sceneCaptureNodes.Length; i++)
        _graphicsScreen.Scene.Children.Remove(_sceneCaptureNodes[i]);

      _updateEnvironmentMaps = false;

      // Re-enable all lights.
      foreach (var lightNode in _graphicsScreen.Scene.GetDescendants().OfType<LightNode>())
        lightNode.IsEnabled = true;

      // Re-enable dynamic objects.
      foreach (var node in _graphicsScreen.Scene.GetDescendants())
        if (node is MeshNode || node is LodGroupNode)
          if (!node.IsStatic)
            node.IsEnabled = true;
    }
  }
}
// Constructor: sets up the deferred graphics screen, a fixed close-up camera,
// a three-light setup, and loads the "Sintel" head model with its skeletal
// and morph-target animations.
public FacialAnimationSample(Microsoft.Xna.Framework.Game game)
  : base(game)
{
  _graphicsScreen = new DeferredGraphicsScreen(Services) { DrawReticle = false };
  GraphicsService.Screens.Insert(0, _graphicsScreen);
  Services.Register(typeof(DebugRenderer), null, _graphicsScreen.DebugRenderer);
  Services.Register(typeof(IScene), null, _graphicsScreen.Scene);

  // Add a game object which adds some GUI controls for the deferred graphics
  // screen to the Options window.
  GameObjectService.Objects.Add(new DeferredGraphicsOptionsObject(Services));

  // Use a fixed camera.
  var projection = new PerspectiveProjection();
  projection.SetFieldOfView(
    ConstantsF.PiOver4,
    GraphicsService.GraphicsDevice.Viewport.AspectRatio,
    0.1f,
    10);
  var cameraNode = new CameraNode(new Camera(projection));
  cameraNode.LookAt(new Vector3F(0.15f, 0.15f, 0.5f), new Vector3F(0.1f, 0.15f, 0), Vector3F.Up);
  _graphicsScreen.Scene.Children.Add(cameraNode);
  _graphicsScreen.ActiveCameraNode = cameraNode;

  // Lighting setup: key light, back light, ambient fill light.
  var keyLight = new LightNode(new Spotlight { DiffuseIntensity = 0.6f, SpecularIntensity = 0.4f });
  keyLight.LookAt(new Vector3F(-2, 2, 2), new Vector3F(), Vector3F.Up);
  _graphicsScreen.Scene.Children.Add(keyLight);

  var backLight = new LightNode(new Spotlight { DiffuseIntensity = 0.3f, SpecularIntensity = 0.3f });
  backLight.LookAt(new Vector3F(1, 0.5f, -2), new Vector3F(), Vector3F.Up);
  _graphicsScreen.Scene.Children.Add(backLight);

  var fillLight = new LightNode(new AmbientLight { HemisphericAttenuation = 1, Intensity = 0.1f });
  _graphicsScreen.Scene.Children.Add(fillLight);

  // The scene does not have a proper background. That's why the exposure is a
  // bit off. --> Reduce the max exposure.
  var hdrFilter = _graphicsScreen.PostProcessors.OfType<HdrFilter>().First();
  hdrFilter.MaxExposure = 6;

  // Load the customized "Sintel" model (original: Durian Open Movie Project - http://www.sintel.org/).
  var model = ContentManager.Load<ModelNode>("Sintel/Sintel-Head").Clone();
  model.PoseWorld = new Pose(new Vector3F(0, 0, 0), Matrix33F.CreateRotationY(MathHelper.ToRadians(10)) * Matrix33F.CreateRotationX(-MathHelper.ToRadians(90)));
  _graphicsScreen.Scene.Children.Add(model);

  // The model consists of a root node and a mesh node.
  //   ModelNode "Sintel-Head"
  //     MeshNode "Sintel"
  _sintel = (MeshNode)model.Children[0];

  // The model contains two skeletal animations:
  // - "MOUTH-open" is just a single frame.
  // - "Test" is a short animation (250 frames).

  // In the Options window, we will add a slider to move the jaw.
  // Slider.Value = 0 ... mouth closed (default)
  _mouthClosedPose = SkeletonPose.Create(_sintel.Mesh.Skeleton);
  // Slider.Value = 1 ... mouth open (copied from the "MOUTH-open" animation)
  SkeletonKeyFrameAnimation mouthOpen = _sintel.Mesh.Animations["MOUTH-open"];
  _mouthOpenPose = SkeletonPose.Create(_sintel.Mesh.Skeleton);
  mouthOpen.GetValue(TimeSpan.Zero, ref _mouthOpenPose, ref _mouthOpenPose, ref _mouthOpenPose);

  // Turn the "Test" animation into an endless loop.
  _skeletalAnimation = new AnimationClip<SkeletonPose>(_sintel.Mesh.Animations["Test"])
  {
    Duration = TimeSpan.MaxValue,
    LoopBehavior = LoopBehavior.Cycle
  };

  // Mesh has several morph targets for facial animation, which are imported
  // automatically via the content pipeline. Unfortunately, the XNA content
  // pipeline cannot import morph target animations automatically.
  // In this demo, we will create a morph target animation in code.
  _morphingAnimation = CreateMorphingAnimation();

  CreateGuiControls();
}