/// <summary>
/// Builds the world: records the viewport dimensions, creates the camera used
/// for rendering, and sets up the collision engine covering the whole screen.
/// </summary>
public World(int screenWidth, int screenHeight, GraphicsDevice graphicsDevice)
{
    this.screenWidth = screenWidth;
    this.screenHeight = screenHeight;
    camera = new Camera(screenHeight, screenWidth, graphicsDevice);
    worldCollisions = new Bounce.CollissionEngine(200, screenWidth, screenHeight);
}
/// <summary>
/// Opens and immediately closes a sprite batch using the camera's projection
/// matrix as the batch transform. No sprites are queued between Begin and End,
/// so this currently renders nothing — placeholder for world sprite drawing.
/// </summary>
public void Draw(Camera camera, SpriteBatch spriteBatch)
{
    Matrix batchTransform = camera.ProjectionMatrix;

    spriteBatch.Begin(SpriteBlendMode.Additive, SpriteSortMode.BackToFront,
                      SaveStateMode.None, batchTransform);
    spriteBatch.End();
}
/// <summary>
/// Draws this object as a filled black circle. The circle is approximated by
/// 20 rim vertices (one every 18 degrees) plus a centre vertex, rendered as a
/// triangle list that fans out from the centre.
/// </summary>
public void Draw(Camera camera)
{
    const int segments = 20;    // 360 degrees / 18-degree steps
    VertexPositionColor[] vertices = new VertexPositionColor[segments + 1];

    // Rim vertices, one every 18 degrees, offset by the object's position.
    for (int step = 0; step < segments; step++)
    {
        double angle = (2.0 * Math.PI / 360) * (step * 18);
        Vector3 rim = new Vector3(
            (float)(radius * Math.Cos(angle)),
            (float)(radius * Math.Sin(angle)),
            0f);
        vertices[step] = new VertexPositionColor(rim + position, Color.Black);
    }

    // Centre vertex shared by every triangle.
    vertices[segments] = new VertexPositionColor(position, Color.Black);

    // One triangle per rim edge: (centre, k, k+1), wrapping back to 0 at the end.
    short[] fanIndices = new short[segments * 3];
    for (int k = 0; k < segments; k++)
    {
        fanIndices[k * 3] = segments;
        fanIndices[k * 3 + 1] = (short)k;
        fanIndices[k * 3 + 2] = (short)((k + 1) % segments);
    }

    camera.BaseEffect.Begin();
    foreach (EffectPass pass in camera.BaseEffect.CurrentTechnique.Passes)
    {
        pass.Begin();
        camera.GrapicsDevice.DrawUserIndexedPrimitives<VertexPositionColor>(
            PrimitiveType.TriangleList,
            vertices,
            0,              // vertex buffer offset to add to each element of the index buffer
            segments + 1,   // number of vertices to draw
            fanIndices,
            0,              // first index element to read
            segments        // number of primitives to draw
            );
        pass.End();
    }
    camera.BaseEffect.End();
}
/// <summary>
/// Draws every segment in <c>allSegments</c> as a black line.
/// The two-element vertex array and the constant {0, 1} index array are
/// allocated once and reused for each segment instead of being rebuilt
/// inside the loop — they were loop-invariant in the original.
/// </summary>
public void Draw(Camera camera)
{
    // Loop-invariant buffers: two vertices per line, indices always {0, 1}.
    VertexPositionColor[] pointList = new VertexPositionColor[2];
    short[] lineListIndices = { 0, 1 };

    foreach (Segment segment in allSegments)
    {
        pointList[0] = new VertexPositionColor(new Vector3(segment.StartPoint, 0), Color.Black);
        pointList[1] = new VertexPositionColor(new Vector3(segment.EndPoint, 0), Color.Black);

        camera.BaseEffect.Begin();
        foreach (EffectPass pass in camera.BaseEffect.CurrentTechnique.Passes)
        {
            pass.Begin();
            camera.GrapicsDevice.DrawUserIndexedPrimitives<VertexPositionColor>(
                PrimitiveType.LineList,
                pointList,
                0,                  // vertex buffer offset to add to each element of the index buffer
                2,                  // number of vertices in pointList
                lineListIndices,    // the index buffer
                0,                  // first index element to read
                1                   // number of primitives to draw
                );
            pass.End();
        }
        camera.BaseEffect.End();
    }
}
/// <summary>
/// Instanced rendering. Draws <paramref name="model"/> numInstances times. Uses hardware
/// instancing: a secondary vertex stream is created, where the transform matrices of the
/// individual instances are passed down to the shader. Note that in order to be efficient,
/// the model should contain as little meshes and meshparts as possible.
/// </summary>
/// <param name="model">The model to be drawn</param>
/// <param name="camera">The camera</param>
/// <param name="model2worldTransformations">The instance transform matrices</param>
/// <param name="numInstances">Number of instances to draw. Note: model2worldTransformations must be at least this long</param>
public virtual void renderInstanced(Model model, Camera camera, Matrix[] model2worldTransformations, int numInstances)
{
    if (numInstances <= 0)
        return;

    // Make sure the instance-data vertex buffer is big enough; grow it (never
    // shrink) when more instances are requested than it can hold.
    // (The original computed an unused "instanceDataSize" local here — removed.)
    if ((mInstanceDataStream == null) || (mInstanceDataStream.VertexCount < numInstances))
    {
        if (mInstanceDataStream != null)
            mInstanceDataStream.Dispose();

        mInstanceDataStream = new DynamicVertexBuffer(mGraphicsDevice, mInstanceVertexDeclaration,
                                                      numInstances, BufferUsage.WriteOnly);
    }

    // Upload transform matrices to the instance data vertex buffer.
    // Discard orphans the old buffer contents so the GPU is not stalled.
    mInstanceDataStream.SetData(model2worldTransformations, 0, numInstances, SetDataOptions.Discard);

    // Draw the model. A model can have multiple meshes, so loop.
    Matrix[] transforms = new Matrix[model.Bones.Count];
    model.CopyAbsoluteBoneTransformsTo(transforms);

    foreach (ModelMesh mesh in model.Meshes)
    {
        // Bone matrix for this mesh.
        Matrix boneMatrix = transforms[mesh.ParentBone.Index];

        foreach (ModelMeshPart part in mesh.MeshParts)
        {
            mGraphicsDevice.Indices = part.IndexBuffer;

            part.Effect.Parameters["World"].SetValue(boneMatrix);
            part.Effect.Parameters["View"].SetValue(camera.viewMatrix);
            part.Effect.Parameters["Projection"].SetValue(camera.projectionMatrix);
            part.Effect.Parameters["LightPosition"].SetValue(mLightPosition);
            part.Effect.CurrentTechnique.Passes[0].Apply();

            // Stream 0: geometry. Stream 1: per-instance matrices, advanced once per instance.
            mGraphicsDevice.SetVertexBuffers(new[]
            {
                part.VertexBuffer,
                new VertexBufferBinding(mInstanceDataStream, 0, 1)
            });

            mGraphicsDevice.DrawInstancedPrimitives(
                PrimitiveType.TriangleList,
                part.VertexOffset,
                0,
                part.NumVertices,
                part.StartIndex,
                part.PrimitiveCount,
                numInstances);
        }
    }
}
/// <summary>
/// Renders the visualization mesh of the collision geometry with lighting
/// enabled, using the camera's view and projection matrices.
/// </summary>
public void renderCollisionGeometry(ModelGeometry collisionGeometry, Camera camera)
{
    foreach (ModelMesh mesh in collisionGeometry.visualizationData.Meshes)
    {
        // Every effect on this mesh is expected to be a BasicEffect; the
        // foreach performs the same per-element cast as Cast<BasicEffect>().
        foreach (BasicEffect effect in mesh.Effects)
        {
            effect.View = camera.viewMatrix;
            effect.Projection = camera.projectionMatrix;
            effect.LightingEnabled = true;
        }
        mesh.Draw();
    }
}
/// <summary>
/// Renders the currently colliding faces as alpha-blended triangles. Each face
/// is shaded by a grey level proportional to its collision count, normalized
/// from [min, max] into [0, 1].
/// </summary>
/// <param name="collisionGeometry">Geometry supplying the vertex and normal data.</param>
/// <param name="collidingFaces">Faces to draw, mapped to their collision counts.</param>
/// <param name="camera">Camera supplying view and projection matrices.</param>
/// <param name="min">Lowest collision count (maps to black).</param>
/// <param name="max">Highest collision count (maps to white).</param>
public void renderCollidingFaces(ModelGeometry collisionGeometry, IDictionary<Face, uint> collidingFaces, Camera camera, uint min, uint max)
{
    if (collidingFaces.Count == 0)
        return;

    // BUGFIX: "Math.Max(max - min, 0.0001f)" subtracted two uints first, so a
    // caller passing max < min wrapped around to a huge positive value instead
    // of being clamped. Compute the range with an explicit guard instead.
    float range = max > min ? max - min : 0.0001f;

    // Fill the vertex buffer: 3 vertices per face, each laid out as
    // (position, normal, colour) Vector3 triples.
    var data = collidingFaces.SelectMany(face =>
    {
        var colour = new Vector3((face.Value - min) / range);
        return new[]
        {
            collisionGeometry.vertices[face.Key.v1], collisionGeometry.normals[face.Key.n1], colour,
            collisionGeometry.vertices[face.Key.v2], collisionGeometry.normals[face.Key.n2], colour,
            collisionGeometry.vertices[face.Key.v3], collisionGeometry.normals[face.Key.n3], colour
        };
    });
    mCollidingFacesVertices.SetData<Vector3>(data.ToArray());
    mGraphicsDevice.SetVertexBuffer(mCollidingFacesVertices);

    // Enable alpha blending, remembering the previous state so it can be restored.
    var previousBlendState = mGraphicsDevice.BlendState;
    mGraphicsDevice.BlendState = mAlphaBlendState;

    // Draw one triangle per colliding face.
    mCollisionsShader.Parameters["WorldViewProjection"].SetValue(camera.viewMatrix * camera.projectionMatrix);
    mCollisionsShader.CurrentTechnique.Passes[0].Apply();
    mGraphicsDevice.DrawPrimitives(PrimitiveType.TriangleList, 0, collidingFaces.Count);

    // Restore previous blend mode.
    mGraphicsDevice.BlendState = previousBlendState;
}
/// <summary>
/// Renders a model, applying the given rotation and translation around each
/// mesh's absolute bone transform, lit from mLightPosition.
/// </summary>
public void render(Model model, Matrix rotation, Matrix translation, Camera camera)
{
    Matrix[] boneTransforms = new Matrix[model.Bones.Count];
    model.CopyAbsoluteBoneTransformsTo(boneTransforms);

    // A model can have multiple meshes; configure every effect on each one.
    foreach (ModelMesh mesh in model.Meshes)
    {
        // Same world matrix for every effect of this mesh — compute it once.
        Matrix world = rotation * boneTransforms[mesh.ParentBone.Index] * translation;

        foreach (Effect effect in mesh.Effects)
        {
            effect.Parameters["World"].SetValue(world);
            effect.Parameters["View"].SetValue(camera.viewMatrix);
            effect.Parameters["Projection"].SetValue(camera.projectionMatrix);
            effect.Parameters["LightPosition"].SetValue(mLightPosition);
        }

        // Draw the mesh, using the effects set above.
        mesh.Draw();
    }
}
/// <summary>
/// Renders a model with no extra rotation or translation (identity transforms).
/// </summary>
public void render(Model model, Camera camera)
{
    this.render(model, Matrix.Identity, Matrix.Identity, camera);
}