/// <summary>
/// Converts a single piece of input geometry into our instanced format.
/// Validates that the mesh is small enough to be hardware instanced,
/// flattens its vertex channels into a GPU-ready buffer, converts the
/// material, and appends the result to the output model.
/// </summary>
void ProcessGeometry(GeometryContent geometry)
{
    int indexCount = geometry.Indices.Count;
    int vertexCount = geometry.Vertices.VertexCount;

    // Replicated vertices are addressed with 16 bit indices, so the
    // source mesh must fit inside a ushort index range.
    if (vertexCount > ushort.MaxValue)
    {
        throw new InvalidContentException(
            string.Format("Geometry contains {0} vertices: " +
                          "this is too many to be instanced.", vertexCount));
    }

    // Large meshes still work, but fewer copies fit into a single batch,
    // which hurts instancing efficiency — warn the content author.
    if (vertexCount > ushort.MaxValue / 8)
    {
        context.Logger.LogWarning(null, rootNode.Identity,
                                  "Geometry contains {0} vertices: " +
                                  "this will only allow it to be instanced " +
                                  "{1} times per batch. A model with fewer " +
                                  "vertices would be more efficient.",
                                  vertexCount, ushort.MaxValue / vertexCount);
    }

    // Texture coordinate channels 1-4 are reserved for passing the
    // per-instance data, so the source model must not already use them.
    VertexChannelCollection vertexChannels = geometry.Vertices.Channels;

    for (int channel = 1; channel <= 4; channel++)
    {
        if (!vertexChannels.Contains(VertexChannelNames.TextureCoordinate(channel)))
            continue;

        throw new InvalidContentException(
            string.Format("Model already contains data for texture " +
                          "coordinate channel {0}, but instancing " +
                          "requires this channel for its own use.", channel));
    }

    // Flatten the flexible input vertex channel data into
    // a simple GPU style vertex buffer byte array.
    VertexBufferContent vertexBufferContent;
    VertexElement[] vertexElements;

    geometry.Vertices.CreateVertexBuffer(out vertexBufferContent,
                                         out vertexElements,
                                         context.TargetPlatform);

    int vertexStride = VertexDeclaration.GetVertexStrideSize(vertexElements, 0);

    // Convert the input material.
    MaterialContent material = ProcessMaterial(geometry.Material);

    // Add the new piece of geometry to our output model.
    outputModel.AddModelPart(indexCount, vertexCount, vertexStride,
                             vertexElements, vertexBufferContent,
                             geometry.Indices, material);
}
} // Process

#endregion

#region Process Vertex Channel

/// <summary>
/// Processes geometry content vertex channels at the specified index.
/// For the common position + normal + one-UV layout the data is passed
/// through unchanged; any richer layout is compressed channel by channel
/// (normals/tangents/binormals to NormalizedShort4, UV0 to HalfVector2)
/// and unused channels (UV1-7, vertex color 0) are dropped.
/// NOTE(review): this method mutates the channel collection (Add/Remove/
/// Convert) while the base processor iterates channels by index, so the
/// exact statement order matters — presumably the pipeline re-evaluates
/// indices between calls; verify before reordering anything here.
/// </summary>
protected override void ProcessVertexChannel(GeometryContent geometry, int vertexChannelIndex, ContentProcessorContext context)
{
    VertexChannelCollection channels = geometry.Vertices.Channels;

    // If the model has only position and normals a UV channel is added
    // (null content: the pipeline fills it with default values).
    // http://xnafinalengine.codeplex.com/wikipage?title=Compressed%20Vertex%20Data
    if (channels.Count == 1 && channels.Contains(VertexChannelNames.Normal()))
    {
        channels.Add<Vector2>(VertexChannelNames.TextureCoordinate(0), null);
    }

    // If the model has position, normal and UV then the data is packed
    // on 32 byte aligned vertex data and needs no further conversion.
    if (channels.Count == 2 && channels.Contains(VertexChannelNames.Normal()) && channels.Contains(VertexChannelNames.TextureCoordinate(0)))
    {
        // No compressed vertex data.
        base.ProcessVertexChannel(geometry, vertexChannelIndex, context);
    }
    else // If not then the data is compressed.
    {
        string name = channels[vertexChannelIndex].Name;

        if (name == VertexChannelNames.Normal())
        {
            // Pack the normal into 4 normalized signed 16 bit components.
            channels.ConvertChannelContent<NormalizedShort4>(vertexChannelIndex);
        }
        else if (name == VertexChannelNames.TextureCoordinate(0))
        {
            // Clamp values.
            /*for (int i = 0; i < channels[vertexChannelIndex].Count; i++)
            {
                Vector2 uv = (Vector2)channels[vertexChannelIndex][i];
                if (uv.X < 0)
                    uv.X *= -1;
                if (uv.Y < 0)
                    uv.Y *= -1;
                Vector2 uvCampled = new Vector2(uv.X - (float)Math.Truncate(uv.X), uv.Y - (float)Math.Truncate(uv.Y));
                channels[vertexChannelIndex][i] = uvCampled;
            }
            // If the resource has texture coordinates outside the range [-1, 1] the values will be clamped.
            channels.ConvertChannelContent<NormalizedShort2>(vertexChannelIndex);*/

            // Sometimes you can't just clamp values, because the distance between
            // vertices can surpass 1 uv unit. And given that I am not removing the
            // binormals I won't normalize the UVs. Half precision keeps values
            // outside [-1, 1] intact, unlike the clamping approach above.
            channels.ConvertChannelContent<HalfVector2>(vertexChannelIndex);
        }
        // Secondary texture coordinate sets and vertex colors are not used
        // by this engine's vertex format, so drop them to save space.
        else if (name == VertexChannelNames.TextureCoordinate(1))
            channels.Remove(VertexChannelNames.TextureCoordinate(1));
        else if (name == VertexChannelNames.TextureCoordinate(2))
            channels.Remove(VertexChannelNames.TextureCoordinate(2));
        else if (name == VertexChannelNames.TextureCoordinate(3))
            channels.Remove(VertexChannelNames.TextureCoordinate(3));
        else if (name == VertexChannelNames.TextureCoordinate(4))
            channels.Remove(VertexChannelNames.TextureCoordinate(4));
        else if (name == VertexChannelNames.TextureCoordinate(5))
            channels.Remove(VertexChannelNames.TextureCoordinate(5));
        else if (name == VertexChannelNames.TextureCoordinate(6))
            channels.Remove(VertexChannelNames.TextureCoordinate(6));
        else if (name == VertexChannelNames.TextureCoordinate(7))
            channels.Remove(VertexChannelNames.TextureCoordinate(7));
        else if (name == VertexChannelNames.Color(0))
            channels.Remove(VertexChannelNames.Color(0));
        else if (name == VertexChannelNames.Tangent(0))
        {
            // Pack the tangent the same way as the normal.
            channels.ConvertChannelContent<NormalizedShort4>(vertexChannelIndex);
        }
        else if (name == VertexChannelNames.Binormal(0))
        {
            channels.ConvertChannelContent<NormalizedShort4>(vertexChannelIndex);
            // If the binormal is removed then the position, the normal,
            // the tangent and one texture coordinate can be fetched in one single block of 32 bytes.
            // Still, it is faster to just pass the value. At least in the tests I made.
            //channels.Remove(VertexChannelNames.Binormal(0));
        }
        else
        {
            // Unknown channel (e.g. skinning weights): let the stock
            // ModelProcessor handle it with its default conversion.
            base.ProcessVertexChannel(geometry, vertexChannelIndex, context);
        }
    }
} // ProcessVertexChannel