/// <summary>
/// Processes a static (non-skinned) mesh: bakes the root transform into the
/// geometry, optionally merges node transforms, runs the base model
/// processor, and attaches a <see cref="MeshMetadata"/> instance (bounding
/// volume and per-sub-mesh data) to the resulting model's Tag.
/// </summary>
/// <param name="input">The source scene node tree to process.</param>
/// <param name="context">Pipeline processing context.</param>
/// <returns>The processed model with MeshMetadata stored in its Tag.</returns>
/// <exception cref="ArgumentNullException">Thrown when input is null.</exception>
public override ModelContent Process(NodeContent input, ContentProcessorContext context)
{
    if (input == null)
    {
        throw new ArgumentNullException("input");
    }

    // Tangent frames are mandatory: the renderer uses tangent-space normal mapping.
    GenerateTangentFrames = true;

    // Bake the root transform into the geometry so the scene root becomes identity.
    MeshHelper.TransformScene(input, input.Transform);
    input.Transform = Matrix.Identity;

    // Skinned meshes keep their node hierarchy; everything else gets flattened.
    if (!_isSkinned)
    {
        MergeTransforms(input);
    }

    ModelContent builtModel = base.Process(input, context);

    // Gather information that will be useful at run time.
    MeshMetadata meshMetadata = new MeshMetadata();
    BoundingBox sceneBounds = new BoundingBox();
    meshMetadata.BoundingBox = ComputeBoundingBox(input, ref sceneBounds, meshMetadata);

    // Hand the metadata to the runtime through the model's Tag.
    builtModel.Tag = meshMetadata;
    return builtModel;
}
/// <summary>
/// The main Process method converts an intermediate format content pipeline
/// NodeContent tree to a ModelContent object with embedded animation data.
/// </summary>
/// <param name="input">The source scene node tree containing the skinned mesh.</param>
/// <param name="context">Pipeline processing context.</param>
/// <returns>The processed model with skinning data attached to its Tag metadata.</returns>
/// <exception cref="InvalidContentException">
/// Thrown when no skeleton is found, the skeleton exceeds the maximum bone
/// count, or the base processor did not attach MeshMetadata to the model Tag.
/// </exception>
public override ModelContent Process(NodeContent input, ContentProcessorContext context)
{
    ValidateMesh(input, context, null);

    // Find the skeleton.
    BoneContent skeleton = MeshHelper.FindSkeleton(input);
    if (skeleton == null)
    {
        throw new InvalidContentException("Input skeleton not found.");
    }

    // Tell the base (static) processor not to merge node transforms.
    _isSkinned = true;

    // We don't want to have to worry about different parts of the model being
    // in different local coordinate systems, so let's just bake everything.
    FlattenTransforms(input, skeleton);

    // Read the bind pose and skeleton hierarchy data.
    IList<BoneContent> bones = MeshHelper.FlattenSkeleton(skeleton);

    if (bones.Count > SkinnedEffect.MaxBones)
    {
        throw new InvalidContentException(string.Format(
            "Skeleton has {0} bones, but the maximum supported is {1}.",
            bones.Count, SkinnedEffect.MaxBones));
    }

    List<Matrix> bindPose = new List<Matrix>();
    List<Matrix> inverseBindPose = new List<Matrix>();
    List<int> skeletonHierarchy = new List<int>();

    foreach (BoneContent bone in bones)
    {
        // Scale all translations so the skeleton matches the scaled geometry.
        Matrix m = bone.Transform;
        m.Translation = m.Translation * Scale;
        bone.Transform = m;

        bindPose.Add(bone.Transform);
        inverseBindPose.Add(Matrix.Invert(bone.AbsoluteTransform));
        // IndexOf returns -1 for the root bone (its parent is not in the list),
        // which is the runtime's "no parent" marker.
        skeletonHierarchy.Add(bones.IndexOf(bone.Parent as BoneContent));
    }

    // Convert animation data to our runtime format.
    Dictionary<string, AnimationClip> animationClips;
    animationClips = ProcessAnimations(skeleton.Animations, bones);

    // Chain to the base ModelProcessor class so it can convert the model data.
    ModelContent model = base.Process(input, context);

    // FIX: the original dereferenced the 'as' cast without a null check. If
    // the base processor ever fails to attach MeshMetadata to the Tag, that
    // produced an opaque NullReferenceException; fail with a clear pipeline
    // error instead.
    MeshMetadata metadata = model.Tag as MeshMetadata;
    if (metadata == null)
    {
        throw new InvalidContentException(
            "Expected the base processor to store MeshMetadata in the model Tag.");
    }

    // Store our custom animation data in the Tag property of the model.
    metadata.SkinningData = new SkinningData(animationClips, bindPose,
                                             inverseBindPose, skeletonHierarchy);
    return model;
}
/// <summary>
/// Recursively computes an axis-aligned bounding box for the node tree.
/// For every MeshContent encountered, bakes its local transform into the
/// vertices, records one SubMeshMetadata entry (bounds, render queue,
/// shadow flag) on <paramref name="metadata"/>, and merges the mesh bounds
/// into the running box.
/// </summary>
/// <param name="input">Node to process; may be a mesh or a plain node.</param>
/// <param name="aabb">Running bounds, passed by ref. NOTE(review): the
/// recursive call below passes the local <c>boundingBox</c> here, so a child
/// both reads and writes the parent's accumulator through this ref — the
/// exact statement order matters; verify before restructuring.</param>
/// <param name="metadata">Accumulates one SubMeshMetadata per mesh found.</param>
/// <returns>The merged bounding box of this node and all of its children.</returns>
private BoundingBox ComputeBoundingBox(NodeContent input, ref BoundingBox aabb, MeshMetadata metadata)
{
    BoundingBox boundingBox;
    if (input is MeshContent)
    {
        MeshContent mc = (MeshContent)input;
        // Bake the mesh's local transform into its vertices so positions are
        // measured in parent space, then reset the transform to identity.
        MeshHelper.TransformScene(mc, mc.Transform);
        mc.Transform = Matrix.Identity;
        boundingBox = BoundingBox.CreateFromPoints(mc.Positions);

        //create sub mesh information
        MeshMetadata.SubMeshMetadata subMeshMetadata = new MeshMetadata.SubMeshMetadata();
        subMeshMetadata.BoundingBox = boundingBox;
        subMeshMetadata.RenderQueue = _renderQueue;
        subMeshMetadata.CastShadows = CastShadows;
        metadata.AddSubMeshMetadata(subMeshMetadata);

        // NOTE(review): for the first mesh the incoming aabb is the caller's
        // default-constructed BoundingBox (min == max == origin); merging it
        // would wrongly expand the bounds to include the origin. That appears
        // to be why the merge is skipped until more than one sub mesh exists —
        // confirm this assumption holds for every call site.
        if (metadata.SubMeshesMetadata.Count > 1)
        {
            boundingBox = BoundingBox.CreateMerged(boundingBox, aabb);
        }
    }
    else
    {
        // Non-mesh nodes contribute no geometry: propagate the running bounds.
        boundingBox = aabb;
    }

    // Recurse into children. boundingBox is passed by ref AND merged with the
    // return value, so each child both sees and extends the running bounds.
    foreach (NodeContent c in input.Children)
    {
        boundingBox = BoundingBox.CreateMerged(boundingBox, ComputeBoundingBox(c, ref boundingBox, metadata));
    }

    return(boundingBox);
}