public override double Eval(T p, T q)
{
    Vector mp = p.GetMeanVector();
    Vector mq = q.GetMeanVector();
    Matrix covp = p.GetCovarianceMatrix();
    Matrix covq = q.GetCovarianceMatrix();

    // dimensions
    int dp = mp.Count;
    int dq = mq.Count;
    if (dp != dq)
    {
        throw new ArgumentException("Dimension mismatch between p and q.");
    }
    if (dp == 1)
    {
        if (gwidth2s.Length != 1)
        {
            throw new ArgumentException("Expect gwidth2s to have 1 dimension.");
        }
        if (!(covp.Rows == 1 && covp.Cols == 1))
        {
            throw new SystemException(String.Format("Expect covp to be 1x1. Got {0}x{1}", covp.Rows, covp.Cols));
        }
        if (!(covq.Rows == 1 && covq.Cols == 1))
        {
            throw new SystemException(String.Format("Expect covq to be 1x1. Got {0}x{1}", covq.Rows, covq.Cols));
        }
        double vp = covp[0, 0];
        double vq = covq[0, 0];
        double kerParam = this.gwidth2s[0];
        double dpq = 1.0 / (vp + vq + kerParam);
        double meanDiff = mp[0] - mq[0];
        double exp = Math.Exp(-0.5 * meanDiff * dpq * meanDiff);
        double eval = Math.Sqrt(dpq * kerParam) * exp;
        return eval;
    }
    else
    {
        // Multivariate case. Not so efficient.
        // TODO: improve it
        Matrix dpq = MatrixUtils.Inverse(covp + covq + sigma);
        Vector meanDiff = mp - mq;
        double dist2 = dpq.QuadraticForm(meanDiff);
        // !! Infer.NET's Determinant() has a bug
        double dpqDet = MatrixUtils.Determinant(dpq);
        double z = Math.Sqrt(dpqDet * detSigma);
        double eval = z * Math.Exp(-0.5 * dist2);
        return eval;
    }
}
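// Illustrative sketch (not part of the original class): in the 1-D branch above, Eval
// reduces to the closed-form expected value of a Gaussian kernel with width w2 under
// the two input Gaussians N(mp, vp) and N(mq, vq):
//   k(p, q) = sqrt(w2 / (vp + vq + w2)) * exp(-0.5 * (mp - mq)^2 / (vp + vq + w2)).
// The helper name and its standalone form below are assumptions for illustration only.
private static double ExpectedGaussianKernel1DSketch(double mp, double vp, double mq, double vq, double w2)
{
    double denom = vp + vq + w2;        // combined variance plus kernel width
    double meanDiff = mp - mq;
    return Math.Sqrt(w2 / denom) * Math.Exp(-0.5 * meanDiff * meanDiff / denom);
}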
private ProjectedGeometry MeshToProjectedGeometry(MeshGeometry3D mesh, Transform3D tx)
{
    MeshOperations.RemoveNullFields(mesh);

    if (mesh.TriangleIndices.Count == 0)
    {
        // Having triangle indices in a mesh isn't required.
        // Generate them if they don't exist.
        MeshOperations.GenerateTriangleIndices(mesh);
    }
    else
    {
        // If we didn't generate them, there could be bad indices in there.
        // Remove the triangles that would cause our renderer problems in the future.
        MeshOperations.RemoveBogusTriangles(mesh);
    }

    // Having texture coordinates in a mesh isn't required.
    // Generate them if they don't exist.
    if (mesh.TextureCoordinates.Count == 0)
    {
        MeshOperations.GenerateTextureCoordinates(mesh);
    }

    // We need explicit normal information, so calculate normals when they're missing.
    if (mesh.Normals.Count != mesh.Positions.Count)
    {
        // We default to counter-clockwise winding order ...
        MeshOperations.GenerateNormals(mesh, false);
    }

    Point3DCollection positions = mesh.Positions;
    Vector3DCollection normals = mesh.Normals;
    PointCollection textureCoordinates = mesh.TextureCoordinates;
    Int32Collection triangleIndices = mesh.TriangleIndices;

    // We think of the following coordinate systems:
    //   Model space:       Coordinates which are model-local
    //   World space:       Coordinates which relate all the models
    //   Eye space:         Coordinates where the eye point is (0,0,0) and is looking down -Z
    //   Homogeneous space: Projected to a canonical rectangular solid view volume, (-1,-1,0) -> (1,1,1)
    //   Screen space:      Scaled homogeneous space so that X,Y correspond to X,Y pixel values on the screen
    Matrix3D modelToWorld = tx.Value;
    Matrix3D worldToEye = MatrixUtils.ViewMatrix(camera);
    Matrix3D modelToEyeMatrix = modelToWorld * worldToEye;
    Matrix3D normalTransform = MatrixUtils.MakeNormalTransform(modelToEyeMatrix);
    Matrix3D projectionMatrix = MatrixUtils.ProjectionMatrix(camera);
    Matrix3D toScreenSpace = MatrixUtils.HomogenousToScreenMatrix(bounds.ViewportBounds, camera is ProjectionCamera);
    Matrix3D projectToViewport = projectionMatrix * toScreenSpace;

    // A negative determinant means the model-to-eye transform mirrors the mesh,
    // which flips the triangle winding order.
    bool reverseWinding = MatrixUtils.Determinant(modelToEyeMatrix) < 0;

    MeshProjectedGeometry pg = new MeshProjectedGeometry(projectToViewport);
    int numTriangles = triangleIndices.Count / 3;
    Vertex v1, v2, v3;
    int index;

    for (int n = 0; n < numTriangles; n++)
    {
        v1 = new Vertex();
        v2 = new Vertex();
        v3 = new Vertex();

        // We default material colors to pure white (0xff,0xff,0xff) since
        // we will illuminate against that and then modulate with
        // the actual textures on a per-pixel level.
        v1.Color = Colors.White;
        v2.Color = Colors.White;
        v3.Color = Colors.White;

        index = triangleIndices[n * 3];
        v1.ModelSpacePosition = positions[index];
        v1.ModelSpaceNormal = normals[index];
        v1.Position = MatrixUtils.Transform((Point4D)positions[index], modelToEyeMatrix);
        v1.Normal = MatrixUtils.Transform(normals[index], normalTransform);
        v1.TextureCoordinates = textureCoordinates[index];

        index = triangleIndices[n * 3 + 1];
        v2.ModelSpacePosition = positions[index];
        v2.ModelSpaceNormal = normals[index];
        v2.Position = MatrixUtils.Transform((Point4D)positions[index], modelToEyeMatrix);
        v2.Normal = MatrixUtils.Transform(normals[index], normalTransform);
        v2.TextureCoordinates = textureCoordinates[index];

        index = triangleIndices[n * 3 + 2];
        v3.ModelSpacePosition = positions[index];
        v3.ModelSpaceNormal = normals[index];
        v3.Position = MatrixUtils.Transform((Point4D)positions[index], modelToEyeMatrix);
        v3.Normal = MatrixUtils.Transform(normals[index], normalTransform);
        v3.TextureCoordinates = textureCoordinates[index];

        if (reverseWinding)
        {
            // Change winding order so the mesh renders front-facing.
            pg.AddTriangle(v1, v3, v2);
        }
        else
        {
            pg.AddTriangle(v1, v2, v3);
        }
    }

    pg.NormalizeTextureCoordinates();
    return pg;
}
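// Illustrative sketch (assumed helper, not part of the original class): the per-vertex loop
// above transforms positions only into eye space and defers projection to screen space to
// MeshProjectedGeometry via projectToViewport. For a single point, the full pipeline would
// compose the same matrices in WPF's row-vector order (model -> world -> eye -> clip -> screen).
// The helper name is hypothetical; Matrix3D.Transform applies the composed matrix to the point.
private static Point3D ModelToScreenSketch(
    Point3D modelPoint,
    Matrix3D modelToWorld,
    Matrix3D worldToEye,
    Matrix3D projectionMatrix,
    Matrix3D toScreenSpace)
{
    Matrix3D modelToScreen = modelToWorld * worldToEye * projectionMatrix * toScreenSpace;
    return modelToScreen.Transform(modelPoint);
}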