public override void MakeOrthoMatrix(Radian fov, Real aspectRatio, Real near, Real far, out Matrix4 dest, bool forGpuPrograms)
        {
            // fov is a Radian and therefore already in radians; no degree conversion needed
            float thetaY = (float)fov * 0.5f;
            float tanThetaY = Utility.Tan(thetaY);
            float tanThetaX = tanThetaY * aspectRatio;

            float halfW = tanThetaX * near;
            float halfH = tanThetaY * near;

            var w = 1.0f / (halfW);
            var h = 1.0f / (halfH);
            var q = 0.0f;

            if (far != 0)
            {
                q = 1.0f / (far - near);
            }

            dest = Matrix4.Zero;
            dest.m00 = w;
            dest.m11 = h;
            dest.m22 = q;
            dest.m23 = -near / (far - near);
            dest.m33 = 1;

            if (forGpuPrograms)
            {
                dest.m22 = -dest.m22;
            }
        }
		public override void MakeOrthoMatrix( Radian fovy, Real aspectRatio, Real near, Real far, out Matrix4 dest,
		                                      bool forGpuPrograms )
		{
			var thetaY = fovy/2.0f;
			var tanThetaY = Utility.Tan( thetaY );
			var tanThetaX = tanThetaY*aspectRatio;

			var half_w = tanThetaX*near;
			var half_h = tanThetaY*near;

			var iw = 1.0f/( half_w );
			var ih = 1.0f/( half_h );
			Real q = 0.0f;

			if ( far != 0 )
			{
				q = 1.0/( far - near );
			}

			dest = Matrix4.Zero;
			dest.m00 = iw;
			dest.m11 = ih;
			dest.m22 = q;
			dest.m23 = -near/( far - near );
			dest.m33 = 1;

			if ( forGpuPrograms )
			{
				dest.m22 = -dest.m22;
			}
		}
        public override void ConvertProjectionMatrix(Matrix4 mat, out Matrix4 dest, bool forGpuProgram)
        {
            dest = new Matrix4(mat.m00, mat.m01, mat.m02, mat.m03,
                                mat.m10, mat.m11, mat.m12, mat.m13,
                                mat.m20, mat.m21, mat.m22, mat.m23,
                                mat.m30, mat.m31, mat.m32, mat.m33);

            // Convert depth range from [-1,+1] to [0,1]
            dest.m20 = (dest.m20 + dest.m30) / 2.0f;
            dest.m21 = (dest.m21 + dest.m31) / 2.0f;
            dest.m22 = (dest.m22 + dest.m32) / 2.0f;
            dest.m23 = (dest.m23 + dest.m33) / 2.0f;

            if ( forGpuProgram )
                return;
            // Convert right-handed to left-handed
            dest.m02 = -dest.m02;
            dest.m12 = -dest.m12;
            dest.m22 = -dest.m22;
            dest.m32 = -dest.m32;
        }
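For reference, the depth-range conversion above amounts to a single row operation on the projection matrix; as a sketch of the algebra (not code from the source), averaging the third and fourth rows gives

$$ z' = \tfrac{1}{2}\,(z + w) \quad\Longrightarrow\quad \frac{z'}{w} = \frac{1}{2}\Bigl(\frac{z}{w} + 1\Bigr), $$

so a normalized depth in [-1, +1] lands in [0, 1] while x, y and w are left untouched.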
        public override void MakeProjectionMatrix(Radian fov, Real aspectRatio, Real near, Real far, out Matrix4 dest, bool forGpuProgram)
        {
            // fov is a Radian and therefore already in radians; no degree conversion needed
            float theta = (float)fov * 0.5f;
            float h = 1.0f / Utility.Tan(theta);
            float w = h / aspectRatio;
            float q, qn;

            if (far == 0)
            {
                q = 1 - Frustum.InfiniteFarPlaneAdjust;
                qn = near * (Frustum.InfiniteFarPlaneAdjust - 1);
            }
            else
            {
                q = far / (far - near);
                qn = -q * near;
            }

            dest = Matrix4.Zero;

            dest.m00 = w;
            dest.m11 = h;

            if (forGpuProgram)
            {
                dest.m22 = -q;
                dest.m32 = -1.0f;
            }
            else
            {
                dest.m22 = q;
                dest.m32 = 1.0f;
            }

            dest.m23 = qn;
        }
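Written out with Axiom's row-major mRC fields and the column-vector convention used by Multiply(Matrix4, Vector3) elsewhere in these examples, the finite-far, GPU-program branch above assembles (a sketch, not a quotation from the source)

$$
P =
\begin{bmatrix}
w & 0 & 0 & 0\\
0 & h & 0 & 0\\
0 & 0 & -q & q_n\\
0 & 0 & -1 & 0
\end{bmatrix},
\qquad q = \frac{\mathit{far}}{\mathit{far}-\mathit{near}},\quad q_n = -q\,\mathit{near},
$$

so a view-space point at z = -near maps to depth 0 and one at z = -far maps to depth 1.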
Example #5
			public override void GetWorldTransforms( Matrix4[] matrices )
			{
				if ( this.parent.BoneMatrixCount == 0 )
				{
					matrices[ 0 ] = this.parent.ParentNodeFullTransform;
				}
				else
				{
					// pretransformed
					matrices[ 0 ] = Matrix4.Identity;
				}
			}
Example #6
		public void ManualRender( RenderOperation op,
								  Pass pass,
								  Viewport vp,
								  Matrix4 worldMatrix,
								  Matrix4 viewMatrix,
								  Matrix4 projMatrix )
		{
			this.ManualRender( op, pass, vp, worldMatrix, viewMatrix, projMatrix, false );
		}
Example #7
		/// <summary>
		///		Gets the world transforms from the underlying overlay.
		/// </summary>
		/// <param name="matrices">Array to receive the world transform matrices.</param>
		public void GetWorldTransforms( Matrix4[] matrices )
		{
			overlay.GetWorldTransforms( matrices );
		}
		protected static void ProcessManualProgramParam( bool isNamed, string commandName, string[] parameters,
		                                                 MaterialScriptContext context, int index, string paramName )
		{
			// NB: we assume that the first element of 'parameters' is taken up with either
			// the index or the parameter name, which we ignore

			int dims, roundedDims;
			bool isReal;
			var isMatrix4x4 = false;
			var type = parameters[ 1 ].ToLower();

			if ( type == "matrix4x4" )
			{
				dims = 16;
				isReal = true;
				isMatrix4x4 = true;
			}
			else if ( type.IndexOf( "float" ) != -1 )
			{
				if ( type == "float" )
				{
					dims = 1;
				}
				else
				{
					// the first 5 letters are "float", get the dim indicator at the end
					// this handles entries like 'float4'
					dims = int.Parse( type.Substring( 5 ) );
				}

				isReal = true;
			}
			else if ( type.IndexOf( "int" ) != -1 )
			{
				if ( type == "int" )
				{
					dims = 1;
				}
				else
				{
					// the first 3 letters are "int", get the dim indicator at the end
					dims = int.Parse( type.Substring( 3 ) );
				}

				isReal = false;
			}
			else
			{
				LogParseError( context, "Invalid {0} attribute - unrecognized parameter type {1}.", commandName, type );
				return;
			}

			// make sure we have enough params for this type's size
			if ( parameters.Length != 2 + dims )
			{
				LogParseError( context, "Invalid {0} attribute - you need {1} parameters for a parameter of type {2}", commandName,
				               2 + dims, type );
				return;
			}

			// clear any auto parameter bound to this constant, it would override this setting
			// can cause problems overriding materials or changing default params
			if ( isNamed )
			{
				context.programParams.ClearNamedAutoConstant( paramName );
			}
			else
			{
				context.programParams.ClearAutoConstant( index );
			}


			// Round dims to multiple of 4
			if ( dims%4 != 0 )
			{
				roundedDims = dims + 4 - ( dims%4 );
			}
			else
			{
				roundedDims = dims;
			}

			int i;

			// now parse all the values
			if ( isReal )
			{
				var realBuffer = new float[roundedDims];

				// do specified values
				for ( i = 0; i < dims; i++ )
				{
					realBuffer[ i ] = StringConverter.ParseFloat( parameters[ i + 2 ] );
				}

				// fill up to multiple of 4 with zero
				for ( ; i < roundedDims; i++ )
				{
					realBuffer[ i ] = 0.0f;
				}

				if ( isMatrix4x4 )
				{
					// it's a Matrix4x4, so pass it as a Matrix4
					// use the specialized SetConstant overload that takes a matrix, so the matrix is transposed if required
					var m4X4 = new Matrix4( realBuffer[ 0 ], realBuffer[ 1 ], realBuffer[ 2 ], realBuffer[ 3 ], realBuffer[ 4 ],
					                        realBuffer[ 5 ], realBuffer[ 6 ], realBuffer[ 7 ], realBuffer[ 8 ], realBuffer[ 9 ],
					                        realBuffer[ 10 ], realBuffer[ 11 ], realBuffer[ 12 ], realBuffer[ 13 ], realBuffer[ 14 ],
					                        realBuffer[ 15 ] );
					if ( isNamed )
					{
						context.programParams.SetNamedConstant( paramName, m4X4 );
					}
					else
					{
						context.programParams.SetConstant( index, m4X4 );
					}
				}
				else
				{
					// Set
					if ( isNamed )
					{
						// For named, only set up to the precise number of elements
						// (no rounding to 4 elements)
						// GLSL can support sub-float4 elements and we support that
						// in the buffer now. Note how we set the 'multiple' param to 1
						context.programParams.SetNamedConstant( paramName, realBuffer, dims, 1 );
					}
					else
					{
						context.programParams.SetConstant( index, realBuffer, (int)( roundedDims*0.25 ) );
					}
				}
			}
			else
			{
				var buffer = new int[roundedDims];

				// do specified values
				for ( i = 0; i < dims; i++ )
				{
					buffer[ i ] = int.Parse( parameters[ i + 2 ] );
				}

				// fill up to multiple of 4 with zero
				for ( ; i < roundedDims; i++ )
				{
					buffer[ i ] = 0;
				}

				context.programParams.SetConstant( index, buffer, (int)( roundedDims*0.25 ) );
			}
		}
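As a rough illustration of the expected input (the parameter name and values below are invented for illustration, not taken from any particular material script), a named float4 declaration would reach this method roughly as:

// Hypothetical input for ProcessManualProgramParam; parameters[0] is the name/index
// (ignored here) and parameters[1] selects the type.
var parameters = new[] { "myColour", "float4", "1", "0", "0", "1" };
// type = "float4" -> dims = 4, isReal = true, roundedDims = 4, and the four parsed
// floats are forwarded to SetNamedConstant/SetConstant as a single float4 constant.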
Example #9
		/// <summary>
		///		Multiplies (transforms) a <see cref="Vector4"/> by a <see cref="Matrix4"/>.
		/// </summary>
		/// <param name="vector">The vector to transform.</param>
		/// <param name="matrix">The matrix to multiply the vector by.</param>
		/// <returns>The transformed vector.</returns>
		public static Vector4 Multiply( Vector4 vector, Matrix4 matrix )
		{
			return vector*matrix;
		}
Example #10
		private void _generateCurvedPlaneVertexData( HardwareVertexBuffer vbuf, int ySegments, int xSegments,
		                                             float xSpace, float halfWidth, float ySpace, float halfHeight,
		                                             Matrix4 transform, bool firstTime, bool normals, Matrix4 rotation,
		                                             float curvature, int numTexCoordSets, float xTexCoord, float yTexCoord,
		                                             SubMesh subMesh, ref Vector3 min, ref Vector3 max,
		                                             ref float maxSquaredLength )
		{
			Vector3 vec;
			unsafe
			{
				// lock the vertex buffer
				IntPtr data = vbuf.Lock( BufferLocking.Discard );

				float* pData = (float*)data.ToPointer();

				for ( int y = 0; y <= ySegments; y++ )
				{
					for ( int x = 0; x <= xSegments; x++ )
					{
						// centered on origin
						vec.x = ( x * xSpace ) - halfWidth;
						vec.y = ( y * ySpace ) - halfHeight;

						// Here's where curved plane is different from standard plane.  Amazing, I know.
						Real diff_x = ( x - ( (Real)xSegments / 2 ) ) / (Real)xSegments;
						Real diff_y = ( y - ( (Real)ySegments / 2 ) ) / (Real)ySegments;
						Real dist = Utility.Sqrt( diff_x * diff_x + diff_y * diff_y );
						vec.z = ( -Utility.Sin( ( 1 - dist ) * ( Utility.PI / 2 ) ) * curvature ) + curvature;

						// Transform by orientation and distance
						Vector3 pos = transform.TransformAffine( vec );

						*pData++ = pos.x;
						*pData++ = pos.y;
						*pData++ = pos.z;

						// Build bounds as we go
						if ( firstTime )
						{
							min = vec;
							max = vec;
							maxSquaredLength = vec.LengthSquared;
							firstTime = false;
						}
						else
						{
							min.Floor( vec );
							max.Ceil( vec );
							maxSquaredLength = Utility.Max( maxSquaredLength, vec.LengthSquared );
						}

						if ( normals )
						{
							// This part is kinda 'wrong' for curved planes... but curved planes are
							//   rarely used outside sky planes, which don't typically need normals,
							//   so I'm not going to mess with it for now.

							// Default normal is along unit Z
							//vec = Vector3::UNIT_Z;
							// Rotate
							vec = rotation.TransformAffine( vec );

							*pData++ = vec.x;
							*pData++ = vec.y;
							*pData++ = vec.z;
						}

						for ( int i = 0; i < numTexCoordSets; i++ )
						{
							*pData++ = x * xTexCoord;
							*pData++ = 1 - ( y * yTexCoord );
						} // for texCoords
					} // for x
				} // for y

				// unlock the buffer
				vbuf.Unlock();

				subMesh.useSharedVertices = true;

			} // unsafe
		}
		public override void ConvertProjectionMatrix( Matrix4 mat, out Matrix4 dest, bool forGpuProgram )
		{
			dest = mat;

			// Convert depth range from [-1,+1] to [0,1]
			dest.m20 = ( dest.m20 + dest.m30 )/2;
			dest.m21 = ( dest.m21 + dest.m31 )/2;
			dest.m22 = ( dest.m22 + dest.m32 )/2;
			dest.m23 = ( dest.m23 + dest.m33 )/2;

			if ( forGpuProgram )
			{
				return;
			}
			// Convert right-handed to left-handed
			dest.m02 = -dest.m02;
			dest.m12 = -dest.m12;
			dest.m22 = -dest.m22;
			dest.m32 = -dest.m32;
		}
Example #12
		/// <summary>
		///		Used to add two matrices together.
		/// </summary>
		/// <param name="left"></param>
		/// <param name="right"></param>
		/// <returns></returns>
		public static Matrix4 Add( Matrix4 left, Matrix4 right )
		{
			return left + right;
		}
Example #13
		/// <summary>
		///		Used to multiply a Matrix4 object by a scalar value.
		/// </summary>
		/// <returns></returns>
		public static Matrix4 operator *( Matrix4 left, Real scalar )
		{
			Matrix4 result = new Matrix4();

			result.m00 = left.m00 * scalar;
			result.m01 = left.m01 * scalar;
			result.m02 = left.m02 * scalar;
			result.m03 = left.m03 * scalar;

			result.m10 = left.m10 * scalar;
			result.m11 = left.m11 * scalar;
			result.m12 = left.m12 * scalar;
			result.m13 = left.m13 * scalar;

			result.m20 = left.m20 * scalar;
			result.m21 = left.m21 * scalar;
			result.m22 = left.m22 * scalar;
			result.m23 = left.m23 * scalar;

			result.m30 = left.m30 * scalar;
			result.m31 = left.m31 * scalar;
			result.m32 = left.m32 * scalar;
			result.m33 = left.m33 * scalar;

			return result;
		}
Example #14
		/// <summary>
		///		Transforms a plane using the specified transform.
		/// </summary>
		/// <param name="matrix">Transformation matrix.</param>
		/// <param name="plane">Plane to transform.</param>
		/// <returns>A transformed plane.</returns>
		public static Plane Multiply( Matrix4 matrix, Plane plane )
		{
			return matrix * plane;
		}
Example #15
		/// <summary>
		///		Transforms the given 3-D vector by the matrix, projecting the 
		///		result back into <i>w</i> = 1.
		///		<p/>
		///		This means that the initial <i>w</i> is considered to be 1.0,
		///		and then all three elements of the resulting 3-D vector are
		///		divided by the resulting <i>w</i>.
		/// </summary>
		/// <param name="matrix">A Matrix4.</param>
		/// <param name="vector">A Vector3.</param>
		/// <returns>A new vector.</returns>
		public static Vector3 Multiply( Matrix4 matrix, Vector3 vector )
		{
			return matrix * vector;
		}
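In equation form, the projection described in the summary is (a restatement, not additional behaviour):

$$
\begin{bmatrix} x' \\ y' \\ z' \\ w' \end{bmatrix}
= M \begin{bmatrix} v_x \\ v_y \\ v_z \\ 1 \end{bmatrix},
\qquad
v_{\text{out}} = \frac{1}{w'}\begin{bmatrix} x' \\ y' \\ z' \end{bmatrix}.
$$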
Example #16
		/// <summary>
		///		Used to add two matrices together.
		/// </summary>
		/// <param name="left"></param>
		/// <param name="right"></param>
		/// <returns></returns>
		public static Matrix4 operator +( Matrix4 left, Matrix4 right )
		{
			Matrix4 result = new Matrix4();

			result.m00 = left.m00 + right.m00;
			result.m01 = left.m01 + right.m01;
			result.m02 = left.m02 + right.m02;
			result.m03 = left.m03 + right.m03;

			result.m10 = left.m10 + right.m10;
			result.m11 = left.m11 + right.m11;
			result.m12 = left.m12 + right.m12;
			result.m13 = left.m13 + right.m13;

			result.m20 = left.m20 + right.m20;
			result.m21 = left.m21 + right.m21;
			result.m22 = left.m22 + right.m22;
			result.m23 = left.m23 + right.m23;

			result.m30 = left.m30 + right.m30;
			result.m31 = left.m31 + right.m31;
			result.m32 = left.m32 + right.m32;
			result.m33 = left.m33 + right.m33;

			return result;
		}
Example #17
		/// <summary>
		///		Used to subtract two matrices.
		/// </summary>
		/// <param name="left"></param>
		/// <param name="right"></param>
		/// <returns></returns>
		public static Matrix4 Subtract( Matrix4 left, Matrix4 right )
		{
			return left - right;
		}
Example #18
		private void _generatePlaneVertexData( HardwareVertexBuffer vbuf, int ySegments, int xSegments,
		                                       float xSpace, float halfWidth, float ySpace, float halfHeight,
		                                       Matrix4 transform, bool firstTime, bool normals, Matrix4 rotation,
		                                       int numTexCoordSets, float xTexCoord, float yTexCoord,
		                                       SubMesh subMesh, ref Vector3 min, ref Vector3 max,
		                                       ref float maxSquaredLength )
		{
			Vector3 vec;
			unsafe
			{
				// lock the vertex buffer
				IntPtr data = vbuf.Lock( BufferLocking.Discard );

				float* pData = (float*)data.ToPointer();

				for ( int y = 0; y <= ySegments; y++ )
				{
					for ( int x = 0; x <= xSegments; x++ )
					{
						// centered on origin
						vec.x = ( x * xSpace ) - halfWidth;
						vec.y = ( y * ySpace ) - halfHeight;
						vec.z = 0.0f;

						vec = transform.TransformAffine( vec );

						*pData++ = vec.x;
						*pData++ = vec.y;
						*pData++ = vec.z;

						// Build bounds as we go
						if ( firstTime )
						{
							min = vec;
							max = vec;
							maxSquaredLength = vec.LengthSquared;
							firstTime = false;
						}
						else
						{
							min.Floor( vec );
							max.Ceil( vec );
							maxSquaredLength = Utility.Max( maxSquaredLength, vec.LengthSquared );
						}

						if ( normals )
						{
							vec = Vector3.UnitZ;
							vec = rotation.TransformAffine( vec );

							*pData++ = vec.x;
							*pData++ = vec.y;
							*pData++ = vec.z;
						}

						for ( int i = 0; i < numTexCoordSets; i++ )
						{
							*pData++ = x * xTexCoord;
							*pData++ = 1 - ( y * yTexCoord );
						} // for texCoords
					} // for x
				} // for y

				// unlock the buffer
				vbuf.Unlock();

				subMesh.useSharedVertices = true;

			} // unsafe
		}
Example #19
		/// <summary>
		///		Used to subtract two matrices.
		/// </summary>
		/// <param name="left"></param>
		/// <param name="right"></param>
		/// <returns></returns>
		public static Matrix4 operator -( Matrix4 left, Matrix4 right )
		{
			Matrix4 result = new Matrix4();

			result.m00 = left.m00 - right.m00;
			result.m01 = left.m01 - right.m01;
			result.m02 = left.m02 - right.m02;
			result.m03 = left.m03 - right.m03;

			result.m10 = left.m10 - right.m10;
			result.m11 = left.m11 - right.m11;
			result.m12 = left.m12 - right.m12;
			result.m13 = left.m13 - right.m13;

			result.m20 = left.m20 - right.m20;
			result.m21 = left.m21 - right.m21;
			result.m22 = left.m22 - right.m22;
			result.m23 = left.m23 - right.m23;

			result.m30 = left.m30 - right.m30;
			result.m31 = left.m31 - right.m31;
			result.m32 = left.m32 - right.m32;
			result.m33 = left.m33 - right.m33;

			return result;
		}
Example #20
		private void _generateCurvedIllusionPlaneVertexData( HardwareVertexBuffer vertexBuffer, int ySegments, int xSegments,
		                                                     float xSpace, float halfWidth, float ySpace, float halfHeight,
		                                                     Matrix4 xform, bool firstTime, bool normals,
		                                                     Quaternion orientation, float curvature, float uTiles, float vTiles,
		                                                     int numberOfTexCoordSets, ref Vector3 min, ref Vector3 max,
		                                                     ref float maxSquaredLength )
		{
			// Imagine a large sphere with the camera located near the top
			// The lower the curvature, the larger the sphere
			// Use the angle from viewer to the points on the plane
			// Credit to Aftershock for the general approach
			Real cameraPosition;      // Camera position relative to sphere center

			// Derive sphere radius
			//Vector3 vertPos;  // position relative to camera
			//Real sphDist;      // Distance from camera to sphere along box vertex vector
			// Vector3 camToSph; // camera position to sphere
			Real sphereRadius;// Sphere radius
			// Actual values irrelevant, it's the relation between sphere radius and camera position that's important
			Real sphRadius = 100.0f;
			Real camDistance = 5.0f;

			sphereRadius = sphRadius - curvature;
			cameraPosition = sphereRadius - camDistance;

			Vector3 vec;
			Vector3 norm;
			float sphereDistance;
			unsafe
			{
				// lock the vertex buffer
				IntPtr data = vertexBuffer.Lock( BufferLocking.Discard );

				float* pData = (float*)data.ToPointer();

				for ( int y = 0; y < ySegments + 1; ++y )
				{
					for ( int x = 0; x < xSegments + 1; ++x )
					{
						// centered on origin
						vec.x = ( x * xSpace ) - halfWidth;
						vec.y = ( y * ySpace ) - halfHeight;
						vec.z = 0.0f;

						// transform by orientation and distance
						vec = xform * vec;

						// assign to geometry
						*pData++ = vec.x;
						*pData++ = vec.y;
						*pData++ = vec.z;

						// build bounds as we go
						if ( firstTime )
						{
							min = vec;
							max = vec;
							maxSquaredLength = vec.LengthSquared;
							firstTime = false;
						}
						else
						{
							min.Floor( vec );
							max.Ceil( vec );
							maxSquaredLength = Utility.Max( maxSquaredLength, vec.LengthSquared );
						}

						if ( normals )
						{
							norm = Vector3.UnitZ;
							norm = orientation * norm;

							// write the rotated normal (norm), not the position (vec)
							*pData++ = norm.x;
							*pData++ = norm.y;
							*pData++ = norm.z;
						}

						// generate texture coordinates, normalize position, modify by orientation to return +y up
						vec = orientation.Inverse() * vec;
						vec.Normalize();

						// find distance to sphere
						sphereDistance = Utility.Sqrt( cameraPosition * cameraPosition * ( vec.y * vec.y - 1.0f ) + sphereRadius * sphereRadius ) - cameraPosition * vec.y;

						vec.x *= sphereDistance;
						vec.z *= sphereDistance;

						// use x and y on sphere as texture coordinates, tiled
						float s = vec.x * ( 0.01f * uTiles );
						float t = vec.z * ( 0.01f * vTiles );
						for ( int i = 0; i < numberOfTexCoordSets; i++ )
						{
							*pData++ = s;
							*pData++ = ( 1 - t );
						}
					} // x
				} // y

				// unlock the buffer
				vertexBuffer.Unlock();
			} // unsafe
		}
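The sphereDistance expression above is the positive root of a ray/sphere intersection: with the camera sitting at height c = cameraPosition on the sphere's axis, a unit direction v, and sphere radius r = sphereRadius, solving |c·ŷ + d·v| = r for d gives (a sketch of the algebra)

$$
d^2 + 2\,c\,v_y\,d + c^2 - r^2 = 0
\quad\Longrightarrow\quad
d = \sqrt{c^2\,(v_y^2 - 1) + r^2} \;-\; c\,v_y,
$$

which is exactly the value assigned to sphereDistance before scaling vec.x and vec.z.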
Example #21
		/// <summary>
		///		Performs a software indexed vertex blend, of the kind used for
		///		skeletal animation although it can be used for other purposes.
		/// </summary>
		/// <remarks>
		///		This function is supplied to update vertex data with blends
		///		done in software, either because no hardware support is available,
		///		or that you need the results of the blend for some other CPU operations.
		/// </remarks>
		/// <param name="sourceVertexData">
		///		<see cref="VertexData"/> class containing positions, normals, blend indices and blend weights.
		///	</param>
		/// <param name="targetVertexData">
		///		<see cref="VertexData"/> class containing target position
		///		and normal buffers which will be updated with the blended versions.
		///		Note that the layout of the source and target position / normal
		///		buffers must be identical, ie they must use the same buffer indexes.
		/// </param>
		/// <param name="matrices">An array of matrices to be used to blend.</param>
		/// <param name="blendNormals">If true, normals are blended as well as positions.</param>
		/// <param name="blendTangents"></param>
		/// <param name="blendBinorms"></param>
		public static void SoftwareVertexBlend( VertexData sourceVertexData, VertexData targetVertexData, Matrix4[] matrices,
		                                        bool blendNormals, bool blendTangents, bool blendBinorms )
		{
			// Source vectors
			var sourcePos = Vector3.Zero;
			var sourceNorm = Vector3.Zero;
			var sourceTan = Vector3.Zero;
			var sourceBinorm = Vector3.Zero;
			// Accumulation vectors
			var accumVecPos = Vector3.Zero;
			var accumVecNorm = Vector3.Zero;
			var accumVecTan = Vector3.Zero;
			var accumVecBinorm = Vector3.Zero;

			HardwareVertexBuffer srcPosBuf = null, srcNormBuf = null, srcTanBuf = null, srcBinormBuf = null;
			HardwareVertexBuffer destPosBuf = null, destNormBuf = null, destTanBuf = null, destBinormBuf = null;
			HardwareVertexBuffer srcIdxBuf = null, srcWeightBuf = null;

			var weightsIndexesShareBuffer = false;

			// Get elements for source
			var srcElemPos = sourceVertexData.vertexDeclaration.FindElementBySemantic( VertexElementSemantic.Position );
			var srcElemNorm = sourceVertexData.vertexDeclaration.FindElementBySemantic( VertexElementSemantic.Normal );
			var srcElemTan = sourceVertexData.vertexDeclaration.FindElementBySemantic( VertexElementSemantic.Tangent );
			var srcElemBinorm = sourceVertexData.vertexDeclaration.FindElementBySemantic( VertexElementSemantic.Binormal );
			var srcElemBlendIndices =
				sourceVertexData.vertexDeclaration.FindElementBySemantic( VertexElementSemantic.BlendIndices );
			var srcElemBlendWeights =
				sourceVertexData.vertexDeclaration.FindElementBySemantic( VertexElementSemantic.BlendWeights );

			Debug.Assert( srcElemPos != null && srcElemBlendIndices != null && srcElemBlendWeights != null,
			              "You must supply at least positions, blend indices and blend weights" );

			// Get elements for target
			var destElemPos = targetVertexData.vertexDeclaration.FindElementBySemantic( VertexElementSemantic.Position );
			var destElemNorm = targetVertexData.vertexDeclaration.FindElementBySemantic( VertexElementSemantic.Normal );
			var destElemTan = targetVertexData.vertexDeclaration.FindElementBySemantic( VertexElementSemantic.Tangent );
			var destElemBinorm = targetVertexData.vertexDeclaration.FindElementBySemantic( VertexElementSemantic.Binormal );

			// Do we have normals and want to blend them?
			var includeNormals = blendNormals && ( srcElemNorm != null ) && ( destElemNorm != null );
			var includeTangents = blendTangents && ( srcElemTan != null ) && ( destElemTan != null );
			var includeBinormals = blendBinorms && ( srcElemBinorm != null ) && ( destElemBinorm != null );

			// Get buffers for source
			srcPosBuf = sourceVertexData.vertexBufferBinding.GetBuffer( srcElemPos.Source );
			srcIdxBuf = sourceVertexData.vertexBufferBinding.GetBuffer( srcElemBlendIndices.Source );
			srcWeightBuf = sourceVertexData.vertexBufferBinding.GetBuffer( srcElemBlendWeights.Source );
			if ( includeNormals )
			{
				srcNormBuf = sourceVertexData.vertexBufferBinding.GetBuffer( srcElemNorm.Source );
			}
			if ( includeTangents )
			{
				srcTanBuf = sourceVertexData.vertexBufferBinding.GetBuffer( srcElemTan.Source );
			}
			if ( includeBinormals )
			{
				srcBinormBuf = sourceVertexData.vertexBufferBinding.GetBuffer( srcElemBinorm.Source );
			}

			// note: reference comparison
			weightsIndexesShareBuffer = ( srcIdxBuf == srcWeightBuf );

			// Get buffers for target
			destPosBuf = targetVertexData.vertexBufferBinding.GetBuffer( destElemPos.Source );
			if ( includeNormals )
			{
				destNormBuf = targetVertexData.vertexBufferBinding.GetBuffer( destElemNorm.Source );
			}
			if ( includeTangents )
			{
				destTanBuf = targetVertexData.vertexBufferBinding.GetBuffer( destElemTan.Source );
			}
			if ( includeBinormals )
			{
				destBinormBuf = targetVertexData.vertexBufferBinding.GetBuffer( destElemBinorm.Source );
			}

			// Lock source buffers for reading
			Debug.Assert( srcElemPos.Offset == 0, "Positions must be first element in dedicated buffer!" );

#if !AXIOM_SAFE_ONLY
			unsafe
#endif
			{
#if AXIOM_SAFE_ONLY
				ITypePointer<float> pSrcNorm = null, pSrcTan = null, pSrcBinorm = null;
				ITypePointer<float> pDestNorm = null, pDestTan = null, pDestBinorm = null;
				ITypePointer<float> pBlendWeight;
#else
				float* pSrcNorm = null, pSrcTan = null, pSrcBinorm = null;
				float* pDestNorm = null, pDestTan = null, pDestBinorm = null;
				float* pBlendWeight;
#endif
				var ptr = srcPosBuf.Lock( BufferLocking.ReadOnly );
				var pSrcPos = ptr.ToFloatPointer();

				if ( includeNormals )
				{
					if ( srcNormBuf == srcPosBuf )
					{
						pSrcNorm = pSrcPos;
					}
					else
					{
						ptr = srcNormBuf.Lock( BufferLocking.ReadOnly );
						pSrcNorm = ptr.ToFloatPointer();
					}
				}
				if ( includeTangents )
				{
					if ( srcTanBuf == srcPosBuf )
					{
						pSrcTan = pSrcPos;
					}
					else if ( srcTanBuf == srcNormBuf )
					{
						pSrcTan = pSrcNorm;
					}
					else
					{
						ptr = srcTanBuf.Lock( BufferLocking.ReadOnly );
						pSrcTan = ptr.ToFloatPointer();
					}
				}
				if ( includeBinormals )
				{
					if ( srcBinormBuf == srcPosBuf )
					{
						pSrcBinorm = pSrcPos;
					}
					else if ( srcBinormBuf == srcNormBuf )
					{
						pSrcBinorm = pSrcNorm;
					}
					else if ( srcBinormBuf == srcTanBuf )
					{
						pSrcBinorm = pSrcTan;
					}
					else
					{
						ptr = srcBinormBuf.Lock( BufferLocking.ReadOnly );
						pSrcBinorm = ptr.ToFloatPointer();
					}
				}

				// Indices must be 4 bytes
				Debug.Assert( srcElemBlendIndices.Type == VertexElementType.UByte4, "Blend indices must be VET_UBYTE4" );

				ptr = srcIdxBuf.Lock( BufferLocking.ReadOnly );
				var pBlendIdx = ptr.ToBytePointer();

				if ( srcWeightBuf == srcIdxBuf )
				{
					pBlendWeight = ptr.ToFloatPointer();
				}
				else
				{
					// Lock buffer
					ptr = srcWeightBuf.Lock( BufferLocking.ReadOnly );
					pBlendWeight = ptr.ToFloatPointer();
				}

				var numWeightsPerVertex = VertexElement.GetTypeCount( srcElemBlendWeights.Type );

				// Lock destination buffers for writing
				ptr = destPosBuf.Lock( BufferLocking.Discard );
				var pDestPos = ptr.ToFloatPointer();

				if ( includeNormals )
				{
					if ( destNormBuf == destPosBuf )
					{
						pDestNorm = pDestPos;
					}
					else
					{
						ptr = destNormBuf.Lock( BufferLocking.Discard );
						pDestNorm = ptr.ToFloatPointer();
					}
				}
				if ( includeTangents )
				{
					if ( destTanBuf == destPosBuf )
					{
						pDestTan = pDestPos;
					}
					else if ( destTanBuf == destNormBuf )
					{
						pDestTan = pDestNorm;
					}
					else
					{
						ptr = destTanBuf.Lock( BufferLocking.Discard );
						pDestTan = ptr.ToFloatPointer();
					}
				}
				if ( includeBinormals )
				{
					if ( destBinormBuf == destPosBuf )
					{
						pDestBinorm = pDestPos;
					}
					else if ( destBinormBuf == destNormBuf )
					{
						pDestBinorm = pDestNorm;
					}
					else if ( destBinormBuf == destTanBuf )
					{
						pDestBinorm = pDestTan;
					}
					else
					{
						ptr = destBinormBuf.Lock( BufferLocking.Discard );
						pDestBinorm = ptr.ToFloatPointer();
					}
				}

				// Loop per vertex
				for ( var vertIdx = 0; vertIdx < targetVertexData.vertexCount; vertIdx++ )
				{
					var srcPosOffset = ( vertIdx*srcPosBuf.VertexSize + srcElemPos.Offset )/4;
					// Load source vertex elements
					sourcePos.x = pSrcPos[ srcPosOffset ];
					sourcePos.y = pSrcPos[ srcPosOffset + 1 ];
					sourcePos.z = pSrcPos[ srcPosOffset + 2 ];

					if ( includeNormals )
					{
						var srcNormOffset = ( vertIdx*srcNormBuf.VertexSize + srcElemNorm.Offset )/4;
						sourceNorm.x = pSrcNorm[ srcNormOffset ];
						sourceNorm.y = pSrcNorm[ srcNormOffset + 1 ];
						sourceNorm.z = pSrcNorm[ srcNormOffset + 2 ];
					}

					if ( includeTangents )
					{
						var srcTanOffset = ( vertIdx*srcTanBuf.VertexSize + srcElemTan.Offset )/4;
						sourceTan.x = pSrcTan[ srcTanOffset ];
						sourceTan.y = pSrcTan[ srcTanOffset + 1 ];
						sourceTan.z = pSrcTan[ srcTanOffset + 2 ];
					}

					if ( includeBinormals )
					{
						var srcBinormOffset = ( vertIdx*srcBinormBuf.VertexSize + srcElemBinorm.Offset )/4;
						sourceBinorm.x = pSrcBinorm[ srcBinormOffset ];
						sourceBinorm.y = pSrcBinorm[ srcBinormOffset + 1 ];
						sourceBinorm.z = pSrcBinorm[ srcBinormOffset + 2 ];
					}

					// Load accumulators
					accumVecPos = Vector3.Zero;
					accumVecNorm = Vector3.Zero;
					accumVecTan = Vector3.Zero;
					accumVecBinorm = Vector3.Zero;

					var blendWeightOffset = ( vertIdx*srcWeightBuf.VertexSize + srcElemBlendWeights.Offset )/4;
					var blendMatrixOffset = vertIdx*srcIdxBuf.VertexSize + srcElemBlendIndices.Offset;
					// Loop per blend weight
					for ( var blendIdx = 0; blendIdx < numWeightsPerVertex; blendIdx++ )
					{
						var blendWeight = pBlendWeight[ blendWeightOffset + blendIdx ];
						int blendMatrixIdx = pBlendIdx[ blendMatrixOffset + blendIdx ];
						// Blend by multiplying source by blend matrix and scaling by weight
						// Add to accumulator
						// NB weights must be normalised!!
						if ( blendWeight != 0.0f )
						{
							// Blend position, use 3x4 matrix
							var mat = matrices[ blendMatrixIdx ];
							BlendPosVector( ref accumVecPos, ref mat, ref sourcePos, blendWeight );

							if ( includeNormals )
							{
								// Blend normal
								// We should blend by inverse transpose here, but because we're assuming the 3x3
								// aspect of the matrix is orthogonal (no non-uniform scaling), the inverse transpose
								// is equal to the main 3x3 matrix
								// Note because it's a normal we just extract the rotational part, saves us renormalising here
								BlendDirVector( ref accumVecNorm, ref mat, ref sourceNorm, blendWeight );
							}
							if ( includeTangents )
							{
								BlendDirVector( ref accumVecTan, ref mat, ref sourceTan, blendWeight );
							}
							if ( includeBinormals )
							{
								BlendDirVector( ref accumVecBinorm, ref mat, ref sourceBinorm, blendWeight );
							}
						}
					}

					// Stored blended vertex in hardware buffer
					var dstPosOffset = ( vertIdx*destPosBuf.VertexSize + destElemPos.Offset )/4;
					pDestPos[ dstPosOffset ] = accumVecPos.x;
					pDestPos[ dstPosOffset + 1 ] = accumVecPos.y;
					pDestPos[ dstPosOffset + 2 ] = accumVecPos.z;

					// Stored blended vertex in temp buffer
					if ( includeNormals )
					{
						// Normalise
						accumVecNorm.Normalize();
						var dstNormOffset = ( vertIdx*destNormBuf.VertexSize + destElemNorm.Offset )/4;
						pDestNorm[ dstNormOffset ] = accumVecNorm.x;
						pDestNorm[ dstNormOffset + 1 ] = accumVecNorm.y;
						pDestNorm[ dstNormOffset + 2 ] = accumVecNorm.z;
					}
					// Stored blended vertex in temp buffer
					if ( includeTangents )
					{
						// Normalise
						accumVecTan.Normalize();
						var dstTanOffset = ( vertIdx*destTanBuf.VertexSize + destElemTan.Offset )/4;
						pDestTan[ dstTanOffset ] = accumVecTan.x;
						pDestTan[ dstTanOffset + 1 ] = accumVecTan.y;
						pDestTan[ dstTanOffset + 2 ] = accumVecTan.z;
					}
					// Stored blended vertex in temp buffer
					if ( includeBinormals )
					{
						// Normalise
						accumVecBinorm.Normalize();
						var dstBinormOffset = ( vertIdx*destBinormBuf.VertexSize + destElemBinorm.Offset )/4;
						pDestBinorm[ dstBinormOffset ] = accumVecBinorm.x;
						pDestBinorm[ dstBinormOffset + 1 ] = accumVecBinorm.y;
						pDestBinorm[ dstBinormOffset + 2 ] = accumVecBinorm.z;
					}
				}
				// Unlock source buffers
				srcPosBuf.Unlock();
				srcIdxBuf.Unlock();

				if ( srcWeightBuf != srcIdxBuf )
				{
					srcWeightBuf.Unlock();
				}

				if ( includeNormals && srcNormBuf != srcPosBuf )
				{
					srcNormBuf.Unlock();
				}
				if ( includeTangents && srcTanBuf != srcPosBuf && srcTanBuf != srcNormBuf )
				{
					srcTanBuf.Unlock();
				}
				if ( includeBinormals && srcBinormBuf != srcPosBuf && srcBinormBuf != srcNormBuf && srcBinormBuf != srcTanBuf )
				{
					srcBinormBuf.Unlock();
				}

				// Unlock destination buffers
				destPosBuf.Unlock();

				if ( includeNormals && destNormBuf != destPosBuf )
				{
					destNormBuf.Unlock();
				}
				if ( includeTangents && destTanBuf != destPosBuf && destTanBuf != destNormBuf )
				{
					destTanBuf.Unlock();
				}
				if ( includeBinormals && destBinormBuf != destPosBuf && destBinormBuf != destNormBuf && destBinormBuf != destTanBuf )
				{
					destBinormBuf.Unlock();
				}
			} // unsafe
		}
		public override void NotifyWorldTransforms( Matrix4[] xform )
		{
			base.NotifyWorldTransforms( xform );

			// Update children
			foreach ( var child in this.children.Values )
			{
				child.NotifyWorldTransforms( xform );
			}
		}
Example #23
		public static void BlendPosVector( ref Vector3 accumVec, ref Matrix4 mat, ref Vector3 srcVec, float blendWeight )
		{
			accumVec.x += ( mat.m00*srcVec.x + mat.m01*srcVec.y + mat.m02*srcVec.z + mat.m03 )*blendWeight;

			accumVec.y += ( mat.m10*srcVec.x + mat.m11*srcVec.y + mat.m12*srcVec.z + mat.m13 )*blendWeight;

			accumVec.z += ( mat.m20*srcVec.x + mat.m21*srcVec.y + mat.m22*srcVec.z + mat.m23 )*blendWeight;
		}
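Spelled out, the per-vertex accumulation performed by BlendPosVector (and by SoftwareVertexBlend above through it) is the usual linear-blend-skinning sum; positions use the full 3x4 affine part of each bone matrix, while directions such as normals drop the translation column (a sketch, not code from the source):

$$
p_{\text{out}} = \sum_b w_b\,\bigl(R_b\,p_{\text{in}} + t_b\bigr),
\qquad
n_{\text{out}} = \operatorname{normalize}\!\Bigl(\sum_b w_b\,R_b\,n_{\text{in}}\Bigr),
$$

where R_b is the upper-left 3x3 of matrices[b], t_b its fourth column, and the weights w_b are assumed to be normalised.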
        /// <summary>
        /// Transforms this box by the given matrix and recomputes the axis-aligned
        /// extents from the transformed corners.
        /// </summary>
        /// <param name="matrix">The transformation matrix to apply.</param>
        public void Transform( Matrix4 matrix )
        {
            // do nothing for a null box
            if( isNull || isInfinite )
            {
                return;
            }

            Vector3 min;
            Vector3 max;
            Vector3 temp;

            temp = matrix * corners[ 0 ];
            min = max = temp;

            for( int i = 1; i < corners.Length; i++ )
            {
                // Transform and check extents
                temp = matrix * corners[ i ];

                if( temp.x > max.x )
                {
                    max.x = temp.x;
                }
                else if( temp.x < min.x )
                {
                    min.x = temp.x;
                }

                if( temp.y > max.y )
                {
                    max.y = temp.y;
                }
                else if( temp.y < min.y )
                {
                    min.y = temp.y;
                }

                if( temp.z > max.z )
                {
                    max.z = temp.z;
                }
                else if( temp.z < min.z )
                {
                    min.z = temp.z;
                }
            }

            SetExtents( min, max );
        }
		public override void MakeProjectionMatrix( Real left, Real right, Real bottom, Real top, Real nearPlane, Real farPlane,
		                                           out Matrix4 dest, bool forGpuProgram )
		{
			// Correct position for off-axis projection matrix
			if ( !forGpuProgram )
			{
				var offsetX = left + right;
				var offsetY = top + bottom;

				left -= offsetX;
				right -= offsetX;
				top -= offsetY;
				bottom -= offsetY;
			}

			var width = right - left;
			var height = top - bottom;
			Real q, qn;
			if ( farPlane == 0 )
			{
				q = 1 - Frustum.InfiniteFarPlaneAdjust;
				qn = nearPlane*( Frustum.InfiniteFarPlaneAdjust - 1 );
			}
			else
			{
				q = farPlane/( farPlane - nearPlane );
				qn = -q*nearPlane;
			}
			dest = Matrix4.Zero;
			dest.m00 = 2*nearPlane/width;
			dest.m02 = ( right + left )/width;
			dest.m11 = 2*nearPlane/height;
			dest.m12 = ( top + bottom )/height;
			if ( forGpuProgram )
			{
				dest.m22 = -q;
				dest.m32 = -1.0f;
			}
			else
			{
				dest.m22 = q;
				dest.m32 = 1.0f;
			}
			dest.m23 = qn;
		}
Example #26
		/// <summary>
		/// Notifies the world transforms.
		/// </summary>
		/// <param name="xform">The xform.</param>
		public virtual void NotifyWorldTransforms( Matrix4[] xform )
		{
			this.xform = xform;
		}
		public override void SetTextureMatrix( int stage, Matrix4 xform )
		{
			// the matrix we'll apply after conv. to D3D format
			var newMat = xform;

			// cache this since it's used often
			var autoTexCoordType = this._texStageDesc[ stage ].AutoTexCoordType;

			// if a vertex program is bound, we mustn't set texture transforms
			if ( vertexProgramBound )
			{
				_setTextureStageState( stage, D3D9.TextureStage.TextureTransformFlags, (int)TextureTransform.Disable );
				return;
			}

			if ( autoTexCoordType == TexCoordCalcMethod.EnvironmentMap )
			{
				if ( ( this._deviceManager.ActiveDevice.D3D9DeviceCaps.VertexProcessingCaps &
				       D3D9.VertexProcessingCaps.TexGenSphereMap ) ==
				     D3D9.VertexProcessingCaps.TexGenSphereMap )
				{
					// inverts the texture for a spheremap
					var matEnvMap = Matrix4.Identity;
					// set env_map values
					matEnvMap.m11 = -1.0f;
					// concatenate
					newMat = newMat*matEnvMap;
				}
				else
				{
					/* If envmap is applied, but device doesn't support spheremap,
                    then we have to use texture transform to make the camera space normal
                    reference the envmap properly. This isn't exactly the same as spheremap
                    (it looks nasty on flat areas because the camera space normals are the same)
                    but it's the best approximation we have in the absence of a proper spheremap */

					// concatenate with the xform
					newMat = newMat*Matrix4.ClipSpace2DToImageSpace;
				}
			}

			// If this is a cubic reflection, we need to modify using the view matrix
			if ( autoTexCoordType == TexCoordCalcMethod.EnvironmentMapReflection )
			{
				// Get transposed 3x3, ie since D3D is transposed just copy
				// We want to transpose since that will invert an orthonormal matrix ie rotation
				var viewTransposed = Matrix4.Identity;
				viewTransposed.m00 = this._viewMatrix.m00;
				viewTransposed.m01 = this._viewMatrix.m10;
				viewTransposed.m02 = this._viewMatrix.m20;
				viewTransposed.m03 = 0.0f;

				viewTransposed.m10 = this._viewMatrix.m01;
				viewTransposed.m11 = this._viewMatrix.m11;
				viewTransposed.m12 = this._viewMatrix.m21;
				viewTransposed.m13 = 0.0f;

				viewTransposed.m20 = this._viewMatrix.m02;
				viewTransposed.m21 = this._viewMatrix.m12;
				viewTransposed.m22 = this._viewMatrix.m22;
				viewTransposed.m23 = 0.0f;

				viewTransposed.m30 = 0;
				viewTransposed.m31 = 0;
				viewTransposed.m32 = 0;
				viewTransposed.m33 = 1.0f;

				// concatenate
				newMat = newMat*viewTransposed;
			}

			if ( autoTexCoordType == TexCoordCalcMethod.ProjectiveTexture )
			{
				// Derive camera space to projector space transform
				// To do this, we need to undo the camera view matrix, then
				// apply the projector view & projection matrices
				newMat = this._viewMatrix.Inverse();

				if ( texProjRelative )
				{
					Matrix4 viewMatrix;
					this._texStageDesc[ stage ].Frustum.CalcViewMatrixRelative( texProjRelativeOrigin, out viewMatrix );
					newMat = viewMatrix*newMat;
				}
				else
				{
					newMat = this._texStageDesc[ stage ].Frustum.ViewMatrix*newMat;
				}
				newMat = this._texStageDesc[ stage ].Frustum.ProjectionMatrix*newMat;
				newMat = Matrix4.ClipSpace2DToImageSpace*newMat;
				newMat = xform*newMat;
			}

			// need this if texture is a cube map, to invert D3D's z coord
			if ( autoTexCoordType != TexCoordCalcMethod.None && autoTexCoordType != TexCoordCalcMethod.ProjectiveTexture )
			{
				newMat.m20 = -newMat.m20;
				newMat.m21 = -newMat.m21;
				newMat.m22 = -newMat.m22;
				newMat.m23 = -newMat.m23;
			}

			// convert our matrix to D3D format
			var d3dMat = D3D9Helper.MakeD3DMatrix( newMat );

			// set the matrix if it is not the identity
			if ( !D3D9Helper.IsIdentity( ref d3dMat ) )
			{
				//It seems D3D automatically adds a texture coordinate with value 1,
				//and fills the remaining texture coordinates with 0 for the input
				//texture coordinates, before passing them to the texture coordinate transformation.

				//NOTE: This differs from the D3DDECLTYPE expansion described in the
				//DirectX SDK documentation!

				//So we should prepare the texcoord transform to behave like the standard
				//vector expansion: fill w with 1 and the others with 0.
				if ( autoTexCoordType == TexCoordCalcMethod.None )
				{
					//FIXME: The actual input texture coordinate dimensions should be
					//determined by the texture coordinate vertex element. For now, just trust
					//that the user-supplied texture type matches the texture coordinate vertex element.
					if ( this._texStageDesc[ stage ].TexType == D3D9TextureType.Normal )
					{
						/* It's 2D input texture coordinate:

                        texcoord in vertex buffer     D3D expanded to     We are adjusted to
                        -->                           -->
                        (u, v)                        (u, v, 1, 0)        (u, v, 0, 1)
                        */
						Utility.Swap( ref d3dMat.M31, ref d3dMat.M41 );
						Utility.Swap( ref d3dMat.M32, ref d3dMat.M42 );
						Utility.Swap( ref d3dMat.M33, ref d3dMat.M43 );
						Utility.Swap( ref d3dMat.M34, ref d3dMat.M44 );
					}
				}
				//else
				//{
				//    // All texgen generate 3D input texture coordinates.
				//}

				// tell D3D the dimension of tex. coord
				var texCoordDim = TextureTransform.Count2;

				if ( autoTexCoordType == TexCoordCalcMethod.ProjectiveTexture )
				{
					//We want texcoords (u, v, w, q) always to be divided by q, but D3D
					//divides projected texcoords by the last element (in the case of
					//2D texcoords, that's w). So we tweak the transform matrix to transform the
					//texcoords with w and q swapped: (u, v, q, w), and then D3D will
					//divide u and v by q. The w and q are simply ignored, since they aren't
					//used by the rasterizer.

					switch ( this._texStageDesc[ stage ].TexType )
					{
						case D3D9TextureType.Normal:
							Utility.Swap( ref d3dMat.M13, ref d3dMat.M14 );
							Utility.Swap( ref d3dMat.M23, ref d3dMat.M24 );
							Utility.Swap( ref d3dMat.M33, ref d3dMat.M34 );
							Utility.Swap( ref d3dMat.M43, ref d3dMat.M44 );

							texCoordDim = TextureTransform.Projected | TextureTransform.Count3;
							break;

						case D3D9TextureType.Cube:
						case D3D9TextureType.Volume:
							// Yes, we support 3D projective texture.
							texCoordDim = TextureTransform.Projected | TextureTransform.Count4;
							break;
					}
				}
				else
				{
					switch ( this._texStageDesc[ stage ].TexType )
					{
						case D3D9TextureType.Normal:
							texCoordDim = TextureTransform.Count2;
							break;

						case D3D9TextureType.Cube:
						case D3D9TextureType.Volume:
							texCoordDim = TextureTransform.Count3;
							break;
					}
				}

				// note: int values of D3D.TextureTransform correspond directly with tex dimension, so direct conversion is possible
				// i.e. Count1 = 1, Count2 = 2, etc
				_setTextureStageState( stage, D3D9.TextureStage.TextureTransformFlags, (int)texCoordDim );

				// set the manually calculated texture matrix
				var d3DTransType = (D3D9.TransformState)( (int)( D3D9.TransformState.Texture0 ) + stage );
				ActiveD3D9Device.SetTransform( d3DTransType, ref d3dMat );
			}
			else
			{
				// disable texture transformation
				_setTextureStageState( stage, D3D9.TextureStage.TextureTransformFlags, (int)TextureTransform.Disable );

				// No need to set the texture transform here; it's never used at all
			}
		}
Example #28
		/// <summary>
		///		Manual rendering method, for advanced users only.
		/// </summary>
		/// <remarks>
		///		This method allows you to send rendering commands through the pipeline on
		///		demand, bypassing any normal world processing. You should only use this if you
		///		really know what you're doing; the engine does lots of things for you that you really should
		///		let it do. However, there are times where it may be useful to have this manual interface,
		///		for example overlaying something on top of the scene.
		///		<p/>
		///		Because this is an instant rendering method, timing is important. The best
		///		time to call it is from a RenderTarget event handler.
		///		<p/>
		///		Don't call this method a lot, it's designed for rare (1 or 2 times per frame) use.
		///		Calling it regularly per frame will cause frame rate drops!
		/// </remarks>
		/// <param name="op">A RenderOperation object describing the rendering op.</param>
		/// <param name="pass">The Pass to use for this render.</param>
		/// <param name="vp">Reference to the viewport to render to.</param>
		/// <param name="worldMatrix">The transform to apply from object to world space.</param>
		/// <param name="viewMatrix">The transform to apply from object to view space.</param>
		/// <param name="projMatrix">The transform to apply from view to screen space.</param>
		/// <param name="doBeginEndFrame">
		///		If true, BeginFrame() and EndFrame() are called, otherwise not.
		///		You should leave this as false if you are calling this within the main render loop.
		/// </param>
		public virtual void ManualRender( RenderOperation op,
										  Pass pass,
										  Viewport vp,
										  Matrix4 worldMatrix,
										  Matrix4 viewMatrix,
										  Matrix4 projMatrix,
										  bool doBeginEndFrame )
		{
			// configure all necessary parameters
			this.targetRenderSystem.Viewport = vp;
			this.targetRenderSystem.WorldMatrix = worldMatrix;
			this.targetRenderSystem.ViewMatrix = viewMatrix;
			this.targetRenderSystem.ProjectionMatrix = projMatrix;

			if ( doBeginEndFrame )
			{
				this.targetRenderSystem.BeginFrame();
			}

			// set the pass and render the object
			this.SetPass( pass );
			this.targetRenderSystem.Render( op );

			if ( doBeginEndFrame )
			{
				this.targetRenderSystem.EndFrame();
			}
		}
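A minimal usage sketch, on the assumption that a SceneManager, a prepared RenderOperation/Pass/Viewport and a Camera (whose ViewMatrix/ProjectionMatrix come from Frustum, as used elsewhere in these examples) are already available; this is not a quotation from the engine's samples:

// Hedged sketch: draw a prepared RenderOperation once, e.g. from a RenderTarget event handler.
// 'sceneManager', 'op', 'pass', 'viewport' and 'camera' are assumed to exist already.
sceneManager.ManualRender( op,                       // geometry and index data to draw
                           pass,                     // material pass supplying the render state
                           viewport,                 // viewport to render into
                           Matrix4.Identity,         // object-to-world transform
                           camera.ViewMatrix,        // world-to-view transform
                           camera.ProjectionMatrix,  // view-to-clip transform
                           false );                  // already inside the frame: no Begin/EndFrame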
		public override void ApplyObliqueDepthProjection( ref Matrix4 matrix, Plane plane, bool forGpuProgram )
		{
			// Thanks to Eric Lenyel for posting this calculation at www.terathon.com

			// Calculate the clip-space corner point opposite the clipping plane
			// as (sgn(clipPlane.x), sgn(clipPlane.y), 1, 1) and
			// transform it into camera space by multiplying it
			// by the inverse of the projection matrix

			/* generalised version
            Vector4 q = matrix.inverse() *
                Vector4(Math::Sign(plane.normal.x), Math::Sign(plane.normal.y), 1.0f, 1.0f);
            */
			var q = new Vector4();
			q.x = System.Math.Sign( plane.Normal.x )/matrix.m00;
			q.y = System.Math.Sign( plane.Normal.y )/matrix.m11;
			q.z = 1.0f;

			// flip the next bit from Lengyel since we're right-handed
			if ( forGpuProgram )
			{
				q.w = ( 1.0f - matrix.m22 )/matrix.m23;
			}
			else
			{
				q.w = ( 1.0f + matrix.m22 )/matrix.m23;
			}

			// Calculate the scaled plane vector
			var clipPlane4D = new Vector4( plane.Normal.x, plane.Normal.y, plane.Normal.z, plane.D );

			var c = clipPlane4D*( 1.0f/( clipPlane4D.Dot( q ) ) );

			// Replace the third row of the projection matrix
			matrix.m20 = c.x;
			matrix.m21 = c.y;

			// flip the next bit from Lengyel since we're right-handed
			if ( forGpuProgram )
			{
				matrix.m22 = c.z;
			}
			else
			{
				matrix.m22 = -c.z;
			}

			matrix.m23 = c.w;
		}
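A hedged usage sketch of the oblique-near-plane trick above, as typically used for planar reflections; the Plane constructor arguments and surrounding variable names are assumptions for illustration, not part of the source:

// Clip everything below y = 0 (e.g. a water surface) while rendering a reflection pass.
var clipPlane = new Plane( Vector3.UnitY, 0.0f );     // plane through the origin, facing +Y (assumed ctor)
var projection = camera.ProjectionMatrix;             // start from the camera's projection matrix
renderSystem.ApplyObliqueDepthProjection( ref projection, clipPlane, true );
// 'projection' now has its third row replaced so the near plane coincides with clipPlane.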
Example #30
		/// <summary>
		///		Perform all the updates required for an animated entity.
		/// </summary>
		public void UpdateAnimation()
		{
			if ( !HasSkeleton && !this.mesh.HasVertexAnimation )
			{
				return;
			}

			// we only do these tasks if they have not already been done this frame
			var root = Root.Instance;
			var currentFrameNumber = root.CurrentFrameCount;
			var stencilShadows = false;
			if ( CastShadows && root.SceneManager != null )
			{
				stencilShadows = root.SceneManager.IsShadowTechniqueStencilBased;
			}
			var swAnimation = !this.hardwareAnimation || stencilShadows || this.softwareAnimationRequests > 0;
			// Blend normals in s/w only if we're not using h/w animation,
			// since shadows only require positions
			var blendNormals = !this.hardwareAnimation || this.softwareAnimationNormalsRequests > 0;
			var animationDirty = this.frameAnimationLastUpdated != currentFrameNumber
				// 				                  || (HasSkeleton && Skeleton.ManualBonesDirty)
				;
			if ( animationDirty || ( swAnimation && this.mesh.HasVertexAnimation && !TempVertexAnimBuffersBound() ) ||
			     ( swAnimation && HasSkeleton && !TempSkelAnimBuffersBound( blendNormals ) ) )
			{
				if ( this.mesh.HasVertexAnimation )
				{
					if ( swAnimation )
					{
						// grab & bind temporary buffer for positions
						if ( this.softwareVertexAnimVertexData != null &&
						     this.mesh.SharedVertexDataAnimationType != VertexAnimationType.None )
						{
							this.tempVertexAnimInfo.CheckoutTempCopies( true, false, false, false );
							// NB we suppress hardware upload while doing blend if we're
							// hardware animation, because the only reason for doing this
							// is for shadow, which need only be uploaded then
							this.tempVertexAnimInfo.BindTempCopies( this.softwareVertexAnimVertexData, this.hardwareAnimation );
						}
						foreach ( var subEntity in this.subEntityList )
						{
							if ( subEntity.IsVisible && subEntity.SoftwareVertexAnimVertexData != null &&
							     subEntity.SubMesh.VertexAnimationType != VertexAnimationType.None )
							{
								subEntity.TempVertexAnimInfo.CheckoutTempCopies( true, false, false, false );
								subEntity.TempVertexAnimInfo.BindTempCopies( subEntity.SoftwareVertexAnimVertexData, this.hardwareAnimation );
							}
						}
					}
					ApplyVertexAnimation( this.hardwareAnimation, stencilShadows );
				}
				if ( HasSkeleton )
				{
					CacheBoneMatrices();

					if ( swAnimation )
					{
						var blendTangents = blendNormals;
						var blendBinormals = blendNormals;
						if ( this.skelAnimVertexData != null )
						{
							// Blend shared geometry
							// NB we suppress hardware upload while doing blend if we're
							// hardware animation, because the only reason for doing this
							// is for shadow, which need only be uploaded then
							this.tempSkelAnimInfo.CheckoutTempCopies( true, blendNormals, blendTangents, blendBinormals );
							this.tempSkelAnimInfo.BindTempCopies( this.skelAnimVertexData, this.hardwareAnimation );
							// Blend, taking source from either mesh data or morph data
							Mesh.SoftwareVertexBlend(
								( this.mesh.SharedVertexDataAnimationType != VertexAnimationType.None
								  	? this.softwareVertexAnimVertexData
								  	: this.mesh.SharedVertexData ), this.skelAnimVertexData, this.boneMatrices, blendNormals, blendTangents,
								blendBinormals );
						}

						// Now check the per subentity vertex data to see if it needs to be
						// using software blend
						foreach ( var subEntity in this.subEntityList )
						{
							// Blend dedicated geometry
							if ( subEntity.IsVisible && subEntity.SkelAnimVertexData != null )
							{
								subEntity.TempSkelAnimInfo.CheckoutTempCopies( true, blendNormals, blendTangents, blendBinormals );
								subEntity.TempSkelAnimInfo.BindTempCopies( subEntity.SkelAnimVertexData, this.hardwareAnimation );
								// Blend, taking source from either mesh data or morph data
								Mesh.SoftwareVertexBlend(
									( subEntity.SubMesh.VertexAnimationType != VertexAnimationType.None
									  	? subEntity.SoftwareVertexAnimVertexData
									  	: subEntity.SubMesh.vertexData ), subEntity.SkelAnimVertexData, this.boneMatrices, blendNormals,
									blendTangents,
									blendBinormals );
							}
						}
					}
				}

				// trigger update of bounding box if necessary
				if ( this.childObjectList.Count != 0 )
				{
					parentNode.NeedUpdate();
				}

				// remember the last frame count
				this.frameAnimationLastUpdated = currentFrameNumber;
			}

			// Need to update the child object's transforms when animation dirty
			// or parent node transform has altered.
			if ( ( HasSkeleton && animationDirty ) || this.lastParentXform != ParentNodeFullTransform )
			{
				this.lastParentXform = ParentNodeFullTransform;
				for ( var i = 0; i < this.childObjectList.Count; i++ )
				{
					var child = this.childObjectList[ i ];
					child.ParentNode.Update( true, true );
				}

				if ( this.hardwareAnimation && IsSkeletonAnimated )
				{
					this.numBoneMatrices = this.skeletonInstance.BoneCount;
					if ( this.boneWorldMatrices == null )
					{
						this.boneWorldMatrices = new Matrix4[this.numBoneMatrices];
					}
					for ( var i = 0; i < this.numBoneMatrices; i++ )
					{
						this.boneWorldMatrices[ i ] = Matrix4.Multiply( this.lastParentXform, this.boneMatrices[ i ] );
					}
				}
			}
		}
Example #31
        /// <summary>
        ///		Interpolates a single segment of the spline given a parametric value.
        /// </summary>
        /// <param name="index">The point index to treat as t=0. index + 1 is deemed to be t=1</param>
        /// <param name="t">Parametric value</param>
        /// <returns>An interpolated point along the spline.</returns>
        public override Vector3 Interpolate(int index, Real t)
        {
            Contract.Requires(index >= 0, "index", "Spline point index underrun.");
            Contract.Requires(index < pointList.Count, "index", "Spline point index overrun.");

            if ((index + 1) == pointList.Count)
            {
                // can't interpolate past the end of the list, just return the last point
                return(pointList[index]);
            }

            // quick special cases
            if (t == 0.0f)
            {
                return(pointList[index]);
            }
            else if (t == 1.0f)
            {
                return(pointList[index + 1]);
            }

            // Time for real interpolation
            // Construct a Vector4 of powers of t
            Real t2, t3;

            // t^2
            t2 = t * t;
            // t^3
            t3 = t2 * t;

            Vector4 powers = new Vector4(t3, t2, t, 1);

            // Algorithm is result = powers * hermitePoly * Matrix4(point1, point2, tangent1, tangent2)
            Vector3 point1   = pointList[index];
            Vector3 point2   = pointList[index + 1];
            Vector3 tangent1 = tangentList[index];
            Vector3 tangent2 = tangentList[index + 1];
            Matrix4 point    = new Matrix4();

            // create the matrix 4 with the 2 point and tangent values
            point.m00 = point1.x;
            point.m01 = point1.y;
            point.m02 = point1.z;
            point.m03 = 1.0f;
            point.m10 = point2.x;
            point.m11 = point2.y;
            point.m12 = point2.z;
            point.m13 = 1.0f;
            point.m20 = tangent1.x;
            point.m21 = tangent1.y;
            point.m22 = tangent1.z;
            point.m23 = 1.0f;
            point.m30 = tangent2.x;
            point.m31 = tangent2.y;
            point.m32 = tangent2.z;
            point.m33 = 1.0f;

            // get the final result in a Vector4
            Vector4 result = powers * hermitePoly * point;

            // return the final result
            return(new Vector3(result.x, result.y, result.z));
        }
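Assuming hermitePoly is the standard cubic Hermite basis matrix, the powers * hermitePoly * point product above expands to the familiar Hermite form (a sketch of the math, not code from the spline class):

$$
P(t) = (2t^3 - 3t^2 + 1)\,p_1 + (-2t^3 + 3t^2)\,p_2 + (t^3 - 2t^2 + t)\,m_1 + (t^3 - t^2)\,m_2,
$$

with p1, p2 the segment endpoints and m1, m2 their tangents, so P(0) = p1 and P(1) = p2.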