public static Vector3D Deserialize2(int maxBits, BitReader br)
        {
            // Port of Unreal's ReadPackedVector: the per-component bit count is
            // serialized first, then each axis follows as a biased unsigned value.
            var result = new Vector3D();

            result.NumBits = br.ReadInt32Max(maxBits);

            // Components were stored shifted by 'bias' so negative coordinates
            // fit into an unsigned field of 'componentBits' bits.
            int bias = 1 << (result.NumBits + 1);
            int componentBits = result.NumBits + 2;

            result.DX = br.ReadInt32FromBits(componentBits);
            result.DY = br.ReadInt32FromBits(componentBits);
            result.DZ = br.ReadInt32FromBits(componentBits);

            // ScaleFactor is 1 for this stream, so removing the bias yields the
            // final integer coordinates directly — no division required.
            result.X = result.DX - bias;
            result.Y = result.DY - bias;
            result.Z = result.DZ - bias;

            return result;
        }
        public static Vector3D DeserializeFixed(BitReader br)
        {
            // All three axes use the same fixed-point compression parameters
            // (1, 16); reads happen in X, Y, Z order off the bit stream.
            return new Vector3D
            {
                X = ReadFixedCompressedFloat(1, 16, br),
                Y = ReadFixedCompressedFloat(1, 16, br),
                Z = ReadFixedCompressedFloat(1, 16, br),
            };
        }