public byte[] CompressChunk(Chunk chunk)
{
    // Split the uncompressed data into blocks of at most maxBlockSize bytes.
    int numBlocks = (chunk.Uncompressed.Length + maxBlockSize - 1) / maxBlockSize;
    if (numBlocks > 8)
        throw new FormatException("Maximum block number exceeded");

    ChunkHeader head = new ChunkHeader();
    head.magic = -1641380927; // 0x9E2A83C1 as a signed int32
    head.blocksize = maxBlockSize;
    head.uncompressedsize = chunk.Uncompressed.Length;

    int pos = 0;
    MemoryStream mem = new MemoryStream();
    List<Block> blockList = new List<Block>();

    // Reserve room for the 16-byte chunk header plus one 8-byte entry per block;
    // the block table is written once the compressed sizes are known.
    int startData = 16 + 8 * numBlocks;
    mem.Seek(startData, SeekOrigin.Begin);

    for (int i = 0; i < numBlocks; i++)
    {
        Block block = new Block();
        byte[] result, temp;
        if (i != numBlocks - 1)
        {
            block.uncompressedsize = maxBlockSize;
            temp = new byte[maxBlockSize];
        }
        else
        {
            // The last block holds whatever remains.
            block.uncompressedsize = head.uncompressedsize - pos;
            temp = new byte[block.uncompressedsize];
        }
        Buffer.BlockCopy(chunk.Uncompressed, pos, temp, 0, temp.Length);
        result = LZO1X.Compress(temp);
        if (result.Length == 0)
            throw new Exception("LZO compression error!");
        block.compressedsize = result.Length;
        mem.WriteBytes(result);
        blockList.Add(block);
        pos += maxBlockSize;
    }

    head.compressedsize = (int)mem.Length;

    // Go back and write the header followed by the block table.
    mem.Seek(0, SeekOrigin.Begin);
    mem.WriteValueS32(head.magic);
    mem.WriteValueS32(head.blocksize);
    mem.WriteValueS32(head.compressedsize);
    mem.WriteValueS32(head.uncompressedsize);
    foreach (Block block in blockList)
    {
        mem.WriteValueS32(block.compressedsize);
        mem.WriteValueS32(block.uncompressedsize);
    }
    return mem.ToArray();
}
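For reference, the on-disk layout that CompressChunk produces (and that DecompressPCC / DecompressME1orME2 further down expect) can be sketched as follows; the field names are taken from the code above, and the layout is inferred purely from how these methods read and write the stream.

// Chunk layout sketch (inferred from the read/write code in this file):
// offset 0:  int32 magic            // 0x9E2A83C1
// offset 4:  int32 blocksize        // max uncompressed bytes per block
// offset 8:  int32 compressedsize   // total stream length, header and table included
// offset 12: int32 uncompressedsize
// offset 16: { int32 compressedsize; int32 uncompressedsize; } x numBlocks
// ...followed by each block's LZO-compressed payload, in block order.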
public void ReadAnimInfo(FileStream fs, ChunkHeader h)
{
    data.Infos = new List<PSAAnimInfo>();
    for (int i = 0; i < h.count; i++)
    {
        PSAAnimInfo info = new PSAAnimInfo();
        byte[] buff = new byte[h.size];
        for (int j = 0; j < h.size; j++)
            buff[j] = (byte)fs.ReadByte();
        info.raw = buff;

        // Name and group are fixed 64-byte, NUL-padded ASCII fields.
        info.name = "";
        for (int j = 0; j < 64; j++)
            if (buff[j] != 0)
                info.name += (char)buff[j];
        info.group = "";
        for (int j = 0; j < 64; j++)
            if (buff[j + 64] != 0)
                info.group += (char)buff[j + 64];

        info.TotalBones = BitConverter.ToInt32(buff, 128);
        info.RootInclude = BitConverter.ToInt32(buff, 132);
        info.KeyCompressionStyle = BitConverter.ToInt32(buff, 136);
        info.KeyQuotum = BitConverter.ToInt32(buff, 140);
        info.KeyReduction = BitConverter.ToSingle(buff, 144);
        info.TrackTime = BitConverter.ToSingle(buff, 148);
        info.AnimRate = BitConverter.ToSingle(buff, 152);
        info.StartBone = BitConverter.ToInt32(buff, 156);
        info.FirstRawFrame = BitConverter.ToInt32(buff, 160);
        info.NumRawFrames = BitConverter.ToInt32(buff, 164);
        data.Infos.Add(info);
    }
}
public RecordWriteResult TryAppend(LogRecord record)
{
    if (_isReadOnly)
        throw new InvalidOperationException("Cannot write to a read-only block.");

    var workItem = _writerWorkItem;
    var buffer = workItem.Buffer;
    var bufferWriter = workItem.BufferWriter;

    buffer.SetLength(4);
    buffer.Position = 4;
    record.WriteTo(bufferWriter);
    var length = (int)buffer.Length - 4;
    bufferWriter.Write(length); // length suffix
    buffer.Position = 0;
    bufferWriter.Write(length); // length prefix

    if (workItem.StreamPosition + length + 2 * sizeof(int) > ChunkHeader.Size + _chunkHeader.ChunkSize)
        return RecordWriteResult.Failed(GetDataPosition(workItem));

    var oldPosition = WriteRawData(workItem, buffer);
    _physicalDataSize = (int)GetDataPosition(workItem); // should fit 32 bits
    _logicalDataSize = ChunkHeader.GetLocalLogPosition(record.LogPosition + length + 2 * sizeof(int));

    // for non-scavenged chunk _physicalDataSize should be the same as _logicalDataSize
    // for scavenged chunk _logicalDataSize should be at least the same as _physicalDataSize
    if ((!ChunkHeader.IsScavenged && _logicalDataSize != _physicalDataSize)
        || (ChunkHeader.IsScavenged && _logicalDataSize < _physicalDataSize))
    {
        throw new Exception(string.Format(
            "Data sizes violation. Chunk: {0}, IsScavenged: {1}, LogicalDataSize: {2}, PhysicalDataSize: {3}.",
            FileName, ChunkHeader.IsScavenged, _logicalDataSize, _physicalDataSize));
    }

    return RecordWriteResult.Successful(oldPosition, _physicalDataSize);
}
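The length prefix and suffix written around each record let a reader walk the chunk in either direction. A minimal sketch of that framing, assuming a hypothetical ReadInt32At helper (not part of the API above):

// Record framing implied by TryAppend: [int32 len][payload, len bytes][int32 len]
// forward:  len  = ReadInt32At(pos);      // prefix
//           next = pos + 4 + len + 4;     // skip prefix, payload, suffix
// backward: len  = ReadInt32At(pos - 4);  // suffix of the preceding record
//           prev = pos - 4 - len - 4;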
public void ReadBones(FileStream fs, ChunkHeader h)
{
    byte[] buffer;
    data.Bones = new List<PSABone>();
    for (int i = 0; i < h.count; i++)
    {
        PSABone b = new PSABone();

        // 64-byte NUL-padded bone name.
        buffer = new byte[64];
        fs.Read(buffer, 0, 64);
        b.name = "";
        for (int j = 0; j < 64; j++)
            if (buffer[j] != 0)
                b.name += (char)buffer[j];
        b.name = b.name.Trim();

        buffer = new byte[4];
        fs.Read(buffer, 0, 4); // flags (unused)
        fs.Read(buffer, 0, 4);
        b.childs = BitConverter.ToInt32(buffer, 0);
        fs.Read(buffer, 0, 4);
        b.parent = BitConverter.ToInt32(buffer, 0);

        // Rotation (quaternion) and location are 32-bit floats in the file,
        // so they must be read with ToSingle, not ToInt32.
        fs.Read(buffer, 0, 4);
        b.rotation.x = BitConverter.ToSingle(buffer, 0);
        fs.Read(buffer, 0, 4);
        b.rotation.y = BitConverter.ToSingle(buffer, 0);
        fs.Read(buffer, 0, 4);
        b.rotation.z = BitConverter.ToSingle(buffer, 0);
        fs.Read(buffer, 0, 4);
        b.rotation.w = BitConverter.ToSingle(buffer, 0);
        fs.Read(buffer, 0, 4);
        b.location.x = BitConverter.ToSingle(buffer, 0);
        fs.Read(buffer, 0, 4);
        b.location.y = BitConverter.ToSingle(buffer, 0);
        fs.Read(buffer, 0, 4);
        b.location.z = BitConverter.ToSingle(buffer, 0);

        // Skip the remaining four 4-byte fields (length and size values).
        fs.Read(buffer, 0, 4);
        fs.Read(buffer, 0, 4);
        fs.Read(buffer, 0, 4);
        fs.Read(buffer, 0, 4);
        data.Bones.Add(b);
    }
}
public virtual async ValueTask<bool> ParseReplayInfo()
{
    using (CustomBinaryReaderAsync binaryReader = new CustomBinaryReaderAsync(SubStreamFactory.BaseStream, true))
    {
        if (!await ParseReplayHeader(binaryReader))
            return false;

        ChunkHeader chunkHeader = await ParseChunkHeader();
        if (chunkHeader.ChunkType != ChunkType.Header)
            return false;

        await using (SubStream stream = SubStreamFactory.CreateSubstream(chunkHeader.ChunkSize))
        using (CustomBinaryReaderAsync chunkReader = new CustomBinaryReaderAsync(stream, true))
        {
            return await ParseGameSpecificHeaderChunk(chunkReader);
        }
    }
}
public ChunkHeader ReadHeader(FileStream fs)
{
    ChunkHeader res = new ChunkHeader();
    res.name = "";
    for (int i = 0; i < 20; i++)
    {
        byte b = (byte)fs.ReadByte();
        if (b != 0)
            res.name += (char)b;
    }
    byte[] buff = new byte[4];
    fs.Read(buff, 0, 4);
    res.flags = BitConverter.ToInt32(buff, 0);
    fs.Read(buff, 0, 4);
    res.size = BitConverter.ToInt32(buff, 0);
    fs.Read(buff, 0, 4);
    res.count = BitConverter.ToInt32(buff, 0);
    return res;
}
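For context, the 32-byte PSA/PSK chunk header this method decodes can be sketched as follows; the field names come from the code above and the layout is inferred from the reads, not from a format specification.

// PSA/PSK chunk header layout as read above:
// offset 0:  char[20] name   // NUL-padded chunk identifier, e.g. "ANIMHEAD"
// offset 20: int32    flags  // type flag / version
// offset 24: int32    size   // size of one record in this chunk
// offset 28: int32    count  // number of records that follow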
public static void CreateMultiChunk(TFChunkDbConfig config, int chunkStartNum, int chunkEndNum, string filename,
                                    int? physicalSize = null, long? logicalSize = null)
{
    if (chunkStartNum > chunkEndNum)
        throw new ArgumentException("chunkStartNum");

    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, config.ChunkSize, chunkStartNum, chunkEndNum, true, Guid.NewGuid());
    var chunkBytes = chunkHeader.AsByteArray();
    var physicalDataSize = physicalSize ?? config.ChunkSize;
    var logicalDataSize = logicalSize ?? (chunkEndNum - chunkStartNum + 1) * config.ChunkSize;
    var buf = new byte[ChunkHeader.Size + physicalDataSize + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);

    var chunkFooter = new ChunkFooter(true, true, physicalDataSize, logicalDataSize, 0, new byte[ChunkFooter.ChecksumSize]);
    chunkBytes = chunkFooter.AsByteArray();
    Buffer.BlockCopy(chunkBytes, 0, buf, buf.Length - ChunkFooter.Size, chunkBytes.Length);

    File.WriteAllBytes(filename, buf);
}
private void VerifyEnd(ChunkHeader chunkHeader = null)
{
    chunkHeader = chunkHeader ?? ReadChunkHeader();
    if (chunkHeader.ChunkType != "IEND")
        throw new InvalidDataException("Invalid IEND chunk type");
    if (chunkHeader.ChunkLength != 0)
        throw new InvalidDataException("Invalid IEND chunk length");

    ChunkFooter chunkFooter = ReadChunkFooter();
    if (CRC.Calculate(chunkHeader.ChunkTypeBuffer, new byte[0]) != chunkFooter.ChunkCRC)
        throw new InvalidDataException("Invalid IEND chunk CRC");
}
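This follows the PNG chunk framing, where each chunk is a big-endian length, a 4-byte type, the data, and a CRC-32 computed over the type and data together; that is why IEND's CRC is computed over its type and an empty data array above.

// PNG chunk layout (per the PNG specification):
// uint32  length  // big-endian, counts data bytes only
// byte[4] type    // e.g. "IHDR", "IDAT", "IEND"
// byte[length] data
// uint32  crc     // CRC-32 over type + data; for IEND, over "IEND" alone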
private async Task CompressBigFileAsync(FileHeader header)
{
    await using var readStream = ArchiveStream.OpenRead(header.FullPath, Settings.ThreadsCount);
    var chunkCompressor = GetChunkCompressor();
    header.NumberOfChunks = readStream.GetChunksCount();

    // Reserve space for the file header; it is rewritten after each batch of chunks.
    _outputStream.Seek(header.SizeOf, SeekOrigin.Current);
    header.Position = _outputStream.Position;

    var chunkSerialNumber = 0;
    await foreach (var chunks in readStream.ReadFileInChunksAsync(Settings.ChunkSize))
    {
        // Compress the batch in parallel, one task per chunk.
        var tasks = chunks.Select(x => chunkCompressor(x, header.CompressionType));
        var chunksSizes = chunks.Select(x => x.Length).ToArray();
        var compressedChunks = await Task.WhenAll(tasks);

        for (var i = 0; i < compressedChunks.Length; i++)
        {
            var chunkHeader = new ChunkHeader
            {
                Size = compressedChunks[i].Length,
                SerialNumber = chunkSerialNumber++,
            };
            header.Chunks.Add(chunkHeader);
            await _outputStream.WriteAsync(compressedChunks[i]);
            _archiveProgress.Report(header.RelativePath, chunksSizes[i], chunkHeader.SerialNumber, header.NumberOfChunks);
        }

        // Rewrite the file header in place with the chunk list so far, then resume appending.
        var currentPosition = _outputStream.Position;
        _outputStream.Position = header.Position - header.SizeOf;
        await _outputStream.WriteFileHeaderAsync(header);
        _outputStream.Position = currentPosition;
    }
}
/// <summary>
/// Reading surface links to frames
/// </summary>
/// <param name="f">BinaryReader</param>
/// <param name="numAtomics">Count of atomics</param>
void ReadAtomics(BinaryReader f, int numAtomics)
{
    for (int i = 0; i < numAtomics; i++)
    {
        ChunkHeader h = ReadHeader(f);
        if (h.Type != ChunkType.Atomic)
            throw new Exception("[ModelFile] Unexpected chunk: " + h.Type);

        h = ReadHeader(f);
        if (h.Type != ChunkType.Struct)
            throw new Exception("[ModelFile] Unexpected chunk: " + h.Type);

        // Read and skip some data
        int frameIndex = f.ReadInt32();
        int geomIndex = f.ReadInt32();
        f.BaseStream.Position += 8;

        // Prevent multiple assignment
        if (Surfaces[geomIndex].Frame != -1)
            throw new Exception("[ModelFile] Geometry already assigned to frame: " + geomIndex);
        Surfaces[geomIndex].Frame = frameIndex;

        // Extension part
        h = ReadHeader(f);
        if (h.Type != ChunkType.Extension)
            throw new Exception("[ModelFile] Unexpected chunk: " + h.Type);
        f.BaseStream.Position += h.Size;
    }
}
public static void CreateOngoingChunk(TFChunkDbConfig config, int chunkNum, string filename,
                                      int? actualSize = null, byte[] contents = null)
{
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, config.ChunkSize, chunkNum, chunkNum, false, Guid.NewGuid());
    var chunkBytes = chunkHeader.AsByteArray();
    var dataSize = actualSize ?? config.ChunkSize;
    var buf = new byte[ChunkHeader.Size + dataSize + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);

    if (contents != null)
    {
        if (contents.Length != dataSize)
            throw new Exception("Wrong contents size.");
        Buffer.BlockCopy(contents, 0, buf, ChunkHeader.Size, contents.Length);
    }

    File.WriteAllBytes(filename, buf);
}
private void CreateWriterWorkItemForExistingChunk(int writePosition, out ChunkHeader chunkHeader)
{
    var md5 = MD5.Create();
    var stream = new FileStream(_filename, FileMode.Open, FileAccess.ReadWrite, FileShare.Read,
                                WriteBufferSize, FileOptions.SequentialScan);
    try
    {
        chunkHeader = ReadHeader(stream);
    }
    catch
    {
        stream.Dispose();
        ((IDisposable)md5).Dispose();
        throw;
    }

    var realPosition = GetRawPosition(writePosition);
    MD5Hash.ContinuousHashFor(md5, stream, 0, realPosition);
    stream.Position = realPosition; // this reordering fixes bug in Mono implementation of FileStream
    _writerWorkItem = new WriterWorkItem(stream, null, md5);
}
/// <summary>
/// Gets and caches the asset urls referenced by the chunk with the given identifier.
/// </summary>
/// <param name="objectId">The object identifier.</param>
/// <returns>The list of asset urls referenced.</returns>
private List<string> GetChunkReferences(ref ObjectId objectId)
{
    List<string> references;

    // Check the cache
    if (!referencesByObjectId.TryGetValue(objectId, out references))
    {
        // First time, need to scan it
        referencesByObjectId[objectId] = references = new List<string>();

        // Open stream to read list of chunk references
        using (var stream = AssetManager.FileProvider.OpenStream("obj/" + objectId, VirtualFileMode.Open, VirtualFileAccess.Read))
        {
            // Read chunk header
            var streamReader = new BinarySerializationReader(stream);
            var header = ChunkHeader.Read(streamReader);

            // Only process chunks that actually store references
            if (header != null && header.OffsetToReferences != -1)
            {
                // Seek to where references are stored and deserialize them
                streamReader.NativeStream.Seek(header.OffsetToReferences, SeekOrigin.Begin);
                List<ChunkReference> chunkReferences = null;
                streamReader.Serialize(ref chunkReferences, ArchiveMode.Deserialize);
                foreach (var chunkReference in chunkReferences)
                    references.Add(chunkReference.Location);
            }
        }
    }
    return references;
}
private static void Collect(HashSet<ObjectId> objectIds, ObjectId objectId, IAssetIndexMap assetIndexMap)
{
    // Already added?
    if (!objectIds.Add(objectId))
        return;

    using (var stream = AssetManager.FileProvider.OpenStream(DatabaseFileProvider.ObjectIdUrl + objectId, VirtualFileMode.Open, VirtualFileAccess.Read))
    {
        // Read chunk header
        var streamReader = new BinarySerializationReader(stream);
        var header = ChunkHeader.Read(streamReader);

        // Only process chunks
        if (header != null && header.OffsetToReferences != -1)
        {
            // Seek to where references are stored and deserialize them
            streamReader.NativeStream.Seek(header.OffsetToReferences, SeekOrigin.Begin);
            List<ChunkReference> references = null;
            streamReader.Serialize(ref references, ArchiveMode.Deserialize);

            foreach (var reference in references)
            {
                ObjectId refObjectId;
                var databaseFileProvider = DatabaseFileProvider.ResolveObjectId(reference.Location, out refObjectId);
                if (databaseFileProvider != null)
                    Collect(objectIds, refObjectId, databaseFileProvider.AssetIndexMap);
            }
        }
    }
}
private void InitNew(ChunkHeader chunkHeader, int fileSize)
{
    Ensure.NotNull(chunkHeader, "chunkHeader");
    Ensure.Positive(fileSize, "fileSize");

    _fileSize = fileSize;
    _isReadOnly = false;
    _chunkHeader = chunkHeader;
    _physicalDataSize = 0;
    _logicalDataSize = 0;

    if (_inMem)
    {
        CreateInMemChunk(chunkHeader, fileSize);
    }
    else
    {
        CreateWriterWorkItemForNewChunk(chunkHeader, fileSize);
        SetAttributes(_filename, false);
        CreateReaderStreams();
    }

    _readSide = chunkHeader.IsScavenged
        ? (IChunkReadSide)new TFChunkReadSideScavenged(this)
        : new TFChunkReadSideUnscavenged(this);
}
protected override void ReadUndoData(List<Player.UndoPos> buffer, string path)
{
    Player.UndoPos Pos;
    UndoCacheItem item = default(UndoCacheItem);
    using (Stream fs = File.OpenRead(path))
    using (BinaryReader r = new BinaryReader(fs))
    {
        int approxEntries = (int)(fs.Length / entrySize);
        if (buffer.Capacity < approxEntries)
            buffer.Capacity = approxEntries;

        while (fs.Position < fs.Length)
        {
            ChunkHeader chunk = ReadHeader(fs, r);
            Pos.mapName = chunk.LevelName;
            for (int j = 0; j < chunk.Entries; j++)
            {
                // Low 14 bits of the flags hold the time offset in seconds.
                item.Flags = r.ReadUInt16();
                DateTime time = chunk.BaseTime.AddTicks((item.Flags & 0x3FFF) * TimeSpan.TicksPerSecond);
                Pos.timeDelta = (int)time.Subtract(Server.StartTime).TotalSeconds;

                // Unpack the linear block index into x/y/z coordinates.
                int index = r.ReadInt32();
                Pos.x = (ushort)(index % chunk.Width);
                Pos.y = (ushort)((index / chunk.Width) / chunk.Length);
                Pos.z = (ushort)((index / chunk.Width) % chunk.Length);

                item.Type = r.ReadByte();
                item.NewType = r.ReadByte();
                item.GetBlock(out Pos.type, out Pos.extType);
                item.GetNewBlock(out Pos.newtype, out Pos.newExtType);
                buffer.Add(Pos);
            }
        }
    }
}
private void CreateWriterWorkItemForExistingChunk(int writePosition, out ChunkHeader chunkHeader)
{
    var md5 = MD5.Create();
    var stream = GetWriteStream(_filename);
    try
    {
        chunkHeader = ReadHeader(stream);
        if (chunkHeader.Version == (byte)ChunkVersions.Unaligned)
        {
            // Rewrite the header in place to upgrade the ongoing chunk to the aligned (v3) format.
            Log.Trace("Upgrading ongoing file " + _filename + " to version 3");
            var newHeader = new ChunkHeader((byte)ChunkVersions.Aligned,
                                            chunkHeader.ChunkSize,
                                            chunkHeader.ChunkStartNumber,
                                            chunkHeader.ChunkEndNumber,
                                            false,
                                            chunkHeader.ChunkId);
            stream.Seek(0, SeekOrigin.Begin);
            chunkHeader = newHeader;
            var head = newHeader.AsByteArray();
            stream.Write(head, 0, head.Length);
            stream.Flush();
            stream.Seek(0, SeekOrigin.Begin);
        }
    }
    catch
    {
        stream.Dispose();
        ((IDisposable)md5).Dispose();
        throw;
    }

    var realPosition = GetRawPosition(writePosition);
    MD5Hash.ContinuousHashFor(md5, stream, 0, realPosition);
    stream.Position = realPosition; // this reordering fixes bug in Mono implementation of FileStream
    _writerWorkItem = new WriterWorkItem(stream, null, md5);
}
private static void WriteOctreeLeaves<TLeaf>(Stream stream, Dictionary<UInt64, TLeaf> leaves)
    where TLeaf : IMyOctreeLeafNode
{
    foreach (var entry in leaves)
    {
        var header = new ChunkHeader()
        {
            ChunkType = entry.Value.SerializedChunkType,
            // increase chunk size by the size of key (which is inserted before it)
            Size = entry.Value.SerializedChunkSize + sizeof(UInt64),
            Version = CURRENT_VERSION_OCTREE_LEAVES,
        };
        header.WriteTo(stream);
        stream.WriteNoAlloc(entry.Key);

        switch (header.ChunkType)
        {
            case ChunkTypeEnum.ContentLeafOctree:
            case ChunkTypeEnum.MaterialLeafOctree:
                (entry.Value as MyMicroOctreeLeaf).WriteTo(stream);
                break;

            case ChunkTypeEnum.ContentLeafProvider:
            case ChunkTypeEnum.MaterialLeafProvider:
                Debug.Assert(header.Size == sizeof(UInt64), "Provider leaf should not serialize any data.");
                break;

            default:
                throw new InvalidBranchException();
        }
    }
}
public virtual async ValueTask<bool> VisitChunks()
{
    while (true)
    {
        ChunkHeader chunkHeader = await ParseChunkHeader();
        await using (SubStream stream = SubStreamFactory.CreateSubstream(chunkHeader.ChunkSize))
        using (CustomBinaryReaderAsync binaryReader = new CustomBinaryReaderAsync(stream, true))
        {
            if (chunkHeader.ChunkType == ChunkType.EndOfStream)
            {
                if (await VisitEndOfStream())
                    continue;
                return true;
            }
            if (!await ChooseChunkType(binaryReader, chunkHeader.ChunkType))
                return false;
        }
    }
}
public BitmapCastProperties(ChunkHeader header, ShockwaveReader input)
{
    bool IsDataAvailable() => input.Position < header.Offset + header.Length;

    TotalWidth = input.ReadBigEndian<ushort>() & 0x7FFF; // high bit is a flag, mask it off
    Rectangle = input.ReadRect();
    AlphaThreshold = input.ReadByte();
    OLE = input.ReadBytes(7); //TODO:

    short regX = input.ReadBigEndian<short>();
    short regY = input.ReadBigEndian<short>();
    RegistrationPoint = new Point(regX, regY);

    Flags = (BitmapFlags)input.ReadByte();

    // The remaining fields are optional; shorter chunks end here.
    if (!IsDataAvailable())
        return;
    BitDepth = input.ReadByte();

    if (!IsDataAvailable())
        return;
    Palette = input.ReadBigEndian<int>(); //TODO: PaletteRef or something
    if (!IsSystemPalette)
        Palette &= 0x7FFF;
}
internal unsafe MddfChunk(ChunkHeader* header) : base(header)
{
    var begin = (byte*)ChunkHeader.ChunkBegin(header);
    var end = begin + header->Size;
    Definitions = new List<IModelDefinition>(header->Size / sizeof(MddfEntry));

    for (var entry = (MddfEntry*)begin; entry < end; entry++)
    {
        Definitions.Add(new RootModelDefinition
        {
            Id = entry->uniqueId,
            ReferenceIndex = entry->mmidIndex,
            // Convert from the file's coordinate system, re-basing against the map origin.
            Position = new Vector3(
                MapOriginOffset - entry->position.Z,
                MapOriginOffset - entry->position.X,
                entry->position.Y),
            Rotation = entry->rotation,
            Scale = entry->scale / 1024.0f, // stored as fixed-point, 1024 == 1.0
            Flags = entry->flags
        });
    }
}
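The pointer arithmetic above implies a blittable entry struct along these lines; this is a sketch inferred from the field accesses, not the project's actual declaration, and the field order is assumed.

// Sketch of the entry the loop above walks (layout assumed, names from the code):
// struct MddfEntry {
//     uint    mmidIndex;  // index into the model-name table
//     uint    uniqueId;   // unique placement id
//     Vector3 position;
//     Vector3 rotation;
//     ushort  scale;      // 1024 == 1.0f
//     ushort  flags;
// }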
public virtual async ValueTask<bool> VisitChunks()
{
    while (true)
    {
        ChunkHeader chunkHeader = await ParseChunkHeader();
        await using (SubStream stream = SubStreamFactory.CreateSubstream(chunkHeader.ChunkSize))
        using (ReplayArchiveAsync binaryReader = new ReplayArchiveAsync(stream, DemoHeader!.EngineNetworkProtocolVersion, ReplayHeader!.Compressed, true))
        {
            if (chunkHeader.ChunkType == ChunkType.EndOfStream)
            {
                if (await VisitEndOfStream())
                    continue;
                return true;
            }
            if (!await ChooseChunkType(binaryReader, chunkHeader.ChunkType))
                return false;
        }
    }
}
void ParseChunk(ChunkHeader chunk)
{
    ChunkType type;
    if (!System.Enum.TryParse(chunk.chunkID, out type))
        throw new System.Exception(path + ": Bad File chunk");

    switch (type)
    {
        case ChunkType.SIZE: parseSize(); break;
        case ChunkType.XYZI: parseModel(); break;
        case ChunkType.RGBA: parsePalette(); break;
    }
    offset += chunk.Content;
}
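The SIZE/XYZI/RGBA tags suggest MagicaVoxel-style .vox chunk walking, where each chunk header is a 4-char id plus two little-endian int32 sizes. A minimal, self-contained sketch of the loop that would drive ParseChunk above; the helper name and the layout are assumptions, not this project's API:

// Hypothetical driver for ParseChunk-style dispatch (assumed .vox-like layout).
static void WalkChunks(System.IO.BinaryReader r, long end)
{
    while (r.BaseStream.Position < end)
    {
        string id = new string(r.ReadChars(4)); // "SIZE", "XYZI", "RGBA", ...
        int contentSize = r.ReadInt32();        // bytes of chunk content
        int childrenSize = r.ReadInt32();       // bytes of child chunks
        long contentEnd = r.BaseStream.Position + contentSize;
        // ...dispatch on id here, as ParseChunk does...
        r.BaseStream.Position = contentEnd;     // child chunks follow and are visited next
    }
}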
/** <summary> Constructs the default object. </summary> */ internal ParkEntrance(ObjectDataHeader objectHeader, ChunkHeader chunkHeader) : base(objectHeader, chunkHeader) { this.Header = new ParkEntranceHeader(); }
private void InitNew(ChunkHeader chunkHeader, int fileSize)
{
    Ensure.NotNull(chunkHeader, "chunkHeader");
    Ensure.Positive(fileSize, "fileSize");

    _isReadOnly = false;
    _chunkHeader = chunkHeader;
    _actualDataSize = 0;

    CreateWriterWorkItemForNewChunk(chunkHeader, fileSize);
    CreateReaderStreams();

    _readSide = new TFChunkReadSideUnscavenged(this);
    SetAttributes();
}
public static unsafe void* ChunkBegin(ChunkHeader* header) { return (byte*)header + sizeof(ChunkHeader); }
/**<summary>Constructs the default object.</summary>*/ internal Footpath(ObjectDataHeader objectHeader, ChunkHeader chunkHeader) : base(objectHeader, chunkHeader) { Header = new FootpathHeader(); }
public override object ReadJson(JsonReader reader, Type objectType, object existingValue, JsonSerializer serializer)
{
    var nodeJson = JObject.Load(reader);
    var actorName = (string)nodeJson["Name"];
    SerializableDOMNode newNode;

    if (m_parent is WDOMLayeredGroupNode)
    {
        if (actorName == null)
            return null;

        WDOMLayeredGroupNode layerNode = m_parent as WDOMLayeredGroupNode;
        string unlayedFourCC = layerNode.FourCC.ToString();
        MapLayer layer = ChunkHeader.FourCCToLayer(ref unlayedFourCC);
        FourCC fourcc = FourCCConversion.GetEnumFromString(unlayedFourCC);

        Type newObjType = WResourceManager.GetTypeByName(actorName);
        if (newObjType == typeof(Actor))
            return null;

        newNode = (SerializableDOMNode)Activator.CreateInstance(newObjType, fourcc, m_world);
        newNode.Layer = layer;
    }
    else if (m_parent is WDOMGroupNode)
    {
        WDOMGroupNode groupNode = m_parent as WDOMGroupNode;
        FourCC fourcc = groupNode.FourCC;

        if (fourcc == FourCC.ACTR || fourcc == FourCC.SCOB || fourcc == FourCC.TRES)
            return null;

        if (fourcc == FourCC.TGDR || fourcc == FourCC.TGSC || fourcc == FourCC.TGOB)
        {
            if (actorName == null)
                return null;

            Type newObjType = WResourceManager.GetTypeByName(actorName);
            if (newObjType == typeof(Actor))
                return null;

            newNode = (SerializableDOMNode)Activator.CreateInstance(newObjType, fourcc, m_world);
        }
        else
        {
            Type newObjType = FourCCConversion.GetTypeFromEnum(groupNode.FourCC);
            newNode = (SerializableDOMNode)Activator.CreateInstance(newObjType, fourcc, m_world);
        }
    }
    else
    {
        return null;
    }

    newNode.SetParent(m_parent);

    try
    {
        // Only deserialize properties marked with WProperty and not JsonIgnore.
        var wproperties = newNode.GetType().GetProperties().Where(prop =>
        {
            CustomAttributeData[] custom_attributes = prop.CustomAttributes.ToArray();
            CustomAttributeData wproperty_attribute = custom_attributes.FirstOrDefault(x => x.AttributeType.Name == "WProperty");
            if (wproperty_attribute == null)
                return false;
            CustomAttributeData jsonignore_attribute = custom_attributes.FirstOrDefault(x => x.AttributeType.Name == "JsonIgnoreAttribute");
            if (jsonignore_attribute != null)
                return false;
            return true;
        });

        foreach (var prop in wproperties)
        {
            JToken jsonValue = nodeJson[prop.Name];
            if (jsonValue == null)
                continue;

            if (prop.PropertyType == typeof(WTransform))
            {
                if (!(jsonValue is JObject))
                    continue;
                JObject jsonValueObject = (JObject)jsonValue;

                WTransform transform = prop.GetValue(newNode, null) as WTransform;
                if (transform != null)
                {
                    if (jsonValueObject.ContainsKey("Position"))
                    {
                        var position = transform.Position;
                        position.X = ((float?)jsonValueObject["Position"]["X"]).GetValueOrDefault();
                        position.Y = ((float?)jsonValueObject["Position"]["Y"]).GetValueOrDefault();
                        position.Z = ((float?)jsonValueObject["Position"]["Z"]).GetValueOrDefault();
                        transform.Position = position;
                    }
                    if (jsonValueObject.ContainsKey("Rotation"))
                    {
                        var rotation = transform.Rotation;
                        rotation.X = ((float?)jsonValueObject["Rotation"]["X"]).GetValueOrDefault();
                        rotation.Y = ((float?)jsonValueObject["Rotation"]["Y"]).GetValueOrDefault();
                        rotation.Z = ((float?)jsonValueObject["Rotation"]["Z"]).GetValueOrDefault();
                        rotation.W = ((float?)jsonValueObject["Rotation"]["W"]).GetValueOrDefault();
                        transform.Rotation = rotation;
                    }
                    if (jsonValueObject.ContainsKey("LocalScale"))
                    {
                        var localScale = transform.LocalScale;
                        localScale.X = ((float?)jsonValueObject["LocalScale"]["X"]).GetValueOrDefault(1.0f);
                        localScale.Y = ((float?)jsonValueObject["LocalScale"]["Y"]).GetValueOrDefault(1.0f);
                        localScale.Z = ((float?)jsonValueObject["LocalScale"]["Z"]).GetValueOrDefault(1.0f);
                        transform.LocalScale = localScale;
                    }
                }
            }
            else if (prop.PropertyType == typeof(MessageReference))
            {
                ushort messageID = (ushort)jsonValue;
                MessageReference msgRef = new MessageReference(messageID);
                prop.SetValue(newNode, msgRef);
            }
            else if (prop.PropertyType == typeof(Path_v2))
            {
                // Paths are serialized by index into the scene root's path list.
                int pathIndex = (int)jsonValue;
                WDOMNode cur_object = m_parent;
                while (cur_object.Parent != null)
                    cur_object = cur_object.Parent;

                List<Path_v2> pathsList = cur_object.GetChildrenOfType<Path_v2>();
                if (pathIndex < 0)
                    prop.SetValue(newNode, null);
                else if (pathIndex < pathsList.Count)
                    prop.SetValue(newNode, pathsList[pathIndex]);
            }
            else if (prop.PropertyType == typeof(ExitData))
            {
                int exitIndex = (int)jsonValue;
                WScene scene;

                CustomAttributeData[] custom_attributes = prop.CustomAttributes.ToArray();
                CustomAttributeData wproperty_attribute = custom_attributes.FirstOrDefault(x => x.AttributeType.Name == "WProperty");
                SourceScene source_scene = (SourceScene)wproperty_attribute.ConstructorArguments[4].Value;

                if (source_scene == SourceScene.Stage)
                {
                    scene = m_world.Map.SceneList.First(x => x.GetType() == typeof(WStage)) as WScene;
                }
                else
                {
                    WDOMNode cur_object = m_parent;
                    while (cur_object.Parent != null)
                        cur_object = cur_object.Parent;
                    scene = cur_object as WScene;
                }

                List<ExitData> exitsList = scene.GetChildrenOfType<ExitData>();
                if (exitIndex < 0)
                    prop.SetValue(newNode, null);
                else if (exitIndex < exitsList.Count)
                    prop.SetValue(newNode, exitsList[exitIndex]);
            }
            else if (prop.PropertyType == typeof(MapEvent))
            {
                int eventIndex = (int)jsonValue;
                WStage stage = m_world.Map.SceneList.First(x => x.GetType() == typeof(WStage)) as WStage;
                List<MapEvent> eventsList = stage.GetChildrenOfType<MapEvent>();
                if (eventIndex < 0)
                    prop.SetValue(newNode, null);
                else if (eventIndex < eventsList.Count)
                    prop.SetValue(newNode, eventsList[eventIndex]);
            }
            else
            {
                var value = Convert.ChangeType(jsonValue, prop.PropertyType);
                if (value != null)
                    prop.SetValue(newNode, value);
            }
        }

        newNode.PostLoad();
        return newNode;
    }
    catch
    {
        // Creating the entity failed, so remove it from the scene.
        newNode.SetParent(null);
        throw;
    }
}
private void CreateInMemChunk(ChunkHeader chunkHeader, int fileSize)
{
    var md5 = MD5.Create();

    // ALLOCATE MEM
    Interlocked.Exchange(ref _isCached, 1);
    _cachedLength = fileSize;
    _cachedData = Marshal.AllocHGlobal(_cachedLength);

    // WRITER STREAM
    var memStream = new UnmanagedMemoryStream((byte*)_cachedData, _cachedLength, _cachedLength, FileAccess.ReadWrite);
    WriteHeader(md5, memStream, chunkHeader);
    memStream.Position = ChunkHeader.Size;

    // READER STREAMS
    Interlocked.Add(ref _memStreamCount, _maxReaderCount);
    for (int i = 0; i < _maxReaderCount; i++)
    {
        var stream = new UnmanagedMemoryStream((byte*)_cachedData, _cachedLength);
        var reader = new BinaryReader(stream);
        _memStreams.Enqueue(new ReaderWorkItem(stream, reader, isMemory: true));
    }

    _writerWorkItem = new WriterWorkItem(null, memStream, md5);
}
/// <summary>
/// Read RW header
/// </summary>
/// <param name="f">Existing BinaryReader</param>
/// <returns>Parsed chunk header</returns>
protected ChunkHeader ReadHeader(BinaryReader f)
{
    ChunkHeader h = new ChunkHeader();
    h.Type = (ChunkType)f.ReadUInt32();
    h.Size = f.ReadUInt32();
    h.Toolkit = f.ReadUInt16();
    h.Version = f.ReadUInt16();
    return h;
}
private void InitNew(ChunkHeader chunkHeader, int fileSize)
{
    Ensure.NotNull(chunkHeader, "chunkHeader");
    Ensure.Positive(fileSize, "fileSize");

    _fileSize = fileSize;
    _isReadOnly = false;
    _chunkHeader = chunkHeader;
    _physicalDataSize = 0;
    _logicalDataSize = 0;

    if (_inMem)
    {
        CreateInMemChunk(chunkHeader, fileSize);
    }
    else
    {
        CreateWriterWorkItemForNewChunk(chunkHeader, fileSize);
        SetAttributes();
        CreateReaderStreams();
    }

    _readSide = chunkHeader.IsScavenged
        ? (IChunkReadSide)new TFChunkReadSideScavenged(this)
        : new TFChunkReadSideUnscavenged(this);
}
public void LoadFromStream(Scene parentScene, EndianBinaryReader reader)
{
    var mapEntities = new List<RawMapEntity>();
    long fileOffsetStart = reader.BaseStream.Position;

    // File Header
    int chunkCount = reader.ReadInt32();

    // Read the chunk headers
    List<ChunkHeader> chunks = new List<ChunkHeader>();
    for (int i = 0; i < chunkCount; i++)
    {
        ChunkHeader chunk = new ChunkHeader();
        chunk.FourCC = reader.ReadString(4);
        chunk.ElementCount = reader.ReadInt32();
        chunk.ChunkOffset = reader.ReadInt32();

        chunk.Layer = ResolveChunkFourCCToLayer(chunk.FourCC);
        chunk.FourCC = ResolveFourCCWithLayerToName(chunk.FourCC);
        chunks.Add(chunk);
    }

    // For each chunk, read all elements of that type of chunk.
    for (int i = 0; i < chunks.Count; i++)
    {
        ChunkHeader chunk = chunks[i];

        // Find the appropriate JSON template that describes this chunk.
        MapEntityDataDescriptor template = m_editorCore.Templates.MapEntityDataDescriptors.Find(x => x.FourCC == chunk.FourCC);
        if (template == null)
        {
            WLog.Error(LogCategory.EntityLoading, null, "Unsupported entity FourCC: {0}. Map will save without this data!", chunk.FourCC);
            continue;
        }

        reader.BaseStream.Position = chunk.ChunkOffset;
        for (int k = 0; k < chunk.ElementCount; k++)
        {
            RawMapEntity entityInstance = LoadMapEntityFromStream(chunk.FourCC, reader, template);
            entityInstance.Layer = chunk.Layer;
            mapEntities.Add(entityInstance);
        }
    }

    m_entityData[parentScene] = mapEntities;
}
private void InitCompleted(bool verifyHash)
{
    var fileInfo = new FileInfo(_filename);
    if (!fileInfo.Exists)
        throw new CorruptDatabaseException(new ChunkNotFoundException(_filename));

    _fileSize = (int)fileInfo.Length;
    _isReadOnly = true;
    SetAttributes(_filename, true);
    CreateReaderStreams();

    var reader = GetReaderWorkItem();
    try
    {
        _chunkHeader = ReadHeader(reader.Stream);
        Log.Debug("Opened completed " + _filename + " as version " + _chunkHeader.Version);
        if (_chunkHeader.Version != (byte)ChunkVersions.Unaligned && _chunkHeader.Version != (byte)ChunkVersions.Aligned)
            throw new CorruptDatabaseException(new WrongFileVersionException(_filename, _chunkHeader.Version, CurrentChunkVersion));
        if (_chunkHeader.Version != (byte)ChunkVersions.Aligned && _unbuffered)
            throw new Exception("You can only run unbuffered mode on v3 or higher chunk files. Please run scavenge on your database to upgrade your transaction file to v3.");

        _chunkFooter = ReadFooter(reader.Stream);
        if (!_chunkFooter.IsCompleted)
        {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format("Chunk file '{0}' should be completed, but is not.", _filename)));
        }

        _logicalDataSize = _chunkFooter.LogicalDataSize;
        _physicalDataSize = _chunkFooter.PhysicalDataSize;

        var expectedFileSize = _chunkFooter.PhysicalDataSize + _chunkFooter.MapSize + ChunkHeader.Size + ChunkFooter.Size;
        if (_chunkHeader.Version == (byte)ChunkVersions.Unaligned && reader.Stream.Length != expectedFileSize)
        {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format("Chunk file '{0}' should have a file size of {1} bytes, but it has a size of {2} bytes.",
                              _filename, expectedFileSize, reader.Stream.Length)));
        }
    }
    finally
    {
        ReturnReaderWorkItem(reader);
    }

    _readSide = _chunkHeader.IsScavenged
        ? (IChunkReadSide)new TFChunkReadSideScavenged(this)
        : new TFChunkReadSideUnscavenged(this);
    _readSide.Cache();

    if (verifyHash)
        VerifyFileHash();
}
public static TFChunk CreateNew(string filename, int chunkSize, int chunkNumber, bool isScavenged)
{
    var chunkHeader = new ChunkHeader(CurrentChunkVersion, chunkSize, chunkNumber, chunkNumber, isScavenged);
    return CreateWithHeader(filename, chunkHeader, chunkSize + ChunkHeader.Size + ChunkFooter.Size);
}
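A usage sketch for the factory above; the file name and the 256 MB chunk size are illustrative values, not defaults taken from this code.

// Create a fresh, unscavenged chunk file (illustrative arguments):
var chunk = TFChunk.CreateNew("chunk-000000.000000", 256 * 1024 * 1024, 0, isScavenged: false);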
// ---------------------------------------------------------------------------
private bool HeaderEndReached(ChunkHeader Chunk)
{
    // Check for header end: any control character in the 4-char ID,
    // or the "DATA" chunk, terminates the header section.
    // Chunk.ID holds four chars, so the valid indices are 0..3.
    return ((byte)(Chunk.ID[0]) < 32)
        || ((byte)(Chunk.ID[1]) < 32)
        || ((byte)(Chunk.ID[2]) < 32)
        || ((byte)(Chunk.ID[3]) < 32)
        || Utils.StringEqualsArr("DATA", Chunk.ID);
}
private void CreateWriterWorkItemForNewChunk(ChunkHeader chunkHeader, int fileSize)
{
    var md5 = MD5.Create();

    // create temp file first and set desired length
    // if there is not enough disk space or something else prevents file to be resized as desired
    // we'll end up with empty temp file, which won't trigger false error on next DB verification
    var tempFilename = string.Format("{0}.{1}.tmp", _filename, Guid.NewGuid());
    var tempFile = new FileStream(tempFilename, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.Read,
                                  WriteBufferSize, FileOptions.SequentialScan);
    tempFile.SetLength(fileSize);

    // we need to write header into temp file before moving it into correct chunk place, so in case of crash
    // we don't end up with seemingly valid chunk file with no header at all...
    WriteHeader(md5, tempFile, chunkHeader);
    tempFile.FlushToDisk();
    tempFile.Close();
    File.Move(tempFilename, _filename);

    var stream = new FileStream(_filename, FileMode.Open, FileAccess.ReadWrite, FileShare.Read,
                                WriteBufferSize, FileOptions.SequentialScan);
    stream.Position = ChunkHeader.Size;
    _writerWorkItem = new WriterWorkItem(stream, null, md5);
    Flush(); // persist file move result
}
public void ReadAnimKeys(FileStream fs, ChunkHeader h)
{
    data.Keys = new List<PSAAnimKeys>();
    for (int i = 0; i < h.count; i++)
    {
        PSAAnimKeys key = new PSAAnimKeys();
        byte[] buff = new byte[h.size];
        for (int j = 0; j < h.size; j++)
            buff[j] = (byte)fs.ReadByte();
        key.raw = buff;
        key.location = new PSAPoint(buff, 0);
        key.rotation = new PSAQuad(buff, 12);
        key.time = BitConverter.ToSingle(buff, 28);
        data.Keys.Add(key);
    }
}
private void WriteHeader(MD5 md5, Stream stream, ChunkHeader chunkHeader)
{
    var chunkHeaderBytes = chunkHeader.AsByteArray();
    md5.TransformBlock(chunkHeaderBytes, 0, ChunkHeader.Size, null, 0);
    stream.Write(chunkHeaderBytes, 0, ChunkHeader.Size);
}
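TransformBlock feeds the header into the running MD5 without finalizing it, so later record writes can keep extending the same hash. A minimal standalone illustration of that incremental-hash pattern with the standard .NET API (not this class's code):

// Incremental hashing: the final digest equals MD5(part1 + part2).
using (var md5 = System.Security.Cryptography.MD5.Create())
{
    byte[] part1 = { 1, 2, 3 };
    byte[] part2 = { 4, 5, 6 };
    md5.TransformBlock(part1, 0, part1.Length, null, 0); // hash more data later
    md5.TransformFinalBlock(part2, 0, part2.Length);     // finish the computation
    byte[] digest = md5.Hash;
}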
/**<summary>Constructs the default object.</summary>*/ internal Water(ObjectDataHeader objectHeader, ChunkHeader chunkHeader) : base(objectHeader, chunkHeader) { this.Header = new WaterHeader(); }
protected override void LoadInternal(int fileVersion, Stream stream, ref bool isOldFormat)
{
    Debug.Assert(fileVersion == CURRENT_FILE_VERSION);

    ChunkHeader header = new ChunkHeader();
    Dictionary<byte, MyVoxelMaterialDefinition> materialTable = null;
    HashSet<UInt64> materialLeaves = new HashSet<UInt64>();
    HashSet<UInt64> contentLeaves = new HashSet<UInt64>();

    while (header.ChunkType != ChunkTypeEnum.EndOfFile)
    {
        MyMicroOctreeLeaf contentLeaf;
        MyMicroOctreeLeaf materialLeaf;
        UInt64 key;

        header.ReadFrom(stream);
        Debug.Assert(Enum.IsDefined(typeof(ChunkTypeEnum), header.ChunkType));

        switch (header.ChunkType)
        {
            case ChunkTypeEnum.StorageMetaData:
                ReadStorageMetaData(stream, header, ref isOldFormat);
                break;
            case ChunkTypeEnum.MaterialIndexTable:
                materialTable = ReadMaterialTable(stream, header, ref isOldFormat);
                break;
            case ChunkTypeEnum.MacroContentNodes:
                ReadOctreeNodes(stream, header, ref isOldFormat, m_contentNodes);
                break;
            case ChunkTypeEnum.MacroMaterialNodes:
                ReadOctreeNodes(stream, header, ref isOldFormat, m_materialNodes);
                break;
            case ChunkTypeEnum.ContentLeafProvider:
                ReadProviderLeaf(stream, header, ref isOldFormat, contentLeaves);
                break;
            case ChunkTypeEnum.ContentLeafOctree:
                ReadOctreeLeaf(stream, header, ref isOldFormat, MyStorageDataTypeEnum.Content, out key, out contentLeaf);
                m_contentLeaves.Add(key, contentLeaf);
                break;
            case ChunkTypeEnum.MaterialLeafProvider:
                ReadProviderLeaf(stream, header, ref isOldFormat, materialLeaves);
                break;
            case ChunkTypeEnum.MaterialLeafOctree:
                ReadOctreeLeaf(stream, header, ref isOldFormat, MyStorageDataTypeEnum.Material, out key, out materialLeaf);
                m_materialLeaves.Add(key, materialLeaf);
                break;
            case ChunkTypeEnum.DataProvider:
                ReadDataProvider(stream, header, ref isOldFormat, out m_dataProvider);
                break;
            case ChunkTypeEnum.EndOfFile:
                break;
            default:
                throw new InvalidBranchException();
        }
    }

    { // At this point data provider should be loaded too, so have him create leaves
        MyCellCoord cell = new MyCellCoord();
        foreach (var key in contentLeaves)
        {
            cell.SetUnpack(key);
            cell.Lod += LeafLodCount;
            m_contentLeaves.Add(key, new MyProviderLeaf(m_dataProvider, MyStorageDataTypeEnum.Content, ref cell));
        }
        foreach (var key in materialLeaves)
        {
            cell.SetUnpack(key);
            cell.Lod += LeafLodCount;
            m_materialLeaves.Add(key, new MyProviderLeaf(m_dataProvider, MyStorageDataTypeEnum.Material, ref cell));
        }
    }

    { // material reindexing when definitions change
        Debug.Assert(materialTable != null);
        bool needsReindexing = false;
        foreach (var entry in materialTable)
        {
            if (entry.Key != entry.Value.Index)
                needsReindexing = true;
            m_oldToNewIndexMap.Add(entry.Key, entry.Value.Index);
        }

        if (needsReindexing)
        {
            if (m_dataProvider != null)
                m_dataProvider.ReindexMaterials(m_oldToNewIndexMap);
            foreach (var entry in m_materialLeaves)
                entry.Value.ReplaceValues(m_oldToNewIndexMap);
            MySparseOctree.ReplaceValues(m_materialNodes, m_oldToNewIndexMap);
        }
        m_oldToNewIndexMap.Clear();
    }
}
public MemoryStream DecompressPCC(Stream raw, IPCCObject pcc)
{
    raw.Seek(pcc.header.Length, SeekOrigin.Begin);
    int pos = 4;
    pcc.NumChunks = raw.ReadValueS32();
    List<Chunk> Chunks = new List<Chunk>();

    // Read the chunk table.
    for (int i = 0; i < pcc.NumChunks; i++)
    {
        Chunk c = new Chunk();
        c.uncompressedOffset = raw.ReadValueS32();
        c.uncompressedSize = raw.ReadValueS32();
        c.compressedOffset = raw.ReadValueS32();
        c.compressedSize = raw.ReadValueS32();
        c.Compressed = new byte[c.compressedSize];
        c.Uncompressed = new byte[c.uncompressedSize];
        Chunks.Add(c);
    }

    // Read and decompress each chunk.
    int count = 0;
    for (int i = 0; i < Chunks.Count; i++)
    {
        Chunk c = Chunks[i];
        raw.Seek(c.compressedOffset, SeekOrigin.Begin);
        c.Compressed = raw.ReadBytes(c.compressedSize);

        ChunkHeader h = new ChunkHeader();
        h.magic = BitConverter.ToInt32(c.Compressed, 0);
        if (h.magic != -1641380927) // 0x9E2A83C1
            throw new FormatException("Chunk magic number incorrect");
        h.blocksize = BitConverter.ToInt32(c.Compressed, 4);
        h.compressedsize = BitConverter.ToInt32(c.Compressed, 8);
        h.uncompressedsize = BitConverter.ToInt32(c.Compressed, 12);

        // Block table follows the 16-byte header; block count is a ceiling division.
        pos = 16;
        int blockCount = (h.uncompressedsize % h.blocksize == 0)
            ? h.uncompressedsize / h.blocksize
            : h.uncompressedsize / h.blocksize + 1;
        List<Block> BlockList = new List<Block>();
        for (int j = 0; j < blockCount; j++)
        {
            Block b = new Block();
            b.compressedsize = BitConverter.ToInt32(c.Compressed, pos);
            b.uncompressedsize = BitConverter.ToInt32(c.Compressed, pos + 4);
            pos += 8;
            BlockList.Add(b);
        }

        // Decompress the blocks in order, appending into the chunk's output buffer.
        int outpos = 0;
        foreach (Block b in BlockList)
        {
            byte[] datain = new byte[b.compressedsize];
            byte[] dataout = new byte[b.uncompressedsize];
            Buffer.BlockCopy(c.Compressed, pos, datain, 0, b.compressedsize);
            pos += b.compressedsize;
            try
            {
                LZO1X.Decompress(datain, dataout);
            }
            catch
            {
                throw new Exception("LZO decompression failed!");
            }
            Buffer.BlockCopy(dataout, 0, c.Uncompressed, outpos, b.uncompressedsize);
            outpos += b.uncompressedsize;
        }

        c.header = h;
        c.blocks = BlockList;
        count++;
        Chunks[i] = c;
    }

    // Reassemble the decompressed chunks at their original offsets.
    MemoryStream result = new MemoryStream();
    foreach (Chunk c in Chunks)
    {
        result.Seek(c.uncompressedOffset, SeekOrigin.Begin);
        result.WriteBytes(c.Uncompressed);
    }
    return result;
}
// ---------------------------------------------------------------------------
private void ReadTag(String FileName, ref HeaderInfo Header)
{
    ChunkHeader Chunk = new ChunkHeader();
    char[] Data = new char[250];
    FileStream fs = null;
    BinaryReader SourceFile = null;

    try
    {
        // Set read-access, open file
        fs = new FileStream(FileName, FileMode.Open, FileAccess.Read);
        SourceFile = new BinaryReader(fs);
        fs.Seek(16, SeekOrigin.Begin);

        do
        {
            Array.Clear(Data, 0, Data.Length);

            // Read chunk header
            Chunk.ID = SourceFile.ReadChars(4);
            Chunk.Size = SourceFile.ReadUInt32();

            // Read chunk data and set tag item if chunk header valid
            if (HeaderEndReached(Chunk))
                break;

            Data = SourceFile.ReadChars(Swap32((int)(Chunk.Size >> 16)) % 250);
            SetTagItem(new String(Chunk.ID), new String(Data), ref Header);
        }
        while (fs.Position < fs.Length);
    }
    catch (Exception e)
    {
        System.Console.WriteLine(e.StackTrace);
    }

    if (SourceFile != null)
        SourceFile.Close();
    if (fs != null)
        fs.Close();
}
/// <summary>
/// Decompress an entire ME1 or ME2 pcc file.
/// </summary>
/// <param name="raw">pcc file passed in stream format</param>
/// <returns>A decompressed stream.</returns>
public static MemoryStream DecompressME1orME2(Stream raw)
{
    // Walk the package header to find the chunk table; its position depends
    // on the sizes of the name string and the generations table.
    raw.Seek(4, SeekOrigin.Begin);
    ushort versionLo = raw.ReadValueU16();
    ushort versionHi = raw.ReadValueU16();
    raw.Seek(12, SeekOrigin.Begin);
    int tempNameSize = raw.ReadValueS32();
    raw.Seek(64 + tempNameSize, SeekOrigin.Begin);
    int tempGenerations = raw.ReadValueS32();
    raw.Seek(36 + tempGenerations * 12, SeekOrigin.Current);

    // ME1 packages carry four extra bytes here.
    if (versionLo == 491 && versionHi == 1008)
    {
        raw.Seek(4, SeekOrigin.Current);
    }

    int pos = 4;
    int NumChunks = raw.ReadValueS32();
    List<Chunk> Chunks = new List<Chunk>();

    // Read the chunk table.
    for (int i = 0; i < NumChunks; i++)
    {
        Chunk c = new Chunk();
        c.uncompressedOffset = raw.ReadValueS32();
        c.uncompressedSize = raw.ReadValueS32();
        c.compressedOffset = raw.ReadValueS32();
        c.compressedSize = raw.ReadValueS32();
        c.Compressed = new byte[c.compressedSize];
        c.Uncompressed = new byte[c.uncompressedSize];
        Chunks.Add(c);
    }

    // Read and decompress each chunk.
    int count = 0;
    for (int i = 0; i < Chunks.Count; i++)
    {
        Chunk c = Chunks[i];
        raw.Seek(c.compressedOffset, SeekOrigin.Begin);
        c.Compressed = raw.ReadBytes(c.compressedSize);

        ChunkHeader h = new ChunkHeader();
        h.magic = BitConverter.ToInt32(c.Compressed, 0);
        if (h.magic != -1641380927) // 0x9E2A83C1
            throw new FormatException("Chunk magic number incorrect");
        h.blocksize = BitConverter.ToInt32(c.Compressed, 4);
        h.compressedsize = BitConverter.ToInt32(c.Compressed, 8);
        h.uncompressedsize = BitConverter.ToInt32(c.Compressed, 12);

        // Block table follows the 16-byte header; block count is a ceiling division.
        pos = 16;
        int blockCount = (h.uncompressedsize % h.blocksize == 0)
            ? h.uncompressedsize / h.blocksize
            : h.uncompressedsize / h.blocksize + 1;
        List<Block> BlockList = new List<Block>();
        for (int j = 0; j < blockCount; j++)
        {
            Block b = new Block();
            b.compressedsize = BitConverter.ToInt32(c.Compressed, pos);
            b.uncompressedsize = BitConverter.ToInt32(c.Compressed, pos + 4);
            pos += 8;
            BlockList.Add(b);
        }

        // Decompress the blocks in order, appending into the chunk's output buffer.
        int outpos = 0;
        foreach (Block b in BlockList)
        {
            byte[] datain = new byte[b.compressedsize];
            byte[] dataout = new byte[b.uncompressedsize];
            Buffer.BlockCopy(c.Compressed, pos, datain, 0, b.compressedsize);
            pos += b.compressedsize;
            try
            {
                LZO1X.Decompress(datain, dataout);
            }
            catch
            {
                throw new Exception("LZO decompression failed!");
            }
            Buffer.BlockCopy(dataout, 0, c.Uncompressed, outpos, b.uncompressedsize);
            outpos += b.uncompressedsize;
        }

        c.header = h;
        c.blocks = BlockList;
        count++;
        Chunks[i] = c;
    }

    // Reassemble the decompressed chunks at their original offsets.
    MemoryStream result = new MemoryStream();
    foreach (Chunk c in Chunks)
    {
        result.Seek(c.uncompressedOffset, SeekOrigin.Begin);
        result.WriteBytes(c.Uncompressed);
    }
    return result;
}
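A usage sketch for the decompressor above; the file paths are illustrative only.

// Decompress a compressed ME1/ME2 package into memory (illustrative paths):
using (FileStream input = File.OpenRead(@"C:\Games\MassEffect2\BioA_Sample.pcc"))
{
    MemoryStream decompressed = DecompressME1orME2(input);
    File.WriteAllBytes(@"C:\Temp\BioA_Sample_decompressed.pcc", decompressed.ToArray());
}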
public override void Save(BinaryWriter stream)
{
    // Write the file header
    FileHeader header = new FileHeader();
    header.ChunkCount = _chunkList.Count;
    header.Save(stream);

    // Save the current position of the stream, then allocate numChunkHeaders * chunkHeaderSize
    // bytes in the stream. We'll then create the chunk headers as we write the chunk data,
    // and then come back to this position and write the headers in afterwards.
    int chunkHeaderOffset = (int)stream.BaseStream.Position;
    stream.BaseStream.Position += _chunkList.Count * ChunkHeader.Size;

    List<ChunkHeader> chunkHeaders = new List<ChunkHeader>();
    int rtblHeaderOffset;

    foreach (KeyValuePair<Type, List<BaseChunk>> pair in _chunkList)
    {
        ChunkHeader chunkHeader = new ChunkHeader();
        chunkHeader.ChunkOffset = (int)stream.BaseStream.Position;
        chunkHeader.Tag = pair.Value[0].ChunkName; // ToDo: We're in trouble if the chunk has no children.
        chunkHeader.ElementCount = pair.Value.Count;
        chunkHeaders.Add(chunkHeader);

        if (chunkHeader.Tag == "RTBL")
        {
            rtblHeaderOffset = (int)stream.BaseStream.Position;
            stream.BaseStream.Position += pair.Value.Count * RTBLChunk.Header.Size;

            // Then write all of the Entry and Table pairs.
            foreach (BaseChunk chunk in pair.Value)
            {
                RTBLChunk rtblHeader = (RTBLChunk)chunk;
                rtblHeader.EntryHeader.EntryOffset = (int)stream.BaseStream.Position;

                // Write the EntryData to disk which writes the Table offset as being
                // immediately after itself.
                rtblHeader.EntryHeader.Entry.WriteData(stream);
                rtblHeader.EntryHeader.Entry.Table.WriteData(stream);
            }

            // Then go back and write all of the rtblHeaders to disk now that we've set their offsets.
            stream.BaseStream.Position = rtblHeaderOffset;
            foreach (BaseChunk baseChunk in pair.Value)
            {
                baseChunk.WriteData(stream);
            }

            // Finally skip to the next clear spot in the file.
            stream.Seek(0, SeekOrigin.End);
        }
        else
        {
            // Write all of the chunk data into the stream
            for (int i = 0; i < pair.Value.Count; i++)
            {
                BaseChunk chunk = pair.Value[i];
                chunk.WriteData(stream);
            }
        }
    }

    // Now that we've created the chunk headers and they have correct offsets set, let's go back
    // and write them to the actual file.
    stream.BaseStream.Position = chunkHeaderOffset;
    foreach (ChunkHeader chunkHeader in chunkHeaders)
    {
        chunkHeader.WriteData(stream);
    }
}
internal unsafe ModsChunk(ChunkHeader *header) : base(header) { ParseSets((Mods *)ChunkHeader.ChunkBegin(header), header->Size / sizeof(Mods)); }
public override void Load(byte[] data)
{
    int offset = 0;
    FileHeader header = new FileHeader();
    header.Load(data, ref offset);

    _chunkList = new Dictionary<Type, List<BaseChunk>>();
    var chunkHeaders = new List<ChunkHeader>();
    for (int i = 0; i < header.ChunkCount; i++)
    {
        ChunkHeader chunkHeader = new ChunkHeader();
        chunkHeader.Load(data, ref offset);
        chunkHeaders.Add(chunkHeader);
    }

    var orderedList = chunkHeaders.OrderBy(kvp => kvp.ChunkOffset);
    foreach (ChunkHeader chunkHeader in orderedList)
    {
        for (int k = 0; k < chunkHeader.ElementCount; k++)
        {
            BaseChunk chunk;

            // The first three characters of the tag identify the chunk type; the
            // fourth may encode a layer for ACTR/SCOB-style chunks (handled below).
            switch (chunkHeader.Tag.Substring(0, 3).ToUpper())
            {
                case "ENV": chunk = new EnvrChunk(); break;
                case "COL": chunk = new ColoChunk(); break;
                case "PAL": chunk = new PaleChunk(); break;
                case "VIR": chunk = new VirtChunk(); break;
                case "SCL": chunk = new SclsChunk(); break;
                case "PLY": chunk = new PlyrChunk(); break;
                case "RPA": chunk = new RPATChunk(); break;
                case "PAT": chunk = new PathChunk(); break;
                case "RPP": chunk = new RppnChunk(); break;
                case "PPN": chunk = new PpntChunk(); break;
                case "SON": chunk = new SondChunk(); break;
                case "FIL": chunk = new FiliChunk(); break;
                case "MEC": chunk = new MecoChunk(); break;
                case "MEM": chunk = new MemaChunk(); break;
                case "TRE": chunk = new TresChunk(); break;
                case "SHI": chunk = new ShipChunk(); break;
                case "MUL": chunk = new MultChunk(); break;
                case "LGH": chunk = new LghtChunk(); break;
                case "LGT": chunk = new LgtvChunk(); break;
                case "RAR": chunk = new RaroChunk(); break;
                case "ARO": chunk = new ArobChunk(); break;
                case "EVN": chunk = new EvntChunk(); break;
                case "TGO": chunk = new TgobChunk(); break;
                case "ACT":
                    chunk = new ActrChunk();
                    if (!chunkHeader.Tag.ToUpper().EndsWith("R"))
                        chunk.ChunkLayer = EditorHelpers.ConvertStringToLayerId(chunkHeader.Tag.ToUpper().Substring(3, 1));
                    break;
                case "SCO":
                    chunk = new ScobChunk();
                    if (!chunkHeader.Tag.EndsWith("B"))
                        chunk.ChunkLayer = EditorHelpers.ConvertStringToLayerId(chunkHeader.Tag.ToUpper().Substring(3, 1));
                    break;
                case "STA": chunk = new StagChunk(); break;
                case "RCA": chunk = new RcamChunk(); break;
                case "CAM": chunk = new CamrChunk(); break;
                case "FLO": chunk = new FlorChunk(); break;
                case "TWO": chunk = new TwoDChunk(); break;
                case "2DM": chunk = new TwoDMAChunk(); break;
                case "DMA": chunk = new DMAPChunk(); break;
                case "LBN": chunk = new LbnkChunk(); break;
                case "TGD": chunk = new TgdrChunk(); break;
                case "RTB": chunk = new RTBLChunk(); break;
                default:
                    Console.WriteLine("Unsupported Chunk Tag: " + chunkHeader.Tag + " Chunk will not be saved!");
                    chunk = null;
                    break;
            }

            if (chunk == null)
                continue;

            chunk.ChunkName = chunkHeader.Tag;
            chunk.LoadData(data, ref chunkHeader.ChunkOffset);
            AddChunk(chunk);
        }
    }
}
public static TFChunk CreateNew(string filename, int chunkSize, int chunkStartNumber, int chunkEndNumber,
                                bool isScavenged, bool inMem = false)
{
    var chunkHeader = new ChunkHeader(CurrentChunkVersion, chunkSize, chunkStartNumber, chunkEndNumber, isScavenged, Guid.NewGuid());
    return CreateWithHeader(filename, chunkHeader, chunkSize + ChunkHeader.Size + ChunkFooter.Size, inMem);
}
private void CreateWriterWorkItemForNewChunk(ChunkHeader chunkHeader, int fileSize)
{
    var md5 = MD5.Create();
    var stream = new FileStream(_filename, FileMode.Create, FileAccess.ReadWrite, FileShare.Read,
                                WriteBufferSize, FileOptions.SequentialScan);
    var writer = new BinaryWriter(stream);
    stream.SetLength(fileSize);
    WriteHeader(md5, stream, chunkHeader);
    _writerWorkItem = new WriterWorkItem(stream, writer, md5);
    Flush();
}
public static TFChunk CreateWithHeader(string filename, ChunkHeader header, int fileSize, bool inMem)
{
    var chunk = new TFChunk(filename, ESConsts.TFChunkInitialReaderCount, ESConsts.TFChunkMaxReaderCount,
                            TFConsts.MidpointsDepth, inMem);
    try
    {
        chunk.InitNew(header, fileSize);
    }
    catch
    {
        chunk.Dispose();
        throw;
    }
    return chunk;
}
private void InitCompleted(bool verifyHash)
{
    var fileInfo = new FileInfo(_filename);
    if (!fileInfo.Exists)
        throw new CorruptDatabaseException(new ChunkNotFoundException(_filename));

    _fileSize = (int)fileInfo.Length;
    _isReadOnly = true;
    SetAttributes();
    CreateReaderStreams();

    var reader = GetReaderWorkItem();
    try
    {
        _chunkHeader = ReadHeader(reader.Stream);
        if (_chunkHeader.Version != CurrentChunkVersion)
            throw new CorruptDatabaseException(new WrongFileVersionException(_filename, _chunkHeader.Version, CurrentChunkVersion));

        _chunkFooter = ReadFooter(reader.Stream);
        if (!_chunkFooter.IsCompleted)
        {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format("Chunk file '{0}' should be completed, but is not.", _filename)));
        }

        _logicalDataSize = _chunkFooter.LogicalDataSize;
        _physicalDataSize = _chunkFooter.PhysicalDataSize;

        var expectedFileSize = _chunkFooter.PhysicalDataSize + _chunkFooter.MapSize + ChunkHeader.Size + ChunkFooter.Size;
        if (reader.Stream.Length != expectedFileSize)
        {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format("Chunk file '{0}' should have file size {1} bytes, but instead has {2} bytes length.",
                              _filename, expectedFileSize, reader.Stream.Length)));
        }
    }
    finally
    {
        ReturnReaderWorkItem(reader);
    }

    _readSide = _chunkHeader.IsScavenged
        ? (IChunkReadSide)new TFChunkReadSideScavenged(this)
        : new TFChunkReadSideUnscavenged(this);
    _readSide.Cache();

    if (verifyHash)
        VerifyFileHash();
}
public void Load(System.IO.Stream stream)
{
    CanSave = false;
    using (var reader = new FileReader(stream))
    {
        while (!reader.EndOfStream)
        {
            // Each chunk starts with a fixed-size header; unknown fields are read and discarded.
            ChunkHeader chunk = new ChunkHeader();
            chunk.Position = reader.Position;
            chunk.Identifier = reader.ReadUInt32();
            uint unk = reader.ReadUInt32();
            chunk.ChunkSize = reader.ReadUInt32();
            chunk.ChunkId = reader.ReadUInt32();
            chunk.NextFilePtr = reader.ReadUInt32();
            chunk.FileSize = reader.ReadUInt32();
            uint unk2 = reader.ReadUInt32();
            uint unk3 = reader.ReadUInt32();
            Chunks.Add(chunk);

            var Identifer = chunk.Identifier.Reverse();
            switch (Identifer)
            {
                case ChunkTextureFile:
                    if (chunk.ChunkSize > 0x88)
                    {
                        reader.Seek(chunk.Position + 0x88, System.IO.SeekOrigin.Begin);
                        chunk.FileName = reader.ReadString(Syroot.BinaryData.BinaryStringFormat.ZeroTerminated);
                    }
                    break;
                case ChunkMetaInfo:
                    break;
                case ChunkAnimInfo:
                    if (chunk.ChunkSize > 0xB0)
                    {
                        // reader.Seek(chunk.Position + 0xB0, System.IO.SeekOrigin.Begin);
                        // chunk.FileName = reader.ReadString(Syroot.BinaryData.BinaryStringFormat.ZeroTerminated);
                    }
                    break;
                case ChunkAnimData:
                    AnimationFile animFile = new AnimationFile();
                    animFile.Read(reader);
                    chunk.ChunkData = animFile;
                    break;
                case ChunkSkeletonData:
                    SkeletonFile skelFile = new SkeletonFile();
                    skelFile.Read(reader);
                    chunk.ChunkData = skelFile;
                    break;
                case ChunkModelData:
                    ModelFile modelFile = new ModelFile();
                    modelFile.Read(reader);
                    chunk.ChunkData = modelFile;
                    break;
                case ChunkMaterialData:
                    MaterialFile matFile = new MaterialFile();
                    matFile.Read(reader);
                    chunk.ChunkData = matFile;
                    break;
            }

            // Jump to the next chunk regardless of how much the handler consumed.
            reader.Seek(chunk.Position + chunk.ChunkSize, System.IO.SeekOrigin.Begin);
        }
        ReadGPUFile(FilePath);
    }
}
public byte[] getRawTFCComp(uint pos)
{
    MemoryStream ret = new MemoryStream();
    if (!File.Exists(filename))
        return ret.ToArray();

    FileStream fs = new FileStream(filename, FileMode.Open, FileAccess.Read);
    if (pos > fs.Length)
    {
        fs.Close();
        return ret.ToArray();
    }
    fs.Seek(pos, SeekOrigin.Begin);

    ChunkHeader ch = new ChunkHeader();
    List<ChunkBlock> cb = new List<ChunkBlock>();
    byte[] buff = new byte[4];
    fs.Read(buff, 0, 4);
    uint magic = BitConverter.ToUInt32(buff, 0);
    if (magic == 0x9E2A83C1)
    {
        ch.magic = magic;
        fs.Read(buff, 0, 4);
        ch.blocksize = BitConverter.ToUInt32(buff, 0);
        fs.Read(buff, 0, 4);
        ch.compsize = BitConverter.ToUInt32(buff, 0);
        fs.Read(buff, 0, 4);
        ch.uncompsize = BitConverter.ToUInt32(buff, 0);
    }

    // Number of blocks. Note this division truncates, so a trailing partial
    // block is only accounted for when uncompsize divides evenly by blocksize.
    int n = (int)(ch.uncompsize / ch.blocksize);
    if (ch.uncompsize < ch.blocksize)
        n = 1;

    // Read the block table (compressed/uncompressed size per block).
    for (int i = 0; i < n; i++)
    {
        ChunkBlock t = new ChunkBlock();
        fs.Read(buff, 0, 4);
        t.compsize = BitConverter.ToUInt32(buff, 0);
        fs.Read(buff, 0, 4);
        t.uncompsize = BitConverter.ToUInt32(buff, 0);
        cb.Add(t);
    }

    // Decompress each block by round-tripping it through the external zlibber.exe tool.
    string loc = Path.GetDirectoryName(Application.ExecutablePath);
    for (int i = 0; i < n; i++)
    {
        if (File.Exists(loc + "\\exec\\temp.dat"))
            File.Delete(loc + "\\exec\\temp.dat");
        if (File.Exists(loc + "\\exec\\out.dat"))
            File.Delete(loc + "\\exec\\out.dat");

        FileStream fs2 = new FileStream(loc + "\\exec\\temp.dat", FileMode.Create, FileAccess.Write);
        for (int j = 0; j < cb[i].compsize; j++)
            fs2.WriteByte((byte)fs.ReadByte());
        fs2.Close();

        RunShell(loc + "\\exec\\zlibber.exe", "-sdc temp.dat out.dat");

        fs2 = new FileStream(loc + "\\exec\\out.dat", FileMode.Open, FileAccess.Read);
        buff = new byte[fs2.Length];
        for (int j = 0; j < fs2.Length; j++)
            buff[j] = (byte)fs2.ReadByte();
        fs2.Close();

        StreamAppend(ret, buff);
    }

    fs.Close();
    return ret.ToArray();
}
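Shelling out to zlibber.exe per block through temp files is slow and fragile. On .NET 6 or later the same inflate step could be done in-process with System.IO.Compression.ZLibStream; a hedged sketch, assuming the block data is raw zlib as the external tool implies:

// In-process alternative to the zlibber.exe round trip (requires .NET 6+).
using System.IO;
using System.IO.Compression;

static byte[] InflateZlibBlock(byte[] compressed)
{
    using var input = new MemoryStream(compressed);
    using var zlib = new ZLibStream(input, CompressionMode.Decompress);
    using var output = new MemoryStream();
    zlib.CopyTo(output); // inflate the whole block into memory
    return output.ToArray();
}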