/// <summary>
/// Lazily walks the chunk's linked list, staying one step ahead of the
/// consumer: one more source element is copied into the list only after
/// the consumer has requested the last element cached so far.
/// </summary>
/// <returns>An enumerator over the elements of this chunk.</returns>
public IEnumerator <TSource> GetEnumerator()
{
    // A Chunk always contains at least one ChunkItem, so start at the head.
    for (ChunkItem node = head; node != null; node = node.Next)
    {
        yield return node.Value;

        // The consumer has caught up with the cache: copy one more element
        // from the source sequence while holding the lock.
        lock (_lock)
        {
            if (node == tail)
            {
                CopyNextChunkElement();
            }
        }
    }
}
// Enumerates this chunk's cached items, pulling exactly one new element
// from the source sequence each time the consumer reaches the current end
// of the cache. Invoked by the inner foreach loop of the client.
public IEnumerator <TSource> GetEnumerator()
{
    // A Chunk is never empty, so the head is always a valid starting point.
    ChunkItem node = _head;

    while (node != null)
    {
        yield return node.Value;

        // Only extend the list when the consumer is at the cached tail;
        // the lock guards against concurrent extension.
        lock (_mLock)
        {
            if (node == _tail)
            {
                CopyNextChunkElement();
            }
        }

        node = node.Next;
    }
}
/// <summary>
/// Builds a chunk for <paramref name="key"/>. The enumerator must already
/// be positioned on this chunk's first element, which seeds the list.
/// </summary>
public Chunk(TKey key, IEnumerator <TSource> enumerator, Func <TSource, bool> predicate)
{
    Key = key;
    _predicate = predicate;
    _enumerator = enumerator;

    // Single-node list to start with: head and tail coincide until
    // more elements are copied from the source.
    _tail = _head = new ChunkItem(enumerator.Current);
}
// Creates a chunk keyed by 'key'. The supplied enumerator must be
// positioned on the chunk's first element; that element becomes the
// initial (and only) node of the internal list.
public Chunk(TKey key, IEnumerator <TSource> enumerator, Func <TSource, bool> predicate)
{
    m_Lock = new object();

    Key = key;
    this.predicate = predicate;
    this.enumerator = enumerator;

    // Head and tail reference the same node until CopyNextChunkElement
    // appends a second element.
    head = new ChunkItem(enumerator.Current);
    tail = head;
}
// REQUIRES: enumerator != null && predicate != null
// The enumerator must already be positioned on this chunk's first element.
public Chunk(TKey key, IEnumerator <TSource> enumerator, Func <TSource, bool> predicate)
{
    this.key = key;
    this.enumerator = enumerator;
    this.predicate = predicate;
    m_Lock = new object();

    // Every chunk starts with exactly one cached element, so the list's
    // beginning and end are the same node until a second one is copied.
    tail = head = new ChunkItem(enumerator.Current);
}
// Pulls one more element from the source sequence into this chunk's list,
// or tears the chunk down when the source (or the group) is exhausted.
// Callers are expected to hold the chunk's lock.
private void CopyNextChunkElement()
{
    // MoveNext returning false means the whole source sequence is done.
    _isLastSourceElement = !_enumerator.MoveNext();

    bool chunkEnded = _isLastSourceElement || !_predicate(_enumerator.Current);
    if (chunkEnded)
    {
        // Release the shared enumerator and predicate so the next chunk
        // can take ownership of them.
        _enumerator = null;
        _predicate = null;
    }
    else
    {
        _tail.Next = new ChunkItem(_enumerator.Current);
    }

    // When the chunk ended, _tail.Next is null, leaving _tail null as the
    // end-of-list sentinel.
    _tail = _tail.Next;
}
// Copies the next element of the source sequence into the local list,
// or nulls out the enumerator/predicate when this chunk is finished.
private void CopyNextChunkElement()
{
    isLastSourceElement = !enumerator.MoveNext();

    // Condition flipped via De Morgan relative to the end-of-chunk test:
    // still in this chunk iff the source has more AND the predicate holds.
    if (!isLastSourceElement && predicate(enumerator.Current))
    {
        tail.Next = new ChunkItem(enumerator.Current);
    }
    else
    {
        // Source exhausted or a new group started: drop the shared
        // enumerator and predicate for reuse by the next chunk.
        enumerator = null;
        predicate = null;
    }

    // Leaves 'tail' null at the end of the chunk (checked elsewhere).
    tail = tail.Next;
}
// Splits the movie file into its chunks, invoking 'callback' (when given)
// for each chunk as it is read and adding every chunk to Chunks.
public void Disassemble(Action <ChunkItem> callback = null)
{
    // Records a chunk and forwards it to the caller-supplied callback.
    void HandleChunk(ChunkItem chunk)
    {
        callback?.Invoke(chunk);
        Chunks.Add(chunk);
    }

    if (Metadata.Codec == CodecKind.FGDM || Metadata.Codec == CodecKind.FGDC)
    {
        // Afterburner container: a fixed preamble of four chunks, then the
        // compressed chunk data described by the afterburner map.
        if (ChunkItem.Read(_input) is FileVersionChunk version &&
            ChunkItem.Read(_input) is FileCompressionTypesChunk compressionTypes &&
            ChunkItem.Read(_input) is AfterburnerMapChunk afterburnerMap &&
            ChunkItem.Read(_input) is FGEIChunk fgei)
        {
            var ilsChunk = fgei.ReadInitialLoadSegment(afterburnerMap.Entries[0]);
            fgei.ReadChunks(afterburnerMap.Entries, HandleChunk);
            ilsChunk.ReadChunks(afterburnerMap.Entries, HandleChunk);
        }
    }
    else if (Metadata.Codec == CodecKind.MV93)
    {
        // Direct cast instead of an unchecked 'as': a malformed file now
        // fails here with InvalidCastException rather than with a less
        // informative NullReferenceException on the next line.
        var imapChunk = (InitialMapChunk)ChunkItem.Read(_input);
        Version = imapChunk.Version;
        foreach (int offset in imapChunk.MemoryMapOffsets)
        {
            _input.Position = offset;
            if (ChunkItem.Read(_input) is MemoryMapChunk mmapChunk)
            {
                foreach (ChunkEntry entry in mmapChunk.Entries)
                {
                    if (entry.Flags.HasFlag(ChunkEntryFlags.Ignore))
                    {
                        // BUGFIX: the 'continue' was previously commented out
                        // ("//TODO: continue;"), so an ignored entry was both
                        // recorded as an UnknownChunk AND re-read and handled
                        // again below. Skip to the next entry instead.
                        HandleChunk(new UnknownChunk(_input, entry.Header));
                        continue;
                    }

                    _input.Position = entry.Offset;
                    ChunkItem chunk = ChunkItem.Read(_input);
                    chunk.Header.Id = entry.Header.Id;
                    HandleChunk(chunk);
                }
            }
        }
    }
}
// Parses the movie body into chunks, reading with a ShockwaveReader
// positioned just past the file header and metadata.
public void Disassemble()
{
    var input = new ShockwaveReader(_input.Span, Metadata.IsBigEndian);
    input.Advance(Metadata.Header.GetBodySize() + Metadata.GetBodySize());

    if (Metadata.Codec == CodecKind.FGDM || Metadata.Codec == CodecKind.FGDC)
    {
        // Afterburner container: a fixed preamble of four chunks, then the
        // compressed chunk data described by the afterburner map.
        if (ChunkItem.Read(ref input) is FileVersionChunk version &&
            ChunkItem.Read(ref input) is FileCompressionTypesChunk compressionTypes &&
            ChunkItem.Read(ref input) is AfterburnerMapChunk afterburnerMap &&
            ChunkItem.Read(ref input) is FileGzipEmbeddedImageChunk fgei)
        {
            Chunks = fgei.ReadChunks(ref input, afterburnerMap.Entries);
        }
    }
    else if (Metadata.Codec == CodecKind.MV93)
    {
        // BUGFIX: direct cast instead of an unchecked 'as' cast. A malformed
        // file now fails here with InvalidCastException instead of a less
        // informative NullReferenceException when Version is read below.
        var imapChunk = (InitialMapChunk)ChunkItem.Read(ref input);
        Version = imapChunk.Version;
        foreach (int offset in imapChunk.MemoryMapOffsets)
        {
            input.Position = offset;
            if (ChunkItem.Read(ref input) is MemoryMapChunk mmapChunk)
            {
                foreach (ChunkEntry entry in mmapChunk.Entries)
                {
                    if (entry.Header.Kind == ChunkKind.RIFX)
                    {
                        continue; //TODO: HACK
                    }
                    if (entry.Flags.HasFlag(ChunkEntryFlags.Ignore))
                    {
                        // Ignored entries are preserved as raw unknown chunks.
                        Chunks.Add(entry.Id, new UnknownChunk(ref input, entry.Header));
                        continue;
                    }

                    input.Position = entry.Offset;
                    Chunks.Add(entry.Id, ChunkItem.Read(ref input));
                }
            }
        }
    }
    //TODO: _input = null;
}
// Walks the cached linked list of this chunk. Whenever the consumer reaches
// the current tail, one more element is copied from the source sequence
// (under the lock) before the walk continues.
public IEnumerator <TSource> GetEnumerator()
{
    var node = _head;

    while (node != null)
    {
        yield return node.Value;

        // Extend the cache by one element when we are at its end.
        lock (_mLock)
        {
            if (node == _tail)
            {
                CopyNextChunkElement();
            }
        }

        node = node.Next;
    }
}
/// <summary>
/// Inflates the zlib-compressed chunk described by <paramref name="entry"/>
/// and parses the decompressed bytes into a <see cref="ChunkItem"/>.
/// </summary>
/// <param name="entry">Afterburner map entry giving the compressed length,
/// decompressed length, and chunk header.</param>
/// <returns>The chunk parsed from the decompressed data.</returns>
public unsafe ChunkItem ReadCompressedChunk(AfterBurnerMapEntry entry)
{
    // Small payloads decompress onto the stack; larger ones onto the heap.
    Span <byte> decompressedData = entry.DecompressedLength <= 1024 ?
        stackalloc byte[entry.DecompressedLength] :
        new byte[entry.DecompressedLength];

    fixed(byte *pBuffer = &_data.Slice(Position + 2)[0]) //Skip ZLib header
    {
        using var stream = new UnmanagedMemoryStream(pBuffer, entry.Length - 2);
        using var deflateStream = new DeflateStream(stream, CompressionMode.Decompress);

        // BUGFIX: Stream.Read may return fewer bytes than requested, so a
        // single Read call is not guaranteed to fill the buffer. Loop until
        // the buffer is full or the stream ends.
        int filled = 0;
        while (filled < decompressedData.Length)
        {
            int read = deflateStream.Read(decompressedData.Slice(filled));
            if (read == 0)
            {
                break; // Truncated stream; parse whatever was recovered.
            }
            filled += read;
        }
    }
    Advance(entry.Length);

    ShockwaveReader input = new ShockwaveReader(decompressedData, IsBigEndian);
    return(ChunkItem.Read(ref input, entry.Header));
}
// Adds one ChunkItem to the current group by advancing the shared source
// enumerator, or finishes the chunk when the source/group ends.
// REQUIRES: !DoneCopyingChunk && lock(this)
private void CopyNextChunkElement()
{
    // MoveNext returning false means the source sequence is exhausted.
    isLastSourceElement = !enumerator.MoveNext();

    bool chunkComplete = isLastSourceElement || !predicate(enumerator.Current);
    if (chunkComplete)
    {
        // End of the source, or the next element belongs to a new group:
        // release the enumerator and predicate for reuse by the next chunk.
        enumerator = null;
        predicate = null;
    }
    else
    {
        tail.Next = new ChunkItem(enumerator.Current);
    }

    // When the chunk is complete, tail.Next was never assigned and is null,
    // so tail becomes null here; DoneCopyingChunk relies on this sentinel.
    tail = tail.Next;
}
// Materializes the concrete item object for a tree node entry and populates
// it from the node's payload bytes.
// Throws IOException for item types this implementation does not support.
private BaseItem CreateItem(NodeItem item, byte[] buffer, int offset)
{
    var data = EndianUtilities.ToByteArray(buffer, (int)(offset + item.DataOffset), (int)item.DataSize);

    // Switch expression keeps the type map declarative and guarantees every
    // path either yields an item or throws. The cast on the first arm fixes
    // the expression's common type to BaseItem.
    BaseItem result = item.Key.ItemType switch
    {
        ItemType.ChunkItem => (BaseItem)new ChunkItem(item.Key),
        ItemType.DevItem => new DevItem(item.Key),
        ItemType.RootItem => new RootItem(item.Key),
        ItemType.InodeRef => new InodeRef(item.Key),
        ItemType.InodeItem => new InodeItem(item.Key),
        ItemType.DirItem => new DirItem(item.Key),
        ItemType.DirIndex => new DirIndex(item.Key),
        ItemType.ExtentData => new ExtentData(item.Key),
        ItemType.RootRef => new RootRef(item.Key),
        ItemType.RootBackref => new RootBackref(item.Key),
        ItemType.XattrItem => new XattrItem(item.Key),
        ItemType.OrphanItem => new OrphanItem(item.Key),
        _ => throw new IOException($"Unsupported item type {item.Key.ItemType}"),
    };

    result.ReadFrom(data, 0);
    return(result);
}
public int ReadFrom(byte[] buffer, int offset) { Magic = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x40); if (Magic != BtrfsMagic) { return(Size); } Checksum = EndianUtilities.ToByteArray(buffer, offset, 0x20); FsUuid = EndianUtilities.ToGuidLittleEndian(buffer, offset + 0x20); PhysicalAddress = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x30); Flags = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x38); Generation = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x48); Root = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x50); ChunkRoot = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x58); LogRoot = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x60); LogRootTransId = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x68); TotalBytes = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x70); BytesUsed = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x78); RootDirObjectid = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x80); NumDevices = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0x88); SectorSize = EndianUtilities.ToUInt32LittleEndian(buffer, offset + 0x90); NodeSize = EndianUtilities.ToUInt32LittleEndian(buffer, offset + 0x94); LeafSize = EndianUtilities.ToUInt32LittleEndian(buffer, offset + 0x98); StripeSize = EndianUtilities.ToUInt32LittleEndian(buffer, offset + 0x9c); ChunkRootGeneration = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0xa4); CompatFlags = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0xac); CompatRoFlags = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0xb4); IncompatFlags = EndianUtilities.ToUInt64LittleEndian(buffer, offset + 0xbc); ChecksumType = (ChecksumType)EndianUtilities.ToUInt16LittleEndian(buffer, offset + 0xc4); RootLevel = buffer[offset + 0xc6]; ChunkRootLevel = buffer[offset + 0xc7]; LogRootLevel = buffer[offset + 0xc8]; //c9 62 DEV_ITEM data for this device var labelData = EndianUtilities.ToByteArray(buffer, 
offset + 0x12b, 0x100); int eos = Array.IndexOf(labelData, (byte)0); if (eos != -1) { Label = Encoding.UTF8.GetString(labelData, 0, eos); } //22b 100 reserved var n = EndianUtilities.ToUInt32LittleEndian(buffer, offset + 0xa0); offset += 0x32b; var systemChunks = new List <ChunkItem>(); while (n > 0) { var key = new Key(); offset += key.ReadFrom(buffer, offset); var chunkItem = new ChunkItem(key); offset += chunkItem.ReadFrom(buffer, offset); systemChunks.Add(chunkItem); n = n - (uint)key.Size - (uint)chunkItem.Size; } SystemChunkArray = systemChunks.ToArray(); //32b 800 (n bytes valid) Contains (KEY, CHUNK_ITEM) pairs for all SYSTEM chunks. This is needed to bootstrap the mapping from logical addresses to physical. //b2b 4d5 Currently unused return(Size); }