/// <summary>
/// Initializes a new instance of the <see cref="DebugSubsectionStream"/> class.
/// </summary>
/// <param name="reader">Binary reader.</param>
/// <param name="end">End of the symbol stream in binary reader. If it is less than 0 or bigger than binary reader length, it will be read fully.</param>
public DebugSubsectionStream(IBinaryReader reader, long end = -1)
{
    Reader = reader;
    references = new List<DebugSubsectionReference>();

    long position = reader.Position;

    // Clamp the requested end position to the data actually available.
    if (end < 0 || end > reader.Length)
    {
        end = reader.Length;
    }

    // Scan the stream and index every subsection without materializing its payload.
    // Each subsection starts with two uints (kind, data length) = 8 bytes of header.
    while (position < end)
    {
        DebugSubsectionKind kind = (DebugSubsectionKind)reader.ReadUint();
        uint dataLen = reader.ReadUint();

        references.Add(new DebugSubsectionReference
        {
            DataOffset = position + 8,
            Kind = kind,
            DataLen = dataLen,
        });
        // Widen to long before adding: plain (dataLen + 8) is uint arithmetic and
        // would wrap for a corrupt dataLen near uint.MaxValue, looping forever.
        position += (long)dataLen + 8;
        reader.Move(dataLen);
    }

    // Subsections are parsed lazily, grouped by kind, on first access.
    debugSubsectionsByKind = new DictionaryCache<DebugSubsectionKind, DebugSubsection[]>(GetDebugSubsectionsByKind);
}
/// <summary>
/// Moves the binary reader forward so that its position becomes a multiple of the specified alignment.
/// </summary>
/// <param name="reader">Stream binary reader.</param>
/// <param name="alignment">Number of bytes to be aligned</param>
public static void Align(this IBinaryReader reader, long alignment)
{
    long remainder = reader.Position % alignment;

    // Already aligned — nothing to skip.
    if (remainder == 0)
    {
        return;
    }

    reader.Move((uint)(alignment - remainder));
}
/// <summary>
/// Reads <see cref="OneMethodRecord"/> from the stream.
/// </summary>
/// <param name="reader">Stream binary reader.</param>
/// <param name="kind">Type record kind.</param>
/// <param name="isFromOverloadedList"><c>true</c> if we are reading this from <see cref="MethodOverloadListRecord"/>.</param>
public static OneMethodRecord Read(IBinaryReader reader, TypeLeafKind kind, bool isFromOverloadedList = false)
{
    // Attributes always come first in the record layout.
    MemberAttributes attributes = MemberAttributes.Read(reader);

    // Overload-list entries carry 2 extra bytes here — presumably padding; verify against format spec.
    if (isFromOverloadedList)
    {
        reader.Move(2); // 2 = sizeof(ushort)
    }

    OneMethodRecord record = new OneMethodRecord
    {
        Kind = kind,
        Attributes = attributes,
        Type = TypeIndex.Read(reader),
    };

    // The virtual function table offset is only serialized for newly-introduced virtuals.
    if (record.Attributes.IsIntroducedVirtual)
    {
        record.VFTableOffset = reader.ReadInt();
    }
    else
    {
        record.VFTableOffset = -1;
    }

    // Standalone records end with the method name; overload-list entries do not.
    if (!isFromOverloadedList)
    {
        record.Name = reader.ReadCString();
    }

    return record;
}
/// <summary>
/// Initializes a new instance of the <see cref="SymbolStream"/> class.
/// </summary>
/// <param name="reader">Binary reader.</param>
/// <param name="end">End of the symbol stream in binary reader. If it is less than 0 or bigger than binary reader length, it will be read fully.</param>
public SymbolStream(IBinaryReader reader, long end = -1)
{
    Reader = reader;

    long position = reader.Position;

    // Clamp the requested end position to the data actually available.
    if (end < 0 || end > reader.Length)
    {
        end = reader.Length;
    }

    // Presize the reference list using a rough average record size of 35 bytes.
    long remainingBytes = end - position;

    references = new List<SymbolRecordReference>((int)(remainingBytes / 35));

    // Index every symbol record without materializing its payload; records are parsed lazily.
    while (position < end)
    {
        RecordPrefix prefix = RecordPrefix.Read(reader);

        if (prefix.RecordLength < 2)
        {
            throw new Exception("CV corrupt record");
        }

        ushort dataLen = prefix.DataLen;

        references.Add(new SymbolRecordReference
        {
            DataOffset = (uint)position + RecordPrefix.Size,
            Kind = (SymbolRecordKind)prefix.RecordKind,
            DataLen = dataLen,
        });
        position += dataLen + RecordPrefix.Size;
        reader.Move(dataLen);
    }

    symbolsByKind = new DictionaryCache<SymbolRecordKind, SymbolRecord[]>(GetSymbolsByKind);
    symbols = new ArrayCache<SymbolRecord>(references.Count, GetSymbol);
}
/// <summary>
/// Initializes a new instance of the <see cref="TpiStream"/> class.
/// Validates the TPI header, indexes all type records, and sets up lazy caches
/// backed by the hash stream (hash values, index offsets, hash adjusters, hash table).
/// </summary>
/// <param name="stream">PDB symbol stream.</param>
public TpiStream(PdbStream stream)
{
    Stream = stream;

    // --- Header validation ---
    if (stream.Reader.BytesRemaining < TpiStreamHeader.Size)
    {
        throw new Exception("TPI Stream does not contain a header.");
    }
    Header = TpiStreamHeader.Read(stream.Reader);
    if (Header.Version != PdbTpiVersion.V80)
    {
        throw new Exception("Unsupported TPI Version.");
    }
    if (Header.HeaderSize != TpiStreamHeader.Size)
    {
        throw new Exception("Corrupt TPI Header size.");
    }
    if (Header.HashKeySize != 4) // 4 = sizeof(uint)
    {
        throw new Exception("TPI Stream expected 4 byte hash key size.");
    }
    if (Header.HashBucketsCount < MinTpiHashBuckets || Header.HashBucketsCount > MaxTpiHashBuckets)
    {
        throw new Exception("TPI Stream Invalid number of hash buckets.");
    }

    // The actual type records themselves come from this stream
    TypeRecordsSubStream = Stream.Reader.ReadSubstream(Header.TypeRecordBytes);
    // Each thread gets its own duplicated reader so lazy type parsing does not
    // race on the shared reader position.
    typeRecordsSubStreamPerThread = new System.Threading.ThreadLocal<IBinaryReader>(() => TypeRecordsSubStream.Duplicate());

    // --- Index every type record (kind + offset + length) without parsing payloads ---
    IBinaryReader reader = TypeRecordsSubStream;
    long position = reader.Position, end = reader.Length;

    references = new List<RecordReference>();
    while (position < end)
    {
        RecordPrefix prefix = RecordPrefix.Read(reader);

        if (prefix.RecordLength < 2)
        {
            throw new Exception("CV corrupt record");
        }

        TypeLeafKind kind = (TypeLeafKind)prefix.RecordKind;
        ushort dataLen = prefix.DataLen;

        references.Add(new RecordReference
        {
            DataOffset = (uint)position + RecordPrefix.Size,
            Kind = kind,
            DataLen = dataLen,
        });
        position += dataLen + RecordPrefix.Size;
        reader.Move(dataLen);
    }

    // Records are parsed lazily on first access, individually and grouped by kind.
    typesCache = new ArrayCache<TypeRecord>(references.Count, true, ReadType);
    typesByKindCache = new DictionaryCache<TypeLeafKind, TypeRecord[]>(GetTypesByKind);

    // Hash indices, hash values, etc come from the hash stream.
    // NOTE(review): hash stream may be absent — every cache below tolerates a null HashSubstream.
    HashSubstream = Stream.File.GetStream(Header.HashStreamIndex)?.Reader;
    hashValuesCache = SimpleCache.CreateStruct(() =>
    {
        if (HashSubstream != null)
        {
            // There should be a hash value for every type record, or no hashes at all.
            uint numHashValues = Header.HashValueBuffer.Length / 4; // 4 = sizeof(uint)

            if (numHashValues != references.Count && numHashValues != 0)
            {
                throw new Exception("TPI hash count does not match with the number of type records.");
            }
            HashSubstream.Position = Header.HashValueBuffer.Offset;
            return(HashSubstream.ReadUintArray(references.Count));
        }
        return(null);
    });
    // (type index, stream offset) pairs used for fast seeking to a type record.
    typeIndexOffsetsCache = SimpleCache.CreateStruct(() =>
    {
        if (HashSubstream != null)
        {
            HashSubstream.Position = Header.IndexOffsetBuffer.Offset;

            uint numTypeIndexOffsets = Header.IndexOffsetBuffer.Length / TypeIndexOffset.Size;
            TypeIndexOffset[] typeIndexOffsets = new TypeIndexOffset[numTypeIndexOffsets];

            for (uint i = 0; i < typeIndexOffsets.Length; i++)
            {
                typeIndexOffsets[i] = TypeIndexOffset.Read(HashSubstream);
            }
            return(typeIndexOffsets);
        }
        return(null);
    });
    // Optional name-index -> type-index adjustments applied when building the hash table.
    hashAdjustersCache = SimpleCache.CreateStruct(() =>
    {
        if (HashSubstream != null && Header.HashAdjustersBuffer.Length > 0)
        {
            HashSubstream.Position = Header.HashAdjustersBuffer.Offset;
            return(new HashTable(HashSubstream));
        }
        return(null);
    });
    hashTableCache = SimpleCache.CreateStruct(() =>
    {
        uint[] hashes = HashValues;

        if (hashes != null)
        {
            // Construct hash table
            // Buckets are singly-linked lists; inserting at the head reverses record order
            // within a bucket.
            TypeIndexListItem[] hashTable = new TypeIndexListItem[Header.HashBucketsCount];

            for (uint ti = Header.TypeIndexBegin, i = 0; ti < Header.TypeIndexEnd; ti++, i++)
            {
                uint bucket = hashes[i] % Header.HashBucketsCount;

                hashTable[bucket] = new TypeIndexListItem(new TypeIndex(ti), hashTable[bucket]);
            }

            // Use hash adjusters to improve hash table
            if (HashAdjusters != null)
            {
                var namesMap = Stream.File.InfoStream.NamesMap;

                foreach (var kvp in HashAdjusters.Dictionary)
                {
                    uint nameIndex = kvp.Key;
                    TypeIndex typeIndex = new TypeIndex(kvp.Value);
                    string name = namesMap.GetString(nameIndex);
                    uint hash = Windows.HashTable.HashStringV1(name) % (uint)hashTable.Length;

                    // Find type index hash adjusters wants to be head
                    for (TypeIndexListItem item = hashTable[hash], previousItem = null; item != null; previousItem = item, item = item.Next)
                    {
                        if (item.TypeIndex == typeIndex)
                        {
                            if (previousItem == null)
                            {
                                // Our type index is already at the head
                                break;
                            }
                            // Unlink the item and re-link it at the head of the bucket.
                            previousItem.Next = item.Next;
                            item.Next = hashTable[hash];
                            hashTable[hash] = item;
                            break;
                        }
                    }
                }
            }
            return(hashTable);
        }
        return(null);
    });
}