/// <summary>
/// Verifies that a freshly created, well-formed super block passes validation
/// and retains the expected magic string.
/// </summary>
public void Validate()
{
    // Arrange: build a minimal valid super block (block size 512, 1 directory
    // block, free block map at block 4, 1 total block).
    var block = CreateSuperBlock(SuperBlock.Magic, 512, 1, 4, 0, 0, 1);

    // Act: must not throw for a well-formed block.
    block.Validate();

    // Assert
    Assert.Equal(SuperBlock.Magic, block.MagicString);
}
/// <summary>
/// Initializes a new instance of the <see cref="PdbFile"/> class by memory-mapping
/// the file, validating the MSF super block, loading the Free Page Map and the
/// stream directory, and creating lazily-evaluated caches for the well-known
/// DBI/TPI/IPI/symbol streams.
/// </summary>
/// <param name="path">Path to PDB file.</param>
/// <exception cref="Exception">
/// Thrown when the MSF structure is malformed: file size not a multiple of the
/// block size, a stream block map pointing past end of file, or a directory
/// stream that was not fully consumed.
/// NOTE(review): a more specific exception type (e.g. InvalidDataException)
/// would be preferable, but is kept as-is to avoid changing what callers catch.
/// </exception>
public PdbFile(string path)
{
    File = new MMFile(path);
    Reader = new MMFileReader(File);

    // Parse file headers

    // Initialize MSF super block
    SuperBlock = MSF.SuperBlock.Read(Reader);
    SuperBlock.Validate();
    if (File.Length % SuperBlock.BlockSize != 0)
    {
        throw new Exception("File size is not a multiple of block size");
    }

    // Initialize Free Page Map.
    // The Fpm exists either at block 1 or block 2 of the MSF. However, this
    // allows for a maximum of getBlockSize() * 8 blocks bits in the Fpm, and
    // thusly an equal number of total blocks in the file. For a block size
    // of 4KiB (very common), this would yield 32KiB total blocks in file, for a
    // maximum file size of 32KiB * 4KiB = 128MiB. Obviously this won't do, so
    // the Fpm is split across the file at `getBlockSize()` intervals. As a
    // result, every block whose index is of the form |{1,2} + getBlockSize() * k|
    // for any non-negative integer k is an Fpm block. In theory, we only really
    // need to reserve blocks of the form |{1,2} + getBlockSize() * 8 * k|, but
    // current versions of the MSF format already expect the Fpm to be arranged
    // at getBlockSize() intervals, so we have to be compatible.
    // See the function fpmPn() for more information:
    // https://github.com/Microsoft/microsoft-pdb/blob/master/PDB/msf/msf.cpp#L489

    // Number of FPM intervals = ceil(NumBlocks / (8 bits * BlockSize bytes)).
    uint fpmIntervals = (SuperBlock.NumBlocks + 8 * SuperBlock.BlockSize - 1) / (8 * SuperBlock.BlockSize);
    uint[] fpmBlocks = new uint[fpmIntervals];
    uint currentFpmBlock = SuperBlock.FreeBlockMapBlock;

    for (int i = 0; i < fpmBlocks.Length; i++)
    {
        fpmBlocks[i] = currentFpmBlock;
        currentFpmBlock += SuperBlock.BlockSize;
    }

    IBinaryReader fpmStream = new MappedBlockBinaryReader<MMFileReader>(fpmBlocks, SuperBlock.BlockSize, (SuperBlock.NumBlocks + 7) / 8, Reader);

    // BUG FIX: the FPM bytes must be read through fpmStream, which stitches the
    // scattered FPM blocks together. The original code read from Reader, whose
    // position at this point is simply wherever SuperBlock.Read() left it, so
    // FreePageMap was filled with unrelated bytes.
    FreePageMap = fpmStream.ReadByteArray((int)fpmStream.Length);

    // Read directory blocks
    Reader.Position = (long)SuperBlock.BlockMapOffset;
    uint[] directoryBlocks = Reader.ReadUintArray((int)SuperBlock.NumDirectoryBlocks);

    // Parse stream data: the directory stream starts with the stream count,
    // then one size per stream, then the block lists for each stream in order.
    PdbStream directoryStream = new PdbStream(directoryBlocks, SuperBlock.NumDirectoryBytes, this);
    uint numStreams = directoryStream.Reader.ReadUint();

    streams = new PdbStream[numStreams];
    uint[] streamSizes = directoryStream.Reader.ReadUintArray(streams.Length);

    for (int i = 0; i < streams.Length; i++)
    {
        uint streamSize = streamSizes[i];

        // A size of uint.MaxValue marks a nil (absent) stream: it owns no blocks.
        uint numExpectedStreamBlocks = streamSize == uint.MaxValue ? 0 : SuperBlock.BytesToBlocks(streamSize);
        uint[] blocks = directoryStream.Reader.ReadUintArray((int)numExpectedStreamBlocks);

        // Every referenced block must end within the file, or the map is corrupt.
        foreach (uint block in blocks)
        {
            ulong blockEndOffset = SuperBlock.BlocksToBytes(block + 1);

            if (blockEndOffset > (ulong)File.Length)
            {
                throw new Exception("Stream block map is corrupt.");
            }
        }
        streams[i] = new PdbStream(blocks, streamSize, this);
    }

    // The directory must be consumed exactly; leftover bytes indicate corruption.
    if (directoryStream.Reader.Position != SuperBlock.NumDirectoryBytes)
    {
        throw new Exception("Not whole directory stream was read");
    }

    // Lazily materialize the well-known streams on first access.
    dbiStreamCache = SimpleCache.CreateStruct(() => new DbiStream(streams[(uint)SpecialStream.StreamDBI]));
    pdbSymbolStreamCache = SimpleCache.CreateStruct(() => new SymbolStream(streams[DbiStream.SymbolRecordStreamIndex]));
    tpiStreamCache = SimpleCache.CreateStruct(() => new TpiStream(streams[(uint)SpecialStream.StreamTPI]));
    ipiStreamCache = SimpleCache.CreateStruct(() => new TpiStream(streams[(uint)SpecialStream.StreamIPI]));
}