// Sanity-checks a single UnusedBlockHeader: magic number, positive page
// count, doubly-linked free-list integrity, and the tail record's
// back-pointer.  Under SlowDebug, also checks every page of the region.
internal static unsafe void Verify(UnusedBlockHeader *header) {
    VTable.Assert(header->magic == (UIntPtr)magicNumber,
                  "Bad magic number in UnusedBlockHeader");
    VTable.Assert(header->count > 0,
                  "Count <= 0 in UnusedBlockHeader");
    // The previous node must point back at this header; 'next' may
    // legitimately be null when this is the last node in the list.
    VTable.Assert(header->prev->next == header,
                  "UnusedBlockHeader not linked properly (1)");
    if (header->next != null) {
        VTable.Assert(header->next->prev == header,
                      "UnusedBlockHeader not linked properly (2)");
    }
    UIntPtr count = header->count;
    // The last page of the region carries a tail record whose 'curr'
    // field points back to the header at the start of the region.
    UnusedBlockHeader *tailBlock = (UnusedBlockHeader *)
        (((UIntPtr)header) + PageTable.RegionSize(count - 1));
    VTable.Assert(tailBlock->curr == header,
                  "UnusedBlockHeader tail->curr is incorrect");
    if (PageManager.SlowDebug) {
        // Expensive check: every page of the region must be marked
        // unused and owned by this process.
        UIntPtr page = PageTable.Page((UIntPtr)header);
        for (UIntPtr i = UIntPtr.Zero; i < count; i++) {
            VTable.Assert(PageTable.IsUnusedPage(page + i) &&
                          PageTable.IsMyPage(page + i),
                          "Incorrect page in unused region");
        }
    }
}
// Verifies that the unused region covering [startPage, endPage) is
// self-consistent: pages share the same Clean/Dirty type, the region's
// header covers at least the requested span, every page is my-unused,
// the region does not silently continue past its header's count, and
// the region is linked into the free list for its slot.
// Fix: corrected the grammar of the final assertion message
// ("not list" -> "not in list").
internal static void VerifyUnusedRegion(UIntPtr startPage, UIntPtr endPage) {
    // Verify that all of the pages are of the same Clean/Dirty type.
    PageType startType = PageTable.Type(startPage);
    for (UIntPtr page = startPage; page < endPage; ++page) {
        VTable.Assert(startType == PageTable.Type(page),
                      "Unused page types don't match in region");
    }
    if (startPage > UIntPtr.Zero &&
        PageTable.IsUnusedPage(startPage - 1) &&
        PageTable.IsMyPage(startPage - 1)) {
        // We have already checked the region
        return;
    }
    // startPage is the first page of its region, so its page address is
    // the region's UnusedBlockHeader.
    UIntPtr regionAddr = PageTable.PageAddr(startPage);
    UnusedBlockHeader *regionHeader = (UnusedBlockHeader *) regionAddr;
    UIntPtr pageCount = regionHeader->count;
    VTable.Assert(pageCount >= (endPage - startPage),
                  "Region-to-verify is larger than its header specifies");
    // Extend the check to the full extent recorded in the header.
    endPage = startPage + pageCount;
    for (UIntPtr page = startPage; page < endPage; ++page) {
        VTable.Assert(PageTable.IsUnusedPage(page) &&
                      PageTable.IsMyPage(page),
                      "Non my-unused page in unused region");
        PageManager.VerifyUnusedPage(page,
                                     (page == startPage) ||
                                     (page == (endPage - 1)));
    }
    // The region must be maximal: the page immediately after it may not
    // also be my-unused (it would have been coalesced).
    VTable.Assert(!(endPage < PageTable.pageTableCount &&
                    PageTable.IsUnusedPage(endPage) &&
                    PageTable.IsMyPage(endPage)),
                  "My-unused page immediately after unused region");
    // Verify that the region is correctly linked into the
    // list of unused memory blocks
    int slot = SlotFromCount(pageCount);
    UnusedBlockHeader *header = unusedMemoryBlocks[slot].next;
    // NOTE(review): if the slot's list were empty, 'header' would be
    // null here and Verify would dereference it; presumably the caller
    // guarantees the region is on this list — confirm.
    UnusedBlockHeader.Verify(header);
    while (regionAddr != (UIntPtr)header) {
        header = header->next;
        VTable.Assert(header != null,
                      "Unused region not in list for its slot number");
        UnusedBlockHeader.Verify(header);
    }
}
// Tries to reserve [startPage, startPage+pageCount) with type newType.
// Walks forward over the pages that are already my-unused; if the
// remainder of the range is unallocated, commits fresh memory there,
// marks it unused, and then delegates to TryReserveUnusedPages.
// Returns true on success.  fCleanPages: in - caller wants clean pages;
// out - whether the pages delivered are in fact clean.
internal static bool TryReservePages(Thread currentThread,
                                     UIntPtr startPage,
                                     UIntPtr pageCount,
                                     PageType newType,
                                     ref bool fCleanPages)
{
    Trace.Log(Trace.Area.Page,
              "TryReservePages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    // startPage must be the first page of any unused run it belongs to.
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    UIntPtr index = startPage;
    // Skip over the prefix of pages that are already my-unused.
    while (index < endPage &&
           PageTable.IsUnusedPage(index) &&
           PageTable.IsMyPage(index)) {
        index++;
    }
    // NOTE(review): if index == endPage here, this queries the type of
    // the page just past the requested range — presumably valid because
    // of a page-table guard entry; confirm index < pageTableCount.
    if (PageTable.IsUnallocatedPage(PageTable.Type(index))) {
        // We should try to extend the region of allocated pages
        UIntPtr pagesNeeded = pageCount - (index - startPage);
        UIntPtr bytesNeeded = PageTable.RegionSize(pagesNeeded);
        // Commit in heap_commit_size granules.
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = PageTable.PageAddr(index);
        bool gotMemory = false;
        bool iflag = EnterMutex(currentThread);
        try {
            gotMemory =
                MemoryManager.AllocateMemory(startAddr, allocSize);
            if (gotMemory) {
                UIntPtr allocPages = PageTable.PageCount(allocSize);
                // Pass null for the thread: we already hold the mutex.
                MarkUnusedPages(/* avoid recursive locking */ null,
                                index, allocPages, true);
            }
        } finally {
            LeaveMutex(currentThread, iflag);
        }
        if (gotMemory) {
            // The whole range is now unused; reserve it normally.
            bool success =
                TryReserveUnusedPages(currentThread, startPage,
                                      pageCount, newType,
                                      ref fCleanPages);
            Trace.Log(Trace.Area.Page,
                      "TryReservePages success={0}",
                      __arglist(success));
            return(success);
        }
    }
    return(false);
}
// Marks [startPage, startPage+pageCount) as unused, coalescing the new
// region with any adjacent my-unused regions before and after it, and
// links the combined region into the free list.  currentThread may be
// null when the caller already holds the page mutex.
private static void MarkUnusedPages(Thread currentThread,
                                    UIntPtr startPage,
                                    UIntPtr pageCount,
                                    bool fCleanPages)
{
    Trace.Log(Trace.Area.Page,
              "MarkUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    UIntPtr endPage = startPage + pageCount;
    if (avoidDirtyPages && !fCleanPages) {
        // Policy says never keep dirty pages around: scrub them now so
        // the region can be recorded as clean.
        UIntPtr dirtyStartAddr = PageTable.PageAddr(startPage);
        UIntPtr dirtySize = PageTable.RegionSize(pageCount);
        Util.MemClear(dirtyStartAddr, dirtySize);
        fCleanPages = true;
    }
    bool iflag = EnterMutex(currentThread);
    try {
        // Coalesce with a my-unused region immediately after us.
        if (endPage < PageTable.pageTableCount) {
            if (PageTable.IsUnusedPage(endPage) &&
                PageTable.IsMyPage(endPage)) {
                UIntPtr regionSize = UnlinkUnusedPages(endPage);
                endPage += regionSize;
            }
        }
        // Coalesce with a my-unused region immediately before us.  The
        // tail record on its last page leads back to its header.
        // NOTE(review): startPage - 1 underflows when startPage == 0 —
        // presumably startPage is never page 0; confirm.
        UIntPtr queryStartPage = startPage - 1;
        UIntPtr newStartPage = startPage;
        if (PageTable.IsUnusedPage(queryStartPage) &&
            PageTable.IsMyPage(queryStartPage)) {
            UnusedBlockHeader *tailUnused = (UnusedBlockHeader *)
                PageTable.PageAddr(queryStartPage);
            UIntPtr newStartAddr = (UIntPtr)tailUnused->curr;
            newStartPage = PageTable.Page(newStartAddr);
            UIntPtr regionSize = UnlinkUnusedPages(newStartPage);
            // The preceding region must end exactly where we begin.
            VTable.Assert(newStartPage + regionSize == startPage);
        }
        PageType pageType =
            fCleanPages ? PageType.UnusedClean : PageType.UnusedDirty;
        PageTable.SetType(startPage, pageCount, pageType);
        // Link the fully coalesced region into the free list.
        LinkUnusedPages(newStartPage, endPage - newStartPage, false);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
// Initializes an UnusedBlockHeader over the region starting at
// startPage and inserts it into the free list for its size slot.
// asVictim controls list position (head vs. second) — see below.
private static void LinkUnusedPages(UIntPtr startPage,
                                    UIntPtr pageCount,
                                    bool asVictim)
{
    if (PageManager.SlowDebug) {
        // Expensive check: all pages being linked are my-unused.
        for (UIntPtr i = startPage; i < startPage + pageCount; i++) {
            VTable.Assert(PageTable.IsUnusedPage(i) &&
                          PageTable.IsMyPage(i),
                          "Incorrect page to link into unused region");
        }
    }
    Trace.Log(Trace.Area.Page,
              "LinkUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    // The region must be maximal: no my-unused page may directly
    // precede or follow it, and it must lie within the page table.
    VTable.Deny(startPage > UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    VTable.Deny(startPage + pageCount > PageTable.pageTableCount);
    VTable.Deny(startPage + pageCount < PageTable.pageTableCount &&
                PageTable.IsUnusedPage(startPage + pageCount) &&
                PageTable.IsMyPage(startPage + pageCount));
    UnusedBlockHeader *header = (UnusedBlockHeader *)
        PageTable.PageAddr(startPage);
    UnusedBlockHeader.Initialize(header, pageCount);
    int slot = SlotFromCount(pageCount);
    // Unused blocks are linked into the free list either as the result of a collection
    // or as a result of carving a big block into a smaller allocation and a remainder.
    // When such a remainder is linked back into the free list, it is identified as a
    // victim. We favor subsequent allocations from these victims, in an attempt to
    // reduce fragmentation. This is achieved by keeping victims at the head of the
    // free list.
    //
    // TODO: the long term solution is to perform best fit on the free list.
    if (asVictim || unusedMemoryBlocks[slot].next == null) {
        // Insert at the head: pin the list sentinel while we splice.
        fixed(UnusedBlockHeader *listHeader = &unusedMemoryBlocks[slot]) {
            UnusedBlockHeader.InsertNext(listHeader, header);
        }
    } else {
        // Non-victim: insert after the current head so victims stay
        // in front.
        UnusedBlockHeader *listHeader = unusedMemoryBlocks[slot].next;
        UnusedBlockHeader.InsertNext(listHeader, header);
    }
}
// Detaches the unused-page block beginning at startPage from its free
// list and returns the number of pages the block contained.
private static UIntPtr UnlinkUnusedPages(UIntPtr startPage) {
    // startPage must be the first page of a my-unused region: the page
    // itself is my-unused and its predecessor (if any) is not.
    VTable.Assert(PageTable.IsUnusedPage(startPage) &&
                  PageTable.IsMyPage(startPage));
    VTable.Deny(startPage > UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    // The region's header lives at the start of its first page.
    UIntPtr headerAddr = PageTable.PageAddr(startPage);
    UnusedBlockHeader *blockHeader = (UnusedBlockHeader *) headerAddr;
    UIntPtr removedPages = UnusedBlockHeader.Remove(blockHeader);
    Trace.Log(Trace.Area.Page,
              "UnlinkUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, removedPages));
    return removedPages;
}
// Validates heap consistency: checks that all threads are under GC
// control, verifies every heap page and the static data, and walks each
// live thread's stack, confirming nothing lives on an unused page.
public void VerifyHeap() {
    // Temporarily set the thread allocation point to the unused
    // space token, if necessary.
    Thread callingThread = Thread.CurrentThread;
    VerifyAllThreadsGCControlled(callingThread);
    // Do the real work
    VerifyPages(this.objectVisitor);
    StaticData.ScanStaticData(this.referenceVisitor);
    // The thread table itself must not sit on an unused page.
    VTable.Deny(PageTable.IsUnusedPage(PageTable.Page(Magic.addressOf(Thread.threadTable))));
    foreach (Thread thread in Thread.threadTable) {
        if (thread == null) {
            continue;
        }
        // Neither may any live Thread object; then verify its stack.
        VTable.Deny(PageTable.IsUnusedPage(PageTable.Page(Magic.addressOf(thread))));
        this.stackVerifier.Verify(this.threadReferenceVisitor, thread);
    }
}
// Finds a destination area for copying an object of objectSize bytes.
// First tries to extend the current region (destCursor/destLimit) in
// place; otherwise scans forward for a run of zombie pages, possibly
// extended by adjacent unused pages, big enough to hold the object.
// Reserved unused pages are converted to zombie pages, skipped gaps are
// registered, and destPage/destCursor/destLimit are updated in place.
private unsafe void FindDestinationArea(ref UIntPtr destPage,
                                        ref UIntPtr destCursor,
                                        ref UIntPtr destLimit,
                                        UIntPtr objectSize,
                                        PageType destGeneration)
{
    VTable.Assert(IsValidGeneration((int)destGeneration));
    UIntPtr cursorPage = PageTable.Page(destCursor);
    UIntPtr limitPage = PageTable.Page(destLimit);
    UIntPtr pageAddr = PageTable.PagePad(destCursor);
    UIntPtr testPage = limitPage;
    // Last page (exclusive) the object would occupy if placed at
    // destCursor.
    UIntPtr endTestPage = PageTable.PageCount(destCursor + objectSize);
    if (destCursor > UIntPtr.Zero &&
        IsMyZombiePage(PageTable.Page(destCursor - 1))) {
        VTable.Assert(destPage == limitPage);
        // Probe forward over zombie pages (and unused pages within the
        // span we need) to see whether the current region can grow.
        while (IsMyZombiePage(testPage) ||
               (testPage < endTestPage &&
                (PageTable.IsUnusedPage(testPage)))) {
            testPage++;
        }
        if (testPage >= endTestPage) {
            // We can expand the current region
            endTestPage = testPage;
            VTable.Assert(PageTable.PageAligned(destLimit));
            InteriorPtrTable.ClearFirst(limitPage, testPage);
            if (GC.remsetType == RemSetType.Cards) {
                OffsetTable.ClearLast(PageTable.PageAddr(limitPage),
                                      PageTable.PageAddr(testPage) - 1);
            }
            // Convert each run of unused pages in the span to zombie
            // pages, skipping runs that are already zombie.
            while (limitPage != endTestPage) {
                VTable.Assert(PageTable.IsUnusedPage(destPage));
                do {
                    destPage++;
                } while (destPage < endTestPage &&
                         PageTable.IsUnusedPage(destPage));
                bool fCleanPages = true;
                bool status =
                    PageManager.TryReserveUnusedPages(null, limitPage,
                                                      destPage - limitPage,
                                                      nurseryGeneration,
                                                      ref fCleanPages);
                VTable.Assert(status);
                MakeZombiePages(limitPage, destPage - limitPage,
                                destGeneration);
                while (destPage < endTestPage &&
                       IsMyZombiePage(destPage)) {
                    destPage++;
                }
                limitPage = destPage;
            }
            destLimit = PageTable.PageAddr(limitPage);
            return;
        }
    }
    // The current region cannot be extended.  Account for a partially
    // used cursor page and record the leftover pages as skipped.
    if (destCursor != pageAddr) {
        cursorPage++;
    }
    if (cursorPage != limitPage) {
        this.RegisterSkippedPages(cursorPage, limitPage);
    }
    // Find new region big enough to contain object
    UIntPtr neededPages = PageTable.PageCount(objectSize);
    UIntPtr prefixPage;
    while (true) {
        // Advance to the next run of my-zombie pages.
        do {
            destPage++;
        } while (!IsMyZombiePage(destPage));
        cursorPage = destPage;
        prefixPage = cursorPage;
        // Measure the zombie run.
        do {
            destPage++;
        } while (IsMyZombiePage(destPage));
        limitPage = destPage;
        if (neededPages <= limitPage - cursorPage) {
            // The zombie run alone is big enough.
            break;
        }
        // Check for following unused pages
        endTestPage = cursorPage + neededPages;
        VTable.Assert(endTestPage <= PageTable.pageTableCount);
        while (destPage < endTestPage &&
               (PageTable.IsUnusedPage(destPage) ||
                (IsMyZombiePage(destPage)))) {
            destPage++;
        }
        if (destPage == endTestPage) {
            // Zombie run plus trailing unused pages suffices.
            break;
        }
        // Check for preceding unused pages
        if (destPage >= neededPages) {
            endTestPage = destPage - neededPages;
            prefixPage = cursorPage - 1;
            // NOTE(review): prefixPage is unsigned, so the comparison
            // 'prefixPage >= UIntPtr.Zero' is always true; the loop is
            // presumably bounded because page 0 is never unused —
            // confirm.
            while (prefixPage >= UIntPtr.Zero &&
                   PageTable.IsUnusedPage(prefixPage)) {
                prefixPage--;
            }
            prefixPage++;
            if (prefixPage == endTestPage) {
                // Leading unused pages plus the zombie run suffices.
                break;
            }
        }
        // Register any skipped regions of pages
        this.RegisterSkippedPages(cursorPage, limitPage);
        while (limitPage < destPage) {
            VTable.Assert(PageTable.IsUnusedPage(limitPage));
            do {
                limitPage++;
            } while (limitPage < destPage &&
                     PageTable.IsUnusedPage(limitPage));
            cursorPage = limitPage;
            while (limitPage < destPage && IsMyZombiePage(limitPage)) {
                limitPage++;
            }
            if (cursorPage != limitPage) {
                this.RegisterSkippedPages(cursorPage, limitPage);
            }
        }
    }
    // We found an area big enough.  Commit the pre- and postfix areas
    // of unused pages.
    if (prefixPage != cursorPage) {
        bool fCleanPages = true;
        bool status =
            PageManager.TryReserveUnusedPages(null, prefixPage,
                                              cursorPage - prefixPage,
                                              nurseryGeneration,
                                              ref fCleanPages);
        VTable.Assert(status);
        MakeZombiePages(prefixPage, cursorPage - prefixPage,
                        destGeneration);
    }
    while (destPage != limitPage) {
        // Mark the region of unused pages as fromspace
        UIntPtr unusedPage = limitPage;
        VTable.Assert(PageTable.IsUnusedPage(unusedPage));
        do {
            unusedPage++;
        } while (unusedPage < destPage &&
                 PageTable.IsUnusedPage(unusedPage));
        bool fCleanPages = true;
        bool status =
            PageManager.TryReserveUnusedPages(null, limitPage,
                                              unusedPage - limitPage,
                                              nurseryGeneration,
                                              ref fCleanPages);
        VTable.Assert(status);
        MakeZombiePages(limitPage, unusedPage - limitPage,
                        destGeneration);
        // Skip any sections of pages already marked as fromspace
        limitPage = unusedPage;
        while (limitPage < destPage && IsMyZombiePage(limitPage)) {
            limitPage++;
        }
    }
    destCursor = PageTable.PageAddr(prefixPage);
    destLimit = PageTable.PageAddr(limitPage);
    // Take ownership of the new pages
    InteriorPtrTable.ClearFirst(prefixPage, limitPage);
    InteriorPtrTable.SetFirst(destCursor + PreHeader.Size);
    if (GC.remsetType == RemSetType.Cards) {
        OffsetTable.ClearLast(PageTable.PageAddr(prefixPage),
                              PageTable.PageAddr(limitPage) - 1);
    }
}
// Tries to reserve pageCount already-unused pages starting at startPage
// and retype them to newType.  Returns false if the range falls outside
// the page table or if (possibly after a GC triggered while acquiring
// the mutex) the region no longer starts at startPage, is not my-unused,
// or is too small.  Any surplus pages beyond pageCount are re-linked as
// a victim block.  fCleanPages: in - caller wants clean pages; out -
// whether the delivered pages were in fact clean.
internal static bool TryReserveUnusedPages(Thread currentThread,
                                           UIntPtr startPage,
                                           UIntPtr pageCount,
                                           PageType newType,
                                           ref bool fCleanPages)
{
    Trace.Log(Trace.Area.Page,
              "TryReserveUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    // startPage must be the first page of any unused run it belongs to.
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    if (endPage > PageTable.pageTableCount) {
        return(false);
    }
    if (currentThread != null) {
        GC.CheckForNeededGCWork(currentThread);
    }
    bool iflag = EnterMutex(currentThread);
    try {
        // GC can occur and page can be collected.
        // Re-check the preconditions now that we hold the mutex: the
        // world may have changed while we were waiting.
        // NOTE(review): this re-check omits IsMyPage(startPage - 1),
        // unlike the Deny above — presumably intentional (stricter is
        // safe here); confirm.
        if (startPage != UIntPtr.Zero &&
            PageTable.IsUnusedPage(startPage - 1)) {
            return(false);
        }
        if (!PageTable.IsUnusedPage(startPage) ||
            !PageTable.IsMyPage(startPage)) {
            return(false);
        }
        UnusedBlockHeader *header = (UnusedBlockHeader *)
            PageTable.PageAddr(startPage);
        if (header->count < pageCount) {
            // The region shrank below what we need.
            return(false);
        }
        UIntPtr regionPages = UnlinkUnusedPages(startPage);
        Trace.Log(Trace.Area.Page,
                  "TryReserveUnusedPages found={0:x}",
                  __arglist(regionPages));
        SetPageTypeClean(startPage, pageCount, newType);
        if (regionPages > pageCount) {
            // Return the unneeded suffix to the free list as a victim
            // so it is preferred by subsequent allocations.
            UIntPtr suffixPages = regionPages - pageCount;
            LinkUnusedPages(endPage, suffixPages, true);
        }
    } finally {
        LeaveMutex(currentThread, iflag);
    }
    // Now that we are outside the Mutex, we should perform the
    // real cleaning of the gotten pages
    if (fCleanPages) {
        CleanFoundPages(startPage);
    } else {
        fCleanPages = FoundOnlyCleanPages(startPage);
    }
    return(true);
}