// Checks the structural invariants of an unused-block header: the magic
// number, a positive page count, consistent doubly-linked list pointers,
// and the back pointer stored in the last page of the region.  Under
// SlowDebug it also checks that every page of the region is an unused
// page that belongs to us.
internal static unsafe void Verify(UnusedBlockHeader *header) {
    VTable.Assert(header->magic == (UIntPtr)magicNumber,
                  "Bad magic number in UnusedBlockHeader");
    VTable.Assert(header->count > 0,
                  "Count <= 0 in UnusedBlockHeader");
    VTable.Assert(header->prev->next == header,
                  "UnusedBlockHeader not linked properly (1)");
    if (header->next != null) {
        VTable.Assert(header->next->prev == header,
                      "UnusedBlockHeader not linked properly (2)");
    }
    UIntPtr count = header->count;
    UnusedBlockHeader *tailBlock = (UnusedBlockHeader *)
        (((UIntPtr)header) + PageTable.RegionSize(count - 1));
    VTable.Assert(tailBlock->curr == header,
                  "UnusedBlockHeader tail->curr is incorrect");
    if (PageManager.SlowDebug) {
        UIntPtr page = PageTable.Page((UIntPtr)header);
        for (UIntPtr i = UIntPtr.Zero; i < count; i++) {
            VTable.Assert(PageTable.IsUnusedPage(page + i) &&
                          PageTable.IsMyPage(page + i),
                          "Incorrect page in unused region");
        }
    }
}
// Checks that [startPage, endPage) lies inside a well-formed unused region:
// uniform clean/dirty page type, a header whose count covers the range, no
// adjacent my-unused pages outside the region, and membership in the free
// list for the region's slot.
internal static void VerifyUnusedRegion(UIntPtr startPage, UIntPtr endPage) {
    // Verify that all of the pages are of the same Clean/Dirty type.
    PageType startType = PageTable.Type(startPage);
    for (UIntPtr page = startPage; page < endPage; ++page) {
        VTable.Assert(startType == PageTable.Type(page),
                      "Unused page types don't match in region");
    }
    if (startPage > UIntPtr.Zero &&
        PageTable.IsUnusedPage(startPage - 1) &&
        PageTable.IsMyPage(startPage - 1)) {
        // We have already checked the region
        return;
    }
    UIntPtr regionAddr = PageTable.PageAddr(startPage);
    UnusedBlockHeader *regionHeader = (UnusedBlockHeader *) regionAddr;
    UIntPtr pageCount = regionHeader->count;
    VTable.Assert(pageCount >= (endPage - startPage),
                  "Region-to-verify is larger than its header specifies");
    endPage = startPage + pageCount;
    for (UIntPtr page = startPage; page < endPage; ++page) {
        VTable.Assert(PageTable.IsUnusedPage(page) &&
                      PageTable.IsMyPage(page),
                      "Non my-unused page in unused region");
        PageManager.VerifyUnusedPage(page,
                                     (page == startPage) ||
                                     (page == (endPage - 1)));
    }
    VTable.Assert(!(endPage < PageTable.pageTableCount &&
                    PageTable.IsUnusedPage(endPage) &&
                    PageTable.IsMyPage(endPage)),
                  "My-unused page immediately after unused region");
    // Verify that the region is correctly linked into the
    // list of unused memory blocks
    int slot = SlotFromCount(pageCount);
    UnusedBlockHeader *header = unusedMemoryBlocks[slot].next;
    UnusedBlockHeader.Verify(header);
    while (regionAddr != (UIntPtr)header) {
        header = header->next;
        VTable.Assert(header != null,
                      "Unused region not in the list for its slot number");
        UnusedBlockHeader.Verify(header);
    }
}
// Initializes the header at the start of an unused region of 'count' pages
// and stores a back pointer to it in the region's last page so that the
// region can also be found from its tail when coalescing.
internal static unsafe void Initialize(UnusedBlockHeader *header,
                                       UIntPtr count) {
    header->magic = (UIntPtr)magicNumber;
    header->next = null;
    header->prev = null;
    header->count = count;
    UnusedBlockHeader *tailBlock = (UnusedBlockHeader *)
        (((UIntPtr)header) + PageTable.RegionSize(count - 1));
    tailBlock->curr = header;
}
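// A minimal sketch of the header layout these routines assume; it is not the
// original declaration, and the field order here is an assumption inferred
// from the code above.  The first page of every unused region starts with
// such a header, and a second copy in the region's last page carries only the
// 'curr' back pointer, so a region can be located from either end when
// coalescing with a neighbour.
internal unsafe struct UnusedBlockHeaderSketch {
    internal UIntPtr magic;                   // magicNumber while the block is linked
    internal UnusedBlockHeaderSketch *next;   // next block in this slot's free list
    internal UnusedBlockHeaderSketch *prev;   // previous block, or the list head
    internal UIntPtr count;                   // number of pages in the region
    internal UnusedBlockHeaderSketch *curr;   // tail page only: pointer to the head header
}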
// Returns a range of pages to the unused state, coalescing it with any
// adjacent unused regions that we own before linking the combined region
// back into the free lists.
private static void MarkUnusedPages(Thread currentThread,
                                    UIntPtr startPage,
                                    UIntPtr pageCount,
                                    bool fCleanPages) {
    Trace.Log(Trace.Area.Page,
              "MarkUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    UIntPtr endPage = startPage + pageCount;
    if (avoidDirtyPages && !fCleanPages) {
        UIntPtr dirtyStartAddr = PageTable.PageAddr(startPage);
        UIntPtr dirtySize = PageTable.RegionSize(pageCount);
        Util.MemClear(dirtyStartAddr, dirtySize);
        fCleanPages = true;
    }
    bool iflag = EnterMutex(currentThread);
    try {
        // Coalesce with an adjacent unused region that follows this one.
        if (endPage < PageTable.pageTableCount) {
            if (PageTable.IsUnusedPage(endPage) &&
                PageTable.IsMyPage(endPage)) {
                UIntPtr regionSize = UnlinkUnusedPages(endPage);
                endPage += regionSize;
            }
        }
        // Coalesce with an adjacent unused region that precedes this one;
        // the tail page of that region points back to its header.
        UIntPtr queryStartPage = startPage - 1;
        UIntPtr newStartPage = startPage;
        if (PageTable.IsUnusedPage(queryStartPage) &&
            PageTable.IsMyPage(queryStartPage)) {
            UnusedBlockHeader *tailUnused = (UnusedBlockHeader *)
                PageTable.PageAddr(queryStartPage);
            UIntPtr newStartAddr = (UIntPtr)tailUnused->curr;
            newStartPage = PageTable.Page(newStartAddr);
            UIntPtr regionSize = UnlinkUnusedPages(newStartPage);
            VTable.Assert(newStartPage + regionSize == startPage);
        }
        PageType pageType =
            fCleanPages ? PageType.UnusedClean : PageType.UnusedDirty;
        PageTable.SetType(startPage, pageCount, pageType);
        LinkUnusedPages(newStartPage, endPage - newStartPage, false);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
// Links a region of unused pages into the free list for its size class.
private static void LinkUnusedPages(UIntPtr startPage,
                                    UIntPtr pageCount,
                                    bool asVictim) {
    if (PageManager.SlowDebug) {
        for (UIntPtr i = startPage; i < startPage + pageCount; i++) {
            VTable.Assert(PageTable.IsUnusedPage(i) &&
                          PageTable.IsMyPage(i),
                          "Incorrect page to link into unused region");
        }
    }
    Trace.Log(Trace.Area.Page,
              "LinkUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    // The region must fit inside the page table and must not be adjacent to
    // another my-unused region; otherwise it should have been coalesced.
    VTable.Deny(startPage > UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    VTable.Deny(startPage + pageCount > PageTable.pageTableCount);
    VTable.Deny(startPage + pageCount < PageTable.pageTableCount &&
                PageTable.IsUnusedPage(startPage + pageCount) &&
                PageTable.IsMyPage(startPage + pageCount));
    UnusedBlockHeader *header = (UnusedBlockHeader *)
        PageTable.PageAddr(startPage);
    UnusedBlockHeader.Initialize(header, pageCount);
    int slot = SlotFromCount(pageCount);
    // Unused blocks are linked into the free list either as the result of a
    // collection or as a result of carving a big block into a smaller
    // allocation and a remainder.  When such a remainder is linked back into
    // the free list, it is identified as a victim.  We favor subsequent
    // allocations from these victims, in an attempt to reduce fragmentation.
    // This is achieved by keeping victims at the head of the free list.
    //
    // TODO: the long term solution is to perform best fit on the free list.
    if (asVictim || unusedMemoryBlocks[slot].next == null) {
        fixed (UnusedBlockHeader *listHeader = &unusedMemoryBlocks[slot]) {
            UnusedBlockHeader.InsertNext(listHeader, header);
        }
    } else {
        UnusedBlockHeader *listHeader = unusedMemoryBlocks[slot].next;
        UnusedBlockHeader.InsertNext(listHeader, header);
    }
}
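// SlotFromCount is defined elsewhere in the source; the sketch below is a
// hypothetical reconstruction, not the original implementation.  It assumes a
// logarithmic mapping of page counts onto the 32 free-list slots.  The code
// here only needs the mapping to be monotone in pageCount, because
// FindUnusedPages still checks header->count >= pageCount inside each slot
// before taking a block.
private static int SlotFromCountSketch(UIntPtr pageCount) {
    ulong remaining = (ulong)pageCount;
    int slot = 0;
    // floor(log2(pageCount)), capped at the last slot (31).
    while (slot < 31 && remaining > 1) {
        remaining >>= 1;
        slot++;
    }
    return slot;
}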
// Removes the unused region beginning at startPage from its free list and
// returns the number of pages in the region.
private static UIntPtr UnlinkUnusedPages(UIntPtr startPage) {
    VTable.Assert(PageTable.IsUnusedPage(startPage) &&
                  PageTable.IsMyPage(startPage));
    VTable.Deny(startPage > UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UnusedBlockHeader *header = (UnusedBlockHeader *)
        PageTable.PageAddr(startPage);
    UIntPtr pageCount = UnusedBlockHeader.Remove(header);
    Trace.Log(Trace.Area.Page,
              "UnlinkUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    return(pageCount);
}
// Inserts newNext into the doubly-linked free list immediately after header.
internal static unsafe void InsertNext(UnusedBlockHeader *header,
                                       UnusedBlockHeader *newNext) {
    //Trace.Log(Trace.Area.Page,
    //          "UnusedBlockHeader.InsertNext {0} count={1}",
    //          __arglist(newNext, newNext->count));
    UnusedBlockHeader *oldNext = header->next;
    header->next = newNext;
    newNext->next = oldNext;
    newNext->prev = header;
    if (oldNext != null) {
        oldNext->prev = newNext;
    }
    UnusedBlockHeader.Verify(newNext);
}
// Searches the free lists for a block of at least pageCount unused pages,
// starting at the slot for pageCount and moving to larger size classes.
// The first sufficiently large block is taken; any remainder is linked back
// as a victim.  Returns the first page of the reserved range, or
// UIntPtr.Zero if no block is large enough.
private static UIntPtr FindUnusedPages(Thread currentThread,
                                       UIntPtr pageCount,
                                       PageType newType) {
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    int slot = SlotFromCount(pageCount);
    Trace.Log(Trace.Area.Page,
              "FindUnusedPages count={0:x} slot={1}",
              __arglist(pageCount, slot));
    bool iflag = EnterMutex(currentThread);
    try {
        while (slot < 32) {
            UnusedBlockHeader *header = unusedMemoryBlocks[slot].next;
            while (header != null) {
                if (header->count >= pageCount) {
                    UIntPtr startPage = PageTable.Page((UIntPtr)header);
                    UIntPtr regionSize = UnlinkUnusedPages(startPage);
                    SetPageTypeClean(startPage, pageCount, newType);
                    if (regionSize > pageCount) {
                        UIntPtr restCount = regionSize - pageCount;
                        UIntPtr endPage = startPage + pageCount;
                        LinkUnusedPages(endPage, restCount, true);
                    }
                    Trace.Log(Trace.Area.Page,
                              "FindUnusedPages success {0:x}",
                              __arglist(startPage));
                    return(startPage);
                }
                header = header->next;
            }
            slot++;
        }
        return(UIntPtr.Zero);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
// Sums the page counts of all blocks in all free-list slots.
internal static UIntPtr TotalUnusedPages() {
    Thread currentThread = Thread.CurrentThread;
    UIntPtr pageCount = (UIntPtr)0;
    bool iflag = EnterMutex(currentThread);
    try {
        for (int slot = 0; slot < 32; slot++) {
            UnusedBlockHeader *header = unusedMemoryBlocks[slot].next;
            while (header != null) {
                pageCount += header->count;
                header = header->next;
            }
        }
        return(pageCount);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
// Unlinks header from its free list, clears the header fields and the tail
// back pointer, and returns the page count of the region that was removed.
internal static unsafe UIntPtr Remove(UnusedBlockHeader *header) {
    //Trace.Log(Trace.Area.Page,
    //          "UnusedBlockHeader.Remove {0} count={1}",
    //          __arglist(header->prev->next, header->count));
    UnusedBlockHeader.Verify(header);
    header->prev->next = header->next;
    if (header->next != null) {
        header->next->prev = header->prev;
    }
    UIntPtr result = header->count;
    header->magic = UIntPtr.Zero;
    header->prev = null;
    header->next = null;
    header->count = UIntPtr.Zero;
    UnusedBlockHeader *tailBlock = (UnusedBlockHeader *)
        (((UIntPtr)header) + PageTable.RegionSize(result - 1));
    tailBlock->curr = null;
    return(result);
}
// Tries to reserve pageCount unused pages beginning exactly at startPage,
// typically to extend an existing region in place.  Returns false if the
// pages are not available as the start of a large enough unused block.
internal static bool TryReserveUnusedPages(Thread currentThread,
                                           UIntPtr startPage,
                                           UIntPtr pageCount,
                                           PageType newType,
                                           ref bool fCleanPages) {
    Trace.Log(Trace.Area.Page,
              "TryReserveUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    if (endPage > PageTable.pageTableCount) {
        return(false);
    }
    if (currentThread != null) {
        GC.CheckForNeededGCWork(currentThread);
    }
    bool iflag = EnterMutex(currentThread);
    try {
        // A GC may have run before we acquired the mutex, so re-check that
        // startPage is still the start of an unused block that we own and
        // that the block is large enough.
        if (startPage != UIntPtr.Zero &&
            PageTable.IsUnusedPage(startPage - 1)) {
            return(false);
        }
        if (!PageTable.IsUnusedPage(startPage) ||
            !PageTable.IsMyPage(startPage)) {
            return(false);
        }
        UnusedBlockHeader *header = (UnusedBlockHeader *)
            PageTable.PageAddr(startPage);
        if (header->count < pageCount) {
            return(false);
        }
        UIntPtr regionPages = UnlinkUnusedPages(startPage);
        Trace.Log(Trace.Area.Page,
                  "TryReserveUnusedPages found={0:x}",
                  __arglist(regionPages));
        SetPageTypeClean(startPage, pageCount, newType);
        if (regionPages > pageCount) {
            UIntPtr suffixPages = regionPages - pageCount;
            LinkUnusedPages(endPage, suffixPages, true);
        }
    } finally {
        LeaveMutex(currentThread, iflag);
    }
    // Now that we are outside the mutex, perform the real cleaning of the
    // pages we obtained.
    if (fCleanPages) {
        CleanFoundPages(startPage);
    } else {
        fCleanPages = FoundOnlyCleanPages(startPage);
    }
    return(true);
}
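// Illustrative only: ReservePagesSketch is a hypothetical helper, not part of
// the original source.  It shows the assumed relationship between the two
// entry points above: first try to grow a region in place with
// TryReserveUnusedPages, then fall back to taking a block from the free lists
// with FindUnusedPages.  (Any clearing of pages returned by FindUnusedPages
// is presumably the caller's responsibility, since only TryReserveUnusedPages
// performs the cleaning step here.)
private static UIntPtr ReservePagesSketch(Thread currentThread,
                                          UIntPtr preferredStartPage,
                                          UIntPtr pageCount,
                                          PageType newType,
                                          ref bool fCleanPages) {
    // In-place extension succeeds only if an unused block of at least
    // pageCount pages begins exactly at preferredStartPage.
    if (TryReserveUnusedPages(currentThread, preferredStartPage, pageCount,
                              newType, ref fCleanPages)) {
        return preferredStartPage;
    }
    // Otherwise take the first sufficiently large block from the free lists;
    // FindUnusedPages returns UIntPtr.Zero when nothing is large enough.
    return FindUnusedPages(currentThread, pageCount, newType);
}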