// Try to reserve the specific page range [startPage, startPage+pageCount)
// for 'newType'.  The leading part of the range may already be unused pages
// owned by this process; if the remainder is still unallocated (never
// committed from the OS), this routine commits enough new memory to cover
// it and then retries the reservation via TryReserveUnusedPages.
//
// currentThread: the calling thread (used for mutex entry); may be null.
// startPage:     first page of the requested range.
// pageCount:     number of pages requested; must be > 0.
// newType:       page type to stamp on the range; must not be an
//                unused-page type.
// fCleanPages:   in: caller requires zeroed pages; out (when false on
//                entry): whether the pages obtained happened to be clean.
//                Forwarded to TryReserveUnusedPages.
//
// Returns true only if the whole range was reserved; false if the range
// could not be extended or the OS allocation failed.
internal static bool TryReservePages(Thread currentThread,
                                     UIntPtr startPage,
                                     UIntPtr pageCount,
                                     PageType newType,
                                     ref bool fCleanPages)
{
    Trace.Log(Trace.Area.Page,
              "TryReservePages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    // The page immediately before the range must not be an unused page we
    // own: reservations are expected to start at the front of a free run
    // (otherwise the free-list block header would not be at startPage).
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    UIntPtr index = startPage;
    // Skip over the prefix of the range that is already unused-and-ours.
    while (index < endPage &&
           PageTable.IsUnusedPage(index) &&
           PageTable.IsMyPage(index)) {
        index++;
    }
    if (PageTable.IsUnallocatedPage(PageTable.Type(index))) {
        // We should try to extend the region of allocated pages
        UIntPtr pagesNeeded = pageCount - (index - startPage);
        UIntPtr bytesNeeded = PageTable.RegionSize(pagesNeeded);
        // Round the request up to the heap commit granularity.
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = PageTable.PageAddr(index);
        bool gotMemory = false;
        bool iflag = EnterMutex(currentThread);
        try {
            gotMemory =
                MemoryManager.AllocateMemory(startAddr, allocSize);
            if (gotMemory) {
                UIntPtr allocPages = PageTable.PageCount(allocSize);
                // Pass a null thread so MarkUnusedPages does not try to
                // re-enter the mutex we already hold.
                MarkUnusedPages(/* avoid recursive locking */ null,
                                index, allocPages, true);
            }
        } finally {
            LeaveMutex(currentThread, iflag);
        }
        if (gotMemory) {
            // The full range is now unused pages; claim it atomically.
            // NOTE(review): a GC between LeaveMutex and this call could
            // presumably take the pages, in which case this returns false.
            bool success =
                TryReserveUnusedPages(currentThread, startPage,
                                      pageCount, newType,
                                      ref fCleanPages);
            Trace.Log(Trace.Area.Page,
                      "TryReservePages success={0}",
                      __arglist(success));
            return(success);
        }
    }
    return(false);
}
// Search the slotted free lists for a run of at least 'pageCount' unused
// pages, stamp the first 'pageCount' of them with 'newType', and return
// the starting page.  Any excess pages from the chosen block are linked
// back onto the free lists.  First-fit within each slot, scanning from the
// smallest slot that could satisfy the request up through slot 31.
//
// currentThread: the calling thread (used for mutex entry); may be null.
// pageCount:     number of pages requested.
// newType:       page type to stamp on the found range; must not be an
//                unused-page type.
//
// Returns the first page of the reserved range, or UIntPtr.Zero if no
// sufficiently large block exists.  The entire search and carve-up is
// performed while holding the page-manager mutex.
private static UIntPtr FindUnusedPages(Thread currentThread,
                                       UIntPtr pageCount,
                                       PageType newType)
{
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    int slot = SlotFromCount(pageCount);
    Trace.Log(Trace.Area.Page,
              "FindUnusedPages count={0:x} slot={1}",
              __arglist(pageCount, slot));
    bool iflag = EnterMutex(currentThread);
    try {
        while (slot < 32) {
            UnusedBlockHeader *header = unusedMemoryBlocks[slot].next;
            while (header != null) {
                // Blocks within a slot can vary in size, so each must be
                // checked against the actual request.
                if (header->count >= pageCount) {
                    // The block header lives at the start of the free
                    // region itself.
                    UIntPtr startPage = PageTable.Page((UIntPtr)header);
                    UIntPtr regionSize = UnlinkUnusedPages(startPage);
                    SetPageTypeClean(startPage, pageCount, newType);
                    if (regionSize > pageCount) {
                        // Return the unused tail to the free lists.
                        UIntPtr restCount = regionSize - pageCount;
                        UIntPtr endPage = startPage + pageCount;
                        LinkUnusedPages(endPage, restCount, true);
                    }
                    Trace.Log(Trace.Area.Page,
                              "FindUnusedPages success {0:x}",
                              __arglist(startPage));
                    // Mutex is released by the finally clause.
                    return(startPage);
                }
                header = header->next;
            }
            slot++;
        }
        return(UIntPtr.Zero);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
// Try to claim the specific range [startPage, startPage+pageCount) out of
// a single free block that begins exactly at startPage.  All validation is
// (re-)performed under the page-manager mutex because a GC may run before
// the mutex is acquired and change the page state.  Any excess pages from
// the block are re-linked onto the free lists.
//
// currentThread: the calling thread; may be null.  When non-null, gives
//                the thread a chance to perform pending GC work first.
// startPage:     first page of the requested range; must be the start of
//                a free block (its UnusedBlockHeader lives there).
// pageCount:     number of pages requested; must be > 0.
// newType:       page type to stamp on the range; must not be an
//                unused-page type.
// fCleanPages:   in: caller requires zeroed pages (they are cleaned after
//                the mutex is released); out (when false on entry):
//                whether the pages obtained were already clean.
//
// Returns true if the range was reserved; false if it no longer starts a
// sufficiently large free block owned by this process.
internal static bool TryReserveUnusedPages(Thread currentThread,
                                           UIntPtr startPage,
                                           UIntPtr pageCount,
                                           PageType newType,
                                           ref bool fCleanPages)
{
    Trace.Log(Trace.Area.Page,
              "TryReserveUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    // The preceding page must not be unused-and-ours: startPage must be
    // the front of its free run so the block header is located there.
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    if (endPage > PageTable.pageTableCount) {
        return(false);
    }
    if (currentThread != null) {
        GC.CheckForNeededGCWork(currentThread);
    }
    bool iflag = EnterMutex(currentThread);
    try {
        // GC can occur and page can be collected.
        // Re-check everything now that we hold the mutex: the state
        // observed (or asserted) before acquisition may be stale.
        if (startPage != UIntPtr.Zero &&
            PageTable.IsUnusedPage(startPage - 1)) {
            return(false);
        }
        if (!PageTable.IsUnusedPage(startPage) ||
            !PageTable.IsMyPage(startPage)) {
            return(false);
        }
        // The free-block header is stored in the block's first page.
        UnusedBlockHeader *header =
            (UnusedBlockHeader *) PageTable.PageAddr(startPage);
        if (header->count < pageCount) {
            return(false);
        }
        UIntPtr regionPages = UnlinkUnusedPages(startPage);
        Trace.Log(Trace.Area.Page,
                  "TryReserveUnusedPages found={0:x}",
                  __arglist(regionPages));
        SetPageTypeClean(startPage, pageCount, newType);
        if (regionPages > pageCount) {
            // Return the unneeded tail of the block to the free lists.
            UIntPtr suffixPages = regionPages - pageCount;
            LinkUnusedPages(endPage, suffixPages, true);
        }
    } finally {
        LeaveMutex(currentThread, iflag);
    }
    // Now that we are outside the Mutex, we should perform the
    // real cleaning of the gotten pages
    if (fCleanPages) {
        CleanFoundPages(startPage);
    } else {
        fCleanPages = FoundOnlyCleanPages(startPage);
    }
    return(true);
}
// Obtain 'pageCount' pages of type 'newType', from the free lists when
// possible, otherwise by committing fresh memory from the OS.  Throws
// outOfMemoryException when the OS cannot supply the memory even at the
// smaller os_commit_size granularity.
//
// currentThread: the calling thread; may be null.  When non-null, gives
//                the thread a chance to perform pending GC work first.
// pageCount:     number of pages required.
// newType:       page type to stamp on the pages; must not be an
//                unused-page type.
// fCleanPages:   in: caller requires zeroed pages; out (when false on
//                entry and the fast path is taken): whether the found
//                pages were already clean.  Freshly OS-committed pages
//                skip the cleaning step entirely.
//
// Returns the first page of the obtained range.
internal static UIntPtr EnsurePages(Thread currentThread,
                                    UIntPtr pageCount,
                                    PageType newType,
                                    ref bool fCleanPages)
{
    if (currentThread != null) {
        GC.CheckForNeededGCWork(currentThread);
    }
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    // Try to find already allocated but unused pages
    UIntPtr foundPages =
        FindUnusedPages(currentThread, pageCount, newType);
    if (foundPages != UIntPtr.Zero) {
        // Cleaning happens outside the page-manager mutex (released
        // inside FindUnusedPages).
        if (fCleanPages) {
            CleanFoundPages(foundPages);
        } else {
            fCleanPages = FoundOnlyCleanPages(foundPages);
        }
        return(foundPages);
    }
    // We need to allocate new pages
    bool iflag = EnterMutex(currentThread);
    try {
        UIntPtr bytesNeeded = PageTable.RegionSize(pageCount);
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = MemoryManager.AllocateMemory(allocSize);
        if (startAddr == UIntPtr.Zero) {
            // Retry at the (smaller) OS commit granularity before
            // declaring out-of-memory.
            if (heap_commit_size > os_commit_size) {
                allocSize = Util.Pad(bytesNeeded, os_commit_size);
                startAddr = MemoryManager.AllocateMemory(allocSize);
            }
        }
        if (startAddr == UIntPtr.Zero) {
            // BUGBUG: if in CMS, should wait on one complete GC cycle and
            // the retry. for STW, we may get here even if the collector
            // hasn't triggered just prior.
            PageTable.Dump("Out of memory");
            throw outOfMemoryException;
        }
        UIntPtr startPage = PageTable.Page(startAddr);
        PageTable.SetType(startPage, pageCount, newType);
        PageTable.SetProcess(startPage, pageCount);
        // Padding to the commit granularity may have produced more pages
        // than requested; record the surplus as unused.
        UIntPtr extraPages =
            PageTable.PageCount(allocSize) - pageCount;
        if (extraPages > 0) {
            // Mark the new memory pages as allocated-but-unused
            // (null thread: we already hold the mutex, so avoid
            // recursive locking inside MarkUnusedPages).
            MarkUnusedPages(/* avoid recursive locking */ null,
                            startPage + pageCount, extraPages, true);
        }
        return(startPage);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}