// Check the integrity of an unused-block header: magic number, doubly-linked
// list pointers, the tail block's back-pointer, and (under SlowDebug) the
// page-table state of every page covered by the block.
internal static unsafe void Verify(UnusedBlockHeader *header) {
    VTable.Assert(header->magic == (UIntPtr)magicNumber,
                  "Bad magic number in UnusedBlockHeader");
    VTable.Assert(header->count > 0,
                  "Count <= 0 in UnusedBlockHeader");
    VTable.Assert(header->prev->next == header,
                  "UnusedBlockHeader not linked properly (1)");
    if (header->next != null) {
        VTable.Assert(header->next->prev == header,
                      "UnusedBlockHeader not linked properly (2)");
    }
    UIntPtr count = header->count;
    UnusedBlockHeader *tailBlock = (UnusedBlockHeader *)
        (((UIntPtr)header) + PageTable.RegionSize(count - 1));
    VTable.Assert(tailBlock->curr == header,
                  "UnusedBlockHeader tail->curr is incorrect");
    if (PageManager.SlowDebug) {
        UIntPtr page = PageTable.Page((UIntPtr)header);
        for (UIntPtr i = UIntPtr.Zero; i < count; i++) {
            VTable.Assert(PageTable.IsUnusedPage(page + i) &&
                          PageTable.IsMyPage(page + i),
                          "Incorrect page in unused region");
        }
    }
}
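
// Try to reserve the specific page range [startPage, startPage+pageCount)
// for 'newType'.  If the tail of the range is still unallocated, attempt
// to commit enough new memory to cover it and then reserve the now-unused
// pages.  Returns false if the range cannot be reserved.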
internal static bool TryReservePages(Thread currentThread,
                                     UIntPtr startPage,
                                     UIntPtr pageCount,
                                     PageType newType,
                                     ref bool fCleanPages) {
    Trace.Log(Trace.Area.Page,
              "TryReservePages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    UIntPtr index = startPage;
    while (index < endPage &&
           PageTable.IsUnusedPage(index) &&
           PageTable.IsMyPage(index)) {
        index++;
    }
    if (PageTable.IsUnallocatedPage(PageTable.Type(index))) {
        // We should try to extend the region of allocated pages
        UIntPtr pagesNeeded = pageCount - (index - startPage);
        UIntPtr bytesNeeded = PageTable.RegionSize(pagesNeeded);
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = PageTable.PageAddr(index);
        bool gotMemory = false;
        bool iflag = EnterMutex(currentThread);
        try {
            gotMemory =
                MemoryManager.AllocateMemory(startAddr, allocSize);
            if (gotMemory) {
                UIntPtr allocPages = PageTable.PageCount(allocSize);
                MarkUnusedPages(/* avoid recursive locking */ null,
                                index, allocPages, true);
            }
        } finally {
            LeaveMutex(currentThread, iflag);
        }
        if (gotMemory) {
            bool success =
                TryReserveUnusedPages(currentThread, startPage,
                                      pageCount, newType,
                                      ref fCleanPages);
            Trace.Log(Trace.Area.Page,
                      "TryReservePages success={0}",
                      __arglist(success));
            return(success);
        }
    }
    return(false);
}
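
// Grow the allocator's free list by at least 'blockSize' bytes: reserve
// whole pages from the PageManager and hand the resulting region to
// FreeBlock.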
private UIntPtr growFreeList(UIntPtr blockSize, Thread t) {
    UIntPtr pageCount = PageTable.PageCount(blockSize);
    bool fCleanPages = true;
    UIntPtr startPage =
        PageManager.EnsurePages(t, pageCount, PageType.Owner0,
                                ref fCleanPages);
    UIntPtr newBlockSize = PageTable.RegionSize(pageCount);
    UIntPtr newBlockAddr = PageTable.PageAddr(startPage);
    return(FreeBlock(newBlockAddr, newBlockSize));
}
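
// Initialize an unused-block header covering 'count' pages.  The last page
// of the block records a back-pointer (curr) to the header so a neighboring
// block can find the start of this one (see MarkUnusedPages).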
internal static unsafe void Initialize(UnusedBlockHeader *header,
                                       UIntPtr count) {
    header->magic = (UIntPtr)magicNumber;
    header->next = null;
    header->prev = null;
    header->count = count;
    UnusedBlockHeader *tailBlock = (UnusedBlockHeader *)
        (((UIntPtr)header) + PageTable.RegionSize(count - 1));
    tailBlock->curr = header;
}
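
// Return a run of pages to the unused-page pool.  Dirty pages are cleared
// first when avoidDirtyPages is set.  Under the mutex, the region is
// coalesced with adjacent unused regions (before and after) and relinked
// as a single block.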
private static unsafe void MarkUnusedPages(Thread currentThread,
                                           UIntPtr startPage,
                                           UIntPtr pageCount,
                                           bool fCleanPages) {
    Trace.Log(Trace.Area.Page,
              "MarkUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    UIntPtr endPage = startPage + pageCount;
    if (avoidDirtyPages && !fCleanPages) {
        UIntPtr dirtyStartAddr = PageTable.PageAddr(startPage);
        UIntPtr dirtySize = PageTable.RegionSize(pageCount);
        Util.MemClear(dirtyStartAddr, dirtySize);
        fCleanPages = true;
    }
    bool iflag = EnterMutex(currentThread);
    try {
        // Coalesce with a following unused region, if any
        if (endPage < PageTable.pageTableCount) {
            if (PageTable.IsUnusedPage(endPage) &&
                PageTable.IsMyPage(endPage)) {
                UIntPtr regionSize = UnlinkUnusedPages(endPage);
                endPage += regionSize;
            }
        }
        // Coalesce with a preceding unused region, if any
        UIntPtr queryStartPage = startPage - 1;
        UIntPtr newStartPage = startPage;
        if (PageTable.IsUnusedPage(queryStartPage) &&
            PageTable.IsMyPage(queryStartPage)) {
            UnusedBlockHeader *tailUnused = (UnusedBlockHeader *)
                PageTable.PageAddr(queryStartPage);
            UIntPtr newStartAddr = (UIntPtr)tailUnused->curr;
            newStartPage = PageTable.Page(newStartAddr);
            UIntPtr regionSize = UnlinkUnusedPages(newStartPage);
            VTable.Assert(newStartPage + regionSize == startPage);
        }
        PageType pageType =
            fCleanPages ? PageType.UnusedClean : PageType.UnusedDirty;
        PageTable.SetType(startPage, pageCount, pageType);
        LinkUnusedPages(newStartPage, endPage - newStartPage, false);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
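
// Release the page-table entries for a thread's stack: walk backwards from
// the page below the stack base while the pages are stack pages tagged with
// this thread's index, then mark the whole run unallocated.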
internal static unsafe void ClearThreadStack(Thread thread) {
    short threadIndex = (short)thread.threadIndex;
    UIntPtr endPage = PageTable.Page(CallStack.StackBase(thread));
    UIntPtr startPage = endPage - 1;
    VTable.Assert(PageTable.IsStackPage(PageTable.Type(startPage)));
    VTable.Assert(PageTable.Extra(startPage) == threadIndex);
    while (startPage > 0 &&
           PageTable.IsStackPage(PageTable.Type(startPage - 1)) &&
           PageTable.Extra(startPage - 1) == threadIndex) {
        startPage--;
    }
    UIntPtr startAddr = PageTable.PageAddr(startPage);
    UIntPtr size = PageTable.RegionSize(endPage - startPage);
    SetUnallocatedPages(startAddr, size);
}
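
// Unlink an unused block from its free list.  The header and the tail
// back-pointer are cleared so stale data cannot be mistaken for a live
// block; returns the page count of the removed block.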
internal static unsafe UIntPtr Remove(UnusedBlockHeader *header) {
    //Trace.Log(Trace.Area.Page,
    //          "UnusedBlockHeader.Remove {0} count={1}",
    //          __arglist(header->prev->next, header->count));
    UnusedBlockHeader.Verify(header);
    header->prev->next = header->next;
    if (header->next != null) {
        header->next->prev = header->prev;
    }
    UIntPtr result = header->count;
    header->magic = UIntPtr.Zero;
    header->prev = null;
    header->next = null;
    header->count = UIntPtr.Zero;
    UnusedBlockHeader *tailBlock = (UnusedBlockHeader *)
        (((UIntPtr)header) + PageTable.RegionSize(result - 1));
    tailBlock->curr = null;
    return(result);
}
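
// Allocate a large object directly on its own pages.  Tracks the volume
// pretenured since the last full GC and forces a major collection once the
// hard trigger is exceeded, then reserves enough pages for the request plus
// worst-case alignment padding.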
// Interface with the compiler!
internal static unsafe UIntPtr AllocateBig(UIntPtr numBytes,
                                           uint alignment,
                                           Thread currentThread) {
    // Pretenure Trigger
    pretenuredSinceLastFullGC += numBytes;
    if (pretenuredSinceLastFullGC > PretenureHardGCTrigger) {
        GC.InvokeMajorCollection(currentThread);
    }
    // Potentially Join a collection
    GC.CheckForNeededGCWork(currentThread);
    int maxAlignmentOverhead = unchecked((int)alignment) - UIntPtr.Size;
    UIntPtr pageCount = PageTable.PageCount(numBytes + maxAlignmentOverhead);
    bool fCleanPages = true;
    UIntPtr page = PageManager.EnsurePages(currentThread, pageCount,
                                           largeObjectGeneration,
                                           ref fCleanPages);
    int unusedBytes =
        unchecked((int)(PageTable.RegionSize(pageCount) - numBytes));
    int unusedCacheLines =
        unchecked((int)(unusedBytes - maxAlignmentOverhead)) >> 5;
    int pageOffset = 0;
    if (unusedCacheLines != 0) {
        // Stagger successive big objects across the unused 32-byte cache
        // lines so they do not all start on the same line of the page.
        pageOffset = (bigOffset % unusedCacheLines) << 5;
        bigOffset++;
    }
    UIntPtr pageStart = PageTable.PageAddr(page);
    for (int i = 0; i < pageOffset; i += UIntPtr.Size) {
        Allocator.WriteAlignment(pageStart + i);
    }
    UIntPtr unalignedStartAddr = pageStart + pageOffset;
    UIntPtr startAddr =
        Allocator.AlignedAllocationPtr(unalignedStartAddr,
                                       pageStart + unusedBytes,
                                       alignment);
    pageOffset += unchecked((int)(uint)(startAddr - unalignedStartAddr));
    if (pageOffset < unusedBytes) {
        BumpAllocator.WriteUnusedMarker(pageStart + pageOffset + numBytes);
    }
    UIntPtr resultAddr = startAddr + PreHeader.Size;
    InteriorPtrTable.SetFirst(resultAddr);
    VTable.Assert(PageTable.Page(resultAddr) <
                  PageTable.Page(startAddr + numBytes - 1),
                  "Big object should cross pages");
    if (GC.remsetType == RemSetType.Cards) {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
        OffsetTable.SetLast(resultAddr);
#endif
    }
    return(resultAddr);
}
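
// Hand a run of pages back to the unused pool and, when AggressiveMemReset
// is enabled, tell the OS that their contents can be discarded.  The range
// is walked one VirtualAlloc region at a time because MEM_RESET cannot span
// allocation regions.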
internal static void ReleaseUnusedPages(UIntPtr startPage,
                                        UIntPtr pageCount,
                                        bool fCleanPages) {
    if (VTable.enableDebugPrint) {
        VTable.DebugPrint("ClearPages({0}, {1})\n",
                          __arglist(startPage, pageCount));
    }
    UIntPtr startAddr = PageTable.PageAddr(startPage);
    UIntPtr endPage = startPage + pageCount;
    UIntPtr endAddr = PageTable.PageAddr(endPage);
    UIntPtr rangeSize = PageTable.RegionSize(pageCount);
    MarkUnusedPages(Thread.CurrentThread, PageTable.Page(startAddr),
                    PageTable.PageCount(rangeSize), fCleanPages);
    if (PageManager.AggressiveMemReset) {
        // We cannot simply reset the memory range, as MEM_RESET can
        // only be used within a region returned from a single
        // VirtualAlloc call.
        UIntPtr regionAddr, regionSize;
        bool fUsed = MemoryManager.QueryMemory(startAddr, out regionAddr,
                                               out regionSize);
        if (VTable.enableDebugPrint) {
            VTable.DebugPrint(" 1 Query({0}, {1}, {2}) -> {3}\n",
                              __arglist(startAddr, regionAddr,
                                        regionSize, fUsed));
        }
        VTable.Assert(fUsed, "Memory to be cleared isn't used");
        // We don't care if regionAddr < startAddr.  We can MEM_RESET
        // part of the region.
        UIntPtr endRegion = regionAddr + regionSize;
        while (endRegion < endAddr) {
            if (VTable.enableDebugPrint) {
                VTable.DebugPrint("Clearing region [{0}, {1}]\n",
                                  __arglist(regionAddr,
                                            regionAddr + regionSize));
            }
            MemoryManager.IgnoreMemoryContents(startAddr,
                                               endRegion - startAddr);
            startAddr = endRegion;
            fUsed = MemoryManager.QueryMemory(endRegion, out regionAddr,
                                              out regionSize);
            if (VTable.enableDebugPrint) {
                VTable.DebugPrint(" 2 Query({0}, {1}, {2}) -> {3}\n",
                                  __arglist(endRegion, regionAddr,
                                            regionSize, fUsed));
            }
            VTable.Assert(fUsed, "Region to be freed isn't used");
            endRegion = regionAddr + regionSize;
        }
        if (VTable.enableDebugPrint) {
            VTable.DebugPrint("Clearing final region [{0}, {1}]\n",
                              __arglist(startAddr, endAddr));
        }
        MemoryManager.IgnoreMemoryContents(startAddr, endAddr - startAddr);
    }
    if (VTable.enableDebugPrint) {
        VTable.DebugPrint(" --> ClearPages({0},{1})\n",
                          __arglist(startPage, pageCount));
    }
}
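
// Obtain 'pageCount' pages of type 'newType', preferring pages that are
// already committed but unused; otherwise commit new memory padded to the
// heap (or OS) commit granularity and record any surplus pages as unused.
// If fCleanPages is true on entry the found pages are cleaned; if false, it
// is updated to report whether only clean pages were found.  Throws when no
// memory can be committed.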
internal static UIntPtr EnsurePages(Thread currentThread,
                                    UIntPtr pageCount,
                                    PageType newType,
                                    ref bool fCleanPages) {
    if (currentThread != null) {
        GC.CheckForNeededGCWork(currentThread);
    }
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    // Try to find already allocated but unused pages
    UIntPtr foundPages = FindUnusedPages(currentThread, pageCount, newType);
    if (foundPages != UIntPtr.Zero) {
        if (fCleanPages) {
            CleanFoundPages(foundPages);
        } else {
            fCleanPages = FoundOnlyCleanPages(foundPages);
        }
        return(foundPages);
    }
    // We need to allocate new pages
    bool iflag = EnterMutex(currentThread);
    try {
        UIntPtr bytesNeeded = PageTable.RegionSize(pageCount);
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = MemoryManager.AllocateMemory(allocSize);
        if (startAddr == UIntPtr.Zero) {
            if (heap_commit_size > os_commit_size) {
                allocSize = Util.Pad(bytesNeeded, os_commit_size);
                startAddr = MemoryManager.AllocateMemory(allocSize);
            }
        }
        if (startAddr == UIntPtr.Zero) {
            // BUGBUG: In a concurrent (CMS) collector we should wait for
            // one complete GC cycle and then retry.  For stop-the-world
            // collectors we may get here even if a collection was not
            // triggered just beforehand.
            PageTable.Dump("Out of memory");
            throw outOfMemoryException;
        }
        UIntPtr startPage = PageTable.Page(startAddr);
        PageTable.SetType(startPage, pageCount, newType);
        PageTable.SetProcess(startPage, pageCount);
        UIntPtr extraPages = PageTable.PageCount(allocSize) - pageCount;
        if (extraPages > 0) {
            // Mark the new memory pages as allocated-but-unused
            MarkUnusedPages(/* avoid recursive locking */ null,
                            startPage + pageCount, extraPages, true);
        }
        return(startPage);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}