private UIntPtr AllocateObjectMemorySlow(UIntPtr numBytes,
                                         uint alignment,
                                         Thread currentThread)
{
    //Trace.Log(Trace.Area.Allocate,
    //          "AllocateObjectMemorySlow numBytes={0}, alignment={1}, currentThread={2}",
    //          __arglist(numBytes, alignment, currentThread));
    GC.CheckForNeededGCWork(currentThread);
    VTable.Assert(CurrentPhase != StopTheWorldPhase.SingleThreaded ||
                  currentThread.threadIndex == collectorThreadIndex);
    if (GenerationalCollector.IsLargeObjectSize(numBytes)) {
        return(AllocateBig(numBytes, alignment, currentThread));
    }
    return(BumpAllocator.Allocate(currentThread, numBytes, alignment));
}
// Interface with the compiler!
internal static unsafe UIntPtr AllocateBig(UIntPtr numBytes,
                                           uint alignment,
                                           Thread currentThread)
{
    // Pretenure Trigger
    pretenuredSinceLastFullGC += numBytes;
    if (pretenuredSinceLastFullGC > PretenureHardGCTrigger) {
        GC.InvokeMajorCollection(currentThread);
    }
    // Potentially join a collection
    GC.CheckForNeededGCWork(currentThread);
    int maxAlignmentOverhead = unchecked((int)alignment) - UIntPtr.Size;
    UIntPtr pageCount = PageTable.PageCount(numBytes + maxAlignmentOverhead);
    bool fCleanPages = true;
    UIntPtr page = PageManager.EnsurePages(currentThread, pageCount,
                                           largeObjectGeneration,
                                           ref fCleanPages);
    int unusedBytes =
        unchecked((int)(PageTable.RegionSize(pageCount) - numBytes));
    int unusedCacheLines =
        unchecked((int)(unusedBytes - maxAlignmentOverhead)) >> 5;
    int pageOffset = 0;
    if (unusedCacheLines != 0) {
        pageOffset = (bigOffset % unusedCacheLines) << 5;
        bigOffset++;
    }
    UIntPtr pageStart = PageTable.PageAddr(page);
    for (int i = 0; i < pageOffset; i += UIntPtr.Size) {
        Allocator.WriteAlignment(pageStart + i);
    }
    UIntPtr unalignedStartAddr = pageStart + pageOffset;
    UIntPtr startAddr =
        Allocator.AlignedAllocationPtr(unalignedStartAddr,
                                       pageStart + unusedBytes,
                                       alignment);
    pageOffset += unchecked((int)(uint)(startAddr - unalignedStartAddr));
    if (pageOffset < unusedBytes) {
        BumpAllocator.WriteUnusedMarker(pageStart + pageOffset + numBytes);
    }
    UIntPtr resultAddr = startAddr + PreHeader.Size;
    InteriorPtrTable.SetFirst(resultAddr);
    VTable.Assert(PageTable.Page(resultAddr) <
                  PageTable.Page(startAddr + numBytes - 1),
                  "Big object should cross pages");
    if (GC.remsetType == RemSetType.Cards) {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
        OffsetTable.SetLast(resultAddr);
#endif
    }
    return(resultAddr);
}
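// Illustrative sketch only, not part of the collector: the page-offset
// computation in AllocateBig staggers successive big objects across the
// spare 32-byte cache lines at the start of their first page, so the
// headers of large objects do not all land on the same cache-line set.
// The names below (SketchBigObjectOffset, rotation) are hypothetical and
// exist only for this example; a 32-byte cache line is assumed, matching
// the << 5 / >> 5 shifts above.
private static int SketchBigObjectOffset(int unusedBytes,
                                         int maxAlignmentOverhead,
                                         ref int rotation)
{
    // Number of whole spare cache lines the object start can be slid by.
    int unusedCacheLines = (unusedBytes - maxAlignmentOverhead) >> 5;
    if (unusedCacheLines == 0) {
        return 0;
    }
    // Rotate through the available cache-line slots on each allocation.
    int pageOffset = (rotation % unusedCacheLines) << 5;
    rotation++;
    return pageOffset;
}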
internal static bool TryReserveUnusedPages(Thread currentThread,
                                           UIntPtr startPage,
                                           UIntPtr pageCount,
                                           PageType newType,
                                           ref bool fCleanPages)
{
    Trace.Log(Trace.Area.Page,
              "TryReserveUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    if (endPage > PageTable.pageTableCount) {
        return(false);
    }
    if (currentThread != null) {
        GC.CheckForNeededGCWork(currentThread);
    }
    bool iflag = EnterMutex(currentThread);
    try {
        // A GC may have run while we waited for the mutex and reclaimed
        // or merged these pages, so re-validate under the lock.
        if (startPage != UIntPtr.Zero &&
            PageTable.IsUnusedPage(startPage - 1)) {
            return(false);
        }
        if (!PageTable.IsUnusedPage(startPage) ||
            !PageTable.IsMyPage(startPage)) {
            return(false);
        }
        UnusedBlockHeader *header =
            (UnusedBlockHeader *) PageTable.PageAddr(startPage);
        if (header->count < pageCount) {
            return(false);
        }
        UIntPtr regionPages = UnlinkUnusedPages(startPage);
        Trace.Log(Trace.Area.Page,
                  "TryReserveUnusedPages found={0:x}",
                  __arglist(regionPages));
        SetPageTypeClean(startPage, pageCount, newType);
        if (regionPages > pageCount) {
            // Return the unneeded suffix of the block to the unused list.
            UIntPtr suffixPages = regionPages - pageCount;
            LinkUnusedPages(endPage, suffixPages, true);
        }
    } finally {
        LeaveMutex(currentThread, iflag);
    }
    // Now that we are outside the mutex, perform the real cleaning of
    // the pages we reserved.
    if (fCleanPages) {
        CleanFoundPages(startPage);
    } else {
        fCleanPages = FoundOnlyCleanPages(startPage);
    }
    return(true);
}
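// Illustrative sketch only (hypothetical names and types, not the
// collector's data structures): TryReserveUnusedPages follows a
// re-validate-under-lock pattern, because a GC can run between the
// unlocked precondition checks and acquiring the page-manager mutex,
// leaving the candidate block no longer free or merged with a
// neighboring unused block.
private static bool SketchReserve(object pageLock,
                                  System.Collections.Generic.HashSet<ulong> freeBlocks,
                                  ulong startPage)
{
    if (!freeBlocks.Contains(startPage)) {
        return false;                    // cheap unlocked rejection
    }
    lock (pageLock) {
        if (!freeBlocks.Contains(startPage)) {
            return false;                // state changed before we got the lock
        }
        freeBlocks.Remove(startPage);    // reserve while holding the lock
        return true;
    }
}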
internal static UIntPtr EnsurePages(Thread currentThread,
                                    UIntPtr pageCount,
                                    PageType newType,
                                    ref bool fCleanPages)
{
    if (currentThread != null) {
        GC.CheckForNeededGCWork(currentThread);
    }
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    // Try to find already allocated but unused pages
    UIntPtr foundPages = FindUnusedPages(currentThread, pageCount, newType);
    if (foundPages != UIntPtr.Zero) {
        if (fCleanPages) {
            CleanFoundPages(foundPages);
        } else {
            fCleanPages = FoundOnlyCleanPages(foundPages);
        }
        return(foundPages);
    }
    // We need to allocate new pages
    bool iflag = EnterMutex(currentThread);
    try {
        UIntPtr bytesNeeded = PageTable.RegionSize(pageCount);
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = MemoryManager.AllocateMemory(allocSize);
        if (startAddr == UIntPtr.Zero) {
            if (heap_commit_size > os_commit_size) {
                // Fall back to the smaller OS commit granularity.
                allocSize = Util.Pad(bytesNeeded, os_commit_size);
                startAddr = MemoryManager.AllocateMemory(allocSize);
            }
        }
        if (startAddr == UIntPtr.Zero) {
            // BUGBUG: In CMS we should wait for one complete GC cycle and
            // then retry.  For STW, we may get here even if the collector
            // was not triggered just prior.
            PageTable.Dump("Out of memory");
            throw outOfMemoryException;
        }
        UIntPtr startPage = PageTable.Page(startAddr);
        PageTable.SetType(startPage, pageCount, newType);
        PageTable.SetProcess(startPage, pageCount);
        UIntPtr extraPages = PageTable.PageCount(allocSize) - pageCount;
        if (extraPages > 0) {
            // Mark the new memory pages as allocated-but-unused
            MarkUnusedPages(/* avoid recursive locking */ null,
                            startPage + pageCount, extraPages, true);
        }
        return(startPage);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
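// Illustrative sketch only (assumption: Util.Pad rounds its first argument
// up to a multiple of its second, which is what the heap_commit_size /
// os_commit_size fallback above relies on).  A minimal stand-alone
// equivalent with a hypothetical name:
private static ulong SketchPadToCommitSize(ulong bytesNeeded, ulong commitSize)
{
    // Round bytesNeeded up to the next multiple of commitSize.
    return ((bytesNeeded + commitSize - 1) / commitSize) * commitSize;
}
// For example, with a 64KB commit size a 100KB request pads to 128KB; the
// pages beyond the requested region are then handed back as
// allocated-but-unused via MarkUnusedPages.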