internal static unsafe void SetStaticDataPages(UIntPtr startAddr, UIntPtr size)
{
#if SINGULARITY
    // It's perfectly fine to be given memory outside the page table region,
    // so long as we intend to treat that memory as NonGC anyway.
    if (startAddr < PageTable.baseAddr) {
        if (startAddr + size < PageTable.baseAddr) {
            // Nothing to do.  All pages are below the base covered by
            // the page table.
            return;
        }
        // The range overlaps with the region covered by the page table.
        size -= (PageTable.baseAddr - startAddr);
        startAddr = PageTable.baseAddr;
    }
    UIntPtr endAddr = startAddr + size;
    if (endAddr > PageTable.limitAddr) {
        if (startAddr > PageTable.limitAddr) {
            // Nothing to do.  All pages are above the limit covered by
            // the page table.
            return;
        }
        // The range overlaps with the region covered by the page table,
        // so trim off the part above the limit.
        size -= (endAddr - PageTable.limitAddr);
    }
#endif
    UIntPtr startIndex = PageTable.Page(startAddr);
    UIntPtr pageCount = PageTable.PageCount(size);
    PageTable.SetType(startIndex, pageCount, PageType.NonGC);
}
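// A minimal, standalone sketch (not part of the runtime) of the clipping
// arithmetic in SetStaticDataPages above.  The base/limit values below are
// hypothetical stand-ins for PageTable.baseAddr and PageTable.limitAddr; the
// point is only to show how a [start, start+size) range is trimmed to the
// window covered by the page table.
using System;

static class ClipSketch
{
    // Returns false if the range lies entirely outside [baseAddr, limitAddr).
    static bool ClipToWindow(ref ulong start, ref ulong size,
                             ulong baseAddr, ulong limitAddr)
    {
        if (start < baseAddr) {
            if (start + size < baseAddr) {
                return false;               // wholly below the window
            }
            size -= baseAddr - start;       // trim the part below baseAddr
            start = baseAddr;
        }
        ulong end = start + size;
        if (end > limitAddr) {
            if (start > limitAddr) {
                return false;               // wholly above the window
            }
            size -= end - limitAddr;        // trim the overhang above limitAddr
        }
        return true;
    }

    static void Main()
    {
        ulong start = 0x0800, size = 0x2000;
        bool overlaps = ClipToWindow(ref start, ref size,
                                     baseAddr: 0x1000, limitAddr: 0x2000);
        // Prints: True start=0x1000 size=0x1000
        Console.WriteLine("{0} start=0x{1:x} size=0x{2:x}", overlaps, start, size);
    }
}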
internal static bool TryReservePages(Thread currentThread,
                                     UIntPtr startPage,
                                     UIntPtr pageCount,
                                     PageType newType,
                                     ref bool fCleanPages)
{
    Trace.Log(Trace.Area.Page,
              "TryReservePages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    UIntPtr index = startPage;
    while (index < endPage &&
           PageTable.IsUnusedPage(index) &&
           PageTable.IsMyPage(index)) {
        index++;
    }
    if (PageTable.IsUnallocatedPage(PageTable.Type(index))) {
        // We should try to extend the region of allocated pages
        UIntPtr pagesNeeded = pageCount - (index - startPage);
        UIntPtr bytesNeeded = PageTable.RegionSize(pagesNeeded);
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = PageTable.PageAddr(index);
        bool gotMemory = false;
        bool iflag = EnterMutex(currentThread);
        try {
            gotMemory = MemoryManager.AllocateMemory(startAddr, allocSize);
            if (gotMemory) {
                UIntPtr allocPages = PageTable.PageCount(allocSize);
                MarkUnusedPages(/* avoid recursive locking */ null,
                                index, allocPages, true);
            }
        } finally {
            LeaveMutex(currentThread, iflag);
        }
        if (gotMemory) {
            bool success =
                TryReserveUnusedPages(currentThread, startPage, pageCount,
                                      newType, ref fCleanPages);
            Trace.Log(Trace.Area.Page,
                      "TryReservePages success={0}", __arglist(success));
            return(success);
        }
    }
    return(false);
}
private UIntPtr FreshAlloc(UIntPtr bytes, uint alignment,
                           Thread currentThread)
{
#if SINGULARITY_KERNEL
    Kernel.Waypoint(702);
#endif
    this.Truncate();
    UIntPtr paddedBytes = PageTable.PagePad(bytes + alignment - UIntPtr.Size);
    BaseCollector.IncrementNewBytesSinceGC(paddedBytes);
    UIntPtr pages = PageTable.PageCount(paddedBytes);
    bool fCleanPages = CLEAR_POOL_PAGES();
    // We may eventually want to ask for specific pages
    // between asking if any pages are reusable and asking the
    // OS for any possible page.
    UIntPtr startPage =
        PageManager.EnsurePages(currentThread, pages, this.pageType,
                                ref fCleanPages);
    UIntPtr startAddr = PageTable.PageAddr(startPage);
    UIntPtr limitAddr = PageTable.PageAddr(startPage + pages);
    startAddr = Allocator.AlignedAllocationPtr(startAddr, limitAddr,
                                               alignment);
    this.allocNew = startAddr;
    this.allocPtr = startAddr + bytes;
    if (fCleanPages) {
        this.zeroedLimit = limitAddr;
    } else {
        Util.MemClear(startAddr, bytes);
        this.zeroedLimit = this.allocPtr;
    }
    this.reserveLimit = limitAddr;
    UIntPtr resultAddr = startAddr + PreHeader.Size;
    InteriorPtrTable.SetFirst(resultAddr);
#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
    if (GC.remsetType == RemSetType.Cards) {
        UIntPtr nextPageAddr = startAddr + PageTable.PageSize;
        VTable.Assert(resultAddr < nextPageAddr);
        if (this.allocPtr > nextPageAddr) {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
            OffsetTable.SetLast(resultAddr);
#endif
        }
    }
#endif
#if SINGULARITY_KERNEL
    Kernel.Waypoint(703);
#endif
    return(resultAddr);
}
private static void Clear(UIntPtr startAddr, UIntPtr regionSize)
{
    VTable.Assert(PageTable.PageAligned(startAddr));
    VTable.Assert(PageTable.PageAligned(regionSize));
    MemoryManager.IgnoreMemoryContents(startAddr, regionSize);
    MarkUnusedPages(Thread.CurrentThread,
                    PageTable.Page(startAddr),
                    PageTable.PageCount(regionSize),
                    false);
}
private UIntPtr growFreeList(UIntPtr blockSize, Thread t)
{
    UIntPtr pageCount = PageTable.PageCount(blockSize);
    bool fCleanPages = true;
    UIntPtr startPage =
        PageManager.EnsurePages(t, pageCount, PageType.Owner0,
                                ref fCleanPages);
    UIntPtr newBlockSize = PageTable.RegionSize(pageCount);
    UIntPtr newBlockAddr = PageTable.PageAddr(startPage);
    return(FreeBlock(newBlockAddr, newBlockSize));
}
internal static void Initialize(UIntPtr systemMemorySize)
{
    allocPtr = MemoryManager.AllocateMemory(systemMemorySize);
    limitPtr = allocPtr + systemMemorySize;
    if (GC.gcType != GCType.NullCollector) {
        PageManager.SetStaticDataPages(allocPtr, systemMemorySize);
#if !SINGULARITY
        PageTable.SetProcess(PageTable.Page(allocPtr),
                             PageTable.PageCount(systemMemorySize));
#endif
    }
}
internal static void Truncate()
{
    UIntPtr allocLimit = PageTable.PagePad(allocPtr);
    UIntPtr unusedSize = limitPtr - allocLimit;
    if (GC.gcType != GCType.NullCollector) {
        PageManager.ReleaseUnusedPages(PageTable.Page(allocLimit),
                                       PageTable.PageCount(unusedSize),
                                       true);
    }
    limitPtr = allocLimit;
}
internal override void CollectGeneration(int generation,
                                         UIntPtr generationPageCount)
{
    UIntPtr availableMemory = (UIntPtr)
        (MemoryManager.MemorySize - MemoryManager.OperatingSystemSize);
    UIntPtr softPageCountLimit = PageTable.PageCount(availableMemory);
    if (generation == (int)MAX_GENERATION &&
        (generationPageCount << 1) > softPageCountLimit) {
        // Use the sliding collector when fromSpace > 1/2 of available memory
        SlidingCollector.instance.CollectGeneration(generation,
                                                    generationPageCount);
    } else {
        SemispaceCollector.instance.CollectGeneration(generation,
                                                      generationPageCount);
    }
}
// Low-level routines based on the operating system interface
private static unsafe UIntPtr AllocateMemoryHelper(UIntPtr startAddr,
                                                   UIntPtr size)
{
    void *result = VirtualAlloc((void *)startAddr, size,
                                MEM_RESERVE, PAGE_READWRITE);
    VTable.Assert(PageTable.Page((UIntPtr)result + size - 1) <
                  PageTable.pageTableCount,
                  "OutOfMemory: MemoryManager: memory doesn't fit in page table");
    if (result != null) {
        Trace.Log(Trace.Area.Page,
                  "VirtualAlloc {0} at {1}", __arglist(size, result));
        VTable.Assert((startAddr == UIntPtr.Zero) ||
                      (result == (void *)startAddr));
        void *area = VirtualAlloc(result, size, MEM_COMMIT, PAGE_READWRITE);
        if (PageTable.halPageDescriptor != null) {
            PageTable.CreateNewPageTablesIfNecessary(
                PageTable.Page((UIntPtr)result),
                PageTable.PageCount(size));
            PageTable.SetProcess(PageTable.Page((UIntPtr)area),
                                 PageTable.PageCount(size));
        }
        VTable.Assert(result == area);
#if HIMEM
        // This assertion is only intended to catch bugs in Bartok.  If the
        // system (Windows) has freed memory earlier, that memory may lie in
        // low memory, and if we happen to be handed it back, the assertion
        // itself may not hold.
        VTable.Assert((UIntPtr)result >= PageTable.HIMEMStart,
                      ("High memory is expected to be allocated in HIMEM mode"));
#endif
    }
    return(new UIntPtr(result));
}
internal unsafe static void ReportNonGCDetails()
{
    VTable.DebugPrint("\nNon-GC data details:\n");
    uint totalPageCount = TotalNumPages(PageType.NonGC);
    VTable.DebugPrint("\tTotal number of pages: {0}",
                      __arglist(totalPageCount));
    VTable.DebugPrint("\n\tMemory accounting storage: {0}B",
                      __arglist((uint)totalSize));
    staticPtrs.Initialize();
    uint staticSize = StaticData.ScanStaticPointerData(staticPtrs);
    VTable.DebugPrint("\n\tCompile-time allocated data: {0}B",
                      __arglist(staticSize));
    VTable.DebugPrint("\n\tNumber of pointers: {0}",
                      __arglist(staticPtrs.PtrCount));
    uint bootstrapPageCount =
        totalPageCount - (uint)PageTable.PageCount((UIntPtr)staticSize);
    VTable.DebugPrint("\n\tNumber of bootstrap pages: {0}\n",
                      __arglist(bootstrapPageCount));
}
// Interface with the compiler!
internal static unsafe UIntPtr AllocateBig(UIntPtr numBytes,
                                           uint alignment,
                                           Thread currentThread)
{
    // Pretenure trigger
    pretenuredSinceLastFullGC += numBytes;
    if (pretenuredSinceLastFullGC > PretenureHardGCTrigger) {
        GC.InvokeMajorCollection(currentThread);
    }
    // Potentially join a collection
    GC.CheckForNeededGCWork(currentThread);
    int maxAlignmentOverhead = unchecked((int)alignment) - UIntPtr.Size;
    UIntPtr pageCount = PageTable.PageCount(numBytes + maxAlignmentOverhead);
    bool fCleanPages = true;
    UIntPtr page = PageManager.EnsurePages(currentThread, pageCount,
                                           largeObjectGeneration,
                                           ref fCleanPages);
    int unusedBytes =
        unchecked((int)(PageTable.RegionSize(pageCount) - numBytes));
    // Rotate the starting offset of successive big objects by whole
    // 32-byte cache lines (the << 5 / >> 5 shifts).
    int unusedCacheLines =
        unchecked((int)(unusedBytes - maxAlignmentOverhead)) >> 5;
    int pageOffset = 0;
    if (unusedCacheLines != 0) {
        pageOffset = (bigOffset % unusedCacheLines) << 5;
        bigOffset++;
    }
    UIntPtr pageStart = PageTable.PageAddr(page);
    for (int i = 0; i < pageOffset; i += UIntPtr.Size) {
        Allocator.WriteAlignment(pageStart + i);
    }
    UIntPtr unalignedStartAddr = pageStart + pageOffset;
    UIntPtr startAddr =
        Allocator.AlignedAllocationPtr(unalignedStartAddr,
                                       pageStart + unusedBytes,
                                       alignment);
    pageOffset += unchecked((int)(uint)(startAddr - unalignedStartAddr));
    if (pageOffset < unusedBytes) {
        BumpAllocator.WriteUnusedMarker(pageStart + pageOffset + numBytes);
    }
    UIntPtr resultAddr = startAddr + PreHeader.Size;
    InteriorPtrTable.SetFirst(resultAddr);
    VTable.Assert(PageTable.Page(resultAddr) <
                  PageTable.Page(startAddr + numBytes - 1),
                  "Big object should cross pages");
    if (GC.remsetType == RemSetType.Cards) {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
        OffsetTable.SetLast(resultAddr);
#endif
    }
    return(resultAddr);
}
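// A standalone sketch (not part of the runtime) of the cache-line "coloring"
// used by AllocateBig above: successive big objects start at different
// 32-byte cache-line offsets within the slack on their first page, so their
// headers do not all compete for the same cache sets.  The inputs below are
// hypothetical; in the real code they come from PageTable.RegionSize and the
// requested object size.
using System;

static class BigAllocColoringSketch
{
    const int CacheLineBytes = 32;              // implied by the << 5 / >> 5 shifts

    // Mirrors: pageOffset = (bigOffset % unusedCacheLines) << 5
    static int ColorOffset(int bigOffset, int unusedBytes, int maxAlignmentOverhead)
    {
        int unusedCacheLines = (unusedBytes - maxAlignmentOverhead) / CacheLineBytes;
        return (unusedCacheLines != 0)
            ? (bigOffset % unusedCacheLines) * CacheLineBytes
            : 0;
    }

    static void Main()
    {
        // With ~1 KB of slack, consecutive allocations start 32 bytes apart.
        for (int bigOffset = 0; bigOffset < 4; bigOffset++) {
            Console.WriteLine(ColorOffset(bigOffset,
                                          unusedBytes: 1024,
                                          maxAlignmentOverhead: 4));
        }
        // Prints: 0, 32, 64, 96
    }
}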
private unsafe void FindDestinationArea(ref UIntPtr destPage,
                                        ref UIntPtr destCursor,
                                        ref UIntPtr destLimit,
                                        UIntPtr objectSize,
                                        PageType destGeneration)
{
    VTable.Assert(IsValidGeneration((int)destGeneration));
    UIntPtr cursorPage = PageTable.Page(destCursor);
    UIntPtr limitPage = PageTable.Page(destLimit);
    UIntPtr pageAddr = PageTable.PagePad(destCursor);
    UIntPtr testPage = limitPage;
    UIntPtr endTestPage = PageTable.PageCount(destCursor + objectSize);
    if (destCursor > UIntPtr.Zero &&
        IsMyZombiePage(PageTable.Page(destCursor - 1))) {
        VTable.Assert(destPage == limitPage);
        while (IsMyZombiePage(testPage) ||
               (testPage < endTestPage &&
                PageTable.IsUnusedPage(testPage))) {
            testPage++;
        }
        if (testPage >= endTestPage) {
            // We can expand the current region
            endTestPage = testPage;
            VTable.Assert(PageTable.PageAligned(destLimit));
            InteriorPtrTable.ClearFirst(limitPage, testPage);
            if (GC.remsetType == RemSetType.Cards) {
                OffsetTable.ClearLast(PageTable.PageAddr(limitPage),
                                      PageTable.PageAddr(testPage) - 1);
            }
            while (limitPage != endTestPage) {
                VTable.Assert(PageTable.IsUnusedPage(destPage));
                do {
                    destPage++;
                } while (destPage < endTestPage &&
                         PageTable.IsUnusedPage(destPage));
                bool fCleanPages = true;
                bool status =
                    PageManager.TryReserveUnusedPages(null, limitPage,
                                                      destPage - limitPage,
                                                      nurseryGeneration,
                                                      ref fCleanPages);
                VTable.Assert(status);
                MakeZombiePages(limitPage, destPage - limitPage,
                                destGeneration);
                while (destPage < endTestPage && IsMyZombiePage(destPage)) {
                    destPage++;
                }
                limitPage = destPage;
            }
            destLimit = PageTable.PageAddr(limitPage);
            return;
        }
    }
    if (destCursor != pageAddr) {
        cursorPage++;
    }
    if (cursorPage != limitPage) {
        this.RegisterSkippedPages(cursorPage, limitPage);
    }
    // Find a new region big enough to contain the object
    UIntPtr neededPages = PageTable.PageCount(objectSize);
    UIntPtr prefixPage;
    while (true) {
        do {
            destPage++;
        } while (!IsMyZombiePage(destPage));
        cursorPage = destPage;
        prefixPage = cursorPage;
        do {
            destPage++;
        } while (IsMyZombiePage(destPage));
        limitPage = destPage;
        if (neededPages <= limitPage - cursorPage) {
            break;
        }
        // Check for following unused pages
        endTestPage = cursorPage + neededPages;
        VTable.Assert(endTestPage <= PageTable.pageTableCount);
        while (destPage < endTestPage &&
               (PageTable.IsUnusedPage(destPage) ||
                IsMyZombiePage(destPage))) {
            destPage++;
        }
        if (destPage == endTestPage) {
            break;
        }
        // Check for preceding unused pages
        if (destPage >= neededPages) {
            endTestPage = destPage - neededPages;
            prefixPage = cursorPage - 1;
            while (prefixPage >= UIntPtr.Zero &&
                   PageTable.IsUnusedPage(prefixPage)) {
                prefixPage--;
            }
            prefixPage++;
            if (prefixPage == endTestPage) {
                break;
            }
        }
        // Register any skipped regions of pages
        this.RegisterSkippedPages(cursorPage, limitPage);
        while (limitPage < destPage) {
            VTable.Assert(PageTable.IsUnusedPage(limitPage));
            do {
                limitPage++;
            } while (limitPage < destPage &&
                     PageTable.IsUnusedPage(limitPage));
            cursorPage = limitPage;
            while (limitPage < destPage && IsMyZombiePage(limitPage)) {
                limitPage++;
            }
            if (cursorPage != limitPage) {
                this.RegisterSkippedPages(cursorPage, limitPage);
            }
        }
    }
    // We found an area big enough.
    // Commit the pre- and postfix areas of unused pages.
    if (prefixPage != cursorPage) {
        bool fCleanPages = true;
        bool status =
            PageManager.TryReserveUnusedPages(null, prefixPage,
                                              cursorPage - prefixPage,
                                              nurseryGeneration,
                                              ref fCleanPages);
        VTable.Assert(status);
        MakeZombiePages(prefixPage, cursorPage - prefixPage,
                        destGeneration);
    }
    while (destPage != limitPage) {
        // Mark the region of unused pages as fromspace
        UIntPtr unusedPage = limitPage;
        VTable.Assert(PageTable.IsUnusedPage(unusedPage));
        do {
            unusedPage++;
        } while (unusedPage < destPage &&
                 PageTable.IsUnusedPage(unusedPage));
        bool fCleanPages = true;
        bool status =
            PageManager.TryReserveUnusedPages(null, limitPage,
                                              unusedPage - limitPage,
                                              nurseryGeneration,
                                              ref fCleanPages);
        VTable.Assert(status);
        MakeZombiePages(limitPage, unusedPage - limitPage, destGeneration);
        // Skip any sections of pages already marked as fromspace
        limitPage = unusedPage;
        while (limitPage < destPage && IsMyZombiePage(limitPage)) {
            limitPage++;
        }
    }
    destCursor = PageTable.PageAddr(prefixPage);
    destLimit = PageTable.PageAddr(limitPage);
    // Take ownership of the new pages
    InteriorPtrTable.ClearFirst(prefixPage, limitPage);
    InteriorPtrTable.SetFirst(destCursor + PreHeader.Size);
    if (GC.remsetType == RemSetType.Cards) {
        OffsetTable.ClearLast(PageTable.PageAddr(prefixPage),
                              PageTable.PageAddr(limitPage) - 1);
    }
}
internal static void ReleaseUnusedPages(UIntPtr startPage,
                                        UIntPtr pageCount,
                                        bool fCleanPages)
{
    if (VTable.enableDebugPrint) {
        VTable.DebugPrint("ClearPages({0}, {1})\n",
                          __arglist(startPage, pageCount));
    }
    UIntPtr startAddr = PageTable.PageAddr(startPage);
    UIntPtr endPage = startPage + pageCount;
    UIntPtr endAddr = PageTable.PageAddr(endPage);
    UIntPtr rangeSize = PageTable.RegionSize(pageCount);
    MarkUnusedPages(Thread.CurrentThread, PageTable.Page(startAddr),
                    PageTable.PageCount(rangeSize), fCleanPages);
    if (PageManager.AggressiveMemReset) {
        // We cannot simply reset the memory range, as MEM_RESET can only be
        // used within a region returned from a single VirtualAlloc call.
        UIntPtr regionAddr, regionSize;
        bool fUsed = MemoryManager.QueryMemory(startAddr, out regionAddr,
                                               out regionSize);
        if (VTable.enableDebugPrint) {
            VTable.DebugPrint(" 1 Query({0}, {1}, {2}) -> {3}\n",
                              __arglist(startAddr, regionAddr,
                                        regionSize, fUsed));
        }
        VTable.Assert(fUsed, "Memory to be cleared isn't used");
        // We don't care if regionAddr < startAddr.  We can MEM_RESET
        // part of the region.
        UIntPtr endRegion = regionAddr + regionSize;
        while (endRegion < endAddr) {
            if (VTable.enableDebugPrint) {
                VTable.DebugPrint("Clearing region [{0}, {1}]\n",
                                  __arglist(regionAddr,
                                            regionAddr + regionSize));
            }
            MemoryManager.IgnoreMemoryContents(startAddr,
                                               endRegion - startAddr);
            startAddr = endRegion;
            fUsed = MemoryManager.QueryMemory(endRegion, out regionAddr,
                                              out regionSize);
            if (VTable.enableDebugPrint) {
                VTable.DebugPrint(" 2 Query({0}, {1}, {2}) -> {3}\n",
                                  __arglist(endRegion, regionAddr,
                                            regionSize, fUsed));
            }
            VTable.Assert(fUsed, "Region to be freed isn't used");
            endRegion = regionAddr + regionSize;
        }
        if (VTable.enableDebugPrint) {
            VTable.DebugPrint("Clearing final region [{0}, {1}]\n",
                              __arglist(startAddr, endAddr));
        }
        MemoryManager.IgnoreMemoryContents(startAddr, endAddr - startAddr);
    }
    if (VTable.enableDebugPrint) {
        VTable.DebugPrint(" --> ClearPages({0},{1})\n",
                          __arglist(startPage, pageCount));
    }
}
internal static UIntPtr EnsurePages(Thread currentThread,
                                    UIntPtr pageCount,
                                    PageType newType,
                                    ref bool fCleanPages)
{
    if (currentThread != null) {
        GC.CheckForNeededGCWork(currentThread);
    }
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    // Try to find already allocated but unused pages
    UIntPtr foundPages = FindUnusedPages(currentThread, pageCount, newType);
    if (foundPages != UIntPtr.Zero) {
        if (fCleanPages) {
            CleanFoundPages(foundPages);
        } else {
            fCleanPages = FoundOnlyCleanPages(foundPages);
        }
        return(foundPages);
    }
    // We need to allocate new pages
    bool iflag = EnterMutex(currentThread);
    try {
        UIntPtr bytesNeeded = PageTable.RegionSize(pageCount);
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = MemoryManager.AllocateMemory(allocSize);
        if (startAddr == UIntPtr.Zero) {
            if (heap_commit_size > os_commit_size) {
                allocSize = Util.Pad(bytesNeeded, os_commit_size);
                startAddr = MemoryManager.AllocateMemory(allocSize);
            }
        }
        if (startAddr == UIntPtr.Zero) {
            // BUGBUG: If in CMS, we should wait for one complete GC cycle
            // and then retry.  For STW, we may get here even if the
            // collector hasn't been triggered just prior.
            PageTable.Dump("Out of memory");
            throw outOfMemoryException;
        }
        UIntPtr startPage = PageTable.Page(startAddr);
        PageTable.SetType(startPage, pageCount, newType);
        PageTable.SetProcess(startPage, pageCount);
        UIntPtr extraPages = PageTable.PageCount(allocSize) - pageCount;
        if (extraPages > 0) {
            // Mark the new memory pages as allocated-but-unused
            MarkUnusedPages(/* avoid recursive locking */ null,
                            startPage + pageCount, extraPages, true);
        }
        return(startPage);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
private UIntPtr ExtendAlloc(UIntPtr bytes, uint alignment,
                            Thread currentThread)
{
    if (this.reserveLimit == UIntPtr.Zero) {
        return(UIntPtr.Zero);
    }
#if SINGULARITY_KERNEL
    Kernel.Waypoint(700);
#endif
    UIntPtr neededBytes =
        bytes +                                 // bytes required for the object
        alignment - UIntPtr.Size -              // plus worst-case alignment overhead
        (this.reserveLimit - this.allocPtr);    // minus bytes already available
    UIntPtr paddedNeed = PageTable.PagePad(neededBytes);
    UIntPtr pageCount = PageTable.PageCount(paddedNeed);
    UIntPtr startPage = PageTable.Page(this.reserveLimit);
    bool fCleanPages = CLEAR_POOL_PAGES();
    bool gotPages =
        PageManager.TryReserveUnusedPages(currentThread, startPage,
                                          pageCount, this.pageType,
                                          ref fCleanPages);
    if (!gotPages) {
        // We can't indiscriminately ask for more memory if we have
        // unused pages already available.
        return(UIntPtr.Zero);
    }
    if (this.reserveLimit == UIntPtr.Zero) {
        // A collection occurred, so there is no region to extend
        PageManager.ReleaseUnusedPages(startPage, pageCount, fCleanPages);
        return(UIntPtr.Zero);
    }
    BaseCollector.IncrementNewBytesSinceGC(paddedNeed);
    this.allocNew = this.reserveLimit;
    // Pad alignment space if necessary.  NB: a prior call to
    // AllocateFast may have started generating alignment tokens,
    // but we may need to finish the job here if the residual space
    // was insufficient for a multi-word alignment.
    UIntPtr oldReserveLimit = this.reserveLimit;
    this.reserveLimit += paddedNeed;
    this.allocPtr = Allocator.AlignedAllocationPtr(this.allocPtr,
                                                   this.reserveLimit,
                                                   alignment);
    if (this.zeroedLimit < this.allocPtr) {
        this.zeroedLimit = this.allocPtr;
    }
    UIntPtr objectAddr = this.allocPtr + PreHeader.Size;
    this.allocPtr += bytes;
    if (fCleanPages) {
        if (this.zeroedLimit < oldReserveLimit) {
            Util.MemClear(this.zeroedLimit,
                          oldReserveLimit - this.zeroedLimit);
        }
        this.zeroedLimit = this.reserveLimit;
    } else {
        Util.MemClear(this.zeroedLimit,
                      this.allocPtr - this.zeroedLimit);
        this.zeroedLimit = this.allocPtr;
    }
    VTable.Assert(this.allocPtr <= this.zeroedLimit);
    VTable.Assert(PageTable.PageAligned(this.reserveLimit));
    if (objectAddr >= oldReserveLimit) {
        // Object is first on the new page
        InteriorPtrTable.SetFirst(objectAddr);
    } else if (objectAddr + bytes < this.reserveLimit) {
        // The object does not end at the new limit.
        // N.B. The next object may not be allocated at exactly
        // (objectAddr + bytes) due to alignment considerations.  It
        // also might not ever be allocated.  These cases are handled
        // by InteriorPtrTable.First skipping over alignment tokens
        // and by callers of First watching out for unused-space tokens.
        InteriorPtrTable.SetFirst(objectAddr + bytes);
    }
    // We know an object is the last one in a page when it extends
    // through that page into the next.  Otherwise it lies entirely
    // within the page, and we cannot be sure whether it is the last
    // object there or not.  So record only such a crossing object for
    // the last card in that page.  Many objects may be omitted by this
    // coarse-grained recording, but we should be able to update the
    // offset table incrementally and find them later.  I believe this
    // is a better choice than simply recording every object in the
    // offset table, because most objects simply die and never need to
    // be recorded.
#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
    if (GC.remsetType == RemSetType.Cards) {
        if (objectAddr < oldReserveLimit &&
            allocPtr + bytes > oldReserveLimit) {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
            OffsetTable.SetLast(objectAddr);
#endif
        }
    }
#endif
#if SINGULARITY_KERNEL
    Kernel.Waypoint(701);
#endif
    return(objectAddr);
}
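// A standalone arithmetic sketch (hypothetical numbers, not part of the
// runtime) of the neededBytes computation in ExtendAlloc above: reserve
// enough whole pages to cover the object plus worst-case alignment padding,
// minus the space left in the current region.
using System;

static class ExtendAllocSketch
{
    static void Main()
    {
        ulong bytes = 5000;                               // requested object size
        ulong alignment = 8, ptrSize = 4;                 // assumes 32-bit UIntPtr.Size
        ulong reserveLimit = 0x3000, allocPtr = 0x2F00;   // 0x100 bytes still free

        ulong neededBytes = bytes + alignment - ptrSize   // object + worst-case padding
                            - (reserveLimit - allocPtr);  // minus bytes already available
        const ulong pageSize = 4096;
        ulong paddedNeed = (neededBytes + pageSize - 1) & ~(pageSize - 1);

        // neededBytes = 5000 + 8 - 4 - 256 = 4748, so paddedNeed = 8192 (2 pages)
        Console.WriteLine("neededBytes={0} paddedNeed={1} pages={2}",
                          neededBytes, paddedNeed, paddedNeed / pageSize);
    }
}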