internal static void ReleaseStandbyPages() {
    while (!pageCache.IsEmpty) {
        UIntPtr pageAddr = pageCache.RemoveHead();
        PageManager.ReleaseUnusedPages(PageTable.Page(pageAddr),
                                       (UIntPtr) 1, false);
    }
}
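
// Truncate returns the unused page-aligned tail of the allocation
// region to the page manager (unless the null collector is running)
// and clips limitPtr accordingly.  Worked example (illustrative,
// assuming 4 KB pages): with allocPtr == 0x5234 and
// limitPtr == 0x9000, PagePad rounds allocPtr up to
// allocLimit == 0x6000, so unusedSize == 0x3000 and the final three
// pages are released before limitPtr becomes 0x6000.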
internal static void Truncate() {
    UIntPtr allocLimit = PageTable.PagePad(allocPtr);
    UIntPtr unusedSize = limitPtr - allocLimit;
    if (GC.gcType != GCType.NullCollector) {
        PageManager.ReleaseUnusedPages(PageTable.Page(allocLimit),
                                       PageTable.PageCount(unusedSize),
                                       true);
    }
    limitPtr = allocLimit;
}
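
// ReclaimZombiePages sweeps the recorded page range of the collected
// generation, coalescing each run of zombie pages.  For a nursery
// collection every run is simply returned to the page manager for
// reuse; otherwise enough pages are kept back to seed the new nursery
// and the remainder are freed to the operating system.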
internal static void ReclaimZombiePages(UIntPtr heapPageCount,
                                        int generation)
{
    // Indicates whether we want to release pages back to the OS.
    bool releasePages = true;
    UIntPtr reservePages = UIntPtr.Zero;
    if (generation == (int)nurseryGeneration) {
        // Don't bother for a nursery collection, since the nursery
        // size is small.
        releasePages = false;
    } else {
        reservePages = heapPageCount;
        UIntPtr alreadyReservedPages = PageManager.TotalUnusedPages();
        if (reservePages > alreadyReservedPages) {
            reservePages = reservePages - alreadyReservedPages;
        } else {
            reservePages = UIntPtr.Zero;
        }
    }
    // MarkZombiePages updates the range for this generation, so we do
    // not need to take the union of the ranges of all target
    // generations.
    UIntPtr minZombiePage = MinGenPage[generation];
    UIntPtr maxZombiePage = MaxGenPage[generation];
    for (UIntPtr i = minZombiePage; i <= maxZombiePage; i++) {
        if (IsMyZombiePage(i)) {
            UIntPtr startPage = i;
            UIntPtr endPage = startPage;
            do {
                endPage++;
            } while (IsMyZombiePage(endPage));
            InteriorPtrTable.ClearFirst(startPage, endPage);
            if (GC.remsetType == RemSetType.Cards) {
                OffsetTable.ClearLast(PageTable.PageAddr(startPage),
                                      PageTable.PageAddr(endPage) - 1);
            }
            if (!releasePages) {
                // We don't need to worry about giving the pages back.
                // Zero out the memory for reuse.
                UIntPtr pageCount = endPage - startPage;
                PageManager.ReleaseUnusedPages(startPage, pageCount,
                                               false);
            } else if (reservePages > UIntPtr.Zero) {
                // Keep sufficient pages for the new nursery.
                UIntPtr pageCount = endPage - startPage;
                if (pageCount > reservePages) {
                    // Zero out the memory for reuse.
                    PageManager.ReleaseUnusedPages(startPage,
                                                   reservePages,
                                                   false);
                    startPage += reservePages;
                    PageManager.FreePageRange(startPage, endPage);
                    reservePages = UIntPtr.Zero;
                } else {
                    // Zero out the memory for reuse.
                    PageManager.ReleaseUnusedPages(startPage, pageCount,
                                                   false);
                    reservePages = reservePages - pageCount;
                }
            } else {
                PageManager.FreePageRange(startPage, endPage);
            }
            i = endPage - 1;
        }
    }
}
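
// Worked example for the reserve-versus-free split above
// (illustrative page numbers): with reservePages == 5 and a zombie
// run covering pages [100, 108), pageCount == 8 exceeds the reserve,
// so pages [100, 105) are recycled for the new nursery, pages
// [105, 108) are freed via FreePageRange, and reservePages drops to
// zero; any later run is then freed outright.

// ExtendAlloc grows the current allocation region when a request
// does not fit: it tries to reserve fresh pages directly after
// reserveLimit and allocates the object, which may straddle the old
// region boundary.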
private UIntPtr ExtendAlloc(UIntPtr bytes, uint alignment,
                            Thread currentThread)
{
    if (this.reserveLimit == UIntPtr.Zero) {
        return(UIntPtr.Zero);
    }
#if SINGULARITY_KERNEL
    Kernel.Waypoint(700);
#endif
    UIntPtr neededBytes =
        bytes +                              // bytes required for the object,
        alignment - UIntPtr.Size -           // plus worst-case alignment overhead,
        (this.reserveLimit - this.allocPtr); // minus bytes already available
    UIntPtr paddedNeed = PageTable.PagePad(neededBytes);
    UIntPtr pageCount = PageTable.PageCount(paddedNeed);
    UIntPtr startPage = PageTable.Page(this.reserveLimit);
    bool fCleanPages = CLEAR_POOL_PAGES();
    bool gotPages =
        PageManager.TryReserveUnusedPages(currentThread, startPage,
                                          pageCount, this.pageType,
                                          ref fCleanPages);
    if (!gotPages) {
        // We can't indiscriminately ask for more memory if we have
        // unused pages already available.
        return(UIntPtr.Zero);
    }
    if (this.reserveLimit == UIntPtr.Zero) {
        // A collection occurred, so there is no region to extend.
        PageManager.ReleaseUnusedPages(startPage, pageCount,
                                       fCleanPages);
        return(UIntPtr.Zero);
    }
    BaseCollector.IncrementNewBytesSinceGC(paddedNeed);
    this.allocNew = this.reserveLimit;
    // Pad alignment space if necessary.  N.B. a prior call to
    // AllocateFast may have started generating alignment tokens, but
    // we may need to finish the job here if the residual space was
    // insufficient for a multi-word alignment.
    UIntPtr oldReserveLimit = this.reserveLimit;
    this.reserveLimit += paddedNeed;
    this.allocPtr =
        Allocator.AlignedAllocationPtr(this.allocPtr,
                                       this.reserveLimit,
                                       alignment);
    if (this.zeroedLimit < this.allocPtr) {
        this.zeroedLimit = this.allocPtr;
    }
    UIntPtr objectAddr = this.allocPtr + PreHeader.Size;
    this.allocPtr += bytes;
    if (fCleanPages) {
        if (this.zeroedLimit < oldReserveLimit) {
            Util.MemClear(this.zeroedLimit,
                          oldReserveLimit - this.zeroedLimit);
        }
        this.zeroedLimit = this.reserveLimit;
    } else {
        Util.MemClear(this.zeroedLimit,
                      this.allocPtr - this.zeroedLimit);
        this.zeroedLimit = this.allocPtr;
    }
    VTable.Assert(this.allocPtr <= this.zeroedLimit);
    VTable.Assert(PageTable.PageAligned(this.reserveLimit));
    if (objectAddr >= oldReserveLimit) {
        // The object is the first one on a new page.
        InteriorPtrTable.SetFirst(objectAddr);
    } else if (objectAddr + bytes < this.reserveLimit) {
        // The object does not end at the new limit.
        // N.B. The next object may not be allocated at exactly
        // (objectAddr + bytes) due to alignment considerations, and it
        // might never be allocated at all.  These cases are handled by
        // InteriorPtrTable.First skipping over alignment tokens and by
        // callers of First watching out for unused-space tokens.
        InteriorPtrTable.SetFirst(objectAddr + bytes);
    }
    // We know an object is the last one beginning on a page when it
    // extends through that page into the next.  Otherwise it lies
    // entirely before or after the page boundary, and we cannot tell
    // whether it is the last object or not.  So we record only such a
    // straddling object for the last card in the page.  Many objects
    // are omitted by this coarse-grained recording, but we can
    // incrementally update the offset table and find them later.  This
    // is a better choice than simply recording every object in the
    // offset table, because most objects die young and never need to
    // be recorded.
#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
    if (GC.remsetType == RemSetType.Cards) {
        if (objectAddr < oldReserveLimit &&
            objectAddr + bytes > oldReserveLimit) {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
            OffsetTable.SetLast(objectAddr);
#endif
        }
    }
#endif
#if SINGULARITY_KERNEL
    Kernel.Waypoint(701);
#endif
    return(objectAddr);
}
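
// Worked example for ExtendAlloc above (illustrative, assuming 4 KB
// pages, UIntPtr.Size == 8, and a hypothetical PreHeader.Size of 8):
// with bytes == 0x2100, alignment == 8, allocPtr == 0x7f40 and
// reserveLimit == 0x8000, only 0xc0 bytes remain, so
// neededBytes == 0x2100 + 8 - 8 - 0xc0 == 0x2040, paddedNeed ==
// 0x3000, and three fresh pages are reserved at page 8.  The object
// (objectAddr == 0x7f48) begins below oldReserveLimit == 0x8000 and
// ends at 0xa048 beyond it, so it straddles the old boundary and is
// recorded via OffsetTable.SetLast.

// A minimal sketch (not part of the collector) of that straddle test,
// factored out for clarity; the name is illustrative only:
private static bool StraddlesOldLimit(UIntPtr objectAddr, UIntPtr bytes,
                                      UIntPtr oldReserveLimit)
{
    // The object is the last one beginning on the old final page
    // exactly when it starts before the old reserve limit and its
    // payload extends beyond it.
    return objectAddr < oldReserveLimit
        && objectAddr + bytes > oldReserveLimit;
}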