internal static void Initialize() {
    // The card table covers the whole heap. The heap size below is not
    // exact: it spans the entire address space, starting from address 0.
    // TODO: find a more accurate heap size and start address.
    UIntPtr heapSize = PageTable.pageTableCount * PageTable.PageSize;
    VTable.Assert((heapSize >> CardBits) << CardBits == heapSize,
                  "Assumption: the page size is expected to be a multiple of the card size");
    UIntPtr heapStart = (UIntPtr)0;
    totalCards = heapSize >> CardBits;
    firstCardNo = heapStart >> CardBits;
    UIntPtr tablePtr = PageManager.AllocateNonheapMemory(null, totalCards);
    VTable.Assert(tablePtr != UIntPtr.Zero);
    cardTablePtr = (byte *) tablePtr;
    // Bias the base pointer by firstCardNo so that card numbers can be
    // used as direct indexes into the table.
    cardTableBase = (byte *) (tablePtr - firstCardNo);
    CardTable.instance = (CardTable)
        BootstrapMemory.Allocate(typeof(CardTable));
    OffsetTable.Initialize(totalCards);
    for (UIntPtr c = firstCardNo; c < firstCardNo + totalCards; c++) {
        VTable.Assert(!CardIsDirty(c), "Card table initialization error");
    }
}
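// The address/card arithmetic used throughout this file is a simple shift by
// CardBits, as implied by Initialize above. The real helpers are not shown in
// this excerpt, so the following is only a minimal sketch of what they
// presumably look like; in particular, the dirty-byte encoding (any non-zero
// byte means dirty) is an assumption, not the confirmed CardTable encoding.
internal static UIntPtr CardNo(UIntPtr addr) {
    // Card index of an address: one card covers 2^CardBits bytes.
    return addr >> CardBits;
}

internal static UIntPtr CardAddr(UIntPtr c) {
    // First address covered by card c.
    return c << CardBits;
}

internal static UIntPtr NextCardAddr(UIntPtr c) {
    // First address past card c.
    return (c + 1) << CardBits;
}

internal static unsafe bool CardIsDirty(UIntPtr c) {
    // cardTableBase was set to tablePtr - firstCardNo in Initialize, so the
    // card number itself indexes the table (one byte per card). Assumed
    // encoding: any non-zero byte marks the card dirty.
    return *(cardTableBase + c) != 0;
}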
internal override UIntPtr Visit(Object obj) {
#if DEBUG_OFFSETTABLE
    VTable.DebugPrint("visit obj {0:x8}\n", __arglist(Magic.addressOf(obj)));
    if (lastObjPtr != UIntPtr.Zero) {
        UIntPtr lastCard = CardTable.CardNo(lastObjPtr);
        if (lastCard != CardTable.CardNo(Magic.addressOf(obj))) {
            if (!OffsetTable.NoObjectPtrToTheCard(lastCard)) {
                UIntPtr realOffst = lastObjPtr - CardTable.CardAddr(lastCard);
                UIntPtr currOffst = OffsetTable.GetOffset(lastCard);
                if (realOffst != currOffst) {
                    VTable.DebugPrint("Verifier: wrong offset. Card {0:x8} Offset {1:x8} Should be {2:x8}\n",
                                      __arglist(lastCard, currOffst, realOffst));
                    VTable.Assert(false);
                }
            }
        }
    }
#endif
    return(this.referenceVisitor.VisitReferenceFields(obj));
}
private UIntPtr FreshAlloc(UIntPtr bytes, uint alignment,
                           Thread currentThread)
{
#if SINGULARITY_KERNEL
    Kernel.Waypoint(702);
#endif
    this.Truncate();
    UIntPtr paddedBytes = PageTable.PagePad(bytes + alignment - UIntPtr.Size);
    BaseCollector.IncrementNewBytesSinceGC(paddedBytes);
    UIntPtr pages = PageTable.PageCount(paddedBytes);
    bool fCleanPages = CLEAR_POOL_PAGES();
    // We may eventually want to ask for specific pages
    // between asking if any pages are reusable and asking the
    // OS for any possible page.
    UIntPtr startPage =
        PageManager.EnsurePages(currentThread, pages, this.pageType,
                                ref fCleanPages);
    UIntPtr startAddr = PageTable.PageAddr(startPage);
    UIntPtr limitAddr = PageTable.PageAddr(startPage + pages);
    startAddr = Allocator.AlignedAllocationPtr(startAddr, limitAddr,
                                               alignment);
    this.allocNew = startAddr;
    this.allocPtr = startAddr + bytes;
    if (fCleanPages) {
        this.zeroedLimit = limitAddr;
    } else {
        Util.MemClear(startAddr, bytes);
        this.zeroedLimit = this.allocPtr;
    }
    this.reserveLimit = limitAddr;
    UIntPtr resultAddr = startAddr + PreHeader.Size;
    InteriorPtrTable.SetFirst(resultAddr);
#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
    if (GC.remsetType == RemSetType.Cards) {
        UIntPtr nextPageAddr = startAddr + PageTable.PageSize;
        VTable.Assert(resultAddr < nextPageAddr);
        if (this.allocPtr > nextPageAddr) {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
            // The object extends beyond its first page, so record it in
            // the offset table.
            OffsetTable.SetLast(resultAddr);
#endif
        }
    }
#endif
#if SINGULARITY_KERNEL
    Kernel.Waypoint(703);
#endif
    return(resultAddr);
}
private unsafe void SkipDestinationAreas(ref UIntPtr destPage,
                                         UIntPtr destCursor,
                                         ref UIntPtr destLimit,
                                         UIntPtr sourceCursor)
{
    UIntPtr cursorPage = PageTable.Page(destCursor);
    UIntPtr sourcePage = PageTable.Page(sourceCursor);
    if (cursorPage != sourcePage) {
        UIntPtr destPageLimit = PageTable.PagePad(destCursor);
        if (destPageLimit != destCursor) {
            cursorPage++;
        }
        VTable.Assert(PageTable.PageAligned(destLimit));
        UIntPtr limitPage = PageTable.Page(destLimit);
        while (destPage < sourcePage) {
            if (cursorPage < limitPage) {
                this.RegisterSkippedPages(cursorPage, limitPage);
            }
            do {
                destPage++;
            } while (!IsMyZombiePage(destPage));
            cursorPage = destPage;
            do {
                destPage++;
            } while (IsMyZombiePage(destPage));
            limitPage = destPage;
        }
        destLimit = PageTable.PageAddr(limitPage);
        VTable.Assert(destPage > sourcePage);
        VTable.Assert(cursorPage <= sourcePage);
        if (cursorPage < sourcePage) {
            this.RegisterSkippedPages(cursorPage, sourcePage);
            cursorPage = sourcePage;
        }
        InteriorPtrTable.ClearFirst(cursorPage, destPage);
        InteriorPtrTable.SetFirst(sourceCursor + PreHeader.Size);
        if (GC.remsetType == RemSetType.Cards) {
            OffsetTable.ClearLast(PageTable.PageAddr(cursorPage),
                                  PageTable.PageAddr(destPage) - 1);
        }
    }
}
private void CompactPhaseCleanup(Thread currentThread,
                                 PageType generation,
                                 UIntPtr newLimitPtr)
{
    VTable.Assert(IsValidGeneration((int)generation));
    registerThreadReferenceVisitor.Cleanup();
    // Free up skipped pages
    while (!this.skippedPageQueue.IsEmpty) {
        UIntPtr start = this.skippedPageQueue.Read();
        UIntPtr finish = this.skippedPageQueue.Read();
        InteriorPtrTable.ClearFirst(start, finish);
        PageManager.FreePageRange(start, finish);
        if (GC.remsetType == RemSetType.Cards) {
            OffsetTable.ClearLast(PageTable.PageAddr(start),
                                  PageTable.PageAddr(finish) - 1);
        }
    }
    this.skippedPageQueue.Cleanup(true);
    // Release the queue standby pages
    UnmanagedPageList.ReleaseStandbyPages();
    // Update the ownership information for the copied data
    PageType destGeneration = (generation == MAX_GENERATION)
        ? MAX_GENERATION
        : (PageType)(generation + 1);
    UIntPtr limitPage = PageTable.Page(PageTable.PagePad(newLimitPtr));
    for (UIntPtr i = UIntPtr.Zero; i < limitPage; i++) {
        if (IsMyZombiePage(i)) {
            PageTable.SetType(i, destGeneration);
        }
    }
}
private static void VisitObjectsInCards(NonNullReferenceVisitor ptrVisitor,
                                        UIntPtr first, UIntPtr last)
{
    UIntPtr objectPtr = OffsetTable.FirstObjPtrWithFieldInCard(first);
    VTable.Assert(objectPtr != UIntPtr.Zero, "No first obj ptr");
    // TODO: in semispace.Visit, update cards while visiting promoted
    // objects. So far we have only two generations, and after scanning,
    // all nursery objects are promoted to the mature generation, so no
    // inter-generational pointers remain and we do not need to update
    // cards during the visit. We will need to do so once there are more
    // than two generations.
    UIntPtr limit = NextCardAddr(last);
    while (objectPtr != UIntPtr.Zero && objectPtr < limit) {
#if DEBUG_CARDS
        VTable.DebugPrint(" visiting obj ptr {0:x8}\n",
                          __arglist(objectPtr));
#endif
        Object obj = Magic.fromAddress(objectPtr);
        UIntPtr objSize = ptrVisitor.VisitReferenceFields(obj);
        UIntPtr nextObjectPtr = OffsetTable.PtrToNextObject(objectPtr, objSize, limit);
        VTable.Assert(CardNo(objectPtr) <= CardNo(nextObjectPtr),
                      "Next object should not be in a lower card than the current one");
        if (CardNo(objectPtr) < CardNo(nextObjectPtr)) {
            // The next object starts in a later card, so objectPtr is the
            // last object pointer in its card; keep the offset table
            // pointing at the highest object start seen in that card.
            UIntPtr c = CardNo(objectPtr);
            UIntPtr offset = objectPtr - CardAddr(c);
            if (offset > OffsetTable.GetOffset(c)) {
                OffsetTable.SetOffset(c, offset);
            }
        }
        objectPtr = nextObjectPtr;
    }
}
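// VisitObjectsInCards scans a run of cards; the code that chooses which runs
// to scan is not part of this excerpt. The loop below is only a hypothetical
// driver sketch, not the collector's actual scan code: it walks the card
// table and hands each maximal run of dirty, live cards to
// VisitObjectsInCards. The real collector presumably restricts the walk to
// the pages of the generations being collected.
private static void ScanDirtyCards(NonNullReferenceVisitor ptrVisitor) {
    UIntPtr c = firstCardNo;
    UIntPtr end = firstCardNo + totalCards;
    while (c < end) {
        if (CardIsDirty(c) && IsMyLiveGcCard(c)) {
            UIntPtr first = c;
            while (c + 1 < end && CardIsDirty(c + 1) && IsMyLiveGcCard(c + 1)) {
                c++;
            }
            // Visit every object that has a field in cards [first, c].
            VisitObjectsInCards(ptrVisitor, first, c);
        }
        c++;
    }
}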
internal static UIntPtr FirstObjPtrWithFieldInCard(UIntPtr c) {
    VTable.Assert(CardTable.IsValidCardNo(c), "Not valid card no");
    VTable.Assert(CardTable.IsMyLiveGcCard(c), "Not my live GC card");
    UIntPtr cardAddr = CardTable.CardAddr(c);
    UIntPtr nextCardAddr = CardTable.NextCardAddr(c);
    // Scan backward for the nearest preceding card that records an object
    // pointer. This may go into zombie areas.
    UIntPtr cardBefore = c - 1;
    while (cardBefore >= CardTable.FirstCardNo
           && OffsetTable.NoObjectPtrToTheCard(cardBefore)
           && CardTable.IsMyGcCard(cardBefore)) {
        cardBefore--;
    }
    UIntPtr ptr;
    if (cardBefore < CardTable.FirstCardNo || !CardTable.IsMyGcCard(cardBefore)) {
        // This case happens when c is the first live card that has an object.
        VTable.Assert(CardTable.IsMyGcCard(cardBefore + 1),
                      "The next card must be a GC card");
        VTable.Assert(CardTable.CardAddr(cardBefore + 1) ==
                      CardTable.PageAddrOfCard(cardBefore + 1),
                      "The next card must be at the boundary of a page");
        UIntPtr pageAddr = CardTable.PageAddrOfCard(cardBefore + 1);
        ptr = PtrToNextObject(pageAddr, (UIntPtr)PreHeader.Size, nextCardAddr);
    } else {
        VTable.Assert(!OffsetTable.NoObjectPtrToTheCard(cardBefore),
                      "A card with an object must have been found");
        ptr = CardTable.CardAddr(cardBefore) + OffsetTable.GetOffset(cardBefore);
    }
    VTable.Assert(ptr < nextCardAddr, "No objptr in this card");

    // Scan forward from that object, updating the offset table along the way.
    UIntPtr currentPtr = ptr;
    while (currentPtr < cardAddr) {
        UIntPtr size = OffsetTable.ObjectSize(currentPtr);
        if (currentPtr + size - PreHeader.Size > cardAddr) {
            // The object straddles into card c, so it is the first object
            // with a field in that card.
            VTable.Assert(CardTable.CardGeneration(CardTable.CardNo(currentPtr)) ==
                          CardTable.CardGeneration(c),
                          "The whole object must be in the same generation");
            break;
        } else {
            ptr = currentPtr;
            currentPtr = PtrToNextObject(currentPtr, size, nextCardAddr);
            VTable.Assert(CardTable.CardNo(ptr) <= CardTable.CardNo(currentPtr),
                          "Previous ptr should be before current ptr");
            if (CardTable.CardNo(ptr) < CardTable.CardNo(currentPtr)) {
                UIntPtr ptrC = CardTable.CardNo(ptr);
                UIntPtr offsetC = ptr - CardTable.CardAddr(ptrC);
                if (offsetC > GetOffset(ptrC)) {
                    SetOffset(ptrC, offsetC);
                }
            }
        }
    }
    VTable.Assert(currentPtr != UIntPtr.Zero, "current ptr is zero");
    VTable.Assert(currentPtr < nextCardAddr, "No objptr found in this card");
#if DEBUG_OFFSETTABLE
    UIntPtr temp = OffsetTable.FirstPtrFromInteriorTable(c);
    if (temp != currentPtr) {
        VTable.DebugPrint("Found ptr ({0:x8}) inconsistent with first ptr from interior table ({1:x8})\n",
                          __arglist(currentPtr, temp));
        VTable.Assert(false);
    }
#endif
    return(currentPtr);
}
// Interface with the compiler!
internal static unsafe UIntPtr AllocateBig(UIntPtr numBytes,
                                           uint alignment,
                                           Thread currentThread)
{
    // Pretenure trigger
    pretenuredSinceLastFullGC += numBytes;
    if (pretenuredSinceLastFullGC > PretenureHardGCTrigger) {
        GC.InvokeMajorCollection(currentThread);
    }
    // Potentially join a collection
    GC.CheckForNeededGCWork(currentThread);
    int maxAlignmentOverhead = unchecked((int)alignment) - UIntPtr.Size;
    UIntPtr pageCount = PageTable.PageCount(numBytes + maxAlignmentOverhead);
    bool fCleanPages = true;
    UIntPtr page = PageManager.EnsurePages(currentThread, pageCount,
                                           largeObjectGeneration,
                                           ref fCleanPages);
    int unusedBytes =
        unchecked((int)(PageTable.RegionSize(pageCount) - numBytes));
    // The >> 5 / << 5 shifts convert between bytes and 32-byte cache lines.
    int unusedCacheLines =
        unchecked((int)(unusedBytes - maxAlignmentOverhead)) >> 5;
    int pageOffset = 0;
    if (unusedCacheLines != 0) {
        // Stagger successive big objects across cache lines.
        pageOffset = (bigOffset % unusedCacheLines) << 5;
        bigOffset++;
    }
    UIntPtr pageStart = PageTable.PageAddr(page);
    for (int i = 0; i < pageOffset; i += UIntPtr.Size) {
        Allocator.WriteAlignment(pageStart + i);
    }
    UIntPtr unalignedStartAddr = pageStart + pageOffset;
    UIntPtr startAddr =
        Allocator.AlignedAllocationPtr(unalignedStartAddr,
                                       pageStart + unusedBytes,
                                       alignment);
    pageOffset += unchecked((int)(uint)(startAddr - unalignedStartAddr));
    if (pageOffset < unusedBytes) {
        BumpAllocator.WriteUnusedMarker(pageStart + pageOffset + numBytes);
    }
    UIntPtr resultAddr = startAddr + PreHeader.Size;
    InteriorPtrTable.SetFirst(resultAddr);
    VTable.Assert(PageTable.Page(resultAddr) <
                  PageTable.Page(startAddr + numBytes - 1),
                  "Big object should cross pages");
    if (GC.remsetType == RemSetType.Cards) {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
        OffsetTable.SetLast(resultAddr);
#endif
    }
    return(resultAddr);
}
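// A small worked example of the cache-line staggering in AllocateBig. The
// concrete values here are assumptions for illustration only: 4 KB pages,
// numBytes = 6000, alignment = UIntPtr.Size (so maxAlignmentOverhead = 0).
//
//   pageCount        = PageCount(6000)      = 2  (8192 bytes reserved)
//   unusedBytes      = 8192 - 6000          = 2192
//   unusedCacheLines = 2192 >> 5            = 68
//   pageOffset       = (bigOffset % 68) << 5
//
// Consecutive big allocations of this shape therefore start at offsets
// 0, 32, 64, ... within their first page, wrapping after 68 allocations,
// which spreads the objects' headers across different cache lines.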
internal static void ReclaimZombiePages(UIntPtr heapPageCount,
                                        int generation)
{
    // Indicates whether we want to release pages back to the OS.
    bool releasePages = true;
    UIntPtr reservePages = UIntPtr.Zero;
    if (generation == (int)nurseryGeneration) {
        // Don't bother for a nursery collection, since the nursery is small.
        releasePages = false;
    } else {
        reservePages = heapPageCount;
        UIntPtr alreadyReservedPages = PageManager.TotalUnusedPages();
        if (reservePages > alreadyReservedPages) {
            reservePages = reservePages - alreadyReservedPages;
        } else {
            reservePages = UIntPtr.Zero;
        }
    }
    // MarkZombiePages updates the range for this generation, so we do
    // not need to take the union of the ranges of all target generations.
    UIntPtr minZombiePage = MinGenPage[generation];
    UIntPtr maxZombiePage = MaxGenPage[generation];
    for (UIntPtr i = minZombiePage; i <= maxZombiePage; i++) {
        if (IsMyZombiePage(i)) {
            UIntPtr startPage = i;
            UIntPtr endPage = startPage;
            do {
                endPage++;
            } while (IsMyZombiePage(endPage));
            InteriorPtrTable.ClearFirst(startPage, endPage);
            if (GC.remsetType == RemSetType.Cards) {
                OffsetTable.ClearLast(PageTable.PageAddr(startPage),
                                      PageTable.PageAddr(endPage) - 1);
            }
            if (!releasePages) {
                // No need to give the pages back; just zero out the
                // memory for reuse.
                UIntPtr pageCount = endPage - startPage;
                PageManager.ReleaseUnusedPages(startPage, pageCount, false);
            } else if (reservePages > UIntPtr.Zero) {
                // Keep sufficient pages for the new nursery.
                UIntPtr pageCount = endPage - startPage;
                if (pageCount > reservePages) {
                    // Zero out the memory for reuse.
                    PageManager.ReleaseUnusedPages(startPage, reservePages,
                                                   false);
                    startPage += reservePages;
                    PageManager.FreePageRange(startPage, endPage);
                    reservePages = UIntPtr.Zero;
                } else {
                    // Zero out the memory for reuse.
                    PageManager.ReleaseUnusedPages(startPage, pageCount,
                                                   false);
                    reservePages = reservePages - pageCount;
                }
            } else {
                PageManager.FreePageRange(startPage, endPage);
            }
            i = endPage - 1;
        }
    }
}
private unsafe void FindDestinationArea(ref UIntPtr destPage,
                                        ref UIntPtr destCursor,
                                        ref UIntPtr destLimit,
                                        UIntPtr objectSize,
                                        PageType destGeneration)
{
    VTable.Assert(IsValidGeneration((int)destGeneration));
    UIntPtr cursorPage = PageTable.Page(destCursor);
    UIntPtr limitPage = PageTable.Page(destLimit);
    UIntPtr pageAddr = PageTable.PagePad(destCursor);
    UIntPtr testPage = limitPage;
    UIntPtr endTestPage = PageTable.PageCount(destCursor + objectSize);
    if (destCursor > UIntPtr.Zero &&
        IsMyZombiePage(PageTable.Page(destCursor - 1))) {
        VTable.Assert(destPage == limitPage);
        while (IsMyZombiePage(testPage) ||
               (testPage < endTestPage &&
                PageTable.IsUnusedPage(testPage))) {
            testPage++;
        }
        if (testPage >= endTestPage) {
            // We can expand the current region
            endTestPage = testPage;
            VTable.Assert(PageTable.PageAligned(destLimit));
            InteriorPtrTable.ClearFirst(limitPage, testPage);
            if (GC.remsetType == RemSetType.Cards) {
                OffsetTable.ClearLast(PageTable.PageAddr(limitPage),
                                      PageTable.PageAddr(testPage) - 1);
            }
            while (limitPage != endTestPage) {
                VTable.Assert(PageTable.IsUnusedPage(destPage));
                do {
                    destPage++;
                } while (destPage < endTestPage &&
                         PageTable.IsUnusedPage(destPage));
                bool fCleanPages = true;
                bool status =
                    PageManager.TryReserveUnusedPages(null, limitPage,
                                                      destPage - limitPage,
                                                      nurseryGeneration,
                                                      ref fCleanPages);
                VTable.Assert(status);
                MakeZombiePages(limitPage, destPage - limitPage,
                                destGeneration);
                while (destPage < endTestPage && IsMyZombiePage(destPage)) {
                    destPage++;
                }
                limitPage = destPage;
            }
            destLimit = PageTable.PageAddr(limitPage);
            return;
        }
    }
    if (destCursor != pageAddr) {
        cursorPage++;
    }
    if (cursorPage != limitPage) {
        this.RegisterSkippedPages(cursorPage, limitPage);
    }
    // Find a new region big enough to contain the object
    UIntPtr neededPages = PageTable.PageCount(objectSize);
    UIntPtr prefixPage;
    while (true) {
        do {
            destPage++;
        } while (!IsMyZombiePage(destPage));
        cursorPage = destPage;
        prefixPage = cursorPage;
        do {
            destPage++;
        } while (IsMyZombiePage(destPage));
        limitPage = destPage;
        if (neededPages <= limitPage - cursorPage) {
            break;
        }
        // Check for following unused pages
        endTestPage = cursorPage + neededPages;
        VTable.Assert(endTestPage <= PageTable.pageTableCount);
        while (destPage < endTestPage &&
               (PageTable.IsUnusedPage(destPage) ||
                IsMyZombiePage(destPage))) {
            destPage++;
        }
        if (destPage == endTestPage) {
            break;
        }
        // Check for preceding unused pages
        if (destPage >= neededPages) {
            endTestPage = destPage - neededPages;
            prefixPage = cursorPage - 1;
            while (prefixPage >= UIntPtr.Zero &&
                   PageTable.IsUnusedPage(prefixPage)) {
                prefixPage--;
            }
            prefixPage++;
            if (prefixPage == endTestPage) {
                break;
            }
        }
        // Register any skipped regions of pages
        this.RegisterSkippedPages(cursorPage, limitPage);
        while (limitPage < destPage) {
            VTable.Assert(PageTable.IsUnusedPage(limitPage));
            do {
                limitPage++;
            } while (limitPage < destPage &&
                     PageTable.IsUnusedPage(limitPage));
            cursorPage = limitPage;
            while (limitPage < destPage && IsMyZombiePage(limitPage)) {
                limitPage++;
            }
            if (cursorPage != limitPage) {
                this.RegisterSkippedPages(cursorPage, limitPage);
            }
        }
    }
    // We found an area big enough.
    // Commit the pre- and postfix areas of unused pages
    if (prefixPage != cursorPage) {
        bool fCleanPages = true;
        bool status =
            PageManager.TryReserveUnusedPages(null, prefixPage,
                                              cursorPage - prefixPage,
                                              nurseryGeneration,
                                              ref fCleanPages);
        VTable.Assert(status);
        MakeZombiePages(prefixPage, cursorPage - prefixPage,
                        destGeneration);
    }
    while (destPage != limitPage) {
        // Mark the region of unused pages as fromspace
        UIntPtr unusedPage = limitPage;
        VTable.Assert(PageTable.IsUnusedPage(unusedPage));
        do {
            unusedPage++;
        } while (unusedPage < destPage &&
                 PageTable.IsUnusedPage(unusedPage));
        bool fCleanPages = true;
        bool status =
            PageManager.TryReserveUnusedPages(null, limitPage,
                                              unusedPage - limitPage,
                                              nurseryGeneration,
                                              ref fCleanPages);
        VTable.Assert(status);
        MakeZombiePages(limitPage, unusedPage - limitPage,
                        destGeneration);
        // Skip any sections of pages already marked as fromspace
        limitPage = unusedPage;
        while (limitPage < destPage && IsMyZombiePage(limitPage)) {
            limitPage++;
        }
    }
    destCursor = PageTable.PageAddr(prefixPage);
    destLimit = PageTable.PageAddr(limitPage);
    // Take ownership of the new pages
    InteriorPtrTable.ClearFirst(prefixPage, limitPage);
    InteriorPtrTable.SetFirst(destCursor + PreHeader.Size);
    if (GC.remsetType == RemSetType.Cards) {
        OffsetTable.ClearLast(PageTable.PageAddr(prefixPage),
                              PageTable.PageAddr(limitPage) - 1);
    }
}
private static void CleanPageTail(UIntPtr postPinnedAddr) {
    if (!PageTable.PageAligned(postPinnedAddr)) {
        // If postPinnedAddr points to the first object on its page,
        // then we are removing all objects (specifically the part
        // of each object that the InteriorPtrTable tracks, the
        // vtables) from the page, so we should clear the page's
        // entry in the InteriorPtrTable.
        UIntPtr page = PageTable.Page(postPinnedAddr);
        UIntPtr firstObjPtr = InteriorPtrTable.First(page);
        if (firstObjPtr > postPinnedAddr) {
            VTable.Assert
                (firstObjPtr - PreHeader.Size >= postPinnedAddr,
                 "postPinnedAddr should not point to the " +
                 "interior of an object (1)");
            InteriorPtrTable.ClearFirst(page);
        } else if (!BumpAllocator.IsUnusedSpace(firstObjPtr)) {
            UIntPtr firstObjSize = InteriorPtrTable.ObjectSize(firstObjPtr);
            VTable.Assert
                (firstObjPtr + firstObjSize - PreHeader.Size <= postPinnedAddr,
                 "postPinnedAddr should not point to the " +
                 "interior of an object (2)");
        }
        UIntPtr byteCount = PageTable.PagePad(postPinnedAddr) - postPinnedAddr;
        Util.MemClear(postPinnedAddr, byteCount);
        BumpAllocator.WriteUnusedMarker(postPinnedAddr);

        if (GC.remsetType == RemSetType.Cards && byteCount > 0) {
            UIntPtr firstCard = CardTable.CardNo(postPinnedAddr);
            UIntPtr lastCard = CardTable.CardNo(postPinnedAddr + byteCount - 1);
            if (!OffsetTable.NoObjectPtrToTheCard(firstCard)) {
                UIntPtr offset = OffsetTable.GetOffset(firstCard);
                UIntPtr objPtr = CardTable.CardAddr(firstCard) + offset;
                UIntPtr size = OffsetTable.ObjectSize(objPtr);
                VTable.Assert
                    ((objPtr + size - PreHeader.Size <= postPinnedAddr) ||
                     (objPtr >= postPinnedAddr),
                     "Object should be totally above or below postPinnedAddr");
                if (objPtr >= postPinnedAddr) {
                    // The recorded object start lies in the cleared tail,
                    // so drop the card's offset entry.
                    OffsetTable.ClearCards(firstCard, firstCard);
                }
            }
            OffsetTable.ClearCards(firstCard + 1, lastCard);
        }
    }
}
internal void CleanPinnedPages() {
    if (pinnedPageList == null || pinnedPageList.Count == 0) {
        return;
    }
    int pageIndex = 0;
    int limit = pinnedPageList.Count;
    UIntPtr lastPostPinnedAddr = UIntPtr.Zero;
    while (pageIndex < limit) {
        UIntPtr startPage = (UIntPtr)pinnedPageList[pageIndex];
        UIntPtr endPage = startPage + 1;
        pageIndex++;
        while (pageIndex < limit &&
               (UIntPtr)pinnedPageList[pageIndex] == endPage) {
            pageIndex++;
            endPage++;
        }
        // Zero out the area between the start of the page and
        // the first object on the page
        UIntPtr firstObjectAddr = FirstPinnedObjectAddr(startPage);
        UIntPtr firstAddr = firstObjectAddr - PreHeader.Size;
        UIntPtr trashAddr = PageTable.PageAddr(startPage);
        if (firstAddr < trashAddr) {
            // The first object "spills" into the previous page,
            // presumably by no more than HEADER_BYTES bytes
            VTable.Assert(
                PageTable.Page(firstAddr) == startPage - 1,
                "Semispace:RegisterPinnedReference:3");
            // Prepare to zero the preceding page unless it also
            // had pinned data on it
            trashAddr = PageTable.PageAddr(startPage - 1);
            InteriorPtrTable.ClearFirst(startPage - 1);
            if (trashAddr >= lastPostPinnedAddr) {
                // Need to mark the spilled-onto page live to
                // keep the spilled data around
                PageType fromSpaceType = PageTable.Type(startPage - 1);
                VTable.Assert(
                    PageTable.IsZombiePage(fromSpaceType),
                    "Semispace:RegisterPinnedReference:4");
                PageType toSpaceType =
                    PageTable.ZombieToLive(fromSpaceType);
                PageTable.SetType(startPage - 1, toSpaceType);
            }
        }
        // If lastPostPinnedAddr is on the page where trashAddr starts,
        // then pinned data from the previous run of pinned pages and
        // pinned data from this run are on the same page, so just write
        // alignment tokens from lastPostPinnedAddr to the first pinned
        // object. Otherwise, write an unused marker at lastPostPinnedAddr,
        // since the rest of its page must be copied or dead.
        if (trashAddr < lastPostPinnedAddr) {
            trashAddr = lastPostPinnedAddr;
        } else {
            CleanPageTail(lastPostPinnedAddr);
        }
        if (GC.remsetType == RemSetType.Cards && trashAddr < firstAddr) {
            UIntPtr firstCard = CardTable.CardNo(trashAddr);
            UIntPtr lastCard = CardTable.CardNo(firstAddr - 1);
            if (!OffsetTable.NoObjectPtrToTheCard(firstCard)) {
                UIntPtr offset = OffsetTable.GetOffset(firstCard);
                UIntPtr objPtr = CardTable.CardAddr(firstCard) + offset;
                UIntPtr size = OffsetTable.ObjectSize(objPtr);
                VTable.Assert
                    ((objPtr + size - PreHeader.Size <= trashAddr) ||
                     (objPtr >= trashAddr),
                     "Object should be totally above or below trashAddr");
                if (objPtr >= trashAddr) {
                    // The offset in this card needs to be updated
                    OffsetTable.ClearCards(firstCard, firstCard);
                }
            }
            OffsetTable.ClearCards(firstCard + 1, lastCard - 1);
            if (lastCard != CardTable.CardNo(firstObjectAddr)) {
                OffsetTable.ClearCards(lastCard, lastCard);
            } else {
                VTable.Assert(OffsetTable.GetOffset(lastCard) >=
                              (firstObjectAddr -
                               CardTable.CardAddr(lastCard)),
                              "wrong offset");
            }
        }
        {
            // trashAddr should go back at most one page.
            UIntPtr trashPage = PageTable.Page(trashAddr);
            UIntPtr firstObjectAddrPage = PageTable.Page(firstObjectAddr);
            VTable.Assert((trashPage == firstObjectAddrPage - 1) ||
                          (trashPage == firstObjectAddrPage));
        }
        // If the InteriorPtrTable already had a value, then this is
        // redundant, but if the call to First above had to compute
        // the value, then (since it won't store it in the table) we
        // should store it. Why? At this point the previous page
        // would be "connected" to this one.
        // After this collection, the previous page will be unused or
        // re-used and unrelated to this page, and subsequent calls to
        // First would then rely on it making the leap between unrelated
        // pages.
        InteriorPtrTable.SetFirst(firstObjectAddr);
        while (trashAddr < firstAddr) {
            Allocator.WriteAlignment(trashAddr);
            trashAddr += UIntPtr.Size;
        }
        // Zero out the area between the last whole object on
        // the last page and the end of the last page
        UIntPtr pastAddr = PostPinnedObjectAddr(endPage);
        UIntPtr newEndPage =
            PageTable.Page(PageTable.PagePad(pastAddr));
        while (endPage < newEndPage) {
            // The last object spills into the next page(s), so
            // mark those page(s) live
            PageType fromPageType = PageTable.Type(endPage);
            if (PageTable.IsZombiePage(fromPageType)) {
                PageType toSpaceType =
                    PageTable.ZombieToLive(fromPageType);
                PageTable.SetType(endPage, toSpaceType);
            } else {
                // The final page might be live already because
                // something else on it was pinned.
                // pageIndex has already been incremented, so it
                // points to the start of the next set of
                // contiguous pages.
                VTable.Assert(
                    PageTable.IsLiveGcPage(fromPageType) &&
                    pageIndex < limit &&
                    endPage == (UIntPtr)pinnedPageList[pageIndex],
                    "Semispace:RegisterPinnedReference:5");
            }
            ++endPage;
        }
        lastPostPinnedAddr = pastAddr;
    }
    CleanPageTail(lastPostPinnedAddr);
    pinnedPageList = null;
    comparer = null;
}
private UIntPtr ExtendAlloc(UIntPtr bytes, uint alignment,
                            Thread currentThread)
{
    if (this.reserveLimit == UIntPtr.Zero) {
        return(UIntPtr.Zero);
    }
#if SINGULARITY_KERNEL
    Kernel.Waypoint(700);
#endif
    UIntPtr neededBytes =
        bytes +                               // bytes required for the object
        alignment - UIntPtr.Size -            // plus worst-case alignment overhead
        (this.reserveLimit - this.allocPtr);  // minus bytes already available
    UIntPtr paddedNeed = PageTable.PagePad(neededBytes);
    UIntPtr pageCount = PageTable.PageCount(paddedNeed);
    UIntPtr startPage = PageTable.Page(this.reserveLimit);
    bool fCleanPages = CLEAR_POOL_PAGES();
    bool gotPages =
        PageManager.TryReserveUnusedPages(currentThread, startPage,
                                          pageCount, this.pageType,
                                          ref fCleanPages);
    if (!gotPages) {
        // We can't indiscriminately ask for more memory if we have
        // unused pages already available.
        return(UIntPtr.Zero);
    }
    if (this.reserveLimit == UIntPtr.Zero) {
        // A collection occurred, so there is no region to extend
        PageManager.ReleaseUnusedPages(startPage, pageCount, fCleanPages);
        return(UIntPtr.Zero);
    }
    BaseCollector.IncrementNewBytesSinceGC(paddedNeed);
    this.allocNew = this.reserveLimit;
    // Pad alignment space if necessary. NB: a prior call to
    // AllocateFast may have started generating alignment tokens,
    // but we may need to finish the job here if the residual space
    // was insufficient for a multi-word alignment.
    UIntPtr oldReserveLimit = this.reserveLimit;
    this.reserveLimit += paddedNeed;
    this.allocPtr = Allocator.AlignedAllocationPtr(this.allocPtr,
                                                   this.reserveLimit,
                                                   alignment);
    if (this.zeroedLimit < this.allocPtr) {
        this.zeroedLimit = this.allocPtr;
    }
    UIntPtr objectAddr = this.allocPtr + PreHeader.Size;
    this.allocPtr += bytes;
    if (fCleanPages) {
        if (this.zeroedLimit < oldReserveLimit) {
            Util.MemClear(this.zeroedLimit,
                          oldReserveLimit - this.zeroedLimit);
        }
        this.zeroedLimit = this.reserveLimit;
    } else {
        Util.MemClear(this.zeroedLimit,
                      this.allocPtr - this.zeroedLimit);
        this.zeroedLimit = this.allocPtr;
    }
    VTable.Assert(this.allocPtr <= this.zeroedLimit);
    VTable.Assert(PageTable.PageAligned(this.reserveLimit));
    if (objectAddr >= oldReserveLimit) {
        // The object is the first one on a new page
        InteriorPtrTable.SetFirst(objectAddr);
    } else if (objectAddr + bytes < this.reserveLimit) {
        // The object does not end at the new limit
        // N.B. The next object may not be allocated at exactly
        // (objectAddr + bytes) due to alignment considerations. It
        // also might not ever be allocated. These cases are handled
        // by InteriorPtrTable.First skipping over alignment tokens
        // and callers of First watching out for unused space tokens.
        InteriorPtrTable.SetFirst(objectAddr + bytes);
    }
    // We know an object is the last one in a page when it extends
    // through that page into the next. Otherwise it lies entirely
    // before or after the page, and we cannot be sure whether it is
    // the last object or not. So we record only such page-crossing
    // objects for the last card in that page. Many objects are
    // omitted by this coarse-grained recording, but we should be able
    // to incrementally update the offset table and find them. I
    // believe this is a better choice than simply recording every
    // object in the offset table, because most objects just die and
    // never need to be recorded.
#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
    if (GC.remsetType == RemSetType.Cards) {
        // allocPtr has already been advanced past the object, so the
        // object crosses into the newly reserved region exactly when
        // allocPtr lies beyond the old reserve limit.
        if (objectAddr < oldReserveLimit &&
            this.allocPtr > oldReserveLimit) {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
            OffsetTable.SetLast(objectAddr);
#endif
        }
    }
#endif
#if SINGULARITY_KERNEL
    Kernel.Waypoint(701);
#endif
    return(objectAddr);
}
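// OffsetTable.SetLast itself is not part of this excerpt. Inferred from how
// GetOffset / SetOffset are used in VisitObjectsInCards and
// FirstObjPtrWithFieldInCard, the offset table keeps, per card, the offset
// (from the card's start) of the highest object start known in that card,
// with NoObjectPtrToTheCard answering cards that have no entry. The sketch
// below only illustrates that invariant; the real implementation may differ.
internal static void SetLast(UIntPtr objectAddr) {
    UIntPtr c = CardTable.CardNo(objectAddr);
    UIntPtr offset = objectAddr - CardTable.CardAddr(c);
    // Only move the recorded offset forward: the table tracks the last
    // (highest-address) object start seen in the card.
    if (NoObjectPtrToTheCard(c) || offset > GetOffset(c)) {
        SetOffset(c, offset);
    }
}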