internal override int GetGeneration(Object obj) {
    UIntPtr addr = Magic.addressOf(obj);
    UIntPtr page = PageTable.Page(addr);
    return ((int)PageTable.Type(page));
}
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
        return;
    }
    UIntPtr vtableAddr = Allocator.GetObjectVTable(addr);
    // Mark object
    if (vtableAddr == UIntPtr.Zero) {
        VTable.DebugPrint("Found null vtable in MarkReference (loc = 0x{0:x8}, addr = 0x{1:x8})\n",
                          __arglist(((UIntPtr)loc), addr));
        VTable.NotReached();
    }
    *loc = vtableAddr;
    Allocator.SetObjectVTable(addr, (UIntPtr)loc + 1);
    // If first visit to the object, schedule visit of fields
    if ((vtableAddr & 0x1) == 0) {
        MarkVisit(addr, vtableAddr & (UIntPtr) ~2U);
    }
}
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
        return;
    }
    UIntPtr vtableAddr = Allocator.GetObjectVTable(addr);
    if ((vtableAddr & 0x1) == 0x1) {
        // Link this field to be updated
        *loc = vtableAddr;
        Allocator.SetObjectVTable(addr, (UIntPtr)loc + 1);
    } else {
        // Zero the reference (not marked)
        *loc = UIntPtr.Zero;
    }
}
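// Illustrative sketch, not part of the collector: the two visitors above thread
// reference fields through the object's vtable word and use the low bit to
// distinguish a real (word-aligned) vtable pointer from "address of a threaded
// field, plus one".  The names and values below are hypothetical and only model
// that tagging scheme in isolation.
using System;

static class VTableWordTagSketch {
    static UIntPtr Tag(UIntPtr fieldAddr) { return (UIntPtr)((ulong)fieldAddr + 1); }
    static bool IsTagged(UIntPtr word)    { return ((ulong)word & 0x1) == 0x1; }
    static UIntPtr Untag(UIntPtr word)    { return (UIntPtr)((ulong)word - 1); }

    static void Main() {
        UIntPtr fieldAddr = (UIntPtr)0x2000UL;              // a word-aligned field address
        UIntPtr headerWord = Tag(fieldAddr);                // header word now "points back" at the field
        Console.WriteLine(IsTagged(headerWord));            // True: the object has already been visited
        Console.WriteLine(Untag(headerWord) == fieldAddr);  // True: the threaded field is recoverable
    }
}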
internal static unsafe void VerifyUnusedPage(UIntPtr page, bool containsHeader) {
    if (PageTable.Type(page) == PageType.UnusedDirty) {
        return;
    }
    // Verify that the page is indeed clean
    UIntPtr *startAddr = (UIntPtr *) PageTable.PageAddr(page);
    UIntPtr *endAddr = (UIntPtr *) PageTable.PageAddr(page + 1);
    // If the page contains a header then we can't expect the header
    // to be clean.
    if (containsHeader) {
        startAddr += (uint)
            (Util.UIntPtrPad((UIntPtr)sizeof(UnusedBlockHeader)) /
             (uint)sizeof(UIntPtr));
    }
    while (startAddr < endAddr) {
        VTable.Assert(*startAddr == UIntPtr.Zero,
                      "UnusedClean page contains nonzero data");
        startAddr++;
    }
}
internal static unsafe void SetStaticDataPages(UIntPtr startAddr, UIntPtr size) {
#if SINGULARITY
    // It's perfectly fine to be given memory outside the page table region, so
    // long as we intend to treat that memory as NonGC anyway.
    if (startAddr < PageTable.baseAddr) {
        if (startAddr + size < PageTable.baseAddr) {
            // Nothing to do. All pages are below the base covered by the page table.
            return;
        }
        // The range overlaps with the region covered by the page table
        size -= (PageTable.baseAddr - startAddr);
        startAddr = PageTable.baseAddr;
    }
    UIntPtr endAddr = startAddr + size;
    if (endAddr > PageTable.limitAddr) {
        if (startAddr > PageTable.limitAddr) {
            // Nothing to do. All pages are above the limit covered by the page table.
            return;
        }
        // The range overlaps with the region covered by the page table.
        // Trim the part of the range that lies above the limit.
        size -= (endAddr - PageTable.limitAddr);
    }
#endif
    UIntPtr startIndex = PageTable.Page(startAddr);
    UIntPtr pageCount = PageTable.PageCount(size);
    PageTable.SetType(startIndex, pageCount, PageType.NonGC);
}
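// Illustrative sketch, not runtime code: the #if SINGULARITY block above clips an
// arbitrary [startAddr, startAddr + size) range to the window covered by the page
// table before marking pages NonGC.  Plain ulongs stand in for UIntPtr here, and
// the function name is made up for the sketch.
static ulong ClipSizeToPageTableWindow(ulong start, ulong size,
                                       ulong baseAddr, ulong limitAddr) {
    ulong end = start + size;
    if (end < baseAddr || start > limitAddr) {
        return 0;                               // entirely outside: nothing to mark
    }
    if (start < baseAddr) {
        size -= baseAddr - start;               // drop the part below the base
        start = baseAddr;
    }
    end = start + size;
    if (end > limitAddr) {
        size -= end - limitAddr;                // drop the part above the limit
    }
    return size;
}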
internal static bool IsMyGcCard(UIntPtr c) {
    VTable.Assert(IsValidCardNo(c), "IsMyGcCard invalid");
    UIntPtr page = PageTable.Page(CardAddr(c));
    return (PageTable.IsMyGcPage(page));
}
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType),
                      "Semispace:ForwardOnlyReferenceVisitor");
        return;
    }
    PageType gen = PageTable.ZombieToLive(pageType);
    UIntPtr vtableAddr = Allocator.GetObjectVTable(addr);
    UIntPtr vtablePageIndex = PageTable.Page(vtableAddr);
    if (PageTable.Type(vtablePageIndex) == gen) {
        // The vtable field is really a forwarding pointer
        *loc = vtableAddr;
    } else {
        // The object was not live
        *loc = UIntPtr.Zero;
    }
}
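// Illustrative sketch, not runtime code: the forward-only visitor above treats the
// header word of a from-space ("zombie") object as a forwarding pointer exactly
// when that word already points into the corresponding live (to-space) generation;
// otherwise the object was never copied and the reference is cleared.  Here the
// page-type test is approximated by a to-space address range, which is an
// assumption made only for this sketch.
static ulong ForwardOrZero(ulong headerWord, ulong toSpaceStart, ulong toSpaceLimit) {
    bool forwarded = headerWord >= toSpaceStart && headerWord < toSpaceLimit;
    return forwarded ? headerWord : 0;          // dead references become null
}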
internal static unsafe void Verify(UnusedBlockHeader *header) {
    VTable.Assert(header->magic == (UIntPtr)magicNumber,
                  "Bad magic number in UnusedBlockHeader");
    VTable.Assert(header->count > 0,
                  "Count <= 0 in UnusedBlockHeader");
    VTable.Assert(header->prev->next == header,
                  "UnusedBlockHeader not linked properly (1)");
    if (header->next != null) {
        VTable.Assert(header->next->prev == header,
                      "UnusedBlockHeader not linked properly (2)");
    }
    UIntPtr count = header->count;
    UnusedBlockHeader *tailBlock = (UnusedBlockHeader *)
        (((UIntPtr)header) + PageTable.RegionSize(count - 1));
    VTable.Assert(tailBlock->curr == header,
                  "UnusedBlockHeader tail->curr is incorrect");
    if (PageManager.SlowDebug) {
        UIntPtr page = PageTable.Page((UIntPtr)header);
        for (UIntPtr i = UIntPtr.Zero; i < count; i++) {
            VTable.Assert(PageTable.IsUnusedPage(page + i) &&
                          PageTable.IsMyPage(page + i),
                          "Incorrect page in unused region");
        }
    }
}
private unsafe void CompactHeapObjects(UIntPtr previousEnd) {
    while (!this.relocationQueue.IsEmpty) {
        UIntPtr sourceAddress = this.relocationQueue.Read();
        UIntPtr destinationAddress = this.relocationQueue.Read();
        UIntPtr runLength = this.relocationQueue.Read();
        if (previousEnd != destinationAddress) {
            VTable.Assert(previousEnd < destinationAddress);
            if (PageTable.Page(destinationAddress) !=
                PageTable.Page(previousEnd + PreHeader.Size)) {
                if (!PageTable.PageAligned(previousEnd)) {
                    UIntPtr pageLimit = PageTable.PagePad(previousEnd);
                    BumpAllocator.WriteUnusedMarker(previousEnd);
                    previousEnd += UIntPtr.Size;
                    Util.MemClear(previousEnd, pageLimit - previousEnd);
                }
                if (!PageTable.PageAligned(destinationAddress)) {
                    // This only happens before pinned objects and
                    // large objects
                    UIntPtr start = PageTable.PageAlign(destinationAddress);
                    VTable.Assert(previousEnd <= start);
                    while (start < destinationAddress) {
                        Allocator.WriteAlignment(start);
                        start += UIntPtr.Size;
                    }
                }
                UIntPtr objAddr = destinationAddress + PreHeader.Size;
                InteriorPtrTable.SetFirst(objAddr);
            } else {
                VTable.Assert(previousEnd < destinationAddress);
                UIntPtr start = previousEnd;
                while (start < destinationAddress) {
                    Allocator.WriteAlignment(start);
                    start += UIntPtr.Size;
                }
            }
        }
        Util.MemCopy(destinationAddress, sourceAddress, runLength);
        previousEnd = destinationAddress + runLength;
    }
    // Zero out the end of the allocation page
    if (!PageTable.PageAligned(previousEnd)) {
        UIntPtr pageLimit = PageTable.PagePad(previousEnd);
        Util.MemClear(previousEnd, pageLimit - previousEnd);
    }
    this.relocationQueue.Cleanup(true);
}
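// Illustrative sketch, not runtime code: the relocation queue read above yields
// (source, destination, runLength) triples, and compaction is a sequence of copies
// that slide each run of surviving objects toward lower addresses.  Modeled on a
// byte array with hypothetical run descriptors:
using System;

static class CompactionSketch {
    static void CompactRuns(byte[] heap, (int src, int dst, int len)[] runs) {
        foreach (var (src, dst, len) in runs) {
            // Buffer.BlockCopy handles overlapping regions, so a run may slide
            // left over its own old location, as in the sliding collector.
            Buffer.BlockCopy(heap, src, heap, dst, len);
        }
    }
}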
internal static unsafe void VerifyUnusedRegion(UIntPtr startPage, UIntPtr endPage) {
    // Verify that all of the pages are of the same Clean/Dirty type.
    PageType startType = PageTable.Type(startPage);
    for (UIntPtr page = startPage; page < endPage; ++page) {
        VTable.Assert(startType == PageTable.Type(page),
                      "Unused page types don't match in region");
    }
    if (startPage > UIntPtr.Zero &&
        PageTable.IsUnusedPage(startPage - 1) &&
        PageTable.IsMyPage(startPage - 1)) {
        // We have already checked the region
        return;
    }
    UIntPtr regionAddr = PageTable.PageAddr(startPage);
    UnusedBlockHeader *regionHeader = (UnusedBlockHeader *) regionAddr;
    UIntPtr pageCount = regionHeader->count;
    VTable.Assert(pageCount >= (endPage - startPage),
                  "Region-to-verify is larger than its header specifies");
    endPage = startPage + pageCount;
    for (UIntPtr page = startPage; page < endPage; ++page) {
        VTable.Assert(PageTable.IsUnusedPage(page) && PageTable.IsMyPage(page),
                      "Non my-unused page in unused region");
        PageManager.VerifyUnusedPage(page,
                                     (page == startPage) || (page == (endPage - 1)));
    }
    VTable.Assert(!(endPage < PageTable.pageTableCount &&
                    PageTable.IsUnusedPage(endPage) &&
                    PageTable.IsMyPage(endPage)),
                  "My-unused page immediately after unused region");
    // Verify that the region is correctly linked into the
    // list of unused memory blocks
    int slot = SlotFromCount(pageCount);
    UnusedBlockHeader *header = unusedMemoryBlocks[slot].next;
    UnusedBlockHeader.Verify(header);
    while (regionAddr != (UIntPtr) header) {
        header = header->next;
        VTable.Assert(header != null,
                      "Unused region not in the list for its slot number");
        UnusedBlockHeader.Verify(header);
    }
}
internal static bool IsMyPage(UIntPtr page) {
#if SINGULARITY
    return (PageTable.ProcessTag(page) == processTag);
#else
    return (true);
#endif
}
internal static bool TryReservePages(Thread currentThread,
                                     UIntPtr startPage,
                                     UIntPtr pageCount,
                                     PageType newType,
                                     ref bool fCleanPages) {
    Trace.Log(Trace.Area.Page,
              "TryReservePages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    UIntPtr index = startPage;
    while (index < endPage &&
           PageTable.IsUnusedPage(index) &&
           PageTable.IsMyPage(index)) {
        index++;
    }
    if (PageTable.IsUnallocatedPage(PageTable.Type(index))) {
        // We should try to extend the region of allocated pages
        UIntPtr pagesNeeded = pageCount - (index - startPage);
        UIntPtr bytesNeeded = PageTable.RegionSize(pagesNeeded);
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = PageTable.PageAddr(index);
        bool gotMemory = false;
        bool iflag = EnterMutex(currentThread);
        try {
            gotMemory = MemoryManager.AllocateMemory(startAddr, allocSize);
            if (gotMemory) {
                UIntPtr allocPages = PageTable.PageCount(allocSize);
                MarkUnusedPages(/* avoid recursive locking */ null,
                                index, allocPages, true);
            }
        } finally {
            LeaveMutex(currentThread, iflag);
        }
        if (gotMemory) {
            bool success = TryReserveUnusedPages(currentThread, startPage,
                                                 pageCount, newType,
                                                 ref fCleanPages);
            Trace.Log(Trace.Area.Page,
                      "TryReservePages success={0}",
                      __arglist(success));
            return (success);
        }
    }
    return (false);
}
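// Illustrative sketch, not runtime code: the "round the shortfall up to the commit
// granularity" step used above when the run of reusable pages is too short.  The
// page size and commit size are assumed values chosen only for this sketch; the
// divide-then-multiply is the equivalent of Util.Pad.
static ulong BytesToCommit(ulong pagesNeeded,
                           ulong pageSize = 4096,
                           ulong commitSize = 64 * 1024) {
    ulong bytesNeeded = pagesNeeded * pageSize;
    return (bytesNeeded + commitSize - 1) / commitSize * commitSize;
}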
internal static unsafe void VerifyFirst(UIntPtr previousObjectAddr,
                                        UIntPtr objectAddr) {
    UIntPtr page = PageTable.Page(objectAddr);
    if (previousObjectAddr != UIntPtr.Zero) {
        UIntPtr previousPage = PageTable.Page(previousObjectAddr);
        UIntPtr pageCursor = previousPage + 1;
        while (pageCursor < page) {
            uint cursorOffset = PageTable.Extra(pageCursor);
            UIntPtr objAddr = (PageTable.PageAddr(pageCursor) +
                               cursorOffset - OFFSET_SKEW);
            if (!(cursorOffset <= OFFSET_NO_DATA ||
                  BumpAllocator.IsUnusedSpace(objAddr) ||
                  Allocator.IsAlignment(objAddr) ||
                  BumpAllocator.IsRestOfPageZero(objAddr))) {
                VTable.DebugPrint("cursorOffset={0:x} OFFSET_NO_DATA={1:x} objAddr={2:x} unused={3} isalign={4} iszero={5}\n",
                                  __arglist((cursorOffset),
                                            (OFFSET_NO_DATA),
                                            ((long)objAddr),
                                            (BumpAllocator.IsUnusedSpace(objAddr)),
                                            (Allocator.IsAlignment(objAddr)),
                                            (BumpAllocator.IsRestOfPageZero(objAddr))));
            }
            VTable.Assert(cursorOffset <= OFFSET_NO_DATA ||
                          BumpAllocator.IsUnusedSpace(objAddr) ||
                          Allocator.IsAlignment(objAddr) ||
                          BumpAllocator.IsRestOfPageZero(objAddr),
                          "VerifyFirst 1");
            pageCursor++;
        }
    }
    uint offset = PageTable.Extra(page);
    if (offset > OFFSET_NO_DATA) {
        UIntPtr firstAddr = PageTable.PageAddr(page) + offset - OFFSET_SKEW;
        if (!(firstAddr == objectAddr ||
              (firstAddr + UIntPtr.Size == objectAddr &&
               Allocator.IsAlignment(firstAddr)))) {
            VTable.DebugPrint("firstAddr={0:x} objectAddr={1:x} isalign={2}\n",
                              __arglist(((long)firstAddr),
                                        ((long)objectAddr),
                                        (Allocator.IsAlignment(firstAddr))));
        }
        VTable.Assert(firstAddr == objectAddr ||
                      (firstAddr + UIntPtr.Size == objectAddr &&
                       Allocator.IsAlignment(firstAddr)),
                      "VerifyFirst 2");
    }
}
internal static unsafe void SetFirst(UIntPtr newAddr) {
    VTable.Assert(PageTable.IsGcPage(PageTable.Page(newAddr)),
                  "SetFirst on a non-GC page");
    UIntPtr page = PageTable.Page(newAddr);
    UIntPtr offset = newAddr - PageTable.PageAddr(page);
    PageTable.SetExtra(page, unchecked((uint)(offset + OFFSET_SKEW)));
}
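// Illustrative sketch, not runtime code: the page's "extra" word stores the offset
// of the first object within the page plus OFFSET_SKEW, so small values remain
// available to mean "no data recorded" (OFFSET_NO_DATA).  The skew value below is
// an assumption made only for this round-trip example.
const uint SKETCH_OFFSET_SKEW = 8;
static uint EncodeFirstObjectOffset(uint offsetInPage) {
    return offsetInPage + SKETCH_OFFSET_SKEW;
}
static uint DecodeFirstObjectOffset(uint extra) {
    return extra - SKETCH_OFFSET_SKEW;   // meaningful only when extra indicates real data
}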
internal static void ReleaseStandbyPages() {
    while (!pageCache.IsEmpty) {
        UIntPtr pageAddr = pageCache.RemoveHead();
        PageManager.ReleaseUnusedPages(PageTable.Page(pageAddr),
                                       (UIntPtr)1, false);
    }
}
internal unsafe static void MakeZombiePage(UIntPtr page, PageType destGeneration) {
    VTable.Assert(destGeneration >= nurseryGeneration &&
                  destGeneration <= MAX_GENERATION);
    PageType pageType = PageTable.LiveToZombie(destGeneration);
    PageTable.SetType(page, pageType);
}
private UIntPtr FreshAlloc(UIntPtr bytes, uint alignment, Thread currentThread) {
#if SINGULARITY_KERNEL
    Kernel.Waypoint(702);
#endif
    this.Truncate();
    UIntPtr paddedBytes = PageTable.PagePad(bytes + alignment - UIntPtr.Size);
    BaseCollector.IncrementNewBytesSinceGC(paddedBytes);
    UIntPtr pages = PageTable.PageCount(paddedBytes);
    bool fCleanPages = CLEAR_POOL_PAGES();
    // We may eventually want to ask for specific pages
    // between asking if any pages are reusable and asking the
    // OS for any possible page.
    UIntPtr startPage = PageManager.EnsurePages(currentThread, pages,
                                                this.pageType, ref fCleanPages);
    UIntPtr startAddr = PageTable.PageAddr(startPage);
    UIntPtr limitAddr = PageTable.PageAddr(startPage + pages);
    startAddr = Allocator.AlignedAllocationPtr(startAddr, limitAddr, alignment);
    this.allocNew = startAddr;
    this.allocPtr = startAddr + bytes;
    if (fCleanPages) {
        this.zeroedLimit = limitAddr;
    } else {
        Util.MemClear(startAddr, bytes);
        this.zeroedLimit = this.allocPtr;
    }
    this.reserveLimit = limitAddr;
    UIntPtr resultAddr = startAddr + PreHeader.Size;
    InteriorPtrTable.SetFirst(resultAddr);
#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
    if (GC.remsetType == RemSetType.Cards) {
        UIntPtr nextPageAddr = startAddr + PageTable.PageSize;
        VTable.Assert(resultAddr < nextPageAddr);
        if (this.allocPtr > nextPageAddr) {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
            OffsetTable.SetLast(resultAddr);
#endif
        }
    }
#endif
#if SINGULARITY_KERNEL
    Kernel.Waypoint(703);
#endif
    return (resultAddr);
}
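// Illustrative sketch, not runtime code: the reservation above pads the request so
// that a block of <bytes> still fits after up to (alignment - word size) bytes of
// alignment slack, then rounds up to whole pages.  The 8-byte word size and the
// page-rounding expression are assumptions standing in for UIntPtr.Size and
// PageTable.PagePad.
static ulong PaddedRequest(ulong bytes, uint alignment, ulong pageSize) {
    ulong worstCase = bytes + alignment - 8UL;            // worst-case alignment slack
    return (worstCase + pageSize - 1) & ~(pageSize - 1);  // round up to a page boundary
}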
void VisitObjects(ObjectLayout.ObjectVisitor objectVisitor,
                  UIntPtr lowAddr, UIntPtr highAddr) {
    VTable.Assert(PageTable.PageAligned(lowAddr));
    VTable.Assert(PageTable.PageAligned(highAddr));
    UIntPtr lowPage = PageTable.Page(lowAddr);
    UIntPtr highPage = PageTable.Page(highAddr);
    SegregatedFreeList.VisitObjects(lowPage, highPage, objectVisitor);
}
private static void Clear(UIntPtr startAddr, UIntPtr regionSize) {
    VTable.Assert(PageTable.PageAligned(startAddr));
    VTable.Assert(PageTable.PageAligned(regionSize));
    MemoryManager.IgnoreMemoryContents(startAddr, regionSize);
    MarkUnusedPages(Thread.CurrentThread, PageTable.Page(startAddr),
                    PageTable.PageCount(regionSize), false);
}
internal static unsafe void Initialize(UnusedBlockHeader *header, UIntPtr count) {
    header->magic = (UIntPtr)magicNumber;
    header->next = null;
    header->prev = null;
    header->count = count;
    UnusedBlockHeader *tailBlock = (UnusedBlockHeader *)
        (((UIntPtr)header) + PageTable.RegionSize(count - 1));
    tailBlock->curr = header;
}
internal override void VisitObjects(ObjectLayout.ObjectVisitor objVisitor,
                                    UIntPtr lowAddr, UIntPtr highAddr) {
    UIntPtr lowPage = PageTable.Page(lowAddr);
    UIntPtr highPage = PageTable.Page(highAddr);
    SegregatedFreeList.VisitObjects(lowPage, highPage, objVisitor);
}
internal void ProcessPinnedPages(ReferenceVisitor ptrVisitor) {
    if (pinnedPageList == null || pinnedPageList.Count == 0) {
        return;
    }
    pinnedPageList.Sort(comparer);
    int limit = pinnedPageList.Count;
    for (int i = 0; i < limit; i++) {
        UIntPtr page = (UIntPtr) pinnedPageList[i];
        PageType fromSpaceType = PageTable.Type(page);
        VTable.Assert(PageTable.IsZombiePage(fromSpaceType),
                      "Semispace:RegisterPinnedReference:2");
        PageType toSpaceType = PageTable.ZombieToLive(fromSpaceType);
        PageTable.SetType(page, toSpaceType);
    }
    int pageIndex = 0;
    while (pageIndex < limit) {
        UIntPtr startPage = (UIntPtr) pinnedPageList[pageIndex];
        UIntPtr endPage = startPage + 1;
        pageIndex++;
        while (pageIndex < limit &&
               (UIntPtr) pinnedPageList[pageIndex] == endPage) {
            pageIndex++;
            endPage++;
        }
        UIntPtr objectAddr = FirstPinnedObjectAddr(startPage);
        UIntPtr pastAddr = PostPinnedObjectAddr(endPage);
        while (objectAddr < pastAddr) {
            if (Allocator.IsAlignment(objectAddr)) {
                objectAddr += UIntPtr.Size;
            } else if (BumpAllocator.IsUnusedSpace(objectAddr)) {
                objectAddr = (PageTable.PagePad(objectAddr) + PreHeader.Size);
            } else {
                Object obj = Magic.fromAddress(objectAddr);
                objectAddr += ptrVisitor.VisitReferenceFields(obj);
            }
        }
    }
}
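// Illustrative sketch, not runtime code: the inner loop above coalesces the sorted
// pinned-page list into maximal runs of consecutive pages before scanning the
// objects on them.  A stand-alone model of that coalescing:
using System.Collections.Generic;

static class PinnedPageRunSketch {
    static IEnumerable<(ulong start, ulong end)> Runs(ulong[] sortedPages) {
        int i = 0;
        while (i < sortedPages.Length) {
            ulong start = sortedPages[i];
            ulong end = start + 1;
            i++;
            while (i < sortedPages.Length && sortedPages[i] == end) {
                end++;
                i++;
            }
            yield return (start, end);          // [start, end) is one contiguous run
        }
    }
}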
// Finds the object base for an interior pointer.  In the case of a
// pointer to the tail of an object and the head of another, it will
// return the former object (the one whose tail we point at).  To
// get the base pointer for a pointer into the pre-header, you should
// add PreHeader.Size before calling this.
internal static UIntPtr Find(UIntPtr addr) {
    UIntPtr page = PageTable.Page(addr);
    UIntPtr currAddr = InteriorPtrTable.First(page);
    // Look out for the unused space token: this page may not
    // have been completely allocated: its "first" object might not
    // be valid.
    if (BumpAllocator.IsUnusedSpace(currAddr) || currAddr > addr) {
        // Back up to the previous object.  Should be fast
        // since First updated the InteriorPtrTable entries.
        currAddr = Before(PageTable.PageAddr(page));
    }
    VTable.Assert(!BumpAllocator.IsUnusedSpace(currAddr),
                  "InteriorPtrTable.Find 0");
    VTable.Assert(currAddr <= addr, "InteriorPtrTable.Find 1");
    while (true) {
        // Watch out for alignment padding; advance the pointer if
        // it points to a syncblock index rather than a vtable
        // pointer.  Note that we must do this before scrolling,
        // since the page table value was set before we knew the
        // required alignment.
        if (Allocator.IsAlignment(currAddr)) {
            currAddr += UIntPtr.Size;
        } else if (BumpAllocator.IsUnusedSpace(currAddr)) {
            UIntPtr postAddr = PageTable.PagePad(currAddr) + PreHeader.Size;
            VTable.Assert(postAddr <= addr, "InteriorPtrTable.Find 2");
            currAddr = postAddr;
        } else {
            VTable.Assert(currAddr <= addr, "InteriorPtrTable.Find 3");
            UIntPtr size = ObjectSize(currAddr);
            VTable.Assert(size >= UIntPtr.Zero, "InteriorPtrTable.Find 4");
            UIntPtr postAddr = currAddr + size;
            if (postAddr > addr) {
                return (currAddr);
            } else {
                currAddr = postAddr;
            }
        }
    }
}
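// Illustrative sketch, not runtime code: what Find computes, modeled on flat arrays
// of object start addresses and sizes instead of real heap pages, and ignoring the
// alignment and unused-space tokens the loop above has to skip.  It assumes addr
// lies inside one of the listed objects and walks forward until the object whose
// extent passes addr.
static ulong FindBase(ulong[] starts, ulong[] sizes, ulong addr) {
    for (int i = 0; ; i++) {
        if (starts[i] + sizes[i] > addr) {
            return starts[i];                   // addr falls within this object
        }
    }
}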
internal override unsafe void Visit(UIntPtr *loc) {
    // <loc> is the address of a reference location
    // in the static data area.
    UIntPtr page = PageTable.Page((UIntPtr)loc);
    PageType pageType = PageTable.Type(page);
    VTable.Assert(pageType == PageType.NonGC,
                  @"pageType == PageType.NonGC");
    PtrCount++;
}
private UIntPtr growFreeList(UIntPtr blockSize, Thread t) {
    UIntPtr pageCount = PageTable.PageCount(blockSize);
    bool fCleanPages = true;
    UIntPtr startPage = PageManager.EnsurePages(t, pageCount, PageType.Owner0,
                                                ref fCleanPages);
    UIntPtr newBlockSize = PageTable.RegionSize(pageCount);
    UIntPtr newBlockAddr = PageTable.PageAddr(startPage);
    return (FreeBlock(newBlockAddr, newBlockSize));
}
internal static void Initialize(UIntPtr systemMemorySize) {
    allocPtr = MemoryManager.AllocateMemory(systemMemorySize);
    limitPtr = allocPtr + systemMemorySize;
    if (GC.gcType != GCType.NullCollector) {
        PageManager.SetStaticDataPages(allocPtr, systemMemorySize);
#if !SINGULARITY
        PageTable.SetProcess(PageTable.Page(allocPtr),
                             PageTable.PageCount(systemMemorySize));
#endif
    }
}
internal static void Truncate() {
    UIntPtr allocLimit = PageTable.PagePad(allocPtr);
    UIntPtr unusedSize = limitPtr - allocLimit;
    if (GC.gcType != GCType.NullCollector) {
        PageManager.ReleaseUnusedPages(PageTable.Page(allocLimit),
                                       PageTable.PageCount(unusedSize),
                                       true);
    }
    limitPtr = allocLimit;
}
internal static unsafe bool ShouldPin(UIntPtr objAddr) {
    UIntPtr page = PageTable.Page(objAddr);
    if (PageTable.Type(page) != SegregatedFreeList.SMALL_OBJ_PAGE) {
        // in practice this won't be reached
        return (true);
    }
    SegregatedFreeList.PageHeader *ph =
        (SegregatedFreeList.PageHeader *) PageTable.PageAddr(page);
    return (new CoCoPageUserValue(ph->userValue).Pinned);
}
internal static void MarkIfNecessaryInline(UIntPtr value, Thread thread) {
#if !SINGULARITY || CONCURRENT_MS_COLLECTOR
    UIntPtr marked = markedColor;
    if (ThreadHeaderQueue.GcMark(Magic.fromAddress(value)) != marked) {
        VTable.Assert(PageTable.IsMyPage(PageTable.Page(value)));
        UIntPtr unmarked = unmarkedColor;
        ThreadHeaderQueue.Push(thread, value, marked, unmarked);
    }
#endif // CONCURRENT_MS_COLLECTOR
}
private unsafe void SkipDestinationAreas(ref UIntPtr destPage,
                                         UIntPtr destCursor,
                                         ref UIntPtr destLimit,
                                         UIntPtr sourceCursor) {
    UIntPtr cursorPage = PageTable.Page(destCursor);
    UIntPtr sourcePage = PageTable.Page(sourceCursor);
    if (cursorPage != sourcePage) {
        UIntPtr destPageLimit = PageTable.PagePad(destCursor);
        if (destPageLimit != destCursor) {
            cursorPage++;
        }
        VTable.Assert(PageTable.PageAligned(destLimit));
        UIntPtr limitPage = PageTable.Page(destLimit);
        while (destPage < sourcePage) {
            if (cursorPage < limitPage) {
                this.RegisterSkippedPages(cursorPage, limitPage);
            }
            do {
                destPage++;
            } while (!IsMyZombiePage(destPage));
            cursorPage = destPage;
            do {
                destPage++;
            } while (IsMyZombiePage(destPage));
            limitPage = destPage;
        }
        destLimit = PageTable.PageAddr(limitPage);
        VTable.Assert(destPage > sourcePage);
        VTable.Assert(cursorPage <= sourcePage);
        if (cursorPage < sourcePage) {
            this.RegisterSkippedPages(cursorPage, sourcePage);
            cursorPage = sourcePage;
        }
        InteriorPtrTable.ClearFirst(cursorPage, destPage);
        InteriorPtrTable.SetFirst(sourceCursor + PreHeader.Size);
        if (GC.remsetType == RemSetType.Cards) {
            OffsetTable.ClearLast(PageTable.PageAddr(cursorPage),
                                  PageTable.PageAddr(destPage) - 1);
        }
    }
}