// Visits a reference slot during the update phase of a collection.
// Slots that point into zombie (condemned from-space) pages are either
// threaded onto the target object's update list (if the object was
// marked) or zeroed (if the object died).  Slots pointing anywhere
// else are left untouched.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        // Not a condemned object: the target must still be a sane page.
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
        return;
    }
    UIntPtr vtableAddr = Allocator.GetObjectVTable(addr);
    // The low vtable bit appears to act as the mark bit here; when set,
    // the vtable word is the head of a linked list of slots awaiting
    // update — TODO(review): confirm against the marking phase.
    if ((vtableAddr & 0x1) == 0x1) {
        // Link this field to be updated: the slot inherits the old list
        // head, and the (tagged) slot address becomes the new head.
        *loc = vtableAddr;
        Allocator.SetObjectVTable(addr, (UIntPtr)loc + 1);
    } else {
        // Zero the reference (not marked)
        *loc = UIntPtr.Zero;
    }
}
// Clears reference slots whose targets did not survive the collection.
// Slots referring outside our heap, or to non-GC / stack memory, are
// left alone.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    // References outside our memory area are none of our business.
    if (PageTable.IsForeignAddr(target)) {
        return;
    }
    UIntPtr targetPage = PageTable.Page(target);
    PageType targetType = PageTable.Type(targetPage);
    if (!PageTable.IsGcPage(targetType)) {
        VTable.Assert((PageTable.IsNonGcPage(targetType) &&
                       PageTable.IsMyPage(targetPage)) ||
                      PageTable.IsStackPage(targetType));
        return;
    }
    VTable.Assert(PageTable.IsMyPage(targetPage));
    Object targetObj = Magic.fromAddress(target);
    VTable.Assert(IsPossiblyObject(targetObj), "Bad Object/VTable");
    // An unmarked object was not live; drop the reference.
    if (targetObj.GcMark() == UIntPtr.Zero) {
        *loc = UIntPtr.Zero;
    }
}
// Depth-first marking visitor that records DFS discovery/finishing
// timestamps for each newly marked object.
internal override unsafe void Visit(UIntPtr *loc) {
    UIntPtr objAddr = *loc;
    UIntPtr page = PageTable.Page(objAddr);
    if (!PageTable.IsGcPage(page)) {
        PageType pageType = PageTable.Type(page);
        VTable.Assert(pageType == PageType.NonGC ||
                      pageType == PageType.Stack,
                      @"pageType == PageType.NonGC || pageType == PageType.Stack");
        return;
    }
    Object obj = Magic.fromAddress(objAddr);
    // GcMark returns true only on the first, color-changing visit.
    if (!obj.GcMark((UIntPtr)1)) {
        return;
    }
    // Stamp discovery time, recurse into the vtable and the object's
    // reference fields, then stamp finishing time.
    this.time = this.time + 1;
    setDfsDiscoveryTime(obj, this.time);
    UIntPtr vtablePtr = Magic.addressOf(obj.vtable);
    this.Visit(&vtablePtr);
    this.VisitReferenceFields(obj);
    this.time = this.time + 1;
    setDfsFinishingTime(obj, this.time);
}
// Marks the object referenced from <loc> and threads the slot onto the
// object's pending-update list.  Only references into zombie
// (condemned) pages are processed.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
        return;
    }
    UIntPtr vtableAddr = Allocator.GetObjectVTable(addr);
    // Mark object
    if (vtableAddr == UIntPtr.Zero) {
        // A null vtable means heap corruption; die loudly.
        VTable.DebugPrint("Found null vtable in MarkReference (loc = 0x{0:x8}, addr = 0x{1:x8})\n", __arglist(((UIntPtr)loc), addr));
        VTable.NotReached();
    }
    // Thread this slot onto the object's update list: the slot takes
    // over the old vtable word and the tagged slot address replaces it.
    *loc = vtableAddr;
    Allocator.SetObjectVTable(addr, (UIntPtr)loc + 1);
    // If first visit to the object, schedule visit of fields.
    // NOTE(review): bit 0 of the old vtable word distinguishes first
    // visits; the ~2U mask presumably strips a secondary tag bit before
    // handing the vtable pointer to MarkVisit — confirm the bit layout.
    if ((vtableAddr & 0x1) == 0) {
        MarkVisit(addr, vtableAddr & (UIntPtr) ~2U);
    }
}
// Drains the standby page cache, returning each cached page to the
// page manager one page at a time.
internal static void ReleaseStandbyPages() {
    for (;;) {
        if (pageCache.IsEmpty) {
            break;
        }
        UIntPtr cachedAddr = pageCache.RemoveHead();
        PageManager.ReleaseUnusedPages(PageTable.Page(cachedAddr),
                                       (UIntPtr) 1, false);
    }
}
// Returns the generation of <obj>.  The page type of the page holding
// the object doubles as its generation number.
internal override int GetGeneration(Object obj) {
    UIntPtr objPage = PageTable.Page(Magic.addressOf(obj));
    return (int) PageTable.Type(objPage);
}
// Resolves a possibly-interior pointer into one of our GC pages to its
// containing object and forwards it to the mark-and-process visitor.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr refAddr = *loc;
    // Ignore pointers out of our memory area.
    if (PageTable.IsForeignAddr(refAddr)) {
        return;
    }
    UIntPtr refPage = PageTable.Page(refAddr);
    if (PageTable.IsMyGcPage(refPage)) {
        // Interior pointer: find the object's base address and mark it.
        UIntPtr objectAddr = SegregatedFreeList.Find(refAddr);
        markAndProcessReferenceVisitor.Visit(&objectAddr);
        return;
    }
    PageType refType = PageTable.Type(refPage);
#if SINGULARITY_PROCESS
    // We have to allow reference pointers to the
    // ThreadContext, which lives in the kernel space.
    VTable.Assert((PageTable.IsNonGcPage(refType) &&
                   PageTable.IsMyPage(refPage)) ||
                  PageTable.IsStackPage(refType) ||
                  PageTable.IsSharedPage(refType) ||
                  (PageTable.IsGcPage(refType) &&
                   PageTable.IsKernelPage(refPage)));
#else
    VTable.Assert((PageTable.IsNonGcPage(refType) &&
                   PageTable.IsMyPage(refPage)) ||
                  PageTable.IsStackPage(refType) ||
                  PageTable.IsSharedPage(refType));
#endif
}
// True if card number <c> lies on one of this process's GC pages.
internal static bool IsMyGcCard(UIntPtr c) {
    VTable.Assert(IsValidCardNo(c), "IsMyGcCard invalid");
    return PageTable.IsMyGcPage(PageTable.Page(CardAddr(c)));
}
// Sanity-checks the delayed-deallocation list: every entry must live on
// a GC page and have a zero reference count.  REF_STATE is temporarily
// bumped as a visited marker, so a cycle in the list shows up as a
// "non-zero reference count" on the second encounter; a final pass
// undoes the markers.
protected static void deallocationListChecker() {
    Object node = delayedDeallocationList;
    while (node != null) {
        UIntPtr nodeAddr = Magic.addressOf(node);
        if (!PageTable.IsGcPage(PageTable.Page(nodeAddr))) {
            VTable.DebugPrint("Non-GC memory for freeing!\n");
            VTable.DebugBreak();
        }
        uint state = node.REF_STATE;
        if ((state & RSMasks.refCount) != 0) {
            VTable.DebugPrint("Non-zero reference count!\n");
            VTable.DebugBreak();
        }
        node.REF_STATE = state + 1;
        node = getNextLink(node);
    }
    // Second pass: restore the original reference counts.
    for (node = delayedDeallocationList;
         node != null;
         node = getNextLink(node)) {
        node.REF_STATE--;
    }
}
// Marks the referenced object and, on the first visit, queues it so
// that its own reference fields get scanned.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    // References outside our memory area are skipped.
    if (PageTable.IsForeignAddr(target)) {
        return;
    }
    UIntPtr targetPage = PageTable.Page(target);
    PageType targetType = PageTable.Type(targetPage);
    if (!PageTable.IsGcPage(targetType)) {
        VTable.Assert((PageTable.IsNonGcPage(targetType) &&
                       PageTable.IsMyPage(targetPage)) ||
                      PageTable.IsStackPage(targetType));
        return;
    }
    VTable.Assert(PageTable.IsMyPage(targetPage));
    Object targetObj = Magic.fromAddress(target);
    VTable.Assert(IsPossiblyObject(targetObj), "Bad object/vtable");
    // A color change means this is the first visit, so the objects
    // reachable from its fields still need marking.
    if (targetObj.GcMark((UIntPtr)1)) {
        workList.Write(target);
    }
}
// Forward-only reference visitor: replaces references into zombie
// pages with the object's forwarding pointer, or with null if the
// object was never copied (i.e. it died).
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr oldAddr = *loc;
    UIntPtr oldPage = PageTable.Page(oldAddr);
    PageType oldType = PageTable.Type(oldPage);
    if (!PageTable.IsZombiePage(oldType)) {
        VTable.Assert(PageTable.IsGcPage(oldType) ||
                      PageTable.IsNonGcPage(oldType) ||
                      PageTable.IsStackPage(oldType) ||
                      PageTable.IsSharedPage(oldType),
                      "Semispace:ForwardOnlyReferenceVisitor");
        return;
    }
    // A forwarded object's vtable word holds its new address, which
    // must lie in the live generation corresponding to this zombie page.
    PageType targetGen = PageTable.ZombieToLive(oldType);
    UIntPtr vtableAddr = Allocator.GetObjectVTable(oldAddr);
    bool wasForwarded =
        PageTable.Type(PageTable.Page(vtableAddr)) == targetGen;
    *loc = wasForwarded ? vtableAddr : UIntPtr.Zero;
}
// Validates the invariants of an unused-block header: magic number,
// positive count, doubly-linked list consistency, tail back-pointer,
// and (in slow-debug builds) the page-table state of every page in
// the region.
internal static unsafe void Verify(UnusedBlockHeader *header) {
    VTable.Assert(header->magic == (UIntPtr)magicNumber,
                  "Bad magic number in UnusedBlockHeader");
    VTable.Assert(header->count > 0, "Count <= 0 in UnusedBlockHeader");
    VTable.Assert(header->prev->next == header,
                  "UnusedBlockHeader not linked properly (1)");
    if (header->next != null) {
        VTable.Assert(header->next->prev == header,
                      "UnusedBlockHeader not linked properly (2)");
    }
    // The last block of the region must point back at this header.
    UIntPtr blockCount = header->count;
    UnusedBlockHeader *lastBlock = (UnusedBlockHeader *)
        (((UIntPtr)header) + PageTable.RegionSize(blockCount - 1));
    VTable.Assert(lastBlock->curr == header,
                  "UnusedBlockHeader tail->curr is incorrect");
    if (PageManager.SlowDebug) {
        // Exhaustive per-page check, only in slow-debug builds.
        UIntPtr firstPage = PageTable.Page((UIntPtr)header);
        for (UIntPtr n = UIntPtr.Zero; n < blockCount; n++) {
            VTable.Assert(PageTable.IsUnusedPage(firstPage + n) &&
                          PageTable.IsMyPage(firstPage + n),
                          "Incorrect page in unused region");
        }
    }
}
// Increments the shadow (backup) reference count of the object that
// <loc> points into, and queues the object for scanning on its first
// marking.
internal override unsafe void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    if (!PageTable.IsGcPage(page)) {
        PageType pageType = PageTable.Type(page);
        VTable.Assert(pageType == PageType.NonGC ||
                      pageType == PageType.Stack,
                      @"pageType == PageType.NonGC || pageType == PageType.Stack");
        return;
    }
    // Resolve a possibly-interior pointer to the object's base address.
    UIntPtr baseAddr = SegregatedFreeList.Find(addr);
    Object target = Magic.fromAddress(baseAddr);
    setBackupRefcount(target, getBackupRefcount(target) + 1);
    if (target.GcMark((UIntPtr)1)) {
        this.workList.Write(baseAddr);
    }
}
// Filters a reference slot before handing it to the wrapped visitor.
// Slots in NonGC memory are always visited; slots on stack pages are
// visited only while untagged (low two bits clear) and are then tagged
// by setting the low bit so they are not processed again.
//
// BUG FIX: the slot value was previously narrowed through a uint cast
// ("uint addr = (uint)*loc") and written back as such, which would
// destroy the upper 32 bits of the pointer on a 64-bit target.  The
// value is now kept as a full UIntPtr throughout; on 32-bit targets
// the behavior is unchanged.
internal override unsafe void Visit(UIntPtr *loc) {
    UIntPtr pageLoc = PageTable.Page((UIntPtr)loc);
    PageType pageType = PageTable.Type(pageLoc);
    if (pageType != PageType.NonGC && pageType != PageType.Stack) {
        VTable.Assert(PageTable.IsGcPage(pageLoc),
                      @"PageTable.IsGcPage(pageLoc)");
        return;
    }
    UIntPtr addr = *loc;
    if (pageType == PageType.NonGC || (addr & 0x03) == 0) {
        this.visitor.Visit(loc);
    }
    if (pageType == PageType.Stack) {
        // Tag the stack slot (preserving the original ordering: the
        // pre-visit value, with the low bit set, is written back).
        *loc = addr | 0x01;
    }
}
// Marks the pages covering [startAddr, startAddr + size) as NonGC
// static-data pages, clipping the range to the region covered by the
// page table on Singularity.
//
// BUG FIX: when the range overhung the top of the page-table region,
// the code computed "size -= (PageTable.limitAddr - endAddr)".  In
// that branch endAddr > limitAddr, so the unsigned subtraction wraps
// and size is corrupted (effectively grown) instead of being reduced
// by the overhang.  The operands are now in the correct order.
internal static unsafe void SetStaticDataPages(UIntPtr startAddr, UIntPtr size) {
#if SINGULARITY
    // It's perfectly fine to be given memory outside the page table
    // region, so long as we intend to treat that memory as NonGC anyway.
    if (startAddr < PageTable.baseAddr) {
        if (startAddr + size < PageTable.baseAddr) {
            // Nothing to do: all pages lie below the region covered by
            // the page table.
            return;
        }
        // The range overlaps the covered region; clip the bottom.
        size -= (PageTable.baseAddr - startAddr);
        startAddr = PageTable.baseAddr;
    }
    UIntPtr endAddr = startAddr + size;
    if (endAddr > PageTable.limitAddr) {
        if (startAddr > PageTable.limitAddr) {
            // Nothing to do: all pages lie above the region covered by
            // the page table.
            return;
        }
        // The range overlaps the covered region; clip the top.
        size -= (endAddr - PageTable.limitAddr);
    }
#endif
    UIntPtr startIndex = PageTable.Page(startAddr);
    UIntPtr pageCount = PageTable.PageCount(size);
    PageTable.SetType(startIndex, pageCount, PageType.NonGC);
}
// Slides heap objects to their new (lower) locations as recorded in
// the relocation queue.  Each queue entry is a triple
// (sourceAddress, destinationAddress, runLength).  Gaps between
// consecutive destination runs are filled with unused-space markers,
// alignment tokens, or zeroes so that heap parsing stays consistent,
// and InteriorPtrTable is updated when a run starts a new page.
private unsafe void CompactHeapObjects(UIntPtr previousEnd) {
    while (!this.relocationQueue.IsEmpty) {
        UIntPtr sourceAddress = this.relocationQueue.Read();
        UIntPtr destinationAddress = this.relocationQueue.Read();
        UIntPtr runLength = this.relocationQueue.Read();
        if (previousEnd != destinationAddress) {
            // There is a gap between the previous run and this one.
            VTable.Assert(previousEnd < destinationAddress);
            if (PageTable.Page(destinationAddress) !=
                PageTable.Page(previousEnd + PreHeader.Size)) {
                // The new run starts on a different page.
                if (!PageTable.PageAligned(previousEnd)) {
                    // Terminate the old page: write the unused-space
                    // marker and zero the rest of the page.
                    UIntPtr pageLimit = PageTable.PagePad(previousEnd);
                    BumpAllocator.WriteUnusedMarker(previousEnd);
                    previousEnd += UIntPtr.Size;
                    Util.MemClear(previousEnd, pageLimit - previousEnd);
                }
                if (!PageTable.PageAligned(destinationAddress)) {
                    // This only happens before pinned objects and
                    // large objects
                    UIntPtr start = PageTable.PageAlign(destinationAddress);
                    VTable.Assert(previousEnd <= start);
                    while (start < destinationAddress) {
                        Allocator.WriteAlignment(start);
                        start += UIntPtr.Size;
                    }
                }
                // Register the first object on the new page.
                UIntPtr objAddr = destinationAddress + PreHeader.Size;
                InteriorPtrTable.SetFirst(objAddr);
            } else {
                // Same page: pad the gap with alignment tokens.
                VTable.Assert(previousEnd < destinationAddress);
                UIntPtr start = previousEnd;
                while (start < destinationAddress) {
                    Allocator.WriteAlignment(start);
                    start += UIntPtr.Size;
                }
            }
        }
        // Slide the object run into place.
        Util.MemCopy(destinationAddress, sourceAddress, runLength);
        previousEnd = destinationAddress + runLength;
    }
    // Zero out the end of the allocation page
    if (!PageTable.PageAligned(previousEnd)) {
        UIntPtr pageLimit = PageTable.PagePad(previousEnd);
        Util.MemClear(previousEnd, pageLimit - previousEnd);
    }
    this.relocationQueue.Cleanup(true);
}
// Debug check that the InteriorPtrTable "first object" entries are
// consistent for the pages between previousObjectAddr and objectAddr:
// intermediate pages must have no real first-object data (or only
// markers/alignment/zeroes), and objectAddr's page must record
// objectAddr itself (possibly preceded by one alignment word).
//
// BUG FIX: the second assertion hard-coded "firstAddr + 4" while the
// diagnostic check directly above it used "firstAddr + UIntPtr.Size";
// on a 64-bit target the two disagree.  The assertion now uses
// UIntPtr.Size, matching the diagnostic.
internal static unsafe void VerifyFirst(UIntPtr previousObjectAddr, UIntPtr objectAddr) {
    UIntPtr page = PageTable.Page(objectAddr);
    if (previousObjectAddr != UIntPtr.Zero) {
        // Check every page strictly between the two objects.
        UIntPtr previousPage = PageTable.Page(previousObjectAddr);
        UIntPtr pageCursor = previousPage + 1;
        while (pageCursor < page) {
            uint cursorOffset = PageTable.Extra(pageCursor);
            UIntPtr objAddr = (PageTable.PageAddr(pageCursor) +
                               cursorOffset - OFFSET_SKEW);
            if (!(cursorOffset <= OFFSET_NO_DATA ||
                  BumpAllocator.IsUnusedSpace(objAddr) ||
                  Allocator.IsAlignment(objAddr) ||
                  BumpAllocator.IsRestOfPageZero(objAddr))) {
                VTable.DebugPrint
                    ("cursorOffset={0:x} OFFSET_NO_DATA={1:x} objAddr={2:x} unused={3} isalign={4} iszero={5}\n",
                     __arglist((cursorOffset),
                               (OFFSET_NO_DATA),
                               ((long)objAddr),
                               (BumpAllocator.IsUnusedSpace(objAddr)),
                               (Allocator.IsAlignment(objAddr)),
                               (BumpAllocator.IsRestOfPageZero(objAddr))));
            }
            VTable.Assert(cursorOffset <= OFFSET_NO_DATA ||
                          BumpAllocator.IsUnusedSpace(objAddr) ||
                          Allocator.IsAlignment(objAddr) ||
                          BumpAllocator.IsRestOfPageZero(objAddr),
                          "VerifyFirst 1");
            pageCursor++;
        }
    }
    // The target page's entry (if any) must name objectAddr, or an
    // alignment word immediately preceding it.
    uint offset = PageTable.Extra(page);
    if (offset > OFFSET_NO_DATA) {
        UIntPtr firstAddr = PageTable.PageAddr(page) + offset - OFFSET_SKEW;
        if (!(firstAddr == objectAddr ||
              (firstAddr + UIntPtr.Size == objectAddr &&
               Allocator.IsAlignment(firstAddr)))) {
            VTable.DebugPrint
                ("firstAddr={0:x} objectAddr={1:x} isalign={2}\n",
                 __arglist(((long)firstAddr),
                           ((long)objectAddr),
                           (Allocator.IsAlignment(firstAddr))));
        }
        VTable.Assert(firstAddr == objectAddr ||
                      (firstAddr + UIntPtr.Size == objectAddr &&
                       Allocator.IsAlignment(firstAddr)),
                      "VerifyFirst 2");
    }
}
// Records <newAddr> as the first object on its page, storing its
// page-relative offset (skewed by OFFSET_SKEW) in the page's extra
// data.
internal static unsafe void SetFirst(UIntPtr newAddr) {
    UIntPtr page = PageTable.Page(newAddr);
    VTable.Assert(PageTable.IsGcPage(page), "SetFirst on a non-GC page");
    UIntPtr pageOffset = newAddr - PageTable.PageAddr(page);
    PageTable.SetExtra(page, unchecked ((uint)(pageOffset + OFFSET_SKEW)));
}
// Applies <objectVisitor> to every object in the page-aligned address
// range [lowAddr, highAddr).
void VisitObjects(ObjectLayout.ObjectVisitor objectVisitor, UIntPtr lowAddr, UIntPtr highAddr) {
    VTable.Assert(PageTable.PageAligned(lowAddr));
    VTable.Assert(PageTable.PageAligned(highAddr));
    SegregatedFreeList.VisitObjects(PageTable.Page(lowAddr),
                                    PageTable.Page(highAddr),
                                    objectVisitor);
}
// Discards the contents of a page-aligned region and marks its pages
// as unused (dirty).
private static void Clear(UIntPtr startAddr, UIntPtr regionSize) {
    VTable.Assert(PageTable.PageAligned(startAddr));
    VTable.Assert(PageTable.PageAligned(regionSize));
    // Tell the memory manager we no longer care what these pages hold.
    MemoryManager.IgnoreMemoryContents(startAddr, regionSize);
    MarkUnusedPages(Thread.CurrentThread,
                    PageTable.Page(startAddr),
                    PageTable.PageCount(regionSize),
                    false);
}
// Applies <objVisitor> to every object in [lowAddr, highAddr).
internal override void VisitObjects (ObjectLayout.ObjectVisitor objVisitor, UIntPtr lowAddr, UIntPtr highAddr) {
    UIntPtr firstPage = PageTable.Page(lowAddr);
    UIntPtr pastPage = PageTable.Page(highAddr);
    SegregatedFreeList.VisitObjects(firstPage, pastPage, objVisitor);
}
// Counts reference locations in the static data area.  <loc> is the
// address of a reference slot; it must lie on a NonGC page.
internal override unsafe void Visit(UIntPtr *loc) {
    PageType pageType = PageTable.Type(PageTable.Page((UIntPtr)loc));
    VTable.Assert(pageType == PageType.NonGC,
                  @"pageType == PageType.NonGC");
    PtrCount++;
}
// Finds the object base for an interior pointer. In the case of a
// pointer to the tail of an object and the head of another, it will
// return the former object (the one whose tail we point at). To
// get the base pointer for a pointer into the pre-header, you should
// add PreHeader.Size before calling this.
internal static UIntPtr Find(UIntPtr addr) {
    UIntPtr page = PageTable.Page(addr);
    UIntPtr currAddr = InteriorPtrTable.First(page);
    // Look out for the unused space token: this page may not
    // have been completely allocated: its "first" object might not
    // be valid.
    if (BumpAllocator.IsUnusedSpace(currAddr) || currAddr > addr) {
        // Back up to the previous object. Should be fast
        // since First updated the InteriorPtrTable entries.
        currAddr = Before(PageTable.PageAddr(page));
    }
    VTable.Assert(!BumpAllocator.IsUnusedSpace(currAddr),
                  "InteriorPtrTable.Find 0");
    VTable.Assert(currAddr <= addr, "InteriorPtrTable.Find 1");
    // Walk forward object by object until we find the one whose
    // extent covers addr.
    while (true) {
        // Watch out for alignment padding; advance the pointer if
        // it points to a syncblock index rather than a vtable
        // pointer. Note that we must do this before scrolling,
        // since the page table value was set before we knew the
        // required alignment.
        if (Allocator.IsAlignment(currAddr)) {
            currAddr += UIntPtr.Size;
        } else if (BumpAllocator.IsUnusedSpace(currAddr)) {
            // Skip the unused tail of this page and resume at the
            // first object slot of the next page.
            UIntPtr postAddr = PageTable.PagePad(currAddr) + PreHeader.Size;
            VTable.Assert(postAddr <= addr, "InteriorPtrTable.Find 2");
            currAddr = postAddr;
        } else {
            VTable.Assert(currAddr <= addr, "InteriorPtrTable.Find 3");
            UIntPtr size = ObjectSize(currAddr);
            // NOTE(review): size is unsigned, so this assert is
            // vacuously true; presumably "size > 0" was intended.
            VTable.Assert(size >= UIntPtr.Zero, "InteriorPtrTable.Find 4");
            UIntPtr postAddr = currAddr + size;
            if (postAddr > addr) {
                // addr falls within [currAddr, postAddr): found it.
                return(currAddr);
            } else {
                currAddr = postAddr;
            }
        }
    }
}
// Reserves one contiguous chunk of memory for static data and, unless
// running under the null collector, registers it with the page system.
internal static void Initialize(UIntPtr systemMemorySize) {
    allocPtr = MemoryManager.AllocateMemory(systemMemorySize);
    limitPtr = allocPtr + systemMemorySize;
    if (GC.gcType == GCType.NullCollector) {
        return;
    }
    PageManager.SetStaticDataPages(allocPtr, systemMemorySize);
#if !SINGULARITY
    PageTable.SetProcess(PageTable.Page(allocPtr),
                         PageTable.PageCount(systemMemorySize));
#endif
}
// Gives back the unused tail of the static data area: rounds the bump
// pointer up to a page boundary and releases every page above it.
internal static void Truncate() {
    UIntPtr newLimit = PageTable.PagePad(allocPtr);
    UIntPtr surplus = limitPtr - newLimit;
    if (GC.gcType != GCType.NullCollector) {
        PageManager.ReleaseUnusedPages(PageTable.Page(newLimit),
                                       PageTable.PageCount(surplus),
                                       true);
    }
    limitPtr = newLimit;
}
// Decides whether the object at <objAddr> must be treated as pinned.
// Non-small-object pages are always pinned; small-object pages carry
// the answer in their page header's user value.
internal static bool ShouldPin(UIntPtr objAddr) {
    UIntPtr page = PageTable.Page(objAddr);
    if (PageTable.Type(page) != SegregatedFreeList.SMALL_OBJ_PAGE) {
        // in practice this won't be reached
        return true;
    }
    SegregatedFreeList.PageHeader *header =
        (SegregatedFreeList.PageHeader *) PageTable.PageAddr(page);
    CoCoPageUserValue userValue = new CoCoPageUserValue(header->userValue);
    return userValue.Pinned;
}
// Pushes the object at <value> onto the thread's marking queue if its
// mark does not already carry the current marked color.
internal static void MarkIfNecessaryInline(UIntPtr value, Thread thread) {
#if !SINGULARITY || CONCURRENT_MS_COLLECTOR
    UIntPtr markedValue = markedColor;
    if (ThreadHeaderQueue.GcMark(Magic.fromAddress(value)) == markedValue) {
        return;
    }
    VTable.Assert(PageTable.IsMyPage(PageTable.Page(value)));
    UIntPtr unmarkedValue = unmarkedColor;
    ThreadHeaderQueue.Push(thread, value, markedValue, unmarkedValue);
#endif // CONCURRENT_MS_COLLECTOR
}
// Marks [startPage, startPage + pageCount) as unused, coalescing the
// freed run with any adjacent unused regions (the one starting right
// after it, and the one ending right before it) under the page-manager
// mutex.  If avoidDirtyPages is set and the caller did not clean the
// pages, they are zeroed here so they can be linked as clean.
private static void MarkUnusedPages(Thread currentThread, UIntPtr startPage, UIntPtr pageCount, bool fCleanPages) {
    Trace.Log(Trace.Area.Page,
              "MarkUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    UIntPtr endPage = startPage + pageCount;
    if (avoidDirtyPages && !fCleanPages) {
        // Zero the pages up front so the region can be linked clean.
        UIntPtr dirtyStartAddr = PageTable.PageAddr(startPage);
        UIntPtr dirtySize = PageTable.RegionSize(pageCount);
        Util.MemClear(dirtyStartAddr, dirtySize);
        fCleanPages = true;
    }
    bool iflag = EnterMutex(currentThread);
    try {
        // Coalesce with an unused region that begins at endPage.
        if (endPage < PageTable.pageTableCount) {
            if (PageTable.IsUnusedPage(endPage) &&
                PageTable.IsMyPage(endPage)) {
                UIntPtr regionSize = UnlinkUnusedPages(endPage);
                endPage += regionSize;
            }
        }
        // Coalesce with an unused region that ends at startPage; the
        // page just below startPage holds a tail record whose curr
        // field locates that region's header.
        UIntPtr queryStartPage = startPage - 1;
        UIntPtr newStartPage = startPage;
        if (PageTable.IsUnusedPage(queryStartPage) &&
            PageTable.IsMyPage(queryStartPage)) {
            UnusedBlockHeader *tailUnused = (UnusedBlockHeader *)
                PageTable.PageAddr(queryStartPage);
            UIntPtr newStartAddr = (UIntPtr)tailUnused->curr;
            newStartPage = PageTable.Page(newStartAddr);
            UIntPtr regionSize = UnlinkUnusedPages(newStartPage);
            VTable.Assert(newStartPage + regionSize == startPage);
        }
        // Retype only the newly freed pages, then link the whole
        // coalesced region back into the unused list.
        PageType pageType =
            fCleanPages ? PageType.UnusedClean : PageType.UnusedDirty;
        PageTable.SetType(startPage, pageCount, pageType);
        LinkUnusedPages(newStartPage, endPage - newStartPage, false);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
// During compaction, advances the destination window (destPage /
// destLimit) past non-zombie page runs until it reaches the run
// containing sourceCursor, recording all skipped page ranges so they
// can be reclaimed later.  Also resets the InteriorPtrTable (and, for
// card-based remsets, the OffsetTable) over the pages being skipped.
private unsafe void SkipDestinationAreas(ref UIntPtr destPage, UIntPtr destCursor, ref UIntPtr destLimit, UIntPtr sourceCursor) {
    UIntPtr cursorPage = PageTable.Page(destCursor);
    UIntPtr sourcePage = PageTable.Page(sourceCursor);
    if (cursorPage != sourcePage) {
        UIntPtr destPageLimit = PageTable.PagePad(destCursor);
        if (destPageLimit != destCursor) {
            // destCursor is mid-page; the partially used page is kept.
            cursorPage++;
        }
        VTable.Assert(PageTable.PageAligned(destLimit));
        UIntPtr limitPage = PageTable.Page(destLimit);
        // Walk zombie-page runs until we pass sourcePage; each
        // non-zombie gap is registered as skipped.
        while (destPage < sourcePage) {
            if (cursorPage < limitPage) {
                this.RegisterSkippedPages(cursorPage, limitPage);
            }
            // Skip the non-zombie run.
            do {
                destPage++;
            } while (!IsMyZombiePage(destPage));
            cursorPage = destPage;
            // Scan to the end of this zombie run.
            do {
                destPage++;
            } while (IsMyZombiePage(destPage));
            limitPage = destPage;
        }
        destLimit = PageTable.PageAddr(limitPage);
        VTable.Assert(destPage > sourcePage);
        VTable.Assert(cursorPage <= sourcePage);
        if (cursorPage < sourcePage) {
            // Register the leading part of the final run, up to the
            // page that sourceCursor lives on.
            this.RegisterSkippedPages(cursorPage, sourcePage);
            cursorPage = sourcePage;
        }
        // Reset per-page metadata for the pages we are moving into.
        InteriorPtrTable.ClearFirst(cursorPage, destPage);
        InteriorPtrTable.SetFirst(sourceCursor + PreHeader.Size);
        if (GC.remsetType == RemSetType.Cards) {
            OffsetTable.ClearLast(PageTable.PageAddr(cursorPage),
                                  PageTable.PageAddr(destPage) - 1);
        }
    }
}
// Bartok uses the extra field in the PageTable to easily
// map from a stack address to the thread that owns that stack.
// This is used to implement GetCurrentThread.
// Singularity uses a different mechanism that does not involve
// the extra data at all.
private static unsafe void SetStackPages(UIntPtr startAddr, UIntPtr endAddr, Thread thread) {
    UIntPtr firstPage = PageTable.Page(startAddr);
    UIntPtr pastPage = PageTable.Page(endAddr);
    UIntPtr numPages = pastPage - firstPage;
    // The pages must currently be unallocated with no extra data.
    PageTable.VerifyType(firstPage, numPages, PageType.Unallocated);
    PageTable.VerifyExtra(firstPage, numPages, 0);
    // Claim them as stack pages owned by this thread.
    PageTable.SetType(firstPage, numPages, PageType.Stack);
    PageTable.SetExtra(firstPage, numPages, (uint)thread.threadIndex);
}