// Marks the object referenced through *loc and, on the first visit,
// queues it so its fields get scanned in turn.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    // References that leave our memory area are not ours to trace.
    if (PageTable.IsForeignAddr(target)) {
        return;
    }
    UIntPtr pageIndex = PageTable.Page(target);
    PageType kind = PageTable.Type(pageIndex);
    if (PageTable.IsGcPage(kind)) {
        VTable.Assert(PageTable.IsMyPage(pageIndex));
        Object candidate = Magic.fromAddress(target);
        VTable.Assert(IsPossiblyObject(candidate), "Bad object/vtable");
        // GcMark reports whether we changed the object's color; only on
        // that transition do its fields still need to be traced, so each
        // object enters the work list at most once.
        if (candidate.GcMark((UIntPtr)1)) {
            workList.Write(target);
        }
    } else {
        // Non-heap references must point at our own static data or at a
        // stack page.
        VTable.Assert((PageTable.IsNonGcPage(kind) &&
                       PageTable.IsMyPage(pageIndex)) ||
                      PageTable.IsStackPage(kind));
    }
}
// Resolves a possibly-interior reference to the start of its containing
// object and delegates marking to markAndProcessReferenceVisitor.
// References outside our own GC pages are only sanity-checked.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    // Ignore pointers out of our memory area
    if (PageTable.IsForeignAddr(addr)) {
        return;
    }
    UIntPtr page = PageTable.Page(addr);
    if (!PageTable.IsMyGcPage(page)) {
        PageType pageType = PageTable.Type(page);
#if SINGULARITY_PROCESS
        // We have to allow reference pointers to the
        // ThreadContext, which lives in the kernel space.
        VTable.Assert((PageTable.IsNonGcPage(pageType) &&
                       PageTable.IsMyPage(page)) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType) ||
                      (PageTable.IsGcPage(pageType) &&
                       PageTable.IsKernelPage(page)));
#else
        VTable.Assert((PageTable.IsNonGcPage(pageType) &&
                       PageTable.IsMyPage(page)) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
#endif
        return;
    }
    // The reference may point into the middle of an object; find the
    // object's start address before marking it.
    UIntPtr objectAddr = SegregatedFreeList.Find(addr);
    markAndProcessReferenceVisitor.Visit(&objectAddr);
}
// Marks an object on a condemned (zombie) page and threads *loc onto the
// object's pointer-update chain: the referring field takes over the old
// head word, and the object's vtable slot records this field's address
// with the low bit set as the "threaded" tag.  The chain is unwound later
// when the object's final address is known (see ForwardReferences).
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        // Not in the condemned space: nothing to mark, but the reference
        // must at least land on a known page kind.
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
        return;
    }
    UIntPtr vtableAddr = Allocator.GetObjectVTable(addr);
    // Mark object
    if (vtableAddr == UIntPtr.Zero) {
        VTable.DebugPrint("Found null vtable in MarkReference (loc = 0x{0:x8}, addr = 0x{1:x8})\n",
                          __arglist(((UIntPtr)loc), addr));
        VTable.NotReached();
    }
    // Thread this field into the update chain: the field stores the old
    // head (real vtable on first visit, or a tagged field pointer), and
    // the vtable slot points back at this field, tagged with bit 0.
    *loc = vtableAddr;
    Allocator.SetObjectVTable(addr, (UIntPtr)loc + 1);
    // If first visit to the object, schedule visit of fields.  A clear
    // low bit means the saved word was the real vtable, not an
    // already-threaded field pointer.
    if ((vtableAddr & 0x1) == 0) {
        // Bit 1 (value 2) is the "pinned" tag elsewhere in this file;
        // mask it off before using the word as a vtable.
        // NOTE(review): (UIntPtr)~2U zero-extends from uint, so on a
        // 64-bit target this mask would also clear the upper 32 bits of
        // vtableAddr -- confirm 32-bit-only, or that vtables sit below 4GB.
        MarkVisit(addr, vtableAddr & (UIntPtr) ~2U);
    }
}
// Clears *loc when it refers to a heap object that did not survive
// marking; live references and non-heap references are left untouched.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    // Pointers out of our memory area are ignored.
    if (PageTable.IsForeignAddr(target)) {
        return;
    }
    UIntPtr pageIndex = PageTable.Page(target);
    PageType kind = PageTable.Type(pageIndex);
    if (!PageTable.IsGcPage(kind)) {
        VTable.Assert((PageTable.IsNonGcPage(kind) &&
                       PageTable.IsMyPage(pageIndex)) ||
                      PageTable.IsStackPage(kind));
        return;
    }
    VTable.Assert(PageTable.IsMyPage(pageIndex));
    Object candidate = Magic.fromAddress(target);
    VTable.Assert(IsPossiblyObject(candidate), "Bad Object/VTable");
    // A zero mark word means the object was not live: sever the reference.
    if (candidate.GcMark() == UIntPtr.Zero) {
        *loc = UIntPtr.Zero;
    }
}
// Later-pass companion to the threading mark visitor: for a reference
// into a zombie page, either threads this field onto a marked object's
// update chain (a tagged vtable word -- low bit set -- means the object
// was visited) or zeroes the field because the object is dead.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        // References outside the condemned space are left alone.
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
        return;
    }
    UIntPtr vtableAddr = Allocator.GetObjectVTable(addr);
    if ((vtableAddr & 0x1) == 0x1) {
        // Link this field to be updated: it inherits the old chain head,
        // and the vtable slot now points (tagged) at this field.
        *loc = vtableAddr;
        Allocator.SetObjectVTable(addr, (UIntPtr)loc + 1);
    } else {
        // Zero the reference (not marked)
        *loc = UIntPtr.Zero;
    }
}
// Patches *loc for objects already evacuated from a condemned (zombie)
// page; references to objects that were not copied are cleared.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr oldAddr = *loc;
    PageType kind = PageTable.Type(PageTable.Page(oldAddr));
    if (!PageTable.IsZombiePage(kind)) {
        // References outside the condemned space are left alone.
        VTable.Assert(PageTable.IsGcPage(kind) ||
                      PageTable.IsNonGcPage(kind) ||
                      PageTable.IsStackPage(kind) ||
                      PageTable.IsSharedPage(kind),
                      "Semispace:ForwardOnlyReferenceVisitor");
        return;
    }
    PageType liveGen = PageTable.ZombieToLive(kind);
    UIntPtr header = Allocator.GetObjectVTable(oldAddr);
    // A copied object's vtable word has been overwritten with a
    // forwarding pointer into the live generation; anything else means
    // the object was not live, so the reference is severed.
    bool forwarded = PageTable.Type(PageTable.Page(header)) == liveGen;
    *loc = forwarded ? header : UIntPtr.Zero;
}
// Forwards a reference held by a thread, which (unlike ordinary heap
// references) may be an interior pointer into the middle of an object.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    Trace.Log(Trace.Area.Pointer,
              "FwdThrRef: loc={0}, addr={1}, page={2}",
              __arglist(loc, addr, page));
    PageType pageType = PageTable.Type(page);
    // if an object "spills" into a page that
    // is pinned, and the object is copied
    // during a collection, we will end up with
    // the first part of the object in a zombie page
    // the second part of the object in a GC page.
    // We need to find the start of the object and
    // use that to determine whether the object has
    // been moved.
    if (!PageTable.IsZombiePage(pageType) &&
        !PageTable.IsGcPage(pageType)) {
        VTable.Assert(PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType) ||
                      VTable.BuildC2Mods,
                      "Semispace:ForwardThreadReference");
        return;
    }
    UIntPtr objectPtr = InteriorPtrTable.Find(addr);
    if (objectPtr == addr) {
        // The reference is to the object's start: forward it directly.
        generalReferenceVisitor.Visit(loc);
    } else {
        // Interior pointer: forward the object's start address, then
        // rebuild the interior pointer by re-applying the offset.
        // we can check for the page type of
        // objectPtr here to see if it is zombie page.
        // If true we can just return.
        UIntPtr newObject = objectPtr;
        generalReferenceVisitor.Visit(&newObject);
        UIntPtr newAddr = newObject + (addr - objectPtr);
        Trace.Log(Trace.Area.Pointer, "FwdThrRef: {0} -> {1}",
                  __arglist(addr, newAddr));
        *loc = newAddr;
    }
}
// BUGBUG: We are allocating an ArrayList while the collector
// is running.  If the ArrayList gets big enough to be
// allocated in the older generation, then the RemSet has the
// potential to overflow since the boxed integers will reside
// in the young generation.  We should eventually eliminate
// the use of ArrayList in this class as well as avoid boxing
// the page indices.
//
// Records every page touched by the pinned object containing *loc so
// the compaction pass leaves those pages in place, and charges the
// page(s) to the target generation's promotion accounting.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        // Only objects in the condemned space need pin registration.
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType) ||
                      VTable.BuildC2Mods,
                      "Semispace:RegisterPinnedReference:1");
        return;
    }
    PageType gen = PageTable.ZombieToLive(pageType);
    // The reference may be interior; resolve it to the object start.
    UIntPtr pinnedObjectAddr = InteriorPtrTable.Find(addr);
    // Lazily create the bookkeeping structures on first use.
    if (pinnedPageList == null) {
        pinnedPageList = new ArrayList();
        comparer = new UIntPtrComparer();
    }
    Object pinnedObject = Magic.fromAddress(pinnedObjectAddr);
    UIntPtr objectSize =
        ObjectLayout.ObjectSize(pinnedObjectAddr, pinnedObject.vtable);
    // Page span is computed from the pre-header, since the header words
    // live immediately before the object address.
    UIntPtr beforeObjectAddr = pinnedObjectAddr - PreHeader.Size;
    UIntPtr pastObjectAddr = beforeObjectAddr + objectSize;
    UIntPtr firstPage = PageTable.Page(beforeObjectAddr);
    UIntPtr lastPage = PageTable.Page(pastObjectAddr - 1);
    for (UIntPtr i = firstPage; i <= lastPage; i++) {
        // Contains boxes i on every probe (see BUGBUG above).
        if (!pinnedPageList.Contains(i)) {
            Trace.Log(Trace.Area.Pointer,
                      "RegPin: ptr={0} page={1} gen={2}",
                      __arglist(pinnedObjectAddr, i, gen));
            // A pinned page counts as promoted wholesale into gen.
            GenerationalCollector.gcPromotedTable[(int)gen - 1] +=
                PageTable.PageSize;
            pinnedPageList.Add(i);
        }
    }
}
// Records a thread-held interior pointer as an (object start, offset)
// pair so it can be reconstructed after the object moves.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr referent = *loc;
    PageType kind = PageTable.Type(PageTable.Page(referent));
    if (!PageTable.IsZombiePage(kind)) {
        // Only references into the condemned space need recording.
        VTable.Assert(PageTable.IsGcPage(kind) ||
                      PageTable.IsNonGcPage(kind) ||
                      PageTable.IsStackPage(kind) ||
                      PageTable.IsSharedPage(kind));
        return;
    }
    UIntPtr objectStart = InteriorPtrTable.Find(referent);
    UIntPtr offset = referent - objectStart;
    this.threadPtrQueue.Write(objectStart);
    this.threadPtrQueue.Write(offset);
}
// Records a thread-held interior pointer as an (object start, offset)
// pair and tags the object's vtable word with bit 2, which marks the
// object as pinned for the relocation pass.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr referent = *loc;
    PageType kind = PageTable.Type(PageTable.Page(referent));
    if (!PageTable.IsZombiePage(kind)) {
        // Only references into the condemned space need recording.
        VTable.Assert(PageTable.IsGcPage(kind) ||
                      PageTable.IsNonGcPage(kind) ||
                      PageTable.IsStackPage(kind) ||
                      PageTable.IsSharedPage(kind));
        return;
    }
    UIntPtr objectStart = InteriorPtrTable.Find(referent);
    registerThreadReferenceVisitor.threadPtrQueue.Write(objectStart);
    registerThreadReferenceVisitor.threadPtrQueue.Write(referent - objectStart);
    // Set the "pinned" bit in the object's header word.
    UIntPtr *vtableField = Allocator.GetObjectVTableAddress(objectStart);
    *vtableField = *vtableField | (UIntPtr)2U;
}
// Forwards *loc for a semispace collection: if the object was already
// copied, its vtable word holds the forwarding address; otherwise the
// object is copied now and *loc is updated to the new location.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr objAddr = *loc;
    UIntPtr page = PageTable.Page(objAddr);
    Trace.Log(Trace.Area.Pointer,
              "FwdRef: loc={0}, addr={1}, page={2}",
              __arglist(loc, objAddr, page));
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        // References outside the condemned space need no forwarding.
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType),
                      "Semispace:ForwardReferenceVisitor");
        return;
    }
    PageType gen = PageTable.ZombieToLive(pageType);
    VTable.Assert(gen > MIN_GENERATION);
    Object obj = Magic.fromAddress(objAddr);
    UIntPtr vtableAddr = *obj.VTableFieldAddr;
    UIntPtr vtablePageIndex = PageTable.Page(vtableAddr);
    // A vtable word pointing into the live target generation means the
    // object was already copied.
    if (PageTable.Type(vtablePageIndex) == gen) {
        // The vtable field is really a forwarding pointer
        Trace.Log(Trace.Area.Pointer, "FwdRef: VT fwd: {0} -> {1}",
                  __arglist(objAddr, vtableAddr));
        *loc = vtableAddr;
        return;
    }
    // First encounter: copy the object into the target generation.
    Object newObject = copyScanners[(int)gen].Copy(obj);
    *loc = Magic.addressOf(newObject);
}
// Heap-verification visitor: asserts that *loc refers to a plausible
// object on one of our GC pages whose vtable lives in non-GC memory.
// No state is modified.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr referent = *loc;
    UIntPtr pageIndex = PageTable.Page(referent);
    PageType kind = PageTable.Type(pageIndex);
    if (!PageTable.IsGcPage(kind)) {
        VTable.Assert(PageTable.IsNonGcPage(kind) ||
                      PageTable.IsStackPage(kind) ||
                      PageTable.IsSharedPage(kind));
        return;
    }
    VTable.Assert(PageTable.IsMyPage(pageIndex));
    // Resolve a possibly-interior pointer to the object's start address.
    UIntPtr objectStart = GC.installedGC.FindObjectAddr(referent);
    VTable.Assert(objectStart <= referent);
    Object heapObject = Magic.fromAddress(objectStart);
    UIntPtr vtablePtr = Magic.addressOf(heapObject.vtable);
    UIntPtr vtablePage = PageTable.Page(vtablePtr);
    VTable.Assert(PageTable.IsNonGcPage(PageTable.Type(vtablePage)));
    VTable.Assert(Magic.fromAddress(vtablePtr) is VTable);
}
// Reference updates and object relocation
//
// Compaction planning pass over all of this collector's zombie pages:
// walks each run of condemned pages object by object, assigns every
// marked object its post-compaction address, unthreads each object's
// pointer-update chain (writing the new address into every referring
// field), and records contiguous relocation runs via
// RegisterRelocationStart/RegisterRelocationEnd.  Pinned objects and
// large objects stay where they are.  Returns the final destination
// cursor; oldAllocPtr receives the initial (zero) cursor.
private unsafe UIntPtr ForwardReferences(PageType generation,
                                         out UIntPtr oldAllocPtr) {
    VTable.Assert(IsValidGeneration((int)generation));
    UIntPtr destPage = UIntPtr.Zero;
    UIntPtr destCursor;
    UIntPtr destLimit;
    PageType destGeneration;
    // Survivors are promoted one generation, capped at MAX_GENERATION.
    if (generation < MAX_GENERATION) {
        destGeneration = generation + 1;
    } else {
        destGeneration = MAX_GENERATION;
    }
    destCursor = UIntPtr.Zero;
    destLimit = UIntPtr.Zero;
    oldAllocPtr = destCursor;
    // runLength tracks the byte length of the current contiguous
    // relocation run; deltaBytes tracks the source/destination gap
    // (0x80000000 acts as a "no current run" sentinel so a fresh run is
    // always started at the next live object).
    UIntPtr runLength = UIntPtr.Zero;
    for (UIntPtr i = UIntPtr.Zero; i < PageTable.pageTableCount; i++) {
        if (!IsMyZombiePage(i)) {
            continue;
        }
        UIntPtr deltaBytes = (UIntPtr)0x80000000;
        UIntPtr sourceCursor = PageTable.PageAddr(i);
        // Extend to the full contiguous run of our zombie pages.
        do {
            i++;
        } while (i < PageTable.pageTableCount && IsMyZombiePage(i));
        UIntPtr sourceLimit = PageTable.PageAddr(i);
        while (true) {
            if (sourceCursor >= sourceLimit) {
                break;
            }
            // Skip alignment padding words.
            if (Allocator.IsAlignmentMarkerAddr(sourceCursor)) {
                sourceCursor += UIntPtr.Size;
                deltaBytes += UIntPtr.Size;
                continue;
            }
            // An "unused" marker means the rest of the page is free.
            if (BumpAllocator.IsUnusedMarkerAddr(sourceCursor)) {
                sourceCursor += UIntPtr.Size;
                sourceCursor = PageTable.PagePad(sourceCursor);
                deltaBytes = (UIntPtr)0x80000000;
                continue;
            }
            UIntPtr objectAddr = sourceCursor + PreHeader.Size;
            UIntPtr vtableOrMarker = Allocator.GetObjectVTable(objectAddr);
            if (vtableOrMarker == UIntPtr.Zero) {
                // We found the end of an allocation page
                sourceCursor = PageTable.PagePad(sourceCursor);
                deltaBytes = (UIntPtr)0x80000000;
                continue;
            }
            UIntPtr vtableAddr;
            if ((vtableOrMarker & 1) != 0) {
                // Marked object: the vtable slot heads a tagged chain of
                // referring fields.  Follow it to recover the real vtable
                // (the first untagged word at the end of the chain).
                UIntPtr temp = *(UIntPtr *)(vtableOrMarker - 1);
                while ((temp & 1) != 0) {
                    temp = *(UIntPtr *)(temp - 1);
                }
                VTable.Assert(PageTable.IsNonGcPage(PageTable.Type(PageTable.Page(temp))));
                vtableAddr = temp;
                if ((temp & 2) != 0) {
                    // Found pinned object
                    // A pinned object cannot move: advance the
                    // destination cursor to the object's own address.
                    SkipDestinationAreas(ref destPage, destCursor,
                                         ref destLimit, sourceCursor);
                    deltaBytes -= (sourceCursor - destCursor);
                    destCursor = sourceCursor;
                    vtableAddr -= 2;   // Remove "pinned" bit
                }
                // Restore the real vtable so ObjectSize works below.
                Allocator.SetObjectVTable(objectAddr, vtableAddr);
            } else {
                // Unmarked object: the slot still holds the real vtable.
                vtableAddr = vtableOrMarker;
            }
            VTable vtable = Magic.toVTable(Magic.fromAddress(vtableAddr));
            UIntPtr objectSize = ObjectLayout.ObjectSize(objectAddr, vtable);
            VTable.Assert(objectSize > 0);
            if ((vtableOrMarker & 1) != 0) {
                // Live object: choose its destination address.
                if (GenerationalCollector.IsLargeObjectSize(objectSize)) {
                    // Don't move large objects
                    SkipDestinationAreas(ref destPage, destCursor,
                                         ref destLimit, sourceCursor);
                    UIntPtr localDelta = sourceCursor - destCursor;
                    deltaBytes -= localDelta;
                    if (deltaBytes == UIntPtr.Zero &&
                        runLength != UIntPtr.Zero) {
                        runLength += localDelta;
                    }
                    destCursor = sourceCursor;
                    // Account for the object's pages through the padded
                    // page boundary.
                    UIntPtr objLimit = sourceCursor + objectSize;
                    UIntPtr pageEndAddr = PageTable.PagePad(objLimit);
                    objectSize = (pageEndAddr - sourceCursor);
                } else if (destCursor + objectSize > destLimit) {
                    // Current destination area is full: find a new one.
                    UIntPtr oldDestCursor = destCursor;
                    FindDestinationArea(ref destPage, ref destCursor,
                                        ref destLimit, objectSize,
                                        destGeneration);
                    VTable.Assert(destCursor <= sourceCursor);
                    VTable.Assert(destCursor + objectSize <= destLimit);
                    deltaBytes -= (destCursor - oldDestCursor);
                } else if (vtable.baseAlignment > UIntPtr.Size) {
                    // Pad the destination until the object's payload
                    // meets its alignment requirement.
                    uint alignmentMask = vtable.baseAlignment - 1;
                    int offset = PreHeader.Size + UIntPtr.Size;
                    while (((destCursor + offset) & alignmentMask) != 0) {
                        destCursor += UIntPtr.Size;
                        deltaBytes -= UIntPtr.Size;
                        if (deltaBytes == UIntPtr.Zero &&
                            runLength != UIntPtr.Zero) {
                            runLength += UIntPtr.Size;
                        }
                    }
                }
                // Start a new relocation run whenever the displacement
                // changed (or no run is open).
                if (runLength == UIntPtr.Zero ||
                    deltaBytes != UIntPtr.Zero) {
                    if (runLength != UIntPtr.Zero) {
                        RegisterRelocationEnd(runLength);
                    }
                    RegisterRelocationStart(sourceCursor, destCursor);
                    deltaBytes = UIntPtr.Zero;
                    runLength = UIntPtr.Zero;
                }
                // Unthread the update chain: write the object's new
                // address into every field that referred to it.
                UIntPtr newObjectAddr = destCursor + PreHeader.Size;
                do {
                    UIntPtr *ptrAddr = (UIntPtr *)(vtableOrMarker - 1);
                    vtableOrMarker = *ptrAddr;
                    *ptrAddr = newObjectAddr;
                } while ((vtableOrMarker & 1) != 0);
                destCursor += objectSize;
                runLength += objectSize;
            } else {
                // Dead object: its bytes become part of the gap, ending
                // any open relocation run.
                deltaBytes += objectSize;
                if (runLength != UIntPtr.Zero) {
                    RegisterRelocationEnd(runLength);
                }
                runLength = UIntPtr.Zero;
            }
            sourceCursor += objectSize;
        }
    }
    if (runLength != UIntPtr.Zero) {
        RegisterRelocationEnd(runLength);
    }
    return(destCursor);
}