internal unsafe override void Visit(UIntPtr *loc) {
    // Forward-only visitor: if the referent lives on a zombie page and was
    // copied, redirect the slot through the forwarding pointer stored in the
    // vtable word; if it was not copied, the referent is dead, so clear the
    // slot.  References to non-zombie pages are left untouched.
    UIntPtr targetAddr = *loc;
    UIntPtr targetPage = PageTable.Page(targetAddr);
    PageType targetPageType = PageTable.Type(targetPage);
    if (!PageTable.IsZombiePage(targetPageType)) {
        // Not part of the condemned space; just sanity-check the page kind.
        VTable.Assert(PageTable.IsGcPage(targetPageType) ||
                      PageTable.IsNonGcPage(targetPageType) ||
                      PageTable.IsStackPage(targetPageType) ||
                      PageTable.IsSharedPage(targetPageType),
                      "Semispace:ForwardOnlyReferenceVisitor");
        return;
    }
    PageType destGeneration = PageTable.ZombieToLive(targetPageType);
    UIntPtr vtableWord = Allocator.GetObjectVTable(targetAddr);
    UIntPtr vtableWordPage = PageTable.Page(vtableWord);
    if (PageTable.Type(vtableWordPage) == destGeneration) {
        // The vtable word has been overwritten with a forwarding pointer
        // into the destination generation; follow it.
        *loc = vtableWord;
    } else {
        // No forwarding pointer: the object was not live.
        *loc = UIntPtr.Zero;
    }
}
internal void ProcessPinnedPages(ReferenceVisitor ptrVisitor) {
    // Promotes every registered pinned page from its zombie type to the
    // corresponding live type, then scans the reference fields of each
    // object residing on those pages with ptrVisitor.  No-op when no pages
    // were pinned during this collection.
    if (pinnedPageList == null || pinnedPageList.Count == 0) {
        return;
    }
    pinnedPageList.Sort(comparer);
    int count = pinnedPageList.Count;
    // Phase 1: flip each pinned page from zombie back to live.
    for (int j = 0; j < count; j++) {
        UIntPtr pinnedPage = (UIntPtr)pinnedPageList[j];
        PageType zombieType = PageTable.Type(pinnedPage);
        VTable.Assert(PageTable.IsZombiePage(zombieType),
                      "Semispace:RegisterPinnedReference:2");
        PageTable.SetType(pinnedPage, PageTable.ZombieToLive(zombieType));
    }
    // Phase 2: walk each maximal run of contiguous pinned pages and visit
    // the reference fields of every object found on the run.
    int cursor = 0;
    while (cursor < count) {
        UIntPtr runStart = (UIntPtr)pinnedPageList[cursor];
        UIntPtr runEnd = runStart + 1;
        cursor++;
        // Extend the run while the next sorted entry is the adjacent page.
        for (; cursor < count && (UIntPtr)pinnedPageList[cursor] == runEnd;
             cursor++) {
            runEnd++;
        }
        UIntPtr scanAddr = FirstPinnedObjectAddr(runStart);
        UIntPtr scanLimit = PostPinnedObjectAddr(runEnd);
        while (scanAddr < scanLimit) {
            if (Allocator.IsAlignment(scanAddr)) {
                // Step over a single alignment token.
                scanAddr += UIntPtr.Size;
            } else if (BumpAllocator.IsUnusedSpace(scanAddr)) {
                // Skip the unused tail of this page; resume at the first
                // object slot of the next page.
                scanAddr = PageTable.PagePad(scanAddr) + PreHeader.Size;
            } else {
                Object scanned = Magic.fromAddress(scanAddr);
                scanAddr += ptrVisitor.VisitReferenceFields(scanned);
            }
        }
    }
}
// BUGBUG: We are allocating an ArrayList while the collector
// is running. If the ArrayList gets big enough to be
// allocated in the older generation, then the RemSet has the
// potential to overflow since the boxed integers will reside
// in the young generation. We should eventually eliminate
// the use of ArrayList in this class as well as avoid boxing
// the page indices.
//
// Registers a pinned reference discovered during the collection: finds the
// object containing the (possibly interior) pointer, records every page the
// object's storage touches in pinnedPageList, and charges those pages to the
// generation's promotion accounting.  Non-zombie references are ignored.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        // Not in the condemned space: nothing to pin; just sanity-check the
        // page kind (BuildC2Mods relaxes the check for that configuration).
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType) ||
                      VTable.BuildC2Mods,
                      "Semispace:RegisterPinnedReference:1");
        return;
    }
    PageType gen = PageTable.ZombieToLive(pageType);
    // addr may be an interior pointer; locate the start of the enclosing
    // object.
    UIntPtr pinnedObjectAddr = InteriorPtrTable.Find(addr);
    if (pinnedPageList == null) {
        // Lazily create the bookkeeping structures (see BUGBUG above).
        pinnedPageList = new ArrayList();
        comparer = new UIntPtrComparer();
    }
    Object pinnedObject = Magic.fromAddress(pinnedObjectAddr);
    UIntPtr objectSize =
        ObjectLayout.ObjectSize(pinnedObjectAddr, pinnedObject.vtable);
    // The object's storage starts PreHeader.Size bytes before its address;
    // compute the full [beforeObjectAddr, pastObjectAddr) byte range.
    UIntPtr beforeObjectAddr = pinnedObjectAddr - PreHeader.Size;
    UIntPtr pastObjectAddr = beforeObjectAddr + objectSize;
    UIntPtr firstPage = PageTable.Page(beforeObjectAddr);
    UIntPtr lastPage = PageTable.Page(pastObjectAddr - 1);
    // Record every page touched by the object, once each (Contains boxes the
    // page index -- see BUGBUG above).
    for (UIntPtr i = firstPage; i <= lastPage; i++) {
        if (!pinnedPageList.Contains(i)) {
            Trace.Log(Trace.Area.Pointer,
                      "RegPin: ptr={0} page={1} gen={2}",
                      __arglist(pinnedObjectAddr, i, gen));
            // Each newly pinned page counts as a promoted page for this
            // generation's accounting.
            GenerationalCollector.gcPromotedTable[(int)gen - 1] +=
                PageTable.PageSize;
            pinnedPageList.Add(i);
        }
    }
}
internal unsafe override void Visit(UIntPtr *loc) {
    // Forwarding visitor: redirects a reference into the condemned space to
    // the object's new location, copying the object on first encounter.  A
    // previously copied object is recognized by its vtable word already
    // pointing into the destination generation (a forwarding pointer).
    UIntPtr targetAddr = *loc;
    UIntPtr targetPage = PageTable.Page(targetAddr);
    Trace.Log(Trace.Area.Pointer,
              "FwdRef: loc={0}, addr={1}, page={2}",
              __arglist(loc, targetAddr, targetPage));
    PageType targetPageType = PageTable.Type(targetPage);
    if (!PageTable.IsZombiePage(targetPageType)) {
        // Outside the condemned space; just sanity-check the page kind.
        VTable.Assert(PageTable.IsGcPage(targetPageType) ||
                      PageTable.IsNonGcPage(targetPageType) ||
                      PageTable.IsStackPage(targetPageType) ||
                      PageTable.IsSharedPage(targetPageType),
                      "Semispace:ForwardReferenceVisitor");
        return;
    }
    PageType destGen = PageTable.ZombieToLive(targetPageType);
    VTable.Assert(destGen > MIN_GENERATION);
    Object target = Magic.fromAddress(targetAddr);
    UIntPtr vtableWord = *target.VTableFieldAddr;
    UIntPtr vtableWordPage = PageTable.Page(vtableWord);
    if (PageTable.Type(vtableWordPage) == destGen) {
        // Already copied: the vtable word is a forwarding pointer.
        Trace.Log(Trace.Area.Pointer,
                  "FwdRef: VT fwd: {0} -> {1}",
                  __arglist(targetAddr, vtableWord));
        *loc = vtableWord;
    } else {
        // First encounter: copy into the destination generation and
        // redirect the slot to the new copy.
        Object copied = copyScanners[(int)destGen].Copy(target);
        *loc = Magic.addressOf(copied);
    }
}
private Object CopyLarge(Object obj, UIntPtr size) {
    // Large objects are never physically moved: instead the pages holding
    // the object are retyped from zombie to this scanner's live generation,
    // and the object's body is queued for scanning.  Returns obj unchanged
    // (its address is its "new" address).
    UIntPtr baseAddr = Magic.addressOf(obj) - PreHeader.Size;
    UIntPtr firstPage = PageTable.Page(baseAddr);
    UIntPtr pastPage = PageTable.Page(baseAddr + size - 1) + 1;
    VTable.Assert(this.pageType ==
                  PageTable.ZombieToLive(PageTable.Type(firstPage)));
    UIntPtr pageCount = pastPage - firstPage;
    PageTable.SetType(firstPage, pageCount, this.pageType);
    UIntPtr promotedBytes = PageTable.PageSize * pageCount;
    Trace.Log(Trace.Area.Pointer,
              "FwdRef: large object: {0} gen: {1} pagesize: {2}",
              __arglist(Magic.addressOf(obj), this.pageType, promotedBytes));
    // Charge the retyped pages to this generation's promotion accounting.
    GenerationalGCData.gcPromotedTable[(int)this.pageType - 1] +=
        promotedBytes;
    // Queue the object's body so its reference fields get scanned.
    this.AddWork(baseAddr, baseAddr + size);
    return(obj);
}
// Post-collection cleanup of the pinned pages recorded in pinnedPageList:
// for each maximal run of contiguous pinned pages, fills the gap before the
// first pinned object with alignment tokens (marking pages live when a
// pinned object spills across a page boundary), keeps the card/offset
// tables consistent when card-marking remsets are in use, and finally
// releases the pinned-page bookkeeping.  No-op when nothing was pinned.
internal void CleanPinnedPages() {
    if (pinnedPageList == null || pinnedPageList.Count == 0) {
        return;
    }
    int pageIndex = 0;
    int limit = pinnedPageList.Count;
    // End (exclusive) of the pinned data on the previous run of pages;
    // UIntPtr.Zero until the first run has been processed.
    UIntPtr lastPostPinnedAddr = UIntPtr.Zero;
    while (pageIndex < limit) {
        // Gather one maximal run [startPage, endPage) of contiguous pinned
        // pages from the (sorted) list.
        UIntPtr startPage = (UIntPtr)pinnedPageList[pageIndex];
        UIntPtr endPage = startPage + 1;
        pageIndex++;
        while (pageIndex < limit &&
               (UIntPtr)pinnedPageList[pageIndex] == endPage) {
            pageIndex++;
            endPage++;
        }
        // Zero out the area between the start of the page and
        // the first object on the page
        UIntPtr firstObjectAddr = FirstPinnedObjectAddr(startPage);
        UIntPtr firstAddr = firstObjectAddr - PreHeader.Size;
        UIntPtr trashAddr = PageTable.PageAddr(startPage);
        if (firstAddr < trashAddr) {
            // The first object "spills" into the previous page,
            // presumably by no more than HEADER_BYTES bytes
            VTable.Assert(
                PageTable.Page(firstAddr) == startPage - 1,
                "Semispace:RegisterPinnedReference:3");
            // Prepare to zero the preceding page unless it also
            // had pinned data on it
            trashAddr = PageTable.PageAddr(startPage - 1);
            InteriorPtrTable.ClearFirst(startPage - 1);
            if (trashAddr >= lastPostPinnedAddr) {
                // Need to mark the spilled-onto page live to
                // keep the spilled data around
                PageType fromSpaceType = PageTable.Type(startPage - 1);
                VTable.Assert(
                    PageTable.IsZombiePage(fromSpaceType),
                    "Semispace:RegisterPinnedReference:4");
                PageType toSpaceType =
                    PageTable.ZombieToLive(fromSpaceType);
                PageTable.SetType(startPage - 1, toSpaceType);
            }
        }
        // If lastPostPinnedAddr is on the page that trashAddr
        // starts, pinned data from the last run of pinned pages
        // and pinned data from this run of pinned data are on the
        // same page, so just write alignment tokens from
        // lastPostPinnedAddr to the first pinned object.
        // Otherwise, write an unused marker at lastPostPinnedAddr
        // since the rest of its page must be copied or dead.
        if (trashAddr < lastPostPinnedAddr) {
            trashAddr = lastPostPinnedAddr;
        } else {
            CleanPageTail(lastPostPinnedAddr);
        }
        if (GC.remsetType == RemSetType.Cards && trashAddr < firstAddr) {
            // Card-marking remset: the range [trashAddr, firstAddr) is
            // about to become alignment filler, so the offset-table
            // entries for the cards it covers must be cleared or verified.
            UIntPtr firstCard = CardTable.CardNo(trashAddr);
            UIntPtr lastCard = CardTable.CardNo(firstAddr - 1);
            if (!OffsetTable.NoObjectPtrToTheCard(firstCard)) {
                UIntPtr offset = OffsetTable.GetOffset(firstCard);
                UIntPtr objPtr = CardTable.CardAddr(firstCard) + offset;
                UIntPtr size = OffsetTable.ObjectSize(objPtr);
                VTable.Assert
                    ((objPtr + size - PreHeader.Size <= trashAddr) ||
                     (objPtr >= trashAddr),
                     "Object should be totally " +
                     "above or below trashAddr");
                if (objPtr >= trashAddr) {
                    // The offset in this card needs to be updated
                    OffsetTable.ClearCards(firstCard, firstCard);
                }
            }
            OffsetTable.ClearCards(firstCard + 1, lastCard - 1);
            if (lastCard != CardTable.CardNo(firstObjectAddr)) {
                OffsetTable.ClearCards(lastCard, lastCard);
            } else {
                // The first pinned object shares lastCard; its recorded
                // offset must point at or beyond that object.
                VTable.Assert(OffsetTable.GetOffset(lastCard) >=
                              (firstObjectAddr -
                               CardTable.CardAddr(lastCard)),
                              "wrong offset");
            }
        }
        {
            // trashAddr should go back at most one page.
            UIntPtr trashPage = PageTable.Page(trashAddr);
            UIntPtr firstObjectAddrPage =
                PageTable.Page(firstObjectAddr);
            VTable.Assert((trashPage == firstObjectAddrPage - 1) ||
                          (trashPage == firstObjectAddrPage));
        }
        // If the InteriorPtrTable already had a value, then this is
        // redundant, but if the call to First above has to compute
        // the value, then (since it won't store it in the table) we
        // should store it. Why? At this point the previous page
        // would be "connected" to this one. After this collection
        // the previous page will be unused or re-used and unrelated
        // to this page and subsequent calls to First would then
        // rely on it making the leap between unrelated pages.
        InteriorPtrTable.SetFirst(firstObjectAddr);
        // Fill the gap before the first pinned object with alignment
        // tokens, one word at a time.
        while (trashAddr < firstAddr) {
            Allocator.WriteAlignment(trashAddr);
            trashAddr += UIntPtr.Size;
        }
        // Zero out the area between the last whole object on
        // the last page and the end of the last page
        UIntPtr pastAddr = PostPinnedObjectAddr(endPage);
        UIntPtr newEndPage =
            PageTable.Page(PageTable.PagePad(pastAddr));
        while (endPage < newEndPage) {
            // The last object spills into the next page(s), so
            // mark those page(s) live
            PageType fromPageType = PageTable.Type(endPage);
            if (PageTable.IsZombiePage(fromPageType)) {
                PageType toSpaceType =
                    PageTable.ZombieToLive(fromPageType);
                PageTable.SetType(endPage, toSpaceType);
            } else {
                // final page might be live already because
                // something else on it was pinned.
                // pageIndex has already been incremented,
                // so it points to the start of the next
                // set of contiguous pages
                VTable.Assert(
                    PageTable.IsLiveGcPage(fromPageType) &&
                    pageIndex < limit &&
                    endPage == (UIntPtr)pinnedPageList[pageIndex],
                    "Semispace:RegisterPinnedReference:5");
            }
            ++endPage;
        }
        lastPostPinnedAddr = pastAddr;
    }
    // Finish the tail of the final run, then drop the bookkeeping so the
    // next collection starts fresh.
    CleanPageTail(lastPostPinnedAddr);
    pinnedPageList = null;
    comparer = null;
}