// Visits a traceable slot <loc> classified by the page the SLOT
// itself lives on (not its referent). Slots on GC pages are ignored
// (asserted only). Static-data slots are always forwarded to the
// wrapped visitor; stack slots are forwarded only when their two low
// (alignment) bits are clear, and are then tagged by setting bit 0
// so a later pass can recognize them as already processed.
internal override unsafe void Visit(UIntPtr *loc) {
    UIntPtr pageLoc = PageTable.Page((UIntPtr)loc);
    PageType pageType = PageTable.Type(pageLoc);
    if (pageType != PageType.NonGC && pageType != PageType.Stack) {
        VTable.Assert(PageTable.IsGcPage(pageLoc),
                      @"PageTable.IsGcPage(pageLoc)");
        return;
    }
    // NOTE(review): truncates the slot value to 32 bits; assumes a
    // 32-bit address space -- confirm if ever built for 64-bit.
    uint addr = (uint)*loc;
    if (pageType == PageType.NonGC || (addr & 0x03) == 0) {
        // Untagged (or static) slot: let the wrapped visitor see it.
        this.visitor.Visit(loc);
    }
    if (pageType == PageType.Stack) {
        // Tag the stack slot (set bit 0) to mark it as visited.
        *loc = (UIntPtr)(addr | 0x01);
    }
}
// Marks the object referenced from <loc> using pointer threading
// (reference reversal): the object's old header word is stored into
// the slot, and the tagged slot address ((UIntPtr)loc + 1) is stored
// into the header word, chaining together every slot that points at
// the object so they can all be fixed up later. Only references
// into zombie (from-space) pages are processed.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
        return;
    }
    UIntPtr vtableAddr = Allocator.GetObjectVTable(addr);
    // Mark object
    if (vtableAddr == UIntPtr.Zero) {
        VTable.DebugPrint("Found null vtable in MarkReference (loc = 0x{0:x8}, addr = 0x{1:x8})\n",
                          __arglist(((UIntPtr)loc), addr));
        VTable.NotReached();
    }
    // Thread this slot into the object's fixup chain.
    *loc = vtableAddr;
    Allocator.SetObjectVTable(addr, (UIntPtr)loc + 1);
    // If first visit to the object, schedule visit of fields.
    // Bit 0 of the saved header word records "already visited";
    // bit 1 is masked off before use (presumably a pin/tag bit --
    // TODO confirm).
    if ((vtableAddr & 0x1) == 0) {
        MarkVisit(addr, vtableAddr & (UIntPtr) ~2U);
    }
}
// Resolves a reference into from-space without copying anything: if
// the referent was already forwarded, install the forwarding
// pointer; if it was not live, clear the slot. References outside
// zombie pages are left untouched (asserted only).
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr objAddr = *loc;
    PageType kind = PageTable.Type(PageTable.Page(objAddr));
    if (!PageTable.IsZombiePage(kind)) {
        VTable.Assert(PageTable.IsGcPage(kind) ||
                      PageTable.IsNonGcPage(kind) ||
                      PageTable.IsStackPage(kind) ||
                      PageTable.IsSharedPage(kind),
                      "Semispace:ForwardOnlyReferenceVisitor");
        return;
    }
    PageType liveGen = PageTable.ZombieToLive(kind);
    UIntPtr header = Allocator.GetObjectVTable(objAddr);
    // A header word pointing into the live generation is really a
    // forwarding pointer left behind by the copying phase.
    bool wasForwarded =
        (PageTable.Type(PageTable.Page(header)) == liveGen);
    *loc = wasForwarded ? header : UIntPtr.Zero;
}
// Depth-first marking visit: on the first encounter an object is
// timestamped (discovery), its vtable and reference fields are
// traversed recursively, and it is timestamped again (finishing).
// Already-marked objects and non-heap referents are skipped.
internal override unsafe void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    UIntPtr targetPage = PageTable.Page(target);
    if (!PageTable.IsGcPage(targetPage)) {
        PageType kind = PageTable.Type(targetPage);
        VTable.Assert(kind == PageType.NonGC ||
                      kind == PageType.Stack,
                      @"pageType == PageType.NonGC || pageType == PageType.Stack");
        return;
    }
    Object candidate = Magic.fromAddress(target);
    // GcMark is true only on the transition to marked, so each
    // object is expanded exactly once.
    if (!candidate.GcMark((UIntPtr)1)) {
        return;
    }
    this.time += 1;
    setDfsDiscoveryTime(candidate, this.time);
    // The vtable is itself a traceable reference.
    UIntPtr vtableRef = Magic.addressOf(candidate.vtable);
    this.Visit(&vtableRef);
    this.VisitReferenceFields(candidate);
    this.time += 1;
    setDfsFinishingTime(candidate, this.time);
}
// Update phase of the mark/threading scheme: if the referent's
// saved header has the mark bit (0x1) set, thread this slot into the
// object's fixup chain exactly as the marking visitor does;
// otherwise the object was not marked (dead) and the reference is
// cleared. Only references into zombie pages are processed.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
        return;
    }
    UIntPtr vtableAddr = Allocator.GetObjectVTable(addr);
    if ((vtableAddr & 0x1) == 0x1) {
        // Link this field to be updated
        *loc = vtableAddr;
        Allocator.SetObjectVTable(addr, (UIntPtr)loc + 1);
    } else {
        // Zero the reference (not marked)
        *loc = UIntPtr.Zero;
    }
}
// Severs references to unmarked (dead) heap objects by zeroing the
// slot; marked objects, foreign pointers, and non-heap referents are
// left alone.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    // References outside our memory area are not ours to judge.
    if (PageTable.IsForeignAddr(target)) {
        return;
    }
    UIntPtr targetPage = PageTable.Page(target);
    PageType kind = PageTable.Type(targetPage);
    if (!PageTable.IsGcPage(kind)) {
        VTable.Assert((PageTable.IsNonGcPage(kind) &&
                       PageTable.IsMyPage(targetPage)) ||
                      PageTable.IsStackPage(kind));
        return;
    }
    VTable.Assert(PageTable.IsMyPage(targetPage));
    Object referent = Magic.fromAddress(target);
    VTable.Assert(IsPossiblyObject(referent), "Bad Object/VTable");
    bool unmarked = (referent.GcMark() == UIntPtr.Zero);
    if (unmarked) {
        // The object was not live; clear the reference.
        *loc = UIntPtr.Zero;
    }
}
// Marks the referent and, when the mark newly changed, queues the
// object so its reference fields will be scanned in turn.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    // Ignore pointers out of our memory area.
    if (PageTable.IsForeignAddr(target)) {
        return;
    }
    UIntPtr targetPage = PageTable.Page(target);
    PageType kind = PageTable.Type(targetPage);
    if (!PageTable.IsGcPage(kind)) {
        VTable.Assert((PageTable.IsNonGcPage(kind) &&
                       PageTable.IsMyPage(targetPage)) ||
                      PageTable.IsStackPage(kind));
        return;
    }
    VTable.Assert(PageTable.IsMyPage(targetPage));
    Object referent = Magic.fromAddress(target);
    VTable.Assert(IsPossiblyObject(referent), "Bad object/vtable");
    if (referent.GcMark((UIntPtr)1)) {
        // We changed the color of the object, so we
        // have to mark the objects reachable from the fields
        workList.Write(target);
    }
}
// Marks the object containing the (possibly interior) pointer
// <*loc> when it lies on one of our own GC pages; all other
// referents are merely sanity-checked.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    // Pointers outside our memory area are ignored entirely.
    if (PageTable.IsForeignAddr(target)) {
        return;
    }
    UIntPtr targetPage = PageTable.Page(target);
    if (PageTable.IsMyGcPage(targetPage)) {
        // Possibly-interior pointer: locate the enclosing object's
        // start and mark through it.
        UIntPtr start = SegregatedFreeList.Find(target);
        markAndProcessReferenceVisitor.Visit(&start);
        return;
    }
    PageType kind = PageTable.Type(targetPage);
#if SINGULARITY_PROCESS
    // We have to allow reference pointers to the
    // ThreadContext, which lives in the kernel space.
    VTable.Assert((PageTable.IsNonGcPage(kind) &&
                   PageTable.IsMyPage(targetPage)) ||
                  PageTable.IsStackPage(kind) ||
                  PageTable.IsSharedPage(kind) ||
                  (PageTable.IsGcPage(kind) &&
                   PageTable.IsKernelPage(targetPage)));
#else
    VTable.Assert((PageTable.IsNonGcPage(kind) &&
                   PageTable.IsMyPage(targetPage)) ||
                  PageTable.IsStackPage(kind) ||
                  PageTable.IsSharedPage(kind));
#endif
}
// Debug sanity check of the delayed-deallocation list: every queued
// block must live on a GC page and have a zero reference count.
// Each block's REF_STATE is bumped as it is visited, so a cycle in
// the list makes the "non-zero reference count" check fire on the
// second encounter; a second pass undoes the increments.
protected static void deallocationListChecker() {
    // Check for nonzero reference counts and for
    // loops in the delayed deallocation list.
    for (Object block = delayedDeallocationList; block != null;
         block = getNextLink(block)) {
        UIntPtr objAddr = Magic.addressOf(block);
        UIntPtr page = PageTable.Page(objAddr);
        if (!PageTable.IsGcPage(page)) {
            VTable.DebugPrint("Non-GC memory for freeing!\n");
            VTable.DebugBreak();
        }
        uint refState = block.REF_STATE;
        if ((refState & RSMasks.refCount) != 0) {
            VTable.DebugPrint("Non-zero reference count!\n");
            VTable.DebugBreak();
        }
        // Temporary "visited" marker; removed by the second pass.
        block.REF_STATE = refState + 1;
    }
    // Make another pass to reset reference counts.
    for (Object block = delayedDeallocationList; block != null;
         block = getNextLink(block)) {
        block.REF_STATE--;
    }
}
// Bumps the backup reference count of the object containing <*loc>
// and, on the first marking, queues the object for field scanning.
internal override unsafe void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    UIntPtr targetPage = PageTable.Page(target);
    if (!PageTable.IsGcPage(targetPage)) {
        PageType kind = PageTable.Type(targetPage);
        VTable.Assert(kind == PageType.NonGC ||
                      kind == PageType.Stack,
                      @"pageType == PageType.NonGC || pageType == PageType.Stack");
        return;
    }
    // The reference may be interior; resolve to the object start.
    UIntPtr start = SegregatedFreeList.Find(target);
    Object found = Magic.fromAddress(start);
    setBackupRefcount(found, getBackupRefcount(found) + 1);
    // GcMark is true only on the first marking, so each object is
    // scheduled for scanning at most once.
    if (found.GcMark((UIntPtr)1)) {
        this.workList.Write(start);
    }
}
// Records, in the page-table "extra" word of newAddr's page, the
// offset of newAddr within that page, biased by OFFSET_SKEW
// (presumably so that a stored zero can mean "no entry" -- confirm
// against the readers of the extra word).
internal static unsafe void SetFirst(UIntPtr newAddr) {
    // Compute the page number once; the previous version recomputed
    // PageTable.Page(newAddr) for the assertion and then again for
    // the offset calculation.
    UIntPtr page = PageTable.Page(newAddr);
    VTable.Assert(PageTable.IsGcPage(page),
                  "SetFirst on a non-GC page");
    UIntPtr offset = newAddr - PageTable.PageAddr(page);
    PageTable.SetExtra(page, unchecked ((uint)(offset + OFFSET_SKEW)));
}
// Forwards a thread-held reference that may be an interior pointer:
// object-start references are forwarded directly, interior ones are
// rebuilt at the same offset inside the forwarded copy.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    Trace.Log(Trace.Area.Pointer,
              "FwdThrRef: loc={0}, addr={1}, page={2}",
              __arglist(loc, addr, page));
    PageType pageType = PageTable.Type(page);
    // if an object "spills" into a page that
    // is pinned, and the object is copied
    // during a collection, we will end up with
    // the first part of the object in a zombie page
    // the second part of the object in a GC page.
    // We need to find the start of the object and
    // use that to determine whether the object has
    // been moved.
    if (!PageTable.IsZombiePage(pageType) &&
        !PageTable.IsGcPage(pageType)) {
        VTable.Assert(PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType) ||
                      VTable.BuildC2Mods,
                      "Semispace:ForwardThreadReference");
        return;
    }
    UIntPtr objectPtr = InteriorPtrTable.Find(addr);
    if (objectPtr == addr) {
        // Reference points at the object start; forward in place.
        generalReferenceVisitor.Visit(loc);
    } else {
        // we can check for the page type of
        // objectPtr here to see if it is zombie page.
        // If true we can just return.
        // Forward the object start, then reconstruct the interior
        // pointer at the same offset inside the new copy.
        UIntPtr newObject = objectPtr;
        generalReferenceVisitor.Visit(&newObject);
        UIntPtr newAddr = newObject + (addr - objectPtr);
        Trace.Log(Trace.Area.Pointer, "FwdThrRef: {0} -> {1}",
                  __arglist(addr, newAddr));
        *loc = newAddr;
    }
}
// OffsetTable records all objects promoted or directly allocated in
// the mature generation; SetLast stores objPtr's offset within its
// card into the offset table.
internal static void SetLast(UIntPtr objPtr) {
    VTable.Assert(PageTable.IsGcPage(PageTable.Page(objPtr)),
                  "Not GC page");
    UIntPtr cardNo = CardTable.CardNo(objPtr);
    // The low bits of the address are the offset within the card.
    UIntPtr inCardOffset = objPtr & (UIntPtr)(CardTable.CardSize - 1);
    VTable.Assert(inCardOffset < CardTable.CardSize, "Overflow");
    SetOffset(cardNo, inCardOffset);
#if DEBUG_OFFSETTABLE
    VTable.DebugPrint("SetLast objPtr {0:x8}, card {1:x8}, offset {2:x8}\n",
                      __arglist(objPtr, cardNo, inCardOffset));
#endif
}
// BUGBUG: We are allocating an ArrayList while the collector
// is running. If the ArrayList gets big enough to be
// allocated in the older generation, then the RemSet has the
// potential to overflow since the boxed integers will reside
// in the young generation. We should eventually eliminate
// the use of ArrayList in this class as well as avoid boxing
// the page indices.
//
// Registers a pinned reference: every page spanned by the pinned
// object (including its PreHeader) is added to pinnedPageList, and
// the promoted-bytes statistics for the object's target generation
// are updated once per newly recorded page.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType) ||
                      VTable.BuildC2Mods,
                      "Semispace:RegisterPinnedReference:1");
        return;
    }
    PageType gen = PageTable.ZombieToLive(pageType);
    // The reference may be interior; find the object start.
    UIntPtr pinnedObjectAddr = InteriorPtrTable.Find(addr);
    if (pinnedPageList == null) {
        pinnedPageList = new ArrayList();
        comparer = new UIntPtrComparer();
    }
    Object pinnedObject = Magic.fromAddress(pinnedObjectAddr);
    UIntPtr objectSize =
        ObjectLayout.ObjectSize(pinnedObjectAddr,
                                pinnedObject.vtable);
    // The object occupies [beforeObjectAddr, pastObjectAddr);
    // record each page in that range exactly once.
    UIntPtr beforeObjectAddr = pinnedObjectAddr - PreHeader.Size;
    UIntPtr pastObjectAddr = beforeObjectAddr + objectSize;
    UIntPtr firstPage = PageTable.Page(beforeObjectAddr);
    UIntPtr lastPage = PageTable.Page(pastObjectAddr - 1);
    for (UIntPtr i = firstPage; i <= lastPage; i++) {
        if (!pinnedPageList.Contains(i)) {
            Trace.Log(Trace.Area.Pointer,
                      "RegPin: ptr={0} page={1} gen={2}",
                      __arglist(pinnedObjectAddr, i, gen));
            GenerationalCollector.gcPromotedTable[(int)gen - 1] +=
                PageTable.PageSize;
            pinnedPageList.Add(i);
        }
    }
}
// Classifies a traceable pointer for memory accounting and bumps the
// matching counter: static data, stack/shared, interior heap
// pointer, or exterior (object-start) heap pointer.
internal override unsafe void Visit(UIntPtr *loc) {
    // <loc> is a traceable pointer; its referent either resides in
    // the heap, the stack or in the static data area.
    UIntPtr target = *loc;
    UIntPtr targetPage = PageTable.Page(target);
    VTable.Assert(PageTable.IsMyPage(targetPage),
                  "MemoryAccounting: !IsMyPage");
    if (PageTable.IsGcPage(targetPage)) {
        // Heap pointer: distinguish interior from object-start.
        UIntPtr start = GC.installedGC.FindObjectAddr(target);
        if (start == target) {
            exteriorManagedHeapPtrs++;
        } else {
            // A "truly" interior pointer into a heap object.
            interiorManagedHeapPtrs++;
        }
        return;
    }
    PageType kind = PageTable.Type(targetPage);
    VTable.Assert(kind == PageType.NonGC ||
                  kind == PageType.Stack ||
                  kind == PageType.Shared,
                  "unexpected page type");
    if (kind == PageType.NonGC) {
        // A managed pointer into the static data area.
        managedPtrsToStaticData++;
    } else {
        // A managed pointer into the stack area.
        managedPtrsToStack++;
    }
}
// Strips the two low tag (alignment) bits from stack slots; slots in
// static data are left unchanged and slots on GC pages are ignored
// (asserted only). The slot's own page -- not the referent -- is
// what is classified.
internal override unsafe void Visit(UIntPtr *loc) {
    UIntPtr slotPage = PageTable.Page((UIntPtr)loc);
    PageType slotKind = PageTable.Type(slotPage);
    bool traced = (slotKind == PageType.NonGC ||
                   slotKind == PageType.Stack);
    if (!traced) {
        VTable.Assert(PageTable.IsGcPage(slotPage),
                      @"PageTable.IsGcPage(pageLoc)");
        return;
    }
    if (slotKind == PageType.Stack) {
        // Clear the two low bits used as visit tags.
        *loc = (UIntPtr)((uint)*loc & 0xfffffffc);
    }
}
// Verification pass: every heap referent reached here must already
// be marked and must live on one of our pages. Foreign and non-heap
// pointers are skipped.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    // Ignore pointers out of our memory area.
    if (PageTable.IsForeignAddr(target)) {
        return;
    }
    UIntPtr targetPage = PageTable.Page(target);
    PageType kind = PageTable.Type(targetPage);
    if (!PageTable.IsGcPage(kind)) {
        return;
    }
    Object referent = Magic.fromAddress(target);
    VTable.Assert(referent.GcMark() != UIntPtr.Zero);
    VTable.Assert(PageTable.IsMyPage(targetPage));
}
// Records a thread-held (possibly interior) pointer: the queue
// receives the object's start address followed by the interior
// offset so the pointer can be reconstituted after the object moves.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    PageType kind = PageTable.Type(PageTable.Page(target));
    if (!PageTable.IsZombiePage(kind)) {
        VTable.Assert(PageTable.IsGcPage(kind) ||
                      PageTable.IsNonGcPage(kind) ||
                      PageTable.IsStackPage(kind) ||
                      PageTable.IsSharedPage(kind));
        return;
    }
    UIntPtr start = InteriorPtrTable.Find(target);
    this.threadPtrQueue.Write(start);
    this.threadPtrQueue.Write(target - start);
}
// Copied from InteriorPtrTable.cs
// Computes the size of the object at <addr>, coping with a header
// word that may be a forwarding pointer (it points into a GC page)
// or may carry low tag bits that must be masked off.
internal static unsafe UIntPtr ObjectSize(UIntPtr addr) {
    UIntPtr header = Allocator.GetObjectVTable(addr);
    if (PageTable.IsGcPage(PageTable.Page(header))) {
        // The vtable field is really a forwarding pointer; the
        // real vtable is found in the forwarded copy.
        header = Allocator.GetObjectVTable(header);
    } else {
        // Clear the lowest bits, if set.
        header &= ~((UIntPtr)3);
    }
    VTable vtable = Magic.toVTable(Magic.fromAddress(header));
    return(ObjectLayout.ObjectSize(addr, vtable));
}
// Records a thread-held (possibly interior) pointer through the
// shared registerThreadReferenceVisitor queue and sets bit 1 in the
// object's header word (presumably a pin/keep tag -- confirm against
// the header-bit consumers).
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    PageType kind = PageTable.Type(PageTable.Page(target));
    if (!PageTable.IsZombiePage(kind)) {
        VTable.Assert(PageTable.IsGcPage(kind) ||
                      PageTable.IsNonGcPage(kind) ||
                      PageTable.IsStackPage(kind) ||
                      PageTable.IsSharedPage(kind));
        return;
    }
    UIntPtr start = InteriorPtrTable.Find(target);
    registerThreadReferenceVisitor.threadPtrQueue.Write(start);
    registerThreadReferenceVisitor.threadPtrQueue.Write(target - start);
    // Tag the object's header word with bit 1.
    *Allocator.GetObjectVTableAddress(start) |= (UIntPtr)2U;
}
// Pushes <value> onto the current thread's header queue if it is an
// object on a GC page whose mark color differs from the current
// marked color. Returns true only when the object was newly queued;
// null values, already-marked objects, and non-GC referents return
// false. Compiled away unless the concurrent collector is in use.
internal static bool MarkIfNecessary(UIntPtr value) {
#if !SINGULARITY || CONCURRENT_MS_COLLECTOR
    if (value == 0) {
        return(false);
    }
    UIntPtr marked = markedColor;
    if (PageTable.IsGcPage(PageTable.Page(value)) &&
        ThreadHeaderQueue.GcMark(Magic.fromAddress(value)) != marked) {
        VTable.Assert(PageTable.IsMyPage(PageTable.Page(value)));
        Thread thread = Thread.CurrentThread;
        UIntPtr unmarked = unmarkedColor;
        // Push recolors the object and links it onto the per-thread
        // queue (see ThreadHeaderQueue.Push for the exact protocol).
        ThreadHeaderQueue.Push(thread, value, marked, unmarked);
        return(true);
    }
#endif // !SINGULARITY || CONCURRENT_MS_COLLECTOR
    return(false);
}
// Generational write-barrier bookkeeping: records the store at
// <addr> in the remembered set -- unconditionally under card
// marking, otherwise only when both sides are heap values and
// addrType > valType (presumably an older-to-younger reference --
// confirm the generation ordering of PageType values).
private static void ReferenceCheck(PageType addrType, UIntPtr *addr,
                                   Object value) {
    VTable.Assert(PageTable.IsGcPage(addrType));
    if (GC.remsetType == RemSetType.Cards) {
        // Card marking records every store; no generation test.
        GenerationalGCData.
            installedRemSet.RecordReference(addr, value);
        return;
    }
    UIntPtr valueAddr = Magic.addressOf(value);
    PageType valType = PageTable.Type(PageTable.Page(valueAddr));
    bool needsRemembering =
        PageTable.IsGcPage(valType) && (addrType > valType);
    if (needsRemembering) {
        GenerationalGCData.
            installedRemSet.RecordReference(addr, value);
    }
}
// Starts a backup-reference-count traversal from the object that
// contains the (possibly interior) pointer <*loc>; non-heap
// referents are only sanity-checked.
internal override unsafe void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    UIntPtr targetPage = PageTable.Page(target);
    if (PageTable.IsGcPage(targetPage)) {
        UIntPtr start = SegregatedFreeList.Find(target);
        incrementBackupRefCount.Traverse(start);
        return;
    }
    PageType kind = PageTable.Type(targetPage);
    VTable.Assert(kind == PageType.NonGC ||
                  kind == PageType.Stack,
                  @"pageType == PageType.NonGC || pageType == PageType.Stack");
}
// Forwards the reference in <loc> during a copying collection: a
// header word already pointing into the live generation is a
// forwarding pointer (the object has been copied), so it is
// installed directly; otherwise the object is copied now and the
// slot updated to the new copy's address.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr objAddr = *loc;
    UIntPtr page = PageTable.Page(objAddr);
    Trace.Log(Trace.Area.Pointer,
              "FwdRef: loc={0}, addr={1}, page={2}",
              __arglist(loc, objAddr, page));
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType),
                      "Semispace:ForwardReferenceVisitor");
        return;
    }
    PageType gen = PageTable.ZombieToLive(pageType);
    VTable.Assert(gen > MIN_GENERATION);
    Object obj = Magic.fromAddress(objAddr);
    UIntPtr vtableAddr = *obj.VTableFieldAddr;
    UIntPtr vtablePageIndex = PageTable.Page(vtableAddr);
    if (PageTable.Type(vtablePageIndex) == gen) {
        // The vtable field is really a forwarding pointer
        Trace.Log(Trace.Area.Pointer, "FwdRef: VT fwd: {0} -> {1}",
                  __arglist(objAddr, vtableAddr));
        *loc = vtableAddr;
        return;
    }
    // Not yet copied: copy into the target generation and install
    // the new address in the slot.
    Object newObject = copyScanners[(int)gen].Copy(obj);
    *loc = Magic.addressOf(newObject);
}
// Using DFS discovery/finishing timestamps, tests whether the edge
// from the current predecessor to this object closes a cycle: the
// predecessor's interval lying strictly inside this object's
// interval (dTime < predD < predF < fTime) indicates a back edge,
// and the object is then handed to backupRefCount.
internal override unsafe void Visit(UIntPtr *loc) {
    UIntPtr objAddr = *loc;
    UIntPtr page = PageTable.Page(objAddr);
    if (!PageTable.IsGcPage(page)) {
        PageType pageType = PageTable.Type(page);
        VTable.Assert(pageType == PageType.NonGC ||
                      pageType == PageType.Stack,
                      @"pageType == PageType.NonGC || pageType == PageType.Stack");
        return;
    }
    Object obj = Magic.fromAddress(objAddr);
    UIntPtr dTime = getDfsDiscoveryTime(obj);
    UIntPtr fTime = getDfsFinishingTime(obj);
    // All four timestamps must have been assigned by the DFS pass.
    VTable.Assert(this.predDiscoveryTime > UIntPtr.Zero &&
                  this.predFinishingTime > UIntPtr.Zero &&
                  dTime > UIntPtr.Zero &&
                  fTime > UIntPtr.Zero,
                  @"this.predDiscoveryTime > UIntPtr.Zero && this.predFinishingTime > UIntPtr.Zero && dTime > UIntPtr.Zero && fTime > UIntPtr.Zero");
    if (dTime < this.predDiscoveryTime &&
        this.predDiscoveryTime < this.predFinishingTime &&
        this.predFinishingTime < fTime) {
        // A back edge is incident on this node;
        // therefore, the node is part of a cycle.
        backupRefCount.Visit(&objAddr);
    }
}
// Verification pass: a heap referent must lie on one of our pages
// and must resolve to an object whose vtable word points into non-GC
// memory and is itself a VTable instance.
internal unsafe override void Visit(UIntPtr *loc) {
    UIntPtr target = *loc;
    UIntPtr targetPage = PageTable.Page(target);
    PageType kind = PageTable.Type(targetPage);
    if (!PageTable.IsGcPage(kind)) {
        VTable.Assert(PageTable.IsNonGcPage(kind) ||
                      PageTable.IsStackPage(kind) ||
                      PageTable.IsSharedPage(kind));
        return;
    }
    VTable.Assert(PageTable.IsMyPage(targetPage));
    // Resolve a possibly-interior pointer to its object start.
    UIntPtr start = GC.installedGC.FindObjectAddr(target);
    VTable.Assert(start <= target);
    UIntPtr vtablePtr =
        Magic.addressOf(Magic.fromAddress(start).vtable);
    VTable.Assert(PageTable.IsNonGcPage(
                      PageTable.Type(PageTable.Page(vtablePtr))));
    VTable.Assert(Magic.fromAddress(vtablePtr) is VTable);
}
// A page counts as "my GC page" when it both belongs to this
// process and is a garbage-collected page.
internal static bool IsMyGcPage(UIntPtr page) {
    bool mine = PageTable.IsMyPage(page);
    return(mine && PageTable.IsGcPage(page));
}
// Walks the entire page table in maximal runs of pages with the same
// type and owning process, and applies <objectVisitor> to every
// object in each GC data region; other region kinds are skipped or
// handed to a dedicated checker.
void VerifyPages(ObjectLayout.ObjectVisitor objectVisitor) {
    UIntPtr page = UIntPtr.Zero;
    while (page < PageTable.pageTableCount) {
        UIntPtr startPage = page;
        if (!PageTable.IsMyPage(startPage)) {
            page++;
            continue;
        }
        PageType pageType = PageTable.Type(page);
        uint pageProcess = PageTable.Process(page);
        // Extend the run while page type and owner stay the same.
        do {
            page++;
        } while (page < PageTable.pageTableCount &&
                 PageTable.Type(page) == pageType &&
                 PageTable.Process(page) == pageProcess);
        UIntPtr endPage = page;
        switch (pageType) {
          case PageType.Unallocated:
          case PageType.Unknown:
          case PageType.Shared: {
              // The region does not belong to us, so there is
              // nothing to check.
              break;
          }
          case PageType.UnusedClean:
          case PageType.UnusedDirty: {
              PageManager.VerifyUnusedRegion(startPage, endPage);
              break;
          }
          case PageType.System: {
              // We have looked at the region, but it is off-limits
              // for the verifier.
              break;
          }
          case PageType.NonGC: {
              // Since there may be non-objects in the static data
              // pages, we cannot apply the heapVerifier to the
              // region.
              break;
          }
          case PageType.Stack: {
              // The page contains (part of) the activation record
              // stack for one or more threads.
              break;
          }
          default: {
              // We have found a data region
              VTable.Assert(PageTable.IsGcPage(startPage));
              UIntPtr startAddr = PageTable.PageAddr(startPage);
              UIntPtr endAddr = PageTable.PageAddr(endPage);
              GC.installedGC.VisitObjects(objectVisitor,
                                          startAddr, endAddr);
              break;
          }
        }
    }
}
// Performs a bounded amount of incremental deallocation work: pulls
// blocks with zero reference counts off the delayed-deallocation
// list, decrements the counts of the objects they reference (at most
// deallocationSpan work chunks per call), frees fully-processed
// blocks, and finally recycles freed pages. Optionally records GC
// timing and heap watermarks.
protected static void deallocateObjects
    (NonNullReferenceVisitor decrementer) {
    int startTicks = 0;
    bool enableGCTiming = VTable.enableGCTiming;
    if (enableGCTiming) {
        // Disable nested timing while we measure this pass.
        VTable.enableGCTiming = false;
        startTicks = Environment.TickCount;
    }
    if (VTable.enableGCWatermarks) {
        MemoryAccounting.RecordHeapWatermarks();
    }
    // Set up a block to deallocate, if one doesn't exist.
    if (beingDeallocatedBlock == null &&
        delayedDeallocationList != null) {
        beingDeallocatedBlock = delayedDeallocationList;
        delayedDeallocationList =
            getNextLink(delayedDeallocationList);
        delayedDeallocationLength--;
        UIntPtr objAddr = Magic.addressOf(beingDeallocatedBlock);
        VTable vtable = beingDeallocatedBlock.vtable;
        initIncrementalDecrement(objAddr, vtable);
    }
    // Perform up to a constant number of work chunks on the
    // block being deallocated. A "work chunk" is either
    // decrementing up to a fixed number of references held in
    // an object, decrementing up to a fixed number of slots
    // if the object is an array, or reclaiming the object
    // after all decrements on its internal contents are done.
    for (uint workDone = 0;
         beingDeallocatedBlock != null &&
         workDone < deallocationSpan;
         workDone++) {
        // Continue work on block.
        UIntPtr objAddr = Magic.addressOf(beingDeallocatedBlock);
#if DEBUG
        UIntPtr page = PageTable.Page(objAddr);
        VTable.Assert(PageTable.IsGcPage(page),
                      @"PageTable.IsGcPage(page)");
#endif // DEBUG
        VTable vtable = beingDeallocatedBlock.vtable;
        // Nonzero means the block still has references left to
        // decrement; spend this work chunk and move on.
        if (incrementalDecrement(objAddr, vtable,
                                 decrementer) != 0) {
            continue;
        }
        // All decrements on contained references are over.
        Object obj = beingDeallocatedBlock;
        VTable.Assert((obj.REF_STATE & RSMasks.refCount) == 0,
                      @"(obj.REF_STATE & RSMasks.refCount) == 0");
#if DEBUG
        PLCLink *plcLinkAddr = GetPLCLink(obj);
        VTable.Assert(plcLinkAddr == null,
                      @"plcLinkAddr == null");
#endif // DEBUG
        SegregatedFreeList.Free(obj);
        // Set up block to work on next.
        beingDeallocatedBlock = delayedDeallocationList;
        if (delayedDeallocationList != null) {
            delayedDeallocationList =
                getNextLink(delayedDeallocationList);
            delayedDeallocationLength--;
            objAddr = Magic.addressOf(beingDeallocatedBlock);
            vtable = beingDeallocatedBlock.vtable;
            initIncrementalDecrement(objAddr, vtable);
        }
    }
    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    GC.newBytesSinceGC = UIntPtr.Zero;
    if (enableGCTiming) {
        // Accumulate pause statistics and restore timing flag.
        int elapsedTicks = Environment.TickCount - startTicks;
        System.GC.gcTotalTime += elapsedTicks;
        if (System.GC.maxPauseTime < elapsedTicks) {
            System.GC.maxPauseTime = elapsedTicks;
        }
        System.GC.pauseCount++;
        VTable.enableGCTiming = true;
    }
}