internal unsafe override void Visit(UIntPtr *loc)
{
    UIntPtr addr = *loc;
    // Ignore pointers out of our memory area
    if (PageTable.IsForeignAddr(addr)) {
        return;
    }
    UIntPtr page = PageTable.Page(addr);
    if (!PageTable.IsMyGcPage(page)) {
        PageType pageType = PageTable.Type(page);
#if SINGULARITY_PROCESS
        // We have to allow reference pointers to the
        // ThreadContext, which lives in the kernel space.
        VTable.Assert((PageTable.IsNonGcPage(pageType) &&
                       PageTable.IsMyPage(page)) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType) ||
                      (PageTable.IsGcPage(pageType) &&
                       PageTable.IsKernelPage(page)));
#else
        VTable.Assert((PageTable.IsNonGcPage(pageType) &&
                       PageTable.IsMyPage(page)) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
#endif
        return;
    }
    UIntPtr objectAddr = SegregatedFreeList.Find(addr);
    markAndProcessReferenceVisitor.Visit(&objectAddr);
}
protected void postVerifyHeap(bool beforeCollection)
{
    VTable.Assert(RCCollector.VerificationMode,
                  @"RCCollector.VerificationMode");

    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    GC.newBytesSinceGC = UIntPtr.Zero;

    // Initialize the "backup" reference count.
    SegregatedFreeList.VisitAllObjects(backupInit);

    // Count all references and managed pointers.
    rootsScanner.Initialize(backupRefCount);
    CallStack.ScanStacks(rootsScanner, rootsScanner);
    Thread.VisitBootstrapData(rootsScanner);
    StaticData.ScanStaticData(rootsScanner);
    MultiUseWord.VisitStrongRefs(rootsScanner, false);

    CallStack.ScanStacks(resetRoots, resetRoots);
    Thread.VisitBootstrapData(resetRoots);
    StaticData.ScanStaticData(resetRoots);
    SegregatedFreeList.VisitAllObjects(resetTraversal);

    // Actual leaks (refCount > 0 and backup refCount = 0).
    leakAccumulator.Initialize();
    SegregatedFreeList.VisitAllObjects(leakAccumulator);
    VTable.DebugPrint("Leaked storage: ");
    VTable.DebugPrint((int)leakAccumulator.Size);
    VTable.DebugPrint("B");

    if (VerifyLeakedCycles) {
        // Find leaked data that *should* have been reclaimed.
        // (If L is the set of all leaked nodes, and L' the
        // transitive closure of leaked cycles, then L-L' is
        // the set of nodes that should have been captured
        // by a pure RC collector.)
        SegregatedFreeList.VisitAllObjects(leakedNodesDFS);
        SegregatedFreeList.VisitAllObjects(resetTraversal);
        SegregatedFreeList.VisitAllObjects(leakedCycleClosure);
        SegregatedFreeList.VisitAllObjects(resetTraversal);
        leakAccumulator.Initialize();
        SegregatedFreeList.VisitAllObjects(leakAccumulator);
        VTable.DebugPrint(" (");
        VTable.DebugPrint((int)leakAccumulator.Size);
        VTable.DebugPrint("B acyclic)");
    }

    // Find the roots of leaked data.
    leakedRoots.Initialize();
    SegregatedFreeList.VisitAllObjects(leakedRoots);
    leakedRootsCounter.Initialize();
    SegregatedFreeList.VisitAllObjects(leakedRootsCounter);
    SegregatedFreeList.VisitAllObjects(resetTraversal);
    VTable.DebugPrint("; leaked heap roots: ");
    VTable.DebugPrint((int)leakedRootsCounter.Total);
    VTable.DebugPrint("\n");
}
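// The verification pass above is easier to see on a toy heap. The sketch
// below is a minimal model of the "backup reference count" idea, not the
// runtime's implementation: Node, sizeOf, and every name here are
// hypothetical, and the visitor plumbing (backupInit, rootsScanner,
// backupRefCount, resetTraversal) is collapsed into one traversal. The
// invariant checked matches the comment above: an object with refCount > 0
// that the trace never reaches (backup count still zero) is leaked storage.
using System;
using System.Collections.Generic;

static class RefCountVerificationSketch
{
    sealed class Node
    {
        internal int RefCount;          // maintained by the mutator
        internal int BackupRefCount;    // recomputed by the verifier
        internal bool Marked;           // trace mark, like obj.GcMark
        internal readonly List<Node> Fields = new List<Node>();
    }

    static long LeakedBytes(IList<Node> allObjects, IList<Node> roots,
                            Func<Node, long> sizeOf)
    {
        foreach (Node n in allObjects) {
            n.BackupRefCount = 0;
            n.Marked = false;
        }
        // Trace from the roots, bumping the backup count once per
        // reference discovered, and pushing newly marked objects.
        Stack<Node> workList = new Stack<Node>();
        foreach (Node r in roots) {
            r.BackupRefCount++;
            if (!r.Marked) { r.Marked = true; workList.Push(r); }
        }
        while (workList.Count > 0) {
            foreach (Node f in workList.Pop().Fields) {
                if (f == null) continue;
                f.BackupRefCount++;
                if (!f.Marked) { f.Marked = true; workList.Push(f); }
            }
        }
        // Leaked: alive according to the mutator's count, never reached.
        long leaked = 0;
        foreach (Node n in allObjects) {
            if (n.RefCount > 0 && n.BackupRefCount == 0) {
                leaked += sizeOf(n);
            }
        }
        return leaked;
    }
}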
internal override unsafe void Visit(UIntPtr *loc)
{
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    if (!PageTable.IsGcPage(page)) {
        PageType pageType = PageTable.Type(page);
        VTable.Assert(pageType == PageType.NonGC ||
                      pageType == PageType.Stack,
                      @"pageType == PageType.NonGC || pageType == PageType.Stack");
        return;
    }
    UIntPtr objAddr = SegregatedFreeList.Find(addr);
    Object obj = Magic.fromAddress(objAddr);
    UIntPtr count = getBackupRefcount(obj);
    setBackupRefcount(obj, count + 1);
    if (obj.GcMark((UIntPtr)1)) {
        this.workList.Write(objAddr);
    }
}
void VisitObjects(ObjectLayout.ObjectVisitor objectVisitor,
                  UIntPtr lowAddr,
                  UIntPtr highAddr)
{
    VTable.Assert(PageTable.PageAligned(lowAddr));
    VTable.Assert(PageTable.PageAligned(highAddr));
    UIntPtr lowPage = PageTable.Page(lowAddr);
    UIntPtr highPage = PageTable.Page(highAddr);
    SegregatedFreeList.VisitObjects(lowPage, highPage, objectVisitor);
}
void ProfileScanObjects(SegregatedFreeList.ObjectVisitor visitor)
{
#if !SINGULARITY
    VTable.Assert(
        ((System.GC.installedGC as MarkSweepCollector) != null) ||
        ((System.GC.installedGC as ConcurrentMSCollector) != null) ||
        ((System.GC.installedGC as ReferenceCountingCollector) != null) ||
        ((System.GC.installedGC as DeferredReferenceCountingCollector) != null),
        "ProfileScanObjects is only valid for MarkSweep, ConcurrentMS, " +
        "ReferenceCounting, and DeferredReferenceCounting collectors");
#endif
    SegregatedFreeList.VisitAllObjects(visitor);
}
internal override void VisitObjects(ObjectLayout.ObjectVisitor objVisitor,
                                    UIntPtr lowAddr,
                                    UIntPtr highAddr)
{
    UIntPtr lowPage = PageTable.Page(lowAddr);
    UIntPtr highPage = PageTable.Page(highAddr);
    SegregatedFreeList.VisitObjects(lowPage, highPage, objVisitor);
}
private UIntPtr AllocateObjectMemorySlow(UIntPtr numBytes,
                                         uint alignment,
                                         Thread currentThread)
{
    if (NewBytesSinceGCExceeds(collectionTrigger) &&
        GC.allocationGCInhibitCount == 0) {
        // REVIEW: This actually happens after the trigger...
        GC.InvokeCollection(currentThread);
    }
    return(SegregatedFreeList.AllocateSlow(currentThread,
                                           numBytes, alignment));
}
// Given a pointer to an object or into the PostHeader or payload
// parts of an object, return the address of the object.
internal static UIntPtr FindObjectForInteriorPtr(UIntPtr addr)
{
    UIntPtr result;
    if (SegregatedFreeList.IsGcPtr(addr)) {
        result = GC.installedGC.FindObjectAddr(addr);
    } else {
        result = UIntPtr.Zero;
    }
    return(result);
}
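// SegregatedFreeList.Find is what makes the interior-pointer lookup above
// cheap for small objects: pages are carved into equal-sized cells, so an
// interior address can be rounded down to its cell boundary. A sketch of
// that arithmetic only, under an assumed page layout (pageBase, cellSize,
// and firstCellOffset are illustrative, not the runtime's actual fields):
static ulong CellStart(ulong addr, ulong pageBase,
                       ulong firstCellOffset, ulong cellSize)
{
    ulong offset = addr - (pageBase + firstCellOffset);
    // Round down to the start of the enclosing cell.
    return pageBase + firstCellOffset + (offset / cellSize) * cellSize;
}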
internal override UIntPtr AllocateObjectMemory(UIntPtr numBytes,
                                               uint alignment,
                                               Thread currentThread)
{
    UIntPtr resultAddr =
        SegregatedFreeList.AllocateFast(currentThread, numBytes, alignment);
    if (resultAddr == UIntPtr.Zero) {
        resultAddr = this.AllocateObjectMemorySlow(numBytes, alignment,
                                                   currentThread);
    }
    return(resultAddr);
}
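// AllocateObjectMemory and AllocateObjectMemorySlow above form the usual
// two-level allocator: a fast path meant to be cheap enough to inline at
// allocation sites, and an out-of-line slow path that may trigger a
// collection before asking the shared allocator for more memory. A
// schematic version of the split, with a bump allocator standing in for
// SegregatedFreeList (all names here are hypothetical):
class FastSlowAllocatorSketch
{
    private readonly byte[] block = new byte[1 << 16];
    private int cursor;

    internal int Allocate(int numBytes, int alignment)
    {
        int addr = AllocateFast(numBytes, alignment);
        return (addr >= 0) ? addr : AllocateSlow(numBytes, alignment);
    }

    // Fast path: bump allocation from a (conceptually thread-local)
    // block; returns -1 on exhaustion, like AllocateFast returning zero.
    private int AllocateFast(int numBytes, int alignment)
    {
        int start = (cursor + alignment - 1) & ~(alignment - 1);
        if (start + numBytes > block.Length) {
            return -1;
        }
        cursor = start + numBytes;
        return start;
    }

    // Slow path: reached only on fast-path failure; this is where the
    // real collector checks its trigger and may collect before retrying.
    private int AllocateSlow(int numBytes, int alignment)
    {
        Collect();      // stand-in for GC.InvokeCollection
        cursor = 0;     // pretend the block was refilled
        return AllocateFast(numBytes, alignment);
    }

    private void Collect() { /* placeholder for a real collection */ }
}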
internal override UIntPtr VisitLarge(Object obj)
{
    UIntPtr objectSize =
        ObjectLayout.ObjectSize(Magic.addressOf(obj),
                                obj.GcUnmarkedVTable);
    if (!obj.GcMark(UIntPtr.Zero)) {
        // We did not change the color of the object back
        // to unmarked, so we are responsible for freeing it.
        SegregatedFreeList.FreeLarge(obj);
    }
    // REVIEW: Should we return a real size here?
    return(objectSize);
}
internal override unsafe void Visit(UIntPtr *loc)
{
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    if (!PageTable.IsGcPage(page)) {
        PageType pageType = PageTable.Type(page);
        VTable.Assert(pageType == PageType.NonGC ||
                      pageType == PageType.Stack,
                      @"pageType == PageType.NonGC || pageType == PageType.Stack");
        return;
    }
    UIntPtr objAddr = SegregatedFreeList.Find(addr);
    incrementBackupRefCount.Traverse(objAddr);
}
public static new void Initialize()
{
    StopTheWorldCollector.Initialize();
    SegregatedFreeList.Initialize();
    // instance = new MarkSweepCollector();
    MarkSweepCollector.instance = (MarkSweepCollector)
        BootstrapMemory.Allocate(typeof(MarkSweepCollector));
    // markReferenceVisitor = new MarkReferenceVisitor();
    markReferenceVisitor = (MarkReferenceVisitor)
        BootstrapMemory.Allocate(typeof(MarkReferenceVisitor));
    // markAndProcessReferenceVisitor = new MarkAndProcessReferenceVisitor();
    markAndProcessReferenceVisitor = (MarkAndProcessReferenceVisitor)
        BootstrapMemory.Allocate(typeof(MarkAndProcessReferenceVisitor));
    // updateReferenceVisitor = new UpdateReferenceVisitor();
    updateReferenceVisitor = (UpdateReferenceVisitor)
        BootstrapMemory.Allocate(typeof(UpdateReferenceVisitor));
    // threadMarkReferenceVisitor = new ThreadMarkReferenceVisitor();
    threadMarkReferenceVisitor = (ThreadMarkReferenceVisitor)
        BootstrapMemory.Allocate(typeof(ThreadMarkReferenceVisitor));
    // sweepVisitor = new SweepVisitor();
    sweepVisitor = (SweepVisitor)
        BootstrapMemory.Allocate(typeof(SweepVisitor));
    collectionTrigger = (UIntPtr)InitialTrigger;
}
internal override void NewThreadNotification(Thread newThread,
                                             bool initial)
{
    base.NewThreadNotification(newThread, initial);
    SegregatedFreeList.NewThreadNotification(newThread, initial);
}
internal override void DeadThreadNotification(Thread deadThread)
{
    MultiUseWord.CollectFromThread(deadThread);
    SegregatedFreeList.DeadThreadNotification(deadThread);
    base.DeadThreadNotification(deadThread);
}
internal override UIntPtr FindObjectAddr(UIntPtr interiorPtr)
{
    return(SegregatedFreeList.Find(interiorPtr));
}
private static unsafe void processPLCList()
{
    int startTicks = 0;
    bool enableGCTiming = VTable.enableGCTiming;
    if (enableGCTiming) {
        VTable.enableGCTiming = false;
        startTicks = Environment.TickCount;
    }
    if (VTable.enableGCWatermarks) {
        MemoryAccounting.RecordHeapWatermarks();
    }

#if DEBUG
    VTable.Assert(firstPLCLink->objAddr == UIntPtr.Zero,
                  @"firstPLCLink->objAddr == UIntPtr.Zero");
#endif // DEBUG

    // Let S be the subgraph of heap objects reachable from
    // the PLC list.  Decrement counts due to references in S.
    for (PLCLink *link = firstPLCLink->next; link != null;
         link = link->next) {
        UIntPtr objAddr = link->objAddr;
        VTable.Assert(objAddr != UIntPtr.Zero,
                      @"objAddr != UIntPtr.Zero");
        Object obj = Magic.fromAddress(objAddr);
        VTable.Assert((obj.REF_STATE & countingONFlagMask) != 0,
                      @"(obj.REF_STATE & countingONFlagMask) != 0");
        uint refState = obj.REF_STATE;
        if ((refState & markFlagMask) == 0) {
            obj.REF_STATE = refState | markFlagMask;
            internalDecrementer.Traverse(objAddr);
        }
    }

    // Objects that now have non-zero counts are those that
    // have references external to S incident on them.
    // Recompute counts due to reachability from such objects.
    for (PLCLink *link = firstPLCLink->next; link != null;
         link = link->next) {
        UIntPtr objAddr = link->objAddr;
        internalScanner.Traverse(objAddr);
    }

    // String together objects with reference count
    // of zero for reclamation.
    internalReclaimer.Initialize();
    for (PLCLink *link = firstPLCLink->next; link != null;
         link = link->next) {
        UIntPtr objAddr = link->objAddr;
        internalReclaimer.Traverse(objAddr);
    }

    ulong reclaimedBytes = 0;
    Object reclaimedObj = internalReclaimer.ReclaimedObjects;
    while (reclaimedObj != null) {
        if (VTable.enableGCProfiling) {
            UIntPtr size = ObjectLayout.Sizeof(reclaimedObj);
            reclaimedBytes += (ulong)size;
        }
        Object nextReclaimedObj = getNextLink(reclaimedObj);
        SegregatedFreeList.Free(reclaimedObj);
        reclaimedObj = nextReclaimedObj;
    }

    // Recycle the PLC list.
    if (firstPLCLink->next != null) {
        PLCLink *lastPLCLink = firstPLCLink;
        do {
            lastPLCLink = lastPLCLink->next;
        } while (lastPLCLink->next != null);
        lastPLCLink->next = plcListChunk;
        plcListChunk = firstPLCLink->next;
        firstPLCLink->next = null;
    }

    // Release the memory used up by work lists.
    UIntPtrQueue.ReleaseStandbyPages(null);

    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    GC.newBytesSinceGC = UIntPtr.Zero;

    if (enableGCTiming) {
        int elapsedTicks = Environment.TickCount - startTicks;
        System.GC.gcTotalTime += elapsedTicks;
        if (System.GC.maxPauseTime < elapsedTicks) {
            System.GC.maxPauseTime = elapsedTicks;
        }
        System.GC.pauseCount++;
        VTable.enableGCTiming = true;
    }

    if (VTable.enableGCProfiling) {
        if (maxCyclicGarbage < reclaimedBytes) {
            maxCyclicGarbage = reclaimedBytes;
        }
        totalCyclicGarbage += reclaimedBytes;
        cycleCollections++;
    }
}
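// processPLCList is a trial-deletion cycle collector in the style of
// Bacon and Rajan: decrement away the references internal to the subgraph,
// restore counts reachable from externally held objects, and reclaim what
// remains at zero. The sketch below models those three phases on plain
// objects; Node, the Color enum, and the recursive traversals are
// hypothetical simplifications of the visitor-based runtime code.
using System;
using System.Collections.Generic;

static class TrialDeletionSketch
{
    enum Color { Black, Gray, White }

    sealed class Node
    {
        internal int RefCount;
        internal Color Shade = Color.Black;
        internal readonly List<Node> Fields = new List<Node>();
    }

    static List<Node> CollectCycles(List<Node> candidates)
    {
        // Phase 1: trial-delete all references internal to the subgraph
        // reachable from the candidates (cf. internalDecrementer).
        foreach (Node c in candidates) MarkGray(c);
        // Phase 2: nodes that kept a positive count are held from outside
        // the subgraph; restore counts along everything they reach
        // (cf. internalScanner).
        foreach (Node c in candidates) Scan(c);
        // Phase 3: nodes still white are cyclic garbage
        // (cf. internalReclaimer); a real collector would free them here.
        List<Node> garbage = new List<Node>();
        foreach (Node c in candidates) CollectWhite(c, garbage);
        return garbage;
    }

    static void MarkGray(Node n)
    {
        if (n.Shade == Color.Gray) return;
        n.Shade = Color.Gray;
        foreach (Node f in n.Fields) { f.RefCount--; MarkGray(f); }
    }

    static void Scan(Node n)
    {
        if (n.Shade != Color.Gray) return;
        if (n.RefCount > 0) { ScanBlack(n); return; }
        n.Shade = Color.White;
        foreach (Node f in n.Fields) Scan(f);
    }

    static void ScanBlack(Node n)
    {
        n.Shade = Color.Black;
        foreach (Node f in n.Fields) {
            f.RefCount++;
            if (f.Shade != Color.Black) ScanBlack(f);
        }
    }

    static void CollectWhite(Node n, List<Node> garbage)
    {
        if (n.Shade != Color.White) return;
        n.Shade = Color.Black;
        foreach (Node f in n.Fields) CollectWhite(f, garbage);
        garbage.Add(n);
    }
}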
internal override void DeadThreadNotification(Thread deadThread)
{
    SegregatedFreeList.DeadThreadNotification(deadThread);
    base.DeadThreadNotification(deadThread);
}
protected static void deallocateObjects(NonNullReferenceVisitor decrementer)
{
    int startTicks = 0;
    bool enableGCTiming = VTable.enableGCTiming;
    if (enableGCTiming) {
        VTable.enableGCTiming = false;
        startTicks = Environment.TickCount;
    }
    if (VTable.enableGCWatermarks) {
        MemoryAccounting.RecordHeapWatermarks();
    }

    // Set up a block to deallocate, if one doesn't exist.
    if (beingDeallocatedBlock == null && delayedDeallocationList != null) {
        beingDeallocatedBlock = delayedDeallocationList;
        delayedDeallocationList = getNextLink(delayedDeallocationList);
        delayedDeallocationLength--;
        UIntPtr objAddr = Magic.addressOf(beingDeallocatedBlock);
        VTable vtable = beingDeallocatedBlock.vtable;
        initIncrementalDecrement(objAddr, vtable);
    }

    // Perform up to a constant number of work chunks on the
    // block being deallocated.  A "work chunk" is either
    // decrementing up to a fixed number of references held in
    // an object, decrementing up to a fixed number of slots
    // if the object is an array, or reclaiming the object
    // after all decrements on its internal contents are done.
    for (uint workDone = 0;
         beingDeallocatedBlock != null && workDone < deallocationSpan;
         workDone++) {
        // Continue work on block.
        UIntPtr objAddr = Magic.addressOf(beingDeallocatedBlock);
#if DEBUG
        UIntPtr page = PageTable.Page(objAddr);
        VTable.Assert(PageTable.IsGcPage(page),
                      @"PageTable.IsGcPage(page)");
#endif // DEBUG
        VTable vtable = beingDeallocatedBlock.vtable;
        if (incrementalDecrement(objAddr, vtable, decrementer) != 0) {
            continue;
        }

        // All decrements on contained references are over.
        Object obj = beingDeallocatedBlock;
        VTable.Assert((obj.REF_STATE & RSMasks.refCount) == 0,
                      @"(obj.REF_STATE & RSMasks.refCount) == 0");
#if DEBUG
        PLCLink *plcLinkAddr = GetPLCLink(obj);
        VTable.Assert(plcLinkAddr == null,
                      @"plcLinkAddr == null");
#endif // DEBUG
        SegregatedFreeList.Free(obj);

        // Set up block to work on next.
        beingDeallocatedBlock = delayedDeallocationList;
        if (delayedDeallocationList != null) {
            delayedDeallocationList = getNextLink(delayedDeallocationList);
            delayedDeallocationLength--;
            objAddr = Magic.addressOf(beingDeallocatedBlock);
            vtable = beingDeallocatedBlock.vtable;
            initIncrementalDecrement(objAddr, vtable);
        }
    }

    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    GC.newBytesSinceGC = UIntPtr.Zero;

    if (enableGCTiming) {
        int elapsedTicks = Environment.TickCount - startTicks;
        System.GC.gcTotalTime += elapsedTicks;
        if (System.GC.maxPauseTime < elapsedTicks) {
            System.GC.maxPauseTime = elapsedTicks;
        }
        System.GC.pauseCount++;
        VTable.enableGCTiming = true;
    }
}
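// The loop above bounds the work done per call: tearing an object down is
// split into fixed-size "work chunks" so that freeing a large structure
// cannot cause an unbounded pause. A sketch of that shape, where
// DeallocationSpan, ChunkSize, Node, and the resume cursor are
// hypothetical stand-ins for the runtime's incremental-decrement state:
using System;
using System.Collections.Generic;

static class IncrementalFreeSketch
{
    const int DeallocationSpan = 4;    // work chunks per invocation
    const int ChunkSize = 16;          // decrements per work chunk

    sealed class Node
    {
        internal readonly List<Node> Fields = new List<Node>();
        internal int NextField;        // resume point between invocations
    }

    static readonly Queue<Node> delayedList = new Queue<Node>();
    static Node beingFreed;

    static void DeallocateSome(Action<Node> decrementRef,
                               Action<Node> free)
    {
        for (int work = 0; work < DeallocationSpan; work++) {
            if (beingFreed == null) {
                if (delayedList.Count == 0) {
                    return;
                }
                beingFreed = delayedList.Dequeue();
            }
            // One chunk: decrement up to ChunkSize outgoing references,
            // remembering where to resume on the next call.
            int limit = Math.Min(beingFreed.NextField + ChunkSize,
                                 beingFreed.Fields.Count);
            while (beingFreed.NextField < limit) {
                decrementRef(beingFreed.Fields[beingFreed.NextField++]);
            }
            if (beingFreed.NextField == beingFreed.Fields.Count) {
                // All internal decrements done; reclaim and move on.
                free(beingFreed);
                beingFreed = null;
            }
        }
    }
}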
public static void Initialize()
{
    SegregatedFreeList.Initialize();
}
internal override void CollectStopped(int currentThreadIndex,
                                      int generation)
{
#if SINGULARITY
#if DEBUG
#if THREAD_TIME_ACCOUNTING
    UIntPtr preGcTotalBytes = SegregatedFreeList.TotalBytes;
#endif
    DebugStub.WriteLine("~~~~~ Start MarkSweep Cleanup [data={0:x8}, pid={1:x3}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16));
#endif
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
    TimeSpan ticks = Thread.CurrentThread.ExecutionTime;
    TimeSpan ticks2 = SystemClock.KernelUpTime;
#else
    TimeSpan ticks = SystemClock.KernelUpTime;
#endif
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
    TimeSpan ticks = ProcessService.GetThreadTime();
    TimeSpan ticks2 = ProcessService.GetUpTime();
#else
    TimeSpan ticks = ProcessService.GetUpTime();
#endif
#endif
#endif
    int before = 0;
    if (VTable.enableGCTiming) {
        before = Environment.TickCount;
    }
    if (GC.IsProfiling) {
        // Non-generational collector, so pretend Gen0.
        GcProfiler.NotifyPreGC(MinGeneration);
        // Calls like ResurrectCandidates below can cause
        // allocations and thus, potentially, profiler
        // notifications.  However, at that time the heap is
        // damaged in the sense that VPtrs have bits OR-ed in
        // for object marking.  We do not want to accept
        // profiling during this window.
        //
        // There is no synchronization issue with setting this
        // flag because it will only be consulted by the
        // thread that sets and resets it.
        HeapDamaged = true;
    }

    // 1) Mark the live objects
    CollectorStatistics.Event(GCEvent.TraceStart);
#if !VC
    TryAllManager.PreGCHookTryAll();
#endif
    MultiUseWord.PreGCHook(false /* don't use shadows */);
    Finalizer.PrepareCollectFinalizers();
    int countThreads =
        CallStack.ScanStacks(threadMarkReferenceVisitor,
                             threadMarkReferenceVisitor);
    Thread.VisitBootstrapData(markAndProcessReferenceVisitor);
#if SINGULARITY_KERNEL
    Kernel.VisitSpecialData(markAndProcessReferenceVisitor);
#endif
    MultiUseWord.VisitStrongRefs(markAndProcessReferenceVisitor,
                                 false /* Don't use shadows */);
#if !VC
    TryAllManager.VisitStrongRefs(markAndProcessReferenceVisitor);
#endif
    StaticData.ScanStaticData(markAndProcessReferenceVisitor);
    CollectorStatistics.Event(GCEvent.TraceSpecial);
    WeakReference.Process(updateReferenceVisitor, true, true);
    Finalizer.ResurrectCandidates(updateReferenceVisitor,
                                  markAndProcessReferenceVisitor,
                                  true);
    markReferenceVisitor.Cleanup();
    UnmanagedPageList.ReleaseStandbyPages();

    // 2) Sweep the garbage objects
    int afterTrace = 0;
    if (VTable.enableGCTiming) {
        afterTrace = Environment.TickCount;
    }
    CollectorStatistics.Event(GCEvent.SweepStart, TotalMemory);
    WeakReference.Process(updateReferenceVisitor, true, false);
    MultiUseWord.VisitWeakRefs(updateReferenceVisitor,
                               false /* Don't use shadows */);
#if !VC
    TryAllManager.VisitWeakRefs(updateReferenceVisitor);
#endif
    SegregatedFreeList.VisitAllObjects(sweepVisitor);
    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    CollectorStatistics.Event(GCEvent.SweepSpecial);
    MultiUseWord.PostGCHook();
    if (GC.IsProfiling) {
        HeapDamaged = false;
        // Allocations may occur inside the PostGCHook.  Hopefully a
        // sufficiently limited quantity that we don't recursively
        // trigger a GC.
        GcProfiler.NotifyPostGC(ProfileRoots, ProfileObjects);
    }
    Finalizer.ReleaseCollectFinalizers();
#if !VC
    TryAllManager.PostGCHookTryAll();
#endif
    CollectorStatistics.Event(GCEvent.CollectionComplete,
                              TotalMemory);
    if (VTable.enableGCTiming) {
        int after = Environment.TickCount;
        numCollections++;
        traceTime += (afterTrace - before);
        sweepTime += (after - afterTrace);
    }

    // 3) Determine a new collection trigger
    UIntPtr testTrigger = (UIntPtr)this.TotalMemory >> 2;
    UIntPtr minTrigger = (UIntPtr)MinTrigger;
    UIntPtr maxTrigger = (UIntPtr)MaxTrigger;
    collectionTrigger =
        (testTrigger > minTrigger) ?
        (testTrigger < maxTrigger ? testTrigger : maxTrigger) :
        minTrigger;

#if SINGULARITY
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
    int procId = Thread.CurrentProcess.ProcessId;
    ticks = Thread.CurrentThread.ExecutionTime - ticks;
    ticks2 = SystemClock.KernelUpTime - ticks2;
#else
    ticks = SystemClock.KernelUpTime - ticks;
#endif
    //Thread.CurrentProcess.SetGcPerformanceCounters(ticks, (long) SegregatedFreeList.TotalBytes);
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
    ushort procId = ProcessService.GetCurrentProcessId();
    ticks = ProcessService.GetThreadTime() - ticks;
    ticks2 = ProcessService.GetUpTime() - ticks2;
#else
    ticks = ProcessService.GetUpTime() - ticks;
#endif
    //ProcessService.SetGcPerformanceCounters(ticks, (long) SegregatedFreeList.TotalBytes);
#endif
#if DEBUG
#if THREAD_TIME_ACCOUNTING
    DebugStub.WriteLine("~~~~~ Finish MarkSweep Cleanup [data={0:x8}, diff={7:x8} pid={1:x3}, ms(Thread)={2:d6}, ms(System)={3:d6}, thds={4}, procId={5}, tid={6}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16,
                                  ticks.Milliseconds,
                                  ticks2.Milliseconds,
                                  countThreads,
                                  procId,
                                  Thread.GetCurrentThreadIndex(),
                                  preGcTotalBytes - SegregatedFreeList.TotalBytes));
#else
    DebugStub.WriteLine("~~~~~ Finish MarkSweep Cleanup [data={0:x8}, pid={1:x3}, ms={2:d6}, thds={3}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16,
                                  ticks.Milliseconds,
                                  countThreads));
#endif
#endif
#endif
}
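// The trigger computation in step 3 above is TotalMemory / 4, clamped to
// [MinTrigger, MaxTrigger]: the next collection fires after roughly a
// quarter of the surviving heap has been newly allocated, within fixed
// bounds. Restated as a standalone helper (the names are illustrative;
// the collector stores the result in collectionTrigger):
static ulong NextCollectionTrigger(ulong totalMemory,
                                   ulong minTrigger, ulong maxTrigger)
{
    ulong test = totalMemory >> 2;   // a quarter of the post-GC heap
    return (test > minTrigger)
        ? (test < maxTrigger ? test : maxTrigger)
        : minTrigger;
}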
internal override void VisitSmallPageEnd()
{
    SegregatedFreeList.FreeSmallList(ref tempList);
}