// Verifies heap consistency after a collection (RC verification builds
// only): recomputes reference counts from scratch into a "backup" count
// and reports storage that the collector has leaked.
//
// beforeCollection: supplied by the caller but not consulted in this
// body — NOTE(review): presumably kept for signature symmetry with a
// pre-collection verification hook; confirm against callers.
protected void postVerifyHeap(bool beforeCollection) {
    VTable.Assert(RCCollector.VerificationMode,
                  @"RCCollector.VerificationMode");
    // Flush pending free-list bookkeeping so the VisitAllObjects passes
    // below see a stable heap, and reset the allocation-volume counter.
    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    GC.newBytesSinceGC = UIntPtr.Zero;

    // Initialize the "backup" reference count.
    SegregatedFreeList.VisitAllObjects(backupInit);

    // Count all references and managed pointers.
    rootsScanner.Initialize(backupRefCount);
    CallStack.ScanStacks(rootsScanner, rootsScanner);
    Thread.VisitBootstrapData(rootsScanner);
    StaticData.ScanStaticData(rootsScanner);
    MultiUseWord.VisitStrongRefs(rootsScanner, false);

    // Undo per-root / per-object state left behind by the scan above
    // (per the visitor names) so the leak-analysis passes start clean.
    CallStack.ScanStacks(resetRoots, resetRoots);
    Thread.VisitBootstrapData(resetRoots);
    StaticData.ScanStaticData(resetRoots);
    SegregatedFreeList.VisitAllObjects(resetTraversal);

    // Actual leaks (refCount > 0 and backup refCount = 0).
    leakAccumulator.Initialize();
    SegregatedFreeList.VisitAllObjects(leakAccumulator);
    VTable.DebugPrint("Leaked storage: ");
    VTable.DebugPrint((int)leakAccumulator.Size);
    VTable.DebugPrint("B");

    if (VerifyLeakedCycles) {
        // Find leaked data that *should* have been reclaimed.
        // (If L is the set of all leaked nodes, and L' the
        // transitive closure of leaked cycles, then L-L' is
        // the set of nodes that should have been captured
        // by a pure RC collector.)
        SegregatedFreeList.VisitAllObjects(leakedNodesDFS);
        SegregatedFreeList.VisitAllObjects(resetTraversal);
        SegregatedFreeList.VisitAllObjects(leakedCycleClosure);
        SegregatedFreeList.VisitAllObjects(resetTraversal);
        // Re-accumulate: now only the acyclic portion remains flagged.
        leakAccumulator.Initialize();
        SegregatedFreeList.VisitAllObjects(leakAccumulator);
        VTable.DebugPrint(" (");
        VTable.DebugPrint((int)leakAccumulator.Size);
        VTable.DebugPrint("B acyclic)");
    }

    // Find the roots of leaked data.
    leakedRoots.Initialize();
    SegregatedFreeList.VisitAllObjects(leakedRoots);
    leakedRootsCounter.Initialize();
    SegregatedFreeList.VisitAllObjects(leakedRootsCounter);
    SegregatedFreeList.VisitAllObjects(resetTraversal);
    VTable.DebugPrint("; leaked heap roots: ");
    VTable.DebugPrint((int)leakedRootsCounter.Total);
    VTable.DebugPrint("\n");
}
// Applies <paramref name="visitor"/> to every object in the
// SegregatedFreeList heap, for heap profiling.
//
// In non-Singularity builds, asserts that the installed collector is
// one of the SegregatedFreeList-backed collectors, since only those
// keep all objects reachable via SegregatedFreeList.VisitAllObjects.
void ProfileScanObjects(SegregatedFreeList.ObjectVisitor visitor) {
#if !SINGULARITY
    // 'is' states the type test directly; the original relied on the
    // precedence of '(x as T) != null', which reads ambiguously.
    VTable.Assert((System.GC.installedGC is MarkSweepCollector) ||
                  (System.GC.installedGC is ConcurrentMSCollector) ||
                  (System.GC.installedGC is ReferenceCountingCollector) ||
                  (System.GC.installedGC is DeferredReferenceCountingCollector),
                  "ProfileScanObjects is only valid for MarkSweep, ConcurrentMS, " +
                  "ReferenceCounting, and DeferredReferenceCounting collectors");
#endif
    SegregatedFreeList.VisitAllObjects(visitor);
}
// Runs one full stop-the-world mark-sweep collection.  Phases:
// 1) mark live objects from all roots, 2) sweep dead objects and
// release pages, 3) recompute the next collection trigger.
//
// currentThreadIndex: index of the thread driving the collection
//   (not consulted in this body).
// generation: requested generation; this collector is non-generational
//   (it passes MinGeneration to the profiler below), so the value is
//   not consulted.
internal override void CollectStopped(int currentThreadIndex, int generation) {
#if SINGULARITY
#if DEBUG
#if THREAD_TIME_ACCOUNTING
    // Heap size before the collection, for the delta reported at the end.
    UIntPtr preGcTotalBytes = SegregatedFreeList.TotalBytes;
#endif
    DebugStub.WriteLine("~~~~~ Start MarkSweep Cleanup [data={0:x8}, pid={1:x3}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16));
#endif
    // Capture start timestamps; the source differs by build flavor
    // (kernel vs. process, with optional per-thread time accounting).
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
    TimeSpan ticks = Thread.CurrentThread.ExecutionTime;
    TimeSpan ticks2 = SystemClock.KernelUpTime;
#else
    TimeSpan ticks = SystemClock.KernelUpTime;
#endif
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
    TimeSpan ticks = ProcessService.GetThreadTime();
    TimeSpan ticks2 = ProcessService.GetUpTime();
#else
    TimeSpan ticks = ProcessService.GetUpTime();
#endif
#endif
#endif
    int before = 0;
    if (VTable.enableGCTiming) {
        before = Environment.TickCount;
    }
    if (GC.IsProfiling) {
        GcProfiler.NotifyPreGC(MinGeneration); // non-generational collector, so pretend Gen0
        // Calls like ResurrectCandidates below can cause
        // allocations and thus, potentially, profiler
        // notifications.  However, at that time the heap is
        // damaged in the sense that VPtrs have bits OR-ed in
        // for object marking.  We do not want to accept
        // profiling during this window.
        //
        // There is no synchronization issue with setting this
        // flag because it will only be consulted by the
        // thread that sets and resets it.
        HeapDamaged = true;
    }

    // 1) Mark the live objects
    CollectorStatistics.Event(GCEvent.TraceStart);
#if !VC
    TryAllManager.PreGCHookTryAll();
#endif
    MultiUseWord.PreGCHook(false /* don't use shadows */);
    Finalizer.PrepareCollectFinalizers();
    // Scan roots: thread stacks, bootstrap data, kernel special data,
    // multi-use-word strong refs, try-all state, and static fields.
    int countThreads =
        CallStack.ScanStacks(threadMarkReferenceVisitor,
                             threadMarkReferenceVisitor);
    Thread.VisitBootstrapData(markAndProcessReferenceVisitor);
#if SINGULARITY_KERNEL
    Kernel.VisitSpecialData(markAndProcessReferenceVisitor);
#endif
    MultiUseWord.VisitStrongRefs(markAndProcessReferenceVisitor,
                                 false /* Don't use shadows */);
#if !VC
    TryAllManager.VisitStrongRefs(markAndProcessReferenceVisitor);
#endif
    StaticData.ScanStaticData(markAndProcessReferenceVisitor);
    CollectorStatistics.Event(GCEvent.TraceSpecial);
    // First weak-ref pass, then give finalizable candidates a chance to
    // be resurrected (marking anything they keep alive).
    WeakReference.Process(updateReferenceVisitor, true, true);
    Finalizer.ResurrectCandidates(updateReferenceVisitor,
                                  markAndProcessReferenceVisitor, true);
    markReferenceVisitor.Cleanup();
    UnmanagedPageList.ReleaseStandbyPages();

    // 2) Sweep the garbage objects
    int afterTrace = 0;
    if (VTable.enableGCTiming) {
        afterTrace = Environment.TickCount;
    }
    CollectorStatistics.Event(GCEvent.SweepStart, TotalMemory);
    // Second weak-ref pass clears refs whose targets stayed unmarked.
    WeakReference.Process(updateReferenceVisitor, true, false);
    MultiUseWord.VisitWeakRefs(updateReferenceVisitor,
                               false /* Don't use shadows */);
#if !VC
    TryAllManager.VisitWeakRefs(updateReferenceVisitor);
#endif
    SegregatedFreeList.VisitAllObjects(sweepVisitor);
    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    CollectorStatistics.Event(GCEvent.SweepSpecial);
    MultiUseWord.PostGCHook();
    if (GC.IsProfiling) {
        HeapDamaged = false;
        // Allocations may occur inside the PostGCHook.  Hopefully a
        // sufficiently limited quantity that we don't recursively
        // trigger a GC.
        GcProfiler.NotifyPostGC(ProfileRoots, ProfileObjects);
    }
    Finalizer.ReleaseCollectFinalizers();
#if !VC
    TryAllManager.PostGCHookTryAll();
#endif
    CollectorStatistics.Event(GCEvent.CollectionComplete, TotalMemory);
    if (VTable.enableGCTiming) {
        int after = Environment.TickCount;
        numCollections++;
        traceTime += (afterTrace - before);
        sweepTime += (after - afterTrace);
    }

    // 3) Determine a new collection trigger
    // Next trigger = (TotalMemory / 4) clamped to [MinTrigger, MaxTrigger].
    UIntPtr testTrigger = (UIntPtr)this.TotalMemory >> 2;
    UIntPtr minTrigger = (UIntPtr)MinTrigger;
    UIntPtr maxTrigger = (UIntPtr)MaxTrigger;
    collectionTrigger =
        (testTrigger > minTrigger) ?
        (testTrigger < maxTrigger ? testTrigger : maxTrigger)
        : minTrigger;

#if SINGULARITY
    // Convert the start timestamps captured above into elapsed times and
    // report them (build-flavor dependent, mirroring the capture code).
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
    int procId = Thread.CurrentProcess.ProcessId;
    ticks = Thread.CurrentThread.ExecutionTime - ticks;
    ticks2 = SystemClock.KernelUpTime - ticks2;
#else
    ticks = SystemClock.KernelUpTime - ticks;
#endif
    //Thread.CurrentProcess.SetGcPerformanceCounters(ticks, (long) SegregatedFreeList.TotalBytes);
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
    ushort procId = ProcessService.GetCurrentProcessId();
    ticks = ProcessService.GetThreadTime() - ticks;
    ticks2 = ProcessService.GetUpTime() - ticks2;
#else
    ticks = ProcessService.GetUpTime() - ticks;
#endif
    //ProcessService.SetGcPerformanceCounters(ticks, (long) SegregatedFreeList.TotalBytes);
#endif
#if DEBUG
#if THREAD_TIME_ACCOUNTING
    DebugStub.WriteLine("~~~~~ Finish MarkSweep Cleanup [data={0:x8}, diff={7:x8} pid={1:x3}, ms(Thread)={2:d6}, ms(System)={3:d6}, thds={4}, procId={5}, tid={6}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16,
                                  ticks.Milliseconds,
                                  ticks2.Milliseconds,
                                  countThreads,
                                  procId,
                                  Thread.GetCurrentThreadIndex(),
                                  preGcTotalBytes - SegregatedFreeList.TotalBytes
                                  ));
#else
    DebugStub.WriteLine("~~~~~ Finish MarkSweep Cleanup [data={0:x8}, pid={1:x3}, ms={2:d6}, thds={3}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16,
                                  ticks.Milliseconds,
                                  countThreads));
#endif
#endif
#endif
}