// Tears down the heap: prints the accumulated GC timing statistics
// (when final-GC timing is enabled) and notifies the profiler of
// shutdown.  Output format and ordering are unchanged from the
// original; the repeated label/value/newline print pattern is
// factored into PrintGCStat.
internal override void DestructHeap() {
    base.DestructHeap();
    if (VTable.enableFinalGCTiming) {
        PrintGCStat("total trace time = ", (long) traceTime);
        PrintGCStat("total stw time = ", (long) (traceTime + sweepTime));
        PrintGCStat("total sweep time = ", (long) sweepTime);
        // This collector is non-incremental: each collection is one
        // stop-the-world pause containing one trace and one sweep, so
        // all three counts below are the same numCollections value.
        PrintGCStat("num traces = ", (long) numCollections);
        PrintGCStat("num stw = ", (long) numCollections);
        PrintGCStat("num sweeps = ", (long) numCollections);
    }
    if (GC.IsProfiling) {
        GcProfiler.NotifyShutdown();
    }
}

// Helper: emits one "<label><value>\n" statistic line on the debug
// output channel.
private static void PrintGCStat(String label, long value) {
    VTable.DebugPrint(label);
    VTable.DebugPrint(value);
    VTable.DebugPrint("\n");
}
// Reports a fresh allocation to the GC profiler.  Suppressed while
// profiling is disabled, and also while HeapDamaged is set (the
// marking phase's window in which vtable pointers carry mark bits).
internal override void ProfileAllocation(Object obj) {
    if (!GC.IsProfiling || HeapDamaged) {
        return;
    }
    UIntPtr objectSize = ObjectLayout.Sizeof(obj);
    GcProfiler.NotifyAllocation(Magic.addressOf(obj), obj.GetType(), objectSize);
}
// Performs one complete stop-the-world mark-sweep collection.
//
// Phases (numbered comments below):
//   1) Mark: scan stacks, bootstrap data, strong refs, and statics,
//      then process weak references and resurrect finalizable objects.
//   2) Sweep: clear remaining weak refs, visit all objects to reclaim
//      the dead ones, and recycle/commit the freed pages.
//   3) Recompute the collection trigger for the next cycle.
//
// Neither parameter is read in this body: the collector is
// non-generational (see the NotifyPreGC comment below), so
// 'generation' is ignored, and 'currentThreadIndex' is unused here.
internal override void CollectStopped(int currentThreadIndex, int generation) {
#if SINGULARITY
#if DEBUG
#if THREAD_TIME_ACCOUNTING
    // Heap size before collection, used only for the 'diff' field in
    // the finish-message below.
    UIntPtr preGcTotalBytes = SegregatedFreeList.TotalBytes;
#endif
    DebugStub.WriteLine("~~~~~ Start MarkSweep Cleanup [data={0:x8}, pid={1:x3}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16));
#endif
    // Snapshot start times; 'ticks' is thread or wall time depending on
    // THREAD_TIME_ACCOUNTING, 'ticks2' (when present) is system uptime.
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
    TimeSpan ticks = Thread.CurrentThread.ExecutionTime;
    TimeSpan ticks2 = SystemClock.KernelUpTime;
#else
    TimeSpan ticks = SystemClock.KernelUpTime;
#endif
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
    TimeSpan ticks = ProcessService.GetThreadTime();
    TimeSpan ticks2 = ProcessService.GetUpTime();
#else
    TimeSpan ticks = ProcessService.GetUpTime();
#endif
#endif
#endif
    // Start-of-collection tick count for the trace/sweep timers that
    // are accumulated near the end of this method.
    int before = 0;
    if (VTable.enableGCTiming) {
        before = Environment.TickCount;
    }
    if (GC.IsProfiling) {
        GcProfiler.NotifyPreGC(MinGeneration);
        // non-generational collector, so pretend Gen0
        //
        // Calls like ResurrectCandidates below can cause
        // allocations and thus, potentially, profiler
        // notifications.  However, at that time the heap is
        // damaged in the sense that VPtrs have bits OR-ed in
        // for object marking.  We do not want to accept
        // profiling during this window.
        //
        // There is no synchronization issue with setting this
        // flag because it will only be consulted by the
        // thread that sets and resets it.
        HeapDamaged = true;
    }
    // 1) Mark the live objects
    CollectorStatistics.Event(GCEvent.TraceStart);
#if !VC
    TryAllManager.PreGCHookTryAll();
#endif
    MultiUseWord.PreGCHook(false /* don't use shadows */);
    Finalizer.PrepareCollectFinalizers();
    // Scan all thread stacks for roots; returns the number of threads
    // scanned (reported in the finish-message below).
    int countThreads =
        CallStack.ScanStacks(threadMarkReferenceVisitor,
                             threadMarkReferenceVisitor);
    Thread.VisitBootstrapData(markAndProcessReferenceVisitor);
#if SINGULARITY_KERNEL
    Kernel.VisitSpecialData(markAndProcessReferenceVisitor);
#endif
    MultiUseWord.VisitStrongRefs(markAndProcessReferenceVisitor,
                                 false /* Don't use shadows */);
#if !VC
    TryAllManager.VisitStrongRefs(markAndProcessReferenceVisitor);
#endif
    StaticData.ScanStaticData(markAndProcessReferenceVisitor);
    CollectorStatistics.Event(GCEvent.TraceSpecial);
    // Process weak references and resurrect finalization candidates
    // before marking is finished, so resurrected objects get traced.
    WeakReference.Process(updateReferenceVisitor, true, true);
    Finalizer.ResurrectCandidates(updateReferenceVisitor,
                                  markAndProcessReferenceVisitor, true);
    markReferenceVisitor.Cleanup();
    UnmanagedPageList.ReleaseStandbyPages();
    // 2) Sweep the garbage objects
    // End-of-trace tick count; the span [before, afterTrace] is trace
    // time, [afterTrace, after] is sweep time.
    int afterTrace = 0;
    if (VTable.enableGCTiming) {
        afterTrace = Environment.TickCount;
    }
    CollectorStatistics.Event(GCEvent.SweepStart, TotalMemory);
    // Second weak-reference pass: clear refs whose targets were not
    // resurrected during the first pass.
    WeakReference.Process(updateReferenceVisitor, true, false);
    MultiUseWord.VisitWeakRefs(updateReferenceVisitor,
                               false /* Don't use shadows */);
#if !VC
    TryAllManager.VisitWeakRefs(updateReferenceVisitor);
#endif
    SegregatedFreeList.VisitAllObjects(sweepVisitor);
    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    CollectorStatistics.Event(GCEvent.SweepSpecial);
    MultiUseWord.PostGCHook();
    if (GC.IsProfiling) {
        // Marking is done, so vtable pointers are clean again and
        // profiling allocations is safe once more.
        HeapDamaged = false;
        // Allocations may occur inside the PostGCHook.  Hopefully a
        // sufficiently limited quantity that we don't recursively
        // trigger a GC.
        GcProfiler.NotifyPostGC(ProfileRoots, ProfileObjects);
    }
    Finalizer.ReleaseCollectFinalizers();
#if !VC
    TryAllManager.PostGCHookTryAll();
#endif
    CollectorStatistics.Event(GCEvent.CollectionComplete, TotalMemory);
    // Accumulate trace/sweep durations and the collection count for
    // the statistics printed by DestructHeap.
    if (VTable.enableGCTiming) {
        int after = Environment.TickCount;
        numCollections++;
        traceTime += (afterTrace - before);
        sweepTime += (after - afterTrace);
    }
    // 3) Determine a new collection trigger
    // Candidate trigger is a quarter of the post-collection heap size,
    // clamped into [MinTrigger, MaxTrigger].
    UIntPtr testTrigger = (UIntPtr)this.TotalMemory >> 2;
    UIntPtr minTrigger = (UIntPtr)MinTrigger;
    UIntPtr maxTrigger = (UIntPtr)MaxTrigger;
    collectionTrigger =
        (testTrigger > minTrigger) ?
        (testTrigger < maxTrigger ? testTrigger : maxTrigger) :
        minTrigger;
#if SINGULARITY
    // Convert the start-time snapshots taken above into elapsed times.
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
    int procId = Thread.CurrentProcess.ProcessId;
    ticks = Thread.CurrentThread.ExecutionTime - ticks;
    ticks2 = SystemClock.KernelUpTime - ticks2;
#else
    ticks = SystemClock.KernelUpTime - ticks;
#endif
    //Thread.CurrentProcess.SetGcPerformanceCounters(ticks, (long) SegregatedFreeList.TotalBytes);
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
    ushort procId = ProcessService.GetCurrentProcessId();
    ticks = ProcessService.GetThreadTime() - ticks;
    ticks2 = ProcessService.GetUpTime() - ticks2;
#else
    ticks = ProcessService.GetUpTime() - ticks;
#endif
    //ProcessService.SetGcPerformanceCounters(ticks, (long) SegregatedFreeList.TotalBytes);
#endif
#if DEBUG
#if THREAD_TIME_ACCOUNTING
    DebugStub.WriteLine("~~~~~ Finish MarkSweep Cleanup [data={0:x8}, diff={7:x8} pid={1:x3}, ms(Thread)={2:d6}, ms(System)={3:d6}, thds={4}, procId={5}, tid={6}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16,
                                  ticks.Milliseconds,
                                  ticks2.Milliseconds,
                                  countThreads,
                                  procId,
                                  Thread.GetCurrentThreadIndex(),
                                  preGcTotalBytes - SegregatedFreeList.TotalBytes
                                  ));
#else
    DebugStub.WriteLine("~~~~~ Finish MarkSweep Cleanup [data={0:x8}, pid={1:x3}, ms={2:d6}, thds={3}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16,
                                  ticks.Milliseconds,
                                  countThreads));
#endif
#endif
#endif
}