// Verifies the reference-counted heap by recomputing every object's
// reference count from scratch ("backup" counts) and comparing against
// the counts the RC collector maintained, then reports leaked storage.
// Only legal in verification mode. NOTE(review): the beforeCollection
// parameter is not read anywhere in this body — presumably kept for
// symmetry with a preVerifyHeap hook; confirm against callers.
protected void postVerifyHeap(bool beforeCollection) {
    VTable.Assert(RCCollector.VerificationMode, @"RCCollector.VerificationMode");
    // Flush pending free-list state so the object walk below sees a
    // committed view of the heap.
    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    GC.newBytesSinceGC = UIntPtr.Zero;
    // Initialize the "backup" reference count.
    SegregatedFreeList.VisitAllObjects(backupInit);
    // Count all references and managed pointers.
    rootsScanner.Initialize(backupRefCount);
    CallStack.ScanStacks(rootsScanner, rootsScanner);
    Thread.VisitBootstrapData(rootsScanner);
    StaticData.ScanStaticData(rootsScanner);
    MultiUseWord.VisitStrongRefs(rootsScanner, false);
    // Undo any marking state the root scan left behind so later
    // traversals start clean.
    CallStack.ScanStacks(resetRoots, resetRoots);
    Thread.VisitBootstrapData(resetRoots);
    StaticData.ScanStaticData(resetRoots);
    SegregatedFreeList.VisitAllObjects(resetTraversal);
    // Actual leaks (refCount > 0 and backup refCount = 0).
    leakAccumulator.Initialize();
    SegregatedFreeList.VisitAllObjects(leakAccumulator);
    VTable.DebugPrint("Leaked storage: ");
    VTable.DebugPrint((int)leakAccumulator.Size);
    VTable.DebugPrint("B");
    if (VerifyLeakedCycles) {
        // Find leaked data that *should* have been reclaimed.
        // (If L is the set of all leaked nodes, and L' the
        // transitive closure of leaked cycles, then L-L' is
        // the set of nodes that should have been captured
        // by a pure RC collector.)
        SegregatedFreeList.VisitAllObjects(leakedNodesDFS);
        SegregatedFreeList.VisitAllObjects(resetTraversal);
        SegregatedFreeList.VisitAllObjects(leakedCycleClosure);
        SegregatedFreeList.VisitAllObjects(resetTraversal);
        leakAccumulator.Initialize();
        SegregatedFreeList.VisitAllObjects(leakAccumulator);
        VTable.DebugPrint(" (");
        VTable.DebugPrint((int)leakAccumulator.Size);
        VTable.DebugPrint("B acyclic)");
    }
    // Find the roots of leaked data.
    leakedRoots.Initialize();
    SegregatedFreeList.VisitAllObjects(leakedRoots);
    leakedRootsCounter.Initialize();
    SegregatedFreeList.VisitAllObjects(leakedRootsCounter);
    SegregatedFreeList.VisitAllObjects(resetTraversal);
    VTable.DebugPrint("; leaked heap roots: ");
    VTable.DebugPrint((int)leakedRootsCounter.Total);
    VTable.DebugPrint("\n");
}
// Brings the CoCo (concurrent copying) heap online: sets up the
// interlock monitor, initializes the barrier, reads tuning knobs from
// environment variables, then starts the background copying thread.
// Order matters: the barrier and tunables must be configured before
// base.EnableHeap() and before cocoThread starts running CoCoLoop.
internal override void EnableHeap() {
    // Pre-inflate the monitor for the interlock object so locking it
    // later never has to allocate.
    interlock = new Object();
    MultiUseWord.GetMonitor(interlock);
    CoCoBarrier.InitLate();
    // REVIEW: add some bartok args instead
    // Tuning knobs, each overridable via an environment variable;
    // the first argument to EnvInt is the default.
    sizeFracLim = EnvInt(10, "COCO_SIZE_FRAC_LIM");
    sizeLim = EnvInt(-1, "COCO_SIZE_LIM");
    pageFragThres = EnvInt(2, "COCO_PAGE_FRAG_THRES");
    pinPenalty = EnvInt(10, "COCO_PIN_PENALTY");
    cocoDelay = EnvInt(8, "COCO_COPY_DELAY");
    // Debug overrides that force specific barrier modes.
    if (EnvInt(0, "COCO_FORCE_SLOW") != 0) {
        CoCoBarrier.ForceSlow();
    }
    if (EnvInt(0, "COCO_FORCE_NOT_IDLE") != 0) {
        CoCoBarrier.ForceNotIdle();
    }
    if (EnvInt(0, "COCO_FORCE_FORWARDING") != 0) {
        CoCoBarrier.ForceForwarding();
    }
    if (EnvInt(0, "COCO_FORCE_PINNING") != 0) {
        CoCoBarrier.ForcePinning();
    }
    base.EnableHeap();
    // Kick off the background thread that performs concurrent copying.
    cocoThread = new Thread(new ThreadStart(CoCoLoop));
    cocoThread.Start();
}
// Sanity-check visitor: asserts that the visited object's RuntimeType
// carries no leftover MultiUseWord mark, and returns the object's size
// so the heap walk can advance to the next object.
// Fix: the verbatim-string marker was misplaced inside the assert
// message ("@!Multi...") — moved it outside the quotes (@"!Multi...")
// to match the identical asserts elsewhere in this file.
internal override unsafe UIntPtr Visit(Object obj) {
    VTable vtable = obj.vtable;
    RuntimeType rType = vtable.vtableType;
    VTable.Assert(!MultiUseWord.IsMarked(rType),
                  @"!MultiUseWord.IsMarked(rType)");
    return(ObjectLayout.ObjectSize(Magic.addressOf(obj), vtable));
}
/// <summary>
/// Retrieve the Monitor associated with the given object via its
/// MultiUseWord (sync block); one is created on demand if the object
/// does not yet have a Monitor.
/// </summary>
private static Monitor GetMonitorFromObject(Object obj) {
    if (null == obj) {
        // Drop into the debugger before surfacing the error.
        DebugStub.Break();
        throw new ArgumentNullException("obj");
    }
    return MultiUseWord.GetMonitor(obj);
}
// A profiler can request a scan of all Roots, passing in a
// visitor for callback.
// The visitor is applied to every GC root source: thread stacks,
// bootstrap data, kernel special data (kernel builds only), strong
// MultiUseWord references, and static fields.
private void ProfileScanRoots(NonNullReferenceVisitor visitor) {
    CallStack.ScanStacks(visitor, visitor);
    Thread.VisitBootstrapData(visitor);
#if SINGULARITY_KERNEL
    Kernel.VisitSpecialData(visitor);
#endif
    MultiUseWord.VisitStrongRefs(visitor, false /* Don't use shadows */);
    StaticData.ScanStaticData(visitor);
}
// Accounting visitor: charges the visited object's size and count to
// the table slot previously assigned to its RuntimeType (the slot
// index was stashed in the type's MultiUseWord by the mapper pass).
// Returns the object's size so the heap walk can advance.
// Fix: ObjectLayout.ObjectSize was computed twice for the same object;
// it is now computed once and reused.
internal override unsafe UIntPtr Visit(Object obj) {
    VTable vtable = obj.vtable;
    RuntimeType rType = vtable.vtableType;
    // Slot index stored in the RuntimeType's MultiUseWord by the mapper.
    uint tableIndex = (uint)MultiUseWord.GetValForObject(rType);
    UIntPtr objAddr = Magic.addressOf(obj);
    UIntPtr objectSize = ObjectLayout.ObjectSize(objAddr, vtable);
    this.accounts[tableIndex].TotalSize += objectSize;
    this.accounts[tableIndex].Count++;
    return(objectSize);
}
// Counting visitor: counts each distinct RuntimeType seen during the
// heap walk, using the type's MultiUseWord mark bit to detect types
// that were already counted. Returns the visited object's size so the
// walk can advance to the next object.
internal override unsafe UIntPtr Visit(Object obj) {
    VTable vtable = obj.vtable;
    RuntimeType rType = vtable.vtableType;
    bool alreadyCounted = (MultiUseWord.IsMarked(rType) == this.isVisitedFlag);
    if (!alreadyCounted) {
        // First time this RuntimeType is seen in this pass.
        MultiUseWord.SetMark(rType, this.isVisitedFlag);
        this.Count++;
    }
    return ObjectLayout.ObjectSize(Magic.addressOf(obj), vtable);
}
// Processes a dead object discovered through a pointer slot: removes
// it from the zero-count table, runs the RC-death hook for any
// inflated MultiUseWord state it holds, and queues it for lazy
// deallocation. The three steps are order-dependent.
internal override unsafe void Visit(UIntPtr *loc) {
    UIntPtr objAddr = *loc;
    Object obj = Magic.fromAddress(objAddr);
    // 1. remove from ZCT
    Remove(obj);
    // 2. decrement RC on objects retained via multiuseword
    MultiUseWord muw = MultiUseWord.GetForObject(obj);
    if (muw.IsMonitorOrInflatedTag()) {
        MultiUseWord.RefCountGCDeadObjHook(muw);
    }
    // 3. add to deallocation list
    DeferredReferenceCountingCollector.deallocateLazily(obj);
}
// call when the heap is inited
// Late initialization for CoCo: pre-inflates the interlock monitor,
// marks the current thread ready for CoCo, computes the byte offsets
// of the GC link field and the vtable field within an object header
// (by subtracting the object's base address from each field's
// address), then runs the instance stub and flips the inited flag.
internal static void InitLate() {
    if (fDebug) {
        VTable.DebugPrint("CoCo: in InitLate\n");
    }
    // Pre-inflate the monitor so later locking never allocates.
    interlock = new Object();
    MultiUseWord.GetMonitor(interlock);
    MixinThread(Thread.CurrentThread).readyForCoCo = true;
    // REVIEW: this is just offensive
    // Offset of the ThreadHeaderQueue link field relative to the
    // object's base address, derived via raw pointer arithmetic.
    GCFieldOffset = (UIntPtr)Magic.toPointer(ref ThreadHeaderQueue.MixinObject(instance).preHeader.link)
        - Magic.addressOf(instance);
    // Offset of the vtable field relative to the object's base address.
    vtableFieldOffset = (UIntPtr)instance.VTableFieldAddr
        - Magic.addressOf(instance);
    instance.InitLateStub();
    inited = true;
}
// Mapping visitor: assigns the next free accounting-table slot to each
// RuntimeType not yet seen in this pass (detected via the type's
// MultiUseWord mark bit), zeroing the slot's totals. Returns the
// visited object's size so the heap walk can advance.
internal override unsafe UIntPtr Visit(Object obj) {
    VTable vtable = obj.vtable;
    RuntimeType rType = vtable.vtableType;
    if (MultiUseWord.IsMarked(rType) != this.isVisitedFlag) {
        // The reckoner pass counted the types, so we must not run
        // past the end of the table sized from that count.
        VTable.Assert(this.tableIndex < this.accounts.Length,
                      @"this.tableIndex < this.accounts.Length");
        this.accounts[this.tableIndex].RuntimeTypeObject = rType;
        this.accounts[this.tableIndex].TotalSize = UIntPtr.Zero;
        this.accounts[this.tableIndex].Count = 0;
        // Mark the type so it is mapped only once.
        MultiUseWord.SetMark(rType, this.isVisitedFlag);
        this.tableIndex++;
    }
    return(ObjectLayout.ObjectSize(Magic.addressOf(obj), vtable));
}
// Reclaims per-thread collector state when a thread dies: collects the
// thread's MultiUseWord structures, releases its segregated-free-list
// allocation context, then defers to the base collector.
internal override void DeadThreadNotification(Thread deadThread) {
    MultiUseWord.CollectFromThread(deadThread);
    SegregatedFreeList.DeadThreadNotification(deadThread);
    base.DeadThreadNotification(deadThread);
}
// Prints a heap report: total GC page count, then a per-RuntimeType
// table of instance counts and byte totals, sorted by decreasing size.
// Works in four passes over the heap (count types, map types to table
// slots via each RuntimeType's MultiUseWord, tally instances, report),
// then restores the borrowed MultiUseWords and clears the table.
// Fix: the verbatim-string marker in the final assert message was
// misplaced inside the quotes ("@!Multi...") — now @"!Multi..." to
// match the identical assert earlier in this method.
internal static void ReportHeapDetails() {
    VTable.DebugPrint("\nHeap details:\n");
    uint pageCount = 0;
    for (UIntPtr i = UIntPtr.Zero; i < PageTable.pageTableCount; i++) {
        if (PageTable.IsMyGcPage(i)) {
            pageCount++;
        }
    }
    VTable.DebugPrint("\tTotal number of heap pages: {0}",
                      __arglist(pageCount));
    // The following obtains counts of heap objects against types.
    UIntPtr lowPage = UIntPtr.Zero;
    UIntPtr highPage = PageTable.pageTableCount;
    assertRuntimeTypeHeaders(lowPage, highPage);
    // First count the RuntimeType instances for heap objects.
    runtimeTypeReckoner.Initialize(true);
    visitAllObjects(forGCRuntimeTypeReckoner, lowPage, highPage);
    // Next, create a table for RuntimeType instance accounting.
    // NOTE: Storage for the table is marked "non-GC". Since
    // static data accounting is done before this, it's okay.
    int numSlots = runtimeTypeReckoner.Count;
    if (numSlots > TABLE_SIZE) {
        VTable.DebugPrint("Need {0} slots, have {1}\n",
                          __arglist(numSlots, TABLE_SIZE));
        VTable.NotReached("MemoryAccounting table not large enough");
    }
    // Associate a table slot for each RuntimeType instance.
    runtimeTypeMapper.Initialize(false, MemoryAccounting.table);
    visitAllObjects(forGCRuntimeTypeMapper, lowPage, highPage);
    // Map each relevant RuntimeType instance to its table slot by
    // stashing the slot index in the type's MultiUseWord (the original
    // word is saved so it can be restored below).
    for (uint i = 0; i < numSlots; i++) {
        RuntimeType rType = MemoryAccounting.table[i].RuntimeTypeObject;
        VTable.Assert(!MultiUseWord.IsMarked(rType),
                      @"!MultiUseWord.IsMarked(rType)");
        MemoryAccounting.table[i].SavedMUW = MultiUseWord.GetForObject(rType);
        MultiUseWord.SetValForObject(rType, (UIntPtr)i);
    }
    // Count heap object instances by RuntimeType using table.
    instanceReckoner.Initialize(MemoryAccounting.table);
    visitAllObjects(forGCInstanceReckoner, lowPage, highPage);
    // Bubble sort the table in decreasing order of total size.
    // (Fine here: the table is small and this is a debug report path.)
    for (int i = 0; i < numSlots; i++) {
        for (int j = numSlots - 1; j > i; j--) {
            if (MemoryAccounting.table[j].TotalSize >
                MemoryAccounting.table[j - 1].TotalSize) {
                // Swap contents.
                RuntimeTypeAccounting temp = MemoryAccounting.table[j];
                MemoryAccounting.table[j] = MemoryAccounting.table[j - 1];
                MemoryAccounting.table[j - 1] = temp;
            }
        }
    }
    // Display table, skipping types with under 1KB of total storage.
    VTable.DebugPrint("\n\tCounts of objects against types:\n");
    for (uint i = 0; i < numSlots; i++) {
        if ((uint)MemoryAccounting.table[i].TotalSize < 1024) {
            continue;
        }
        VTable.DebugPrint
            ("\t\t{0,36} instances: {1,6}, bytes: {2,10}\n",
             __arglist(MemoryAccounting.table[i].RuntimeTypeObject.Name,
                       (uint)MemoryAccounting.table[i].Count,
                       (uint)MemoryAccounting.table[i].TotalSize));
    }
    // Reset book-keeping information maintained in headers and the global
    // table.
    for (uint i = 0; i < numSlots; i++) {
        RuntimeType rType = MemoryAccounting.table[i].RuntimeTypeObject;
        MultiUseWord.SetForObject
            (rType, MemoryAccounting.table[i].SavedMUW);
        VTable.Assert(!MultiUseWord.IsMarked(rType),
                      @"!MultiUseWord.IsMarked(rType)");
        MemoryAccounting.table[i].RuntimeTypeObject = null;
        MemoryAccounting.table[i].SavedMUW = new MultiUseWord(new UIntPtr(0));
        MemoryAccounting.table[i].TotalSize = new UIntPtr(0);
        MemoryAccounting.table[i].Count = 0;
    }
}
// Runs a full stop-the-world mark-sweep collection: (1) marks live
// objects from all roots, (2) sweeps garbage and releases pages,
// (3) recomputes the next collection trigger. The phase order and the
// exact sequencing of weak-reference/finalizer processing are
// load-bearing — do not reorder.
internal override void CollectStopped(int currentThreadIndex, int generation) {
#if SINGULARITY
#if DEBUG
#if THREAD_TIME_ACCOUNTING
    UIntPtr preGcTotalBytes = SegregatedFreeList.TotalBytes;
#endif
    DebugStub.WriteLine("~~~~~ Start MarkSweep Cleanup [data={0:x8}, pid={1:x3}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16));
#endif
    // Capture start times; the sources differ between kernel and
    // process builds.
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
    TimeSpan ticks = Thread.CurrentThread.ExecutionTime;
    TimeSpan ticks2 = SystemClock.KernelUpTime;
#else
    TimeSpan ticks = SystemClock.KernelUpTime;
#endif
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
    TimeSpan ticks = ProcessService.GetThreadTime();
    TimeSpan ticks2 = ProcessService.GetUpTime();
#else
    TimeSpan ticks = ProcessService.GetUpTime();
#endif
#endif
#endif
    int before = 0;
    if (VTable.enableGCTiming) {
        before = Environment.TickCount;
    }
    if (GC.IsProfiling) {
        GcProfiler.NotifyPreGC(MinGeneration);
        // non-generational collector, so pretend Gen0
        // Calls like ResurrectCandidates below can cause
        // allocations and thus, potentially, profiler
        // notifications. However, at that time the heap is
        // damaged in the sense that VPtrs have bits OR-ed in
        // for object marking. We do not want to accept
        // profiling during this window.
        //
        // There is no synchronization issue with setting this
        // flag because it will only be consulted by the
        // thread that sets and resets it.
        HeapDamaged = true;
    }
    // 1) Mark the live objects
    CollectorStatistics.Event(GCEvent.TraceStart);
#if !VC
    TryAllManager.PreGCHookTryAll();
#endif
    MultiUseWord.PreGCHook(false /* don't use shadows */);
    Finalizer.PrepareCollectFinalizers();
    int countThreads =
        CallStack.ScanStacks(threadMarkReferenceVisitor,
                             threadMarkReferenceVisitor);
    Thread.VisitBootstrapData(markAndProcessReferenceVisitor);
#if SINGULARITY_KERNEL
    Kernel.VisitSpecialData(markAndProcessReferenceVisitor);
#endif
    MultiUseWord.VisitStrongRefs(markAndProcessReferenceVisitor,
                                 false /* Don't use shadows */);
#if !VC
    TryAllManager.VisitStrongRefs(markAndProcessReferenceVisitor);
#endif
    StaticData.ScanStaticData(markAndProcessReferenceVisitor);
    CollectorStatistics.Event(GCEvent.TraceSpecial);
    WeakReference.Process(updateReferenceVisitor, true, true);
    Finalizer.ResurrectCandidates(updateReferenceVisitor,
                                  markAndProcessReferenceVisitor, true);
    markReferenceVisitor.Cleanup();
    UnmanagedPageList.ReleaseStandbyPages();
    // 2) Sweep the garbage objects
    int afterTrace = 0;
    if (VTable.enableGCTiming) {
        afterTrace = Environment.TickCount;
    }
    CollectorStatistics.Event(GCEvent.SweepStart, TotalMemory);
    WeakReference.Process(updateReferenceVisitor, true, false);
    MultiUseWord.VisitWeakRefs(updateReferenceVisitor,
                               false /* Don't use shadows */);
#if !VC
    TryAllManager.VisitWeakRefs(updateReferenceVisitor);
#endif
    SegregatedFreeList.VisitAllObjects(sweepVisitor);
    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    CollectorStatistics.Event(GCEvent.SweepSpecial);
    MultiUseWord.PostGCHook();
    if (GC.IsProfiling) {
        HeapDamaged = false;
        // Allocations may occur inside the PostGCHook. Hopefully a
        // sufficiently limited quantity that we don't recursively
        // trigger a GC.
        GcProfiler.NotifyPostGC(ProfileRoots, ProfileObjects);
    }
    Finalizer.ReleaseCollectFinalizers();
#if !VC
    TryAllManager.PostGCHookTryAll();
#endif
    CollectorStatistics.Event(GCEvent.CollectionComplete, TotalMemory);
    if (VTable.enableGCTiming) {
        int after = Environment.TickCount;
        numCollections++;
        traceTime += (afterTrace - before);
        sweepTime += (after - afterTrace);
    }
    // 3) Determine a new collection trigger: a quarter of live memory,
    // clamped to [MinTrigger, MaxTrigger].
    UIntPtr testTrigger = (UIntPtr)this.TotalMemory >> 2;
    UIntPtr minTrigger = (UIntPtr)MinTrigger;
    UIntPtr maxTrigger = (UIntPtr)MaxTrigger;
    collectionTrigger =
        (testTrigger > minTrigger) ?
        (testTrigger < maxTrigger ? testTrigger : maxTrigger) :
        minTrigger;
#if SINGULARITY
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
    int procId = Thread.CurrentProcess.ProcessId;
    ticks = Thread.CurrentThread.ExecutionTime - ticks;
    ticks2 = SystemClock.KernelUpTime - ticks2;
#else
    ticks = SystemClock.KernelUpTime - ticks;
#endif
    //Thread.CurrentProcess.SetGcPerformanceCounters(ticks, (long) SegregatedFreeList.TotalBytes);
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
    ushort procId = ProcessService.GetCurrentProcessId();
    ticks = ProcessService.GetThreadTime() - ticks;
    ticks2 = ProcessService.GetUpTime() - ticks2;
#else
    ticks = ProcessService.GetUpTime() - ticks;
#endif
    //ProcessService.SetGcPerformanceCounters(ticks, (long) SegregatedFreeList.TotalBytes);
#endif
#if DEBUG
#if THREAD_TIME_ACCOUNTING
    DebugStub.WriteLine("~~~~~ Finish MarkSweep Cleanup [data={0:x8}, diff={7:x8} pid={1:x3}, ms(Thread)={2:d6}, ms(System)={3:d6}, thds={4}, procId={5}, tid={6}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16,
                                  ticks.Milliseconds,
                                  ticks2.Milliseconds,
                                  countThreads,
                                  procId,
                                  Thread.GetCurrentThreadIndex(),
                                  preGcTotalBytes - SegregatedFreeList.TotalBytes
                                  ));
#else
    DebugStub.WriteLine("~~~~~ Finish MarkSweep Cleanup [data={0:x8}, pid={1:x3}, ms={2:d6}, thds={3}]",
                        __arglist(SegregatedFreeList.TotalBytes,
                                  PageTable.processTag >> 16,
                                  ticks.Milliseconds,
                                  countThreads));
#endif
#endif
#endif
}
// Runs a compacting collection for the given generation: (1) marks
// live objects from stacks, remembered sets (for non-full
// collections), bootstrap data, strong refs, and statics; (2) forwards
// pointers and slides live objects together; then resets collector
// state. Sequencing is load-bearing — remembered-set processing must
// happen while Thread.threadTable is intact, and stack re-scanning
// happens only after pointers have been forwarded.
internal override void CollectGeneration(int generation, UIntPtr generationPageCount) {
    VTable.Assert(IsValidGeneration(generation));
    // 1) Mark the live objects
    CollectorStatistics.Event(GCEvent.TraceStart);
    MultiUseWord.PreGCHook(true /* use shadows */);
    Finalizer.PrepareCollectFinalizers();
    CallStack.ScanStacks(registerThreadReferenceVisitor,
                         registerPinnedReferenceVisitor);
    registerThreadReferenceVisitor.ForwardReferences();
    if (generation < (int)MAX_GENERATION) {
        // These calls must be done early, as they rely on the
        // contents of Thread.threadTable being intact.
        installedRemSet.Clean();
        installedRemSet.Uniquify();
        this.ScanRemSet((PageType)generation);
    }
    // Process runtime data that is allocated from SystemMemory
    Thread.VisitBootstrapData(markReferenceVisitor);
#if SINGULARITY_KERNEL
    Kernel.VisitSpecialData(markReferenceVisitor);
#endif
    MultiUseWord.VisitStrongRefs(markReferenceVisitor,
                                 true /* use shadows */);
    StaticData.ScanStaticData(markReferenceVisitor);
    CollectorStatistics.Event(GCEvent.TraceSpecial);
    WeakReference.Process(forwardReferenceVisitor, false, true);
    Finalizer.ResurrectCandidates(forwardReferenceVisitor,
                                  markReferenceVisitor, false);
    markReferenceVisitor.Cleanup();
    // 2) Forward pointers and compact the live objects
    CollectorStatistics.Event(GCEvent.SweepStart, TotalMemory);
    WeakReference.Process(forwardReferenceVisitor, false, false);
    MultiUseWord.VisitWeakRefs(forwardReferenceVisitor,
                               true /* use shadows */);
    UIntPtr oldAllocPtr;
    UIntPtr newLimit = this.ForwardReferences((PageType)generation,
                                              out oldAllocPtr);
    this.CompactHeapObjects(oldAllocPtr);
#if SINGULARITY
    Thread.UpdateAfterGC();
#endif
    Thread currentThread = Thread.threadTable[collectorThreadIndex];
#if SINGULARITY_KERNEL
    Kernel.UpdateAfterGC(currentThread);
#endif
    // Re-scan stacks now that objects have moved, to update the
    // references held in thread frames.
    CallStack.ScanStacks(updateThreadReferenceVisitor,
                         updateThreadReferenceVisitor);
    this.CompactPhaseCleanup(currentThread, (PageType)generation,
                             newLimit);
    // Resetting the GC state
    CollectorStatistics.Event(GCEvent.SweepSpecial);
    installedRemSet.Reset();
    MultiUseWord.PostGCHook();
    Finalizer.ReleaseCollectFinalizers();
    CollectorStatistics.Event(GCEvent.CollectionComplete, TotalMemory);
}
// Runs a copying collection for the given generation: processes pinned
// pages first, then copies reachable objects from every root source
// into the destination generation, handles weak references and
// finalizable objects, and finally tears down the per-generation copy
// scanners and pinned-page bookkeeping.
// NOTE(review): fromSpacePageCountTotal is computed below but never
// read in this body — possibly leftover from removed logic; confirm
// before deleting.
internal override void CollectGeneration(int generation, UIntPtr generationPageCount) {
    UIntPtr fromSpacePageCountTotal = UIntPtr.Zero;
    for (int i = MinGeneration; i <= generation; i++) {
        fromSpacePageCountTotal += fromSpacePageCounts[i];
    }
    // Objects are promoted at most one generation per collection.
    PageType maxDestGeneration =
        (generation < (int)MAX_GENERATION) ?
        (PageType)(generation + 1) :
        MAX_GENERATION;
    // The real work: find the roots, copy reachable objects
    CollectorStatistics.Event(GCEvent.ComputeRootSet);
#if !VC
    TryAllManager.PreGCHookTryAll();
#endif
    // A nursery-only collection only needs to scan new EMUs.
    bool onlyNew = false;
    if (generation == (int)nurseryGeneration) {
        onlyNew = true;
    }
    MultiUseWord.PreGCHook(false, /* don't use shadows */
                           onlyNew /* only scan new EMUs */);
    Finalizer.PrepareCollectFinalizers();
    // Pinned references are found and their pages processed before
    // the general stack scan, since pinned objects must not move.
    CallStack.ScanStacks(null, pinnedReferenceVisitor);
    pinnedReferenceVisitor.ProcessPinnedPages(generalReferenceVisitor);
    CallStack.ScanStacks(threadReferenceVisitor, null);
    Thread.VisitBootstrapData(generalReferenceVisitor);
#if SINGULARITY
    Thread.UpdateAfterGC();
#endif
    Thread currentThread = Thread.threadTable[collectorThreadIndex];
#if SINGULARITY_KERNEL
    Kernel.VisitSpecialData(generalReferenceVisitor);
    Kernel.UpdateAfterGC(currentThread);
#endif
    MultiUseWord.VisitStrongRefs(generalReferenceVisitor,
                                 false /* Don't use shadows */);
#if !VC
    TryAllManager.VisitStrongRefs(generalReferenceVisitor);
#endif
    StaticData.ScanStaticData(generalReferenceVisitor);
    // For partial collections, older-to-younger pointers recorded in
    // the remembered set are additional roots.
    if ((PageType)generation < MAX_GENERATION) {
        installedRemSet.Clean();
        this.ScanRemSet((PageType)generation);
    }
    installedRemSet.Reset();
    CollectorStatistics.Event(GCEvent.TraceStart);
    this.ScanAndCopy(maxDestGeneration);
    CollectorStatistics.Event(GCEvent.TraceSpecial);
    WeakReference.Process(forwardOnlyReferenceVisitor, true, true);
    Finalizer.ResurrectCandidates(forwardOnlyReferenceVisitor,
                                  generalReferenceVisitor, true);
    // Resurrection may have copied more objects; trace again.
    this.ScanAndCopy(maxDestGeneration);
    WeakReference.Process(forwardOnlyReferenceVisitor, true, false);
    MultiUseWord.VisitWeakRefs(forwardOnlyReferenceVisitor,
                               false /* Don't use shadows */);
#if !VC
    TryAllManager.VisitWeakRefs(forwardOnlyReferenceVisitor);
#endif
    CollectorStatistics.Event(GCEvent.CollectionComplete);
    // Clean up the auxiliary data structures
    for (int i = MinGeneration + 1; i <= (int)maxDestGeneration; i++) {
        copyScanners[i].Cleanup();
    }
    UnmanagedPageList.ReleaseStandbyPages();
    pinnedReferenceVisitor.CleanPinnedPages();
    MultiUseWord.PostGCHook(onlyNew);
    Finalizer.ReleaseCollectFinalizers();
#if !VC
    TryAllManager.PostGCHookTryAll();
#endif
}
// GetHashCode is intended to serve as a hash function for this object.
// Based on the contents of the object, the hash function will return a suitable
// value with a relatively random distribution over the various inputs.
//
// This default implementation delegates to the MultiUseWord (sync
// block) machinery, so repeated calls on the same object yield the
// same value — technically a valid hash function, but a weak one.
// Types (and especially value classes) should override this method
// with something based on their contents.
//
//| <include path='docs/doc[@for="Object.GetHashCode"]/*' />
public virtual int GetHashCode() {
    return MultiUseWord.GetHashCode(this);
}