// Reports this handle's single object slot to the visitor.
// Empty slots (UIntPtr.Zero) are skipped; otherwise the slot is pinned
// so the visitor can read — and possibly update — the reference in place.
internal unsafe void Visit(DirectReferenceVisitor visitor) {
    // Nothing to visit for an empty slot.
    if (item == UIntPtr.Zero) {
        return;
    }
    // Pin the field so its address stays valid across the callback.
    fixed (UIntPtr *slot = &item) {
        visitor.Visit(slot);
    }
}
// Visits every live slot in the run-finalizer table set.
//
//   visitor    - receives a pinned pointer to each non-zero slot and may
//                update the reference in place.
//   copyFirst  - selects which copy to walk: RunFinalizerTable when true,
//                RunFinalizerTableShadow when false.
//   markedOnly - when true, only slots whose low bit (the mark bit) is set
//                are visited; the bit is cleared before the visit so the
//                visitor sees a clean pointer value.  Unmarked slots are
//                skipped.
//
// Compiled out entirely under the reference-counting collectors.
void VisitAllRunFinalizer(DirectReferenceVisitor visitor, bool copyFirst, bool markedOnly) {
#if !(REFERENCE_COUNTING_GC || DEFERRED_REFERENCE_COUNTING_GC)
    // Visit all RunFinalizer objects.
    for (int i = 0; i < RunFinalizerTableShadow.Length; i++) {
        UIntPtr[] table = copyFirst ? RunFinalizerTable[i] : RunFinalizerTableShadow[i];
        if (table != null) {
            for (int j = 0; j < table.Length; j++) {
                // Pin the slot: the visitor may rewrite the reference in place.
                fixed (UIntPtr *loc = &table[j]) {
                    if (*loc != UIntPtr.Zero) {
                        if (markedOnly) {
                            if ((*loc & 1) == 1) {
                                // Clear the mark bit before handing the
                                // (now-clean) reference to the visitor.
                                *loc = *loc & (~(UIntPtr)1);
                            }
                            else {
                                // Unmarked entry: skip it.
                                continue;
                            }
                        }
                        visitor.Visit(loc);
                    }
                }
            }
        }
    }
#endif // REFERENCE_COUNTING_GC
}
// Moves finalization candidates whose objects died in this collection from
// the Candidate table onto the RunFinalizer table, marking them (low bit
// set) so a later VisitAllRunFinalizer(..., markedOnly: true) pass can
// resurrect exactly this batch.
//
//   forwardVisitor   - forwards each candidate reference; leaves the slot
//                      UIntPtr.Zero if the object is dead (unreachable).
//   resurrectVisitor - used to register newly allocated sub-tables and,
//                      at the end, to resurrect the now-runnable objects.
//   copyFirst        - selects the primary tables when true, the shadow
//                      tables when false (both are kept in sync when a new
//                      sub-table is created).
//
// Compiled out entirely under the reference-counting collectors.
void ResurrectCandidates(DirectReferenceVisitor forwardVisitor, DirectReferenceVisitor resurrectVisitor, bool copyFirst) {
#if !(REFERENCE_COUNTING_GC || DEFERRED_REFERENCE_COUNTING_GC)
    // Cursor into the RunFinalizer[Shadow] tables; maintained across
    // candidates so a whole batch is transferred with one forward march.
    UIntPtr[] runTable = null;
    int runTableIndex = 0;
    int runIndex = 0;
    // For the concurrent collector, ResurrectCandidates could happen
    // while the application threads are calling SuppressFinalize and
    // ReRegisterForFinalize.  So we need to use the spinLock to prevent
    // races.  But we do not want to hold the spinLock while allocating
    // (i.e. when we grow the RunFinalizerTable[Shadow]) because our
    // spinLock is a leaf lock.  We don't want to worry about deadlocks
    // involving the spinLock and any locking that occurs as part of a
    // GC provoked by an allocation attempt.
#if SINGULARITY
    bool disabled = Processor.DisableLocalPreemption();
#endif
    spinLock.Acquire();
    // Tracks whether we currently hold spinLock, so the finally block
    // releases it exactly when needed (it is dropped around allocation).
    bool lockHeld = true;
    try {
        int logicalIndex = 0;
        for (int i = 0; i < CandidateTableShadow.Length; i++) {
            UIntPtr[] table = copyFirst ? CandidateTable[i] : CandidateTableShadow[i];
            if (table == null) {
                // Ran off the end of the populated sub-tables.
                VTable.Assert(logicalIndex == lastCandidateIndex);
                break;
            }
            for (int j = 0; j < table.Length; j++, logicalIndex++) {
                if (table[j] == UIntPtr.Zero) {
                    // First never-used slot: end of the candidate list.
                    VTable.Assert(logicalIndex == lastCandidateIndex);
                    break;
                }
                // Pin the slot; forwardVisitor may rewrite it in place.
                fixed (UIntPtr *loc = &table[j]) {
                    // Slots on the free list hold encoded link values
                    // (IsLink), not object references; skip those.
                    if (!IsLink((int)*loc)) {
                        UIntPtr oldVal = *loc;
                        forwardVisitor.Visit(loc);
                        if (*loc == UIntPtr.Zero) {
                            // Object is dead: it becomes runnable for
                            // finalization.
                            // Put this slot onto the CandidateTable's free list
                            *loc = (UIntPtr)freeCandidateLink;
                            freeCandidateLink = IndexToLink(logicalIndex);
                            // marching forward through the RunFinalizer[Shadow] table, find an
                            // empty slot to install this object.  Maintain the cursor across
                            // objects, so we can efficiently transfer an entire batch.
                            for (; runTableIndex < RunFinalizerTableShadow.Length; runTableIndex++) {
                                runTable = copyFirst ? RunFinalizerTable[runTableIndex] : RunFinalizerTableShadow[runTableIndex];
                                if (runTable == null) {
                                    // Create a new table: double the size of
                                    // the previous sub-table.
                                    int length = RunFinalizerTableShadow[runTableIndex - 1].Length * 2;
                                    // Drop the leaf spinLock (and restore
                                    // preemption) across the allocation to
                                    // avoid deadlock with a GC it may provoke.
                                    lockHeld = false;
                                    spinLock.Release();
#if SINGULARITY
                                    Processor.RestoreLocalPreemption(disabled);
#endif
                                    UIntPtr[] newTable = new UIntPtr[length];
#if SINGULARITY
                                    disabled = Processor.DisableLocalPreemption();
#endif
                                    spinLock.Acquire();
                                    lockHeld = true;
                                    // There is no race with the RunFinalizerTable[Shadow].
                                    // The spinLock serializes access to the CandidateTable[Shadow].
                                    runTable = newTable;
                                    RunFinalizerTable[runTableIndex] = newTable;
                                    RunFinalizerTableShadow[runTableIndex] = newTable;
                                    // Register the fresh table (and its two
                                    // roots) with the resurrecting visitor so
                                    // the collector sees it.
                                    UIntPtr tableAddr = Magic.addressOf(newTable);
                                    resurrectVisitor.Visit(&tableAddr);
                                    resurrectVisitor.VisitReferenceFields(RunFinalizerTable[runTableIndex]);
                                    resurrectVisitor.VisitReferenceFields(RunFinalizerTableShadow[runTableIndex]);
                                }
                                // Scan this sub-table for a free slot.
                                for (; runIndex < runTable.Length; runIndex++) {
                                    if (runTable[runIndex] == UIntPtr.Zero) {
                                        goto outer;
                                    }
                                }
                                runIndex -= runTable.Length;
                                VTable.Assert(runIndex == 0); // ready for next sub-table
                            }
                          outer:
                            // We found an empty slot in the RunFinalizerTable[Shadow],
                            // where we can put our ready Candidate.  It's also possible
                            // to reach this label by falling through after exhausting the
                            // entire table.  This will result in an exception when we
                            // attempt to over-index the array.  It's not clear what more
                            // protections are required... the process has exceeded an
                            // unlikely and hard-wired capacity limit.
                            Interlocked.Increment(ref WaitingToRun);
                            // NOTE(review): madeRunnable has no visible declaration in
                            // this method — presumably a local declared in an unseen
                            // portion of the file or an instance field; verify.
                            madeRunnable = true;
                            // Install the object with its mark (low) bit set so the
                            // markedOnly resurrection pass below picks up this batch.
                            if (copyFirst) {
                                RunFinalizerTable[runTableIndex][runIndex] = oldVal | new UIntPtr(1);
                            }
                            else {
                                RunFinalizerTableShadow[runTableIndex][runIndex] = oldVal | new UIntPtr(1);
                            }
                        }
                    }
                }
            }
        }
    }
    finally {
        if (lockHeld) {
            spinLock.Release();
#if SINGULARITY
            Processor.RestoreLocalPreemption(disabled);
#endif
        }
    }
    if (madeRunnable) {
        // Resurrect objects!  Visit only the entries we just marked.
        VisitAllRunFinalizer(resurrectVisitor, copyFirst, true);
    }
#endif // REFERENCE_COUNTING_GC
}
// Walks the intrusive chain of WeakReference objects hanging off headRef,
// letting the visitor update (forward) both the chain links themselves and
// each weak reference's target; dead links are unspliced from the chain.
//
//   updateReferenceVisitor - forwards a reference in place; writes
//                            UIntPtr.Zero into the slot if the referent died.
//   copyFirst              - when true, follow the updated (new) link value
//                            to reach the next node; when false, keep using
//                            the pre-visit (old) address.
//   ignoreLong             - when true, skip "long" weak references (those
//                            with trackResurrection set) without touching
//                            them; presumably they are processed in a
//                            separate pass — verify against callers.
void Process(DirectReferenceVisitor updateReferenceVisitor, bool copyFirst, bool ignoreLong) {
    // Head ref is a dummy ref object.
    WeakReference wr = headRef;
    while (wr.nextRef != UIntPtr.Zero) {
        if (ignoreLong) {
            WeakReference tmp = Magic.toWeakReference(Magic.fromAddress(wr.nextRef));
            if (tmp.trackResurrection) {
                // Long weak reference: leave it untouched and advance.
                wr = tmp;
                continue;
            }
        }
        // Remember the old (pre-visit) address of the next node.
        UIntPtr nextPtr = wr.nextRef;
        fixed (UIntPtr *loc = &wr.nextRef) {
            // update the reference (the chain link itself)
            updateReferenceVisitor.Visit(loc);
        }
        WeakReference next;
        if (wr.nextRef == UIntPtr.Zero) {
            // The next WeakReference object itself is dead: unsplice it by
            // rerouting our link (via its old address) past it, then retry
            // from the same node.
            // remove next from the chain and continue
            next = Magic.toWeakReference(Magic.fromAddress(nextPtr));
            wr.nextRef = next.nextRef;
            continue;
        }
        // Updating old or new location?  With copyFirst the visitor wrote
        // the new location into wr.nextRef; otherwise keep using the old
        // address for this pass.
        if (copyFirst) {
            next = Magic.toWeakReference(Magic.fromAddress(wr.nextRef));
        }
        else {
            next = Magic.toWeakReference(Magic.fromAddress(nextPtr));
        }
        // Now forward the weak reference's target; the visitor zeroes the
        // slot if the referent is dead.
        fixed (UIntPtr *loc = &next.objPtr) {
            if (*loc != UIntPtr.Zero) {
                // Update Reference
                updateReferenceVisitor.Visit(loc);
            }
        }
        // Continue
        wr = next;
    }
}