protected static void deallocationListChecker() {
    // Check for nonzero reference counts and for
    // loops in the delayed deallocation list.
    for (Object block = delayedDeallocationList;
         block != null;
         block = getNextLink(block)) {
        UIntPtr objAddr = Magic.addressOf(block);
        UIntPtr page = PageTable.Page(objAddr);
        if (!PageTable.IsGcPage(page)) {
            VTable.DebugPrint("Non-GC memory for freeing!\n");
            VTable.DebugBreak();
        }
        uint refState = block.REF_STATE;
        if ((refState & RSMasks.refCount) != 0) {
            VTable.DebugPrint("Non-zero reference count!\n");
            VTable.DebugBreak();
        }
        block.REF_STATE = refState + 1;
    }
    // Make another pass to reset reference counts.
    for (Object block = delayedDeallocationList;
         block != null;
         block = getNextLink(block)) {
        block.REF_STATE--;
    }
}

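// The increment-then-decrement passes above double as a cycle check: a
// second visit to a node would observe the nonzero count left by the
// first pass. A minimal standalone sketch of the same trick on a plain
// intrusive list (Node and its fields are hypothetical, not the
// runtime's types):
internal sealed class Node
{
    internal Node next;
    internal uint visitCount;           // must be zero when quiescent
}

internal static class ListChecker
{
    // Returns true if the walk reaches some node twice, i.e. the list
    // contains a cycle. Counts are restored before returning.
    internal static bool HasCycle(Node head)
    {
        bool corrupt = false;
        for (Node n = head; n != null; n = n.next) {
            if (n.visitCount != 0) {    // revisit: the list loops
                corrupt = true;
                break;
            }
            n.visitCount++;
        }
        // Second pass: undo the marks so the list is left unchanged.
        // On a cycle this stops at the first unmarked node.
        for (Node n = head; n != null && n.visitCount != 0; n = n.next) {
            n.visitCount--;
        }
        return corrupt;
    }
}
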
/// <summary>
/// Attempt to enter the monitor, blocking until it is held.
/// </summary>
public static void Enter(Object obj) {
    VTable.DebugBreak();
    Monitor monitor = GetMonitorFromSyncBlock(obj);
    monitor.Enter();
}

//| <include path='docs/doc[@for="Assert.Fail"]/*' />
public static void Fail(String conditionString, String message) {
    // Run through the list of filters backwards (the last filter in
    // the list is the default filter), so we are guaranteed that at
    // least one filter will handle the assert.
    int iTemp = iNumOfFilters;
    while (iTemp > 0) {
        AssertFilters iResult =
            ListOfFilters[--iTemp].AssertFailure(conditionString, message);
        if (iResult == AssertFilters.FailDebug) {
#if SINGULARITY_KERNEL
            DebugStub.Break();
#elif SINGULARITY_PROCESS
            VTable.DebugBreak();
#endif
            break;
        }
        else if (iResult == AssertFilters.FailTerminate)
#if SINGULARITY_KERNEL
        {
            Kernel.Shutdown(-1);
        }
#elif SINGULARITY_PROCESS
        {
            AppRuntime.Stop(-1);
        }
#endif
    }
}

void ProcessMyGrayObjects(ref ThreadHeaderQueue.LocalList workList) {
    if (amMarkingForCoCo) {
        ProcessObjectsSlow(ref workList);
    }
    else {
        // Hand-inlined from ConcurrentMSCollector; needed to ensure
        // that the VisitReferenceFields call gets inlined.
        while (!ConcurrentMSCollector.killCollectorThreads &&
               !workList.IsEmpty()) {
            // Pop the next value.
            Object obj = workList.Pop(markedColor);
            if (CoCoBarrier.fVerifyToSpaceMark &&
                !CoCoBarrier.instance.IsInToSpace(Magic.addressOf(obj))) {
                VTable.DebugBreak();
            }
            // Visit the object's reference fields.
            this.VisitReferenceFields(obj);
        }
    }
}

internal static Object Pin(Object o, Pinner pinner) {
    if (fAbortVerboseDebug) {
        VTable.DebugPrint("Aborter: requested pinning on ");
        VTable.DebugPrint((ulong)Magic.addressOf(o));
        VTable.DebugPrint(" in thread ");
        VTable.DebugPrint((ulong)Magic.addressOf(Thread.CurrentThread));
        VTable.DebugPrint(" with pinner = ");
        VTable.DebugPrint((int)pinner);
        VTable.DebugPrint("\n");
    }
    UIntPtr oldCoCoWord = CAS(ref MixinObject(o).preHeader.CoCoWord,
                              WithNoForwardNotCopying(o),
                              WithNoForwardCopying(o));
    if (!IsForwarded(oldCoCoWord, o)) {
        // The object is not forwarded, so there is nothing further to
        // do. (Any in-flight copy must now be aborted: if the object
        // was not being copied, there was nothing to abort; if it was,
        // our CAS succeeded and aborted the copy.)
        if (fAbortVerboseDebug && IsCopying(oldCoCoWord)) {
            VTable.DebugPrint("Aborter: aborted copying on ");
            VTable.DebugPrint((ulong)Magic.addressOf(o));
            VTable.DebugPrint(" in thread ");
            VTable.DebugPrint((ulong)Magic.addressOf(Thread.CurrentThread));
            VTable.DebugPrint("\n");
        }
        if (fBreakOnAbort &&
            Thread.CurrentThread != mainThread &&
            pinner == Pinner.Barrier) {
            VTable.DebugBreak();
        }
        return o;
    }
    else {
        VTable.Assert(pinner == Pinner.Barrier,
                      "Encountered a forwarded object in a pin " +
                      "request that did not originate from the " +
                      "barrier");
        if (fAbortVerboseDebug) {
            VTable.DebugPrint("Aborter: encountered forwarded object " +
                              "at ");
            VTable.DebugPrint((ulong)Magic.addressOf(o));
            VTable.DebugPrint(" in thread ");
            VTable.DebugPrint((ulong)Magic.addressOf(Thread.CurrentThread));
            VTable.DebugPrint("\n");
        }
        return Magic.fromAddress(ForwardPtr(oldCoCoWord));
    }
}

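// Pin wins by atomically flipping the copy-phase word from "copying"
// back to "not copying"; if it instead observes a forwarding pointer,
// the copy already committed and the caller must use the new copy.
// A minimal standalone sketch of that three-state CAS protocol (the
// Copyable type and its encoding are hypothetical, not the runtime's):
using System.Threading;

internal sealed class Copyable
{
    // 0 = idle, 1 = copy in progress; any other value is a forwarding
    // handle installed by a committed copy.
    internal long phaseWord;
}

internal static class PinSketch
{
    private const long Idle = 0;
    private const long Copying = 1;

    // Returns 0 if the pin succeeded (any in-flight copy was aborted),
    // or the forwarding handle if the object has already moved.
    internal static long Pin(Copyable o)
    {
        long observed = Interlocked.CompareExchange(ref o.phaseWord,
                                                    Idle,      // new value
                                                    Copying);  // expected
        if (observed == Idle || observed == Copying) {
            // Not forwarded: either no copy was in flight, or our CAS
            // just aborted one. The object stays where it is.
            return 0;
        }
        // The copier committed first; observed is the forwarding handle.
        return observed;
    }
}
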
protected override bool WeakCASArbitrarySlow(Object o, UIntPtr offset,
                                             UIntPtr size, ulong value,
                                             ulong comparand)
{
    VTable.NotImplemented();
    VTable.DebugBreak();
    return false;
}

internal override void Collect(Thread currentThread, int generation) {
    try {
        GC.CollectTransition(currentThread, generation);
    }
    catch (Exception e) {
        VTable.DebugPrint("Garbage collection failed with exception: ");
        VTable.DebugPrint(e.GetType().Name);
        VTable.DebugBreak();
    }
}

internal static void Mark(UIntPtr ptr) {
    if (CMSMarking.MarkIfNecessary(ptr) &&
        fVerifyToSpaceMark &&
        ptr != UIntPtr.Zero &&
        !instance.IsInToSpace(ptr)) {
        VTable.DebugBreak();
    }
}

private static UIntPtr AllocateBlock(UIntPtr bytes, uint alignment) {
    UIntPtr startPtr = Allocator.AlignedAllocationPtr(allocPtr, limitPtr,
                                                      alignment);
    allocPtr = startPtr + bytes;
    if (allocPtr > limitPtr) {
        VTable.DebugPrint("Out of BootstrapMemory");
        VTable.DebugBreak();
    }
    return startPtr + PreHeader.Size;
}

private static void RecordSlow(Thread currentThread, UIntPtr value) {
    // Try to acquire a new chunk of the store buffer.
    while (writeBufferIndex < writeBufferSize) {
        int oldIndex = writeBufferIndex;
        int newIndex = oldIndex + chunkSize;
        if (Interlocked.CompareExchange(ref writeBufferIndex,
                                        newIndex, oldIndex) == oldIndex) {
            // We secured a new block of write buffer for this thread.
            UIntPtr *cursor = writeBuffer + oldIndex;
            *cursor = value;
            cursor++;
            MixinThread(currentThread).ssb.cursor = cursor;
            MixinThread(currentThread).ssb.limit = writeBuffer + newIndex;
            return;
        }
    }
    // We have run out of write barrier space.
    if (StopTheWorldGCData.CurrentPhase ==
        StopTheWorldPhase.SingleThreaded) {
        VTable.DebugBreak();
    }
    VTable.Assert(MixinThread(currentThread).ssb.overflowValue ==
                  UIntPtr.Zero);
    MixinThread(currentThread).ssb.overflowValue = value;
    GC.InvokeCollection(currentThread);
    while (MixinThread(currentThread).ssb.overflowValue != UIntPtr.Zero) {
        // Another thread must have taken charge of performing the
        // collection and hadn't yet assigned a GCRequest to the
        // current thread. Give the other thread a chance to do
        // some work before we try invoking the collector again.
        Thread.Yield();
        GC.InvokeCollection(currentThread);
    }
}

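// The loop above claims buffer space with compare-and-swap instead of a
// lock: each thread races to advance the shared index by one chunk and,
// on success, owns the slots [oldIndex, newIndex). A minimal standalone
// sketch of the same claiming pattern (all names and sizes here are
// illustrative, not the runtime's; BufferSize is assumed to be a
// multiple of ChunkSize):
using System.Threading;

internal static class ChunkClaimer
{
    private const int BufferSize = 1 << 20;
    private const int ChunkSize = 256;
    private static int sharedIndex;

    // Returns the first slot of a freshly claimed chunk, or -1 if the
    // buffer is exhausted.
    internal static int TryClaimChunk()
    {
        while (true) {
            int oldIndex = Volatile.Read(ref sharedIndex);
            if (oldIndex >= BufferSize) {
                return -1;                       // no space left
            }
            int newIndex = oldIndex + ChunkSize;
            if (Interlocked.CompareExchange(ref sharedIndex,
                                            newIndex,
                                            oldIndex) == oldIndex) {
                return oldIndex;                 // own [oldIndex, newIndex)
            }
            // Lost the race; retry against the updated index.
        }
    }
}
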
/// <summary>
/// Wait within the monitor for a Pulse.
/// </summary>
internal bool Wait(SchedulerTime stop) {
    Thread currentThread = Thread.CurrentThread;
    if (!mutex.IsOwnedByCurrentThread()) {
        DebugStub.Break();
        throw new SynchronizationLockException("Monitor not held on Wait");
    }
    int rememberedDepth = depth;
    depth = 0;
    // Add me onto the waiting list.
    Enqueue(currentThread);
    // Exit the monitor.
    mutex.ReleaseMutex();
    // Wait.
    currentThread.WaitForMonitor(stop);
    // Re-enter the monitor.
    mutex.AcquireMutex();
    depth = rememberedDepth;
    bool success = !Remove(currentThread);
    if (!success && stop == SchedulerTime.MaxValue) {
        VTable.DebugBreak();
    }
    return success;
}

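// The sequence above (drop the mutex, block, reacquire, restore the
// recursion depth) is the classic condition-variable wait. For
// comparison, a standalone sketch of a timed wait using the standard
// System.Threading.Monitor rather than the Singularity monitor:
using System;
using System.Threading;

internal static class WaitSketch
{
    private static readonly object padlock = new object();
    private static bool signaled;

    // Returns true if pulsed before the timeout elapsed. (A
    // full-fidelity version would recompute the remaining timeout on
    // each loop iteration.)
    internal static bool WaitForSignal(TimeSpan timeout)
    {
        lock (padlock) {
            while (!signaled) {
                // Monitor.Wait releases the lock while blocked and
                // reacquires it before returning.
                if (!Monitor.Wait(padlock, timeout)) {
                    return false;                // timed out
                }
            }
            signaled = false;
            return true;
        }
    }

    internal static void Signal()
    {
        lock (padlock) {
            signaled = true;
            Monitor.Pulse(padlock);
        }
    }
}
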
internal static void EnsureAllocationAllowed() {
    // Verify that we are not in an interrupt or non-preemptible
    // region where allocations are prohibited.
    Microsoft.Singularity.Processor currentProcessor =
        Microsoft.Singularity.Processor.CurrentProcessor;
    // The Processor object itself may still need to be allocated.
    if (currentProcessor != null) {
        if (currentProcessor.InInterruptContext) {
            Tracing.Log(Tracing.Fatal,
                        "Attempt to allocate memory from interrupt context!");
            VTable.DebugPrint("Attempt to allocate memory from interrupt context!\n");
            VTable.DebugBreak();
        }
#if false
        // Currently too many allocations with preemption disabled to
        // debug right now.
        if (currentProcessor.PreemptionDisabled) {
            VTable.DebugPrint("Attempt to allocate memory with preemption disabled!\n");
            VTable.DebugBreak();
        }
#endif
    }
    VTable.Deny(Thread.CurrentThread != null &&
                Transitions.fInitializedRuntime &&
                Transitions.UnderGCControl(Thread.GetCurrentThreadIndex()));
}

private static void CheckMemoryClear(UIntPtr begin, UIntPtr limit) {
    VTable.DebugBreak();
}

private static bool CLEAR_POOL_PAGES() {
    VTable.DebugBreak();
    return false;
}