internal static void EnsureAllocationAllowed()
{
    // Verify that we're not in an interrupt or non-preemptible region
    // where allocations are prohibited.
    Microsoft.Singularity.Processor currentProcessor =
        Microsoft.Singularity.Processor.CurrentProcessor;
    if (currentProcessor != null) {
        // The Processor object itself may still need to be allocated.
        if (currentProcessor.InInterruptContext) {
            Tracing.Log(Tracing.Fatal,
                        "Attempt to allocate memory from interrupt context!");
            VTable.DebugPrint("Attempt to allocate memory from interrupt context!\n");
            VTable.DebugBreak();
        }
#if false // Currently too many allocations with preemption disabled to debug right now
        if (currentProcessor.PreemptionDisabled) {
            VTable.DebugPrint("Attempt to allocate memory with preemption disabled!\n");
            VTable.DebugBreak();
        }
#endif
    }
    VTable.Deny(Thread.CurrentThread != null &&
                Transitions.fInitializedRuntime &&
                Transitions.UnderGCControl(Thread.GetCurrentThreadIndex()));
}
internal static void CoCoLoop()
{
    if (fDebug) {
        VTable.DebugPrint("coco thread = ");
        VTable.DebugPrint((ulong)Win32Native.GetCurrentThreadId());
        VTable.DebugPrint("\n");
        VTable.DebugPrint("CoCo at ");
        VTable.DebugPrint((ulong)Magic.addressOf(Thread.CurrentThread));
        VTable.DebugPrint("\n");
    }
    for (;;) {
        lock (interlock) {
            doingCoCo = false;
            for (;;) {
                if (die) {
                    return;
                } else if (didStartTrace) {
                    didStartTrace = false;
                    Monitor.PulseAll(interlock);
                } else if (didEndTrace) {
                    didEndTrace = false;
                    Monitor.PulseAll(interlock);
                    if (wantCoCo) {
                        break;
                    }
                }
                Monitor.Wait(interlock);
            }
            wantCoCo = false;
        }

        // now further tracing is BLOCKED

        cyclesStarted++;
        timingBefore = Environment.TickCount;
        if (fDebug) {
            VTable.DebugPrint("+++++ Start Concurrent Copying\n");
        }
        CoCoBarrier.EnablePinning();
        doingCoCo = true;

        ConcurrentMSCollector.stackMarkReferenceVisitor =
            CoCoMSCollector.nopStackMarker;
        ConcurrentMSCollector.stackMarkPinnedReferenceVisitor =
            CoCoMSCollector.pinStackMarker;

        // Perform a scan of all call stacks, including the call
        // stack of the CoCo thread.
        ConcurrentMSCollector.TrivialHandshake = false;
        ConcurrentMSCollector.IncludeMUWInHandshake = false;
        ConcurrentMSCollector.CollectorHandshake(cocoThread);

        // In order to scan the call stack of the current thread,
        // we need a TransitionRecord for the thread.  At this
        // point we don't have one, so we have to go through
        // CollectBodyTransition to get one.
        Transitions.MakeGCRequest(cocoThread.threadIndex);
        GC.InvokeCollection(cocoThread);

        ConcurrentMSCollector.TrivialHandshake = true;
        ConcurrentMSCollector.IncludeMUWInHandshake = true;

        ConcurrentMSCollector.stackMarkReferenceVisitor =
            CoCoMSCollector.normalStackMarker;
        ConcurrentMSCollector.stackMarkPinnedReferenceVisitor =
            CoCoMSCollector.normalStackMarker;

        timingAfterPin = Environment.TickCount;
        if (fDebug) {
            VTable.DebugPrint("+++++ Copying\n");
        }
        if (CoCoBarrier.instance.NeedsPrepPhase) {
            CoCoBarrier.ChangePhase(CoCoBarrier.Phase.Prep, false, true);
        }
        timingAfterPrep = Environment.TickCount;
        CoCoBarrier.ChangePhase(CoCoBarrier.Phase.Copy, true, true);
        numCopied += CoCoBarrier.instance.Copy();
        // End of the copy phase; fixup/forwarding time is measured from here.
        timingAfterCopy = Environment.TickCount;
        CoCoBarrier.ChangePhase(CoCoBarrier.Phase.Fixup, true, true);

        AddCollectionRequest();

        // wait for a complete collector cycle.  This is for fixup.
        if (fDebug) {
            VTable.DebugPrint("+++++ Fixup: Waiting to start tracing\n");
        }
        lock (interlock) {
            while (!didStartTrace && !die) {
                Monitor.Wait(interlock);
            }
            if (die) {
                return;
            }
            didStartTrace = false;
            inFixUp = true;
            Monitor.PulseAll(interlock);
        }
        if (fDebug) {
            VTable.DebugPrint("+++++ Fixup: Waiting to end tracing\n");
        }
        lock (interlock) {
            while (!didEndTrace && !die) {
                Monitor.Wait(interlock);
            }
            if (die) {
                return;
            }
            didEndTrace = false;
            doingCoCo = false;
            inFixUp = false;
            Monitor.PulseAll(interlock);
        }
        timingAfter = Environment.TickCount;
        CoCoBarrier.ChangePhase(CoCoBarrier.Phase.Idle, false, false);
        if (fDebug) {
            VTable.DebugPrint("+++++ Finish Concurrent Copying\n");
        }
        pinTime += (timingAfterPin - timingBefore);
        prepTime += (timingAfterPrep - timingAfterPin);
        copyTime += (timingAfterCopy - timingAfterPrep);
        forwardTime += (timingAfter - timingAfterCopy);
        cycles++;
    }
}
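// CoCoLoop coordinates with the tracing collector purely through Monitor
// wait/pulse on a single lock object ("interlock"): the tracer sets
// didStartTrace/didEndTrace and pulses, the CoCo thread consumes the flags,
// pulses back, and only leaves the inner loop once a trace has ended and a
// copy cycle was requested.  The following self-contained sketch shows that
// handshake shape in isolation.  All names here (TraceHandshakeSketch,
// NotifyTraceEnded, RequestCycle, WaitForCycleRequest) are hypothetical and
// exist only for illustration; they are not part of the CoCo/Bartok runtime.
internal sealed class TraceHandshakeSketch
{
    private readonly object interlock = new object();
    private bool cycleRequested;
    private bool traceEnded;

    // Called by the tracing thread when it finishes a trace.
    internal void NotifyTraceEnded()
    {
        lock (this.interlock) {
            this.traceEnded = true;
            System.Threading.Monitor.PulseAll(this.interlock);
        }
    }

    // Called by a policy thread to ask for a copy cycle.
    internal void RequestCycle()
    {
        lock (this.interlock) {
            this.cycleRequested = true;
            System.Threading.Monitor.PulseAll(this.interlock);
        }
    }

    // Called by the copying thread: block until a trace has ended *and* a
    // cycle was requested, mirroring the didEndTrace/wantCoCo test above.
    internal void WaitForCycleRequest()
    {
        lock (this.interlock) {
            while (!(this.traceEnded && this.cycleRequested)) {
                System.Threading.Monitor.Wait(this.interlock);
            }
            this.traceEnded = false;
            this.cycleRequested = false;
        }
    }
}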
internal override void NewThreadNotification(Thread newThread, bool initial)
{
    Transitions.NewThreadNotification(newThread.threadIndex, initial);
}
private void PerformCollection(int currentThreadIndex, int generation)
{
    // Clear the GCRequest bit (if necessary) before doing
    // anything that could cause a state transition.
    if (Transitions.HasGCRequest(currentThreadIndex)) {
        Transitions.ClearGCRequest(currentThreadIndex);
    }

    int startTicks = 0;
    bool enableGCTiming = VTable.enableGCTiming;
    if (enableGCTiming || VTable.enableFinalGCTiming) {
        VTable.enableGCTiming = false;
        startTicks = Environment.TickCount;
        if (enableGCTiming) {
            VTable.DebugPrint("[GC start: {0} bytes]\n",
                              __arglist(TotalMemory));
        }
    }
#if SINGULARITY
    Tracing.Log(Tracing.Debug, "GC start");
#endif
    CollectorStatistics.Event(GCEvent.StopTheWorld);
    CurrentPhase = StopTheWorldPhase.Synchronizing;
    StopTheWorld();
    CurrentPhase = StopTheWorldPhase.SingleThreaded;
    StartGCCycle();
#if SINGULARITY
    long preGcMemoryUsage = GC.GetTotalMemory(false);
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
    TimeSpan ticks = Thread.CurrentThread.ExecutionTime;
    TimeSpan ticks2 = SystemClock.KernelUpTime;
#else
    TimeSpan ticks = SystemClock.KernelUpTime;
#endif
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
    TimeSpan ticks = ProcessService.GetThreadTime();
    TimeSpan ticks2 = ProcessService.GetUpTime();
#else
    TimeSpan ticks = ProcessService.GetUpTime();
#endif
#endif
#endif //singularity

#if SINGULARITY_KERNEL
    bool iflag = Processor.DisableInterrupts();
    // Disable interrupts on other CPU's
    MpExecution.StopProcessorsForGC();
#endif
#if SINGULARITY
    ulong beg = Isa.GetCycleCount();
#endif

    // Preparation
    GC.allocationGCInhibitCount++;

    // Verify the heap before GC
    if (VTable.enableGCVerify) {
        this.VerifyHeap(true);
    }

    // Invoke the chosen collector
#if SINGULARITY
    Monitoring.Log(Monitoring.Provider.GC,
                   (ushort)GarbageCollectorEvent.StartCollection);
#endif
    this.CollectStopped(collectorThreadIndex, generation);
#if SINGULARITY
    Monitoring.Log(Monitoring.Provider.GC,
                   (ushort)GarbageCollectorEvent.EndCollection);
#endif

    // Verify the heap after GC
    if (VTable.enableGCVerify) {
        this.VerifyHeap(false);
    }
    if (VTable.enableGCAccounting) {
        MemoryAccounting.Report(GC.gcType);
    }

    // Cleanup
    CollectorStatistics.Event(GCEvent.ResumeTheWorld);
    GC.allocationGCInhibitCount--;
    CurrentPhase = StopTheWorldPhase.Idle;
#if SINGULARITY
    long postGcMemoryUsage = GC.GetTotalMemory(false);
#endif
    if (enableGCTiming || VTable.enableFinalGCTiming) {
        int elapsedTicks = Environment.TickCount - startTicks;
        BaseCollector.RegisterPause(elapsedTicks);
        if (enableGCTiming) {
            VTable.DebugPrint("[GC end : {0} bytes, {1} ms]\n",
                              __arglist(TotalMemory, elapsedTicks));
            VTable.enableGCTiming = true;
        }
    }
    if (VTable.enableGCProfiling) {
        ulong totalMemory = (ulong)GC.GetTotalMemory(false);
        this.RegisterHeapSize(totalMemory);
    }
    ResumeTheWorld();
    collectorThreadIndex = -1;
#if SINGULARITY
    Tracing.Log(Tracing.Debug, "GC stop");
    long pagesCollected = preGcMemoryUsage - postGcMemoryUsage;
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
    int procId = Thread.CurrentProcess.ProcessId;
    ticks = Thread.CurrentThread.ExecutionTime - ticks;
    ticks2 = SystemClock.KernelUpTime - ticks2;
    Process.kernelProcess.SetGcPerformanceCounters(ticks, (long)pagesCollected);
#else
    ticks = SystemClock.KernelUpTime - ticks;
#endif
    Thread.CurrentProcess.SetGcPerformanceCounters(ticks, (long)pagesCollected);
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
    ushort procId = ProcessService.GetCurrentProcessId();
    ticks = ProcessService.GetThreadTime() - ticks;
    ticks2 = ProcessService.GetUpTime() - ticks2;
#else
    ticks = ProcessService.GetUpTime() - ticks;
#endif
    ProcessService.SetGcPerformanceCounters(ticks, (long)pagesCollected);
#endif

#if DEBUG
#if THREAD_TIME_ACCOUNTING
    DebugStub.WriteLine("~~~~~ StopTheWorld [collected pages={0:x8}, pid={1:x3}, ms(Thread)={2:d6}, ms(System)={3:d6}, procId={4}, tid={5}]",
                        __arglist(pagesCollected,
                                  PageTable.processTag >> 16,
                                  ticks.Milliseconds,
                                  ticks2.Milliseconds,
                                  procId,
                                  Thread.GetCurrentThreadIndex()));
#endif
#endif
#endif //singularity
#if SINGULARITY
    DebugStub.AddToPerfCounter(GC.perfCounter, Isa.GetCycleCount() - beg);
#endif
#if SINGULARITY_KERNEL
    // Resume interrupts on other CPU's
    MpExecution.ResumeProcessorsAfterGC();
    Processor.RestoreInterrupts(iflag);
#endif
}
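// PerformCollection brackets the stop-the-world pause with Environment.TickCount:
// it samples once before stopping the world and once after the cycle, and it only
// prints when timing was enabled at entry (the flag is cleared for the duration of
// the GC so the collector's own output does not perturb the measurement).  Below
// is a minimal sketch of that bracketing pattern; the names (PauseTimingSketch,
// CollectBody, MeasurePause) are hypothetical and not part of the collector.
internal static class PauseTimingSketch
{
    // Stand-in for the collection work being timed.
    internal delegate void CollectBody();

    // Runs 'body' and returns elapsed wall-clock milliseconds.  TickCount has
    // coarse (~10-16 ms) resolution and wraps after ~24.9 days, so it is only
    // suitable for the kind of rough pause reporting done above.
    internal static int MeasurePause(CollectBody body)
    {
        int startTicks = System.Environment.TickCount;
        body();
        return System.Environment.TickCount - startTicks;
    }
}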
internal static void ChangePhase(Phase phase_, bool forwarding_, bool pinning_)
{
    if (fDebug) {
        VTable.DebugPrint(" --> CoCo going to ");
        switch (phase_) {
        case Phase.Idle:
            VTable.DebugPrint("Idle");
            break;
        case Phase.Prep:
            VTable.DebugPrint("Prep");
            break;
        case Phase.Copy:
            VTable.DebugPrint("Copy");
            break;
        case Phase.Fixup:
            VTable.DebugPrint("Fixup");
            break;
        default:
            VTable.NotReached();
            break;
        }
        VTable.DebugPrint(" (with");
        if (!forwarding_) {
            VTable.DebugPrint("out");
        }
        VTable.DebugPrint(" forwarding, with");
        if (!pinning_) {
            VTable.DebugPrint("out");
        }
        VTable.DebugPrint(" pinning)\n");
    }
    lock (interlock) {
        // Publish the new barrier state.
        phase = phase_;
        forwarding = forwarding_ || forceForwarding;
        pinning = pinning_ || forcePinning;
        SetAllowFastPath();
        isNotIdle = phase != Phase.Idle || forceNotIdle;

        // The current thread acknowledges the new phase immediately.
        CoCoThread t = MixinThread(Thread.CurrentThread);
        t.acknowledgedPhase = (int)phase;
        t.acknowledgedForwarding = forwarding;
        t.acknowledgedPinning = pinning;
        t.phaseVersion++;
        Monitor.PulseAll(interlock);

        // Wait until every live thread has acknowledged the new state.
        for (;;) {
            bool needToWait = false;
            bool doPulseAll = false;
            for (int i = 0; i < Thread.threadTable.Length; ++i) {
                Thread t_ = Thread.threadTable[i];
                if (t_ == null) {
                    continue;
                }
                t = MixinThread(t_);
                // Threads that are dormant, not yet participating in CoCo,
                // or pinned out cannot acknowledge for themselves, so
                // acknowledge on their behalf.
                if (Transitions.InDormantState(i) ||
                    !t.readyForCoCo ||
                    t.pinnedOut) {
                    t.acknowledgedPhase = (int)phase;
                    t.acknowledgedForwarding = forwarding;
                    t.acknowledgedPinning = pinning;
                }
                if (t.pinnedOut && phase == Phase.Idle) {
                    t.pinnedOut = false;
                    doPulseAll = true;
                }
                if ((Phase)t.acknowledgedPhase != phase ||
                    t.acknowledgedForwarding != forwarding ||
                    t.acknowledgedPinning != pinning) {
                    if (fDebug) {
                        VTable.DebugPrint(" !! thread ");
                        VTable.DebugPrint((ulong)Magic.addressOf(t));
                        VTable.DebugPrint(" not ack\n");
                    }
                    needToWait = true;
                }
            }
            if (doPulseAll) {
                Monitor.PulseAll(interlock);
            }
            if (!needToWait) {
                break;
            }
            // REVIEW: make the timeout less than 500 ms
            Monitor.Wait(interlock, 500);
        }
    }
}
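// ChangePhase publishes the new phase/forwarding/pinning state under
// "interlock", pulses all waiters, and then loops until every live thread has
// copied that state into its acknowledged* fields, acknowledging on behalf of
// threads that cannot do so themselves.  The bounded Monitor.Wait(interlock,
// 500) re-checks periodically so a thread that acknowledges without pulsing
// cannot stall the phase change indefinitely.  A minimal sketch of that
// publish-and-acknowledge barrier follows; the names (PhaseBarrierSketch,
// Publish, Acknowledge) are hypothetical and are not part of CoCoBarrier.
internal sealed class PhaseBarrierSketch
{
    private readonly object interlock = new object();
    private readonly int[] acknowledged; // last phase seen by each worker
    private int phase;

    internal PhaseBarrierSketch(int workerCount)
    {
        this.acknowledged = new int[workerCount];
    }

    // Worker side: observe the current phase and record the acknowledgment.
    internal void Acknowledge(int workerIndex)
    {
        lock (this.interlock) {
            this.acknowledged[workerIndex] = this.phase;
            System.Threading.Monitor.PulseAll(this.interlock);
        }
    }

    // Publisher side: install a new phase and wait until every worker has
    // acknowledged it, re-checking at least every 500 ms as ChangePhase does.
    internal void Publish(int newPhase)
    {
        lock (this.interlock) {
            this.phase = newPhase;
            System.Threading.Monitor.PulseAll(this.interlock);
            for (;;) {
                bool needToWait = false;
                for (int i = 0; i < this.acknowledged.Length; ++i) {
                    if (this.acknowledged[i] != this.phase) {
                        needToWait = true;
                    }
                }
                if (!needToWait) {
                    break;
                }
                System.Threading.Monitor.Wait(this.interlock, 500);
            }
        }
    }
}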