private void RunWheel()
{
    tickEvent = new AutoResetEvent(false);
    SchedulerTime nextTick;
    TCP tcpSession;

    while (true) {
        nextTick = SchedulerTime.Now;
        nextTick = nextTick.AddMilliseconds(tickFrequency);

        // The wait is expected to time out at the next tick boundary.
        bool rc = tickEvent.WaitOne(nextTick);
        VTable.Assert(rc == false);

        bool done = false;
        while (done == false) {
            TimerEvent timerEvent;
            using (this.objectLock.Lock()) {
                LinkedList timersList = timerEntries[tickIndex];
                VTable.Assert(timersList != null);
                if (timersList.Count == 0) {
                    tickIndex = (tickIndex + 1) % wheelSize;
                    done = true;
                    break;
                }
                LinkedListNode currentNode = timersList.head;
                VTable.Assert(currentNode != null);
                timerEvent = currentNode.theObject as TimerEvent;
                DebugStub.Assert(timerEvent != null);
                VTable.Assert(timerEvent != null);
                tcpSession = timerEvent.tcpSession;
                VTable.Assert(tcpSession != null);
            }
            VTable.Assert(timerEvent != null);
            VTable.Assert(tcpSession != null);

            // Run the timer callback outside the lock.
            timerEvent.timerDelegate.Run(tcpSession);
        }
        tickIndex = (tickIndex + 1) % wheelSize;
    }
}
internal unsafe override void Visit(UIntPtr *loc)
{
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
        return;
    }
    UIntPtr objectAddr = InteriorPtrTable.Find(addr);
    this.threadPtrQueue.Write(objectAddr);
    this.threadPtrQueue.Write(addr - objectAddr);
}
internal static unsafe void ClearThreadStack(Thread thread)
{
    short threadIndex = (short)thread.threadIndex;
    UIntPtr endPage = PageTable.Page(CallStack.StackBase(thread));
    UIntPtr startPage = endPage - 1;
    VTable.Assert(PageTable.IsStackPage(PageTable.Type(startPage)));
    VTable.Assert(PageTable.Extra(startPage) == threadIndex);
    while (startPage > 0 &&
           PageTable.IsStackPage(PageTable.Type(startPage - 1)) &&
           PageTable.Extra(startPage - 1) == threadIndex) {
        startPage--;
    }
    UIntPtr startAddr = PageTable.PageAddr(startPage);
    UIntPtr size = PageTable.RegionSize(endPage - startPage);
    SetUnallocatedPages(startAddr, size);
}
public void ClearServiceThread(Thread thread)
{
    Tracing.Log(Tracing.Audit, "ClearServiceThread");
    if (this != Thread.CurrentThread) {
        throw new Exception("Only the thread itself may call ClearServiceThread");
    }
    if (notificationTable == null) {
        return;
    }
    if (Interlocked.Exchange(ref notificationTable[threadIndex], null) != null) {
        // We cleared the notification
        Interlocked.Increment(ref threadCount);
    }
    VTable.Assert(threadCount >= 0);
}
private void SpinToAcquire(int ownerId)
{
    int iSpin;
    int backoffs = 0;

    // Assert precondition: the id used to acquire the lock must be non-zero,
    // since zero denotes an unowned lock.
    VTable.Assert(ownerId != 0);

    while (true) {
        // It is assumed this routine is only called after the inline
        // method has failed the interlocked spinlock test. Therefore we
        // retry using the safe test only after the cheaper, unsafe test
        // succeeds.
        for (iSpin = 0; (this.ownerId != 0 && iSpin < MaxSpinLimit); iSpin++) {
            // Hopefully ownerId will not be enregistered, so this read
            // always hits memory; if it is enregistered, we are in trouble.
            // Perform HT-friendly pause:
            Thread.NativeNoOp();
        }

        // If we exited the loop prematurely, the lock looked free, so try to get it
        if (iSpin < MaxSpinLimit) {
            // Attempt to grab the spinlock
            if (TryAcquire(ownerId)) {
                break;
            }
            // If we couldn't get the lock, at least we know someone did,
            // and the system is making progress; there is no need to
            // back off.
            backoffs = 0;
            continue;
        }

        // Increment back-off stats
        backoffs++;
    }
}
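// The loop above is a test-and-test-and-set spinlock: spin on plain reads of
// ownerId until the lock looks free, and only then retry the interlocked
// acquire. Below is a minimal, self-contained sketch of that pattern using
// standard System.Threading primitives; the names (SpinLockSketch, MaxSpin)
// are illustrative, not the runtime's own, and the real code's back-off
// accounting is omitted.
using System.Threading;

public sealed class SpinLockSketch
{
    private int ownerId;                // 0 means "free"
    private const int MaxSpin = 1000;

    public bool TryAcquire(int id)
    {
        // Safe (interlocked) test: succeeds only if the lock was free.
        return Interlocked.CompareExchange(ref this.ownerId, id, 0) == 0;
    }

    public void Acquire(int id)
    {
        while (!TryAcquire(id)) {
            // Cheap test: plain reads until the lock looks free or we give up.
            int spins = 0;
            while (Volatile.Read(ref this.ownerId) != 0 && spins < MaxSpin) {
                Thread.SpinWait(1);     // HT-friendly pause
                spins++;
            }
        }
    }

    public void Release()
    {
        Volatile.Write(ref this.ownerId, 0);
    }
}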
internal void ClearFrame(UIntPtr calleeSaveMask, bool framePointerOmitted)
{
    if (!framePointerOmitted) {
        VTable.Assert((calleeSaveMask & 0x100) == 0,
                      "R11 (frame pointer) should not be callee saved");
        r11.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x1) != 0) {
        r4.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x2) != 0) {
        r5.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x4) != 0) {
        r6.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x8) != 0) {
        r7.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x10) != 0) {
        r8.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x20) != 0) {
        r9.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x40) != 0) {
        r10.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x80) != 0) {
        r11.ClearFrameReg();
    }
}
public static unsafe void Initialize()
{
    maxEntries = 1 << 16;
    VTable UIntPtrArrayVtable = ((RuntimeType)typeof(UIntPtr[])).classVtable;
    tableSize = ObjectLayout.ArraySize(UIntPtrArrayVtable, maxEntries);

    // Allocate a pool for ZCT
    BumpAllocator entryPool = new BumpAllocator(PageType.NonGC);
    UIntPtr memStart = MemoryManager.AllocateMemory(tableSize);
    entryPool.SetZeroedRange(memStart, tableSize);
    PageManager.SetStaticDataPages(memStart, tableSize);

    // Initialize ZCT
    zeroCountTable = (UIntPtr[])
        DeferredReferenceCountingCollector.
        AllocateArray(ref entryPool, UIntPtrArrayVtable, tableSize);
    VTable.Assert(zeroCountTable != null,
                  @"zeroCountTable != null");
    *(uint *)(Magic.addressOf(zeroCountTable) + PostHeader.Size) = maxEntries;
    VTable.Assert(zeroCountTable.Length == maxEntries,
                  @"zeroCountTable.Length == maxEntries");

    // Build ZCT freeEntries list
    freeHead = 1;
    for (uint i = 1; i < maxEntries - 1; i++) {
        zeroCountTable[i] = (UIntPtr)(((i + 1) << 2) | 0x01);
    }
    zeroCountTable[maxEntries - 1] = (UIntPtr)0x01;

    zctGarbagePicker = (ZCTGarbagePicker)BootstrapMemory.
        Allocate(typeof(ZCTGarbagePicker));
}
internal unsafe static UIntPtr ObjectSize(UIntPtr objectBase, VTable vtable)
{
    uint objectTag = unchecked((uint)vtable.pointerTrackingMask) & 0xf;
    switch (objectTag) {
        case SPARSE_TAG:
        case DENSE_TAG: {
            return(ObjectSize(vtable));
        }
        case PTR_VECTOR_TAG:
        case OTHER_VECTOR_TAG: {
            uint length = *(uint *)(objectBase + PostHeader.Size);
            return(ArraySize(vtable, length));
        }
        case PTR_ARRAY_TAG:
        case OTHER_ARRAY_TAG: {
            uint length = *(uint *)(objectBase + PostHeader.Size + sizeof(uint));
            return(ArraySize(vtable, length));
        }
        case STRING_TAG: {
            uint length = *(uint *)(objectBase + PostHeader.Size);
            return(StringSize(vtable, length));
        }
        case RESERVED_TAG: {
            VTable.Assert(false, "RESERVED_TAG was used!");
            return(UIntPtr.Zero);
        }
        default: {
            // escape case
            return(ObjectSize(vtable));
        }
    }
}
internal void ClearFrame(UIntPtr calleeSaveMask, bool framePointerOmitted)
{
    if (!framePointerOmitted) {
        VTable.Assert((calleeSaveMask & 0x100) == 0,
                      "EBP should not be callee saved");
        EBP.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x1) != 0) {
        EBX.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x80) != 0) {
        EBP.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x4) != 0) {
        ESI.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x2) != 0) {
        EDI.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x8) != 0) {
        R12.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x10) != 0) {
        R13.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x20) != 0) {
        R14.ClearFrameReg();
    }
    if ((calleeSaveMask & 0x40) != 0) {
        R15.ClearFrameReg();
    }
}
internal void Cleanup(bool mustBeEmpty)
{
    if (mustBeEmpty) {
        VTable.Assert(this.IsEmpty);
    }
    if (this.stackPage != UIntPtr.Zero) {
        UnmanagedPageList.pageCache.AddHead(this.stackPage);
        this.stackPage = UIntPtr.Zero;
    }
    while (!this.pageList.IsEmpty) {
        UIntPtr headPage = this.pageList.RemoveHead();
        UnmanagedPageList.pageCache.AddHead(headPage);
    }
    this.stackBottom = null;
    this.stackTop = null;
    this.stackPtr = null;
}
public void EnqueueTail(ThreadEntry entry)
{
    VTable.Assert(entry.next == null);
    VTable.Assert(entry.prev == null);
    VTable.Assert(entry.queue == null);
    entry.prev = this.tail;
    if (tail != null) {
        VTable.Assert(tail.next == null);
        tail.next = entry;
    }
    else {
        VTable.Assert(head == null);
        head = entry;
    }
    tail = entry;
}
private bool TryAcquireInternal(Thread thread, int threadId)
{
    bool result;
    VTable.Assert(thread != null);

    // Notify the thread that we are about to acquire the spinlock
    thread.NotifySpinLockAboutToAcquire(this.baseLock.Type);
    result = baseLock.TryAcquire(threadId + 1);

    // If we didn't acquire the spinlock, notify the thread about it;
    // the release notification serves this purpose
    if (!result) {
        thread.NotifySpinLockReleased(this.baseLock.Type);
    }
    return(result);
}
private bool StealFrom(ref ThreadHeaderQueue fromQueue, UIntPtr markedColor)
{
    UIntPtr fromHead, fromTail;
    if (fromQueue.StealList(out fromHead, out fromTail)) {
        // Prepend the stolen list segment to our list
        Object tailObject = Magic.fromAddress(fromTail);
        VTable.Assert(ThreadHeaderQueue.GcMark(tailObject) == markedColor);
        SetQueueField(tailObject, this.head + markedColor);
        this.head = fromHead;
        return(true);
    }
    else {
        return(false);
    }
}
public override Thread RunPolicy(
    int schedulerAffinity,
    int runningThreadAffinity,
    Thread currentThread,
    ThreadState schedulerAction,
    SchedulerTime currentTime)
{
    // Assert preconditions: the current thread is either null or its base
    // scheduler is the one specified by the affinity
    VTable.Assert(currentThread == null ||
                  currentThread.Affinity == schedulerAffinity);

    // Use affinity to derive actual scheduler
    return(schedulers[schedulerAffinity].RunPolicy(
               schedulerAffinity,
               runningThreadAffinity,
               currentThread,
               schedulerAction,
               currentTime));
}
public void EnqueueHead(ThreadEntry entry)
{
    VTable.Assert(entry.next == null);
    VTable.Assert(entry.prev == null);
    VTable.Assert(entry.queue == null);
    entry.next = head;
    if (head != null) {
        VTable.Assert(head.prev == null);
        head.prev = entry;
    }
    else {
        VTable.Assert(tail == null);
        tail = entry;
    }
    head = entry;
}
internal unsafe override void Visit(UIntPtr *loc)
{
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    PageType pageType = PageTable.Type(page);
    if (!PageTable.IsZombiePage(pageType)) {
        VTable.Assert(PageTable.IsGcPage(pageType) ||
                      PageTable.IsNonGcPage(pageType) ||
                      PageTable.IsStackPage(pageType) ||
                      PageTable.IsSharedPage(pageType));
        return;
    }
    UIntPtr objectAddr = InteriorPtrTable.Find(addr);
    registerThreadReferenceVisitor.threadPtrQueue.Write(objectAddr);
    registerThreadReferenceVisitor.threadPtrQueue.Write(addr - objectAddr);
    *Allocator.GetObjectVTableAddress(objectAddr) |= (UIntPtr)2U;
}
internal static bool MarkIfNecessary(UIntPtr value)
{
#if !SINGULARITY || CONCURRENT_MS_COLLECTOR
    if (value == 0) {
        return(false);
    }
    UIntPtr marked = markedColor;
    if (PageTable.IsGcPage(PageTable.Page(value)) &&
        ThreadHeaderQueue.GcMark(Magic.fromAddress(value)) != marked) {
        VTable.Assert(PageTable.IsMyPage(PageTable.Page(value)));
        Thread thread = Thread.CurrentThread;
        UIntPtr unmarked = unmarkedColor;
        ThreadHeaderQueue.Push(thread, value, marked, unmarked);
        return(true);
    }
#endif // !SINGULARITY || CONCURRENT_MS_COLLECTOR
    return(false);
}
internal void Cleanup(bool mustBeEmpty)
{
    if (mustBeEmpty) {
        VTable.Assert(this.pageList.Head == this.pageList.Tail);
        VTable.Assert(this.readCursor.cursor == this.writeCursor.cursor);
    }
    if (this.readPage != UIntPtr.Zero) {
        UnmanagedPageList.pageCache.AddHead(this.readPage);
        this.readPage = UIntPtr.Zero;
    }
    while (!this.pageList.IsEmpty) {
        UIntPtr headPage = this.pageList.RemoveHead();
        UnmanagedPageList.pageCache.AddHead(headPage);
    }
    this.writeCursor = new PageCursor(null, null);
    this.readCursor = new PageCursor(null, null);
}
///
/// <summary>
/// Post wait disassociates the thread from all handles it was waiting on
/// </summary>
///
protected static void PostWaitAnyInternal(
    Thread currentThread,
    WaitHandleBase[] waitHandles,
    ThreadEntry[] entries,
    int waitHandlesCount)
{
    ThreadState state;
    int handlerIdx;
    int unblockedBy = WaitHandle.WaitTimeout;
    int resultUnblockedBy = WaitHandle.WaitTimeout;

    VTable.Assert(currentThread.ThreadState == ThreadState.Running);

    // Dequeue the thread from all handles it was waiting on
    for (handlerIdx = 0; handlerIdx < waitHandlesCount; handlerIdx++) {
        // Disassociate the handle from the entry
        waitHandles[handlerIdx].Remove(entries[handlerIdx]);
    }
}
private void StealFromDead(ref ThreadHeaderQueue fromQueue, UIntPtr markedColor)
{
    // It is assumed that there are no concurrent accesses to
    // fromQueue. The thread owning the queue is supposed to
    // be dead, and there should only be one other thread
    // trying to steal the dead thread's queue.
    if (Thread.VolatileRead(ref fromQueue.head) !=
        Thread.VolatileRead(ref fromQueue.stolenHead)) {
        UIntPtr fromHead, fromTail;
        this.ongoingUpdates++;
        ThreadHeaderQueue.transferAttempt = true;
        if (fromQueue.StealList(out fromHead, out fromTail)) {
            // Prepend the stolen list segment to our list
            Object tailObject = Magic.fromAddress(fromTail);
#if ATOMIC_PUSH
            // NOTE: We don't try to be thread-safe on this.head
            VTable.Assert(GcMark(tailObject) == markedColor);
            SetQueueField(tailObject, this.head + markedColor);
            Thread.VolatileWrite(ref this.head, fromHead);
#else // ATOMIC_PUSH
            // REVIEW: We don't really need LOCK CMPXCHG, but
            // we do need CMPXCHG (needs to be atomic with
            // respect to the current processor, only).
            UIntPtr oldHead;
            UIntPtr foundHead = Thread.VolatileRead(ref this.head);
            do {
                oldHead = foundHead;
                SetQueueField(tailObject, oldHead + markedColor);
                foundHead = Interlocked.CompareExchange(ref this.head,
                                                        fromHead, oldHead);
            } while (foundHead != oldHead);
#endif // ATOMIC_PUSH
        }
        this.ongoingUpdates--;
    }
}
internal void PopFrame(UIntPtr *framePointer,
                       UIntPtr calleeSaveMask,
                       bool framePointerOmitted,
                       bool hasTransitionRecord)
{
    UIntPtr *calleeSaveStart;
    if (framePointerOmitted) {
        calleeSaveStart = framePointer - 1;
    }
    else {
        VTable.Assert((calleeSaveMask & 0x10) == 0,
                      "EBP should not be callee saved");
        calleeSaveStart = framePointer;
        EBP.PopFrameReg(ref calleeSaveStart);
    }
    if (hasTransitionRecord) {
        calleeSaveStart -=
            sizeof(CallStack.TransitionRecord) / sizeof(UIntPtr);
    }
    // Note: the order in which these appear is important!
    if ((calleeSaveMask & 0x1) != 0) {
        EBX.PopFrameReg(ref calleeSaveStart);
    }
    if ((calleeSaveMask & 0x8) != 0) {
        EBP.PopFrameReg(ref calleeSaveStart);
    }
    if ((calleeSaveMask & 0x4) != 0) {
        ESI.PopFrameReg(ref calleeSaveStart);
    }
    if ((calleeSaveMask & 0x2) != 0) {
        EDI.PopFrameReg(ref calleeSaveStart);
    }
}
internal static void EmitRefCountsProfile()
{
    VTable.Assert(RCCollector.ProfilingMode,
                  @"RCCollector.ProfilingMode");

    if (methods == null) { // No RC updates present.
        return;
    }

    // Bubble sort in decreasing order of sums.
    for (int i = 0; i < methods.Length; i++) {
        for (int j = methods.Length - 1; j > i; j--) {
            if (methods[j].CompareTo(methods[j - 1])) {
                // Swap contents.
                AcctRecord temp = methods[j];
                methods[j] = methods[j - 1];
                methods[j - 1] = temp;
            }
        }
    }

    VTable.DebugPrint("\n");
    AcctRecord.DispCountsHeader();
    AcctRecord.DispMethodNameHeader();
    VTable.DebugPrint("\n");
    for (int i = 0; i < methods.Length; i++) {
        if (methods[i].increments.Total == 0 &&
            methods[i].decrements.Total == 0) {
            continue;
        }
        methods[i].DispCounts();
        methods[i].DispMethodName();
        VTable.DebugPrint("\n");
    }
}
private static void ReferenceCheck(PageType addrType, UIntPtr *addr, Object value)
{
    VTable.Assert(PageTable.IsGcPage(addrType));
    if (GC.remsetType == RemSetType.Cards) {
        GenerationalGCData.
            installedRemSet.RecordReference(addr, value);
        return;
    }
    UIntPtr valueAddr = Magic.addressOf(value);
    PageType valType = PageTable.Type(PageTable.Page(valueAddr));
    if (PageTable.IsGcPage(valType) && (addrType > valType)) {
        GenerationalGCData.
            installedRemSet.RecordReference(addr, value);
    }
}
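// ReferenceCheck either records every GC-heap store (when a card remembered
// set is installed) or only stores that create an older-to-younger pointer.
// The sketch below illustrates the card-table idea under stated assumptions:
// a flat heap region, a fixed 512-byte card size, and illustrative names
// (CardTableSketch, CardShift). It is not the collector's actual
// remembered-set layout; the value parameter is kept only to mirror the call
// shape and is unused because card marking needs only the updated slot.
using System;

public sealed class CardTableSketch
{
    private readonly byte[] cards;
    private readonly UIntPtr heapBase;
    private const int CardShift = 9;        // 512-byte cards (assumed)

    public CardTableSketch(UIntPtr heapBase, int heapBytes)
    {
        this.heapBase = heapBase;
        this.cards = new byte[(heapBytes >> CardShift) + 1];
    }

    public unsafe void RecordReference(UIntPtr *addr, object value)
    {
        // Dirty the card containing the updated slot; the collector later
        // scans dirty cards for old-to-young pointers.
        ulong delta = (ulong)addr - (ulong)heapBase;
        cards[delta >> CardShift] = 1;
    }
}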
internal override unsafe void Visit(UIntPtr *loc)
{
    UIntPtr addr = *loc;
    UIntPtr page = PageTable.Page(addr);
    if (!PageTable.IsGcPage(page)) {
        PageType pageType = PageTable.Type(page);
        VTable.Assert(pageType == PageType.NonGC ||
                      pageType == PageType.Stack,
                      @"pageType == PageType.NonGC || pageType == PageType.Stack");
        return;
    }
    UIntPtr objAddr = SegregatedFreeList.Find(addr);
    incrementBackupRefCount.Traverse(objAddr);
}
internal void PopFrameReg(ref UIntPtr *calleeSaveStart)
{
    if (this.head != null && !this.pending) {
        ClearCalleeReg();
    }
    if (this.head == null) {
        this.value = *calleeSaveStart;
    }
    else {
        VTable.Assert(this.pending, "pending should be true");
        VTable.Assert(*calleeSaveStart == this.value,
                      "values are not equal");
    }
    this.pending = false;
    *calleeSaveStart = (UIntPtr)this.head;
    this.head = calleeSaveStart;
    calleeSaveStart--;
}
internal static unsafe void FinishInitializeThread()
{
    int threadIndex = initialThread.threadIndex;

    // Get the GC ready for initialThread
    Transitions.RuntimeInitialized();
    Transitions.ThreadStart();

    initialThread.processGcEvent = new AutoResetEvent(false);
    initialThread.autoEvent = new AutoResetEvent(false);
    initialThread.joinEvent = new ManualResetEvent(false);
    initialThread.singleQueueItem =
        new ThreadQueueItem[1] { new ThreadQueueItem(initialThread) };

    // Use CurrentThread to find our initial handle:
    VTable.Assert(initialThread == CurrentThread);
    initialThread.threadHandle = ThreadHandle.CurrentThread();

    // Instantiate the static variable that needs to be initialized
    m_LocalDataStoreMgr = new LocalDataStoreMgr();
    AddThread(threadIndex);
}
internal static void ReturnKernelStackSegment()
{
    // @TODO: see note about disabling interrupts above.
    bool en = Processor.DisableInterrupts();
    try {
        Isa.CallbackOnInterruptStack(returnKernelStackCallback, 0);

        unsafe {
            // Sanity check: we freed from the previous segment, and
            // should have set the thread context to point to this segment now.
            VTable.Assert(Isa.GetStackPointer() <=
                          Processor.GetCurrentThreadContext()->stackBegin);
            VTable.Assert(Isa.GetStackPointer() >=
                          Processor.GetCurrentThreadContext()->stackLimit);
        }
    }
    finally {
        Processor.RestoreInterrupts(en);
    }
}
/*
 * Returns a pointer to the beginning of an object such that the
 * pointer is less than or equal to addr. N.B. Before may return a
 * pointer to an alignment or an unused space token.
 */
private static UIntPtr Before(UIntPtr addr)
{
    UIntPtr page = PageTable.Page(addr);
    uint offset = PageTable.Extra(page);

    // OFFSET_NO_DATA and negative offsets should always fail this
    // test.
    if (PageTable.PageAddr(page) + (offset - OFFSET_SKEW) > addr) {
        // If the addr is an interior pointer of an object on a
        // previous page, go back one entry.
        --page;
        offset = PageTable.Extra(page);
    }
    if (offset == OFFSET_NO_DATA) {
        // Scroll back until we find a page entry with real data in
        // it. This handles the case of a large object allocated
        // across pages.
        do {
            --page;
            offset = PageTable.Extra(page);
        } while (offset == OFFSET_NO_DATA);
    }
    VTable.Assert(offset > OFFSET_NO_DATA, "No offset data");

    // Unused: since we currently do not use negative offsets in the
    // page table. This would be more efficient for really big
    // objects, but the OFFSET_NO_DATA value works fine too.
    /*
     * // Scroll backwards using big steps. Offset will never be
     * // OFFSET_NO_DATA in this loop.
     * while (offset < OFFSET_NO_DATA) {
     *     entry += (offset - OFFSET_SKEW);
     *     offset = *entry;
     * }
     */
    return(PageTable.PageAddr(page) + (offset - OFFSET_SKEW));
}
private static void RecordSlow(Thread currentThread, UIntPtr value)
{
    // Try to acquire a new chunk of the store buffer
    while (writeBufferIndex < writeBufferSize) {
        int oldIndex = writeBufferIndex;
        int newIndex = oldIndex + chunkSize;
        if (Interlocked.CompareExchange(ref writeBufferIndex,
                                        newIndex, oldIndex) == oldIndex) {
            // We secured a new block of write buffer for this thread
            UIntPtr *cursor = writeBuffer + oldIndex;
            *cursor = value;
            cursor++;
            MixinThread(currentThread).ssb.cursor = cursor;
            MixinThread(currentThread).ssb.limit = writeBuffer + newIndex;
            return;
        }
    }

    // We have run out of write barrier space
    if (StopTheWorldGCData.CurrentPhase == StopTheWorldPhase.SingleThreaded) {
        VTable.DebugBreak();
    }
    VTable.Assert(MixinThread(currentThread).ssb.overflowValue == UIntPtr.Zero);
    MixinThread(currentThread).ssb.overflowValue = value;
    GC.InvokeCollection(currentThread);
    while (MixinThread(currentThread).ssb.overflowValue != UIntPtr.Zero) {
        // Another thread must have taken charge of performing the
        // collection and hadn't yet assigned a GCRequest to the
        // current thread. Give the other thread a chance to do
        // some work before we try invoking the collector again.
        Thread.Yield();
        GC.InvokeCollection(currentThread);
    }
}
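// RecordSlow claims a private chunk of the shared write buffer with a single
// CompareExchange on the shared index, so threads only contend on that one
// integer rather than on every write. Below is a hedged sketch of just that
// chunk-claiming step; the buffer size, ChunkSize, and the names used are
// illustrative assumptions, and the real code's overflow handling (invoking a
// collection) is only noted in a comment.
using System;
using System.Threading;

public static class ChunkedBufferSketch
{
    private static readonly UIntPtr[] buffer = new UIntPtr[1 << 16];
    private static int nextFree;            // shared claim index
    private const int ChunkSize = 256;

    // Returns the start index of a freshly claimed chunk, or -1 if the
    // buffer is exhausted (the real code falls back to an overflow slot
    // and triggers a collection in that case).
    public static int ClaimChunk()
    {
        while (true) {
            int oldIndex = Volatile.Read(ref nextFree);
            if (oldIndex + ChunkSize > buffer.Length) {
                return -1;
            }
            if (Interlocked.CompareExchange(ref nextFree,
                                            oldIndex + ChunkSize,
                                            oldIndex) == oldIndex) {
                // This thread now owns [oldIndex, oldIndex + ChunkSize).
                return oldIndex;
            }
            // Lost the race; retry with the updated index.
        }
    }
}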
private int WaitDone(
    ThreadEntry entry,
    int handleId,
    ref ThreadQueueStruct deferredQueue)
{
    ThreadState state;
    int handledIdUnblocked;

    // Assert preconditions: we assume the queues are stable - the spinlock
    // is held when this method is called
    VTable.Assert(myLock.IsHeldBy(Thread.CurrentThread));

    // Indicate that the thread might be given ownership of the object so
    // that it can't be aborted
    entry.Thread.DelayStop(true);

    // Attempt to unblock the thread; if we fail it means the thread has
    // already timed out. If the thread has timed out, don't move it to the
    // signaled queue
    if ((handledIdUnblocked = entry.Thread.Unblock(handleId)) == handleId) {
        // The signal is good - we can take the thread from the non-signaled
        // queue and move it to the signaled queue
        MoveEntryToSignalQueue(entry);

        // If the thread state is blocked, we are responsible for waking it up
        if (entry.Thread.ShouldCallSchedulerUnBlock(handleId)) {
            // Enqueue the entry onto the deferred unblock queue. We will call
            // unblock once we are done processing the signal and have
            // released the lock
            deferredQueue.EnqueueTail(entry.Thread.deferredEntry);
        }
    }
    else {
        // We haven't granted ownership to the thread, so we need to restore
        // its delay-abort status
        entry.Thread.DelayStop(false);
    }
    return(handledIdUnblocked);
}