internal static bool TryReservePages(Thread currentThread,
                                     UIntPtr startPage,
                                     UIntPtr pageCount,
                                     PageType newType,
                                     ref bool fCleanPages) {
    Trace.Log(Trace.Area.Page,
              "TryReservePages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    UIntPtr index = startPage;
    while (index < endPage &&
           PageTable.IsUnusedPage(index) &&
           PageTable.IsMyPage(index)) {
        index++;
    }
    if (PageTable.IsUnallocatedPage(PageTable.Type(index))) {
        // We should try to extend the region of allocated pages
        UIntPtr pagesNeeded = pageCount - (index - startPage);
        UIntPtr bytesNeeded = PageTable.RegionSize(pagesNeeded);
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = PageTable.PageAddr(index);
        bool gotMemory = false;
        bool iflag = EnterMutex(currentThread);
        try {
            gotMemory = MemoryManager.AllocateMemory(startAddr, allocSize);
            if (gotMemory) {
                UIntPtr allocPages = PageTable.PageCount(allocSize);
                MarkUnusedPages(/* avoid recursive locking */ null,
                                index, allocPages, true);
            }
        } finally {
            LeaveMutex(currentThread, iflag);
        }
        if (gotMemory) {
            bool success =
                TryReserveUnusedPages(currentThread, startPage, pageCount,
                                      newType, ref fCleanPages);
            Trace.Log(Trace.Area.Page, "TryReservePages success={0}",
                      __arglist(success));
            return(success);
        }
    }
    return(false);
}
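// Illustrative sketch (not part of the runtime): Util.Pad is used above to round a
// byte count up to the heap commit granularity, but its implementation is not shown
// in this section.  The helper below is a hypothetical stand-in showing the usual
// round-up arithmetic; it assumes the alignment is a power of two, which the commit
// sizes are expected to be.
internal static class PadSketch {
    // Round size up to the next multiple of alignment (alignment must be a
    // power of two for the mask trick to be valid).
    internal static ulong PadUp(ulong size, ulong alignment) {
        return (size + alignment - 1) & ~(alignment - 1);
    }
}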
private static void EnsureMutatorControlNoGC(ref int statusWord,
                                             int currentThreadIndex) {
    if (!fInMutatorState(statusWord)) {
        TakeMutatorControlNoGC(ref statusWord, currentThreadIndex);
    }
    VTable.Deny(fInDormantState(statusWord));
    VTable.Deny(fUnderGCControl(statusWord));
}
private static bool SwitchToMutatorState(ref int statusWord) {
    VTable.Deny(fInMutatorState(statusWord),
                "Thread is already under mutator control");
    VTable.Assert(fInDormantState(statusWord));
    int oldValue = statusWord & ~(GCRequest | GCControl);
    int newValue = (oldValue | MutatorState) & ~DormantState;
    return(CompareAndSwap(ref statusWord, newValue, oldValue));
}
private static void TakeMutatorControlNoGC(ref int statusWord,
                                           int currentThreadIndex) {
    if (!SwitchToMutatorState(ref statusWord)) {
        TakeMutatorControlSlowNoGC(ref statusWord, currentThreadIndex);
    }
    VTable.Assert(fInMutatorState(statusWord));
    VTable.Deny(fInDormantState(statusWord));
    VTable.Deny(fUnderGCControl(statusWord));
}
private static void TakeDormantControlNoGC(ref int statusWord) {
    VTable.Assert(fInMutatorState(statusWord));
    VTable.Deny(fInDormantState(statusWord));
    int oldValue, newValue;
    do {
        oldValue = statusWord;
        newValue = (oldValue | DormantState) & ~MutatorState;
    } while (!CompareAndSwap(ref statusWord, newValue, oldValue));
}
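// Illustrative sketch (not part of the runtime): the transitions above rely on a
// compare-and-swap retry loop over a packed status word.  The bit values below are
// hypothetical placeholders, not the runtime's actual MutatorState/DormantState
// encoding; the sketch only demonstrates the lock-free set-one-bit, clear-another
// pattern using System.Threading.Interlocked.CompareExchange.
internal static class StatusWordSketch {
    private const int SketchDormantState = 0x1;   // hypothetical bit assignment
    private const int SketchMutatorState = 0x2;   // hypothetical bit assignment

    // Atomically set the dormant bit and clear the mutator bit, retrying if
    // another thread updates the status word between the read and the swap.
    internal static void EnterDormant(ref int statusWord) {
        int oldValue, newValue;
        do {
            oldValue = statusWord;
            newValue = (oldValue | SketchDormantState) & ~SketchMutatorState;
        } while (System.Threading.Interlocked.CompareExchange(
                     ref statusWord, newValue, oldValue) != oldValue);
    }
}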
internal void ClearCalleeReg() {
    VTable.Deny(this.pending);
    UIntPtr *scan = this.head;
    while (scan != null) {
        // Each slot in the chain holds the link to the next slot; restore
        // the saved register value into the slot, then follow the link.
        UIntPtr temp = *scan;
        *scan = value;
        scan = (UIntPtr *) temp;
    }
    this.head = null;
}
private static void LinkUnusedPages(UIntPtr startPage,
                                    UIntPtr pageCount,
                                    bool asVictim) {
    if (PageManager.SlowDebug) {
        for (UIntPtr i = startPage; i < startPage + pageCount; i++) {
            VTable.Assert(PageTable.IsUnusedPage(i) &&
                          PageTable.IsMyPage(i),
                          "Incorrect page to link into unused region");
        }
    }
    Trace.Log(Trace.Area.Page,
              "LinkUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(startPage > UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    VTable.Deny(startPage + pageCount > PageTable.pageTableCount);
    VTable.Deny(startPage + pageCount < PageTable.pageTableCount &&
                PageTable.IsUnusedPage(startPage + pageCount) &&
                PageTable.IsMyPage(startPage + pageCount));
    UnusedBlockHeader *header =
        (UnusedBlockHeader *) PageTable.PageAddr(startPage);
    UnusedBlockHeader.Initialize(header, pageCount);
    int slot = SlotFromCount(pageCount);
    // Unused blocks are linked into the free list either as the result of a
    // collection or as a result of carving a big block into a smaller
    // allocation and a remainder.  When such a remainder is linked back into
    // the free list, it is identified as a victim.  We favor subsequent
    // allocations from these victims, in an attempt to reduce fragmentation.
    // This is achieved by keeping victims at the head of the free list.
    //
    // TODO: the long-term solution is to perform best fit on the free list.
    if (asVictim || unusedMemoryBlocks[slot].next == null) {
        fixed(UnusedBlockHeader *listHeader = &unusedMemoryBlocks[slot]) {
            UnusedBlockHeader.InsertNext(listHeader, header);
        }
    } else {
        UnusedBlockHeader *listHeader = unusedMemoryBlocks[slot].next;
        UnusedBlockHeader.InsertNext(listHeader, header);
    }
}
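// Illustrative sketch (not part of the runtime): SlotFromCount is not shown in this
// section.  FindUnusedPages below scans slots 0 through 31, which is consistent with
// bucketing free blocks by the index of the highest set bit of their page count.
// The helper below is a plausible, purely hypothetical version of that mapping,
// shown only to make the slot arithmetic concrete.
internal static class FreeListSlotSketch {
    // Returns the index of the highest set bit of pageCount (0 for a
    // single-page block), clamped to the 32 available slots.
    internal static int SlotFromCountSketch(ulong pageCount) {
        int slot = 0;
        while (pageCount > 1 && slot < 31) {
            pageCount >>= 1;
            slot++;
        }
        return slot;
    }
}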
private static UIntPtr UnlinkUnusedPages(UIntPtr startPage) {
    VTable.Assert(PageTable.IsUnusedPage(startPage) &&
                  PageTable.IsMyPage(startPage));
    VTable.Deny(startPage > UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UnusedBlockHeader *header =
        (UnusedBlockHeader *) PageTable.PageAddr(startPage);
    UIntPtr pageCount = UnusedBlockHeader.Remove(header);
    Trace.Log(Trace.Area.Page,
              "UnlinkUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    return(pageCount);
}
private static void TakeDormantControl(ref int statusWord, int threadIndex) {
    VTable.Assert(fInMutatorState(statusWord));
    VTable.Deny(fInDormantState(statusWord));
    int oldValue, newValue;
    do {
        oldValue = statusWord;
        newValue = (oldValue | DormantState) & ~MutatorState;
    } while (!CompareAndSwap(ref statusWord, newValue, oldValue));
    if (fHasGCRequest(newValue)) {
        GC.ThreadDormantGCNotification(threadIndex);
    }
}
private static UIntPtr FindUnusedPages(Thread currentThread,
                                       UIntPtr pageCount,
                                       PageType newType) {
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    int slot = SlotFromCount(pageCount);
    Trace.Log(Trace.Area.Page,
              "FindUnusedPages count={0:x} slot={1}",
              __arglist(pageCount, slot));
    bool iflag = EnterMutex(currentThread);
    try {
        while (slot < 32) {
            UnusedBlockHeader *header = unusedMemoryBlocks[slot].next;
            while (header != null) {
                if (header->count >= pageCount) {
                    UIntPtr startPage = PageTable.Page((UIntPtr) header);
                    UIntPtr regionSize = UnlinkUnusedPages(startPage);
                    SetPageTypeClean(startPage, pageCount, newType);
                    if (regionSize > pageCount) {
                        // Return the unused tail of the block to the free
                        // list as a victim.
                        UIntPtr restCount = regionSize - pageCount;
                        UIntPtr endPage = startPage + pageCount;
                        LinkUnusedPages(endPage, restCount, true);
                    }
                    Trace.Log(Trace.Area.Page,
                              "FindUnusedPages success {0:x}",
                              __arglist(startPage));
                    return(startPage);
                }
                header = header->next;
            }
            slot++;
        }
        return(UIntPtr.Zero);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
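// Illustrative sketch (not part of the runtime): a managed, self-contained model of
// the search FindUnusedPages performs.  Blocks are kept in size-bucketed lists; the
// search starts at the bucket for the requested size, walks the buckets upward, takes
// the first block that is large enough, and reports the surplus that would be
// re-linked as a victim.  The list representation is a simplified stand-in, not the
// runtime's UnusedBlockHeader structures.
internal static class FreeListSearchSketch {
    // 32 size buckets holding block sizes (in pages).
    internal static readonly System.Collections.Generic.List<ulong>[] buckets =
        new System.Collections.Generic.List<ulong>[32];

    // Returns the size of a block taken from the free lists, or 0 if no bucket
    // holds a block of at least requestPages pages.
    internal static ulong TakeFirstFit(ulong requestPages, out ulong surplus) {
        int slot = 0;
        for (ulong n = requestPages; n > 1 && slot < 31; n >>= 1) {
            slot++;   // highest-set-bit bucketing, as in the sketch above
        }
        for (; slot < 32; slot++) {
            var list = buckets[slot];
            if (list == null) {
                continue;
            }
            for (int i = 0; i < list.Count; i++) {
                if (list[i] >= requestPages) {
                    ulong blockPages = list[i];
                    list.RemoveAt(i);
                    surplus = blockPages - requestPages;
                    return blockPages;
                }
            }
        }
        surplus = 0;
        return 0;
    }
}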
public void VerifyHeap() {
    // Temporarily set the thread allocation point to the unused space
    // token, if necessary.
    Thread currentThread = Thread.CurrentThread;
    VerifyAllThreadsGCControlled(currentThread);
    // Do the real work
    VerifyPages(this.objectVisitor);
    StaticData.ScanStaticData(this.referenceVisitor);
    VTable.Deny(PageTable.IsUnusedPage(
                    PageTable.Page(Magic.addressOf(Thread.threadTable))));
    for (int i = 0; i < Thread.threadTable.Length; i++) {
        Thread t = Thread.threadTable[i];
        if (t != null) {
            VTable.Deny(PageTable.IsUnusedPage(
                            PageTable.Page(Magic.addressOf(t))));
            this.stackVerifier.Verify(this.threadReferenceVisitor, t);
        }
    }
}
internal void ScanLiveReg(uint kind, NonNullReferenceVisitor visitor) {
    switch (kind) {
      case 0: {
          // Value is not a traceable heap pointer
          break;
      }
      case 1: {
          // Value is a pointer variable
          VTable.Deny(this.head == null);
          if (value != UIntPtr.Zero) {
              fixed(UIntPtr *valueField = &this.value) {
                  visitor.Visit(valueField);
              }
          }
          ClearCalleeReg();
          break;
      }
      case 2: {
          // Value is unchanged since function entry
          VTable.Deny(this.pending);
          this.pending = true;
          break;
      }
      case 3:
      default: {
          VTable.NotReached("ScanLiveReg 3 or default");
          break;
      }
    }
}
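// Illustrative sketch (not part of the runtime): the switch above interprets a small
// "kind" code describing what a callee-saved register slot holds.  The hypothetical
// enum below simply names the cases handled above; the actual encoding is produced
// elsewhere by the stack-descriptor tables.
internal enum LiveRegKindSketch {
    NotAPointer = 0,         // value is not a traceable heap pointer
    Pointer = 1,             // value is a pointer variable; visit it, then clear
    UnchangedSinceEntry = 2  // value is unchanged since function entry; mark pending
}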
internal static unsafe bool AllocateMemory(UIntPtr startAddr, UIntPtr size) {
    VTable.Deny(inAllocator);
    inAllocator = true;
    VTable.Assert(PageTable.PageAligned(startAddr));
    VTable.Assert(PageTable.PageAligned(size));
#if SINGULARITY_KERNEL
    UIntPtr addr = Sing_MemoryManager.KernelExtend(
        startAddr, Sing_MemoryManager.PagesFromBytes(size),
        Process.kernelProcess, PageType.Unknown);
#elif SINGULARITY_PROCESS
    UIntPtr addr = PageTableService.AllocateExtend(startAddr, size);
#endif
    inAllocator = false;
    if (addr != UIntPtr.Zero) {
        Util.MemClear(addr, size);
        return(true);
    }
    return(false);
}
internal static void EnsureAllocationAllowed() {
    // Verify that we are not in an interrupt or non-preemptible region
    // where allocations are prohibited.
    Microsoft.Singularity.Processor currentProcessor =
        Microsoft.Singularity.Processor.CurrentProcessor;
    if (currentProcessor != null) {
        // The Processor object itself may still need to be allocated.
        if (currentProcessor.InInterruptContext) {
            Tracing.Log(Tracing.Fatal,
                        "Attempt to allocate memory from interrupt context!");
            VTable.DebugPrint("Attempt to allocate memory from interrupt context!\n");
            VTable.DebugBreak();
        }
#if false
        // Disabled: too many allocations currently occur with preemption
        // disabled for this check to be debuggable right now.
        if (currentProcessor.PreemptionDisabled) {
            VTable.DebugPrint("Attempt to allocate memory with preemption disabled!\n");
            VTable.DebugBreak();
        }
#endif
    }
    VTable.Deny(Thread.CurrentThread != null &&
                Transitions.fInitializedRuntime &&
                Transitions.UnderGCControl(Thread.GetCurrentThreadIndex()));
}
internal static UIntPtr EnsurePages(Thread currentThread,
                                    UIntPtr pageCount,
                                    PageType newType,
                                    ref bool fCleanPages) {
    if (currentThread != null) {
        GC.CheckForNeededGCWork(currentThread);
    }
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    // Try to find already allocated but unused pages
    UIntPtr foundPages = FindUnusedPages(currentThread, pageCount, newType);
    if (foundPages != UIntPtr.Zero) {
        if (fCleanPages) {
            CleanFoundPages(foundPages);
        } else {
            fCleanPages = FoundOnlyCleanPages(foundPages);
        }
        return(foundPages);
    }
    // We need to allocate new pages
    bool iflag = EnterMutex(currentThread);
    try {
        UIntPtr bytesNeeded = PageTable.RegionSize(pageCount);
        UIntPtr allocSize = Util.Pad(bytesNeeded, heap_commit_size);
        UIntPtr startAddr = MemoryManager.AllocateMemory(allocSize);
        if (startAddr == UIntPtr.Zero) {
            if (heap_commit_size > os_commit_size) {
                allocSize = Util.Pad(bytesNeeded, os_commit_size);
                startAddr = MemoryManager.AllocateMemory(allocSize);
            }
        }
        if (startAddr == UIntPtr.Zero) {
            // BUGBUG: if in CMS, we should wait for one complete GC cycle and
            // then retry.  For STW, we may get here even if the collector has
            // not been triggered just prior.
            PageTable.Dump("Out of memory");
            throw outOfMemoryException;
        }
        UIntPtr startPage = PageTable.Page(startAddr);
        PageTable.SetType(startPage, pageCount, newType);
        PageTable.SetProcess(startPage, pageCount);
        UIntPtr extraPages = PageTable.PageCount(allocSize) - pageCount;
        if (extraPages > 0) {
            // Mark the new memory pages as allocated-but-unused
            MarkUnusedPages(/* avoid recursive locking */ null,
                            startPage + pageCount, extraPages, true);
        }
        return(startPage);
    } finally {
        LeaveMutex(currentThread, iflag);
    }
}
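// Illustrative sketch (not part of the runtime): EnsurePages pads the request up to
// the commit granularity, so the committed region may contain more pages than were
// asked for; the surplus is handed back via MarkUnusedPages.  The arithmetic below
// shows that calculation with hypothetical constants (a 4 KB page and a 64 KB commit
// unit); the real PageTable.RegionSize/PageCount and commit sizes are defined
// elsewhere in the runtime.
internal static class CommitArithmeticSketch {
    private const ulong PageBytes = 4096;        // hypothetical page size
    private const ulong CommitBytes = 64 * 1024; // hypothetical commit unit

    // Number of surplus pages produced by rounding a request up to the
    // commit granularity.
    internal static ulong ExtraPagesAfterCommit(ulong requestPages) {
        ulong bytesNeeded = requestPages * PageBytes;
        ulong allocSize = (bytesNeeded + CommitBytes - 1) & ~(CommitBytes - 1);
        return allocSize / PageBytes - requestPages;
    }
}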
internal void Write(UIntPtr value) {
    VTable.Deny(this.IsEmpty);
    *this.cursor++ = value;
}
internal static bool TryReserveUnusedPages(Thread currentThread,
                                           UIntPtr startPage,
                                           UIntPtr pageCount,
                                           PageType newType,
                                           ref bool fCleanPages) {
    Trace.Log(Trace.Area.Page,
              "TryReserveUnusedPages start={0:x} count={1:x}",
              __arglist(startPage, pageCount));
    VTable.Deny(PageTable.IsUnusedPageType(newType));
    VTable.Assert(pageCount > UIntPtr.Zero);
    VTable.Deny(startPage != UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1));
    UIntPtr endPage = startPage + pageCount;
    if (endPage > PageTable.pageTableCount) {
        return(false);
    }
    if (currentThread != null) {
        GC.CheckForNeededGCWork(currentThread);
    }
    bool iflag = EnterMutex(currentThread);
    try {
        // A GC can occur before we acquire the mutex, and the pages can be
        // collected, so re-validate the region.
        if (startPage != UIntPtr.Zero &&
            PageTable.IsUnusedPage(startPage - 1)) {
            return(false);
        }
        if (!PageTable.IsUnusedPage(startPage) ||
            !PageTable.IsMyPage(startPage)) {
            return(false);
        }
        UnusedBlockHeader *header =
            (UnusedBlockHeader *) PageTable.PageAddr(startPage);
        if (header->count < pageCount) {
            return(false);
        }
        UIntPtr regionPages = UnlinkUnusedPages(startPage);
        Trace.Log(Trace.Area.Page,
                  "TryReserveUnusedPages found={0:x}",
                  __arglist(regionPages));
        SetPageTypeClean(startPage, pageCount, newType);
        if (regionPages > pageCount) {
            UIntPtr suffixPages = regionPages - pageCount;
            LinkUnusedPages(endPage, suffixPages, true);
        }
    } finally {
        LeaveMutex(currentThread, iflag);
    }
    // Now that we are outside the mutex, perform the real cleaning of the
    // reserved pages.
    if (fCleanPages) {
        CleanFoundPages(startPage);
    } else {
        fCleanPages = FoundOnlyCleanPages(startPage);
    }
    return(true);
}
internal UIntPtr Read() {
    VTable.Deny(this.IsEmpty);
    return(*this.cursor++);
}
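// Illustrative sketch (not part of the runtime): Write and Read above advance a raw
// UIntPtr cursor through an unmanaged buffer.  The fragment below shows the same
// post-increment cursor pattern over a stackalloc buffer; the buffer, its size, and
// the omitted IsEmpty bookkeeping are hypothetical simplifications.
internal static class CursorSketch {
    internal static unsafe void Demo() {
        UIntPtr *buffer = stackalloc UIntPtr[4];
        UIntPtr *cursor = buffer;
        *cursor++ = (UIntPtr) 1u;   // Write: store, then advance
        *cursor++ = (UIntPtr) 2u;
        cursor = buffer;            // rewind before reading
        UIntPtr first = *cursor++;  // Read: load, then advance
        UIntPtr second = *cursor++;
    }
}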