//
// Tear down the virtual->physical mapping for a single page and hand
// the physical page that backed it back to the physical-page allocator.
//
// The caller must pass a currently-mapped virtual address; unmapping an
// unmapped page is a bug, checked by the leading assert.
//
internal static void UnmapAndReleasePage(UIntPtr virtualAddr)
{
    DebugStub.Assert(VMManager.IsPageMapped(virtualAddr),
                     "Trying to unmap an unmapped page");

    // UnmapPage returns the physical page that used to back this
    // virtual address; it must exist since the page was mapped.
    PhysicalAddress physPage = VMManager.UnmapPage(virtualAddr);
    DebugStub.Assert(physPage != PhysicalAddress.Null);

    PhysicalPages.FreePage(physPage);
}
//
// Free a block of numPages pages starting at startPage: mark the pages
// PageFree in this range's descriptor table and, when the block was the
// most recently allocated one, pull the nextAlloc high-water mark back
// down to reduce fragmentation.
//
// Returns the number of bytes freed.
//
// startPage must be page-aligned and the whole block must lie inside
// [dataStart, rangeLimit] below the current nextAlloc.
//
private unsafe UIntPtr FreeInternal(UIntPtr startPage,
                                    UIntPtr numPages)
{
    DebugStub.Assert(MemoryManager.IsPageAligned(startPage));
    DebugStub.Assert(startPage >= dataStart);

    // One-past-the-end address of the block being freed.
    UIntPtr blockLimit = startPage + MemoryManager.BytesFromPages(numPages);
    DebugStub.Assert(blockLimit <= rangeLimit);
    DebugStub.Assert(nextAlloc >= blockLimit);

    bool iflag = Lock();

    try {
        // Mark the pages free within our lock so we can rely on
        // blocks being uniformly marked free in here
        SetRange(startPage, blockLimit, MemoryManager.PageFree);

        // Reduce fragmentation: lower the nextAlloc pointer if
        // we have freed the top end of memory.
        if (nextAlloc == blockLimit) {
            nextAlloc = startPage;

            //
            // NOTE: this chooses to use the page table
            // information, which is currently usermode-accessible
            // in the case of a user-mode range. Keep in mind that
            // the information may be corrupt!
            //
            // Further optimization: drop nextAlloc more
            // if the memory below us is free, too.
            uint * pageTable = PageTable;
            UIntPtr stepPage = nextAlloc;
            uint val;

            // Walk downwards one page at a time: each iteration
            // commits the current position into nextAlloc, then
            // probes the page below. Stops at the first page that
            // is either not described as PageFree or is still
            // mapped (the IsPageMapped probe is a sanity check
            // against a corrupt descriptor table). The walk is
            // bounded because the page-table pages below dataStart
            // are marked KernelPageNonGC, not PageFree.
            do {
                nextAlloc = stepPage;
                stepPage -= MemoryManager.PageSize;
                uint dsc = pageTable[(uint)PageFromAddr(stepPage)];
                val = dsc & MemoryManager.SystemPageMask;
            }while ((val == MemoryManager.PageFree) &&
                    (!VMManager.IsPageMapped(stepPage))); // sanity
        }

        // Bookkeeping for statistics; protected by the range lock.
        freedCount++;
        freedBytes += (ulong)MemoryManager.BytesFromPages(numPages);
    }
    finally {
        Unlock(iflag);
    }

    return(MemoryManager.BytesFromPages(numPages));
}
/////////////////////////////////////
// PUBLIC METHODS
/////////////////////////////////////

//
// The range of memory turned over to a VirtualMemoryRange structure
// must not have *any* mapped pages in it to start out with
//
// A VirtualMemoryRange can build a pagetable that *describes* more
// memory than it *manages* (this supports some kernel GC oddities).
// In that case, pages out-of-range are marked as PageType.Unknown.
// Obviously, allocation requests are never satisfied with
// out-of-bounds data.
//
internal unsafe VirtualMemoryRange_struct(
    UIntPtr baseAddr, UIntPtr limitAddr,
    UIntPtr descrBaseAddr, UIntPtr descrLimitAddr,
    ProtectionDomain inDomain)
{
    // All four boundaries must fall on page boundaries.
    DebugStub.Assert(MemoryManager.IsPageAligned(baseAddr));
    DebugStub.Assert(MemoryManager.IsPageAligned(limitAddr));
    DebugStub.Assert(MemoryManager.IsPageAligned(descrBaseAddr));
    DebugStub.Assert(MemoryManager.IsPageAligned(descrLimitAddr));

    // The descriptive range can't be smaller than the managed range
    DebugStub.Assert(descrLimitAddr >= limitAddr);
    DebugStub.Assert(descrBaseAddr <= baseAddr);

    descrBase = descrBaseAddr;
    descrLimit = descrLimitAddr;
    rangeBase = baseAddr;
    rangeLimit = limitAddr;
    rangeLock = new SpinLock(SpinLock.Types.VirtualMemoryRange);

    describedPages = MemoryManager.PagesFromBytes(descrLimit - descrBase);

    // One uint descriptor per described page, rounded up to whole
    // pages; the descriptor table lives at the bottom of the range.
    UIntPtr tableBytes =
        MemoryManager.PagePad(describedPages * sizeof(uint));

    dataStart = baseAddr + tableBytes;
    nextAlloc = dataStart;

    // Back the descriptor table with real pages before touching it.
    pageTable = (uint *)baseAddr;

    if (!MemoryManager.CommitAndMapRange(
            baseAddr, baseAddr + tableBytes, inDomain)) {
        Kernel.Panic("Couldn't get pages to create a new VirtualMemoryRange page table");
    }

    allocatedBytes = 0;
    allocatedCount = 0;
    freedBytes = 0;
    freedCount = 0;

    // Pages we describe but do not manage are flagged Unknown.
    if (descrBase < rangeBase) {
        SetRange(descrBase, rangeBase, MemoryManager.PageUnknown);
    }
    if (descrLimit > rangeLimit) {
        SetRange(rangeLimit, descrLimit, MemoryManager.PageUnknown);
    }

    // The page-table pages themselves are in use by the System
    SetRange((UIntPtr)pageTable,
             (UIntPtr)pageTable + tableBytes,
             MemoryManager.KernelPageNonGC);

    // Everything above the descriptor table starts out Free.
    SetRange(dataStart, rangeLimit, MemoryManager.PageFree);

#if DEBUG
    // Check that our data range is pristine
    for (UIntPtr probeAddr = dataStart;
         probeAddr < rangeLimit;
         probeAddr += MemoryManager.PageSize) {
        DebugStub.Assert(!VMManager.IsPageMapped(probeAddr));
    }
#endif
}