Example #1
        /////////////////////////////////////
        // PUBLIC METHODS
        /////////////////////////////////////

        public unsafe PhysicalHeap(UIntPtr start, UIntPtr limit)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(start));
            DebugStub.Assert(MemoryManager.IsPageAligned(limit));

            // Note that this wastes a little bit of memory by allocating
            // table space to describe page-table memory!
            UIntPtr numPages      = MemoryManager.PagesFromBytes(limit - start);
            UIntPtr bytesForTable = numPages * BytesPerTableEntry;

            bytesForTable = MemoryManager.PagePad(bytesForTable);
            UIntPtr pagesForTable = MemoryManager.PagesFromBytes(bytesForTable);

            pageCount = numPages - pagesForTable;
            startAddr = start + bytesForTable;
            heapLimit = limit;
            pageTable = (ushort *)start;
            heapLock  = new SpinLock(SpinLock.Types.PhysicalHeap);

            // The entire heap is free to start out with
            freeList = new FreeList();

            // Initialize the page table
            SetPages(startAddr, pageCount, FreePage);

            fixed(PhysicalHeap *thisPtr = &this)
            {
                freeList.CreateAndInsert(thisPtr, startAddr, pageCount);
            }

            CheckConsistency();
        }
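For scale, the self-description waste the comment mentions is small. Below is a minimal standalone sketch of the same sizing arithmetic, assuming 4 KiB pages and 2-byte table entries (the real values come from MemoryManager and BytesPerTableEntry; the pageTable above is a ushort*, hence the 2):

    using System;

    class TableSizingSketch
    {
        const ulong PageSize           = 4096; // assumption: 4 KiB pages
        const ulong BytesPerTableEntry = 2;    // assumption: ushort entries, matching pageTable above

        static ulong PagePad(ulong bytes) => (bytes + PageSize - 1) & ~(PageSize - 1);

        static void Main()
        {
            ulong heapBytes     = 64UL * 1024 * 1024;                     // a 64 MiB heap
            ulong numPages      = heapBytes / PageSize;                   // 16384 pages
            ulong bytesForTable = PagePad(numPages * BytesPerTableEntry); // 32 KiB
            ulong pagesForTable = bytesForTable / PageSize;               // 8 pages

            // Usable pages exclude the pages holding the table itself.
            Console.WriteLine(numPages - pagesForTable); // 16376
        }
    }

Eight pages of bookkeeping for a 64 MiB heap, so the waste noted in the comment is well under 0.1%.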
Example #2
        private UIntPtr PageIndex(UIntPtr pageAddr)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(pageAddr));
            DebugStub.Assert(pageAddr >= startAddr && pageAddr < heapLimit,
                             "PhysicalHeap.PageIndex pageAddr = {0:x} Range = {1:x} ... {2:x}",
                             __arglist(pageAddr, startAddr, heapLimit));

            return(MemoryManager.PagesFromBytes(pageAddr - startAddr));
        }
Example #3
        private unsafe UIntPtr FreeInternal(UIntPtr startPage,
                                            UIntPtr numPages)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(startPage));
            DebugStub.Assert(startPage >= dataStart);
            UIntPtr blockLimit = startPage + MemoryManager.BytesFromPages(numPages);

            DebugStub.Assert(blockLimit <= rangeLimit);
            DebugStub.Assert(nextAlloc >= blockLimit);

            bool iflag = Lock();

            try {
                // Mark the pages free within our lock so we can rely on
                // blocks being uniformly marked free in here
                SetRange(startPage, blockLimit, MemoryManager.PageFree);

                // Reduce fragmentation: lower the nextAlloc pointer if
                // we have freed the top end of memory.
                if (nextAlloc == blockLimit)
                {
                    nextAlloc = startPage;

                    //
                    // NOTE: this chooses to use the page table
                    // information, which is currently usermode-accessible
                    // in the case of a user-mode range. Keep in mind that
                    // the information may be corrupt!
                    //

                    // Further optimization: drop nextAlloc more
                    // if the memory below us is free, too.
                    uint *  pageTable = PageTable;
                    UIntPtr stepPage  = nextAlloc;
                    uint    val;

                    do
                    {
                        nextAlloc = stepPage;
                        stepPage -= MemoryManager.PageSize;
                        uint dsc = pageTable[(uint)PageFromAddr(stepPage)];
                        val = dsc & MemoryManager.SystemPageMask;
                    } while ((val == MemoryManager.PageFree) &&
                             (!VMManager.IsPageMapped(stepPage))); // sanity
                }

                freedCount++;
                freedBytes += (ulong)MemoryManager.BytesFromPages(numPages);
            }
            finally {
                Unlock(iflag);
            }

            return(MemoryManager.BytesFromPages(numPages));
        }
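The nextAlloc-lowering loop is easier to follow with the machinery stripped away. A standalone sketch, modelling the page table as a plain bool[] of free flags; everything here is hypothetical except the loop shape:

    using System;

    class NextAllocSketch
    {
        static void Main()
        {
            // Pages 0..9: we just freed pages 7..8, pages 5..6 were
            // already free, and page 4 is still allocated.
            bool[] pageFree = { false, false, false, false, false,
                                true,  true,  true,  true,  false };
            int startPage = 7;          // first page of the block just freed
            int nextAlloc = startPage;  // first step: drop to the freed block

            // Then walk further down while the pages below are free too,
            // mirroring the do/while in FreeInternal.
            int stepPage = nextAlloc;
            do
            {
                nextAlloc = stepPage;
                stepPage--;
            } while (stepPage >= 0 && pageFree[stepPage]);

            Console.WriteLine($"nextAlloc lowered to page {nextAlloc}"); // 5
        }
    }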
Example #4
        internal static void FreeMemory(UIntPtr startAddr, UIntPtr size)
        {
#if SINGULARITY_KERNEL
            DebugStub.Assert(Sing_MemoryManager.IsPageAligned(size));
            Sing_MemoryManager.KernelFree(
                startAddr, Sing_MemoryManager.PagesFromBytes(size),
                Process.kernelProcess);
#elif SINGULARITY_PROCESS
            VTable.Assert((size & PageTable.PageMask) == 0);
            PageTableService.Free(startAddr, size);
#endif
        }
Example #5
            internal static unsafe LastNode *Create(PhysicalHeap *inHeap,
                                                    UIntPtr addr, FreeNode *node)
            {
                LastNode *last = (LastNode *)(addr + node->bytes - MemoryManager.PageSize);

                DebugStub.Assert((UIntPtr)last >= inHeap->startAddr);
                DebugStub.Assert((UIntPtr)last <= inHeap->heapLimit);
                DebugStub.Assert(MemoryManager.IsPageAligned((UIntPtr)last));
                last->signature = LastNode.Signature;
                last->node      = node;
                return(last);
            }
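The trailer exists so that Free (Example #11) can coalesce with a preceding free block in O(1): the page just below a block being freed either holds a LastNode, which points straight back at the preceding block's FreeNode header, or it does not. A minimal sketch of the address arithmetic, assuming 4 KiB pages:

    using System;

    class LastNodeSketch
    {
        const ulong PageSize = 4096; // assumption

        static void Main()
        {
            ulong addr  = 0x10000;      // start of a free block
            ulong bytes = 3 * PageSize; // a three-page block

            // The trailer sits at the start of the block's final page.
            ulong last = addr + bytes - PageSize;
            Console.WriteLine($"trailer at 0x{last:x}"); // trailer at 0x12000
        }
    }

For a single-page block the formula would collapse onto the FreeNode header itself, which is presumably why single-page blocks carry no trailer (CheckConsistency in Example #6 treats last == null as the single-page case).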
Example #6
        private unsafe void CheckConsistency()
        {
#if SELF_TEST
            UIntPtr freePagesByTable = 0;

            for (UIntPtr i = 0; i < pageCount; i++)
            {
                UIntPtr pageAddr = startAddr + (MemoryManager.PageSize * i);

                if (PageWord(i) == FreePage)
                {
                    // Validate this block's free information
                    FreeNode *thisBlock = (FreeNode *)pageAddr;
                    DebugStub.Assert(thisBlock->signature == FreeNode.Signature);

                    if (thisBlock->last != null)
                    {
                        // Multi-page free block; validate and skip ahead
                        DebugStub.Assert(thisBlock->last->node == thisBlock);
                        DebugStub.Assert(thisBlock->last->signature == LastNode.Signature);
                        UIntPtr numBytes = (UIntPtr)thisBlock->last - (UIntPtr)pageAddr +
                                           MemoryManager.PageSize;
                        DebugStub.Assert(numBytes == thisBlock->bytes);
                        DebugStub.Assert(MemoryManager.IsPageAligned(numBytes));
                        UIntPtr numPages = MemoryManager.PagesFromBytes(numBytes);

                        for (UIntPtr j = i; j < i + numPages; j++)
                        {
                            DebugStub.Assert(PageWord(j) == FreePage);
                        }

                        i += numPages - 1;
                        freePagesByTable += numPages;
                    }
                    else
                    {
                        // Single-page free block
                        if (i != pageCount - 1)
                        {
                            DebugStub.Assert(PageWord(i + 1) != FreePage);
                        }
                        freePagesByTable++;
                    }
                }
            }

            // Now make sure all free pages are accounted for
            UIntPtr freePagesByList = FreePageCountFromList();
            DebugStub.Assert(freePagesByList == freePagesByTable);
#endif
        }
Example #7
            internal unsafe void CreateAndInsert(PhysicalHeap *inHeap,
                                                 UIntPtr addr,
                                                 UIntPtr pages)
            {
                DebugStub.Assert(MemoryManager.IsPageAligned(addr),
                                 "PhysicalHeap.CreateAndInsert non page-aligned addr={0:x}",
                                 __arglist(addr));

                FreeNode *node = FreeNode.Create(inHeap, addr, pages);

                DebugStub.Assert(MemoryManager.IsPageAligned(node->bytes),
                                 "PhysicalHeap.CreateAndInsert non page-sized node->bytes={0:x}",
                                 __arglist(node->bytes));

                InsertBySize(node);
            }
Example #8
        //
        // Releases a previously reserved range of memory, but does not
        // uncommit any pages.
        //
        internal UIntPtr Unreserve(UIntPtr startAddr, UIntPtr numPages, Process process)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(startAddr));
            DebugStub.Assert(numPages > 0);
            UIntPtr blockLimit = startAddr + MemoryManager.BytesFromPages(numPages);

            DebugStub.Assert(startAddr >= dataStart);
            DebugStub.Assert(blockLimit <= rangeLimit);

            // Strictly a sanity check
            uint tag = process != null ? process.ProcessTag : MemoryManager.KernelPage;

            VerifyOwner(startAddr, blockLimit, tag);

            return(FreeInternal(startAddr, numPages));
        }
Example #9
        /////////////////////////////////////
        // PRIVATE METHODS
        /////////////////////////////////////

        private unsafe UIntPtr FreePageCountFromList()
        {
            UIntPtr   retval = 0;
            FreeNode *entry  = freeList.head;
            FreeNode *prev   = null;

            while (entry != null)
            {
                DebugStub.Assert(MemoryManager.IsPageAligned(entry->bytes));
                retval += MemoryManager.PagesFromBytes(entry->bytes);
                DebugStub.Assert(entry->prev == prev);
                prev  = entry;
                entry = entry->next;
            }

            return(retval);
        }
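A managed stand-in makes explicit the two things this walk checks: the page total and the intact prev back-links. Node below is a hypothetical substitute for FreeNode:

    using System;

    class FreeListWalkSketch
    {
        class Node { public ulong Pages; public Node Prev, Next; }

        static void Main()
        {
            // Build head <-> mid <-> tail with 4, 2 and 8 free pages.
            var head = new Node { Pages = 4 };
            var mid  = new Node { Pages = 2, Prev = head };
            var tail = new Node { Pages = 8, Prev = mid };
            head.Next = mid; mid.Next = tail;

            ulong total = 0;
            Node prev = null;
            for (Node entry = head; entry != null; entry = entry.Next)
            {
                if (entry.Prev != prev) throw new Exception("list corrupt");
                total += entry.Pages;
                prev = entry;
            }
            Console.WriteLine(total); // 14
        }
    }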
Example #10
        internal unsafe UIntPtr Free(UIntPtr startPage,
                                     UIntPtr numPages,
                                     Process process)
        {
            DebugStub.Assert(startPage >= dataStart);
            DebugStub.Assert(MemoryManager.IsPageAligned(startPage));
            UIntPtr blockLimit = startPage + MemoryManager.BytesFromPages(numPages);

            DebugStub.Assert(blockLimit <= rangeLimit);

            //
            // NOTE: This is strictly a sanity check. The pagetable
            // is ultimately not trustworthy because it is writeable by
            // user-mode code.
            //
            uint tag = process != null ? process.ProcessTag : MemoryManager.KernelPage;

            VerifyOwner(startPage, blockLimit, tag);

            // Do it
            return(FreeAndUncommit(startPage, numPages));
        }
Example #11
        public unsafe void Free(UIntPtr addr, UIntPtr bytes, Process process)
        {
            if (addr == UIntPtr.Zero)
            {
                // Silently accept freeing null
                return;
            }

            // We always hand out memory in page-size chunks, so round up what
            // the caller thinks their block size is
            bytes = MemoryManager.PagePad(bytes);

            // Our blocks always start on page boundaries
            DebugStub.Assert(MemoryManager.IsPageAligned(addr));
            ushort tag   = process != null ? (ushort)process.ProcessId : KernelPage;
            bool   iflag = Lock();

            try {
                CheckConsistency();

                UIntPtr numPages = MemoryManager.PagesFromBytes(bytes);
                VerifyOwner(addr, numPages, tag);

                FreeNode *nextBlock = null;
                FreeNode *prevBlock = null;

                if ((addr + bytes) < heapLimit)
                {
                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        nextBlock = FreeNode.GetNodeAt(thisPtr, addr + bytes);
                    }
                }

                if (addr > startAddr)
                {
                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        prevBlock = LastNode.GetNodeFromLast(thisPtr, addr - MemoryManager.PageSize);
                    }
                }

                // Don't mark pages as free until we're done discovering the
                // previous and next blocks; otherwise the discovery logic
                // could mistake this freshly-freed block for an adjacent
                // free block.
                SetPages(addr, numPages, FreePage);

                // Coalesce with the preceding region
                if (prevBlock != null)
                {
                    addr   = (UIntPtr)prevBlock;
                    bytes += prevBlock->bytes;
                    freeList.Remove(prevBlock);
                }

                // Coalesce with the following region
                if (nextBlock != null)
                {
                    bytes += nextBlock->bytes;
                    freeList.Remove(nextBlock);
                }

                // Blocks should always be integral numbers of pages
                DebugStub.Assert(MemoryManager.IsPageAligned(bytes));

                // Create the free node.
                fixed(PhysicalHeap *thisPtr = &this)
                {
                    freeList.CreateAndInsert(thisPtr, addr, bytes / MemoryManager.PageSize);
                }

                CheckConsistency();
            }
            finally {
                Unlock(iflag);
            }
        }
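Stripped of pointers and locking, the coalescing dance above reduces to: find a free block ending at addr, find one starting at addr + bytes, merge all three. A standalone sketch with (addr, bytes) pairs; all names here are hypothetical:

    using System;
    using System.Collections.Generic;

    class CoalesceSketch
    {
        static void Main()
        {
            var free = new List<(ulong addr, ulong bytes)>
            {
                (0x1000, 0x2000),   // prev block: 0x1000..0x3000
                (0x5000, 0x1000),   // next block: 0x5000..0x6000
            };

            ulong addr = 0x3000, bytes = 0x2000; // block being freed: 0x3000..0x5000

            // Coalesce with a free block ending exactly at addr.
            int prev = free.FindIndex(b => b.addr + b.bytes == addr);
            if (prev >= 0)
            {
                addr   = free[prev].addr;
                bytes += free[prev].bytes;
                free.RemoveAt(prev);
            }

            // Coalesce with a free block starting exactly at addr + bytes.
            int next = free.FindIndex(b => b.addr == addr + bytes);
            if (next >= 0)
            {
                bytes += free[next].bytes;
                free.RemoveAt(next);
            }

            free.Add((addr, bytes));
            Console.WriteLine($"merged: 0x{addr:x} .. 0x{addr + bytes:x}"); // 0x1000 .. 0x6000
        }
    }

Merging the lower neighbor first is safe because it leaves the block's end address unchanged, so the upper-neighbor lookup still matches.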
Example #12
        public unsafe UIntPtr Allocate(UIntPtr limitAddr,
                                       UIntPtr bytes,
                                       UIntPtr alignment,
                                       Process process)
        {
            ushort  tag = process != null ? (ushort)process.ProcessId : KernelPage;
            UIntPtr blockPtr;
            bool    iflag = Lock();

            if (alignment < MemoryManager.PageSize)
            {
                alignment = MemoryManager.PageSize;
            }

            try {
                CheckConsistency();

                // Find an appropriately-sized block
                FreeNode *foundNode = freeList.FindGoodFit(bytes, alignment);

                if (foundNode == null)
                {
                    return(UIntPtr.Zero);
                }

                DebugStub.Assert(MemoryManager.IsPageAligned((UIntPtr)foundNode));

                // Respect alignment within the node
                blockPtr = MemoryManager.Pad((UIntPtr)foundNode, alignment);
                UIntPtr alignedSize = bytes + SpaceToAlign((UIntPtr)foundNode, alignment);
                DebugStub.Assert(alignedSize == (blockPtr + bytes) - (UIntPtr)foundNode);
                DebugStub.Assert(foundNode->bytes >= alignedSize);

                // Give back any extra pages
                UIntPtr numPages   = MemoryManager.PagesFromBytes(MemoryManager.PagePad(alignedSize));
                UIntPtr chunkPages = MemoryManager.PagesFromBytes(foundNode->bytes);

                DebugStub.Assert(chunkPages >= numPages);
                UIntPtr extraPages = chunkPages - numPages;

                if (extraPages > 0)
                {
                    // Give back the extra memory
                    UIntPtr remainderPtr = (UIntPtr)foundNode + (numPages * MemoryManager.PageSize);

                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        freeList.CreateAndInsert(thisPtr, remainderPtr, extraPages);
                    }
                }

                SetPages((UIntPtr)foundNode, numPages, tag);
                CheckConsistency();
            }
            finally {
                Unlock(iflag);
            }

            // TODO: Flexible limit specification not yet implemented
            if (limitAddr > UIntPtr.Zero)
            {
                DebugStub.Assert(blockPtr < limitAddr);
            }

            return(blockPtr);
        }
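The alignment arithmetic asserted above holds by construction. A minimal sketch with standalone stand-ins for MemoryManager.Pad and SpaceToAlign, both reconstructed here as the usual round-up-to-alignment helpers (an assumption, not the source):

    using System;

    class AlignSketch
    {
        static ulong Pad(ulong addr, ulong align) => (addr + align - 1) & ~(align - 1);
        static ulong SpaceToAlign(ulong addr, ulong align) => Pad(addr, align) - addr;

        static void Main()
        {
            ulong foundNode = 0x7000;   // page-aligned block from the free list
            ulong alignment = 0x4000;   // caller wants 16 KiB alignment
            ulong bytes     = 0x2000;   // 8 KiB request

            ulong blockPtr    = Pad(foundNode, alignment);                  // 0x8000
            ulong alignedSize = bytes + SpaceToAlign(foundNode, alignment); // 0x3000

            // The same identity Allocate asserts: alignedSize spans from
            // the start of the node to the end of the aligned block.
            Console.WriteLine(alignedSize == (blockPtr + bytes) - foundNode); // True
        }
    }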
Example #13
        /////////////////////////////////////
        // PUBLIC METHODS
        /////////////////////////////////////

        //
        // The range of memory turned over to a VirtualMemoryRange structure
        // must not have *any* mapped pages in it to start out with
        //
        // A VirtualMemoryRange can build a pagetable that *describes*
        // more memory than it *manages* (this supports some kernel GC
        // oddities). In that case, pages out-of-range are marked
        // as PageType.Unknown. Obviously, allocation requests are never
        // satisfied with out-of-bounds data.
        //
        internal unsafe VirtualMemoryRange_struct(
            UIntPtr baseAddr, UIntPtr limitAddr,
            UIntPtr descrBaseAddr, UIntPtr descrLimitAddr,
            ProtectionDomain inDomain)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(baseAddr));
            DebugStub.Assert(MemoryManager.IsPageAligned(limitAddr));
            DebugStub.Assert(MemoryManager.IsPageAligned(descrBaseAddr));
            DebugStub.Assert(MemoryManager.IsPageAligned(descrLimitAddr));

            // The descriptive range can't be smaller than the managed range
            DebugStub.Assert(descrLimitAddr >= limitAddr);
            DebugStub.Assert(descrBaseAddr <= baseAddr);

            descrBase  = descrBaseAddr;
            descrLimit = descrLimitAddr;
            rangeBase  = baseAddr;
            rangeLimit = limitAddr;
            rangeLock  = new SpinLock(SpinLock.Types.VirtualMemoryRange);

            describedPages = MemoryManager.PagesFromBytes(descrLimit - descrBase);

            // Figure out how many pages we need for a page description table
            UIntPtr pageTableSize = MemoryManager.PagePad(describedPages * sizeof(uint));

            dataStart = baseAddr + pageTableSize;
            nextAlloc = dataStart;

            // Commit and prepare the page table
            pageTable = (uint *)baseAddr;

            bool success = MemoryManager.CommitAndMapRange(
                baseAddr, baseAddr + pageTableSize, inDomain);

            if (!success)
            {
                Kernel.Panic("Couldn't get pages to create a new VirtualMemoryRange page table");
            }

            allocatedBytes = 0;
            allocatedCount = 0;
            freedBytes     = 0;
            freedCount     = 0;

            // Describe the pages outside our range as Unknown
            if (descrBase < rangeBase)
            {
                SetRange(descrBase, rangeBase, MemoryManager.PageUnknown);
            }

            if (descrLimit > rangeLimit)
            {
                SetRange(rangeLimit, descrLimit, MemoryManager.PageUnknown);
            }

            // The page-table pages themselves are in use by the System
            SetRange((UIntPtr)pageTable,
                     (UIntPtr)pageTable + pageTableSize,
                     MemoryManager.KernelPageNonGC);

            // Describe pages in-range as Free
            SetRange(dataStart, rangeLimit, MemoryManager.PageFree);

#if DEBUG
            // Check that our data range is pristine
            for (UIntPtr stepAddr = dataStart;
                 stepAddr < rangeLimit;
                 stepAddr += MemoryManager.PageSize)
            {
                DebugStub.Assert(!VMManager.IsPageMapped(stepAddr));
            }
#endif
        }
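The describe-versus-manage split is mostly bookkeeping arithmetic: the table needs an entry per described page, but it is carved out of the bottom of the managed range. A standalone sketch, assuming 4 KiB pages and the uint entries used above:

    using System;

    class RangeSketch
    {
        const ulong PageSize = 4096; // assumption: 4 KiB pages

        static ulong PagePad(ulong b) => (b + PageSize - 1) & ~(PageSize - 1);

        static void Main()
        {
            ulong descrBase = 0x0000_0000, descrLimit = 0x1000_0000; // 256 MiB described
            ulong rangeBase = 0x0800_0000, rangeLimit = 0x1000_0000; // 128 MiB managed

            ulong describedPages = (descrLimit - descrBase) / PageSize;    // 65536
            ulong pageTableSize  = PagePad(describedPages * sizeof(uint)); // 256 KiB

            // The table lives at the bottom of the managed range; usable
            // data starts right above it, as in the constructor above.
            ulong dataStart = rangeBase + pageTableSize;
            Console.WriteLine($"dataStart = 0x{dataStart:x}"); // dataStart = 0x8040000
        }
    }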