// Mark a range of physical memory as being used. THIS IS ONLY
        // USED DURING INITIALIZATION, and so is not particularly
        // efficient.
        //
        // Addresses do not have to be page-aligned, but whole pages will
        // be marked.
        //
        // To allow SMAP regions to overlap HAL regions, pages that are
        // already marked as in use are silently skipped.
        private static unsafe void MarkRangeUsed(ulong startAddr, ulong length)
        {
            ulong firstPage = startAddr & ~MemoryManager.PageMask;
            // lastPage ends up being the address of the page immediately *after*
            // the data ends.
            ulong lastPage = MemoryManager.PagePad(startAddr + length);

            for (ulong pageAddr = firstPage; pageAddr < lastPage; pageAddr += MemoryManager.PageSize)
            {
                ulong      pageNum     = MemoryManager.PagesFromBytes(pageAddr);
                uint       blockIdx    = (uint)(pageNum / (ulong)PageBlock.PagesPerBlock);
                uint       pageInBlock = (uint)(pageNum % (ulong)PageBlock.PagesPerBlock);
                PageBlock *pBlock      = GetBlock(blockIdx);

                if (!pBlock->PageInUse(pageInBlock))
                {
                    pBlock->MarkAsUsed(pageInBlock);

                    if (pBlock->Full)
                    {
                        pBlock->Unlink(ref freeList);
                    }
                }
            }
        }
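
// The page/block arithmetic above is easy to misread, so here is a minimal,
// self-contained sketch of the same indexing, assuming a hypothetical 4 KB
// page size and 64 pages per block; the real values come from MemoryManager
// and PageBlock and may differ.
using System;

class PageIndexSketch
{
    const ulong PageSize = 4096;
    const ulong PageMask = PageSize - 1;
    const ulong PagesPerBlock = 64;

    static void Main()
    {
        ulong startAddr = 0x12345;   // deliberately not page-aligned
        ulong length    = 0x2000;

        ulong firstPage = startAddr & ~PageMask;                        // round down
        ulong lastPage  = (startAddr + length + PageMask) & ~PageMask;  // round up

        for (ulong pageAddr = firstPage; pageAddr < lastPage; pageAddr += PageSize)
        {
            ulong pageNum     = pageAddr / PageSize;
            ulong blockIdx    = pageNum / PagesPerBlock;
            ulong pageInBlock = pageNum % PagesPerBlock;
            Console.WriteLine($"page 0x{pageAddr:x} -> block {blockIdx}, bit {pageInBlock}");
        }
    }
}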
Example #2
        /////////////////////////////////////
        // PUBLIC METHODS
        /////////////////////////////////////

        public unsafe PhysicalHeap(UIntPtr start, UIntPtr limit)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(start));
            DebugStub.Assert(MemoryManager.IsPageAligned(limit));

            // Note that this wastes a little bit of memory by allocating
            // table space to describe page-table memory!
            UIntPtr numPages      = MemoryManager.PagesFromBytes(limit - start);
            UIntPtr bytesForTable = numPages * BytesPerTableEntry;

            bytesForTable = MemoryManager.PagePad(bytesForTable);
            UIntPtr pagesForTable = MemoryManager.PagesFromBytes(bytesForTable);

            pageCount = numPages - pagesForTable;
            startAddr = start + bytesForTable;
            heapLimit = limit;
            pageTable = (ushort *)start;
            heapLock  = new SpinLock(SpinLock.Types.PhysicalHeap);

            // The entire heap is free to start out with
            freeList = new FreeList();

            // Initialize the page table
            SetPages(startAddr, pageCount, FreePage);

            fixed(PhysicalHeap *thisPtr = &this)
            {
                freeList.CreateAndInsert(thisPtr, startAddr, pageCount);
            }

            CheckConsistency();
        }
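
// A self-contained sketch of the table-sizing arithmetic in the constructor
// above, assuming 4 KB pages and 2 bytes per table entry (one ushort per
// page); the real constants come from MemoryManager and BytesPerTableEntry
// and may differ.
using System;

class TableSizingSketch
{
    const ulong PageSize = 4096;
    const ulong BytesPerTableEntry = 2;

    static ulong PagePad(ulong bytes) => (bytes + PageSize - 1) & ~(PageSize - 1);

    static void Main()
    {
        ulong start = 0x00100000;    // 1 MB, page-aligned
        ulong limit = 0x04100000;    // 65 MB, page-aligned

        ulong numPages      = (limit - start) / PageSize;
        ulong bytesForTable = PagePad(numPages * BytesPerTableEntry);
        ulong pagesForTable = bytesForTable / PageSize;

        // The table lives at 'start'; usable heap pages begin after it.
        Console.WriteLine($"total pages:       {numPages}");
        Console.WriteLine($"pages for table:   {pagesForTable}");
        Console.WriteLine($"usable heap pages: {numPages - pagesForTable}");
    }
}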
Example #3
        public unsafe void Free(UIntPtr addr, UIntPtr bytes, Process process)
        {
            if (addr == UIntPtr.Zero)
            {
                // Silently accept freeing null
                return;
            }

            // We always hand out memory in page-size chunks, so round up what
            // the caller thinks their block size is
            bytes = MemoryManager.PagePad(bytes);

            // Our blocks always start on page boundaries
            DebugStub.Assert(MemoryManager.IsPageAligned(addr));
            ushort tag   = process != null ? (ushort)process.ProcessId : KernelPage;
            bool   iflag = Lock();

            try {
                CheckConsistency();

                UIntPtr numPages = MemoryManager.PagesFromBytes(bytes);
                VerifyOwner(addr, numPages, tag);

                FreeNode *nextBlock = null;
                FreeNode *prevBlock = null;

                if ((addr + bytes) < heapLimit)
                {
                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        nextBlock = FreeNode.GetNodeAt(thisPtr, addr + bytes);
                    }
                }

                if (addr > startAddr)
                {
                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        prevBlock = LastNode.GetNodeFromLast(thisPtr, addr - MemoryManager.PageSize);
                    }
                }

                // Don't mark the pages as free until we have finished locating
                // the previous and next blocks; otherwise that search could get
                // confused by finding itself adjacent to an apparently free
                // block. (A standalone sketch of this coalescing follows the method.)
                SetPages(addr, numPages, FreePage);

                // Coalesce with the preceding region
                if (prevBlock != null)
                {
                    addr   = (UIntPtr)prevBlock;
                    bytes += prevBlock->bytes;
                    freeList.Remove(prevBlock);
                }

                // Coalesce with the following region
                if (nextBlock != null)
                {
                    bytes += nextBlock->bytes;
                    freeList.Remove(nextBlock);
                }

                // Blocks should always be integral numbers of pages
                DebugStub.Assert(MemoryManager.IsPageAligned(bytes));

                // Create the free node.
                fixed(PhysicalHeap *thisPtr = &this)
                {
                    freeList.CreateAndInsert(thisPtr, addr, bytes / MemoryManager.PageSize);
                }

                CheckConsistency();
            }
            finally {
                Unlock(iflag);
            }
        }
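
// A simplified, self-contained sketch of the coalescing step in Free():
// a newly freed run of pages is merged with any physically adjacent free
// regions. A SortedDictionary keyed by address stands in for the in-place
// FreeNode/LastNode bookkeeping, purely to illustrate the merge logic.
using System;
using System.Collections.Generic;

class CoalesceSketch
{
    // address -> length in bytes (all values are page multiples)
    static readonly SortedDictionary<ulong, ulong> free = new SortedDictionary<ulong, ulong>();

    static void Free(ulong addr, ulong bytes)
    {
        // Coalesce with a preceding region that ends exactly at 'addr'.
        ulong prevKey = 0;
        bool  hasPrev = false;
        foreach (var kv in free)
        {
            if (kv.Key + kv.Value == addr) { prevKey = kv.Key; hasPrev = true; break; }
        }
        if (hasPrev)
        {
            bytes += free[prevKey];
            addr   = prevKey;
            free.Remove(prevKey);
        }

        // Coalesce with a following region that starts exactly at addr + bytes.
        if (free.TryGetValue(addr + bytes, out ulong nextLen))
        {
            free.Remove(addr + bytes);
            bytes += nextLen;
        }

        free[addr] = bytes;
    }

    static void Main()
    {
        Free(0x1000, 0x1000);
        Free(0x3000, 0x1000);
        Free(0x2000, 0x1000);   // bridges the two regions above into one
        foreach (var kv in free)
            Console.WriteLine($"free region at 0x{kv.Key:x}, 0x{kv.Value:x} bytes");
        // Expected output: a single region at 0x1000, 0x3000 bytes.
    }
}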
Example #4
        public unsafe UIntPtr Allocate(UIntPtr limitAddr,
                                       UIntPtr bytes,
                                       UIntPtr alignment,
                                       Process process)
        {
            ushort  tag = process != null ? (ushort)process.ProcessId : KernelPage;
            UIntPtr blockPtr;
            bool    iflag = Lock();

            if (alignment < MemoryManager.PageSize)
            {
                alignment = MemoryManager.PageSize;
            }

            try {
                CheckConsistency();

                // Find an appropriately-sized block
                FreeNode *foundNode = freeList.FindGoodFit(bytes, alignment);

                if (foundNode == null)
                {
                    return(UIntPtr.Zero);
                }

                DebugStub.Assert(MemoryManager.IsPageAligned((UIntPtr)foundNode));

                // Respect alignment within the node
                blockPtr = MemoryManager.Pad((UIntPtr)foundNode, alignment);
                UIntPtr alignedSize = bytes + SpaceToAlign((UIntPtr)foundNode, alignment);
                DebugStub.Assert(alignedSize == (blockPtr + bytes) - (UIntPtr)foundNode);
                DebugStub.Assert(foundNode->bytes >= alignedSize);

                // Give back any extra pages
                UIntPtr numPages   = MemoryManager.PagesFromBytes(MemoryManager.PagePad(alignedSize));
                UIntPtr chunkPages = MemoryManager.PagesFromBytes(foundNode->bytes);

                DebugStub.Assert(chunkPages >= numPages);
                UIntPtr extraPages = chunkPages - numPages;

                if (extraPages > 0)
                {
                    // Give back the extra memory
                    UIntPtr remainderPtr = (UIntPtr)foundNode + (numPages * MemoryManager.PageSize);

                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        freeList.CreateAndInsert(thisPtr, remainderPtr, extraPages);
                    }
                }

                SetPages((UIntPtr)foundNode, numPages, tag);
                CheckConsistency();
            }
            finally {
                Unlock(iflag);
            }

            // TODO: Flexible limit specification not yet implemented
            if (limitAddr > UIntPtr.Zero)
            {
                DebugStub.Assert(blockPtr < limitAddr);
            }

            return(blockPtr);
        }
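
// A sketch of the intra-node alignment arithmetic used by Allocate(): the
// returned block starts at the first 'alignment'-aligned address inside the
// chosen free node, and the node must also absorb the padding in front of
// that address. Pad and SpaceToAlign below are local stand-ins for the
// helpers of the same purpose in the original code.
using System;

class AlignSketch
{
    static ulong Pad(ulong addr, ulong alignment)
        => (addr + alignment - 1) / alignment * alignment;

    static ulong SpaceToAlign(ulong addr, ulong alignment)
        => Pad(addr, alignment) - addr;

    static void Main()
    {
        ulong foundNode = 0x1000;     // start of the free node (page-aligned)
        ulong bytes     = 0x3000;     // requested size
        ulong alignment = 0x4000;     // requested alignment (16 KB)

        ulong blockPtr    = Pad(foundNode, alignment);
        ulong alignedSize = bytes + SpaceToAlign(foundNode, alignment);

        // The same invariant that Allocate() asserts:
        Console.WriteLine(alignedSize == (blockPtr + bytes) - foundNode);   // True
        Console.WriteLine($"blockPtr = 0x{blockPtr:x}, alignedSize = 0x{alignedSize:x}");
    }
}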
Example #5
        /////////////////////////////////////
        // PUBLIC METHODS
        /////////////////////////////////////

        //
        // The range of memory turned over to a VirtualMemoryRange structure
        // must not have *any* mapped pages in it to start out with
        //
        // A VirtualMemoryRange can build a pagetable that *describes*
        // more memory than it *manages* (this supports some kernel GC
        // oddities). In that case, out-of-range pages are marked
        // as PageType.Unknown. Allocation requests are, of course, never
        // satisfied with out-of-range memory.
        //
        internal unsafe VirtualMemoryRange_struct(
            UIntPtr baseAddr, UIntPtr limitAddr,
            UIntPtr descrBaseAddr, UIntPtr descrLimitAddr,
            ProtectionDomain inDomain)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(baseAddr));
            DebugStub.Assert(MemoryManager.IsPageAligned(limitAddr));
            DebugStub.Assert(MemoryManager.IsPageAligned(descrBaseAddr));
            DebugStub.Assert(MemoryManager.IsPageAligned(descrLimitAddr));

            // The descriptive range can't be smaller than the managed range
            DebugStub.Assert(descrLimitAddr >= limitAddr);
            DebugStub.Assert(descrBaseAddr <= baseAddr);

            descrBase  = descrBaseAddr;
            descrLimit = descrLimitAddr;
            rangeBase  = baseAddr;
            rangeLimit = limitAddr;
            rangeLock  = new SpinLock(SpinLock.Types.VirtualMemoryRange);

            describedPages = MemoryManager.PagesFromBytes(descrLimit - descrBase);

            // Figure out how many pages we need for a page description table
            UIntPtr pageTableSize = MemoryManager.PagePad(describedPages * sizeof(uint));

            dataStart = baseAddr + pageTableSize;
            nextAlloc = dataStart;

            // Commit and prepare the page table
            pageTable = (uint *)baseAddr;

            bool success = MemoryManager.CommitAndMapRange(
                baseAddr, baseAddr + pageTableSize, inDomain);

            if (!success)
            {
                Kernel.Panic("Couldn't get pages to create a new VirtualMemoryRange page table");
            }

            allocatedBytes = 0;
            allocatedCount = 0;
            freedBytes     = 0;
            freedCount     = 0;

            // Describe the pages outside our range as Unknown
            if (descrBase < rangeBase)
            {
                SetRange(descrBase, rangeBase, MemoryManager.PageUnknown);
            }

            if (descrLimit > rangeLimit)
            {
                SetRange(rangeLimit, descrLimit, MemoryManager.PageUnknown);
            }

            // The page-table pages themselves are in use by the System
            SetRange((UIntPtr)pageTable,
                     (UIntPtr)pageTable + pageTableSize,
                     MemoryManager.KernelPageNonGC);

            // Describe pages in-range as Free
            SetRange(dataStart, rangeLimit, MemoryManager.PageFree);

#if DEBUG
            // Check that our data range is pristine
            for (UIntPtr stepAddr = dataStart;
                 stepAddr < rangeLimit;
                 stepAddr += MemoryManager.PageSize)
            {
                DebugStub.Assert(!VMManager.IsPageMapped(stepAddr));
            }
#endif
        }
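
// A minimal sketch of the "describes more than it manages" idea above: the
// page table holds one entry per described page, an address is turned into
// a table index relative to descrBase, and entries outside the managed
// range [rangeBase, rangeLimit) are tagged Unknown. The constants here are
// illustrative stand-ins, not the real MemoryManager page-type encodings.
using System;

class DescribedRangeSketch
{
    const ulong PageSize    = 4096;
    const uint  PageUnknown = 0xFFFFFFFF;
    const uint  PageFree    = 0;

    static void Main()
    {
        ulong descrBase = 0x0000, descrLimit = 0x10000;    // describes 16 pages
        ulong rangeBase = 0x4000, rangeLimit = 0xC000;     // manages only 8 of them

        ulong describedPages = (descrLimit - descrBase) / PageSize;
        uint[] pageTable = new uint[describedPages];

        for (ulong addr = descrBase; addr < descrLimit; addr += PageSize)
        {
            ulong idx = (addr - descrBase) / PageSize;
            bool managed = addr >= rangeBase && addr < rangeLimit;
            pageTable[idx] = managed ? PageFree : PageUnknown;
        }

        for (ulong i = 0; i < describedPages; i++)
            Console.WriteLine($"page {i}: {(pageTable[i] == PageUnknown ? "Unknown" : "Free")}");
    }
}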
Example #6
        /////////////////////////////////////
        // PUBLIC METHODS
        /////////////////////////////////////

        //
        // Initialize() must be called during OS boot *BEFORE* paging
        // is enabled, so that physical memory can be accessed
        // directly. We consult the system's memory map to pick a
        // range of memory where we can situate the physical page
        // table, and initialize it.
        //
        // The reserveSize argument specifies how much physical
        // memory to set aside; the method returns a PhysicalAddress
        // to that reserved area.
        // This is used by the MemoryManager to reserve an area
        // of contiguous physical memory for use in I/O operations.
        //
        // After initialization, the PhysicalPages module
        // assumes that its page table will be identity-mapped
        // into the kernel's virtual memory space.
        //
        internal static unsafe PhysicalAddress Initialize(ulong reserveSize)
        {
            Platform p = Platform.ThePlatform;
            UIntPtr lowerAddress = UIntPtr.Zero;
            UIntPtr upperAddress = UIntPtr.Zero;

            // NB: our initialization is currently naive; we
            // set up a table to describe all of physical memory,
            // even though there are holes in it for reserved
            // memory that we will never be able to use.
            // This makes indexing much easier, though.

            InitializeLock();

            // Retrieve the highest RAM address
            MemoryManager.PhysicalMemoryLimits(out lowerAddress, out upperAddress);
            addressLimit = (ulong)upperAddress;

            DebugStub.WriteLine("Highest usable RAM address is 0x{0:x8}",
                                __arglist(addressLimit));

            // How much room do we need for the pageblock table?
            numBlocks = GetNumBlocks(addressLimit);
            ulong requiredSpace = (ulong)numBlocks * (ulong)sizeof(PageBlock);

            // Now we need to find a location in physical memory
            // where we can stick the physical pageblock table.
            ulong startAddr = LowerRAMBlackout;
            if (p.BootAllocatedMemorySize != 0) {
                if (startAddr > p.BootAllocatedMemory &&
                    startAddr < p.BootAllocatedMemory + p.BootAllocatedMemorySize) {
                    startAddr = (ulong)p.BootAllocatedMemory + (ulong)p.BootAllocatedMemorySize;
                }
            }

            ulong physLocation = FindFreeBlockInSMAP(startAddr, requiredSpace);

            if (physLocation == 0) {
                // Failed to find a spot to park the physical page
                // table. This is fatal!
                Kernel.Panic("Couldn't find enough room to initialize hardware page table");
            }

            // Initialize all the descriptor blocks as "free" (zeroed)
            physTable = (PageBlock*)physLocation;
            tableSize = requiredSpace;
            freeList = 0; // First descriptor

            for (uint i = 0; i < numBlocks; i++) {
                PageBlock* thisBlock = GetBlock(i);
                thisBlock->Initialize();
                thisBlock->prevIdx = (i == 0) ? PageBlock.Sentinel : i - 1;
                thisBlock->nextIdx = (i == numBlocks - 1) ? PageBlock.Sentinel : i + 1;
            }

            // Mark the blackout area of low physical memory as used
            MarkRangeUsed(0, startAddr - 1);

            // Now mark the range of physical pages occupied by the
            // hardware table itself as being used!
            MarkRangeUsed((ulong)physTable, requiredSpace);

            // Mark any non-Free areas (according to SMAP) as in use
            SMAPINFO* smap = p.Smap;
            for (uint i = 0; i < p.SmapCount; i++) {
                if ((smap[i].type != (ulong)SMAPINFO.AddressType.Free) &&
                    ((smap[i].addr + smap[i].size) > startAddr) &&
                    (smap[i].addr < addressLimit)) {

                    ulong unaccountedStart, unaccountedLength;

                    if (smap[i].addr >= startAddr) {
                        unaccountedStart = smap[i].addr;
                        unaccountedLength = smap[i].size;
                    }
                    else {
                        // Part of this memory window is already accounted for
                        unaccountedStart = startAddr;
                        unaccountedLength = smap[i].size - (startAddr - smap[i].addr);
                    }

                    MarkRangeUsed(unaccountedStart, unaccountedLength);
                }
            }

            // Mark out all the Platform special regions as busy
            if (p.MiniDumpLimit != 0) {
                ulong miniDumpSize = (ulong)p.MiniDumpLimit - (ulong)p.MiniDumpBase;
                MarkRangeUsed((ulong)p.MiniDumpBase,
                              MemoryManager.PagePad(miniDumpSize));
            }
            if (p.KernelDllSize != 0) {
                MarkRangeUsed((ulong)p.KernelDllBase,
                              MemoryManager.PagePad((ulong)p.KernelDllSize));
            }
            if (p.BootAllocatedMemorySize != 0) {
                MarkRangeUsed((ulong)p.BootAllocatedMemory,
                              MemoryManager.PagePad((ulong)p.BootAllocatedMemorySize));
            }

            // Last step: find an available area to situate the caller's
            // requested reserve-block, if any.
            PhysicalAddress reservedPtr = PhysicalAddress.Null;

            if (reserveSize != 0) {
                reservedPtr = new PhysicalAddress(
                    FindFreeBlockInSMAP(
                        MemoryManager.PagePad((ulong)physTable + tableSize),
                        reserveSize));

                if (reservedPtr == PhysicalAddress.Null) {
                    Kernel.Panic("Couldn't find enough physically contiguous memory to reserve");
                }

                if (reservedPtr.Value + reserveSize > 0xFFFFFF) {
                    Kernel.Panic("Couldn't find enough physically contiguous memory below 0xFFFFFF for I/O memory");
                }
            }

            // Mark the reserved block as used. It's up to the caller
            // to administer its use.
            MarkRangeUsed(reservedPtr.Value, reserveSize);

            CheckConsistency();

            DebugStub.WriteLine("PhysicalPages initialized at 0x{0:x8} - 0x{1:x8} with {2} physical pages available.",
                                __arglist(reservedPtr.Value,
                                          reservedPtr.Value + reserveSize,
                                          GetFreePageCount()));

            return reservedPtr;
        }
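
// A standalone sketch of the SMAP trimming logic in Initialize(): a
// non-free region that straddles the already-accounted boundary (startAddr)
// is clipped so that only the unaccounted part is marked used. The Region
// struct and the sample values below are stand-ins for SMAPINFO and real
// firmware data.
using System;

class SmapTrimSketch
{
    struct Region { public ulong Addr, Size; public bool Free; }

    static void Main()
    {
        ulong startAddr    = 0x100000;    // everything below this is already marked used
        ulong addressLimit = 0x8000000;   // highest usable RAM address

        Region[] smap =
        {
            new Region { Addr = 0x000000, Size = 0x09F000, Free = true  },
            new Region { Addr = 0x0E0000, Size = 0x120000, Free = false },   // straddles startAddr
            new Region { Addr = 0x500000, Size = 0x010000, Free = false },
        };

        foreach (Region r in smap)
        {
            if (!r.Free && (r.Addr + r.Size) > startAddr && r.Addr < addressLimit)
            {
                ulong unaccountedStart  = r.Addr >= startAddr ? r.Addr : startAddr;
                ulong unaccountedLength = r.Addr >= startAddr
                    ? r.Size
                    : r.Size - (startAddr - r.Addr);
                Console.WriteLine($"mark used: 0x{unaccountedStart:x} + 0x{unaccountedLength:x}");
            }
        }
    }
}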
Example #7
        internal static unsafe StackHead *GetStackSegmentRaw(UIntPtr size,
                                                             ref ThreadContext context,
                                                             bool kernelAllocation,
                                                             bool initialStack)
        {
            // Allocate a new chunk, making room for StackHead at the top.
            // If you change these constants to add more data, see the
            // comment about InitialStackSize at the top of this file!
#if DO_TRACE_STACKS
            Kernel.Waypoint(667);
#endif
            if (size == UIntPtr.Zero)
            {
                size = InitialStackSize;
            }
            size = MemoryManager.PagePad(size + sizeof(StackHead) + SafetyBufferSize);

            UIntPtr chunk;


            Process owner = Process.GetProcessByID(context.processId);
            //
            //// NOTE: here's where we should be clever about
            //// whether to allocate a stack chunk in the user range
            //// or the kernel range. Except, if we switch contexts
            //// during an ABI call while using a user-range stack
            //// segment on a paging machine, we die. Gloss over
            //// this hackily by always getting stack segments
            //// from the kernel range.
            //if (kernelAllocation || (owner == Process.kernelProcess)) {
            //  chunk = MemoryManager.KernelAllocate(
            //      MemoryManager.PagesFromBytes(size), owner, 0, PageType.Stack);
            //}
            //else {
            //  chunk = MemoryManager.UserAllocate(
            //      MemoryManager.PagesFromBytes(size), owner, 0, PageType.Stack);
            //}
            //

            UIntPtr pageCount = MemoryManager.PagesFromBytes(size);
#if DEBUG_STACK_VERBOSE
            fixed(ThreadContext *ptr = &context)
            {
                Tracing.Log(Tracing.Debug,
                            "GetStackSegmentRaw(ctx={0:x8},size={1:d}) pages={2} [{3:x8}..{4:x8}]",
                            (UIntPtr)ptr, size, pageCount,
                            context.stackLimit, context.stackBegin);
            }
#endif
            chunk = MemoryManager.StackAllocate(pageCount, owner, 0, kernelAllocation, initialStack);

            if (chunk != UIntPtr.Zero)
            {
                // NB: We do _not_ zero out stack memory!
                // We assume that Bartok prevents access to previous contents.
                StackHead *head = (StackHead *)(chunk + size - sizeof(StackHead));

                context.stackBegin = chunk + size;
                context.stackLimit = chunk + SafetyBufferSize;

#if DEBUG_STACK_VERBOSE
                Tracing.Log(Tracing.Debug,
                            "GetStackSegmentRaw(size={0:d}) -> [{1:x8}..{2:x8}]",
                            size, context.stackLimit, context.stackBegin);
#endif
                return(head);
            }
            else
            {
                // Stack allocation failed.  In the future, we should
                // trigger a kernel exception; for now, the break to the
                // debugger below is disabled and we simply return null.
#if DEBUG_STACK_VERBOSE
                Tracing.Log(Tracing.Debug,
                            "GetStackSegmentRaw: KernelAllocate failed!(siz={0:d})",
                            size);
#endif
                //DebugStub.Break();
                return(null);
            }
        }
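
// A sketch of the stack-chunk layout computed above: the requested size is
// grown to hold a StackHead plus a safety buffer and padded to whole pages;
// the StackHead sits at the very top of the chunk, stackBegin is the top
// address (the stack grows down from it), and stackLimit sits just above
// the safety buffer at the bottom. The struct fields and sizes here are
// illustrative stand-ins, not Singularity's real values.
using System;
using System.Runtime.InteropServices;

class StackLayoutSketch
{
    [StructLayout(LayoutKind.Sequential)]
    struct StackHead { public ulong prevBegin, prevLimit; }   // hypothetical fields

    const ulong PageSize         = 4096;
    const ulong SafetyBufferSize = PageSize;

    static ulong PagePad(ulong b) => (b + PageSize - 1) & ~(PageSize - 1);

    static void Main()
    {
        ulong headSize  = (ulong)Marshal.SizeOf<StackHead>();
        ulong requested = 0x2000;
        ulong size      = PagePad(requested + headSize + SafetyBufferSize);

        ulong chunk      = 0x80000000;            // pretend allocation address
        ulong headAddr   = chunk + size - headSize;
        ulong stackBegin = chunk + size;
        ulong stackLimit = chunk + SafetyBufferSize;

        Console.WriteLine($"chunk size  : 0x{size:x}");
        Console.WriteLine($"StackHead at: 0x{headAddr:x}");
        Console.WriteLine($"stackBegin  : 0x{stackBegin:x}");
        Console.WriteLine($"stackLimit  : 0x{stackLimit:x}");
    }
}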