// Free a single physical page
        internal unsafe static void FreePage(PhysicalAddress addr)
        {
            bool iflag = Lock();

            try {
                CheckConsistency();
                ulong pageNum = MemoryManager.PagesFromBytes(addr.Value);
                uint blockIdx = (uint)(pageNum / (ulong)PageBlock.PagesPerBlock);
                uint pageIdx = (uint)(pageNum % (ulong)PageBlock.PagesPerBlock);
                PageBlock* thisBlock = GetBlock(blockIdx);
                bool wasFull = thisBlock->Full;

                thisBlock->MarkAsFree(pageIdx);

                if (wasFull) {
                    // This block now has a page available; put it on the
                    // free list
                    DebugStub.Assert(!thisBlock->IsLinked(freeList));
                    thisBlock->Link(ref freeList);
                }
            }
            finally {
                CheckConsistency();
                Unlock(iflag);
            }
        }
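
The index arithmetic above is plain fixed-radix decomposition: a physical address becomes a global page number, which then splits into a descriptor-block index and a slot within that block. Below is a minimal standalone sketch, assuming a hypothetical 4 KB page and 64 pages per descriptor block (the real page size and PageBlock.PagesPerBlock are defined elsewhere in the Singularity sources).

using System;

static class PageIndexDemo
{
    const ulong PageSize = 4096;        // assumed page size
    const ulong PagesPerBlock = 64;     // assumed stand-in for PageBlock.PagesPerBlock

    static void Main()
    {
        ulong physicalAddr = 0x0123_4000;           // some page-aligned physical address
        ulong pageNum  = physicalAddr / PageSize;   // global page number (PagesFromBytes)
        ulong blockIdx = pageNum / PagesPerBlock;   // which descriptor block
        ulong pageIdx  = pageNum % PagesPerBlock;   // which slot inside that block

        Console.WriteLine($"page {pageNum} -> block {blockIdx}, slot {pageIdx}");
    }
}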
Example #2
        /////////////////////////////////////
        // PUBLIC METHODS
        /////////////////////////////////////

        public unsafe PhysicalHeap(UIntPtr start, UIntPtr limit)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(start));
            DebugStub.Assert(MemoryManager.IsPageAligned(limit));

            // Note that this wastes a little bit of memory by allocating
            // table space to describe page-table memory!
            UIntPtr numPages      = MemoryManager.PagesFromBytes(limit - start);
            UIntPtr bytesForTable = numPages * BytesPerTableEntry;

            bytesForTable = MemoryManager.PagePad(bytesForTable);
            UIntPtr pagesForTable = MemoryManager.PagesFromBytes(bytesForTable);

            pageCount = numPages - pagesForTable;
            startAddr = start + bytesForTable;
            heapLimit = limit;
            pageTable = (ushort *)start;
            heapLock  = new SpinLock(SpinLock.Types.PhysicalHeap);

            // The entire heap is free to start out with
            freeList = new FreeList();

            // Initialize the page table
            SetPages(startAddr, pageCount, FreePage);

            fixed(PhysicalHeap *thisPtr = &this)
            {
                freeList.CreateAndInsert(thisPtr, startAddr, pageCount);
            }

            CheckConsistency();
        }
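
The constructor gives up the first few pages of the range to hold its own page table. The same arithmetic is sketched below with plain integers under assumed values: 4 KB pages, 2-byte entries to match the ushort* pageTable, and a hypothetical start/limit.

using System;

static class HeapLayoutDemo
{
    const ulong PageSize = 4096;          // assumed page size
    const ulong BytesPerTableEntry = 2;   // pageTable is ushort*, so 2 bytes per page

    static ulong PagePad(ulong b) => (b + PageSize - 1) & ~(PageSize - 1);

    static void Main()
    {
        ulong start = 0x0010_0000, limit = 0x0800_0000;    // hypothetical physical range

        ulong numPages      = (limit - start) / PageSize;
        ulong bytesForTable = PagePad(numPages * BytesPerTableEntry);
        ulong pagesForTable = bytesForTable / PageSize;

        ulong pageCount = numPages - pagesForTable;        // pages the heap can hand out
        ulong startAddr = start + bytesForTable;           // first allocatable page

        Console.WriteLine($"{numPages} pages total, {pagesForTable} for the table, " +
                          $"{pageCount} usable; heap data starts at 0x{startAddr:x}");
    }
}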
        // Mark a range of physical memory as being used. THIS IS ONLY
        // USED DURING INITIALIZATION, and so is not particularly
        // efficient.
        //
        // Addresses do not have to be page-aligned, but whole pages will
        // be marked.
        //
        // To allow SMAP regions to overlap HAL regions, this routine
        // simply ignores overlaps between regions.
        private static unsafe void MarkRangeUsed(ulong startAddr, ulong length)
        {
            ulong firstPage = startAddr & ~MemoryManager.PageMask;
            // lastPage ends up being the address of the page immediately *after*
            // the data ends.
            ulong lastPage = MemoryManager.PagePad(startAddr + length);

            for (ulong pageAddr = firstPage; pageAddr < lastPage; pageAddr += MemoryManager.PageSize)
            {
                ulong      pageNum     = MemoryManager.PagesFromBytes(pageAddr);
                uint       blockIdx    = (uint)(pageNum / (ulong)PageBlock.PagesPerBlock);
                uint       pageInBlock = (uint)(pageNum % (ulong)PageBlock.PagesPerBlock);
                PageBlock *pBlock      = GetBlock(blockIdx);

                if (!pBlock->PageInUse(pageInBlock))
                {
                    pBlock->MarkAsUsed(pageInBlock);

                    if (pBlock->Full)
                    {
                        pBlock->Unlink(ref freeList);
                    }
                }
            }
        }
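
MarkRangeUsed widens an arbitrary byte range to whole pages before touching the descriptor blocks: the start rounds down, the end rounds up. Here is a standalone sketch of just that rounding, assuming a 4 KB page (PageMask = PageSize - 1).

using System;

static class RangeRoundingDemo
{
    const ulong PageSize = 4096;
    const ulong PageMask = PageSize - 1;

    static void Main()
    {
        ulong startAddr = 0x1_2345, length = 0x1000;      // unaligned range
        ulong firstPage = startAddr & ~PageMask;          // round the start down
        ulong lastPage  = (startAddr + length + PageMask) & ~PageMask; // round the end up

        for (ulong page = firstPage; page < lastPage; page += PageSize)
            Console.WriteLine($"would mark page at 0x{page:x} as used");
    }
}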
Example #4
        private UIntPtr PageIndex(UIntPtr pageAddr)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(pageAddr));
            DebugStub.Assert(pageAddr >= startAddr && pageAddr <= heapLimit,
                             "PhysicalHeap.PageIndex pageAddr = {0:x} Range = {1:x} ... {2:x}",
                             __arglist(pageAddr, startAddr, heapLimit));
            DebugStub.Assert(pageAddr < heapLimit);

            return(MemoryManager.PagesFromBytes(pageAddr - startAddr));
        }
        internal static void FreeMemory(UIntPtr startAddr, UIntPtr size)
        {
#if SINGULARITY_KERNEL
            DebugStub.Assert(Sing_MemoryManager.IsPageAligned(size));
            Sing_MemoryManager.KernelFree(
                startAddr, Sing_MemoryManager.PagesFromBytes(size),
                Process.kernelProcess);
#elif SINGULARITY_PROCESS
            VTable.Assert((size & PageTable.PageMask) == 0);
            PageTableService.Free(startAddr, size);
#endif
        }
        // Get the number of descriptor blocks that are necessary for
        // a given amount of physical RAM
        private static uint GetNumBlocks(ulong physicalRAM)
        {
            ulong pages = MemoryManager.PagesFromBytes(physicalRAM);

            // Round up the number of blocks needed
            ulong blocks = pages / PageBlock.PagesPerBlock;

            if ((pages % PageBlock.PagesPerBlock) > 0) {
                blocks++;
            }

            return (uint)blocks;
        }
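
GetNumBlocks is ceiling division written out with an explicit remainder test. The loop-free form below computes the same count; PageSize and PagesPerBlock here are assumed stand-ins for the real constants.

using System;

static class BlockCountDemo
{
    const ulong PageSize = 4096;       // assumed page size
    const ulong PagesPerBlock = 64;    // assumed stand-in for PageBlock.PagesPerBlock

    // Ceiling division: one extra block whenever the pages don't divide evenly.
    static uint GetNumBlocks(ulong physicalRAM)
    {
        ulong pages = physicalRAM / PageSize;
        return (uint)((pages + PagesPerBlock - 1) / PagesPerBlock);
    }

    static void Main()
    {
        Console.WriteLine(GetNumBlocks(64 * PageSize));   // 1: fits exactly
        Console.WriteLine(GetNumBlocks(65 * PageSize));   // 2: one page spills into a new block
    }
}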
Example #7
        private unsafe void CheckConsistency()
        {
#if SELF_TEST
            UIntPtr freePagesByTable = 0;

            for (UIntPtr i = 0; i < pageCount; i++)
            {
                UIntPtr pageAddr = startAddr + (MemoryManager.PageSize * i);

                if (PageWord(i) == FreePage)
                {
                    // Validate this block's free information
                    FreeNode *thisBlock = (FreeNode *)pageAddr;
                    DebugStub.Assert(thisBlock->signature == FreeNode.Signature);

                    if (thisBlock->last != null)
                    {
                        // Multi-page free block; validate and skip ahead
                        DebugStub.Assert(thisBlock->last->node == thisBlock);
                        DebugStub.Assert(thisBlock->last->signature == LastNode.Signature);
                        UIntPtr numBytes = (UIntPtr)thisBlock->last - (UIntPtr)pageAddr +
                                           MemoryManager.PageSize;
                        DebugStub.Assert(numBytes == thisBlock->bytes);
                        DebugStub.Assert(MemoryManager.IsPageAligned(numBytes));
                        UIntPtr numPages = MemoryManager.PagesFromBytes(numBytes);

                        for (UIntPtr j = i; j < i + numPages; j++)
                        {
                            DebugStub.Assert(PageWord(j) == FreePage);
                        }

                        i += numPages - 1;
                        freePagesByTable += numPages;
                    }
                    else
                    {
                        // Single-page free block
                        if (i != pageCount - 1)
                        {
                            DebugStub.Assert(PageWord(i + 1) != FreePage);
                        }
                        freePagesByTable++;
                    }
                }
            }

            // Now make sure all free pages are accounted for
            UIntPtr freePagesByList = FreePageCountFromList();
            DebugStub.Assert(freePagesByList == freePagesByTable);
#endif
        }
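
The invariant the self-test enforces is simple: counting free pages by walking the page table must agree with counting them by walking the free list. Below is a toy model of that cross-check, with an array standing in for the real page table and a list of (start, pages) runs standing in for the FreeNode chain.

using System;
using System.Diagnostics;
using System.Linq;

static class ConsistencyDemo
{
    static void Main()
    {
        // Toy page table: true = free page, false = used page.
        bool[] table = { true, true, false, true, false, true, true, true };

        // Free list as (startIndex, pageCount) runs; must mirror the table exactly.
        var freeList = new[] { (start: 0, pages: 2), (start: 3, pages: 1), (start: 5, pages: 3) };

        int freeByTable = table.Count(p => p);
        int freeByList  = freeList.Sum(b => b.pages);

        Debug.Assert(freeByTable == freeByList);   // the invariant CheckConsistency enforces
        Console.WriteLine($"table says {freeByTable} free pages, list says {freeByList}");
    }
}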
Example #8
        /////////////////////////////////////
        // PRIVATE METHODS
        /////////////////////////////////////

        private unsafe UIntPtr FreePageCountFromList()
        {
            UIntPtr   retval = 0;
            FreeNode *entry  = freeList.head;
            FreeNode *prev   = null;

            while (entry != null)
            {
                DebugStub.Assert(MemoryManager.IsPageAligned(entry->bytes));
                retval += MemoryManager.PagesFromBytes(entry->bytes);
                DebugStub.Assert(entry->prev == prev);
                prev  = entry;
                entry = entry->next;
            }

            return(retval);
        }
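
FreePageCountFromList is a forward walk of a doubly-linked list that also validates each node's back-pointer and page alignment as it goes. The same pattern is shown below against a small managed stand-in for FreeNode (the real node is an unmanaged struct stored inside the free pages themselves).

using System;
using System.Diagnostics;

class Node
{
    public ulong Bytes;
    public Node Prev, Next;
}

static class FreeListWalkDemo
{
    const ulong PageSize = 4096;   // assumed page size

    static ulong CountPages(Node head)
    {
        ulong pages = 0;
        Node prev = null;
        for (Node n = head; n != null; n = n.Next)
        {
            Debug.Assert(n.Bytes % PageSize == 0);  // blocks are whole pages
            Debug.Assert(n.Prev == prev);           // back-pointer must match the walk
            pages += n.Bytes / PageSize;
            prev = n;
        }
        return pages;
    }

    static void Main()
    {
        var a = new Node { Bytes = 2 * PageSize };
        var b = new Node { Bytes = PageSize, Prev = a };
        a.Next = b;
        Console.WriteLine(CountPages(a));  // 3
    }
}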
Example #9
        private unsafe void VerifyOwner(UIntPtr startAddr, UIntPtr limitAddr, uint tag)
        {
#if DEBUG
            DebugStub.Assert(startAddr >= dataStart);
            DebugStub.Assert(limitAddr <= rangeLimit);

            UIntPtr startIdx = PageFromAddr(startAddr);
            UIntPtr limitIdx = MemoryManager.PagesFromBytes(limitAddr - descrBase);
            DebugStub.Assert(limitIdx <= PageCount);

            tag &= MemoryManager.ProcessPageMask;

            for (UIntPtr i = startIdx; i < limitIdx; i++)
            {
                DebugStub.Assert
                    ((pageTable[(uint)i] & MemoryManager.ProcessPageMask) == tag,
                    "VirtualMemoryRange.VerifyOwner page={0} i={1} tag={2}",
                    __arglist(i, i, tag));
            }
#endif
        }
Example #10
        //////////////////////////////////// Allocation and Free Routines.
        //
        // Allocation is optimized for the case where an allocation starts
        // with a relatively small amount of memory and grows over time.
        // This is exactly the behavior exhibited by stacks and GC heaps.
        //
        // The allocation strategy also works well for large initial
        // allocations.  The strategy would be very inefficient if a very
        // large number of small, completely independent allocations were
        // made.
        //
        // AllocateMemory(size) performs an initial allocation.
        // AllocateMemory(startAddr, size) performs growing allocations.
        //
        internal static unsafe UIntPtr AllocateMemory(UIntPtr size)
        {
            VTable.Assert(PageTable.PageAligned(size));
#if SINGULARITY_KERNEL
            UIntPtr addr = Sing_MemoryManager.KernelAllocate(
                Sing_MemoryManager.PagesFromBytes(size),
                Process.kernelProcess, 0, PageType.Unknown);
#elif SINGULARITY_PROCESS
            UIntPtr addr = PageTableService.Allocate(size);
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint((int)size);
            Kernel.Waypoint(811);
#endif // SINGULARITY_KERNEL

            if (addr != UIntPtr.Zero)
            {
                Util.MemClear(addr, size);
            }
            return(addr);
        }
Exemple #11
0
        internal static unsafe bool AllocateMemory(UIntPtr startAddr,
                                                   UIntPtr size)
        {
            VTable.Deny(inAllocator);
            inAllocator = true;
            VTable.Assert(PageTable.PageAligned(startAddr));
            VTable.Assert(PageTable.PageAligned(size));

#if SINGULARITY_KERNEL
            UIntPtr addr = Sing_MemoryManager.KernelExtend(
                startAddr, Sing_MemoryManager.PagesFromBytes(size),
                Process.kernelProcess, PageType.Unknown);
#elif SINGULARITY_PROCESS
            UIntPtr addr = PageTableService.AllocateExtend(startAddr, size);
#endif
            inAllocator = false;
            if (addr != UIntPtr.Zero)
            {
                Util.MemClear(addr, size);
                return(true);
            }
            return(false);
        }
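
Per the header comment above, the intended calling pattern is one AllocateMemory(size) for the initial region followed by AllocateMemory(startAddr, size) calls that extend it in place, where startAddr is the current end of the region. Below is a hypothetical caller-side sketch of that bookkeeping; the addresses are made up, and only the arithmetic mirrors the pattern.

using System;

static class GrowPatternDemo
{
    const ulong PageSize = 4096;    // assumed page size

    static void Main()
    {
        // Pretend this came back from the initial AllocateMemory(size) call.
        ulong baseAddr    = 0x0040_0000;
        ulong currentSize = 4 * PageSize;

        // To grow, the second overload is asked to extend at the current end:
        //     AllocateMemory(baseAddr + currentSize, growBy)
        ulong growBy   = 2 * PageSize;
        ulong extendAt = baseAddr + currentSize;
        currentSize   += growBy;    // on success the region stays contiguous

        Console.WriteLine($"extended at 0x{extendAt:x}; region is now " +
                          $"0x{baseAddr:x}..0x{baseAddr + currentSize:x}");
    }
}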
Example #12
        public unsafe void Free(UIntPtr addr, UIntPtr bytes, Process process)
        {
            if (addr == UIntPtr.Zero)
            {
                // Silently accept freeing null
                return;
            }

            // We always hand out memory in page-size chunks, so round up what
            // the caller thinks their block size is
            bytes = MemoryManager.PagePad(bytes);

            // Our blocks always start on page boundaries
            DebugStub.Assert(MemoryManager.IsPageAligned(addr));
            ushort tag   = process != null ? (ushort)process.ProcessId : KernelPage;
            bool   iflag = Lock();

            try {
                CheckConsistency();

                UIntPtr numPages = MemoryManager.PagesFromBytes(bytes);
                VerifyOwner(addr, numPages, tag);

                FreeNode *nextBlock = null;
                FreeNode *prevBlock = null;

                if ((addr + bytes) < heapLimit)
                {
                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        nextBlock = FreeNode.GetNodeAt(thisPtr, addr + bytes);
                    }
                }

                if (addr > startAddr)
                {
                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        prevBlock = LastNode.GetNodeFromLast(thisPtr, addr - MemoryManager.PageSize);
                    }
                }

                // Don't mark pages as free until we're done discovering the
                // previous and next blocks; otherwise that discovery can get
                // confused by finding itself adjacent to an apparently free
                // block.
                SetPages(addr, numPages, FreePage);

                // Coalesce with the preceding region
                if (prevBlock != null)
                {
                    addr   = (UIntPtr)prevBlock;
                    bytes += prevBlock->bytes;
                    freeList.Remove(prevBlock);
                }

                // Coalesce with the following region
                if (nextBlock != null)
                {
                    bytes += nextBlock->bytes;
                    freeList.Remove(nextBlock);
                }

                // Blocks should always be integral numbers of pages
                DebugStub.Assert(MemoryManager.IsPageAligned(bytes));

                // Create the free node.
                fixed(PhysicalHeap *thisPtr = &this)
                {
                    freeList.CreateAndInsert(thisPtr, addr, bytes / MemoryManager.PageSize);
                }

                CheckConsistency();
            }
            finally {
                Unlock(iflag);
            }
        }
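
Free's coalescing step boils down to: look at the page just below and the page just past the block, and fold any free neighbor into the block being released before reinserting it. Below is a managed sketch of only that decision; the real code discovers its neighbors through the FreeNode/LastNode headers stored in the free pages themselves.

using System;

static class CoalesceDemo
{
    // Hypothetical neighbor descriptors; in the real heap these are FreeNode
    // headers discovered at (addr + bytes) and (addr - PageSize).
    struct Neighbor { public bool Free; public ulong Start, Bytes; }

    static (ulong start, ulong bytes) Coalesce(
        ulong addr, ulong bytes, Neighbor prev, Neighbor next)
    {
        if (prev.Free)
        {
            addr   = prev.Start;       // grow the block leftward
            bytes += prev.Bytes;
        }
        if (next.Free)
        {
            bytes += next.Bytes;       // grow the block rightward
        }
        return (addr, bytes);
    }

    static void Main()
    {
        var prev = new Neighbor { Free = true,  Start = 0x1000, Bytes = 0x2000 };
        var next = new Neighbor { Free = false };
        var (start, size) = Coalesce(0x3000, 0x1000, prev, next);
        Console.WriteLine($"free block now 0x{start:x}..0x{start + size:x}");  // 0x1000..0x4000
    }
}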
Example #13
        public unsafe UIntPtr Allocate(UIntPtr limitAddr,
                                       UIntPtr bytes,
                                       UIntPtr alignment,
                                       Process process)
        {
            ushort  tag = process != null ? (ushort)process.ProcessId : KernelPage;
            UIntPtr blockPtr;
            bool    iflag = Lock();

            if (alignment < MemoryManager.PageSize)
            {
                alignment = MemoryManager.PageSize;
            }

            try {
                CheckConsistency();

                // Find an appropriately-sized block
                FreeNode *foundNode = freeList.FindGoodFit(bytes, alignment);

                if (foundNode == null)
                {
                    return(UIntPtr.Zero);
                }

                DebugStub.Assert(MemoryManager.IsPageAligned((UIntPtr)foundNode));

                // Respect alignment within the node
                blockPtr = MemoryManager.Pad((UIntPtr)foundNode, alignment);
                UIntPtr alignedSize = bytes + SpaceToAlign((UIntPtr)foundNode, alignment);
                DebugStub.Assert(alignedSize == (blockPtr + bytes) - (UIntPtr)foundNode);
                DebugStub.Assert(foundNode->bytes >= alignedSize);

                // Give back any extra pages
                UIntPtr numPages   = MemoryManager.PagesFromBytes(MemoryManager.PagePad(alignedSize));
                UIntPtr chunkPages = MemoryManager.PagesFromBytes(foundNode->bytes);

                DebugStub.Assert(chunkPages >= numPages);
                UIntPtr extraPages = chunkPages - numPages;

                if (extraPages > 0)
                {
                    // Give back the extra memory
                    UIntPtr remainderPtr = (UIntPtr)foundNode + (numPages * MemoryManager.PageSize);

                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        freeList.CreateAndInsert(thisPtr, remainderPtr, extraPages);
                    }
                }

                SetPages((UIntPtr)foundNode, numPages, tag);
                CheckConsistency();
            }
            finally {
                Unlock(iflag);
            }

            // TODO: Flexible limit specification not yet implemented
            if (limitAddr > UIntPtr.Zero)
            {
                DebugStub.Assert(blockPtr < limitAddr);
            }

            return(blockPtr);
        }
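
Allocate pads the found node up to the requested alignment, counts how many whole pages the padded allocation covers, and returns whatever is left of the chunk to the free list. The same bookkeeping is shown below with concrete hypothetical numbers, assuming 4 KB pages and a power-of-two alignment.

using System;

static class AlignDemo
{
    const ulong PageSize = 4096;   // assumed page size

    // Round value up to the next multiple of a power-of-two alignment.
    static ulong Pad(ulong value, ulong align) => (value + align - 1) & ~(align - 1);

    static void Main()
    {
        ulong node      = 0x0001_2000;     // start of the free chunk we found
        ulong nodeBytes = 16 * PageSize;   // the chunk is 16 pages long
        ulong bytes     = 3 * PageSize;    // caller wants 3 pages
        ulong alignment = 0x8000;          // 32 KB alignment

        ulong blockPtr    = Pad(node, alignment);               // aligned start handed out
        ulong alignedSize = bytes + (blockPtr - node);          // padding + payload
        ulong numPages    = Pad(alignedSize, PageSize) / PageSize;
        ulong extraPages  = nodeBytes / PageSize - numPages;    // pages returned to the free list
        ulong remainder   = node + numPages * PageSize;         // where the leftover block starts

        Console.WriteLine($"hand out 0x{blockPtr:x}, keep {numPages} pages, " +
                          $"return {extraPages} pages at 0x{remainder:x}");
    }
}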
Example #14
        /////////////////////////////////////
        // PUBLIC METHODS
        /////////////////////////////////////

        //
        // The range of memory turned over to a VirtualMemoryRange structure
        // must not have *any* mapped pages in it to start out with
        //
        // A VirtualMemoryRange can build a pagetable that *describes*
        // more memory than it *manages* (this supports some kernel GC
        // oddities). In that case, pages out-of-range are marked
        // as PageType.Unknown. Obviously, allocation requests are never
        // satisfied with out-of-bounds data.
        //
        internal unsafe VirtualMemoryRange_struct(
            UIntPtr baseAddr, UIntPtr limitAddr,
            UIntPtr descrBaseAddr, UIntPtr descrLimitAddr,
            ProtectionDomain inDomain)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(baseAddr));
            DebugStub.Assert(MemoryManager.IsPageAligned(limitAddr));
            DebugStub.Assert(MemoryManager.IsPageAligned(descrBaseAddr));
            DebugStub.Assert(MemoryManager.IsPageAligned(descrLimitAddr));

            // The descriptive range can't be smaller than the managed range
            DebugStub.Assert(descrLimitAddr >= limitAddr);
            DebugStub.Assert(descrBaseAddr <= baseAddr);

            descrBase  = descrBaseAddr;
            descrLimit = descrLimitAddr;
            rangeBase  = baseAddr;
            rangeLimit = limitAddr;
            rangeLock  = new SpinLock(SpinLock.Types.VirtualMemoryRange);

            describedPages = MemoryManager.PagesFromBytes(descrLimit - descrBase);

            // Figure out how many pages we need for a page description table
            UIntPtr pageTableSize = MemoryManager.PagePad(describedPages * sizeof(uint));

            dataStart = baseAddr + pageTableSize;
            nextAlloc = dataStart;

            // Commit and prepare the page table
            pageTable = (uint *)baseAddr;

            bool success = MemoryManager.CommitAndMapRange(
                baseAddr, baseAddr + pageTableSize, inDomain);

            if (!success)
            {
                Kernel.Panic("Couldn't get pages to create a new VirtualMemoryRange page table");
            }

            allocatedBytes = 0;
            allocatedCount = 0;
            freedBytes     = 0;
            freedCount     = 0;

            // Describe the pages outside our range as Unknown
            if (descrBase < rangeBase)
            {
                SetRange(descrBase, rangeBase, MemoryManager.PageUnknown);
            }

            if (descrLimit > rangeLimit)
            {
                SetRange(rangeLimit, descrLimit, MemoryManager.PageUnknown);
            }

            // The page-table pages themselves are in use by the System
            SetRange((UIntPtr)pageTable,
                     (UIntPtr)pageTable + pageTableSize,
                     MemoryManager.KernelPageNonGC);

            // Describe pages in-range as Free
            SetRange(dataStart, rangeLimit, MemoryManager.PageFree);

#if DEBUG
            // Check that our data range is pristine
            for (UIntPtr stepAddr = dataStart;
                 stepAddr < rangeLimit;
                 stepAddr += MemoryManager.PageSize)
            {
                DebugStub.Assert(!VMManager.IsPageMapped(stepAddr));
            }
#endif
        }
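
The constructor carves the managed range into three kinds of pages: the descriptor table itself at the base, allocatable data above it, and (when the described span is wider than the managed span) out-of-range pages that are only described, never handed out. Below is a sketch of the layout arithmetic, assuming 4 KB pages and the 4-byte-per-page entry implied by the uint* pageTable; all addresses are hypothetical.

using System;

static class VmRangeLayoutDemo
{
    const ulong PageSize = 4096;   // assumed page size

    static ulong PagePad(ulong b) => (b + PageSize - 1) & ~(PageSize - 1);

    static void Main()
    {
        ulong descrBase = 0x4000_0000, descrLimit = 0x8000_0000;  // described span
        ulong rangeBase = 0x4800_0000, rangeLimit = 0x7800_0000;  // managed span (subset)

        ulong describedPages = (descrLimit - descrBase) / PageSize;
        ulong pageTableSize  = PagePad(describedPages * (ulong)sizeof(uint));
        ulong dataStart      = rangeBase + pageTableSize;

        Console.WriteLine($"descriptor table: {pageTableSize / PageSize} pages at 0x{rangeBase:x}");
        Console.WriteLine($"allocatable data: 0x{dataStart:x}..0x{rangeLimit:x}");
        Console.WriteLine("pages in [descrBase, rangeBase) and [rangeLimit, descrLimit) are marked Unknown");
    }
}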
Example #15
        internal static unsafe void ReturnStackSegmentRawCommon(ref ThreadContext context,
                                                                bool kernelAllocation,
                                                                bool initialStack)
        {
            UIntPtr begin = context.stackBegin;
            UIntPtr limit = context.stackLimit;

            StackHead *head = (StackHead *)(begin - sizeof(StackHead));

#if DO_TRACE_STACKS
            Kernel.Waypoint(669);
#endif

            UIntPtr addr = limit - SafetyBufferSize;
            UIntPtr size = begin - limit + SafetyBufferSize;

#if DEBUG_STACK_VERBOSE
            fixed(ThreadContext *ptr = &context)
            {
                Tracing.Log(Tracing.Debug,
                            "ReturnStackSegmentRaw(ctx={0:x8}) [{1:x8}..{2:x8}]\n",
                            (UIntPtr)ptr, context.stackLimit, context.stackBegin);
            }
#endif

#if !PAGING
            context.stackBegin = head->prevBegin;
            context.stackLimit = head->prevLimit;
#else
            //context.stackBegin = head->prevBegin;
            //context.stackLimit = head->prevLimit;
            // Moved below, because of the following scenario:
            //   - call UnlinkStack
            //   - UnlinkStack switches to the scheduler stack
            //   - UnlinkStack calls ReturnStackSegmentRaw, which calls
            //     various other methods
            //   - one of the other methods invokes write barrier code
            //   - the write barrier code performs a stack link check
            //   - If context.stackLimit is already set to head->prevLimit,
            //     then it may appear that we're out of stack space,
            //     even if we're really not, so we jump to LinkStack
            //   - LinkStack overwrites the scheduler stack
            // TODO: really fix this.
            UIntPtr stackBegin = head->prevBegin;
            UIntPtr stackLimit = head->prevLimit;
#endif

            Process owner = Process.GetProcessByID(context.processId);
            //
            //// See note above in GetStackSegmentRaw
            //if ((owner != Process.kernelProcess) &&
            //(addr >= BootInfo.KERNEL_BOUNDARY)) {
            //MemoryManager.UserFree(addr, MemoryManager.PagesFromBytes(size), owner);
            //}
            //else {
            //MemoryManager.KernelFree(addr, MemoryManager.PagesFromBytes(size), owner);
            //}
            //
            MemoryManager.StackFree(addr, MemoryManager.PagesFromBytes(size), owner, kernelAllocation, initialStack);

#if PAGING
            // See comments above.
            context.stackBegin = stackBegin;
            context.stackLimit = stackLimit;
#endif

#if DEBUG_STACK_VERBOSE
            Tracing.Log(Tracing.Debug,
                        "ReturnStackSegment({0:x8}, {1:x8}) [{2:x8}..{3:x8}]\n",
                        addr, size, context.stackLimit, context.stackBegin);
#endif
        }
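
Returning a segment recovers the original chunk from the two saved stack pointers: the chunk starts one safety buffer below stackLimit, and its length is the distance from stackLimit to stackBegin plus that buffer. Here is a sketch with assumed constants (a 4 KB page and a one-page SafetyBufferSize; the real value comes from the Singularity sources).

using System;

static class StackReturnDemo
{
    const ulong PageSize = 4096;
    const ulong SafetyBufferSize = PageSize;   // assumed guard size at the low end

    static void Main()
    {
        // Hypothetical values saved in the ThreadContext when the segment was created.
        ulong stackBegin = 0x0020_A000;
        ulong stackLimit = 0x0020_1000;

        ulong addr = stackLimit - SafetyBufferSize;              // start of the whole chunk
        ulong size = stackBegin - stackLimit + SafetyBufferSize; // chunk length, whole pages

        Console.WriteLine($"freeing {size / PageSize} pages at 0x{addr:x}");
    }
}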
Example #16
        internal static unsafe StackHead *GetStackSegmentRaw(UIntPtr size,
                                                             ref ThreadContext context,
                                                             bool kernelAllocation,
                                                             bool initialStack)
        {
            // Allocate a new chunk, making room for StackHead at the top.
            // If you change these constants to add more data, see the
            // comment about InitialStackSize at the top of this file!
#if DO_TRACE_STACKS
            Kernel.Waypoint(667);
#endif
            if (size == UIntPtr.Zero)
            {
                size = InitialStackSize;
            }
            size = MemoryManager.PagePad(size + sizeof(StackHead) + SafetyBufferSize);

            UIntPtr chunk;


            Process owner = Process.GetProcessByID(context.processId);
            //
            //// NOTE: here's where we should be clever about
            //// whether to allocate a stack chunk in the user range
            //// or the kernel range. Except, if we switch contexts
            //// during an ABI call while using a user-range stack
            //// segment on a paging machine, we die. Gloss over
            //// this hackily by always getting stack segments
            //// from the kernel range.
            //if (kernelAllocation || (owner == Process.kernelProcess)) {
            //  chunk = MemoryManager.KernelAllocate(
            //      MemoryManager.PagesFromBytes(size), owner, 0, PageType.Stack);
            //}
            //else {
            //  chunk = MemoryManager.UserAllocate(
            //      MemoryManager.PagesFromBytes(size), owner, 0, PageType.Stack);
            //}
            //

            UIntPtr pageCount = MemoryManager.PagesFromBytes(size);
#if DEBUG_STACK_VERBOSE
            fixed(ThreadContext *ptr = &context)
            {
                Tracing.Log(Tracing.Debug,
                            "GetStackSegmentRaw(ctx={0:x8},size={1:d}) pages={2} [{3:x8}..{4:x8}]",
                            (UIntPtr)ptr, size, pageCount,
                            context.stackLimit, context.stackBegin);
            }
#endif
            chunk = MemoryManager.StackAllocate(pageCount, owner, 0, kernelAllocation, initialStack);

            if (chunk != UIntPtr.Zero)
            {
                // NB: We do _not_ zero out stack memory!
                // We assume that Bartok prevents access to prev contents.
                StackHead *head = (StackHead *)(chunk + size - sizeof(StackHead));

                context.stackBegin = chunk + size;
                context.stackLimit = chunk + SafetyBufferSize;

#if DEBUG_STACK_VERBOSE
                Tracing.Log(Tracing.Debug,
                            "GetStackSegmentRaw(size={0:d}) -> [{1:x8}..{2:x8}]",
                            size, context.stackLimit, context.stackBegin);
#endif
                return(head);
            }
            else
            {
                // Stack allocation failed.  In the future, we should
                // trigger a kernel exception; for now, we break to the
                // debugger.
#if DEBUG_STACK_VERBOSE
                Tracing.Log(Tracing.Debug,
                            "GetStackSegmentRaw: KernelAllocate failed!(siz={0:d})",
                            size);
#endif
                //DebugStub.Break();
                return(null);
            }
        }
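
GetStackSegmentRaw lays the chunk out with the StackHead at the very top and a safety buffer at the bottom; stackBegin points just past the chunk and stackLimit just above the buffer. The address arithmetic is shown below with concrete numbers, assuming a 4 KB page, a 16-byte StackHead, and a one-page SafetyBufferSize (the real sizes come from the Singularity sources).

using System;

static class StackSegmentDemo
{
    const ulong PageSize = 4096;
    const ulong SafetyBufferSize = PageSize;   // assumed guard area at the low end
    const ulong StackHeadSize = 16;            // assumed sizeof(StackHead)

    static ulong PagePad(ulong b) => (b + PageSize - 1) & ~(PageSize - 1);

    static void Main()
    {
        ulong requested = 8 * PageSize;
        ulong size  = PagePad(requested + StackHeadSize + SafetyBufferSize);
        ulong chunk = 0x0020_0000;                         // hypothetical allocation

        ulong head       = chunk + size - StackHeadSize;   // StackHead sits at the top
        ulong stackBegin = chunk + size;                   // stack grows down from here
        ulong stackLimit = chunk + SafetyBufferSize;       // usable stack stops above the guard

        Console.WriteLine($"chunk 0x{chunk:x}..0x{chunk + size:x}, head at 0x{head:x}");
        Console.WriteLine($"stackLimit 0x{stackLimit:x}, stackBegin 0x{stackBegin:x}");
    }
}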