Example #1
        /////////////////////////////////////
        // PRIVATE METHODS
        /////////////////////////////////////

        //
        // Finds a contiguous block of virtual memory.
        //
        // *** CALLER MUST HOLD OUR PROTECTIVE LOCK! ***
        //
        // We don't acquire the spinlock ourselves in case the caller
        // wants to do more work within it.
        //
        // Currently, the approach is VERY simplistic; we just keep handing out
        // pages starting at the base of the virtual memory space until we
        // bump into the top, after which allocation becomes much less efficient.
        //
        // The expectation is that higher-level components will do smarter
        // space management than this!
        //
        private unsafe UIntPtr ReserveInternal(UIntPtr numPages, Process process, uint extra, PageType type)
        {
            DebugStub.Assert(numPages >= 1);
            UIntPtr mapAddr = UIntPtr.Zero;

            uint tag =
                (process != null ? process.ProcessTag : MemoryManager.KernelPage)
                | (extra & MemoryManager.ExtraMask)
                | (uint)type;

            UIntPtr blockBytes = MemoryManager.BytesFromPages(numPages);

            if (nextAlloc + blockBytes <= rangeLimit)
            {
                mapAddr = nextAlloc;
                UIntPtr limitAddr = mapAddr + blockBytes;
                nextAlloc = limitAddr;

                SetRange(mapAddr, limitAddr, tag);
            }
            else
            {
                // slow-path allocation - just do first-fit for now...
                // TODO: Need to integrate freelist management from flatpages et al
                UIntPtr startIdx = PageFromAddr(dataStart);
                UIntPtr limitIdx = MemoryManager.PageFromAddr(rangeLimit - descrBase) - (numPages - 1);
                DebugStub.Assert(limitIdx <= PageCount);

                for (UIntPtr pageIdx = startIdx; pageIdx < limitIdx; pageIdx++)
                {
                    UIntPtr pageCount = 0;

                    while (pageCount < numPages)
                    {
                        uint pageTag = *(pageTable + pageIdx + pageCount);
                        if (pageTag != MemoryManager.PageFree)
                        {
                            break;
                        }
                        pageCount++;
                    }

                    if (pageCount == numPages)
                    {
                        mapAddr = dataStart + MemoryManager.BytesFromPages(pageIdx - startIdx);

                        SetRange(mapAddr, mapAddr + blockBytes, tag);
                        break;
                    }
                    else
                    {
                        pageIdx += pageCount;
                    }
                }
            }

            return(mapAddr);
        }
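
The header comment describes a two-level strategy: a bump-pointer fast path that hands out pages from nextAlloc, and a linear first-fit scan over the page table once the pointer bumps into the top of the range. Below is a minimal standalone sketch of that first-fit scan, assuming a simplified model: the page table is a plain uint array and a tag of 0 (PageFree) marks an unallocated page. FindRun and the array-based layout are illustrative stand-ins, not the kernel's API.

using System;

class FirstFitSketch
{
    const uint PageFree = 0;

    // Returns the index of the first run of numPages free pages in
    // [startIdx, limitIdx), or -1 if no such run exists.
    static int FindRun(uint[] pageTable, int startIdx, int limitIdx, int numPages)
    {
        for (int pageIdx = startIdx; pageIdx + numPages <= limitIdx; pageIdx++)
        {
            int pageCount = 0;

            // Count free pages starting at pageIdx, stopping at the
            // first page that already carries a tag.
            while (pageCount < numPages &&
                   pageTable[pageIdx + pageCount] == PageFree)
            {
                pageCount++;
            }

            if (pageCount == numPages)
            {
                return pageIdx;     // found a large-enough hole
            }

            // As in the original loop, skip past the used page that
            // ended this run instead of rescanning it.
            pageIdx += pageCount;
        }
        return -1;
    }

    static void Main()
    {
        // 1 = used, 0 = free; a three-page hole starts at index 4.
        uint[] tags = { 1, 0, 0, 1, 0, 0, 0, 1 };
        Console.WriteLine(FindRun(tags, 0, tags.Length, 3));   // prints 4
    }
}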
Example #2
        private unsafe UIntPtr FreeAndUncommit(UIntPtr startPage,
                                               UIntPtr numPages)
        {
            // Drop all the memory
            MemoryManager.UnmapAndReleaseRange(
                startPage, startPage + MemoryManager.BytesFromPages(numPages));

            // Do the bookkeeping
            return(FreeInternal(startPage, numPages));
        }
Example #3
        private unsafe UIntPtr FreeInternal(UIntPtr startPage,
                                            UIntPtr numPages)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(startPage));
            DebugStub.Assert(startPage >= dataStart);
            UIntPtr blockLimit = startPage + MemoryManager.BytesFromPages(numPages);

            DebugStub.Assert(blockLimit <= rangeLimit);
            DebugStub.Assert(nextAlloc >= blockLimit);

            bool iflag = Lock();

            try {
                // Mark the pages free within our lock so we can rely on
                // blocks being uniformly marked free in here
                SetRange(startPage, blockLimit, MemoryManager.PageFree);

                // Reduce fragmentation: lower the nextAlloc pointer if
                // we have freed the top end of memory.
                if (nextAlloc == blockLimit)
                {
                    nextAlloc = startPage;

                    //
                    // NOTE: this chooses to use the page table
                    // information, which is currently usermode-accessible
                    // in the case of a user-mode range. Keep in mind that
                    // the information may be corrupt!
                    //

                    // Further optimization: drop nextAlloc more
                    // if the memory below us is free, too.
                    uint *  pageTable = PageTable;
                    UIntPtr stepPage  = nextAlloc;
                    uint    val;

                    do
                    {
                        nextAlloc = stepPage;
                        stepPage -= MemoryManager.PageSize;
                        uint dsc = pageTable[(uint)PageFromAddr(stepPage)];
                        val = dsc & MemoryManager.SystemPageMask;
                    } while ((val == MemoryManager.PageFree) &&
                             (!VMManager.IsPageMapped(stepPage))); // sanity
                }

                freedCount++;
                freedBytes += (ulong)MemoryManager.BytesFromPages(numPages);
            }
            finally {
                Unlock(iflag);
            }

            return(MemoryManager.BytesFromPages(numPages));
        }
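
The interesting part of FreeInternal is the do/while loop: when the freed block sits right at the top of the allocated region (nextAlloc == blockLimit), nextAlloc is walked downward over any further free pages so that ReserveInternal's bump-pointer fast path can reuse them. Here is a minimal sketch of that walk under simplifying assumptions: the page table is a uint array, pages are 4 KiB, and a tag of 0 means free. The original additionally cross-checks VMManager.IsPageMapped because a user-accessible page table cannot be trusted; that check is omitted here.

using System;

class LowerNextAllocSketch
{
    const uint PageFree = 0;
    const int  PageSize = 4096;

    // Walks nextAlloc downward while the page immediately below it is
    // still free, and returns the lowered value.
    static long LowerNextAlloc(uint[] pageTable, long nextAlloc, long rangeBase)
    {
        long stepPage = nextAlloc;
        while (stepPage - PageSize >= rangeBase)
        {
            long below = stepPage - PageSize;
            if (pageTable[(int)((below - rangeBase) / PageSize)] != PageFree)
            {
                break;      // hit an allocated page; stop lowering
            }
            stepPage = below;
        }
        return stepPage;
    }

    static void Main()
    {
        // Pages 2..4 are free; nextAlloc sits at the top of page 4.
        uint[] tags = { 1, 1, 0, 0, 0 };
        long rangeBase = 0x10000;
        long nextAlloc = rangeBase + 5 * PageSize;
        Console.WriteLine("0x{0:X}", LowerNextAlloc(tags, nextAlloc, rangeBase));
        // prints 0x12000, the base of page 2
    }
}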
Example #4
        internal static void KernelFree(UIntPtr startAddr, UIntPtr numPages, Process process)
        {
            if (useAddressTranslation)
            {
                KernelRange.Free(startAddr, numPages, process);
            }
            else
            {
                FlatPages.Free(startAddr, MemoryManager.BytesFromPages(numPages), process);
            }
        }
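
Note the unit mismatch between the two branches: KernelRange.Free takes a page count directly, while FlatPages.Free expects a byte length, hence the BytesFromPages conversion on the flat-pages path. StackFree in Example #5 below follows the same pattern.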
Example #5
        internal static void StackFree(UIntPtr startAddr, UIntPtr numPages, Process process, bool kernelAllocation, bool initialStack)
        {
            if (useAddressTranslation)
            {
                KernelRange.Free(startAddr, numPages, process);
            }
            else
            {
                FlatPages.StackFree(startAddr, MemoryManager.BytesFromPages(numPages), process, kernelAllocation, initialStack);
            }
        }
Example #6
        //
        // Releases a previously reserved range of memory, but does not
        // uncommit any pages.
        //
        internal UIntPtr Unreserve(UIntPtr startAddr, UIntPtr numPages, Process process)
        {
            DebugStub.Assert(MemoryManager.IsPageAligned(startAddr));
            DebugStub.Assert(numPages > 0);
            UIntPtr blockLimit = startAddr + MemoryManager.BytesFromPages(numPages);

            DebugStub.Assert(startAddr >= dataStart);
            DebugStub.Assert(blockLimit <= rangeLimit);

            // Strictly a sanity check
            uint tag = process != null ? process.ProcessTag : MemoryManager.KernelPage;

            VerifyOwner(startAddr, blockLimit, tag);

            return(FreeInternal(startAddr, numPages));
        }
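
Unlike Free in Example #8, which goes through FreeAndUncommit to unmap and release the underlying pages, Unreserve calls FreeInternal directly: the bookkeeping is updated and the range becomes reservable again, but, as the header comment says, the committed pages themselves are left untouched.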
Example #7
        //
        // Allocates and commits a new range of memory.
        //
        internal UIntPtr Allocate(UIntPtr numPages, Process process,
                                  uint extra, PageType type,
                                  ProtectionDomain inDomain)
        {
            DebugStub.Assert(numPages > 0);
            UIntPtr mapAddr = UIntPtr.Zero;

            bool iflag = Lock();

            // Within our lock, figure out where we're going to stick the newly
            // mapped memory, and mark it used.
            try {
                mapAddr = ReserveInternal(numPages, process, extra, type);

                if (mapAddr == UIntPtr.Zero)
                {
                    DebugStub.Assert(false, "Failed to find a mapping point for new memory");
                    return(mapAddr);
                }
            }
            finally {
                Unlock(iflag);
            }

            UIntPtr limitAddr = mapAddr + MemoryManager.BytesFromPages(numPages);

            // Now actually map pages to the chosen region.
            if (!MemoryManager.CommitAndMapRange(mapAddr, limitAddr, inDomain))
            {
                // yikes; failure
                return(UIntPtr.Zero);
            }

            allocatedCount++;
            allocatedBytes += (ulong)MemoryManager.BytesFromPages(numPages);

            if (process != null)
            {
                process.Allocated(MemoryManager.BytesFromPages(numPages));
            }

            return(mapAddr);
        }
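
Note the locking discipline, which matches the caller contract spelled out on ReserveInternal in Example #1: the range is reserved and tagged while the spinlock is held, but the potentially expensive CommitAndMapRange call runs after the lock has been released. Because the reservation has already marked those pages as used, no concurrent allocation can be handed the same range in the meantime.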
Example #8
        internal unsafe UIntPtr Free(UIntPtr startPage,
                                     UIntPtr numPages,
                                     Process process)
        {
            DebugStub.Assert(startPage >= dataStart);
            DebugStub.Assert(MemoryManager.IsPageAligned(startPage));
            UIntPtr blockLimit = startPage + MemoryManager.BytesFromPages(numPages);

            DebugStub.Assert(blockLimit <= rangeLimit);

            //
            // NOTE: This is strictly a sanity check. The pagetable
            // is ultimately not trustworthy because it is writeable by
            // user-mode code.
            //
            uint tag = process != null ? process.ProcessTag : MemoryManager.KernelPage;

            VerifyOwner(startPage, blockLimit, tag);

            // Do it
            return(FreeAndUncommit(startPage, numPages));
        }
Example #9
        internal UIntPtr AddrFromPage(UIntPtr pageNum)
        {
            DebugStub.Assert(pageNum < PageCount);
            return(descrBase + MemoryManager.BytesFromPages(pageNum));
        }
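
AddrFromPage is straightforward page arithmetic: the range's descriptor base plus the byte offset of the given page. A tiny sketch of that mapping and its inverse follows, assuming 4 KiB pages; descrBase's value and the shift-based helpers are hypothetical stand-ins, since the snippets do not show how BytesFromPages or PageFromAddr are implemented.

using System;

class PageArithmeticSketch
{
    const int PageBits = 12;                 // assume 4 KiB pages
    static ulong descrBase = 0x80000000;     // hypothetical range base

    static ulong AddrFromPage(ulong pageNum) => descrBase + (pageNum << PageBits);
    static ulong PageFromAddr(ulong addr)    => (addr - descrBase) >> PageBits;

    static void Main()
    {
        ulong addr = AddrFromPage(3);
        Console.WriteLine("0x{0:X} -> page {1}", addr, PageFromAddr(addr));
        // prints 0x80003000 -> page 3
    }
}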