Example #1
0
        /// <summary>
        /// Queries the memory region containing <paramref name="queryAddr"/>
        /// via VirtualQuery.  On success, reports the region's allocation
        /// base and the size from queryAddr's containing allocation, and
        /// returns whether the region is in use (not MEM_FREE).  On failure
        /// (queryAddr is a kernel-mode pointer), reports a minimal region
        /// at queryAddr and returns false.
        /// </summary>
        internal static unsafe bool QueryMemory(UIntPtr queryAddr,
                                                out UIntPtr regionAddr,
                                                out UIntPtr regionSize)
        {
            VTable.Assert(PageTable.PageAligned(queryAddr));
            MEMORY_BASIC_INFORMATION info;
            UIntPtr infoSize = (UIntPtr)sizeof(MEMORY_BASIC_INFORMATION);
            UIntPtr written  = VirtualQuery((void *)queryAddr, out info, infoSize);

            Trace.Log(Trace.Area.Page,
                      "VirtualQuery {0}: base={1} size={2}",
                      __arglist(queryAddr, info.AllocationBase,
                                info.RegionSize));
            if (written == 0)
            {
                // queryAddr is a kernel-mode pointer
                regionAddr = queryAddr;
                regionSize = (UIntPtr)sizeof(int);
                return false;
            }
            VTable.Assert(written == infoSize &&
                          info.BaseAddress == queryAddr);
            regionAddr = info.AllocationBase;
            regionSize = (queryAddr - regionAddr) + info.RegionSize;
            return info.State != MEM_FREE;
        }
 // Slides object runs recorded in the relocation queue down to their new
 // locations, maintaining page bookkeeping (unused-space markers, alignment
 // tokens, interior-pointer table entries) for any gaps left between runs.
 //
 // previousEnd: address just past the last data already compacted; the
 // first gap check is made against this value.
 private unsafe void CompactHeapObjects(UIntPtr previousEnd)
 {
     while (!this.relocationQueue.IsEmpty)
     {
         // Each queue entry is a triple: source, destination, run length.
         UIntPtr sourceAddress      = this.relocationQueue.Read();
         UIntPtr destinationAddress = this.relocationQueue.Read();
         UIntPtr runLength          = this.relocationQueue.Read();
         if (previousEnd != destinationAddress)
         {
             // There is a gap between the previous run and this one.
             VTable.Assert(previousEnd < destinationAddress);
             if (PageTable.Page(destinationAddress) !=
                 PageTable.Page(previousEnd + PreHeader.Size))
             {
                 // The runs land on different pages: close out the old
                 // page and prepare the destination page.
                 if (!PageTable.PageAligned(previousEnd))
                 {
                     // Mark the tail of the old page as unused and zero
                     // the remainder of that page.
                     UIntPtr pageLimit = PageTable.PagePad(previousEnd);
                     BumpAllocator.WriteUnusedMarker(previousEnd);
                     previousEnd += UIntPtr.Size;
                     Util.MemClear(previousEnd,
                                   pageLimit - previousEnd);
                 }
                 if (!PageTable.PageAligned(destinationAddress))
                 {
                     // This only happens before pinned objects and
                     // large objects
                     UIntPtr start =
                         PageTable.PageAlign(destinationAddress);
                     VTable.Assert(previousEnd <= start);
                     // Fill the page prefix with alignment tokens.
                     while (start < destinationAddress)
                     {
                         Allocator.WriteAlignment(start);
                         start += UIntPtr.Size;
                     }
                 }
                 // Record the first object on the destination page.
                 UIntPtr objAddr = destinationAddress + PreHeader.Size;
                 InteriorPtrTable.SetFirst(objAddr);
             }
             else
             {
                 // Same page: fill the gap with alignment tokens.
                 VTable.Assert(previousEnd < destinationAddress);
                 UIntPtr start = previousEnd;
                 while (start < destinationAddress)
                 {
                     Allocator.WriteAlignment(start);
                     start += UIntPtr.Size;
                 }
             }
         }
         // Slide the run into place.  NOTE(review): source and destination
         // may be close together -- presumably Util.MemCopy handles
         // overlapping downward copies; confirm.
         Util.MemCopy(destinationAddress, sourceAddress, runLength);
         previousEnd = destinationAddress + runLength;
     }
     // Zero out the end of the allocation page
     if (!PageTable.PageAligned(previousEnd))
     {
         UIntPtr pageLimit = PageTable.PagePad(previousEnd);
         Util.MemClear(previousEnd, pageLimit - previousEnd);
     }
     this.relocationQueue.Cleanup(true);
 }
        /// <summary>
        /// Applies the given visitor to every object in the page-aligned
        /// address range [lowAddr, highAddr), delegating the actual walk
        /// to SegregatedFreeList.VisitObjects.
        /// </summary>
        void VisitObjects(ObjectLayout.ObjectVisitor objectVisitor,
                          UIntPtr lowAddr, UIntPtr highAddr)
        {
            // Both bounds must fall on page boundaries.
            VTable.Assert(PageTable.PageAligned(lowAddr));
            VTable.Assert(PageTable.PageAligned(highAddr));

            UIntPtr firstPage = PageTable.Page(lowAddr);
            UIntPtr limitPage = PageTable.Page(highAddr);
            SegregatedFreeList.VisitObjects(firstPage, limitPage, objectVisitor);
        }
Example #4
0
 // Releases a page-aligned region: tells the memory manager its contents
 // no longer matter, then marks all of its pages unused.
 private static void Clear(UIntPtr startAddr,
                           UIntPtr regionSize)
 {
     // Both the start address and the length must be page-aligned.
     VTable.Assert(PageTable.PageAligned(startAddr));
     VTable.Assert(PageTable.PageAligned(regionSize));
     MemoryManager.IgnoreMemoryContents(startAddr, regionSize);
     UIntPtr firstPage = PageTable.Page(startAddr);
     UIntPtr pageCount = PageTable.PageCount(regionSize);
     MarkUnusedPages(Thread.CurrentThread, firstPage, pageCount, false);
 }
        // Advances destPage/destLimit past destination (zombie) areas that
        // lie wholly before the page containing sourceCursor, registering
        // each skipped run of pages for later reclamation and taking
        // ownership of the pages the copy will next write into.
        //
        // destPage:     current destination area page (updated).
        // destCursor:   current allocation point in the destination area.
        // destLimit:    limit of the current destination area (updated).
        // sourceCursor: address of the next object to be copied.
        private unsafe void SkipDestinationAreas(ref UIntPtr destPage,
                                                 UIntPtr destCursor,
                                                 ref UIntPtr destLimit,
                                                 UIntPtr sourceCursor)
        {
            UIntPtr cursorPage = PageTable.Page(destCursor);
            UIntPtr sourcePage = PageTable.Page(sourceCursor);

            if (cursorPage != sourcePage)
            {
                // A partially filled page is not skipped, so start
                // recording from the next page boundary.
                UIntPtr destPageLimit = PageTable.PagePad(destCursor);
                if (destPageLimit != destCursor)
                {
                    cursorPage++;
                }
                VTable.Assert(PageTable.PageAligned(destLimit));
                UIntPtr limitPage = PageTable.Page(destLimit);
                // Walk forward one zombie area at a time until we reach
                // the area containing the source page.
                while (destPage < sourcePage)
                {
                    if (cursorPage < limitPage)
                    {
                        this.RegisterSkippedPages(cursorPage, limitPage);
                    }
                    // Skip the non-zombie gap to the next zombie area.
                    do
                    {
                        destPage++;
                    } while (!IsMyZombiePage(destPage));
                    cursorPage = destPage;
                    // Find the end of this zombie area.
                    do
                    {
                        destPage++;
                    } while (IsMyZombiePage(destPage));
                    limitPage = destPage;
                }
                destLimit = PageTable.PageAddr(limitPage);
                VTable.Assert(destPage > sourcePage);
                VTable.Assert(cursorPage <= sourcePage);
                if (cursorPage < sourcePage)
                {
                    // Register the unused prefix of the area we stopped in.
                    this.RegisterSkippedPages(cursorPage, sourcePage);
                    cursorPage = sourcePage;
                }
                // Claim the pages we will copy into and record the first
                // object on the source cursor's page.
                InteriorPtrTable.ClearFirst(cursorPage, destPage);
                InteriorPtrTable.SetFirst(sourceCursor + PreHeader.Size);
                if (GC.remsetType == RemSetType.Cards)
                {
                    OffsetTable.ClearLast(PageTable.PageAddr(cursorPage),
                                          PageTable.PageAddr(destPage) - 1);
                }
            }
        }
Example #6
0
        /// <summary>
        /// Abandons the allocator's current region: marks any partial page
        /// tail as unused space, then resets all allocation pointers to zero.
        /// </summary>
        internal void Truncate()
        {
            UIntPtr currentAllocPtr = this.allocPtr;
            if (!PageTable.PageAligned(currentAllocPtr))
            {
                // Terminate the partially used page with an unused marker.
                WriteUnusedMarker(currentAllocPtr);
            }
            // NB: allocPtr must never be zero unless zeroedLimit is also zero.
            // NB: zeroedLimit can be zero if GC is necessary.
            this.zeroedLimit  = UIntPtr.Zero;
            this.reserveLimit = UIntPtr.Zero;
            this.allocPtr     = UIntPtr.Zero;
            this.allocNew     = UIntPtr.Zero;
        }
Example #7
0
        //////////////////////////////////// Allocation and Free Routines.
        //
        // Allocation is optimized for the case where an allocation starts
        // with a relatively small amount of memory and grows over time.
        // This is exactly the behavior exhibited by stacks and GC heaps.
        //
        // The allocation strategy also works well for large initial
        // allocations.  The strategy would be very inefficient if a very
        // large number of small, completely independent allocations are
        // made.
        //
        // AllocateMemory(size) performs an initial allocation.
        // AllocateMemory(startAddr, size) performs growing allocations.
        //
        // Performs an initial, page-aligned allocation (see the strategy
        // comment above).  Returns the address of the zeroed region, or
        // UIntPtr.Zero on failure.
        internal static unsafe UIntPtr AllocateMemory(UIntPtr size)
        {
            VTable.Assert(PageTable.PageAligned(size));
#if SINGULARITY_KERNEL
            // Kernel build: allocate pages directly from the kernel's
            // memory manager on behalf of the kernel process.
            UIntPtr addr = Sing_MemoryManager.KernelAllocate(
                Sing_MemoryManager.PagesFromBytes(size),
                Process.kernelProcess, 0, PageType.Unknown);
#elif SINGULARITY_PROCESS
            // User-process build: go through the page-table service.
            UIntPtr addr = PageTableService.Allocate(size);
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint((int)size);
            Kernel.Waypoint(811);
#endif // SINGULARITY_KERNEL

            if (addr != UIntPtr.Zero)
            {
                // Callers rely on newly allocated memory being zeroed.
                Util.MemClear(addr, size);
            }
            return(addr);
        }
Example #8
0
        //===============================
        // Routines to mark special pages

        //===============================
        // Marks every page overlapped by [startAddr, startAddr + size) as
        // a System (non-heap) page in the page table.

        private static unsafe void SetNonheapPages(UIntPtr startAddr,
                                                   UIntPtr size)
        {
            UIntPtr firstPage = PageTable.Page(startAddr);
            UIntPtr endAddr   = startAddr + size;
            UIntPtr limitPage = PageTable.Page(endAddr);

            // A partially covered trailing page is still occupied, so
            // round the limit up.
            if (!PageTable.PageAligned(endAddr))
            {
                limitPage++;
            }
            UIntPtr pageCount = limitPage - firstPage;

            if (pageCount == 1)
            {
                PageTable.SetType(firstPage, PageType.System);
            }
            else
            {
                PageTable.SetType(firstPage, pageCount, PageType.System);
            }
        }
Example #9
0
        // Grows an existing allocation in place at startAddr by size bytes.
        // Both arguments must be page-aligned.  Returns true (and zeroes
        // the new memory) on success, false if the extension failed.
        internal static unsafe bool AllocateMemory(UIntPtr startAddr,
                                                   UIntPtr size)
        {
            // Guard against reentrant allocation.
            VTable.Deny(inAllocator);
            inAllocator = true;
            VTable.Assert(PageTable.PageAligned(startAddr));
            VTable.Assert(PageTable.PageAligned(size));

#if SINGULARITY_KERNEL
            // Kernel build: extend via the kernel memory manager.
            UIntPtr addr = Sing_MemoryManager.KernelExtend(
                startAddr, Sing_MemoryManager.PagesFromBytes(size),
                Process.kernelProcess, PageType.Unknown);
#elif SINGULARITY_PROCESS
            // User-process build: extend via the page-table service.
            UIntPtr addr = PageTableService.AllocateExtend(startAddr, size);
#endif
            inAllocator = false;
            if (addr != UIntPtr.Zero)
            {
                // Callers rely on newly allocated memory being zeroed.
                Util.MemClear(addr, size);
                return(true);
            }
            return(false);
        }
        // Finds (or assembles, from zombie and unused pages) a destination
        // area large enough to hold objectSize bytes at destCursor.  First
        // tries to expand the current area in place; otherwise scans forward
        // for a zombie run, possibly merged with adjacent unused pages,
        // reserving and converting unused pages to zombie pages as needed.
        // Updates destPage/destCursor/destLimit to describe the chosen area.
        private unsafe void FindDestinationArea(ref UIntPtr destPage,
                                                ref UIntPtr destCursor,
                                                ref UIntPtr destLimit,
                                                UIntPtr objectSize,
                                                PageType destGeneration)
        {
            VTable.Assert(IsValidGeneration((int)destGeneration));

            UIntPtr cursorPage  = PageTable.Page(destCursor);
            UIntPtr limitPage   = PageTable.Page(destLimit);
            UIntPtr pageAddr    = PageTable.PagePad(destCursor);
            UIntPtr testPage    = limitPage;
            UIntPtr endTestPage = PageTable.PageCount(destCursor + objectSize);

            // Fast path: try to expand the current area in place when the
            // cursor sits just past one of our zombie pages.
            if (destCursor > UIntPtr.Zero &&
                IsMyZombiePage(PageTable.Page(destCursor - 1)))
            {
                VTable.Assert(destPage == limitPage);
                // Scan forward over zombie pages and (within the needed
                // range) unused pages.
                while (IsMyZombiePage(testPage) ||
                       (testPage < endTestPage &&
                        (PageTable.IsUnusedPage(testPage))))
                {
                    testPage++;
                }
                if (testPage >= endTestPage)
                {
                    // We can expand the current region
                    endTestPage = testPage;
                    VTable.Assert(PageTable.PageAligned(destLimit));
                    InteriorPtrTable.ClearFirst(limitPage, testPage);
                    if (GC.remsetType == RemSetType.Cards)
                    {
                        OffsetTable.ClearLast(PageTable.PageAddr(limitPage),
                                              PageTable.PageAddr(testPage) - 1);
                    }
                    // Reserve each run of unused pages and convert it to
                    // zombie pages, skipping runs already zombie.
                    while (limitPage != endTestPage)
                    {
                        VTable.Assert(PageTable.IsUnusedPage(destPage));
                        do
                        {
                            destPage++;
                        } while (destPage < endTestPage &&
                                 PageTable.IsUnusedPage(destPage));
                        bool fCleanPages = true;
                        bool status      =
                            PageManager.TryReserveUnusedPages(null, limitPage,
                                                              destPage - limitPage,
                                                              nurseryGeneration,
                                                              ref fCleanPages);
                        VTable.Assert(status);
                        MakeZombiePages(limitPage, destPage - limitPage,
                                        destGeneration);
                        while (destPage < endTestPage &&
                               IsMyZombiePage(destPage))
                        {
                            destPage++;
                        }
                        limitPage = destPage;
                    }
                    destLimit = PageTable.PageAddr(limitPage);
                    return;
                }
            }
            // In-place expansion failed: the current area is abandoned.
            // A partially used page is not "skipped", so start from the
            // next page boundary.
            if (destCursor != pageAddr)
            {
                cursorPage++;
            }
            if (cursorPage != limitPage)
            {
                this.RegisterSkippedPages(cursorPage, limitPage);
            }
            // Find new region big enough to contain object
            UIntPtr neededPages = PageTable.PageCount(objectSize);
            UIntPtr prefixPage;

            while (true)
            {
                // Advance to the next zombie run [cursorPage, limitPage).
                do
                {
                    destPage++;
                } while (!IsMyZombiePage(destPage));
                cursorPage = destPage;
                prefixPage = cursorPage;
                do
                {
                    destPage++;
                } while (IsMyZombiePage(destPage));
                limitPage = destPage;
                if (neededPages <= limitPage - cursorPage)
                {
                    break;
                }
                // Check for following unused pages
                endTestPage = cursorPage + neededPages;
                VTable.Assert(endTestPage <= PageTable.pageTableCount);
                while (destPage < endTestPage &&
                       (PageTable.IsUnusedPage(destPage) ||
                        (IsMyZombiePage(destPage))))
                {
                    destPage++;
                }
                if (destPage == endTestPage)
                {
                    break;
                }
                // Check for preceding unused pages
                if (destPage >= neededPages)
                {
                    endTestPage = destPage - neededPages;
                    prefixPage  = cursorPage - 1;
                    while (prefixPage >= UIntPtr.Zero &&
                           PageTable.IsUnusedPage(prefixPage))
                    {
                        prefixPage--;
                    }
                    prefixPage++;
                    if (prefixPage == endTestPage)
                    {
                        break;
                    }
                }
                // Register any skipped regions of pages
                this.RegisterSkippedPages(cursorPage, limitPage);
                while (limitPage < destPage)
                {
                    VTable.Assert(PageTable.IsUnusedPage(limitPage));
                    do
                    {
                        limitPage++;
                    } while (limitPage < destPage &&
                             PageTable.IsUnusedPage(limitPage));
                    cursorPage = limitPage;
                    while (limitPage < destPage && IsMyZombiePage(limitPage))
                    {
                        limitPage++;
                    }
                    if (cursorPage != limitPage)
                    {
                        this.RegisterSkippedPages(cursorPage, limitPage);
                    }
                }
            }
            // We found an area big enough.  Commit the pre- and
            // postfix areas of unused pages
            if (prefixPage != cursorPage)
            {
                bool fCleanPages = true;
                bool status      =
                    PageManager.TryReserveUnusedPages(null, prefixPage,
                                                      cursorPage - prefixPage,
                                                      nurseryGeneration,
                                                      ref fCleanPages);
                VTable.Assert(status);
                MakeZombiePages(prefixPage, cursorPage - prefixPage,
                                destGeneration);
            }
            while (destPage != limitPage)
            {
                // Mark the region of unused pages as fromspace
                UIntPtr unusedPage = limitPage;
                VTable.Assert(PageTable.IsUnusedPage(unusedPage));
                do
                {
                    unusedPage++;
                } while (unusedPage < destPage &&
                         PageTable.IsUnusedPage(unusedPage));
                bool fCleanPages = true;
                bool status      =
                    PageManager.TryReserveUnusedPages(null, limitPage,
                                                      unusedPage - limitPage,
                                                      nurseryGeneration,
                                                      ref fCleanPages);
                VTable.Assert(status);
                MakeZombiePages(limitPage, unusedPage - limitPage,
                                destGeneration);
                // Skip any sections of pages already marked as fromspace
                limitPage = unusedPage;
                while (limitPage < destPage && IsMyZombiePage(limitPage))
                {
                    limitPage++;
                }
            }
            destCursor = PageTable.PageAddr(prefixPage);
            destLimit  = PageTable.PageAddr(limitPage);
            // Take ownership of the new pages
            InteriorPtrTable.ClearFirst(prefixPage, limitPage);
            InteriorPtrTable.SetFirst(destCursor + PreHeader.Size);
            if (GC.remsetType == RemSetType.Cards)
            {
                OffsetTable.ClearLast(PageTable.PageAddr(prefixPage),
                                      PageTable.PageAddr(limitPage) - 1);
            }
        }
Example #11
0
            // Clears the tail of the page containing postPinnedAddr
            // (everything from postPinnedAddr to the page boundary),
            // fixing up the InteriorPtrTable and, for card-based remsets,
            // the OffsetTable entries that referred to the cleared space.
            // A page-aligned postPinnedAddr means there is no tail: no-op.
            private static void CleanPageTail(UIntPtr postPinnedAddr)
            {
                if (!PageTable.PageAligned(postPinnedAddr))
                {
                    // If postPinnedAddr points to the first object on its page,
                    // then we are removing all objects (specifically the part
                    // of the object that the InteriorPtrTable tracks, the
                    // vtables) from the page, so we should clear the page's
                    // entry in the InteriorPtrTable.

                    UIntPtr page        = PageTable.Page(postPinnedAddr);
                    UIntPtr firstObjPtr = InteriorPtrTable.First(page);
                    if (firstObjPtr > postPinnedAddr)
                    {
                        VTable.Assert
                            (firstObjPtr - PreHeader.Size >= postPinnedAddr,
                            "postPinnedAddr should not point to the "
                            + "interior of an object (1)");
                        InteriorPtrTable.ClearFirst(page);
                    }
                    else if (!BumpAllocator.IsUnusedSpace(firstObjPtr))
                    {
                        // The first object survives; it must end at or
                        // before postPinnedAddr.
                        UIntPtr firstObjSize =
                            InteriorPtrTable.ObjectSize(firstObjPtr);
                        VTable.Assert
                            (firstObjPtr + firstObjSize - PreHeader.Size
                            <= postPinnedAddr,
                            "postPinnedAddr should not point to the "
                            + "interior of an object (2)");
                    }

                    // Zero the page tail and mark it as unused space.
                    UIntPtr byteCount = PageTable.PagePad(postPinnedAddr)
                                        - postPinnedAddr;
                    Util.MemClear(postPinnedAddr, byteCount);
                    BumpAllocator.WriteUnusedMarker(postPinnedAddr);

                    if (GC.remsetType == RemSetType.Cards && byteCount > 0)
                    {
                        UIntPtr firstCard = CardTable.CardNo(postPinnedAddr);
                        UIntPtr lastCard  =
                            CardTable.CardNo(postPinnedAddr + byteCount - 1);

                        // The first card straddles the boundary: keep its
                        // offset entry only if the recorded object lies
                        // entirely below postPinnedAddr.
                        if (!OffsetTable.NoObjectPtrToTheCard(firstCard))
                        {
                            UIntPtr offset = OffsetTable.GetOffset(firstCard);
                            UIntPtr objPtr =
                                CardTable.CardAddr(firstCard) + offset;
                            UIntPtr size = OffsetTable.ObjectSize(objPtr);

                            VTable.Assert
                                ((objPtr + size - PreHeader.Size
                                  <= postPinnedAddr) ||
                                (objPtr >= postPinnedAddr),
                                "Object should be totally "
                                + "above or below postPinnedAddr");
                            if (objPtr >= postPinnedAddr)
                            {
                                OffsetTable.ClearCards(firstCard, firstCard);
                            }
                        }

                        // All later cards cover only cleared space.
                        OffsetTable.ClearCards(firstCard + 1, lastCard);
                    }
                }
            }
Example #12
0
        // Extends the current allocation region by reserving more pages at
        // reserveLimit, then carves an object of `bytes` bytes (with the
        // requested alignment) out of the extended region.  Returns the
        // object address, or UIntPtr.Zero if the region cannot be extended
        // (no region, pages unavailable, or a GC intervened).
        private UIntPtr ExtendAlloc(UIntPtr bytes, uint alignment,
                                    Thread currentThread)
        {
            if (this.reserveLimit == UIntPtr.Zero)
            {
                // No current region to extend.
                return(UIntPtr.Zero);
            }
#if SINGULARITY_KERNEL
            Kernel.Waypoint(700);
#endif
            UIntPtr neededBytes =
                bytes +                              // Bytes required for object +
                alignment - UIntPtr.Size -           // worst case alignment overhead +
                (this.reserveLimit - this.allocPtr); // bytes already available
            UIntPtr paddedNeed  = PageTable.PagePad(neededBytes);
            UIntPtr pageCount   = PageTable.PageCount(paddedNeed);
            UIntPtr startPage   = PageTable.Page(this.reserveLimit);
            bool    fCleanPages = CLEAR_POOL_PAGES();
            bool    gotPages    =
                PageManager.TryReserveUnusedPages(currentThread, startPage,
                                                  pageCount, this.pageType,
                                                  ref fCleanPages);
            if (!gotPages)
            {
                // We can't indiscriminately ask for more memory if we have
                // unused pages already available.
                return(UIntPtr.Zero);
            }
            if (this.reserveLimit == UIntPtr.Zero)
            {
                // A collection occurred, so there is no region to extend
                PageManager.ReleaseUnusedPages(startPage, pageCount,
                                               fCleanPages);
                return(UIntPtr.Zero);
            }
            BaseCollector.IncrementNewBytesSinceGC(paddedNeed);
            this.allocNew = this.reserveLimit;
            // Pad alignment space if necessary.  NB: a prior call to
            // AllocateFast may have started generating alignment tokens,
            // but we may need to finish the job here if the residual space
            // was insufficient for a multi-word alignment.
            UIntPtr oldReserveLimit = this.reserveLimit;
            this.reserveLimit += paddedNeed;
            this.allocPtr      =
                Allocator.AlignedAllocationPtr(this.allocPtr,
                                               this.reserveLimit,
                                               alignment);
            if (this.zeroedLimit < this.allocPtr)
            {
                this.zeroedLimit = this.allocPtr;
            }
            // The object body starts past its preheader.
            UIntPtr objectAddr = this.allocPtr + PreHeader.Size;
            this.allocPtr += bytes;
            if (fCleanPages)
            {
                // New pages are already clean; just zero any dirty gap
                // left below the old limit.
                if (this.zeroedLimit < oldReserveLimit)
                {
                    Util.MemClear(this.zeroedLimit,
                                  oldReserveLimit - this.zeroedLimit);
                }
                this.zeroedLimit = this.reserveLimit;
            }
            else
            {
                // Pages may be dirty: zero exactly what the object needs.
                Util.MemClear(this.zeroedLimit,
                              this.allocPtr - this.zeroedLimit);
                this.zeroedLimit = this.allocPtr;
            }
            VTable.Assert(this.allocPtr <= this.zeroedLimit);
            VTable.Assert(PageTable.PageAligned(this.reserveLimit));
            if (objectAddr >= oldReserveLimit)
            {
                // Object is first on new page
                InteriorPtrTable.SetFirst(objectAddr);
            }
            else if (objectAddr + bytes < this.reserveLimit)
            {
                // The object does not end on new limit

                // N.B. The next object may not be allocated at exactly
                // (objectAddr + bytes) due to alignment considerations.  It
                // also might not ever be allocated.  These cases are handled
                // by InteriorPtrTable.First skipping over alignment tokens
                // and callers of First watching out for unused space tokens.

                InteriorPtrTable.SetFirst(objectAddr + bytes);
            }
            // We know an object is located as the last one in a page
            // when it extends through the page to the next.
            // Otherwise, it is totally before or below the page, and
            // we are not sure whether it is the last object or not.
            // So record only such an object for the last card in that
            // page. Many objects may have been omitted due to
            // this coarse-grain recording. But we should be able
            // to incrementally update the offset table and find them.
            // I believe this is a better choice than simply recording
            // any object to the offset table, because most objects
            // may just die and need not to record.

#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
            if (GC.remsetType == RemSetType.Cards)
            {
                // NOTE(review): this.allocPtr was already advanced by
                // `bytes` above, so `allocPtr + bytes` appears to count
                // `bytes` twice; the straddle test presumably intends
                // `objectAddr + bytes > oldReserveLimit` -- confirm.
                if (objectAddr < oldReserveLimit &&
                    allocPtr + bytes > oldReserveLimit)
                {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                    OffsetTable.SetLast(objectAddr);
#endif
                }
            }
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint(701);
#endif
            return(objectAddr);
        }