Example #1
 private unsafe void CompactHeapObjects(UIntPtr previousEnd)
 {
     while (!this.relocationQueue.IsEmpty)
     {
         UIntPtr sourceAddress      = this.relocationQueue.Read();
         UIntPtr destinationAddress = this.relocationQueue.Read();
         UIntPtr runLength          = this.relocationQueue.Read();
         if (previousEnd != destinationAddress)
         {
             VTable.Assert(previousEnd < destinationAddress);
             if (PageTable.Page(destinationAddress) !=
                 PageTable.Page(previousEnd + PreHeader.Size))
             {
                 if (!PageTable.PageAligned(previousEnd))
                 {
                     UIntPtr pageLimit = PageTable.PagePad(previousEnd);
                     BumpAllocator.WriteUnusedMarker(previousEnd);
                     previousEnd += UIntPtr.Size;
                     Util.MemClear(previousEnd,
                                   pageLimit - previousEnd);
                 }
                 if (!PageTable.PageAligned(destinationAddress))
                 {
                     // This only happens before pinned objects and
                     // large objects
                     UIntPtr start =
                         PageTable.PageAlign(destinationAddress);
                     VTable.Assert(previousEnd <= start);
                     while (start < destinationAddress)
                     {
                         Allocator.WriteAlignment(start);
                         start += UIntPtr.Size;
                     }
                 }
                 UIntPtr objAddr = destinationAddress + PreHeader.Size;
                 InteriorPtrTable.SetFirst(objAddr);
             }
             else
             {
                 VTable.Assert(previousEnd < destinationAddress);
                 UIntPtr start = previousEnd;
                 while (start < destinationAddress)
                 {
                     Allocator.WriteAlignment(start);
                     start += UIntPtr.Size;
                 }
             }
         }
         Util.MemCopy(destinationAddress, sourceAddress, runLength);
         previousEnd = destinationAddress + runLength;
     }
     // Zero out the end of the allocation page
     if (!PageTable.PageAligned(previousEnd))
     {
         UIntPtr pageLimit = PageTable.PagePad(previousEnd);
         Util.MemClear(previousEnd, pageLimit - previousEnd);
     }
     this.relocationQueue.Cleanup(true);
 }
Example #2
        private UIntPtr FreshAlloc(UIntPtr bytes, uint alignment,
                                   Thread currentThread)
        {
#if SINGULARITY_KERNEL
            Kernel.Waypoint(702);
#endif
            this.Truncate();
            UIntPtr paddedBytes =
                PageTable.PagePad(bytes + alignment - UIntPtr.Size);
            BaseCollector.IncrementNewBytesSinceGC(paddedBytes);
            UIntPtr pages       = PageTable.PageCount(paddedBytes);
            bool    fCleanPages = CLEAR_POOL_PAGES();
            // We may eventually want to ask for specific pages
            // between asking if any pages are reusable and asking the
            // OS for any possible page.
            UIntPtr startPage =
                PageManager.EnsurePages(currentThread, pages, this.pageType,
                                        ref fCleanPages);
            UIntPtr startAddr = PageTable.PageAddr(startPage);
            UIntPtr limitAddr = PageTable.PageAddr(startPage + pages);
            startAddr = Allocator.AlignedAllocationPtr(startAddr, limitAddr,
                                                       alignment);
            this.allocNew = startAddr;
            this.allocPtr = startAddr + bytes;
            if (fCleanPages)
            {
                this.zeroedLimit = limitAddr;
            }
            else
            {
                Util.MemClear(startAddr, bytes);
                this.zeroedLimit = this.allocPtr;
            }
            this.reserveLimit = limitAddr;
            UIntPtr resultAddr = startAddr + PreHeader.Size;
            InteriorPtrTable.SetFirst(resultAddr);

#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
            if (GC.remsetType == RemSetType.Cards)
            {
                UIntPtr nextPageAddr = startAddr + PageTable.PageSize;
                VTable.Assert(resultAddr < nextPageAddr);
                if (this.allocPtr > nextPageAddr)
                {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                    OffsetTable.SetLast(resultAddr);
#endif
                }
            }
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint(703);
#endif
            return(resultAddr);
        }
Example #3
        // Finds the object base for an interior pointer.  In the case of a
        // pointer to the tail of an object and the head of another, it will
        // return the former object (the one whose tail we point at).  To
        // get the base pointer for a pointer into the pre-header, you should
        // add PreHeader.Size before calling this.
        internal static UIntPtr Find(UIntPtr addr)
        {
            UIntPtr page     = PageTable.Page(addr);
            UIntPtr currAddr = InteriorPtrTable.First(page);

            // Look out for the unused space token: this page may not
            // have been completely allocated: its "first" object might not
            // be valid.
            if (BumpAllocator.IsUnusedSpace(currAddr) || currAddr > addr)
            {
                // Back up to the previous object.  Should be fast
                // since First updated the InteriorPtrTable entries.
                currAddr = Before(PageTable.PageAddr(page));
            }
            VTable.Assert(!BumpAllocator.IsUnusedSpace(currAddr),
                          "InteriorPtrTable.Find 0");
            VTable.Assert(currAddr <= addr, "InteriorPtrTable.Find 1");
            while (true)
            {
                // Watch out for alignment padding; advance the pointer if
                // it points to a syncblock index rather than a vtable
                // pointer.  Note that we must do this before scrolling,
                // since the page table value was set before we knew the
                // required alignment.
                if (Allocator.IsAlignment(currAddr))
                {
                    currAddr += UIntPtr.Size;
                }
                else if (BumpAllocator.IsUnusedSpace(currAddr))
                {
                    UIntPtr postAddr =
                        PageTable.PagePad(currAddr) + PreHeader.Size;
                    VTable.Assert(postAddr <= addr, "InteriorPtrTable.Find 2");
                    currAddr = postAddr;
                }
                else
                {
                    VTable.Assert(currAddr <= addr, "InteriorPtrTable.Find 3");
                    UIntPtr size = ObjectSize(currAddr);
                    VTable.Assert(size >= UIntPtr.Zero,
                                  "InteriorPtrTable.Find 4");
                    UIntPtr postAddr = currAddr + size;
                    if (postAddr > addr)
                    {
                        return(currAddr);
                    }
                    else
                    {
                        currAddr = postAddr;
                    }
                }
            }
        }
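The comment above notes that a pointer into an object's pre-header must be adjusted by PreHeader.Size before calling Find. A minimal sketch of that caller-side adjustment, assuming the caller knows it holds a pre-header pointer; the helper name and the isPreHeaderPtr flag are hypothetical and not part of the original code:

        // Hypothetical caller: resolve an interior pointer to its object base.
        // If the pointer targets the pre-header, skip past it first, as the
        // comment on Find requires.
        internal static UIntPtr ResolveInteriorPtr(UIntPtr addr, bool isPreHeaderPtr)
        {
            if (isPreHeaderPtr)
            {
                addr += PreHeader.Size;   // move into the object proper
            }
            return InteriorPtrTable.Find(addr);
        }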
Example #4
        private unsafe void SkipDestinationAreas(ref UIntPtr destPage,
                                                 UIntPtr destCursor,
                                                 ref UIntPtr destLimit,
                                                 UIntPtr sourceCursor)
        {
            UIntPtr cursorPage = PageTable.Page(destCursor);
            UIntPtr sourcePage = PageTable.Page(sourceCursor);

            if (cursorPage != sourcePage)
            {
                UIntPtr destPageLimit = PageTable.PagePad(destCursor);
                if (destPageLimit != destCursor)
                {
                    cursorPage++;
                }
                VTable.Assert(PageTable.PageAligned(destLimit));
                UIntPtr limitPage = PageTable.Page(destLimit);
                while (destPage < sourcePage)
                {
                    if (cursorPage < limitPage)
                    {
                        this.RegisterSkippedPages(cursorPage, limitPage);
                    }
                    do
                    {
                        destPage++;
                    } while (!IsMyZombiePage(destPage));
                    cursorPage = destPage;
                    do
                    {
                        destPage++;
                    } while (IsMyZombiePage(destPage));
                    limitPage = destPage;
                }
                destLimit = PageTable.PageAddr(limitPage);
                VTable.Assert(destPage > sourcePage);
                VTable.Assert(cursorPage <= sourcePage);
                if (cursorPage < sourcePage)
                {
                    this.RegisterSkippedPages(cursorPage, sourcePage);
                    cursorPage = sourcePage;
                }
                InteriorPtrTable.ClearFirst(cursorPage, destPage);
                InteriorPtrTable.SetFirst(sourceCursor + PreHeader.Size);
                if (GC.remsetType == RemSetType.Cards)
                {
                    OffsetTable.ClearLast(PageTable.PageAddr(cursorPage),
                                          PageTable.PageAddr(destPage) - 1);
                }
            }
        }
Example #5
            internal unsafe override void Visit(UIntPtr *loc)
            {
                UIntPtr addr = *loc;
                UIntPtr page = PageTable.Page(addr);

                Trace.Log(Trace.Area.Pointer,
                          "FwdThrRef: loc={0}, addr={1}, page={2}",
                          __arglist(loc, addr, page));
                PageType pageType = PageTable.Type(page);

                // If an object "spills" into a page that
                // is pinned, and the object is copied
                // during a collection, we will end up with
                // the first part of the object in a zombie page
                // and the second part of the object in a GC page.
                // We need to find the start of the object and
                // use that to determine whether the object has
                // been moved.
                if (!PageTable.IsZombiePage(pageType) &&
                    !PageTable.IsGcPage(pageType))
                {
                    VTable.Assert(PageTable.IsNonGcPage(pageType) ||
                                  PageTable.IsStackPage(pageType) ||
                                  PageTable.IsSharedPage(pageType) ||
                                  VTable.BuildC2Mods,
                                  "Semispace:ForwardThreadReference");
                    return;
                }

                UIntPtr objectPtr = InteriorPtrTable.Find(addr);

                if (objectPtr == addr)
                {
                    generalReferenceVisitor.Visit(loc);
                }
                else
                {
                    // We can check the page type of
                    // objectPtr here to see if it is a zombie page.
                    // If it is, we can just return.
                    UIntPtr newObject = objectPtr;
                    generalReferenceVisitor.Visit(&newObject);
                    UIntPtr newAddr = newObject + (addr - objectPtr);
                    Trace.Log(Trace.Area.Pointer,
                              "FwdThrRef: {0} -> {1}",
                              __arglist(addr, newAddr));
                    *loc = newAddr;
                }
            }
Example #6
            // BUGBUG: We are allocating an ArrayList while the collector
            // is running.  If the ArrayList gets big enough to be
            // allocated in the older generation, then the RemSet has the
            // potential to overflow since the boxed integers will reside
            // in the young generation.  We should eventually eliminate
            // the use of ArrayList in this class as well as avoid boxing
            // the page indices.

            internal unsafe override void Visit(UIntPtr *loc)
            {
                UIntPtr  addr     = *loc;
                UIntPtr  page     = PageTable.Page(addr);
                PageType pageType = PageTable.Type(page);

                if (!PageTable.IsZombiePage(pageType))
                {
                    VTable.Assert(PageTable.IsGcPage(pageType) ||
                                  PageTable.IsNonGcPage(pageType) ||
                                  PageTable.IsStackPage(pageType) ||
                                  PageTable.IsSharedPage(pageType) ||
                                  VTable.BuildC2Mods,
                                  "Semispace:RegisterPinnedReference:1");
                    return;
                }
                PageType gen = PageTable.ZombieToLive(pageType);
                UIntPtr  pinnedObjectAddr = InteriorPtrTable.Find(addr);

                if (pinnedPageList == null)
                {
                    pinnedPageList = new ArrayList();
                    comparer       = new UIntPtrComparer();
                }
                Object  pinnedObject = Magic.fromAddress(pinnedObjectAddr);
                UIntPtr objectSize   =
                    ObjectLayout.ObjectSize(pinnedObjectAddr,
                                            pinnedObject.vtable);
                UIntPtr beforeObjectAddr = pinnedObjectAddr - PreHeader.Size;
                UIntPtr pastObjectAddr   = beforeObjectAddr + objectSize;
                UIntPtr firstPage        = PageTable.Page(beforeObjectAddr);
                UIntPtr lastPage         = PageTable.Page(pastObjectAddr - 1);

                for (UIntPtr i = firstPage; i <= lastPage; i++)
                {
                    if (!pinnedPageList.Contains(i))
                    {
                        Trace.Log(Trace.Area.Pointer,
                                  "RegPin: ptr={0} page={1} gen={2}",
                                  __arglist(pinnedObjectAddr, i, gen));
                        GenerationalCollector.gcPromotedTable[(int)gen - 1] +=
                            PageTable.PageSize;
                        pinnedPageList.Add(i);
                    }
                }
            }
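The BUGBUG note above suggests eliminating the ArrayList and the boxing of page indices. Purely as an illustration of that direction, here is a minimal sketch of an unboxed page-index list; the UIntPtrList name and its growth policy are assumptions, and a real fix would also need to preallocate its storage so nothing is allocated while the collector runs:

            // Hypothetical unboxed list of page indices.  Unlike ArrayList,
            // adding a UIntPtr here does not allocate a boxed object per
            // element; a real fix would preallocate 'items' so no allocation
            // happens during collection.
            internal sealed class UIntPtrList
            {
                private UIntPtr[] items = new UIntPtr[16];
                private int count;

                internal int Count { get { return count; } }

                internal void Add(UIntPtr value)
                {
                    if (count == items.Length)
                    {
                        // Grow the backing array; copies raw values only.
                        UIntPtr[] bigger = new UIntPtr[items.Length * 2];
                        System.Array.Copy(items, bigger, count);
                        items = bigger;
                    }
                    items[count++] = value;
                }

                internal bool Contains(UIntPtr value)
                {
                    for (int i = 0; i < count; i++)
                    {
                        if (items[i] == value)
                        {
                            return true;
                        }
                    }
                    return false;
                }
            }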
Example #7
            private static UIntPtr PostPinnedObjectAddr(UIntPtr endPage)
            {
                UIntPtr endAddr            = PageTable.PageAddr(endPage);
                UIntPtr postLastObjectAddr = InteriorPtrTable.Last(endPage - 1);

                if (postLastObjectAddr < endAddr &&
                    !BumpAllocator.IsUnusedSpace(postLastObjectAddr))
                {
                    // If the last object straddles into the next page,
                    // return the location just past that object
                    Object  lastObject     = Magic.fromAddress(postLastObjectAddr);
                    UIntPtr lastObjectSize =
                        ObjectLayout.ObjectSize(postLastObjectAddr,
                                                lastObject.vtable);
                    postLastObjectAddr += lastObjectSize;
                }
                return(postLastObjectAddr - PreHeader.Size);
            }
Example #8
            internal unsafe override void Visit(UIntPtr *loc)
            {
                UIntPtr  addr     = *loc;
                UIntPtr  page     = PageTable.Page(addr);
                PageType pageType = PageTable.Type(page);

                if (!PageTable.IsZombiePage(pageType))
                {
                    VTable.Assert(PageTable.IsGcPage(pageType) ||
                                  PageTable.IsNonGcPage(pageType) ||
                                  PageTable.IsStackPage(pageType) ||
                                  PageTable.IsSharedPage(pageType));
                    return;
                }
                UIntPtr objectAddr = InteriorPtrTable.Find(addr);

                this.threadPtrQueue.Write(objectAddr);
                this.threadPtrQueue.Write(addr - objectAddr);
            }
Example #9
            internal unsafe override void Visit(UIntPtr *loc)
            {
                UIntPtr  addr     = *loc;
                UIntPtr  page     = PageTable.Page(addr);
                PageType pageType = PageTable.Type(page);

                if (!PageTable.IsZombiePage(pageType))
                {
                    VTable.Assert(PageTable.IsGcPage(pageType) ||
                                  PageTable.IsNonGcPage(pageType) ||
                                  PageTable.IsStackPage(pageType) ||
                                  PageTable.IsSharedPage(pageType));
                    return;
                }
                UIntPtr objectAddr = InteriorPtrTable.Find(addr);

                registerThreadReferenceVisitor.threadPtrQueue.Write(objectAddr);
                registerThreadReferenceVisitor.threadPtrQueue.Write(addr - objectAddr);
                *Allocator.GetObjectVTableAddress(objectAddr) |= (UIntPtr)2U;
            }
Example #10
        void VisitObjects(ObjectLayout.ObjectVisitor objectVisitor,
                          UIntPtr lowAddr, UIntPtr highAddr)
        {
            UIntPtr oldAddr    = UIntPtr.Zero;
            UIntPtr objectAddr = lowAddr + PreHeader.Size;

            objectAddr = BumpAllocator.SkipNonObjectData(objectAddr, highAddr);
            while (objectAddr < highAddr)
            {
                if (PageTable.Page(objectAddr) != PageTable.Page(oldAddr))
                {
                    InteriorPtrTable.VerifyFirst(oldAddr, objectAddr);
                }
                oldAddr = objectAddr;
                Object  obj        = Magic.fromAddress(objectAddr);
                UIntPtr objectSize = objectVisitor.Visit(obj);
                objectAddr += objectSize;
                objectAddr  =
                    BumpAllocator.SkipNonObjectData(objectAddr, highAddr);
            }
            VTable.Assert(objectAddr - PreHeader.Size <= highAddr);
        }
Example #11
        private void CompactPhaseCleanup(Thread currentThread,
                                         PageType generation,
                                         UIntPtr newLimitPtr)
        {
            VTable.Assert(IsValidGeneration((int)generation));

            registerThreadReferenceVisitor.Cleanup();
            // Free up skipped pages
            while (!this.skippedPageQueue.IsEmpty)
            {
                UIntPtr start  = this.skippedPageQueue.Read();
                UIntPtr finish = this.skippedPageQueue.Read();
                InteriorPtrTable.ClearFirst(start, finish);
                PageManager.FreePageRange(start, finish);
                if (GC.remsetType == RemSetType.Cards)
                {
                    OffsetTable.ClearLast(PageTable.PageAddr(start),
                                          PageTable.PageAddr(finish) - 1);
                }
            }
            this.skippedPageQueue.Cleanup(true);
            // Release the queue standby pages
            UnmanagedPageList.ReleaseStandbyPages();
            // Update the ownership information for the copied data
            PageType destGeneration =
                (generation == MAX_GENERATION) ?
                MAX_GENERATION :
                (PageType)(generation + 1);
            UIntPtr limitPage =
                PageTable.Page(PageTable.PagePad(newLimitPtr));

            for (UIntPtr i = UIntPtr.Zero; i < limitPage; i++)
            {
                if (IsMyZombiePage(i))
                {
                    PageTable.SetType(i, (PageType)destGeneration);
                }
            }
        }
Example #12
        /*
         * Returns a pointer to the first object on the given page.
         * N.B. If called on a page with no ~allocated~ first object it may
         * return a pointer to the unused space token.
         */
        internal static UIntPtr First(UIntPtr page)
        {
            uint    offset   = PageTable.Extra(page);
            UIntPtr pageAddr = PageTable.PageAddr(page);
            UIntPtr currAddr;

            if (offset != OFFSET_NO_DATA)
            {
                currAddr = pageAddr + (offset - OFFSET_SKEW);
            }
            else
            {
                currAddr = Before(pageAddr);
                VTable.Assert(currAddr <= pageAddr);
                UIntPtr nextPageStart = PageTable.PagePad(currAddr + 1);
                while (currAddr < pageAddr)
                {
                    if (Allocator.IsAlignment(currAddr))
                    {
                        currAddr += UIntPtr.Size;
                    }
                    else if (BumpAllocator.IsUnusedSpace(currAddr))
                    {
                        currAddr = PageTable.PagePad(currAddr) + PreHeader.Size;
                    }
                    else
                    {
                        if (currAddr >= nextPageStart)
                        {
                            InteriorPtrTable.SetFirst(currAddr);
                            nextPageStart = PageTable.PagePad(currAddr + 1);
                        }
                        currAddr += ObjectSize(currAddr);
                    }
                }
            }
            currAddr = Allocator.SkipAlignment(currAddr);
            return(currAddr);
        }
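As the header comment warns, First may hand back the unused-space token when the page's first object was never allocated. A minimal sketch of the defensive pattern its callers follow (compare the checks at the top of Find in Example #3 and Last in Example #22); the helper name is hypothetical:

        // Hypothetical caller: get a valid object pointer at or before the
        // start of the page, falling back past the unused-space token the way
        // Find and Last do.
        internal static UIntPtr FirstValidBefore(UIntPtr page)
        {
            UIntPtr currAddr = InteriorPtrTable.First(page);
            if (BumpAllocator.IsUnusedSpace(currAddr))
            {
                // Back up to the object that precedes the page boundary.
                currAddr = Before(PageTable.PageAddr(page));
            }
            return currAddr;
        }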
Example #13
        internal static void ReclaimZombiePages(UIntPtr heapPageCount,
                                                int generation)
        {
            // to indicate if we want to release pages back to the OS
            bool    releasePages = true;
            UIntPtr reservePages = UIntPtr.Zero;

            if (generation == (int)nurseryGeneration)
            {
                // don't bother when we do nursery collection since
                // nursery size is small.
                releasePages = false;
            }
            else
            {
                reservePages = heapPageCount;
                UIntPtr alreadyReservedPages = PageManager.TotalUnusedPages();
                if (reservePages > alreadyReservedPages)
                {
                    reservePages = reservePages - alreadyReservedPages;
                }
                else
                {
                    reservePages = UIntPtr.Zero;
                }
            }

            // MarkZombiePages updates the range for this generation, so we do
            // not need to take the union of the ranges of all target generations
            UIntPtr minZombiePage = MinGenPage[generation];
            UIntPtr maxZombiePage = MaxGenPage[generation];

            for (UIntPtr i = minZombiePage; i <= maxZombiePage; i++)
            {
                if (IsMyZombiePage(i))
                {
                    UIntPtr startPage = i;
                    UIntPtr endPage   = startPage;
                    do
                    {
                        endPage++;
                    } while (IsMyZombiePage(endPage));
                    InteriorPtrTable.ClearFirst(startPage, endPage);
                    if (GC.remsetType == RemSetType.Cards)
                    {
                        OffsetTable.ClearLast(PageTable.PageAddr(startPage),
                                              PageTable.PageAddr(endPage) - 1);
                    }
                    if (!releasePages)
                    {
                        // Don't need to worry about giving the pages back
                        // Zero out the memory for reuse
                        UIntPtr pageCount = endPage - startPage;
                        PageManager.ReleaseUnusedPages(startPage,
                                                       pageCount,
                                                       false);
                    }
                    else if (reservePages > UIntPtr.Zero)
                    {
                        // Keep sufficient pages for the new nursery
                        UIntPtr pageCount = endPage - startPage;
                        if (pageCount > reservePages)
                        {
                            // Zero out the memory for reuse
                            PageManager.ReleaseUnusedPages(startPage,
                                                           reservePages,
                                                           false);
                            startPage += reservePages;
                            PageManager.FreePageRange(startPage, endPage);
                            reservePages = UIntPtr.Zero;
                        }
                        else
                        {
                            // Zero out the memory for reuse
                            PageManager.ReleaseUnusedPages(startPage,
                                                           pageCount,
                                                           false);
                            reservePages = reservePages - pageCount;
                        }
                    }
                    else
                    {
                        PageManager.FreePageRange(startPage, endPage);
                    }
                    i = endPage - 1;
                }
            }
        }
Example #14
 internal override UIntPtr FindObjectAddr(UIntPtr interiorPtr)
 {
     return(InteriorPtrTable.Find(interiorPtr));
 }
Example #15
        private unsafe void FindDestinationArea(ref UIntPtr destPage,
                                                ref UIntPtr destCursor,
                                                ref UIntPtr destLimit,
                                                UIntPtr objectSize,
                                                PageType destGeneration)
        {
            VTable.Assert(IsValidGeneration((int)destGeneration));

            UIntPtr cursorPage  = PageTable.Page(destCursor);
            UIntPtr limitPage   = PageTable.Page(destLimit);
            UIntPtr pageAddr    = PageTable.PagePad(destCursor);
            UIntPtr testPage    = limitPage;
            UIntPtr endTestPage = PageTable.PageCount(destCursor + objectSize);

            if (destCursor > UIntPtr.Zero &&
                IsMyZombiePage(PageTable.Page(destCursor - 1)))
            {
                VTable.Assert(destPage == limitPage);
                while (IsMyZombiePage(testPage) ||
                       (testPage < endTestPage &&
                        (PageTable.IsUnusedPage(testPage))))
                {
                    testPage++;
                }
                if (testPage >= endTestPage)
                {
                    // We can expand the current region
                    endTestPage = testPage;
                    VTable.Assert(PageTable.PageAligned(destLimit));
                    InteriorPtrTable.ClearFirst(limitPage, testPage);
                    if (GC.remsetType == RemSetType.Cards)
                    {
                        OffsetTable.ClearLast(PageTable.PageAddr(limitPage),
                                              PageTable.PageAddr(testPage) - 1);
                    }
                    while (limitPage != endTestPage)
                    {
                        VTable.Assert(PageTable.IsUnusedPage(destPage));
                        do
                        {
                            destPage++;
                        } while (destPage < endTestPage &&
                                 PageTable.IsUnusedPage(destPage));
                        bool fCleanPages = true;
                        bool status      =
                            PageManager.TryReserveUnusedPages(null, limitPage,
                                                              destPage - limitPage,
                                                              nurseryGeneration,
                                                              ref fCleanPages);
                        VTable.Assert(status);
                        MakeZombiePages(limitPage, destPage - limitPage,
                                        destGeneration);
                        while (destPage < endTestPage &&
                               IsMyZombiePage(destPage))
                        {
                            destPage++;
                        }
                        limitPage = destPage;
                    }
                    destLimit = PageTable.PageAddr(limitPage);
                    return;
                }
            }
            if (destCursor != pageAddr)
            {
                cursorPage++;
            }
            if (cursorPage != limitPage)
            {
                this.RegisterSkippedPages(cursorPage, limitPage);
            }
            // Find new region big enough to contain object
            UIntPtr neededPages = PageTable.PageCount(objectSize);
            UIntPtr prefixPage;

            while (true)
            {
                do
                {
                    destPage++;
                } while (!IsMyZombiePage(destPage));
                cursorPage = destPage;
                prefixPage = cursorPage;
                do
                {
                    destPage++;
                } while (IsMyZombiePage(destPage));
                limitPage = destPage;
                if (neededPages <= limitPage - cursorPage)
                {
                    break;
                }
                // Check for following unused pages
                endTestPage = cursorPage + neededPages;
                VTable.Assert(endTestPage <= PageTable.pageTableCount);
                while (destPage < endTestPage &&
                       (PageTable.IsUnusedPage(destPage) ||
                        (IsMyZombiePage(destPage))))
                {
                    destPage++;
                }
                if (destPage == endTestPage)
                {
                    break;
                }
                // Check for preceding unused pages
                if (destPage >= neededPages)
                {
                    endTestPage = destPage - neededPages;
                    prefixPage  = cursorPage - 1;
                    while (prefixPage >= UIntPtr.Zero &&
                           PageTable.IsUnusedPage(prefixPage))
                    {
                        prefixPage--;
                    }
                    prefixPage++;
                    if (prefixPage == endTestPage)
                    {
                        break;
                    }
                }
                // Register any skipped regions of pages
                this.RegisterSkippedPages(cursorPage, limitPage);
                while (limitPage < destPage)
                {
                    VTable.Assert(PageTable.IsUnusedPage(limitPage));
                    do
                    {
                        limitPage++;
                    } while (limitPage < destPage &&
                             PageTable.IsUnusedPage(limitPage));
                    cursorPage = limitPage;
                    while (limitPage < destPage && IsMyZombiePage(limitPage))
                    {
                        limitPage++;
                    }
                    if (cursorPage != limitPage)
                    {
                        this.RegisterSkippedPages(cursorPage, limitPage);
                    }
                }
            }
            // We found an area big enough.  Commit the pre- and
            // postfix areas of unused pages
            if (prefixPage != cursorPage)
            {
                bool fCleanPages = true;
                bool status      =
                    PageManager.TryReserveUnusedPages(null, prefixPage,
                                                      cursorPage - prefixPage,
                                                      nurseryGeneration,
                                                      ref fCleanPages);
                VTable.Assert(status);
                MakeZombiePages(prefixPage, cursorPage - prefixPage,
                                destGeneration);
            }
            while (destPage != limitPage)
            {
                // Mark the region of unused pages as fromspace
                UIntPtr unusedPage = limitPage;
                VTable.Assert(PageTable.IsUnusedPage(unusedPage));
                do
                {
                    unusedPage++;
                } while (unusedPage < destPage &&
                         PageTable.IsUnusedPage(unusedPage));
                bool fCleanPages = true;
                bool status      =
                    PageManager.TryReserveUnusedPages(null, limitPage,
                                                      unusedPage - limitPage,
                                                      nurseryGeneration,
                                                      ref fCleanPages);
                VTable.Assert(status);
                MakeZombiePages(limitPage, unusedPage - limitPage,
                                destGeneration);
                // Skip any sections of pages already marked as fromspace
                limitPage = unusedPage;
                while (limitPage < destPage && IsMyZombiePage(limitPage))
                {
                    limitPage++;
                }
            }
            destCursor = PageTable.PageAddr(prefixPage);
            destLimit  = PageTable.PageAddr(limitPage);
            // Take ownership of the new pages
            InteriorPtrTable.ClearFirst(prefixPage, limitPage);
            InteriorPtrTable.SetFirst(destCursor + PreHeader.Size);
            if (GC.remsetType == RemSetType.Cards)
            {
                OffsetTable.ClearLast(PageTable.PageAddr(prefixPage),
                                      PageTable.PageAddr(limitPage) - 1);
            }
        }
Example #16
        private UIntPtr ExtendAlloc(UIntPtr bytes, uint alignment,
                                    Thread currentThread)
        {
            if (this.reserveLimit == UIntPtr.Zero)
            {
                return(UIntPtr.Zero);
            }
#if SINGULARITY_KERNEL
            Kernel.Waypoint(700);
#endif
            UIntPtr neededBytes =
                bytes +                              // bytes required for the object, plus
                alignment - UIntPtr.Size -           // worst-case alignment overhead, minus
                (this.reserveLimit - this.allocPtr); // bytes already available
            UIntPtr paddedNeed  = PageTable.PagePad(neededBytes);
            UIntPtr pageCount   = PageTable.PageCount(paddedNeed);
            UIntPtr startPage   = PageTable.Page(this.reserveLimit);
            bool    fCleanPages = CLEAR_POOL_PAGES();
            bool    gotPages    =
                PageManager.TryReserveUnusedPages(currentThread, startPage,
                                                  pageCount, this.pageType,
                                                  ref fCleanPages);
            if (!gotPages)
            {
                // We can't indiscriminately ask for more memory if we have
                // unused pages already available.
                return(UIntPtr.Zero);
            }
            if (this.reserveLimit == UIntPtr.Zero)
            {
                // A collection occurred, so there is no region to extend
                PageManager.ReleaseUnusedPages(startPage, pageCount,
                                               fCleanPages);
                return(UIntPtr.Zero);
            }
            BaseCollector.IncrementNewBytesSinceGC(paddedNeed);
            this.allocNew = this.reserveLimit;
            // Pad alignment space if necessary.  NB: a prior call to
            // AllocateFast may have started generating alignment tokens,
            // but we may need to finish the job here if the residual space
            // was insufficient for a multi-word alignment.
            UIntPtr oldReserveLimit = this.reserveLimit;
            this.reserveLimit += paddedNeed;
            this.allocPtr      =
                Allocator.AlignedAllocationPtr(this.allocPtr,
                                               this.reserveLimit,
                                               alignment);
            if (this.zeroedLimit < this.allocPtr)
            {
                this.zeroedLimit = this.allocPtr;
            }
            UIntPtr objectAddr = this.allocPtr + PreHeader.Size;
            this.allocPtr += bytes;
            if (fCleanPages)
            {
                if (this.zeroedLimit < oldReserveLimit)
                {
                    Util.MemClear(this.zeroedLimit,
                                  oldReserveLimit - this.zeroedLimit);
                }
                this.zeroedLimit = this.reserveLimit;
            }
            else
            {
                Util.MemClear(this.zeroedLimit,
                              this.allocPtr - this.zeroedLimit);
                this.zeroedLimit = this.allocPtr;
            }
            VTable.Assert(this.allocPtr <= this.zeroedLimit);
            VTable.Assert(PageTable.PageAligned(this.reserveLimit));
            if (objectAddr >= oldReserveLimit)
            {
                // Object is first on new page
                InteriorPtrTable.SetFirst(objectAddr);
            }
            else if (objectAddr + bytes < this.reserveLimit)
            {
                // The object does not end on new limit

                // N.B. The next object may not be allocated at exactly
                // (objectAddr + bytes) due to alignment considerations.  It
                // also might not ever be allocated.  These cases are handled
                // by InteriorPtrTable.First skipping over alignment tokens
                // and callers of First watching out for unused space tokens.

                InteriorPtrTable.SetFirst(objectAddr + bytes);
            }
            // We know an object is the last one on a page only
            // when it extends through that page into the next.
            // Otherwise, it lies entirely before or after the page
            // boundary, and we are not sure whether it is the last
            // object or not.  So record only such a straddling object
            // for the last card in that page.  Many objects may have
            // been omitted due to this coarse-grained recording, but
            // we should be able to incrementally update the offset
            // table and find them.  I believe this is a better choice
            // than simply recording every object in the offset table,
            // because most objects may just die and need never be
            // recorded.

#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
            if (GC.remsetType == RemSetType.Cards)
            {
                if (objectAddr < oldReserveLimit &&
                    allocPtr + bytes > oldReserveLimit)
                {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                    OffsetTable.SetLast(objectAddr);
#endif
                }
            }
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint(701);
#endif
            return(objectAddr);
        }
Example #17
            internal void CleanPinnedPages()
            {
                if (pinnedPageList == null || pinnedPageList.Count == 0)
                {
                    return;
                }
                int     pageIndex          = 0;
                int     limit              = pinnedPageList.Count;
                UIntPtr lastPostPinnedAddr = UIntPtr.Zero;

                while (pageIndex < limit)
                {
                    UIntPtr startPage = (UIntPtr)pinnedPageList[pageIndex];
                    UIntPtr endPage   = startPage + 1;
                    pageIndex++;
                    while (pageIndex < limit &&
                           (UIntPtr)pinnedPageList[pageIndex] == endPage)
                    {
                        pageIndex++;
                        endPage++;
                    }
                    // Zero out the area between the start of the page and
                    // the first object on the page
                    UIntPtr firstObjectAddr = FirstPinnedObjectAddr(startPage);
                    UIntPtr firstAddr       = firstObjectAddr - PreHeader.Size;
                    UIntPtr trashAddr       = PageTable.PageAddr(startPage);
                    if (firstAddr < trashAddr)
                    {
                        // The first object "spills" into the previous page,
                        // presumably by no more than HEADER_BYTES bytes
                        VTable.Assert(
                            PageTable.Page(firstAddr) == startPage - 1,
                            "Semispace:RegisterPinnedReference:3");
                        // Prepare to zero the preceding page unless it also
                        // had pinned data on it
                        trashAddr = PageTable.PageAddr(startPage - 1);
                        InteriorPtrTable.ClearFirst(startPage - 1);
                        if (trashAddr >= lastPostPinnedAddr)
                        {
                            // Need to mark the spilled-onto page live to
                            // keep the spilled data around
                            PageType fromSpaceType =
                                PageTable.Type(startPage - 1);
                            VTable.Assert(
                                PageTable.IsZombiePage(fromSpaceType),
                                "Semispace:RegisterPinnedReference:4");
                            PageType toSpaceType =
                                PageTable.ZombieToLive(fromSpaceType);
                            PageTable.SetType(startPage - 1, toSpaceType);
                        }
                    }
                    // If lastPostPinnedAddr is on the page where trashAddr
                    // starts, pinned data from the last run of pinned pages
                    // and pinned data from this run of pinned pages are on
                    // the same page, so just write alignment tokens from
                    // lastPostPinnedAddr to the first pinned object.
                    // Otherwise, write an unused marker at lastPostPinnedAddr
                    // since the rest of its page must be copied or dead.
                    if (trashAddr < lastPostPinnedAddr)
                    {
                        trashAddr = lastPostPinnedAddr;
                    }
                    else
                    {
                        CleanPageTail(lastPostPinnedAddr);
                    }

                    if (GC.remsetType == RemSetType.Cards &&
                        trashAddr < firstAddr)
                    {
                        UIntPtr firstCard = CardTable.CardNo(trashAddr);
                        UIntPtr lastCard  = CardTable.CardNo(firstAddr - 1);

                        if (!OffsetTable.NoObjectPtrToTheCard(firstCard))
                        {
                            UIntPtr offset = OffsetTable.GetOffset(firstCard);
                            UIntPtr objPtr =
                                CardTable.CardAddr(firstCard) + offset;
                            UIntPtr size = OffsetTable.ObjectSize(objPtr);
                            VTable.Assert
                                ((objPtr + size - PreHeader.Size <= trashAddr) ||
                                (objPtr >= trashAddr),
                                "Object should be totally "
                                + "above or below trashAddr");
                            if (objPtr >= trashAddr)
                            {
                                // The offset in this card needs to be updated
                                OffsetTable.ClearCards(firstCard, firstCard);
                            }
                        }

                        OffsetTable.ClearCards(firstCard + 1, lastCard - 1);

                        if (lastCard != CardTable.CardNo(firstObjectAddr))
                        {
                            OffsetTable.ClearCards(lastCard, lastCard);
                        }
                        else
                        {
                            VTable.Assert(OffsetTable.GetOffset(lastCard)
                                          >= (firstObjectAddr
                                              - CardTable.CardAddr(lastCard)),
                                          "wrong offset");
                        }
                    }

                    {
                        // trashAddr should go back at most one page.

                        UIntPtr trashPage           = PageTable.Page(trashAddr);
                        UIntPtr firstObjectAddrPage =
                            PageTable.Page(firstObjectAddr);
                        VTable.Assert((trashPage == firstObjectAddrPage - 1) ||
                                      (trashPage == firstObjectAddrPage));
                    }

                    // If the InteriorPtrTable already had a value, then this is
                    // redundant, but if the call to First above has to compute
                    // the value, then (since it won't store it in the table) we
                    // should store it.  Why?  At this point the previous page
                    // would be "connected" to this one.  After this collection
                    // the previous page will be unused or re-used and unrelated
                    // to this page and subsequent calls to First would then
                    // rely on it making the leap between unrelated pages.
                    InteriorPtrTable.SetFirst(firstObjectAddr);

                    while (trashAddr < firstAddr)
                    {
                        Allocator.WriteAlignment(trashAddr);
                        trashAddr += UIntPtr.Size;
                    }

                    // Zero out the area between the last whole object on
                    // the last page and the end of the last page
                    UIntPtr pastAddr   = PostPinnedObjectAddr(endPage);
                    UIntPtr newEndPage =
                        PageTable.Page(PageTable.PagePad(pastAddr));
                    while (endPage < newEndPage)
                    {
                        // The last object spills into the next page(s), so
                        // mark those page(s) live
                        PageType fromPageType = PageTable.Type(endPage);
                        if (PageTable.IsZombiePage(fromPageType))
                        {
                            PageType toSpaceType =
                                PageTable.ZombieToLive(fromPageType);
                            PageTable.SetType(endPage, toSpaceType);
                        }
                        else
                        {
                            // final page might be live already because
                            // something else on it was pinned.
                            // pageIndex has already been incremented,
                            // so it points to the start of the next
                            // set of contiguous pages
                            VTable.Assert(
                                PageTable.IsLiveGcPage(fromPageType) &&
                                pageIndex < limit &&
                                endPage ==
                                (UIntPtr)pinnedPageList[pageIndex],
                                "Semispace:RegisterPinnedReference:5");
                        }
                        ++endPage;
                    }
                    lastPostPinnedAddr = pastAddr;
                }
                CleanPageTail(lastPostPinnedAddr);
                pinnedPageList = null;
                comparer       = null;
            }
Example #18
        // Interface with the compiler!

        internal static unsafe UIntPtr AllocateBig(UIntPtr numBytes,
                                                   uint alignment,
                                                   Thread currentThread)
        {
            // Pretenure Trigger
            pretenuredSinceLastFullGC += numBytes;
            if (pretenuredSinceLastFullGC > PretenureHardGCTrigger)
            {
                GC.InvokeMajorCollection(currentThread);
            }

            // Potentially Join a collection
            GC.CheckForNeededGCWork(currentThread);
            int     maxAlignmentOverhead = unchecked ((int)alignment) - UIntPtr.Size;
            UIntPtr pageCount            =
                PageTable.PageCount(numBytes + maxAlignmentOverhead);
            bool    fCleanPages = true;
            UIntPtr page        = PageManager.EnsurePages(currentThread, pageCount,
                                                          largeObjectGeneration,
                                                          ref fCleanPages);
            int unusedBytes =
                unchecked ((int)(PageTable.RegionSize(pageCount) - numBytes));
            int unusedCacheLines =
                unchecked ((int)(unusedBytes - maxAlignmentOverhead)) >> 5;
            int pageOffset = 0;

            if (unusedCacheLines != 0)
            {
                pageOffset = (bigOffset % unusedCacheLines) << 5;
                bigOffset++;
            }
            UIntPtr pageStart = PageTable.PageAddr(page);

            for (int i = 0; i < pageOffset; i += UIntPtr.Size)
            {
                Allocator.WriteAlignment(pageStart + i);
            }
            UIntPtr unalignedStartAddr = pageStart + pageOffset;
            UIntPtr startAddr          =
                Allocator.AlignedAllocationPtr(unalignedStartAddr,
                                               pageStart + unusedBytes,
                                               alignment);

            pageOffset +=
                unchecked ((int)(uint)(startAddr - unalignedStartAddr));
            if (pageOffset < unusedBytes)
            {
                BumpAllocator.WriteUnusedMarker(pageStart + pageOffset + numBytes);
            }
            UIntPtr resultAddr = startAddr + PreHeader.Size;

            InteriorPtrTable.SetFirst(resultAddr);
            VTable.Assert(PageTable.Page(resultAddr) <
                          PageTable.Page(startAddr + numBytes - 1),
                          "Big object should cross pages");
            if (GC.remsetType == RemSetType.Cards)
            {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                OffsetTable.SetLast(resultAddr);
#endif
            }
            return(resultAddr);
        }
Example #19
 private static UIntPtr FirstPinnedObjectAddr(UIntPtr startPage)
 {
     return(InteriorPtrTable.First(startPage));
 }
Example #20
            private static void CleanPageTail(UIntPtr postPinnedAddr)
            {
                if (!PageTable.PageAligned(postPinnedAddr))
                {
                    // If postPinnedAddr points to the first object on its page,
                    // then we are removing all objects (specifically the part
                    // of the object that the InteriorPtrTable tracks, the
                    // vtables) from the page, so we should clear the page's
                    // entry in the InteriorPtrTable.

                    UIntPtr page        = PageTable.Page(postPinnedAddr);
                    UIntPtr firstObjPtr = InteriorPtrTable.First(page);
                    if (firstObjPtr > postPinnedAddr)
                    {
                        VTable.Assert
                            (firstObjPtr - PreHeader.Size >= postPinnedAddr,
                            "postPinnedAddr should not point to the "
                            + "interior of an object (1)");
                        InteriorPtrTable.ClearFirst(page);
                    }
                    else if (!BumpAllocator.IsUnusedSpace(firstObjPtr))
                    {
                        UIntPtr firstObjSize =
                            InteriorPtrTable.ObjectSize(firstObjPtr);
                        VTable.Assert
                            (firstObjPtr + firstObjSize - PreHeader.Size
                            <= postPinnedAddr,
                            "postPinnedAddr should not point to the "
                            + "interior of an object (2)");
                    }

                    UIntPtr byteCount = PageTable.PagePad(postPinnedAddr)
                                        - postPinnedAddr;
                    Util.MemClear(postPinnedAddr, byteCount);
                    BumpAllocator.WriteUnusedMarker(postPinnedAddr);

                    if (GC.remsetType == RemSetType.Cards && byteCount > 0)
                    {
                        UIntPtr firstCard = CardTable.CardNo(postPinnedAddr);
                        UIntPtr lastCard  =
                            CardTable.CardNo(postPinnedAddr + byteCount - 1);

                        if (!OffsetTable.NoObjectPtrToTheCard(firstCard))
                        {
                            UIntPtr offset = OffsetTable.GetOffset(firstCard);
                            UIntPtr objPtr =
                                CardTable.CardAddr(firstCard) + offset;
                            UIntPtr size = OffsetTable.ObjectSize(objPtr);

                            VTable.Assert
                                ((objPtr + size - PreHeader.Size
                                  <= postPinnedAddr) ||
                                (objPtr >= postPinnedAddr),
                                "Object should be totally "
                                + "above or below postPinnedAddr");
                            if (objPtr >= postPinnedAddr)
                            {
                                OffsetTable.ClearCards(firstCard, firstCard);
                            }
                        }

                        OffsetTable.ClearCards(firstCard + 1, lastCard);
                    }
                }
            }
Example #21
        private static UIntPtr FirstPtrFromInteriorTable(UIntPtr c)
        {
            UIntPtr cardAddr     = CardTable.CardAddr(c);
            UIntPtr nextCardAddr = CardTable.NextCardAddr(c);
            UIntPtr page         = PageTable.Page(cardAddr);
            UIntPtr pageAddr     = PageTable.PageAddr(page);
            UIntPtr currAddr;

            if (page == 0)
            {
                currAddr = PtrToNextObject(pageAddr,
                                           (UIntPtr)PreHeader.Size, nextCardAddr);
            }
            else
            {
                short offset = PageTable.Extra(page);
                currAddr = UIntPtr.Zero;
                if (offset != InteriorPtrTable.OFFSET_NO_DATA)
                {
                    currAddr = pageAddr + (offset - InteriorPtrTable.OFFSET_SKEW);
                }

                // In general, we expect currAddr <= cardAddr. Or in the extreme
                // case, when the object starts from the page boundary,
                // currAddr - Object.HEADER_BYTES <= cardAddr. The contrary
                // cases have to be handled by searching previous pages.

                if (currAddr == UIntPtr.Zero ||
                    (currAddr > cardAddr &&
                     currAddr - PreHeader.Size > cardAddr))
                {
                    // Look at previous pages, in case an object on
                    // them spans onto the current page. In that case, we
                    // should use that object's ptr.

                    currAddr = InteriorPtrTable.Last(page - 1);

                    // Usually, Last() returns a pointer before or at the page
                    // boundary. However, there is one exception: when an object
                    // ends exactly at the last byte of the previous page and the
                    // next object starts right at the page boundary (the first
                    // byte of the next page), the pointer to that next object is
                    // returned.
                    // Example found: objPtr = 3d09fa8, size = 60, page boundary =
                    // 3d0a000, next objPtr = 3d0a008. The returned pointer is then
                    // 3d0a008, which is beyond the page boundary.

                    VTable.Assert(currAddr <= pageAddr ||
                                  currAddr - PreHeader.Size <= pageAddr,
                                  "object is expected before page or right at the beginning of it");
                }
            }
            VTable.Assert(currAddr < nextCardAddr, "object is expected before next card");

            while (currAddr < nextCardAddr)
            {
                if (Allocator.IsAlignment(currAddr))
                {
                    currAddr += UIntPtr.Size;
                }
                else if (BumpAllocator.IsUnusedSpace(currAddr))
                {
                    currAddr = PageTable.PagePad(currAddr) + PreHeader.Size;
                }
                else
                {
                    UIntPtr size = InteriorPtrTable.ObjectSize(currAddr);
                    if (currAddr + size - PreHeader.Size > cardAddr)
                    {
                        return(currAddr);
                    }
                    currAddr += size;
                }
            }
            VTable.Assert(false, "No obj ptr found by looking at interior table");
            return(UIntPtr.Zero);
        }
Example #22
        /*
         * Returns a pointer past the last object _that_fits_completely_ on
         * the given page.  Note that the "last" object on a page may
         * actually start on a previous page.
         */
        internal static UIntPtr Last(UIntPtr page)
        {
            UIntPtr currAddr = InteriorPtrTable.First(page);
            UIntPtr endAddr  = PageTable.PageAddr(page + 1);

            // Look out for the unused space token: this page may not
            // have been completely allocated: its "first" object might not
            // be valid.
            if (BumpAllocator.IsUnusedSpace(currAddr) || currAddr >= endAddr)
            {
                // Back up to the previous object.  Should be fast
                // since First updated the InteriorPtrTable entries.
                currAddr = Before(PageTable.PageAddr(page));
            }
            // REVIEW this is very similar to Find(addr) below.
            VTable.Assert(currAddr <= endAddr);
            while (true)
            {
                // Watch out for alignment padding; advance the pointer if
                // it points to a syncblock index rather than a vtable
                // pointer.  Note that we must do this before scrolling,
                // since the page table value was set before we knew the
                // required alignment.
                if (Allocator.IsAlignment(currAddr))
                {
                    currAddr += UIntPtr.Size;
                }
                else if (BumpAllocator.IsUnusedSpace(currAddr))
                {
                    UIntPtr nextAddr =
                        PageTable.PagePad(currAddr) + PreHeader.Size;
                    if (nextAddr >= endAddr)
                    {
                        return(currAddr);
                    }
                    else
                    {
                        currAddr = nextAddr;
                    }
                }
                else
                {
                    VTable.Assert(currAddr <= endAddr);
                    UIntPtr size     = ObjectSize(currAddr);
                    UIntPtr postAddr = currAddr + size;
                    if (postAddr > endAddr)
                    {
                        if (postAddr - PreHeader.Size > endAddr)
                        {
                            // The object spills over onto the next page
                            return(currAddr);
                        }
                        else
                        {
                            // The object ended at or before the page boundary
                            return(postAddr);
                        }
                    }
                    else
                    {
                        currAddr = postAddr;
                    }
                }
            }
        }
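Because Last may return either a pointer just past the last whole object on the page or a pointer to an object that straddles into the next page, callers compare the result against the page end before using it, as PostPinnedObjectAddr in Example #7 does. A minimal sketch of that check; the helper name is hypothetical:

        // Hypothetical caller: does the last object tracked for this page
        // spill over into the next page?
        internal static bool LastObjectStraddles(UIntPtr page)
        {
            UIntPtr endAddr  = PageTable.PageAddr(page + 1);
            UIntPtr lastAddr = InteriorPtrTable.Last(page);
            // Last returns a pointer strictly below endAddr only when it names
            // an object that extends past the page boundary or when the tail
            // of the page is unused space.
            return lastAddr < endAddr &&
                   !BumpAllocator.IsUnusedSpace(lastAddr);
        }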