        internal static void VerifyUnusedPage(UIntPtr page, bool containsHeader)
        {
            if (PageTable.Type(page) == PageType.UnusedDirty)
            {
                return;
            }

            // Verify that the page is indeed clean

            UIntPtr *startAddr = (UIntPtr *)PageTable.PageAddr(page);
            UIntPtr *endAddr   = (UIntPtr *)PageTable.PageAddr(page + 1);

            // If the page contains a header then we can't expect the header
            // to be clean.
            if (containsHeader)
            {
                startAddr += (uint)
                             (Util.UIntPtrPad((UIntPtr)sizeof(UnusedBlockHeader))
                              / (uint)sizeof(UIntPtr));
            }

            while (startAddr < endAddr)
            {
                VTable.Assert(*startAddr == UIntPtr.Zero,
                              "UnusedClean page contains nonzero data");
                startAddr++;
            }
        }
        internal static void PrintPageContents(UIntPtr page)
        {
            UIntPtr startAddr = PageTable.PageAddr(page);
            UIntPtr endAddr   = PageTable.PageAddr(page + 1);

            PrintMemoryContents(startAddr, endAddr);
        }
        internal static void VerifyUnusedRegion(UIntPtr startPage,
                                                UIntPtr endPage)
        {
            // Verify that all of the pages are of the same Clean/Dirty type.
            PageType startType = PageTable.Type(startPage);

            for (UIntPtr page = startPage; page < endPage; ++page)
            {
                VTable.Assert(startType == PageTable.Type(page),
                              "Unused page types don't match in region");
            }

            if (startPage > UIntPtr.Zero &&
                PageTable.IsUnusedPage(startPage - 1) &&
                PageTable.IsMyPage(startPage - 1))
            {
                // We have already checked the region
                return;
            }

            UIntPtr            regionAddr   = PageTable.PageAddr(startPage);
            UnusedBlockHeader *regionHeader = (UnusedBlockHeader *)regionAddr;
            UIntPtr            pageCount    = regionHeader->count;

            VTable.Assert
                (pageCount >= (endPage - startPage),
                "Region-to-verify is larger than its header specifies");

            endPage = startPage + pageCount;

            for (UIntPtr page = startPage; page < endPage; ++page)
            {
                VTable.Assert(PageTable.IsUnusedPage(page) &&
                              PageTable.IsMyPage(page),
                              "Non my-unused page in unused region");

                PageManager.VerifyUnusedPage
                    (page, (page == startPage) || (page == (endPage - 1)));
            }

            VTable.Assert(!(endPage < PageTable.pageTableCount &&
                            PageTable.IsUnusedPage(endPage) &&
                            PageTable.IsMyPage(endPage)),
                          "My-unused page immediately after unused region");

            // Verify that the region is correctly linked into the
            // list of unused memory blocks
            int slot = SlotFromCount(pageCount);
            UnusedBlockHeader *header = unusedMemoryBlocks[slot].next;

            UnusedBlockHeader.Verify(header);
            while (regionAddr != (UIntPtr)header)
            {
                header = header->next;
                VTable.Assert(header != null,
                              "Unused region not list for its slot number");
                UnusedBlockHeader.Verify(header);
            }
        }
        internal static bool TryReservePages(Thread currentThread,
                                             UIntPtr startPage,
                                             UIntPtr pageCount,
                                             PageType newType,
                                             ref bool fCleanPages)
        {
            Trace.Log(Trace.Area.Page,
                      "TryReservePages start={0:x} count={1:x}",
                      __arglist(startPage, pageCount));
            VTable.Deny(PageTable.IsUnusedPageType(newType));
            VTable.Assert(pageCount > UIntPtr.Zero);
            VTable.Deny(startPage != UIntPtr.Zero &&
                        PageTable.IsUnusedPage(startPage - 1) &&
                        PageTable.IsMyPage(startPage - 1));
            UIntPtr endPage = startPage + pageCount;
            UIntPtr index   = startPage;

            while (index < endPage &&
                   PageTable.IsUnusedPage(index) &&
                   PageTable.IsMyPage(index))
            {
                index++;
            }
            if (PageTable.IsUnallocatedPage(PageTable.Type(index)))
            {
                // We should try to extend the region of allocated pages
                UIntPtr pagesNeeded = pageCount - (index - startPage);
                UIntPtr bytesNeeded = PageTable.RegionSize(pagesNeeded);
                UIntPtr allocSize   = Util.Pad(bytesNeeded, heap_commit_size);
                UIntPtr startAddr   = PageTable.PageAddr(index);
                bool    gotMemory   = false;
                bool    iflag       = EnterMutex(currentThread);
                try {
                    gotMemory =
                        MemoryManager.AllocateMemory(startAddr, allocSize);
                    if (gotMemory)
                    {
                        UIntPtr allocPages = PageTable.PageCount(allocSize);
                        MarkUnusedPages(/* avoid recursive locking */ null,
                                        index, allocPages, true);
                    }
                } finally {
                    LeaveMutex(currentThread, iflag);
                }
                if (gotMemory)
                {
                    bool success =
                        TryReserveUnusedPages(currentThread, startPage,
                                              pageCount, newType,
                                              ref fCleanPages);
                    Trace.Log(Trace.Area.Page,
                              "TryReservePages success={0}",
                              __arglist(success));
                    return(success);
                }
            }
            return(false);
        }
        internal static unsafe void VerifyFirst(UIntPtr previousObjectAddr,
                                                UIntPtr objectAddr)
        {
            UIntPtr page = PageTable.Page(objectAddr);

            if (previousObjectAddr != UIntPtr.Zero)
            {
                UIntPtr previousPage = PageTable.Page(previousObjectAddr);
                UIntPtr pageCursor   = previousPage + 1;
                while (pageCursor < page)
                {
                    uint    cursorOffset = PageTable.Extra(pageCursor);
                    UIntPtr objAddr      = (PageTable.PageAddr(pageCursor) +
                                            cursorOffset - OFFSET_SKEW);
                    if (!(cursorOffset <= OFFSET_NO_DATA ||
                          BumpAllocator.IsUnusedSpace(objAddr) ||
                          Allocator.IsAlignment(objAddr) ||
                          BumpAllocator.IsRestOfPageZero(objAddr)))
                    {
                        VTable.DebugPrint
                            ("cursorOffset={0:x} OFFSET_NO_DATA={1:x} objAddr={2:x} unused={3} isalign={4} iszero={5}\n",
                            __arglist((cursorOffset),
                                      (OFFSET_NO_DATA),
                                      ((long)objAddr),
                                      (BumpAllocator.IsUnusedSpace(objAddr)),
                                      (Allocator.IsAlignment(objAddr)),
                                      (BumpAllocator.IsRestOfPageZero(objAddr))));
                    }
                    VTable.Assert(cursorOffset <= OFFSET_NO_DATA ||
                                  BumpAllocator.IsUnusedSpace(objAddr) ||
                                  Allocator.IsAlignment(objAddr) ||
                                  BumpAllocator.IsRestOfPageZero(objAddr),
                                  "VerifyFirst 1");
                    pageCursor++;
                }
            }
            uint offset = PageTable.Extra(page);

            if (offset > OFFSET_NO_DATA)
            {
                UIntPtr firstAddr =
                    PageTable.PageAddr(page) + offset - OFFSET_SKEW;
                if (!(firstAddr == objectAddr ||
                      (firstAddr + UIntPtr.Size == objectAddr &&
                       Allocator.IsAlignment(firstAddr))))
                {
                    VTable.DebugPrint
                        ("firstAddr={0:x} objectAddr={1:x} isalign={2}\n",
                        __arglist(((long)firstAddr),
                                  ((long)objectAddr),
                                  (Allocator.IsAlignment(firstAddr))));
                }
                VTable.Assert(firstAddr == objectAddr ||
                              (firstAddr + UIntPtr.Size == objectAddr &&
                               Allocator.IsAlignment(firstAddr)),
                              "VerifyFirst 2");
            }
        }
        internal static unsafe void SetFirst(UIntPtr newAddr)
        {
            VTable.Assert(PageTable.IsGcPage(PageTable.Page(newAddr)),
                          "SetFirst on a non-GC page");
            UIntPtr page   = PageTable.Page(newAddr);
            UIntPtr offset = newAddr - PageTable.PageAddr(page);

            PageTable.SetExtra(page, unchecked ((uint)(offset + OFFSET_SKEW)));
        }
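
        // Hedged sketch (hypothetical helper, not part of the original source):
        // the inverse of SetFirst under the encoding used above.  SetFirst
        // stores the page-relative offset of the first object, biased by
        // OFFSET_SKEW, in the page-table "extra" field; callers should first
        // check that the entry is greater than OFFSET_NO_DATA, as VerifyFirst
        // does above.
        internal static UIntPtr HypotheticalGetFirst(UIntPtr page)
        {
            uint offset = PageTable.Extra(page);
            // Undo the OFFSET_SKEW bias applied by SetFirst.
            return PageTable.PageAddr(page) + offset - OFFSET_SKEW;
        }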
        private UIntPtr FreshAlloc(UIntPtr bytes, uint alignment,
                                   Thread currentThread)
        {
#if SINGULARITY_KERNEL
            Kernel.Waypoint(702);
#endif
            this.Truncate();
            UIntPtr paddedBytes =
                PageTable.PagePad(bytes + alignment - UIntPtr.Size);
            BaseCollector.IncrementNewBytesSinceGC(paddedBytes);
            UIntPtr pages       = PageTable.PageCount(paddedBytes);
            bool    fCleanPages = CLEAR_POOL_PAGES();
            // We may eventually want to ask for specific pages
            // between asking if any pages are reusable and asking the
            // OS for any possible page.
            UIntPtr startPage =
                PageManager.EnsurePages(currentThread, pages, this.pageType,
                                        ref fCleanPages);
            UIntPtr startAddr = PageTable.PageAddr(startPage);
            UIntPtr limitAddr = PageTable.PageAddr(startPage + pages);
            startAddr = Allocator.AlignedAllocationPtr(startAddr, limitAddr,
                                                       alignment);
            this.allocNew = startAddr;
            this.allocPtr = startAddr + bytes;
            if (fCleanPages)
            {
                this.zeroedLimit = limitAddr;
            }
            else
            {
                Util.MemClear(startAddr, bytes);
                this.zeroedLimit = this.allocPtr;
            }
            this.reserveLimit = limitAddr;
            UIntPtr resultAddr = startAddr + PreHeader.Size;
            InteriorPtrTable.SetFirst(resultAddr);

#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
            if (GC.remsetType == RemSetType.Cards)
            {
                UIntPtr nextPageAddr = startAddr + PageTable.PageSize;
                VTable.Assert(resultAddr < nextPageAddr);
                if (this.allocPtr > nextPageAddr)
                {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                    OffsetTable.SetLast(resultAddr);
#endif
                }
            }
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint(703);
#endif
            return(resultAddr);
        }
        private UIntPtr growFreeList(UIntPtr blockSize, Thread t)
        {
            UIntPtr pageCount   = PageTable.PageCount(blockSize);
            bool    fCleanPages = true;
            UIntPtr startPage   = PageManager.EnsurePages(t, pageCount,
                                                          PageType.Owner0,
                                                          ref fCleanPages);
            UIntPtr newBlockSize = PageTable.RegionSize(pageCount);
            UIntPtr newBlockAddr = PageTable.PageAddr(startPage);

            return(FreeBlock(newBlockAddr, newBlockSize));
        }
        // Finds the object base for an interior pointer.  For a pointer that
        // falls on the boundary between the tail of one object and the head
        // of the next, it returns the former object (the one whose tail we
        // point at).  To get the base pointer for a pointer into the
        // pre-header, you should add PreHeader.Size before calling this.
        internal static UIntPtr Find(UIntPtr addr)
        {
            UIntPtr page     = PageTable.Page(addr);
            UIntPtr currAddr = InteriorPtrTable.First(page);

            // Look out for the unused-space token: this page may not have
            // been completely allocated, so its "first" object might not
            // be valid.
            if (BumpAllocator.IsUnusedSpace(currAddr) || currAddr > addr)
            {
                // Back up to the previous object.  Should be fast
                // since First updated the InteriorPtrTable entries.
                currAddr = Before(PageTable.PageAddr(page));
            }
            VTable.Assert(!BumpAllocator.IsUnusedSpace(currAddr),
                          "InteriorPtrTable.Find 0");
            VTable.Assert(currAddr <= addr, "InteriorPtrTable.Find 1");
            while (true)
            {
                // Watch out for alignment padding; advance the pointer if
                // it points to a syncblock index rather than a vtable
                // pointer.  Note that we must do this before scrolling,
                // since the page table value was set before we knew the
                // required alignment.
                if (Allocator.IsAlignment(currAddr))
                {
                    currAddr += UIntPtr.Size;
                }
                else if (BumpAllocator.IsUnusedSpace(currAddr))
                {
                    UIntPtr postAddr =
                        PageTable.PagePad(currAddr) + PreHeader.Size;
                    VTable.Assert(postAddr <= addr, "InteriorPtrTable.Find 2");
                    currAddr = postAddr;
                }
                else
                {
                    VTable.Assert(currAddr <= addr, "InteriorPtrTable.Find 3");
                    UIntPtr size = ObjectSize(currAddr);
                    VTable.Assert(size >= UIntPtr.Zero,
                                  "InteriorPtrTable.Find 4");
                    UIntPtr postAddr = currAddr + size;
                    if (postAddr > addr)
                    {
                        return(currAddr);
                    }
                    else
                    {
                        currAddr = postAddr;
                    }
                }
            }
        }
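
        // Hedged usage sketch (hypothetical helper, not part of the original
        // source): resolving an interior pointer back to an object reference
        // via Find.  Magic.fromAddress appears elsewhere in this code; the
        // helper itself is illustrative only.
        internal static Object HypotheticalObjectFromInteriorPtr(UIntPtr interiorAddr)
        {
            // For a pointer into the pre-header, PreHeader.Size must be added
            // before calling Find (see the comment on Find above).
            UIntPtr baseAddr = Find(interiorAddr);
            return Magic.fromAddress(baseAddr);
        }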
        internal static bool ShouldPin(UIntPtr objAddr)
        {
            UIntPtr page = PageTable.Page(objAddr);

            if (PageTable.Type(page) != SegregatedFreeList.SMALL_OBJ_PAGE)
            {
                // in practice this won't be reached
                return(true);
            }
            SegregatedFreeList.PageHeader *ph =
                (SegregatedFreeList.PageHeader *)PageTable.PageAddr(page);
            return(new CoCoPageUserValue(ph->userValue).Pinned);
        }
        private static void MarkUnusedPages(Thread currentThread,
                                            UIntPtr startPage,
                                            UIntPtr pageCount,
                                            bool fCleanPages)
        {
            Trace.Log(Trace.Area.Page,
                      "MarkUnusedPages start={0:x} count={1:x}",
                      __arglist(startPage, pageCount));
            UIntPtr endPage = startPage + pageCount;

            if (avoidDirtyPages && !fCleanPages)
            {
                UIntPtr dirtyStartAddr = PageTable.PageAddr(startPage);
                UIntPtr dirtySize      = PageTable.RegionSize(pageCount);
                Util.MemClear(dirtyStartAddr, dirtySize);
                fCleanPages = true;
            }
            bool iflag = EnterMutex(currentThread);

            try {
                if (endPage < PageTable.pageTableCount)
                {
                    if (PageTable.IsUnusedPage(endPage) &&
                        PageTable.IsMyPage(endPage))
                    {
                        UIntPtr regionSize = UnlinkUnusedPages(endPage);
                        endPage += regionSize;
                    }
                }

                UIntPtr queryStartPage = startPage - 1;
                UIntPtr newStartPage   = startPage;
                if (PageTable.IsUnusedPage(queryStartPage) &&
                    PageTable.IsMyPage(queryStartPage))
                {
                    UnusedBlockHeader *tailUnused = (UnusedBlockHeader *)
                                                    PageTable.PageAddr(queryStartPage);
                    UIntPtr newStartAddr = (UIntPtr)tailUnused->curr;
                    newStartPage = PageTable.Page(newStartAddr);
                    UIntPtr regionSize = UnlinkUnusedPages(newStartPage);
                    VTable.Assert(newStartPage + regionSize == startPage);
                }

                PageType pageType =
                    fCleanPages ? PageType.UnusedClean : PageType.UnusedDirty;
                PageTable.SetType(startPage, pageCount, pageType);
                LinkUnusedPages(newStartPage, endPage - newStartPage, false);
            } finally {
                LeaveMutex(currentThread, iflag);
            }
        }
        private unsafe void SkipDestinationAreas(ref UIntPtr destPage,
                                                 UIntPtr destCursor,
                                                 ref UIntPtr destLimit,
                                                 UIntPtr sourceCursor)
        {
            UIntPtr cursorPage = PageTable.Page(destCursor);
            UIntPtr sourcePage = PageTable.Page(sourceCursor);

            if (cursorPage != sourcePage)
            {
                UIntPtr destPageLimit = PageTable.PagePad(destCursor);
                if (destPageLimit != destCursor)
                {
                    cursorPage++;
                }
                VTable.Assert(PageTable.PageAligned(destLimit));
                UIntPtr limitPage = PageTable.Page(destLimit);
                while (destPage < sourcePage)
                {
                    if (cursorPage < limitPage)
                    {
                        this.RegisterSkippedPages(cursorPage, limitPage);
                    }
                    do
                    {
                        destPage++;
                    } while (!IsMyZombiePage(destPage));
                    cursorPage = destPage;
                    do
                    {
                        destPage++;
                    } while (IsMyZombiePage(destPage));
                    limitPage = destPage;
                }
                destLimit = PageTable.PageAddr(limitPage);
                VTable.Assert(destPage > sourcePage);
                VTable.Assert(cursorPage <= sourcePage);
                if (cursorPage < sourcePage)
                {
                    this.RegisterSkippedPages(cursorPage, sourcePage);
                    cursorPage = sourcePage;
                }
                InteriorPtrTable.ClearFirst(cursorPage, destPage);
                InteriorPtrTable.SetFirst(sourceCursor + PreHeader.Size);
                if (GC.remsetType == RemSetType.Cards)
                {
                    OffsetTable.ClearLast(PageTable.PageAddr(cursorPage),
                                          PageTable.PageAddr(destPage) - 1);
                }
            }
        }
        private static void LinkUnusedPages(UIntPtr startPage,
                                            UIntPtr pageCount,
                                            bool asVictim)
        {
            if (PageManager.SlowDebug)
            {
                for (UIntPtr i = startPage; i < startPage + pageCount; i++)
                {
                    VTable.Assert(PageTable.IsUnusedPage(i) &&
                                  PageTable.IsMyPage(i),
                                  "Incorrect page to link into unused region");
                }
            }
            Trace.Log(Trace.Area.Page,
                      "LinkUnusedPages start={0:x} count={1:x}",
                      __arglist(startPage, pageCount));
            VTable.Deny(startPage > UIntPtr.Zero &&
                        PageTable.IsUnusedPage(startPage - 1) &&
                        PageTable.IsMyPage(startPage - 1));
            VTable.Deny(startPage + pageCount > PageTable.pageTableCount);
            VTable.Deny(startPage + pageCount < PageTable.pageTableCount &&
                        PageTable.IsUnusedPage(startPage + pageCount) &&
                        PageTable.IsMyPage(startPage + pageCount));
            UnusedBlockHeader *header = (UnusedBlockHeader *)
                                        PageTable.PageAddr(startPage);

            UnusedBlockHeader.Initialize(header, pageCount);
            int slot = SlotFromCount(pageCount);

            // Unused blocks are linked into the free list either as the result of a collection
            // or as a result of carving a big block into a smaller allocation and a remainder.
            // When such a remainder is linked back into the free list, it is identified as a
            // victim.  We favor subsequent allocations from these victims, in an attempt to
            // reduce fragmentation.  This is achieved by keeping victims at the head of the
            // free list.
            //
            // TODO: the long term solution is to perform best fit on the free list.
            if (asVictim || unusedMemoryBlocks[slot].next == null)
            {
                fixed(UnusedBlockHeader *listHeader = &unusedMemoryBlocks[slot])
                {
                    UnusedBlockHeader.InsertNext(listHeader, header);
                }
            }
            else
            {
                UnusedBlockHeader *listHeader = unusedMemoryBlocks[slot].next;
                UnusedBlockHeader.InsertNext(listHeader, header);
            }
        }
        private static UIntPtr UnlinkUnusedPages(UIntPtr startPage)
        {
            VTable.Assert(PageTable.IsUnusedPage(startPage) &&
                          PageTable.IsMyPage(startPage));
            VTable.Deny(startPage > UIntPtr.Zero &&
                        PageTable.IsUnusedPage(startPage - 1) &&
                        PageTable.IsMyPage(startPage - 1));
            UnusedBlockHeader *header = (UnusedBlockHeader *)
                                        PageTable.PageAddr(startPage);
            UIntPtr pageCount = UnusedBlockHeader.Remove(header);

            Trace.Log(Trace.Area.Page,
                      "UnlinkUnusedPages start={0:x} count={1:x}",
                      __arglist(startPage, pageCount));
            return(pageCount);
        }
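
        // CleanFoundPages and FoundOnlyCleanPages consume a small scratch
        // table that SetPageTypeClean (below) writes at the start of a
        // reserved region: word 0 holds the number of dirty sub-ranges,
        // followed by (startAddr, byteCount) pairs.  CleanFoundPages zeroes
        // each recorded range; FoundOnlyCleanPages just clears the table and
        // returns true only if no dirty range was recorded.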
        private static void CleanFoundPages(UIntPtr startPage)
        {
            UIntPtr *tableAddr = (UIntPtr *)PageTable.PageAddr(startPage);
            uint     count     = (uint)*tableAddr;
            UIntPtr *cursor    = tableAddr + (count + count);

            while (count != 0)
            {
                UIntPtr dirtySize      = *cursor;
                *cursor--              = UIntPtr.Zero;
                UIntPtr dirtyStartAddr = *cursor;
                *cursor--              = UIntPtr.Zero;
                Util.MemClear(dirtyStartAddr, dirtySize);
                count--;
            }
            *tableAddr = UIntPtr.Zero;
        }
        private static bool FoundOnlyCleanPages(UIntPtr startPage)
        {
            UIntPtr *tableAddr = (UIntPtr *)PageTable.PageAddr(startPage);
            uint     count     = (uint)*tableAddr;
            UIntPtr *cursor    = tableAddr + (count + count);
            bool     result    = true;

            while (count != 0)
            {
                result = false;
                *cursor-- = UIntPtr.Zero;
                *cursor-- = UIntPtr.Zero;
                count--;
            }
            *tableAddr = UIntPtr.Zero;
            return(result);
        }
            private static UIntPtr PostPinnedObjectAddr(UIntPtr endPage)
            {
                UIntPtr endAddr            = PageTable.PageAddr(endPage);
                UIntPtr postLastObjectAddr = InteriorPtrTable.Last(endPage - 1);

                if (postLastObjectAddr < endAddr &&
                    !BumpAllocator.IsUnusedSpace(postLastObjectAddr))
                {
                    // If the next object straddles into the next page,
                    // return the location just past the object
                    Object  lastObject     = Magic.fromAddress(postLastObjectAddr);
                    UIntPtr lastObjectSize =
                        ObjectLayout.ObjectSize(postLastObjectAddr,
                                                lastObject.vtable);
                    postLastObjectAddr += lastObjectSize;
                }
                return(postLastObjectAddr - PreHeader.Size);
            }
        // This is _just_ a notification - it does not pin the object; it is
        // what we do when an object ends up being pinned.
        internal static void NotifyPin(UIntPtr objAddr)
        {
            UIntPtr page = PageTable.Page(objAddr);

            if (PageTable.Type(page) != SegregatedFreeList.SMALL_OBJ_PAGE)
            {
                return;
            }
            SegregatedFreeList.PageHeader *ph =
                (SegregatedFreeList.PageHeader *)PageTable.PageAddr(page);
            CoCoPageUserValue v = new CoCoPageUserValue(ph->userValue);

            if (v.Marked)
            {
                v.Pinned = true;
            }
            ph->userValue = v.Bits;
        }
        internal static unsafe void ClearThreadStack(Thread thread)
        {
            short   threadIndex = (short)thread.threadIndex;
            UIntPtr endPage     = PageTable.Page(CallStack.StackBase(thread));
            UIntPtr startPage   = endPage - 1;

            VTable.Assert(PageTable.IsStackPage(PageTable.Type(startPage)));
            VTable.Assert(PageTable.Extra(startPage) == threadIndex);
            while (startPage > 0 &&
                   PageTable.IsStackPage(PageTable.Type(startPage - 1)) &&
                   PageTable.Extra(startPage - 1) == threadIndex)
            {
                startPage--;
            }
            UIntPtr startAddr = PageTable.PageAddr(startPage);
            UIntPtr size      = PageTable.RegionSize(endPage - startPage);

            SetUnallocatedPages(startAddr, size);
        }
        private void AdvanceWritePage()
        {
            UIntPtr pageAddr;

            if (UnmanagedPageList.pageCache.IsEmpty)
            {
                bool    fCleanPages = true;
                UIntPtr page        = PageManager.EnsurePages(null, (UIntPtr)1,
                                                              PageType.System,
                                                              ref fCleanPages);
                pageAddr = PageTable.PageAddr(page);
            }
            else
            {
                pageAddr = UnmanagedPageList.pageCache.RemoveHead();
            }
            this.pageList.AddTail(pageAddr);
            this.writeCursor =
                new PageCursor(UnmanagedPageList.FirstPageAddr(pageAddr),
                               UnmanagedPageList.EndPageAddr(pageAddr));
        }
        /*
         * Returns a pointer to the beginning of an object such that the
         * pointer is less than or equal to addr.  N.B. Before may return a
         * pointer to an alignment or an unused space token.
         */
        private static UIntPtr Before(UIntPtr addr)
        {
            UIntPtr page   = PageTable.Page(addr);
            uint    offset = PageTable.Extra(page);

            // OFFSET_NO_DATA and negative offsets should always fail this
            // test.
            if (PageTable.PageAddr(page) + (offset - OFFSET_SKEW) > addr)
            {
                // If the addr is an interior pointer of an object on a
                // previous page, go back one entry.
                --page;
                offset = PageTable.Extra(page);
            }
            if (offset == OFFSET_NO_DATA)
            {
                // Scroll back until we find a page entry with real data in
                // it.  This handles the case of a large object allocated
                // across pages.
                do
                {
                    --page;
                    offset = PageTable.Extra(page);
                }while (offset == OFFSET_NO_DATA);
            }
            VTable.Assert(offset > OFFSET_NO_DATA, "No offset data");
            // Unused: we currently do not use negative offsets in the
            // page table.  They would be more efficient for really big
            // objects, but the OFFSET_NO_DATA value works fine too.

            /*
             * // Scroll backwards using big steps.  Offset will never be
             * // OFFSET_NO_DATA in this loop.
             * while (offset < OFFSET_NO_DATA) {
             *     entry += (offset - OFFSET_SKEW);
             *     offset = *entry;
             * }
             */
            return(PageTable.PageAddr(page) + (offset - OFFSET_SKEW));
        }
        private void CompactPhaseCleanup(Thread currentThread,
                                         PageType generation,
                                         UIntPtr newLimitPtr)
        {
            VTable.Assert(IsValidGeneration((int)generation));

            registerThreadReferenceVisitor.Cleanup();
            // Free up skipped pages
            while (!this.skippedPageQueue.IsEmpty)
            {
                UIntPtr start  = this.skippedPageQueue.Read();
                UIntPtr finish = this.skippedPageQueue.Read();
                InteriorPtrTable.ClearFirst(start, finish);
                PageManager.FreePageRange(start, finish);
                if (GC.remsetType == RemSetType.Cards)
                {
                    OffsetTable.ClearLast(PageTable.PageAddr(start),
                                          PageTable.PageAddr(finish) - 1);
                }
            }
            this.skippedPageQueue.Cleanup(true);
            // Release the queue standby pages
            UnmanagedPageList.ReleaseStandbyPages();
            // Update the ownership information for the copied data
            PageType destGeneration =
                (generation == MAX_GENERATION) ?
                MAX_GENERATION :
                (PageType)(generation + 1);
            UIntPtr limitPage =
                PageTable.Page(PageTable.PagePad(newLimitPtr));

            for (UIntPtr i = UIntPtr.Zero; i < limitPage; i++)
            {
                if (IsMyZombiePage(i))
                {
                    PageTable.SetType(i, (PageType)destGeneration);
                }
            }
        }
        private void AdvancePage()
        {
            if (this.stackPage != UIntPtr.Zero)
            {
                this.pageList.AddHead(this.stackPage);
            }
            if (UnmanagedPageList.pageCache.IsEmpty)
            {
                bool    fCleanPages = true;
                UIntPtr page        = PageManager.EnsurePages(null, (UIntPtr)1,
                                                              PageType.System,
                                                              ref fCleanPages);
                this.stackPage = PageTable.PageAddr(page);
            }
            else
            {
                this.stackPage = UnmanagedPageList.pageCache.RemoveHead();
            }
            this.stackBottom = UnmanagedPageList.FirstPageAddr(this.stackPage);
            this.stackPtr    = this.stackBottom;
            this.stackTop    = UnmanagedPageList.EndPageAddr(this.stackPage);
        }
        /*
         * Returns a pointer to the first object on the given page.
         * N.B. If called on a page with no allocated first object it may
         * return a pointer to the unused space token.
         */
        internal static UIntPtr First(UIntPtr page)
        {
            uint    offset   = PageTable.Extra(page);
            UIntPtr pageAddr = PageTable.PageAddr(page);
            UIntPtr currAddr;

            if (offset != OFFSET_NO_DATA)
            {
                currAddr = pageAddr + (offset - OFFSET_SKEW);
            }
            else
            {
                currAddr = Before(pageAddr);
                VTable.Assert(currAddr <= pageAddr);
                UIntPtr nextPageStart = PageTable.PagePad(currAddr + 1);
                while (currAddr < pageAddr)
                {
                    if (Allocator.IsAlignment(currAddr))
                    {
                        currAddr += UIntPtr.Size;
                    }
                    else if (BumpAllocator.IsUnusedSpace(currAddr))
                    {
                        currAddr = PageTable.PagePad(currAddr) + PreHeader.Size;
                    }
                    else
                    {
                        if (currAddr >= nextPageStart)
                        {
                            InteriorPtrTable.SetFirst(currAddr);
                            nextPageStart = PageTable.PagePad(currAddr + 1);
                        }
                        currAddr += ObjectSize(currAddr);
                    }
                }
            }
            currAddr = Allocator.SkipAlignment(currAddr);
            return(currAddr);
        }
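
        // Hedged sketch (hypothetical helper, not part of the original
        // source): counting the objects that start on a given page by walking
        // forward from First, mirroring the skip logic used by First and Find
        // above.
        internal static int HypotheticalCountObjectsOnPage(UIntPtr page)
        {
            UIntPtr limitAddr = PageTable.PageAddr(page + 1);
            UIntPtr currAddr  = First(page);
            int     count     = 0;

            while (currAddr < limitAddr)
            {
                if (Allocator.IsAlignment(currAddr))
                {
                    // Alignment token: skip one word.
                    currAddr += UIntPtr.Size;
                }
                else if (BumpAllocator.IsUnusedSpace(currAddr))
                {
                    // Unused-space token: the rest of the page is unallocated.
                    break;
                }
                else
                {
                    count++;
                    currAddr += ObjectSize(currAddr);
                }
            }
            return count;
        }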
        private static void visitAllObjects(ObjectLayout.ObjectVisitor visitor,
                                            UIntPtr lowPage, UIntPtr highPage)
        {
            for (UIntPtr first = lowPage; first < highPage; first++)
            {
                if (PageTable.IsMyGcPage(first))
                {
                    UIntPtr last = first + 1;
                    while (last < highPage)
                    {
                        if (!PageTable.IsMyGcPage(last))
                        {
                            break;
                        }
                        last++;
                    }
                    UIntPtr start = PageTable.PageAddr(first);
                    UIntPtr end   = PageTable.PageAddr(last);
                    GC.installedGC.VisitObjects(visitor, start, end);
                    first = last;
                }
            }
        }
        private static void SetPageTypeClean(UIntPtr startPage,
                                             UIntPtr pageCount,
                                             PageType newType)
        {
            UIntPtr *tableAddr   = (UIntPtr *)PageTable.PageAddr(startPage);
            UIntPtr *tableCursor = tableAddr + 1;
            UIntPtr  dirtyCount  = UIntPtr.Zero;
            UIntPtr  endPage     = startPage + pageCount;

            for (UIntPtr i = startPage; i < endPage; i++)
            {
                PageType pageType = PageTable.Type(i);
                if (pageType == PageType.UnusedDirty)
                {
                    PageTable.SetType(i, newType);
                    UIntPtr j = i + 1;
                    while (j < endPage &&
                           PageTable.Type(j) == PageType.UnusedDirty)
                    {
                        PageTable.SetType(j, newType);
                        j++;
                    }
                    UIntPtr dirtyStartAddr = PageTable.PageAddr(i);
                    UIntPtr dirtyEndAddr   = PageTable.PageAddr(j);
                    *tableCursor++ = dirtyStartAddr;
                    *tableCursor++ = dirtyEndAddr - dirtyStartAddr;
                    dirtyCount++;
                    i = j - 1;
                }
                else
                {
                    PageTable.SetType(i, newType);
                }
            }
            *tableAddr = dirtyCount;
            PageTable.SetProcess(startPage, pageCount);
        }
        internal static void ReclaimZombiePages(UIntPtr heapPageCount,
                                                int generation)
        {
            // Indicates whether we want to release pages back to the OS.
            bool    releasePages = true;
            UIntPtr reservePages = UIntPtr.Zero;

            if (generation == (int)nurseryGeneration)
            {
                // Don't bother for a nursery collection, since the
                // nursery is small.
                releasePages = false;
            }
            else
            {
                reservePages = heapPageCount;
                UIntPtr alreadyReservedPages = PageManager.TotalUnusedPages();
                if (reservePages > alreadyReservedPages)
                {
                    reservePages = reservePages - alreadyReservedPages;
                }
                else
                {
                    reservePages = UIntPtr.Zero;
                }
            }

            // MarkZombiePages updates the range for this generation, so we do
            // not need to take the union of the ranges of all target generations.
            UIntPtr minZombiePage = MinGenPage[generation];
            UIntPtr maxZombiePage = MaxGenPage[generation];

            for (UIntPtr i = minZombiePage; i <= maxZombiePage; i++)
            {
                if (IsMyZombiePage(i))
                {
                    UIntPtr startPage = i;
                    UIntPtr endPage   = startPage;
                    do
                    {
                        endPage++;
                    } while (IsMyZombiePage(endPage));
                    InteriorPtrTable.ClearFirst(startPage, endPage);
                    if (GC.remsetType == RemSetType.Cards)
                    {
                        OffsetTable.ClearLast(PageTable.PageAddr(startPage),
                                              PageTable.PageAddr(endPage) - 1);
                    }
                    if (!releasePages)
                    {
                        // Don't need to worry about giving the pages back
                        // Zero out the memory for reuse
                        UIntPtr pageCount = endPage - startPage;
                        PageManager.ReleaseUnusedPages(startPage,
                                                       pageCount,
                                                       false);
                    }
                    else if (reservePages > UIntPtr.Zero)
                    {
                        // Keep sufficient pages for the new nursery
                        UIntPtr pageCount = endPage - startPage;
                        if (pageCount > reservePages)
                        {
                            // Zero out the memory for reuse
                            PageManager.ReleaseUnusedPages(startPage,
                                                           reservePages,
                                                           false);
                            startPage += reservePages;
                            PageManager.FreePageRange(startPage, endPage);
                            reservePages = UIntPtr.Zero;
                        }
                        else
                        {
                            // Zero out the memory for reuse
                            PageManager.ReleaseUnusedPages(startPage,
                                                           pageCount,
                                                           false);
                            reservePages = reservePages - pageCount;
                        }
                    }
                    else
                    {
                        PageManager.FreePageRange(startPage, endPage);
                    }
                    i = endPage - 1;
                }
            }
        }
            internal unsafe void ScanHook(Object obj)
            {
                UIntPtr page = PageTable.Page(Magic.addressOf(obj));

                if (PageTable.Type(page) != SegregatedFreeList.SMALL_OBJ_PAGE)
                {
                    //VTable.DebugPrint("   not tagging because this isn't a small object page");
                    return;
                }
                SegregatedFreeList.PageHeader *ph =
                    (SegregatedFreeList.PageHeader *)PageTable.PageAddr(page);
                if (!new CoCoPageUserValue(ph->userValue).Marked)
                {
                    //VTable.DebugPrint("   not tagging because the page isn't marked\n");
                    return;
                }
                if (obj is EMU ||
                    obj is Monitor ||
                    obj is Thread ||
                    obj is ThreadHeaderQueue)
                {
                    CoCoBarrier.NotifyPin(Magic.addressOf(obj));
                    if (fVerbose)
                    {
                        VTable.DebugPrint("      $$ not tagging object because it's a monitor or EMU\n");
                    }
                    return;
                }
                if (doingCoCo)
                {
                    //VTable.DebugPrint("   not tagging object because doingCoCo\n");
                    return;
                }
                if (!CoCoBarrier.instance.ObjectIsNotCopied(obj))
                {
                    if (fVerbose)
                    {
                        VTable.DebugPrint("   not tagging object because object is already in the process of being copied.\n");
                    }
                    return;
                }

                if (fVerbose && obj.GetType() != typeof(Object))
                {
                    VTable.DebugPrint("    $$ tagging a non-System.Object; type is ");
                    VTable.DebugPrint(obj.GetType().Name);
                    VTable.DebugPrint("\n");
                }

                // REVIEW: I wish that there was an easier way of
                // doing this.
                Object copy;

                if (obj is Array)
                {
                    Array a = (Array)obj;
                    if (a.IsVector)
                    {
                        copy = GC.AllocateVector(a.vtable, a.Length);
                    }
                    else
                    {
                        copy = GC.AllocateArray(a.vtable, a.Rank, a.Length);
                    }
                }
                else if (obj is String)
                {
                    String s = (String)obj;
                    // REVIEW: this is not nice.
                    copy = GC.AllocateString(s.ArrayLength - 1);
                }
                else
                {
                    copy = GC.AllocateObject(obj.vtable);
                }

                VTable.Assert(ObjectLayout.Sizeof(copy)
                              == ObjectLayout.Sizeof(obj),
                              "Copy is not same size as original");

                spaceOverhead += ObjectLayout.Sizeof(copy);

                bool    first = !CoCoBarrier.instance.AnyTaggedForCopying;
                UIntPtr thisSpaceOverhead;

                if (CoCoBarrier.instance.TagObjectForCopy(obj, copy,
                                                          out thisSpaceOverhead))
                {
                    cnt++;
                    if (first)
                    {
                        lock (interlock) {
                            if (!wantCoCo && !doingCoCo)
                            {
                                wantCoCo = true;
                            }
                        }
                    }
                }

                spaceOverhead += thisSpaceOverhead;
            }
        private static UIntPtr FirstPtrFromInteriorTable(UIntPtr c)
        {
            UIntPtr cardAddr     = CardTable.CardAddr(c);
            UIntPtr nextCardAddr = CardTable.NextCardAddr(c);
            UIntPtr page         = PageTable.Page(cardAddr);
            UIntPtr pageAddr     = PageTable.PageAddr(page);
            UIntPtr currAddr;

            if (page == 0)
            {
                currAddr = PtrToNextObject(pageAddr,
                                           (UIntPtr)PreHeader.Size, nextCardAddr);
            }
            else
            {
                uint offset = PageTable.Extra(page);
                currAddr = UIntPtr.Zero;
                if (offset != InteriorPtrTable.OFFSET_NO_DATA)
                {
                    currAddr = pageAddr + (offset - InteriorPtrTable.OFFSET_SKEW);
                }

                // In general, we expect currAddr <= cardAddr. Or in the extreme
                // case, when the object starts from the page boundary,
                // currAddr - Object.HEADER_BYTES <= cardAddr. The contrary
                // cases have to be handled by searching previous pages.

                if (currAddr == UIntPtr.Zero ||
                    (currAddr > cardAddr &&
                     currAddr - PreHeader.Size > cardAddr))
                {
                    // Look at previous pages, in case an object on one of
                    // them spans into the current page. In that case, we
                    // should use that object's pointer.

                    currAddr = InteriorPtrTable.Last(page - 1);

                    // Usually, Last() returns a pointer before or at the page
                    // boundary. However, there is one exception: when an object
                    // ends exactly at the last byte of the previous page and the
                    // next object starts right at the page boundary (the first
                    // byte of the next page), the pointer to that next object is
                    // returned. Example found: objPtr = 3d09fa8, size = 60, page
                    // boundary = 3d0a000, next objPtr = 3d0a008. The returned
                    // pointer, 3d0a008, is then beyond the page boundary.

                    VTable.Assert(currAddr <= pageAddr ||
                                  currAddr - PreHeader.Size <= pageAddr,
                                  "object is expected before page or right at the beginning of it");
                }
            }
            VTable.Assert(currAddr < nextCardAddr, "object is expected before next card");

            while (currAddr < nextCardAddr)
            {
                if (Allocator.IsAlignment(currAddr))
                {
                    currAddr += UIntPtr.Size;
                }
                else if (BumpAllocator.IsUnusedSpace(currAddr))
                {
                    currAddr = PageTable.PagePad(currAddr) + PreHeader.Size;
                }
                else
                {
                    UIntPtr size = InteriorPtrTable.ObjectSize(currAddr);
                    if (currAddr + size - PreHeader.Size > cardAddr)
                    {
                        return(currAddr);
                    }
                    currAddr += size;
                }
            }
            VTable.Assert(false, "No obj ptr found by looking at interior table");
            return(UIntPtr.Zero);
        }
        // Interface with the compiler!

        internal static unsafe UIntPtr AllocateBig(UIntPtr numBytes,
                                                   uint alignment,
                                                   Thread currentThread)
        {
            // Pretenure Trigger
            pretenuredSinceLastFullGC += numBytes;
            if (pretenuredSinceLastFullGC > PretenureHardGCTrigger)
            {
                GC.InvokeMajorCollection(currentThread);
            }

            // Potentially Join a collection
            GC.CheckForNeededGCWork(currentThread);
            int     maxAlignmentOverhead = unchecked ((int)alignment) - UIntPtr.Size;
            UIntPtr pageCount            =
                PageTable.PageCount(numBytes + maxAlignmentOverhead);
            bool    fCleanPages = true;
            UIntPtr page        = PageManager.EnsurePages(currentThread, pageCount,
                                                          largeObjectGeneration,
                                                          ref fCleanPages);
            int unusedBytes =
                unchecked ((int)(PageTable.RegionSize(pageCount) - numBytes));
            int unusedCacheLines =
                unchecked ((int)(unusedBytes - maxAlignmentOverhead)) >> 5;
            int pageOffset = 0;

            if (unusedCacheLines != 0)
            {
                pageOffset = (bigOffset % unusedCacheLines) << 5;
                bigOffset++;
            }
            UIntPtr pageStart = PageTable.PageAddr(page);

            for (int i = 0; i < pageOffset; i += UIntPtr.Size)
            {
                Allocator.WriteAlignment(pageStart + i);
            }
            UIntPtr unalignedStartAddr = pageStart + pageOffset;
            UIntPtr startAddr          =
                Allocator.AlignedAllocationPtr(unalignedStartAddr,
                                               pageStart + unusedBytes,
                                               alignment);

            pageOffset +=
                unchecked ((int)(uint)(startAddr - unalignedStartAddr));
            if (pageOffset < unusedBytes)
            {
                BumpAllocator.WriteUnusedMarker(pageStart + pageOffset + numBytes);
            }
            UIntPtr resultAddr = startAddr + PreHeader.Size;

            InteriorPtrTable.SetFirst(resultAddr);
            VTable.Assert(PageTable.Page(resultAddr) <
                          PageTable.Page(startAddr + numBytes - 1),
                          "Big object should cross pages");
            if (GC.remsetType == RemSetType.Cards)
            {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                OffsetTable.SetLast(resultAddr);
#endif
            }
            return(resultAddr);
        }