Example #1
        internal static void ReclaimZombiePages(UIntPtr heapPageCount,
                                                int generation)
        {
            // Indicates whether we want to release pages back to the OS.
            bool    releasePages = true;
            UIntPtr reservePages = UIntPtr.Zero;

            if (generation == (int)nurseryGeneration)
            {
                // Don't bother for a nursery collection, since the
                // nursery is small.
                releasePages = false;
            }
            else
            {
                reservePages = heapPageCount;
                UIntPtr alreadyReservedPages = PageManager.TotalUnusedPages();
                if (reservePages > alreadyReservedPages)
                {
                    reservePages = reservePages - alreadyReservedPages;
                }
                else
                {
                    reservePages = UIntPtr.Zero;
                }
            }

            // MarkZombiePages updates the range for this generation, so we do
            // not need to take the union of the ranges of all target generations.
            UIntPtr minZombiePage = MinGenPage[generation];
            UIntPtr maxZombiePage = MaxGenPage[generation];

            for (UIntPtr i = minZombiePage; i <= maxZombiePage; i++)
            {
                if (IsMyZombiePage(i))
                {
                    UIntPtr startPage = i;
                    UIntPtr endPage   = startPage;
                    do
                    {
                        endPage++;
                    } while (IsMyZombiePage(endPage));
                    InteriorPtrTable.ClearFirst(startPage, endPage);
                    if (GC.remsetType == RemSetType.Cards)
                    {
                        OffsetTable.ClearLast(PageTable.PageAddr(startPage),
                                              PageTable.PageAddr(endPage) - 1);
                    }
                    if (!releasePages)
                    {
                        // No need to give the pages back to the OS;
                        // just zero out the memory for reuse.
                        UIntPtr pageCount = endPage - startPage;
                        PageManager.ReleaseUnusedPages(startPage,
                                                       pageCount,
                                                       false);
                    }
                    else if (reservePages > UIntPtr.Zero)
                    {
                        // Keep sufficient pages for the new nursery
                        UIntPtr pageCount = endPage - startPage;
                        if (pageCount > reservePages)
                        {
                            // Zero out the memory for reuse
                            PageManager.ReleaseUnusedPages(startPage,
                                                           reservePages,
                                                           false);
                            startPage += reservePages;
                            PageManager.FreePageRange(startPage, endPage);
                            reservePages = UIntPtr.Zero;
                        }
                        else
                        {
                            // Zero out the memory for reuse
                            PageManager.ReleaseUnusedPages(startPage,
                                                           pageCount,
                                                           false);
                            reservePages = reservePages - pageCount;
                        }
                    }
                    else
                    {
                        PageManager.FreePageRange(startPage, endPage);
                    }
                    i = endPage - 1;
                }
            }
        }
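The non-nursery branch above reserves enough unused pages for the next nursery before releasing the remainder to the OS. A minimal stand-alone sketch of that clamped subtraction, using plain ulong values in place of UIntPtr page counts (the method name is illustrative, not part of the collector):

        // Keep (heapPageCount - alreadyReservedPages) pages in reserve,
        // clamped at zero so the unsigned subtraction cannot wrap.
        static ulong ComputeReservePages(ulong heapPageCount,
                                         ulong alreadyReservedPages)
        {
            return heapPageCount > alreadyReservedPages
                   ? heapPageCount - alreadyReservedPages
                   : 0UL;
        }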
Example #2
        // Interface with the compiler!

        internal static unsafe UIntPtr AllocateBig(UIntPtr numBytes,
                                                   uint alignment,
                                                   Thread currentThread)
        {
            // Pretenure Trigger
            pretenuredSinceLastFullGC += numBytes;
            if (pretenuredSinceLastFullGC > PretenureHardGCTrigger)
            {
                GC.InvokeMajorCollection(currentThread);
            }

            // Potentially Join a collection
            GC.CheckForNeededGCWork(currentThread);
            int     maxAlignmentOverhead = unchecked ((int)alignment) - UIntPtr.Size;
            UIntPtr pageCount            =
                PageTable.PageCount(numBytes + maxAlignmentOverhead);
            bool    fCleanPages = true;
            UIntPtr page        = PageManager.EnsurePages(currentThread, pageCount,
                                                          largeObjectGeneration,
                                                          ref fCleanPages);
            int unusedBytes =
                unchecked ((int)(PageTable.RegionSize(pageCount) - numBytes));
            int unusedCacheLines =
                unchecked ((int)(unusedBytes - maxAlignmentOverhead)) >> 5;
            int pageOffset = 0;

            if (unusedCacheLines != 0)
            {
                pageOffset = (bigOffset % unusedCacheLines) << 5;
                bigOffset++;
            }
            UIntPtr pageStart = PageTable.PageAddr(page);

            for (int i = 0; i < pageOffset; i += UIntPtr.Size)
            {
                Allocator.WriteAlignment(pageStart + i);
            }
            UIntPtr unalignedStartAddr = pageStart + pageOffset;
            UIntPtr startAddr          =
                Allocator.AlignedAllocationPtr(unalignedStartAddr,
                                               pageStart + unusedBytes,
                                               alignment);

            pageOffset +=
                unchecked ((int)(uint)(startAddr - unalignedStartAddr));
            if (pageOffset < unusedBytes)
            {
                BumpAllocator.WriteUnusedMarker(pageStart + pageOffset + numBytes);
            }
            UIntPtr resultAddr = startAddr + PreHeader.Size;

            InteriorPtrTable.SetFirst(resultAddr);
            VTable.Assert(PageTable.Page(resultAddr) <
                          PageTable.Page(startAddr + numBytes - 1),
                          "Big object should cross pages");
            if (GC.remsetType == RemSetType.Cards)
            {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                OffsetTable.SetLast(resultAddr);
#endif
            }
            return(resultAddr);
        }
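The offset computation above staggers the start of successive big objects across 32-byte cache lines within the unused slack of their page run, so they do not all begin at the same cache-line-aligned address. A minimal sketch of that arithmetic with plain int values (the counter and method name are illustrative, not part of the allocator):

        static int nextBigOffset;   // stands in for the shared bigOffset counter

        static int StaggerOffset(int unusedBytes, int maxAlignmentOverhead)
        {
            // Whole 32-byte cache lines of slack available for staggering.
            int unusedCacheLines = (unusedBytes - maxAlignmentOverhead) >> 5;
            if (unusedCacheLines == 0)
            {
                return 0;
            }
            // Rotate through the available cache lines on successive calls.
            return (nextBigOffset++ % unusedCacheLines) << 5;
        }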
Example #3
        internal static unsafe bool AccumulateRCUpdates(String methodName,
                                                        int methodIndex,
                                                        uint maxIndex,
                                                        AcctRecord rec)
        {
            VTable.Assert(RCCollector.ProfilingMode,
                          @"RCCollector.ProfilingMode");

            // Return if the page table hasn't been set up yet.
            if (PageTable.pageTableCount == UIntPtr.Zero)
            {
                return(false);
            }

            if (methods == null)
            {
                // Allocate up front storage for the accounting records.
                //
                // This is requisitioned directly from the memory
                // manager. Care should be taken to ensure that
                // AccumulateRCUpdates does not indirectly call
                // methods that may have compiler-inserted RC updates.
                VTable vtable =
                    ((RuntimeType)typeof(AcctRecord[])).classVtable;
                UIntPtr size =
                    ObjectLayout.ArraySize(vtable, maxIndex + 1);

                BumpAllocator profileData =
                    new BumpAllocator(PageType.NonGC);
                UIntPtr profileDataStart =
                    MemoryManager.AllocateMemory(size);
                profileData.SetRange(profileDataStart, size);
                PageManager.SetStaticDataPages(profileDataStart, size);

                methods =
                    (AcctRecord[])Allocate(ref profileData, vtable, size);
                VTable.Assert(methods != null,
                              @"methods != null");

                *(uint *)(Magic.addressOf(methods) +
                          PostHeader.Size) = maxIndex + 1;
            }

            VTable.Assert(methods.Length == maxIndex + 1,
                          @"methods.Length == maxIndex+1");

            if (methods[methodIndex].methodName == null)
            {
                methods[methodIndex].methodName = methodName;
            }
            // Not "methods[methodIndex].methodName == methodName"
            // because the equality operator carries compiler-inserted
            // RC updates!
            VTable.Assert(Magic.addressOf(methods[methodIndex].methodName) ==
                          Magic.addressOf(methodName),
                          @"Magic.addressOf(methods[methodIndex].methodName)" +
                          @" == Magic.addressOf(methodName)");

            methods[methodIndex] += rec;

            return(true);
        }
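The method above maintains a lazily allocated, fixed-size table of per-method accounting records, indexed by a compiler-assigned method index and accumulated with operator +=. A hypothetical managed analogue of that shape is sketched below; the real code instead carves raw memory from the memory manager so that the profiling path does not itself execute compiler-inserted RC updates (all names and types here are illustrative):

        struct AcctRecordSketch
        {
            internal string methodName;
            internal ulong  updateCount;

            public static AcctRecordSketch operator +(AcctRecordSketch a,
                                                      AcctRecordSketch b)
            {
                a.updateCount += b.updateCount;
                return a;
            }
        }

        static AcctRecordSketch[] methodTable;

        static void Accumulate(string methodName, int methodIndex,
                               uint maxIndex, AcctRecordSketch rec)
        {
            if (methodTable == null)
            {
                // Lazily allocate one record per compiler-assigned index.
                methodTable = new AcctRecordSketch[maxIndex + 1];
            }
            if (methodTable[methodIndex].methodName == null)
            {
                methodTable[methodIndex].methodName = methodName;
            }
            methodTable[methodIndex] += rec;
        }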
Example #4
        void VerifyPages(ObjectLayout.ObjectVisitor objectVisitor)
        {
            UIntPtr page = UIntPtr.Zero;

            while (page < PageTable.pageTableCount)
            {
                UIntPtr startPage = page;
                if (!PageTable.IsMyPage(startPage))
                {
                    page++;
                    continue;
                }
                PageType pageType    = PageTable.Type(page);
                uint     pageProcess = PageTable.Process(page);
                do
                {
                    page++;
                } while (page < PageTable.pageTableCount &&
                         PageTable.Type(page) == pageType &&
                         PageTable.Process(page) == pageProcess);
                UIntPtr endPage = page;
                switch (pageType)
                {
                case PageType.Unallocated:
                case PageType.Unknown:
                case PageType.Shared: {
                    // The region does not belong to us, so there is
                    // nothing to check.
                    break;
                }

                case PageType.UnusedClean:
                case PageType.UnusedDirty: {
                    PageManager.VerifyUnusedRegion(startPage, endPage);
                    break;
                }

                case PageType.System: {
                    // We have looked at the region, but it is off-limits
                    // for the verifier.
                    break;
                }

                case PageType.NonGC: {
                    // Since there may be non-objects in the static data
                    // pages, we cannot apply the heapVerifier to the
                    // region.
                    break;
                }

                case PageType.Stack: {
                    // The page contains (part of) the activation record
                    // stack for one or more threads.
                    break;
                }

                default: {
                    // We have found a data region
                    VTable.Assert(PageTable.IsGcPage(startPage));
                    UIntPtr startAddr = PageTable.PageAddr(startPage);
                    UIntPtr endAddr   = PageTable.PageAddr(endPage);
                    GC.installedGC.VisitObjects(objectVisitor,
                                                startAddr, endAddr);
                    break;
                }
                }
            }
        }
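VerifyPages walks the page table in runs: it advances until the page type or the owning process changes, then dispatches on the type of the whole run. The run-finding step in isolation, over plain arrays (a sketch with illustrative types, not the real PageTable API):

        // Given a page index, return the index one past the last page that
        // shares the same (type, process) pair with the starting page.
        static int EndOfRun(byte[] types, uint[] owners, int page)
        {
            byte type  = types[page];
            uint owner = owners[page];
            do
            {
                page++;
            } while (page < types.Length &&
                     types[page] == type &&
                     owners[page] == owner);
            return page;
        }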
Example #5
        private unsafe void FindDestinationArea(ref UIntPtr destPage,
                                                ref UIntPtr destCursor,
                                                ref UIntPtr destLimit,
                                                UIntPtr objectSize,
                                                PageType destGeneration)
        {
            VTable.Assert(IsValidGeneration((int)destGeneration));

            UIntPtr cursorPage  = PageTable.Page(destCursor);
            UIntPtr limitPage   = PageTable.Page(destLimit);
            UIntPtr pageAddr    = PageTable.PagePad(destCursor);
            UIntPtr testPage    = limitPage;
            UIntPtr endTestPage = PageTable.PageCount(destCursor + objectSize);

            if (destCursor > UIntPtr.Zero &&
                IsMyZombiePage(PageTable.Page(destCursor - 1)))
            {
                VTable.Assert(destPage == limitPage);
                while (IsMyZombiePage(testPage) ||
                       (testPage < endTestPage &&
                        (PageTable.IsUnusedPage(testPage))))
                {
                    testPage++;
                }
                if (testPage >= endTestPage)
                {
                    // We can expand the current region
                    endTestPage = testPage;
                    VTable.Assert(PageTable.PageAligned(destLimit));
                    InteriorPtrTable.ClearFirst(limitPage, testPage);
                    if (GC.remsetType == RemSetType.Cards)
                    {
                        OffsetTable.ClearLast(PageTable.PageAddr(limitPage),
                                              PageTable.PageAddr(testPage) - 1);
                    }
                    while (limitPage != endTestPage)
                    {
                        VTable.Assert(PageTable.IsUnusedPage(destPage));
                        do
                        {
                            destPage++;
                        } while (destPage < endTestPage &&
                                 PageTable.IsUnusedPage(destPage));
                        bool fCleanPages = true;
                        bool status      =
                            PageManager.TryReserveUnusedPages(null, limitPage,
                                                              destPage - limitPage,
                                                              nurseryGeneration,
                                                              ref fCleanPages);
                        VTable.Assert(status);
                        MakeZombiePages(limitPage, destPage - limitPage,
                                        destGeneration);
                        while (destPage < endTestPage &&
                               IsMyZombiePage(destPage))
                        {
                            destPage++;
                        }
                        limitPage = destPage;
                    }
                    destLimit = PageTable.PageAddr(limitPage);
                    return;
                }
            }
            if (destCursor != pageAddr)
            {
                cursorPage++;
            }
            if (cursorPage != limitPage)
            {
                this.RegisterSkippedPages(cursorPage, limitPage);
            }
            // Find new region big enough to contain object
            UIntPtr neededPages = PageTable.PageCount(objectSize);
            UIntPtr prefixPage;

            while (true)
            {
                do
                {
                    destPage++;
                } while (!IsMyZombiePage(destPage));
                cursorPage = destPage;
                prefixPage = cursorPage;
                do
                {
                    destPage++;
                } while (IsMyZombiePage(destPage));
                limitPage = destPage;
                if (neededPages <= limitPage - cursorPage)
                {
                    break;
                }
                // Check for following unused pages
                endTestPage = cursorPage + neededPages;
                VTable.Assert(endTestPage <= PageTable.pageTableCount);
                while (destPage < endTestPage &&
                       (PageTable.IsUnusedPage(destPage) ||
                        (IsMyZombiePage(destPage))))
                {
                    destPage++;
                }
                if (destPage == endTestPage)
                {
                    break;
                }
                // Check for preceding unused pages
                if (destPage >= neededPages)
                {
                    endTestPage = destPage - neededPages;
                    prefixPage  = cursorPage - 1;
                    while (prefixPage >= UIntPtr.Zero &&
                           PageTable.IsUnusedPage(prefixPage))
                    {
                        prefixPage--;
                    }
                    prefixPage++;
                    if (prefixPage == endTestPage)
                    {
                        break;
                    }
                }
                // Register any skipped regions of pages
                this.RegisterSkippedPages(cursorPage, limitPage);
                while (limitPage < destPage)
                {
                    VTable.Assert(PageTable.IsUnusedPage(limitPage));
                    do
                    {
                        limitPage++;
                    } while (limitPage < destPage &&
                             PageTable.IsUnusedPage(limitPage));
                    cursorPage = limitPage;
                    while (limitPage < destPage && IsMyZombiePage(limitPage))
                    {
                        limitPage++;
                    }
                    if (cursorPage != limitPage)
                    {
                        this.RegisterSkippedPages(cursorPage, limitPage);
                    }
                }
            }
            // We found an area big enough.  Commit the pre- and
            // postfix areas of unused pages
            if (prefixPage != cursorPage)
            {
                bool fCleanPages = true;
                bool status      =
                    PageManager.TryReserveUnusedPages(null, prefixPage,
                                                      cursorPage - prefixPage,
                                                      nurseryGeneration,
                                                      ref fCleanPages);
                VTable.Assert(status);
                MakeZombiePages(prefixPage, cursorPage - prefixPage,
                                destGeneration);
            }
            while (destPage != limitPage)
            {
                // Mark the region of unused pages as fromspace
                UIntPtr unusedPage = limitPage;
                VTable.Assert(PageTable.IsUnusedPage(unusedPage));
                do
                {
                    unusedPage++;
                } while (unusedPage < destPage &&
                         PageTable.IsUnusedPage(unusedPage));
                bool fCleanPages = true;
                bool status      =
                    PageManager.TryReserveUnusedPages(null, limitPage,
                                                      unusedPage - limitPage,
                                                      nurseryGeneration,
                                                      ref fCleanPages);
                VTable.Assert(status);
                MakeZombiePages(limitPage, unusedPage - limitPage,
                                destGeneration);
                // Skip any sections of pages already marked as fromspace
                limitPage = unusedPage;
                while (limitPage < destPage && IsMyZombiePage(limitPage))
                {
                    limitPage++;
                }
            }
            destCursor = PageTable.PageAddr(prefixPage);
            destLimit  = PageTable.PageAddr(limitPage);
            // Take ownership of the new pages
            InteriorPtrTable.ClearFirst(prefixPage, limitPage);
            InteriorPtrTable.SetFirst(destCursor + PreHeader.Size);
            if (GC.remsetType == RemSetType.Cards)
            {
                OffsetTable.ClearLast(PageTable.PageAddr(prefixPage),
                                      PageTable.PageAddr(limitPage) - 1);
            }
        }
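At its core, the search loop above scans forward for the next run of this collector's zombie pages and checks whether the run, possibly extended by adjacent unused pages, can hold the needed page count. A reduced sketch of just that scan over a boolean page map, ignoring the skipped-page bookkeeping and the reservation calls (illustrative only):

        // Find the first run of zombie pages, starting after 'page', that
        // is at least 'neededPages' long; return its start index or -1.
        static int FindZombieRun(bool[] isZombie, int page, int neededPages)
        {
            while (true)
            {
                // Skip forward to the next zombie page.
                do
                {
                    page++;
                } while (page < isZombie.Length && !isZombie[page]);
                if (page >= isZombie.Length)
                {
                    return -1;
                }
                int start = page;
                // Extend the run while pages remain zombie.
                do
                {
                    page++;
                } while (page < isZombie.Length && isZombie[page]);
                if (page - start >= neededPages)
                {
                    return start;
                }
            }
        }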
Example #6
        // Use this method to free heap pages.
        // There must be no contention regarding ownership of the pages.
        internal static void FreePageRange(UIntPtr startPage,
                                           UIntPtr endPage)
        {
            if (VTable.enableDebugPrint)
            {
                VTable.DebugPrint("FreePageRange({0}, {1})\n",
                                  __arglist(startPage, endPage));
            }
            UIntPtr startAddr = PageTable.PageAddr(startPage);
            UIntPtr endAddr   = PageTable.PageAddr(endPage);
            UIntPtr rangeSize = endAddr - startAddr;

#if SINGULARITY
            // Singularity doesn't care if you free pages in different
            // chunks than you acquired them in
            MemoryManager.FreeMemory(startAddr, rangeSize);
#else
            // We cannot simply release the memory range, as MEM_RELEASE
            // requires the pointer to be the base address returned by the
            // original VirtualAlloc call and the size to be zero.
            UIntPtr regionAddr, regionSize;
            bool    fUsed = MemoryManager.QueryMemory(startAddr,
                                                      out regionAddr,
                                                      out regionSize);
            if (VTable.enableDebugPrint)
            {
                VTable.DebugPrint(" 1 Query({0}, {1}, {2}) -> {3}\n",
                                  __arglist(startAddr, regionAddr,
                                            regionSize, fUsed));
            }
            VTable.Assert(fUsed, "Memory to be freed isn't used");
            UIntPtr endRegion = regionAddr + regionSize;
            if (regionAddr < startAddr)
            {
                // startAddr is in the middle of an allocation region -> skip
                if (endRegion >= endAddr)
                {
                    // [startAddr, endAddr] is fully contained in a region
                    PageManager.Clear(startAddr, endAddr - startAddr);
                    return;
                }
                // Part of the address range falls into the next region
                PageManager.Clear(startAddr, endRegion - startAddr);
                fUsed = MemoryManager.QueryMemory(endRegion,
                                                  out regionAddr,
                                                  out regionSize);
                if (VTable.enableDebugPrint)
                {
                    VTable.DebugPrint(" 2 Query({0}, {1}, {2}) -> {3}\n",
                                      __arglist(endRegion, regionAddr,
                                                regionSize, fUsed));
                }
                VTable.Assert(fUsed, "Area to be freed isn't used");
                endRegion = regionAddr + regionSize;
            }
            // [regionAddr, endRegion] is contained in [startAddr, endAddr]
            while (endRegion < endAddr)
            {
                if (VTable.enableDebugPrint)
                {
                    VTable.DebugPrint("Freeing region [{0}, {1}]\n",
                                      __arglist(regionAddr,
                                                regionAddr + regionSize));
                }
                SetUnallocatedPages(regionAddr, regionSize);
                MemoryManager.FreeMemory(regionAddr, regionSize);
                fUsed = MemoryManager.QueryMemory(endRegion,
                                                  out regionAddr,
                                                  out regionSize);
                if (VTable.enableDebugPrint)
                {
                    VTable.DebugPrint(" 3 Query({0}, {1}, {2}) -> {3}\n",
                                      __arglist(endRegion, regionAddr,
                                                regionSize, fUsed));
                }
                VTable.Assert(fUsed, "Region to be freed isn't used");
                endRegion = regionAddr + regionSize;
            }
            if (endRegion == endAddr)
            {
                if (VTable.enableDebugPrint)
                {
                    VTable.DebugPrint("Freeing final region [{0}, {1}]\n",
                                      __arglist(regionAddr,
                                                regionAddr + regionSize));
                }
                SetUnallocatedPages(regionAddr, regionSize);
                MemoryManager.FreeMemory(regionAddr, regionSize);
            }
            else
            {
                PageManager.Clear(regionAddr, endAddr - regionAddr);
            }
            if (VTable.enableDebugPrint)
            {
                VTable.DebugPrint("  --> FreePageRange({0},{1})\n",
                                  __arglist(startPage, endPage));
            }
#endif // SINGULARITY
        }
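On the non-Singularity path, the loop above releases only whole VirtualAlloc regions that lie entirely inside [startAddr, endAddr) and merely clears the partial overlaps at either end, because MEM_RELEASE accepts only a region's original base address. A reduced sketch of that per-region decision (the enum and names are illustrative; the real code discovers regions with MemoryManager.QueryMemory):

        enum RegionAction { None, ReleaseWhole, ClearOverlap }

        static RegionAction ClassifyRegion(ulong regionBase, ulong regionSize,
                                           ulong start, ulong end)
        {
            ulong regionEnd = regionBase + regionSize;
            if (regionEnd <= start || regionBase >= end)
            {
                return RegionAction.None;           // no overlap with the range
            }
            if (regionBase >= start && regionEnd <= end)
            {
                return RegionAction.ReleaseWhole;   // region fits inside the range
            }
            return RegionAction.ClearOverlap;       // partial overlap at an end
        }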
Example #7
        private UIntPtr ExtendAlloc(UIntPtr bytes, uint alignment,
                                    Thread currentThread)
        {
            if (this.reserveLimit == UIntPtr.Zero)
            {
                return(UIntPtr.Zero);
            }
#if SINGULARITY_KERNEL
            Kernel.Waypoint(700);
#endif
            UIntPtr neededBytes =
                bytes +                              // bytes required for the object
                alignment - UIntPtr.Size -           // + worst-case alignment overhead
                (this.reserveLimit - this.allocPtr); // - bytes already available
            UIntPtr paddedNeed  = PageTable.PagePad(neededBytes);
            UIntPtr pageCount   = PageTable.PageCount(paddedNeed);
            UIntPtr startPage   = PageTable.Page(this.reserveLimit);
            bool    fCleanPages = CLEAR_POOL_PAGES();
            bool    gotPages    =
                PageManager.TryReserveUnusedPages(currentThread, startPage,
                                                  pageCount, this.pageType,
                                                  ref fCleanPages);
            if (!gotPages)
            {
                // We can't indiscriminately ask for more memory if we have
                // unused pages already available.
                return(UIntPtr.Zero);
            }
            if (this.reserveLimit == UIntPtr.Zero)
            {
                // A collection occurred, so there is no region to extend
                PageManager.ReleaseUnusedPages(startPage, pageCount,
                                               fCleanPages);
                return(UIntPtr.Zero);
            }
            BaseCollector.IncrementNewBytesSinceGC(paddedNeed);
            this.allocNew = this.reserveLimit;
            // Pad alignment space if necessary.  NB: a prior call to
            // AllocateFast may have started generating alignment tokens,
            // but we may need to finish the job here if the residual space
            // was insufficient for a multi-word alignment.
            UIntPtr oldReserveLimit = this.reserveLimit;
            this.reserveLimit += paddedNeed;
            this.allocPtr      =
                Allocator.AlignedAllocationPtr(this.allocPtr,
                                               this.reserveLimit,
                                               alignment);
            if (this.zeroedLimit < this.allocPtr)
            {
                this.zeroedLimit = this.allocPtr;
            }
            UIntPtr objectAddr = this.allocPtr + PreHeader.Size;
            this.allocPtr += bytes;
            if (fCleanPages)
            {
                if (this.zeroedLimit < oldReserveLimit)
                {
                    Util.MemClear(this.zeroedLimit,
                                  oldReserveLimit - this.zeroedLimit);
                }
                this.zeroedLimit = this.reserveLimit;
            }
            else
            {
                Util.MemClear(this.zeroedLimit,
                              this.allocPtr - this.zeroedLimit);
                this.zeroedLimit = this.allocPtr;
            }
            VTable.Assert(this.allocPtr <= this.zeroedLimit);
            VTable.Assert(PageTable.PageAligned(this.reserveLimit));
            if (objectAddr >= oldReserveLimit)
            {
                // Object is first on new page
                InteriorPtrTable.SetFirst(objectAddr);
            }
            else if (objectAddr + bytes < this.reserveLimit)
            {
                // The object does not end on new limit

                // N.B. The next object may not be allocated at exactly
                // (objectAddr + bytes) due to alignment considerations.  It
                // also might not ever be allocated.  These cases are handled
                // by InteriorPtrTable.First skipping over alignment tokens
                // and callers of First watching out for unused space tokens.

                InteriorPtrTable.SetFirst(objectAddr + bytes);
            }
            // We know an object is the last one on a page only when it
            // extends through the end of that page into the next one.
            // Otherwise it lies entirely inside or outside the page, and
            // we cannot tell whether it is the last object or not.  So we
            // record only such boundary-crossing objects for the last card
            // of the page.  Many objects are omitted by this coarse-grained
            // recording, but we should be able to update the offset table
            // incrementally and find them later.  This seems a better
            // choice than simply recording every object in the offset
            // table, because most objects just die and never need to be
            // recorded.

#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
            if (GC.remsetType == RemSetType.Cards)
            {
                if (objectAddr < oldReserveLimit &&
                    allocPtr + bytes > oldReserveLimit)
                {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                    OffsetTable.SetLast(objectAddr);
#endif
                }
            }
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint(701);
#endif
            return(objectAddr);
        }
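The request size above is the object size plus worst-case alignment overhead, minus whatever space is still free between allocPtr and reserveLimit, rounded up to whole pages. A stand-alone version of that arithmetic, assuming a 4 KB page and that the remaining space is smaller than the padded request (otherwise the unsigned subtraction would wrap, just as the UIntPtr arithmetic above would); the names are illustrative:

        const ulong SketchPageSize = 4096;

        static ulong PagesNeeded(ulong bytes, ulong alignment,
                                 ulong allocPtr, ulong reserveLimit,
                                 ulong pointerSize)
        {
            ulong needed = bytes
                           + (alignment - pointerSize)   // worst-case alignment overhead
                           - (reserveLimit - allocPtr);  // space already on hand
            // Round up to whole pages.
            return (needed + SketchPageSize - 1) / SketchPageSize;
        }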