Example 1
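 // Zeroes memory ahead of the bump pointer in CACHE_SIZE-padded chunks
 // until an aligned allocation of 'bytes' fits below the zeroed limit;
 // returns UIntPtr.Zero when the request cannot fit within reserveLimit.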
 internal UIntPtr ExtendZero(UIntPtr bytes, uint alignment)
 {
     while (this.allocPtr + bytes <= this.reserveLimit)
     {
         UIntPtr newZeroedLimit =
             Util.Pad(this.allocPtr + bytes, CACHE_SIZE);
         Util.MemClear(this.zeroedLimit,
                       newZeroedLimit - this.zeroedLimit);
         this.zeroedLimit = newZeroedLimit;
         UIntPtr allocPtr =
             Allocator.AlignedAllocationPtr(this.allocPtr,
                                            newZeroedLimit,
                                            alignment);
         if (allocPtr + bytes <= newZeroedLimit)
         {
             this.allocPtr = allocPtr + bytes;
             return(allocPtr + PreHeader.Size);
         }
         else
         {
             this.allocPtr = allocPtr;
         }
     }
     return(UIntPtr.Zero);
 }
Example 2
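        // Fast-path bump allocation within the reserved region.  Returns
        // UIntPtr.Zero when the object would cross reserveLimit, and zeroes
        // the object's memory on demand when it extends past zeroedLimit.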
        internal UIntPtr AllocateFast(UIntPtr bytes, uint alignment)
        {
#if SINGULARITY_KERNEL
#if ENSURE_ALLOCATION_ALLOWED
            BumpAllocator.EnsureAllocationAllowed();
#endif
#endif
            UIntPtr allocPtr =
                Allocator.AlignedAllocationPtr(this.allocPtr,
                                               this.reserveLimit,
                                               alignment);
            UIntPtr objectLimitPtr = allocPtr + bytes;
            if (objectLimitPtr > this.reserveLimit)
            {
                this.allocPtr = allocPtr;
                return(UIntPtr.Zero);
            }
            if (objectLimitPtr > this.zeroedLimit)
            {
                Util.MemClear(allocPtr, bytes);
                this.zeroedLimit = objectLimitPtr;
            }
            this.allocPtr = objectLimitPtr;
            return(allocPtr + PreHeader.Size);
        }
Example 3
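        // Slow path: abandons the current region, reserves fresh pages for
        // the page- and alignment-padded request, sets up the new allocation
        // region, zeroes only the object when the pages are not already
        // clean, and registers the object as the first on its page.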
        private UIntPtr FreshAlloc(UIntPtr bytes, uint alignment,
                                   Thread currentThread)
        {
#if SINGULARITY_KERNEL
            Kernel.Waypoint(702);
#endif
            this.Truncate();
            UIntPtr paddedBytes =
                PageTable.PagePad(bytes + alignment - UIntPtr.Size);
            BaseCollector.IncrementNewBytesSinceGC(paddedBytes);
            UIntPtr pages       = PageTable.PageCount(paddedBytes);
            bool    fCleanPages = CLEAR_POOL_PAGES();
            // We may eventually want to ask for specific pages
            // between asking if any pages are reusable and asking the
            // OS for any possible page.
            UIntPtr startPage =
                PageManager.EnsurePages(currentThread, pages, this.pageType,
                                        ref fCleanPages);
            UIntPtr startAddr = PageTable.PageAddr(startPage);
            UIntPtr limitAddr = PageTable.PageAddr(startPage + pages);
            startAddr = Allocator.AlignedAllocationPtr(startAddr, limitAddr,
                                                       alignment);
            this.allocNew = startAddr;
            this.allocPtr = startAddr + bytes;
            if (fCleanPages)
            {
                this.zeroedLimit = limitAddr;
            }
            else
            {
                Util.MemClear(startAddr, bytes);
                this.zeroedLimit = this.allocPtr;
            }
            this.reserveLimit = limitAddr;
            UIntPtr resultAddr = startAddr + PreHeader.Size;
            InteriorPtrTable.SetFirst(resultAddr);

#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
            if (GC.remsetType == RemSetType.Cards)
            {
                UIntPtr nextPageAddr = startAddr + PageTable.PageSize;
                VTable.Assert(resultAddr < nextPageAddr);
                if (this.allocPtr > nextPageAddr)
                {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                    OffsetTable.SetLast(resultAddr);
#endif
                }
            }
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint(703);
#endif
            return(resultAddr);
        }
Example 4
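        // Bump-allocates from the static bootstrap region bounded by
        // allocPtr and limitPtr; breaks into the debugger when
        // BootstrapMemory is exhausted.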
        private static UIntPtr AllocateBlock(UIntPtr bytes, uint alignment)
        {
            UIntPtr startPtr =
                Allocator.AlignedAllocationPtr(allocPtr, limitPtr, alignment);

            allocPtr = startPtr + bytes;
            if (allocPtr > limitPtr)
            {
                VTable.DebugPrint("Out of BootstrapMemory");
                VTable.DebugBreak();
            }
            return(startPtr + PreHeader.Size);
        }
Example 5
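        // Fast-path bump allocation restricted to memory that has already
        // been zeroed.  Returns UIntPtr.Zero when the object would cross
        // zeroedLimit, leaving the caller to zero or reserve more space.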
        internal UIntPtr AllocateFast(UIntPtr bytes, uint alignment)
        {
#if SINGULARITY_KERNEL
#if ENSURE_ALLOCATION_ALLOWED
            // BumpAllocator.EnsureAllocationAllowed();
#endif
#endif
            UIntPtr allocPtr =
                Allocator.AlignedAllocationPtr(this.allocPtr, this.zeroedLimit,
                                               alignment);
            if (allocPtr + bytes > this.zeroedLimit)
            {
                this.allocPtr = allocPtr;
                return(UIntPtr.Zero);
            }
            this.allocPtr = allocPtr + bytes;
            return(allocPtr + PreHeader.Size);
        }
Example 6
        // Interface with the compiler!
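        // Allocates a large object on its own run of pages.  May trigger or
        // join a collection, staggers the start address across unused cache
        // lines to reduce cache conflicts, writes alignment and unused-space
        // markers around the object, and records it in the interior-pointer
        // table (and, for card remsets, the offset table).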

        internal static unsafe UIntPtr AllocateBig(UIntPtr numBytes,
                                                   uint alignment,
                                                   Thread currentThread)
        {
            // Pretenure Trigger
            pretenuredSinceLastFullGC += numBytes;
            if (pretenuredSinceLastFullGC > PretenureHardGCTrigger)
            {
                GC.InvokeMajorCollection(currentThread);
            }

            // Potentially Join a collection
            GC.CheckForNeededGCWork(currentThread);
            int     maxAlignmentOverhead = unchecked ((int)alignment) - UIntPtr.Size;
            UIntPtr pageCount            =
                PageTable.PageCount(numBytes + maxAlignmentOverhead);
            bool    fCleanPages = true;
            UIntPtr page        = PageManager.EnsurePages(currentThread, pageCount,
                                                          largeObjectGeneration,
                                                          ref fCleanPages);
            int unusedBytes =
                unchecked ((int)(PageTable.RegionSize(pageCount) - numBytes));
            int unusedCacheLines =
                unchecked ((int)(unusedBytes - maxAlignmentOverhead)) >> 5;
            int pageOffset = 0;

            if (unusedCacheLines != 0)
            {
                pageOffset = (bigOffset % unusedCacheLines) << 5;
                bigOffset++;
            }
            UIntPtr pageStart = PageTable.PageAddr(page);

            for (int i = 0; i < pageOffset; i += UIntPtr.Size)
            {
                Allocator.WriteAlignment(pageStart + i);
            }
            UIntPtr unalignedStartAddr = pageStart + pageOffset;
            UIntPtr startAddr          =
                Allocator.AlignedAllocationPtr(unalignedStartAddr,
                                               pageStart + unusedBytes,
                                               alignment);

            pageOffset +=
                unchecked ((int)(uint)(startAddr - unalignedStartAddr));
            if (pageOffset < unusedBytes)
            {
                BumpAllocator.WriteUnusedMarker(pageStart + pageOffset + numBytes);
            }
            UIntPtr resultAddr = startAddr + PreHeader.Size;

            InteriorPtrTable.SetFirst(resultAddr);
            VTable.Assert(PageTable.Page(resultAddr) <
                          PageTable.Page(startAddr + numBytes - 1),
                          "Big object should cross pages");
            if (GC.remsetType == RemSetType.Cards)
            {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                OffsetTable.SetLast(resultAddr);
#endif
            }
            return(resultAddr);
        }
Example 7
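        // Tries to grow the current allocation region in place by reserving
        // the pages immediately following reserveLimit.  Returns UIntPtr.Zero
        // if the pages cannot be reserved or a collection has invalidated the
        // region; otherwise extends the region, zeroes as required, and
        // updates the interior-pointer and offset tables.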
        private UIntPtr ExtendAlloc(UIntPtr bytes, uint alignment,
                                    Thread currentThread)
        {
            if (this.reserveLimit == UIntPtr.Zero)
            {
                return(UIntPtr.Zero);
            }
#if SINGULARITY_KERNEL
            Kernel.Waypoint(700);
#endif
            UIntPtr neededBytes =
                bytes +                              // Bytes required for object +
                alignment - UIntPtr.Size -           // worst-case alignment overhead -
                (this.reserveLimit - this.allocPtr); // bytes already available
            UIntPtr paddedNeed  = PageTable.PagePad(neededBytes);
            UIntPtr pageCount   = PageTable.PageCount(paddedNeed);
            UIntPtr startPage   = PageTable.Page(this.reserveLimit);
            bool    fCleanPages = CLEAR_POOL_PAGES();
            bool    gotPages    =
                PageManager.TryReserveUnusedPages(currentThread, startPage,
                                                  pageCount, this.pageType,
                                                  ref fCleanPages);
            if (!gotPages)
            {
                // We can't indiscriminately ask for more memory if we have
                // unused pages already available.
                return(UIntPtr.Zero);
            }
            if (this.reserveLimit == UIntPtr.Zero)
            {
                // A collection occurred, so there is no region to extend
                PageManager.ReleaseUnusedPages(startPage, pageCount,
                                               fCleanPages);
                return(UIntPtr.Zero);
            }
            BaseCollector.IncrementNewBytesSinceGC(paddedNeed);
            this.allocNew = this.reserveLimit;
            // Pad alignment space if necessary.  NB: a prior call to
            // AllocateFast may have started generating alignment tokens,
            // but we may need to finish the job here if the residual space
            // was insufficient for a multi-word alignment.
            UIntPtr oldReserveLimit = this.reserveLimit;
            this.reserveLimit += paddedNeed;
            this.allocPtr      =
                Allocator.AlignedAllocationPtr(this.allocPtr,
                                               this.reserveLimit,
                                               alignment);
            if (this.zeroedLimit < this.allocPtr)
            {
                this.zeroedLimit = this.allocPtr;
            }
            UIntPtr objectAddr = this.allocPtr + PreHeader.Size;
            this.allocPtr += bytes;
            if (fCleanPages)
            {
                if (this.zeroedLimit < oldReserveLimit)
                {
                    Util.MemClear(this.zeroedLimit,
                                  oldReserveLimit - this.zeroedLimit);
                }
                this.zeroedLimit = this.reserveLimit;
            }
            else
            {
                Util.MemClear(this.zeroedLimit,
                              this.allocPtr - this.zeroedLimit);
                this.zeroedLimit = this.allocPtr;
            }
            VTable.Assert(this.allocPtr <= this.zeroedLimit);
            VTable.Assert(PageTable.PageAligned(this.reserveLimit));
            if (objectAddr >= oldReserveLimit)
            {
                // Object is first on new page
                InteriorPtrTable.SetFirst(objectAddr);
            }
            else if (objectAddr + bytes < this.reserveLimit)
            {
                // The object does not end on new limit

                // N.B. The next object may not be allocated at exactly
                // (objectAddr + bytes) due to alignment considerations.  It
                // also might not ever be allocated.  These cases are handled
                // by InteriorPtrTable.First skipping over alignment tokens
                // and callers of First watching out for unused space tokens.

                InteriorPtrTable.SetFirst(objectAddr + bytes);
            }
            // We know an object is the last one on a page only when it
            // extends through that page into the next one.  Otherwise it
            // lies entirely before the page boundary, and we cannot be
            // sure whether it is the last object or not.  So we record
            // only such a spanning object for the last card in the page.
            // Many objects may be omitted by this coarse-grained
            // recording, but we should be able to update the offset table
            // incrementally and find them.  I believe this is a better
            // choice than recording every object in the offset table,
            // because most objects simply die and never need to be
            // recorded.

#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
            if (GC.remsetType == RemSetType.Cards)
            {
                if (objectAddr < oldReserveLimit &&
                    allocPtr + bytes > oldReserveLimit)
                {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                    OffsetTable.SetLast(objectAddr);
#endif
                }
            }
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint(701);
#endif
            return(objectAddr);
        }