private unsafe void CompactHeapObjects(UIntPtr previousEnd) {
    while (!this.relocationQueue.IsEmpty) {
        UIntPtr sourceAddress = this.relocationQueue.Read();
        UIntPtr destinationAddress = this.relocationQueue.Read();
        UIntPtr runLength = this.relocationQueue.Read();
        if (previousEnd != destinationAddress) {
            VTable.Assert(previousEnd < destinationAddress);
            if (PageTable.Page(destinationAddress) !=
                PageTable.Page(previousEnd + PreHeader.Size)) {
                if (!PageTable.PageAligned(previousEnd)) {
                    UIntPtr pageLimit = PageTable.PagePad(previousEnd);
                    BumpAllocator.WriteUnusedMarker(previousEnd);
                    previousEnd += UIntPtr.Size;
                    Util.MemClear(previousEnd, pageLimit - previousEnd);
                }
                if (!PageTable.PageAligned(destinationAddress)) {
                    // This only happens before pinned objects and
                    // large objects
                    UIntPtr start = PageTable.PageAlign(destinationAddress);
                    VTable.Assert(previousEnd <= start);
                    while (start < destinationAddress) {
                        Allocator.WriteAlignment(start);
                        start += UIntPtr.Size;
                    }
                }
                UIntPtr objAddr = destinationAddress + PreHeader.Size;
                InteriorPtrTable.SetFirst(objAddr);
            } else {
                VTable.Assert(previousEnd < destinationAddress);
                UIntPtr start = previousEnd;
                while (start < destinationAddress) {
                    Allocator.WriteAlignment(start);
                    start += UIntPtr.Size;
                }
            }
        }
        Util.MemCopy(destinationAddress, sourceAddress, runLength);
        previousEnd = destinationAddress + runLength;
    }
    // Zero out the end of the allocation page
    if (!PageTable.PageAligned(previousEnd)) {
        UIntPtr pageLimit = PageTable.PagePad(previousEnd);
        Util.MemClear(previousEnd, pageLimit - previousEnd);
    }
    this.relocationQueue.Cleanup(true);
}
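// Illustration only: the relocation queue consumed above is a flat
// stream of three-word records (source, destination, runLength),
// produced in ascending destination order (the asserts on
// previousEnd depend on this).  The hypothetical sketch below
// replays such records against a plain byte array; SlideRuns and its
// parameters are not part of this runtime.
static void SlideRuns(byte[] heap, uint[] relocationRecords) {
    for (int i = 0; i < relocationRecords.Length; i += 3) {
        int src = (int) relocationRecords[i];
        int dst = (int) relocationRecords[i + 1];
        int len = (int) relocationRecords[i + 2];
        // Array.Copy preserves the source values even when the two
        // regions overlap, which is safe for the overlapping,
        // downward moves a sliding compactor performs.
        System.Array.Copy(heap, src, heap, dst, len);
    }
}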
public static unsafe Object CompilerAllocateGenerational(VTable vtable,
                                                         Thread currentThread,
                                                         UIntPtr bytes,
                                                         uint alignment)
{
    VTable.Assert((alignment == 4) ||
                  ((alignment == 8) && (UIntPtr.Size == 8)) ||
                  ((alignment == 8) && (PreHeader.Size == 4)),
                  "Unsupported object layout");
    VTable.Assert(UIntPtr.Size == PreHeader.Size,
                  "Unsupported preheader size");
    VTable.Assert(Util.IsAligned((uint) PreHeader.Size +
                                 (uint) PostHeader.Size, alignment),
                  "Unsupported header sizes");
    UIntPtr preHeaderAddr =
        MixinThread(currentThread).bumpAllocator.allocPtr;
    // allocPtr is only needed when alignment needs to be checked.  It
    // stores the beginning of the region to be allocated, including
    // a possible alignment token.
    UIntPtr allocPtr = UIntPtr.Zero;
    if ((alignment == 8) && (UIntPtr.Size == 4)) {
        allocPtr = preHeaderAddr;
        preHeaderAddr = Util.Pad(preHeaderAddr, (UIntPtr) alignment);
    }
    UIntPtr bound = preHeaderAddr + bytes;
    UIntPtr limitPtr = MixinThread(currentThread).bumpAllocator.zeroedLimit;
    if (bound <= limitPtr) {
        if ((alignment == 8) && (UIntPtr.Size == 4)) {
            // Store an alignment token at allocPtr.  This is where an
            // alignment token should go if 'preHeaderAddr' was
            // bumped...
            Allocator.WriteAlignment(allocPtr);
            // ... or the alignment token is where the object header
            // should be.  This code zeroes the object header regardless
            // and avoids a branch in this fast path.
            *(UIntPtr *) preHeaderAddr = UIntPtr.Zero;
        }
        MixinThread(currentThread).bumpAllocator.allocPtr = bound;
        UIntPtr objAddr = preHeaderAddr + PreHeader.Size;
        Object obj = Magic.fromAddress(objAddr);
        obj.vtable = vtable;
        return(obj);
    }
    return(GC.AllocateObjectNoInline(vtable, currentThread));
}
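// Illustration only: a minimal model of the fast path above for a
// single-threaded bump region.  'alloc', 'limit', and 'SlowPath' are
// hypothetical stand-ins for the per-thread BumpAllocator state
// (allocPtr, zeroedLimit) and the GC.AllocateObjectNoInline fallback.
static uint alloc;   // next free address in the zeroed region
static uint limit;   // end of the zeroed region

static uint BumpAllocate(uint bytes, uint alignment) {
    // Round the cursor up so the allocation is aligned, mirroring
    // Util.Pad(preHeaderAddr, alignment) above.
    uint start = (alloc + (alignment - 1)) & ~(alignment - 1);
    uint bound = start + bytes;
    if (bound > limit) {
        // Out of the fast-path region: refill it or collect.
        return SlowPath(bytes, alignment);
    }
    alloc = bound;   // the bump: the only mutation on the fast path
    return start;
}

static uint SlowPath(uint bytes, uint alignment) {
    // Placeholder only; the real runtime acquires a new region or
    // triggers a collection here.
    throw new System.OutOfMemoryException();
}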
internal static UIntPtr AlignedObjectPtr(UIntPtr startAddr,
                                         uint alignment)
{
    if (alignment > UIntPtr.Size) {
        uint alignmentMask = alignment - 1;
        int offset = PostHeader.Size;
        while (((startAddr + offset) & alignmentMask) != 0) {
            Allocator.WriteAlignment(startAddr - PreHeader.Size);
            startAddr += UIntPtr.Size;
        }
    }
    return(startAddr);
}
internal static UIntPtr AlignedAllocationPtr(UIntPtr startAddr,
                                             UIntPtr limitAddr,
                                             uint alignment)
{
    if (alignment > UIntPtr.Size) {
        uint alignmentMask = alignment - 1;
        int offset = PreHeader.Size + PostHeader.Size;
        while (((startAddr + offset) & alignmentMask) != 0 &&
               startAddr < limitAddr) {
            Allocator.WriteAlignment(startAddr);
            startAddr += UIntPtr.Size;
        }
    }
    return(startAddr);
}
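// Illustration only: both alignment helpers above advance the
// candidate address one word at a time until a header-relative
// offset becomes aligned, writing an alignment token into each
// skipped word so the heap stays parseable.  For power-of-two
// alignments the address they converge on can also be computed in
// closed form; AlignUp below is a hypothetical equivalent that omits
// the token writing (which is why the runtime keeps the loop).
static uint AlignUp(uint addr, uint headerOffset, uint alignment) {
    uint mask = alignment - 1;
    // Smallest addr' >= addr with ((addr' + headerOffset) & mask) == 0
    return ((addr + headerOffset + mask) & ~mask) - headerOffset;
}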
// Interface with the compiler!
internal static unsafe UIntPtr AllocateBig(UIntPtr numBytes,
                                           uint alignment,
                                           Thread currentThread)
{
    // Pretenure trigger
    pretenuredSinceLastFullGC += numBytes;
    if (pretenuredSinceLastFullGC > PretenureHardGCTrigger) {
        GC.InvokeMajorCollection(currentThread);
    }
    // Potentially join a collection
    GC.CheckForNeededGCWork(currentThread);
    int maxAlignmentOverhead = unchecked((int) alignment) - UIntPtr.Size;
    UIntPtr pageCount = PageTable.PageCount(numBytes + maxAlignmentOverhead);
    bool fCleanPages = true;
    UIntPtr page = PageManager.EnsurePages(currentThread, pageCount,
                                           largeObjectGeneration,
                                           ref fCleanPages);
    int unusedBytes =
        unchecked((int) (PageTable.RegionSize(pageCount) - numBytes));
    int unusedCacheLines =
        unchecked((int) (unusedBytes - maxAlignmentOverhead)) >> 5;
    int pageOffset = 0;
    if (unusedCacheLines != 0) {
        pageOffset = (bigOffset % unusedCacheLines) << 5;
        bigOffset++;
    }
    UIntPtr pageStart = PageTable.PageAddr(page);
    for (int i = 0; i < pageOffset; i += UIntPtr.Size) {
        Allocator.WriteAlignment(pageStart + i);
    }
    UIntPtr unalignedStartAddr = pageStart + pageOffset;
    UIntPtr startAddr =
        Allocator.AlignedAllocationPtr(unalignedStartAddr,
                                       pageStart + unusedBytes,
                                       alignment);
    pageOffset += unchecked((int) (uint) (startAddr - unalignedStartAddr));
    if (pageOffset < unusedBytes) {
        BumpAllocator.WriteUnusedMarker(pageStart + pageOffset + numBytes);
    }
    UIntPtr resultAddr = startAddr + PreHeader.Size;
    InteriorPtrTable.SetFirst(resultAddr);
    VTable.Assert(PageTable.Page(resultAddr) <
                  PageTable.Page(startAddr + numBytes - 1),
                  "Big object should cross pages");
    if (GC.remsetType == RemSetType.Cards) {
#if !DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
        OffsetTable.SetLast(resultAddr);
#endif
    }
    return(resultAddr);
}
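// Illustration only: the bigOffset arithmetic above spreads the
// start addresses of successive large objects across the spare
// 32-byte cache lines of a fresh page run, so they do not all begin
// at the same line offset.  A hypothetical standalone version of
// just that coloring step; colorCounter and NextColorOffset are not
// part of this runtime.
static int colorCounter;   // stand-in for bigOffset

static int NextColorOffset(int spareBytes) {
    int spareCacheLines = spareBytes >> 5;   // 32-byte lines
    if (spareCacheLines == 0) {
        return 0;                            // no slack to color with
    }
    // Rotate through the spare lines; << 5 converts lines to bytes.
    return (colorCounter++ % spareCacheLines) << 5;
}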
internal void CleanPinnedPages() {
    if (pinnedPageList == null || pinnedPageList.Count == 0) {
        return;
    }
    int pageIndex = 0;
    int limit = pinnedPageList.Count;
    UIntPtr lastPostPinnedAddr = UIntPtr.Zero;
    while (pageIndex < limit) {
        UIntPtr startPage = (UIntPtr) pinnedPageList[pageIndex];
        UIntPtr endPage = startPage + 1;
        pageIndex++;
        while (pageIndex < limit &&
               (UIntPtr) pinnedPageList[pageIndex] == endPage) {
            pageIndex++;
            endPage++;
        }
        // Zero out the area between the start of the page and
        // the first object on the page
        UIntPtr firstObjectAddr = FirstPinnedObjectAddr(startPage);
        UIntPtr firstAddr = firstObjectAddr - PreHeader.Size;
        UIntPtr trashAddr = PageTable.PageAddr(startPage);
        if (firstAddr < trashAddr) {
            // The first object "spills" into the previous page,
            // presumably by no more than HEADER_BYTES bytes
            VTable.Assert(
                PageTable.Page(firstAddr) == startPage - 1,
                "Semispace:RegisterPinnedReference:3");
            // Prepare to zero the preceding page unless it also
            // had pinned data on it
            trashAddr = PageTable.PageAddr(startPage - 1);
            InteriorPtrTable.ClearFirst(startPage - 1);
            if (trashAddr >= lastPostPinnedAddr) {
                // Need to mark the spilled-onto page live to
                // keep the spilled data around
                PageType fromSpaceType = PageTable.Type(startPage - 1);
                VTable.Assert(
                    PageTable.IsZombiePage(fromSpaceType),
                    "Semispace:RegisterPinnedReference:4");
                PageType toSpaceType =
                    PageTable.ZombieToLive(fromSpaceType);
                PageTable.SetType(startPage - 1, toSpaceType);
            }
        }
        // If lastPostPinnedAddr is on the page where trashAddr
        // starts, pinned data from the last run of pinned pages and
        // pinned data from this run are on the same page, so just
        // write alignment tokens from lastPostPinnedAddr to the
        // first pinned object.  Otherwise, write an unused marker
        // at lastPostPinnedAddr since the rest of its page must be
        // copied or dead.
        if (trashAddr < lastPostPinnedAddr) {
            trashAddr = lastPostPinnedAddr;
        } else {
            CleanPageTail(lastPostPinnedAddr);
        }
        if (GC.remsetType == RemSetType.Cards && trashAddr < firstAddr) {
            UIntPtr firstCard = CardTable.CardNo(trashAddr);
            UIntPtr lastCard = CardTable.CardNo(firstAddr - 1);
            if (!OffsetTable.NoObjectPtrToTheCard(firstCard)) {
                UIntPtr offset = OffsetTable.GetOffset(firstCard);
                UIntPtr objPtr = CardTable.CardAddr(firstCard) + offset;
                UIntPtr size = OffsetTable.ObjectSize(objPtr);
                VTable.Assert(
                    (objPtr + size - PreHeader.Size <= trashAddr) ||
                    (objPtr >= trashAddr),
                    "Object should be totally above or below trashAddr");
                if (objPtr >= trashAddr) {
                    // The offset in this card needs to be updated
                    OffsetTable.ClearCards(firstCard, firstCard);
                }
            }
            OffsetTable.ClearCards(firstCard + 1, lastCard - 1);
            if (lastCard != CardTable.CardNo(firstObjectAddr)) {
                OffsetTable.ClearCards(lastCard, lastCard);
            } else {
                VTable.Assert(OffsetTable.GetOffset(lastCard) >=
                              (firstObjectAddr -
                               CardTable.CardAddr(lastCard)),
                              "wrong offset");
            }
        }
        {
            // trashAddr should go back at most one page.
            UIntPtr trashPage = PageTable.Page(trashAddr);
            UIntPtr firstObjectAddrPage = PageTable.Page(firstObjectAddr);
            VTable.Assert((trashPage == firstObjectAddrPage - 1) ||
                          (trashPage == firstObjectAddrPage));
        }
        // If the InteriorPtrTable already had a value, then this is
        // redundant, but if the call to First above has to compute
        // the value, then (since it won't store it in the table) we
        // should store it.  Why?  At this point the previous page
        // would be "connected" to this one.  After this collection
        // the previous page will be unused or re-used and unrelated
        // to this page, and subsequent calls to First would then
        // rely on it making the leap between unrelated pages.
        InteriorPtrTable.SetFirst(firstObjectAddr);
        while (trashAddr < firstAddr) {
            Allocator.WriteAlignment(trashAddr);
            trashAddr += UIntPtr.Size;
        }
        // Zero out the area between the last whole object on
        // the last page and the end of the last page
        UIntPtr pastAddr = PostPinnedObjectAddr(endPage);
        UIntPtr newEndPage = PageTable.Page(PageTable.PagePad(pastAddr));
        while (endPage < newEndPage) {
            // The last object spills into the next page(s), so
            // mark those page(s) live
            PageType fromPageType = PageTable.Type(endPage);
            if (PageTable.IsZombiePage(fromPageType)) {
                PageType toSpaceType = PageTable.ZombieToLive(fromPageType);
                PageTable.SetType(endPage, toSpaceType);
            } else {
                // The final page might be live already because
                // something else on it was pinned.  pageIndex has
                // already been incremented, so it points to the
                // start of the next set of contiguous pages.
                VTable.Assert(
                    PageTable.IsLiveGcPage(fromPageType) &&
                    pageIndex < limit &&
                    endPage == (UIntPtr) pinnedPageList[pageIndex],
                    "Semispace:RegisterPinnedReference:5");
            }
            ++endPage;
        }
        lastPostPinnedAddr = pastAddr;
    }
    CleanPageTail(lastPostPinnedAddr);
    pinnedPageList = null;
    comparer = null;
}
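// Illustration only: the outer loop of CleanPinnedPages coalesces
// consecutive entries of pinnedPageList into [startPage, endPage)
// runs before cleaning around each run.  Below is a hypothetical
// standalone version of just that grouping step over a sorted page
// array; PageRun and CoalesceRuns are not part of this runtime.
struct PageRun {
    public uint Start;   // first page in the run
    public uint End;     // one past the last page in the run
}

static System.Collections.Generic.List<PageRun>
CoalesceRuns(uint[] sortedPages) {
    var runs = new System.Collections.Generic.List<PageRun>();
    int i = 0;
    while (i < sortedPages.Length) {
        PageRun run;
        run.Start = sortedPages[i];
        run.End = run.Start + 1;
        i++;
        // Extend the run while the next entry is the next page.
        while (i < sortedPages.Length && sortedPages[i] == run.End) {
            i++;
            run.End++;
        }
        runs.Add(run);
    }
    return runs;
}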