// Report a single allocation to the GC profiler.  Skipped entirely
// when profiling is off or the heap is in a damaged state.
internal override void ProfileAllocation(Object obj) {
    if (!GC.IsProfiling || HeapDamaged) {
        return;
    }
    UIntPtr objectSize = ObjectLayout.Sizeof(obj);
    GcProfiler.NotifyAllocation(Magic.addressOf(obj),
                                obj.GetType(),
                                objectSize);
}
// The ObjectVisitor contract:
// Must return the size of the visited object.
internal override UIntPtr Visit(Object obj) {
    UIntPtr objectSize = ObjectLayout.Sizeof(obj);
    // Bracket the reference-field scan with profiler start/end
    // notifications so the profiler can attribute the scan to this
    // particular object.
    profiler.StartScanOneObject(Magic.addressOf(obj),
                                obj.GetType(),
                                objectSize);
    oneObjectVisitor.VisitReferenceFields(obj);
    profiler.EndScanOneObject();
    return objectSize;
}
// Copy an object, dispatching to the large- or small-object copier
// based on its size class.
internal Object Copy(Object obj) {
    UIntPtr objectSize = ObjectLayout.Sizeof(obj);
    return GenerationalGCData.IsLargeObjectSize(objectSize)
        ? CopyLarge(obj, objectSize)
        : CopySmall(obj, objectSize);
}
// Record a (from, to) pair on the tag list so a later copy phase can
// process it.  Always succeeds; the reported overhead is the size of
// the bookkeeping TagNode itself.
internal override bool TagObjectForCopy(Object from, Object to,
                                        out UIntPtr spaceOverhead) {
    TagNode node = new TagNode();
    spaceOverhead = ObjectLayout.Sizeof(node);
    node.from = from;
    node.to = to;
    // Push the node onto the singly-linked tag list.
    node.next = tagHead;
    tagHead = node;
    nTagged++;
    return true;
}
// Verification visitor: an object carrying the mark color must only
// reference other objects carrying the mark color.
internal override UIntPtr Visit(Object obj) {
    if (obj.GcMark() == UIntPtr.Zero) {
        // Unmarked object: nothing to verify, just report its size.
        return ObjectLayout.Sizeof(obj);
    }
    // The object has the mark color, so it should only
    // reference other objects with the mark color.
    return VerifyMarkVisitor.visitor.VisitReferenceFields(obj);
}
// Tag a from-space object for concurrent copying.  Prepares the
// to-space replica by seeding every copyable word with the Alpha
// sentinel, then attempts to atomically install a Tagged forwarding
// word (pointing at the replica) into the from-space object's
// CoCoWord.  Returns false if the CAS loses a race (the word was no
// longer Simple -- e.g. concurrently pinned or tagged); in that case
// the TagNode is dropped and tagHead is left unchanged.
// NOTE(review): unlike the simple TagObjectForCopy variant, tn.to is
// never assigned here -- presumably the to-space pointer is recovered
// from the CoCoWord rather than the tag list; confirm.
internal override bool TagObjectForCopy(Object from, Object to,
                                        out UIntPtr spaceOverhead)
{
    TagNode tn = new TagNode();
    // The only space cost of tagging is the bookkeeping node itself.
    spaceOverhead = ObjectLayout.Sizeof(tn);
    tn.from = from;
    tn.next = tagHead;
    // prepare to-space object
    // Offsets are relative to the object reference, so the scan
    // starts at -PreHeader.Size to cover the pre-header words too.
    UIntPtr begin = UIntPtr.Zero - (UIntPtr)PreHeader.Size;
    // End = object size rounded up to pointer alignment, rebased by
    // the pre-header size.
    UIntPtr end = ((ObjectLayout.Sizeof(from) + sizeof(UIntPtr) - 1)
                   & ~((UIntPtr)sizeof(UIntPtr) - 1))
                  - PreHeader.Size;
    for (UIntPtr offset = begin;
         offset != end;
         offset += sizeof(UIntPtr)) {
        if (!IgnoreOffset(offset)) {
            // Seed the replica word with Alpha so the copier can later
            // tell which words have not yet been transferred.
            *(UIntPtr *)(Magic.addressOf(to) + offset) = Alpha;
        }
    }
    // now we can tag from-space
    if (CASCoCoWord(from, Tagged(Magic.addressOf(to)), Simple())) {
        tagHead = tn;
        return(true);
    }
    else {
        return(false);
    }
}
// Process the PLC (potential leaked cycle) list: three traversal
// passes over the subgraph S reachable from the list (decrement
// counts internal to S, re-scan from externally-referenced objects,
// then reclaim the rest), followed by freeing the dead objects and
// recycling the list nodes.  Optionally records GC timing, heap
// watermarks, and cyclic-garbage profiling statistics.
private static unsafe void processPLCList()
{
    int startTicks = 0;
    bool enableGCTiming = VTable.enableGCTiming;
    if (enableGCTiming) {
        // Disable the global flag while we time this pass ourselves;
        // it is restored at the end.
        VTable.enableGCTiming = false;
        startTicks = Environment.TickCount;
    }
    if (VTable.enableGCWatermarks) {
        MemoryAccounting.RecordHeapWatermarks();
    }
#if DEBUG
    // The head of the PLC list is a sentinel with no object address.
    VTable.Assert(firstPLCLink->objAddr == UIntPtr.Zero,
                  @"firstPLCLink->objAddr == UIntPtr.Zero");
#endif // DEBUG
    // Let S be the subgraph of heap objects reachable from
    // the PLC list. Decrement counts due to references in S.
    for (PLCLink *link = firstPLCLink->next; link != null;
         link = link->next) {
        UIntPtr objAddr = link->objAddr;
        VTable.Assert(objAddr != UIntPtr.Zero,
                      @"objAddr != UIntPtr.Zero");
        Object obj = Magic.fromAddress(objAddr);
        VTable.Assert((obj.REF_STATE & countingONFlagMask) != 0,
                      @"(obj.REF_STATE & countingONFlagMask) != 0");
        uint refState = obj.REF_STATE;
        // The mark flag prevents traversing (and decrementing
        // through) the same object more than once.
        if ((refState & markFlagMask) == 0) {
            obj.REF_STATE = refState | markFlagMask;
            internalDecrementer.Traverse(objAddr);
        }
    }
    // Objects that now have non-zero counts are those that
    // have references external to S incident on them.
    // Recompute counts due to reachability from such objects.
    for (PLCLink *link = firstPLCLink->next; link != null;
         link = link->next) {
        UIntPtr objAddr = link->objAddr;
        internalScanner.Traverse(objAddr);
    }
    // String together objects with reference count
    // of zero for reclamation.
    internalReclaimer.Initialize();
    for (PLCLink *link = firstPLCLink->next; link != null;
         link = link->next) {
        UIntPtr objAddr = link->objAddr;
        internalReclaimer.Traverse(objAddr);
    }
    ulong reclaimedBytes = 0;
    Object reclaimedObj = internalReclaimer.ReclaimedObjects;
    // Walk the chain of reclaimed objects, freeing each and
    // (optionally) accounting its size for profiling.
    while (reclaimedObj != null) {
        if (VTable.enableGCProfiling) {
            UIntPtr size = ObjectLayout.Sizeof(reclaimedObj);
            reclaimedBytes += (ulong)size;
        }
        // Read the next link before freeing: Free invalidates the
        // current object.
        Object nextReclaimedObj = getNextLink(reclaimedObj);
        SegregatedFreeList.Free(reclaimedObj);
        reclaimedObj = nextReclaimedObj;
    }
    // Recycle the PLC list.
    if (firstPLCLink->next != null) {
        // Find the tail, then splice the whole list (minus the
        // sentinel head) onto the front of the free chunk list.
        PLCLink *lastPLCLink = firstPLCLink;
        do {
            lastPLCLink = lastPLCLink->next;
        } while (lastPLCLink->next != null);
        lastPLCLink->next = plcListChunk;
        plcListChunk = firstPLCLink->next;
        firstPLCLink->next = null;
    }
    // Release the memory used up by work lists.
    UIntPtrQueue.ReleaseStandbyPages(null);
    SegregatedFreeList.RecycleGlobalPages();
    SegregatedFreeList.CommitFreedData();
    GC.newBytesSinceGC = UIntPtr.Zero;
    if (enableGCTiming) {
        // Accumulate pause statistics and restore the timing flag.
        int elapsedTicks = Environment.TickCount - startTicks;
        System.GC.gcTotalTime += elapsedTicks;
        if (System.GC.maxPauseTime < elapsedTicks) {
            System.GC.maxPauseTime = elapsedTicks;
        }
        System.GC.pauseCount++;
        VTable.enableGCTiming = true;
    }
    if (VTable.enableGCProfiling) {
        // Track the largest single harvest plus cumulative totals.
        if (maxCyclicGarbage < reclaimedBytes) {
            maxCyclicGarbage = reclaimedBytes;
        }
        totalCyclicGarbage += reclaimedBytes;
        cycleCollections++;
    }
}
// Per-object scan hook: decides whether the object is a candidate for
// concurrent copying and, if so, allocates a same-shape to-space
// replica and asks the barrier to tag the pair.  The first successful
// tag requests a CoCo (concurrent copying) phase via wantCoCo.
internal unsafe void ScanHook(Object obj)
{
    UIntPtr page = PageTable.Page(Magic.addressOf(obj));
    // Only objects on small-object pages are candidates.
    if (PageTable.Type(page) != SegregatedFreeList.SMALL_OBJ_PAGE) {
        //VTable.DebugPrint(" not tagging because this isn't a small object page");
        return;
    }
    SegregatedFreeList.PageHeader *ph =
        (SegregatedFreeList.PageHeader *)PageTable.PageAddr(page);
    // Only pages marked in the page-header user value participate.
    if (!new CoCoPageUserValue(ph->userValue).Marked) {
        //VTable.DebugPrint(" not tagging because the page isn't marked\n");
        return;
    }
    // Synchronization-critical objects are pinned rather than copied.
    if (obj is EMU ||
        obj is Monitor ||
        obj is Thread ||
        obj is ThreadHeaderQueue) {
        CoCoBarrier.NotifyPin(Magic.addressOf(obj));
        if (fVerbose) {
            VTable.DebugPrint(" $$ not tagging object because it's a monitor or EMU\n");
        }
        return;
    }
    if (doingCoCo) {
        //VTable.DebugPrint(" not tagging object because doingCoCo\n");
        return;
    }
    if (!CoCoBarrier.instance.ObjectIsNotCopied(obj)) {
        if (fVerbose) {
            VTable.DebugPrint(" not tagging object because object is already in the process of being copied.\n");
        }
        return;
    }
    if (fVerbose && obj.GetType() != typeof(Object)) {
        VTable.DebugPrint(" $$ tagging a non-System.Object; type is ");
        VTable.DebugPrint(obj.GetType().Name);
        VTable.DebugPrint("\n");
    }
    // REVIEW: I wish that there was an easier way of
    // doing this.
    // Allocate a replica with the same layout as the original.
    Object copy;
    if (obj is Array) {
        Array a = (Array)obj;
        if (a.IsVector) {
            copy = GC.AllocateVector(a.vtable, a.Length);
        }
        else {
            copy = GC.AllocateArray(a.vtable, a.Rank, a.Length);
        }
    }
    else if (obj is String) {
        String s = (String)obj;
        // REVIEW: this is not nice.
        // NOTE(review): the -1 presumably compensates for a
        // terminator slot included in ArrayLength -- confirm against
        // the String/AllocateString contract.
        copy = GC.AllocateString(s.ArrayLength - 1);
    }
    else {
        copy = GC.AllocateObject(obj.vtable);
    }
    VTable.Assert(ObjectLayout.Sizeof(copy) == ObjectLayout.Sizeof(obj),
                  "Copy is not same size as original");
    // Charge the replica itself to the space overhead of this pass.
    spaceOverhead += ObjectLayout.Sizeof(copy);
    // Whether this would be the very first tagged object; used below
    // to trigger the request for a copying phase exactly once.
    bool first = !CoCoBarrier.instance.AnyTaggedForCopying;
    UIntPtr thisSpaceOverhead;
    if (CoCoBarrier.instance.TagObjectForCopy(obj, copy,
                                              out thisSpaceOverhead)) {
        cnt++;
        if (first) {
            lock (interlock) {
                if (!wantCoCo && !doingCoCo) {
                    wantCoCo = true;
                }
            }
        }
    }
    // NOTE(review): thisSpaceOverhead is charged even when tagging
    // fails -- the barrier reports the bookkeeping cost via the out
    // parameter in both cases.
    spaceOverhead += thisSpaceOverhead;
}
// Try to copy a tagged from-space object into its to-space replica.
// Drives a lock-free state machine on the object's CoCoWord
// (Tagged -> Copying -> Forwarded) and returns true if the object was
// forwarded, false if it ended up pinned (Simple) instead.
static bool CopyObject(Object from)
{
    if (fDietVerboseCopyDebug) {
        VTable.DebugPrint(" Copying ");
        VTable.DebugPrint((ulong)Magic.addressOf(from));
        VTable.DebugPrint(" with CoCoWord = ");
        VTable.DebugPrint((ulong)MixinObject(from).preHeader.CoCoWord);
        VTable.DebugPrint("\n");
    }
    // Retry loop: re-read the CoCoWord after every failed transition.
    for (;;) {
        UIntPtr CoCoWord = MixinObject(from).preHeader.CoCoWord;
        if (IsSimple(CoCoWord)) {
            // got pinned .. ignore
            return(false);
        }
        else if (IsTagged(CoCoWord)) {
            if (ShouldPin(Magic.addressOf(from))) {
                // Try to demote Tagged back to Simple (pin).  A failed
                // CAS means the word changed concurrently; retry.
                if (CASCoCoWord(from, Simple(), CoCoWord)) {
                    // pinned
                    NotifyPin(Magic.addressOf(from));
                    return(false);
                }
            }
            else {
                // Advance Tagged -> Copying; on CAS failure just
                // loop and re-examine the word.
                CASCoCoWord(from, Copying(CoCoWord), CoCoWord);
            }
        }
        else if (IsCopying(CoCoWord)) {
            // The forwarding pointer to the replica is embedded in
            // the Copying-state word.
            Object to = Magic.fromAddress(ForwardPtr(CoCoWord));
            // Word-copy range, relative to the object reference:
            // starts at the pre-header, ends at the word-aligned
            // object size (rebased by the pre-header size).
            UIntPtr begin = UIntPtr.Zero - (UIntPtr)PreHeader.Size;
            UIntPtr end = ((ObjectLayout.Sizeof(from) + sizeof(UIntPtr) - 1)
                           & ~((UIntPtr)sizeof(UIntPtr) - 1))
                          - PreHeader.Size;
            if (fDietVerboseCopyDebug) {
                VTable.DebugPrint(" copying to ");
                VTable.DebugPrint((ulong)Magic.addressOf(to));
                VTable.DebugPrint("; begin = ");
                VTable.DebugPrint((ulong)begin);
                VTable.DebugPrint("; end = ");
                VTable.DebugPrint((ulong)end);
                VTable.DebugPrint("; PreHeader.Size = ");
                VTable.DebugPrint((ulong)PreHeader.Size);
                VTable.DebugPrint("; Sizeof(from) = ");
                VTable.DebugPrint((ulong)ObjectLayout.Sizeof(from));
                VTable.DebugPrint("; Sizeof(to) = ");
                VTable.DebugPrint((ulong)ObjectLayout.Sizeof(to));
                VTable.DebugPrint("\n");
            }
            for (UIntPtr offset = begin;
                 offset != end;
                 offset += sizeof(UIntPtr)) {
                if (!IgnoreOffset(offset)) {
                    UIntPtr *fptr = (UIntPtr *)(Magic.addressOf(from) + offset);
                    UIntPtr *tptr = (UIntPtr *)(Magic.addressOf(to) + offset);
                    // Per-word transfer loop: repeats only if the
                    // from-space word changes under us.
                    for (;;) {
                        UIntPtr word = *fptr;
                        if (word == Alpha) {
                            // NOTE: this case will only be hit the first time
                            // around the loop. if it's not hit the first time
                            // it'll never get hit.
                            break;
                        }
                        *tptr = word;
                        // Immutable words need no synchronization;
                        // otherwise CAS Alpha into the from-space word
                        // (expected value `word`) -- NOTE(review):
                        // argument order presumed (ptr, new, expected),
                        // matching CASCoCoWord above; confirm.
                        if (InternalImmutableOffset(from, offset) ||
                            CAS(fptr, Alpha, word) == word) {
                            break;
                        }
                    }
                }
            }
            // All words transferred: publish the Forwarded state.
            MixinObject(from).preHeader.CoCoWord = Forwarded(CoCoWord);
            return(true);
        }
        else {
            // CoCoWord must be Simple, Tagged, or Copying.
            VTable.NotReached();
        }
    }
}
// Aborter copy phase: bulk-copies each tagged object into its
// to-space replica, then tries to atomically forward each from-space
// object to it.  Returns the number of objects successfully forwarded
// (objects whose CoCoWord changed concurrently are not counted).
internal override ulong Copy()
{
    ulong cnt = 0;
    // Detach the current tag list so new tags can accumulate while
    // this pass runs.
    TagNode myTagHead = tagHead;
    ulong myNTagged = nTagged;
    tagHead = null;
    nTagged = 0;
    if (fAbortDebug) {
        VTable.DebugPrint("Aborter: copying ");
        VTable.DebugPrint(myNTagged);
        VTable.DebugPrint(" objects.\n");
    }
    // first do all of the copying
    for (TagNode n = myTagHead; n != null; n = n.next) {
        // Copy the whole allocation, including the pre-header (hence
        // the -PreHeader.Size rebasing of both addresses).
        Util.MemCopy(Magic.addressOf(n.to) - PreHeader.Size,
                     Magic.addressOf(n.from) - PreHeader.Size,
                     ObjectLayout.Sizeof(n.from));
        // fix the forwarding word in the to-space object (without
        // this it'll point at from-space)
        MixinObject(n.to).preHeader.CoCoWord =
            WithNoForwardNotCopying(n.to);
    }
    if (fAbortDebug) {
        VTable.DebugPrint("Aborter: copied ");
        VTable.DebugPrint(myNTagged);
        VTable.DebugPrint(" objects.\n");
    }
    // now attempt to forward all objects.
    for (TagNode n = myTagHead; n != null; n = n.next) {
        // The CAS succeeds only if the from-space object is still in
        // the expected not-forwarded/copying state.
        UIntPtr oldCoCoWord =
            CAS(ref MixinObject(n.from).preHeader.CoCoWord,
                WithForward(Magic.addressOf(n.to)),
                WithNoForwardCopying(n.from));
        // The replica itself must never appear forwarded.
        VTable.Assert(!IsForwarded(MixinObject(n.to).preHeader.CoCoWord,
                                   n.to));
        if (oldCoCoWord == WithNoForwardCopying(n.from)) {
            // copy successful
            cnt++;
        }
    }
    if (fAbortDebug) {
        VTable.DebugPrint("Aborter: forwarded ");
        VTable.DebugPrint(cnt);
        VTable.DebugPrint(" objects.\n");
    }
    return(cnt);
}