        internal virtual void StopTheWorld()
        {
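            // Bring every mutator thread to a rendezvous so the collector
            // thread has exclusive access to the heap.  The up-time
            // snapshots below bracket the pause for monitoring.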
#if SINGULARITY
            //DebugStub.WriteLine("~~~~~ StopTheWorld()");
            Monitoring.Log(Monitoring.Provider.GC,
                           (ushort)GarbageCollectorEvent.StartStopTheWorld);
#if SINGULARITY_KERNEL
            TimeSpan ticks = SystemClock.KernelUpTime;
#elif SINGULARITY_PROCESS
            TimeSpan ticks = ProcessService.GetUpTime();
#endif
#endif
            VTable.Assert(Thread.GetCurrentThreadIndex() ==
                          collectorThreadIndex);
            BaseCollector.AllThreadRendezvous(collectorThreadIndex);
#if SINGULARITY
#if SINGULARITY_KERNEL
            ticks = SystemClock.KernelUpTime - ticks;
#elif SINGULARITY_PROCESS
            ticks = ProcessService.GetUpTime() - ticks;
#endif
            Monitoring.Log(Monitoring.Provider.GC,
                           (ushort)GarbageCollectorEvent.EndStopTheWorld);
#endif
        }
        private UIntPtr FreshAlloc(UIntPtr bytes, uint alignment,
                                   Thread currentThread)
        {
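            // Abandon the current allocation region and carve the new
            // object out of fresh pages obtained from the PageManager.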
#if SINGULARITY_KERNEL
            Kernel.Waypoint(702);
#endif
            this.Truncate();
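            // Pad the request by the worst-case alignment overhead and
            // round up to a whole number of pages.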
            UIntPtr paddedBytes =
                PageTable.PagePad(bytes + alignment - UIntPtr.Size);
            BaseCollector.IncrementNewBytesSinceGC(paddedBytes);
            UIntPtr pages       = PageTable.PageCount(paddedBytes);
            bool    fCleanPages = CLEAR_POOL_PAGES();
            // We may eventually want to ask for specific pages
            // between asking if any pages are reusable and asking the
            // OS for any possible page.
            UIntPtr startPage =
                PageManager.EnsurePages(currentThread, pages, this.pageType,
                                        ref fCleanPages);
            UIntPtr startAddr = PageTable.PageAddr(startPage);
            UIntPtr limitAddr = PageTable.PageAddr(startPage + pages);
            startAddr = Allocator.AlignedAllocationPtr(startAddr, limitAddr,
                                                       alignment);
            this.allocNew = startAddr;
            this.allocPtr = startAddr + bytes;
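            // If the pages came back clean, the whole region is known to
            // be zeroed; otherwise clear just the bytes handed out and
            // track the zeroed frontier.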
            if (fCleanPages)
            {
                this.zeroedLimit = limitAddr;
            }
            else
            {
                Util.MemClear(startAddr, bytes);
                this.zeroedLimit = this.allocPtr;
            }
            this.reserveLimit = limitAddr;
            UIntPtr resultAddr = startAddr + PreHeader.Size;
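            // Record this object as the first on its page so that interior
            // pointers can be resolved back to an object start.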
            InteriorPtrTable.SetFirst(resultAddr);

#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
            if (GC.remsetType == RemSetType.Cards)
            {
                UIntPtr nextPageAddr = startAddr + PageTable.PageSize;
                VTable.Assert(resultAddr < nextPageAddr);
                if (this.allocPtr > nextPageAddr)
                {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                    OffsetTable.SetLast(resultAddr);
#endif
                }
            }
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint(703);
#endif
            return(resultAddr);
        }
        internal virtual void ResumeTheWorld()
        {
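            // Release the mutator threads blocked at the rendezvous.  Like
            // StopTheWorld, this must run on the collector thread.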
            VTable.Assert(Thread.GetCurrentThreadIndex() ==
                          collectorThreadIndex);
            BaseCollector.AllThreadRelease(collectorThreadIndex);
        }
        private void PerformCollection(int currentThreadIndex,
                                       int generation)
        {
            // Clear the GCRequest bit (if necessary) before doing
            // anything that could cause a state transition.
            if (Transitions.HasGCRequest(currentThreadIndex))
            {
                Transitions.ClearGCRequest(currentThreadIndex);
            }
            int  startTicks     = 0;
            bool enableGCTiming = VTable.enableGCTiming;

            if (enableGCTiming || VTable.enableFinalGCTiming)
            {
                VTable.enableGCTiming = false;
                startTicks            = Environment.TickCount;
                if (enableGCTiming)
                {
                    VTable.DebugPrint("[GC start: {0} bytes]\n",
                                      __arglist(TotalMemory));
                }
            }
#if SINGULARITY
            Tracing.Log(Tracing.Debug, "GC start");
#endif
            CollectorStatistics.Event(GCEvent.StopTheWorld);
            CurrentPhase = StopTheWorldPhase.Synchronizing;
            StopTheWorld();
            CurrentPhase = StopTheWorldPhase.SingleThreaded;
            StartGCCycle();
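            // Snapshot memory usage and clocks so the pause can be
            // reported once the world is resumed.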
#if SINGULARITY
            long preGcMemoryUsage = GC.GetTotalMemory(false);
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
            TimeSpan ticks  = Thread.CurrentThread.ExecutionTime;
            TimeSpan ticks2 = SystemClock.KernelUpTime;
#else
            TimeSpan ticks = SystemClock.KernelUpTime;
#endif
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
            TimeSpan ticks  = ProcessService.GetThreadTime();
            TimeSpan ticks2 = ProcessService.GetUpTime();
#else
            TimeSpan ticks = ProcessService.GetUpTime();
#endif
#endif
#endif // SINGULARITY
#if SINGULARITY_KERNEL
            bool iflag = Processor.DisableInterrupts();

            // Disable interrupts on the other CPUs
            MpExecution.StopProcessorsForGC();
#endif
#if SINGULARITY
            ulong beg = Isa.GetCycleCount();
#endif
            // Preparation
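            // While this count is nonzero, allocation paths must not
            // trigger another collection.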
            GC.allocationGCInhibitCount++;
            // Verify the heap before GC
            if (VTable.enableGCVerify)
            {
                this.VerifyHeap(true);
            }
            // Invoke the chosen collector
#if SINGULARITY
            Monitoring.Log(Monitoring.Provider.GC,
                           (ushort)GarbageCollectorEvent.StartCollection);
#endif
            this.CollectStopped(collectorThreadIndex, generation);
#if SINGULARITY
            Monitoring.Log(Monitoring.Provider.GC,
                           (ushort)GarbageCollectorEvent.EndCollection);
#endif
            // Verify the heap after GC
            if (VTable.enableGCVerify)
            {
                this.VerifyHeap(false);
            }
            if (VTable.enableGCAccounting)
            {
                MemoryAccounting.Report(GC.gcType);
            }
            // Cleanup
            CollectorStatistics.Event(GCEvent.ResumeTheWorld);
            GC.allocationGCInhibitCount--;
            CurrentPhase = StopTheWorldPhase.Idle;
#if SINGULARITY
            long postGcMemoryUsage = GC.GetTotalMemory(false);
#endif
            if (enableGCTiming || VTable.enableFinalGCTiming)
            {
                int elapsedTicks = Environment.TickCount - startTicks;
                BaseCollector.RegisterPause(elapsedTicks);
                if (enableGCTiming)
                {
                    VTable.DebugPrint("[GC end  : {0} bytes, {1} ms]\n",
                                      __arglist(TotalMemory, elapsedTicks));
                    VTable.enableGCTiming = true;
                }
            }
            if (VTable.enableGCProfiling)
            {
                ulong totalMemory = (ulong)GC.GetTotalMemory(false);
                this.RegisterHeapSize(totalMemory);
            }
            ResumeTheWorld();
            collectorThreadIndex = -1;
#if SINGULARITY
            Tracing.Log(Tracing.Debug, "GC stop");
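            // NB: despite its name, this is a byte count: the difference
            // in GC.GetTotalMemory before and after the collection.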
            long pagesCollected = preGcMemoryUsage - postGcMemoryUsage;
#if SINGULARITY_KERNEL
#if THREAD_TIME_ACCOUNTING
            int procId = Thread.CurrentProcess.ProcessId;
            ticks  = Thread.CurrentThread.ExecutionTime - ticks;
            ticks2 = SystemClock.KernelUpTime - ticks2;
            Process.kernelProcess.SetGcPerformanceCounters(ticks, (long)pagesCollected);
#else
            ticks = SystemClock.KernelUpTime - ticks;
#endif
            Thread.CurrentProcess.SetGcPerformanceCounters(ticks, (long)pagesCollected);
#elif SINGULARITY_PROCESS
#if THREAD_TIME_ACCOUNTING
            ushort procId = ProcessService.GetCurrentProcessId();
            ticks  = ProcessService.GetThreadTime() - ticks;
            ticks2 = ProcessService.GetUpTime() - ticks2;
#else
            ticks = ProcessService.GetUpTime() - ticks;
#endif
            ProcessService.SetGcPerformanceCounters(ticks, (long)pagesCollected);
#endif

#if DEBUG
#if THREAD_TIME_ACCOUNTING
            DebugStub.WriteLine("~~~~~ StopTheWorld [collected pages={0:x8}, pid={1:x3}, ms(Thread)={2:d6}, ms(System)={3:d6}, procId={4}, tid={5}]",
                                __arglist(pagesCollected,
                                          PageTable.processTag >> 16,
                                          ticks.Milliseconds,
                                          ticks2.Milliseconds,
                                          procId,
                                          Thread.GetCurrentThreadIndex()
                                          ));
#endif
#endif
#endif

#if SINGULARITY
            DebugStub.AddToPerfCounter(GC.perfCounter, Isa.GetCycleCount() - beg);
#endif
#if SINGULARITY_KERNEL
            // Resume interrupts on the other CPUs
            MpExecution.ResumeProcessorsAfterGC();

            Processor.RestoreInterrupts(iflag);
#endif
        }
        private UIntPtr ExtendAlloc(UIntPtr bytes, uint alignment,
                                    Thread currentThread)
        {
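            // Try to grow the current allocation region in place by
            // reserving the pages immediately following reserveLimit.
            // Returns UIntPtr.Zero if the region cannot be extended.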
            if (this.reserveLimit == UIntPtr.Zero)
            {
                return(UIntPtr.Zero);
            }
#if SINGULARITY_KERNEL
            Kernel.Waypoint(700);
#endif
            UIntPtr neededBytes =
                bytes +                              // Bytes required for object +
                alignment - UIntPtr.Size -           // worst case alignment overhead +
                (this.reserveLimit - this.allocPtr); // bytes already available
            UIntPtr paddedNeed  = PageTable.PagePad(neededBytes);
            UIntPtr pageCount   = PageTable.PageCount(paddedNeed);
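            // reserveLimit is page-aligned, so this is the first page
            // beyond the current region.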
            UIntPtr startPage   = PageTable.Page(this.reserveLimit);
            bool    fCleanPages = CLEAR_POOL_PAGES();
            bool    gotPages    =
                PageManager.TryReserveUnusedPages(currentThread, startPage,
                                                  pageCount, this.pageType,
                                                  ref fCleanPages);
            if (!gotPages)
            {
                // We can't indiscriminately ask for more memory if we have
                // unused pages already available.
                return(UIntPtr.Zero);
            }
            if (this.reserveLimit == UIntPtr.Zero)
            {
                // A collection occurred, so there is no region to extend
                PageManager.ReleaseUnusedPages(startPage, pageCount,
                                               fCleanPages);
                return(UIntPtr.Zero);
            }
            BaseCollector.IncrementNewBytesSinceGC(paddedNeed);
            this.allocNew = this.reserveLimit;
            // Pad alignment space if necessary.  NB: a prior call to
            // AllocateFast may have started generating alignment tokens,
            // but we may need to finish the job here if the residual space
            // was insufficient for a multi-word alignment.
            UIntPtr oldReserveLimit = this.reserveLimit;
            this.reserveLimit += paddedNeed;
            this.allocPtr      =
                Allocator.AlignedAllocationPtr(this.allocPtr,
                                               this.reserveLimit,
                                               alignment);
            if (this.zeroedLimit < this.allocPtr)
            {
                this.zeroedLimit = this.allocPtr;
            }
            UIntPtr objectAddr = this.allocPtr + PreHeader.Size;
            this.allocPtr += bytes;
            if (fCleanPages)
            {
                if (this.zeroedLimit < oldReserveLimit)
                {
                    Util.MemClear(this.zeroedLimit,
                                  oldReserveLimit - this.zeroedLimit);
                }
                this.zeroedLimit = this.reserveLimit;
            }
            else
            {
                Util.MemClear(this.zeroedLimit,
                              this.allocPtr - this.zeroedLimit);
                this.zeroedLimit = this.allocPtr;
            }
            VTable.Assert(this.allocPtr <= this.zeroedLimit);
            VTable.Assert(PageTable.PageAligned(this.reserveLimit));
            if (objectAddr >= oldReserveLimit)
            {
                // Object is first on new page
                InteriorPtrTable.SetFirst(objectAddr);
            }
            else if (objectAddr + bytes < this.reserveLimit)
            {
                // The object does not end on new limit

                // N.B. The next object may not be allocated at exactly
                // (objectAddr + bytes) due to alignment considerations.  It
                // also might not ever be allocated.  These cases are handled
                // by InteriorPtrTable.First skipping over alignment tokens
                // and callers of First watching out for unused space tokens.

                InteriorPtrTable.SetFirst(objectAddr + bytes);
            }
            // An object is known to be the last one on a page when it
            // extends across the page boundary into the next page.
            // Otherwise the object lies entirely within a page, and we
            // cannot yet tell whether it is the last one.  So we record
            // only boundary-crossing objects for the last card in their
            // page.  Many objects are omitted by this coarse-grained
            // recording, but the offset table can be updated
            // incrementally to find them later.  This is a better choice
            // than recording every object in the offset table, because
            // most objects simply die and never need to be recorded.

#if !SINGULARITY || SEMISPACE_COLLECTOR || ADAPTIVE_COPYING_COLLECTOR || SLIDING_COLLECTOR
            if (GC.remsetType == RemSetType.Cards)
            {
                // The object must actually straddle the old page-aligned
                // limit; this.allocPtr has already been advanced past the
                // end of the object, so no further offset is needed.
                if (objectAddr < oldReserveLimit &&
                    this.allocPtr > oldReserveLimit)
                {
#if DONT_RECORD_OBJALLOC_IN_OFFSETTABLE
#else
                    OffsetTable.SetLast(objectAddr);
#endif
                }
            }
#endif

#if SINGULARITY_KERNEL
            Kernel.Waypoint(701);
#endif
            return(objectAddr);
        }