Example #1
        internal static void ConstructHeap()
        {
            PageTable.Initialize();

            MemoryManager.Initialize();
#if OS_WINCE || SINGULARITY
            UIntPtr heap_commit_size = new UIntPtr(1 << 16);
#else
            UIntPtr heap_commit_size = new UIntPtr(1 << 20);
#endif
            UIntPtr os_commit_size = MemoryManager.OperatingSystemCommitSize;
            VTable.Assert(os_commit_size > UIntPtr.Zero);
            VTable.Assert(heap_commit_size >= os_commit_size);
            UIntPtr bootstrapSize;
            if (UIntPtr.Size == 8)
            {
                if (gcType == GCType.ConcurrentMSCollector)
                {
                    // Increase the bootstrap size so that the concurrent
                    // mark-sweep collector will run on 64-bit Windows.
                    bootstrapSize = (UIntPtr)1 << 16;
                }
                else
                {
                    bootstrapSize = (UIntPtr)1 << 15;
                }
            }
            else
            {
                bootstrapSize = (UIntPtr)1 << 14;
            }
            if (bootstrapSize < os_commit_size)
            {
                bootstrapSize = os_commit_size;
            }
            BootstrapMemory.Initialize(bootstrapSize);
            StaticData.Initialize();
            PageManager.Initialize(os_commit_size, heap_commit_size);
            CallStack.Initialize();
        }
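
A minimal standalone sketch of the sizing policy above, outside the runtime: the bootstrap region scales with pointer width and collector choice, and is never smaller than the operating-system commit size. The enum, class, and constants below are assumptions for illustration only, not runtime types.

 // Hypothetical illustration of the sizing rule used in ConstructHeap.
 internal enum SketchGCType { MarkSweep, ConcurrentMS }

 internal static class BootstrapSizingSketch
 {
     internal static ulong ChooseBootstrapSize(int pointerSize,
                                               SketchGCType gcType,
                                               ulong osCommitSize)
     {
         ulong size;
         if (pointerSize == 8)
         {
             // 64-bit: the concurrent mark-sweep collector needs a larger
             // bootstrap region than the other collectors.
             size = (gcType == SketchGCType.ConcurrentMS)
                 ? (ulong)1 << 16
                 : (ulong)1 << 15;
         }
         else
         {
             size = (ulong)1 << 14;     // 32-bit default
         }
         // Never request less than one OS commit unit.
         return (size < osCommitSize) ? osCommitSize : size;
     }
 }
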
Example #2
        static Delegate()
        {
            CriticalSection        = new Mutex();
            NativeDelegateTable    = new NativeDelegateRecord[24][];
            NativeDelegateTable[0] = new NativeDelegateRecord[FIRST_TABLE_SIZE];
            FreeExtentStartIdx     = 0;
            FreeListStartIdx       = FREE_LIST_EMPTY;

#if FALSE // Enable this code to force testing of the FreeNativeDelegateRecord fn
            for (int i = 0; i < FIRST_TABLE_SIZE * 2; i++)
            {
                int idx = AllocateNativeDelegateRecord(CriticalSection);
                VTable.Assert(idx == i);
            }
            for (int i = 0; i < FIRST_TABLE_SIZE * 2; i++)
            {
                FreeNativeDelegateRecord(i);
            }
#endif
        }
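
The constructor above lays out a growable NativeDelegateTable plus a free list of recycled record slots, but the allocate/free routines themselves are not shown here. Below is a hypothetical, self-contained sketch of that free-list discipline (names and layout are invented; the runtime's two-level table keeps existing records in place when it grows, while this sketch flattens it to a single resizable array for brevity):

 // Hypothetical sketch of a record table with a free list of recycled slots.
 internal sealed class RecordTableSketch
 {
     private const int FreeListEmpty = -1;

     private object[] slots = new object[8];   // grows by doubling
     private int[]    next  = new int[8];      // per-slot free-list links
     private int      freeListHead = FreeListEmpty;
     private int      highWater    = 0;        // first never-used slot

     internal int Allocate(object payload)
     {
         int idx;
         if (freeListHead != FreeListEmpty)
         {
             // Reuse a previously freed slot.
             idx = freeListHead;
             freeListHead = next[idx];
         }
         else
         {
             // Extend past the high-water mark, growing only when needed.
             if (highWater == slots.Length)
             {
                 System.Array.Resize(ref slots, slots.Length * 2);
                 System.Array.Resize(ref next,  next.Length  * 2);
             }
             idx = highWater++;
         }
         slots[idx] = payload;
         return idx;
     }

     internal void Free(int idx)
     {
         slots[idx] = null;
         next[idx]  = freeListHead;   // push the slot onto the free list
         freeListHead = idx;
     }
 }
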
Example #3
 // Creates a guid.  When "blank" is true, every field is left at zero;
 // otherwise a new counter-based guid is generated.
 private Guid(bool blank)
 {
     // Must initialize value class members even if the native
     // function reinitializes them.  Compiler appeasement.
     _a = 0;
     _b = 0;
     _c = 0;
     _d = 0;
     _e = 0;
     _f = 0;
     _g = 0;
     _h = 0;
     _i = 0;
     _j = 0;
     _k = 0;
     if (!blank)
     {
         // Create a new Guid...
         Counter++;
         _k = (byte)(Counter & 0xff);
         _j = (byte)(Counter >> 8);
         VTable.DebugPrint("CreateGuid.Guid(" + Counter + ")\n");
     }
 }
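
A small hypothetical illustration of the counter split above: the low byte of Counter lands in _k and the next byte in _j (the value 0x1234 is made up).

 // Hypothetical illustration of the counter split (Counter == 0x1234).
 static void SplitCounterExample()
 {
     int counter = 0x1234;
     byte k = (byte)(counter & 0xff);   // low byte  -> _k  (0x34)
     byte j = (byte)(counter >> 8);     // next byte -> _j  (0x12)
     System.Console.WriteLine("{0:x2} {1:x2}", j, k);   // prints "12 34"
 }
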
Example #4
 internal static Object AllocateObject(VTable vtable)
 {
     return(AllocateObject(vtable, Thread.CurrentThread));
 }
Example #5
 public static void Assert(bool truth, string statement)
 {
     VTable.Assert(truth, statement);
 }
Example #6
 extern private static Array AllocateVector(VTable vtable, int numElements);
Example #7
        public override Type GetElementType()
        {
            VTable element = this.classVtable.arrayElementClass;

            return((element != null) ? element.vtableType : null);
        }
Example #8
        void ResurrectCandidates(DirectReferenceVisitor forwardVisitor,
                                 DirectReferenceVisitor resurrectVisitor,
                                 bool copyFirst)
        {
#if !(REFERENCE_COUNTING_GC || DEFERRED_REFERENCE_COUNTING_GC)
            UIntPtr[] runTable      = null;
            int       runTableIndex = 0;
            int       runIndex      = 0;

            // For the concurrent collector, ResurrectCandidates could happen
            // while the application threads are calling SuppressFinalize and
            // ReRegisterForFinalize.  So we need to use the spinLock to prevent
            // races.  But we do not want to hold the spinLock while allocating
            // (i.e. when we grow the RunFinalizerTable[Shadow]) because our
            // spinLock is a leaf lock.  We don't want to worry about deadlocks
            // involving the spinLock and any locking that occurs as part of a
            // GC provoked by an allocation attempt.
#if SINGULARITY
            bool disabled = Processor.DisableLocalPreemption();
#endif
            spinLock.Acquire();
            bool lockHeld = true;
            try {
                int logicalIndex = 0;
                for (int i = 0; i < CandidateTableShadow.Length; i++)
                {
                    UIntPtr[] table = copyFirst
                      ? CandidateTable[i]
                      : CandidateTableShadow[i];

                    if (table == null)
                    {
                        VTable.Assert(logicalIndex == lastCandidateIndex);
                        break;
                    }
                    for (int j = 0; j < table.Length; j++, logicalIndex++)
                    {
                        if (table[j] == UIntPtr.Zero)
                        {
                            VTable.Assert(logicalIndex == lastCandidateIndex);
                            break;
                        }
                        fixed(UIntPtr *loc = &table[j])
                        {
                            if (!IsLink((int)*loc))
                            {
                                UIntPtr oldVal = *loc;
                                forwardVisitor.Visit(loc);
                                if (*loc == UIntPtr.Zero)
                                {
                                    // Put this slot onto the CandidateTable's free list
                                    *loc = (UIntPtr)freeCandidateLink;
                                    freeCandidateLink = IndexToLink(logicalIndex);

                                    // Marching forward through the RunFinalizer[Shadow] table, find an
                                    // empty slot in which to install this object.  Maintain the cursor
                                    // across objects, so we can efficiently transfer an entire batch.

                                    for (; runTableIndex < RunFinalizerTableShadow.Length; runTableIndex++)
                                    {
                                        runTable = copyFirst
                                          ? RunFinalizerTable[runTableIndex]
                                          : RunFinalizerTableShadow[runTableIndex];

                                        if (runTable == null)
                                        {
                                            // Create a new table
                                            int length = RunFinalizerTableShadow[runTableIndex - 1].Length * 2;

                                            lockHeld = false;
                                            spinLock.Release();
#if SINGULARITY
                                            Processor.RestoreLocalPreemption(disabled);
#endif
                                            UIntPtr[] newTable = new UIntPtr[length];
#if SINGULARITY
                                            disabled = Processor.DisableLocalPreemption();
#endif
                                            spinLock.Acquire();
                                            lockHeld = true;

                                            // There is no race with the RunFinalizerTable[Shadow].
                                            // The spinLock serializes access to the CandidateTable[Shadow].
                                            runTable = newTable;
                                            RunFinalizerTable[runTableIndex]       = newTable;
                                            RunFinalizerTableShadow[runTableIndex] = newTable;
                                            UIntPtr tableAddr = Magic.addressOf(newTable);
                                            resurrectVisitor.Visit(&tableAddr);
                                            resurrectVisitor.VisitReferenceFields(RunFinalizerTable[runTableIndex]);
                                            resurrectVisitor.VisitReferenceFields(RunFinalizerTableShadow[runTableIndex]);
                                        }

                                        for (; runIndex < runTable.Length; runIndex++)
                                        {
                                            if (runTable[runIndex] == UIntPtr.Zero)
                                            {
                                                goto outer;
                                            }
                                        }
                                        runIndex -= runTable.Length;
                                        VTable.Assert(runIndex == 0);   // ready for next sub-table
                                    }
outer:
                                    // We found an empty slot in the RunFinalizerTable[Shadow],
                                    // where we can put our ready Candidate.  It's also possible
                                    // to reach this label by falling through after exhausting the
                                    // entire table.  This will result in an exception when we
                                    // attempt to over-index the array.  It's not clear what more
                                    // protections are required... the process has exceeded an
                                    // unlikely and hard-wired capacity limit.
                                    Interlocked.Increment(ref WaitingToRun);
                                    madeRunnable = true;

                                    if (copyFirst)
                                    {
                                        RunFinalizerTable[runTableIndex][runIndex]
                                            = oldVal | new UIntPtr(1);
                                    }
                                    else
                                    {
                                        RunFinalizerTableShadow[runTableIndex][runIndex]
                                            = oldVal | new UIntPtr(1);
                                    }
                                }
                            }
                        }
                    }
                }
            }
            finally {
                if (lockHeld)
                {
                    spinLock.Release();
#if SINGULARITY
                    Processor.RestoreLocalPreemption(disabled);
#endif
                }
            }

            if (madeRunnable)
            {
                // Resurrect objects!
                VisitAllRunFinalizer(resurrectVisitor, copyFirst, true);
            }
#endif // !(REFERENCE_COUNTING_GC || DEFERRED_REFERENCE_COUNTING_GC)
        }
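
The comments above capture the key discipline: the spin lock is a leaf lock, so it must be released around any allocation (which can trigger a GC) and reacquired afterwards, and the caller must then tolerate whatever changed in between. A condensed, self-contained sketch of that release/allocate/reacquire pattern, using Monitor as a stand-in for the runtime's spin lock (all names hypothetical):

 // Hypothetical sketch of the "drop the leaf lock around allocation" pattern.
 internal sealed class GrowUnderLeafLockSketch
 {
     private readonly object spinLock = new object();   // stand-in for the spin lock
     private int[][] tables = new int[24][];

     internal void EnsureSubTable(int i, int length)
     {
         System.Threading.Monitor.Enter(spinLock);
         bool lockHeld = true;
         try
         {
             if (tables[i] != null)
             {
                 return;                          // someone else already grew it
             }
             // Never allocate while holding the leaf lock: release, allocate,
             // then reacquire and re-check, as ResurrectCandidates does.
             lockHeld = false;
             System.Threading.Monitor.Exit(spinLock);

             int[] newTable = new int[length];    // allocation may provoke a GC here

             System.Threading.Monitor.Enter(spinLock);
             lockHeld = true;
             if (tables[i] == null)
             {
                 tables[i] = newTable;            // install; a losing racer's array becomes garbage
             }
         }
         finally
         {
             if (lockHeld)
             {
                 System.Threading.Monitor.Exit(spinLock);
             }
         }
     }
 }
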
Example #9
 /// <summary>
 /// Checks whether the first stack height represents a deeper location on
 /// the stack than the second.
 /// </summary>
 /// <param name="first">The first stack height to compare.</param>
 /// <param name="second">The second stack height to compare.</param>
 /// <returns>True iff the first height is at least as deep in the stack as
 /// the second height.</returns>
 internal static bool Deeper(StackHeight first, StackHeight second)
 {
     VTable.Assert(first.stackPointer != UIntPtr.Zero);
     VTable.Assert(second.stackPointer != UIntPtr.Zero);
     return(first.stackPointer <= second.stackPointer);
 }
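
The comparison relies on the stack growing toward lower addresses, so a smaller stack pointer is deeper; note that equal stack pointers also return true. A hypothetical illustration with plain ulong addresses in place of the runtime's UIntPtr:

 // Hypothetical illustration: with a downward-growing stack, the lower
 // address is the deeper one.
 static bool CalleeIsDeeperExample()
 {
     ulong callerSP = 0x7fff2000;   // older frame, higher address
     ulong calleeSP = 0x7fff1f00;   // newer frame, lower address
     return calleeSP <= callerSP;   // true: mirrors Deeper()'s comparison
 }
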
Example #10
 internal static String AllocateString(int stringLength,
                                       Thread currentThread)
 {
     VTable.Deny(Transitions.UnderGCControl(currentThread.threadIndex));
     return(installedGC.AllocateString(stringLength, currentThread));
 }
Example #11
 internal static Array AllocateArray(VTable vtable, int rank,
                                     int totalElements)
 {
     return(AllocateArray(vtable, rank, totalElements,
                          Thread.CurrentThread));
 }
Example #12
 internal static Array AllocateVector(VTable vtable, int numElements)
 {
     return(AllocateVector(vtable, numElements, Thread.CurrentThread));
 }
Example #13
 internal static Object AllocateObjectNoInline(VTable vtable,
                                               Thread currentThread)
 {
     return(AllocateObject(vtable, currentThread));
 }
Example #14
 internal static Object AllocateObject(VTable vtable,
                                       Thread currentThread)
 {
     VTable.Deny(Transitions.UnderGCControl(currentThread.threadIndex));
     return(installedGC.AllocateObject(vtable, currentThread));
 }
Example #15
 internal static Object AllocateObjectNoInline(VTable vtable)
 {
     return(AllocateObject(vtable));
 }
Example #16
 internal static void DebugPrint(String v, __arglist)
 {
     VTable.DebugPrint(v, new ArgIterator(__arglist));
 }
Example #17
 //////////////////////////////////////////////////////////////////////
 //
 internal static IntPtr FixedAlloc(int sizetdwBytes)
 {
     VTable.DebugBreak();
     return(IntPtr.Zero);
 }
Example #18
 private static int LinkToIndex(int link)
 {
     VTable.Assert(IsLink(link));
     return(link >> 1);
 }
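
LinkToIndex undoes an encoding that tags free-list links by setting the low bit, which is how the candidate-table code can tell a link apart from an object pointer (object pointers are aligned, so their low bit is clear). The counterparts below are a sketch consistent with the asserts and with LinkToIndex, not code taken from the source:

 // Hypothetical counterparts, chosen to be consistent with
 // LinkToIndex(link) == link >> 1 and with aligned object pointers
 // (low bit clear) never satisfying IsLink.
 private static bool IsLink(int value)
 {
     return (value & 1) != 0;          // the low bit tags free-list links
 }

 private static int IndexToLink(int index)
 {
     return (index << 1) | 1;          // shift the index up and set the tag bit
 }
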
Example #19
 internal static void FixedFree(IntPtr handle)
 {
     VTable.DebugBreak();
 }
Example #20
        internal static void RegisterCandidate(Object obj)
        {
#if !(REFERENCE_COUNTING_GC || DEFERRED_REFERENCE_COUNTING_GC)
            UIntPtr objPtr = Magic.addressOf(obj);
            UIntPtr page   = PageTable.Page(objPtr);
            if (!PageTable.IsGcPage(page))
            {
                return;
            }

            // It's tempting to do all manipulations of the CandidateTable with Interlocked
            // operations, but it would leave us open to the ABA problem.  For example,
            //
            //    candidateFreeList points to slot 5, which points to slot 7, and then 9.
            //    We start our CompareExchange loop based on these facts.
            //    Another thread intervenes.  It:
            //          Allocates slot 5 from the free list.
            //          Allocates slot 7 from the free list, so 9 is now at the top.
            //          It calls SuppressCandidate on #5, putting it back on the list,
            //                pointing at 9.
            //    Our thread performs the CompareExchange, removing 5 from the list &
            //          establishing 7 as the head.
            //
            // In order to avoid these (admittedly unlikely) corruptions, just use a
            // SpinLock to serialize all modifications of the CandidateTable.  But to
            // keep the SpinLock as a leaf lock and avoid deadlocks, ensure that we
            // never allocate or do anything else significant while holding the lock.

#if SINGULARITY
            bool disabled = Processor.DisableLocalPreemption();
#endif
            spinLock.Acquire();
            bool lockHeld = true;
            try {
restart:
                // Set index to point into the table at a free spot.
                int index = 0;
                UIntPtr[] table = null;

                // Grab from the free list.  Note that the free list can never be empty:
                // it either points down a linked chain of previously allocated and
                // since-freed entries, or it points out past the high water mark.  We
                // may need to allocate memory to back that index, but the free list
                // always contains at least a virtual index.  And the slot that points
                // past the high water mark is always the last one consumed from the
                // free list, so we grow only as a last resort.
                int logicalIndex = index = LinkToIndex(freeCandidateLink);

                // Relate that global index to a table and a local index.
                for (int i = 0; i < CandidateTable.Length; i++)
                {
                    table = CandidateTable[i];
                    if (table == null)
                    {
                        // Must be the first index into a new table.  We're going to
                        // create the new table, but we don't want to hold the spinLock
                        // during the create.  (We want the spinLock to be a leaf lock
                        // that cannot interact with any GC locks to cause deadlock).
                        VTable.Assert(index == 0);
                        lockHeld = false;
                        spinLock.Release();
#if SINGULARITY
                        Processor.RestoreLocalPreemption(disabled);
#endif
                        table = new UIntPtr[CandidateTable[i - 1].Length * 2];
#if SINGULARITY
                        disabled = Processor.DisableLocalPreemption();
#endif
                        spinLock.Acquire();
                        lockHeld = true;

                        // There is a race here.  If we lose the race, someone else will
                        // have extended the table.  If so, we use their official table and
                        // let the GC collect our extra one.  No need for interlocked
                        // operations since we are back inside the spinLock.
                        if (CandidateTable[i] == null)
                        {
                            CandidateTable[i] = table;
                        }
                        // Regardless, the world has changed.
                        goto restart;
                    }

                    // Are we extending or are we chasing previously allocated empty slots?
                    if (index < table.Length)
                    {
                        if (logicalIndex >= lastCandidateIndex)
                        {
                            // We are extending past the high water mark.  Grow the free
                            // list ahead of us by one slot.
                            VTable.Assert(logicalIndex == lastCandidateIndex);
                            lastCandidateIndex = logicalIndex + 1;
                            freeCandidateLink  = IndexToLink(lastCandidateIndex);
                        }
                        else
                        {
                            // We are consuming the free list
                            freeCandidateLink = (int)table[index];
                            VTable.Assert(IsLink(freeCandidateLink));
                        }
                        break;
                    }
                    index -= table.Length;      // on to the next table
                }

                VTable.Assert(table != null, "Candidate Algorithmic inconsistency");

                // We have found the table!
                table[index] = Magic.addressOf(obj);

                // The following cast looks like a 64-bit porting issue (32-bit
                // truncation), but it is only used to check that object pointers have
                // the low bit cleared and so can never be mistaken for free-list links.
                VTable.Assert(!IsLink((int)objPtr));
            }
            finally {
                if (lockHeld)
                {
                    spinLock.Release();
#if SINGULARITY
                    Processor.RestoreLocalPreemption(disabled);
#endif
                }
            }
#endif // !(REFERENCE_COUNTING_GC || DEFERRED_REFERENCE_COUNTING_GC)
        }
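
The ABA scenario described in the comment is easy to reproduce with a naive CompareExchange-based free list: the head still holds the value the first thread read (slot 5), so the compare-exchange succeeds even though the chain behind it changed, and the new head then points at a slot that is no longer free. The sketch below is a hypothetical illustration of that broken lock-free approach, which is exactly what the spin lock avoids:

 // Hypothetical illustration of the ABA hazard with an index-based free list.
 // next[i] holds the index of the next free slot, or -1 for end of list.
 internal sealed class AbaProneFreeListSketch
 {
     private readonly int[] next;
     private int head;

     internal AbaProneFreeListSketch(int capacity)   // capacity must be >= 1
     {
         next = new int[capacity];
         for (int i = 0; i < capacity - 1; i++) { next[i] = i + 1; }
         next[capacity - 1] = -1;
         head = 0;
     }

     internal int Pop()
     {
         while (true)
         {
             int oldHead = head;
             if (oldHead == -1) { return -1; }       // free list exhausted
             int newHead = next[oldHead];
             // Window of vulnerability: another thread may pop oldHead, pop
             // further slots, then push oldHead back.  head equals oldHead
             // again, so this compare-exchange succeeds, but newHead is stale,
             // which is the corruption the RegisterCandidate comment describes.
             if (System.Threading.Interlocked.CompareExchange(
                     ref head, newHead, oldHead) == oldHead)
             {
                 return oldHead;
             }
         }
     }
 }
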
Example #21
 internal void Allocate(Delegate del)
 {
     VTable.Assert(this.del == null);
     this.del = del;
 }
Example #22
 internal RuntimeType()
 {
     VTable.NotReached("RuntimeType constructor not supported");
 }
Example #23
 internal void DeAllocate(int nextIdx)
 {
     VTable.Assert(this.del != null);
     this.del     = null;
     this.nextIdx = nextIdx;
 }
Example #24
 extern private static Object AllocateObject(VTable vtable);
Example #25
 internal Delegate Delegate()
 {
     VTable.Assert(this.del != null);
     return(this.del);
 }
Example #26
 public static void Assert(bool truth)
 {
     VTable.Assert(truth);
 }
Example #27
 internal int NextIdx()
 {
     VTable.Assert(this.del == null);
     return(this.nextIdx);
 }
Example #28
 public TRef(Object o)
 {
     VTable.Assert(o != null);
     this.o = o;
 }
Example #29
        static GC() // Class Constructor (cctor)
        {
            GC.Initialize();
            switch (gcType)
            {
#if !SINGULARITY || ADAPTIVE_COPYING_COLLECTOR
            case GCType.AdaptiveCopyingCollector: {
                AdaptiveCopyingCollector.Initialize();
                GC.installedGC = AdaptiveCopyingCollector.instance;
                break;
            }
#endif
#if !SINGULARITY || MARK_SWEEP_COLLECTOR
            case GCType.MarkSweepCollector: {
                MarkSweepCollector.Initialize();
                GC.installedGC = MarkSweepCollector.instance;
                break;
            }
#endif
#if !SINGULARITY || TABLE_MARK_SWEEP_COLLECTOR
            case GCType.TableMarkSweepCollector: {
                SimpleMarkSweepCollector.Initialize();
                GC.installedGC = SimpleMarkSweepCollector.instance;
                break;
            }
#endif
#if !SINGULARITY || SEMISPACE_COLLECTOR
            case GCType.SemispaceCollector: {
                SemispaceCollector.Initialize();
                GC.installedGC = SemispaceCollector.instance;
                break;
            }
#endif
#if !SINGULARITY || SLIDING_COLLECTOR
            case GCType.SlidingCollector: {
                SlidingCollector.Initialize();
                GC.installedGC = SlidingCollector.instance;
                break;
            }
#endif
#if !SINGULARITY || CONCURRENT_MS_COLLECTOR
            case GCType.ConcurrentMSCollector: {
                ConcurrentMSCollector.Initialize();
                GC.installedGC = ConcurrentMSCollector.instance;
                break;
            }
#endif
#if !SINGULARITY || ATOMIC_RC_COLLECTOR
            case GCType.AtomicRCCollector: {
                AtomicRCCollector.Initialize();
                GC.installedGC = AtomicRCCollector.instance;
                break;
            }
#endif
#if !SINGULARITY
            case GCType.ReferenceCountingCollector: {
                ReferenceCountingCollector.Initialize();
                GC.installedGC = ReferenceCountingCollector.instance;
                break;
            }
#endif
#if !SINGULARITY
            case GCType.DeferredReferenceCountingCollector: {
                DeferredReferenceCountingCollector.Initialize();
                GC.installedGC = DeferredReferenceCountingCollector.instance;
                break;
            }
#endif
#if !SINGULARITY || NULL_COLLECTOR
            case GCType.NullCollector: {
                VTable.Assert(wbType == 0, "No need for a write barrier");
                GC.installedGC =
                    (NullCollector)
                    BootstrapMemory.Allocate(typeof(NullCollector));
                break;
            }
#endif
#if !SINGULARITY
            case GCType.CoCoMSCollector: {
                CoCoMSCollector.Initialize();
                GC.installedGC = CoCoMSCollector.instance;
                break;
            }
#endif
            default: {
                VTable.NotReached("Unknown GC type: " + gcType);
                break;
            }
            }
            GC.installedGC.NewThreadNotification(Thread.initialThread, true);
            GC.installedGC.ThreadStartNotification(Thread.initialThread.threadIndex);
        }