Example #1
0
        // Called just before a garbage collection begins.  Claims exclusive
        // ownership of the shared log buffer for the GC thread and records
        // which generation is being collected.
        protected override void PreGC(int generation)
        {
            try {
                // Take ownership of the buffer to prevent mutating threads from
                // interleaving with us.

                DebugStub.Assert(Buffer.OwningThread == null);
                Buffer.OwningThread = Thread.CurrentThread;

                // Timestamp the start of this collection in the log.
                Buffer.LogTick();

                // Clamp the generation index so it always addresses a valid
                // slot of the per-generation counter array below.
                if (generation >= maxGeneration)
                {
                    generation = maxGeneration - 1;
                }
                generations[generation]++;

                unsafe
                {
                    // Pin the counter array so its address stays stable while
                    // the unmanaged logger reads it.
                    fixed(int *ptr = &generations[0])
                    {
                        ProfilerBuffer.LogGenerations(maxGeneration, ptr);
                    }
                }
            }
            catch (Exception) {
                // On any logging failure, disable the profiler rather than
                // risk disturbing the collection, then propagate the error.
                enabled = false;
                throw;
            }
        }
Example #2
0
        // Atomically drop one reference and return the count that remains.
        internal int Release()
        {
            int remaining = Interlocked.Decrement(ref refCount);

            // NOTE(review): the invariant asserted here is that the count stays
            // strictly positive after a release — presumably some other holder
            // always outlives this caller; confirm against call sites.
            DebugStub.Assert(remaining > 0);
            return remaining;
        }
Example #3
0
        // In the heap reachability graph, we are starting to dump a new object.
        // Stages the object's address, type id, and size into the temporary GC
        // buffer; the buffer is expected to be empty on entry.
        protected override void StartScanOneObject(UIntPtr objectAddress, Type type, UIntPtr size)
        {
            DebugStub.Assert(tempGCBufferEntries == 0);

            // Append the three-entry header for this object at the current
            // write position, then advance the entry count past it.
            tempGCBuffer[tempGCBufferEntries]     = objectAddress;
            tempGCBuffer[tempGCBufferEntries + 1] = (UIntPtr)GetTypeId(type);
            tempGCBuffer[tempGCBufferEntries + 2] = size;
            tempGCBufferEntries += 3;
        }
Example #4
0
        // The rest of our implementation details:

        // Maps an (allocation type, size, call stack) triple to a small integer
        // stack id, emitting new type/function/stack records to the profiler
        // buffer the first time each is seen.  Returns the id of the matching
        // (possibly hash-shared) stack record.
        private uint GetStackId(Type type, UIntPtr size, UIntPtr[] stackEips, uint stackSize)
        {
            // First make sure that we have a type record for the object being
            // instantiated at this stack.

            uint typeNo = GetTypeId(type);

            DebugStub.Assert(stackSize <= stackEips.Length);

            // Then, make sure we have a function record for each Eip in the stack.  Of course
            // we don't know when a bunch of Eips map to different offsets in the same function.
            // So make a function for each unique Eip & fix it up in the post-processing.
            // Hopefully there aren't too many unique callsites in each method.

            ulong hash = typeNo; // perhaps "typeNo ^ size" ?

            for (int i = 0; i < stackSize; i++)
            {
                // Map the individual Eips into their corresponding functions
                // and fold each function id into a shift-and-xor rolling hash.
                stackNos[i] = GetFunctionId(stackEips[i]);
                hash        = (hash << 11) + (hash ^ stackNos[i]);
            }

            // TODO: Note that we will statistically map some distinct stacks into the same
            // stack if they have the same hash.
            object o = stackTable[hash];

            if (o != null)
            {
                // Cache hit: the table stores the previously assigned stack id
                // (as a boxed uint).
                return((uint)o);
            }

            // It's a novel stack.  Note that we embed the size into the stack, but we
            // don't include the size in the hash.  There's a technique for sharing
            // prefixes of stacks that could be explored here to get more accurate profiles
            // without huge stack expansion.
            // TODO: consider the above.

            uint stackNo = nextStackNo++;

            // Stacks are emitted backwards, presumably to support common prefixes better.
            for (int i = (int)stackSize - 1; i >= 0; i--)
            {
                functionsIDs[stackSize - 1 - i] = stackNos[i];
            }

            unsafe
            {
                // Pin the reversed id array so the unmanaged logger can read it.
                fixed(uint *ptr = &functionsIDs[0])
                {
                    ProfilerBuffer.LogStack(stackNo, typeNo, size, stackSize, ptr);
                }
            }
            // Remember the id only after the record has been logged.
            stackTable[hash] = stackNo;

            return(stackNo);
        }
Example #5
0
        // Prints a diagnostic message and then parks the calling thread
        // forever.  Terminal "cannot continue" state; never returns.
        //
        // Fix: removed three dead locals from the original (`p`, `last`, `n`)
        // that were assigned but never read.
        private static void ARM_SPIN_FOREVER(string msg)
        {
            // Thread.Sleep below needs a running scheduler, which in turn
            // requires interrupts to be enabled.
            DebugStub.Assert(!Processor.InterruptsDisabled());

            DebugStub.WriteLine(msg);

            // Sleep-spin rather than busy-spin so other threads can still run.
            while (true)
            {
                Thread.Sleep(1000);
            }
        }
Example #6
0
        // Finds a free slot in the protection-domain table for `pd`, scanning
        // round-robin from where the previous allocation left off.  Returns the
        // slot index, or uint.MaxValue if the table is full.
        // TODO: consider asserting that tableLock is held here.
        private static uint AssignNewPDSlot(ProtectionDomain pd)
        {
            uint tableSize = (uint)PdTable.Length;

            for (uint probe = 0; probe < tableSize; probe++)
            {
                uint slot = (PdIndexGenerator + probe) % tableSize;

                if (PdTable[slot] != null)
                {
                    continue;
                }

                PdTable[slot] = pd;
                // The next search starts just past the slot we consumed.
                PdIndexGenerator = (slot + 1) % tableSize;
                return slot;
            }

            DebugStub.Assert(false, "Out of process domain slots!");
            return uint.MaxValue;
        }
Example #7
0
        // Determines how many processors to bring up.  Outputs the real CPU
        // count via `cpusLength` and returns the count to actually use, which
        // may be capped by the "mp" command-line argument or by an SP build.
        private static int GetCpuCount(out int cpusLength)
        {
            int realCpus = Platform.ThePlatform.CpuRealCount;
            int maxCpus  = Platform.ThePlatform.CpuMaxCount;

            DebugStub.Assert(realCpus <= maxCpus);

            // See if the command line argument limits our processor count;
            // default to the real count when the argument is absent.
            int requested = GetIntegerArgument("mp", realCpus);

            cpusLength = realCpus;

#if !SINGULARITY_MP
            // Single-processor builds never run more than one CPU.
            if (requested > 1)
            {
                Console.WriteLine("Limiting processors to 1 due to SP build");
                requested = 1;
            }
#endif
            return requested;
        }
Example #8
0
        // Create a log entry for the allocation that just occurred on this thread.
        // Captures the allocating call stack and writes an allocation record to
        // the profiler buffer, guarded against recursion and GC-time logging.
        protected override void Allocation(UIntPtr objAddr, Type type, UIntPtr size)
        {
            bool iflag;

            // We cannot recurse inside an Allocation notification, or we will simply
            // blow the stack on the first entry.  Also, we don't want to log allocations
            // that occur as a consequence of logging the state of the GC heap -- though
            // we could support that if we chose to.

            if (enabled &&
                recurseThread != Thread.CurrentThread &&            // recurse?
                Buffer.OwningThread != Thread.CurrentThread)        // GC logging?

            {
                // Disable preemption before taking the lock so we cannot be
                // switched out while holding it; restored in the finally below.
                iflag = Processor.DisableLocalPreemption();
                allocationLock.Acquire();

                try {
                    // Mark this thread as inside the notification so any
                    // allocation we trigger ourselves is filtered out above.
                    DebugStub.Assert(recurseThread == null);
                    recurseThread = Thread.CurrentThread;

                    // Timestamp the record stream.
                    Buffer.LogTick();

                    // Capture the current call stack's return addresses and
                    // intern them as a stack id (0 when no stack is available).
                    uint stackSize = Isa.GetStackReturnAddresses(stackEips);
                    uint stkNo     = 0;

                    if (stackSize > 0)
                    {
                        stkNo = GetStackId(type, size, stackEips, stackSize);
                    }

                    ProfilerBuffer.LogAllocation(Thread.CurrentThread.GetThreadId(), objAddr, stkNo);
                }
                finally {
                    // Release in the reverse order of acquisition: clear the
                    // recursion guard, drop the lock, then re-enable preemption.
                    recurseThread = null;
                    allocationLock.Release();
                    Processor.RestoreLocalPreemption(iflag);
                }
            }
        }
Example #9
0
        // A GC has finished.  The world is in a sane place, except that we might not
        // have started up all the mutator threads if this is a StopTheWorld collection.
        // Dumps the post-collection heap state (roots, then the object graph)
        // and finally releases the log buffer claimed in PreGC.
        protected override void PostGC()
        {
            try {
                // emit the fact a GC has happened, including the state of the heap.
                // TODO: have another function to log the tick count here to estimate the
                // time spent in GC too.

                Buffer.LogTick();

                //  We should have an empty buffer, meaning we completed logging from the
                //  previous operation while entering this code.

                DebugStub.Assert(tempGCBufferEntries == 0);

                // Fill the temp buffer with the current GC roots.
                ScanRoots();

                unsafe
                {
                    // Pin the temp buffer while the unmanaged logger reads it,
                    // then reset it for the next phase.
                    fixed(UIntPtr *ptr = &tempGCBuffer[0])
                    {
                        ProfilerBuffer.LogRoots(tempGCBufferEntries, ptr);
                        tempGCBufferEntries = 0;
                    }
                }

                // Write all the reachability graph of the heap
                ScanObjects();

                // Once we have finished writing everything, we can allow mutator threads to
                // share access to the fileBuffer with their own consistent entries.
                DebugStub.Assert(Buffer.OwningThread == Thread.CurrentThread);
                Buffer.OwningThread = null;
            }
            catch (Exception) {
                // On any logging failure, disable the profiler rather than
                // risk disturbing the runtime, then propagate the error.
                enabled = false;
                throw;
            }
        }
Example #10
0
        //
        // Someone must arrange to call this from *within* the
        // Protection Domain for us to have an opportunity to finish
        // initializing.
        //
        // Lazily finishes initialization of this protection domain: sets up
        // the user virtual-memory range and (under PAGING) the user shared
        // heap and ring-3 ABI stubs.  Idempotent; uses double-checked
        // initialization under initSpin so only the first caller does the work.
        //
        internal unsafe void InitHook()
        {
            // If paging is disabled then just return immediately
            if (!MemoryManager.UseAddressTranslation)
            {
                return;
            }

            DebugStub.Assert(AddressSpace.CurrentAddressSpace == this.AddressSpace);

            // First (unlocked) check of the double-checked init pattern.
            if (this.initialized)
            {
                // Someone else has already set up the space
                return;
            }

            // iflag captures the pre-lock interrupt state for Unlock below.
            bool iflag = initSpin.Lock();

            try {
                // Second (locked) check: we may have raced another initializer.
                if (this.initialized)
                {
                    // Someone else snuck in and initialized
                    return;
                }

                //
                // We're first into this space, so set it up.
                //
#if VERBOSE
                DebugStub.WriteLine("Setting up protection domain \"{0}\"",
                                    __arglist(this.name));
#endif

                userRange = new VirtualMemoryRange(VMManager.UserHeapBase,
                                                   VMManager.UserHeapLimit,
                                                   this);
#if PAGING
                if (kernelMode)
                {
                    // This will be a ring-0, trusted domain, so just
                    // point the userSharedHeap at the kernel's comm heap.
                    userSharedHeap = SharedHeap.KernelSharedHeap;

                    this.initialized = true;
                }
                else
                {
                    // Create a new shared heap that lives in
                    // user-land.
                    userSharedHeap = new SharedHeap(this, userRange);
#if VERBOSE
                    DebugStub.WriteLine("  ...Created a shared heap");
#endif

                    //
                    // N.B.: this is kind of tricky. Loading an
                    // executable image involves allocating memory,
                    // which goes through this object. So, before
                    // attempting the load, mark ourselves as initialized.
                    //
                    // ---- DON'T PUT GENUINE INITIALIZATION
                    //      CODE BELOW HERE! ---------
                    this.initialized = true;

                    // Load our own, protection-domain-private copy of the
                    // ABI stubs. These will get shared by all apps in
                    // this domain.
                    IoMemory syscallsMemory = Binder.LoadRawImage("/init", "syscalls.dll");
                    IoMemory loadedMemory;

                    // Load the stubs library into the user range, but make
                    // the kernel process the logical owner. This seems like
                    // the only sensible approach since the stubs do not
                    // belong to any particular process but must be in the
                    // user range of memory.

                    // N.B.: RE-ENTERS this object!
                    ring3AbiImage = PEImage.Load(Process.kernelProcess, syscallsMemory,
                                                 out loadedMemory,
                                                 false, // isForMp
                                                 false  // inKernelSpace
                                                 );

                    ring3AbiExports = ring3AbiImage.GetExportTable(loadedMemory);
#if VERBOSE
                    DebugStub.WriteLine("  ...Loaded ring-3 ABI stubs");
#endif
                }
#else // PAGING
                this.initialized = true;
#endif // PAGING
            }
            finally {
                // Whatever path we took, we must leave the domain initialized
                // before releasing the lock and restoring interrupt state.
                DebugStub.Assert(this.initialized);
                initSpin.Unlock(iflag);
            }
        }
Example #11
0
 // Flushes the TLB entry mapping the given virtual address, which must be
 // page-aligned.  Thin wrapper over the ISA-specific primitive.
 internal static void InvalidateTLBEntry(UIntPtr pageAddr)
 {
     DebugStub.Assert(MemoryManager.IsPageAligned(pageAddr));
     Isa.InvalidateTLBEntry(pageAddr);
 }