internal static unsafe UIntPtr GetStackSegment(UIntPtr size,
                                               ref ThreadContext context,
                                               bool kernelAllocation,
                                               bool initialStack) {
#if SINGULARITY_LINKED_STACKS
#else
    if (!initialStack) {
        // If we get here, then the initial stack size must not have
        // been sufficient to ensure that we don't need linked stacks.
        DebugStub.Break();
    }
#endif
    UIntPtr begin = context.stackBegin;
    UIntPtr limit = context.stackLimit;

#if DO_TRACE_STACKS
    Kernel.Waypoint(666);
#endif

    StackHead *head = GetStackSegmentRaw(size, ref context,
                                         kernelAllocation, initialStack);
    if (head != null) {
        head->prevBegin = begin;
        head->prevLimit = limit;
        head->esp = 0;
    }
    return (UIntPtr)head;
}
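// A minimal sketch of the StackHead record assumed throughout this
// file. The field names match the accesses above (prevBegin,
// prevLimit, esp); the real declaration elsewhere in the sources may
// carry additional fields or attributes.
internal struct StackHead {
    internal UIntPtr prevBegin;   // stackBegin of the segment below this one
    internal UIntPtr prevLimit;   // stackLimit of the segment below this one
    internal UIntPtr esp;         // saved stack pointer recorded at link time
}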
internal static unsafe void ActivatePreviousStackSegmentLimit() {
    // To avoid sprinkling [NoStackOverflowCheck] attributes
    // on too many methods, we manually inline a couple of methods.

    // ThreadContext *context = Processor.GetCurrentThreadContext();
    ThreadRecord *threadRecord = Isa.GetCurrentThread();
    ThreadContext *context = (ThreadContext *) threadRecord;

    StackHead *head = (StackHead *)
        (context->stackBegin - sizeof(StackHead));

    // Isa.StackLimit = head->prevLimit;
    threadRecord->activeStackLimit = head->prevLimit;
}
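// Hypothetical debugging aid (not in the original sources) that spells
// out the address arithmetic relied on above and in the routines
// below: the StackHead sits immediately below context.stackBegin, and
// the raw allocation starts SafetyBufferSize below context.stackLimit.
internal static unsafe void CheckSegmentInvariants(ref ThreadContext context) {
    StackHead *head = (StackHead *)(context.stackBegin - sizeof(StackHead));
    UIntPtr addr = context.stackLimit - SafetyBufferSize;
    UIntPtr size = context.stackBegin - context.stackLimit + SafetyBufferSize;

    if ((UIntPtr)head >= context.stackBegin ||
        addr + size != context.stackBegin) {
        DebugStub.Break();   // segment layout invariant violated
    }
}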
internal static unsafe void ReturnStackSegmentRawCommon(ref ThreadContext context,
                                                        bool kernelAllocation,
                                                        bool initialStack) {
    UIntPtr begin = context.stackBegin;
    UIntPtr limit = context.stackLimit;

    StackHead *head = (StackHead *)(begin - sizeof(StackHead));

#if DO_TRACE_STACKS
    Kernel.Waypoint(669);
#endif

    UIntPtr addr = limit - SafetyBufferSize;
    UIntPtr size = begin - limit + SafetyBufferSize;

#if DEBUG_STACK_VERBOSE
    fixed (ThreadContext *ptr = &context) {
        Tracing.Log(Tracing.Debug,
                    "ReturnStackSegmentRaw(ctx={0:x8}) [{1:x8}..{2:x8}]\n",
                    (UIntPtr)ptr, context.stackLimit, context.stackBegin);
    }
#endif

#if !PAGING
    context.stackBegin = head->prevBegin;
    context.stackLimit = head->prevLimit;
#else
    //context.stackBegin = head->prevBegin;
    //context.stackLimit = head->prevLimit;
    // Moved below, because of the following scenario:
    // - call UnlinkStack
    // - UnlinkStack switches to the scheduler stack
    // - UnlinkStack calls ReturnStackSegmentRaw, which calls
    //   various other methods
    // - one of the other methods invokes write barrier code
    // - the write barrier code performs a stack link check
    // - if context.stackLimit is already set to head->prevLimit,
    //   then it may appear that we're out of stack space,
    //   even if we're really not, so we jump to LinkStack
    // - LinkStack overwrites the scheduler stack
    // TODO: really fix this.
    UIntPtr stackBegin = head->prevBegin;
    UIntPtr stackLimit = head->prevLimit;
#endif

    Process owner = Process.GetProcessByID(context.processId);

    //
    //// See note above in GetStackSegmentRaw
    //if ((owner != Process.kernelProcess) &&
    //    (addr >= BootInfo.KERNEL_BOUNDARY)) {
    //    MemoryManager.UserFree(addr, MemoryManager.PagesFromBytes(size), owner);
    //}
    //else {
    //    MemoryManager.KernelFree(addr, MemoryManager.PagesFromBytes(size), owner);
    //}
    //
    MemoryManager.StackFree(addr, MemoryManager.PagesFromBytes(size), owner,
                            kernelAllocation, initialStack);

#if PAGING
    // See comments above.
    context.stackBegin = stackBegin;
    context.stackLimit = stackLimit;
#endif

#if DEBUG_STACK_VERBOSE
    Tracing.Log(Tracing.Debug,
                "ReturnStackSegment({0:x8}, {1:x8}) [{2:x8}..{3:x8}]\n",
                addr, size, context.stackLimit, context.stackBegin);
#endif
}
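// Illustrative pairing of the routines above (a hypothetical call
// site, not from the sources; the real link/unlink transitions happen
// in assembly stubs rather than in C# like this). Note the ordering:
// the previous limit must be reactivated while context.stackBegin
// still points at the segment being discarded, because
// ActivatePreviousStackSegmentLimit locates the StackHead through it.
internal static unsafe void PopStackSegment(ref ThreadContext context) {
    // Restore the previous segment's limit first...
    ActivatePreviousStackSegmentLimit();
    // ...then free the current chunk and restore begin/limit.
    ReturnStackSegmentRawCommon(ref context,
                                /*kernelAllocation*/ true,
                                /*initialStack*/ false);
}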
internal static unsafe StackHead *GetStackSegmentRaw(UIntPtr size,
                                                     ref ThreadContext context,
                                                     bool kernelAllocation,
                                                     bool initialStack) {
    // Allocate a new chunk, making room for a StackHead at the top.
    // If you change these constants to add more data, see the
    // comment about InitialStackSize at the top of this file!

#if DO_TRACE_STACKS
    Kernel.Waypoint(667);
#endif

    if (size == UIntPtr.Zero) {
        size = InitialStackSize;
    }
    size = MemoryManager.PagePad(size + sizeof(StackHead) + SafetyBufferSize);

    UIntPtr chunk;
    Process owner = Process.GetProcessByID(context.processId);

    //
    //// NOTE: here's where we should be clever about
    //// whether to allocate a stack chunk in the user range
    //// or the kernel range. Except, if we switch contexts
    //// during an ABI call while using a user-range stack
    //// segment on a paging machine, we die. Gloss over
    //// this hackily by always getting stack segments
    //// from the kernel range.
    //if (kernelAllocation || (owner == Process.kernelProcess)) {
    //    chunk = MemoryManager.KernelAllocate(
    //        MemoryManager.PagesFromBytes(size), owner, 0, PageType.Stack);
    //}
    //else {
    //    chunk = MemoryManager.UserAllocate(
    //        MemoryManager.PagesFromBytes(size), owner, 0, PageType.Stack);
    //}
    //
    UIntPtr pageCount = MemoryManager.PagesFromBytes(size);

#if DEBUG_STACK_VERBOSE
    fixed (ThreadContext *ptr = &context) {
        Tracing.Log(Tracing.Debug,
                    "GetStackSegmentRaw(ctx={0:x8},size={1:d}) pages={2} [{3:x8}..{4:x8}]",
                    (UIntPtr)ptr, size, pageCount,
                    context.stackLimit, context.stackBegin);
    }
#endif

    chunk = MemoryManager.StackAllocate(pageCount, owner, 0,
                                        kernelAllocation, initialStack);

    if (chunk != UIntPtr.Zero) {
        // NB: We do _not_ zero out stack memory!
        // We assume that Bartok prevents access to previous contents.
        StackHead *head = (StackHead *)(chunk + size - sizeof(StackHead));

        context.stackBegin = chunk + size;
        context.stackLimit = chunk + SafetyBufferSize;

#if DEBUG_STACK_VERBOSE
        Tracing.Log(Tracing.Debug,
                    "GetStackSegmentRaw(size={0:d}) -> [{1:x8}..{2:x8}]",
                    size, context.stackLimit, context.stackBegin);
#endif
        return head;
    }
    else {
        // Stack allocation failed. In the future, we should trigger a
        // kernel exception; for now, the break to the debugger below
        // is disabled and we simply return null.
#if DEBUG_STACK_VERBOSE
        Tracing.Log(Tracing.Debug,
                    "GetStackSegmentRaw: StackAllocate failed! (size={0:d})",
                    size);
#endif
        //DebugStub.Break();
        return null;
    }
}
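// Hypothetical helper (not in the sources) isolating the sizing
// arithmetic inlined in GetStackSegmentRaw above, so the invariant is
// explicit: every chunk is page-padded with room for the StackHead at
// its top and the safety buffer at its bottom, and a zero request
// falls back to InitialStackSize.
private static unsafe UIntPtr ComputeChunkBytes(UIntPtr requested) {
    if (requested == UIntPtr.Zero) {
        requested = InitialStackSize;
    }
    // Round up so the head and safety buffer never eat into the
    // usable stack the caller asked for.
    return MemoryManager.PagePad(
        requested + sizeof(StackHead) + SafetyBufferSize);
}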