//
// This is called to allocate memory for a stack, either an initial stack
// or a dynamically allocated stack chunk.
//
internal static UIntPtr StackAllocate(UIntPtr numPages,
                                      Process process,
                                      uint extra,
                                      bool kernelAllocation,
                                      bool initialStack)
{
    UIntPtr result = UIntPtr.Zero;

    if (useAddressTranslation) {
        if (KernelRangeWrapper != null) {
            result = KernelRangeWrapper.Allocate(numPages, process, extra,
                                                 PageType.Stack);
        }
        else {
            // Very early in the initialization sequence; ASSUME there is not
            // yet any concurrent access to paging descriptors, and allocate
            // memory without a paging-descriptor lock.
            result = KernelRange.Allocate(numPages, process, extra,
                                          PageType.Stack, null);
        }
    }
    else {
        result = FlatPages.StackAllocate(BytesFromPages(numPages),
                                         UIntPtr.Zero,
                                         MemoryManager.PageSize,
                                         process, extra,
                                         kernelAllocation, initialStack);
    }

    if (kernelAllocation && result == UIntPtr.Zero) {
        DebugStub.WriteLine("******** Kernel OOM on Stack ********");

        //
        // Our kernel runtime cannot handle this right now, so rather than
        // return a null that will show up as a cryptic lab failure, always
        // drop to the debugger.
        //
        // Note: Reservations should avoid this, so this is an indication that
        // something has gone wrong in our reservation policy and estimates
        // of kernel stack usage.
        //
        DebugStub.Break();
    }

    return result;
}
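// For orientation, a minimal hypothetical caller sketch: growing a kernel
// stack by a byte count rather than a page count. GrowKernelStackChunk and
// the rounding arithmetic are illustrative assumptions, not part of the
// source; only StackAllocate and MemoryManager.PageSize are taken from it.
internal static UIntPtr GrowKernelStackChunk(Process process,
                                             UIntPtr byteCount)
{
    // Round the request up to whole pages, since the allocator's interface
    // is page-granular. (Assumes the page size fits in a ulong, which it
    // does on any supported architecture.)
    ulong bytes = (ulong)byteCount;
    ulong pageSize = (ulong)MemoryManager.PageSize;
    UIntPtr numPages = (UIntPtr)((bytes + pageSize - 1) / pageSize);

    // kernelAllocation == true, so on out-of-memory StackAllocate drops to
    // the debugger instead of handing UIntPtr.Zero back to us.
    return StackAllocate(numPages, process, 0 /* extra */,
                         true /* kernelAllocation */,
                         false /* initialStack */);
}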
internal unsafe UIntPtr Allocate(UIntPtr numPages, Process process,
                                 uint extra, PageType type)
{
    CheckAddressSpace();

    if (indirect) {
        return pRange->Allocate(numPages, process, extra, type, parentDomain);
    }
    else {
        return range.Allocate(numPages, process, extra, type, parentDomain);
    }
}
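// The direct-vs-indirect split above lets one wrapper type either embed its
// range or point at a range owned elsewhere (e.g. a shared kernel range)
// without virtual dispatch. Below is a self-contained sketch of that shape,
// assuming nothing beyond the method itself: RangeSketch and WrapperSketch
// are illustrative stand-ins, and only the names indirect, pRange, and
// range mirror the code above.
internal struct RangeSketch
{
    // Stand-in: a real range would carve numPages out of its address span.
    public UIntPtr Allocate(UIntPtr numPages) { return numPages; }
}

internal unsafe struct WrapperSketch
{
    private bool indirect;        // true  => delegate to the shared range
    private RangeSketch* pRange;  // shared range, owned elsewhere
    private RangeSketch range;    // private range embedded in the wrapper

    public UIntPtr Allocate(UIntPtr numPages)
    {
        return indirect ? pRange->Allocate(numPages)
                        : range.Allocate(numPages);
    }
}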