// Allocates numPages of user memory on behalf of process. Routes to the
// current protection domain's user range when address translation is on,
// otherwise to the flat page allocator. Invokes the user-memory failure
// path when no memory could be obtained.
internal static UIntPtr UserAllocate(UIntPtr numPages,
                                     Process process,
                                     uint extra,
                                     PageType type)
{
    UIntPtr addr = useAddressTranslation
        ? ProtectionDomain.CurrentDomain.UserRange.Allocate(
              numPages, process, extra, type)
        : FlatPages.Allocate(BytesFromPages(numPages),
                             UIntPtr.Zero,
                             MemoryManager.PageSize,
                             process, extra, type);

    if (addr == UIntPtr.Zero) {
        UserMemoryFailure();
    }

    return addr;
}
// Releases numPages of user memory starting at addr for the given process,
// using whichever backing allocator is active.
internal static void UserFree(UIntPtr addr, UIntPtr numPages, Process process)
{
    if (!useAddressTranslation) {
        FlatPages.Free(addr, BytesFromPages(numPages), process);
        return;
    }
    ProtectionDomain.CurrentDomain.UserRange.Free(addr, numPages, process);
}
// Frees a stack region of numPages starting at startAddr. The flat
// allocator uses kernelAllocation/initialStack for stack accounting;
// under address translation those flags are not needed and the region
// is returned through the kernel range.
internal static void StackFree(UIntPtr startAddr, UIntPtr numPages,
                               Process process, bool kernelAllocation,
                               bool initialStack)
{
    if (!useAddressTranslation) {
        FlatPages.StackFree(startAddr,
                            MemoryManager.BytesFromPages(numPages),
                            process, kernelAllocation, initialStack);
        return;
    }
    KernelRange.Free(startAddr, numPages, process);
}
// Returns a block of I/O memory (addr, size in bytes) to the I/O heap,
// or to the flat allocator when address translation is disabled.
internal static void FreeIOMemory(UIntPtr addr, UIntPtr size, Process process)
{
    if (!useAddressTranslation) {
        FlatPages.Free(addr, size, process);
        return;
    }
    KernelIOMemoryHeap.Free(addr, size, process);
}
// Tags the [start, start + bytes) region with the given page tag in
// whichever page-descriptor store is active.
internal static void SetRange(UIntPtr start, UIntPtr bytes, uint tag)
{
    if (!useAddressTranslation) {
        FlatPages.SetRange(start, bytes, tag);
        return;
    }
    KernelRange.SetRange(start, bytes, tag);
}
// Releases all memory owned by the given process and returns the amount
// reclaimed (as reported by the active allocator).
internal static UIntPtr FreeProcessMemory(Process process)
{
    return useAddressTranslation
        ? ProtectionDomain.CurrentDomain.UserRange.FreeAll(process)
        : FlatPages.FreeAll(process);
}
// Frees numPages of kernel memory starting at startAddr on behalf of
// process, via the kernel range or the flat allocator.
internal static void KernelFree(UIntPtr startAddr, UIntPtr numPages,
                                Process process)
{
    if (!useAddressTranslation) {
        FlatPages.Free(startAddr,
                       MemoryManager.BytesFromPages(numPages),
                       process);
        return;
    }
    KernelRange.Free(startAddr, numPages, process);
}
//
// Allocates memory for a stack — either an initial stack or a
// dynamically allocated stack chunk.
//
internal static UIntPtr StackAllocate(UIntPtr numPages, Process process,
                                      uint extra, bool kernelAllocation,
                                      bool initialStack)
{
    UIntPtr stackBase;

    if (useAddressTranslation) {
        if (KernelRangeWrapper != null) {
            stackBase = KernelRangeWrapper.Allocate(numPages, process,
                                                    extra, PageType.Stack);
        }
        else {
            // Very early in the initialization sequence; ASSUME there is not
            // yet any concurrent access to paging descriptors, and allocate
            // memory without a paging-descriptor lock.
            stackBase = KernelRange.Allocate(numPages, process, extra,
                                             PageType.Stack, null);
        }
    }
    else {
        stackBase = FlatPages.StackAllocate(BytesFromPages(numPages),
                                            UIntPtr.Zero,
                                            MemoryManager.PageSize,
                                            process, extra,
                                            kernelAllocation, initialStack);
    }

    if (kernelAllocation && stackBase == UIntPtr.Zero) {
        DebugStub.WriteLine("******** Kernel OOM on Stack ********");
        //
        // Our kernel runtime can not handle this right now, so rather than
        // return a null which will show up as a cryptic lab failure, always
        // drop to the debugger.
        //
        // Note: Reservations should avoid this, so this is an indication that
        // something has gone wrong in our reservation policy and estimates
        // of kernel stack usage.
        //
        DebugStub.Break();
    }

    return stackBase;
}
// Looks up the region containing startAddr for the current process,
// reporting its base address, size, and page type. Not yet implemented
// for the address-translation path (breaks into the debugger and
// reports an unknown page).
internal static PageType UserQuery(UIntPtr startAddr,
                                   out UIntPtr regionAddr,
                                   out UIntPtr regionSize)
{
    if (!useAddressTranslation) {
        return FlatPages.Query(startAddr, Thread.CurrentProcess,
                               out regionAddr, out regionSize);
    }

    // TODO: Query NYI
    DebugStub.Break();
    regionAddr = UIntPtr.Zero;
    regionSize = UIntPtr.Zero;
    return PageType.Unknown;
}
// Looks up the region containing startAddr on behalf of the kernel
// process. Not yet implemented for the address-translation path
// (breaks into the debugger and reports an unknown page).
internal static PageType KernelQuery(UIntPtr startAddr,
                                     out UIntPtr regionAddr,
                                     out UIntPtr regionSize)
{
    if (!useAddressTranslation) {
        return FlatPages.Query(startAddr, Process.kernelProcess,
                               out regionAddr, out regionSize);
    }

    // TODO: Query not yet implemented
    DebugStub.Break();
    regionAddr = UIntPtr.Zero;
    regionSize = UIntPtr.Zero;
    return PageType.Unknown;
}
// Attempts to grow an existing kernel allocation at addr by numPages
// in place. Returns zero on failure without reporting it: callers
// default to KernelAllocate-and-copy when the range can't be extended.
internal static UIntPtr KernelExtend(UIntPtr addr, UIntPtr numPages,
                                     Process process, PageType type)
{
    if (!useAddressTranslation) {
        return FlatPages.AllocateExtend(addr, BytesFromPages(numPages),
                                        process, 0, type);
    }

    // TODO: Extend not yet implemented
    DebugStub.Break();
    return UIntPtr.Zero;
}
// Allocates numPages of kernel heap memory. Uses the locked
// KernelRangeWrapper once it exists; before that (very early boot) it
// goes straight to KernelRange without a paging-descriptor lock, on the
// ASSUMPTION that there is no concurrent access yet. Drops to the
// debugger on OOM rather than returning null, which would surface as a
// cryptic lab failure.
internal static UIntPtr KernelAllocate(UIntPtr numPages, Process process,
                                       uint extra, PageType type)
{
    UIntPtr heapBase;

    if (useAddressTranslation) {
        heapBase = (KernelRangeWrapper != null)
            ? KernelRangeWrapper.Allocate(numPages, process, extra, type)
            : KernelRange.Allocate(numPages, process, extra, type, null);
    }
    else {
        heapBase = FlatPages.Allocate(BytesFromPages(numPages),
                                      UIntPtr.Zero,
                                      MemoryManager.PageSize,
                                      process, extra, type);
    }

    if (heapBase == UIntPtr.Zero) {
        DebugStub.WriteLine("******** Kernel OOM on Heap ********");
        //
        // Our kernel runtime can not handle this right now, so rather than
        // return a null which will show up as a cryptic lab failure, always
        // drop to the debugger.
        //
        DebugStub.Break();
    }

    return heapBase;
}
// Attempts to grow an existing user allocation at addr by numPages in
// place. Not yet implemented for the address-translation path (breaks
// into the debugger). A zero result is routed through the user-memory
// failure path before being returned.
internal static UIntPtr UserExtend(UIntPtr addr, UIntPtr numPages,
                                   Process process, PageType type)
{
    UIntPtr region = UIntPtr.Zero;

    if (!useAddressTranslation) {
        region = FlatPages.AllocateExtend(addr, BytesFromPages(numPages),
                                          process, 0, type);
    }
    else {
        // TODO: Extend NYI
        DebugStub.Break();
    }

    if (region == UIntPtr.Zero) {
        UserMemoryFailure();
    }

    return region;
}
// Allocates I/O memory of the given byte size and alignment. When
// limitAddr is non-zero the flat-pages path allocates below that
// address (for devices with addressing limits). Drops to the debugger
// on OOM rather than returning null.
internal static UIntPtr AllocateIOMemory(UIntPtr limitAddr, UIntPtr bytes,
                                         UIntPtr alignment, Process process)
{
    UIntPtr ioRegion;

    if (useAddressTranslation) {
        ioRegion = KernelIOMemoryHeap.Allocate(limitAddr, bytes,
                                               alignment, process);
    }
    else if (limitAddr > 0) {
        ioRegion = FlatPages.AllocateBelow(limitAddr, bytes, alignment,
                                           process, 0, PageType.NonGC);
    }
    else {
        ioRegion = FlatPages.Allocate(bytes, bytes, alignment,
                                      process, 0, PageType.NonGC);
    }

    if (ioRegion == UIntPtr.Zero) {
        DebugStub.WriteLine("******** Kernel OOM on IoMemory ********");
        //
        // Our kernel runtime can not handle this right now, so rather than
        // return a null which will show up as a cryptic lab failure, always
        // drop to the debugger.
        //
        DebugStub.Break();
    }

    return ioRegion;
}
/////////////////////////////////////
// PUBLIC METHODS
/////////////////////////////////////

// Initializes the kernel memory subsystem. Selects between the
// address-translation (paging) path and the flat-pages path, then brings
// up the chosen allocator. On the paging path this sets up physical
// pages, the I/O memory heap, virtual memory (which ENABLES paging), the
// kernel virtual-memory range, and marks the kernel image, boot-allocated
// memory, and the boot CPU's stack — order matters throughout.
internal static void Initialize()
{
    DebugStub.WriteLine("Initializing memory subsystem...");

    // Only allow paging in HALs which support running in Ring 0
    // but always force paging in the HIP builds for compatibility
#if !PAGING
    useAddressTranslation = UseAddressTranslationInCmdLine();
#else
    useAddressTranslation = true;
#endif

    if (useAddressTranslation) {
        DebugStub.WriteLine("Using address translation...\n");

        Platform p = Platform.ThePlatform;

        // Set up the hardware-pages table and reserve a range for
        // I/O memory
        IOMemoryBaseAddr = PhysicalPages.Initialize(Platform.IO_MEMORY_SIZE);

        // Set up the I/O memory heap spanning the reserved range.
        KernelIOMemoryHeap = new PhysicalHeap(
            (UIntPtr)IOMemoryBaseAddr.Value,
            (UIntPtr)(IOMemoryBaseAddr.Value + Platform.IO_MEMORY_SIZE));

        // Set up virtual memory. ** This enables paging ** !
        VMManager.Initialize();

        // Set up the kernel's memory ranges.
        //
        // The kernel's general-purpose range is special because
        // it *describes* low memory as well as the GC range proper
        // so the kernel's GC doesn't get confused by pointers to
        // static data in the kernel image.
        KernelRange = new VirtualMemoryRange_struct(
            VMManager.KernelHeapBase,
            VMManager.KernelHeapLimit,
            UIntPtr.Zero,
            VMManager.KernelHeapLimit,
            null); // no concurrent access to page descriptors yet

        // Mark the kernel's special areas. First, record the kernel memory.
        if (p.KernelDllSize != 0) {
            UIntPtr kernelDllLimit = p.KernelDllBase + p.KernelDllSize;
            KernelRange.SetRange(p.KernelDllBase, kernelDllLimit,
                                 MemoryManager.KernelPageNonGC);
        }

        // Record the boot allocated kernel memory.
        if (p.BootAllocatedMemorySize != 0) {
            UIntPtr bootAllocatedMemoryLimit =
                p.BootAllocatedMemory + p.BootAllocatedMemorySize;
            KernelRange.SetRange(p.BootAllocatedMemory,
                                 bootAllocatedMemoryLimit,
                                 MemoryManager.KernelPageNonGC);
        }

        // Set stack page for CPU 0
        KernelRange.SetRange(
            Platform.BootCpu.KernelStackLimit,
            (Platform.BootCpu.KernelStackBegin - Platform.BootCpu.KernelStackLimit),
            MemoryManager.KernelPageStack);

        DebugStub.WriteLine("MemoryManager initialized with {0} physical pages still free",
                            __arglist(PhysicalPages.GetFreePageCount()));
        KernelRange.Dump("Initialized");

        isInitialized = true;
    }
    else {
        // Flat (no-paging) path: a single identity-mapped page allocator.
        FlatPages.Initialize();
        DebugStub.WriteLine("KernelBaseAddr: {0:x8} KernelLimitAddr {1:x8}",
                            __arglist(KernelBaseAddr,
                                      KernelBaseAddr + BytesFromPages(KernelPageCount)));
    }
}
// Reports the total physical memory, in bytes, known to the active
// allocator.
public static ulong GetMaxPhysicalMemory()
{
    if (useAddressTranslation) {
        return PhysicalPages.GetMaxMemory();
    }
    return (ulong)FlatPages.GetMaxMemory();
}
// Boots the kernel's service layer in a strictly ordered sequence: GC
// support, scheduler types, tracing, processors, the system manifest,
// security/process/IO subsystems, channels, and finally additional CPUs
// and the service manager. The ARM_PROGRESS markers are breadcrumbs for
// diagnosing boot hangs — statement order here is load-bearing, so the
// sequence must not be rearranged.
private static void InitServices()
{
    InitGCSupport();
    args = GetCommandLine();
    VTable.ParseArgs(args);
    ARM_PROGRESS("Kernel!011");
    InitSchedulerTypes();
    ARM_PROGRESS("Kernel!018");
    Controller.InitializeSystem();
    Tracing.InitializeSystem();
    ARM_PROGRESS("Kernel!019");

    // Read the profiler settings. The values are assumed in kbytes
    // convert them to bytes for direct consumption
    ProfilerBufferSize = (uint)GetIntegerArgument("profiler", 0);
    ProfilerBufferSize *= 1024;
    ARM_PROGRESS("Kernel!020");

    SpinLock.StaticInitialize();
    int cpusLength;
    int cpuCount = GetCpuCount(out cpusLength);
    Processor.InitializeProcessorTable(cpusLength);
    ARM_PROGRESS("Kernel!021");

    Tracing.Log(Tracing.Audit, "processor");
    Processor processor = Processor.EnableProcessor(0);
    PEImage.Initialize();
    ARM_PROGRESS("Kernel!034");

    // Initialize the sample profiling for the processor
    // after the initial breakpoint in kd in the call
    // PEImage.Initialize(). This will allow enabling profiling
    // from kd, by overwriting the ProfilerBufferSize value
    processor.EnableProfiling();
    ARM_PROGRESS("Kernel!035");
    FlatPages.InitializeMemoryMonitoring();

    // initialize endpoints
    InitType(typeof(Microsoft.Singularity.Channels.EndpointCore));

    // TODO Bug 59: Currently broken, need to review paging build.
    //#if PAGING
    //            Microsoft.Singularity.Channels.EndpointTrusted.StaticInitialize();
    //#endif
    ARM_PROGRESS("Kernel!036");

    // get the system manifest
    IoMemory systemManifest = GetSystemManifest();
    ARM_PROGRESS("Kernel!037");
    XmlReader xmlReader = new XmlReader(systemManifest);
    XmlNode xmlData = xmlReader.Parse();
    XmlNode manifestRoot = xmlData.GetChild("system");
    XmlNode initConfig = manifestRoot.GetChild("initConfig");
    ARM_PROGRESS("Kernel!038");

    PerfCounters.Initialize();

    // need to have processed the manifest before we can call Process initialize
    ARM_PROGRESS("Kernel!039");
    PrincipalImpl.Initialize(initConfig);
    ARM_PROGRESS("Kernel!040");
    Process.Initialize(manifestRoot.GetChild("processConfig"));
    InitIO(processor, initConfig, manifestRoot.GetChild("drivers"));
    InitBootTime();
    ARM_PROGRESS("Kernel!045");

    // From here on, we want lazy type initialization to worry about
    // competing threads.
    VTable.InitializeForMultipleThread();
    ARM_PROGRESS("Kernel!046");

    Console.WriteLine("Running C# Kernel of {0}", GetLinkDate());
    Console.WriteLine();

    // TODO: remove this
    Console.WriteLine("Current time: {0}", SystemClock.GetUtcTime().ToString("r"));
    ARM_PROGRESS("Kernel!047");

    InitScheduling();

    DirectoryService.StartNotificationThread();

    Console.WriteLine("Initializing Shared Heap Walker");
    ProtectionDomain.InitializeSharedHeapWalker();
    ARM_PROGRESS("Kernel!050");

    Console.WriteLine("Initializing Service Thread");
    ServiceThread.Initialize();
    ARM_PROGRESS("Kernel!051");

    GC.EnableHeap();
    GCProfilerLogger.StartProfiling();
    ARM_PROGRESS("Kernel!052");

    Tracing.Log(Tracing.Audit, "Waypoints init");
    Waypoints = new long[2048];
    WaypointSeq = new int[2048];
    WaypointThd = new int[2048];

    Tracing.Log(Tracing.Audit, "Interrupts ON.");
    Processor.RestoreInterrupts(true);
    ARM_PROGRESS("Kernel!053");

#if ISA_ARM && TEST_GC
    // GC stress loop: repeatedly allocate and drop ~128MB of byte arrays.
    for (int i = 0; i < 1000; i++) {
        DebugStub.WriteLine("Iteration {0}", __arglist(i));
        ArrayList a = new ArrayList();
        for (int j = 0; j < 128; j++) {
            int size = 1024 * 1024;
            a.Add(new byte [size]);
        }
    }
#endif // ISA_ARM

    ARM_PROGRESS("Kernel!054");
    Tracing.Log(Tracing.Audit, "Binder");
    Binder.Initialize(manifestRoot.GetChild("namingConventions"));

#if ISA_ARM
    DebugStub.WriteLine("Exporting local namespace to BSP\n");
    DirectoryService.ExportArmNamespace();
    DebugStub.WriteLine("Export complete...redirecting binder\n");
    Binder.RedirectRootRef();
    DebugStub.WriteLine("Binder redirect complete\n");
#endif

#if false
    Tracing.Log(Tracing.Audit, "Starting Security Service channels");
    PrincipalImpl.Export();
    ARM_PROGRESS("Kernel!055");
#endif

    Tracing.Log(Tracing.Audit, "Creating Root Directory.");

    //This can be moved below
    IoSystem.InitializeDirectoryService();
    ARM_PROGRESS("Kernel!055");

#if false
    // Start User space namespace manager
    Console.WriteLine("Starting Directory Service SIP");
    DirectoryService.StartUserSpaceDirectoryService();
#endif
    ARM_PROGRESS("Kernel!055.5");

#if !ISA_ARM
    Tracing.Log(Tracing.Audit, "Starting Security Service channels");
    PrincipalImpl.Export();
#endif
    ARM_PROGRESS("Kernel!056");

    Console.WriteLine("Initializing system channels");

    // starting channels services
    DebugStub.Print("Initializing Channel Services\n");
    ChannelDeliveryImplService.Initialize();
    ARM_PROGRESS("Kernel!057");

    ConsoleOutput.Initialize();
    ARM_PROGRESS("Kernel!058");

    // Initialize MP after Binder and ConsoleOutput
    // are initialized so there are no
    // initialization races if the additional
    // threads try to use them.
    Tracing.Log(Tracing.Audit, "Starting additional processors");

    // For ABI to ARM support
    MpExecution.Initialize();
    ARM_PROGRESS("Kernel!059");

    mpEndEvent = new ManualResetEvent(false);

    Tracing.Log(Tracing.Audit, "Initializing Volume Manager.");
#if !ISA_ARM
    IoSystem.InitializeVolumeManager();
#endif // ISA_ARM
    ARM_PROGRESS("Kernel!060");

    InitDrivers();

    if (cpuCount > 1) {
        unsafe {
            Console.WriteLine("Enabling {0} cpus out of {1} real cpus\n",
                              cpuCount, Platform.ThePlatform.CpuRealCount);
        }
        Processor.EnableMoreProcessors(cpuCount);
        ARM_PROGRESS("Kernel!064");
    }

    Tracing.Log(Tracing.Audit, "Initializing Service Manager.");
    IoSystem.InitializeServiceManager(manifestRoot.GetChild("serviceConfig"));
    ARM_PROGRESS("Kernel!065");

    InitDiagnostics();

#if !ISA_ARM
    // At this point consider kernel finished booting
    hasBooted = true;
#endif // ISA_ARM

    Processor.StartSampling();
    ARM_PROGRESS("Kernel!069");

    Microsoft.Singularity.KernelDebugger.KdFilesNamespace.StartNamespaceThread();
    ARM_PROGRESS("Kernel!070");
}