// Returns the number of padding bytes required to advance `data` up to
// the next `size`-aligned boundary (zero when `data` is already aligned),
// as computed by MemoryManager.Pad.
private static UIntPtr SpaceToAlign(UIntPtr data, UIntPtr size)
{
    UIntPtr aligned = MemoryManager.Pad(data, size);
    return aligned - data;
}
// Carves a block of at least `bytes` bytes out of the free list, tags the
// backing pages with the owning process, and returns the aligned start
// address of the block — or UIntPtr.Zero when no sufficiently large free
// chunk exists.
//
// limitAddr: upper address bound for the allocation; currently only
//            asserted against, not used to steer the search (see TODO below).
// bytes:     requested size in bytes (page-padded internally).
// alignment: requested alignment; raised to at least one page.
// process:   owner to charge; null charges the allocation to the kernel
//            (pages tagged with KernelPage).
public unsafe UIntPtr Allocate(UIntPtr limitAddr, UIntPtr bytes,
                               UIntPtr alignment, Process process)
{
    // Pages are tagged with the allocating process's id (truncated to
    // ushort) so ownership can be tracked; KernelPage marks kernel pages.
    ushort tag = process != null ? (ushort)process.ProcessId : KernelPage;
    UIntPtr blockPtr;
    // NOTE(review): iflag is presumably the pre-Lock interrupt/flags state
    // that Unlock restores — confirm against Lock()/Unlock() definitions.
    bool iflag = Lock();

    // Never allocate with sub-page alignment; this heap hands out whole pages.
    if (alignment < MemoryManager.PageSize) {
        alignment = MemoryManager.PageSize;
    }

    try {
        CheckConsistency();

        // Find an appropriately-sized block
        FreeNode *foundNode = freeList.FindGoodFit(bytes, alignment);

        if (foundNode == null) {
            // Out of suitable memory; finally-block still releases the lock.
            return(UIntPtr.Zero);
        }

        DebugStub.Assert(MemoryManager.IsPageAligned((UIntPtr)foundNode));

        // Respect alignment within the node: the returned pointer is the
        // node's start rounded up to `alignment`, and the leading padding
        // is counted as part of the allocation's size.
        blockPtr = MemoryManager.Pad((UIntPtr)foundNode, alignment);
        UIntPtr alignedSize = bytes + SpaceToAlign((UIntPtr)foundNode, alignment);
        DebugStub.Assert(alignedSize == (blockPtr + bytes) - (UIntPtr)foundNode);
        DebugStub.Assert(foundNode->bytes >= alignedSize);

        // Give back any extra pages: compare the pages we actually need
        // (padding + request, rounded up to whole pages) against the pages
        // in the chunk FindGoodFit returned.
        UIntPtr numPages = MemoryManager.PagesFromBytes(MemoryManager.PagePad(alignedSize));
        UIntPtr chunkPages = MemoryManager.PagesFromBytes(foundNode->bytes);

        DebugStub.Assert(chunkPages >= numPages);
        UIntPtr extraPages = chunkPages - numPages;

        if (extraPages > 0) {
            // Give back the extra memory: re-insert the unused tail of the
            // chunk into the free list as its own node.
            UIntPtr remainderPtr = (UIntPtr)foundNode + (numPages * MemoryManager.PageSize);

            // NOTE(review): pinning `this` suggests CreateAndInsert stores
            // a raw back-pointer to the heap — confirm its lifetime rules.
            fixed(PhysicalHeap *thisPtr = &this) {
                freeList.CreateAndInsert(thisPtr, remainderPtr, extraPages);
            }
        }

        // Tag all pages of the allocation starting at the node's (page-
        // aligned) base, not at blockPtr, so the leading padding is owned too.
        SetPages((UIntPtr)foundNode, numPages, tag);
        CheckConsistency();
    }
    finally {
        Unlock(iflag);
    }

    // TODO: Flexible limit specification not yet implemented
    if (limitAddr > UIntPtr.Zero) {
        DebugStub.Assert(blockPtr < limitAddr);
    }

    return(blockPtr);
}