/// <summary>
/// Returns a previously allocated block of physical pages to the heap's
/// free list, coalescing it with any adjacent free blocks.
/// </summary>
/// <param name="addr">Page-aligned start address of the block to free.
/// UIntPtr.Zero is silently accepted as a no-op.</param>
/// <param name="bytes">Caller's view of the block size; rounded up here to a
/// whole number of pages, since allocations are always handed out in
/// page-size chunks.</param>
/// <param name="process">Owning process used to derive the ownership tag for
/// verification; null means the block is kernel-owned (tagged KernelPage).
/// NOTE(review): assumes ProcessId fits in a ushort — the cast would
/// silently truncate otherwise; confirm against Process.ProcessId's range.</param>
public unsafe void Free(UIntPtr addr, UIntPtr bytes, Process process)
{
    if (addr == UIntPtr.Zero) {
        // Silently accept freeing null
        return;
    }

    // We always hand out memory in page-size chunks, so round up what
    // the caller thinks their block size is
    bytes = MemoryManager.PagePad(bytes);

    // Our blocks always start on page boundaries
    DebugStub.Assert(MemoryManager.IsPageAligned(addr));

    // Ownership tag: the freeing process's id, or KernelPage for
    // kernel-owned memory. VerifyOwner below checks the pages actually
    // carry this tag before we release them.
    ushort tag = process != null ? (ushort)process.ProcessId : KernelPage;

    // Lock() presumably disables/saves interrupts and returns the prior
    // state, which Unlock(iflag) restores — TODO confirm against Lock's
    // definition elsewhere in this file.
    bool iflag = Lock();

    try {
        CheckConsistency();

        UIntPtr numPages = MemoryManager.PagesFromBytes(bytes);
        VerifyOwner(addr, numPages, tag);

        // Candidate neighbors for coalescing; stay null when the block
        // borders the heap limits or the neighbor is not free.
        FreeNode *nextBlock = null;
        FreeNode *prevBlock = null;

        // Look for a free block starting immediately after ours (only if
        // we are not flush against the end of the heap).
        if ((addr + bytes) < heapLimit) {
            fixed(PhysicalHeap *thisPtr = &this) {
                nextBlock = FreeNode.GetNodeAt(thisPtr, addr + bytes);
            }
        }

        // Look for a free block ending immediately before ours (only if
        // we are not the first block in the heap). LastNode appears to be
        // a back-pointer record kept in a free block's final page,
        // letting us find that block's header from its last page.
        if (addr > startAddr) {
            fixed(PhysicalHeap *thisPtr = &this) {
                prevBlock = LastNode.GetNodeFromLast(thisPtr, addr - MemoryManager.PageSize);
            }
        }

        // Don't mark pages as free until we're done discovering the
        // previous and next blocks, or the attempt to discover
        // the previous and next blocks gets confused to find itself
        // adjacent to a free block.
        SetPages(addr, numPages, FreePage);

        // Coalesce with the preceding region: the merged block now
        // starts where the predecessor did, so rebase addr onto it.
        if (prevBlock != null) {
            addr = (UIntPtr)prevBlock;
            bytes += prevBlock->bytes;
            freeList.Remove(prevBlock);
        }

        // Coalesce with the following region (addr is unchanged; only
        // the length grows).
        if (nextBlock != null) {
            bytes += nextBlock->bytes;
            freeList.Remove(nextBlock);
        }

        // Blocks should always be integral numbers of pages
        DebugStub.Assert(MemoryManager.IsPageAligned(bytes));

        // Create the free node covering the (possibly merged) region and
        // link it into the free list.
        fixed(PhysicalHeap *thisPtr = &this) {
            freeList.CreateAndInsert(thisPtr, addr, bytes / MemoryManager.PageSize);
        }

        CheckConsistency();
    }
    finally {
        // Always restore the saved lock/interrupt state, even if a
        // consistency check or assertion path throws.
        Unlock(iflag);
    }
}