Exemple #1
0
            // Builds a FreeNode header describing 'pages' free pages starting
            // at 'addr'; the header is written into the first page itself.
            internal static unsafe FreeNode *Create(PhysicalHeap *inHeap,
                                                    UIntPtr addr, UIntPtr pages)
            {
                // The new free block must lie entirely within the heap's range.
                DebugStub.Assert(addr >= inHeap->startAddr);
                DebugStub.Assert((addr + (pages * MemoryManager.PageSize)) <= inHeap->heapLimit);

                // The main page table had better already record this page as free.
                DebugStub.Assert(inHeap->PageWord(inHeap->PageIndex(addr)) == FreePage,
                                 "Creating a FreeNode for non-free page {0:x}",
                                 __arglist(addr));

                FreeNode *freshNode = (FreeNode *)addr;

                freshNode->signature = FreeNode.Signature;
                freshNode->bytes     = pages * MemoryManager.PageSize;
                freshNode->prev      = null;
                freshNode->next      = null;
                freshNode->last      = null;

                if (pages > 1)
                {
                    // Multi-page blocks also get a LastNode marker in their
                    // final page so a following neighbor can find this header.
                    freshNode->last = LastNode.Create(inHeap, addr, freshNode);
                }

                return(freshNode);
            }
Exemple #2
0
            // Appends 'node' at the tail end of the free list.
            private unsafe void InsertAsTail(FreeNode *node)
            {
                node->prev = tail;

                if (node->prev != null)
                {
                    node->prev->next = node;
                }

                tail = node;
            }
Exemple #3
0
            // Prepends 'node' at the head end of the free list.
            private unsafe void InsertAsHead(FreeNode *node)
            {
                node->next = head;

                if (node->next != null)
                {
                    node->next->prev = node;
                }

                head = node;
            }
Exemple #4
0
            // Finds, removes from the free list, and returns a block large
            // enough to hold 'bytes' bytes at the requested 'alignment', or
            // null if no block fits. Both search strategies rely on the list
            // being sorted by size, largest at the head (see InsertBySize).
            internal unsafe FreeNode *FindGoodFit(UIntPtr bytes, UIntPtr alignment)
            {
                DebugStub.Assert(alignment >= MemoryManager.PageSize);

                // If it is a small allocation, we try to accelerate the search:
                // walk from the tail (smallest blocks) and take the first fit.
                if (bytes <= SmallSize)
                {
                    for (FreeNode *node = tail; node != null; node = node->prev)
                    {
                        // Total space needed: padding up to 'alignment' within
                        // this node, plus the requested size.
                        UIntPtr alignedSize = SpaceToAlign((UIntPtr)node, alignment) + bytes;

                        if (alignedSize <= node->bytes)
                        {
                            Remove(node);
                            return(node);
                        }
                    }

                    return(null);
                }
                else
                {
                    // First try to find a region closest in size to bytes...
                    FreeNode *best = null;

                    for (FreeNode *node = head; node != null; node = node->next)
                    {
                        UIntPtr alignedSize = SpaceToAlign((UIntPtr)node, alignment) + bytes;

                        if (alignedSize <= node->bytes)
                        {
                            // If we find a candidate, remember it.
                            best = node;
                            if (bytes == node->bytes)
                            {
                                // Stop if it is the ideal region.
                                break;
                            }
                        }
                        else
                        {
                            // Stop if we've reached smaller regions: the list
                            // is sorted by descending size, so nothing further
                            // along can fit.
                            break;
                        }
                    }

                    if (best != null)
                    {
                        Remove(best);
                    }

                    return(best);
                }
            }
Exemple #5
0
            // Writes a LastNode marker into the final page of a multi-page
            // free block so a following neighbor can locate the block header.
            internal static unsafe LastNode *Create(PhysicalHeap *inHeap,
                                                    UIntPtr addr, FreeNode *node)
            {
                // The marker occupies the last page covered by the block.
                UIntPtr lastPage = addr + node->bytes - MemoryManager.PageSize;
                LastNode *marker = (LastNode *)lastPage;

                // Sanity: a page-aligned address inside the heap's range.
                DebugStub.Assert((UIntPtr)marker >= inHeap->startAddr);
                DebugStub.Assert((UIntPtr)marker <= inHeap->heapLimit);
                DebugStub.Assert(MemoryManager.IsPageAligned((UIntPtr)marker));

                marker->signature = LastNode.Signature;
                marker->node      = node;

                return(marker);
            }
Exemple #6
0
            // Scrubs this node after it leaves the free list: removes the
            // LastNode marker of a multi-page block (if any), stamps the
            // Removed signature, and clears the list links.
            internal unsafe void Remove()
            {
                if (last != null)
                {
                    last->Remove();
                }

                signature = Removed;
                next      = null;
                prev      = null;
            }
Exemple #7
0
        // Debug-only heap validation (compiled only under SELF_TEST): walks
        // the page table validating every free block's header and marker,
        // then checks that the free list accounts for exactly the same number
        // of free pages as the table.
        private unsafe void CheckConsistency()
        {
#if SELF_TEST
            UIntPtr freePagesByTable = 0;

            for (UIntPtr i = 0; i < pageCount; i++)
            {
                UIntPtr pageAddr = startAddr + (MemoryManager.PageSize * i);

                if (PageWord(i) == FreePage)
                {
                    // Validate this block's free information
                    FreeNode *thisBlock = (FreeNode *)pageAddr;
                    DebugStub.Assert(thisBlock->signature == FreeNode.Signature);

                    if (thisBlock->last != null)
                    {
                        // Multi-page free block; validate and skip ahead
                        DebugStub.Assert(thisBlock->last->node == thisBlock);
                        DebugStub.Assert(thisBlock->last->signature == LastNode.Signature);
                        // The size implied by the LastNode marker's position
                        // must agree with the size recorded in the header.
                        UIntPtr numBytes = (UIntPtr)thisBlock->last - (UIntPtr)pageAddr +
                                           MemoryManager.PageSize;
                        DebugStub.Assert(numBytes == thisBlock->bytes);
                        DebugStub.Assert(MemoryManager.IsPageAligned(numBytes));
                        UIntPtr numPages = MemoryManager.PagesFromBytes(numBytes);

                        // Every page spanned by the block must be marked free.
                        for (UIntPtr j = i; j < i + numPages; j++)
                        {
                            DebugStub.Assert(PageWord(j) == FreePage);
                        }

                        i += numPages - 1;
                        freePagesByTable += numPages;
                    }
                    else
                    {
                        // Single-page free block: the next page must not be
                        // free, otherwise coalescing should have merged them.
                        if (i != pageCount - 1)
                        {
                            DebugStub.Assert(PageWord(i + 1) != FreePage);
                        }
                        freePagesByTable++;
                    }
                }
            }

            // Now make sure all free pages are accounted for
            UIntPtr freePagesByList = FreePageCountFromList();
            DebugStub.Assert(freePagesByList == freePagesByTable);
#endif
        }
Exemple #8
0
            // Builds a FreeNode describing 'pages' free pages at 'addr' and
            // threads it onto the size-sorted free list.
            internal unsafe void CreateAndInsert(PhysicalHeap *inHeap,
                                                 UIntPtr addr,
                                                 UIntPtr pages)
            {
                DebugStub.Assert(MemoryManager.IsPageAligned(addr),
                                 "PhysicalHeap.CreateAndInsert non page-aligned addr={0:x}",
                                 __arglist(addr));

                FreeNode *newNode = FreeNode.Create(inHeap, addr, pages);

                // The created node must span a whole number of pages.
                DebugStub.Assert(MemoryManager.IsPageAligned(newNode->bytes),
                                 "PhysicalHeap.CreateAndInsert non page-sized node->bytes={0:x}",
                                 __arglist(newNode->bytes));

                InsertBySize(newNode);
            }
Exemple #9
0
        /////////////////////////////////////
        // PRIVATE METHODS
        /////////////////////////////////////

        private unsafe UIntPtr FreePageCountFromList()
        {
            UIntPtr   retval = 0;
            FreeNode *entry  = freeList.head;
            FreeNode *prev   = null;

            while (entry != null)
            {
                DebugStub.Assert(MemoryManager.IsPageAligned(entry->bytes));
                retval += MemoryManager.PagesFromBytes(entry->bytes);
                DebugStub.Assert(entry->prev == prev);
                prev  = entry;
                entry = entry->next;
            }

            return(retval);
        }
Exemple #10
0
            // Inserts 'node' into the free list, which is kept sorted by size
            // with the largest block at the head.
            internal unsafe void InsertBySize(FreeNode *node)
            {
                if (head == null)
                {
                    // Empty list: the node becomes both head and tail.
                    DebugStub.Assert(tail == null);
                    head = node;
                    tail = node;
                    return;
                }

                if (node->bytes <= SmallSize)
                {
                    // Small block: scan backwards from the tail (smallest end)
                    // for the first entry at least as large as this one.
                    for (FreeNode *step = tail; step != null; step = step->prev)
                    {
                        if (step->bytes >= node->bytes)
                        {
                            InsertAsNext(step, node);
                            return;
                        }
                    }

                    // Every entry in the list is smaller than us, so this node
                    // becomes the new head.
                    InsertAsHead(node);
                }
                else
                {
                    // Large block: scan forwards from the head (largest end)
                    // for the first entry no larger than this one.
                    for (FreeNode *step = head; step != null; step = step->next)
                    {
                        if (step->bytes <= node->bytes)
                        {
                            InsertAsPrev(step, node);
                            return;
                        }
                    }

                    // Every entry in the list is larger than us, so this node
                    // becomes the new tail.
                    InsertAsTail(node);
                }
            }
Exemple #11
0
            // Links 'node' into the list immediately before 'target'.
            private unsafe void InsertAsPrev(FreeNode *target, FreeNode *node)
            {
                DebugStub.Assert(target != null);

                if (target == head)
                {
                    // Inserting before the head is just a head insertion.
                    InsertAsHead(node);
                    return;
                }

                node->next   = target;
                node->prev   = target->prev;
                target->prev = node;

                if (node->prev != null)
                {
                    node->prev->next = node;
                }
            }
Exemple #12
0
            // Links 'node' into the list immediately after 'target'.
            private unsafe void InsertAsNext(FreeNode *target, FreeNode *node)
            {
                DebugStub.Assert(target != null);

                if (target == tail)
                {
                    // Inserting after the tail is just a tail insertion.
                    InsertAsTail(node);
                    return;
                }

                node->prev   = target;
                node->next   = target->next;
                target->next = node;

                if (node->next != null)
                {
                    node->next->prev = node;
                }
            }
Exemple #13
0
            // Unlinks 'node' from the free list (fixing up head/tail as
            // needed) and then scrubs the node's own header.
            internal unsafe void Remove(FreeNode *node)
            {
                // Detach on the successor side.
                if (node->next == null)
                {
                    DebugStub.Assert(tail == node);
                    tail = node->prev;
                }
                else
                {
                    node->next->prev = node->prev;
                }

                // Detach on the predecessor side.
                if (node->prev == null)
                {
                    DebugStub.Assert(head == node);
                    head = node->next;
                }
                else
                {
                    node->prev->next = node->next;
                }

                node->Remove();
            }
Exemple #14
0
 // Invalidates this LastNode marker: drops the back-pointer to its
 // owning FreeNode and stamps the Removed signature.
 internal unsafe void Remove()
 {
     node      = null;
     signature = Removed;
 }
Exemple #15
0
        // Returns the block at 'addr' ('bytes' long, rounded up to whole
        // pages) to the free pool, coalescing with adjacent free blocks.
        // 'process' identifies the expected owner (null means the kernel);
        // ownership is verified before the pages are released. Freeing a null
        // address is a silent no-op.
        public unsafe void Free(UIntPtr addr, UIntPtr bytes, Process process)
        {
            if (addr == UIntPtr.Zero)
            {
                // Silently accept freeing null
                return;
            }

            // We always hand out memory in page-size chunks, so round up what
            // the caller thinks their block size is
            bytes = MemoryManager.PagePad(bytes);

            // Our blocks always start on page boundaries
            DebugStub.Assert(MemoryManager.IsPageAligned(addr));
            ushort tag   = process != null ? (ushort)process.ProcessId : KernelPage;
            bool   iflag = Lock();

            try {
                CheckConsistency();

                UIntPtr numPages = MemoryManager.PagesFromBytes(bytes);
                VerifyOwner(addr, numPages, tag);

                FreeNode *nextBlock = null;
                FreeNode *prevBlock = null;

                // Is there a free block starting immediately after us?
                if ((addr + bytes) < heapLimit)
                {
                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        nextBlock = FreeNode.GetNodeAt(thisPtr, addr + bytes);
                    }
                }

                // Does a free block end in the page immediately before us?
                if (addr > startAddr)
                {
                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        prevBlock = LastNode.GetNodeFromLast(thisPtr, addr - MemoryManager.PageSize);
                    }
                }

                // Don't mark pages as free until we're done discovering the
                // previous and next blocks, or the attempt to discover
                // the previous and next blocks gets confused to find itself
                // adjacent to a free block.
                SetPages(addr, numPages, FreePage);

                // Coalesce with the preceding region
                if (prevBlock != null)
                {
                    addr   = (UIntPtr)prevBlock;
                    bytes += prevBlock->bytes;
                    freeList.Remove(prevBlock);
                }

                // Coalesce with the following region
                if (nextBlock != null)
                {
                    bytes += nextBlock->bytes;
                    freeList.Remove(nextBlock);
                }

                // Blocks should always be integral numbers of pages
                DebugStub.Assert(MemoryManager.IsPageAligned(bytes));

                // Create the free node covering the (possibly merged) region.
                fixed(PhysicalHeap *thisPtr = &this)
                {
                    freeList.CreateAndInsert(thisPtr, addr, bytes / MemoryManager.PageSize);
                }

                CheckConsistency();
            }
            finally {
                Unlock(iflag);
            }
        }
Exemple #16
0
        // Allocates 'bytes' bytes of physical memory aligned to 'alignment'
        // (raised to at least one page), tagging the pages with the owning
        // process's id (or KernelPage when 'process' is null). Returns
        // UIntPtr.Zero when no suitable free region exists.
        // NOTE(review): 'limitAddr' does not constrain the search; it is only
        // asserted against after the fact (see TODO below).
        public unsafe UIntPtr Allocate(UIntPtr limitAddr,
                                       UIntPtr bytes,
                                       UIntPtr alignment,
                                       Process process)
        {
            ushort  tag = process != null ? (ushort)process.ProcessId : KernelPage;
            UIntPtr blockPtr;
            bool    iflag = Lock();

            // Alignment can never be finer than a page.
            if (alignment < MemoryManager.PageSize)
            {
                alignment = MemoryManager.PageSize;
            }

            try {
                CheckConsistency();

                // Find an appropriately-sized block
                FreeNode *foundNode = freeList.FindGoodFit(bytes, alignment);

                if (foundNode == null)
                {
                    return(UIntPtr.Zero);
                }

                DebugStub.Assert(MemoryManager.IsPageAligned((UIntPtr)foundNode));

                // Respect alignment within the node: blockPtr is the first
                // properly-aligned address inside the found block.
                blockPtr = MemoryManager.Pad((UIntPtr)foundNode, alignment);
                UIntPtr alignedSize = bytes + SpaceToAlign((UIntPtr)foundNode, alignment);
                DebugStub.Assert(alignedSize == (blockPtr + bytes) - (UIntPtr)foundNode);
                DebugStub.Assert(foundNode->bytes >= alignedSize);

                // Give back any extra pages
                UIntPtr numPages   = MemoryManager.PagesFromBytes(MemoryManager.PagePad(alignedSize));
                UIntPtr chunkPages = MemoryManager.PagesFromBytes(foundNode->bytes);

                DebugStub.Assert(chunkPages >= numPages);
                UIntPtr extraPages = chunkPages - numPages;

                if (extraPages > 0)
                {
                    // Give back the extra memory
                    UIntPtr remainderPtr = (UIntPtr)foundNode + (numPages * MemoryManager.PageSize);

                    fixed(PhysicalHeap *thisPtr = &this)
                    {
                        freeList.CreateAndInsert(thisPtr, remainderPtr, extraPages);
                    }
                }

                // Mark the kept pages (including any alignment padding at the
                // front of the node) as owned by 'tag'.
                SetPages((UIntPtr)foundNode, numPages, tag);
                CheckConsistency();
            }
            finally {
                Unlock(iflag);
            }

            // TODO: Flexible limit specification not yet implemented
            if (limitAddr > UIntPtr.Zero)
            {
                DebugStub.Assert(blockPtr < limitAddr);
            }

            return(blockPtr);
        }