/// <summary>
/// Allocates <paramref name="pagesCount"/> pages from this region's buddy heap,
/// preferring the largest block order that still fits the request.
/// </summary>
/// <param name="pagesCount">Total number of pages to allocate.</param>
/// <param name="backwards">
/// When true, free blocks are taken from the highest addresses first;
/// otherwise from the lowest addresses first.
/// </param>
/// <param name="pageList">
/// Receives the list of allocated ranges on success; set to null on out-of-memory failure.
/// </param>
/// <returns>
/// <see cref="KernelResult.Success"/> when all pages were allocated,
/// <see cref="KernelResult.OutOfMemory"/> when the region cannot satisfy the request,
/// or the error propagated from <see cref="KPageList.AddRange"/>.
/// </returns>
private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
{
    pageList = new KPageList();

    if (_blockOrdersCount > 0)
    {
        //Fast reject: not enough free pages in the whole region.
        if (GetFreePagesImpl() < pagesCount)
        {
            return KernelResult.OutOfMemory;
        }
    }
    else if (pagesCount != 0)
    {
        //Region has no block orders at all, so it can't satisfy any non-zero request.
        return KernelResult.OutOfMemory;
    }

    for (int blockIndex = _blockOrdersCount - 1; blockIndex >= 0; blockIndex--)
    {
        KMemoryRegionBlock block = _blocks[blockIndex];

        ulong bestFitBlockSize = 1UL << block.Order;

        ulong blockPagesCount = bestFitBlockSize / KMemoryManager.PageSize;

        //Check if this is the best fit for this page size.
        //If so, try allocating as much requested pages as possible.
        while (blockPagesCount <= pagesCount)
        {
            ulong address = 0;

            //Search the best-fit order first and, failing that, each larger order.
            //NOTE(review): a second copy of this loop with the backwards/forwards
            //branches swapped was removed — it only ran when this search failed and
            //would then allocate from the opposite end of the region, violating the
            //direction the caller requested.
            for (int currBlockIndex = blockIndex; currBlockIndex < _blockOrdersCount && address == 0; currBlockIndex++)
            {
                block = _blocks[currBlockIndex];

                //Walk the bitmap tree from the root; at each level the chosen bit
                //selects which 64-bit word to inspect on the next level.
                int index = 0;

                bool zeroMask = false;

                for (int level = 0; level < block.MaxLevel; level++)
                {
                    long mask = block.Masks[level][index];

                    if (mask == 0)
                    {
                        //No free blocks under this subtree.
                        zeroMask = true;
                        break;
                    }

                    if (backwards)
                    {
                        //Pick the highest set bit (highest address first).
                        index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
                    }
                    else
                    {
                        //Pick the lowest set bit (lowest address first).
                        index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
                    }
                }

                //Indices past the truncated size come from alignment padding and
                //are not valid blocks.
                if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
                {
                    continue;
                }

                block.FreeCount--;

                //Clear the allocated block's bit on every level of the tree,
                //stopping as soon as a level still has other bits set.
                int tempIdx = index;

                for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
                {
                    block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));

                    if (block.Masks[level][tempIdx / 64] != 0)
                    {
                        break;
                    }
                }

                address = block.StartAligned + ((ulong)index << block.Order);
            }

            //The address being zero means that no free space was found on that order,
            //just give up and try with the next one.
            if (address == 0)
            {
                break;
            }

            //If we are using a larger order than best fit, then we should
            //split it into smaller blocks.
            ulong firstFreeBlockSize = 1UL << block.Order;

            if (firstFreeBlockSize > bestFitBlockSize)
            {
                FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize);
            }

            //Add new allocated page(s) to the pages list.
            //If an error occurs, then free all allocated pages and fail.
            KernelResult result = pageList.AddRange(address, blockPagesCount);

            if (result != KernelResult.Success)
            {
                FreePages(address, blockPagesCount);

                foreach (KPageNode pageNode in pageList)
                {
                    FreePages(pageNode.Address, pageNode.PagesCount);
                }

                return result;
            }

            pagesCount -= blockPagesCount;
        }
    }

    //Success case, all requested pages were allocated successfully.
    if (pagesCount == 0)
    {
        return KernelResult.Success;
    }

    //Error case, free allocated pages and return out of memory.
    foreach (KPageNode pageNode in pageList)
    {
        FreePages(pageNode.Address, pageNode.PagesCount);
    }

    pageList = null;

    return KernelResult.OutOfMemory;
}
/// <summary>
/// Allocates <paramref name="PagesCount"/> pages from this region's buddy heap,
/// preferring the largest block order that still fits the request.
/// </summary>
/// <param name="PagesCount">Total number of pages to allocate.</param>
/// <param name="Backwards">
/// When true, free blocks are taken from the highest addresses first;
/// otherwise from the lowest addresses first.
/// </param>
/// <param name="PageList">
/// Receives the list of allocated ranges on success; set to null on out-of-memory failure.
/// </param>
/// <returns>
/// <see cref="KernelResult.Success"/> when all pages were allocated,
/// <see cref="KernelResult.OutOfMemory"/> when the region cannot satisfy the request,
/// or the error propagated from <see cref="KPageList.AddRange"/>.
/// </returns>
private KernelResult AllocatePagesImpl(ulong PagesCount, bool Backwards, out KPageList PageList)
{
    PageList = new KPageList();

    if (BlockOrdersCount > 0)
    {
        //Fast reject: not enough free pages in the whole region.
        if (GetFreePagesImpl() < PagesCount)
        {
            return KernelResult.OutOfMemory;
        }
    }
    else if (PagesCount != 0)
    {
        //Region has no block orders at all, so it can't satisfy any non-zero request.
        return KernelResult.OutOfMemory;
    }

    for (int BlockIndex = BlockOrdersCount - 1; BlockIndex >= 0; BlockIndex--)
    {
        KMemoryRegionBlock Block = Blocks[BlockIndex];

        ulong BestFitBlockSize = 1UL << Block.Order;

        ulong BlockPagesCount = BestFitBlockSize / KMemoryManager.PageSize;

        //Check if this is the best fit for this page size.
        //If so, try allocating as much requested pages as possible.
        while (BlockPagesCount <= PagesCount)
        {
            ulong Address = 0;

            //Search the best-fit order first and, failing that, each larger order.
            //NOTE(review): a second copy of this loop with the backwards/forwards
            //branches swapped was removed — it only ran when this search failed and
            //would then allocate from the opposite end of the region, violating the
            //direction the caller requested.
            for (int CurrBlockIndex = BlockIndex; CurrBlockIndex < BlockOrdersCount && Address == 0; CurrBlockIndex++)
            {
                Block = Blocks[CurrBlockIndex];

                //Walk the bitmap tree from the root; at each level the chosen bit
                //selects which 64-bit word to inspect on the next level.
                int Index = 0;

                bool ZeroMask = false;

                for (int Level = 0; Level < Block.MaxLevel; Level++)
                {
                    long Mask = Block.Masks[Level][Index];

                    if (Mask == 0)
                    {
                        //No free blocks under this subtree.
                        ZeroMask = true;
                        break;
                    }

                    if (Backwards)
                    {
                        //Pick the highest set bit (highest address first).
                        Index = (Index * 64 + 63) - BitUtils.CountLeadingZeros64(Mask);
                    }
                    else
                    {
                        //Pick the lowest set bit (lowest address first).
                        Index = Index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(Mask));
                    }
                }

                //Indices past the truncated size come from alignment padding and
                //are not valid blocks.
                if (Block.SizeInBlocksTruncated <= (ulong)Index || ZeroMask)
                {
                    continue;
                }

                Block.FreeCount--;

                //Clear the allocated block's bit on every level of the tree,
                //stopping as soon as a level still has other bits set.
                int TempIdx = Index;

                for (int Level = Block.MaxLevel - 1; Level >= 0; Level--, TempIdx /= 64)
                {
                    Block.Masks[Level][TempIdx / 64] &= ~(1L << (TempIdx & 63));

                    if (Block.Masks[Level][TempIdx / 64] != 0)
                    {
                        break;
                    }
                }

                Address = Block.StartAligned + ((ulong)Index << Block.Order);
            }

            //The address being zero means that no free space was found on that order,
            //just give up and try with the next one.
            if (Address == 0)
            {
                break;
            }

            //If we are using a larger order than best fit, then we should
            //split it into smaller blocks.
            ulong FirstFreeBlockSize = 1UL << Block.Order;

            if (FirstFreeBlockSize > BestFitBlockSize)
            {
                FreePages(Address + BestFitBlockSize, (FirstFreeBlockSize - BestFitBlockSize) / KMemoryManager.PageSize);
            }

            //Add new allocated page(s) to the pages list.
            //If an error occurs, then free all allocated pages and fail.
            KernelResult Result = PageList.AddRange(Address, BlockPagesCount);

            if (Result != KernelResult.Success)
            {
                FreePages(Address, BlockPagesCount);

                foreach (KPageNode PageNode in PageList)
                {
                    FreePages(PageNode.Address, PageNode.PagesCount);
                }

                return Result;
            }

            PagesCount -= BlockPagesCount;
        }
    }

    //Success case, all requested pages were allocated successfully.
    if (PagesCount == 0)
    {
        return KernelResult.Success;
    }

    //Error case, free allocated pages and return out of memory.
    foreach (KPageNode PageNode in PageList)
    {
        FreePages(PageNode.Address, PageNode.PagesCount);
    }

    PageList = null;

    return KernelResult.OutOfMemory;
}