//Sums the free page count across every block order managed by this region.
//Each order contributes FreeCount blocks of (1 << Order) bytes each.
private ulong GetFreePagesImpl()
{
    ulong total = 0;

    int index = 0;

    while (index < _blockOrdersCount)
    {
        KMemoryRegionBlock currBlock = _blocks[index++];

        ulong pagesPerBlock = (1UL << currBlock.Order) / KMemoryManager.PageSize;

        total += pagesPerBlock * currBlock.FreeCount;
    }

    return total;
}
//Sums the free page count across every block order managed by this region.
//Each order contributes FreeCount blocks of (1 << Order) bytes each.
private ulong GetFreePagesImpl()
{
    ulong Total = 0;

    int Index = 0;

    while (Index < BlockOrdersCount)
    {
        KMemoryRegionBlock CurrBlock = Blocks[Index++];

        ulong PagesPerBlock = (1UL << CurrBlock.Order) / KMemoryManager.PageSize;

        Total += PagesPerBlock * CurrBlock.FreeCount;
    }

    return Total;
}
//Returns pagesCount pages starting at address to the free lists.
//The range is split into a large-order-aligned middle region plus head and
//tail remainders, each freed at the largest order that fits; freed blocks
//are coalesced upwards into larger orders where possible.
private void FreePages(ulong address, ulong pagesCount)
{
    ulong endAddr = address + pagesCount * KMemoryManager.PageSize;

    //Find the largest order whose alignment leaves a non-empty
    //aligned sub-region inside [address, endAddr).
    int blockIndex = _blockOrdersCount - 1;

    ulong addressRounded = 0;
    ulong endAddrTruncated = 0;

    for (; blockIndex >= 0; blockIndex--)
    {
        KMemoryRegionBlock allocInfo = _blocks[blockIndex];

        int blockSize = 1 << allocInfo.Order;

        addressRounded = BitUtils.AlignUp(address, blockSize);
        endAddrTruncated = BitUtils.AlignDown(endAddr, blockSize);

        if (addressRounded < endAddrTruncated)
        {
            break;
        }
    }

    //Frees one block of the current order at currAddress, then walks up the
    //orders merging with buddies while TryCoalesce succeeds. currAddress == 0
    //is used as a sentinel to stop (0 is never a freeable block address here).
    void FreeRegion(ulong currAddress)
    {
        for (int currBlockIndex = blockIndex; currBlockIndex < _blockOrdersCount && currAddress != 0; currBlockIndex++)
        {
            KMemoryRegionBlock block = _blocks[currBlockIndex];

            block.FreeCount++;

            ulong freedBlocks = (currAddress - block.StartAligned) >> block.Order;

            int index = (int)freedBlocks;

            //Set this block's bit at every mask level, walking from the finest
            //level upward. If a word was already non-zero before the update,
            //the upper levels are already marked and we can stop early.
            for (int level = block.MaxLevel - 1; level >= 0; level--, index /= 64)
            {
                long mask = block.Masks[level][index / 64];

                block.Masks[level][index / 64] = mask | (1L << (index & 63));

                if (mask != 0)
                {
                    break;
                }
            }

            //Number of blocks of this order that form one block of the next order.
            int blockSizeDelta = 1 << (block.NextOrder - block.Order);

            int freedBlocksTruncated = BitUtils.AlignDown((int)freedBlocks, blockSizeDelta);

            if (!block.TryCoalesce(freedBlocksTruncated, blockSizeDelta))
            {
                break;
            }

            currAddress = block.StartAligned + ((ulong)freedBlocksTruncated << block.Order);
        }
    }

    //Free inside aligned region.
    ulong baseAddress = addressRounded;

    while (baseAddress < endAddrTruncated)
    {
        ulong blockSize = 1UL << _blocks[blockIndex].Order;

        FreeRegion(baseAddress);

        baseAddress += blockSize;
    }

    int nextBlockIndex = blockIndex - 1;

    //Free region between Address and aligned region start.
    //Walk downwards through smaller orders, freeing as many blocks of each
    //order as fit between address and the aligned region start.
    baseAddress = addressRounded;

    for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
    {
        ulong blockSize = 1UL << _blocks[blockIndex].Order;

        while (baseAddress - blockSize >= address)
        {
            baseAddress -= blockSize;

            FreeRegion(baseAddress);
        }
    }

    //Free region between aligned region end and End Address.
    baseAddress = endAddrTruncated;

    for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--)
    {
        ulong blockSize = 1UL << _blocks[blockIndex].Order;

        while (baseAddress + blockSize <= endAddr)
        {
            FreeRegion(baseAddress);

            baseAddress += blockSize;
        }
    }
}
//Creates a region manager covering [address, endAddr), with size bytes of
//usable memory, and builds the multi-level free-block bitmaps for every
//supported block order. All pages are initially marked free.
public KMemoryRegionManager(ulong address, ulong size, ulong endAddr)
{
    _blocks = new KMemoryRegionBlock[BlockOrders.Length];

    Address = address;
    Size = size;
    EndAddr = endAddr;

    _blockOrdersCount = BlockOrders.Length;

    for (int blockIndex = 0; blockIndex < _blockOrdersCount; blockIndex++)
    {
        _blocks[blockIndex] = new KMemoryRegionBlock();

        _blocks[blockIndex].Order = BlockOrders[blockIndex];

        //NextOrder is 0 for the largest order (there is no order above
        //to coalesce into).
        int nextOrder = blockIndex == _blockOrdersCount - 1 ? 0 : BlockOrders[blockIndex + 1];

        _blocks[blockIndex].NextOrder = nextOrder;

        int currBlockSize = 1 << BlockOrders[blockIndex];
        int nextBlockSize = currBlockSize;

        if (nextOrder != 0)
        {
            nextBlockSize = 1 << nextOrder;
        }

        //Start is aligned down to the NEXT order's size so that buddy pairs
        //of this order line up with blocks of the order above.
        ulong startAligned = BitUtils.AlignDown(address, nextBlockSize);
        ulong endAddrAligned = BitUtils.AlignDown(endAddr, currBlockSize);

        ulong sizeInBlocksTruncated = (endAddrAligned - startAligned) >> BlockOrders[blockIndex];

        ulong endAddrRounded = BitUtils.AlignUp(address + size, nextBlockSize);

        ulong sizeInBlocksRounded = (endAddrRounded - startAligned) >> BlockOrders[blockIndex];

        _blocks[blockIndex].StartAligned = startAligned;
        _blocks[blockIndex].SizeInBlocksTruncated = sizeInBlocksTruncated;
        _blocks[blockIndex].SizeInBlocksRounded = sizeInBlocksRounded;

        //Count how many 64-way levels are needed to index
        //sizeInBlocksRounded blocks (at least one level).
        ulong currSizeInBlocks = sizeInBlocksRounded;

        int maxLevel = 0;

        do
        {
            maxLevel++;
        }
        while ((currSizeInBlocks /= 64) != 0);

        _blocks[blockIndex].MaxLevel = maxLevel;

        _blocks[blockIndex].Masks = new long[maxLevel][];

        //Allocate mask words per level, finest level last; each level up
        //needs 1/64 as many words (rounded up).
        currSizeInBlocks = sizeInBlocksRounded;

        for (int level = maxLevel - 1; level >= 0; level--)
        {
            currSizeInBlocks = (currSizeInBlocks + 63) / 64;

            _blocks[blockIndex].Masks[level] = new long[currSizeInBlocks];
        }
    }

    //Populate the free lists with the entire managed range.
    if (size != 0)
    {
        FreePages(address, size / KMemoryManager.PageSize);
    }
}
//Allocates pagesCount pages into pageList, preferring the largest block
//orders first (best fit), splitting larger free blocks when needed.
//When backwards is true, blocks are taken from the highest addresses first;
//otherwise from the lowest.
//Returns KernelResult.Success on success, or KernelResult.OutOfMemory when
//not enough free pages exist (pageList is set to null in that case).
//
//Fix: the original contained the free-block search loop twice back-to-back,
//with the second copy's "backwards" direction test inverted. When the
//fallback copy ran it would allocate from the wrong end of the region,
//violating the backwards contract. The inverted duplicate is removed.
private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
{
    pageList = new KPageList();

    if (_blockOrdersCount > 0)
    {
        if (GetFreePagesImpl() < pagesCount)
        {
            return KernelResult.OutOfMemory;
        }
    }
    else if (pagesCount != 0)
    {
        return KernelResult.OutOfMemory;
    }

    for (int blockIndex = _blockOrdersCount - 1; blockIndex >= 0; blockIndex--)
    {
        KMemoryRegionBlock block = _blocks[blockIndex];

        ulong bestFitBlockSize = 1UL << block.Order;

        ulong blockPagesCount = bestFitBlockSize / KMemoryManager.PageSize;

        //Check if this is the best fit for this page size.
        //If so, try allocating as much requested pages as possible.
        while (blockPagesCount <= pagesCount)
        {
            ulong address = 0;

            //Search this order and, failing that, the larger orders above it
            //for a free block, walking the mask levels from coarse to fine.
            for (int currBlockIndex = blockIndex; currBlockIndex < _blockOrdersCount && address == 0; currBlockIndex++)
            {
                block = _blocks[currBlockIndex];

                int index = 0;

                bool zeroMask = false;

                for (int level = 0; level < block.MaxLevel; level++)
                {
                    long mask = block.Masks[level][index];

                    if (mask == 0)
                    {
                        zeroMask = true;

                        break;
                    }

                    if (backwards)
                    {
                        //Take the highest set bit (highest address).
                        index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
                    }
                    else
                    {
                        //Take the lowest set bit (lowest address).
                        index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
                    }
                }

                //Bits beyond SizeInBlocksTruncated are rounding padding,
                //not real memory.
                if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
                {
                    continue;
                }

                block.FreeCount--;

                //Clear the block's bit on every level; stop once a word is
                //still non-zero (upper levels must stay marked).
                int tempIdx = index;

                for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
                {
                    block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));

                    if (block.Masks[level][tempIdx / 64] != 0)
                    {
                        break;
                    }
                }

                address = block.StartAligned + ((ulong)index << block.Order);
            }

            //The address being zero means that no free space was found on that order,
            //just give up and try with the next one.
            if (address == 0)
            {
                break;
            }

            //If we are using a larger order than best fit, then we should
            //split it into smaller blocks.
            ulong firstFreeBlockSize = 1UL << block.Order;

            if (firstFreeBlockSize > bestFitBlockSize)
            {
                FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize);
            }

            //Add new allocated page(s) to the pages list.
            //If an error occurs, then free all allocated pages and fail.
            KernelResult result = pageList.AddRange(address, blockPagesCount);

            if (result != KernelResult.Success)
            {
                FreePages(address, blockPagesCount);

                foreach (KPageNode pageNode in pageList)
                {
                    FreePages(pageNode.Address, pageNode.PagesCount);
                }

                return result;
            }

            pagesCount -= blockPagesCount;
        }
    }

    //Success case, all requested pages were allocated successfully.
    if (pagesCount == 0)
    {
        return KernelResult.Success;
    }

    //Error case, free allocated pages and return out of memory.
    foreach (KPageNode pageNode in pageList)
    {
        FreePages(pageNode.Address, pageNode.PagesCount);
    }

    pageList = null;

    return KernelResult.OutOfMemory;
}
//Returns PagesCount pages starting at Address to the free lists.
//The range is split into a large-order-aligned middle region plus head and
//tail remainders, each freed at the largest order that fits; freed blocks
//are coalesced upwards into larger orders where possible.
private void FreePages(ulong Address, ulong PagesCount)
{
    ulong EndAddr = Address + PagesCount * KMemoryManager.PageSize;

    //Find the largest order whose alignment leaves a non-empty
    //aligned sub-region inside [Address, EndAddr).
    int BlockIndex = BlockOrdersCount - 1;

    ulong AddressRounded = 0;
    ulong EndAddrTruncated = 0;

    for (; BlockIndex >= 0; BlockIndex--)
    {
        KMemoryRegionBlock AllocInfo = Blocks[BlockIndex];

        int BlockSize = 1 << AllocInfo.Order;

        AddressRounded = BitUtils.AlignUp(Address, BlockSize);
        EndAddrTruncated = BitUtils.AlignDown(EndAddr, BlockSize);

        if (AddressRounded < EndAddrTruncated)
        {
            break;
        }
    }

    //Frees one block of the current order at CurrAddress, then walks up the
    //orders merging with buddies while TryCoalesce succeeds. CurrAddress == 0
    //is used as a sentinel to stop (0 is never a freeable block address here).
    void FreeRegion(ulong CurrAddress)
    {
        for (int CurrBlockIndex = BlockIndex; CurrBlockIndex < BlockOrdersCount && CurrAddress != 0; CurrBlockIndex++)
        {
            KMemoryRegionBlock Block = Blocks[CurrBlockIndex];

            Block.FreeCount++;

            ulong FreedBlocks = (CurrAddress - Block.StartAligned) >> Block.Order;

            int Index = (int)FreedBlocks;

            //Set this block's bit at every mask level, walking from the finest
            //level upward. If a word was already non-zero before the update,
            //the upper levels are already marked and we can stop early.
            for (int Level = Block.MaxLevel - 1; Level >= 0; Level--, Index /= 64)
            {
                long Mask = Block.Masks[Level][Index / 64];

                Block.Masks[Level][Index / 64] = Mask | (1L << (Index & 63));

                if (Mask != 0)
                {
                    break;
                }
            }

            //Number of blocks of this order that form one block of the next order.
            int BlockSizeDelta = 1 << (Block.NextOrder - Block.Order);

            int FreedBlocksTruncated = BitUtils.AlignDown((int)FreedBlocks, BlockSizeDelta);

            if (!Block.TryCoalesce(FreedBlocksTruncated, BlockSizeDelta))
            {
                break;
            }

            CurrAddress = Block.StartAligned + ((ulong)FreedBlocksTruncated << Block.Order);
        }
    }

    //Free inside aligned region.
    ulong BaseAddress = AddressRounded;

    while (BaseAddress < EndAddrTruncated)
    {
        ulong BlockSize = 1UL << Blocks[BlockIndex].Order;

        FreeRegion(BaseAddress);

        BaseAddress += BlockSize;
    }

    int NextBlockIndex = BlockIndex - 1;

    //Free region between Address and aligned region start.
    //Walk downwards through smaller orders, freeing as many blocks of each
    //order as fit between Address and the aligned region start.
    BaseAddress = AddressRounded;

    for (BlockIndex = NextBlockIndex; BlockIndex >= 0; BlockIndex--)
    {
        ulong BlockSize = 1UL << Blocks[BlockIndex].Order;

        while (BaseAddress - BlockSize >= Address)
        {
            BaseAddress -= BlockSize;

            FreeRegion(BaseAddress);
        }
    }

    //Free region between aligned region end and End Address.
    BaseAddress = EndAddrTruncated;

    for (BlockIndex = NextBlockIndex; BlockIndex >= 0; BlockIndex--)
    {
        ulong BlockSize = 1UL << Blocks[BlockIndex].Order;

        while (BaseAddress + BlockSize <= EndAddr)
        {
            FreeRegion(BaseAddress);

            BaseAddress += BlockSize;
        }
    }
}
//Creates a region manager covering [Address, EndAddr), with Size bytes of
//usable memory, and builds the multi-level free-block bitmaps for every
//supported block order. All pages are initially marked free.
public KMemoryRegionManager(ulong Address, ulong Size, ulong EndAddr)
{
    Blocks = new KMemoryRegionBlock[BlockOrders.Length];

    this.Address = Address;
    this.Size = Size;
    this.EndAddr = EndAddr;

    BlockOrdersCount = BlockOrders.Length;

    for (int BlockIndex = 0; BlockIndex < BlockOrdersCount; BlockIndex++)
    {
        Blocks[BlockIndex] = new KMemoryRegionBlock();

        Blocks[BlockIndex].Order = BlockOrders[BlockIndex];

        //NextOrder is 0 for the largest order (there is no order above
        //to coalesce into).
        int NextOrder = BlockIndex == BlockOrdersCount - 1 ? 0 : BlockOrders[BlockIndex + 1];

        Blocks[BlockIndex].NextOrder = NextOrder;

        int CurrBlockSize = 1 << BlockOrders[BlockIndex];
        int NextBlockSize = CurrBlockSize;

        if (NextOrder != 0)
        {
            NextBlockSize = 1 << NextOrder;
        }

        //Start is aligned down to the NEXT order's size so that buddy pairs
        //of this order line up with blocks of the order above.
        ulong StartAligned = BitUtils.AlignDown(Address, NextBlockSize);
        ulong EndAddrAligned = BitUtils.AlignDown(EndAddr, CurrBlockSize);

        ulong SizeInBlocksTruncated = (EndAddrAligned - StartAligned) >> BlockOrders[BlockIndex];

        ulong EndAddrRounded = BitUtils.AlignUp(Address + Size, NextBlockSize);

        ulong SizeInBlocksRounded = (EndAddrRounded - StartAligned) >> BlockOrders[BlockIndex];

        Blocks[BlockIndex].StartAligned = StartAligned;
        Blocks[BlockIndex].SizeInBlocksTruncated = SizeInBlocksTruncated;
        Blocks[BlockIndex].SizeInBlocksRounded = SizeInBlocksRounded;

        //Count how many 64-way levels are needed to index
        //SizeInBlocksRounded blocks (at least one level).
        ulong CurrSizeInBlocks = SizeInBlocksRounded;

        int MaxLevel = 0;

        do
        {
            MaxLevel++;
        }
        while ((CurrSizeInBlocks /= 64) != 0);

        Blocks[BlockIndex].MaxLevel = MaxLevel;

        Blocks[BlockIndex].Masks = new long[MaxLevel][];

        //Allocate mask words per level, finest level last; each level up
        //needs 1/64 as many words (rounded up).
        CurrSizeInBlocks = SizeInBlocksRounded;

        for (int Level = MaxLevel - 1; Level >= 0; Level--)
        {
            CurrSizeInBlocks = (CurrSizeInBlocks + 63) / 64;

            Blocks[BlockIndex].Masks[Level] = new long[CurrSizeInBlocks];
        }
    }

    //Populate the free lists with the entire managed range.
    if (Size != 0)
    {
        FreePages(Address, Size / KMemoryManager.PageSize);
    }
}
//Allocates PagesCount pages into PageList, preferring the largest block
//orders first (best fit), splitting larger free blocks when needed.
//When Backwards is true, blocks are taken from the highest addresses first;
//otherwise from the lowest.
//Returns KernelResult.Success on success, or KernelResult.OutOfMemory when
//not enough free pages exist (PageList is set to null in that case).
//
//Fix: the original contained the free-block search loop twice back-to-back,
//with the second copy's "Backwards" direction test inverted. When the
//fallback copy ran it would allocate from the wrong end of the region,
//violating the Backwards contract. The inverted duplicate is removed.
private KernelResult AllocatePagesImpl(ulong PagesCount, bool Backwards, out KPageList PageList)
{
    PageList = new KPageList();

    if (BlockOrdersCount > 0)
    {
        if (GetFreePagesImpl() < PagesCount)
        {
            return KernelResult.OutOfMemory;
        }
    }
    else if (PagesCount != 0)
    {
        return KernelResult.OutOfMemory;
    }

    for (int BlockIndex = BlockOrdersCount - 1; BlockIndex >= 0; BlockIndex--)
    {
        KMemoryRegionBlock Block = Blocks[BlockIndex];

        ulong BestFitBlockSize = 1UL << Block.Order;

        ulong BlockPagesCount = BestFitBlockSize / KMemoryManager.PageSize;

        //Check if this is the best fit for this page size.
        //If so, try allocating as much requested pages as possible.
        while (BlockPagesCount <= PagesCount)
        {
            ulong Address = 0;

            //Search this order and, failing that, the larger orders above it
            //for a free block, walking the mask levels from coarse to fine.
            for (int CurrBlockIndex = BlockIndex; CurrBlockIndex < BlockOrdersCount && Address == 0; CurrBlockIndex++)
            {
                Block = Blocks[CurrBlockIndex];

                int Index = 0;

                bool ZeroMask = false;

                for (int Level = 0; Level < Block.MaxLevel; Level++)
                {
                    long Mask = Block.Masks[Level][Index];

                    if (Mask == 0)
                    {
                        ZeroMask = true;

                        break;
                    }

                    if (Backwards)
                    {
                        //Take the highest set bit (highest address).
                        Index = (Index * 64 + 63) - BitUtils.CountLeadingZeros64(Mask);
                    }
                    else
                    {
                        //Take the lowest set bit (lowest address).
                        Index = Index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(Mask));
                    }
                }

                //Bits beyond SizeInBlocksTruncated are rounding padding,
                //not real memory.
                if (Block.SizeInBlocksTruncated <= (ulong)Index || ZeroMask)
                {
                    continue;
                }

                Block.FreeCount--;

                //Clear the block's bit on every level; stop once a word is
                //still non-zero (upper levels must stay marked).
                int TempIdx = Index;

                for (int Level = Block.MaxLevel - 1; Level >= 0; Level--, TempIdx /= 64)
                {
                    Block.Masks[Level][TempIdx / 64] &= ~(1L << (TempIdx & 63));

                    if (Block.Masks[Level][TempIdx / 64] != 0)
                    {
                        break;
                    }
                }

                Address = Block.StartAligned + ((ulong)Index << Block.Order);
            }

            //The address being zero means that no free space was found on that order,
            //just give up and try with the next one.
            if (Address == 0)
            {
                break;
            }

            //If we are using a larger order than best fit, then we should
            //split it into smaller blocks.
            ulong FirstFreeBlockSize = 1UL << Block.Order;

            if (FirstFreeBlockSize > BestFitBlockSize)
            {
                FreePages(Address + BestFitBlockSize, (FirstFreeBlockSize - BestFitBlockSize) / KMemoryManager.PageSize);
            }

            //Add new allocated page(s) to the pages list.
            //If an error occurs, then free all allocated pages and fail.
            KernelResult Result = PageList.AddRange(Address, BlockPagesCount);

            if (Result != KernelResult.Success)
            {
                FreePages(Address, BlockPagesCount);

                foreach (KPageNode PageNode in PageList)
                {
                    FreePages(PageNode.Address, PageNode.PagesCount);
                }

                return Result;
            }

            PagesCount -= BlockPagesCount;
        }
    }

    //Success case, all requested pages were allocated successfully.
    if (PagesCount == 0)
    {
        return KernelResult.Success;
    }

    //Error case, free allocated pages and return out of memory.
    foreach (KPageNode PageNode in PageList)
    {
        FreePages(PageNode.Address, PageNode.PagesCount);
    }

    PageList = null;

    return KernelResult.OutOfMemory;
}