/// <inheritdoc/>
protected override KernelResult Unmap(ulong address, ulong pagesCount)
{
    // Gather the heap-backed DRAM pages behind this mapping first,
    // while the host-physical regions are still resolvable.
    KPageList heapPagesToRelease = new KPageList();

    foreach (var hostRegion in _cpuMemory.GetPhysicalRegions(address, pagesCount * PageSize))
    {
        ulong dramAddress = GetDramAddressFromHostAddress(hostRegion.Address);

        // ulong.MaxValue marks a host region with no DRAM backing; skip it.
        if (dramAddress == ulong.MaxValue)
        {
            continue;
        }

        dramAddress += DramMemoryMap.DramBase;

        // Only heap pages are reference counted, so only those are released.
        if (DramMemoryMap.IsHeapPhysicalAddress(dramAddress))
        {
            heapPagesToRelease.AddRange(dramAddress, hostRegion.Size / PageSize);
        }
    }

    _cpuMemory.Unmap(address, pagesCount * PageSize);

    // Drop the references only after the pages are no longer mapped.
    heapPagesToRelease.DecrementPagesReferenceCount(Context.MemoryManager);

    return KernelResult.Success;
}
/// <inheritdoc/>
protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
{
    // Translate each host-physical range into a DRAM-relative range
    // and accumulate it on the caller-supplied page list.
    foreach (var hostRange in _cpuMemory.GetPhysicalRegions(va, size))
    {
        ulong dramAddress = hostRange.Address + DramMemoryMap.DramBase;

        pageList.AddRange(dramAddress, hostRange.Size / PageSize);
    }
}
/// <summary>
/// Allocates <paramref name="pagesCount"/> pages from the page heap,
/// preferring the largest block order that still fits the remaining count.
/// </summary>
/// <param name="pageList">List receiving the allocated page ranges.</param>
/// <param name="pagesCount">Total number of pages to allocate.</param>
/// <param name="random">True to randomize block selection within each order.</param>
/// <returns><see cref="KernelResult.Success"/>, or <see cref="KernelResult.OutOfMemory"/> if the request cannot be satisfied.</returns>
private KernelResult AllocatePagesImpl(out KPageList pageList, ulong pagesCount, bool random)
{
    KPageList allocated = new KPageList();
    pageList = allocated;

    int startIndex = KPageHeap.GetBlockIndex(pagesCount);

    // A negative index means no block order can hold this count at all.
    if (startIndex < 0)
    {
        return KernelResult.OutOfMemory;
    }

    // Walk orders from the best fit downwards, taking as many whole
    // blocks of each size as the remaining request allows.
    for (int blockIndex = startIndex; blockIndex >= 0; blockIndex--)
    {
        ulong blockPages = KPageHeap.GetBlockPagesCount(blockIndex);

        while (pagesCount >= blockPages)
        {
            ulong blockAddress = _pageHeap.AllocateBlock(blockIndex, random);

            // This order is exhausted; fall through to the next smaller one.
            if (blockAddress == 0)
            {
                break;
            }

            KernelResult addResult = allocated.AddRange(blockAddress, blockPages);

            if (addResult != KernelResult.Success)
            {
                // Undo everything taken so far, including the block that
                // failed to be recorded on the list.
                FreePages(allocated);
                _pageHeap.Free(blockAddress, blockPages);

                return addResult;
            }

            pagesCount -= blockPages;
        }
    }

    // Any remainder means even the smallest order ran dry; roll back.
    if (pagesCount != 0)
    {
        FreePages(allocated);

        return KernelResult.OutOfMemory;
    }

    return KernelResult.Success;
}
/// <summary>
/// Allocates <paramref name="pagesCount"/> pages using the buddy-style
/// region blocks, trying the largest block order that fits first.
/// </summary>
/// <param name="pagesCount">Total number of pages to allocate.</param>
/// <param name="backwards">True to allocate from the end of the region towards the start.</param>
/// <param name="pageList">Receives the allocated ranges on success; null on out-of-memory.</param>
/// <returns><see cref="KernelResult.Success"/> when fully satisfied, otherwise an error result.</returns>
private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
{
    KPageList allocated = new KPageList();
    pageList = allocated;

    // Fast rejection: with block orders present the free-page tally must
    // cover the request; without any orders only a zero-page request is valid.
    bool cannotSatisfy = _blockOrdersCount > 0
        ? GetFreePagesImpl() < pagesCount
        : pagesCount != 0;

    if (cannotSatisfy)
    {
        return KernelResult.OutOfMemory;
    }

    // Returns every page recorded so far back to the region.
    void RollBack()
    {
        foreach (KPageNode node in allocated)
        {
            FreePages(node.Address, node.PagesCount);
        }
    }

    for (int order = _blockOrdersCount - 1; order >= 0; order--)
    {
        KMemoryRegionBlock block = _blocks[order];

        ulong bestFitBlockSize = 1UL << block.Order;
        ulong blockPages = bestFitBlockSize / KPageTableBase.PageSize;

        // Keep pulling blocks of this size while the remaining request
        // still needs at least one whole block of it.
        while (blockPages <= pagesCount)
        {
            ulong address = AllocatePagesForOrder(order, backwards, bestFitBlockSize);

            // Zero address: this order has no free space; try the next one.
            if (address == 0)
            {
                break;
            }

            KernelResult result = allocated.AddRange(address, blockPages);

            if (result != KernelResult.Success)
            {
                // Free the block that could not be recorded, then everything else.
                FreePages(address, blockPages);
                RollBack();

                return result;
            }

            pagesCount -= blockPages;
        }
    }

    // All requested pages were obtained.
    if (pagesCount == 0)
    {
        return KernelResult.Success;
    }

    // Partial allocation: release it all and report out of memory.
    RollBack();

    pageList = null;

    return KernelResult.OutOfMemory;
}
/// <summary>
/// Allocates <paramref name="pagesCount"/> pages from the region blocks,
/// scanning the bitmap levels inline to find a free block of the best
/// fitting order (or a larger one, which is then split).
/// </summary>
/// <param name="pagesCount">Total number of pages to allocate.</param>
/// <param name="backwards">True to pick the highest free block index instead of the lowest.</param>
/// <param name="pageList">Receives the allocated ranges on success; null on out-of-memory.</param>
/// <returns><see cref="KernelResult.Success"/> when fully satisfied, otherwise an error result.</returns>
/// <remarks>
/// Fix: the original contained a duplicated copy of the free-block scan loop
/// that only ran when the first scan failed and had the <c>backwards</c>
/// branches swapped, so the fallback allocated from the wrong end of the
/// region. The duplicate has been removed; a single scan with the correct
/// direction remains.
/// </remarks>
private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
{
    pageList = new KPageList();

    if (_blockOrdersCount > 0)
    {
        if (GetFreePagesImpl() < pagesCount)
        {
            return KernelResult.OutOfMemory;
        }
    }
    else if (pagesCount != 0)
    {
        return KernelResult.OutOfMemory;
    }

    for (int blockIndex = _blockOrdersCount - 1; blockIndex >= 0; blockIndex--)
    {
        KMemoryRegionBlock block = _blocks[blockIndex];

        ulong bestFitBlockSize = 1UL << block.Order;

        ulong blockPagesCount = bestFitBlockSize / KMemoryManager.PageSize;

        // Check if this is the best fit for this page size.
        // If so, try allocating as much requested pages as possible.
        while (blockPagesCount <= pagesCount)
        {
            ulong address = 0;

            // Search this order first, then progressively larger orders,
            // until a free block is found or every order is exhausted.
            for (int currBlockIndex = blockIndex; currBlockIndex < _blockOrdersCount && address == 0; currBlockIndex++)
            {
                block = _blocks[currBlockIndex];

                // Descend the multi-level bitmap; at each level the set bits
                // of the mask narrow down where a free block can be.
                int index = 0;

                bool zeroMask = false;

                for (int level = 0; level < block.MaxLevel; level++)
                {
                    long mask = block.Masks[level][index];

                    if (mask == 0)
                    {
                        zeroMask = true;

                        break;
                    }

                    if (backwards)
                    {
                        // Highest set bit: allocate from the end of the region.
                        index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
                    }
                    else
                    {
                        // Lowest set bit: allocate from the start of the region.
                        index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
                    }
                }

                if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
                {
                    continue;
                }

                block.FreeCount--;

                // Clear the allocated block's bit on every level, stopping as
                // soon as a level's word still has other free blocks set.
                int tempIdx = index;

                for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
                {
                    block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));

                    if (block.Masks[level][tempIdx / 64] != 0)
                    {
                        break;
                    }
                }

                address = block.StartAligned + ((ulong)index << block.Order);
            }

            // The address being zero means that no free space was found on that order,
            // just give up and try with the next one.
            if (address == 0)
            {
                break;
            }

            // If we are using a larger order than best fit, then we should
            // split it into smaller blocks.
            ulong firstFreeBlockSize = 1UL << block.Order;

            if (firstFreeBlockSize > bestFitBlockSize)
            {
                FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize);
            }

            // Add new allocated page(s) to the pages list.
            // If an error occurs, then free all allocated pages and fail.
            KernelResult result = pageList.AddRange(address, blockPagesCount);

            if (result != KernelResult.Success)
            {
                FreePages(address, blockPagesCount);

                foreach (KPageNode pageNode in pageList)
                {
                    FreePages(pageNode.Address, pageNode.PagesCount);
                }

                return result;
            }

            pagesCount -= blockPagesCount;
        }
    }

    // Success case, all requested pages were allocated successfully.
    if (pagesCount == 0)
    {
        return KernelResult.Success;
    }

    // Error case, free allocated pages and return out of memory.
    foreach (KPageNode pageNode in pageList)
    {
        FreePages(pageNode.Address, pageNode.PagesCount);
    }

    pageList = null;

    return KernelResult.OutOfMemory;
}