/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
{
    // Hold a scoped reference on the pages; it is released again if we bail out before SignalSuccess.
    using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);

    ulong va = address;

    foreach (var node in pageList)
    {
        ulong dramOffset = node.Address - DramMemoryMap.DramBase;
        ulong mappingSize = node.PagesCount * PageSize;

        // Back the range with host memory before exposing it to the guest address space.
        Context.Memory.Commit(dramOffset, mappingSize);
        _cpuMemory.Map(va, Context.Memory.GetPointer(dramOffset, mappingSize), mappingSize);

        if (shouldFillPages)
        {
            _cpuMemory.Fill(va, mappingSize, fillValue);
        }

        va += mappingSize;
    }

    // Everything mapped; keep the page references alive past disposal.
    scopedPageList.SignalSuccess();

    return KernelResult.Success;
}
/// <summary>
/// Allocates <paramref name="pagesCount"/> pages, serializing access through the block lock.
/// </summary>
public KernelResult AllocatePages(ulong pagesCount, bool backwards, out KPageList pageList)
{
    lock (_blocks)
    {
        return AllocatePagesImpl(pagesCount, backwards, out pageList);
    }
}
/// <inheritdoc/>
protected override KernelResult Unmap(ulong address, ulong pagesCount)
{
    var heapPagesToClose = new KPageList();

    // Collect the DRAM heap pages backing this mapping so their reference
    // counts can be dropped once the range is unmapped.
    foreach (var region in _cpuMemory.GetPhysicalRegions(address, pagesCount * PageSize))
    {
        ulong dramAddress = GetDramAddressFromHostAddress(region.Address);

        if (dramAddress == ulong.MaxValue)
        {
            continue;
        }

        ulong pa = dramAddress + DramMemoryMap.DramBase;

        if (DramMemoryMap.IsHeapPhysicalAddress(pa))
        {
            heapPagesToClose.AddRange(pa, region.Size / PageSize);
        }
    }

    _cpuMemory.Unmap(address, pagesCount * PageSize);

    heapPagesToClose.DecrementPagesReferenceCount(Context.MemoryManager);

    return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission)
{
    ulong size = pagesCount * PageSize;

    var srcPages = new KPageList();
    var dstPages = new KPageList();

    GetPhysicalRegions(src, size, srcPages);
    GetPhysicalRegions(dst, size, dstPages);

    // The destination must alias exactly the same physical pages as the source.
    if (!dstPages.IsEqual(srcPages))
    {
        return KernelResult.InvalidMemRange;
    }

    KernelResult result = Unmap(dst, pagesCount);

    if (result != KernelResult.Success)
    {
        return result;
    }

    result = Reprotect(src, pagesCount, newSrcPermission);

    if (result != KernelResult.Success)
    {
        // Restore the destination mapping so the address space is unchanged on failure.
        KernelResult mapResult = MapPages(dst, dstPages, oldDstPermission, false, 0);
        Debug.Assert(mapResult == KernelResult.Success);
    }

    return result;
}
/// <summary>
/// Creates a transfer memory backed by an existing shared memory storage,
/// starting out unmapped with read/write permission.
/// </summary>
public KTransferMemory(KernelContext context, SharedMemoryStorage storage) : base(context)
{
    _pageList = storage.GetPageList();
    Permission = KMemoryPermission.ReadAndWrite;

    _isMapped = false;
    _hasBeenInitialized = true;
}
/// <inheritdoc/>
protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
{
    foreach (var region in _cpuMemory.GetPhysicalRegions(va, size))
    {
        // Translate host-relative region addresses back into DRAM physical addresses.
        pageList.AddRange(region.Address + DramMemoryMap.DramBase, region.Size / PageSize);
    }
}
/// <summary>
/// Frees every page range in <paramref name="pageList"/> under the block lock.
/// </summary>
public void FreePages(KPageList pageList)
{
    lock (_blocks)
    {
        foreach (KPageNode node in pageList)
        {
            FreePages(node.Address, node.PagesCount);
        }
    }
}
/// <summary>
/// Returns every page range in <paramref name="pageList"/> to the page heap.
/// </summary>
public void FreePages(KPageList pageList)
{
    lock (_pageHeap)
    {
        foreach (KPageNode node in pageList)
        {
            _pageHeap.Free(node.Address, node.PagesCount);
        }
    }
}
/// <summary>
/// Creates a shared memory over an existing page list, recording the owner
/// process and the permissions for owner and other users.
/// </summary>
public KSharedMemory(
    KPageList pageList,
    long ownerPid,
    MemoryPermission ownerPermission,
    MemoryPermission userPermission)
{
    _pageList = pageList;
    _ownerPid = ownerPid;

    _ownerPermission = ownerPermission;
    _userPermission = userPermission;
}
/// <summary>
/// Creates a shared memory backed by a shared memory storage, recording the
/// owner process and the permissions for owner and other users.
/// </summary>
public KSharedMemory(
    KernelContext context,
    SharedMemoryStorage storage,
    ulong ownerPid,
    KMemoryPermission ownerPermission,
    KMemoryPermission userPermission) : base(context)
{
    _pageList = storage.GetPageList();
    _ownerPid = ownerPid;

    _ownerPermission = ownerPermission;
    _userPermission = userPermission;
}
/// <summary>
/// Creates a shared memory over an existing page list, recording the owner
/// process and the permissions for owner and other users.
/// </summary>
public KSharedMemory(
    KernelContext context,
    KPageList pageList,
    long ownerPid,
    MemoryPermission ownerPermission,
    MemoryPermission userPermission) : base(context)
{
    _pageList = pageList;
    _ownerPid = ownerPid;

    _ownerPermission = ownerPermission;
    _userPermission = userPermission;
}
/// <summary>
/// Creates a shared memory over an existing page list, recording the owner
/// process and the permissions for owner and other users.
/// </summary>
public KSharedMemory(
    Horizon system,
    KPageList pageList,
    long ownerPid,
    MemoryPermission ownerPermission,
    MemoryPermission userPermission) : base(system)
{
    _pageList = pageList;
    _ownerPid = ownerPid;

    _ownerPermission = ownerPermission;
    _userPermission = userPermission;
}
/// <summary>
/// Wraps a page list as shared memory storage, committing host memory for
/// every page range up front.
/// </summary>
public SharedMemoryStorage(KernelContext context, KPageList pageList)
{
    _context = context;
    _pageList = pageList;
    _size = pageList.GetPagesCount() * KPageTableBase.PageSize;

    foreach (KPageNode node in pageList)
    {
        ulong dramOffset = node.Address - DramMemoryMap.DramBase;
        ulong rangeSize = node.PagesCount * KPageTableBase.PageSize;

        // Commit now so later mappings of this storage are already backed.
        context.Memory.Commit(dramOffset, rangeSize);
    }
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
{
    ulong size = pageList.GetPagesCount() * PageSize;

    _cpuMemory.Map(address, 0, size);

    if (shouldFillPages)
    {
        _cpuMemory.Fill(address, size, fillValue);
    }

    return KernelResult.Success;
}
// Allocates pagesCount pages from the page heap, carving blocks from the
// best-fit order downward, and accumulates the allocated ranges in pageList.
// Callers take the page heap lock before calling (see AllocatePages).
private KernelResult AllocatePagesImpl(out KPageList pageList, ulong pagesCount, bool random)
{
    pageList = new KPageList();

    int heapIndex = KPageHeap.GetBlockIndex(pagesCount);

    if (heapIndex < 0)
    {
        // Request is larger than the largest supported block order.
        return(KernelResult.OutOfMemory);
    }

    // Walk down from the best-fit order to the smallest, taking as many
    // blocks of each size as the remaining request allows.
    for (int index = heapIndex; index >= 0; index--)
    {
        ulong pagesPerAlloc = KPageHeap.GetBlockPagesCount(index);

        while (pagesCount >= pagesPerAlloc)
        {
            ulong allocatedBlock = _pageHeap.AllocateBlock(index, random);

            if (allocatedBlock == 0)
            {
                // This order is exhausted; fall through to smaller blocks.
                break;
            }

            KernelResult result = pageList.AddRange(allocatedBlock, pagesPerAlloc);

            if (result != KernelResult.Success)
            {
                // Roll back: free everything accumulated so far plus the
                // block that could not be recorded in the list.
                FreePages(pageList);
                _pageHeap.Free(allocatedBlock, pagesPerAlloc);

                return(result);
            }

            pagesCount -= pagesPerAlloc;
        }
    }

    if (pagesCount != 0)
    {
        // Heap ran dry mid-allocation; release the partial allocation.
        FreePages(pageList);

        return(KernelResult.OutOfMemory);
    }

    return(KernelResult.Success);
}
/// <summary>
/// Allocates pages under the block lock, taking an initial reference on
/// every allocated page range when the allocation succeeds.
/// </summary>
public KernelResult AllocatePages(ulong pagesCount, bool backwards, out KPageList pageList)
{
    lock (_blocks)
    {
        KernelResult result = AllocatePagesImpl(pagesCount, backwards, out pageList);

        if (result == KernelResult.Success)
        {
            foreach (var pageNode in pageList)
            {
                IncrementPagesReferenceCount(pageNode.Address, pageNode.PagesCount);
            }
        }

        return result;
    }
}
/// <summary>
/// Returns true when both lists contain the same page ranges, node for node,
/// in the same order.
/// </summary>
public bool IsEqual(KPageList other)
{
    var left = Nodes.First;
    var right = other.Nodes.First;

    // Walk both lists in lockstep, comparing each range.
    while (left != null && right != null)
    {
        KPageNode a = left.Value;
        KPageNode b = right.Value;

        if (a.Address != b.Address || a.PagesCount != b.PagesCount)
        {
            return false;
        }

        left = left.Next;
        right = right.Next;
    }

    // Equal only if both lists ended together.
    return left == null && right == null;
}
/// <inheritdoc/>
protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
{
    var srcPages = new KPageList();

    GetPhysicalRegions(src, pagesCount * PageSize, srcPages);

    // Revoke access to the source range before aliasing it at the destination.
    KernelResult result = Reprotect(src, pagesCount, KMemoryPermission.None);

    if (result != KernelResult.Success)
    {
        return result;
    }

    result = MapPages(dst, srcPages, newDstPermission, false, 0);

    if (result != KernelResult.Success)
    {
        // Roll back the source protection change on failure.
        KernelResult restoreResult = Reprotect(src, pagesCount, oldSrcPermission);
        Debug.Assert(restoreResult == KernelResult.Success);
    }

    return result;
}
/// <summary>
/// Allocates pages from the page heap, taking an initial reference on every
/// allocated range. A request for zero pages trivially succeeds with an
/// empty list.
/// </summary>
public KernelResult AllocatePages(out KPageList pageList, ulong pagesCount)
{
    if (pagesCount == 0)
    {
        pageList = new KPageList();

        return KernelResult.Success;
    }

    lock (_pageHeap)
    {
        KernelResult result = AllocatePagesImpl(out pageList, pagesCount, false);

        if (result == KernelResult.Success)
        {
            foreach (var pageNode in pageList)
            {
                IncrementPagesReferenceCount(pageNode.Address, pageNode.PagesCount);
            }
        }

        return result;
    }
}
// Marks the scoped operation as successful. Clearing the page list reference
// means the scope no longer owns the references taken in the constructor, so
// they are not released on disposal and stay with the caller.
public void SignalSuccess()
{
    _pageList = null;
}
// Allocates pagesCount pages from the buddy-style region blocks, trying the
// largest block order first and splitting oversized blocks back into the
// free pool as needed.
// NOTE(review): the per-order search loop appears twice below with the
// meaning of "backwards" inverted in the second copy; the second loop only
// runs when the first found nothing (address == 0), so it acts as an
// opposite-direction fallback — confirm this duplication is intentional.
private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
{
    pageList = new KPageList();

    // Fail fast if the region cannot possibly satisfy the request.
    if (_blockOrdersCount > 0)
    {
        if (GetFreePagesImpl() < pagesCount)
        {
            return(KernelResult.OutOfMemory);
        }
    }
    else if (pagesCount != 0)
    {
        return(KernelResult.OutOfMemory);
    }

    for (int blockIndex = _blockOrdersCount - 1; blockIndex >= 0; blockIndex--)
    {
        KMemoryRegionBlock block = _blocks[blockIndex];

        ulong bestFitBlockSize = 1UL << block.Order;

        ulong blockPagesCount = bestFitBlockSize / KMemoryManager.PageSize;

        //Check if this is the best fit for this page size.
        //If so, try allocating as much requested pages as possible.
        while (blockPagesCount <= pagesCount)
        {
            ulong address = 0;

            // Search the multi-level bitmap of each order (starting at the
            // best fit) for a set bit marking a free block.
            for (int currBlockIndex = blockIndex; currBlockIndex < _blockOrdersCount && address == 0; currBlockIndex++)
            {
                block = _blocks[currBlockIndex];

                int index = 0;

                bool zeroMask = false;

                for (int level = 0; level < block.MaxLevel; level++)
                {
                    long mask = block.Masks[level][index];

                    if (mask == 0)
                    {
                        // No free block below this bitmap node.
                        zeroMask = true;

                        break;
                    }

                    if (backwards)
                    {
                        index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
                    }
                    else
                    {
                        index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
                    }
                }

                if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
                {
                    // Index out of range or nothing free; try the next order.
                    continue;
                }

                block.FreeCount--;

                // Clear the allocated bit and propagate emptiness up the levels.
                int tempIdx = index;

                for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
                {
                    block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));

                    if (block.Masks[level][tempIdx / 64] != 0)
                    {
                        break;
                    }
                }

                address = block.StartAligned + ((ulong)index << block.Order);
            }

            // Second pass: identical search but with the backwards branches
            // swapped; only entered when the pass above found nothing.
            for (int currBlockIndex = blockIndex; currBlockIndex < _blockOrdersCount && address == 0; currBlockIndex++)
            {
                block = _blocks[currBlockIndex];

                int index = 0;

                bool zeroMask = false;

                for (int level = 0; level < block.MaxLevel; level++)
                {
                    long mask = block.Masks[level][index];

                    if (mask == 0)
                    {
                        zeroMask = true;

                        break;
                    }

                    if (backwards)
                    {
                        index = index * 64 + BitUtils.CountLeadingZeros64(BitUtils.ReverseBits64(mask));
                    }
                    else
                    {
                        index = (index * 64 + 63) - BitUtils.CountLeadingZeros64(mask);
                    }
                }

                if (block.SizeInBlocksTruncated <= (ulong)index || zeroMask)
                {
                    continue;
                }

                block.FreeCount--;

                int tempIdx = index;

                for (int level = block.MaxLevel - 1; level >= 0; level--, tempIdx /= 64)
                {
                    block.Masks[level][tempIdx / 64] &= ~(1L << (tempIdx & 63));

                    if (block.Masks[level][tempIdx / 64] != 0)
                    {
                        break;
                    }
                }

                address = block.StartAligned + ((ulong)index << block.Order);
            }

            //The address being zero means that no free space was found on that order,
            //just give up and try with the next one.
            if (address == 0)
            {
                break;
            }

            //If we are using a larger order than best fit, then we should
            //split it into smaller blocks.
            ulong firstFreeBlockSize = 1UL << block.Order;

            if (firstFreeBlockSize > bestFitBlockSize)
            {
                FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize);
            }

            //Add new allocated page(s) to the pages list.
            //If an error occurs, then free all allocated pages and fail.
            KernelResult result = pageList.AddRange(address, blockPagesCount);

            if (result != KernelResult.Success)
            {
                FreePages(address, blockPagesCount);

                foreach (KPageNode pageNode in pageList)
                {
                    FreePages(pageNode.Address, pageNode.PagesCount);
                }

                return(result);
            }

            pagesCount -= blockPagesCount;
        }
    }

    //Success case, all requested pages were allocated successfully.
    if (pagesCount == 0)
    {
        return(KernelResult.Success);
    }

    //Error case, free allocated pages and return out of memory.
    foreach (KPageNode pageNode in pageList)
    {
        FreePages(pageNode.Address, pageNode.PagesCount);
    }

    pageList = null;

    return(KernelResult.OutOfMemory);
}
// Allocates pagesCount pages from the buddy-style region blocks, trying the
// largest block order first; the per-order block search and splitting is
// delegated to AllocatePagesForOrder.
private KernelResult AllocatePagesImpl(ulong pagesCount, bool backwards, out KPageList pageList)
{
    pageList = new KPageList();

    // Fail fast if the region cannot possibly satisfy the request.
    if (_blockOrdersCount > 0)
    {
        if (GetFreePagesImpl() < pagesCount)
        {
            return(KernelResult.OutOfMemory);
        }
    }
    else if (pagesCount != 0)
    {
        return(KernelResult.OutOfMemory);
    }

    for (int blockIndex = _blockOrdersCount - 1; blockIndex >= 0; blockIndex--)
    {
        KMemoryRegionBlock block = _blocks[blockIndex];

        ulong bestFitBlockSize = 1UL << block.Order;

        ulong blockPagesCount = bestFitBlockSize / KPageTableBase.PageSize;

        // Check if this is the best fit for this page size.
        // If so, try allocating as much requested pages as possible.
        while (blockPagesCount <= pagesCount)
        {
            ulong address = AllocatePagesForOrder(blockIndex, backwards, bestFitBlockSize);

            // The address being zero means that no free space was found on that order,
            // just give up and try with the next one.
            if (address == 0)
            {
                break;
            }

            // Add new allocated page(s) to the pages list.
            // If an error occurs, then free all allocated pages and fail.
            KernelResult result = pageList.AddRange(address, blockPagesCount);

            if (result != KernelResult.Success)
            {
                FreePages(address, blockPagesCount);

                foreach (KPageNode pageNode in pageList)
                {
                    FreePages(pageNode.Address, pageNode.PagesCount);
                }

                return(result);
            }

            pagesCount -= blockPagesCount;
        }
    }

    // Success case, all requested pages were allocated successfully.
    if (pagesCount == 0)
    {
        return(KernelResult.Success);
    }

    // Error case, free allocated pages and return out of memory.
    foreach (KPageNode pageNode in pageList)
    {
        FreePages(pageNode.Address, pageNode.PagesCount);
    }

    pageList = null;

    return(KernelResult.OutOfMemory);
}
/// <summary>
/// Creates an empty transfer memory; pages are added later.
/// </summary>
public KTransferMemory(KernelContext context) : base(context)
{
    _pageList = new KPageList();
}
/// <summary>
/// Creates an empty code memory with its own synchronization object.
/// </summary>
public KCodeMemory(KernelContext context) : base(context)
{
    _lock = new object();
    _pageList = new KPageList();
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission)
{
    ulong size = pageList.GetPagesCount() * PageSize;

    _cpuMemory.Map(address, 0, size);

    return KernelResult.Success;
}
/// <summary>
/// Creates a scope holding a reference on every page in
/// <paramref name="pageList"/>; the references persist only if
/// SignalSuccess is called before disposal.
/// </summary>
public KScopedPageList(KMemoryManager manager, KPageList pageList)
{
    _manager = manager;
    _pageList = pageList;

    _pageList.IncrementPagesReferenceCount(_manager);
}