protected override void InvokeCallbackWithDataPtr(CachePage<byte[]> page, Action<UIntPtr, uint> callback)
{
    // Pin the page's managed buffer so its address cannot move while the
    // callback reads through the raw pointer.
    unsafe
    {
        fixed (byte* dataPtr = page.Data)
        {
            callback(new UIntPtr(dataPtr), page.DataExtent);
        }
    }
}
protected unsafe override uint InvokeCallbackWithDataPtr(CachePage<byte[]> page, Func<UIntPtr, uint, uint> callback)
{
    // Pin the page's managed buffer for the duration of the callback and hand
    // the callback a stable pointer plus the number of valid bytes.
    fixed (byte* dataPtr = page.Data)
    {
        uint callbackResult = callback(new UIntPtr(dataPtr), page.DataExtent);
        return callbackResult;
    }
}
protected override uint CopyDataFromPage(CachePage<UIntPtr> page, IntPtr buffer, uint inPageOffset, uint byteCount)
{
    // Clamp the request to however much data remains on the page past the offset.
    uint availableOnPage = page.DataExtent - inPageOffset;
    uint bytesToCopy = availableOnPage < byteCount ? availableOnPage : byteCount;

    unsafe
    {
        // page.Data is already a raw VM pointer for this cache flavor; offset into it directly.
        UIntPtr sourceAddress = new UIntPtr((byte*)page.Data + inPageOffset);
        CacheNativeMethods.Memory.memcpy(buffer, sourceAddress, new UIntPtr(bytesToCopy));
    }

    return bytesToCopy;
}
// Returns true when the requested read lies entirely within one cache page, so the
// caller can take the simple single-page read path. The page must already have been
// materialized (via EnsurePageAtOffset/EnsurePageRangeAtOffset) before calling this.
private bool IsSinglePageRead(uint offset, uint byteCount)
{
    uint alignedOffset = AlignOffsetToPageBoundary(offset);
    int dataIndex = (int)(alignedOffset / VirtualAllocPageSize);
    CachePage startingPage = _pages[dataIndex];
    if (startingPage == null)
    {
        throw new InvalidOperationException($"Inside IsSinglePageRead but the page at index {dataIndex} is null. You need to call EnsurePageAtOffset or EnsurePageRangeAtOffset before calling this method.");
    }
    uint inPageOffset = MapOffsetToPageOffset(offset);
    // NOTE(review): '<' (not '<=') means a read that ends exactly at the page boundary is
    // classified as multi-page. That is conservative (the multi-page path still works), but
    // confirm it is intentional rather than an off-by-one.
    return ((inPageOffset + byteCount) < startingPage.DataExtent);
}
// Best-effort eviction of every paged-in chunk: each page whose write lock can be taken
// without blocking is returned to the ArrayPool and its slot cleared. Pages whose lock is
// held by another thread are skipped (counted in ItemsSkipped) rather than waited on.
// Returns the number of bytes freed and the number of pages skipped.
private (uint DataRemoved, uint ItemsSkipped) TryRemoveAllPagesFromCache(bool disposeLocks)
{
    // Assume we will be able to evict all non-null pages
    uint dataRemoved = 0;
    uint itemsSkipped = 0;
    for (int i = 0; i < _pages.Length; i++)
    {
        CachePage<byte[]> page = _pages[i];
        if (page != null)
        {
            ReaderWriterLockSlim dataChunkLock = _pageLocks[i];
            if (!dataChunkLock.TryEnterWriteLock(timeout: TimeSpan.Zero))
            {
                // Someone holds a read or write lock on this page, skip it
                itemsSkipped++;
                continue;
            }
            try
            {
                // double check that no other thread already scavenged this entry
                page = _pages[i];
                if (page != null)
                {
                    // Hand the rented buffer back to the pool before dropping our reference.
                    ArrayPool<byte>.Shared.Return(page.Data);
                    dataRemoved += page.DataExtent;
                    _pages[i] = null;
                }
            }
            finally
            {
                dataChunkLock.ExitWriteLock();
                if (disposeLocks)
                {
                    // NOTE(review): when disposeLocks is true, locks for pages skipped above
                    // (TryEnterWriteLock failed) are never disposed — confirm callers retry
                    // or otherwise clean those up.
                    dataChunkLock.Dispose();
                    _pageLocks[i] = null;
                }
            }
        }
    }
    return (dataRemoved, itemsSkipped);
}
// Determines whether the requested byte range is serviceable from exactly one cache page.
// The caller must have already paged in the data via EnsurePageAtOffset or
// EnsurePageRangeAtOffset; a null page here is a contract violation.
private bool IsSinglePageRead(uint offset, uint byteCount)
{
    // It is a simple read if the data lies entirely within a single page
    uint pageStart = AlignOffsetToPageBoundary(offset);
    int dataIndex = (int)(pageStart / PageSize);

    CachePage firstPage = _dataChunks[dataIndex];
    if (firstPage is null)
    {
        throw new InvalidOperationException($"Inside IsSinglePageRead but the page at index {dataIndex} is null. You need to call EnsurePageAtOffset or EnsurePageRangeAtOffset before calling this method.");
    }

    uint startWithinPage = MapOffsetToPageOffset(offset);
    bool endsInsidePage = (startWithinPage + byteCount) < firstPage.DataExtent;
    return endsInsidePage;
}
/// <summary>
/// Invokes <paramref name="callback"/> with the page's raw data pointer and the number of
/// valid bytes on the page, returning the callback's result.
/// </summary>
/// <param name="page">Cache page whose Data is already a raw VM pointer (UIntPtr).</param>
/// <param name="callback">Receives (data pointer, data extent) and returns a uint.</param>
/// <returns>Whatever <paramref name="callback"/> returns.</returns>
protected override uint InvokeCallbackWithDataPtr(CachePage<UIntPtr> page, Func<UIntPtr, uint, uint> callback)
{
    // page.Data is already an unmanaged pointer, so no pinning (and therefore no 'unsafe'
    // context) is needed; the unnecessary 'unsafe' modifier was removed for consistency
    // with the Action-based overload of this method.
    return callback(page.Data, page.DataExtent);
}
// Ensures every page needed to read 'bytesNeeded' bytes starting at 'offset' is paged in,
// acquiring and returning the locks the caller must release when done reading.
//
// Contract: the caller already holds 'originalReadLock' as a plain read lock on the first
// page. Each returned tuple says whether that lock is held as an upgradeable read lock
// (acquired here while paging data in) or a plain read lock. If the range runs off the end
// of this segment, the method returns early with whatever locks were acquired.
private List<(ReaderWriterLockSlim Lock, bool IsHeldAsUpgradeableReadLock)> EnsurePageRangeAtOffset(uint offset, ReaderWriterLockSlim originalReadLock, uint bytesNeeded)
{
    List<(ReaderWriterLockSlim Lock, bool IsHeldAsUpgradeableReadLock)> acquiredLocks = new List<(ReaderWriterLockSlim Lock, bool IsHeldAsUpgradeableReadLock)>();
    uint pageAlignedOffset = AlignOffsetToPageBoundary(offset);
    int dataIndex = (int)(pageAlignedOffset / PageSize);
    if (_dataChunks[dataIndex] is null)
    {
        // THREADING: Our contract is the caller must have acquired the read lock at least for this first page, this is because the caller needs
        // to ensure the page cannot be evicted even after we return (presumably they want to read data from it). However, before we page in a
        // currently null page, we have to acquire the write lock. So we acquire an upgradeable read lock on this index, and then double check if
        // the page entry hasn't been set by someone who beat us to the write lock. We will also return this upgraded lock in the collection of
        // upgraded locks we have acquired so the caller can release it when they are done reading the page(s)
        originalReadLock.ExitReadLock();
        originalReadLock.EnterUpgradeableReadLock();
        originalReadLock.EnterWriteLock();
        try
        {
            if (_dataChunks[dataIndex] is null)
            {
                // Page in the data and account for the added size in this entry and the owning cache.
                byte[] data = GetPageAtOffset(pageAlignedOffset, out uint dataRange);
                _dataChunks[dataIndex] = new CachePage(data, dataRange);
                UpdateLastAccessTickCount();
                Interlocked.Add(ref _entrySize, (int)dataRange);
                _updateOwningCacheForAddedChunk(_segmentData.VirtualAddress, dataRange);
            }
        }
        catch (Exception)
        {
            // THREADING: If we see an exception here we are going to rethrow, which means or caller won't be able to release the upgraded read lock, so do it here
            // as to not leave this page permanently locked out
            originalReadLock.ExitWriteLock();
            originalReadLock.ExitUpgradeableReadLock();
            throw;
        }
        // THREADING: Note the read lock held by our call to EnterUpgradeableReadLock is still in effect, so we need to return this lock as one that the
        // caller must release when they are done
        acquiredLocks.Add((originalReadLock, IsHeldAsUpgradeableReadLock: true));
        // THREADING: Exit our write lock as we are done writing the entry
        originalReadLock.ExitWriteLock();
    }
    // THREADING: We still either hold the original read lock or our upgraded readlock (if we set the cache page entry above), either way we know
    // that the entry at dataIndex is non-null
    uint bytesAvailableOnPage = _dataChunks[dataIndex].DataExtent - (offset - pageAlignedOffset);
    if (bytesAvailableOnPage < bytesNeeded)
    {
        // The read spans multiple pages: walk forward page by page, locking and (if needed)
        // paging in each one, until the remaining byte count is satisfied or the segment ends.
        int bytesRemaining = (int)bytesNeeded - (int)bytesAvailableOnPage;
        do
        {
            // Out of data for this memory segment, it may be the case that the read crosses between memory segments
            if ((dataIndex + 1) == _dataChunks.Length)
            {
                return acquiredLocks;
            }
            pageAlignedOffset += PageSize;
            // Take a read lock on the next page entry
            originalReadLock = _dataChunkLocks[dataIndex + 1];
            originalReadLock.EnterReadLock();
            if (_dataChunks[dataIndex + 1] != null)
            {
                // Already paged in: keep the plain read lock and move on.
                bytesRemaining -= (int)_dataChunks[++dataIndex].DataExtent;
                acquiredLocks.Add((originalReadLock, IsHeldAsUpgradeableReadLock: false));
                continue;
            }
            // THREADING: We know the entry must have been null or we would have continued above, so go ahead and enter as an upgradeable lock and
            // acquire the write lock
            originalReadLock.ExitReadLock();
            originalReadLock.EnterUpgradeableReadLock();
            originalReadLock.EnterWriteLock();
            if (_dataChunks[dataIndex + 1] == null)
            {
                // Still not set, so we will set it now
                try
                {
                    byte[] data = GetPageAtOffset(pageAlignedOffset, out uint dataRange);
                    _dataChunks[++dataIndex] = new CachePage(data, dataRange);
                    UpdateLastAccessTickCount();
                    Interlocked.Add(ref _entrySize, (int)dataRange);
                    _updateOwningCacheForAddedChunk(_segmentData.VirtualAddress, dataRange);
                    bytesRemaining -= (int)dataRange;
                }
                catch (Exception)
                {
                    // THREADING: If we see an exception here we are going to rethrow, which means or caller won't be able to release the upgraded read lock, so do it here
                    // as to not leave this page permanently locked out
                    originalReadLock.ExitWriteLock();
                    originalReadLock.ExitUpgradeableReadLock();
                    // Drop any read locks we have taken up to this point as our caller won't be able to do that since we are re-throwing
                    foreach (var (Lock, IsHeldAsUpgradeableReadLock) in acquiredLocks)
                    {
                        if (IsHeldAsUpgradeableReadLock)
                        {
                            Lock.ExitUpgradeableReadLock();
                        }
                        else
                        {
                            Lock.ExitReadLock();
                        }
                    }
                    throw;
                }
            }
            else // someone else beat us to filling this page in, extract the data we need
            {
                bytesRemaining -= (int)_dataChunks[++dataIndex].DataExtent;
            }
            // THREADING: Exit our write lock as we either wrote the entry or someone else did, but keep our read lock so the page can't be
            // evicted before the caller can read it
            originalReadLock.ExitWriteLock();
            acquiredLocks.Add((originalReadLock, IsHeldAsUpgradeableReadLock: true));
        }
        while (bytesRemaining > 0);
    }
    return acquiredLocks;
}
// Reads bytes starting at 'address' until 'terminatingSequence' is found, returning the
// bytes read (excluding the terminator) in 'result'. Returns true if the terminator was
// found, false if the segment's data ran out first (result then holds everything read).
// Pages are locked/paged-in as the scan crosses page boundaries; all locks (both the ones
// taken locally and the ones returned by EnsurePageAtOffset) are released in the finally.
public override bool GetDataFromAddressUntil(ulong address, byte[] terminatingSequence, out byte[] result)
{
    uint offset = (uint)(address - _segmentData.VirtualAddress);
    uint pageAlignedOffset = AlignOffsetToPageBoundary(offset);
    int dataIndex = (int)(pageAlignedOffset / PageSize);
    List<ReaderWriterLockSlim> locallyAcquiredLocks = new List<ReaderWriterLockSlim> { _dataChunkLocks[dataIndex] };
    locallyAcquiredLocks[0].EnterReadLock();
    List<(ReaderWriterLockSlim Lock, bool IsHeldAsUpgradeableReadLock)> acquiredLocks = EnsurePageAtOffset(offset, locallyAcquiredLocks[0]);
    uint pageAdjustedOffset = MapOffsetToPageOffset(offset);
    List<byte> res = new List<byte>();
    try
    {
        CachePage curPage = _dataChunks[dataIndex];
        while (true)
        {
            // NOTE(review): the scan advances in strides of terminatingSequence.Length, so a
            // terminator is only detected when it starts at a multiple of its own length from
            // the starting offset — confirm callers only pass offsets/sequences where that holds
            // (e.g. aligned character-width scans for string termination).
            for (uint i = pageAdjustedOffset; i < curPage.DataExtent;)
            {
                bool wasTerminatorMatch = true;
                for (int j = 0; j < terminatingSequence.Length; j++)
                {
                    // NOTE(review): i + j can index past curPage.DataExtent when the sequence
                    // straddles the page end — the backing array may be larger than the extent,
                    // so this reads bytes beyond the valid range. Verify this cannot misreport
                    // a match across a page boundary.
                    if (curPage.Data[i + j] != terminatingSequence[j])
                    {
                        wasTerminatorMatch = false;
                        break;
                    }
                }
                // We found our terminating sequence, so don't copy it over to the output array
                if (wasTerminatorMatch)
                {
                    result = res.ToArray();
                    return true;
                }
                // copy over the non-matching bytes
                for (int j = 0; j < terminatingSequence.Length; j++)
                {
                    res.Add(curPage.Data[i + j]);
                }
                i += (uint)terminatingSequence.Length;
            }
            // Ran out of data in this segment before we found the end of the sequence
            if ((dataIndex + 1) == _dataChunks.Length)
            {
                result = res.ToArray();
                return false;
            }
            // no offsets when we jump to the next page of data
            pageAdjustedOffset = 0;
            offset += curPage.DataExtent;
            locallyAcquiredLocks.Add(_dataChunkLocks[dataIndex + 1]);
            locallyAcquiredLocks[locallyAcquiredLocks.Count - 1].EnterReadLock();
            acquiredLocks.AddRange(EnsurePageAtOffset(offset, locallyAcquiredLocks[locallyAcquiredLocks.Count - 1]));
            curPage = _dataChunks[++dataIndex];
            if (curPage == null)
            {
                throw new InvalidOperationException($"Expected a CachePage to exist at {dataIndex} but it was null! EnsurePageAtOffset didn't work.");
            }
        }
    }
    finally
    {
        // Locks returned by EnsurePageAtOffset supersede the plain read locks we took locally
        // for the same pages, so remove them from the local list before releasing both sets.
        foreach (var (Lock, IsHeldAsUpgradeableReadLock) in acquiredLocks)
        {
            locallyAcquiredLocks.Remove(Lock);
            if (IsHeldAsUpgradeableReadLock)
            {
                Lock.ExitUpgradeableReadLock();
            }
            else
            {
                Lock.ExitReadLock();
            }
        }
        foreach (ReaderWriterLockSlim remainingLock in locallyAcquiredLocks)
        {
            remainingLock.ExitReadLock();
        }
    }
}
// Invokes 'dataReader' with a pointer to the page at 'pageIndex', paging the data in first
// if necessary. Uses a read lock for the common already-cached case and upgrades to a write
// lock (with a double-check) only when the page must be fetched. The owning cache is
// notified of any size growth after all locks are released.
private void ReadPageDataFromOffset(int pageIndex, Action<UIntPtr, uint> dataReader)
{
    bool notifyCacheOfSizeUpdate = false;
    int addedSize = 0;
    ReaderWriterLockSlim pageLock = _pageLocks[pageIndex];
    pageLock.EnterReadLock();
    bool holdsReadLock = true;
    try
    {
        // THREADING: If the data is not null we can just read it directly as we hold the read lock, if it is null we must acquire the write lock in
        // preparation to fetch the data from physical memory
        if (_pages[pageIndex] != null)
        {
            UpdateLastAccessTickCount();
            InvokeCallbackWithDataPtr(_pages[pageIndex], dataReader);
        }
        else
        {
            // Must release the read lock before taking the write lock (ReaderWriterLockSlim
            // does not allow a read -> write upgrade); track that we no longer hold it so the
            // outer finally doesn't over-release.
            pageLock.ExitReadLock();
            holdsReadLock = false;
            pageLock.EnterWriteLock();
            try
            {
                // THREADING: Double check it's still null (i.e. no other thread beat us to paging this data in between dropping our read lock and acquiring
                // the write lock)
                if (_pages[pageIndex] == null)
                {
                    uint dataRange;
                    T data;
                    (data, dataRange) = GetPageDataAtOffset((uint)pageIndex * EntryPageSize);
                    _pages[pageIndex] = new CachePage<T>(data, dataRange);
                    Interlocked.Add(ref _entrySize, (int)dataRange);
                    UpdateLastAccessTickCount();
                    InvokeCallbackWithDataPtr(_pages[pageIndex], dataReader);
                    // Defer the owning-cache notification until we have dropped the page lock.
                    addedSize = (int)dataRange;
                    notifyCacheOfSizeUpdate = true;
                }
                else
                {
                    // Someone else beat us to retrieving the data, so we can just read
                    UpdateLastAccessTickCount();
                    InvokeCallbackWithDataPtr(_pages[pageIndex], dataReader);
                }
            }
            finally
            {
                pageLock.ExitWriteLock();
            }
        }
    }
    finally
    {
        if (holdsReadLock)
        {
            pageLock.ExitReadLock();
        }
    }
    if (notifyCacheOfSizeUpdate)
    {
        _updateOwningCacheForAddedChunk((uint)addedSize);
    }
}
/// <summary>
/// Releases all AWE-backed cache pages: unmaps the physical memory from each page's VM
/// range, frees the VM ranges, frees the underlying physical pages, and releases the
/// page frame array. Safe to call when already disposed (_pages is null).
/// </summary>
public void Dispose()
{
    if (_pages != null)
    {
        for (int i = 0; i < _pages.Length; i++)
        {
            ReaderWriterLockSlim pageLock = _pageLocks[i];
            pageLock.EnterWriteLock();
            try
            {
                CachePage page = _pages[i];
                if (page != null)
                {
                    // We need to unmap the physical memory from this VM range and then free the VM range
                    bool unmapPhysicalPagesResult = CacheNativeMethods.AWE.MapUserPhysicalPages(page.Data, numberOfPages: (uint)(VirtualAllocPageSize / Environment.SystemPageSize), pageArray: UIntPtr.Zero);
                    if (!unmapPhysicalPagesResult)
                    {
                        // FIX: message spelling corrected ("phsyical" -> "physical") and API name pluralized.
                        Debug.Fail("MapUserPhysicalPages failed to unmap a physical page");
                        // this is an error but we don't want to remove the ptr entry since we apparently didn't unmap the physical memory
                        continue;
                    }
                    bool virtualFreeRes = CacheNativeMethods.Memory.VirtualFree(page.Data, sizeToFree: UIntPtr.Zero, CacheNativeMethods.Memory.VirtualFreeType.Release);
                    if (!virtualFreeRes)
                    {
                        // FIX: this failure is VirtualFree, not MapUserPhysicalPages — the old
                        // message was copy-pasted from the unmap path and was misleading.
                        Debug.Fail("VirtualFree failed to release the page's VM range");
                        // this is an error but we already unmapped the physical memory so also throw away our VM pointer
                        _pages[i] = null;
                        continue;
                    }
                    // Done, throw away our VM pointer
                    _pages[i] = null;
                }
            }
            finally
            {
                pageLock.ExitWriteLock();
            }
        }
        // Free the physical pages themselves; the count is passed by ref and comes back as
        // the number actually freed, which we sanity-check below.
        uint numberOfPagesToFree = (uint)_pageFrameArrayItemCount;
        bool freeUserPhysicalPagesRes = CacheNativeMethods.AWE.FreeUserPhysicalPages(ref numberOfPagesToFree, _pageFrameArray);
        if (!freeUserPhysicalPagesRes)
        {
            // FIX: "Failed tp free" typo corrected.
            Debug.Fail("Failed to free our physical pages");
        }
        if (numberOfPagesToFree != _pageFrameArrayItemCount)
        {
            Debug.Fail("Failed to free ALL of our physical pages");
        }
        // Free our page frame array
        CacheNativeMethods.Memory.HeapFree(_pageFrameArray);
        _pageFrameArray = UIntPtr.Zero;
        _pages = null;
    }
}
// Hands the callback the page's raw data pointer and its valid byte count. No pinning is
// needed here because page.Data is already an unmanaged pointer for this cache flavor.
protected override void InvokeCallbackWithDataPtr(CachePage<UIntPtr> page, Action<UIntPtr, uint> callback)
    => callback(page.Data, page.DataExtent);
/// <summary>
/// Copies up to <paramref name="byteCount"/> bytes from <paramref name="page"/>, starting at
/// <paramref name="inPageOffset"/> within the page, into <paramref name="buffer"/>.
/// </summary>
/// <returns>The number of bytes actually copied, which may be less than
/// <paramref name="byteCount"/> when the page has fewer bytes available past the offset.</returns>
protected abstract uint CopyDataFromPage(CachePage<T> page, IntPtr buffer, uint inPageOffset, uint byteCount);
/// <summary>
/// Invokes <paramref name="callback"/> with a pointer to the page's data and the page's
/// data extent (valid byte count), returning the callback's result.
/// </summary>
protected abstract uint InvokeCallbackWithDataPtr(CachePage<T> page, Func<UIntPtr, uint, uint> callback);
/// <summary>
/// Releases all AWE-backed cache pages and the page frame array. For each page this unmaps
/// the physical memory from the page's VM range, frees the VM range, and disposes the page
/// lock once the slot is cleared; then it frees the physical pages themselves.
/// </summary>
/// <param name="disposing">Unused by this implementation; all resources here are native.</param>
protected override void Dispose(bool disposing)
{
    for (int i = 0; i < _pages.Length; i++)
    {
        ReaderWriterLockSlim pageLock = _pageLocks[i];
        pageLock.EnterWriteLock();
        try
        {
            CachePage<UIntPtr> page = _pages[i];
            if (page != null)
            {
                // NOTE: While VirtualAllocPageSize SHOULD be a multiple of SystemPageSize there is no guarantee I can find that says that is true always and everywhere
                // so to be safe I make sure we don't leave any straggling pages behind if that is true.
                uint numberOfPages = (uint)(page.DataExtent / SystemPageSize) + ((page.DataExtent % SystemPageSize) == 0 ? 0U : 1U);
                // We need to unmap the physical memory from this VM range and then free the VM range
                bool unmapPhysicalPagesResult = CacheNativeMethods.AWE.MapUserPhysicalPages(page.Data, numberOfPages, pageArray: UIntPtr.Zero);
                if (!unmapPhysicalPagesResult)
                {
                    Debug.Fail("MapUserPhysicalPages failed to unmap a physical page");
                    // this is an error but we don't want to remove the ptr entry since we apparently didn't unmap the physical memory
                    continue;
                }
                // NOTE: When calling with VirtualFreeTypeRelease sizeToFree must be 0 (which indicates the entire allocation)
                bool virtualFreeRes = CacheNativeMethods.Memory.VirtualFree(page.Data, sizeToFree: UIntPtr.Zero, CacheNativeMethods.Memory.VirtualFreeType.Release);
                if (!virtualFreeRes)
                {
                    // FIX: this failure is VirtualFree, not MapUserPhysicalPages — the old
                    // message was copy-pasted from the unmap path and was misleading.
                    Debug.Fail("VirtualFree failed to release the page's VM range");
                    // this is an error but we already unmapped the physical memory so also throw away our VM pointer
                    _pages[i] = null;
                    continue;
                }
                // Done, throw away our VM pointer
                _pages[i] = null;
            }
        }
        finally
        {
            pageLock.ExitWriteLock();
            // Only dispose the lock once its page slot is cleared; a failed unmap above
            // leaves the slot (and its lock) in place.
            if (_pages[i] == null)
            {
                pageLock.Dispose();
            }
        }
    }
    // Free the physical pages; the count comes back as the number actually freed.
    uint numberOfPagesToFree = (uint)_pageFrameArrayItemCount;
    bool freeUserPhysicalPagesRes = CacheNativeMethods.AWE.FreeUserPhysicalPages(ref numberOfPagesToFree, _pageFrameArray);
    if (!freeUserPhysicalPagesRes)
    {
        Debug.Fail("Failed to free our physical pages");
    }
    if (numberOfPagesToFree != _pageFrameArrayItemCount)
    {
        Debug.Fail("Failed to free ALL of our physical pages");
    }
    // Free our page frame array
    CacheNativeMethods.Memory.HeapFree(_pageFrameArray);
    _pageFrameArray = UIntPtr.Zero;
}
// Evicts as much paged-in data as possible without blocking: makes up to 5 passes over the
// page array, skipping any page whose lock cannot be taken immediately, and retrying only
// if some pages were busy. Unmapped/freed pages have their VM pointers discarded. Returns
// the number of bytes removed and shrinks _entrySize accordingly (floored at MinSize).
public override long PageOutData()
{
    ThrowIfDisposed();
    if (HeapSegmentCacheEventSource.Instance.IsEnabled())
    {
        HeapSegmentCacheEventSource.Instance.PageOutDataStart();
    }
    long sizeRemoved = 0;
    int maxLoopCount = 5;
    int pass = 0;
    for (; pass < maxLoopCount; pass++)
    {
        // Assume we will be able to evict all non-null pages
        bool encounteredBusyPage = false;
        for (int i = 0; i < _pages.Length; i++)
        {
            ReaderWriterLockSlim pageLock = _pageLocks[i];
            if (!pageLock.TryEnterWriteLock(timeout: TimeSpan.Zero))
            {
                // Someone holds a lock (read or write) on this page, skip it and try to get it in another pass, this prevents us from blocking page out
                // on someone currently reading a page, they will likely be done by our next pass
                encounteredBusyPage = true;
                continue;
            }
            try
            {
                CachePage<UIntPtr> page = _pages[i];
                if (page != null)
                {
                    // Round up so a partial trailing system page is still unmapped.
                    uint pagesToUnMap = (uint)(page.DataExtent / SystemPageSize) + (uint)((page.DataExtent % SystemPageSize) != 0 ? 1 : 0);
                    // We need to unmap the physical memory from this VM range and then free the VM range
                    bool unmapPhysicalPagesResult = CacheNativeMethods.AWE.MapUserPhysicalPages(page.Data, pagesToUnMap, pageArray: UIntPtr.Zero);
                    if (!unmapPhysicalPagesResult)
                    {
                        Debug.Fail("MapUserPhysicalPage failed to unmap a physical page");
                        // this is an error but we don't want to remove the ptr entry since we apparently didn't unmap the physical memory
                        continue;
                    }
                    sizeRemoved += page.DataExtent;
                    bool virtualFreeRes = CacheNativeMethods.Memory.VirtualFree(page.Data, sizeToFree: UIntPtr.Zero, CacheNativeMethods.Memory.VirtualFreeType.Release);
                    if (!virtualFreeRes)
                    {
                        // NOTE(review): this Debug.Fail message looks copy-pasted from the unmap
                        // path above — the operation that failed here is VirtualFree.
                        Debug.Fail("MapUserPhysicalPage failed to unmap a physical page");
                        // this is an error but we already unmapped the physical memory so also throw away our VM pointer
                        _pages[i] = null;
                        continue;
                    }
                    // Done, throw away our VM pointer
                    _pages[i] = null;
                }
            }
            finally
            {
                pageLock.ExitWriteLock();
            }
        }
        // We are done if we didn't encounter any busy (locked) pages
        if (!encounteredBusyPage)
        {
            break;
        }
    }
    // Correct our size based on how much data we could remove (CAS loop so concurrent
    // updates to _entrySize are not lost; never shrink below MinSize)
    int oldCurrent;
    int newCurrent;
    do
    {
        oldCurrent = _entrySize;
        newCurrent = Math.Max(MinSize, oldCurrent - (int)sizeRemoved);
    }
    while (Interlocked.CompareExchange(ref _entrySize, newCurrent, oldCurrent) != oldCurrent);
    if (HeapSegmentCacheEventSource.Instance.IsEnabled())
    {
        HeapSegmentCacheEventSource.Instance.PageOutDataEnd(sizeRemoved);
    }
    return sizeRemoved;
}
/// <summary>
/// Invokes <paramref name="callback"/> with a pointer to the page's data and the page's
/// data extent (valid byte count). Action-based counterpart of the Func overload.
/// </summary>
protected abstract void InvokeCallbackWithDataPtr(CachePage<T> page, Action<UIntPtr, uint> callback);