/// <summary>
/// Releases a mapped allocation: optionally tells the kernel the pages are no longer
/// needed (when the file is transient), unmaps the range, and unregisters the mapping
/// from the native-memory bookkeeping.
/// </summary>
/// <param name="baseAddress">Base address of the mapped region to release.</param>
/// <param name="size">Size of the mapped region in bytes.</param>
public override unsafe void ReleaseAllocationInfo(byte* baseAddress, long size)
{
    base.ReleaseAllocationInfo(baseAddress, size);

    // Some platforms/configurations keep the mapping alive for the process lifetime.
    if (_supportsUnmapping == false)
        return;

    var ptr = new IntPtr(baseAddress);

    if (DeleteOnClose)
    {
        // The file is going away anyway, so hint the kernel it can drop the pages
        // immediately instead of writing them back. Failure here is only a lost
        // optimization, so we just log it.
        if (Syscall.madvise(ptr, new UIntPtr((ulong)size), MAdvFlags.MADV_DONTNEED) != 0)
        {
            if (_log.IsInfoEnabled)
            {
                // Fixed typo: message previously said "MDV_DONTNEED", which did not
                // match the MADV_DONTNEED flag actually passed above.
                _log.Info($"Failed to madvise MADV_DONTNEED for {FileName?.FullPath}");
            }
        }
    }

    // Unmapping failure, unlike the advisory call above, is a real error.
    var result = Syscall.munmap(ptr, (UIntPtr)size);
    if (result == -1)
    {
        var err = Marshal.GetLastWin32Error();
        Syscall.ThrowLastError(err, "munmap " + FileName);
    }

    NativeMemory.UnregisterFileMapping(FileName.FullPath, ptr, size);
}
/// <summary>
/// Advises the kernel (via madvise/MADV_WILLNEED) that each of the given memory
/// ranges will be needed soon, so it can start paging them in ahead of time.
/// </summary>
/// <param name="list">Pointer to an array of address/length range entries.</param>
/// <param name="count">Number of entries in <paramref name="list"/>.</param>
protected internal override unsafe void PrefetchRanges(Win32MemoryMapNativeMethods.WIN32_MEMORY_RANGE_ENTRY* list, int count)
{
    var end = list + count;
    for (var entry = list; entry < end; entry++)
    {
        // Prefetching is purely a performance hint; a failed madvise call costs us
        // nothing but the missed readahead, so the return value is deliberately ignored.
        Syscall.madvise(
            new IntPtr(entry->VirtualAddress),
            (UIntPtr)entry->NumberOfBytes.ToPointer(),
            MAdvFlags.MADV_WILLNEED);
    }
}
/// <summary>
/// Advises the kernel (via madvise/MADV_WILLNEED) that each of the given memory
/// ranges will be needed soon. Best-effort only: errors are intentionally ignored.
/// </summary>
/// <param name="ranges">The address/length range entries to prefetch.</param>
private unsafe void PrefetchRanges(List<Win32MemoryMapNativeMethods.WIN32_MEMORY_RANGE_ENTRY> ranges)
{
    foreach (var entry in ranges)
    {
        // A failed advisory call only loses the readahead benefit, so the
        // result is discarded. TODO: consider logging the failure.
        var ignored = Syscall.madvise(
            new IntPtr(entry.VirtualAddress),
            (UIntPtr)entry.NumberOfBytes.ToPointer(),
            MAdvFlags.MADV_WILLNEED);
    }
}
/// <summary>
/// Returns a pointer to the requested page, first issuing a disk-prefetch hint
/// (madvise MADV_WILLNEED) for the page's segment when the platform supports it.
/// </summary>
/// <param name="tx">The transaction state requesting the page.</param>
/// <param name="pageNumber">The page to acquire.</param>
/// <param name="pagerState">Optional explicit pager state; defaults to the current one.</param>
public override unsafe byte* AcquirePagePointer(IPagerLevelTransactionState tx, long pageNumber, PagerState pagerState = null)
{
    // We need to decide what pager state we are going to use right now, or risk
    // inconsistencies when performing prefetches from disk.
    var state = pagerState ?? _pagerState;

    if (PlatformDetails.CanPrefetch)
    {
        // BUGFIX: this previously consulted `_pagerState` directly, bypassing the
        // snapshot taken above and reintroducing the very inconsistency the
        // snapshot exists to prevent (the field can change concurrently).
        if (state.ShouldPrefetchSegment(pageNumber, out void* virtualAddress, out long bytes))
        {
            // Advisory only — the return value is intentionally ignored.
            Syscall.madvise(new IntPtr(virtualAddress), (UIntPtr)bytes, MAdvFlags.MADV_WILLNEED);
        }
    }

    return base.AcquirePagePointer(tx, pageNumber, state);
}