public void ShiftReadOnlyToTail(out long tailAddress)
{
    tailAddress = GetTailAddress();
    long localTailAddress = tailAddress;
    long currentReadOnlyOffset = ReadOnlyAddress;
    if (MonotonicUpdate(ref ReadOnlyAddress, tailAddress, out long oldReadOnlyOffset))
    {
        epoch.BumpCurrentEpoch(() => OnPagesMarkedReadOnly(localTailAddress, false));
    }
}
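// The MonotonicUpdate helper used above (and as Utility.MonotonicUpdate below) is not shown in
// this excerpt. A minimal sketch of the semantics it is assumed to have: advance the shared
// variable only if the new value is strictly larger, retrying against concurrent writers, and
// report the value that was replaced. The exact implementation may differ.
// (Requires using System.Threading for Interlocked.)
static bool MonotonicUpdateSketch(ref long variable, long newValue, out long oldValue)
{
    do
    {
        oldValue = variable;
        if (oldValue >= newValue) return false;   // Already at or past the requested value: no-op
    } while (Interlocked.CompareExchange(ref variable, newValue, oldValue) != oldValue);
    return true;                                  // We installed newValue over oldValue
}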
/// <summary>
/// <see cref="IDevice.TruncateUntilSegmentAsync(int, AsyncCallback, IAsyncResult)"/>
/// </summary>
/// <param name="toSegment"></param>
/// <param name="callback"></param>
/// <param name="result"></param>
public void TruncateUntilSegmentAsync(int toSegment, AsyncCallback callback, IAsyncResult result)
{
    // Reset begin range to at least toSegment
    if (!Utility.MonotonicUpdate(ref startSegment, toSegment, out int oldStart))
    {
        // If no-op, invoke callback and return immediately
        callback(result);
        return;
    }
    CountdownEvent countdown = new CountdownEvent(toSegment - oldStart);
    // This action needs to be epoch-protected because readers may be issuing reads to the deleted segment, unaware of the delete.
    // Because of the earlier compare-and-swap, the caller has exclusive access to the range [oldStartSegment, newStartSegment),
    // and there will be no double deletes.
    epoch.BumpCurrentEpoch(() =>
    {
        for (int i = oldStart; i < toSegment; i++)
        {
            RemoveSegmentAsync(i, r =>
            {
                if (countdown.Signal())
                {
                    callback(r);
                    countdown.Dispose();
                }
            }, result);
        }
    });
}
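// The CountdownEvent above collapses (toSegment - oldStart) asynchronous removals into a single
// callback invocation: Signal() returns true only for the call that brings the count to zero.
// A standalone sketch of that join pattern, with hypothetical jobs and an onAllDone callback
// standing in for RemoveSegmentAsync and the user callback (requires System.Threading and
// System.Threading.Tasks):
static void RunAllThenNotify(Action[] jobs, Action onAllDone)
{
    var countdown = new CountdownEvent(jobs.Length);
    foreach (var job in jobs)
    {
        Task.Run(() =>
        {
            job();
            if (countdown.Signal())   // True only when the last job completes
            {
                onAllDone();
                countdown.Dispose();
            }
        });
    }
}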
private unsafe bool BufferAndLoad(long currentAddress, long currentPage, long currentFrame, long headAddress)
{
    for (int i = 0; i < frameSize; i++)
    {
        var nextPage = currentPage + i;

        // Cannot load page if it is not fully written to storage
        if (headAddress < (nextPage + 1) << allocator.LogPageSizeBits)
        {
            continue;
        }

        var nextFrame = (currentFrame + i) % frameSize;

        long val;
        while ((val = nextLoadedPage[nextFrame]) < nextPage || loadedPage[nextFrame] < nextPage)
        {
            if (val < nextPage && Interlocked.CompareExchange(ref nextLoadedPage[nextFrame], nextPage, val) == val)
            {
                var tmp_i = i;
                epoch.BumpCurrentEpoch(() =>
                {
                    allocator.AsyncReadPagesFromDeviceToFrame(tmp_i + (currentAddress >> allocator.LogPageSizeBits), 1, endAddress, AsyncReadPagesCallback, Empty.Default, frame, out loaded[nextFrame], 0, null, null, loadedCancel[nextFrame]);
                    loadedPage[nextFrame] = nextPage;
                });
            }
            else
            {
                epoch.ProtectAndDrain();
            }
        }
    }
    return WaitForFrameLoad(currentAddress, currentFrame);
}
/// <summary>
/// Attempts to advance the version to the target version, executing the given action in a critical section
/// where no batches are being processed before entering the next version. Each version will be advanced to
/// exactly once. This method may fail and return false if the given target version is not larger than the
/// current version (possibly due to concurrent invocations to advance to the same version).
/// After the method returns, subsequent calls to Version() and Enter() will return at least the value of
/// targetVersion.
/// </summary>
/// <param name="criticalSection"> The logic to execute in a critical section </param>
/// <param name="targetVersion"> The version to advance to, or -1 for the immediate next version </param>
/// <returns> Whether the advance was successful </returns>
public bool TryAdvanceVersion(Action<long, long> criticalSection, long targetVersion = -1)
{
    var ev = new ManualResetEventSlim();
    // Compare-and-exchange to install our advance
    while (Interlocked.CompareExchange(ref versionChanged, ev, null) != null) { }

    if (targetVersion != -1 && targetVersion <= version)
    {
        versionChanged.Set();
        versionChanged = null;
        return false;
    }

    // Any thread that sees ev will be in v + 1, because the bump happens only after ev is set.
    var original = Interlocked.Read(ref version);
    epoch.BumpCurrentEpoch(() =>
    {
        version = targetVersion == -1 ? original + 1 : targetVersion;
        criticalSection(original, version);
        versionChanged.Set();
        versionChanged = null;
    });

    // Make sure that even if we are the only thread, we are able to make progress
    if (!epoch.ThisInstanceProtected())
    {
        epoch.Resume();
        epoch.Suspend();
    }
    return true;
}
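// A minimal usage sketch for the method above (hypothetical caller, not part of the excerpt):
// the critical section receives the old and new version numbers and runs only after all threads
// protected by the current epoch have moved on, so no batch straddles the version boundary.
bool advanced = TryAdvanceVersion((oldVersion, newVersion) =>
{
    // e.g. snapshot state belonging to oldVersion before work in newVersion begins
    Console.WriteLine($"advancing {oldVersion} -> {newVersion}");
});
if (!advanced)
{
    // Another thread is concurrently advancing to (at least) the requested version.
}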
/// <summary>
/// Buffer and load
/// </summary>
/// <param name="currentAddress"></param>
/// <param name="currentPage"></param>
/// <param name="currentFrame"></param>
/// <param name="headAddress"></param>
/// <param name="endAddress"></param>
/// <returns></returns>
protected unsafe bool BufferAndLoad(long currentAddress, long currentPage, long currentFrame, long headAddress, long endAddress)
{
    for (int i = 0; i < frameSize; i++)
    {
        var nextPage = currentPage + i;

        var pageStartAddress = nextPage << logPageSizeBits;

        // Cannot load page if it is entirely in memory or beyond the end address
        if (pageStartAddress >= headAddress || pageStartAddress >= endAddress)
        {
            continue;
        }

        var pageEndAddress = (nextPage + 1) << logPageSizeBits;
        if (endAddress < pageEndAddress)
        {
            pageEndAddress = endAddress;
        }
        if (headAddress < pageEndAddress)
        {
            pageEndAddress = headAddress;
        }

        var nextFrame = (currentFrame + i) % frameSize;

        long val;
        while ((val = nextLoadedPage[nextFrame]) < pageEndAddress || loadedPage[nextFrame] < pageEndAddress)
        {
            if (val < pageEndAddress && Interlocked.CompareExchange(ref nextLoadedPage[nextFrame], pageEndAddress, val) == val)
            {
                var tmp_i = i;
                if (epoch != null)
                {
                    epoch.BumpCurrentEpoch(() =>
                    {
                        AsyncReadPagesFromDeviceToFrame(tmp_i + (currentAddress >> logPageSizeBits), 1, endAddress, Empty.Default, out loaded[nextFrame], 0, null, null, loadedCancel[nextFrame]);
                        loadedPage[nextFrame] = pageEndAddress;
                    });
                }
                else
                {
                    AsyncReadPagesFromDeviceToFrame(tmp_i + (currentAddress >> logPageSizeBits), 1, endAddress, Empty.Default, out loaded[nextFrame], 0, null, null, loadedCancel[nextFrame]);
                    loadedPage[nextFrame] = pageEndAddress;
                }
            }
            else
            {
                epoch?.ProtectAndDrain();
            }
        }
    }
    return WaitForFrameLoad(currentAddress, currentFrame);
}
private unsafe bool BufferAndLoad(long currentAddress, long currentPage, long currentFrame, long headAddress)
{
    for (int i = 0; i < frameSize; i++)
    {
        var nextPage = currentPage + i;

        var pageEndAddress = (nextPage + 1) << allocator.LogPageSizeBits;
        if (fasterLog.readOnlyMode)
        {
            // Support partial page reads of committed data
            var _flush = fasterLog.CommittedUntilAddress;
            if (_flush < pageEndAddress)
            {
                pageEndAddress = _flush;
            }
        }

        // Cannot load page if it is not fully written to storage
        if (headAddress < pageEndAddress)
        {
            continue;
        }

        var nextFrame = (currentFrame + i) % frameSize;

        long val;
        while ((val = nextLoadedPage[nextFrame]) < pageEndAddress || loadedPage[nextFrame] < pageEndAddress)
        {
            if (val < pageEndAddress && Interlocked.CompareExchange(ref nextLoadedPage[nextFrame], pageEndAddress, val) == val)
            {
                var tmp_i = i;
                epoch.BumpCurrentEpoch(() =>
                {
                    allocator.AsyncReadPagesFromDeviceToFrame(tmp_i + (currentAddress >> allocator.LogPageSizeBits), 1, endAddress, AsyncReadPagesCallback, Empty.Default, frame, out loaded[nextFrame], 0, null, null, loadedCancel[nextFrame]);
                    loadedPage[nextFrame] = pageEndAddress;
                });
            }
            else
            {
                epoch.ProtectAndDrain();
            }
        }
    }
    return WaitForFrameLoad(currentAddress, currentFrame);
}
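// The excerpts above share one pattern: publish a new value with a compare-and-swap, then hand
// any follow-up work to the epoch as a deferred action via BumpCurrentEpoch(Action), so it runs
// only after every protected thread has refreshed past the current epoch. A condensed sketch of
// that pattern, assuming `epoch` is the surrounding instance's LightEpoch-style epoch object and
// that the shared variable only ever moves forward:
void PublishAndDefer(ref long shared, long newValue, Action onAllReadersMovedOn)
{
    long old;
    do
    {
        old = shared;
        if (old >= newValue) return;                       // Someone else already advanced at least this far
    } while (Interlocked.CompareExchange(ref shared, newValue, old) != old);

    // Runs after all threads have observed the bump, so no reader can still be
    // relying on state that predates newValue.
    epoch.BumpCurrentEpoch(() => onAllReadersMovedOn());
}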