public void FasterLogShiftTailStressTest()
{
    // Get an excruciatingly slow storage device to maximize chance of clogging the flush pipeline
    device = new LocalMemoryDevice(1L << 28, 1 << 28, 2, sector_size: 512, latencyMs: 50, fileName: "stress.log");
    var logSettings = new FasterLogSettings
    {
        LogDevice = device,
        LogChecksum = LogChecksumType.None,
        LogCommitManager = manager,
        SegmentSizeBits = 28
    };
    log = new FasterLog(logSettings);

    var payload = new byte[entryLength];
    for (var b = 0; b < entryLength; b++)
        payload[b] = (byte)b;

    // First batch: insert entries without any commit records, for comparison
    var batchSize = 5 * numEntries;
    for (var n = 0; n < batchSize; n++)
        log.Enqueue(payload);
    var referenceTailLength = log.TailAddress;

    var enqueueDone = new ManualResetEventSlim();
    var committers = new List<Thread>();

    // Make sure to spin up many commit threads to expose lots of interleavings
    var committerCount = 2 * Math.Max(1, Environment.ProcessorCount - 1);
    for (var t = 0; t < committerCount; t++)
    {
        committers.Add(new Thread(() =>
        {
            // Absolutely clog the commit pipeline until all enqueues are done
            while (!enqueueDone.IsSet)
                log.Commit();
        }));
    }
    foreach (var committer in committers)
        committer.Start();

    // Second batch runs concurrently with the committers
    for (var n = 0; n < batchSize; n++)
        log.Enqueue(payload);
    enqueueDone.Set();

    foreach (var committer in committers)
        committer.Join();

    // We expect the test to finish and not get stuck somewhere.
    // Ensure clean shutdown.
    log.Commit(true);
}
public async ValueTask FlakyLogTestCleanFailure([Values] bool isAsync)
{
    // Simulated device: reads always succeed, 10% of writes fail permanently
    var errorOptions = new ErrorSimulationOptions
    {
        readTransientErrorRate = 0,
        readPermanentErrorRate = 0,
        writeTransientErrorRate = 0,
        writePermanentErrorRate = 0.1,
    };
    device = new SimulatedFlakyDevice(Devices.CreateLogDevice(path + "fasterlog.log", deleteOnClose: true), errorOptions);
    var logSettings = new FasterLogSettings
    {
        LogDevice = device,
        LogChecksum = LogChecksumType.PerEntry,
        LogCommitManager = manager
    };
    log = new FasterLog(logSettings);

    var payload = new byte[entryLength];
    for (var b = 0; b < entryLength; b++)
        payload[b] = (byte)b;

    try
    {
        // Ensure we execute long enough to trigger errors
        for (var round = 0; round < 100; round++)
        {
            for (var n = 0; n < numEntries; n++)
                log.Enqueue(payload);

            if (isAsync)
                await log.CommitAsync();
            else
                log.Commit();
        }
    }
    catch (CommitFailureException e)
    {
        // On a clean failure, neither commit nor flush progress may be reported
        // past the start of the failed commit range
        var errorRangeStart = e.LinkedCommitInfo.CommitInfo.FromAddress;
        Assert.LessOrEqual(log.CommittedUntilAddress, errorRangeStart);
        Assert.LessOrEqual(log.FlushedUntilAddress, errorRangeStart);
        return;
    }

    // Should not ignore failures
    Assert.Fail();
}
/// <summary>
/// Ensures the base folder exists, then creates the FasterLog instance.
/// </summary>
public async Task InitializeAsync()
{
    // CreateDirectory is a no-op when the directory already exists, so the
    // Exists() pre-check was redundant (and racy under concurrent creation).
    System.IO.Directory.CreateDirectory(this._BaseFolder);

    // NOTE(review): _BaseFolder is created but never passed into FasterLogSettings,
    // so the log is placed at the settings' default location — confirm this is intentional.
    FasterLogSettings logSettings = new FasterLogSettings();
    this._Log = await FasterLog.CreateAsync(logSettings);
}
// This creates a separate FasterLog over the same log file, using RecoverReadOnly to continuously update
// to the primary FasterLog's commits.
private async Task ReadOnlyConsumerAsync(string deviceName, CancellationToken cancellationToken, bool isAsync)
{
    // Open a second device over the same file; ReadOnlyMode means this log never writes
    using var device = Devices.CreateLogDevice(deviceName);
    var logSettings = new FasterLogSettings { LogDevice = device, ReadOnlyMode = true, PageSizeBits = 9, SegmentSizeBits = 9 };
    using var log = isAsync ? await FasterLog.CreateAsync(logSettings) : new FasterLog(logSettings);

    // Fire-and-forget: periodically re-recover to pick up the primary log's new commits.
    // The local function below captures log/cancellationToken/isAsync.
    var _ = BeginRecoverAsyncLoop();

    // This enumerator waits asynchronously when we have reached the committed tail of the duplicate FasterLog. When RecoverReadOnly
    // reads new data committed by the primary FasterLog, it signals commit completion to let iter continue to the new tail.
    using var iter = log.Scan(log.BeginAddress, long.MaxValue);
    var prevValue = -1L;
    try
    {
        await foreach (var (result, _, _, nextAddress) in iter.GetAsyncEnumerable(cancellationToken))
        {
            // Entries are UTF8-encoded integers; the producer writes them in strictly
            // increasing order, so each value must be exactly prevValue + 1
            var value = long.Parse(Encoding.UTF8.GetString(result));
            Assert.AreEqual(prevValue + 1, value);
            prevValue = value;
            iter.CompleteUntil(nextAddress);
            // Signal the driver once the last expected element has been consumed
            // (done is presumably a semaphore field shared with the test driver — confirm)
            if (prevValue == NumElements - 1)
            {
                done.Release();
            }
        }
    }
    catch (OperationCanceledException) { }
    // Cancellation is the normal exit path; verify we saw every element before it fired
    Assert.AreEqual(NumElements - 1, prevValue);

    async Task BeginRecoverAsyncLoop()
    {
        while (!cancellationToken.IsCancellationRequested)
        {
            // Delay for a while before recovering to the last commit by the primary FasterLog instance.
            await Task.Delay(TimeSpan.FromMilliseconds(RestorePeriodMs), cancellationToken);
            if (cancellationToken.IsCancellationRequested)
            {
                break;
            }
            if (isAsync)
            {
                await log.RecoverReadOnlyAsync();
            }
            else
            {
                log.RecoverReadOnly();
            }
        }
    }
}
public async Task RecoverReadOnlyCheck1([Values] bool isAsync)
{
    using var device = Devices.CreateLogDevice(deviceName);
    var logSettings = new FasterLogSettings
    {
        LogDevice = device,
        MemorySizeBits = 11,
        PageSizeBits = 9,
        MutableFraction = 0.5,
        SegmentSizeBits = 9
    };
    using var log = isAsync ? await FasterLog.CreateAsync(logSettings) : new FasterLog(logSettings);

    // Run producer, committer, and read-only consumer concurrently until all complete
    var producer = ProducerAsync(log, cts);
    var committer = CommitterAsync(log, cts.Token);
    var consumer = ReadOnlyConsumerAsync(deviceName, cts.Token, isAsync);
    await Task.WhenAll(producer, committer, consumer);
}
/// <summary>
/// Main program entry point
/// </summary>
static void Main()
{
    bool sync = true;

    // Populate entry being inserted
    for (int b = 0; b < entryLength; b++)
        staticEntry[b] = (byte)b;

    // Create settings to write logs and commits at specified local path
    using var config = new FasterLogSettings("./FasterLogSample", deleteDirOnDispose: true);

    // FasterLog will recover and resume if there is a previous commit found
    log = new FasterLog(config);

    using (iter = log.Scan(log.BeginAddress, long.MaxValue))
    {
        if (sync)
        {
            // Log writer thread: create as many as needed
            new Thread(new ThreadStart(LogWriterThread)).Start();

            // Threads for iterator scan: create as many as needed
            new Thread(() => ScanThread()).Start();

            // Threads for reporting, commit
            new Thread(new ThreadStart(ReportThread)).Start();
            var commitThread = new Thread(new ThreadStart(CommitThread));
            commitThread.Start();
            commitThread.Join();
        }
        else
        {
            // Async version of demo: expect lower performance,
            // particularly for small payload sizes
            const int NumParallelTasks = 10_000;
            ThreadPool.SetMinThreads(2 * Environment.ProcessorCount, 2 * Environment.ProcessorCount);
            TaskScheduler.UnobservedTaskException += (object sender, UnobservedTaskExceptionEventArgs e) =>
            {
                Console.WriteLine($"Unobserved task exception: {e.Exception}");
                e.SetObserved();
            };

            var writers = new Task[NumParallelTasks];
            for (int i = 0; i < NumParallelTasks; i++)
            {
                int local = i;
                writers[i] = Task.Run(() => AsyncLogWriter(local));
            }
            var scan = Task.Run(() => AsyncScan());

            // Threads for reporting, commit
            new Thread(new ThreadStart(ReportThread)).Start();
            new Thread(new ThreadStart(CommitThread)).Start();

            Task.WaitAll(writers);
            Task.WaitAll(scan);
        }
    }
}
// Core round-trip test: enqueue numEntries identical payloads, commit, then scan them
// back with the iterator flavor selected by iteratorType and verify count and contents.
private async ValueTask FasterLogTest1(LogChecksumType logChecksum, IDevice device, ILogCommitManager logCommitManager, FasterLogTests.IteratorType iteratorType)
{
    var logSettings = new FasterLogSettings { PageSizeBits = 20, SegmentSizeBits = 20, LogDevice = device, LogChecksum = logChecksum, LogCommitManager = logCommitManager };
    // Async iterator variants exercise the CreateAsync construction path as well
    log = FasterLogTests.IsAsync(iteratorType) ? await FasterLog.CreateAsync(logSettings) : new FasterLog(logSettings);

    // Payload is a fixed byte ramp 0,1,2,... so content checks are deterministic
    byte[] entry = new byte[entryLength];
    for (int i = 0; i < entryLength; i++)
    {
        entry[i] = (byte)i;
    }
    for (int i = 0; i < numEntries; i++)
    {
        log.Enqueue(entry);
    }
    // Synchronous commit so all entries are durable before scanning
    log.Commit(true);

    using (var iter = log.Scan(0, long.MaxValue))
    {
        // Counter tracks entries seen and may truncate the log as a side test
        var counter = new FasterLogTests.Counter(log);
        switch (iteratorType)
        {
            case FasterLogTests.IteratorType.AsyncByteVector:
                await foreach ((byte[] result, _, _, long nextAddress) in iter.GetAsyncEnumerable())
                {
                    Assert.IsTrue(result.SequenceEqual(entry));
                    counter.IncrementAndMaybeTruncateUntil(nextAddress);
                    // MoveNextAsync() would hang at TailAddress, waiting for more entries (that we don't add).
                    // Note: If this happens and the test has to be canceled, there may be a leftover blob from the log.Commit(), because
                    // the log device isn't Dispose()d; the symptom is currently a numeric string format error in DefaultCheckpointNamingScheme.
                    if (nextAddress == log.TailAddress)
                    {
                        break;
                    }
                }
                break;
            case FasterLogTests.IteratorType.AsyncMemoryOwner:
                await foreach ((IMemoryOwner<byte> result, int _, long _, long nextAddress) in iter.GetAsyncEnumerable(MemoryPool<byte>.Shared))
                {
                    // Pooled buffer may be larger than the entry; compare only the entry-length prefix
                    Assert.IsTrue(result.Memory.Span.ToArray().Take(entry.Length).SequenceEqual(entry));
                    // Caller owns the pooled memory and must return it
                    result.Dispose();
                    counter.IncrementAndMaybeTruncateUntil(nextAddress);
                    // MoveNextAsync() would hang at TailAddress, waiting for more entries (that we don't add).
                    // Note: If this happens and the test has to be canceled, there may be a leftover blob from the log.Commit(), because
                    // the log device isn't Dispose()d; the symptom is currently a numeric string format error in DefaultCheckpointNamingScheme.
                    if (nextAddress == log.TailAddress)
                    {
                        break;
                    }
                }
                break;
            case FasterLogTests.IteratorType.Sync:
                // Synchronous GetNext returns false at the committed tail, so no break-at-tail check is needed
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    Assert.IsTrue(result.SequenceEqual(entry));
                    counter.IncrementAndMaybeTruncateUntil(iter.NextAddress);
                }
                break;
            default:
                Assert.Fail("Unknown IteratorType");
                break;
        }
        // Every enqueued entry must have been iterated exactly once
        Assert.IsTrue(counter.count == numEntries);
    }
    log.Dispose();
}
public void FlakyLogTestConcurrentWriteFailure()
{
    // Simulated device: reads always succeed, 5% of writes fail permanently
    var errorOptions = new ErrorSimulationOptions
    {
        readTransientErrorRate = 0,
        readPermanentErrorRate = 0,
        writeTransientErrorRate = 0,
        writePermanentErrorRate = 0.05,
    };
    device = new SimulatedFlakyDevice(Devices.CreateLogDevice(path + "fasterlog.log", deleteOnClose: true), errorOptions);
    var logSettings = new FasterLogSettings
    {
        LogDevice = device,
        LogChecksum = LogChecksumType.PerEntry,
        LogCommitManager = manager
    };
    log = new FasterLog(logSettings);

    var payload = new byte[entryLength];
    for (var b = 0; b < entryLength; b++)
        payload[b] = (byte)b;

    var failureList = new List<CommitFailureException>();
    ThreadStart runTask = () =>
    {
        var random = new Random();
        try
        {
            // Ensure we execute long enough to trigger errors
            for (var round = 0; round < 100; round++)
            {
                for (var n = 0; n < numEntries; n++)
                {
                    log.Enqueue(payload);
                    // create randomly interleaved concurrent writes
                    if (random.NextDouble() < 0.1)
                        log.Commit();
                }
            }
        }
        catch (CommitFailureException e)
        {
            lock (failureList)
                failureList.Add(e);
        }
    };

    var threads = new List<Thread>();
    for (var i = 0; i < Environment.ProcessorCount + 1; i++)
    {
        var worker = new Thread(runTask);
        worker.Start();
        threads.Add(worker);
    }
    foreach (var worker in threads)
        worker.Join();

    // Every thread observed the failure
    Assert.IsTrue(failureList.Count == threads.Count);
    // They all observed the same failure
    foreach (var failure in failureList)
        Assert.AreEqual(failure.LinkedCommitInfo.CommitInfo, failureList[0].LinkedCommitInfo.CommitInfo);
}
public async ValueTask FlakyLogTestTolerateFailure([Values] IteratorType iteratorType)
{
    // Simulated device: reads always succeed, 10% of writes fail permanently;
    // TolerateDeviceFailure lets the log continue past failed commits
    var errorOptions = new ErrorSimulationOptions
    {
        readTransientErrorRate = 0,
        readPermanentErrorRate = 0,
        writeTransientErrorRate = 0,
        writePermanentErrorRate = 0.1,
    };
    device = new SimulatedFlakyDevice(Devices.CreateLogDevice(path + "fasterlog.log", deleteOnClose: true), errorOptions);
    var logSettings = new FasterLogSettings
    {
        LogDevice = device,
        LogChecksum = LogChecksumType.PerEntry,
        LogCommitManager = manager,
        TolerateDeviceFailure = true
    };
    log = new FasterLog(logSettings);

    var payload = new byte[entryLength];
    for (var b = 0; b < entryLength; b++)
        payload[b] = (byte)b;

    // Ensure we write enough to trigger errors
    for (var n = 0; n < 1000; n++)
    {
        log.Enqueue(payload);
        try
        {
            if (IsAsync(iteratorType))
                await log.CommitAsync();
            else
                log.Commit();
        }
        catch (CommitFailureException)
        {
            // Ignore failure
        }
    }

    // For surviving entries, scan should still work best-effort
    // If endAddress > log.TailAddress then GetAsyncEnumerable() will wait until more entries are added.
    var endAddress = IsAsync(iteratorType) ? log.CommittedUntilAddress : long.MaxValue;
    var recoveredLog = new FasterLog(logSettings);
    using var iter = recoveredLog.Scan(0, endAddress);
    switch (iteratorType)
    {
        case IteratorType.AsyncByteVector:
            await foreach ((byte[] result, int _, long _, long _) in iter.GetAsyncEnumerable())
                Assert.IsTrue(result.SequenceEqual(payload));
            break;
        case IteratorType.AsyncMemoryOwner:
            await foreach ((IMemoryOwner<byte> result, int _, long _, long _) in iter.GetAsyncEnumerable(MemoryPool<byte>.Shared))
            {
                // Pooled buffer may exceed the entry size; compare only the prefix
                Assert.IsTrue(result.Memory.Span.ToArray().Take(payload.Length).SequenceEqual(payload));
                result.Dispose();
            }
            break;
        case IteratorType.Sync:
            while (iter.GetNext(out byte[] result, out _, out _))
                Assert.IsTrue(result.SequenceEqual(payload));
            break;
        default:
            Assert.Fail("Unknown IteratorType");
            break;
    }
    recoveredLog.Dispose();
}
public void FasterLogSimpleFastCommitTest([Values] TestUtils.DeviceType deviceType)
{
    var cookie = new byte[100];
    new Random().NextBytes(cookie);
    var filename = path + "fastCommit" + deviceType.ToString() + ".log";
    device = TestUtils.CreateTestDevice(deviceType, filename, deleteOnClose: true);
    var logSettings = new FasterLogSettings
    {
        LogDevice = device,
        LogChecksum = LogChecksumType.PerEntry,
        LogCommitManager = manager,
        FastCommitMode = true,
        TryRecoverLatest = false
    };
    log = new FasterLog(logSettings);

    var payload = new byte[entryLength];
    for (var b = 0; b < entryLength; b++)
        payload[b] = (byte)b;

    // Enqueues a batch of entries, then issues a strong commit tagged with a
    // fresh random cookie and the given commit number; returns (cookie, tail).
    (byte[] cookie, long tail) CommitBatch(long commitNum)
    {
        for (var n = 0; n < numEntries; n++)
            log.Enqueue(payload);
        var commitCookie = new byte[100];
        new Random().NextBytes(commitCookie);
        var commitSuccessful = log.CommitStrongly(out var tailAddr, out _, true, commitCookie, commitNum);
        Assert.IsTrue(commitSuccessful);
        return (commitCookie, tailAddr);
    }

    var (cookie1, commit1Addr) = CommitBatch(1);
    var (cookie2, commit2Addr) = CommitBatch(2);
    var (cookie6, commit6Addr) = CommitBatch(6);

    // Wait for all metadata writes to be complete to avoid a concurrent access exception
    log.Dispose();
    log = null;

    // be a deviant and remove commit metadata files
    manager.RemoveAllCommits();

    // Recovery should still work
    var recoveredLog = new FasterLog(logSettings);
    recoveredLog.Recover(1);
    Assert.AreEqual(cookie1, recoveredLog.RecoveredCookie);
    Assert.AreEqual(commit1Addr, recoveredLog.TailAddress);
    recoveredLog.Dispose();

    recoveredLog = new FasterLog(logSettings);
    recoveredLog.Recover(2);
    Assert.AreEqual(cookie2, recoveredLog.RecoveredCookie);
    Assert.AreEqual(commit2Addr, recoveredLog.TailAddress);
    recoveredLog.Dispose();

    // Default argument should recover to most recent, if TryRecoverLatest is set
    logSettings.TryRecoverLatest = true;
    recoveredLog = new FasterLog(logSettings);
    Assert.AreEqual(cookie6, recoveredLog.RecoveredCookie);
    Assert.AreEqual(commit6Addr, recoveredLog.TailAddress);
    recoveredLog.Dispose();
}
// Verifies that concurrent Commit() calls in FastCommitMode do not grow the log
// unboundedly: at most one commit record should be appended per user entry.
public void CommitRecordBoundedGrowthTest([Values] TestUtils.DeviceType deviceType)
{
    var cookie = new byte[100];
    new Random().NextBytes(cookie);
    var filename = path + "boundedGrowth" + deviceType.ToString() + ".log";
    device = TestUtils.CreateTestDevice(deviceType, filename, deleteOnClose: true);
    // FastCommitMode embeds commit records in the log itself, which is what this test measures
    var logSettings = new FasterLogSettings { LogDevice = device, LogChecksum = LogChecksumType.PerEntry, LogCommitManager = manager, FastCommitMode = true };
    log = new FasterLog(logSettings);
    // Payload is a fixed byte ramp 0,1,2,...
    byte[] entry = new byte[entryLength];
    for (int i = 0; i < entryLength; i++)
    {
        entry[i] = (byte)i;
    }
    for (int i = 0; i < 5 * numEntries; i++)
    {
        log.Enqueue(entry);
    }
    // for comparison, insert some entries without any commit records
    var referenceTailLength = log.TailAddress;
    var enqueueDone = new ManualResetEventSlim();
    var commitThreads = new List<Thread>();
    // Make sure to not spin up too many commit threads, otherwise we might clog epochs and halt progress
    for (var i = 0; i < Math.Max(1, Environment.ProcessorCount - 1); i++)
    {
        commitThreads.Add(new Thread(() =>
        {
            // Otherwise, absolutely clog the commit pipeline
            while (!enqueueDone.IsSet)
            {
                log.Commit();
            }
        }));
    }
    foreach (var t in commitThreads)
    {
        t.Start();
    }
    // Second batch of enqueues runs concurrently with the committers
    for (int i = 0; i < 5 * numEntries; i++)
    {
        log.Enqueue(entry);
    }
    enqueueDone.Set();
    foreach (var t in commitThreads)
    {
        t.Join();
    }
    // TODO: Hardcoded constant --- if this number changes in FasterLogRecoverInfo, it needs to be updated here too
    var commitRecordSize = 44;
    var logTailGrowth = log.TailAddress - referenceTailLength;
    // Check that we are not growing the log more than one commit record per user entry
    // NOTE(review): logTailGrowth already has referenceTailLength subtracted out, so
    // subtracting it again here effectively loosens the bound by referenceTailLength
    // bytes — confirm whether the intended LHS is logTailGrowth - referenceTailLength
    // (growth beyond the entry payload) or just the commit-record overhead.
    Assert.IsTrue(logTailGrowth - referenceTailLength <= commitRecordSize * 5 * numEntries);
    // Ensure clean shutdown
    log.Commit(true);
}