Example #1
        public void FasterLogShiftTailStressTest()
        {
            // Get an excruciatingly slow storage device to maximize chance of clogging the flush pipeline
            device = new LocalMemoryDevice(1L << 28, 1 << 28, 2, sector_size: 512, latencyMs: 50, fileName: "stress.log");
            var logSettings = new FasterLogSettings {
                LogDevice = device, LogChecksum = LogChecksumType.None, LogCommitManager = manager, SegmentSizeBits = 28
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < 5 * numEntries; i++)
            {
                log.Enqueue(entry);
            }

            // For comparison, record the tail address reached without any commit records
            var referenceTailLength = log.TailAddress;

            var enqueueDone   = new ManualResetEventSlim();
            var commitThreads = new List <Thread>();

            // Make sure to spin up many commit threads to expose lots of interleavings
            for (var i = 0; i < 2 * Math.Max(1, Environment.ProcessorCount - 1); i++)
            {
                commitThreads.Add(new Thread(() =>
                {
                    // Keep committing continuously to clog the commit pipeline
                    while (!enqueueDone.IsSet)
                    {
                        log.Commit();
                    }
                }));
            }

            foreach (var t in commitThreads)
            {
                t.Start();
            }
            for (int i = 0; i < 5 * numEntries; i++)
            {
                log.Enqueue(entry);
            }
            enqueueDone.Set();

            foreach (var t in commitThreads)
            {
                t.Join();
            }

            // We expect the test to finish and not get stuck somewhere

            // Ensure clean shutdown
            log.Commit(true);
        }
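
Note: these snippets reference shared test-fixture state (log, device, manager, entry, entryLength, numEntries, entryFlag, path) declared outside the methods shown. The sketch below illustrates what such a fixture could look like; the field names come from the examples, but the types, sizes, and default values are assumptions rather than the original declarations.

        // Hypothetical fixture fields assumed by the examples (assumes using FASTER.core and System.IO).
        // Actual sizes and setup in the source tests may differ.
        private string path = Path.GetTempPath() + "FasterLogTests/";
        private const int entryLength = 100;              // bytes per log entry
        private const int numEntries = 1000;              // entries enqueued per test
        private const int entryFlag = 9999;               // marker value; note that (byte)9999 == 15
        private readonly byte[] entry = new byte[entryLength];

        private IDevice device;                           // backing storage device for the log
        private FasterLog log;                            // log under test
        private ILogCommitManager manager;                // commit manager used by several tests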
Example #2
        public void FasterLogTest6([Values] LogChecksumType logChecksum)
        {
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device, MemorySizeBits = 20, PageSizeBits = 14, LogChecksum = logChecksum, LogCommitManager = manager
            });
            byte[] data1 = new byte[1000];
            for (int i = 0; i < 100; i++)
            {
                data1[i] = (byte)i;
            }

            for (int i = 0; i < 100; i++)
            {
                log.Enqueue(data1);
            }
            log.RefreshUncommitted();
            Assert.IsTrue(log.SafeTailAddress == log.TailAddress);

            Assert.IsTrue(log.CommittedUntilAddress < log.SafeTailAddress);

            using (var iter = log.Scan(0, long.MaxValue, scanUncommitted: true))
            {
                while (iter.GetNext(out _, out _, out _))
                {
                    log.TruncateUntil(iter.NextAddress);
                }
                Assert.IsTrue(iter.NextAddress == log.SafeTailAddress);
                log.Enqueue(data1);
                Assert.IsFalse(iter.GetNext(out _, out _, out _));
                log.RefreshUncommitted();
                Assert.IsTrue(iter.GetNext(out _, out _, out _));
            }
            log.Dispose();
        }
Example #3
 static void LogWriter(FasterLog log, byte[] entry)
 {
     // Enqueue some entries, then wait for the commit on this separate thread
     log.Enqueue(entry);
     log.Enqueue(entry);
     log.Enqueue(entry);
     log.WaitForCommit(log.TailAddress);
 }
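
WaitForCommit(log.TailAddress) blocks until a commit covering the writer's tail completes, so some other thread must issue commits concurrently. Below is a hedged sketch of how a caller might drive LogWriter while committing from the calling thread; the driver method and its timing are illustrative assumptions, not part of the original sample.

 static void LogWriterUsageSketch(FasterLog log, byte[] entry)
 {
     // Run LogWriter on its own thread; it blocks in WaitForCommit until a commit covers its entries
     var writer = new Thread(() => LogWriter(log, entry));
     writer.Start();

     // Keep committing until the writer observes a commit covering its tail and exits
     while (!writer.Join(TimeSpan.FromMilliseconds(10)))
     {
         log.Commit(true);
     }
 }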
Example #4
        public void PopulateUncommittedLog(FasterLog logUncommitted)
        {
            //****** Populate uncommitted log / device for ScanUncommittedTest
            // Set Default entry data
            for (int j = 0; j < entryLength; j++)
            {
                entry[j] = (byte)j;
            }

            // Enqueue, but modify each entry so that individual entries can be distinguished
            for (int j = 0; j < numEntries; j++)
            {
                // Flag one part of entry data that corresponds to index
                if (j < entryLength)
                {
                    entry[j] = (byte)entryFlag;
                }

                // Put back the previous entry value
                if ((j > 0) && (j < entryLength))
                {
                    entry[j - 1] = (byte)(j - 1);
                }

                // Add to FasterLog
                logUncommitted.Enqueue(entry);
            }

            // Refresh uncommitted entries so the scan can see them - do NOT commit, though
            logUncommitted.RefreshUncommitted(true);
        }
Example #5
        private void FasterLogTest1(LogChecksumType logChecksum, IDevice device, ILogCommitManager logCommitManager)
        {
            log = new FasterLog(new FasterLogSettings {
                PageSizeBits = 20, SegmentSizeBits = 20, LogDevice = device, LogChecksum = logChecksum, LogCommitManager = logCommitManager
            });

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }
            log.Commit(true);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                int count = 0;
                while (iter.GetNext(out byte[] result, out int length, out long currentAddress))
                {
                    count++;
                    Assert.IsTrue(result.SequenceEqual(entry));
                    if (count % 100 == 0)
                    {
                        log.TruncateUntil(iter.NextAddress);
                    }
                }
                Assert.IsTrue(count == numEntries);
            }

            log.Dispose();
        }
Example #6
        public void PopulateLog(FasterLog log)
        {
            //****** Populate log for Basic data for tests
            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Enqueue, but modify each entry so that individual entries can be distinguished
            for (int i = 0; i < numEntries; i++)
            {
                // Flag one part of entry data that corresponds to index
                if (i < entryLength)
                {
                    entry[i] = (byte)entryFlag;
                }

                // Put back the previous entry value
                if ((i > 0) && (i < entryLength))
                {
                    entry[i - 1] = (byte)(i - 1);
                }

                // Add to FasterLog
                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);
        }
Example #7
        public void DeviceAndLogConfig()
        {
            int entryLength = 10;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);

            // Verify
            Assert.IsTrue(File.Exists(path + "/log-commits/commit.0.0"));
            Assert.IsTrue(File.Exists(path + "/DeviceConfig.0"));

            // Read the log just to verify can actually read it
            int currentEntry = 0;

            using (var iter = log.Scan(0, 100_000_000))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    Assert.IsTrue(result[currentEntry] == currentEntry, "Fail - Result[" + currentEntry.ToString() + "]: is not the same as " + currentEntry.ToString());

                    currentEntry++;
                }
            }
        }
Example #8
        public void BasicHighLatencyDeviceTest()
        {
            TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true);

            // Create devices \ log for test for in memory device
            using LocalMemoryDevice device = new LocalMemoryDevice(1L << 28, 1L << 25, 2, latencyMs: 20);
            using FasterLog LocalMemorylog = new FasterLog(new FasterLogSettings { LogDevice = device, PageSizeBits = 80, MemorySizeBits = 20, GetMemory = null, SegmentSizeBits = 80, MutableFraction = 0.2, LogCommitManager = null });

            int entryLength = 10;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
                LocalMemorylog.Enqueue(entry);
            }

            // Commit to the log
            LocalMemorylog.Commit(true);

            // Read the log just to verify was actually committed
            int currentEntry = 0;

            using (var iter = LocalMemorylog.Scan(0, 100_000_000))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    Assert.IsTrue(result[currentEntry] == currentEntry, "Fail - Result[" + currentEntry.ToString() + "]: is not the same as " + currentEntry.ToString());
                    currentEntry++;
                }
            }
        }
Example #9
        public void ManagedLocalStoreFullParamsTest()
        {
            int entryLength = 10;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
                logFullParams.Enqueue(entry);
            }

            // Commit to the log
            logFullParams.Commit(true);

            // Verify
            Assert.IsTrue(File.Exists(path + "/log-commits/commit.1.0"));
            Assert.IsTrue(File.Exists(path + "/ManagedLocalStore.log.0"));

            // Read the log just to verify can actually read it
            int currentEntry = 0;

            using var iter = logFullParams.Scan(0, 100_000_000);
            while (iter.GetNext(out byte[] result, out _, out _))
            {
                Assert.AreEqual(currentEntry, result[currentEntry]);
                currentEntry++;
            }
        }
Example #10
        public void FasterLogTest1([Values] LogChecksumType logChecksum)
        {
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device, LogChecksum = logChecksum
            });

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }
            log.Commit(true);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                int count = 0;
                while (iter.GetNext(out byte[] result, out int length))
                {
                    count++;
                    Assert.IsTrue(result.SequenceEqual(entry));
                    if (count % 100 == 0)
                    {
                        log.TruncateUntil(iter.CurrentAddress);
                    }
                }
                Assert.IsTrue(count == numEntries);
            }

            log.Dispose();
        }
Example #11
        public async ValueTask FlakyLogTestCleanFailure([Values] bool isAsync)
        {
            var errorOptions = new ErrorSimulationOptions
            {
                readTransientErrorRate  = 0,
                readPermanentErrorRate  = 0,
                writeTransientErrorRate = 0,
                writePermanentErrorRate = 0.1,
            };

            device = new SimulatedFlakyDevice(Devices.CreateLogDevice(path + "fasterlog.log", deleteOnClose: true),
                                              errorOptions);
            var logSettings = new FasterLogSettings
            {
                LogDevice = device, LogChecksum = LogChecksumType.PerEntry, LogCommitManager = manager
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            try
            {
                // Ensure we execute long enough to trigger errors
                for (int j = 0; j < 100; j++)
                {
                    for (int i = 0; i < numEntries; i++)
                    {
                        log.Enqueue(entry);
                    }

                    if (isAsync)
                    {
                        await log.CommitAsync();
                    }
                    else
                    {
                        log.Commit();
                    }
                }
            }
            catch (CommitFailureException e)
            {
                var errorRangeStart = e.LinkedCommitInfo.CommitInfo.FromAddress;
                Assert.LessOrEqual(log.CommittedUntilAddress, errorRangeStart);
                Assert.LessOrEqual(log.FlushedUntilAddress, errorRangeStart);
                return;
            }

            // Should not ignore failures
            Assert.Fail();
        }
Example #12
        private async Task ProducerAsync(FasterLog log, CancellationTokenSource cts)
        {
            for (var i = 0L; i < NumElements; ++i)
            {
                log.Enqueue(Encoding.UTF8.GetBytes(i.ToString()));
                log.RefreshUncommitted();
                await Task.Delay(TimeSpan.FromMilliseconds(ProducerPauseMs));
            }
            await Task.Delay(TimeSpan.FromMilliseconds(CommitPeriodMs * 4));

            cts.Cancel();
        }
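
The producer above only enqueues and refreshes; it assumes a consumer is scanning uncommitted entries on the same log. A minimal consumer sketch under that assumption follows; the method name, output, and cancellation handling are illustrative, the scanUncommitted flag follows Example #2, the tuple shape follows Example #19, and GetAsyncEnumerable is assumed here to accept a CancellationToken.

        private async Task ConsumerAsyncSketch(FasterLog log, CancellationTokenSource cts)
        {
            try
            {
                using var iter = log.Scan(log.BeginAddress, long.MaxValue, scanUncommitted: true);

                // Entries become visible to this scan once the producer calls RefreshUncommitted
                await foreach ((byte[] result, int length, long _, long _) in iter.GetAsyncEnumerable(cts.Token))
                {
                    Console.WriteLine($"Consumed {Encoding.UTF8.GetString(result, 0, length)}");
                }
            }
            catch (OperationCanceledException) { }
        }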
Example #13
        private async Task ProducerAsync(FasterLog log, CancellationTokenSource cts)
        {
            for (var i = 0L; i < NumElements; ++i)
            {
                log.Enqueue(Encoding.UTF8.GetBytes(i.ToString()));
                log.RefreshUncommitted();
                await Task.Delay(TimeSpan.FromMilliseconds(ProducerPauseMs));
            }
            // Ensure the reader had time to see all data
            await done.WaitAsync();

            cts.Cancel();
        }
Example #14
        static async Task ProducerAsync(FasterLog log, CancellationToken cancellationToken)
        {
            var i = 0L;

            while (!cancellationToken.IsCancellationRequested)
            {
                log.Enqueue(Encoding.UTF8.GetBytes(i.ToString()));
                log.RefreshUncommitted(true);

                i++;

                await Task.Delay(TimeSpan.FromMilliseconds(10));
            }
        }
Example #15
        static void ScanThread()
        {
            Random r = new Random();

            byte[] result;

            using (iter = log.Scan(log.BeginAddress, long.MaxValue))
            {
                while (true)
                {
                    while (!iter.GetNext(out result, out int length))
                    {
                        // For finite end address, check if iteration ended
                        // if (iter.CurrentAddress >= endAddress) return;
                        iter.WaitAsync().GetAwaiter().GetResult();
                    }

                    // Memory pool variant:
                    // iter.GetNext(pool, out IMemoryOwner<byte> resultMem, out int length)

                    if (Different(result, staticEntry, out int location))
                    {
                        throw new Exception("Invalid entry found");
                    }

                    // Re-insert entry with small probability
                    if (r.Next(100) < 10)
                    {
                        log.Enqueue(result);
                    }

                    // Example of random read from given address
                    // (result, _) = log.ReadAsync(iter.CurrentAddress).GetAwaiter().GetResult();

                    // Truncate log until after recently processed entry
                    log.TruncateUntil(iter.NextAddress);

                    // Safer truncate variant: truncate until start of page
                    // log.TruncateUntilPageStart(iter.NextAddress);
                }
            }

            // Example of recoverable (named) iterator:
            // using (iter = log.Scan(log.BeginAddress, long.MaxValue, "foo"))
        }
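
ScanThread above uses a Different helper and a staticEntry field that are not included in this excerpt. The sketch below shows one plausible implementation of the comparison, assuming it reports the first index at which the scanned entry diverges from the reference entry; the real helper in the sample may differ.

        // Hypothetical comparison helper; returns true and the first mismatching index if the entries differ
        static bool Different(byte[] scanned, byte[] reference, out int location)
        {
            location = -1;
            if (scanned.Length != reference.Length)
            {
                location = Math.Min(scanned.Length, reference.Length);
                return true;
            }
            for (int i = 0; i < scanned.Length; i++)
            {
                if (scanned[i] != reference[i])
                {
                    location = i;
                    return true;
                }
            }
            return false;
        }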
Example #16
        static async Task ProducerAsync(FasterLog log, CancellationToken cancellationToken)
        {
            try
            {
                var i = 0L;
                while (!cancellationToken.IsCancellationRequested)
                {
                    // Console.WriteLine($"Producing {i}");

                    log.Enqueue(Encoding.UTF8.GetBytes(i.ToString()));
                    log.RefreshUncommitted();

                    i++;

                    await Task.Delay(TimeSpan.FromMilliseconds(10));
                }
            }
            catch (OperationCanceledException) { }
            Console.WriteLine("Producer complete");
        }
Example #17
        public async Task FasterLogTest4([Values] LogChecksumType logChecksum)
        {
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device, PageSizeBits = 14, LogChecksum = logChecksum
            });
            byte[] data1 = new byte[100];
            for (int i = 0; i < 100; i++)
            {
                data1[i] = (byte)i;
            }

            for (int i = 0; i < 100; i++)
            {
                log.Enqueue(data1);
            }

            Assert.IsTrue(log.CommittedUntilAddress == log.BeginAddress);
            await log.CommitAsync();

            Assert.IsTrue(log.CommittedUntilAddress == log.TailAddress);
            Assert.IsTrue(log.CommittedBeginAddress == log.BeginAddress);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                // Should read the "hole" and return false
                var iterResult = iter.GetNext(out byte[] entry, out _, out _);
                log.TruncateUntil(iter.NextAddress);

                Assert.IsTrue(log.CommittedUntilAddress == log.TailAddress);
                Assert.IsTrue(log.CommittedBeginAddress < log.BeginAddress);
                Assert.IsTrue(iter.NextAddress == log.BeginAddress);

                await log.CommitAsync();

                Assert.IsTrue(log.CommittedUntilAddress == log.TailAddress);
                Assert.IsTrue(log.CommittedBeginAddress == log.BeginAddress);
            }
            log.Dispose();
        }
Example #18
        public void ManagedLocalStoreBasicTest()
        {
            int  entryLength       = 20;
            int  numEntries        = 1000;
            int  numEnqueueThreads = 1;
            int  numIterThreads    = 1;
            bool commitThread      = false;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            bool disposeCommitThread = false;
            var  commit =
                new Thread(() =>
            {
                while (!disposeCommitThread)
                {
                    log.Commit(true);
                }
            });

            if (commitThread)
            {
                commit.Start();
            }

            Thread[] th = new Thread[numEnqueueThreads];
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t] =
                    new Thread(() =>
                {
                    // Enqueue, but modify each entry so that individual entries can be distinguished
                    for (int i = 0; i < numEntries; i++)
                    {
                        // Flag one part of entry data that corresponds to index
                        entry[0] = (byte)i;

                        // Enqueue adds the byte payload as-is, so nothing else needs to be done with the entry
                        log.Enqueue(entry);
                    }
                });
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Start();
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Join();
            }

            if (commitThread)
            {
                disposeCommitThread = true;
                commit.Join();
            }

            // Final commit to the log
            log.Commit(true);

            int currentEntry = 0;

            Thread[] th2 = new Thread[numIterThreads];
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t] =
                    new Thread(() =>
                {
                    // Read the log - look for the flag so we know each entry is unique
                    using (var iter = log.Scan(0, long.MaxValue))
                    {
                        while (iter.GetNext(out byte[] result, out _, out _))
                        {
                            if (numEnqueueThreads == 1)
                            {
                                Assert.AreEqual((byte)currentEntry, result[0]);
                            }
                            currentEntry++;
                        }
                    }

                    Assert.AreEqual(numEntries * numEnqueueThreads, currentEntry);
                });
            }

            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Start();
            }
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Join();
            }

            // Make sure the number of entries matches what was read back - this also ensures data verification was not skipped
            Assert.AreEqual(numEntries, currentEntry);
        }
Example #19
        public async ValueTask FlakyLogTestTolerateFailure([Values] IteratorType iteratorType)
        {
            var errorOptions = new ErrorSimulationOptions
            {
                readTransientErrorRate  = 0,
                readPermanentErrorRate  = 0,
                writeTransientErrorRate = 0,
                writePermanentErrorRate = 0.1,
            };

            device = new SimulatedFlakyDevice(Devices.CreateLogDevice(path + "fasterlog.log", deleteOnClose: true),
                                              errorOptions);
            var logSettings = new FasterLogSettings
            {
                LogDevice = device, LogChecksum = LogChecksumType.PerEntry, LogCommitManager = manager, TolerateDeviceFailure = true
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Ensure we write enough to trigger errors
            for (int i = 0; i < 1000; i++)
            {
                log.Enqueue(entry);
                try
                {
                    if (IsAsync(iteratorType))
                    {
                        await log.CommitAsync();
                    }
                    else
                    {
                        log.Commit();
                    }
                }
                catch (CommitFailureException)
                {
                    // Ignore failure
                }
            }

            // For surviving entries, scan should still work best-effort
            // If endAddress > log.TailAddress then GetAsyncEnumerable() will wait until more entries are added.
            var endAddress   = IsAsync(iteratorType) ? log.CommittedUntilAddress : long.MaxValue;
            var recoveredLog = new FasterLog(logSettings);

            using var iter = recoveredLog.Scan(0, endAddress);
            switch (iteratorType)
            {
            case IteratorType.AsyncByteVector:
                await foreach ((byte[] result, int _, long _, long nextAddress) in iter.GetAsyncEnumerable())
                {
                    Assert.IsTrue(result.SequenceEqual(entry));
                }
                break;

            case IteratorType.AsyncMemoryOwner:
                await foreach ((IMemoryOwner <byte> result, int _, long _, long nextAddress) in iter.GetAsyncEnumerable(MemoryPool <byte> .Shared))
                {
                    Assert.IsTrue(result.Memory.Span.ToArray().Take(entry.Length).SequenceEqual(entry));
                    result.Dispose();
                }
                break;

            case IteratorType.Sync:
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    Assert.IsTrue(result.SequenceEqual(entry));
                }
                break;

            default:
                Assert.Fail("Unknown IteratorType");
                break;
            }
            recoveredLog.Dispose();
        }
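
Several of these tests branch on IsAsync(iteratorType), a helper that is not part of this excerpt. A minimal sketch of what it presumably checks, assuming the IteratorType enum has exactly the three members used above (AsyncByteVector, AsyncMemoryOwner, Sync):

        // Assumed helper: treat the two async enumeration modes as "async", everything else as sync
        internal static bool IsAsync(IteratorType iterType)
            => iterType == IteratorType.AsyncByteVector || iterType == IteratorType.AsyncMemoryOwner;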
Example #20
        public void FlakyLogTestConcurrentWriteFailure()
        {
            var errorOptions = new ErrorSimulationOptions
            {
                readTransientErrorRate  = 0,
                readPermanentErrorRate  = 0,
                writeTransientErrorRate = 0,
                writePermanentErrorRate = 0.05,
            };

            device = new SimulatedFlakyDevice(Devices.CreateLogDevice(path + "fasterlog.log", deleteOnClose: true),
                                              errorOptions);
            var logSettings = new FasterLogSettings
            {
                LogDevice = device, LogChecksum = LogChecksumType.PerEntry, LogCommitManager = manager
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            var         failureList = new List <CommitFailureException>();
            ThreadStart runTask     = () =>
            {
                var random = new Random();
                try
                {
                    // Ensure we execute long enough to trigger errors
                    for (int j = 0; j < 100; j++)
                    {
                        for (int i = 0; i < numEntries; i++)
                        {
                            log.Enqueue(entry);
                            // create randomly interleaved concurrent writes
                            if (random.NextDouble() < 0.1)
                            {
                                log.Commit();
                            }
                        }
                    }
                }
                catch (CommitFailureException e)
                {
                    lock (failureList)
                        failureList.Add(e);
                }
            };

            var threads = new List <Thread>();

            for (var i = 0; i < Environment.ProcessorCount + 1; i++)
            {
                var t = new Thread(runTask);
                t.Start();
                threads.Add(t);
            }

            foreach (var thread in threads)
            {
                thread.Join();
            }

            // Every thread observed the failure
            Assert.IsTrue(failureList.Count == threads.Count);
            // They all observed the same failure
            foreach (var failure in failureList)
            {
                Assert.AreEqual(failure.LinkedCommitInfo.CommitInfo, failureList[0].LinkedCommitInfo.CommitInfo);
            }
        }
Example #21
        public void ManagedLocalStoreBasicTest()
        {
            int entryLength       = 20;
            int numEntries        = 1000;
            int numEnqueueThreads = 1;
            int numIterThreads    = 1;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            Thread[] th = new Thread[numEnqueueThreads];
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t] =
                    new Thread(() =>
                {
                    // Enqueue, but modify each entry so that individual entries can be distinguished
                    for (int i = 0; i < numEntries; i++)
                    {
                        // Flag one part of entry data that corresponds to index
                        entry[0] = (byte)i;

                        // Enqueue adds the byte payload as-is, so nothing else needs to be done with the entry
                        log.Enqueue(entry);
                    }
                });
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Start();
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Join();
            }

            // Commit to the log
            log.Commit(true);

            // flag to make sure data has been checked
            bool datacheckrun = false;

            Thread[] th2 = new Thread[numIterThreads];
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t] =
                    new Thread(() =>
                {
                    // Read the log - look for the flag so we know each entry is unique
                    int currentEntry = 0;
                    using (var iter = log.Scan(0, long.MaxValue))
                    {
                        while (iter.GetNext(out byte[] result, out _, out _))
                        {
                            // Set check flag to show we got in here
                            datacheckrun = true;

                            if (numEnqueueThreads == 1)
                            {
                                Assert.IsTrue(result[0] == (byte)currentEntry, "Fail - Result[" + currentEntry.ToString() + "]:" + result[0].ToString());
                            }
                            currentEntry++;
                        }
                    }

                    Assert.IsTrue(currentEntry == numEntries * numEnqueueThreads);
                });
            }

            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Start();
            }
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Join();
            }

            // If data verification was skipped, fail the test
            if (datacheckrun == false)
            {
                Assert.Fail("Failure -- data loop after log.Scan was never entered, so data wasn't verified.");
            }
        }
Example #22
        public void CommitRecordBoundedGrowthTest([Values] TestUtils.DeviceType deviceType)
        {
            var cookie = new byte[100];

            new Random().NextBytes(cookie);

            var filename = path + "boundedGrowth" + deviceType.ToString() + ".log";

            device = TestUtils.CreateTestDevice(deviceType, filename, deleteOnClose: true);
            var logSettings = new FasterLogSettings {
                LogDevice = device, LogChecksum = LogChecksumType.PerEntry, LogCommitManager = manager, FastCommitMode = true
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < 5 * numEntries; i++)
            {
                log.Enqueue(entry);
            }

            // For comparison, record the tail address reached without any commit records
            var referenceTailLength = log.TailAddress;

            var enqueueDone   = new ManualResetEventSlim();
            var commitThreads = new List <Thread>();

            // Make sure to not spin up too many commit threads, otherwise we might clog epochs and halt progress
            for (var i = 0; i < Math.Max(1, Environment.ProcessorCount - 1); i++)
            {
                commitThreads.Add(new Thread(() =>
                {
                    // Otherwise, absolutely clog the commit pipeline
                    while (!enqueueDone.IsSet)
                    {
                        log.Commit();
                    }
                }));
            }

            foreach (var t in commitThreads)
            {
                t.Start();
            }
            for (int i = 0; i < 5 * numEntries; i++)
            {
                log.Enqueue(entry);
            }
            enqueueDone.Set();

            foreach (var t in commitThreads)
            {
                t.Join();
            }


            // TODO: Hardcoded constant --- if this number changes in FasterLogRecoverInfo, it needs to be updated here too
            var commitRecordSize = 44;
            var logTailGrowth    = log.TailAddress - referenceTailLength;

            // Check that we are not growing the log more than one commit record per user entry
            Assert.IsTrue(logTailGrowth - referenceTailLength <= commitRecordSize * 5 * numEntries);

            // Ensure clean shutdown
            log.Commit(true);
        }
Example #23
        public void Setup()
        {
            // Clean up log files from previous test runs in case they weren't cleaned up
            try { new DirectoryInfo(path).Delete(true); }
            catch {}

            // Set up the Devices \ logs
            device = Devices.CreateLogDevice(path + "LogScan", deleteOnClose: true);
            log    = new FasterLog(new FasterLogSettings {
                LogDevice = device
            });
            deviceUnCommitted = Devices.CreateLogDevice(path + "LogScanUncommitted", deleteOnClose: true);
            logUncommitted    = new FasterLog(new FasterLogSettings {
                LogDevice = deviceUnCommitted
            });

            //****** Populate log for Basic data for tests
            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Enqueue, but modify each entry so that individual entries can be distinguished
            for (int i = 0; i < numEntries; i++)
            {
                // Flag one part of entry data that corresponds to index
                if (i < entryLength)
                {
                    entry[i] = (byte)entryFlag;
                }

                // Put back the previous entry value
                if ((i > 0) && (i < entryLength))
                {
                    entry[i - 1] = (byte)(i - 1);
                }

                // Add to FasterLog
                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);


            //****** Populate uncommitted log / device for ScanUncommittedTest
            // Set Default entry data
            for (int j = 0; j < entryLength; j++)
            {
                entry[j] = (byte)j;
            }

            // Enqueue, but modify each entry so that individual entries can be distinguished
            for (int j = 0; j < numEntries; j++)
            {
                // Flag one part of entry data that corresponds to index
                if (j < entryLength)
                {
                    entry[j] = (byte)entryFlag;
                }

                // Put back the previous entry value
                if ((j > 0) && (j < entryLength))
                {
                    entry[j - 1] = (byte)(j - 1);
                }

                // Add to FasterLog
                logUncommitted.Enqueue(entry);
            }

            // Refresh uncommitted entries so the scan can see them - do NOT commit, though
            logUncommitted.RefreshUncommitted(true);
        }
Example #24
        public void ManagedLocalStoreBasicTest()
        {
            int entryLength = 20;
            int numEntries  = 1000;
            int entryFlag   = 9999;


            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Enqueue, but modify each entry so that individual entries can be distinguished
            for (int i = 0; i < numEntries; i++)
            {
                // Flag one part of entry data that corresponds to index
                if (i < entryLength)
                {
                    entry[i] = (byte)entryFlag;
                }

                // Put back the previous entry value
                if ((i > 0) && (i < entryLength))
                {
                    entry[i - 1] = (byte)(i - 1);
                }

                // Enqueue adds the byte payload as-is, so nothing else needs to be done with the entry
                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);

            // flag to make sure data has been checked
            bool datacheckrun = false;

            // Read the log - look for the flag so we know each entry is unique
            int currentEntry = 0;

            using (var iter = log.Scan(0, 100_000_000))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    if (currentEntry < entryLength)
                    {
                        // Set check flag to show we got in here
                        datacheckrun = true;

                        Assert.IsTrue(result[currentEntry] == (byte)entryFlag, "Fail - Result[" + currentEntry.ToString() + "]:" + result[currentEntry].ToString() + "  entryFlag:" + entryFlag);

                        currentEntry++;
                    }
                }
            }

            // If data verification was skipped, fail the test
            if (datacheckrun == false)
            {
                Assert.Fail("Failure -- data loop after log.Scan was never entered, so data wasn't verified.");
            }
        }
Example #25
        public void LogReadAsyncBasicTest([Values] ParameterDefaultsIteratorType iteratorType)
        {
            int entryLength = 100;
            int numEntries  = 1000000;
            int entryFlag   = 9999;

            byte[] entry = new byte[entryLength];

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Enqueue, but modify each entry so that individual entries can be distinguished
            for (int i = 0; i < numEntries; i++)
            {
                // Flag one part of entry data that corresponds to index
                if (i < entryLength)
                {
                    entry[i] = (byte)entryFlag;
                }

                // Put back the previous entry value
                if ((i > 0) && (i < entryLength))
                {
                    entry[i - 1] = (byte)(i - 1);
                }

                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);

            // Read one entry based on different parameters for AsyncReadOnly and verify
            switch (iteratorType)
            {
            case ParameterDefaultsIteratorType.DefaultParams:
                // Read one entry and verify
                var record       = log.ReadAsync(log.BeginAddress);
                var foundFlagged = record.Result.Item1[0];    // 15
                var foundEntry   = record.Result.Item1[1];    // 1
                var foundTotal   = record.Result.Item2;

                Assert.IsTrue(foundFlagged == (byte)entryFlag, "Fail reading data - Found Flagged Entry:" + foundFlagged.ToString() + "  Expected Flagged entry:" + entryFlag);
                Assert.IsTrue(foundEntry == 1, "Fail reading data - Found Normal Entry:" + foundEntry.ToString() + "  Expected Value: 1");
                Assert.IsTrue(foundTotal == 100, "Fail reading data - Found Total:" + foundTotal.ToString() + "  Expected Total: 100");

                break;

            case ParameterDefaultsIteratorType.LengthParam:
                // Read one entry and verify
                record       = log.ReadAsync(log.BeginAddress, 208);
                foundFlagged = record.Result.Item1[0];    // 15
                foundEntry   = record.Result.Item1[1];    // 1
                foundTotal   = record.Result.Item2;

                Assert.IsTrue(foundFlagged == (byte)entryFlag, "Fail reading data - Found Flagged Entry:" + foundFlagged.ToString() + "  Expected Flagged entry:" + entryFlag);
                Assert.IsTrue(foundEntry == 1, "Fail reading data - Found Normal Entry:" + foundEntry.ToString() + "  Expected Value: 1");
                Assert.IsTrue(foundTotal == 100, "Fail reading data - Found Total:" + foundTotal.ToString() + "  Expected Total: 100");

                break;

            case ParameterDefaultsIteratorType.TokenParam:
                var cts = new CancellationToken();

                // Read one entry and verify
                record       = log.ReadAsync(log.BeginAddress, 104, cts);
                foundFlagged = record.Result.Item1[0];    // 15
                foundEntry   = record.Result.Item1[1];    // 1
                foundTotal   = record.Result.Item2;

                Assert.IsTrue(foundFlagged == (byte)entryFlag, "Fail reading data - Found Flagged Entry:" + foundFlagged.ToString() + "  Expected Flagged entry:" + entryFlag);
                Assert.IsTrue(foundEntry == 1, "Fail reading data - Found Normal Entry:" + foundEntry.ToString() + "  Expected Value: 1");
                Assert.IsTrue(foundTotal == 100, "Fail reading data - Found Total:" + foundTotal.ToString() + "  Expected Total: 100");

                break;

            default:
                Assert.Fail("Unknown ParameterDefaultsIteratorType");
                break;
            }
        }
Example #26
        public void LogReadAsyncBasicTest([Values] ParameterDefaultsIteratorType iteratorType, [Values] TestUtils.DeviceType deviceType)
        {
            int    entryLength = 20;
            int    numEntries  = 500;
            int    entryFlag   = 9999;
            string filename    = path + "LogReadAsync" + deviceType.ToString() + ".log";

            device = TestUtils.CreateTestDevice(deviceType, filename);
            log    = new FasterLog(new FasterLogSettings {
                LogDevice = device, SegmentSizeBits = 22, LogCommitDir = path
            });

            byte[] entry = new byte[entryLength];

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Enqueue, but modify each entry so that individual entries can be distinguished
            for (int i = 0; i < numEntries; i++)
            {
                // Flag one part of entry data that corresponds to index
                if (i < entryLength)
                {
                    entry[i] = (byte)entryFlag;
                }

                // Put back the previous entry value
                if ((i > 0) && (i < entryLength))
                {
                    entry[i - 1] = (byte)(i - 1);
                }

                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);


            // Read one entry based on different parameters for AsyncReadOnly and verify
            switch (iteratorType)
            {
            case ParameterDefaultsIteratorType.DefaultParams:
                // Read one entry and verify
                var record       = log.ReadAsync(log.BeginAddress);
                var foundFlagged = record.Result.Item1[0];    // 15
                var foundEntry   = record.Result.Item1[1];    // 1
                var foundTotal   = record.Result.Item2;

                Assert.AreEqual((byte)entryFlag, foundFlagged, $"Fail reading Flagged Entry");
                Assert.AreEqual(1, foundEntry, $"Fail reading Normal Entry");
                Assert.AreEqual(entryLength, foundTotal, $"Fail reading Total");

                break;

            case ParameterDefaultsIteratorType.LengthParam:
                // Read one entry and verify
                record       = log.ReadAsync(log.BeginAddress, 208);
                foundFlagged = record.Result.Item1[0];    // 15
                foundEntry   = record.Result.Item1[1];    // 1
                foundTotal   = record.Result.Item2;

                Assert.AreEqual((byte)entryFlag, foundFlagged, $"Fail reading Flagged Entry");
                Assert.AreEqual(1, foundEntry, $"Fail reading Normal Entry");
                Assert.AreEqual(entryLength, foundTotal, $"Fail reading Total");

                break;

            case ParameterDefaultsIteratorType.TokenParam:
                var cts = new CancellationToken();

                // Read one entry and verify
                record       = log.ReadAsync(log.BeginAddress, 104, cts);
                foundFlagged = record.Result.Item1[0];    // 15
                foundEntry   = record.Result.Item1[1];    // 1
                foundTotal   = record.Result.Item2;

                Assert.AreEqual((byte)entryFlag, foundFlagged, $"Fail reading Flagged Entry");
                Assert.AreEqual(1, foundEntry, $"Fail reading Normal Entry");
                Assert.AreEqual(entryLength, foundTotal, $"Fail reading Total");

                // Read one entry as IMemoryOwner and verify
                var recordMemoryOwner = log.ReadAsync(log.BeginAddress, MemoryPool <byte> .Shared, 104, cts);
                var foundFlaggedMem   = recordMemoryOwner.Result.Item1.Memory.Span[0];  // 15
                var foundEntryMem     = recordMemoryOwner.Result.Item1.Memory.Span[1];  // 1
                var foundTotalMem     = recordMemoryOwner.Result.Item2;

                Assert.IsTrue(foundFlagged == foundFlaggedMem, $"MemoryPool-based ReadAsync result does not match that of the byte array one. value: {foundFlaggedMem} expected: {foundFlagged}");
                Assert.IsTrue(foundEntry == foundEntryMem, $"MemoryPool-based ReadAsync result does not match that of the byte array one. value: {foundEntryMem} expected: {foundEntry}");
                Assert.IsTrue(foundTotal == foundTotalMem, $"MemoryPool-based ReadAsync result does not match that of the byte array one. value: {foundTotalMem} expected: {foundTotal}");

                break;

            default:
                Assert.Fail("Unknown ParameterDefaultsIteratorType");
                break;
            }
        }
Example #27
        private async ValueTask FasterLogTest1(LogChecksumType logChecksum, IDevice device, ILogCommitManager logCommitManager, FasterLogTests.IteratorType iteratorType)
        {
            var logSettings = new FasterLogSettings {
                PageSizeBits = 20, SegmentSizeBits = 20, LogDevice = device, LogChecksum = logChecksum, LogCommitManager = logCommitManager
            };

            log = FasterLogTests.IsAsync(iteratorType) ? await FasterLog.CreateAsync(logSettings) : new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }
            log.Commit(true);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                var counter = new FasterLogTests.Counter(log);

                switch (iteratorType)
                {
                case FasterLogTests.IteratorType.AsyncByteVector:
                    await foreach ((byte[] result, _, _, long nextAddress) in iter.GetAsyncEnumerable())
                    {
                        Assert.IsTrue(result.SequenceEqual(entry));
                        counter.IncrementAndMaybeTruncateUntil(nextAddress);

                        // MoveNextAsync() would hang at TailAddress, waiting for more entries (that we don't add).
                        // Note: If this happens and the test has to be canceled, there may be a leftover blob from the log.Commit(), because
                        // the log device isn't Dispose()d; the symptom is currently a numeric string format error in DefaultCheckpointNamingScheme.
                        if (nextAddress == log.TailAddress)
                        {
                            break;
                        }
                    }
                    break;

                case FasterLogTests.IteratorType.AsyncMemoryOwner:
                    await foreach ((IMemoryOwner <byte> result, int _, long _, long nextAddress) in iter.GetAsyncEnumerable(MemoryPool <byte> .Shared))
                    {
                        Assert.IsTrue(result.Memory.Span.ToArray().Take(entry.Length).SequenceEqual(entry));
                        result.Dispose();
                        counter.IncrementAndMaybeTruncateUntil(nextAddress);

                        // MoveNextAsync() would hang at TailAddress, waiting for more entries (that we don't add).
                        // Note: If this happens and the test has to be canceled, there may be a leftover blob from the log.Commit(), because
                        // the log device isn't Dispose()d; the symptom is currently a numeric string format error in DefaultCheckpointNamingScheme.
                        if (nextAddress == log.TailAddress)
                        {
                            break;
                        }
                    }
                    break;

                case FasterLogTests.IteratorType.Sync:
                    while (iter.GetNext(out byte[] result, out _, out _))
                    {
                        Assert.IsTrue(result.SequenceEqual(entry));
                        counter.IncrementAndMaybeTruncateUntil(iter.NextAddress);
                    }
                    break;

                default:
                    Assert.Fail("Unknown IteratorType");
                    break;
                }
                Assert.IsTrue(counter.count == numEntries);
            }

            log.Dispose();
        }
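
Example #27 relies on a FasterLogTests.Counter helper that is not shown here. Judging from its use (incremented once per entry, occasionally truncating the log), a plausible sketch is given below, assuming the same truncate-every-100-entries policy used in Example #5; the actual helper may differ.

        // Assumed helper: count processed entries and occasionally truncate the log
        internal class Counter
        {
            internal int count;
            private readonly FasterLog log;

            internal Counter(FasterLog fasterLog)
            {
                this.count = 0;
                this.log = fasterLog;
            }

            internal void IncrementAndMaybeTruncateUntil(long nextAddr)
            {
                this.count++;
                // Assumed policy: truncate every 100 processed entries, as in Example #5
                if (this.count % 100 == 0)
                {
                    this.log.TruncateUntil(nextAddr);
                }
            }
        }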
Example #28
        public static void ManagedLocalStoreBasicTest()
        {
            int  entryLength       = 20;
            int  numEntries        = 500_000;
            int  numEnqueueThreads = 1;
            int  numIterThreads    = 1;
            bool commitThread      = false;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            bool disposeCommitThread = false;
            var  commit =
                new Thread(() =>
            {
                while (!disposeCommitThread)
                {
                    Thread.Sleep(10);
                    log.Commit(true);
                }
            });

            if (commitThread)
            {
                commit.Start();
            }

            Thread[] th = new Thread[numEnqueueThreads];
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t] =
                    new Thread(() =>
                {
                    // Enqueue, but modify each entry so that individual entries can be distinguished
                    for (int i = 0; i < numEntries; i++)
                    {
                        // Flag one part of entry data that corresponds to index
                        entry[0] = (byte)i;

                        // Enqueue adds the byte payload as-is, so nothing else needs to be done with the entry
                        log.Enqueue(entry);
                    }
                });
            }

            Console.WriteLine("Populating log...");
            var sw = Stopwatch.StartNew();

            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Start();
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Join();
            }

            sw.Stop();
            Console.WriteLine($"{numEntries} items enqueued to the log by {numEnqueueThreads} threads in {sw.ElapsedMilliseconds} ms");

            if (commitThread)
            {
                disposeCommitThread = true;
                commit.Join();
            }

            // Final commit to the log
            log.Commit(true);

            // flag to make sure data has been checked
            bool datacheckrun = false;

            Thread[] th2 = new Thread[numIterThreads];
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t] =
                    new Thread(() =>
                {
                    // Read the log - look for the flag so we know each entry is unique
                    int currentEntry = 0;
                    using (var iter = log.Scan(0, long.MaxValue))
                    {
                        while (iter.GetNext(out byte[] result, out _, out _))
                        {
                            // Set check flag to show we got in here
                            datacheckrun = true;

                            if (numEnqueueThreads == 1)
                            {
                                if (result[0] != (byte)currentEntry)
                                {
                                    throw new Exception("Fail - Result[" + currentEntry.ToString() + "]:" + result[0].ToString());
                                }
                            }
                            currentEntry++;
                        }
                    }

                    if (currentEntry != numEntries * numEnqueueThreads)
                    {
                        throw new Exception("Error");
                    }
                });
            }

            sw.Restart();

            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Start();
            }
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Join();
            }

            sw.Stop();
            Console.WriteLine($"{numEntries} items iterated in the log by {numIterThreads} threads in {sw.ElapsedMilliseconds} ms");

            // If data verification was skipped, fail
            if (datacheckrun == false)
            {
                throw new Exception("Failure -- data loop after log.Scan was never entered, so data wasn't verified.");
            }
        }
Example #29
        public async Task RecoverLog(bool useAzure, int numEntries, int maxBytesPerEntry)
        {
            List <byte[]> entries   = new List <byte[]>();
            List <long>   positions = new List <long>();

            var random = new Random(0);

            var taskHubName = useAzure ? "test-taskhub" : Guid.NewGuid().ToString("N");
            var account     = useAzure ? CloudStorageAccount.Parse(Environment.GetEnvironmentVariable(TestConstants.StorageConnectionName)) : null;
            var logger      = this.loggerFactory.CreateLogger("testlogger");

            await BlobManager.DeleteTaskhubStorageAsync(account, taskHubName);

            // first, commit some number of random entries to the log and record the commit positions
            {
                var blobManager = new BlobManager(
                    account,
                    account,
                    taskHubName,
                    logger,
                    Microsoft.Extensions.Logging.LogLevel.Trace,
                    0,
                    new PartitionErrorHandler(0, logger, Microsoft.Extensions.Logging.LogLevel.Trace, "account", taskHubName));

                await blobManager.StartAsync();

                var log = new FasterLog(blobManager, new NetheriteOrchestrationServiceSettings());

                for (int i = 0; i < numEntries; i++)
                {
                    var bytes = new byte[1 + random.Next(maxBytesPerEntry)];
                    random.NextBytes(bytes);
                    entries.Add(bytes);
                    positions.Add(log.Enqueue(entries[i]));
                }
                await log.CommitAsync();

                await blobManager.StopAsync();
            }

            // then, read back all the entries, and compare position and content
            {
                var blobManager = new BlobManager(
                    account,
                    account,
                    taskHubName,
                    logger,
                    Microsoft.Extensions.Logging.LogLevel.Trace,
                    0,
                    new PartitionErrorHandler(0, logger, Microsoft.Extensions.Logging.LogLevel.Trace, "account", taskHubName));

                await blobManager.StartAsync();

                var log = new FasterLog(blobManager, new NetheriteOrchestrationServiceSettings());

                int iterationCount = 0;
                await Iterate(0, positions[positions.Count - 1]);

                async Task Iterate(long from, long to)
                {
                    using (var iter = log.Scan(from, to + 1))
                    {
                        byte[] result;
                        int    entryLength;
                        long   currentAddress;

                        while (true)
                        {
                            var next = iter.NextAddress;

                            while (!iter.GetNext(out result, out entryLength, out currentAddress))
                            {
                                if (currentAddress >= to)
                                {
                                    Assert.Equal(iterationCount, numEntries);
                                    return;
                                }
                                await iter.WaitAsync();
                            }

                            // process entry
                            Assert.Equal(positions[iterationCount], next);
                            var reference = entries[iterationCount];
                            Assert.Equal(reference.Length, entryLength);
                            for (int i = 0; i < entryLength; i++)
                            {
                                Assert.Equal(reference[i], result[i]);
                            }

                            iterationCount++;
                        }
                    }
                }

                await blobManager.StopAsync();
            }

            await BlobManager.DeleteTaskhubStorageAsync(account, taskHubName);
        }
Example #30
        public void FasterLogSimpleFastCommitTest([Values] TestUtils.DeviceType deviceType)
        {
            var cookie = new byte[100];

            new Random().NextBytes(cookie);

            var filename = path + "fastCommit" + deviceType.ToString() + ".log";

            device = TestUtils.CreateTestDevice(deviceType, filename, deleteOnClose: true);
            var logSettings = new FasterLogSettings {
                LogDevice = device, LogChecksum = LogChecksumType.PerEntry, LogCommitManager = manager, FastCommitMode = true, TryRecoverLatest = false
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }

            var cookie1 = new byte[100];

            new Random().NextBytes(cookie1);
            var commitSuccessful = log.CommitStrongly(out var commit1Addr, out _, true, cookie1, 1);

            Assert.IsTrue(commitSuccessful);

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }

            var cookie2 = new byte[100];

            new Random().NextBytes(cookie2);
            commitSuccessful = log.CommitStrongly(out var commit2Addr, out _, true, cookie2, 2);
            Assert.IsTrue(commitSuccessful);

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }

            var cookie6 = new byte[100];

            new Random().NextBytes(cookie6);
            commitSuccessful = log.CommitStrongly(out var commit6Addr, out _, true, cookie6, 6);
            Assert.IsTrue(commitSuccessful);

            // Wait for all metadata writes to be complete to avoid a concurrent access exception
            log.Dispose();
            log = null;

            // be a deviant and remove commit metadata files
            manager.RemoveAllCommits();

            // Recovery should still work
            var recoveredLog = new FasterLog(logSettings);

            recoveredLog.Recover(1);
            Assert.AreEqual(cookie1, recoveredLog.RecoveredCookie);
            Assert.AreEqual(commit1Addr, recoveredLog.TailAddress);
            recoveredLog.Dispose();

            recoveredLog = new FasterLog(logSettings);
            recoveredLog.Recover(2);
            Assert.AreEqual(cookie2, recoveredLog.RecoveredCookie);
            Assert.AreEqual(commit2Addr, recoveredLog.TailAddress);
            recoveredLog.Dispose();

            // Default argument should recover to most recent, if TryRecoverLatest is set
            logSettings.TryRecoverLatest = true;
            recoveredLog = new FasterLog(logSettings);
            Assert.AreEqual(cookie6, recoveredLog.RecoveredCookie);
            Assert.AreEqual(commit6Addr, recoveredLog.TailAddress);
            recoveredLog.Dispose();
        }