Example #1
        public void FasterLogShiftTailStressTest()
        {
            // Get an excruciatingly slow storage device to maximize chance of clogging the flush pipeline
            device = new LocalMemoryDevice(1L << 28, 1 << 28, 2, sector_size: 512, latencyMs: 50, fileName: "stress.log");
            var logSettings = new FasterLogSettings {
                LogDevice = device, LogChecksum = LogChecksumType.None, LogCommitManager = manager, SegmentSizeBits = 28
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < 5 * numEntries; i++)
            {
                log.Enqueue(entry);
            }

            // The entries above were enqueued without any commit records; record the current tail for reference
            var referenceTailLength = log.TailAddress;

            var enqueueDone   = new ManualResetEventSlim();
            var commitThreads = new List<Thread>();

            // Make sure to spin up many commit threads to expose lots of interleavings
            for (var i = 0; i < 2 * Math.Max(1, Environment.ProcessorCount - 1); i++)
            {
                commitThreads.Add(new Thread(() =>
                {
                    // Otherwise, absolutely clog the commit pipeline
                    while (!enqueueDone.IsSet)
                    {
                        log.Commit();
                    }
                }));
            }

            foreach (var t in commitThreads)
            {
                t.Start();
            }
            for (int i = 0; i < 5 * numEntries; i++)
            {
                log.Enqueue(entry);
            }
            enqueueDone.Set();

            foreach (var t in commitThreads)
            {
                t.Join();
            }

            // We expect the test to finish and not get stuck somewhere

            // Ensure clean shutdown
            log.Commit(true);
        }
Example #2
/*
 *      public async override Task OnDeactivateAsync()
 *      {
 *          string primaryKey = this.GetPrimaryKeyString();
 *          _log.Commit(true);
 *          Console.WriteLine($"Ending {_serviceName} - {_recordStart:g} - {_metric}");
 *          await base.OnDeactivateAsync();
 *      }*/

        public async Task AddRecord(Record record)
        {
            _recordCount++;
            var offset = (long)record.Time - _recordStart.Ticks;

            foreach (var kvp in record.Attributes)
            {
                var entry = new LogEntry()
                {
                    Offset      = (uint)offset,
                    MetricValue = record.Metricvalue,
                    KeyName     = kvp.Key,
                    KeyValue    = kvp.Value,
                    RecordId    = _recordCount
                };
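                // Serialize the entry and append it to the FasterLog; EnqueueAsync completes once the
                // entry is in the log's memory buffer, not when it has been committed to storage.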
                var bytes = entry.ToByteArray();
                await _log.EnqueueAsync(bytes);
            }

            if (_recordCount % 10_000 == 0)
            {
                _log.Commit();
                Console.WriteLine($"{_recordCount} {_recordStart.Ticks}");
            }
        }
Example #3
        public void PopulateLog(FasterLog log)
        {
            //****** Populate log with basic data for tests
            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Enqueue but set each Entry in a way that can differentiate between entries
            for (int i = 0; i < numEntries; i++)
            {
                // Flag one part of entry data that corresponds to index
                if (i < entryLength)
                {
                    entry[i] = (byte)entryFlag;
                }

                // puts back the previous entry value
                if ((i > 0) && (i < entryLength))
                {
                    entry[i - 1] = (byte)(i - 1);
                }

                // Add to FasterLog
                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);
        }
Example #4
        public void TestDisposeReleasesFileLocksWithCompletedCommit([Values] TestUtils.DeviceType deviceType)
        {
            string path     = TestUtils.MethodTestDir + "/";
            string filename = path + "TestDisposeRelease" + deviceType.ToString() + ".log";

            DirectoryInfo di        = Directory.CreateDirectory(path);
            IDevice       device    = TestUtils.CreateTestDevice(deviceType, filename);
            FasterLog     fasterLog = new FasterLog(new FasterLogSettings {
                LogDevice = device, SegmentSizeBits = 22, LogCommitDir = path, LogChecksum = LogChecksumType.PerEntry
            });

            Assert.IsTrue(fasterLog.TryEnqueue(new byte[100], out _));

            fasterLog.Commit(spinWait: true);
            fasterLog.Dispose();
            device.Dispose();
            while (true)
            {
                try
                {
                    di.Delete(recursive: true);
                    break;
                }
                catch { }
            }
        }
Example #5
        public void DeviceAndLogConfig()
        {
            int entryLength = 10;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);

            // Verify
            Assert.IsTrue(File.Exists(path + "/log-commits/commit.0.0"));
            Assert.IsTrue(File.Exists(path + "/DeviceConfig.0"));

            // Read the log just to verify can actually read it
            int currentEntry = 0;

            using (var iter = log.Scan(0, 100_000_000))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    Assert.IsTrue(result[currentEntry] == currentEntry, "Fail - Result[" + currentEntry.ToString() + "]: is not same as " + currentEntry.ToString());

                    currentEntry++;
                }
            }
        }
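Example #6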
        public void ManagedLocalStoreFullParamsTest()
        {
            int entryLength = 10;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
                logFullParams.Enqueue(entry);
            }

            // Commit to the log
            logFullParams.Commit(true);

            // Verify
            Assert.IsTrue(File.Exists(path + "/log-commits/commit.1.0"));
            Assert.IsTrue(File.Exists(path + "/ManagedLocalStore.log.0"));

            // Read the log just to verify can actually read it
            int currentEntry = 0;

            using var iter = logFullParams.Scan(0, 100_000_000);
            while (iter.GetNext(out byte[] result, out _, out _))
            {
                Assert.AreEqual(currentEntry, result[currentEntry]);
                currentEntry++;
            }
        }
Example #7
        private void FasterLogTest1(LogChecksumType logChecksum, IDevice device, ILogCommitManager logCommitManager)
        {
            log = new FasterLog(new FasterLogSettings {
                PageSizeBits = 20, SegmentSizeBits = 20, LogDevice = device, LogChecksum = logChecksum, LogCommitManager = logCommitManager
            });

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }
            log.Commit(true);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                int count = 0;
                while (iter.GetNext(out byte[] result, out int length, out long currentAddress))
                {
                    count++;
                    Assert.IsTrue(result.SequenceEqual(entry));
                    if (count % 100 == 0)
                    {
                        log.TruncateUntil(iter.NextAddress);
                    }
                }
                Assert.IsTrue(count == numEntries);
            }

            log.Dispose();
        }
Example #8
        public void BasicHighLatencyDeviceTest()
        {
            TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true);

            // Create device and log for the in-memory device test
            using LocalMemoryDevice device = new LocalMemoryDevice(1L << 28, 1L << 25, 2, latencyMs: 20);
            using FasterLog LocalMemorylog = new FasterLog(new FasterLogSettings { LogDevice = device, PageSizeBits = 80, MemorySizeBits = 20, GetMemory = null, SegmentSizeBits = 80, MutableFraction = 0.2, LogCommitManager = null });

            int entryLength = 10;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
                LocalMemorylog.Enqueue(entry);
            }

            // Commit to the log
            LocalMemorylog.Commit(true);

            // Read the log just to verify was actually committed
            int currentEntry = 0;

            using (var iter = LocalMemorylog.Scan(0, 100_000_000))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    Assert.IsTrue(result[currentEntry] == currentEntry, "Fail - Result[" + currentEntry.ToString() + "]: is not same as " + currentEntry.ToString());
                    currentEntry++;
                }
            }
        }
Example #9
        public void FasterLogTest1([Values] LogChecksumType logChecksum)
        {
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device, LogChecksum = logChecksum
            });

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }
            log.Commit(true);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                int count = 0;
                while (iter.GetNext(out byte[] result, out int length))
                {
                    count++;
                    Assert.IsTrue(result.SequenceEqual(entry));
                    if (count % 100 == 0)
                    {
                        log.TruncateUntil(iter.CurrentAddress);
                    }
                }
                Assert.IsTrue(count == numEntries);
            }

            log.Dispose();
        }
Example #10
        public async ValueTask FlakyLogTestCleanFailure([Values] bool isAsync)
        {
            var errorOptions = new ErrorSimulationOptions
            {
                readTransientErrorRate  = 0,
                readPermanentErrorRate  = 0,
                writeTransientErrorRate = 0,
                writePermanentErrorRate = 0.1,
            };

            device = new SimulatedFlakyDevice(Devices.CreateLogDevice(path + "fasterlog.log", deleteOnClose: true),
                                              errorOptions);
            var logSettings = new FasterLogSettings
            {
                LogDevice = device, LogChecksum = LogChecksumType.PerEntry, LogCommitManager = manager
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            try
            {
                // Ensure we execute long enough to trigger errors
                for (int j = 0; j < 100; j++)
                {
                    for (int i = 0; i < numEntries; i++)
                    {
                        log.Enqueue(entry);
                    }

                    if (isAsync)
                    {
                        await log.CommitAsync();
                    }
                    else
                    {
                        log.Commit();
                    }
                }
            }
            catch (CommitFailureException e)
            {
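                // On a clean failure, addresses at or beyond the failed flush range must not be
                // reported as flushed or committed.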
                var errorRangeStart = e.LinkedCommitInfo.CommitInfo.FromAddress;
                Assert.LessOrEqual(log.CommittedUntilAddress, errorRangeStart);
                Assert.LessOrEqual(log.FlushedUntilAddress, errorRangeStart);
                return;
            }

            // A commit failure should have been observed; reaching this point means no exception was thrown
            Assert.Fail();
        }
Example #11
        static void CommitThread()
        {
            //Task<LinkedCommitInfo> prevCommitTask = null;
            while (true)
            {
                Thread.Sleep(5);
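                // Commit(true) spin-waits until the commit completes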
                log.Commit(true);

                // Async version
                // await log.CommitAsync();

                // Async version that catches all commit failures in between
                //try
                //{
                //    prevCommitTask = await log.CommitAsync(prevCommitTask);
                //}
                //catch (CommitFailureException e)
                //{
                //    Console.WriteLine(e);
                //    prevCommitTask = e.LinkedCommitInfo.nextTcs.Task;
                //}
            }
        }
Example #12
        public void TestDisposeReleasesFileLocksWithInprogressCommit()
        {
            string        commitPath = TestContext.CurrentContext.TestDirectory + "/" + TestContext.CurrentContext.Test.Name + "/";
            DirectoryInfo di         = Directory.CreateDirectory(commitPath);
            IDevice       device     = Devices.CreateLogDevice(commitPath + "testDisposeReleasesFileLocksWithInprogressCommit.log", preallocateFile: true, deleteOnClose: false);
            FasterLog     fasterLog  = new FasterLog(new FasterLogSettings {
                LogDevice = device, LogChecksum = LogChecksumType.PerEntry
            });

            Assert.IsTrue(fasterLog.TryEnqueue(new byte[100], out long beginAddress));
            fasterLog.Commit(spinWait: false);
            fasterLog.Dispose();
            device.Dispose();
            while (true)
            {
                try
                {
                    di.Delete(recursive: true);
                    break;
                }
                catch { }
            }
        }
Example #13
        public async Task FasterLogTest5([Values] LogChecksumType logChecksum)
        {
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device, PageSizeBits = 16, MemorySizeBits = 16, LogChecksum = logChecksum
            });

            // Per-entry header: 4 bytes without a checksum, 12 bytes with LogChecksumType.PerEntry
            int  headerSize = logChecksum == LogChecksumType.None ? 4 : 12;
            bool _disposed  = false;
            var  commit     = new Thread(() =>
            {
                while (!_disposed)
                {
                    log.Commit(true);
                    Thread.Sleep(1);
                }
            });

            commit.Start();

            // First entry fills the remainder of the first page: 65536 (page size) - headerSize - 64 (log header)
            await log.EnqueueAndWaitForCommitAsync(new byte[65536 - headerSize - 64]);

            // Each subsequent entry fills a full page: 65536 (page size) - headerSize
            await log.EnqueueAndWaitForCommitAsync(new byte[65536 - headerSize]);

            await log.EnqueueAndWaitForCommitAsync(new byte[65536 - headerSize]);

            await log.EnqueueAndWaitForCommitAsync(new byte[65536 - headerSize]);

            await log.EnqueueAndWaitForCommitAsync(new byte[65536 - headerSize]);

            _disposed = true;

            commit.Join();
            log.Dispose();
        }
Example #14
        public void ManagedLocalStoreBasicTest()
        {
            int entryLength = 20;
            int numEntries  = 1000;
            int entryFlag   = 9999;


            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Enqueue but set each Entry in a way that can differentiate between entries
            for (int i = 0; i < numEntries; i++)
            {
                // Flag one part of entry data that corresponds to index
                if (i < entryLength)
                {
                    entry[i] = (byte)entryFlag;
                }

                // puts back the previous entry value
                if ((i > 0) && (i < entryLength))
                {
                    entry[i - 1] = (byte)(i - 1);
                }

                // Default is add bytes so no need to do anything with it
                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);

            // flag to make sure data has been checked
            bool datacheckrun = false;

            // Read the log - Look for the flag so know each entry is unique
            int currentEntry = 0;

            using (var iter = log.Scan(0, 100_000_000))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    if (currentEntry < entryLength)
                    {
                        // set check flag to show got in here
                        datacheckrun = true;

                        Assert.IsTrue(result[currentEntry] == (byte)entryFlag, "Fail - Result[" + currentEntry.ToString() + "]:" + result[0].ToString() + "  entryFlag:" + entryFlag);

                        currentEntry++;
                    }
                }
            }

            // if data verification was skipped, then pop a fail
            if (datacheckrun == false)
            {
                Assert.Fail("Failure -- data loop after log.Scan never entered so wasn't verified. ");
            }
        }
Example #15
        public void CommitRecordBoundedGrowthTest([Values] TestUtils.DeviceType deviceType)
        {
            var cookie = new byte[100];

            new Random().NextBytes(cookie);

            var filename = path + "boundedGrowth" + deviceType.ToString() + ".log";

            device = TestUtils.CreateTestDevice(deviceType, filename, deleteOnClose: true);
            var logSettings = new FasterLogSettings {
                LogDevice = device, LogChecksum = LogChecksumType.PerEntry, LogCommitManager = manager, FastCommitMode = true
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < 5 * numEntries; i++)
            {
                log.Enqueue(entry);
            }

            // The entries above were enqueued without any commit records; record the tail for comparison with the growth below
            var referenceTailLength = log.TailAddress;

            var enqueueDone   = new ManualResetEventSlim();
            var commitThreads = new List<Thread>();

            // Make sure to not spin up too many commit threads, otherwise we might clog epochs and halt progress
            for (var i = 0; i < Math.Max(1, Environment.ProcessorCount - 1); i++)
            {
                commitThreads.Add(new Thread(() =>
                {
                    // Otherwise, absolutely clog the commit pipeline
                    while (!enqueueDone.IsSet)
                    {
                        log.Commit();
                    }
                }));
            }

            foreach (var t in commitThreads)
            {
                t.Start();
            }
            for (int i = 0; i < 5 * numEntries; i++)
            {
                log.Enqueue(entry);
            }
            enqueueDone.Set();

            foreach (var t in commitThreads)
            {
                t.Join();
            }


            // TODO: Hardcoded constant --- if this number changes in FasterLogRecoverInfo, it needs to be updated here too
            var commitRecordSize = 44;
            var logTailGrowth    = log.TailAddress - referenceTailLength;

            // logTailGrowth minus referenceTailLength isolates the bytes added by commit records; allow at most one commit record per enqueued entry
            Assert.IsTrue(logTailGrowth - referenceTailLength <= commitRecordSize * 5 * numEntries);

            // Ensure clean shutdown
            log.Commit(true);
        }
Example #16
        public void LogReadAsyncBasicTest([Values] ParameterDefaultsIteratorType iteratorType, [Values] TestUtils.DeviceType deviceType)
        {
            int    entryLength = 20;
            int    numEntries  = 500;
            int    entryFlag   = 9999;
            string filename    = path + "LogReadAsync" + deviceType.ToString() + ".log";

            device = TestUtils.CreateTestDevice(deviceType, filename);
            log    = new FasterLog(new FasterLogSettings {
                LogDevice = device, SegmentSizeBits = 22, LogCommitDir = path
            });

            byte[] entry = new byte[entryLength];

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Enqueue but set each Entry in a way that can differentiate between entries
            for (int i = 0; i < numEntries; i++)
            {
                // Flag one part of entry data that corresponds to index
                if (i < entryLength)
                {
                    entry[i] = (byte)entryFlag;
                }

                // puts back the previous entry value
                if ((i > 0) && (i < entryLength))
                {
                    entry[i - 1] = (byte)(i - 1);
                }

                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);


            // Read one entry using the different ReadAsync parameter overloads and verify
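            // ReadAsync returns a (byte[] entry, int length) tuple for the record at the given address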
            switch (iteratorType)
            {
            case ParameterDefaultsIteratorType.DefaultParams:
                // Read one entry and verify
                var record       = log.ReadAsync(log.BeginAddress);
                var foundFlagged = record.Result.Item1[0];    // 15
                var foundEntry   = record.Result.Item1[1];    // 1
                var foundTotal   = record.Result.Item2;

                Assert.AreEqual((byte)entryFlag, foundFlagged, $"Fail reading Flagged Entry");
                Assert.AreEqual(1, foundEntry, $"Fail reading Normal Entry");
                Assert.AreEqual(entryLength, foundTotal, $"Fail reading Total");

                break;

            case ParameterDefaultsIteratorType.LengthParam:
                // Read one entry and verify
                record       = log.ReadAsync(log.BeginAddress, 208);
                foundFlagged = record.Result.Item1[0];    // 15
                foundEntry   = record.Result.Item1[1];    // 1
                foundTotal   = record.Result.Item2;

                Assert.AreEqual((byte)entryFlag, foundFlagged, $"Fail reading Flagged Entry");
                Assert.AreEqual(1, foundEntry, $"Fail reading Normal Entry");
                Assert.AreEqual(entryLength, foundTotal, $"Fail reading Total");

                break;

            case ParameterDefaultsIteratorType.TokenParam:
                var cts = new CancellationToken();

                // Read one entry and verify
                record       = log.ReadAsync(log.BeginAddress, 104, cts);
                foundFlagged = record.Result.Item1[0];    // 15
                foundEntry   = record.Result.Item1[1];    // 1
                foundTotal   = record.Result.Item2;

                Assert.AreEqual((byte)entryFlag, foundFlagged, $"Fail reading Flagged Entry");
                Assert.AreEqual(1, foundEntry, $"Fail reading Normal Entry");
                Assert.AreEqual(entryLength, foundTotal, $"Fail reading Total");

                // Read one entry as IMemoryOwner and verify
                var recordMemoryOwner = log.ReadAsync(log.BeginAddress, MemoryPool<byte>.Shared, 104, cts);
                var foundFlaggedMem   = recordMemoryOwner.Result.Item1.Memory.Span[0];  // 15
                var foundEntryMem     = recordMemoryOwner.Result.Item1.Memory.Span[1];  // 1
                var foundTotalMem     = recordMemoryOwner.Result.Item2;

                Assert.IsTrue(foundFlagged == foundFlaggedMem, $"MemoryPool-based ReadAsync result does not match that of the byte array one. value: {foundFlaggedMem} expected: {foundFlagged}");
                Assert.IsTrue(foundEntry == foundEntryMem, $"MemoryPool-based ReadAsync result does not match that of the byte array one. value: {foundEntryMem} expected: {foundEntry}");
                Assert.IsTrue(foundTotal == foundTotalMem, $"MemoryPool-based ReadAsync result does not match that of the byte array one. value: {foundTotalMem} expected: {foundTotal}");

                break;

            default:
                Assert.Fail("Unknown case ParameterDefaultsIteratorType.DefaultParams:");
                break;
            }
        }
Example #17
        public void LogReadAsyncBasicTest([Values] ParameterDefaultsIteratorType iteratorType)
        {
            int entryLength = 100;
            int numEntries  = 1000000;
            int entryFlag   = 9999;

            byte[] entry = new byte[entryLength];

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Enqueue but set each Entry in a way that can differentiate between entries
            for (int i = 0; i < numEntries; i++)
            {
                // Flag one part of entry data that corresponds to index
                if (i < entryLength)
                {
                    entry[i] = (byte)entryFlag;
                }

                // puts back the previous entry value
                if ((i > 0) && (i < entryLength))
                {
                    entry[i - 1] = (byte)(i - 1);
                }

                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);

            // Read one entry using the different ReadAsync parameter overloads and verify
            switch (iteratorType)
            {
            case ParameterDefaultsIteratorType.DefaultParams:
                // Read one entry and verify
                var record       = log.ReadAsync(log.BeginAddress);
                var foundFlagged = record.Result.Item1[0];    // 15
                var foundEntry   = record.Result.Item1[1];    // 1
                var foundTotal   = record.Result.Item2;

                Assert.IsTrue(foundFlagged == (byte)entryFlag, "Fail reading data - Found Flagged Entry:" + foundFlagged.ToString() + "  Expected Flagged entry:" + entryFlag);
                Assert.IsTrue(foundEntry == 1, "Fail reading data - Found Normal Entry:" + foundEntry.ToString() + "  Expected Value: 1");
                Assert.IsTrue(foundTotal == 100, "Fail reading data - Found Total:" + foundTotal.ToString() + "  Expected Total: 100");

                break;

            case ParameterDefaultsIteratorType.LengthParam:
                // Read one entry and verify
                record       = log.ReadAsync(log.BeginAddress, 208);
                foundFlagged = record.Result.Item1[0];    // 15
                foundEntry   = record.Result.Item1[1];    // 1
                foundTotal   = record.Result.Item2;

                Assert.IsTrue(foundFlagged == (byte)entryFlag, "Fail reading data - Found Flagged Entry:" + foundFlagged.ToString() + "  Expected Flagged entry:" + entryFlag);
                Assert.IsTrue(foundEntry == 1, "Fail reading data - Found Normal Entry:" + foundEntry.ToString() + "  Expected Value: 1");
                Assert.IsTrue(foundTotal == 100, "Fail reading data - Found Total:" + foundTotal.ToString() + "  Expected Total: 100");

                break;

            case ParameterDefaultsIteratorType.TokenParam:
                var cts = new CancellationToken();

                // Read one entry and verify
                record       = log.ReadAsync(log.BeginAddress, 104, cts);
                foundFlagged = record.Result.Item1[0];    // 15
                foundEntry   = record.Result.Item1[1];    // 1
                foundTotal   = record.Result.Item2;

                Assert.IsTrue(foundFlagged == (byte)entryFlag, "Fail reading data - Found Flagged Entry:" + foundFlagged.ToString() + "  Expected Flagged entry:" + entryFlag);
                Assert.IsTrue(foundEntry == 1, "Fail reading data - Found Normal Entry:" + foundEntry.ToString() + "  Expected Value: 1");
                Assert.IsTrue(foundTotal == 100, "Fail reading data - Found Total:" + foundTotal.ToString() + "  Expected Total: 100");

                break;

            default:
                Assert.Fail("Unknown case ParameterDefaultsIteratorType.DefaultParams:");
                break;
            }
        }
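Example #18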
        public void ManagedLocalStoreBasicTest()
        {
            int  entryLength       = 20;
            int  numEntries        = 1000;
            int  numEnqueueThreads = 1;
            int  numIterThreads    = 1;
            bool commitThread      = false;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            bool disposeCommitThread = false;
            var  commit =
                new Thread(() =>
            {
                while (!disposeCommitThread)
                {
                    log.Commit(true);
                }
            });

            if (commitThread)
            {
                commit.Start();
            }

            Thread[] th = new Thread[numEnqueueThreads];
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t] =
                    new Thread(() =>
                {
                    // Enqueue but set each Entry in a way that can differentiate between entries
                    for (int i = 0; i < numEntries; i++)
                    {
                        // Flag one part of entry data that corresponds to index
                        entry[0] = (byte)i;

                        // Default is add bytes so no need to do anything with it
                        log.Enqueue(entry);
                    }
                });
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Start();
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Join();
            }

            if (commitThread)
            {
                disposeCommitThread = true;
                commit.Join();
            }

            // Final commit to the log
            log.Commit(true);

            int currentEntry = 0;

            Thread[] th2 = new Thread[numIterThreads];
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t] =
                    new Thread(() =>
                {
                    // Read the log - Look for the flag so know each entry is unique
                    using (var iter = log.Scan(0, long.MaxValue))
                    {
                        while (iter.GetNext(out byte[] result, out _, out _))
                        {
                            if (numEnqueueThreads == 1)
                            {
                                Assert.AreEqual((byte)currentEntry, result[0]);
                            }
                            currentEntry++;
                        }
                    }

                    Assert.AreEqual(numEntries * numEnqueueThreads, currentEntry);
                });
            }

            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Start();
            }
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Join();
            }

            // Make sure number of entries is same as current - also makes sure that data verification was not skipped
            Assert.AreEqual(numEntries, currentEntry);
        }
Example #19
        public static void ManagedLocalStoreBasicTest()
        {
            int  entryLength       = 20;
            int  numEntries        = 500_000;
            int  numEnqueueThreads = 1;
            int  numIterThreads    = 1;
            bool commitThread      = false;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            bool disposeCommitThread = false;
            var  commit =
                new Thread(() =>
            {
                while (!disposeCommitThread)
                {
                    Thread.Sleep(10);
                    log.Commit(true);
                }
            });

            if (commitThread)
            {
                commit.Start();
            }

            Thread[] th = new Thread[numEnqueueThreads];
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t] =
                    new Thread(() =>
                {
                    // Enqueue but set each Entry in a way that can differentiate between entries
                    for (int i = 0; i < numEntries; i++)
                    {
                        // Flag one part of entry data that corresponds to index
                        entry[0] = (byte)i;

                        // Default is add bytes so no need to do anything with it
                        log.Enqueue(entry);
                    }
                });
            }

            Console.WriteLine("Populating log...");
            var sw = Stopwatch.StartNew();

            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Start();
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Join();
            }

            sw.Stop();
            Console.WriteLine($"{numEntries} items enqueued to the log by {numEnqueueThreads} threads in {sw.ElapsedMilliseconds} ms");

            if (commitThread)
            {
                disposeCommitThread = true;
                commit.Join();
            }

            // Final commit to the log
            log.Commit(true);

            // flag to make sure data has been checked
            bool datacheckrun = false;

            Thread[] th2 = new Thread[numIterThreads];
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t] =
                    new Thread(() =>
                {
                    // Read the log - Look for the flag so know each entry is unique
                    int currentEntry = 0;
                    using (var iter = log.Scan(0, long.MaxValue))
                    {
                        while (iter.GetNext(out byte[] result, out _, out _))
                        {
                            // set check flag to show got in here
                            datacheckrun = true;

                            if (numEnqueueThreads == 1)
                            {
                                if (result[0] != (byte)currentEntry)
                                {
                                    throw new Exception("Fail - Result[" + currentEntry.ToString() + "]:" + result[0].ToString());
                                }
                            }
                            currentEntry++;
                        }
                    }

                    if (currentEntry != numEntries * numEnqueueThreads)
                    {
                        throw new Exception("Error");
                    }
                });
            }

            sw.Restart();

            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Start();
            }
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Join();
            }

            sw.Stop();
            Console.WriteLine($"{numEntries} items iterated in the log by {numIterThreads} threads in {sw.ElapsedMilliseconds} ms");

            // if data verification was skipped, then pop a fail
            if (datacheckrun == false)
            {
                throw new Exception("Failure -- data loop after log.Scan never entered so wasn't verified. ");
            }
        }
Example #20
        public async ValueTask FlakyLogTestTolerateFailure([Values] IteratorType iteratorType)
        {
            var errorOptions = new ErrorSimulationOptions
            {
                readTransientErrorRate  = 0,
                readPermanentErrorRate  = 0,
                writeTransientErrorRate = 0,
                writePermanentErrorRate = 0.1,
            };

            device = new SimulatedFlakyDevice(Devices.CreateLogDevice(path + "fasterlog.log", deleteOnClose: true),
                                              errorOptions);
            var logSettings = new FasterLogSettings
            {
                LogDevice = device, LogChecksum = LogChecksumType.PerEntry, LogCommitManager = manager, TolerateDeviceFailure = true
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Ensure we write enough to trigger errors
            for (int i = 0; i < 1000; i++)
            {
                log.Enqueue(entry);
                try
                {
                    if (IsAsync(iteratorType))
                    {
                        await log.CommitAsync();
                    }
                    else
                    {
                        log.Commit();
                    }
                }
                catch (CommitFailureException)
                {
                    // Ignore failure
                }
            }

            // For surviving entries, scan should still work best-effort
            // If endAddress > log.TailAddress then GetAsyncEnumerable() will wait until more entries are added.
            var endAddress   = IsAsync(iteratorType) ? log.CommittedUntilAddress : long.MaxValue;
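            // Opening a new FasterLog over the same device and commit manager recovers from the latest successful commit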
            var recoveredLog = new FasterLog(logSettings);

            using var iter = recoveredLog.Scan(0, endAddress);
            switch (iteratorType)
            {
            case IteratorType.AsyncByteVector:
                await foreach ((byte[] result, int _, long _, long nextAddress) in iter.GetAsyncEnumerable())
                {
                    Assert.IsTrue(result.SequenceEqual(entry));
                }
                break;

            case IteratorType.AsyncMemoryOwner:
                await foreach ((IMemoryOwner<byte> result, int _, long _, long nextAddress) in iter.GetAsyncEnumerable(MemoryPool<byte>.Shared))
                {
                    Assert.IsTrue(result.Memory.Span.ToArray().Take(entry.Length).SequenceEqual(entry));
                    result.Dispose();
                }
                break;

            case IteratorType.Sync:
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    Assert.IsTrue(result.SequenceEqual(entry));
                }
                break;

            default:
                Assert.Fail("Unknown IteratorType");
                break;
            }
            recoveredLog.Dispose();
        }
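Example #21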
        private async ValueTask FasterLogTest1(LogChecksumType logChecksum, IDevice device, ILogCommitManager logCommitManager, FasterLogTests.IteratorType iteratorType)
        {
            var logSettings = new FasterLogSettings {
                PageSizeBits = 20, SegmentSizeBits = 20, LogDevice = device, LogChecksum = logChecksum, LogCommitManager = logCommitManager
            };

            log = FasterLogTests.IsAsync(iteratorType) ? await FasterLog.CreateAsync(logSettings) : new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }
            log.Commit(true);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                var counter = new FasterLogTests.Counter(log);

                switch (iteratorType)
                {
                case FasterLogTests.IteratorType.AsyncByteVector:
                    await foreach ((byte[] result, _, _, long nextAddress) in iter.GetAsyncEnumerable())
                    {
                        Assert.IsTrue(result.SequenceEqual(entry));
                        counter.IncrementAndMaybeTruncateUntil(nextAddress);

                        // MoveNextAsync() would hang at TailAddress, waiting for more entries (that we don't add).
                        // Note: If this happens and the test has to be canceled, there may be a leftover blob from the log.Commit(), because
                        // the log device isn't Dispose()d; the symptom is currently a numeric string format error in DefaultCheckpointNamingScheme.
                        if (nextAddress == log.TailAddress)
                        {
                            break;
                        }
                    }
                    break;

                case FasterLogTests.IteratorType.AsyncMemoryOwner:
                    await foreach ((IMemoryOwner<byte> result, int _, long _, long nextAddress) in iter.GetAsyncEnumerable(MemoryPool<byte>.Shared))
                    {
                        Assert.IsTrue(result.Memory.Span.ToArray().Take(entry.Length).SequenceEqual(entry));
                        result.Dispose();
                        counter.IncrementAndMaybeTruncateUntil(nextAddress);

                        // MoveNextAsync() would hang at TailAddress, waiting for more entries (that we don't add).
                        // Note: If this happens and the test has to be canceled, there may be a leftover blob from the log.Commit(), because
                        // the log device isn't Dispose()d; the symptom is currently a numeric string format error in DefaultCheckpointNamingScheme.
                        if (nextAddress == log.TailAddress)
                        {
                            break;
                        }
                    }
                    break;

                case FasterLogTests.IteratorType.Sync:
                    while (iter.GetNext(out byte[] result, out _, out _))
                    {
                        Assert.IsTrue(result.SequenceEqual(entry));
                        counter.IncrementAndMaybeTruncateUntil(iter.NextAddress);
                    }
                    break;

                default:
                    Assert.Fail("Unknown IteratorType");
                    break;
                }
                Assert.IsTrue(counter.count == numEntries);
            }

            log.Dispose();
        }
Example #22
        public void FlakyLogTestConcurrentWriteFailure()
        {
            var errorOptions = new ErrorSimulationOptions
            {
                readTransientErrorRate  = 0,
                readPermanentErrorRate  = 0,
                writeTransientErrorRate = 0,
                writePermanentErrorRate = 0.05,
            };

            device = new SimulatedFlakyDevice(Devices.CreateLogDevice(path + "fasterlog.log", deleteOnClose: true),
                                              errorOptions);
            var logSettings = new FasterLogSettings
            {
                LogDevice = device, LogChecksum = LogChecksumType.PerEntry, LogCommitManager = manager
            };

            log = new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            var         failureList = new List<CommitFailureException>();
            ThreadStart runTask     = () =>
            {
                var random = new Random();
                try
                {
                    // Ensure we execute long enough to trigger errors
                    for (int j = 0; j < 100; j++)
                    {
                        for (int i = 0; i < numEntries; i++)
                        {
                            log.Enqueue(entry);
                            // create randomly interleaved concurrent writes
                            if (random.NextDouble() < 0.1)
                            {
                                log.Commit();
                            }
                        }
                    }
                }
                catch (CommitFailureException e)
                {
                    lock (failureList)
                        failureList.Add(e);
                }
            };

            var threads = new List<Thread>();

            for (var i = 0; i < Environment.ProcessorCount + 1; i++)
            {
                var t = new Thread(runTask);
                t.Start();
                threads.Add(t);
            }

            foreach (var thread in threads)
            {
                thread.Join();
            }

            // Every thread observed the failure
            Assert.IsTrue(failureList.Count == threads.Count);
            // They all observed the same failure
            foreach (var failure in failureList)
            {
                Assert.AreEqual(failure.LinkedCommitInfo.CommitInfo, failureList[0].LinkedCommitInfo.CommitInfo);
            }
        }
Example #23
        public void WaitForCommitBasicTest(string SyncTest)
        {
            CancellationTokenSource cts   = new CancellationTokenSource();
            CancellationToken       token = cts.Token;

            // make it small since launching each on separate threads
            int entryLength = 10;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            Task currentTask;

            // Enqueue and Commit in a separate thread (wait there until commit is done though).
            if (SyncTest == "Sync")
            {
                currentTask = Task.Run(() => LogWriter(log, entry), token);
            }
            else
            {
                currentTask = Task.Run(() => LogWriterAsync(log, entry), token);
            }

            // Give the tasks a second or two to queue up; shouldn't be needed, but it helps avoid timing issues
            Thread.Sleep(2000);

            // Commit to the log
            log.Commit(true);
            currentTask.Wait(4000, token);

            // Double-check that the task finished; there have been cases where it kept running even after the commit was done
            if (currentTask.Status != TaskStatus.RanToCompletion)
            {
                cts.Cancel();
            }

            // flag to make sure data has been checked
            bool datacheckrun = false;

            // Read the log to make sure all entries are put in
            int currentEntry = 0;

            using (var iter = log.Scan(0, 100_000_000))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    if (currentEntry < entryLength)
                    {
                        // set check flag to show got in here
                        datacheckrun = true;

                        Assert.IsTrue(result[currentEntry] == (byte)currentEntry, "Fail - Result[" + currentEntry.ToString() + "]:" + result[0].ToString() + " not match expected:" + currentEntry);

                        currentEntry++;
                    }
                }
            }

            // if data verification was skipped, then pop a fail
            if (datacheckrun == false)
            {
                Assert.Fail("Failure -- data loop after log.Scan never entered so wasn't verified. ");
            }

            // NOTE: There have been issues where the task does not run to completion on Release builds.
            // This is a final check that the task finished; if it did not, fail.
            // If we made it this far, the data was enqueued and read properly, so this is just
            // a case of the task not stopping.
            if (currentTask.Status != TaskStatus.RanToCompletion)
            {
                Assert.Fail("Final Status check Failure -- Task should be 'RanToCompletion' but current Status is:" + currentTask.Status);
            }
        }
Example #24
        public void ManagedLocalStoreBasicTest()
        {
            int entryLength       = 20;
            int numEntries        = 1000;
            int numEnqueueThreads = 1;
            int numIterThreads    = 1;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            Thread[] th = new Thread[numEnqueueThreads];
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t] =
                    new Thread(() =>
                {
                    // Enqueue but set each Entry in a way that can differentiate between entries
                    for (int i = 0; i < numEntries; i++)
                    {
                        // Flag one part of entry data that corresponds to index
                        entry[0] = (byte)i;

                        // Default is add bytes so no need to do anything with it
                        log.Enqueue(entry);
                    }
                });
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Start();
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Join();
            }

            // Commit to the log
            log.Commit(true);

            // flag to make sure data has been checked
            bool datacheckrun = false;

            Thread[] th2 = new Thread[numIterThreads];
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t] =
                    new Thread(() =>
                {
                    // Read the log - Look for the flag so know each entry is unique
                    int currentEntry = 0;
                    using (var iter = log.Scan(0, long.MaxValue))
                    {
                        while (iter.GetNext(out byte[] result, out _, out _))
                        {
                            // set check flag to show got in here
                            datacheckrun = true;

                            if (numEnqueueThreads == 1)
                            {
                                Assert.IsTrue(result[0] == (byte)currentEntry, "Fail - Result[" + currentEntry.ToString() + "]:" + result[0].ToString());
                            }
                            currentEntry++;
                        }
                    }

                    Assert.IsTrue(currentEntry == numEntries * numEnqueueThreads);
                });
            }

            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Start();
            }
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Join();
            }

            // if data verification was skipped, then pop a fail
            if (datacheckrun == false)
            {
                Assert.Fail("Failure -- data loop after log.Scan never entered so wasn't verified. ");
            }
        }
Example #25
        public void Setup()
        {
            // Clean up log files from previous test runs in case they weren't cleaned up
            try { new DirectoryInfo(path).Delete(true); }
            catch {}

            // Set up the devices and logs
            device = Devices.CreateLogDevice(path + "LogScan", deleteOnClose: true);
            log    = new FasterLog(new FasterLogSettings {
                LogDevice = device
            });
            deviceUnCommitted = Devices.CreateLogDevice(path + "LogScanUncommitted", deleteOnClose: true);
            logUncommitted    = new FasterLog(new FasterLogSettings {
                LogDevice = deviceUnCommitted
            });

            //****** Populate log with basic data for tests
            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            // Enqueue but set each Entry in a way that can differentiate between entries
            for (int i = 0; i < numEntries; i++)
            {
                // Flag one part of entry data that corresponds to index
                if (i < entryLength)
                {
                    entry[i] = (byte)entryFlag;
                }

                // puts back the previous entry value
                if ((i > 0) && (i < entryLength))
                {
                    entry[i - 1] = (byte)(i - 1);
                }

                // Add to FasterLog
                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);


            //****** Populate uncommitted log / device for ScanUncommittedTest
            // Set Default entry data
            for (int j = 0; j < entryLength; j++)
            {
                entry[j] = (byte)j;
            }

            // Enqueue but set each Entry in a way that can differentiate between entries
            for (int j = 0; j < numEntries; j++)
            {
                // Flag one part of entry data that corresponds to index
                if (j < entryLength)
                {
                    entry[j] = (byte)entryFlag;
                }

                // puts back the previous entry value
                if ((j > 0) && (j < entryLength))
                {
                    entry[j - 1] = (byte)(j - 1);
                }

                // Add to FasterLog
                logUncommitted.Enqueue(entry);
            }

            // refresh uncommitted so can see it when scan - do NOT commit though
            logUncommitted.RefreshUncommitted(true);
        }