Example 1
        public void FasterLogTest6([Values] LogChecksumType logChecksum)
        {
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device, MemorySizeBits = 20, PageSizeBits = 14, LogChecksum = logChecksum, LogCommitManager = manager
            });
            byte[] data1 = new byte[1000];
            for (int i = 0; i < 100; i++)
            {
                data1[i] = (byte)i;
            }

            for (int i = 0; i < 100; i++)
            {
                log.Enqueue(data1);
            }
            log.RefreshUncommitted();
            Assert.IsTrue(log.SafeTailAddress == log.TailAddress);

            Assert.IsTrue(log.CommittedUntilAddress < log.SafeTailAddress);

            using (var iter = log.Scan(0, long.MaxValue, scanUncommitted: true))
            {
                while (iter.GetNext(out _, out _, out _))
                {
                    log.TruncateUntil(iter.NextAddress);
                }
                Assert.IsTrue(iter.NextAddress == log.SafeTailAddress);
                log.Enqueue(data1);
                Assert.IsFalse(iter.GetNext(out _, out _, out _));
                log.RefreshUncommitted();
                Assert.IsTrue(iter.GetNext(out _, out _, out _));
            }
            log.Dispose();
        }
Example 2
        public void ScanWithoutRecoverTest([Values] TestUtils.DeviceType deviceType)
        {
            // You may also force an iterator to start at the specified begin address, i.e., without recovering: recover parameter = false

            // Create log and device here (not in setup) because using DeviceType Enum which can't be used in Setup
            string filename = path + "LogScanWithoutRecover" + deviceType.ToString() + ".log";

            device = TestUtils.CreateTestDevice(deviceType, filename);
            log    = new FasterLog(new FasterLogSettings {
                LogDevice = device, SegmentSizeBits = 22, LogCommitDir = path
            });
            PopulateLog(log);

            // Read the log
            int currentEntry = 9;   // since starting at specified address of 1000, need to set current entry as 9 so verification starts at proper spot

            using (var iter = log.Scan(1000, 100_000_000, recover: false))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    if (currentEntry < entryLength)
                    {
                        // Span Batch only added first entry several times so have separate verification
                        Assert.AreEqual((byte)entryFlag, result[currentEntry]);
                        currentEntry++;
                    }
                }
            }

            // Make sure expected length is same as current - also makes sure that data verification was not skipped
            Assert.AreEqual(entryLength, currentEntry);
        }
Example 3
        public void ScanUncommittedTest([Values] TestUtils.DeviceType deviceType)
        {
            // Create log and device here (not in setup) because using DeviceType Enum which can't be used in Setup
            string filename = path + "LogScan" + deviceType.ToString() + ".log";

            device = TestUtils.CreateTestDevice(deviceType, filename);
            log    = new FasterLog(new FasterLogSettings {
                LogDevice = device, SegmentSizeBits = 22, LogCommitDir = path
            });
            PopulateUncommittedLog(log);

            // Setting scanUncommitted to true is the actual test here.
            // Read the log - Look for the flag so we know each entry is unique and still reads uncommitted
            int currentEntry = 0;

            using (var iter = log.Scan(0, 100_000_000, scanUncommitted: true))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    if (currentEntry < entryLength)
                    {
                        // Span Batch only added first entry several times so have separate verification
                        Assert.AreEqual((byte)entryFlag, result[currentEntry]);
                        currentEntry++;
                    }
                }
            }

            // Make sure expected length is same as current - also makes sure that data verification was not skipped
            Assert.AreEqual(entryLength, currentEntry);
        }
Example 4
        public void DeviceAndLogConfig()
        {
            int entryLength = 10;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
                log.Enqueue(entry);
            }

            // Commit to the log
            log.Commit(true);

            // Verify
            Assert.IsTrue(File.Exists(path + "/log-commits/commit.0.0"));
            Assert.IsTrue(File.Exists(path + "/DeviceConfig.0"));

            // Read the log just to verify can actually read it
            int currentEntry = 0;

            using (var iter = log.Scan(0, 100_000_000))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    Assert.IsTrue(result[currentEntry] == currentEntry, "Fail - Result[" + currentEntry.ToString() + "]: is not same as " + currentEntry.ToString());

                    currentEntry++;
                }
            }
        }
Example 5
        public void ScanByNameTest([Values] TestUtils.DeviceType deviceType)
        {
            // You can persist iterators (or more precisely, their CompletedUntilAddress) as part of a commit by simply naming them during their creation.

            // Create log and device here (not in setup) because using DeviceType Enum which can't be used in Setup
            string filename = path + "LogScanByName" + deviceType.ToString() + ".log";

            device = TestUtils.CreateTestDevice(deviceType, filename);
            log    = new FasterLog(new FasterLogSettings {
                LogDevice = device, SegmentSizeBits = 22, LogCommitDir = path
            });
            PopulateLog(log);

            // Read the log - Look for the flag so we know each entry is unique
            int currentEntry = 0;

            using (var iter = log.Scan(0, 100_000_000, name: "TestScan", recover: true))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    if (currentEntry < entryLength)
                    {
                        // Span Batch only added first entry several times so have separate verification
                        Assert.AreEqual((byte)entryFlag, result[currentEntry]);
                        currentEntry++;
                    }
                }
            }

            // Make sure expected length is same as current - also makes sure that data verification was not skipped
            Assert.AreEqual(entryLength, currentEntry);
        }
Example 6
        public async Task FasterLogResumePersistedReaderSpec([Values] LogChecksumType logChecksum)
        {
            var    input1     = new byte[] { 0, 1, 2, 3 };
            var    input2     = new byte[] { 4, 5, 6, 7, 8, 9, 10 };
            var    input3     = new byte[] { 11, 12 };
            string readerName = "abc";

            using (var l = new FasterLog(new FasterLogSettings {
                LogDevice = device, PageSizeBits = 16, MemorySizeBits = 16, LogChecksum = logChecksum, LogCommitFile = commitPath
            }))
            {
                await l.EnqueueAsync(input1);

                await l.EnqueueAsync(input2);

                await l.EnqueueAsync(input3);

                await l.CommitAsync();

                using var originalIterator = l.Scan(0, long.MaxValue, readerName);
                Assert.IsTrue(originalIterator.GetNext(out _, out _, out _, out long recoveryAddress));
                originalIterator.CompleteUntil(recoveryAddress);
                Assert.IsTrue(originalIterator.GetNext(out _, out _, out _, out _));  // move the reader ahead
                await l.CommitAsync();
            }

            using (var l = new FasterLog(new FasterLogSettings {
                LogDevice = device, PageSizeBits = 16, MemorySizeBits = 16, LogChecksum = logChecksum, LogCommitFile = commitPath
            }))
            {
                using var recoveredIterator = l.Scan(0, long.MaxValue, readerName);
                Assert.IsTrue(recoveredIterator.GetNext(out byte[] outBuf, out _, out _, out _));
                Assert.True(input2.SequenceEqual(outBuf));  // we should have read in input2, not input1 or input3
            }
        }
Example 7
        public void ManagedLocalStoreFullParamsTest()
        {
            int entryLength = 10;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
                logFullParams.Enqueue(entry);
            }

            // Commit to the log
            logFullParams.Commit(true);

            // Verify
            Assert.IsTrue(File.Exists(path + "/log-commits/commit.1.0"));
            Assert.IsTrue(File.Exists(path + "/ManagedLocalStore.log.0"));

            // Read the log just to verify can actually read it
            int currentEntry = 0;

            using var iter = logFullParams.Scan(0, 100_000_000);
            while (iter.GetNext(out byte[] result, out _, out _))
            {
                Assert.AreEqual(currentEntry, result[currentEntry]);
                currentEntry++;
            }
        }
Example 8
        public void ScanUncommittedTest()
        {
            // flag to make sure data has been checked
            bool datacheckrun = false;

            // Setting scanUncommitted to true is the actual test here.
            // Read the log - Look for the flag so we know each entry is unique and still reads uncommitted
            int currentEntry = 0;

            using (var iter = logUncommitted.Scan(0, 100_000_000, scanUncommitted: true))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    if (currentEntry < entryLength)
                    {
                        // set check flag to show got in here
                        datacheckrun = true;

                        // Span Batch only added first entry several times so have separate verification
                        Assert.IsTrue(result[currentEntry] == (byte)entryFlag, "Fail - Result[" + currentEntry.ToString() + "]:" + result[0].ToString() + "  entryFlag:" + entryFlag);

                        currentEntry++;
                    }
                }
            }

            // if data verification was skipped, then pop a fail
            if (datacheckrun == false)
            {
                Assert.Fail("Failure -- data loop after log.Scan never entered so wasn't verified. ");
            }
        }
Example 9
        private void FasterLogTest1(LogChecksumType logChecksum, IDevice device, ILogCommitManager logCommitManager)
        {
            log = new FasterLog(new FasterLogSettings {
                PageSizeBits = 20, SegmentSizeBits = 20, LogDevice = device, LogChecksum = logChecksum, LogCommitManager = logCommitManager
            });

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }
            log.Commit(true);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                int count = 0;
                while (iter.GetNext(out byte[] result, out int length, out long currentAddress))
                {
                    count++;
                    Assert.IsTrue(result.SequenceEqual(entry));
                    if (count % 100 == 0)
                    {
                        log.TruncateUntil(iter.NextAddress);
                    }
                }
                Assert.IsTrue(count == numEntries);
            }

            log.Dispose();
        }
Example 10
        public void BasicHighLatencyDeviceTest()
        {
            TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true);

            // Create devices \ log for test for in memory device
            using LocalMemoryDevice device = new LocalMemoryDevice(1L << 28, 1L << 25, 2, latencyMs: 20);
            using FasterLog LocalMemorylog = new FasterLog(new FasterLogSettings { LogDevice = device, PageSizeBits = 80, MemorySizeBits = 20, GetMemory = null, SegmentSizeBits = 80, MutableFraction = 0.2, LogCommitManager = null });

            int entryLength = 10;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
                LocalMemorylog.Enqueue(entry);
            }

            // Commit to the log
            LocalMemorylog.Commit(true);

            // Read the log just to verify was actually committed
            int currentEntry = 0;

            using (var iter = LocalMemorylog.Scan(0, 100_000_000))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    Assert.IsTrue(result[currentEntry] == currentEntry, "Fail - Result[" + currentEntry.ToString() + "]: is not same as " + currentEntry.ToString());
                    currentEntry++;
                }
            }
        }
Example 11
        public void ScanConsumerTest([Values] TestUtils.DeviceType deviceType)
        {
            // Create log and device here (not in setup) because using DeviceType Enum which can't be used in Setup
            string filename = path + "LogScanDefault" + deviceType.ToString() + ".log";

            device = TestUtils.CreateTestDevice(deviceType, filename);
            log    = new FasterLog(new FasterLogSettings {
                LogDevice = device, SegmentSizeBits = 22, LogCommitDir = path
            });
            PopulateLog(log);

            // Basic default scan from start to end
            // Indirectly used in other tests, but good to have the basic test here for completeness

            // Read the log - Look for the flag so we know each entry is unique
            var consumer = new TestConsumer();

            using (var iter = log.Scan(0, 100_000_000))
            {
                while (iter.TryConsumeNext(consumer))
                {
                }
            }

            // Make sure expected length is same as current - also makes sure that data verification was not skipped
            Assert.AreEqual(entryLength, consumer.currentEntry);
        }
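The TestConsumer passed to TryConsumeNext above is not part of this listing. A minimal sketch of what such a consumer might look like, assuming FASTER's ILogEntryConsumer interface exposes a Consume(ReadOnlySpan<byte> entry, long currentAddress, long nextAddress) method; the entryLength and entryFlag constants below are placeholders standing in for the test class fields used by the other examples.
        internal class TestConsumer : ILogEntryConsumer
        {
            // Placeholder stand-ins for the enclosing test class's fields
            const int entryLength = 100;
            const int entryFlag = 9999;

            internal int currentEntry = 0;

            public void Consume(ReadOnlySpan<byte> entry, long currentAddress, long nextAddress)
            {
                // Same per-entry check as the GetNext-based scans above
                if (currentEntry < entryLength)
                {
                    Assert.AreEqual((byte)entryFlag, entry[currentEntry]);
                    currentEntry++;
                }
            }
        }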
Example 12
        public void FasterLogTest1([Values] LogChecksumType logChecksum)
        {
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device, LogChecksum = logChecksum
            });

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }
            log.Commit(true);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                int count = 0;
                while (iter.GetNext(out byte[] result, out int length))
                {
                    count++;
                    Assert.IsTrue(result.SequenceEqual(entry));
                    if (count % 100 == 0)
                    {
                        log.TruncateUntil(iter.CurrentAddress);
                    }
                }
                Assert.IsTrue(count == numEntries);
            }

            log.Dispose();
        }
Example 13
        static void ScanThread()
        {
            Random r = new Random();

            byte[] result;

            using (iter = log.Scan(log.BeginAddress, long.MaxValue))
            {
                while (true)
                {
                    while (!iter.GetNext(out result, out int length))
                    {
                        // For finite end address, check if iteration ended
                        // if (iter.CurrentAddress >= endAddress) return;
                        iter.WaitAsync().GetAwaiter().GetResult();
                    }

                    // Memory pool variant:
                    // iter.GetNext(pool, out IMemoryOwner<byte> resultMem, out int length))

                    if (Different(result, staticEntry, out int location))
                    {
                        throw new Exception("Invalid entry found");
                    }

                    // Re-insert entry with small probability
                    if (r.Next(100) < 10)
                    {
                        log.Enqueue(result);
                    }

                    // Example of random read from given address
                    // (result, _) = log.ReadAsync(iter.CurrentAddress).GetAwaiter().GetResult();

                    // Truncate log until after recently processed entry
                    log.TruncateUntil(iter.NextAddress);

                    // Safer truncate variant: truncate until start of page
                    // log.TruncateUntilPageStart(iter.NextAddress);
                }
            }

            // Example of recoverable (named) iterator:
            // using (iter = log.Scan(log.BeginAddress, long.MaxValue, "foo"))
        }
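The Different helper used above is not shown in this listing. A plausible, hypothetical implementation that reports the first index at which two byte arrays differ:
        static bool Different(byte[] b1, byte[] b2, out int location)
        {
            location = 0;
            if (b1.Length != b2.Length)
                return true;
            for (; location < b1.Length; location++)
            {
                if (b1[location] != b2[location])
                    return true;
            }
            return false;
        }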
Example 14
        public async Task FasterLogResumePersistedReader2([Values] LogChecksumType logChecksum, [Values] bool overwriteLogCommits, [Values] bool removeOutdated)
        {
            var    input1     = new byte[] { 0, 1, 2, 3 };
            var    input2     = new byte[] { 4, 5, 6, 7, 8, 9, 10 };
            var    input3     = new byte[] { 11, 12 };
            string readerName = "abc";

            using (var logCommitManager = new DeviceLogCommitCheckpointManager(new LocalStorageNamedDeviceFactory(), new DefaultCheckpointNamingScheme(commitPath), overwriteLogCommits, removeOutdated))
            {
                long originalCompleted;

                using (var l = new FasterLog(new FasterLogSettings {
                    LogDevice = device, PageSizeBits = 16, MemorySizeBits = 16, LogChecksum = logChecksum, LogCommitManager = logCommitManager
                }))
                {
                    await l.EnqueueAsync(input1);

                    await l.CommitAsync();

                    await l.EnqueueAsync(input2);

                    await l.CommitAsync();

                    await l.EnqueueAsync(input3);

                    await l.CommitAsync();

                    using (var originalIterator = l.Scan(0, long.MaxValue, readerName))
                    {
                        originalIterator.GetNext(out _, out _, out _, out long recoveryAddress);
                        originalIterator.CompleteUntil(recoveryAddress);
                        originalIterator.GetNext(out _, out _, out _, out _);  // move the reader ahead
                        await l.CommitAsync();

                        originalCompleted = originalIterator.CompletedUntilAddress;
                    }
                }

                using (var l = new FasterLog(new FasterLogSettings {
                    LogDevice = device, PageSizeBits = 16, MemorySizeBits = 16, LogChecksum = logChecksum, LogCommitManager = logCommitManager
                }))
                {
                    using (var recoveredIterator = l.Scan(0, long.MaxValue, readerName))
                    {
                        recoveredIterator.GetNext(out byte[] outBuf, out _, out _, out _);

                        // we should have read in input2, not input1 or input3
                        Assert.True(input2.SequenceEqual(outBuf), $"Original: {input2[0]}, Recovered: {outBuf[0]}, Original: {originalCompleted}, Recovered: {recoveredIterator.CompletedUntilAddress}");

                        // TestContext.Progress.WriteLine($"Original: {originalCompleted}, Recovered: {recoveredIterator.CompletedUntilAddress}");
                    }
                }
            }
        }
Example 15
        // This creates a separate FasterLog over the same log file, using RecoverReadOnly to continuously update
        // to the primary FasterLog's commits.
        public async Task SeparateConsumerAsync(CancellationToken cancellationToken)
        {
            var _ = BeginRecoverReadOnlyLoop(logReadOnly, cancellationToken);

            // This enumerator waits asynchronously when we have reached the committed tail of the duplicate FasterLog. When RecoverReadOnly
            // reads new data committed by the primary FasterLog, it signals commit completion to let iter continue to the new tail.
            using var iter = logReadOnly.Scan(logReadOnly.BeginAddress, long.MaxValue);
            await foreach (var(result, length, currentAddress, nextAddress) in iter.GetAsyncEnumerable(cancellationToken))
            {
                iter.CompleteUntil(nextAddress);
            }
        }
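BeginRecoverReadOnlyLoop (like the RecoverAsync and BeginRecoverAsyncLoop helpers in the later examples) is not reproduced in this listing. A minimal sketch of the idea, assuming the read-only FasterLog instance exposes RecoverReadOnly() to pick up the primary log's latest commit; the one-second poll interval is an arbitrary choice.
        static async Task BeginRecoverReadOnlyLoop(FasterLog log, CancellationToken cancellationToken)
        {
            while (!cancellationToken.IsCancellationRequested)
            {
                // Re-read the primary log's commit metadata so this read-only instance
                // advances to the newly committed tail.
                log.RecoverReadOnly();
                await Task.Delay(TimeSpan.FromSeconds(1), cancellationToken);
            }
        }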
Example 16
        public void ScanBasicDefaultTest()
        {
            // Basic default scan from start to end
            // Indirectly used in other tests, but good to have the basic test here for completeness

            // flag to make sure data has been checked
            bool datacheckrun = false;

            // Read the log - Look for the flag so we know each entry is unique
            int currentEntry = 0;

            using (var iter = log.Scan(0, 100_000_000))
            {
                while (iter.GetNext(out byte[] result, out _, out _))
                {
                    if (currentEntry < entryLength)
                    {
                        // set check flag to show got in here
                        datacheckrun = true;

                        // Span Batch only added first entry several times so have separate verification
                        Assert.IsTrue(result[currentEntry] == (byte)entryFlag, "Fail - Result[" + currentEntry.ToString() + "]:" + result[0].ToString() + "  entryFlag:" + entryFlag);

                        currentEntry++;
                    }
                }
            }

            // if data verification was skipped, then pop a fail
            if (datacheckrun == false)
            {
                Assert.Fail("Failure -- data loop after log.Scan never entered so wasn't verified. ");
            }
        }
Example 17
        public async Task FasterLogTest2([Values] LogChecksumType logChecksum)
        {
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device, LogChecksum = logChecksum, LogCommitManager = manager
            });
            byte[] data1 = new byte[10000];
            for (int i = 0; i < 10000; i++)
            {
                data1[i] = (byte)i;
            }

            using (var iter = log.Scan(0, long.MaxValue, scanBufferingMode: ScanBufferingMode.SinglePageBuffering))
            {
                int i = 0;
                while (i++ < 500)
                {
                    var waitingReader = iter.WaitAsync();
                    Assert.IsTrue(!waitingReader.IsCompleted);

                    while (!log.TryEnqueue(data1, out _))
                    {
                        ;
                    }

                    // We might have auto-committed at page boundary
                    // Ensure we don't find new entry in iterator
                    while (waitingReader.IsCompleted)
                    {
                        var _next = iter.GetNext(out _, out _, out _);
                        Assert.IsFalse(_next);
                        waitingReader = iter.WaitAsync();
                    }
                    Assert.IsFalse(waitingReader.IsCompleted);

                    await log.CommitAsync();

                    while (!waitingReader.IsCompleted)
                    {
                        ;
                    }
                    Assert.IsTrue(waitingReader.IsCompleted);

                    var curr = iter.GetNext(out byte[] result, out _, out _);
                    Assert.IsTrue(curr);
                    Assert.IsTrue(result.SequenceEqual(data1));

                    var next = iter.GetNext(out _, out _, out _);
                    Assert.IsFalse(next);
                }
            }
            log.Dispose();
        }
Example 18
        public async Task <Option <(string, long, long)> > GetNext()
        {
            using FasterLogScanIterator iter = logger.Scan(nextAddress, 100_000_000);
            while (true)
            {
                byte[] entry;
                int    length;

                while (!iter.GetNext(out entry, out length))
                {
                    if (iter.CurrentAddress >= 100_000_000)
                    {
                        return(Option.None <(string, long, long)>());
                    }
                }

                UTF8Encoding encoding = new UTF8Encoding();
                await iter.WaitAsync();

                nextAddress = iter.NextAddress;
                return(Option.Some((encoding.GetString(entry), iter.CurrentAddress, iter.NextAddress)));                 // Possible to pipe
            }
        }
Example 19
        static async Task SeparateConsumerAsync(CancellationToken cancellationToken)
        {
            var device = Devices.CreateLogDevice(path + "mylog");
            var log    = new FasterLog(new FasterLogSettings {
                LogDevice = device, ReadOnlyMode = true, PageSizeBits = 9, SegmentSizeBits = 9
            });
            var _ = RecoverAsync(log, cancellationToken);

            using var iter = log.Scan(log.BeginAddress, long.MaxValue);

            await foreach (var(result, length, currentAddress, nextAddress) in iter.GetAsyncEnumerable(cancellationToken))
            {
                Console.WriteLine($"Consuming {Encoding.UTF8.GetString(result)}");
                iter.CompleteUntil(nextAddress);
            }
        }
Example 20
        public async Task <List <(string, long, long)> > StartScan(string devicePath)
        {
            IDevice   device = Devices.CreateLogDevice(devicePath);
            FasterLog logger = new FasterLog(new FasterLogSettings {
                LogDevice = device
            });
            long nextAddress = 0;
            bool keepGoing   = true;
            int  i           = 0;

            var result = new List <(string, long, long)>();

            // using (FasterLogScanIterator iter = logger.Scan(logger.BeginAddress, 100_000_000, name: nameof(GetListAsync)))
            using (FasterLogScanIterator iter = logger.Scan(nextAddress, 1_000_000_000))
            {
                while (keepGoing)
                {
                    Console.WriteLine("Going");
                    LocalTime timeOfDay;
                    await foreach ((byte[] bytes, int length) in iter.GetAsyncEnumerable())
                    {
                        DateTimeZone tz = DateTimeZoneProviders.Tzdb.GetSystemDefault();
                        timeOfDay   = SystemClock.Instance.GetCurrentInstant().InZone(tz).TimeOfDay;
                        nextAddress = iter.NextAddress;
                        Console.WriteLine("Time={1} NextAddress={0}, Count={2}", iter.NextAddress, timeOfDay, i++);
                        var          cts      = new CancellationTokenSource();
                        UTF8Encoding encoding = new UTF8Encoding();

                        try
                        {
                            await Task.WhenAny(WaitAsync(iter), SetTimeout(cts));
                        }
                        catch (Exception e)
                        {
                            Console.Error.WriteLine($"Error={e.GetType()}, Message={e.ToString()}");
                            break;
                        }

                        timeOfDay = SystemClock.Instance.GetCurrentInstant().InZone(tz).TimeOfDay;
                        Console.WriteLine("Time={2} ContentLength={0}", bytes.Length, iter.NextAddress, timeOfDay);
                    }
                    await Task.Delay(5000);
                }
            }

            return(result);
        }
Example 21
        public async Task FasterLogTest3([Values] LogChecksumType logChecksum)
        {
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device, PageSizeBits = 14, LogChecksum = logChecksum
            });
            byte[] data1 = new byte[10000];
            for (int i = 0; i < 10000; i++)
            {
                data1[i] = (byte)i;
            }

            using (var iter = log.Scan(0, long.MaxValue, scanBufferingMode: ScanBufferingMode.SinglePageBuffering))
            {
                var appendResult = log.TryEnqueue(data1, out _);
                Assert.IsTrue(appendResult);
                await log.CommitAsync();

                await iter.WaitAsync();

                var iterResult = iter.GetNext(out byte[] entry, out _, out _);
                Assert.IsTrue(iterResult);

                appendResult = log.TryEnqueue(data1, out _);
                Assert.IsFalse(appendResult);
                await iter.WaitAsync();

                // Should read the "hole" and return false
                iterResult = iter.GetNext(out entry, out _, out _);
                Assert.IsFalse(iterResult);

                // Should wait for next item
                var task = iter.WaitAsync();
                Assert.IsFalse(task.IsCompleted);

                appendResult = log.TryEnqueue(data1, out _);
                Assert.IsTrue(appendResult);
                await log.CommitAsync();

                await task;
                iterResult = iter.GetNext(out entry, out _, out _);
                Assert.IsTrue(iterResult);
            }
            log.Dispose();
        }
Example 22
        static async Task ConsumerAsync(FasterLog log, CancellationToken cancellationToken)
        {
            using var iter = log.Scan(log.BeginAddress, long.MaxValue, "foo", true, ScanBufferingMode.DoublePageBuffering, scanUncommitted: true);

            int count = 0;

            await foreach (var(result, length, currentAddress, nextAddress) in iter.GetAsyncEnumerable(cancellationToken))
            {
                Console.WriteLine($"Consuming {Encoding.UTF8.GetString(result)}");
                iter.CompleteUntil(nextAddress);
                log.TruncateUntil(nextAddress);

                // We simulate temporary slow down of data consumption
                // This will cause transient log spill to disk (observe folder on storage)
                if (count++ > 1000 && count < 1200)
                {
                    Thread.Sleep(100);
                }
            }
        }
Example 23
        // This creates a separate FasterLog over the same log file, using RecoverReadOnly to continuously update
        // to the primary FasterLog's commits.
        static async Task SeparateConsumerAsync(CancellationToken cancellationToken)
        {
            using var device = Devices.CreateLogDevice(path + "mylog");
            using var log    = new FasterLog(new FasterLogSettings { LogDevice = device, ReadOnlyMode = true, PageSizeBits = 9, SegmentSizeBits = 9 });

            try
            {
                var _ = BeginRecoverAsyncLoop(log, cancellationToken);

                // This enumerator waits asynchronously when we have reached the committed tail of the duplicate FasterLog. When RecoverReadOnly
                // reads new data committed by the primary FasterLog, it signals commit completion to let iter continue to the new tail.
                using var iter = log.Scan(log.BeginAddress, long.MaxValue);
                await foreach (var(result, length, currentAddress, nextAddress) in iter.GetAsyncEnumerable(cancellationToken))
                {
                    Console.WriteLine($"Separate Log Consuming {Encoding.UTF8.GetString(result)}");
                    iter.CompleteUntil(nextAddress);
                }
            }
            catch (OperationCanceledException) { }
            Console.WriteLine("SeparateConsumer complete");
        }
Example 24
        public async Task FasterLogTest4([Values] LogChecksumType logChecksum)
        {
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device, PageSizeBits = 14, LogChecksum = logChecksum
            });
            byte[] data1 = new byte[100];
            for (int i = 0; i < 100; i++)
            {
                data1[i] = (byte)i;
            }

            for (int i = 0; i < 100; i++)
            {
                log.Enqueue(data1);
            }

            Assert.IsTrue(log.CommittedUntilAddress == log.BeginAddress);
            await log.CommitAsync();

            Assert.IsTrue(log.CommittedUntilAddress == log.TailAddress);
            Assert.IsTrue(log.CommittedBeginAddress == log.BeginAddress);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                // Read the first committed entry, then truncate the log past it
                var iterResult = iter.GetNext(out byte[] entry, out _, out _);
                log.TruncateUntil(iter.NextAddress);

                Assert.IsTrue(log.CommittedUntilAddress == log.TailAddress);
                Assert.IsTrue(log.CommittedBeginAddress < log.BeginAddress);
                Assert.IsTrue(iter.NextAddress == log.BeginAddress);

                await log.CommitAsync();

                Assert.IsTrue(log.CommittedUntilAddress == log.TailAddress);
                Assert.IsTrue(log.CommittedBeginAddress == log.BeginAddress);
            }
            log.Dispose();
        }
Example 25
        /// <summary>
        /// Main program entry point
        /// </summary>
        static void Main()
        {
            bool sync = true;

            // Populate entry being inserted
            for (int i = 0; i < entryLength; i++)
            {
                staticEntry[i] = (byte)i;
            }

            var     path   = Path.GetTempPath() + "FasterLogSample/";
            IDevice device = Devices.CreateLogDevice(path + "hlog.log");

            // FasterLog will recover and resume if there is a previous commit found
            log = new FasterLog(new FasterLogSettings {
                LogDevice = device
            });

            using (iter = log.Scan(log.BeginAddress, long.MaxValue))
            {
                if (sync)
                {
                    // Log writer thread: create as many as needed
                    new Thread(new ThreadStart(LogWriterThread)).Start();

                    // Threads for iterator scan: create as many as needed
                    new Thread(() => ScanThread()).Start();

                    // Threads for reporting, commit
                    new Thread(new ThreadStart(ReportThread)).Start();
                    var t = new Thread(new ThreadStart(CommitThread));
                    t.Start();
                    t.Join();
                }
                else
                {
                    // Async version of demo: expect lower performance
                    // particularly for small payload sizes

                    const int NumParallelTasks = 10_000;
                    ThreadPool.SetMinThreads(2 * Environment.ProcessorCount, 2 * Environment.ProcessorCount);
                    TaskScheduler.UnobservedTaskException += (object sender, UnobservedTaskExceptionEventArgs e) =>
                    {
                        Console.WriteLine($"Unobserved task exception: {e.Exception}");
                        e.SetObserved();
                    };

                    Task[] tasks = new Task[NumParallelTasks];
                    for (int i = 0; i < NumParallelTasks; i++)
                    {
                        int local = i;
                        tasks[i] = Task.Run(() => AsyncLogWriter(local));
                    }

                    var scan = Task.Run(() => AsyncScan());

                    // Threads for reporting, commit
                    new Thread(new ThreadStart(ReportThread)).Start();
                    new Thread(new ThreadStart(CommitThread)).Start();

                    Task.WaitAll(tasks);
                    Task.WaitAll(scan);
                }
            }
        }
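The LogWriterThread, ReportThread, CommitThread, AsyncLogWriter, and AsyncScan helpers belong to the surrounding sample and are not reproduced here (ScanThread appears as Example 13). As an illustration, a commit loop in the spirit of the one in Example 26 might look like the following; the 5 ms interval is an assumption.
        static void CommitThread()
        {
            while (true)
            {
                Thread.Sleep(5);

                // Synchronously commit everything enqueued so far
                log.Commit(true);
            }
        }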
Example 26
        public static void ManagedLocalStoreBasicTest()
        {
            int  entryLength       = 20;
            int  numEntries        = 500_000;
            int  numEnqueueThreads = 1;
            int  numIterThreads    = 1;
            bool commitThread      = false;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            bool disposeCommitThread = false;
            var  commit =
                new Thread(() =>
            {
                while (!disposeCommitThread)
                {
                    Thread.Sleep(10);
                    log.Commit(true);
                }
            });

            if (commitThread)
            {
                commit.Start();
            }

            Thread[] th = new Thread[numEnqueueThreads];
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t] =
                    new Thread(() =>
                {
                    // Enqueue but set each Entry in a way that can differentiate between entries
                    for (int i = 0; i < numEntries; i++)
                    {
                        // Flag one part of entry data that corresponds to index
                        entry[0] = (byte)i;

                        // Default is add bytes so no need to do anything with it
                        log.Enqueue(entry);
                    }
                });
            }

            Console.WriteLine("Populating log...");
            var sw = Stopwatch.StartNew();

            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Start();
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Join();
            }

            sw.Stop();
            Console.WriteLine($"{numEntries} items enqueued to the log by {numEnqueueThreads} threads in {sw.ElapsedMilliseconds} ms");

            if (commitThread)
            {
                disposeCommitThread = true;
                commit.Join();
            }

            // Final commit to the log
            log.Commit(true);

            // flag to make sure data has been checked
            bool datacheckrun = false;

            Thread[] th2 = new Thread[numIterThreads];
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t] =
                    new Thread(() =>
                {
                    // Read the log - Look for the flag so we know each entry is unique
                    int currentEntry = 0;
                    using (var iter = log.Scan(0, long.MaxValue))
                    {
                        while (iter.GetNext(out byte[] result, out _, out _))
                        {
                            // set check flag to show got in here
                            datacheckrun = true;

                            if (numEnqueueThreads == 1)
                            {
                                if (result[0] != (byte)currentEntry)
                                {
                                    throw new Exception("Fail - Result[" + currentEntry.ToString() + "]:" + result[0].ToString());
                                }
                            }
                            currentEntry++;
                        }
                    }

                    if (currentEntry != numEntries * numEnqueueThreads)
                    {
                        throw new Exception("Error");
                    }
                });
            }

            sw.Restart();

            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Start();
            }
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Join();
            }

            sw.Stop();
            Console.WriteLine($"{numEntries} items iterated in the log by {numIterThreads} threads in {sw.ElapsedMilliseconds} ms");

            // if data verification was skipped, then pop a fail
            if (datacheckrun == false)
            {
                throw new Exception("Failure -- data loop after log.Scan never entered so wasn't verified. ");
            }
        }
Example 27
        private async ValueTask FasterLogTest1(LogChecksumType logChecksum, IDevice device, ILogCommitManager logCommitManager, FasterLogTests.IteratorType iteratorType)
        {
            var logSettings = new FasterLogSettings {
                PageSizeBits = 20, SegmentSizeBits = 20, LogDevice = device, LogChecksum = logChecksum, LogCommitManager = logCommitManager
            };

            log = FasterLogTests.IsAsync(iteratorType) ? await FasterLog.CreateAsync(logSettings) : new FasterLog(logSettings);

            byte[] entry = new byte[entryLength];
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            for (int i = 0; i < numEntries; i++)
            {
                log.Enqueue(entry);
            }
            log.Commit(true);

            using (var iter = log.Scan(0, long.MaxValue))
            {
                var counter = new FasterLogTests.Counter(log);

                switch (iteratorType)
                {
                case FasterLogTests.IteratorType.AsyncByteVector:
                    await foreach ((byte[] result, _, _, long nextAddress) in iter.GetAsyncEnumerable())
                    {
                        Assert.IsTrue(result.SequenceEqual(entry));
                        counter.IncrementAndMaybeTruncateUntil(nextAddress);

                        // MoveNextAsync() would hang at TailAddress, waiting for more entries (that we don't add).
                        // Note: If this happens and the test has to be canceled, there may be a leftover blob from the log.Commit(), because
                        // the log device isn't Dispose()d; the symptom is currently a numeric string format error in DefaultCheckpointNamingScheme.
                        if (nextAddress == log.TailAddress)
                        {
                            break;
                        }
                    }
                    break;

                case FasterLogTests.IteratorType.AsyncMemoryOwner:
                    await foreach ((IMemoryOwner <byte> result, int _, long _, long nextAddress) in iter.GetAsyncEnumerable(MemoryPool <byte> .Shared))
                    {
                        Assert.IsTrue(result.Memory.Span.ToArray().Take(entry.Length).SequenceEqual(entry));
                        result.Dispose();
                        counter.IncrementAndMaybeTruncateUntil(nextAddress);

                        // MoveNextAsync() would hang at TailAddress, waiting for more entries (that we don't add).
                        // Note: If this happens and the test has to be canceled, there may be a leftover blob from the log.Commit(), because
                        // the log device isn't Dispose()d; the symptom is currently a numeric string format error in DefaultCheckpointNamingScheme.
                        if (nextAddress == log.TailAddress)
                        {
                            break;
                        }
                    }
                    break;

                case FasterLogTests.IteratorType.Sync:
                    while (iter.GetNext(out byte[] result, out _, out _))
                    {
                        Assert.IsTrue(result.SequenceEqual(entry));
                        counter.IncrementAndMaybeTruncateUntil(iter.NextAddress);
                    }
                    break;

                default:
                    Assert.Fail("Unknown IteratorType");
                    break;
                }
                Assert.IsTrue(counter.count == numEntries);
            }

            log.Dispose();
        }
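FasterLogTests.Counter is not included in this listing. A minimal sketch consistent with how it is used above: count each entry and periodically truncate the log, as Example 9 does by hand; the 100-entry truncation interval is an assumption.
        internal class Counter
        {
            internal int count;
            private readonly FasterLog log;

            internal Counter(FasterLog log)
            {
                this.count = 0;
                this.log = log;
            }

            internal void IncrementAndMaybeTruncateUntil(long nextAddress)
            {
                // Truncate every 100 entries, mirroring Example 9
                if (++this.count % 100 == 0)
                    this.log.TruncateUntil(nextAddress);
            }
        }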
Example 28
        public async Task FasterLogResumePersistedReader3([Values] LogChecksumType logChecksum, [Values] bool overwriteLogCommits, [Values] bool removeOutdated)
        {
            var    input1     = new byte[] { 0, 1, 2, 3 };
            var    input2     = new byte[] { 4, 5, 6, 7, 8, 9, 10 };
            var    input3     = new byte[] { 11, 12 };
            string readerName = "abcd";

            using (var logCommitManager = new DeviceLogCommitCheckpointManager(new LocalStorageNamedDeviceFactory(), new DefaultCheckpointNamingScheme(commitPath), overwriteLogCommits, removeOutdated))
            {
                long originalCompleted;

                using (var l = new FasterLog(new FasterLogSettings {
                    LogDevice = device, PageSizeBits = 16, MemorySizeBits = 16, LogChecksum = logChecksum, LogCommitManager = logCommitManager
                }))
                {
                    await l.EnqueueAsync(input1);

                    await l.CommitAsync();

                    await l.EnqueueAsync(input2);

                    await l.CommitAsync();

                    await l.EnqueueAsync(input3);

                    await l.CommitAsync();

                    using var originalIterator = l.Scan(0, l.TailAddress, readerName);

                    int count = 0;
                    await foreach (var item in originalIterator.GetAsyncEnumerable())
                    {
                        if (count < 2) // we complete 1st and 2nd item read
                        {
                            originalIterator.CompleteUntil(item.nextAddress);
                        }

                        if (count < 1) // we commit only 1st item read
                        {
                            await l.CommitAsync();
                        }

                        count++;
                    }
                    originalCompleted = originalIterator.CompletedUntilAddress;
                }

                using (var l = new FasterLog(new FasterLogSettings {
                    LogDevice = device, PageSizeBits = 16, MemorySizeBits = 16, LogChecksum = logChecksum, LogCommitManager = logCommitManager
                }))
                {
                    using var recoveredIterator = l.Scan(0, l.TailAddress, readerName);

                    int count = 0;
                    await foreach (var item in recoveredIterator.GetAsyncEnumerable())
                    {
                        if (count == 0) // resumed iterator will start at item2
                        {
                            Assert.True(input2.SequenceEqual(item.entry), $"Original: {input2[0]}, Recovered: {item.entry[0]}, Original: {originalCompleted}, Recovered: {recoveredIterator.CompletedUntilAddress}");
                        }
                        count++;
                    }
                    Assert.IsTrue(count == 2);
                }
            }
        }
Example 29
        public void ManagedLocalStoreBasicTest()
        {
            int  entryLength       = 20;
            int  numEntries        = 1000;
            int  numEnqueueThreads = 1;
            int  numIterThreads    = 1;
            bool commitThread      = false;

            // Set Default entry data
            for (int i = 0; i < entryLength; i++)
            {
                entry[i] = (byte)i;
            }

            bool disposeCommitThread = false;
            var  commit =
                new Thread(() =>
            {
                while (!disposeCommitThread)
                {
                    log.Commit(true);
                }
            });

            if (commitThread)
            {
                commit.Start();
            }

            Thread[] th = new Thread[numEnqueueThreads];
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t] =
                    new Thread(() =>
                {
                    // Enqueue but set each Entry in a way that can differentiate between entries
                    for (int i = 0; i < numEntries; i++)
                    {
                        // Flag one part of entry data that corresponds to index
                        entry[0] = (byte)i;

                        // Default is add bytes so no need to do anything with it
                        log.Enqueue(entry);
                    }
                });
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Start();
            }
            for (int t = 0; t < numEnqueueThreads; t++)
            {
                th[t].Join();
            }

            if (commitThread)
            {
                disposeCommitThread = true;
                commit.Join();
            }

            // Final commit to the log
            log.Commit(true);

            int currentEntry = 0;

            Thread[] th2 = new Thread[numIterThreads];
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t] =
                    new Thread(() =>
                {
                    // Read the log - Look for the flag so we know each entry is unique
                    using (var iter = log.Scan(0, long.MaxValue))
                    {
                        while (iter.GetNext(out byte[] result, out _, out _))
                        {
                            if (numEnqueueThreads == 1)
                            {
                                Assert.AreEqual((byte)currentEntry, result[0]);
                            }
                            currentEntry++;
                        }
                    }

                    Assert.AreEqual(numEntries * numEnqueueThreads, currentEntry);
                });
            }

            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Start();
            }
            for (int t = 0; t < numIterThreads; t++)
            {
                th2[t].Join();
            }

            // Make sure number of entries is same as current - also makes sure that data verification was not skipped
            Assert.AreEqual(numEntries, currentEntry);
        }
Example 30
        public async Task <RecordQueryResult> QueryIndividual(RecordQuery query)
        {
            var ticks = query.EntryMinute.UtcDateTime.Ticks;
            var f     =
                $"/Users/mdurham/Source/ImprovTime/ImprovTimeConsole/improv/{query.ServiceName}.{query.MetricName}.{ticks}/hlog.log";
            var fInfo = new FileInfo(f);

            if (!fInfo.Directory.Exists)
            {
                return(new RecordQueryResult()
                {
                    Result = 0,
                    Source = query
                });
            }
            var kvFile =
                $"/Users/mdurham/Source/ImprovTime/ImprovTimeConsole/improv/{query.ServiceName}.{query.MetricName}.{ticks}.kv";

            using var env = new LightningEnvironment(kvFile);

            env.MaxDatabases = 5;
            env.Open();
            using var tx = env.BeginTransaction();



            using var fasterKvDevice = Devices.CreateLogDevice(kvFile);
            StringBuilder key = new StringBuilder();

            key.Append(query.ServiceName);
            key.Append(query.MetricName);
            key.Append(query.EntryMinute.Ticks);
            foreach (var kvp in query.Attributes)
            {
                key.Append(kvp.Name + ":" + kvp.Value + ";");
            }

            key.Append(query.Aggregate.ToString());
            var keyBytes = UTF8Encoding.UTF8.GetBytes(key.ToString());

            using (var db = tx.OpenDatabase("kv", new DatabaseConfiguration {
                Flags = DatabaseOpenFlags.Create
            }))
            {
                var result = tx.Get(db, keyBytes);
                if (result.resultCode == MDBResultCode.Success)
                {
                    // TODO look at using a span
                    var value = BitConverter.ToDouble(result.value.CopyToNewArray(), 0);
                    return(new RecordQueryResult()
                    {
                        Result = value,
                        Source = query
                    });
                }
            }


            var device = Devices.CreateLogDevice(f);
            var log    = new FasterLog(new FasterLogSettings {
                LogDevice = device
            });
            // Record Id and Count
            double totalCount = 0;
            double sumValue   = 0;

            using (var iter = log.Scan(log.BeginAddress, long.MaxValue))
            {
                var more = iter.GetNext(out byte[] result, out int entryLength, out long currentAddress,
                                        out long nextAddress);
                var entry             = LogEntry.Parser.ParseFrom(result);
                var matchedAttributes = 0;
                // Set out initial value of old to the same as entry
                LogEntry oldEntry = entry;
                while (more)
                {
                    if (entry.RecordId != oldEntry.RecordId)
                    {
                        if (matchedAttributes >= query.Attributes.Count)
                        {
                            totalCount++;
                            sumValue += oldEntry.MetricValue;
                        }
                        // Reset matched attributes
                        matchedAttributes = 0;
                    }

                    if (IsValid(entry, query))
                    {
                        matchedAttributes++;
                    }

                    more = iter.GetNext(out result, out entryLength, out currentAddress,
                                        out nextAddress);
                    oldEntry = entry;
                    if (more)
                    {
                        entry = LogEntry.Parser.ParseFrom(result);
                    }
                }
            }



            if (query.Aggregate == Aggregate.Count)
            {
                SaveKV(tx, keyBytes, totalCount);
                return(new RecordQueryResult()
                {
                    Result = totalCount,
                    Source = query
                });
            }

            if (query.Aggregate == Aggregate.Sum)
            {
                SaveKV(tx, keyBytes, sumValue);
                return(new RecordQueryResult()
                {
                    Result = sumValue,
                    Source = query
                });
            }

            return(null);
        }