Example #1
0
        public void Recovery_must_not_delete_journals_that_havent_been_synced_yet()
        {
            RequireFileBasedPager();

            var random  = new Random();
            var payload = new byte[512];

            // Writes one transaction with 100 random-valued entries into "tree".
            void WriteBatch()
            {
                using (var tx = Env.WriteTransaction())
                {
                    var tree = tx.CreateTree("tree");

                    for (var j = 0; j < 100; j++)
                    {
                        random.NextBytes(payload);
                        tree.Add(new string((char)j, 1000), payload);
                    }

                    tx.Commit();
                }
            }

            // First batch - flushed to the data file below.
            for (var i = 0; i < 10; i++)
                WriteBatch();

            Env.FlushLogToDataFile();

            // Second batch - written but not yet flushed when the sync starts.
            for (var i = 0; i < 10; i++)
                WriteBatch();

            // Trigger a flush in the middle of the sync (right after it gathered its
            // information) to reproduce the race between syncing and journal deletion.
            using (var operation = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator)
            {
                AfterGatherInformationAction = () => Env.FlushLogToDataFile()
            })
            {
                operation.SyncDataFile();
            }

            // Recovery must still find the journals that were not synced.
            RestartDatabase();
        }
Example #2
0
        public void Can_successfully_sync_journals_after_recovery()
        {
            RequireFileBasedPager();

            var random  = new Random(1);
            var payload = new byte[512];

            // Writes one transaction with 100 random-valued entries into "tree".
            void WriteBatch()
            {
                using (var tx = Env.WriteTransaction())
                {
                    var tree = tx.CreateTree("tree");

                    for (var j = 0; j < 100; j++)
                    {
                        random.NextBytes(payload);
                        tree.Add(new string((char)j, 1000), payload);
                    }

                    tx.Commit();
                }
            }

            // Syncs the data file via a dedicated sync operation.
            void Sync()
            {
                using (var operation = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator))
                {
                    operation.SyncDataFile();
                }
            }

            for (var i = 0; i < 10; i++)
                WriteBatch();

            Env.FlushLogToDataFile();

            Sync();

            for (var i = 0; i < 10; i++)
                WriteBatch();

            Env.FlushLogToDataFile();

            StopDatabase();

            StartDatabase();

            // Syncing right after recovery must succeed.
            Sync();

            StopDatabase();

            StartDatabase();

            // All 100 distinct keys must survive the restarts.
            using (var tx = Env.ReadTransaction())
            using (var it = tx.ReadTree("tree").Iterate(prefetch: false))
            {
                Assert.True(it.Seek(Slices.BeforeAllKeys));

                var entries = 0;
                var hasMore = true;

                while (hasMore)
                {
                    entries++;
                    hasMore = it.MoveNext();
                }

                Assert.Equal(100, entries);
            }
        }
Example #3
0
        public void CorruptedSingleTransactionPage_WontStopTheRecoveryIfIgnoreErrorsOfSyncedTransactionIsSet()
        {
            // A corruption inside a journal range that was already flushed AND synced must not
            // stop recovery when ignoring errors of synced transactions is enabled, and no
            // committed data (synced or not) may be lost.
            RequireFileBasedPager();

            using (var tx = Env.WriteTransaction())
            {
                tx.CreateTree("tree");

                tx.Commit();
            }

            // Single Random instance for the whole test - creating a new Random per loop
            // iteration is an anti-pattern (instances seeded from the same clock tick can
            // produce identical byte sequences).
            var random = new Random();

            // 100 transactions * 100 entries under the "a" prefix - these get flushed and synced.
            for (var i = 0; i < 100; i++)
            {
                var buffer = new byte[1000];
                random.NextBytes(buffer);
                using (var tx = Env.WriteTransaction())
                {
                    for (int j = 0; j < 100; j++)
                    {
                        tx.CreateTree("tree").Add("a" + i.ToString() + j.ToString(), new MemoryStream(buffer));
                    }

                    tx.Commit();
                }
            }

            var lastJournal = Env.Journal.GetCurrentJournalInfo().CurrentJournal;

            // let's flush and sync
            Env.FlushLogToDataFile();

            using (var operation = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator))
            {
                operation.SyncDataFile();
            }

            // Another 100 transactions under the "b" prefix - written but NOT synced.
            for (var i = 0; i < 100; i++)
            {
                var buffer = new byte[1000];
                random.NextBytes(buffer);
                using (var tx = Env.WriteTransaction())
                {
                    for (int j = 0; j < 100; j++)
                    {
                        tx.CreateTree("tree").Add("b" + i.ToString() + j.ToString(), new MemoryStream(buffer));
                    }

                    tx.Commit();
                }
            }

            StopDatabase();

            // Corrupt a page that lies within the already synced portion of the last journal.
            CorruptJournal(lastJournal, 4 * Constants.Size.Kilobyte * 4);

            StartDatabase();

            // Recovery must have invoked the integrity-error handler for synced data.
            Assert.True(_onIntegrityErrorOfAlreadySyncedDataHandlerWasCalled);

            // Both synced ("a") and unsynced ("b") entries must be readable after recovery.
            // Fix: the original code called tx.Commit() on this READ transaction inside the
            // outer loop (100 times) - a copy-paste leftover; read transactions have nothing
            // to commit, so the call was removed.
            using (var tx = Env.ReadTransaction())
            {
                for (var i = 0; i < 100; i++)
                {
                    for (int j = 0; j < 100; j++)
                    {
                        var readA = tx.ReadTree("tree").Read("a" + i.ToString() + j.ToString());

                        Assert.NotNull(readA);

                        var readB = tx.ReadTree("tree").Read("b" + i.ToString() + j.ToString());

                        Assert.NotNull(readB);
                    }
                }
            }
        }
Example #4
0
        public void MustNotReadAndProceedWithRecyclableButEffectivelyEmptyJournalOnRecovery()
        {
            // Recovery must ignore a journal that was created from a recyclable file but never
            // had a transaction written to it - the recycled file still contains stale
            // transactions from its previous life that used to confuse recovery.
            RequireFileBasedPager();

            long lastCommittedTxId = -1;

            var random = new Random(10_09_2021);

            for (var item = 0; item < 20; item++)
            {
                using (var tx = Env.WriteTransaction())
                {
                    tx.CreateTree("foo").Add($"items/{item}", new byte[] { 1, 2, 3, (byte)item });

                    tx.Commit();

                    lastCommittedTxId = tx.LowLevelTransaction.Id;
                }
            }

            Assert.Equal(2, Env.Journal.Files.Count);

            Env.FlushLogToDataFile();

            using (var sync = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator))
            {
                sync.SyncDataFile();
            }

            // After the sync only one journal remains and one file became recyclable.
            Assert.Equal(1, Env.Journal.Files.Count);

            var journalPath = ((StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)Env.Options).JournalPath.FullPath;

            var recyclableJournals = new DirectoryInfo(journalPath).GetFiles($"{StorageEnvironmentOptions.RecyclableJournalFileNamePrefix}*");

            Assert.Equal(1, recyclableJournals.Length);

            using (var tx = Env.WriteTransaction())
            {
                // Big values ensure a NextFile() call that creates a new journal
                // (based on the recyclable journal file that exists).

                for (var item = 0; item < 100; item++)
                {
                    var bigValue = new byte[2000];

                    random.NextBytes(bigValue);

                    tx.CreateTree("bar").Add($"bigValues/{item}", bigValue);
                }

                // Fail the commit just before anything is written to the journal.
                Assert.Throws<InvalidOperationException>(() =>
                {
                    using (tx.LowLevelTransaction.ForTestingPurposesOnly().CallJustBeforeWritingToJournal(() => throw new InvalidOperationException()))
                    {
                        tx.Commit();
                    }
                });
            }

            // The commit above failed before data reached the file, yet an _empty_ journal was
            // created from the recyclable one - its stale old transactions used to break the
            // recovery performed below.

            RestartDatabase();

            // The failed transaction must not have consumed a transaction id.
            using (var tx = Env.WriteTransaction())
            {
                Assert.Equal(lastCommittedTxId + 1, tx.LowLevelTransaction.Id);
            }
        }
Example #5
0
        public void TheCaseWithTwoEmptyJournalFiles()
        {
            // Two consecutive failed commits each leave behind an empty journal file; having
            // two empty journals used to trip a Debug.Assert() in
            // WriteAheadJournal.RecoverDatabase().
            RequireFileBasedPager();

            long lastCommittedTxId = -1;

            var random = new Random(10_09_2021);

            for (var item = 0; item < 20; item++)
            {
                using (var tx = Env.WriteTransaction())
                {
                    tx.CreateTree("foo").Add($"items/{item}", new byte[] { 1, 2, 3, (byte)item });

                    tx.Commit();

                    lastCommittedTxId = tx.LowLevelTransaction.Id;
                }
            }

            Assert.Equal(2, Env.Journal.Files.Count);

            var journalPath = ((StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)Env.Options).JournalPath.FullPath;

            var recyclableJournals = new DirectoryInfo(journalPath).GetFiles($"{StorageEnvironmentOptions.RecyclableJournalFileNamePrefix}*");

            // No recyclable journals yet - the empty journals created below are brand new files.
            Assert.Equal(0, recyclableJournals.Length);

            // Writes big values so NextFile() creates a new journal, then makes the commit fail
            // just before writing to the journal - leaving an _empty_ journal file behind.
            void FailCommitLeavingEmptyJournal()
            {
                using (var tx = Env.WriteTransaction())
                {
                    for (var item = 0; item < 100; item++)
                    {
                        var bigValue = new byte[2000];

                        random.NextBytes(bigValue);

                        tx.CreateTree("bar").Add($"bigValues/{item}", bigValue);
                    }

                    Assert.Throws<InvalidOperationException>(() =>
                    {
                        using (tx.LowLevelTransaction.ForTestingPurposesOnly().CallJustBeforeWritingToJournal(() => throw new InvalidOperationException()))
                        {
                            tx.Commit();
                        }
                    });
                }
            }

            // Syncs the data file via a dedicated sync operation.
            void Sync()
            {
                using (var operation = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator))
                {
                    operation.SyncDataFile();
                }
            }

            // First failed commit -> first empty journal file.
            FailCommitLeavingEmptyJournal();

            Sync();

            RestartDatabase();

            // Second failed commit -> second empty journal file (again - a fresh, empty file).
            FailCommitLeavingEmptyJournal();

            // At this point there are 2 empty journal files - the configuration that used to
            // fail the Debug.Assert() during recovery.
            Sync();

            RestartDatabase();

            // Neither failed transaction may have consumed a transaction id.
            using (var tx = Env.WriteTransaction())
            {
                Assert.Equal(lastCommittedTxId + 1, tx.LowLevelTransaction.Id);
            }
        }
Example #6
0
        // Compacts the storage described by srcOptions into a fresh environment at
        // compactOptions, then truncates the compacted data file to its minimal size.
        // Progress is reported through progressReport (optional); token cancels the copy
        // and the sync retry loop.
        // Throws InvalidOperationException when incremental backup is enabled on the
        // source, since compaction would invalidate the backup chain.
        public static void Execute(StorageEnvironmentOptions srcOptions,
                                   StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions compactOptions,
                                   Action <StorageCompactionProgress> progressReport = null,
                                   CancellationToken token = default(CancellationToken))
        {
            if (srcOptions.IncrementalBackupEnabled)
            {
                throw new InvalidOperationException(CannotCompactBecauseOfIncrementalBackup);
            }

            long minimalCompactedDataFileSize;

            srcOptions.ManualFlushing     = true; // prevent from flushing during compaction - we shouldn't touch any source files
            compactOptions.ManualFlushing = true; // let us flush manually during data copy

            using (var existingEnv = new StorageEnvironment(srcOptions))
                using (var compactedEnv = new StorageEnvironment(compactOptions))
                {
                    CopyTrees(existingEnv, compactedEnv, progressReport, token);

                    compactedEnv.FlushLogToDataFile();
                    bool synced;

                    // SyncDataFile() may return false when the sync cannot run yet, so retry
                    // with a short sleep up to maxNumberOfRetries attempts before giving up.
                    const int maxNumberOfRetries = 100;

                    var syncRetries = 0;

                    while (true)
                    {
                        token.ThrowIfCancellationRequested();
                        using (var op = new WriteAheadJournal.JournalApplicator.SyncOperation(compactedEnv.Journal.Applicator))
                        {
                            try
                            {
                                synced = op.SyncDataFile();

                                if (synced || ++syncRetries >= maxNumberOfRetries)
                                {
                                    break;
                                }

                                Thread.Sleep(100);
                            }
                            catch (Exception e)
                            {
                                // A sync failure leaves things in an unknown state; record the
                                // failure on the SOURCE environment's options so it won't keep
                                // being used, then rethrow with the original stack preserved.
                                existingEnv.Options.SetCatastrophicFailure(ExceptionDispatchInfo.Capture(e));
                                throw;
                            }
                        }
                    }

                    if (synced)
                    {
                        // Everything is flushed and synced - the current journal is no longer needed.
                        compactedEnv.Journal.Applicator.DeleteCurrentAlreadyFlushedJournal();
                    }

                    compactedEnv.Cleanup();

                    minimalCompactedDataFileSize = compactedEnv.NextPageNumber * Constants.Storage.PageSize;
                }

            // Trim the compacted data file down to the pages actually in use.
            using (var compactedDataFile = SafeFileStream.Create(compactOptions.BasePath.Combine(Constants.DatabaseFilename).FullPath, FileMode.Open, FileAccess.ReadWrite))
            {
                compactedDataFile.SetLength(minimalCompactedDataFileSize);
            }
        }
Example #7
0
        public void Recovery_should_handle_empty_journal_file_and_correctly_set_last_flushed_journal(bool runInMemory)
        {
            if (runInMemory == false)
            {
                RequireFileBasedPager();
            }

            // Adds a single small item to "tree" in its own committed transaction.
            void AddItem()
            {
                using (var tx = Env.WriteTransaction())
                {
                    tx.CreateTree("tree").Add("item", new byte[] { 1, 2, 3 });

                    tx.Commit();
                }
            }

            // Syncs the data file via a dedicated sync operation.
            void Sync()
            {
                using (var operation = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator))
                {
                    operation.SyncDataFile();
                }
            }

            AddItem();

            var numberOfJournals = Env.Journal.Files.Count;

            using (var tx = Env.WriteTransaction())
            {
                tx.CreateTree("tree").Add("item", new byte[] { 1, 2, 3 });

                // Fail the commit just before anything is written to the journal.
                Assert.Throws<InvalidOperationException>(() =>
                {
                    using (tx.LowLevelTransaction.ForTestingPurposesOnly().CallJustBeforeWritingToJournal(() => throw new InvalidOperationException()))
                    {
                        tx.Commit();
                    }
                });
            }

            // The commit failed before data reached the file, but an _empty_ journal file
            // was still created.

            Assert.Equal(numberOfJournals + 1, Env.Journal.Files.Count);

            RestartDatabase();

            // This sync used to throw:
            // System.InvalidOperationException : The lock task failed
            // ----Voron.Exceptions.VoronUnrecoverableErrorException : Error syncing the data file.The last sync tx is 2, but the journal's last tx id is -1, possible file corruption?
            Sync();

            // Make sure we can restart immediately after sync.
            RestartDatabase();

            // There is nothing to flush, but validate syncing won't throw.
            Sync();

            // Make sure we can still put more stuff there.
            for (var i = 0; i < 5; i++)
            {
                AddItem();
            }

            RestartDatabase();

            Env.FlushLogToDataFile();

            Sync();

            // Add more data once again and force flushing and syncing.
            for (var i = 0; i < 5; i++)
            {
                AddItem();
            }

            Env.FlushLogToDataFile();

            Sync();
        }
Example #8
0
        public void After_backup_and_restore_recovery_must_not_throw_missing_journal_if_we_have_synced_everything()
        {
            RequireFileBasedPager();

            var random  = new Random(1); // seeded so we always fill up the journal the same way
            var payload = new byte[512];

            // Writes one transaction with 100 random-valued entries into "tree".
            void WriteBatch()
            {
                using (var tx = Env.WriteTransaction())
                {
                    var tree = tx.CreateTree("tree");

                    for (var j = 0; j < 100; j++)
                    {
                        random.NextBytes(payload);
                        tree.Add(new string((char)j, 1000), payload);
                    }

                    tx.Commit();
                }
            }

            for (var i = 0; i < 10; i++)
                WriteBatch();

            Env.FlushLogToDataFile();

            for (var i = 0; i < 20; i++)
                WriteBatch();

            // Fill up the last journal completely - Available4Kbs == 0 after this commit.
            using (var tx = Env.WriteTransaction())
            {
                var tree = tx.CreateTree("tree");

                for (var j = 0; j < 226; j++)
                {
                    var specialPayload = new byte[1024];

                    random.NextBytes(specialPayload);
                    tree.Add(new string((char)j, 1000), specialPayload);
                }

                tx.Commit();
            }

            Env.FlushLogToDataFile();

            using (var operation = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator))
            {
                operation.SyncDataFile();
            }

            var voronDataDir = new VoronPathSetting(DataDir);

            BackupMethods.Full.ToFile(Env, voronDataDir.Combine("voron-test.backup"));

            BackupMethods.Full.Restore(voronDataDir.Combine("voron-test.backup"), voronDataDir.Combine("backup-test.data"));

            var options = StorageEnvironmentOptions.ForPath(Path.Combine(DataDir, "backup-test.data"));

            options.MaxLogFileSize = Env.Options.MaxLogFileSize;

            // Opening the restored environment must not throw about missing journals,
            // and the last write (226 keys) must be fully present.
            using (var env = new StorageEnvironment(options))
            using (var tx = env.ReadTransaction())
            using (var it = tx.ReadTree("tree").Iterate(prefetch: false))
            {
                Assert.True(it.Seek(Slices.BeforeAllKeys));

                var entries = 0;
                var hasMore = true;

                while (hasMore)
                {
                    entries++;
                    hasMore = it.MoveNext();
                }

                Assert.Equal(226, entries);
            }
        }
Example #9
0
        public void Recovery_must_not_throw_missing_journal_if_we_have_synced_everything()
        {
            RequireFileBasedPager();

            var random  = new Random(1); // seeded so we always fill up the journal the same way
            var payload = new byte[512];

            // Writes one transaction with 100 random-valued entries into "tree".
            void WriteBatch()
            {
                using (var tx = Env.WriteTransaction())
                {
                    var tree = tx.CreateTree("tree");

                    for (var j = 0; j < 100; j++)
                    {
                        random.NextBytes(payload);
                        tree.Add(new string((char)j, 1000), payload);
                    }

                    tx.Commit();
                }
            }

            for (var i = 0; i < 10; i++)
                WriteBatch();

            Env.FlushLogToDataFile();

            for (var i = 0; i < 20; i++)
                WriteBatch();

            // Fill up the last journal completely - Available4Kbs == 0 after this commit.
            using (var tx = Env.WriteTransaction())
            {
                var tree = tx.CreateTree("tree");

                for (var j = 0; j < 226; j++)
                {
                    var specialPayload = new byte[1024];

                    random.NextBytes(specialPayload);
                    tree.Add(new string((char)j, 1000), specialPayload);
                }

                tx.Commit();
            }

            Env.FlushLogToDataFile();

            using (var operation = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator))
            {
                operation.SyncDataFile();
            }

            // Recovery must not complain about a missing journal since everything is synced.
            RestartDatabase();

            using (var tx = Env.WriteTransaction())
            {
                tx.CreateTree("foobar");

                tx.Commit();
            }

            RestartDatabase();

            // The last write (226 keys) must be fully present after both restarts.
            using (var tx = Env.ReadTransaction())
            using (var it = tx.ReadTree("tree").Iterate(prefetch: false))
            {
                Assert.True(it.Seek(Slices.BeforeAllKeys));

                var entries = 0;
                var hasMore = true;

                while (hasMore)
                {
                    entries++;
                    hasMore = it.MoveNext();
                }

                Assert.Equal(226, entries);
            }
        }
Example #10
0
    public void RaceConditionBetweenFullBackupAndUpdateDatabaseStateAfterSync()
    {
        // Reproduces the race between a full backup and a concurrent data-file sync that
        // updates database state: a sync started while the backup is running (right after
        // it copied the headers) must block until the backup finishes, and the restored
        // backup must still contain all the data.
        RequireFileBasedPager();
        var random = new Random(2);
        var buffer = new byte[8192];

        random.NextBytes(buffer);

        using (var tx = Env.WriteTransaction())
        {
            var tree = tx.CreateTree("foo");
            for (int i = 0; i < 5000; i++)
            {
                tree.Add("items/" + i, new MemoryStream(buffer));
            }

            tx.Commit();
        }

        // Enough data was written to span more than one journal file.
        Assert.True(Env.Journal.Files.Count > 1);

        Env.FlushLogToDataFile(); // force writing data to the data file

        var voronDataDir = new VoronPathSetting(DataDir);

        Env.ForTestingPurposesOnly().ActionToCallDuringFullBackupRighAfterCopyHeaders += () =>
        {
            // Start a sync on a separate thread while the backup is mid-flight.
            // NOTE(review): the original comment here claimed the journal file gets removed,
            // but the code only starts a sync thread and expects it to be blocked - confirm
            // against the backup implementation.

            Thread syncOperation = new Thread(() =>
            {
                using (var operation = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator))
                {
                    var syncResult = operation.SyncDataFile();

                    Assert.True(syncResult);
                }
            });

            syncOperation.Start();

            // The sync thread must still be blocked 5 seconds later - i.e. it waits for
            // the backup instead of racing it.
            Assert.False(syncOperation.Join(TimeSpan.FromSeconds(5)));
        };

        BackupMethods.Full.ToFile(Env, voronDataDir.Combine("voron-test.backup"));

        BackupMethods.Full.Restore(voronDataDir.Combine("voron-test.backup"), voronDataDir.Combine("backup-test.data"));

        var options = StorageEnvironmentOptions.ForPath(Path.Combine(DataDir, "backup-test.data"));

        options.MaxLogFileSize = Env.Options.MaxLogFileSize;

        // Verify the restored environment contains every item with the exact payload.
        using (var env = new StorageEnvironment(options))
        {
            using (var tx = env.ReadTransaction())
            {
                var tree = tx.CreateTree("foo");
                for (int i = 0; i < 5000; i++)
                {
                    var readResult = tree.Read("items/" + i);
                    Assert.NotNull(readResult);
                    var memoryStream = new MemoryStream();
                    readResult.Reader.CopyTo(memoryStream);
                    Assert.Equal(memoryStream.ToArray(), buffer);
                }
            }
        }
    }
Example #11
0
        public void Full_backup_must_backup_journals_that_we_havent_synced_yet()
        {
            RequireFileBasedPager();

            var random  = new Random();
            var payload = new byte[512];

            // Writes one transaction with 100 random-valued entries into "tree".
            void WriteBatch()
            {
                using (var tx = Env.WriteTransaction())
                {
                    var tree = tx.CreateTree("tree");

                    for (var j = 0; j < 100; j++)
                    {
                        random.NextBytes(payload);
                        tree.Add(new string((char)j, 1000), payload);
                    }

                    tx.Commit();
                }
            }

            // First batch - flushed to the data file below.
            for (var i = 0; i < 10; i++)
                WriteBatch();

            Env.FlushLogToDataFile();

            // Second batch - written but not yet synced.
            for (var i = 0; i < 10; i++)
                WriteBatch();

            // Trigger a flush in the middle of the sync (right after it gathered its information).
            using (var operation = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator)
            {
                AfterGatherInformationAction = () => Env.FlushLogToDataFile()
            })
            {
                operation.SyncDataFile();
            }

            var voronDataDir = new VoronPathSetting(DataDir);

            BackupMethods.Full.ToFile(Env, voronDataDir.Combine("voron-test.backup"));

            BackupMethods.Full.Restore(voronDataDir.Combine("voron-test.backup"), voronDataDir.Combine("backup-test.data"));

            var options = StorageEnvironmentOptions.ForPath(Path.Combine(DataDir, "backup-test.data"));

            options.MaxLogFileSize = Env.Options.MaxLogFileSize;

            // Opening the restored environment must succeed - the backup must have included
            // the journals that were not synced yet.
            using (var env = new StorageEnvironment(options))
            {
            }
        }
        // As part of the sync operation, there are stages where it needs the flush lock,
        // and as part of the flush operation, there are stages where it needs the write transaction lock.
        // This can lead to a situation where the sync waits for the flush, which in turn waits for a write transaction.
        // To avoid that, when the flush lock is occupied the sync hands the work that requires it over to the
        // flush operation, which can perform it while waiting for the write transaction lock.

        // In this test, the sync is called while the flush is running and waiting for a write transaction, so the sync should not be blocked.
        public void CanSyncWhileFlushWaiteToWriteTransaction()
        {
            // Signaled when the sync thread finishes (or when the flush thread fails),
            // so the test does not hang waiting forever.
            var syncMayFinishedEvent = new AutoResetEvent(false);

            // Adding unsynced bytes so the sync thread will have work to do
            for (var i = 0; i < 100; i++)
            {
                using (var tx = Env.WriteTransaction())
                {
                    var tree = tx.CreateTree("foo");
                    tree.Add("items/" + i, StreamFor("values/" + i));
                    tx.Commit();
                }
            }
            Env.FlushLogToDataFile();

            // Adding unflushed bytes so the flush thread will have work to do
            for (var i = 0; i < 100; i++)
            {
                using (var tx = Env.WriteTransaction())
                {
                    var tree = tx.CreateTree("foo");
                    tree.Add("items/" + i, StreamFor("values/" + i));
                    tx.Commit();
                }
            }

            // Runs the sync; it must be able to complete even though the flushing
            // lock is held by Flush() below.
            void Sync()
            {
                try
                {
                    using (var operation = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator))
                    {
                        operation.SyncDataFile();
                    }
                }
                finally
                {
                    syncMayFinishedEvent.Set();
                }
            }

            // Takes the flushing lock, starts the sync on another task, then flushes;
            // the flush itself will block on the write transaction lock held below.
            void Flush()
            {
                try
                {
                    using (Env.Journal.Applicator.TakeFlushingLock())
                    {
                        Task.Run((Action)Sync);
                        Env.FlushLogToDataFile();
                    }
                }
                catch (Exception)
                {
                    // Unblock the test so it fails on the assertion instead of timing out.
                    syncMayFinishedEvent.Set();
                }
            }

            // Write transaction lock is taken to block the flush
            using (var tx = Env.WriteTransaction())
            {
                Task.Run((Action)Flush);

                syncMayFinishedEvent.WaitOne(TimeSpan.FromSeconds(10));
                // The sync must have completed its work: nothing remains written-but-unsynced.
                var totalWrittenButUnsyncedBytes = Env.Journal.Applicator.TotalWrittenButUnsyncedBytes;
                Assert.Equal(0, totalWrittenButUnsyncedBytes);
            }
        }