Example #1
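		// Compacts a storage environment: copies all trees from the source into a freshly created
		// environment, flushes and syncs the compacted journal, deletes the already-flushed journal,
		// and finally truncates the compacted data file to the minimal size actually in use.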
		public static void Execute(StorageEnvironmentOptions srcOptions, StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions compactOptions, Action<CompactionProgress> progressReport = null)
		{
			if (srcOptions.IncrementalBackupEnabled)
				throw new InvalidOperationException(CannotCompactBecauseOfIncrementalBackup);

			long minimalCompactedDataFileSize;

			srcOptions.ManualFlushing = true; // prevent flushing during compaction - we shouldn't touch any source files
			compactOptions.ManualFlushing = true; // let us flush manually during data copy

			using(var existingEnv = new StorageEnvironment(srcOptions))
			using (var compactedEnv = new StorageEnvironment(compactOptions))
			{
				CopyTrees(existingEnv, compactedEnv, progressReport);

				compactedEnv.FlushLogToDataFile(allowToFlushOverwrittenPages: true);

				compactedEnv.Journal.Applicator.SyncDataFile(compactedEnv.OldestTransaction);
				compactedEnv.Journal.Applicator.DeleteCurrentAlreadyFlushedJournal();

				minimalCompactedDataFileSize = compactedEnv.NextPageNumber*AbstractPager.PageSize;
			}

			using (var compactedDataFile = new FileStream(Path.Combine(compactOptions.BasePath, Constants.DatabaseFilename), FileMode.Open, FileAccess.ReadWrite))
			{
				compactedDataFile.SetLength(minimalCompactedDataFileSize);
			}
		}
Example #2
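        // Test: with ManualFlushing enabled, committing write transactions and then calling
        // FlushLogToDataFile should leave no allocations in the scratch buffer pool.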
        public void AllScratchPagesShouldBeReleased()
        {
            var options = StorageEnvironmentOptions.CreateMemoryOnly();

            options.ManualFlushing = true;
            using (var env = new StorageEnvironment(options))
            {
                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    env.CreateTree(txw, "test");

                    txw.Commit();
                }

                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var tree = txw.Environment.CreateTree(txw, "test");

                    tree.Add("key/1", new MemoryStream(new byte[100]));
                    tree.Add("key/1", new MemoryStream(new byte[200]));
                    txw.Commit();
                }

                env.FlushLogToDataFile(); // no read or write transactions are open, so the flush should release everything from scratch

                Assert.Equal(0, env.ScratchBufferPool.GetNumberOfAllocations(0));
            }
        }
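All of the snippets in this listing share the same basic pattern: enable ManualFlushing so the write-ahead journal is only applied to the data file on demand, commit one or more write transactions, and then call FlushLogToDataFile explicitly. The sketch below illustrates that pattern in isolation; the type and method names are taken from the examples above and vary slightly between the API revisions shown, so treat it as an illustration rather than a drop-in snippet.

        // Minimal sketch only - assumes the StorageEnvironment/Tree API used in Example #2 (usings omitted, as in the snippets above).
        var options = StorageEnvironmentOptions.CreateMemoryOnly();
        options.ManualFlushing = true;                  // the journal is applied to the data file only when we ask for it
        using (var env = new StorageEnvironment(options))
        {
            using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = env.CreateTree(txw, "demo"); // create (or open) a tree inside the write transaction
                tree.Add("key/1", new MemoryStream(new byte[128]));
                txw.Commit();
            }

            env.FlushLogToDataFile();                   // apply the committed journal pages to the data file
        }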
Example #3
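        // Restores an environment from a single backup file: takes the journal flushing lock, flushes the
        // log, and then replays the backup's entries into a new low-level write transaction via a temporary directory.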
        private void Restore(StorageEnvironment env, string singleBackupFile)
        {
            using (env.Journal.Applicator.TakeFlushingLock())
            {
                env.FlushLogToDataFile();

                var transactionPersistentContext = new TransactionPersistentContext(true);
                using (var txw = env.NewLowLevelTransaction(transactionPersistentContext, TransactionFlags.ReadWrite))
                {
                    using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read, System.Text.Encoding.UTF8))
                    {
                        if (package.Entries.Count == 0)
                        {
                            return;
                        }

                        var toDispose = new List <IDisposable>();

                        var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;

                        Restore(env, package.Entries, tempDir, toDispose, txw);
                    }
                }
            }
        }
Example #4
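        // Same scenario as Example #2 against a different revision of the API (env.WriteTransaction / tx.CreateTree);
        // here the scratch buffer pool is expected to still hold allocations for one additional transaction after the flush.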
        public void AllScratchPagesShouldBeReleased()
        {
            var options = StorageEnvironmentOptions.CreateMemoryOnly();

            options.ManualFlushing = true;
            using (var env = new StorageEnvironment(options))
            {
                using (var txw = env.WriteTransaction())
                {
                    txw.CreateTree("test");

                    txw.Commit();
                }

                using (var txw = env.WriteTransaction())
                {
                    var tree = txw.CreateTree("test");

                    tree.Add("key/1", new MemoryStream(new byte[100]));
                    tree.Add("key/1", new MemoryStream(new byte[200]));
                    txw.Commit();
                }

                env.FlushLogToDataFile(); // no read or write transactions are open, so the flush should release everything from scratch

                // we keep track of the pages in scratch for one additional transaction, to avoid a race
                // condition between FlushLogToDataFile and newly opened read transactions
                Assert.Equal(2, env.ScratchBufferPool.GetNumberOfAllocations(0));
            }
        }
Example #5
        public void AllScratchPagesShouldBeReleased()
        {
            var options = StorageEnvironmentOptions.CreateMemoryOnly();
            options.ManualFlushing = true;
            using (var env = new StorageEnvironment(options))
            {
                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    env.CreateTree(txw, "test");

                    txw.Commit();
                }

                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var tree = txw.Environment.CreateTree(txw, "test");

                    tree.Add("key/1", new MemoryStream(new byte[100]));
                    tree.Add("key/1", new MemoryStream(new byte[200]));
                    txw.Commit();
                }

                env.FlushLogToDataFile(); // no read or write transactions are open, so the flush should release everything from scratch

                // we keep track of the pages in scratch for one additional transaction, to avoid a race
                // condition between FlushLogToDataFile and newly opened read transactions
                Assert.Equal(2, env.ScratchBufferPool.GetNumberOfAllocations(0));
            }
        }
Example #6
        public void AllScratchPagesShouldBeReleased()
        {
            var options = StorageEnvironmentOptions.CreateMemoryOnly();
            options.ManualFlushing = true;
            using (var env = new StorageEnvironment(options))
            {
                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    env.CreateTree(txw, "test");

                    txw.Commit();
                }

                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var tree = txw.Environment.State.GetTree(txw, "test");

                    tree.Add("key/1", new MemoryStream(new byte[100]));
                    tree.Add("key/1", new MemoryStream(new byte[200]));
                    txw.Commit();
                }

                env.FlushLogToDataFile(); // no read or write transactions are open, so the flush should release everything from scratch

                Assert.Equal(0, env.ScratchBufferPool.GetNumberOfAllocations(0));
            }
        }
Example #7
        public static void Execute(StorageEnvironmentOptions srcOptions,
                                   StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions compactOptions,
                                   Action <CompactionProgress> progressReport = null)
        {
            if (srcOptions.IncrementalBackupEnabled)
            {
                throw new InvalidOperationException(CannotCompactBecauseOfIncrementalBackup);
            }

            long minimalCompactedDataFileSize;

            srcOptions.ManualFlushing     = true; // prevent flushing during compaction - we shouldn't touch any source files
            compactOptions.ManualFlushing = true; // let us flush manually during data copy

            using (var existingEnv = new StorageEnvironment(srcOptions))
                using (var compactedEnv = new StorageEnvironment(compactOptions))
                {
                    CopyTrees(existingEnv, compactedEnv, progressReport);

                    compactedEnv.FlushLogToDataFile();

                    compactedEnv.Journal.Applicator.SyncDataFile();
                    compactedEnv.Journal.Applicator.DeleteCurrentAlreadyFlushedJournal();

                    minimalCompactedDataFileSize = compactedEnv.NextPageNumber * existingEnv.Options.PageSize;
                }

            using (var compactedDataFile = new FileStream(Path.Combine(compactOptions.BasePath, Constants.DatabaseFilename), FileMode.Open, FileAccess.ReadWrite))
            {
                compactedDataFile.SetLength(minimalCompactedDataFileSize);
            }
        }
Example #8
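        // Compaction variant with cancellation support: after copying the trees and flushing, the data file sync
        // is retried (up to 100 attempts) through a SyncOperation, the source environment is marked with a
        // catastrophic failure if the sync throws, and the flushed journal is deleted only once the sync succeeds.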
        public static void Execute(StorageEnvironmentOptions srcOptions,
            StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions compactOptions,
            Action<StorageCompactionProgress> progressReport = null,
            CancellationToken token = default(CancellationToken))
        {
            if (srcOptions.IncrementalBackupEnabled)
                throw new InvalidOperationException(CannotCompactBecauseOfIncrementalBackup);

            long minimalCompactedDataFileSize;

            srcOptions.ManualFlushing = true; // prevent flushing during compaction - we shouldn't touch any source files
            compactOptions.ManualFlushing = true; // let us flush manually during data copy

            using (var existingEnv = new StorageEnvironment(srcOptions))
            using (var compactedEnv = new StorageEnvironment(compactOptions))
            {
                CopyTrees(existingEnv, compactedEnv, progressReport, token);

                compactedEnv.FlushLogToDataFile();
                bool synced;

                const int maxNumberOfRetries = 100;

                var syncRetries = 0;

                while (true)
                {
                    token.ThrowIfCancellationRequested();
                    using (var op = new WriteAheadJournal.JournalApplicator.SyncOperation(compactedEnv.Journal.Applicator))
                    {
                        try
                        {

                            synced = op.SyncDataFile();

                            if (synced || ++syncRetries >= maxNumberOfRetries)
                                break;

                            Thread.Sleep(100);
                        }
                        catch (Exception e)
                        {
                            existingEnv.Options.SetCatastrophicFailure(ExceptionDispatchInfo.Capture(e));
                            throw;
                        }
                    }
                }

                if (synced)
                    compactedEnv.Journal.Applicator.DeleteCurrentAlreadyFlushedJournal();

                minimalCompactedDataFileSize = compactedEnv.NextPageNumber * Constants.Storage.PageSize;
            }

            using (var compactedDataFile = new FileStream(compactOptions.BasePath.Combine(Constants.DatabaseFilename).FullPath, FileMode.Open, FileAccess.ReadWrite))
            {
                compactedDataFile.SetLength(minimalCompactedDataFileSize);
            }
        }
Example #9
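        // Copies a single fixed size tree into the compacted environment in batches bounded by half of
        // MaxScratchBufferSize, reporting progress roughly every 1/33 of the entries and flushing the
        // compacted journal after each batch.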
        private static long CopyFixedSizeTrees(StorageEnvironment compactedEnv, Action <StorageCompactionProgress> progressReport, Transaction txr,
                                               TreeIterator rootIterator, string treeName, long copiedTrees, long totalTreesCount, TransactionPersistentContext context, CancellationToken token)
        {
            var treeNameSlice = rootIterator.CurrentKey.Clone(txr.Allocator);

            var header = (FixedSizeTreeHeader.Embedded *)txr.LowLevelTransaction.RootObjects.DirectRead(treeNameSlice);

            var fst = txr.FixedTreeFor(treeNameSlice, header->ValueSize);

            Report(copiedTrees, totalTreesCount, 0, fst.NumberOfEntries, progressReport, $"Copying fixed size tree '{treeName}'. Progress: 0/{fst.NumberOfEntries} entries.", treeName);

            using (var it = fst.Iterate())
            {
                var copiedEntries = 0L;
                if (it.Seek(Int64.MinValue) == false)
                {
                    return(copiedTrees);
                }

                do
                {
                    token.ThrowIfCancellationRequested();
                    using (var txw = compactedEnv.WriteTransaction(context))
                    {
                        var snd             = txw.FixedTreeFor(treeNameSlice, header->ValueSize);
                        var transactionSize = 0L;

                        do
                        {
                            token.ThrowIfCancellationRequested();

                            Slice val;
                            using (it.Value(out val))
                                snd.Add(it.CurrentKey, val);
                            transactionSize += fst.ValueSize + sizeof(long);
                            copiedEntries++;

                            var reportRate = fst.NumberOfEntries / 33 + 1;
                            if (copiedEntries % reportRate == 0)
                            {
                                Report(copiedTrees, totalTreesCount, copiedEntries, fst.NumberOfEntries, progressReport, $"Copying fixed size tree '{treeName}'. Progress: {copiedEntries}/{fst.NumberOfEntries} entries.", treeName);
                            }
                        } while (transactionSize < compactedEnv.Options.MaxScratchBufferSize / 2 && it.MoveNext());

                        txw.Commit();
                    }

                    if (fst.NumberOfEntries == copiedEntries)
                    {
                        copiedTrees++;
                        Report(copiedTrees, totalTreesCount, copiedEntries, fst.NumberOfEntries, progressReport, $"Finished copying fixed size tree '{treeName}'. Progress: {copiedEntries}/{fst.NumberOfEntries} entries.", treeName);
                    }

                    compactedEnv.FlushLogToDataFile();
                } while (it.MoveNext());
            }
            return(copiedTrees);
        }
Example #10
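        // Test: opens a read transaction after each flushed write transaction and verifies that
        // env.OldestTransaction equals the id of the oldest read transaction still kept open, and that
        // all written keys remain visible to those transactions.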
        public void OldestActiveTransactionShouldBeCalculatedProperly()
        {
            using (var options = StorageEnvironmentOptions.CreateMemoryOnly())
            {
                options.ManualFlushing = true;
                using (var env = new StorageEnvironment(options))
                {
                    var trees        = CreateTrees(env, 1, "tree");
                    var transactions = new List <Transaction>();

                    for (int a = 0; a < 100; a++)
                    {
                        var random = new Random(1337);
                        var buffer = new byte[random.Next(100, 1000)];
                        random.NextBytes(buffer);

                        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                        {
                            for (int i = 0; i < 100; i++)
                            {
                                foreach (var tree in trees)
                                {
                                    tx.Environment.State.GetTree(tx, tree).Add(string.Format("key/{0}/{1}", a, i), new MemoryStream(buffer));
                                }
                            }

                            tx.Commit();
                            env.FlushLogToDataFile(tx);
                            var txr = env.NewTransaction(TransactionFlags.Read);

                            transactions.Add(txr);
                        }
                    }

                    Assert.Equal(transactions.OrderBy(x => x.Id).First().Id, env.OldestTransaction);

                    foreach (var tx in transactions)
                    {
                        foreach (var tree in trees)
                        {
                            using (var iterator = tx.Environment.State.GetTree(tx, tree).Iterate())
                            {
                                if (!iterator.Seek(Slice.BeforeAllKeys))
                                {
                                    continue;
                                }

                                do
                                {
                                    Assert.Contains("key/", iterator.CurrentKey.ToString());
                                } while (iterator.MoveNext());
                            }
                        }
                    }
                }
            }
        }
Example #11
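 // Replays a recorded flush entry from a debug journal: temporarily allows manual flushing and flushes
 // the log within the currently replayed write transaction.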
 private void ReplayFlushAction(FlushActivityEntry flushEntry, Transaction currentWriteTransaction)
 {
     if (flushEntry.ActionType == DebugActionType.FlushStart)
     {
         using (_env.Options.AllowManualFlushing())
         {
             _env.FlushLogToDataFile(currentWriteTransaction);
         }
     }
 }
Example #12
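        // Test: records activity (tree creation, a write batch, manual flushes) into a DebugJournal, then
        // replays that journal against a fresh in-memory environment and verifies the stored value is still readable.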
        public void Record_debug_journal_and_replay_it_with_manual_flushing()
        {
            using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
            {
                env.DebugJournal = new DebugJournal(debugJouralName, env, true);
                using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    env.CreateTree(tx, "test-tree");
                    tx.Commit();
                }

                using (var writeBatch = new WriteBatch())
                {
                    var valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("{ \"title\": \"foo\",\"name\":\"bar\"}"));
                    writeBatch.Add("foo", valueBuffer, "test-tree");
                    env.Writer.Write(writeBatch);
                }

                using (env.Options.AllowManualFlushing())
                {
                    env.FlushLogToDataFile();
                }

                using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                    using (env.Options.AllowManualFlushing())
                    {
                        env.FlushLogToDataFile(tx);
                        tx.Commit();
                    }
            }

            using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
            {
                env.DebugJournal = DebugJournal.FromFile(debugJouralName, env);
                env.DebugJournal.Replay();

                using (var snapshot = env.CreateSnapshot())
                {
                    Assert.Equal("{ \"title\": \"foo\",\"name\":\"bar\"}", snapshot.Read("test-tree", "foo").Reader.ToStringValue());
                }
            }
        }
Example #13
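        // Recovery test: writes 100 transactions of 100 entries each into the root tree, flushing the log only
        // once (after transaction 50), then reopens the environment and validates that every record is still present.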
        public void ShouldProperlyRecover()
        {
            var sequentialLargeIds = ReadData("non-leaf-page-seq-id-large-values-2.txt");

            var enumerator = sequentialLargeIds.GetEnumerator();

            if (Directory.Exists("tests"))
            {
                Directory.Delete("tests", true);
            }

            var options = StorageEnvironmentOptions.ForPath("tests");

            options.ManualFlushing = true;

            using (var env = new StorageEnvironment(options))
            {
                for (var transactions = 0; transactions < 100; transactions++)
                {
                    using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                    {
                        for (var i = 0; i < 100; i++)
                        {
                            enumerator.MoveNext();

                            tx.Root.Add(enumerator.Current.Key.ToString("0000000000000000"), new MemoryStream(enumerator.Current.Value));
                        }

                        tx.Commit();
                    }

                    if (transactions == 50)
                    {
                        env.FlushLogToDataFile();
                    }
                }

                ValidateRecords(env, new List <string> {
                    "Root"
                }, sequentialLargeIds.Select(x => x.Key.ToString("0000000000000000")).ToList());
            }

            options = StorageEnvironmentOptions.ForPath("tests");
            options.ManualFlushing = true;

            using (var env = new StorageEnvironment(options))
            {
                ValidateRecords(env, new List <string> {
                    "Root"
                }, sequentialLargeIds.Select(x => x.Key.ToString("0000000000000000")).ToList());
            }
        }
Example #14
        private PerformanceRecord ReadParallel(string operation, IEnumerable <uint> ids, PerfTracker perfTracker, int numberOfThreads)
        {
            var options = StorageEnvironmentOptions.ForPath(dataPath);

            options.ManualFlushing = true;

            using (var env = new StorageEnvironment(options))
            {
                env.FlushLogToDataFile();

                return(ExecuteReadWithParallel(operation, ids, numberOfThreads, () => ReadInternal(ids, perfTracker, env)));
            }
        }
Example #15
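        // Compaction test: fills a tree with randomly sized records, deletes a random subset of them, flushes,
        // runs StorageCompaction.Execute into a separate directory, and asserts that the compacted copy is smaller on disk.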
        public void ShouldOccupyLessSpace(int seed)
        {
            var r = new Random(seed);
            var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(DataDir);

            storageEnvironmentOptions.ManualFlushing = true;
            using (var env = new StorageEnvironment(storageEnvironmentOptions))
            {
                using (var tx = env.WriteTransaction())
                {
                    var tree = tx.CreateTree("records");

                    for (int i = 0; i < 100; i++)
                    {
                        var bytes = new byte[r.Next(10, 2 * 1024 * 1024)];
                        r.NextBytes(bytes);

                        tree.Add("record/" + i, bytes);
                    }

                    tx.Commit();
                }

                using (var tx = env.WriteTransaction())
                {
                    var tree = tx.CreateTree("records");

                    for (int i = 0; i < 50; i++)
                    {
                        tree.Delete("record/" + r.Next(0, 100));
                    }

                    tx.Commit();
                }
                env.FlushLogToDataFile();
            }

            var oldSize = GetDirSize(new DirectoryInfo(DataDir));

            storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(DataDir);
            storageEnvironmentOptions.ManualFlushing = true;
            var compactedData = Path.Combine(DataDir, "Compacted");

            StorageCompaction.Execute(storageEnvironmentOptions,
                                      (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(compactedData));

            var newSize = GetDirSize(new DirectoryInfo(compactedData));

            Assert.True(newSize < oldSize, string.Format("Old size: {0:#,#;;0} MB, new size {1:#,#;;0} MB", oldSize / 1024 / 1024, newSize / 1024 / 1024));
        }
Example #16
        public void ShouldProperlyRecover()
        {
            var sequentialLargeIds = TestDataUtil.ReadData("non-leaf-page-seq-id-large-values-2.txt");

            var enumerator = sequentialLargeIds.GetEnumerator();

            var options = StorageEnvironmentOptions.ForPath(DataDir);

            options.ManualFlushing = true;

            using (var env = new StorageEnvironment(options))
            {
                for (var transactions = 0; transactions < 100; transactions++)
                {
                    using (var tx = env.WriteTransaction())
                    {
                        var tree = tx.CreateTree("foo");
                        for (var i = 0; i < 100; i++)
                        {
                            enumerator.MoveNext();

                            tree.Add(enumerator.Current.Key.ToString("0000000000000000"), new MemoryStream(enumerator.Current.Value));
                        }

                        tx.Commit();
                    }

                    if (transactions == 50)
                    {
                        env.FlushLogToDataFile();
                    }
                }

                ValidateRecords(env, new List <string> {
                    "foo"
                }, sequentialLargeIds.Select(x => x.Key.ToString("0000000000000000")).ToList());
            }

            options = StorageEnvironmentOptions.ForPath(DataDir);
            options.ManualFlushing = true;

            using (var env = new StorageEnvironment(options))
            {
                ValidateRecords(env, new List <string> {
                    "foo"
                }, sequentialLargeIds.Select(x => x.Key.ToString("0000000000000000")).ToList());
            }
        }
Example #17
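        // After a restart, flushing and force-syncing the data file should leave all journal files marked as
        // recyclable; the test waits up to 30 seconds for the recyclable-journal files to appear.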
        public void OnDatabaseRecoverShouldMarkLastJournalAsRecyclableIfItExceedMaxLogFileSize()
        {
            CreateAndPopulateTree(startWithBigTx: false);

            // restart
            using (var env = new StorageEnvironment(ModifyOptions(StorageEnvironmentOptions.ForPath(DataDir), manualFlushing: true)))
            {
                var journalPath   = env.Options.JournalPath.FullPath;
                var journalsCount = new DirectoryInfo(journalPath).GetFiles().Length;

                env.FlushLogToDataFile();
                env.ForceSyncDataFile();
                Assert.True(SpinWait.SpinUntil(() => new DirectoryInfo(journalPath).GetFiles($"{StorageEnvironmentOptions.RecyclableJournalFileNamePrefix}*").Length == journalsCount,
                                               TimeSpan.FromSeconds(30)));
            }
        }
Example #18
        private void Restore(StorageEnvironment env, IEnumerable <ZipArchiveEntry> entries)
        {
            using (env.Journal.Applicator.TakeFlushingLock())
            {
                env.FlushLogToDataFile();

                var transactionPersistentContext = new TransactionPersistentContext(true);
                using (var txw = env.NewLowLevelTransaction(transactionPersistentContext, TransactionFlags.ReadWrite))
                {
                    var toDispose = new List <IDisposable>();

                    var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;

                    Restore(env, entries, tempDir, toDispose, txw);
                }
            }
        }
Example #19
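        // Generic fixed size tree copy (presumably used during compaction): copies entries from the source tree
        // into a destination tree in batches bounded by half of MaxScratchBufferSize, invoking the progress
        // callbacks and flushing the compacted environment's journal after every batch.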
        private static void CopyFixedSizeTree(FixedSizeTree fst, Func <Transaction, FixedSizeTree> createDestinationTree, StorageEnvironment compactedEnv, TransactionPersistentContext context, Action <long> onEntriesCopiedProgress, Action onAllEntriesCopied, CancellationToken token)
        {
            using (var it = fst.Iterate())
            {
                var copiedEntries = 0L;
                if (it.Seek(Int64.MinValue) == false)
                {
                    return;
                }

                do
                {
                    token.ThrowIfCancellationRequested();
                    using (var txw = compactedEnv.WriteTransaction(context))
                    {
                        var snd             = createDestinationTree(txw);
                        var transactionSize = 0L;

                        do
                        {
                            token.ThrowIfCancellationRequested();

                            using (it.Value(out var val))
                                snd.Add(it.CurrentKey, val);
                            transactionSize += fst.ValueSize + sizeof(long);
                            copiedEntries++;

                            var reportRate = fst.NumberOfEntries / 33 + 1;
                            if (copiedEntries % reportRate == 0)
                            {
                                onEntriesCopiedProgress(copiedEntries);
                            }
                        } while (transactionSize < compactedEnv.Options.MaxScratchBufferSize / 2 && it.MoveNext());

                        txw.Commit();
                    }

                    compactedEnv.FlushLogToDataFile();

                    if (fst.NumberOfEntries == copiedEntries)
                    {
                        onAllEntriesCopied();
                    }
                } while (it.MoveNext());
            }
        }
Example #20
        public void ShouldProperlyRecover()
        {
            var sequentialLargeIds = ReadData("non-leaf-page-seq-id-large-values-2.txt");

            var enumerator = sequentialLargeIds.GetEnumerator();

            if (Directory.Exists("tests"))
                Directory.Delete("tests", true);

            var options = StorageEnvironmentOptions.ForPath("tests");
            options.ManualFlushing = true;

            using (var env = new StorageEnvironment(options))
            {
                for (var transactions = 0; transactions < 100; transactions++)
                {
                    using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                    {
                        for (var i = 0; i < 100; i++)
                        {
                            enumerator.MoveNext();

                            tx.Root.Add(enumerator.Current.Key.ToString("0000000000000000"), new MemoryStream(enumerator.Current.Value));
                        }

                        tx.Commit();
                    }

                    if (transactions == 50)
                        env.FlushLogToDataFile();
                }

                ValidateRecords(env, new List<string> { "Root" }, sequentialLargeIds.Select(x => x.Key.ToString("0000000000000000")).ToList());
            }

            options = StorageEnvironmentOptions.ForPath("tests");
            options.ManualFlushing = true;

            using (var env = new StorageEnvironment(options))
            {
                ValidateRecords(env, new List<string> { "Root" }, sequentialLargeIds.Select(x => x.Key.ToString("0000000000000000")).ToList());
            }
        }
Example #21
        private static long CopyFixedSizeTrees(StorageEnvironment compactedEnv, Action <CompactionProgress> progressReport, Transaction txr,
                                               TreeIterator rootIterator, string treeName, long copiedTrees, long totalTreesCount)
        {
            var fst = txr.FixedTreeFor(rootIterator.CurrentKey.Clone(txr.Allocator), 0);

            Report(treeName, copiedTrees, totalTreesCount, 0,
                   fst.NumberOfEntries,
                   progressReport);
            using (var it = fst.Iterate())
            {
                var copiedEntries = 0L;
                if (it.Seek(Int64.MinValue) == false)
                {
                    return(copiedTrees);
                }
                do
                {
                    using (var txw = compactedEnv.WriteTransaction())
                    {
                        var snd             = txw.FixedTreeFor(rootIterator.CurrentKey.Clone(txr.Allocator));
                        var transactionSize = 0L;
                        do
                        {
                            snd.Add(it.CurrentKey, it.Value);
                            transactionSize += fst.ValueSize + sizeof(long);
                            copiedEntries++;
                        } while (transactionSize < compactedEnv.Options.MaxLogFileSize / 2 && it.MoveNext());

                        txw.Commit();
                    }
                    if (fst.NumberOfEntries == copiedEntries)
                    {
                        copiedTrees++;
                    }

                    Report(treeName, copiedTrees, totalTreesCount, copiedEntries,
                           fst.NumberOfEntries,
                           progressReport);
                    compactedEnv.FlushLogToDataFile();
                } while (it.MoveNext());
            }
            return(copiedTrees);
        }
Example #22
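        // Transaction shipping test: snapshots every transaction committed on the source environment, applies the
        // shipped page snapshots to a destination environment with manual flushing, flushes, and validates the shipped data.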
        public void StorageEnvironment_should_be_able_to_accept_transactionsToShip_with_LOTS_of_transactions()
        {
            var transactionsToShip = new List <TransactionToShip>();

            using (var shippingSourceEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
            {
                shippingSourceEnv.Journal.OnTransactionCommit += tx =>
                {
                    tx.CreatePagesSnapshot();
                    transactionsToShip.Add(tx);
                };

                using (var tx = shippingSourceEnv.NewTransaction(TransactionFlags.ReadWrite))
                {
                    shippingSourceEnv.CreateTree(tx, "TestTree");
                    shippingSourceEnv.CreateTree(tx, "TestTree2");
                    tx.Commit();
                }

                WriteLotsOfTestDataForTree("TestTree", shippingSourceEnv);
                WriteLotsOfTestDataForTree("TestTree2", shippingSourceEnv);
            }


            var storageEnvironmentOptions = StorageEnvironmentOptions.CreateMemoryOnly();

            storageEnvironmentOptions.ManualFlushing = true;
            using (var shippingDestinationEnv = new StorageEnvironment(storageEnvironmentOptions))
            {
                foreach (var tx in transactionsToShip)
                {
                    shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot);
                }

                shippingDestinationEnv.FlushLogToDataFile();

                using (var snapshot = shippingDestinationEnv.CreateSnapshot())
                {
                    ValidateLotsOfTestDataForTree(snapshot, "TestTree");
                    ValidateLotsOfTestDataForTree(snapshot, "TestTree2");
                }
            }
        }
Example #23
        public void MultipleTxPagesCanPointToOnePageNumberWhichShouldNotBeCausingIssuesDuringFlushing()
        {
            var options = StorageEnvironmentOptions.CreateMemoryOnly();

            options.ManualFlushing = true;
            using (var env = new StorageEnvironment(options))
            {
                var trees = CreateTrees(env, 2, "tree");
                var tree1 = trees[0];
                var tree2 = trees[1];

                using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var t1 = tx.ReadTree(tree1);

                    t1.MultiAdd("key", "value/1");
                    t1.MultiAdd("key", "value/2");

                    tx.Commit();
                }

                using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var t1 = tx.ReadTree(tree1);
                    var t2 = tx.ReadTree(tree2);

                    var buffer = new byte[1000];

                    t1.MultiDelete("key", "value/1");
                    t1.MultiDelete("key", "value/2");

                    t2.Add("key/1", new MemoryStream(buffer));
                    t2.Add("key/2", new MemoryStream(buffer));
                    t2.Add("key/3", new MemoryStream(buffer));
                    t2.Add("key/4", new MemoryStream(buffer));
                    t2.Add("key/5", new MemoryStream(buffer));

                    tx.Commit();
                }

                env.FlushLogToDataFile();
            }
        }
Example #24
        public void MultipleTxPagesCanPointToOnePageNumberWhichShouldNotBeCausingIssuesDuringFlushing()
        {
            var options = StorageEnvironmentOptions.CreateMemoryOnly();
            options.ManualFlushing = true;
            using (var env = new StorageEnvironment(options))
            {
                var trees = CreateTrees(env, 2, "tree");
                var tree1 = trees[0];
                var tree2 = trees[1];

                using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var t1 = tx.State.GetTree(tx, tree1);

                    t1.MultiAdd("key", "value/1");
                    t1.MultiAdd("key", "value/2");

                    tx.Commit();
                }

                using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var t1 = tx.State.GetTree(tx, tree1);
                    var t2 = tx.State.GetTree(tx, tree2);

                    var buffer = new byte[1000];

                    t1.MultiDelete("key", "value/1");
                    t1.MultiDelete("key", "value/2");

                    t2.Add("key/1", new MemoryStream(buffer));
                    t2.Add("key/2", new MemoryStream(buffer));
                    t2.Add("key/3", new MemoryStream(buffer));
                    t2.Add("key/4", new MemoryStream(buffer));
                    t2.Add("key/5", new MemoryStream(buffer));

                    tx.Commit();
                }

                env.FlushLogToDataFile();
            }
        }
Example #25
        public void StorageEnvironment_should_be_able_to_accept_transactionsToShip_with_new_trees()
        {
            var transactionsToShip = new ConcurrentBag <TransactionToShip>();

            using (var shippingSourceEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
            {
                shippingSourceEnv.Journal.OnTransactionCommit += tx =>
                {
                    tx.CreatePagesSnapshot();
                    transactionsToShip.Add(tx);
                };

                using (var tx = shippingSourceEnv.NewTransaction(TransactionFlags.ReadWrite))
                {
                    shippingSourceEnv.CreateTree(tx, "TestTree");
                    tx.Commit();
                }
            }

            var storageEnvironmentOptions = StorageEnvironmentOptions.CreateMemoryOnly();

            storageEnvironmentOptions.ManualFlushing = true;
            using (var shippingDestinationEnv = new StorageEnvironment(storageEnvironmentOptions))
            {
                foreach (var tx in transactionsToShip)
                {
                    shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot);
                }

                shippingDestinationEnv.FlushLogToDataFile();

                using (var snapshot = shippingDestinationEnv.CreateSnapshot())
                {
                    snapshot.Read("TestTree", "Foo");
                }
            }
        }
Example #26
		public void StorageEnvironment_should_be_able_to_accept_transactionsToShip_with_new_trees()
		{
			var transactionsToShip = new ConcurrentBag<TransactionToShip>();

			using (var shippingSourceEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
			{
				shippingSourceEnv.Journal.OnTransactionCommit += tx =>
				{
					tx.CreatePagesSnapshot();
					transactionsToShip.Add(tx);
				};

				using (var tx = shippingSourceEnv.NewTransaction(TransactionFlags.ReadWrite))
				{
					shippingSourceEnv.CreateTree(tx, "TestTree");
					tx.Commit();
				}
			}

			var storageEnvironmentOptions = StorageEnvironmentOptions.CreateMemoryOnly();
			storageEnvironmentOptions.ManualFlushing = true;
			using (var shippingDestinationEnv = new StorageEnvironment(storageEnvironmentOptions))
			{
				foreach (var tx in transactionsToShip)
					shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot);

				shippingDestinationEnv.FlushLogToDataFile();

				using (var snapshot = shippingDestinationEnv.CreateSnapshot())
				{
					snapshot.Read("TestTree", "Foo");
				}
			}
		}
Example #27
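        // Full restore implementation: extracts the backed-up journal files into a temporary directory, replays
        // them through a JournalReader, writes the recovered pages directly to the data pager, reopens the root
        // tree from the last transaction header, and finally updates the storage header to point at the restored state.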
        private void Restore(StorageEnvironment env, string singleBackupFile)
        {
            using (env.Journal.Applicator.TakeFlushingLock())
            {
                using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite))
                {
                    using (env.Options.AllowManualFlushing())
                    {
                        env.FlushLogToDataFile(txw);
                    }

                    using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read, System.Text.Encoding.UTF8))
                    {
                        if (package.Entries.Count == 0)
                        {
                            return;
                        }

                        var toDispose = new List <IDisposable>();

                        var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;

                        try
                        {
                            TransactionHeader *lastTxHeader = null;
                            var pagesToWrite = new Dictionary <long, TreePage>();

                            long journalNumber = -1;
                            foreach (var entry in package.Entries)
                            {
                                switch (Path.GetExtension(entry.Name))
                                {
                                case ".merged-journal":
                                case ".journal":

                                    var journalFileName = Path.Combine(tempDir, entry.Name);
                                    using (var output = new FileStream(journalFileName, FileMode.Create))
                                        using (var input = entry.Open())
                                        {
                                            output.Position = output.Length;
                                            input.CopyTo(output);
                                        }

                                    var pager = env.Options.OpenPager(journalFileName);
                                    toDispose.Add(pager);

                                    if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
                                    {
                                        throw new InvalidOperationException("Cannot parse journal file number");
                                    }

                                    var recoveryPager = env.Options.CreateScratchPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
                                    toDispose.Add(recoveryPager);

                                    var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);

                                    while (reader.ReadOneTransaction(env.Options))
                                    {
                                        lastTxHeader = reader.LastTransactionHeader;
                                    }

                                    foreach (var translation in reader.TransactionPageTranslation)
                                    {
                                        var pageInJournal = translation.Value.JournalPos;
                                        var page          = recoveryPager.Read(null, pageInJournal);
                                        pagesToWrite[translation.Key] = page;

                                        if (page.IsOverflow)
                                        {
                                            var numberOfOverflowPages = recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);

                                            for (int i = 1; i < numberOfOverflowPages; i++)
                                            {
                                                pagesToWrite.Remove(translation.Key + i);
                                            }
                                        }
                                    }

                                    break;

                                default:
                                    throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
                                }
                            }

                            var sortedPages = pagesToWrite.OrderBy(x => x.Key)
                                              .Select(x => x.Value)
                                              .ToList();

                            if (sortedPages.Count == 0)
                            {
                                return;
                            }
                            var last = sortedPages.Last();

                            var numberOfPages = last.IsOverflow
                                ? env.Options.DataPager.GetNumberOfOverflowPages(
                                last.OverflowSize)
                                : 1;
                            var pagerState = env.Options.DataPager.EnsureContinuous(last.PageNumber, numberOfPages);
                            txw.EnsurePagerStateReference(pagerState);

                            foreach (var page in sortedPages)
                            {
                                env.Options.DataPager.Write(page);
                            }

                            env.Options.DataPager.Sync();

                            var root = Tree.Open(txw, null, &lastTxHeader->Root);
                            root.Name = Constants.RootTreeName;

                            txw.UpdateRootsIfNeeded(root);

                            txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

                            env.Journal.Clear(txw);

                            txw.Commit();

                            env.HeaderAccessor.Modify(header =>
                            {
                                header->TransactionId  = lastTxHeader->TransactionId;
                                header->LastPageNumber = lastTxHeader->LastPageNumber;

                                header->Journal.LastSyncedJournal       = journalNumber;
                                header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

                                header->Root = lastTxHeader->Root;

                                header->Journal.CurrentJournal    = journalNumber + 1;
                                header->Journal.JournalFilesCount = 0;
                            });
                        }
                        finally
                        {
                            toDispose.ForEach(x => x.Dispose());

                            try
                            {
                                Directory.Delete(tempDir, true);
                            }
                            catch
                            {
                                // this is just a temporary directory; the worst case is that we don't reclaim the space in the OS temp directory.
                                // if for some reason we cannot delete it, we are safe to ignore it.
                            }
                        }
                    }
                }
            }
        }
Example #28
        public void Record_debug_journal_and_replay_it_with_manual_flushing()
        {
            using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
            {
                env.DebugJournal = new DebugJournal(debugJouralName, env, true);
                using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    env.CreateTree(tx, "test-tree");
                    tx.Commit();
                }

                using (var writeBatch = new WriteBatch())
                {
                    var valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("{ \"title\": \"foo\",\"name\":\"bar\"}"));
                    writeBatch.Add("foo", valueBuffer, "test-tree");
                    env.Writer.Write(writeBatch);
                }

                using (env.Options.AllowManualFlushing())
                {
                    env.FlushLogToDataFile();
                }

                using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                using (env.Options.AllowManualFlushing())
                {
                    env.FlushLogToDataFile(tx);
                    tx.Commit();
                }
            }

            using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
            {
                env.DebugJournal = DebugJournal.FromFile(debugJouralName, env);
                env.DebugJournal.Replay();

                using (var snapshot = env.CreateSnapshot())
                {
                    Assert.Equal("{ \"title\": \"foo\",\"name\":\"bar\"}", snapshot.Read("test-tree", "foo").Reader.ToStringValue());
                }
            }

        }
Example #29
        public void OldestActiveTransactionShouldBeCalculatedProperly()
        {
            var directory = "Test";

            if (Directory.Exists(directory))
                Directory.Delete(directory, true);

            var options = StorageEnvironmentOptions.ForPath(directory);

            options.ManualFlushing = true;
            using (var env = new StorageEnvironment(options))
            {
                var trees = CreateTrees(env, 1, "tree");
                var transactions = new List<Transaction>();

                for (int a = 0; a < 100; a++)
                {
                    var random = new Random(1337);
                    var buffer = new byte[random.Next(100, 1000)];
                    random.NextBytes(buffer);

                    using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                    {
                        for (int i = 0; i < 100; i++)
                        {
                            foreach (var tree in trees)
                            {
                                tx.Environment.State.GetTree(tx,tree).Add(tx, string.Format("key/{0}/{1}", a, i), new MemoryStream(buffer));
                            }

                        }

                        tx.Commit();
                        env.FlushLogToDataFile(tx);
                        var txr = env.NewTransaction(TransactionFlags.Read);

                        transactions.Add(txr);
                    }
                }

                Assert.Equal(transactions.OrderBy(x => x.Id).First().Id, env.OldestTransaction);

                foreach (var tx in transactions)
                {
                    foreach (var tree in trees)
                    {
                        using (var iterator = tx.Environment.State.GetTree(tx,tree).Iterate(tx))
                        {
                            if (!iterator.Seek(Slice.BeforeAllKeys))
                                continue;

                            do
                            {
                                Assert.Contains("key/", iterator.CurrentKey.ToString());
                            }
                            while (iterator.MoveNext());
                        }
                    }
                }
            }
        }
Example #30
        private void Restore(StorageEnvironment env, string singleBackupFile)
        {
            using (env.Journal.Applicator.TakeFlushingLock())
            {
                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    using (env.Options.AllowManualFlushing())
                    {
                        env.FlushLogToDataFile(txw);
                    }

                    using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read))
                    {
                        if (package.Entries.Count == 0)
                        {
                            return;
                        }

                        var toDispose = new List <IDisposable>();

                        var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;

                        try
                        {
                            TransactionHeader *lastTxHeader = null;
                            var pagesToWrite = new Dictionary <long, Func <Page> >();

                            long journalNumber = -1;
                            foreach (var entry in package.Entries)
                            {
                                switch (Path.GetExtension(entry.Name))
                                {
                                case ".journal":

                                    var journalFileName = Path.Combine(tempDir, entry.Name);
                                    using (var output = new FileStream(journalFileName, FileMode.Create))
                                        using (var input = entry.Open())
                                        {
                                            output.Position = output.Length;
                                            input.CopyTo(output);
                                        }

                                    var pager = new Win32MemoryMapPager(journalFileName);
                                    toDispose.Add(pager);

                                    if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
                                    {
                                        throw new InvalidOperationException("Cannot parse journal file number");
                                    }

                                    var recoveryPager = new Win32MemoryMapPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
                                    toDispose.Add(recoveryPager);

                                    var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);

                                    while (reader.ReadOneTransaction(env.Options))
                                    {
                                        lastTxHeader = reader.LastTransactionHeader;
                                    }

                                    foreach (var translation in reader.TransactionPageTranslation)
                                    {
                                        var pageInJournal = translation.Value.JournalPos;
                                        pagesToWrite[translation.Key] = () => recoveryPager.Read(pageInJournal);
                                    }

                                    break;

                                default:
                                    throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
                                }
                            }

                            var sortedPages = pagesToWrite.OrderBy(x => x.Key)
                                              .Select(x => x.Value())
                                              .ToList();

                            if (sortedPages.Count == 0)
                            {
                                return;
                            }
                            var last = sortedPages.Last();

                            env.Options.DataPager.EnsureContinuous(txw, last.PageNumber,
                                                                   last.IsOverflow
                                    ? env.Options.DataPager.GetNumberOfOverflowPages(
                                                                       last.OverflowSize)
                                    : 1);

                            foreach (var page in sortedPages)
                            {
                                env.Options.DataPager.Write(page);
                            }

                            env.Options.DataPager.Sync();

                            txw.State.Root          = Tree.Open(txw, &lastTxHeader->Root);
                            txw.State.FreeSpaceRoot = Tree.Open(txw, &lastTxHeader->FreeSpace);

                            txw.State.FreeSpaceRoot.Name = Constants.FreeSpaceTreeName;
                            txw.State.Root.Name          = Constants.RootTreeName;

                            txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

                            env.Journal.Clear(txw);

                            txw.Commit();

                            env.HeaderAccessor.Modify(header =>
                            {
                                header->TransactionId  = lastTxHeader->TransactionId;
                                header->LastPageNumber = lastTxHeader->LastPageNumber;

                                header->Journal.LastSyncedJournal       = journalNumber;
                                header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

                                header->Root      = lastTxHeader->Root;
                                header->FreeSpace = lastTxHeader->FreeSpace;

                                header->Journal.CurrentJournal    = journalNumber + 1;
                                header->Journal.JournalFilesCount = 0;
                            });
                        }
                        finally
                        {
                            toDispose.ForEach(x => x.Dispose());

                            try
                            {
                                Directory.Delete(tempDir, true);
                            }
                            catch (Exception)
                            {
                                // just temp dir - ignore it
                            }
                        }
                    }
                }
            }
        }
Example 31
        private static unsafe void CopyTrees(StorageEnvironment existingEnv, StorageEnvironment compactedEnv, Action<CompactionProgress> progressReport = null)
		{
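            // Iterate over every tree under the source root and copy its entries into the
            // compacted environment, committing in batches so that a single write
            // transaction never grows beyond half of the configured max journal file size.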
			using (var rootIterator = existingEnv.State.Root.Iterate())
			{
				if (rootIterator.Seek(Slice.BeforeAllKeys) == false)
					return;

				var totalTreesCount = existingEnv.State.Root.State.EntriesCount;
				var copiedTrees = 0L;

				do
				{
					var treeName = rootIterator.CurrentKey.ToString();

					using (var txr = existingEnv.NewTransaction(TransactionFlags.Read))
					{
						var existingTree = existingEnv.State.GetTree(txr, treeName);

						Report(treeName, copiedTrees, totalTreesCount, 0, existingTree.State.EntriesCount, progressReport);

						using (var existingTreeIterator = existingTree.Iterate())
						{
							if (existingTreeIterator.Seek(Slice.BeforeAllKeys) == false)
								continue;

							using (var txw = compactedEnv.NewTransaction(TransactionFlags.ReadWrite))
							{
								compactedEnv.CreateTree(txw, treeName);
								txw.Commit();
							}

							var copiedEntries = 0L;

							do
							{
								var transactionSize = 0L;

								using (var txw = compactedEnv.NewTransaction(TransactionFlags.ReadWrite))
								{
									var newTree = txw.ReadTree(treeName);

									do
									{
										var key = existingTreeIterator.CurrentKey;

										if (existingTreeIterator.Current->Flags == NodeFlags.MultiValuePageRef)
										{
											using (var multiTreeIterator = existingTree.MultiRead(key))
											{
												if (multiTreeIterator.Seek(Slice.BeforeAllKeys) == false)
													continue;

												do
												{
													var multiValue = multiTreeIterator.CurrentKey;
													newTree.MultiAdd(key, multiValue);
													transactionSize += multiValue.Size;
												} while (multiTreeIterator.MoveNext());
											}
										}
										else
										{
											using (var value = existingTree.Read(key).Reader.AsStream())
											{
												newTree.Add(key, value);
												transactionSize += value.Length;
											}
										}

										copiedEntries++;

									} while (transactionSize < compactedEnv.Options.MaxLogFileSize/2 && existingTreeIterator.MoveNext());

									txw.Commit();
								}

								if (copiedEntries == existingTree.State.EntriesCount)
									copiedTrees++;

								Report(treeName, copiedTrees, totalTreesCount, copiedEntries, existingTree.State.EntriesCount, progressReport);

								compactedEnv.FlushLogToDataFile();

							} while (existingTreeIterator.MoveNext());
						}
					}
				} while (rootIterator.MoveNext());
			}
		}
        private static unsafe long CopyVariableSizeTree(StorageEnvironment compactedEnv, Action <CompactionProgress> progressReport, Transaction txr,
                                                        string treeName, long copiedTrees, long totalTreesCount)
        {
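            // Copy a single variable size tree: multi-value entries are copied via
            // MultiRead/MultiAdd, regular values are read as streams and re-added, and each
            // write transaction is committed once it reaches half of the max journal file size.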
            var existingTree = txr.ReadTree(treeName);

            Report(treeName, copiedTrees, totalTreesCount, 0, existingTree.State.NumberOfEntries, progressReport);

            using (var existingTreeIterator = existingTree.Iterate(true))
            {
                if (existingTreeIterator.Seek(Slices.BeforeAllKeys) == false)
                {
                    return(copiedTrees);
                }

                using (var txw = compactedEnv.WriteTransaction())
                {
                    txw.CreateTree(treeName);
                    txw.Commit();
                }

                var copiedEntries = 0L;

                do
                {
                    var transactionSize = 0L;

                    using (var txw = compactedEnv.WriteTransaction())
                    {
                        var newTree = txw.ReadTree(treeName);

                        do
                        {
                            var key = existingTreeIterator.CurrentKey;

                            if (existingTreeIterator.Current->Flags == TreeNodeFlags.MultiValuePageRef)
                            {
                                using (var multiTreeIterator = existingTree.MultiRead(key))
                                {
                                    if (multiTreeIterator.Seek(Slices.BeforeAllKeys) == false)
                                    {
                                        continue;
                                    }

                                    do
                                    {
                                        var multiValue = multiTreeIterator.CurrentKey;
                                        newTree.MultiAdd(key, multiValue);
                                        transactionSize += multiValue.Size;
                                    } while (multiTreeIterator.MoveNext());
                                }
                            }
                            else
                            {
                                using (var value = existingTree.Read(key).Reader.AsStream())
                                {
                                    newTree.Add(key, value);
                                    transactionSize += value.Length;
                                }
                            }

                            copiedEntries++;
                        } while (transactionSize < compactedEnv.Options.MaxLogFileSize / 2 && existingTreeIterator.MoveNext());

                        txw.Commit();
                    }

                    if (copiedEntries == existingTree.State.NumberOfEntries)
                    {
                        copiedTrees++;
                    }

                    Report(treeName, copiedTrees, totalTreesCount, copiedEntries, existingTree.State.NumberOfEntries,
                           progressReport);

                    compactedEnv.FlushLogToDataFile();
                } while (existingTreeIterator.MoveNext());
            }
            return(copiedTrees);
        }
Example 33
        private static unsafe long CopyVariableSizeTree(StorageEnvironment compactedEnv, Action<StorageCompactionProgress> progressReport, Transaction txr, string treeName, long copiedTrees, long totalTreesCount, TransactionPersistentContext context, CancellationToken token)
        {
            var existingTree = txr.ReadTree(treeName);

            Report(copiedTrees, totalTreesCount, 0, existingTree.State.NumberOfEntries, progressReport, $"Copying variable size tree '{treeName}'. Progress: 0/{existingTree.State.NumberOfEntries} entries.", treeName);

            using (var existingTreeIterator = existingTree.Iterate(true))
            {
                if (existingTreeIterator.Seek(Slices.BeforeAllKeys) == false)
                {
                    return(copiedTrees);
                }

                token.ThrowIfCancellationRequested();
                using (var txw = compactedEnv.WriteTransaction(context))
                {
                    if (existingTree.IsLeafCompressionSupported)
                    {
                        txw.CreateTree(treeName, flags: TreeFlags.LeafsCompressed);
                    }
                    else
                    {
                        txw.CreateTree(treeName);
                    }

                    txw.Commit();
                }

                var copiedEntries = 0L;

                do
                {
                    var transactionSize = 0L;

                    token.ThrowIfCancellationRequested();

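                    // The write transaction is managed manually (not in a 'using') because it
                    // may be committed and disposed early when a fixed size tree is encountered;
                    // CopyFixedSizeTree opens its own dedicated transactions in that case.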
                    var txw = compactedEnv.WriteTransaction(context);

                    try
                    {
                        var newTree = txw.ReadTree(treeName);

                        do
                        {
                            token.ThrowIfCancellationRequested();
                            var key = existingTreeIterator.CurrentKey;

                            if (existingTreeIterator.Current->Flags == TreeNodeFlags.MultiValuePageRef)
                            {
                                using (var multiTreeIterator = existingTree.MultiRead(key))
                                {
                                    if (multiTreeIterator.Seek(Slices.BeforeAllKeys) == false)
                                    {
                                        continue;
                                    }

                                    do
                                    {
                                        token.ThrowIfCancellationRequested();
                                        var multiValue = multiTreeIterator.CurrentKey;
                                        newTree.MultiAdd(key, multiValue);
                                        transactionSize += multiValue.Size;
                                    } while (multiTreeIterator.MoveNext());
                                }
                            }
                            else if (existingTree.IsLeafCompressionSupported)
                            {
                                using (var read = existingTree.ReadDecompressed(key))
                                {
                                    var value = read.Reader.AsStream();

                                    newTree.Add(key, value);
                                    transactionSize += value.Length;
                                }
                            }
                            else if (existingTree.State.Flags == (TreeFlags.FixedSizeTrees | TreeFlags.Streams))
                            {
                                var tag = existingTree.GetStreamTag(key);

                                using (var stream = existingTree.ReadStream(key))
                                {
                                    if (tag != null)
                                    {
                                        Slice tagStr;
                                        using (Slice.From(txw.Allocator, tag, out tagStr))
                                            newTree.AddStream(key, stream, tagStr);
                                    }
                                    else
                                    {
                                        newTree.AddStream(key, stream);
                                    }

                                    transactionSize += stream.Length;
                                }
                            }
                            else if (existingTree.State.Flags == TreeFlags.FixedSizeTrees)
                            {
                                var reader = existingTree.GetValueReaderFromHeader(existingTreeIterator.Current);

                                if (reader.Length >= sizeof(FixedSizeTreeHeader.Embedded))
                                {
                                    var header = (FixedSizeTreeHeader.Embedded *)reader.Base;

                                    if (header->RootObjectType == RootObjectType.FixedSizeTree || header->RootObjectType == RootObjectType.EmbeddedFixedSizeTree)
                                    {
                                        // CopyFixedSizeTree will open dedicated write transaction to copy fixed size tree

                                        txw.Commit();
                                        txw.Dispose();
                                        txw = null;

                                        var fixedSizeTreeName = key;
                                        var fst = existingTree.FixedTreeFor(fixedSizeTreeName, (byte)header->ValueSize);

                                        var currentCopiedTrees   = copiedTrees;
                                        var currentCopiedEntries = copiedEntries;

                                        CopyFixedSizeTree(fst, tx =>
                                        {
                                            var treeInCompactedEnv = tx.ReadTree(treeName);
                                            return(treeInCompactedEnv.FixedTreeFor(fixedSizeTreeName, (byte)header->ValueSize));
                                        }, compactedEnv, context, copiedFstEntries =>
                                        {
                                            Report(currentCopiedTrees, totalTreesCount, currentCopiedEntries, existingTree.State.NumberOfEntries, progressReport,
                                                   $"Copying fixed size tree '{fixedSizeTreeName}' inside '{treeName}' tree. Progress: {copiedFstEntries}/{fst.NumberOfEntries} entries.",
                                                   treeName);
                                        }, () =>
                                        {
                                            Report(currentCopiedTrees, totalTreesCount, currentCopiedEntries, existingTree.State.NumberOfEntries, progressReport,
                                                   $"Finished copying fixed size tree '{fixedSizeTreeName}' inside '{treeName}' tree. {fst.NumberOfEntries} entries copied.",
                                                   treeName);
                                        }, token);

                                        IncrementNumberOfCopiedEntries();
                                        break; // let's open new transaction after copying fixed size tree
                                    }
                                }

                                // if the entry wasn't recognized as fixed size tree then let's store it as regular value

                                using (var value = existingTree.Read(key).Reader.AsStream())
                                {
                                    newTree.Add(key, value);
                                    transactionSize += value.Length;
                                }
                            }
                            else
                            {
                                using (var value = existingTree.Read(key).Reader.AsStream())
                                {
                                    newTree.Add(key, value);
                                    transactionSize += value.Length;
                                }
                            }

                            IncrementNumberOfCopiedEntries();

                            void IncrementNumberOfCopiedEntries()
                            {
                                copiedEntries++;

                                var reportRate = existingTree.State.NumberOfEntries / 33 + 1;

                                if (copiedEntries % reportRate == 0)
                                {
                                    Report(copiedTrees, totalTreesCount, copiedEntries, existingTree.State.NumberOfEntries, progressReport,
                                           $"Copying variable size tree '{treeName}'. Progress: {copiedEntries}/{existingTree.State.NumberOfEntries} entries.", treeName);
                                }
                            }
                        } while (transactionSize < compactedEnv.Options.MaxScratchBufferSize / 2 && existingTreeIterator.MoveNext());

                        txw?.Commit();
                    }
                    finally
                    {
                        txw?.Dispose();
                    }

                    if (copiedEntries == existingTree.State.NumberOfEntries)
                    {
                        copiedTrees++;
                        Report(copiedTrees, totalTreesCount, copiedEntries, existingTree.State.NumberOfEntries, progressReport, $"Finished copying variable size tree '{treeName}'. Progress: {copiedEntries}/{existingTree.State.NumberOfEntries} entries.", treeName);
                    }

                    compactedEnv.FlushLogToDataFile();
                } while (existingTreeIterator.MoveNext());
            }
            return(copiedTrees);
        }
Example 34
        private static unsafe void CopyTrees(StorageEnvironment existingEnv, StorageEnvironment compactedEnv, Action<CompactionProgress> progressReport = null)
        {
            using (var txr = existingEnv.NewTransaction(TransactionFlags.Read))
                using (var rootIterator = txr.Root.Iterate())
                {
                    if (rootIterator.Seek(Slice.BeforeAllKeys) == false)
                    {
                        return;
                    }

                    var totalTreesCount = txr.Root.State.EntriesCount;
                    var copiedTrees     = 0L;

                    do
                    {
                        var treeName     = rootIterator.CurrentKey.ToString();
                        var existingTree = txr.ReadTree(treeName);

                        Report(treeName, copiedTrees, totalTreesCount, 0, existingTree.State.EntriesCount, progressReport);

                        using (var existingTreeIterator = existingTree.Iterate())
                        {
                            if (existingTreeIterator.Seek(Slice.BeforeAllKeys) == false)
                            {
                                continue;
                            }

                            using (var txw = compactedEnv.NewTransaction(TransactionFlags.ReadWrite))
                            {
                                compactedEnv.CreateTree(txw, treeName);
                                txw.Commit();
                            }

                            var copiedEntries = 0L;

                            do
                            {
                                var transactionSize = 0L;

                                using (var txw = compactedEnv.NewTransaction(TransactionFlags.ReadWrite))
                                {
                                    var newTree = txw.ReadTree(treeName);

                                    do
                                    {
                                        var key = existingTreeIterator.CurrentKey;

                                        if (existingTreeIterator.Current->Flags == NodeFlags.MultiValuePageRef)
                                        {
                                            using (var multiTreeIterator = existingTree.MultiRead(key))
                                            {
                                                if (multiTreeIterator.Seek(Slice.BeforeAllKeys) == false)
                                                {
                                                    continue;
                                                }

                                                do
                                                {
                                                    var multiValue = multiTreeIterator.CurrentKey;
                                                    newTree.MultiAdd(key, multiValue);
                                                    transactionSize += multiValue.Size;
                                                } while (multiTreeIterator.MoveNext());
                                            }
                                        }
                                        else
                                        {
                                            using (var value = existingTree.Read(key).Reader.AsStream())
                                            {
                                                newTree.Add(key, value);
                                                transactionSize += value.Length;
                                            }
                                        }

                                        copiedEntries++;
                                    } while (transactionSize < compactedEnv.Options.MaxLogFileSize / 2 && existingTreeIterator.MoveNext());

                                    txw.Commit();
                                }

                                if (copiedEntries == existingTree.State.EntriesCount)
                                {
                                    copiedTrees++;
                                }

                                Report(treeName, copiedTrees, totalTreesCount, copiedEntries, existingTree.State.EntriesCount,
                                       progressReport);

                                compactedEnv.FlushLogToDataFile();
                            } while (existingTreeIterator.MoveNext());
                        }
                    } while (rootIterator.MoveNext());
                }
        }
Example 35
        private unsafe void Restore(StorageEnvironment env, string singleBackupFile)
        {
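            // Restore from a full backup archive: replay every .journal entry in the zip onto
            // temporary recovery pagers, write the resulting pages into the data file, and
            // finally update the header to point at the last restored transaction.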
            using (env.Journal.Applicator.TakeFlushingLock())
            {
                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    using (env.Options.AllowManualFlushing())
                    {
                        env.FlushLogToDataFile(txw);
                    }

                    using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read))
                    {
                        if (package.Entries.Count == 0)
                            return;

                        var toDispose = new List<IDisposable>();

						var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;

                        try
                        {
                            TransactionHeader* lastTxHeader = null;
                            var pagesToWrite = new Dictionary<long, Func<Page>>();

                            long journalNumber = -1;
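                            // Extract each journal file to the temp directory and replay its
                            // transactions; the page translation table tells us which data file
                            // pages have to be rewritten from the recovery pager.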
                            foreach (var entry in package.Entries)
                            {
                                switch (Path.GetExtension(entry.Name))
                                {
                                    case ".journal":

                                        var journalFileName = Path.Combine(tempDir, entry.Name);
                                        using (var output = new FileStream(journalFileName, FileMode.Create))
                                        using (var input = entry.Open())
                                        {
                                            output.Position = output.Length;
                                            input.CopyTo(output);
                                        }

                                        var pager = new Win32MemoryMapPager(journalFileName);
                                        toDispose.Add(pager);

                                        if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
                                        {
                                            throw new InvalidOperationException("Cannot parse journal file number");
                                        }

										var recoveryPager = new Win32MemoryMapPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
                                        toDispose.Add(recoveryPager);

                                        var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);

                                        while (reader.ReadOneTransaction(env.Options))
                                        {
                                            lastTxHeader = reader.LastTransactionHeader;
                                        }

                                        foreach (var translation in reader.TransactionPageTranslation)
                                        {
                                            var pageInJournal = translation.Value.JournalPos;
                                            pagesToWrite[translation.Key] = () => recoveryPager.Read(pageInJournal);
                                        }

                                        break;
                                    default:
                                        throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
                                }
                            }

                            var sortedPages = pagesToWrite.OrderBy(x => x.Key)
                                .Select(x => x.Value())
                                .ToList();

                            var last = sortedPages.Last();

                            env.Options.DataPager.EnsureContinuous(txw, last.PageNumber,
                                last.IsOverflow
                                    ? env.Options.DataPager.GetNumberOfOverflowPages(
                                        last.OverflowSize)
                                    : 1);

                            foreach (var page in sortedPages)
                            {
                                env.Options.DataPager.Write(page);
                            }

                            env.Options.DataPager.Sync();

                            txw.State.Root = Tree.Open(txw, env._sliceComparer, &lastTxHeader->Root);
                            txw.State.FreeSpaceRoot = Tree.Open(txw, env._sliceComparer, &lastTxHeader->FreeSpace);

                            txw.State.FreeSpaceRoot.Name = Constants.FreeSpaceTreeName;
                            txw.State.Root.Name = Constants.RootTreeName;

                            txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

                            env.Journal.Clear(txw);

                            txw.Commit();

                            env.HeaderAccessor.Modify(header =>
                            {
                                header->TransactionId = lastTxHeader->TransactionId;
                                header->LastPageNumber = lastTxHeader->LastPageNumber;

                                header->Journal.LastSyncedJournal = journalNumber;
                                header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

                                header->Root = lastTxHeader->Root;
                                header->FreeSpace = lastTxHeader->FreeSpace;

                                header->Journal.CurrentJournal = journalNumber + 1;
                                header->Journal.JournalFilesCount = 0;
                            });
                        }
                        finally
                        {
                            toDispose.ForEach(x => x.Dispose());

                            try
                            {
                                Directory.Delete(tempDir, true);
                            }
                            catch (Exception)
                            {
                                // just temp dir - ignore it
                            }
                        }
                    }
                }
            }
        }
Example 36
        private static unsafe long CopyTableTree(StorageEnvironment compactedEnv, Action<StorageCompactionProgress> progressReport, Transaction txr, string treeName, long copiedTrees, long totalTreesCount, TransactionPersistentContext context, CancellationToken token)
        {
            // Load table
            var tableTree = txr.ReadTree(treeName, RootObjectType.Table);

            // Get the table schema
            var schemaSize = tableTree.GetDataSize(TableSchema.SchemasSlice);
            var schemaPtr  = tableTree.DirectRead(TableSchema.SchemasSlice);
            var schema     = TableSchema.ReadFrom(txr.Allocator, schemaPtr, schemaSize);

            // Load table into structure
            var inputTable = txr.OpenTable(schema, treeName);

            // The next three variables are used to know what our current
            // progress is
            var copiedEntries = 0;

            // It is very important that these slices be allocated in the
            // txr.Allocator, as the intermediate write transactions on
            // the compacted environment will be destroyed between each
            // loop.
            var  lastSlice      = Slices.BeforeAllKeys;
            long lastFixedIndex = 0L;

            Report(copiedTrees, totalTreesCount, copiedEntries, inputTable.NumberOfEntries, progressReport, $"Copying table tree '{treeName}'. Progress: {copiedEntries:#,#;;0}/{inputTable.NumberOfEntries:#,#;;0} entries.", treeName);
            using (var txw = compactedEnv.WriteTransaction(context))
            {
                schema.Create(txw, treeName, Math.Max((ushort)inputTable.ActiveDataSmallSection.NumberOfPages, (ushort)((ushort.MaxValue + 1) / Constants.Storage.PageSize)));
                txw.Commit(); // always create a table, even if it is empty
            }

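            // Copy the table contents in batches: each iteration opens a fresh write
            // transaction, seeks forward from the last copied key (or fixed size index value)
            // and stops once the transaction grows to half of the max scratch buffer size.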
            while (copiedEntries < inputTable.NumberOfEntries)
            {
                token.ThrowIfCancellationRequested();
                using (var txw = compactedEnv.WriteTransaction(context))
                {
                    long transactionSize = 0L;

                    var outputTable = txw.OpenTable(schema, treeName);

                    if (schema.Key == null || schema.Key.IsGlobal)
                    {
                        // There is no primary key, or there is one that is global to multiple tables
                        // we require a table to have at least a single local index that we'll use

                        var variableSizeIndex = schema.Indexes.Values.FirstOrDefault(x => x.IsGlobal == false);

                        if (variableSizeIndex != null)
                        {
                            // We have a variable size index, use it

                            // In case we continue an existing compaction, skip to the next slice
                            var skip = 0;
                            // can't use SliceComparer.Compare here
                            if (lastSlice.Options != Slices.BeforeAllKeys.Options)
                            {
                                skip = 1;
                            }

                            var sp = Stopwatch.StartNew();
                            foreach (var tvr in inputTable.SeekForwardFrom(variableSizeIndex, lastSlice, skip))
                            {
                                // The table will take care of reconstructing indexes automatically
                                outputTable.Insert(ref tvr.Result.Reader);
                                copiedEntries++;
                                transactionSize += tvr.Result.Reader.Size;

                                ReportIfNeeded(sp, copiedTrees, totalTreesCount, copiedEntries, inputTable.NumberOfEntries, progressReport, $"Copying table tree '{treeName}'. Progress: {copiedEntries:#,#;;0}/{inputTable.NumberOfEntries:#,#;;0} entries.", treeName);

                                // The transaction has surpassed the allowed
                                // size before a flush
                                if (lastSlice.Equals(tvr.Key) == false && transactionSize >= compactedEnv.Options.MaxScratchBufferSize / 2)
                                {
                                    lastSlice = tvr.Key.Clone(txr.Allocator);
                                    break;
                                }
                            }
                        }
                        else
                        {
                            // Use a fixed size index
                            var fixedSizeIndex = schema.FixedSizeIndexes.Values.FirstOrDefault(x => x.IsGlobal == false);

                            if (fixedSizeIndex == null)
                            {
                                throw new InvalidOperationException("Cannot compact table " + inputTable.Name + " because is has no local indexes, only global ones");
                            }

                            var sp = Stopwatch.StartNew();
                            foreach (var entry in inputTable.SeekForwardFrom(fixedSizeIndex, lastFixedIndex, lastFixedIndex > 0 ? 1 : 0))
                            {
                                token.ThrowIfCancellationRequested();
                                // The table will take care of reconstructing indexes automatically
                                outputTable.Insert(ref entry.Reader);
                                copiedEntries++;
                                transactionSize += entry.Reader.Size;

                                ReportIfNeeded(sp, copiedTrees, totalTreesCount, copiedEntries, inputTable.NumberOfEntries, progressReport, $"Copying table tree '{treeName}'. Progress: {copiedEntries:#,#;;0}/{inputTable.NumberOfEntries:#,#;;0} entries.", treeName);

                                // The transaction has surpassed the allowed
                                // size before a flush
                                if (transactionSize >= compactedEnv.Options.MaxScratchBufferSize / 2)
                                {
                                    lastFixedIndex = fixedSizeIndex.GetValue(ref entry.Reader);
                                    break;
                                }
                            }
                        }
                    }
                    else
                    {
                        // The table has a primary key, inserts in that order are expected to be faster
                        foreach (var entry in inputTable.SeekByPrimaryKey(lastSlice, 0))
                        {
                            token.ThrowIfCancellationRequested();
                            // The table will take care of reconstructing indexes automatically
                            outputTable.Insert(ref entry.Reader);
                            copiedEntries++;
                            transactionSize += entry.Reader.Size;

                            // The transaction has surpassed the allowed
                            // size before a flush
                            if (transactionSize >= compactedEnv.Options.MaxScratchBufferSize / 2)
                            {
                                schema.Key.GetSlice(txr.Allocator, ref entry.Reader, out lastSlice);
                                break;
                            }
                        }
                    }

                    txw.Commit();
                }

                if (copiedEntries == inputTable.NumberOfEntries)
                {
                    copiedTrees++;
                    Report(copiedTrees, totalTreesCount, copiedEntries, inputTable.NumberOfEntries, progressReport, $"Finished copying table tree '{treeName}'. Progress: {copiedEntries:#,#;;0}/{inputTable.NumberOfEntries:#,#;;0} entries.", treeName);
                }

                compactedEnv.FlushLogToDataFile();
            }

            return(copiedTrees);
        }
Example 37
        public unsafe void ValidatePageChecksumShouldDetectDataCorruption()
        {
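            // End to end checksum test: write random values into a few trees, flush to the
            // data file, flip some bytes directly on page #2 through the pager, then reopen
            // the environment and expect page validation to fail while reading everything back.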
            // Create some random data
            var treeNames = new List <string>();

            var random = new Random();

            var value1 = new byte[random.Next(1024 * 1024 * 2)];
            var value2 = new byte[random.Next(1024 * 1024 * 2)];

            random.NextBytes(value1);
            random.NextBytes(value2);

            const int treeCount   = 5;
            const int recordCount = 6;

            using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir)))
            {
                env.Options.ManualFlushing = true;

                for (int i = 0; i < treeCount; i++)
                {
                    using (var tx = env.WriteTransaction())
                    {
                        string name = "tree/" + i;
                        treeNames.Add(name);

                        var tree = tx.CreateTree(name);

                        for (int j = 0; j < recordCount; j++)
                        {
                            tree.Add(string.Format("{0}/items/{1}", name, j), j % 2 == 0 ? value1 : value2);
                        }

                        tx.Commit();
                    }
                }
                env.FlushLogToDataFile();
            }

            // Lets corrupt something
            using (var options = StorageEnvironmentOptions.ForPath(DataDir))
                using (var pager = LinuxTestUtils.GetNewPager(options, DataDir, "Raven.Voron"))
                    using (var tempTX = new TempPagerTransaction())
                    {
                        var writePtr = pager.AcquirePagePointer(tempTX, 2) + PageHeader.SizeOf + 43; // just some random place on page #2
                        for (byte i = 0; i < 8; i++)
                        {
                            writePtr[i] = i;
                        }
                    }

            // Now lets try to read it all back and hope we get an exception
            try
            {
                using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir)))
                {
                    using (var tx = env.ReadTransaction())
                    {
                        foreach (var treeName in treeNames)
                        {
                            var tree = tx.CreateTree(treeName);

                            for (int i = 0; i < recordCount; i++)
                            {
                                var readResult = tree.Read(string.Format("{0}/items/{1}", treeName, i));

                                Assert.NotNull(readResult);

                                if (i % 2 == 0)
                                {
                                    var readBytes = new byte[value1.Length];
                                    readResult.Reader.Read(readBytes, 0, readBytes.Length);
                                }
                                else
                                {
                                    var readBytes = new byte[value2.Length];
                                    readResult.Reader.Read(readBytes, 0, readBytes.Length);
                                }
                            }
                        }
                    }
                }
            }
            catch (Exception e)
            {
                Assert.True(e is InvalidOperationException || e is InvalidDataException);
            }
        }
        private unsafe void Restore(StorageEnvironment env, string backupPath)
        {
            using (env.Journal.Applicator.TakeFlushingLock())
            {
                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    using (env.Options.AllowManualFlushing())
                    {
                        env.FlushLogToDataFile(txw);
                    }

                    List <string> journalNames;

                    using (var package = ZipFile.Open(backupPath, ZipArchiveMode.Read))
                    {
                        journalNames = package.Entries.Select(x => x.Name).ToList();
                    }

                    if (journalNames.Count == 0)
                    {
                        return;
                    }

                    var tempDir   = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;
                    var toDispose = new List <IDisposable>();

                    try
                    {
                        ZipFile.ExtractToDirectory(backupPath, tempDir);

                        TransactionHeader *lastTxHeader = null;

                        var pagesToWrite = new Dictionary <long, Func <Page> >();

                        long journalNumber = -1;
                        foreach (var journalName in journalNames)
                        {
                            var pager = new Win32MemoryMapPager(Path.Combine(tempDir, journalName));
                            toDispose.Add(pager);


                            if (long.TryParse(journalName.Replace(".journal", string.Empty), out journalNumber) == false)
                            {
                                throw new InvalidOperationException("Cannot parse journal file number");
                            }

                            var recoveryPager = new Win32MemoryMapPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
                            toDispose.Add(recoveryPager);

                            var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);

                            while (reader.ReadOneTransaction(env.Options))
                            {
                                lastTxHeader = reader.LastTransactionHeader;
                            }

                            foreach (var translation in reader.TransactionPageTranslation)
                            {
                                var pageInJournal = translation.Value.JournalPos;
                                pagesToWrite[translation.Key] = () => recoveryPager.Read(pageInJournal);
                            }
                        }

                        var sortedPages = pagesToWrite.OrderBy(x => x.Key)
                                          .Select(x => x.Value())
                                          .ToList();

                        var last = sortedPages.Last();

                        env.Options.DataPager.EnsureContinuous(txw, last.PageNumber,
                            last.IsOverflow
                                ? env.Options.DataPager.GetNumberOfOverflowPages(last.OverflowSize)
                                : 1);

                        foreach (var page in sortedPages)
                        {
                            env.Options.DataPager.Write(page);
                        }

                        env.Options.DataPager.Sync();

                        txw.State.Root          = Tree.Open(txw, env._sliceComparer, &lastTxHeader->Root);
                        txw.State.FreeSpaceRoot = Tree.Open(txw, env._sliceComparer, &lastTxHeader->FreeSpace);

                        txw.State.FreeSpaceRoot.Name = Constants.FreeSpaceTreeName;
                        txw.State.Root.Name          = Constants.RootTreeName;

                        txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

                        env.Journal.Clear(txw);

                        txw.Commit();

                        env.HeaderAccessor.Modify(header =>
                        {
                            header->TransactionId  = lastTxHeader->TransactionId;
                            header->LastPageNumber = lastTxHeader->LastPageNumber;

                            header->Journal.LastSyncedJournal       = journalNumber;
                            header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

                            header->Root      = lastTxHeader->Root;
                            header->FreeSpace = lastTxHeader->FreeSpace;

                            header->Journal.CurrentJournal    = journalNumber + 1;
                            header->Journal.JournalFilesCount = 0;
                        });
                    }
                    finally
                    {
                        toDispose.ForEach(x => x.Dispose());

                        Directory.Delete(tempDir, true);
                    }
                }
            }
        }
Example 39
        public void ScratchPagesShouldNotBeReleasedUntilNotUsed()
        {
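            // An open read transaction must keep its scratch pages alive: even after
            // FlushLogToDataFile and new writes that reuse scratch positions, the iterator
            // opened before the flush has to keep seeing the original keys.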
            var directory = "Test2";

            if (Directory.Exists(directory))
                Directory.Delete(directory, true);

            var options = StorageEnvironmentOptions.ForPath(directory);

            options.ManualFlushing = true;
            using (var env = new StorageEnvironment(options))
            {
                CreateTrees(env, 2, "tree");
                for (int a = 0; a < 3; a++)
                {
                    using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                    {
                        tx.Environment.State.GetTree(tx, "tree0").Add(string.Format("key/{0}/{1}/1", new string('0', 1000), a), new MemoryStream());
                        tx.Environment.State.GetTree(tx, "tree0").Add(string.Format("key/{0}/{1}/2", new string('0', 1000), a), new MemoryStream());

                        tx.Commit();
                    }
                }

                using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    tx.Environment.State.GetTree(tx, "tree1").Add("yek/1", new MemoryStream());

                    tx.Commit();
                }

                using (var txr = env.NewTransaction(TransactionFlags.Read))
                {
                    using (var iterator = txr.Environment.State.GetTree(txr, "tree0").Iterate())
                    {
                        Assert.True(iterator.Seek(Slice.BeforeAllKeys)); // all pages are from scratch (one from position 11)

                        var currentKey = iterator.CurrentKey.ToString();

                        env.FlushLogToDataFile(); // frees pages from scratch (including the one at position 11)

                        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                        {
                            var tree = txw.Environment.State.GetTree(txw, "tree1");
                            tree.Add(string.Format("yek/{0}/0/0", new string('0', 1000)), new MemoryStream()); // allocates new page from scratch (position 11)

                            txw.Commit();
                        }

                        Assert.Equal(currentKey, iterator.CurrentKey.ToString());

                        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                        {
                            var tree = txw.Environment.State.GetTree(txw, "tree1");
                            tree.Add("fake", new MemoryStream());

                            txw.Commit();
                        }

                        Assert.Equal(currentKey, iterator.CurrentKey.ToString());

                        var count = 0;

                        do
                        {
                            currentKey = iterator.CurrentKey.ToString();
                            count++;

                            Assert.Contains("key/", currentKey);
                        }
                        while (iterator.MoveNext());

                        Assert.Equal(6, count);
                    }
                }
            }
        }
Example 40
        public void ScratchPagesShouldNotBeReleasedUntilNotUsed()
        {
            var options = StorageEnvironmentOptions.ForPath(DataDir);

            options.ManualFlushing = true;
            using (var env = new StorageEnvironment(options))
            {
                CreateTrees(env, 2, "tree");
                for (int a = 0; a < 3; a++)
                {
                    using (var tx = env.WriteTransaction())
                    {
                        tx.CreateTree("tree0").Add(string.Format("key/{0}/{1}/1", new string('0', 1000), a), new MemoryStream());
                        tx.CreateTree("tree0").Add(string.Format("key/{0}/{1}/2", new string('0', 1000), a), new MemoryStream());

                        tx.Commit();
                    }
                }

                using (var tx = env.WriteTransaction())
                {
                    tx.CreateTree("tree1").Add("yek/1", new MemoryStream());

                    tx.Commit();
                }

                using (var txr = env.ReadTransaction())
                {
                    using (var iterator = txr.CreateTree("tree0").Iterate(false))
                    {
                        Assert.True(iterator.Seek(Slices.BeforeAllKeys)); // all pages are from scratch (one from position 11)

                        var currentKey = iterator.CurrentKey.ToString();

                        env.FlushLogToDataFile(); // frees pages from scratch (including the one at position 11)

                        using (var txw = env.WriteTransaction())
                        {
                            var tree = txw.CreateTree("tree1");
                            tree.Add(string.Format("yek/{0}/0/0", new string('0', 1000)), new MemoryStream()); // allocates new page from scratch (position 11)

                            txw.Commit();
                        }

                        Assert.Equal(currentKey, iterator.CurrentKey.ToString());

                        using (var txw = env.WriteTransaction())
                        {
                            var tree = txw.CreateTree("tree1");
                            tree.Add("fake", new MemoryStream());

                            txw.Commit();
                        }

                        Assert.Equal(currentKey, iterator.CurrentKey.ToString());

                        var count = 0;

                        do
                        {
                            currentKey = iterator.CurrentKey.ToString();
                            count++;

                            Assert.Contains("key/", currentKey);
                        } while (iterator.MoveNext());

                        Assert.Equal(6, count);
                    }
                }
            }
        }
Example 41
        private static unsafe long CopyVariableSizeTree(StorageEnvironment compactedEnv, Action<StorageCompactionProgress> progressReport, Transaction txr, string treeName, long copiedTrees, long totalTreesCount, TransactionPersistentContext context, CancellationToken token)
        {
            var existingTree = txr.ReadTree(treeName);

            Report(copiedTrees, totalTreesCount, 0, existingTree.State.NumberOfEntries, progressReport, $"Copying variable size tree '{treeName}'. Progress: 0/{existingTree.State.NumberOfEntries} entries.", treeName);

            using (var existingTreeIterator = existingTree.Iterate(true))
            {
                if (existingTreeIterator.Seek(Slices.BeforeAllKeys) == false)
                {
                    return(copiedTrees);
                }

                token.ThrowIfCancellationRequested();
                using (var txw = compactedEnv.WriteTransaction(context))
                {
                    if (existingTree.IsLeafCompressionSupported)
                    {
                        txw.CreateTree(treeName, flags: TreeFlags.LeafsCompressed);
                    }
                    else
                    {
                        txw.CreateTree(treeName);
                    }

                    txw.Commit();
                }

                var copiedEntries = 0L;

                do
                {
                    var transactionSize = 0L;

                    token.ThrowIfCancellationRequested();
                    using (var txw = compactedEnv.WriteTransaction(context))
                    {
                        var newTree = txw.ReadTree(treeName);

                        do
                        {
                            token.ThrowIfCancellationRequested();
                            var key = existingTreeIterator.CurrentKey;

                            if (existingTreeIterator.Current->Flags == TreeNodeFlags.MultiValuePageRef)
                            {
                                using (var multiTreeIterator = existingTree.MultiRead(key))
                                {
                                    if (multiTreeIterator.Seek(Slices.BeforeAllKeys) == false)
                                    {
                                        continue;
                                    }

                                    do
                                    {
                                        token.ThrowIfCancellationRequested();
                                        var multiValue = multiTreeIterator.CurrentKey;
                                        newTree.MultiAdd(key, multiValue);
                                        transactionSize += multiValue.Size;
                                    } while (multiTreeIterator.MoveNext());
                                }
                            }
                            else if (existingTree.IsLeafCompressionSupported)
                            {
                                using (var read = existingTree.ReadDecompressed(key))
                                {
                                    var value = read.Reader.AsStream();

                                    newTree.Add(key, value);
                                    transactionSize += value.Length;
                                }
                            }
                            else if (existingTree.State.Flags == (TreeFlags.FixedSizeTrees | TreeFlags.Streams))
                            {
                                var tag = existingTree.GetStreamTag(key);

                                using (var stream = existingTree.ReadStream(key))
                                {
                                    if (tag != null)
                                    {
                                        Slice tagStr;
                                        using (Slice.From(txw.Allocator, tag, out tagStr))
                                            newTree.AddStream(key, stream, tagStr);
                                    }
                                    else
                                    {
                                        newTree.AddStream(key, stream);
                                    }

                                    transactionSize += stream.Length;
                                }
                            }
                            else
                            {
                                using (var value = existingTree.Read(key).Reader.AsStream())
                                {
                                    newTree.Add(key, value);
                                    transactionSize += value.Length;
                                }
                            }

                            copiedEntries++;

                            var reportRate = existingTree.State.NumberOfEntries / 33 + 1;
                            if (copiedEntries % reportRate == 0)
                            {
                                Report(copiedTrees, totalTreesCount, copiedEntries, existingTree.State.NumberOfEntries, progressReport, $"Copying variable size tree '{treeName}'. Progress: {copiedEntries}/{existingTree.State.NumberOfEntries} entries.", treeName);
                            }
                        } while (transactionSize < compactedEnv.Options.MaxScratchBufferSize / 2 && existingTreeIterator.MoveNext());

                        txw.Commit();
                    }

                    if (copiedEntries == existingTree.State.NumberOfEntries)
                    {
                        copiedTrees++;
                        Report(copiedTrees, totalTreesCount, copiedEntries, existingTree.State.NumberOfEntries, progressReport, $"Finished copying variable size tree '{treeName}'. Progress: {copiedEntries}/{existingTree.State.NumberOfEntries} entries.", treeName);
                    }

                    compactedEnv.FlushLogToDataFile();
                } while (existingTreeIterator.MoveNext());
            }
            return(copiedTrees);
        }