public Transaction(StorageEnvironment env, long id, TransactionFlags flags, IFreeSpaceHandling freeSpaceHandling) { _dataPager = env.Options.DataPager; _env = env; _journal = env.Journal; _id = id; _freeSpaceHandling = freeSpaceHandling; Flags = flags; var scratchPagerStates = env.ScratchBufferPool.GetPagerStatesOfAllScratches(); foreach (var scratchPagerState in scratchPagerStates.Values) { scratchPagerState.AddRef(); _pagerStates.Add(scratchPagerState); } if (flags.HasFlag(TransactionFlags.ReadWrite) == false) { // for read transactions, we need to keep the pager state frozen // for write transactions, we can use the current one (which == null) _scratchPagerStates = scratchPagerStates; _state = env.State.Clone(this); _journal.GetSnapshots().ForEach(AddJournalSnapshot); return; } _state = env.State.Clone(this); InitTransactionHeader(); MarkTreesForWriteTransaction(); }
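For orientation, the constructor above is what runs behind env.NewTransaction(...) in the examples below: a write transaction gets a fresh transaction header and marks its trees writable, while a read transaction clones the environment state and freezes the scratch pager states and journal snapshots. A minimal sketch of that split, using only the older Voron API that appears throughout this listing (illustrative only; namespaces and exact signatures may vary by Voron version):

using System;
using System.IO;
using Voron;

public static class TransactionFlagsSketch
{
    public static void Run()
    {
        using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
        {
            // ReadWrite: InitTransactionHeader + MarkTreesForWriteTransaction run in the ctor
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = env.CreateTree(tx, "demo");
                tree.Add("key/1", new MemoryStream(new byte[] { 1, 2, 3 }));
                tx.Commit(); // publishes the new state to later transactions
            }

            // Read: operates on the frozen state cloned in the ctor, isolated from writers
            using (var tx = env.NewTransaction(TransactionFlags.Read))
            {
                var tree = tx.ReadTree("demo");
                if (tree.Read("key/1") == null)
                    throw new InvalidOperationException("expected key/1 to be visible after commit");
            }
        }
    }
}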
public void ShouldWork() { using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly())) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { var s = new string('0', 500); var tree = env.CreateTree(tx, "data"); for (int i = 0; i < 10; i++) { tree.Add("users-" + i + "-" + s, new byte[0]); } tx.Commit(); } using (var tx = env.NewTransaction(TransactionFlags.Read)) { var tree = tx.State.GetTree(tx, "data"); using (var it = tree.Iterate()) { Assert.True(it.Seek("users-7")); for (int i = 0; i < 10; i++) { Assert.True(it.Seek("users-"+i),i.ToString()); } } } } }
private void ValidateMulti(StorageEnvironment env, IEnumerable<string> trees) { using (var snapshot = env.CreateSnapshot()) { foreach (var tree in trees) { using (var iterator = snapshot.MultiRead(tree, "test/0/user-50")) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); var keys = new HashSet<string>(); var count = 0; do { keys.Add(iterator.CurrentKey.ToString()); Guid.Parse(iterator.CurrentKey.ToString()); count++; } while (iterator.MoveNext()); Assert.Equal(2, count); Assert.Equal(2, keys.Count); } } } }
public void DataIsKeptAfterRestart() { using (var pureMemoryPager = StorageEnvironmentOptions.CreateMemoryOnly()) { pureMemoryPager.OwnsPagers = false; using (var env = new StorageEnvironment(pureMemoryPager)) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { tx.Root.Add("test/1", new MemoryStream()); tx.Commit(); } using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { tx.Root.Add("test/2", new MemoryStream()); tx.Commit(); } } using (var env = new StorageEnvironment(pureMemoryPager)) { using (var tx = env.NewTransaction(TransactionFlags.Read)) { Assert.NotNull(tx.Root.Read("test/1")); Assert.NotNull(tx.Root.Read("test/2")); tx.Commit(); } } } }
public void Can_split_merged_transaction_to_multiple_tx() { _tempDir = Guid.NewGuid().ToString(); var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir); storageEnvironmentOptions.IncrementalBackupEnabled = true; storageEnvironmentOptions.MaxNumberOfPagesInMergedTransaction = 8; using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions)) { for (int xi = 0; xi < 100; xi++) { using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite)) { var tree = envToSnapshot.CreateTree(tx, "test"); for (int i = 0; i < 1000; i++) { tree.Add("users/" + i, "john doe/" + i); } tx.Commit(); } } var snapshotWriter = new MinimalIncrementalBackup(); var backupPath = Path.Combine(_tempDir, "1.snapshot"); snapshotWriter.ToFile(envToSnapshot, backupPath); using (var stream = File.OpenRead(backupPath)) using (var zip = new ZipArchive(stream, ZipArchiveMode.Read)) { Assert.True(zip.Entries.Count > 1); } } }
private void ValidateRecords(StorageEnvironment env, IEnumerable<string> trees, IList<string> ids) { using (var snapshot = env.CreateSnapshot()) { foreach (var tree in trees) { using (var iterator = snapshot.Iterate(tree)) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); var keys = new HashSet<string>(); var count = 0; do { keys.Add(iterator.CurrentKey.ToString()); Assert.True(ids.Contains(iterator.CurrentKey.ToString()), "Unknown key: " + iterator.CurrentKey); Assert.NotNull(snapshot.Read(tree, iterator.CurrentKey)); count++; } while (iterator.MoveNext()); Assert.Equal(ids.Count, snapshot.Transaction.Environment.CreateTree(snapshot.Transaction,tree).State.EntriesCount); Assert.Equal(ids.Count, count); Assert.Equal(ids.Count, keys.Count); } } } }
public void StorageEnvironment_Two_Different_Tx_Should_be_shipped_properly1() { var transactionsToShip = new ConcurrentQueue<TransactionToShip>(); Env.Journal.OnTransactionCommit += tx => { tx.CreatePagesSnapshot(); transactionsToShip.Enqueue(tx); }; using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = Env.CreateTree(tx, "TestTree"); tree.Add("ABC", "Foo"); tx.Commit(); } using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = Env.CreateTree(tx, "TestTree2"); tree.Add("ABC", "Foo"); tx.Commit(); } using (var shippingDestinationEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly())) { TransactionToShip tx; transactionsToShip.TryDequeue(out tx); shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot); using (var snapshot = shippingDestinationEnv.CreateSnapshot()) { Assert.DoesNotThrow(() => // if the tree doesn't exist, Read throws InvalidOperationException { var result = snapshot.Read("TestTree", "ABC"); Assert.Equal(1, result.Version); Assert.Equal("Foo", result.Reader.ToStringValue()); }); } transactionsToShip.TryDequeue(out tx); shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot); using (var snapshot = shippingDestinationEnv.CreateSnapshot()) { Assert.DoesNotThrow(() => // if the tree doesn't exist, Read throws InvalidOperationException { var result = snapshot.Read("TestTree", "ABC"); Assert.Equal(1, result.Version); Assert.Equal("Foo", result.Reader.ToStringValue()); }); Assert.DoesNotThrow(() => // if the tree doesn't exist, Read throws InvalidOperationException { var result = snapshot.Read("TestTree2", "ABC"); Assert.Equal(1, result.Version); Assert.Equal("Foo", result.Reader.ToStringValue()); }); } } }
public static void Execute(StorageEnvironmentOptions srcOptions, StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions compactOptions, Action<CompactionProgress> progressReport = null) { if (srcOptions.IncrementalBackupEnabled) throw new InvalidOperationException(CannotCompactBecauseOfIncrementalBackup); long minimalCompactedDataFileSize; srcOptions.ManualFlushing = true; // prevent from flushing during compaction - we shouldn't touch any source files compactOptions.ManualFlushing = true; // let us flush manually during data copy using(var existingEnv = new StorageEnvironment(srcOptions)) using (var compactedEnv = new StorageEnvironment(compactOptions)) { CopyTrees(existingEnv, compactedEnv, progressReport); compactedEnv.FlushLogToDataFile(allowToFlushOverwrittenPages: true); compactedEnv.Journal.Applicator.SyncDataFile(compactedEnv.OldestTransaction); compactedEnv.Journal.Applicator.DeleteCurrentAlreadyFlushedJournal(); minimalCompactedDataFileSize = compactedEnv.NextPageNumber*AbstractPager.PageSize; } using (var compactedDataFile = new FileStream(Path.Combine(compactOptions.BasePath, Constants.DatabaseFilename), FileMode.Open, FileAccess.ReadWrite)) { compactedDataFile.SetLength(minimalCompactedDataFileSize); } }
public void AllScratchPagesShouldBeReleased() { var options = StorageEnvironmentOptions.CreateMemoryOnly(); options.ManualFlushing = true; using (var env = new StorageEnvironment(options)) { using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) { env.CreateTree(txw, "test"); txw.Commit(); } using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = txw.Environment.State.GetTree(txw, "test"); tree.Add("key/1", new MemoryStream(new byte[100])); tree.Add("key/1", new MemoryStream(new byte[200])); txw.Commit(); } env.FlushLogToDataFile(); // no read or write transactions are open, so the flush should release everything from scratch Assert.Equal(0, env.ScratchBufferPool.GetNumberOfAllocations(0)); } }
public void StorageRecoveryShouldWorkWhenThereAreCommitedAndUncommitedTransactions() { var path = "test2.data"; DeleteDirectory(path); using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path))) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { env.CreateTree(tx, "tree"); tx.Commit(); } using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { for (var i = 0; i < 10000; i++) { tx.Environment.State.GetTree(tx,"tree").Add("a" + i, new MemoryStream()); } } } using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path))) { } DeleteDirectory(path); }
public void DataIsKeptAfterRestart_OnDisk() { if (Directory.Exists("test.data")) Directory.Delete("test.data", true); using (var pager = StorageEnvironmentOptions.ForPath("test.data")) { pager.OwnsPagers = false; using (var env = new StorageEnvironment(pager)) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { tx.State.Root.Add("test/1", new MemoryStream()); tx.Commit(); } using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { tx.State.Root.Add("test/2", new MemoryStream()); tx.Commit(); } } using (var env = new StorageEnvironment(pager)) { using (var tx = env.NewTransaction(TransactionFlags.Read)) { Assert.NotNull(tx.State.Root.Read("test/1")); Assert.NotNull(tx.State.Root.Read("test/2")); tx.Commit(); } } } }
public TableStorage(StorageEnvironment environment, IBufferPool bufferPool) { this.bufferPool = bufferPool; env = environment; Initialize(); }
public void AllScratchPagesShouldBeReleased() { var options = StorageEnvironmentOptions.CreateMemoryOnly(); options.ManualFlushing = true; using (var env = new StorageEnvironment(options)) { using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) { env.CreateTree(txw, "test"); txw.Commit(); } using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = txw.Environment.CreateTree(txw, "test"); tree.Add("key/1", new MemoryStream(new byte[100])); tree.Add("key/1", new MemoryStream(new byte[200])); txw.Commit(); } env.FlushLogToDataFile(); // no read or write transactions are open, so the flush should release everything from scratch // we keep track of the pages in scratch for one additional transaction, to avoid a race // condition between FlushLogToDataFile and new read transactions Assert.Equal(2, env.ScratchBufferPool.GetNumberOfAllocations(0)); } }
public void IterationShouldNotFindAnyRecordsAndShouldNotThrowWhenNumberOfEntriesOnPageIs1AndKeyDoesNotMatch() { using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly())) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { env.CreateTree(tx, "tree"); tx.Commit(); } using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = tx.ReadTree("tree"); tree.Add(@"Raven\Database\1", StreamFor("123")); tx.Commit(); } using (var snapshot = env.CreateSnapshot()) using (var iterator = snapshot.Iterate("tree")) { Assert.False(iterator.Seek(@"Raven\Filesystem\")); } } }
public void CanAddMultiValuesUnderTheSameKeyToBatch() { using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly())) { var rand = new Random(); var testBuffer = new byte[168]; rand.NextBytes(testBuffer); CreateTrees(env, 1, "multitree"); var batch = new WriteBatch(); batch.MultiAdd("key", "value1", "multitree0"); batch.MultiAdd("key", "value2", "multitree0"); env.Writer.Write(batch); using (var tx = env.NewTransaction(TransactionFlags.Read)) { var tree = tx.Environment.CreateTree(tx,"multitree0"); using (var it = tree.MultiRead("key")) { Assert.True(it.Seek(Slice.BeforeAllKeys)); Assert.Equal("value1", it.CurrentKey.ToString()); Assert.True(it.MoveNext()); Assert.Equal("value2", it.CurrentKey.ToString()); } } } }
public void PageSplitterShouldCalculateSeparatorKeyCorrectly2() { var ids = ReadIds("data2.txt"); StorageEnvironmentOptions storageEnvironmentOptions = StorageEnvironmentOptions.CreateMemoryOnly(); storageEnvironmentOptions.MaxScratchBufferSize *=2; using (var env = new StorageEnvironment(storageEnvironmentOptions)) { var rand = new Random(); var testBuffer = new byte[69]; rand.NextBytes(testBuffer); var trees = CreateTrees(env, 1, "tree"); foreach (var id in ids) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { foreach (var treeName in trees) { var tree = tx.Environment.CreateTree(tx, treeName); tree.Add(id, new MemoryStream(testBuffer)); } tx.Commit(); } } ValidateRecords(env, trees, ids); } }
public DebugJournal(string journalName, StorageEnvironment env, bool isRecordingByDefault = false) { _env = env; IsRecording = isRecordingByDefault; InitializeDebugJournal(journalName); _isDisposed = false; }
public static DebugJournal FromFile(string journalName, StorageEnvironment env, bool onlyValueLength = false) { var newJournal = new DebugJournal(journalName, env) { RecordOnlyValueLength = onlyValueLength }; return newJournal; }
public void DefaultScratchLocation() { var options = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(path); using (var env = new StorageEnvironment(options)) { var scratchFile = Path.Combine(path, "scratch.buffers"); Assert.True(File.Exists(scratchFile)); } }
public void CanBackupAndRestore() { RequireFileBasedPager(); var random = new Random(); var buffer = new byte[8192]; random.NextBytes(buffer); using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { for (int i = 0; i < 500; i++) { tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); } Assert.True(Env.Journal.Files.Count > 1); Env.FlushLogToDataFile(); // force writing data to the data file // add more data to journal files using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { for (int i = 500; i < 1000; i++) { tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); } Env.FlushLogToDataFile(); // force writing data to the data file - this won't sync data to disk because there was another sync within the last minute BackupMethods.Full.ToFile(Env, _backupFile); BackupMethods.Full.Restore(_backupFile, _recoveredStoragePath); var options = StorageEnvironmentOptions.ForPath(_recoveredStoragePath); options.MaxLogFileSize = Env.Options.MaxLogFileSize; using (var env = new StorageEnvironment(options)) { using (var tx = env.NewTransaction(TransactionFlags.Read)) { for (int i = 0; i < 1000; i++) { var readResult = tx.State.Root.Read("items/" + i); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); readResult.Reader.CopyTo(memoryStream); Assert.Equal(memoryStream.ToArray(), buffer); } } } }
internal TransactionMergingWriter(StorageEnvironment env, CancellationToken cancellationToken, DebugJournal debugJournal = null) { _env = env; _cancellationToken = cancellationToken; _stopWrites.Set(); _debugJournal = debugJournal; _backgroundTask = new Lazy<Task>(() => Task.Factory.StartNew(BackgroundWriter, _cancellationToken, TaskCreationOptions.LongRunning, TaskScheduler.Current)); }
public void ShouldNotThrowChecksumMismatch() { var random = new Random(1); var buffer = new byte[100]; random.NextBytes(buffer); for (int i = 0; i < 100; i++) { buffer[i] = 13; } var options = StorageEnvironmentOptions.ForPath(_dataPath); using (var env = new StorageEnvironment(options)) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { for (int i = 0; i < 50; i++) { tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); } using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { for (int i = 50; i < 100; i++) { tx.State.Root.Add("items/" + i, new MemoryStream(buffer)); } tx.Commit(); } } options = StorageEnvironmentOptions.ForPath(_dataPath); using (var env = new StorageEnvironment(options)) { using (var tx = env.NewTransaction(TransactionFlags.Read)) { for (int i = 0; i < 100; i++) { var readResult = tx.State.Root.Read("items/" + i); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); readResult.Reader.CopyTo(memoryStream); Assert.Equal(memoryStream.ToArray(), buffer); } } } }
public static int Main() { var sp = Stopwatch.StartNew(); var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(@"\\10.0.0.10\Documents\main"); using (var se = new StorageEnvironment(storageEnvironmentOptions)) { } Console.WriteLine(sp.Elapsed); return 0; }
public void ScratchLocationWithTemporaryPathSpecified() { var options = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(path, temp); using (var env = new StorageEnvironment(options)) { var scratchFile = Path.Combine(path, "scratch.buffers"); var scratchFileTemp = Path.Combine(temp, "scratch.buffers"); Assert.False(File.Exists(scratchFile)); Assert.True(File.Exists(scratchFileTemp)); } }
public void ShouldBeAbleToWriteValuesGreaterThanLogAndRecoverThem() { DeleteDirectory("test2.data"); var random = new Random(1234); var buffer = new byte[1024 * 512]; random.NextBytes(buffer); var options = StorageEnvironmentOptions.ForPath("test2.data"); options.MaxLogFileSize = 10 * AbstractPager.PageSize; using (var env = new StorageEnvironment(options)) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { env.CreateTree(tx, "tree"); tx.Commit(); } using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { tx.Environment.State.GetTree(tx,"tree").Add("key1", new MemoryStream(buffer)); tx.Commit(); } } options = StorageEnvironmentOptions.ForPath("test2.data"); options.MaxLogFileSize = 10 * AbstractPager.PageSize; using (var env = new StorageEnvironment(options)) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { env.CreateTree(tx, "tree"); tx.Commit(); } using (var tx = env.NewTransaction(TransactionFlags.Read)) { var read = tx.Environment.State.GetTree(tx,"tree").Read("key1"); Assert.NotNull(read); { Assert.Equal(buffer.Length, read.Reader.Length); int used; Assert.Equal(buffer, read.Reader.ReadBytes(read.Reader.Length, out used).Take(used).ToArray()); } } } DeleteDirectory("test2.data"); }
public void Can_make_multiple_min_inc_backups_and_then_restore() { const int UserCount = 5000; _tempDir = Guid.NewGuid().ToString(); var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir); storageEnvironmentOptions.IncrementalBackupEnabled = true; using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions)) { int index = 0; for (int xi = 0; xi < 5; xi++) { for (int yi = 0; yi < 2; yi++) { using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite)) { var tree = envToSnapshot.CreateTree(tx, "test"); for (int i = 0; i < UserCount / 10; i++) { tree.Add("users/" + index, "john doe/" + index); index++; } tx.Commit(); } } var snapshotWriter = new MinimalIncrementalBackup(); snapshotWriter.ToFile(envToSnapshot, Path.Combine(_tempDir, xi + ".snapshot")); } } var incremental = new IncrementalBackup(); var restoredOptions = StorageEnvironmentOptions.ForPath(Path.Combine(_tempDir, "restored")); incremental.Restore(restoredOptions, Enumerable.Range(0, 5).Select(i => Path.Combine(_tempDir, i + ".snapshot"))); using (var snapshotRestoreEnv = new StorageEnvironment(restoredOptions)) { using (var tx = snapshotRestoreEnv.NewTransaction(TransactionFlags.Read)) { var tree = tx.ReadTree("test"); Assert.NotNull(tree); for (int i = 0; i < UserCount; i++) { var readResult = tree.Read("users/" + i); Assert.NotNull(readResult); Assert.Equal("john doe/" + i, readResult.Reader.ToStringValue()); } } } }
public void Restore(StorageEnvironmentOptions options, IEnumerable<string> backupPaths) { var ownsPagers = options.OwnsPagers; options.OwnsPagers = false; using (var env = new StorageEnvironment(options)) { foreach (var backupPath in backupPaths) { Restore(env, backupPath); } } options.OwnsPagers = ownsPagers; }
public TableStorage(StorageEnvironmentOptions options, IBufferPool bufferPool) { if (options == null) throw new ArgumentNullException("options"); _options = options; this.bufferPool = bufferPool; Debug.Assert(options != null); env = new StorageEnvironment(options); Initialize(); }
public GraphStorage(string graphName, StorageEnvironment storageEnvironment) { if (String.IsNullOrWhiteSpace(graphName)) throw new ArgumentNullException("graphName"); if (storageEnvironment == null) throw new ArgumentNullException("storageEnvironment"); _nodeTreeName = graphName + Constants.NodeTreeNameSuffix; _edgeTreeName = graphName + Constants.EdgeTreeNameSuffix; _disconnectedNodesTreeName = graphName + Constants.DisconnectedNodesTreeName; _storageEnvironment = storageEnvironment; CreateConventions(); CreateSchema(); CreateCommandAndQueryInstances(); _nextId = GetLatestStoredNodeKey(); }
public void StorageRecoveryShouldWorkWhenThereSingleTransactionToRecoverFromLog() { using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir))) { using (var tx = env.WriteTransaction()) { var tree = tx.CreateTree("tree"); for (var i = 0; i < 100; i++) { tree.Add("key" + i, new MemoryStream()); } tx.Commit(); } } using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir))) { using (var tx = env.WriteTransaction()) { tx.CreateTree("tree"); tx.Commit(); } using (var tx = env.ReadTransaction()) { var tree = tx.CreateTree("tree"); for (var i = 0; i < 100; i++) { Assert.NotNull(tree.Read("key" + i)); } } } }
public void ShouldCorrectlyLoadAfterRestartIfIncrementalBackupWasDone() { var bytes = new byte[1024]; new Random().NextBytes(bytes); using (var env = new StorageEnvironment(ModifyOptions(StorageEnvironmentOptions.ForPath(DataDir)))) { using (var tx = env.WriteTransaction()) { tx.CreateTree("items"); tx.Commit(); } for (int j = 0; j < 100; j++) { using (var tx = env.WriteTransaction()) { var tree = tx.ReadTree("items"); for (int i = 0; i < 100; i++) { tree.Add("items/" + i, bytes); } tx.Commit(); } } BackupMethods.Incremental.ToFile(env, _incrementalBackupTestUtils.IncrementalBackupFile(0)); } // restart using (var env = new StorageEnvironment(ModifyOptions(StorageEnvironmentOptions.ForPath(DataDir)))) { } }
public void ShouldDeleteCurrentJournalEvenThoughItHasAvailableSpace() { using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir))) { using (var tx = env.WriteTransaction()) { var tree = tx.CreateTree("fruits"); tree.Add("apple", new byte[123]); tree.Add("orange", new byte[99]); tx.Commit(); } } var compactedData = Path.Combine(DataDir, "Compacted"); StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(DataDir), (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(compactedData)); var compactedDir = new DirectoryInfo(compactedData); var journalsAfterCompaction = compactedDir.GetFiles("*.journal").Select(x => x.Name).ToList(); Assert.Equal(0, journalsAfterCompaction.Count); // ensure it can write more data using (var compacted = new StorageEnvironment(StorageEnvironmentOptions.ForPath(compactedData))) { using (var tx = compacted.WriteTransaction()) { var tree = tx.CreateTree("fruits"); tree.Add("peach", new byte[144]); tx.Commit(); } } }
public void MultiTreeIteratorShouldBeIsolated2() { var directory = "Test2"; DeleteDirectory(directory); var options = StorageEnvironmentOptions.ForPath(directory); using (var env = new StorageEnvironment(options)) { CreateTrees(env, 1, "tree"); for (var i = 0; i < 11; i++) { Write(env, i); } using (var txr = env.NewTransaction(TransactionFlags.Read)) { var key = Delete(env, 10); using (var iterator = txr.ReadTree("tree0").MultiRead("key/1")) { Assert.True(iterator.Seek(Slice.BeforeAllKeys)); var keys = new List<string>(); do { keys.Add(iterator.CurrentKey.ToString()); } while (iterator.MoveNext()); Assert.Equal(11, keys.Count); Assert.Contains(key, keys); } } } }
public void ShouldNotInvokeIntegrityError(string fileName) { Directory.CreateDirectory(DataDir); ExtractFile(DataDir, fileName); var options = StorageEnvironmentOptions.ForPath(DataDir); options.OnIntegrityErrorOfAlreadySyncedData += (sender, args) => { _onIntegrityErrorOfAlreadySyncedDataHandlerWasCalled = true; }; options.ManualSyncing = true; options.ManualFlushing = true; options.MaxScratchBufferSize = 1 * 1024 * 1024 * 1024; options.IgnoreDataIntegrityErrorsOfAlreadySyncedTransactions = true; using (var storage = new StorageEnvironment(options)) { Assert.False(_onIntegrityErrorOfAlreadySyncedDataHandlerWasCalled); } }
private void TrackReadOnlyPage(Page page) { if (writablePages.ContainsKey(page.PageNumber)) { return; } ulong pageHash = StorageEnvironment.CalculatePageChecksum(page.Pointer, page.PageNumber, page.Flags, page.OverflowSize); ulong storedHash; if (readOnlyPages.TryGetValue(page.PageNumber, out storedHash)) { if (pageHash != storedHash) { VoronUnrecoverableErrorException.Raise(_env, "Read-only page has changed between tracking requests. Page #" + page.PageNumber); } } else { readOnlyPages[page.PageNumber] = pageHash; } }
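The guard above reduces to: remember a checksum per read-only page, and verify it whenever the page is tracked again. A self-contained sketch of that idea follows; ComputeChecksum is a hypothetical FNV-1a stand-in, not Voron's StorageEnvironment.CalculatePageChecksum, and the class is illustrative rather than the real tracker:

using System;
using System.Collections.Generic;

public class ReadOnlyPageTrackerSketch
{
    // page number -> checksum observed the first time the page was tracked
    private readonly Dictionary<long, ulong> _readOnlyPages = new Dictionary<long, ulong>();

    public void Track(long pageNumber, byte[] pageBytes)
    {
        var hash = ComputeChecksum(pageBytes);
        if (_readOnlyPages.TryGetValue(pageNumber, out var storedHash))
        {
            // the page was supposed to stay read-only, yet its content changed
            if (hash != storedHash)
                throw new InvalidOperationException("Read-only page has changed between tracking requests. Page #" + pageNumber);
        }
        else
        {
            _readOnlyPages[pageNumber] = hash; // first sighting: record the baseline
        }
    }

    // Hypothetical stand-in checksum (FNV-1a over the page bytes)
    private static ulong ComputeChecksum(byte[] data)
    {
        var hash = 14695981039346656037UL;
        foreach (var b in data)
        {
            hash ^= b;
            hash *= 1099511628211UL;
        }
        return hash;
    }
}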
public void Error_on_db_creation_must_not_cause_failure_on_next_db_load() { var dataDir = DataDir; using (var options = StorageEnvironmentOptions.ForPath(dataDir)) { options.SimulateFailureOnDbCreation = true; Assert.Throws<InvalidOperationException>(() => { using (var a = new StorageEnvironment(options)) { } }); } using (var options = StorageEnvironmentOptions.ForPath(dataDir)) { using (var s = new StorageEnvironment(options)) { } } }
public static IndexDefinition Load(StorageEnvironment environment) { using (var context = JsonOperationContext.ShortTermSingleUse()) using (var tx = environment.ReadTransaction()) { var tree = tx.CreateTree("Definition"); var result = tree.Read(DefinitionSlice); if (result == null) { return(null); } using (var reader = context.ReadForDisk(result.Reader.AsStream(), string.Empty)) { var definition = ReadIndexDefinition(reader); definition.Name = ReadName(reader); definition.LockMode = ReadLockMode(reader); definition.Priority = ReadPriority(reader); return(definition); } } }
public void Initialize(StorageEnvironment environment) { if (_initialized) { throw new InvalidOperationException(); } environment.NewTransactionCreated += SetStreamCacheInTx; using (var tx = environment.WriteTransaction()) { InitializeMainIndexStorage(tx, environment); InitializeSuggestionsIndexStorage(tx, environment); BuildStreamCacheAfterTx(tx); // force tx commit so it will bump tx counter and just created searcher holder will have valid tx id tx.LowLevelTransaction.ModifyPage(0); tx.Commit(); } _initialized = true; }
public void Initialize(StorageEnvironment environment, TransactionContextPool contextPool, IndexStore indexStore, TransformerStore transformerStore) { _environment = environment; _contextPool = contextPool; TransactionOperationContext context; using (contextPool.AllocateOperationContext(out context)) using (var tx = context.OpenWriteTransaction()) { IndexesTableSchema.Create(tx.InnerTransaction, SchemaNameConstants.IndexMetadataTable, 16); ConflictsTableSchema.Create(tx.InnerTransaction, SchemaNameConstants.ConflictMetadataTable, 16); tx.InnerTransaction.CreateTree(SchemaNameConstants.GlobalChangeVectorTree); tx.InnerTransaction.CreateTree(SchemaNameConstants.LastReplicatedEtagsTree); DeleteIndexMetadataForRemovedIndexesAndTransformers(tx.InnerTransaction, context, indexStore, transformerStore); tx.Commit(); } IsInitialized = true; }
public void ShouldWork() { using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly())) { for (int x = 0; x < 10; x++) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { var value = new byte[100]; new Random().NextBytes(value); var ms = new MemoryStream(value); for (long i = 0; i < 100; i++) { ms.Position = 0; tx.State.Root.Add(tx, (x * i).ToString("0000000000000000"), ms); } tx.Commit(); } } } }
private static void ReadOneTransaction_Parallel(Stopwatch sw, int concurrency) { using (var env = new StorageEnvironment(new MemoryMapPager(Path))) { var countdownEvent = new CountdownEvent(concurrency); sw.Start(); for (int i = 0; i < concurrency; i++) { var currentBase = i; ThreadPool.QueueUserWorkItem(state => { using (var tx = env.NewTransaction(TransactionFlags.Read)) { var ms = new byte[100]; for (int j = 0; j < ((ItemsPerTransaction * Transactions) / concurrency); j++) { var current = j * currentBase; var key = current.ToString("0000000000000000"); using (var stream = env.Root.Read(tx, key)) { while (stream.Read(ms, 0, ms.Length) != 0) { } } } tx.Commit(); } countdownEvent.Signal(); }); } countdownEvent.Wait(); sw.Stop(); } }
public ConfigurationStorage(DocumentDatabase db) { var path = db.Configuration.Core.DataDirectory.Combine("Configuration"); string tempPath = null; if (db.Configuration.Storage.TempPath != null) { tempPath = db.Configuration.Storage.TempPath.Combine("Configuration").ToFullPath(); } var options = db.Configuration.Core.RunInMemory ? StorageEnvironmentOptions.CreateMemoryOnly(path.FullPath, tempPath, db.IoChanges, db.CatastrophicFailureNotification) : StorageEnvironmentOptions.ForPath(path.FullPath, tempPath, null, db.IoChanges, db.CatastrophicFailureNotification); options.OnNonDurableFileSystemError += db.HandleNonDurableFileSystemError; options.OnRecoveryError += db.HandleOnConfigurationRecoveryError; options.CompressTxAboveSizeInBytes = db.Configuration.Storage.CompressTxAboveSize.GetValue(SizeUnit.Bytes); options.SchemaVersion = SchemaUpgrader.CurrentVersion.ConfigurationVersion; options.SchemaUpgrader = SchemaUpgrader.Upgrader(SchemaUpgrader.StorageType.Configuration, this, null); options.ForceUsing32BitsPager = db.Configuration.Storage.ForceUsing32BitsPager; options.TimeToSyncAfterFlashInSec = (int)db.Configuration.Storage.TimeToSyncAfterFlash.AsTimeSpan.TotalSeconds; options.NumOfConcurrentSyncsPerPhysDrive = db.Configuration.Storage.NumberOfConcurrentSyncsPerPhysicalDrive; options.MasterKey = db.MasterKey?.ToArray(); options.DoNotConsiderMemoryLockFailureAsCatastrophicError = db.Configuration.Security.DoNotConsiderMemoryLockFailureAsCatastrophicError; if (db.Configuration.Storage.MaxScratchBufferSize.HasValue) { options.MaxScratchBufferSize = db.Configuration.Storage.MaxScratchBufferSize.Value.GetValue(SizeUnit.Bytes); } NotificationsStorage = new NotificationsStorage(db.Name); OperationsStorage = new OperationsStorage(); Environment = LayoutUpdater.OpenEnvironment(options); ContextPool = new TransactionContextPool(Environment); }
public void StorageEnvironment_should_be_able_to_accept_transactionsToShip_with_new_trees() { var transactionsToShip = new ConcurrentBag<TransactionToShip>(); using (var shippingSourceEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly())) { shippingSourceEnv.Journal.OnTransactionCommit += tx => { tx.CreatePagesSnapshot(); transactionsToShip.Add(tx); }; using (var tx = shippingSourceEnv.NewTransaction(TransactionFlags.ReadWrite)) { shippingSourceEnv.CreateTree(tx, "TestTree"); tx.Commit(); } } var storageEnvironmentOptions = StorageEnvironmentOptions.CreateMemoryOnly(); storageEnvironmentOptions.ManualFlushing = true; using (var shippingDestinationEnv = new StorageEnvironment(storageEnvironmentOptions)) { foreach (var tx in transactionsToShip) { shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot); } shippingDestinationEnv.FlushLogToDataFile(); using (var snapshot = shippingDestinationEnv.CreateSnapshot()) { snapshot.Read("TestTree", "Foo"); } } }
public void MultiTreeIteratorShouldBeIsolated1() { DeleteDirectory(DataDir); var options = StorageEnvironmentOptions.ForPath(DataDir); using (var env = new StorageEnvironment(options)) { CreateTrees(env, 1, "tree"); for (var i = 0; i < 10; i++) { Write(env, i); } using (var txr = env.ReadTransaction()) { var key = Write(env, 10); using (var iterator = txr.ReadTree("tree0").MultiRead("key/1")) { Assert.True(iterator.Seek(Slices.BeforeAllKeys)); var count = 0; do { Assert.True(iterator.CurrentKey.ToString() != key, string.Format("Key '{0}' should not be present in multi-iterator", key)); count++; } while (iterator.MoveNext()); Assert.Equal(10, count); } } } }
private static void ReadOneTransaction(Stopwatch sw) { using (var env = new StorageEnvironment(new MemoryMapPager(Path))) { sw.Start(); using (var tx = env.NewTransaction(TransactionFlags.Read)) { var ms = new byte[100]; for (int i = 0; i < Transactions * ItemsPerTransaction; i++) { var key = i.ToString("0000000000000000"); using (var stream = env.Root.Read(tx, key)) { while (stream.Read(ms, 0, ms.Length) != 0) { } } } tx.Commit(); } sw.Stop(); } }
private static long CopyFixedSizeTreeFromRoot(StorageEnvironment compactedEnv, Action <StorageCompactionProgress> progressReport, Transaction txr, TreeIterator rootIterator, string treeName, long copiedTrees, long totalTreesCount, TransactionPersistentContext context, CancellationToken token) { var treeNameSlice = rootIterator.CurrentKey.Clone(txr.Allocator); var header = (FixedSizeTreeHeader.Embedded *)txr.LowLevelTransaction.RootObjects.DirectRead(treeNameSlice); var fst = txr.FixedTreeFor(treeNameSlice, header->ValueSize); Report(copiedTrees, totalTreesCount, 0, fst.NumberOfEntries, progressReport, $"Copying fixed size tree '{treeName}'. Progress: 0/{fst.NumberOfEntries} entries.", treeName); CopyFixedSizeTree(fst, txw => txw.FixedTreeFor(treeNameSlice, header->ValueSize), compactedEnv, context, copiedEntries => { Report(copiedTrees, totalTreesCount, copiedEntries, fst.NumberOfEntries, progressReport, $"Copying fixed size tree '{treeName}'. Progress: {copiedEntries}/{fst.NumberOfEntries} entries.", treeName); }, () => { copiedTrees++; Report(copiedTrees, totalTreesCount, fst.NumberOfEntries, fst.NumberOfEntries, progressReport, $"Finished copying fixed size tree '{treeName}'. {fst.NumberOfEntries} entries copied.", treeName); }, token); return(copiedTrees); }
public void Restore(string outPath, IEnumerable<string> backupPaths) { foreach (var backupPath in backupPaths) { using (var package = ZipFile.Open(backupPath, ZipArchiveMode.Read, System.Text.Encoding.UTF8)) { if (package.Entries.Count == 0) { return; } foreach (var dir in package.Entries.GroupBy(entry => Path.GetDirectoryName(entry.FullName))) { using (var options = StorageEnvironmentOptions.ForPath(Path.Combine(outPath, dir.Key))) { options.ManualFlushing = true; using (var env = new StorageEnvironment(options)) { Restore(env, dir); } } } } } }
private static void FillBatchReadBatchOneTransaction(Stopwatch sw, int iterations) { using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(Path))) { sw.Start(); using (var tx = env.NewTransaction(TransactionFlags.Read)) { var ms = new byte[100]; var batch = new WriteBatch(); for (int i = 0; i < iterations; i++) { var key = i.ToString("0000000000000000"); batch.Add(key, new MemoryStream(), null); } using (var snapshot = env.CreateSnapshot()) { for (int i = 0; i < iterations; i++) { var key = i.ToString("0000000000000000"); var read = snapshot.Read(null, key, batch).Reader; { while (read.Read(ms, 0, ms.Length) != 0) { } } } } tx.Commit(); } sw.Stop(); } }
public void StorageRecoveryShouldWorkWhenThereAreCommitedAndUncommitedTransactions() { using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir))) { using (var tx = env.WriteTransaction()) { tx.CreateTree("tree"); tx.Commit(); } using (var tx = env.WriteTransaction()) { for (var i = 0; i < 10000; i++) { tx.CreateTree("tree").Add("a" + i, new MemoryStream()); } } } using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir))) { } }
public void CanAddMultiValuesUnderTheSameKeyToBatch() { using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly())) { var rand = new Random(); var testBuffer = new byte[168]; rand.NextBytes(testBuffer); CreateTrees(env, 1, "multitree"); using (var tx = env.WriteTransaction()) { var batch = tx.CreateTree("multitree0"); batch.MultiAdd("key", "value1"); batch.MultiAdd("key", "value2"); tx.Commit(); } using (var tx = env.ReadTransaction()) { var tree = tx.CreateTree("multitree0"); using (var it = tree.MultiRead("key")) { Assert.True(it.Seek(Slices.BeforeAllKeys)); Assert.Equal("value1", it.CurrentKey.ToString()); Assert.True(it.MoveNext()); Assert.Equal("value2", it.CurrentKey.ToString()); } } } }
public unsafe void StreamsTempFile_With_Encryption_ShouldThrow_When_SeekAndWrite_AreMixed_Without_ExecutingReset() { using (var options = StorageEnvironmentOptions.ForPath(DataDir)) { options.Encryption.MasterKey = Sodium.GenerateRandomBuffer((int)Sodium.crypto_aead_xchacha20poly1305_ietf_keybytes()); using (var environment = new StorageEnvironment(options)) { using (var temp = new StreamsTempFile(Path.Combine(DataDir, "EncryptedTempFile"), environment)) { var bytes = new byte[1024]; fixed(byte *b = bytes) { Memory.Set(b, (byte)'I', bytes.Length); } Stream stream; using (temp.Scope()) { stream = temp.StartNewStream(); stream.Write(bytes, 0, bytes.Length); stream.Flush(); stream.Seek(0, SeekOrigin.Begin); var read = stream.Read(new Span <byte>(new byte[10])); Assert.Equal(10, read); Assert.Throws <NotSupportedException>(() => stream.Write(bytes, 0, bytes.Length)); } Assert.Throws <NotSupportedException>(() => stream.Write(bytes, 0, bytes.Length)); } } } }
public void ScratchPagesShouldNotBeReleasedUntilNotUsed() { var options = StorageEnvironmentOptions.ForPath(DataDir); options.ManualFlushing = true; using (var env = new StorageEnvironment(options)) { CreateTrees(env, 2, "tree"); for (int a = 0; a < 3; a++) { using (var tx = env.WriteTransaction()) { tx.CreateTree("tree0").Add(string.Format("key/{0}/{1}/1", new string('0', 1000), a), new MemoryStream()); tx.CreateTree("tree0").Add(string.Format("key/{0}/{1}/2", new string('0', 1000), a), new MemoryStream()); tx.Commit(); } } using (var tx = env.WriteTransaction()) { tx.CreateTree("tree1").Add("yek/1", new MemoryStream()); tx.Commit(); } using (var txr = env.ReadTransaction()) { using (var iterator = txr.CreateTree("tree0").Iterate(false)) { Assert.True(iterator.Seek(Slices.BeforeAllKeys)); // all pages are from scratch (one from position 11) var currentKey = iterator.CurrentKey.ToString(); env.FlushLogToDataFile(); // frees pages from scratch (including the one at position 11) using (var txw = env.WriteTransaction()) { var tree = txw.CreateTree("tree1"); tree.Add(string.Format("yek/{0}/0/0", new string('0', 1000)), new MemoryStream()); // allocates new page from scratch (position 11) txw.Commit(); } Assert.Equal(currentKey, iterator.CurrentKey.ToString()); using (var txw = env.WriteTransaction()) { var tree = txw.CreateTree("tree1"); tree.Add("fake", new MemoryStream()); txw.Commit(); } Assert.Equal(currentKey, iterator.CurrentKey.ToString()); var count = 0; do { currentKey = iterator.CurrentKey.ToString(); count++; Assert.Contains("key/", currentKey); }while (iterator.MoveNext()); Assert.Equal(6, count); } } } }
public void OldestActiveTransactionShouldBeCalculatedProperly() { using (var options = StorageEnvironmentOptions.CreateMemoryOnly()) { options.ManualFlushing = true; using (var env = new StorageEnvironment(options)) { var trees = CreateTrees(env, 1, "tree"); var transactions = new List <Transaction>(); for (int a = 0; a < 100; a++) { var random = new Random(1337); var buffer = new byte[random.Next(100, 1000)]; random.NextBytes(buffer); using (var tx = env.WriteTransaction()) { for (int i = 0; i < 100; i++) { foreach (var tree in trees) { tx.CreateTree(tree).Add(string.Format("key/{0}/{1}", a, i), new MemoryStream(buffer)); } } var txr = env.ReadTransaction(); transactions.Add(txr); tx.Commit(); } env.FlushLogToDataFile(); } Assert.Equal(transactions.OrderBy(x => x.LowLevelTransaction.Id).First().LowLevelTransaction.Id, env.ActiveTransactions.OldestTransaction); foreach (var tx in transactions) { foreach (var tree in trees) { using (var iterator = tx.CreateTree(tree).Iterate(false)) { if (!iterator.Seek(Slices.BeforeAllKeys)) { continue; } do { Assert.Contains("key/", iterator.CurrentKey.ToString()); } while (iterator.MoveNext()); } } } foreach (var transaction in transactions) { transaction.Dispose(); } } } }
public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal, Action<string> infoNotify = null, Action backupStarted = null) { infoNotify = infoNotify ?? (s => { }); if (env.Options.IncrementalBackupEnabled == false) { throw new InvalidOperationException("Incremental backup is disabled for this storage"); } long numberOfBackedUpPages = 0; var copier = new DataCopier(env.Options.PageSize * 16); var backupSuccess = true; long lastWrittenLogPage = -1; long lastWrittenLogFile = -1; using (var file = new FileStream(backupPath, FileMode.Create)) { using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true)) { IncrementalBackupInfo backupInfo; using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite)) { backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup); if (env.Journal.CurrentFile != null) { lastWrittenLogFile = env.Journal.CurrentFile.Number; lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition; } // txw.Commit(); intentionally not committing } using (env.NewLowLevelTransaction(TransactionFlags.Read)) { if (backupStarted != null) { backupStarted(); // let the caller know that we have started the backup } var usedJournals = new List<JournalFile>(); try { long lastBackedUpPage = -1; long lastBackedUpFile = -1; var firstJournalToBackup = backupInfo.LastBackedUpJournal; if (firstJournalToBackup == -1) { firstJournalToBackup = 0; // first time that we do incremental backup } for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++) { var num = journalNum; var journalFile = GetJournalFile(env, journalNum, backupInfo); journalFile.AddRef(); usedJournals.Add(journalFile); var startBackupAt = 0L; long pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages; if (journalFile.Number == backupInfo.LastBackedUpJournal) { startBackupAt = backupInfo.LastBackedUpJournalPage + 1; pagesToCopy -= startBackupAt; } if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocatedPages) // nothing to do here { continue; } var part = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalNum), compression); Debug.Assert(part != null); if (journalFile.Number == lastWrittenLogFile) { pagesToCopy -= (journalFile.JournalWriter.NumberOfAllocatedPages - lastWrittenLogPage); } using (var stream = part.Open()) { copier.ToStream(env, journalFile, startBackupAt, pagesToCopy, stream); infoNotify(string.Format("Voron Incr copy journal number {0}", num)); } lastBackedUpFile = journalFile.Number; if (journalFile.Number == backupInfo.LastCreatedJournal) { lastBackedUpPage = startBackupAt + pagesToCopy - 1; // we used all of this file, so the next backup should start in the next file if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocatedPages - 1)) { lastBackedUpPage = -1; lastBackedUpFile++; } } numberOfBackedUpPages += pagesToCopy; } env.HeaderAccessor.Modify(header => { header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile; header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage; }); } catch (Exception) { backupSuccess = false; throw; } finally { var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal; foreach (var jrnl in usedJournals) { if (backupSuccess) // if backup succeeded we can remove journals { if (jrnl.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number jrnl.Number < lastSyncedJournal) // prevent deletion of journals that aren't synced with the data file { jrnl.DeleteOnClose = true; } } jrnl.Release(); } } infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages)); } } file.Flush(true); // make sure that this is actually persisted fully to disk return(numberOfBackedUpPages); } }
public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options) { if (_readAt4Kb >= _journalPagerNumberOfAllocated4Kb) { return(false); } if (TryReadAndValidateHeader(options, out TransactionHeader * current) == false) { var lastValid4Kb = _readAt4Kb; _readAt4Kb++; while (_readAt4Kb < _journalPagerNumberOfAllocated4Kb) { if (TryReadAndValidateHeader(options, out current)) { if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options)) { SkipCurrentTransaction(current); return(true); } RequireHeaderUpdate = true; break; } _readAt4Kb++; } _readAt4Kb = lastValid4Kb; return(false); } if (IsAlreadySyncTransaction(current)) { SkipCurrentTransaction(current); return(true); } var performDecompression = current->CompressedSize != -1; var transactionSizeIn4Kb = GetTransactionSizeIn4Kb(current); _readAt4Kb += transactionSizeIn4Kb; TransactionHeaderPageInfo *pageInfoPtr; byte *outputPage; if (performDecompression) { var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize); _recoveryPager.EnsureContinuous(0, numberOfPages); _recoveryPager.EnsureMapped(this, 0, numberOfPages); outputPage = _recoveryPager.AcquirePagePointer(this, 0); Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize); try { LZ4.Decode64LongBuffers((byte *)current + sizeof(TransactionHeader), current->CompressedSize, outputPage, current->UncompressedSize, true); } catch (Exception e) { options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e); RequireHeaderUpdate = true; return(false); } pageInfoPtr = (TransactionHeaderPageInfo *)outputPage; } else { var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize); _recoveryPager.EnsureContinuous(0, numberOfPages); _recoveryPager.EnsureMapped(this, 0, numberOfPages); outputPage = _recoveryPager.AcquirePagePointer(this, 0); Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize); Memory.Copy(outputPage, (byte *)current + sizeof(TransactionHeader), current->UncompressedSize); pageInfoPtr = (TransactionHeaderPageInfo *)outputPage; } long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount; if (totalRead > current->UncompressedSize) { throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}"); } for (var i = 0; i < current->PageCount; i++) { if (pageInfoPtr[i].PageNumber > current->LastPageNumber) { throw new InvalidDataException($"Transaction {current->TransactionId} contains reference to page {pageInfoPtr[i].PageNumber} which is after the last allocated page {current->LastPageNumber}"); } } for (var i = 0; i < current->PageCount; i++) { if (totalRead > current->UncompressedSize) { throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}"); } Debug.Assert(_journalPager.Disposed == false); if (performDecompression) { Debug.Assert(_recoveryPager.Disposed == false); } var numberOfPagesOnDestination = GetNumberOfPagesFor(pageInfoPtr[i].Size); _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination); _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination); // We are going to overwrite the page, so we don't care about its current content var pagePtr = _dataPager.AcquirePagePointerForNewPage(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination); _dataPager.MaybePrefetchMemory(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination); var pageNumber = *(long *)(outputPage + totalRead); if (pageInfoPtr[i].PageNumber != pageNumber) { throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {pageNumber}"); } totalRead += sizeof(long); _modifiedPages.Add(pageNumber); for (var j = 1; j < numberOfPagesOnDestination; j++) { _modifiedPages.Remove(pageNumber + j); } _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size); if (pageInfoPtr[i].DiffSize == 0) { if (pageInfoPtr[i].Size == 0) { // diff contained no changes continue; } var journalPagePtr = outputPage + totalRead; if (options.Encryption.IsEnabled == false) { var pageHeader = (PageHeader *)journalPagePtr; var checksum = StorageEnvironment.CalculatePageChecksum((byte *)pageHeader, pageNumber, out var expectedChecksum); if (checksum != expectedChecksum) { ThrowInvalidChecksumOnPageFromJournal(pageNumber, current, expectedChecksum, checksum, pageHeader); } } Memory.Copy(pagePtr, journalPagePtr, pageInfoPtr[i].Size); totalRead += pageInfoPtr[i].Size; if (options.Encryption.IsEnabled) { var pageHeader = (PageHeader *)pagePtr; if ((pageHeader->Flags & PageFlags.Overflow) == PageFlags.Overflow) { // need to mark overlapped buffers as invalid for commit var encryptionBuffers = ((IPagerLevelTransactionState)this).CryptoPagerTransactionState[_dataPager]; var numberOfPages = VirtualPagerLegacyExtensions.GetNumberOfOverflowPages(pageHeader->OverflowSize); for (var j = 1; j < numberOfPages; j++) { if (encryptionBuffers.TryGetValue(pageNumber + j, out var buffer)) { buffer.SkipOnTxCommit = true; } } } } } else { _diffApplier.Destination = pagePtr; _diffApplier.Diff = outputPage + totalRead; _diffApplier.Size = pageInfoPtr[i].Size; _diffApplier.DiffSize = pageInfoPtr[i].DiffSize; _diffApplier.Apply(pageInfoPtr[i].IsNewDiff); totalRead += pageInfoPtr[i].DiffSize; } _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size); } LastTransactionHeader = current; return(true); }
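In the recovery loop above, _diffApplier turns a journal diff back into a full data page. The actual diff format isn't shown in this listing, so the following is only a conceptual sketch with an invented record layout of [long offset][int length][bytes] per changed run; Voron's real DiffApplier works differently:

using System;

public static class PageDiffSketch
{
    // Applies an invented diff format onto a destination page buffer:
    // repeated records of [long offset][int length][length bytes].
    public static void Apply(byte[] destinationPage, byte[] diff)
    {
        var pos = 0;
        while (pos < diff.Length)
        {
            var offset = BitConverter.ToInt64(diff, pos);
            pos += sizeof(long);
            var length = BitConverter.ToInt32(diff, pos);
            pos += sizeof(int);

            // overwrite only the run that actually changed
            Buffer.BlockCopy(diff, pos, destinationPage, (int)offset, length);
            pos += length;
        }
    }
}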
public ScratchBufferPool(StorageEnvironment env) { _scratchPager = env.Options.CreateScratchPager("scratch.buffers"); _scratchPager.AllocateMorePages(null, env.Options.InitialLogFileSize); }
public unsafe void ShouldPreserveTables(int entries, int seed) { // Create random docs to check everything is preserved using (var allocator = new ByteStringContext(SharedMultipleUseFlag.None)) { var create = new Dictionary <Slice, long>(); var delete = new List <Slice>(); var r = new Random(seed); for (var i = 0; i < entries; i++) { Slice key; Slice.From(allocator, "test" + i, out key); create.Add(key, r.Next()); if (r.NextDouble() < 0.5) { delete.Add(key); } } // Create the schema var schema = new TableSchema() .DefineKey(new TableSchema.SchemaIndexDef { StartIndex = 0, Count = 1, IsGlobal = false }); using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir))) { // Create table in the environment using (var tx = env.WriteTransaction()) { schema.Create(tx, "test", 16); var table = tx.OpenTable(schema, "test"); foreach (var entry in create) { var value = entry.Value; table.Set(new TableValueBuilder { entry.Key, value }); } tx.Commit(); } using (var tx = env.ReadTransaction()) { var table = tx.OpenTable(schema, "test"); Assert.Equal(table.NumberOfEntries, entries); } // Delete some of the entries (this is so that compaction makes sense) using (var tx = env.WriteTransaction()) { var table = tx.OpenTable(schema, "test"); foreach (var entry in delete) { table.DeleteByKey(entry); } tx.Commit(); } } var compactedData = Path.Combine(DataDir, "Compacted"); StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(DataDir), (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions) StorageEnvironmentOptions.ForPath(compactedData)); using (var compacted = new StorageEnvironment(StorageEnvironmentOptions.ForPath(compactedData))) { using (var tx = compacted.ReadTransaction()) { var table = tx.OpenTable(schema, "test"); foreach (var entry in create) { TableValueReader reader; var hasValue = table.ReadByKey(entry.Key, out reader); if (delete.Contains(entry.Key)) { // This key should not be here Assert.False(hasValue); } else { // This key should be there Assert.True(hasValue); // Data should be the same int size; byte *ptr = reader.Read(0, out size); Slice current; using (Slice.External(allocator, ptr, size, out current)) Assert.True(SliceComparer.Equals(current, entry.Key)); ptr = reader.Read(1, out size); Assert.Equal(entry.Value, *(long *)ptr); } } tx.Commit(); } } } }
public void Can_compact_fixed_size_tree_stored_inside_variable_size_tree(int count) { RequireFileBasedPager(); var bytes = new byte[48]; Slice.From(Allocator, "main-tree", out Slice mainTreeId); Slice.From(Allocator, "fst-tree", out Slice fstTreeId); var smallValue = new byte[] { 1, 2, 3 }; var bigValue = new byte[128]; for (int i = 0; i < 128; i++) { bigValue[i] = (byte)i; } using (var tx = Env.WriteTransaction()) { var mainTree = tx.CreateTree(mainTreeId); var fst = mainTree.FixedTreeFor(fstTreeId, valSize: 48); for (int i = 0; i < count; i++) { EndianBitConverter.Little.CopyBytes(i, bytes, 0); fst.Add(i, bytes); Slice read; using (fst.Read(i, out read)) { Assert.True(read.HasValue); } } mainTree.Add("small", smallValue); mainTree.Add("big", bigValue); tx.Commit(); } Env.Dispose(); var compactedData = Path.Combine(DataDir, "Compacted"); StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(DataDir), (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(compactedData)); using (var compacted = new StorageEnvironment(StorageEnvironmentOptions.ForPath(compactedData))) { using (var tx = compacted.ReadTransaction()) { var mainTree = tx.CreateTree(mainTreeId); var fst = mainTree.FixedTreeFor(fstTreeId, valSize: 48); for (int i = 0; i < count; i++) { Assert.True(fst.Contains(i), $"at {i}"); Slice read; using (fst.Read(i, out read)) { read.CopyTo(bytes); Assert.Equal(i, EndianBitConverter.Little.ToInt32(bytes, 0)); } } var readResult = mainTree.Read("small"); Assert.Equal(smallValue, readResult.Reader.AsStream().ReadData()); readResult = mainTree.Read("big"); Assert.Equal(bigValue, readResult.Reader.AsStream().ReadData()); } } }