// Returns the number of entries currently stored in the given table's tree,
// using a short-lived read transaction.
public long GetEntriesCount(TableBase table)
{
    using (var readTx = env.NewTransaction(TransactionFlags.Read))
    {
        var tree = env.CreateTree(readTx, table.TableName);
        return tree.State.EntriesCount;
    }
}
// Verifies that recovery succeeds when the journal contains both a committed
// transaction (the tree creation) and an uncommitted one (the bulk writes,
// which are deliberately never committed before the environment is disposed).
public void StorageRecoveryShouldWorkWhenThereAreCommitedAndUncommitedTransactions2()
{
    var path = "test2.data";
    DeleteDirectory(path);
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "atree");
            env.CreateTree(tx, "btree");
            tx.Commit();
        }
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            for (var i = 0; i < 10000; i++)
            {
                tx.Environment.State.GetTree(tx, "atree").Add("a" + i, new MemoryStream());
                tx.Environment.State.GetTree(tx, "btree").MultiAdd("a" + i, "a" + i);
            }
            // NOTE: no tx.Commit() here — leaving this transaction uncommitted
            // is the point of the test.
        }
    }
    // Reopening the environment forces recovery; it must not throw.
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
    }
    DeleteDirectory(path);
}
// Opens (or initializes) a full-text index on top of the given storage options.
// Creates the core trees, assigns and persists a stable index id on first use,
// and restores the last assigned document id from the docs tree.
public FullTextIndex(StorageEnvironmentOptions options, IAnalyzer analyzer)
{
    Analyzer = analyzer;
    Conventions = new IndexingConventions();
    BufferPool = new BufferPool();
    StorageEnvironment = new StorageEnvironment(options);
    using (var tx = StorageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        StorageEnvironment.CreateTree(tx, "@terms", keysPrefixing: true);
        StorageEnvironment.CreateTree(tx, "deletes", keysPrefixing: true);
        var docs = StorageEnvironment.CreateTree(tx, "docs", keysPrefixing: true);
        var metadata = StorageEnvironment.CreateTree(tx, "$metadata");
        var idVal = metadata.Read("id");
        if (idVal == null)
        {
            // First run: mint and persist a fresh index id.
            Id = Guid.NewGuid();
            metadata.Add("id", Id.ToByteArray());
        }
        else
        {
            int _;
            Id = new Guid(idVal.Reader.ReadBytes(16, out _));
        }
        using (var it = docs.Iterate())
        {
            // Seek to the last key in the docs tree; its big-endian key encodes
            // the highest document id issued so far (0 when the tree is empty).
            _lastDocumentId = it.Seek(Slice.AfterAllKeys) == false
                ? 0
                : it.CurrentKey.CreateReader().ReadBigEndianInt64();
        }
        tx.Commit();
    }
}
// Opens the FreeDB storage environment and makes sure every tree used by the
// query API exists before any reads are issued.
public FreeDbQueries(string path)
{
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path));
    using (Transaction setupTx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        foreach (var treeName in new[] { "albums", "ix_diskids", "ix_artists", "ix_titles" })
        {
            _storageEnvironment.CreateTree(setupTx, treeName);
        }
        setupTx.Commit();
    }
}
// Opens the "FreeDB" storage environment, creates the destination trees,
// and prepares the first write batch.
public VoronDisksDestination()
{
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath("FreeDB"));
    using (var setupTx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        foreach (var treeName in new[] { "albums", "ix_diskids", "ix_artists", "ix_titles" })
        {
            _storageEnvironment.CreateTree(setupTx, treeName);
        }
        setupTx.Commit();
    }
    _currentBatch = new WriteBatch();
}
// Regression test: seeking a key that sorts past the single entry on a page
// must simply report "not found" instead of throwing.
public void IterationShouldNotFindAnyRecordsAndShouldNotThrowWhenNumberOfEntriesOnPageIs1AndKeyDoesNotMatch()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        using (var setupTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(setupTx, "tree");
            setupTx.Commit();
        }

        using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            writeTx.ReadTree("tree").Add(@"Raven\Database\1", StreamFor("123"));
            writeTx.Commit();
        }

        using (var snapshot = env.CreateSnapshot())
        {
            using (var iterator = snapshot.Iterate("tree"))
            {
                // No key has this prefix, so the seek must fail gracefully.
                Assert.False(iterator.Seek(@"Raven\Filesystem\"));
            }
        }
    }
}
// Verifies the scratch-buffer accounting after a manual flush: pages for the
// most recent transaction are intentionally retained (see comment below), so
// exactly 2 allocations should remain.
public void AllScratchPagesShouldBeReleased()
{
    var options = StorageEnvironmentOptions.CreateMemoryOnly();
    // Manual flushing lets the test control exactly when scratch pages are released.
    options.ManualFlushing = true;
    using (var env = new StorageEnvironment(options))
    {
        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(txw, "test");
            txw.Commit();
        }
        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = txw.Environment.CreateTree(txw, "test");
            // Two writes to the same key force two scratch-page allocations.
            tree.Add("key/1", new MemoryStream(new byte[100]));
            tree.Add("key/1", new MemoryStream(new byte[200]));
            txw.Commit();
        }
        env.FlushLogToDataFile(); // non read nor write transactions, so it should flush and release everything from scratch
        // we keep track of the pages in scratch for one additional transaction, to avoid race
        // condition with FlushLogToDataFile concurrently with new read transactions
        Assert.Equal(2, env.ScratchBufferPool.GetNumberOfAllocations(0));
    }
}
// Writes ten long keys sharing the "users-" prefix, then verifies an iterator
// can seek to each of them by prefix.
public void ShouldWork()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        var padding = new string('0', 500);

        using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var dataTree = env.CreateTree(writeTx, "data");
            for (var n = 0; n < 10; n++)
            {
                dataTree.Add("users-" + n + "-" + padding, new byte[0]);
            }
            writeTx.Commit();
        }

        using (var readTx = env.NewTransaction(TransactionFlags.Read))
        {
            var dataTree = readTx.ReadTree("data");
            using (var iterator = dataTree.Iterate())
            {
                Assert.True(iterator.Seek("users-7"));
                for (var n = 0; n < 10; n++)
                {
                    Assert.True(iterator.Seek("users-" + n), n.ToString());
                }
            }
        }
    }
}
// Verifies that data written to a named tree survives an environment restart
// when the in-memory pager is owned externally and reused across environments.
public void DataIsKeptAfterRestartForSubTrees()
{
    using (var pureMemoryPager = new PureMemoryPager())
    {
        // ownsPager: false keeps the pager alive after this environment is disposed.
        using (var env = new StorageEnvironment(pureMemoryPager, ownsPager: false))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.CreateTree(tx, "test");
                tx.Commit();
            }
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.GetTree("test").Add(tx, "test", Stream.Null);
                tx.Commit();
            }
        }
        // Reopen on the same pager: the entry written above must still be readable.
        using (var env = new StorageEnvironment(pureMemoryPager))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.CreateTree(tx, "test");
                tx.Commit();
            }
            using (var tx = env.NewTransaction(TransactionFlags.Read))
            {
                Assert.NotNull(env.GetTree("test").Read(tx, "test"));
                tx.Commit();
            }
        }
    }
}
// Writes ten long keys sharing the "users-" prefix, then verifies an iterator
// (obtained via the transaction state) can seek to each of them by prefix.
public void ShouldWork()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        var padding = new string('0', 500);

        using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var dataTree = env.CreateTree(writeTx, "data");
            for (var n = 0; n < 10; n++)
            {
                dataTree.Add("users-" + n + "-" + padding, new byte[0]);
            }
            writeTx.Commit();
        }

        using (var readTx = env.NewTransaction(TransactionFlags.Read))
        {
            var dataTree = readTx.State.GetTree(readTx, "data");
            using (var iterator = dataTree.Iterate())
            {
                Assert.True(iterator.Seek("users-7"));
                for (var n = 0; n < 10; n++)
                {
                    Assert.True(iterator.Seek("users-" + n), n.ToString());
                }
            }
        }
    }
}
// Opens the key/value state machine storage, ensures its trees exist, and
// restores the last applied Raft index from metadata (if one was persisted).
public KeyValueStateMachine(StorageEnvironmentOptions options)
{
    // Incremental backup must be on so the state machine's log can be shipped.
    options.IncrementalBackupEnabled = true;
    _storageEnvironment = new StorageEnvironment(options);
    using (var setupTx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        _storageEnvironment.CreateTree(setupTx, "items");
        var metadata = _storageEnvironment.CreateTree(setupTx, "$metadata");
        var lastIndexEntry = metadata.Read("last-index");
        if (lastIndexEntry != null)
        {
            LastAppliedIndex = lastIndexEntry.Reader.ReadLittleEndianInt64();
        }
        setupTx.Commit();
    }
}
// Voron-backed object database backend. Opens the environment at the given
// path and ensures the index and object trees exist.
public VoronOdbBackend(string voronDataPath)
{
    if (voronDataPath == null)
        throw new ArgumentNullException("voronDataPath");

    _env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(voronDataPath));
    using (var setupTx = _env.NewTransaction(TransactionFlags.ReadWrite))
    {
        _env.CreateTree(setupTx, Index);
        _env.CreateTree(setupTx, Objects);
        setupTx.Commit();
    }
}
// Verifies that, with no open read or write transactions, a manual flush
// releases every scratch-buffer allocation (this variant expects 0 remaining).
public void AllScratchPagesShouldBeReleased()
{
    var options = StorageEnvironmentOptions.CreateMemoryOnly();
    // Manual flushing lets the test control exactly when scratch pages are released.
    options.ManualFlushing = true;
    using (var env = new StorageEnvironment(options))
    {
        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(txw, "test");
            txw.Commit();
        }
        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = txw.Environment.State.GetTree(txw, "test");
            // Two writes to the same key force two scratch-page allocations.
            tree.Add("key/1", new MemoryStream(new byte[100]));
            tree.Add("key/1", new MemoryStream(new byte[200]));
            txw.Commit();
        }
        env.FlushLogToDataFile(); // non read nor write transactions, so it should flush and release everything from scratch
        Assert.Equal(0, env.ScratchBufferPool.GetNumberOfAllocations(0));
    }
}
// Captures page snapshots of committed transactions from a source environment
// and applies them to a fresh destination environment (without flushing),
// verifying that a shipped tree can be read there.
public void StorageEnvironment_should_be_able_to_accept_transactionsToShip_with_new_trees_no_flushing()
{
    var transactionsToShip = new ConcurrentBag<TransactionToShip>();
    using (var shippingSourceEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        // Snapshot every committed transaction so it can be replayed elsewhere.
        shippingSourceEnv.Journal.OnTransactionCommit += tx =>
        {
            tx.CreatePagesSnapshot();
            transactionsToShip.Add(tx);
        };
        using (var tx = shippingSourceEnv.NewTransaction(TransactionFlags.ReadWrite))
        {
            shippingSourceEnv.CreateTree(tx, "TestTree");
            tx.Commit();
        }
    }
    var storageEnvironmentOptions = StorageEnvironmentOptions.CreateMemoryOnly();
    // Keep everything in the journal: the point is to read without a flush.
    storageEnvironmentOptions.ManualFlushing = true;
    using (var shippingDestinationEnv = new StorageEnvironment(storageEnvironmentOptions))
    {
        foreach (var tx in transactionsToShip)
        {
            shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot);
        }
        using (var snapshot = shippingDestinationEnv.CreateSnapshot())
        {
            // Reading must not throw even though the key does not exist.
            snapshot.Read("TestTree", "Foo");
        }
    }
}
// Writes all buffered channel items to storage in a single transaction,
// persists the updated sequence counter, then clears the in-memory buffer.
public void Flush()
{
    _bufferSize = 0;
    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        foreach (var kvp in _buffer)
        {
            // One tree per channel, named "channel:<channel key>".
            var data = _storageEnvironment.CreateTree(tx, "channel:" + kvp.Key);
            var buffer = new byte[16];
            var key = new Slice(buffer);
            var ms = new MemoryStream();
            var bw = new BinaryWriter(ms);
            foreach (var item in kvp.Value)
            {
                var date = item.Timestamp;
                // Key layout: 8 bytes big-endian ticks + 8 bytes big-endian sequence
                // number, so keys sort chronologically. The Slice wraps the buffer,
                // so rewriting the buffer changes the key used by each Add.
                EndianBitConverter.Big.CopyBytes(date.Ticks, buffer, 0);
                EndianBitConverter.Big.CopyBytes(_last++, buffer, 8);
                // Reuse the MemoryStream for each value: truncate, write, rewind.
                ms.SetLength(0);
                bw.Write(item.Value);
                ms.Position = 0;
                data.Add(tx, key, ms);
            }
        }
        // Persist the counter so sequence numbers survive a restart.
        tx.State.Root.Add(tx, _lastKey, new MemoryStream(BitConverter.GetBytes(_last)));
        tx.Commit();
    }
    _buffer.Clear();
}
// Verifies that compaction refuses to run on a source environment that has
// incremental backup enabled, surfacing the dedicated error message.
public void CannotCompactStorageIfIncrementalBackupEnabled()
{
    var envOptions = StorageEnvironmentOptions.ForPath(CompactionTestsData);
    envOptions.IncrementalBackupEnabled = true;
    using (var env = new StorageEnvironment(envOptions))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "records");
            tree.Add("record/1", new byte[9]);
            tree.Add("record/2", new byte[9]);
            tx.Commit();
        }
    }
    var srcOptions = StorageEnvironmentOptions.ForPath(CompactionTestsData);
    srcOptions.IncrementalBackupEnabled = true;
    // Compacting would invalidate the incremental backup chain, so it must throw.
    var invalidOperationException = Assert.Throws<InvalidOperationException>(
        () => StorageCompaction.Execute(srcOptions,
            (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData)));
    Assert.Equal(StorageCompaction.CannotCompactBecauseOfIncrementalBackup, invalidOperationException.Message);
}
// Replays a single recorded write action against the storage environment,
// dispatching on the recorded action type. The transaction is taken by ref to
// match the caller's replay-loop contract.
private void ReplayWriteAction(WriteActivityEntry activityEntry, ref Transaction tx)
{
    switch (activityEntry.ActionType)
    {
        case DebugActionType.Add:
            tx.ReadTree(activityEntry.TreeName).Add(activityEntry.Key, activityEntry.ValueStream);
            break;
        case DebugActionType.Delete:
            tx.ReadTree(activityEntry.TreeName).Delete(activityEntry.Key);
            break;
        case DebugActionType.MultiAdd:
            tx.ReadTree(activityEntry.TreeName).MultiAdd(activityEntry.Key, new Slice(Encoding.UTF8.GetBytes(activityEntry.Value.ToString())));
            break;
        case DebugActionType.MultiDelete:
            tx.ReadTree(activityEntry.TreeName).MultiDelete(activityEntry.Key, new Slice(Encoding.UTF8.GetBytes(activityEntry.Value.ToString())));
            break;
        case DebugActionType.CreateTree:
            _env.CreateTree(tx, activityEntry.TreeName);
            break;
        case DebugActionType.Increment:
            // BUG FIX: Stream.Read may return fewer bytes than requested. The
            // original single Read call ignored the return value and could decode
            // a partially filled buffer; loop until the full 8-byte little-endian
            // delta has been read.
            var buffer = new byte[sizeof(long)];
            var totalRead = 0;
            while (totalRead < buffer.Length)
            {
                var bytesRead = activityEntry.ValueStream.Read(buffer, totalRead, buffer.Length - totalRead);
                if (bytesRead == 0)
                    break; // unexpected end of stream; remaining bytes stay zero
                totalRead += bytesRead;
            }
            var delta = EndianBitConverter.Little.ToInt64(buffer, 0);
            tx.ReadTree(activityEntry.TreeName).Increment(activityEntry.Key, delta);
            break;
        default: //precaution against newly added action types
            throw new InvalidOperationException("unsupported tree action type");
    }
}
// Verifies that when a merged transaction exceeds MaxNumberOfPagesInMergedTransaction,
// the minimal incremental backup splits it into multiple transactions — observable
// as more than one entry in the resulting snapshot zip.
public void Can_split_merged_transaction_to_multiple_tx()
{
    _tempDir = Guid.NewGuid().ToString();
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;
    // A deliberately tiny merge limit forces the split to happen.
    storageEnvironmentOptions.MaxNumberOfPagesInMergedTransaction = 8;
    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        for (int xi = 0; xi < 100; xi++)
        {
            using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = envToSnapshot.CreateTree(tx, "test");
                for (int i = 0; i < 1000; i++)
                {
                    tree.Add("users/" + i, "john doe/" + i);
                }
                tx.Commit();
            }
        }
        var snapshotWriter = new MinimalIncrementalBackup();
        var backupPath = Path.Combine(_tempDir, "1.snapshot");
        snapshotWriter.ToFile(envToSnapshot, backupPath);
        using (var stream = File.OpenRead(backupPath))
        using (var zip = new ZipArchive(stream, ZipArchiveMode.Read))
        {
            // More than one entry means the merged transaction was split.
            Assert.True(zip.Entries.Count > 1);
        }
    }
}
// Verifies that compaction produces a destination with no journal files — even
// the current journal, which still has free space — and that the compacted
// environment can accept and persist new writes afterwards.
public void ShouldDeleteCurrentJournalEvenThoughItHasAvailableSpace()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "fruits");
            tree.Add("apple", new byte[123]);
            tree.Add("orange", new byte[99]);
            tx.Commit();
        }
    }
    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData));
    var compactedDir = new DirectoryInfo(CompactedData);
    var journalsAfterCompaction = compactedDir.GetFiles("*.journal").Select(x => x.Name).ToList();
    Assert.Equal(0, journalsAfterCompaction.Count);
    // ensure it can write more data
    using (var compacted = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactedData)))
    {
        using (var tx = compacted.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = compacted.CreateTree(tx, "fruits");
            tree.Add("peach", new byte[144]);
            // BUG FIX: the transaction was never committed, so the write was
            // rolled back and "can write more data" was never actually verified.
            tx.Commit();
        }
    }
}
// Mixes small values and overflow-sized (5000 char) values across two committed
// transactions, takes a minimal incremental backup, restores it into a new
// environment, and verifies that all final values — including the overwrite of
// users/2 — were preserved.
public void Mixed_small_and_overflow_changes()
{
    _tempDir = Guid.NewGuid().ToString();
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;
    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = envToSnapshot.CreateTree(tx, "test");
            tree.Add("users/1", "john doe");
            tree.Add("users/2", new String('a', 5000)); // overflow-sized value
            tx.Commit();
        }
        using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = envToSnapshot.CreateTree(tx, "test");
            tree.Add("users/2", "jane darling"); // overwrite overflow with small value
            tree.Add("users/3", new String('b', 5000));
            tx.Commit();
        }
        var snapshotWriter = new MinimalIncrementalBackup();
        snapshotWriter.ToFile(envToSnapshot, Path.Combine(_tempDir, "1.snapshot"));
        var restoredOptions = StorageEnvironmentOptions.ForPath(Path.Combine(_tempDir, "restored"));
        new IncrementalBackup().Restore(restoredOptions, new[] { Path.Combine(_tempDir, "1.snapshot") });
        using (var snapshotRestoreEnv = new StorageEnvironment(restoredOptions))
        {
            using (var tx = snapshotRestoreEnv.NewTransaction(TransactionFlags.Read))
            {
                var tree = tx.ReadTree("test");
                Assert.NotNull(tree);
                Assert.Equal("john doe", tree.Read("users/1").Reader.ToStringValue());
                Assert.Equal("jane darling", tree.Read("users/2").Reader.ToStringValue());
                Assert.Equal(new String('b', 5000), tree.Read("users/3").Reader.ToStringValue());
            }
        }
    }
}
// Ships a large number of committed transactions (two trees, lots of data) to a
// destination environment, flushes there, and validates the shipped contents.
public void StorageEnvironment_should_be_able_to_accept_transactionsToShip_with_LOTS_of_transactions()
{
    var transactionsToShip = new List<TransactionToShip>();
    using (var shippingSourceEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        // Snapshot every committed transaction so it can be replayed elsewhere.
        shippingSourceEnv.Journal.OnTransactionCommit += tx =>
        {
            tx.CreatePagesSnapshot();
            transactionsToShip.Add(tx);
        };
        using (var tx = shippingSourceEnv.NewTransaction(TransactionFlags.ReadWrite))
        {
            shippingSourceEnv.CreateTree(tx, "TestTree");
            shippingSourceEnv.CreateTree(tx, "TestTree2");
            tx.Commit();
        }
        WriteLotsOfTestDataForTree("TestTree", shippingSourceEnv);
        WriteLotsOfTestDataForTree("TestTree2", shippingSourceEnv);
    }
    var storageEnvironmentOptions = StorageEnvironmentOptions.CreateMemoryOnly();
    storageEnvironmentOptions.ManualFlushing = true;
    using (var shippingDestinationEnv = new StorageEnvironment(storageEnvironmentOptions))
    {
        // Replay the shipped logs in commit order, then flush to the data file.
        foreach (var tx in transactionsToShip)
        {
            shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot);
        }
        shippingDestinationEnv.FlushLogToDataFile();
        using (var snapshot = shippingDestinationEnv.CreateSnapshot())
        {
            ValidateLotsOfTestDataForTree(snapshot, "TestTree");
            ValidateLotsOfTestDataForTree(snapshot, "TestTree2");
        }
    }
}
// Initializes the Raft storage: creates the logs/terms/metadata trees, then
// either seeds fresh metadata (new database) or loads and validates the
// persisted state (existing database).
private void InitializeDatabase()
{
    using (var tx = _env.NewTransaction(TransactionFlags.ReadWrite))
    {
        _env.CreateTree(tx, LogsTreeName);
        _env.CreateTree(tx, EntryTermsTreeName);
        var metadata = _env.CreateTree(tx, MetadataTreeName);
        var versionReadResult = metadata.Read("version");
        if (versionReadResult == null) // new db
        {
            // Seed defaults: version, fresh db id, term 0, no vote recorded.
            metadata.Add("version", Encoding.UTF8.GetBytes(CurrentVersion));
            DbId = Guid.NewGuid();
            metadata.Add("db-id", DbId.ToByteArray());
            metadata.Add("current-term", EndianBitConverter.Little.GetBytes(0L));
            metadata.Add("voted-for", Encoding.UTF8.GetBytes(string.Empty));
            metadata.Add("voted-for-term", EndianBitConverter.Little.GetBytes(-1L));
            metadata.Add("is-leader-potential", EndianBitConverter.Little.GetBytes(0));
        }
        else
        {
            // Existing db: refuse to open a mismatched on-disk version.
            var dbVersion = versionReadResult.Reader.ToStringValue();
            if (dbVersion != CurrentVersion)
            {
                throw new InvalidOperationException("Cannot open db because its version is " + dbVersion + " but the library expects version " + CurrentVersion);
            }
            int used;
            var bytes = metadata.Read("db-id").Reader.ReadBytes(16, out used).Take(16).ToArray();
            DbId = new Guid(bytes);
            CurrentTerm = metadata.Read("current-term").Reader.ReadLittleEndianInt64();
            // An empty "voted-for" value means no vote has been cast.
            var votedFor = metadata.Read("voted-for");
            VotedFor = votedFor.Reader.Length == 0 ? null : votedFor.Reader.ToStringValue();
            var votedForTerm = metadata.Read("voted-for-term");
            VotedForTerm = votedForTerm.Reader.ReadLittleEndianInt64();
        }
        tx.Commit();
    }
}
// Takes five successive minimal incremental backups (each covering a slice of
// 5000 user entries written across multiple transactions), restores them in
// order into a fresh environment, and verifies every entry survived.
public void Can_make_multiple_min_inc_backups_and_then_restore()
{
    const int UserCount = 5000;
    _tempDir = Guid.NewGuid().ToString();
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;
    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        int index = 0;
        for (int xi = 0; xi < 5; xi++)
        {
            // Two transactions per backup, each writing UserCount/10 entries.
            for (int yi = 0; yi < 2; yi++)
            {
                using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var tree = envToSnapshot.CreateTree(tx, "test");
                    for (int i = 0; i < UserCount / 10; i++)
                    {
                        tree.Add("users/" + index, "john doe/" + index);
                        index++;
                    }
                    tx.Commit();
                }
            }
            var snapshotWriter = new MinimalIncrementalBackup();
            snapshotWriter.ToFile(envToSnapshot, Path.Combine(_tempDir, xi + ".snapshot"));
        }
    }
    var incremental = new IncrementalBackup();
    var restoredOptions = StorageEnvironmentOptions.ForPath(Path.Combine(_tempDir, "restored"));
    // Restore all five snapshots in order.
    incremental.Restore(restoredOptions, Enumerable.Range(0, 5).Select(i => Path.Combine(_tempDir, i + ".snapshot")));
    using (var snapshotRestoreEnv = new StorageEnvironment(restoredOptions))
    {
        using (var tx = snapshotRestoreEnv.NewTransaction(TransactionFlags.Read))
        {
            var tree = tx.ReadTree("test");
            Assert.NotNull(tree);
            for (int i = 0; i < UserCount; i++)
            {
                var readResult = tree.Read("users/" + i);
                Assert.NotNull(readResult);
                Assert.Equal("john doe/" + i, readResult.Reader.ToStringValue());
            }
        }
    }
}
// Writes ~100 random-sized records, deletes roughly half, then compacts and
// asserts the compacted directory is smaller than the original.
public void ShouldOccupyLessSpace()
{
    var r = new Random();
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "records");
            for (int i = 0; i < 100; i++)
            {
                // Record sizes range from 10 bytes up to 2 MB.
                var bytes = new byte[r.Next(10, 2 * 1024 * 1024)];
                r.NextBytes(bytes);
                tree.Add("record/" + i, bytes);
            }
            tx.Commit();
        }
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "records");
            // Random keys may repeat, so up to (not exactly) 50 records are deleted.
            for (int i = 0; i < 50; i++)
            {
                tree.Delete("record/" + r.Next(0, 100));
            }
            tx.Commit();
        }
    }
    var oldSize = GetDirSize(new DirectoryInfo(CompactionTestsData));
    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData));
    var newSize = GetDirSize(new DirectoryInfo(CompactedData));
    Assert.True(newSize < oldSize,
        string.Format("Old size: {0:#,#;;0} MB, new size {1:#,#;;0} MB", oldSize / 1024 / 1024, newSize / 1024 / 1024));
}
// Writes a 512 KB value while the journal is capped at 10 pages, forcing the
// value to span multiple journal files, then reopens the environment and
// verifies the value is recovered intact.
public void ShouldBeAbleToWriteValuesGreaterThanLogAndRecoverThem()
{
    DeleteDirectory("test2.data");
    var random = new Random(1234); // fixed seed for reproducibility
    var buffer = new byte[1024 * 512];
    random.NextBytes(buffer);
    var options = StorageEnvironmentOptions.ForPath("test2.data");
    // Journal deliberately smaller than the value being written.
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            tx.Environment.CreateTree(tx, "tree").Add("key1", new MemoryStream(buffer));
            tx.Commit();
        }
    }
    // Reopen with fresh options (the previous instance owned the old ones).
    options = StorageEnvironmentOptions.ForPath("test2.data");
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }
        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var read = tx.Environment.CreateTree(tx, "tree").Read("key1");
            Assert.NotNull(read);
            {
                Assert.Equal(buffer.Length, read.Reader.Length);
                int used;
                // Compare the recovered bytes with the original buffer.
                Assert.Equal(buffer, read.Reader.ReadBytes(read.Reader.Length, out used).Take(used).ToArray());
            }
        }
    }
    DeleteDirectory("test2.data");
}
// Verifies that compaction reports progress callbacks per tree and per record,
// including multi-value trees, in the expected begin/end pairs.
public void ShouldReportProgress()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "fruits");
            tree.Add("apple", new byte[123]);
            tree.Add("orange", new byte[99]);
            var tree2 = env.CreateTree(tx, "vegetables");
            tree2.Add("carrot", new byte[123]);
            tree2.Add("potato", new byte[99]);
            var tree3 = env.CreateTree(tx, "multi");
            // Duplicate MultiAdd of "carrot" is deliberate: multi-values dedupe,
            // leaving two records ("fruits", "vegetables") in the multi tree.
            tree3.MultiAdd("fruits", "apple");
            tree3.MultiAdd("fruits", "orange");
            tree3.MultiAdd("vegetables", "carrot");
            tree3.MultiAdd("vegetables", "carrot");
            tx.Commit();
        }
    }
    var progressReport = new List<string>();
    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData),
        x => progressReport.Add(string.Format("Copied {0} of {1} records in '{2}' tree. Copied {3} of {4} trees.",
            x.CopiedTreeRecords, x.TotalTreeRecordsCount, x.TreeName, x.CopiedTrees, x.TotalTreeCount)));
    Assert.NotEmpty(progressReport);
    // Trees are processed in alphabetical order: fruits, multi, vegetables.
    Assert.Contains("Copied 0 of 2 records in 'fruits' tree. Copied 0 of 3 trees.", progressReport);
    Assert.Contains("Copied 2 of 2 records in 'fruits' tree. Copied 1 of 3 trees.", progressReport);
    Assert.Contains("Copied 0 of 2 records in 'multi' tree. Copied 1 of 3 trees.", progressReport);
    Assert.Contains("Copied 2 of 2 records in 'multi' tree. Copied 2 of 3 trees.", progressReport);
    Assert.Contains("Copied 0 of 2 records in 'vegetables' tree. Copied 2 of 3 trees.", progressReport);
    Assert.Contains("Copied 2 of 2 records in 'vegetables' tree. Copied 3 of 3 trees.", progressReport);
}
// Initializes the counter storage: creates all counter trees, then either seeds
// a new counter db (id, name, server-id mappings) or validates the persisted
// identity and restores the last replication etag.
private void Initialize()
{
    using (var tx = storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        var serverNamesToIds = storageEnvironment.CreateTree(tx, "serverNames->Ids");
        var serverIdsToNames = storageEnvironment.CreateTree(tx, "Ids->serverNames");
        storageEnvironment.CreateTree(tx, "servers->lastEtag");
        storageEnvironment.CreateTree(tx, "counters");
        storageEnvironment.CreateTree(tx, "countersGroups");
        var etags = storageEnvironment.CreateTree(tx, "etags->counters");
        storageEnvironment.CreateTree(tx, "counters->etags");
        var metadata = tx.State.GetTree(tx, "$metadata");
        var id = metadata.Read("id");
        if (id == null) // new counter db
        {
            // Register this server's id<->name mapping and mint the db identity.
            var serverIdBytes = EndianBitConverter.Big.GetBytes(ServerId);
            var serverIdSlice = new Slice(serverIdBytes);
            serverNamesToIds.Add(CounterStorageUrl, serverIdSlice);
            serverIdsToNames.Add(serverIdSlice, CounterStorageUrl);
            Id = Guid.NewGuid();
            metadata.Add("id", Id.ToByteArray());
            metadata.Add("name", Encoding.UTF8.GetBytes(Name));
            tx.Commit();
            // NOTE(review): tx.Commit() is only called on this branch; the
            // existing-db branch below leaves the CreateTree calls uncommitted.
            // Presumably the trees already exist in that case — confirm.
        }
        else // existing counter db
        {
            int used;
            Id = new Guid(id.Reader.ReadBytes(16, out used));
            var nameResult = metadata.Read("name");
            if (nameResult == null)
            {
                throw new InvalidOperationException("Could not read name from the store, something bad happened");
            }
            var storedName = new StreamReader(nameResult.Reader.AsStream()).ReadToEnd();
            if (storedName != Name)
            {
                throw new InvalidOperationException("The stored name " + storedName + " does not match the given name " + Name);
            }
            using (var it = etags.Iterate())
            {
                // The last key in the etags tree is the highest etag issued so far.
                if (it.Seek(Slice.AfterAllKeys))
                {
                    LastEtag = it.CurrentKey.CreateReader().ReadBigEndianInt64();
                }
            }
        }
        ReplicationTask.StartReplication();
    }
}
// Variant using tx.Environment.State.GetTree: writes a 512 KB value while the
// journal is capped at 10 pages (forcing it to span multiple journal files),
// then reopens the environment and verifies the value is recovered intact.
public void ShouldBeAbleToWriteValuesGreaterThanLogAndRecoverThem()
{
    DeleteDirectory("test2.data");
    var random = new Random(1234); // fixed seed for reproducibility
    var buffer = new byte[1024 * 512];
    random.NextBytes(buffer);
    var options = StorageEnvironmentOptions.ForPath("test2.data");
    // Journal deliberately smaller than the value being written.
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            tx.Environment.State.GetTree(tx, "tree").Add("key1", new MemoryStream(buffer));
            tx.Commit();
        }
    }
    // Reopen with fresh options (the previous instance owned the old ones).
    options = StorageEnvironmentOptions.ForPath("test2.data");
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }
        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var read = tx.Environment.State.GetTree(tx, "tree").Read("key1");
            Assert.NotNull(read);
            {
                Assert.Equal(buffer.Length, read.Reader.Length);
                int used;
                // Compare the recovered bytes with the original buffer.
                Assert.Equal(buffer, read.Reader.ReadBytes(read.Reader.Length, out used).Take(used).ToArray());
            }
        }
    }
    DeleteDirectory("test2.data");
}
// Replays all queued debug-recorded write activity as a single write batch,
// temporarily disabling debug recording so the replay is not itself recorded.
public void Replay()
{
    var wasDebugRecording = _env.IsDebugRecording;
    _env.IsDebugRecording = false;

    using (var writeBatch = new WriteBatch())
    {
        ActivityEntry entry;
        while (WriteQueue.TryDequeue(out entry))
        {
            switch (entry.ActionType)
            {
                case DebugActionType.Add:
                    writeBatch.Add(entry.Key, entry.ValueStream, entry.TreeName);
                    break;
                case DebugActionType.Delete:
                    writeBatch.Delete(entry.Key, entry.TreeName);
                    break;
                case DebugActionType.MultiAdd:
                    writeBatch.MultiAdd(entry.Key, new Slice(Encoding.UTF8.GetBytes(entry.Value.ToString())), entry.TreeName);
                    break;
                case DebugActionType.MultiDelete:
                    writeBatch.MultiDelete(entry.Key, new Slice(Encoding.UTF8.GetBytes(entry.Value.ToString())), entry.TreeName);
                    break;
                case DebugActionType.CreateTree:
                    // Tree creation cannot go through the batch; do it in its own tx.
                    using (var tx = _env.NewTransaction(TransactionFlags.ReadWrite))
                    {
                        _env.CreateTree(tx, entry.TreeName);
                        tx.Commit();
                    }
                    break;
                case DebugActionType.Increment:
                    // BUG FIX (was the TODO here): ReadByte() consumed only 1 of the
                    // 8 bytes of the recorded little-endian long delta. Read the full
                    // value, matching how increments are decoded during replay of
                    // write actions elsewhere in this file.
                    var incrementBuffer = new byte[sizeof(long)];
                    var totalRead = 0;
                    while (totalRead < incrementBuffer.Length)
                    {
                        var bytesRead = entry.ValueStream.Read(incrementBuffer, totalRead, incrementBuffer.Length - totalRead);
                        if (bytesRead == 0)
                            break; // unexpected end of stream; remaining bytes stay zero
                        totalRead += bytesRead;
                    }
                    writeBatch.Increment(entry.Key, EndianBitConverter.Little.ToInt64(incrementBuffer, 0), entry.TreeName);
                    break;
                default: //precaution against newly added action types
                    throw new InvalidOperationException("unsupported tree action type");
            }
        }
        _env.Writer.Write(writeBatch);
    }

    _env.IsDebugRecording = wasDebugRecording; //restore the state as it was
}
// Opens the time-series store at the given path, ensures the data tree exists,
// and restores the last issued sequence number (defaulting to 1 on first run).
public DateTimeSeries(string path)
{
    _lastKey = "last-key";
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path));
    using (var setupTx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        _storageEnvironment.CreateTree(setupTx, "data");
        var lastValue = setupTx.State.Root.Read(setupTx, _lastKey);
        if (lastValue == null)
        {
            _last = 1;
        }
        else
        {
            _last = lastValue.Reader.ReadInt64();
        }
        setupTx.Commit();
    }
}
// Creates `number` trees named "<prefix>0" .. "<prefix>{number-1}" in a single
// transaction and returns their names in creation order.
protected IList<string> CreateTrees(StorageEnvironment env, int number, string prefix)
{
    var names = new List<string>();
    using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        for (var index = 0; index < number; index++)
        {
            names.Add(env.CreateTree(tx, prefix + index).Name);
        }
        tx.Commit();
    }
    return names;
}
// Opens a full-text index on top of the given storage options, ensures the
// term-positions tree exists, and loads the persisted index state.
public FullTextIndex(StorageEnvironmentOptions options, IAnalyzer analyzer)
{
    Analyzer = analyzer;
    Conventions = new IndexingConventions();
    BufferPool = new BufferPool();
    StorageEnvironment = new StorageEnvironment(options);

    using (var initTx = StorageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        StorageEnvironment.CreateTree(initTx, "TermPositions");
        // Restore persisted state before the index accepts any documents.
        ReadMetadata(initTx);
        ReadLastDocumentId(initTx);
        ReadFields(initTx);
        initTx.Commit();
    }
}
// Verifies that a value written before an environment restart is still
// readable afterwards when the underlying pagers are kept alive.
public void SurviveRestart()
{
    using (var options = StorageEnvironmentOptions.CreateMemoryOnly())
    {
        // Keep the pagers alive across environment instances so data survives.
        options.OwnsPagers = false;

        using (var env = new StorageEnvironment(options))
        {
            using (var setupTx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.CreateTree(setupTx, "events");
                setupTx.Commit();
            }
            using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                writeTx.Environment.State.GetTree(writeTx, "events").Add("test", new MemoryStream(0));
                writeTx.Commit();
            }
        }

        // Reopen on the same pagers and verify the value is still there.
        using (var env = new StorageEnvironment(options))
        {
            using (var setupTx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.CreateTree(setupTx, "events");
                setupTx.Commit();
            }
            using (var readTx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var events = readTx.Environment.State.GetTree(readTx, "events");
                Assert.NotNull(events.Read("test"));
                readTx.Commit();
            }
        }
    }
}
// Writes 100 entries in one committed transaction, then reopens the environment
// and verifies all entries are recovered from the journal.
public void StorageRecoveryShouldWorkWhenThereSingleTransactionToRecoverFromLog()
{
    var path = "test2.data";
    DeleteDirectory(path);
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "tree");
            for (var i = 0; i < 100; i++)
            {
                tree.Add("key" + i, new MemoryStream());
            }
            tx.Commit();
        }
    }
    // Reopen: recovery must replay the single committed transaction.
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }
        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var tree = tx.Environment.State.GetTree(tx, "tree");
            for (var i = 0; i < 100; i++)
            {
                Assert.NotNull(tree.Read("key" + i));
            }
        }
    }
    DeleteDirectory(path);
}
// Verifies that data in a named tree survives an environment restart when the
// memory-only options do not own their pagers (so the pagers outlive the env).
public void DataIsKeptAfterRestartForSubTrees()
{
    using (var pureMemoryPager = StorageEnvironmentOptions.CreateMemoryOnly())
    {
        // Keep the pagers alive after the first environment is disposed.
        pureMemoryPager.OwnsPagers = false;
        using (var env = new StorageEnvironment(pureMemoryPager))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.CreateTree(tx, "test");
                tx.Commit();
            }
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = tx.Environment.CreateTree(tx, "test");
                tree.Add("test", Stream.Null);
                tx.Commit();
                // Sanity check within the same transaction scope.
                Assert.NotNull(tree.Read("test"));
            }
        }
        // Reopen on the same pagers and verify the entry is still readable.
        using (var env = new StorageEnvironment(pureMemoryPager))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = env.CreateTree(tx, "test"); // return value unused; kept as-is
                tx.Commit();
            }
            using (var tx = env.NewTransaction(TransactionFlags.Read))
            {
                var tree = tx.Environment.CreateTree(tx, "test");
                Assert.NotNull(tree.Read("test"));
                tx.Commit();
            }
        }
    }
}
// Adds `size` 1 KB values under a single multi-value key — enough to force a
// page split — then deletes them all, asserting neither phase throws.
public void MultiAdds_And_MultiDeletes_After_Causing_PageSplit_DoNot_Fail(int size)
{
    using (var Env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        // Each value is 1 KB of random text, so many values overflow one page.
        var inputData = new List<byte[]>();
        for (int i = 0; i < size; i++)
        {
            inputData.Add(Encoding.UTF8.GetBytes(RandomString(1024)));
        }
        using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
        {
            Env.CreateTree(tx, "foo");
            tx.Commit();
        }
        using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = tx.Environment.State.GetTree(tx, "foo");
            foreach (var buffer in inputData)
            {
                Assert.DoesNotThrow(() => tree.MultiAdd(tx, "ChildTreeKey", new Slice(buffer)));
            }
            tx.Commit();
        }
        // Delete every value that was added; none of the deletes may throw.
        using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = tx.Environment.State.GetTree(tx, "foo");
            for (int i = 0; i < inputData.Count; i++)
            {
                var buffer = inputData[i];
                Assert.DoesNotThrow(() => tree.MultiDelete(tx, "ChildTreeKey", new Slice(buffer)));
            }
            tx.Commit();
        }
    }
}
// Writes many overwrites, takes an incremental backup, then restarts the
// environment — the reopen must succeed without throwing.
public void ShouldCorrectlyLoadAfterRestartIfIncrementalBackupWasDone()
{
    var bytes = new byte[1024];
    new Random().NextBytes(bytes);
    using (var env = new StorageEnvironment(ModifyOptions(StorageEnvironmentOptions.ForPath("Data"))))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "items");
            tx.Commit();
        }
        // 100 transactions x 100 overwrites of the same keys builds up journal state.
        for (int j = 0; j < 100; j++)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = tx.ReadTree("items");
                for (int i = 0; i < 100; i++)
                {
                    tree.Add("items/" + i, bytes);
                }
                tx.Commit();
            }
        }
        BackupMethods.Incremental.ToFile(env, IncrementalBackupTestUtils.IncrementalBackupFile(0));
    }
    // restart
    using (var env = new StorageEnvironment(ModifyOptions(StorageEnvironmentOptions.ForPath("Data"))))
    {
    }
}
// Compaction must refuse to run against an environment that has incremental
// backup enabled, surfacing the dedicated error message.
public void CannotCompactStorageIfIncrementalBackupEnabled()
{
    var creationOptions = StorageEnvironmentOptions.ForPath(CompactionTestsData);
    creationOptions.IncrementalBackupEnabled = true;

    // Seed a small environment so compaction would have something to copy.
    using (var env = new StorageEnvironment(creationOptions))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var records = env.CreateTree(tx, "records");
            records.Add("record/1", new byte[9]);
            records.Add("record/2", new byte[9]);
            tx.Commit();
        }
    }

    var srcOptions = StorageEnvironmentOptions.ForPath(CompactionTestsData);
    srcOptions.IncrementalBackupEnabled = true;

    var invalidOperationException = Assert.Throws<InvalidOperationException>(
        () => StorageCompaction.Execute(
            srcOptions,
            (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData)));

    Assert.Equal(StorageCompaction.CannotCompactBecauseOfIncrementalBackup, invalidOperationException.Message);
}
// Verifies that a write transaction large enough to be split across several
// journal files (MaxLogFileSize is forced down to 10 pages) is fully
// recovered when the environment is reopened from disk.
public void StorageRecoveryShouldWorkForSplitTransactions()
{
    // Fixed seed keeps the written payload reproducible across both opens.
    var random = new Random(1234);
    var buffer = new byte[4096];
    random.NextBytes(buffer);
    var path = "test2.data";
    var count = 1000;
    DeleteDirectory(path);
    var options = StorageEnvironmentOptions.ForPath(path);
    // Tiny journal files force the large transaction below to span multiple logs.
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "atree");
            env.CreateTree(tx, "btree");
            tx.Commit();
        }
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var aTree = tx.Environment.State.GetTree(tx,"atree");
            var bTree = tx.Environment.State.GetTree(tx,"btree");
            // One big committed transaction: 4 KB values in atree, multi-values in btree.
            for (var i = 0; i < count; i++)
            {
                aTree.Add("a" + i, new MemoryStream(buffer));
                bTree.MultiAdd("a", "a" + i);
            }
            tx.Commit();
        }
    }
    var expectedString = Encoding.UTF8.GetString(buffer);
    options = StorageEnvironmentOptions.ForPath(path);
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;
    // Reopen: recovery must replay the split transaction in full.
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "atree");
            env.CreateTree(tx, "btree");
            tx.Commit();
        }
        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var aTree = tx.Environment.State.GetTree(tx,"atree");
            var bTree = tx.Environment.State.GetTree(tx,"btree");
            // Every plain value must have survived with identical content.
            for (var i = 0; i < count; i++)
            {
                var read = aTree.Read("a" + i);
                Assert.NotNull(read);
                Assert.Equal(expectedString, read.Reader.ToStringValue());
            }
            // And the multi-value key "a" must hold all `count` distinct entries.
            using (var iterator = bTree.MultiRead("a"))
            {
                Assert.True(iterator.Seek(Slice.BeforeAllKeys));
                var keys = new HashSet<string>();
                do
                {
                    keys.Add(iterator.CurrentKey.ToString());
                } while (iterator.MoveNext());
                Assert.Equal(count, keys.Count);
            }
        }
    }
    DeleteDirectory(path);
}
// A minimal incremental backup of the same data must produce a strictly
// smaller file than a regular incremental backup.
public unsafe void Min_inc_backup_is_smaller_than_normal_inc_backup()
{
    const int UserCount = 5000;
    _tempDir = Guid.NewGuid().ToString();
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;
    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        // Spread the writes over 10 transactions so the backups cover multiple
        // journal transactions rather than one.
        for (int xi = 0; xi < 10; xi++)
        {
            using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = envToSnapshot.CreateTree(tx, "test");
                for (int i = 0; i < UserCount / 10; i++)
                {
                    tree.Add("users/" + i, "john doe/" + i);
                }
                tx.Commit();
            }
        }
        // Capture the header's incremental-backup info before the first backup
        // advances it, so the second backup can be made over the same range.
        var incrementalBackupInfo = envToSnapshot.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);
        var snapshotWriter = new MinimalIncrementalBackup();
        snapshotWriter.ToFile(envToSnapshot, Path.Combine(_tempDir, "1.snapshot"));
        // Restore the saved incremental-backup info so the regular backup
        // below covers the same transactions as the minimal one.
        envToSnapshot.HeaderAccessor.Modify(ptr => ptr->IncrementalBackup = incrementalBackupInfo);
        var incBackup = new IncrementalBackup();
        incBackup.ToFile(envToSnapshot, Path.Combine(_tempDir, "2.snapshot"));
        var incLen = new FileInfo(Path.Combine(_tempDir, "2.snapshot")).Length;
        var minInLen = new FileInfo(Path.Combine(_tempDir, "1.snapshot")).Length;
        Assert.True(incLen > minInLen);
    }
}
// Full backup followed by a minimal incremental backup: restoring the full
// backup and then applying the incremental one must yield the combined state —
// updated keys show the post-backup values, untouched keys keep the originals.
public void Can_use_full_back_then_full_min_backup()
{
    _tempDir = Guid.NewGuid().ToString();
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;
    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        // Baseline data: users/0..999 = "first/i".
        using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = envToSnapshot.CreateTree(tx, "test");
            for (int i = 0; i < 1000; i++)
            {
                tree.Add("users/" + i, "first/" + i);
            }
            tx.Commit();
        }
        new FullBackup().ToFile(envToSnapshot, Path.Combine(_tempDir, "full.backup"));
        // Post-backup changes: overwrite users/0..499 and add users/10000..10499.
        using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = envToSnapshot.CreateTree(tx, "test");
            for (int i = 0; i < 500; i++)
            {
                tree.Add("users/" + i, "second/" + (i * 2));
            }
            for (int i = 0; i < 500; i++)
            {
                tree.Add("users/" + (i + 10000), "third/" + i);
            }
            tx.Commit();
        }
        new MinimalIncrementalBackup().ToFile(envToSnapshot, Path.Combine(_tempDir, "1.backup"));
    }
    // Restore the full backup, then layer the incremental backup on top of it.
    new FullBackup().Restore(Path.Combine(_tempDir, "full.backup"), Path.Combine(_tempDir, "restored"));
    var restoredOptions = StorageEnvironmentOptions.ForPath(Path.Combine(_tempDir, "restored"));
    new IncrementalBackup().Restore(restoredOptions, new[] { Path.Combine(_tempDir, "1.backup") });
    using (var snapshotRestoreEnv = new StorageEnvironment(restoredOptions))
    {
        using (var tx = snapshotRestoreEnv.NewTransaction(TransactionFlags.Read))
        {
            var tree = tx.ReadTree("test");
            Assert.NotNull(tree);
            // Overwritten keys must show the incremental values.
            for (int i = 0; i < 500; i++)
            {
                var readResult = tree.Read("users/" + i);
                Assert.NotNull(readResult);
                Assert.Equal("second/" + (i * 2), readResult.Reader.ToStringValue());
            }
            // Newly added keys must be present.
            for (int i = 0; i < 500; i++)
            {
                var readResult = tree.Read("users/" + (i + 10000));
                Assert.NotNull(readResult);
                Assert.Equal("third/" + i, readResult.Reader.ToStringValue());
            }
            // Keys never touched after the full backup keep their original values.
            for (int i = 0; i < 500; i++)
            {
                var readResult = tree.Read("users/" + (i + 500));
                Assert.NotNull(readResult);
                Assert.Equal("first/" + (i+500), readResult.Reader.ToStringValue());
            }
        }
    }
}
// Ships every committed transaction (captured as page snapshots) from a
// source environment into a fresh destination environment, flushes, and
// verifies the destination holds the same data.
public void StorageEnvironment_should_be_able_to_accept_transactionsToShip_with_LOTS_of_transactions()
{
    var shipped = new List<TransactionToShip>();

    using (var shippingSourceEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        // Capture a page snapshot of each transaction as it commits.
        shippingSourceEnv.Journal.OnTransactionCommit += committed =>
        {
            committed.CreatePagesSnapshot();
            shipped.Add(committed);
        };

        using (var tx = shippingSourceEnv.NewTransaction(TransactionFlags.ReadWrite))
        {
            shippingSourceEnv.CreateTree(tx, "TestTree");
            shippingSourceEnv.CreateTree(tx, "TestTree2");
            tx.Commit();
        }

        WriteLotsOfTestDataForTree("TestTree", shippingSourceEnv);
        WriteLotsOfTestDataForTree("TestTree2", shippingSourceEnv);
    }

    var destinationOptions = StorageEnvironmentOptions.CreateMemoryOnly();
    destinationOptions.ManualFlushing = true;

    using (var shippingDestinationEnv = new StorageEnvironment(destinationOptions))
    {
        foreach (var transaction in shipped)
        {
            shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(transaction.PagesSnapshot);
        }

        shippingDestinationEnv.FlushLogToDataFile();

        using (var snapshot = shippingDestinationEnv.CreateSnapshot())
        {
            ValidateLotsOfTestDataForTree(snapshot, "TestTree");
            ValidateLotsOfTestDataForTree(snapshot, "TestTree2");
        }
    }
}
// End-to-end check that compaction preserves every record: regular trees with
// large random binary values and multi-value trees are written, the
// environment is compacted into a new directory, and everything is read back
// and compared byte-for-byte.
// Fixed: the MultiRead iterator in the verification loop was never disposed
// (every other MultiRead in this file is wrapped in `using`); it is now.
public void CompactionMustNotLooseAnyData()
{
    var treeNames = new List<string>();
    var multiValueTreeNames = new List<string>();

    var random = new Random();
    // Two random payloads of up to 2 MB; records alternate between them.
    var value1 = new byte[random.Next(1024 * 1024 * 2)];
    var value2 = new byte[random.Next(1024 * 1024 * 2)];
    random.NextBytes(value1);
    random.NextBytes(value2);

    const int treeCount = 5;
    const int recordCount = 6;
    const int multiValueTreeCount = 7;
    const int multiValueRecordsCount = 4;
    const int multiValuesCount = 3;

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    {
        // Populate the regular trees, one transaction per tree.
        for (int i = 0; i < treeCount; i++)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                string name = "tree/" + i;
                treeNames.Add(name);

                var tree = env.State.GetTree(tx, name);
                for (int j = 0; j < recordCount; j++)
                {
                    tree.Add(string.Format("{0}/items/{1}", name, j), j % 2 == 0 ? value1 : value2);
                }

                tx.Commit();
            }
        }

        // Populate the multi-value trees.
        for (int i = 0; i < multiValueTreeCount; i++)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var name = "multiValueTree/" + i;
                multiValueTreeNames.Add(name);

                var tree = env.CreateTree(tx, name);
                for (int j = 0; j < multiValueRecordsCount; j++)
                {
                    for (int k = 0; k < multiValuesCount; k++)
                    {
                        tree.MultiAdd("record/" + j, "value/" + k);
                    }
                }

                tx.Commit();
            }
        }
    }

    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData));

    using (var compacted = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactedData)))
    {
        using (var tx = compacted.NewTransaction(TransactionFlags.Read))
        {
            // Every regular record must survive with identical bytes.
            foreach (var treeName in treeNames)
            {
                var tree = compacted.State.GetTree(tx, treeName);

                for (int i = 0; i < recordCount; i++)
                {
                    var readResult = tree.Read(string.Format("{0}/items/{1}", treeName, i));
                    Assert.NotNull(readResult);

                    if (i % 2 == 0)
                    {
                        var readBytes = new byte[value1.Length];
                        readResult.Reader.Read(readBytes, 0, readBytes.Length);
                        Assert.Equal(value1, readBytes);
                    }
                    else
                    {
                        var readBytes = new byte[value2.Length];
                        readResult.Reader.Read(readBytes, 0, readBytes.Length);
                        Assert.Equal(value2, readBytes);
                    }
                }
            }

            // Every multi-value entry must survive, in key order.
            foreach (var treeName in multiValueTreeNames)
            {
                var tree = compacted.State.GetTree(tx, treeName);

                for (int i = 0; i < multiValueRecordsCount; i++)
                {
                    using (var multiRead = tree.MultiRead("record/" + i))
                    {
                        Assert.True(multiRead.Seek(Slice.BeforeAllKeys));

                        int count = 0;
                        do
                        {
                            Assert.Equal("value/" + count, multiRead.CurrentKey.ToString());
                            count++;
                        } while (multiRead.MoveNext());

                        Assert.Equal(multiValuesCount, count);
                    }
                }
            }
        }
    }
}
// After deleting roughly half of the records, compacting the environment must
// produce a directory smaller than the original.
public void ShouldOccupyLessSpace()
{
    var random = new Random();

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var records = env.CreateTree(tx, "records");

            for (var n = 0; n < 100; n++)
            {
                var payload = new byte[random.Next(10, 2 * 1024 * 1024)];
                random.NextBytes(payload);
                records.Add("record/" + n, payload);
            }

            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var records = env.CreateTree(tx, "records");

            // Random targets may repeat, so at most 50 distinct records are removed.
            for (var n = 0; n < 50; n++)
            {
                records.Delete("record/" + random.Next(0, 100));
            }

            tx.Commit();
        }
    }

    var oldSize = GetDirSize(new DirectoryInfo(CompactionTestsData));

    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData));

    var newSize = GetDirSize(new DirectoryInfo(CompactedData));

    Assert.True(newSize < oldSize,
        string.Format("Old size: {0:#,#;;0} MB, new size {1:#,#;;0} MB", oldSize / 1024 / 1024, newSize / 1024 / 1024));
}
// Records a debug journal that includes manual log flushing — once outside a
// transaction and once inside one — then replays it into a fresh environment
// and verifies the written value survived.
public void Record_debug_journal_and_replay_it_with_manual_flushing()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        env.DebugJournal = new DebugJournal(debugJouralName, env, true);

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "test-tree");
            tx.Commit();
        }

        using (var writeBatch = new WriteBatch())
        {
            var payload = new MemoryStream(Encoding.UTF8.GetBytes("{ \"title\": \"foo\",\"name\":\"bar\"}"));
            writeBatch.Add("foo", payload, "test-tree");
            env.Writer.Write(writeBatch);
        }

        // Flush once with no transaction in flight...
        using (env.Options.AllowManualFlushing())
        {
            env.FlushLogToDataFile();
        }

        // ...and once from inside an open write transaction.
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        using (env.Options.AllowManualFlushing())
        {
            env.FlushLogToDataFile(tx);
            tx.Commit();
        }
    }

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        env.DebugJournal = DebugJournal.FromFile(debugJouralName, env);
        env.DebugJournal.Replay();

        using (var snapshot = env.CreateSnapshot())
        {
            Assert.Equal("{ \"title\": \"foo\",\"name\":\"bar\"}", snapshot.Read("test-tree", "foo").Reader.ToStringValue());
        }
    }
}
// Copies every tree from existingEnv into compactedEnv, batching writes so
// each destination transaction stays under half of MaxLogFileSize, and
// reporting progress through the optional callback.
private static void CopyTrees(StorageEnvironment existingEnv, StorageEnvironment compactedEnv, Action<CompactionProgress> progressReport = null)
{
    // The root tree's entries are the names of all trees in the environment.
    using (var rootIterator = existingEnv.State.Root.Iterate())
    {
        if (rootIterator.Seek(Slice.BeforeAllKeys) == false)
            return;
        var totalTreesCount = existingEnv.State.Root.State.EntriesCount;
        var copiedTrees = 0L;
        do
        {
            var treeName = rootIterator.CurrentKey.ToString();
            using (var txr = existingEnv.NewTransaction(TransactionFlags.Read))
            {
                var existingTree = existingEnv.State.GetTree(txr, treeName);
                // Report the start of this tree (0 entries copied so far).
                Report(treeName, copiedTrees, totalTreesCount, 0, existingTree.State.EntriesCount, progressReport);
                using (var existingTreeIterator = existingTree.Iterate())
                {
                    // Empty tree: this `continue` jumps to the outer do-while's
                    // condition, i.e. advances rootIterator to the next tree.
                    if (existingTreeIterator.Seek(Slice.BeforeAllKeys) == false)
                        continue;
                    // Create the destination tree in its own committed transaction.
                    using (var txw = compactedEnv.NewTransaction(TransactionFlags.ReadWrite))
                    {
                        compactedEnv.CreateTree(txw, treeName);
                        txw.Commit();
                    }
                    var copiedEntries = 0L;
                    do
                    {
                        // One destination write transaction per batch; the batch
                        // ends when transactionSize reaches MaxLogFileSize / 2.
                        var transactionSize = 0L;
                        using (var txw = compactedEnv.NewTransaction(TransactionFlags.ReadWrite))
                        {
                            var newTree = txw.ReadTree(treeName);
                            do
                            {
                                var key = existingTreeIterator.CurrentKey;
                                if (existingTreeIterator.Current->Flags == NodeFlags.MultiValuePageRef)
                                {
                                    // Multi-value entry: copy each of its values.
                                    using (var multiTreeIterator = existingTree.MultiRead(key))
                                    {
                                        // NOTE(review): this `continue` goes to the inner
                                        // do-while's condition, which still calls
                                        // existingTreeIterator.MoveNext() — but it skips
                                        // the copiedEntries++ below, so the
                                        // copiedEntries == EntriesCount check may not
                                        // hold for trees with value-less multi keys.
                                        if (multiTreeIterator.Seek(Slice.BeforeAllKeys) == false)
                                            continue;
                                        do
                                        {
                                            var multiValue = multiTreeIterator.CurrentKey;
                                            newTree.MultiAdd(key, multiValue);
                                            transactionSize += multiValue.Size;
                                        } while (multiTreeIterator.MoveNext());
                                    }
                                }
                                else
                                {
                                    // Plain entry: stream the value across.
                                    using (var value = existingTree.Read(key).Reader.AsStream())
                                    {
                                        newTree.Add(key, value);
                                        transactionSize += value.Length;
                                    }
                                }
                                copiedEntries++;
                            } while (transactionSize < compactedEnv.Options.MaxLogFileSize/2 && existingTreeIterator.MoveNext());
                            txw.Commit();
                        }
                        // Count the tree as fully copied only when every entry was seen.
                        if (copiedEntries == existingTree.State.EntriesCount)
                            copiedTrees++;
                        Report(treeName, copiedTrees, totalTreesCount, copiedEntries, existingTree.State.EntriesCount, progressReport);
                        // Flush between batches to keep journal usage bounded.
                        compactedEnv.FlushLogToDataFile();
                    } while (existingTreeIterator.MoveNext());
                }
            }
        } while (rootIterator.MoveNext());
    }
}
// Recovery must replay two separately committed transactions (one tree each)
// when the environment is reopened from disk.
public void StorageRecoveryShouldWorkWhenThereAreMultipleCommitedTransactions2()
{
    var path = "test2.data";
    DeleteDirectory(path);

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "atree");
            for (var n = 0; n < 1000; n++)
            {
                tree.Add("key" + n, new MemoryStream());
            }
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "btree");
            for (var n = 0; n < 5; n++)
            {
                tree.Add("key" + n, new MemoryStream());
            }
            tx.Commit();
        }
    }

    // Reopen and check every key in both trees survived recovery.
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "atree");
            env.CreateTree(tx, "btree");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var aTree = tx.Environment.CreateTree(tx, "atree");
            var bTree = tx.Environment.CreateTree(tx, "btree");

            for (var n = 0; n < 1000; n++)
            {
                Assert.NotNull(aTree.Read("key" + n));
            }

            for (var n = 0; n < 5; n++)
            {
                Assert.NotNull(bTree.Read("key" + n));
            }
        }
    }

    DeleteDirectory(path);
}
// Recovery must succeed when the journal ends with a large transaction that
// was never committed — reopening the environment is the assertion.
public void StorageRecoveryShouldWorkWhenThereAreCommitedAndUncommitedTransactions()
{
    var path = "test2.data";
    DeleteDirectory(path);

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            // Deliberately NOT committed — recovery must discard these writes.
            for (var n = 0; n < 10000; n++)
            {
                tx.Environment.State.GetTree(tx, "tree").Add("a" + n, new MemoryStream());
            }
        }
    }

    // Reopening must not throw.
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
    }

    DeleteDirectory(path);
}
// A transaction that only creates a tree can be shipped to another
// environment and read there without flushing logs to the data file.
public void StorageEnvironment_should_be_able_to_accept_transactionsToShip_with_new_trees_no_flushing()
{
    var shipped = new ConcurrentBag<TransactionToShip>();

    using (var shippingSourceEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        shippingSourceEnv.Journal.OnTransactionCommit += committed =>
        {
            committed.CreatePagesSnapshot();
            shipped.Add(committed);
        };

        using (var tx = shippingSourceEnv.NewTransaction(TransactionFlags.ReadWrite))
        {
            shippingSourceEnv.CreateTree(tx, "TestTree");
            tx.Commit();
        }
    }

    var destinationOptions = StorageEnvironmentOptions.CreateMemoryOnly();
    destinationOptions.ManualFlushing = true;

    using (var shippingDestinationEnv = new StorageEnvironment(destinationOptions))
    {
        foreach (var transaction in shipped)
        {
            shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(transaction.PagesSnapshot);
        }

        // No flush on purpose — the read must work straight from the journal.
        using (var snapshot = shippingDestinationEnv.CreateSnapshot())
        {
            snapshot.Read("TestTree", "Foo");
        }
    }
}
// Regression test for a page-splitter issue: after two rounds of multi-adds
// across six keys, key "0" must hold exactly one, then exactly two values.
public void SplitterIssue2()
{
    var storageEnvironmentOptions = StorageEnvironmentOptions.CreateMemoryOnly();
    storageEnvironmentOptions.ManualFlushing = true;

    using (var env = new StorageEnvironment(storageEnvironmentOptions))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "multi");
            tx.Commit();
        }

        var keys = new[] { "0", "1", "2", "3", "4", "5" };

        // First round: value "1" under every key.
        var batch = new WriteBatch();
        foreach (var key in keys)
        {
            batch.MultiAdd(key, "1", "multi");
        }
        env.Writer.Write(batch);

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var tree = tx.Environment.State.GetTree(tx, "multi");
            using (var iterator = tree.MultiRead(tx, "0"))
            {
                Assert.True(iterator.Seek(Slice.BeforeAllKeys));
                var count = 0;
                do
                {
                    count++;
                } while (iterator.MoveNext());
                Assert.Equal(1, count);
            }
        }

        // Second round: value "2" under every key.
        batch = new WriteBatch();
        foreach (var key in keys)
        {
            batch.MultiAdd(key, "2", "multi");
        }
        env.Writer.Write(batch);

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var tree = tx.Environment.State.GetTree(tx, "multi");
            using (var iterator = tree.MultiRead(tx, "0"))
            {
                Assert.True(iterator.Seek(Slice.BeforeAllKeys));
                var count = 0;
                do
                {
                    count++;
                } while (iterator.MoveNext());
                Assert.Equal(2, count);
            }
        }
    }
}
// Creates `number` trees named "<prefix>0".."<prefix>{number-1}" in a single
// committed transaction and returns their names.
private IList<string> CreateTrees(StorageEnvironment env, int number, string prefix)
{
    var names = new List<string>();

    using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        for (var n = 0; n < number; n++)
        {
            names.Add(env.CreateTree(tx, prefix + n).Name);
        }

        tx.Commit();
    }

    return names;
}
// The compaction progress callback must fire with a start (0 copied) and a
// completion report for each of the three trees.
public void ShouldReportProgress()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var fruits = env.CreateTree(tx, "fruits");
            fruits.Add("apple", new byte[123]);
            fruits.Add("orange", new byte[99]);

            var vegetables = env.CreateTree(tx, "vegetables");
            vegetables.Add("carrot", new byte[123]);
            vegetables.Add("potato", new byte[99]);

            var multi = env.CreateTree(tx, "multi");
            multi.MultiAdd("fruits", "apple");
            multi.MultiAdd("fruits", "orange");
            multi.MultiAdd("vegetables", "carrot");
            multi.MultiAdd("vegetables", "carrot");

            tx.Commit();
        }
    }

    var progressReport = new List<string>();

    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData),
        x => progressReport.Add(string.Format("Copied {0} of {1} records in '{2}' tree. Copied {3} of {4} trees.",
            x.CopiedTreeRecords, x.TotalTreeRecordsCount, x.TreeName, x.CopiedTrees, x.TotalTreeCount)));

    Assert.NotEmpty(progressReport);
    Assert.Contains("Copied 0 of 2 records in 'fruits' tree. Copied 0 of 3 trees.", progressReport);
    Assert.Contains("Copied 2 of 2 records in 'fruits' tree. Copied 1 of 3 trees.", progressReport);
    Assert.Contains("Copied 0 of 2 records in 'multi' tree. Copied 1 of 3 trees.", progressReport);
    Assert.Contains("Copied 2 of 2 records in 'multi' tree. Copied 2 of 3 trees.", progressReport);
    Assert.Contains("Copied 0 of 2 records in 'vegetables' tree. Copied 2 of 3 trees.", progressReport);
    Assert.Contains("Copied 2 of 2 records in 'vegetables' tree. Copied 3 of 3 trees.", progressReport);
}
// Records a debug journal covering adds, multi-adds, increments, deletes and
// a second tree, then replays it into a fresh environment and verifies the
// replay reproduces the exact final state.
public void Record_debug_journal_and_replay_it()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        env.DebugJournal = new DebugJournal(debugJouralName, env, true);
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "test-tree");
            tx.Commit();
        }
        // Batch 1: three plain values plus one multi-value on "test-tree".
        using (var writeBatch = new WriteBatch())
        {
            var valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("{ \"title\": \"foo\",\"name\":\"bar\"}"));
            writeBatch.Add("foo", valueBuffer, "test-tree");
            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2!"));
            writeBatch.Add("bar", valueBuffer, "test-tree");
            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2 3!"));
            writeBatch.Add("foo-bar", valueBuffer, "test-tree");
            writeBatch.MultiAdd("multi-foo", "AA", "test-tree");
            env.Writer.Write(writeBatch);
        }
        // Batch 2: first increment — incr-key becomes 5.
        using (var writeBatch = new WriteBatch())
        {
            writeBatch.Increment("incr-key", 5, "test-tree");
            env.Writer.Write(writeBatch);
        }
        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            Assert.Equal(5, tx.ReadTree("test-tree").Read("incr-key").Reader.ReadLittleEndianInt64());
            // Second increment written while this read transaction is open.
            using (var writeBatch = new WriteBatch())
            {
                writeBatch.Increment("incr-key", 5, "test-tree");
                env.Writer.Write(writeBatch);
            }
            // The open read transaction still sees the old value (5).
            Assert.Equal(5, tx.ReadTree("test-tree").Read("incr-key").Reader.ReadLittleEndianInt64());
        }
        // A fresh read transaction sees both increments (10).
        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            Assert.Equal(10, tx.ReadTree("test-tree").Read("incr-key").Reader.ReadLittleEndianInt64());
        }
        // Batch 3: two more multi-values plus a delete of "foo-bar".
        using (var writeBatch = new WriteBatch())
        {
            writeBatch.MultiAdd("multi-foo", "BB", "test-tree");
            writeBatch.MultiAdd("multi-foo", "CC", "test-tree");
            writeBatch.Delete("foo-bar", "test-tree");
            env.Writer.Write(writeBatch);
        }
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "test-tree2");
            tx.Commit();
        }
        // Batch 4: three values on the second tree.
        using (var writeBatch = new WriteBatch())
        {
            var valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1!"));
            writeBatch.Add("foo", valueBuffer, "test-tree2");
            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2!"));
            writeBatch.Add("bar", valueBuffer, "test-tree2");
            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2 3!"));
            writeBatch.Add("foo-bar", valueBuffer, "test-tree2");
            env.Writer.Write(writeBatch);
        }
    }
    // Replay the recorded journal into a brand-new environment and verify
    // that the full final state — including the delete — is reproduced.
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        env.DebugJournal = DebugJournal.FromFile(debugJouralName, env);
        env.DebugJournal.Replay();
        using (var snapshot = env.CreateSnapshot())
        {
            Assert.Equal("{ \"title\": \"foo\",\"name\":\"bar\"}", snapshot.Read("test-tree", "foo").Reader.ToStringValue());
            Assert.Equal("testing testing 1 2!", snapshot.Read("test-tree", "bar").Reader.ToStringValue());
            Assert.Equal("testing testing 1!", snapshot.Read("test-tree2", "foo").Reader.ToStringValue());
            Assert.Equal("testing testing 1 2!", snapshot.Read("test-tree2", "bar").Reader.ToStringValue());
            Assert.Equal("testing testing 1 2 3!", snapshot.Read("test-tree2", "foo-bar").Reader.ToStringValue());
            Assert.Equal(10, snapshot.Read("test-tree", "incr-key").Reader.ReadLittleEndianInt64());
            // The deleted key's version is 0 (gone) on "test-tree".
            Assert.Equal(0,snapshot.ReadVersion("test-tree","foo-bar"));
            // The multi-value key holds AA, BB, CC in order.
            using (var iter = snapshot.MultiRead("test-tree","multi-foo"))
            {
                iter.Seek(Slice.BeforeAllKeys);
                Assert.Equal("AA",iter.CurrentKey.ToString());
                Assert.DoesNotThrow(() => iter.MoveNext());
                Assert.Equal("BB",iter.CurrentKey.ToString());
                Assert.DoesNotThrow(() => iter.MoveNext());
                Assert.Equal("CC",iter.CurrentKey.ToString());
            }
        }
    }
}