// Committed entries must survive closing and reopening a disk-backed environment.
public void DataIsKeptAfterRestart_OnDisk()
{
    if (Directory.Exists("test.data"))
        Directory.Delete("test.data", true);

    using (var pager = StorageEnvironmentOptions.ForPath("test.data"))
    {
        // The options object must outlive the first environment so it can be reused.
        pager.OwnsPagers = false;

        using (var env = new StorageEnvironment(pager))
        {
            using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                writeTx.State.Root.Add("test/1", new MemoryStream());
                writeTx.Commit();
            }

            using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                writeTx.State.Root.Add("test/2", new MemoryStream());
                writeTx.Commit();
            }
        }

        // Reopen over the same options and verify both keys are still present.
        using (var env = new StorageEnvironment(pager))
        {
            using (var readTx = env.NewTransaction(TransactionFlags.Read))
            {
                Assert.NotNull(readTx.State.Root.Read("test/1"));
                Assert.NotNull(readTx.State.Root.Read("test/2"));
                readTx.Commit();
            }
        }
    }
}
// After a manual flush with no live transactions, the scratch buffer should be empty.
public void AllScratchPagesShouldBeReleased()
{
    var options = StorageEnvironmentOptions.CreateMemoryOnly();
    options.ManualFlushing = true;

    using (var env = new StorageEnvironment(options))
    {
        using (var setupTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(setupTx, "test");
            setupTx.Commit();
        }

        using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = writeTx.Environment.State.GetTree(writeTx, "test");
            // Overwrite the same key so the first value becomes garbage in scratch.
            tree.Add("key/1", new MemoryStream(new byte[100]));
            tree.Add("key/1", new MemoryStream(new byte[200]));
            writeTx.Commit();
        }

        env.FlushLogToDataFile();

        // Neither read nor write transactions are open, so the flush should
        // release everything held in the scratch file.
        Assert.Equal(0, env.ScratchBufferPool.GetNumberOfAllocations(0));
    }
}
// A manual flush should leave only the pages retained for the race-avoidance window.
public void AllScratchPagesShouldBeReleased()
{
    var options = StorageEnvironmentOptions.CreateMemoryOnly();
    options.ManualFlushing = true;

    using (var env = new StorageEnvironment(options))
    {
        using (var setupTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(setupTx, "test");
            setupTx.Commit();
        }

        using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = writeTx.Environment.CreateTree(writeTx, "test");
            // Two adds to the same key: the first value is superseded within the tx.
            tree.Add("key/1", new MemoryStream(new byte[100]));
            tree.Add("key/1", new MemoryStream(new byte[200]));
            writeTx.Commit();
        }

        env.FlushLogToDataFile();

        // No read or write transactions are open, so the flush releases scratch —
        // except that pages are tracked for one extra transaction to avoid a race
        // between FlushLogToDataFile and newly started read transactions.
        Assert.Equal(2, env.ScratchBufferPool.GetNumberOfAllocations(0));
    }
}
// Seeking by key prefix must succeed for every inserted long-key entry.
public void ShouldWork()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        var padding = new string('0', 500);

        using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(writeTx, "data");
            for (var i = 0; i < 10; i++)
            {
                // Long keys force the tree into multi-page layouts.
                tree.Add("users-" + i + "-" + padding, new byte[0]);
            }
            writeTx.Commit();
        }

        using (var readTx = env.NewTransaction(TransactionFlags.Read))
        {
            var tree = readTx.State.GetTree(readTx, "data");
            using (var it = tree.Iterate())
            {
                Assert.True(it.Seek("users-7"));
                for (var i = 0; i < 10; i++)
                {
                    Assert.True(it.Seek("users-" + i), i.ToString());
                }
            }
        }
    }
}
// With a single entry on the page, seeking a non-matching prefix must return
// false instead of throwing.
public void IterationShouldNotFindAnyRecordsAndShouldNotThrowWhenNumberOfEntriesOnPageIs1AndKeyDoesNotMatch()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        using (var setupTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(setupTx, "tree");
            setupTx.Commit();
        }

        using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            writeTx.ReadTree("tree").Add(@"Raven\Database\1", StreamFor("123"));
            writeTx.Commit();
        }

        using (var snapshot = env.CreateSnapshot())
        using (var iterator = snapshot.Iterate("tree"))
        {
            // The prefix sorts past the only stored key.
            Assert.False(iterator.Seek(@"Raven\Filesystem\"));
        }
    }
}
// In-memory environments keep data across restarts as long as the options (and
// thus the pagers) are kept alive.
public void DataIsKeptAfterRestart()
{
    using (var memoryOptions = StorageEnvironmentOptions.CreateMemoryOnly())
    {
        // Prevent the first environment from tearing down the shared pagers.
        memoryOptions.OwnsPagers = false;

        using (var env = new StorageEnvironment(memoryOptions))
        {
            using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                writeTx.Root.Add("test/1", new MemoryStream());
                writeTx.Commit();
            }

            using (var writeTx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                writeTx.Root.Add("test/2", new MemoryStream());
                writeTx.Commit();
            }
        }

        using (var env = new StorageEnvironment(memoryOptions))
        {
            using (var readTx = env.NewTransaction(TransactionFlags.Read))
            {
                Assert.NotNull(readTx.Root.Read("test/1"));
                Assert.NotNull(readTx.Root.Read("test/2"));
                readTx.Commit();
            }
        }
    }
}
// Reopening the environment after two committed batches must not trip a
// checksum mismatch, and all values must read back intact.
public void ShouldNotThrowChecksumMismatch()
{
    var random = new Random(1);
    var buffer = new byte[100];
    random.NextBytes(buffer);
    // Overwrite with a constant payload (the random fill above is discarded).
    for (var i = 0; i < 100; i++)
    {
        buffer[i] = 13;
    }

    var options = StorageEnvironmentOptions.ForPath(_dataPath);
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            for (var i = 0; i < 50; i++)
                tx.State.Root.Add("items/" + i, new MemoryStream(buffer));
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            for (var i = 50; i < 100; i++)
                tx.State.Root.Add("items/" + i, new MemoryStream(buffer));
            tx.Commit();
        }
    }

    // Fresh options: simulate a process restart over the same path.
    options = StorageEnvironmentOptions.ForPath(_dataPath);
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            for (var i = 0; i < 100; i++)
            {
                var readResult = tx.State.Root.Read("items/" + i);
                Assert.NotNull(readResult);

                var copy = new MemoryStream();
                readResult.Reader.CopyTo(copy);
                Assert.Equal(copy.ToArray(), buffer);
            }
        }
    }
}
// A value larger than a single journal file must still be recoverable on restart.
public void ShouldBeAbleToWriteValuesGreaterThanLogAndRecoverThem()
{
    DeleteDirectory("test2.data");

    var random = new Random(1234);
    var payload = new byte[1024 * 512];
    random.NextBytes(payload);

    var options = StorageEnvironmentOptions.ForPath("test2.data");
    // Keep journals tiny so the payload is guaranteed to span several of them.
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;

    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            tx.Environment.State.GetTree(tx, "tree").Add("key1", new MemoryStream(payload));
            tx.Commit();
        }
    }

    options = StorageEnvironmentOptions.ForPath("test2.data");
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;

    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var read = tx.Environment.State.GetTree(tx, "tree").Read("key1");
            Assert.NotNull(read);
            {
                Assert.Equal(payload.Length, read.Reader.Length);
                int used;
                Assert.Equal(payload, read.Reader.ReadBytes(read.Reader.Length, out used).Take(used).ToArray());
            }
        }
    }

    DeleteDirectory("test2.data");
}
// Disk-backed variant: values written in separate transactions survive a restart.
public void DataIsKeptAfterRestart_OnDisk()
{
    if (Directory.Exists("test.data"))
    {
        Directory.Delete("test.data", true);
    }

    using (var diskOptions = StorageEnvironmentOptions.ForPath("test.data"))
    {
        // Shared options must outlive the first environment instance.
        diskOptions.OwnsPagers = false;

        using (var env = new StorageEnvironment(diskOptions))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                tx.Root.Add("test/1", new MemoryStream());
                tx.Commit();
            }

            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                tx.Root.Add("test/2", new MemoryStream());
                tx.Commit();
            }
        }

        using (var env = new StorageEnvironment(diskOptions))
        {
            using (var tx = env.NewTransaction(TransactionFlags.Read))
            {
                Assert.NotNull(tx.Root.Read("test/1"));
                Assert.NotNull(tx.Root.Read("test/2"));
                tx.Commit();
            }
        }
    }
}
// Regression test: page splits driven by a recorded id sequence must keep every record readable.
public void PageSplitterShouldCalculateSeparatorKeyCorrectly2()
{
    var ids = ReadIds("data2.txt");

    var options = StorageEnvironmentOptions.CreateMemoryOnly();
    // The workload needs twice the default scratch space.
    options.MaxScratchBufferSize *= 2;

    using (var env = new StorageEnvironment(options))
    {
        var rng = new Random();
        var payload = new byte[69];
        rng.NextBytes(payload);

        var trees = CreateTrees(env, 1, "tree");

        // One transaction per id reproduces the original failure pattern.
        foreach (var id in ids)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                foreach (var treeName in trees)
                {
                    tx.Environment.CreateTree(tx, treeName).Add(id, new MemoryStream(payload));
                }
                tx.Commit();
            }
        }

        ValidateRecords(env, trees, ids);
    }
}
// Asserts that every bucket key "0".."9" in every tree holds the expected number
// of multi-values after i rounds of documentCount inserts.
private void ValidateMultiRecords(StorageEnvironment env, IEnumerable<string> trees, int documentCount, int i)
{
    using (var readTx = env.NewTransaction(TransactionFlags.Read))
    {
        for (var bucket = 0; bucket < 10; bucket++)
        {
            foreach (var treeName in trees)
            {
                var tree = readTx.Environment.State.GetTree(readTx, treeName);
                using (var iterator = tree.MultiRead((bucket % 10).ToString()))
                {
                    Assert.True(iterator.Seek(Slice.BeforeAllKeys));

                    // Seek already positioned us on the first entry.
                    var entries = 1;
                    while (iterator.MoveNext())
                    {
                        entries++;
                    }

                    Assert.Equal((i * documentCount) / 10, entries);
                }
            }
        }
    }
}
// Walks the free-space tree and totals the number of free pages recorded in
// section and transaction entries.
public long GetFreePageCount()
{
    long total = 0;

    using (var tx = _env.NewTransaction(TransactionFlags.Read))
    using (var it = _env.FreeSpaceRoot.Iterate(tx))
    {
        if (!it.Seek(Slice.BeforeAllKeys))
            return 0;

        do
        {
            var key = it.CurrentKey;
            if (key.StartsWith(_sectionsPrefix, _env.SliceComparer) ||
                key.StartsWith(_txPrefix, _env.SliceComparer))
            {
                // Entry data is a sequence of longs; one slot is not a page
                // number, hence the -1.
                total += it.GetCurrentDataSize() / sizeof(long) - 1;
            }
            else
            {
                // Any other key shape indicates corruption of the free-space tree.
                Debug.Assert(false, "invalid key in free space tree: " + key);
            }
        } while (it.MoveNext());
    }

    return total;
}
// In-memory environment with shared (non-owned) pagers keeps data across a restart.
public void DataIsKeptAfterRestart()
{
    using (var pureMemoryPager = StorageEnvironmentOptions.CreateMemoryOnly())
    {
        // Keep pagers alive so the second environment sees the same data.
        pureMemoryPager.OwnsPagers = false;

        using (var env = new StorageEnvironment(pureMemoryPager))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                tx.State.Root.Add(tx, "test/1", new MemoryStream());
                tx.Commit();
            }

            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                tx.State.Root.Add(tx, "test/2", new MemoryStream());
                tx.Commit();
            }
        }

        using (var env = new StorageEnvironment(pureMemoryPager))
        {
            using (var tx = env.NewTransaction(TransactionFlags.Read))
            {
                // Removed a leftover Debugger.Launch() that fired when the first
                // read returned null — it can hang unattended test runs waiting
                // for a debugger; the assert below is the correct failure mode.
                Assert.NotNull(tx.State.Root.Read(tx, "test/1"));
                Assert.NotNull(tx.State.Root.Read(tx, "test/2"));
                tx.Commit();
            }
        }
    }
}
// Two MultiAdd operations on the same key in one batch must both be persisted,
// and iterate back in sorted value order.
public void CanAddMultiValuesUnderTheSameKeyToBatch()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        var rng = new Random();
        var scratch = new byte[168];
        rng.NextBytes(scratch);

        CreateTrees(env, 1, "multitree");

        var batch = new WriteBatch();
        batch.MultiAdd("key", "value1", "multitree0");
        batch.MultiAdd("key", "value2", "multitree0");
        env.Writer.Write(batch);

        using (var readTx = env.NewTransaction(TransactionFlags.Read))
        {
            var tree = readTx.Environment.CreateTree(readTx, "multitree0");
            using (var it = tree.MultiRead("key"))
            {
                Assert.True(it.Seek(Slice.BeforeAllKeys));
                Assert.Equal("value1", it.CurrentKey.ToString());
                Assert.True(it.MoveNext());
                Assert.Equal("value2", it.CurrentKey.ToString());
            }
        }
    }
}
// Shipped transaction snapshots that create new trees must be applicable on a
// destination environment that never flushes.
public void StorageEnvironment_should_be_able_to_accept_transactionsToShip_with_new_trees_no_flushing()
{
    var shipped = new ConcurrentBag<TransactionToShip>();

    using (var sourceEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        // Capture a snapshot of every committed transaction for replay later.
        sourceEnv.Journal.OnTransactionCommit += tx =>
        {
            tx.CreatePagesSnapshot();
            shipped.Add(tx);
        };

        using (var tx = sourceEnv.NewTransaction(TransactionFlags.ReadWrite))
        {
            sourceEnv.CreateTree(tx, "TestTree");
            tx.Commit();
        }
    }

    var destinationOptions = StorageEnvironmentOptions.CreateMemoryOnly();
    destinationOptions.ManualFlushing = true;

    using (var destinationEnv = new StorageEnvironment(destinationOptions))
    {
        foreach (var tx in shipped)
        {
            destinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot);
        }

        // The shipped tree must be visible (reading it must not throw).
        using (var snapshot = destinationEnv.CreateSnapshot())
        {
            snapshot.Read("TestTree", "Foo");
        }
    }
}
// Compaction must drop journal files even when the current journal still has
// free space, and the compacted store must accept new writes afterwards.
public void ShouldDeleteCurrentJournalEvenThoughItHasAvailableSpace()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "fruits");
            tree.Add("apple", new byte[123]);
            tree.Add("orange", new byte[99]);
            tx.Commit();
        }
    }

    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData));

    var compactedDir = new DirectoryInfo(CompactedData);
    var journalsAfterCompaction = compactedDir.GetFiles("*.journal").Select(x => x.Name).ToList();
    Assert.Equal(0, journalsAfterCompaction.Count);

    // Ensure the compacted environment can still accept (and persist) more data.
    using (var compacted = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactedData)))
    {
        using (var tx = compacted.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = compacted.CreateTree(tx, "fruits");
            tree.Add("peach", new byte[144]);
            // Bug fix: without the commit the write was silently rolled back on
            // dispose, so this test never actually verified the write path.
            tx.Commit();
        }
    }
}
// Compaction is incompatible with incremental backups and must refuse to run.
public void CannotCompactStorageIfIncrementalBackupEnabled()
{
    var envOptions = StorageEnvironmentOptions.ForPath(CompactionTestsData);
    envOptions.IncrementalBackupEnabled = true;

    using (var env = new StorageEnvironment(envOptions))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "records");
            tree.Add("record/1", new byte[9]);
            tree.Add("record/2", new byte[9]);
            tx.Commit();
        }
    }

    var srcOptions = StorageEnvironmentOptions.ForPath(CompactionTestsData);
    srcOptions.IncrementalBackupEnabled = true;

    var invalidOperationException = Assert.Throws<InvalidOperationException>(() =>
        StorageCompaction.Execute(srcOptions,
            (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData)));

    // The exception must carry the dedicated, user-facing explanation.
    Assert.Equal(StorageCompaction.CannotCompactBecauseOfIncrementalBackup, invalidOperationException.Message);
}
// Batched MultiAdd calls on one key must both land and enumerate in order.
public void CanAddMultiValuesUnderTheSameKeyToBatch()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        var rng = new Random();
        var scratch = new byte[168];
        rng.NextBytes(scratch);

        CreateTrees(env, 1, "multitree");

        var batch = new WriteBatch();
        batch.MultiAdd("key", "value1", "multitree0");
        batch.MultiAdd("key", "value2", "multitree0");
        env.Writer.Write(batch);

        using (var readTx = env.NewTransaction(TransactionFlags.Read))
        {
            var tree = readTx.Environment.State.GetTree(readTx, "multitree0");
            using (var it = tree.MultiRead("key"))
            {
                Assert.True(it.Seek(Slice.BeforeAllKeys));
                Assert.Equal("value1", it.CurrentKey.ToString());
                Assert.True(it.MoveNext());
                Assert.Equal("value2", it.CurrentKey.ToString());
            }
        }
    }
}
// Replays a recorded id sequence, one transaction each, and validates all records.
public void PageSplitterShouldCalculateSeparatorKeyCorrectly2()
{
    var ids = ReadIds("data2.txt");

    StorageEnvironmentOptions storageEnvironmentOptions = StorageEnvironmentOptions.CreateMemoryOnly();
    // Double the scratch buffer: the replay overflows the default size.
    storageEnvironmentOptions.MaxScratchBufferSize *= 2;

    using (var env = new StorageEnvironment(storageEnvironmentOptions))
    {
        var random = new Random();
        var value = new byte[69];
        random.NextBytes(value);

        var trees = CreateTrees(env, 1, "tree");

        foreach (var id in ids)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                foreach (var treeName in trees)
                {
                    var tree = tx.Environment.CreateTree(tx, treeName);
                    tree.Add(id, new MemoryStream(value));
                }

                tx.Commit();
            }
        }

        ValidateRecords(env, trees, ids);
    }
}
// Bootstraps a full-text index on top of a storage environment: creates the
// required trees, assigns or restores the index id, and recovers the last
// assigned document id.
public FullTextIndex(StorageEnvironmentOptions options, IAnalyzer analyzer)
{
    Analyzer = analyzer;
    Conventions = new IndexingConventions();
    BufferPool = new BufferPool();
    StorageEnvironment = new StorageEnvironment(options);

    using (var tx = StorageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        // Prefixed-key trees for term, deletion, and document storage.
        StorageEnvironment.CreateTree(tx, "@terms", keysPrefixing: true);
        StorageEnvironment.CreateTree(tx, "deletes", keysPrefixing: true);
        var docs = StorageEnvironment.CreateTree(tx, "docs", keysPrefixing: true);
        var metadata = StorageEnvironment.CreateTree(tx, "$metadata");

        // A missing "id" entry means this is a brand-new index.
        var idVal = metadata.Read("id");
        if (idVal == null)
        {
            Id = Guid.NewGuid();
            metadata.Add("id", Id.ToByteArray());
        }
        else
        {
            int _;
            Id = new Guid(idVal.Reader.ReadBytes(16, out _));
        }

        // Recover the highest document id: seek past all keys and read the last
        // one as a big-endian long; 0 when the docs tree is empty.
        using (var it = docs.Iterate())
        {
            _lastDocumentId = it.Seek(Slice.AfterAllKeys) == false
                ? 0
                : it.CurrentKey.CreateReader().ReadBigEndianInt64();
        }

        tx.Commit();
    }
}
// Benchmark helper: spins up `concurrency` thread-pool workers, each opening a
// read transaction and streaming a share of the keys; `sw` measures wall time
// from first dispatch until the last worker signals.
private static void ReadOneTransaction_Parallel(Stopwatch sw, int concurrency)
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(Path)))
    {
        var countdownEvent = new CountdownEvent(concurrency);
        sw.Start();
        for (int i = 0; i < concurrency; i++)
        {
            // Captured copy so each worker sees its own index, not the loop variable.
            var currentBase = i;
            ThreadPool.QueueUserWorkItem(state =>
            {
                using (var tx = env.NewTransaction(TransactionFlags.Read))
                {
                    var ms = new byte[100];
                    for (int j = 0; j < ((ItemsPerTransaction * Transactions) / concurrency); j++)
                    {
                        // NOTE(review): `j * currentBase` is 0 for the whole of
                        // worker 0, so that worker re-reads key 0 every pass and
                        // workers overlap on multiples — looks like it was meant
                        // to be `j + currentBase * (items/concurrency)`; confirm
                        // against the writer side before changing.
                        var current = j * currentBase;
                        var key = current.ToString("0000000000000000");
                        // NOTE(review): Read(...) is assumed non-null here; a
                        // missing key would throw NullReferenceException.
                        var stream = tx.State.Root.Read(tx, key).Reader;
                        // Drain the value; the bytes themselves are discarded.
                        while (stream.Read(ms, 0, ms.Length) != 0)
                        {
                        }
                    }
                    tx.Commit();
                }
                countdownEvent.Signal();
            });
        }
        countdownEvent.Wait();
        sw.Stop();
    }
}
// When MaxNumberOfPagesInMergedTransaction is small, a minimal incremental
// backup must split the merged work into several shipped transactions.
public void Can_split_merged_transaction_to_multiple_tx()
{
    _tempDir = Guid.NewGuid().ToString();

    var options = StorageEnvironmentOptions.ForPath(_tempDir);
    options.IncrementalBackupEnabled = true;
    // Force splitting: merged transactions may carry at most 8 pages.
    options.MaxNumberOfPagesInMergedTransaction = 8;

    using (var envToSnapshot = new StorageEnvironment(options))
    {
        for (var round = 0; round < 100; round++)
        {
            using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = envToSnapshot.CreateTree(tx, "test");
                for (var i = 0; i < 1000; i++)
                {
                    tree.Add("users/" + i, "john doe/" + i);
                }
                tx.Commit();
            }
        }

        var backupPath = Path.Combine(_tempDir, "1.snapshot");
        new MinimalIncrementalBackup().ToFile(envToSnapshot, backupPath);

        // More than one zip entry proves the merged transaction was split.
        using (var stream = File.OpenRead(backupPath))
        using (var zip = new ZipArchive(stream, ZipArchiveMode.Read))
        {
            Assert.True(zip.Entries.Count > 1);
        }
    }
}
// Legacy pager API: data written through one environment is visible in a second
// environment constructed over the same in-memory pager.
public void DataIsKeptAfterRestart()
{
    using (var pureMemoryPager = new PureMemoryPager())
    {
        // ownsPager: false keeps the pager alive after the first env is disposed.
        using (var env = new StorageEnvironment(pureMemoryPager, ownsPager: false))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.Root.Add(tx, "test/1", new MemoryStream());
                tx.Commit();
            }

            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.Root.Add(tx, "test/2", new MemoryStream());
                tx.Commit();
            }
        }

        using (var env = new StorageEnvironment(pureMemoryPager))
        {
            using (var tx = env.NewTransaction(TransactionFlags.Read))
            {
                Assert.NotNull(env.Root.Read(tx, "test/1"));
                Assert.NotNull(env.Root.Read(tx, "test/2"));
                tx.Commit();
            }
        }
    }
}
// Incremental backup must faithfully restore a mix of small values and
// overflow-page values, including an overwrite across transactions.
public void Mixed_small_and_overflow_changes()
{
    _tempDir = Guid.NewGuid().ToString();

    var options = StorageEnvironmentOptions.ForPath(_tempDir);
    options.IncrementalBackupEnabled = true;

    using (var envToSnapshot = new StorageEnvironment(options))
    {
        using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = envToSnapshot.CreateTree(tx, "test");
            tree.Add("users/1", "john doe");
            tree.Add("users/2", new String('a', 5000)); // overflow-sized value
            tx.Commit();
        }

        using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = envToSnapshot.CreateTree(tx, "test");
            tree.Add("users/2", "jane darling");        // shrink an overflow value
            tree.Add("users/3", new String('b', 5000)); // new overflow value
            tx.Commit();
        }

        new MinimalIncrementalBackup().ToFile(envToSnapshot, Path.Combine(_tempDir, "1.snapshot"));

        var restoredOptions = StorageEnvironmentOptions.ForPath(Path.Combine(_tempDir, "restored"));
        new IncrementalBackup().Restore(restoredOptions, new[] { Path.Combine(_tempDir, "1.snapshot") });

        using (var restoredEnv = new StorageEnvironment(restoredOptions))
        {
            using (var tx = restoredEnv.NewTransaction(TransactionFlags.Read))
            {
                var tree = tx.ReadTree("test");
                Assert.NotNull(tree);
                Assert.Equal("john doe", tree.Read("users/1").Reader.ToStringValue());
                Assert.Equal("jane darling", tree.Read("users/2").Reader.ToStringValue());
                Assert.Equal(new String('b', 5000), tree.Read("users/3").Reader.ToStringValue());
            }
        }
    }
}
// Groups pending writes into one batch, applies them inside a single write
// transaction, and notifies each OutstandingWrite of success or failure.
// A commit failure marks every write errored; any other failure is delegated
// to HandleWriteFailure.
private void HandleActualWrites(OutstandingWrite mine, CancellationToken token)
{
    List<OutstandingWrite> writes = null;
    try
    {
        writes = BuildBatchGroup(mine);
        var completedSuccessfully = false;

        using (var tx = _env.NewTransaction(TransactionFlags.ReadWrite))
        {
            // Bug fix: the `token` parameter was accepted but ignored — the
            // field `_cancellationToken` was passed instead, so callers could
            // not cancel a specific batch. Honor the token we were given.
            HandleOperations(tx, writes, token);

            try
            {
                tx.Commit();
                if (ShouldRecordToDebugJournal)
                    _debugJournal.Flush();

                completedSuccessfully = true;
            }
            catch (Exception e)
            {
                // If we have an error during the commit, we can't recover, just fail them all.
                foreach (var write in writes)
                {
                    write.Errored(e);
                }
            }
        }

        // Completion callbacks run outside the transaction scope.
        if (completedSuccessfully)
        {
            foreach (var write in writes)
            {
                write.Completed();
            }
        }
    }
    catch (Exception e)
    {
        HandleWriteFailure(writes, mine, e);
    }
}
// Reads the stored JSON value for `key` from the "items" tree, or null when absent.
public JToken Read(string key)
{
    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.Read))
    {
        var readResult = tx.ReadTree("items").Read(key);
        if (readResult == null)
            return null;

        var reader = new JsonTextReader(new StreamReader(readResult.Reader.AsStream()));
        return JToken.ReadFrom(reader);
    }
}
// Applies a shipped transaction (header + compressed pages) to this environment:
// validates the CRC chain and transaction-id sequence, decompresses the pages
// into scratch space, writes them directly into a new write transaction, and
// commits. Raises OnTransactionApplied on success.
public void ApplyShippedLog(byte[] txPagesRaw)
{
    fixed (byte* pages = txPagesRaw)
    {
        using (var tx = _env.NewTransaction(TransactionFlags.ReadWrite))
        {
            // Layout: one page of TransactionHeader, then the compressed data pages.
            var transactionHeader = (TransactionHeader*)pages;
            var dataPages = pages + AbstractPager.PageSize;

            // Round the compressed size up to whole pages for the CRC window.
            var compressedPages = (transactionHeader->CompressedSize / AbstractPager.PageSize) + (transactionHeader->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);
            var crc = Crc.Value(dataPages, 0, compressedPages * AbstractPager.PageSize);

            var transactionId = transactionHeader->TransactionId;
            // Three chain checks: payload CRC, contiguous transaction ids, and
            // the previous transaction's CRC recorded in the header.
            if (transactionHeader->Crc != crc)
            {
                throw new InvalidDataException("Invalid CRC signature for shipped transaction " + transactionId);
            }

            if (transactionId - 1 != PreviousTransactionId)
            {
                throw new InvalidDataException("Invalid id for shipped transaction got " + transactionId + " but expected " + (PreviousTransactionId + 1) + ", is there a break in the chain?");
            }

            if (transactionHeader->PreviousTransactionCrc != PreviousTransactionCrc)
            {
                throw new InvalidDataException("Invalid CRC signature for previous shipped transaction " + transactionId + ", is there a break in the chain?");
            }

            var totalPages = transactionHeader->PageCount + transactionHeader->OverflowPageCount;
            var decompressBuffer = _env.ScratchBufferPool.Allocate(tx, totalPages);
            try
            {
                try
                {
                    var dest = _env.ScratchBufferPool.AcquirePagePointer(decompressBuffer.ScratchFileNumber, decompressBuffer.PositionInScratchBuffer);
                    LZ4.Decode64(dataPages, transactionHeader->CompressedSize, dest, transactionHeader->UncompressedSize, true);
                }
                catch (Exception e)
                {
                    throw new InvalidDataException("Could not de-compress shipped transaction pages, invalid data", e);
                }

                tx.WriteDirect(transactionHeader, decompressBuffer);

                // Advance the chain state only after the pages are written.
                _previousTransactionCrc = crc;
                _previousTransactionId = transactionHeader->TransactionId;
            }
            finally
            {
                // Always return the scratch allocation, even on failure.
                _env.ScratchBufferPool.Free(decompressBuffer.ScratchFileNumber, decompressBuffer.PositionInScratchBuffer, -1);
            }

            tx.Commit();

            OnTransactionApplied(transactionId, crc);
        }
    }
}
// RavenDB-2806 regression: overflow pages written after deletes in the same
// transaction must survive an incremental backup + restore intact.
public void IncorrectWriteOfOverflowPagesFromJournalsToDataFile_RavenDB_2806()
{
    RequireFileBasedPager();

    const int testedOverflowSize = 20000;
    var overflowValue = new byte[testedOverflowSize];
    new Random(1).NextBytes(overflowValue);

    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        var tree = Env.CreateTree(tx, "test");

        // Add then delete two large items so freed pages precede the overflow write.
        var itemBytes = new byte[16000];
        new Random(2).NextBytes(itemBytes);
        tree.Add("items/1", itemBytes);
        new Random(3).NextBytes(itemBytes);
        tree.Add("items/2", itemBytes);
        tree.Delete("items/1");
        tree.Delete("items/2");

        tree.Add("items/3", overflowValue);
        tx.Commit();
    }

    BackupMethods.Incremental.ToFile(Env, IncrementalBackupTestUtils.IncrementalBackupFile(0));

    var options = StorageEnvironmentOptions.ForPath(IncrementalBackupTestUtils.RestoredStoragePath);
    options.MaxLogFileSize = Env.Options.MaxLogFileSize;

    BackupMethods.Incremental.Restore(options, new[] { IncrementalBackupTestUtils.IncrementalBackupFile(0) });

    using (var env = new StorageEnvironment(options))
    using (var tx = env.NewTransaction(TransactionFlags.Read))
    {
        var readResult = tx.ReadTree("test").Read("items/3");
        var readBytes = new byte[testedOverflowSize];
        readResult.Reader.Read(readBytes, 0, testedOverflowSize);
        Assert.Equal(overflowValue, readBytes);
    }
}
// Full backup/restore round trip covering data both in the data file and in
// journals at backup time.
public void CanBackupAndRestore()
{
    RequireFileBasedPager();

    var random = new Random();
    var buffer = new byte[8192];
    random.NextBytes(buffer);

    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        for (var i = 0; i < 500; i++)
            tx.Root.Add("items/" + i, new MemoryStream(buffer));
        tx.Commit();
    }

    Assert.True(Env.Journal.Files.Count > 1);

    // Push the first half into the data file.
    Env.FlushLogToDataFile();

    // Second half stays in journal files at backup time.
    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        for (var i = 500; i < 1000; i++)
            tx.Root.Add("items/" + i, new MemoryStream(buffer));
        tx.Commit();
    }

    // This flush won't sync to disk because another sync happened within the last minute.
    Env.FlushLogToDataFile();

    BackupMethods.Full.ToFile(Env, _backupFile);
    BackupMethods.Full.Restore(_backupFile, _recoveredStoragePath);

    var options = StorageEnvironmentOptions.ForPath(_recoveredStoragePath);
    options.MaxLogFileSize = Env.Options.MaxLogFileSize;

    using (var env = new StorageEnvironment(options))
    using (var tx = env.NewTransaction(TransactionFlags.Read))
    {
        for (var i = 0; i < 1000; i++)
        {
            var readResult = tx.Root.Read("items/" + i);
            Assert.NotNull(readResult);

            var copy = new MemoryStream();
            readResult.Reader.CopyTo(copy);
            Assert.Equal(copy.ToArray(), buffer);
        }
    }
}
// A single committed transaction left in the journal must be recovered on reopen.
public void StorageRecoveryShouldWorkWhenThereSingleTransactionToRecoverFromLog()
{
    const string path = "test2.data";
    DeleteDirectory(path);

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "tree");
            for (var i = 0; i < 100; i++)
                tree.Add("key" + i, new MemoryStream());
            tx.Commit();
        }
    }

    // Reopen: recovery must replay the journal before we can read.
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var tree = tx.Environment.State.GetTree(tx, "tree");
            for (var i = 0; i < 100; i++)
                Assert.NotNull(tree.Read("key" + i));
        }
    }

    DeleteDirectory(path);
}
// A value added under a named tree must still be readable after the in-memory
// environment is rebuilt over the same (non-owned) options.
public void SurviveRestart()
{
    using (var options = StorageEnvironmentOptions.CreateMemoryOnly())
    {
        options.OwnsPagers = false;

        using (var env = new StorageEnvironment(options))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.CreateTree(tx, "events");
                tx.Commit();
            }

            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                tx.Environment.State.GetTree(tx, "events").Add("test", new MemoryStream(0));
                tx.Commit();
            }
        }

        using (var env = new StorageEnvironment(options))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.CreateTree(tx, "events");
                tx.Commit();
            }

            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var readResult = tx.Environment.State.GetTree(tx, "events").Read("test");
                Assert.NotNull(readResult);
                tx.Commit();
            }
        }
    }
}
// Bootstraps the counter store: creates all required trees, then either
// initializes a brand-new store (registering this server and committing) or
// validates an existing one against the stored metadata and recovers LastEtag.
private void Initialize()
{
    using (var tx = storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        // Bidirectional server-name <-> id mappings plus the counter data trees.
        var serverNamesToIds = storageEnvironment.CreateTree(tx, "serverNames->Ids");
        var serverIdsToNames = storageEnvironment.CreateTree(tx, "Ids->serverNames");
        storageEnvironment.CreateTree(tx, "servers->lastEtag");
        storageEnvironment.CreateTree(tx, "counters");
        storageEnvironment.CreateTree(tx, "countersGroups");
        var etags = storageEnvironment.CreateTree(tx, "etags->counters");
        storageEnvironment.CreateTree(tx, "counters->etags");

        var metadata = tx.State.GetTree(tx, "$metadata");
        var id = metadata.Read("id");

        if (id == null) // new counter db
        {
            // Register this server's URL under its big-endian id, both directions.
            var serverIdBytes = EndianBitConverter.Big.GetBytes(ServerId);
            var serverIdSlice = new Slice(serverIdBytes);
            serverNamesToIds.Add(CounterStorageUrl, serverIdSlice);
            serverIdsToNames.Add(serverIdSlice, CounterStorageUrl);

            Id = Guid.NewGuid();
            metadata.Add("id", Id.ToByteArray());
            metadata.Add("name", Encoding.UTF8.GetBytes(Name));

            // Only the new-db path mutates data, so only it commits; the
            // existing-db path below merely reads and validates.
            tx.Commit();
        }
        else // existing counter db
        {
            int used;
            Id = new Guid(id.Reader.ReadBytes(16, out used));

            var nameResult = metadata.Read("name");
            if (nameResult == null)
            {
                throw new InvalidOperationException("Could not read name from the store, something bad happened");
            }
            var storedName = new StreamReader(nameResult.Reader.AsStream()).ReadToEnd();

            // Refuse to open a store created under a different name.
            if (storedName != Name)
            {
                throw new InvalidOperationException("The stored name " + storedName + " does not match the given name " + Name);
            }

            // LastEtag is the highest key in the etags tree (big-endian long),
            // found by seeking past all keys.
            using (var it = etags.Iterate())
            {
                if (it.Seek(Slice.AfterAllKeys))
                {
                    LastEtag = it.CurrentKey.CreateReader().ReadBigEndianInt64();
                }
            }
        }

        ReplicationTask.StartReplication();
    }
}
// Full backup/restore round trip (tx.State.Root variant): half the data flushed
// to the data file, half still in journals when the backup is taken.
public void CanBackupAndRestore()
{
    RequireFileBasedPager();

    var random = new Random();
    var payload = new byte[8192];
    random.NextBytes(payload);

    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        for (var i = 0; i < 500; i++)
            tx.State.Root.Add("items/" + i, new MemoryStream(payload));
        tx.Commit();
    }

    Assert.True(Env.Journal.Files.Count > 1);

    // Move the committed data into the data file.
    Env.FlushLogToDataFile();

    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        for (var i = 500; i < 1000; i++)
            tx.State.Root.Add("items/" + i, new MemoryStream(payload));
        tx.Commit();
    }

    // No disk sync here: another sync happened within the last minute.
    Env.FlushLogToDataFile();

    BackupMethods.Full.ToFile(Env, _backupFile);
    BackupMethods.Full.Restore(_backupFile, _recoveredStoragePath);

    var options = StorageEnvironmentOptions.ForPath(_recoveredStoragePath);
    options.MaxLogFileSize = Env.Options.MaxLogFileSize;

    using (var env = new StorageEnvironment(options))
    using (var tx = env.NewTransaction(TransactionFlags.Read))
    {
        for (var i = 0; i < 1000; i++)
        {
            var readResult = tx.State.Root.Read("items/" + i);
            Assert.NotNull(readResult);

            var copy = new MemoryStream();
            readResult.Reader.CopyTo(copy);
            Assert.Equal(copy.ToArray(), payload);
        }
    }
}
// Several transaction pages may map to the same page number; flushing must cope.
public void MultipleTxPagesCanPointToOnePageNumberWhichShouldNotBeCausingIssuesDuringFlushing()
{
    var options = StorageEnvironmentOptions.CreateMemoryOnly();
    options.ManualFlushing = true;

    using (var env = new StorageEnvironment(options))
    {
        var trees = CreateTrees(env, 2, "tree");
        var tree1 = trees[0];
        var tree2 = trees[1];

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var first = tx.ReadTree(tree1);
            first.MultiAdd("key", "value/1");
            first.MultiAdd("key", "value/2");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var first = tx.ReadTree(tree1);
            var second = tx.ReadTree(tree2);

            // Free pages in tree1, then immediately reuse page numbers via tree2.
            first.MultiDelete("key", "value/1");
            first.MultiDelete("key", "value/2");

            var filler = new byte[1000];
            for (var i = 1; i <= 5; i++)
            {
                second.Add("key/" + i, new MemoryStream(filler));
            }

            tx.Commit();
        }

        env.FlushLogToDataFile();
    }
}
// Journal recovery of one committed transaction (State.GetTree variant).
public void StorageRecoveryShouldWorkWhenThereSingleTransactionToRecoverFromLog()
{
    var path = "test2.data";
    DeleteDirectory(path);

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "tree");
            for (var keyIndex = 0; keyIndex < 100; keyIndex++)
            {
                tree.Add("key" + keyIndex, new MemoryStream());
            }
            tx.Commit();
        }
    }

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        // Recovery happened on open; re-create the tree handle and verify contents.
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var tree = tx.Environment.State.GetTree(tx, "tree");
            for (var keyIndex = 0; keyIndex < 100; keyIndex++)
            {
                Assert.NotNull(tree.Read("key" + keyIndex));
            }
        }
    }

    DeleteDirectory(path);
}
// Takes five minimal incremental backups (two transactions each) and then
// restores all of them in order, verifying every record survived.
public void Can_make_multiple_min_inc_backups_and_then_restore()
{
    const int UserCount = 5000;
    _tempDir = Guid.NewGuid().ToString();

    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;

    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        int index = 0;

        for (int xi = 0; xi < 5; xi++)
        {
            // two transactions per snapshot, UserCount / 10 users each
            for (int yi = 0; yi < 2; yi++)
            {
                using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var tree = envToSnapshot.CreateTree(tx, "test");

                    for (int i = 0; i < UserCount / 10; i++)
                    {
                        tree.Add("users/" + index, "john doe/" + index);
                        index++;
                    }

                    tx.Commit();
                }
            }

            var snapshotWriter = new MinimalIncrementalBackup();
            snapshotWriter.ToFile(envToSnapshot, Path.Combine(_tempDir, xi + ".snapshot"));
        }
    }

    var incremental = new IncrementalBackup();
    var restoredOptions = StorageEnvironmentOptions.ForPath(Path.Combine(_tempDir, "restored"));
    incremental.Restore(restoredOptions, Enumerable.Range(0, 5).Select(i => Path.Combine(_tempDir, i + ".snapshot")));

    using (var snapshotRestoreEnv = new StorageEnvironment(restoredOptions))
    using (var tx = snapshotRestoreEnv.NewTransaction(TransactionFlags.Read))
    {
        var tree = tx.ReadTree("test");
        Assert.NotNull(tree);

        for (int i = 0; i < UserCount; i++)
        {
            var readResult = tree.Read("users/" + i);
            Assert.NotNull(readResult);
            Assert.Equal("john doe/" + i, readResult.Reader.ToStringValue());
        }
    }
}
// Same scenario as the ReadTree-based variant, but resolving trees via
// tx.State.GetTree (older API surface); flushing must not fail.
public void MultipleTxPagesCanPointToOnePageNumberWhichShouldNotBeCausingIssuesDuringFlushing()
{
    var options = StorageEnvironmentOptions.CreateMemoryOnly();
    options.ManualFlushing = true;

    using (var env = new StorageEnvironment(options))
    {
        var trees = CreateTrees(env, 2, "tree");
        var firstTree = trees[0];
        var secondTree = trees[1];

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var t1 = tx.State.GetTree(tx, firstTree);
            t1.MultiAdd("key", "value/1");
            t1.MultiAdd("key", "value/2");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var t1 = tx.State.GetTree(tx, firstTree);
            var t2 = tx.State.GetTree(tx, secondTree);
            var buffer = new byte[1000];

            t1.MultiDelete("key", "value/1");
            t1.MultiDelete("key", "value/2");

            t2.Add("key/1", new MemoryStream(buffer));
            t2.Add("key/2", new MemoryStream(buffer));
            t2.Add("key/3", new MemoryStream(buffer));
            t2.Add("key/4", new MemoryStream(buffer));
            t2.Add("key/5", new MemoryStream(buffer));

            tx.Commit();
        }

        env.FlushLogToDataFile();
    }
}
// Data written to an in-memory environment must still be readable after the
// environment is disposed and reopened over the same (retained) pagers.
public void SurviveRestart()
{
    using (var options = StorageEnvironmentOptions.CreateMemoryOnly())
    {
        options.OwnsPagers = false; // keep pager memory alive across restarts

        using (var env = new StorageEnvironment(options))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.CreateTree(tx, "events");
                tx.Commit();
            }

            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                // NOTE(review): tx-taking Add/Read overloads - older Tree API than
                // the rest of this file; preserved as-is.
                tx.Environment.State.GetTree(tx, "events").Add(tx, "test", new MemoryStream(0));
                tx.Commit();
            }
        }

        using (var env = new StorageEnvironment(options))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.CreateTree(tx, "events");
                tx.Commit();
            }

            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = tx.Environment.State.GetTree(tx, "events");
                var readResult = tree.Read(tx, "test");
                Assert.NotNull(readResult);
                tx.Commit();
            }
        }
    }
}
// After deleting roughly half the records, a compacted copy of the storage
// must be smaller on disk than the original.
public void ShouldOccupyLessSpace()
{
    var r = new Random();

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "records");

            for (int i = 0; i < 100; i++)
            {
                var bytes = new byte[r.Next(10, 2 * 1024 * 1024)];
                r.NextBytes(bytes);
                tree.Add("record/" + i, bytes);
            }

            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "records");

            // delete ~50 random records (duplicate picks are harmless)
            for (int i = 0; i < 50; i++)
            {
                tree.Delete("record/" + r.Next(0, 100));
            }

            tx.Commit();
        }
    }

    var oldSize = GetDirSize(new DirectoryInfo(CompactionTestsData));

    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData));

    var newSize = GetDirSize(new DirectoryInfo(CompactedData));

    Assert.True(newSize < oldSize,
        string.Format("Old size: {0:#,#;;0} MB, new size {1:#,#;;0} MB", oldSize / 1024 / 1024, newSize / 1024 / 1024));
}
// Records a debug journal (including manual flushes), then replays it into a
// fresh environment and verifies the written document is present.
public void Record_debug_journal_and_replay_it_with_manual_flushing()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        env.DebugJournal = new DebugJournal(debugJouralName, env, true);

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "test-tree");
            tx.Commit();
        }

        using (var writeBatch = new WriteBatch())
        {
            var valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("{ \"title\": \"foo\",\"name\":\"bar\"}"));
            writeBatch.Add("foo", valueBuffer, "test-tree");
            env.Writer.Write(writeBatch);
        }

        // flush once outside of a transaction...
        using (env.Options.AllowManualFlushing())
        {
            env.FlushLogToDataFile();
        }

        // ...and once inside a write transaction
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        using (env.Options.AllowManualFlushing())
        {
            env.FlushLogToDataFile(tx);
            tx.Commit();
        }
    }

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        env.DebugJournal = DebugJournal.FromFile(debugJouralName, env);
        env.DebugJournal.Replay();

        using (var snapshot = env.CreateSnapshot())
        {
            Assert.Equal("{ \"title\": \"foo\",\"name\":\"bar\"}", snapshot.Read("test-tree", "foo").Reader.ToStringValue());
        }
    }
}
// Lazily yields every id stored under the "channel:" prefix in the root tree,
// with the prefix stripped. The transaction stays open for the duration of
// the enumeration (iterator method).
public IEnumerable<string> ScanIds()
{
    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.Read))
    using (var iterator = tx.State.Root.Iterate(tx))
    {
        const string prefix = "channel:";
        iterator.RequiredPrefix = prefix;

        if (iterator.Seek(iterator.RequiredPrefix) == false)
        {
            yield break; // no keys under the prefix
        }

        do
        {
            var key = iterator.CurrentKey.ToString();
            yield return key.Substring(prefix.Length);
        } while (iterator.MoveNext());
    }
}
// Writes 100 identical items across two transactions, reopens the storage,
// and verifies that reading everything back does not trip a checksum error.
public void ShouldNotThrowChecksumMismatch()
{
    var random = new Random(1);
    var buffer = new byte[100];
    random.NextBytes(buffer);

    // overwrite the random content with a constant pattern
    // (presumably to make the payload fully deterministic)
    for (int i = 0; i < 100; i++)
    {
        buffer[i] = 13;
    }

    var options = StorageEnvironmentOptions.ForPath(_dataPath);

    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            for (int i = 0; i < 50; i++)
            {
                tx.Root.Add("items/" + i, new MemoryStream(buffer));
            }
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            for (int i = 50; i < 100; i++)
            {
                tx.Root.Add("items/" + i, new MemoryStream(buffer));
            }
            tx.Commit();
        }
    }

    // reopen the same path with fresh options
    options = StorageEnvironmentOptions.ForPath(_dataPath);

    using (var env = new StorageEnvironment(options))
    using (var tx = env.NewTransaction(TransactionFlags.Read))
    {
        for (int i = 0; i < 100; i++)
        {
            var readResult = tx.Root.Read("items/" + i);
            Assert.NotNull(readResult);

            var memoryStream = new MemoryStream();
            readResult.Reader.CopyTo(memoryStream);
            Assert.Equal(memoryStream.ToArray(), buffer);
        }
    }
}
// A read transaction opened after a commit must observe the committed value,
// even while the (already committed) write transaction is still in scope.
public void ReadTransactionCanReadJustCommittedValue()
{
    var options = StorageEnvironmentOptions.CreateMemoryOnly();
    options.ManualFlushing = true;

    using (var env = new StorageEnvironment(options))
    {
        CreateTrees(env, 1, "tree");

        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            txw.Environment.CreateTree(txw, "tree0").Add("key/1", new MemoryStream());
            txw.Commit();

            using (var txr = env.NewTransaction(TransactionFlags.Read))
            {
                Assert.NotNull(txr.Environment.CreateTree(txr, "tree0").Read("key/1"));
            }
        }
    }
}
// Variant of ReadTransactionCanReadJustCommittedValue resolving trees via
// Environment.State.GetTree (older API surface).
public void ReadTransactionCanReadJustCommittedValue()
{
    var options = StorageEnvironmentOptions.CreateMemoryOnly();
    options.ManualFlushing = true;

    using (var env = new StorageEnvironment(options))
    {
        CreateTrees(env, 1, "tree");

        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            txw.Environment.State.GetTree(txw, "tree0").Add("key/1", new MemoryStream());
            txw.Commit();

            using (var txr = env.NewTransaction(TransactionFlags.Read))
            {
                Assert.NotNull(txr.Environment.State.GetTree(txr, "tree0").Read("key/1"));
            }
        }
    }
}
// Deletes one multi-value entry (keyed by index i) from "key/1" in tree0
// inside its own committed write transaction, and returns the deleted key.
private static string Delete(StorageEnvironment env, int i)
{
    using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        var key = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + i.ToString("D2");

        txw.ReadTree("tree0").MultiDelete("key/1", key);
        txw.Commit();

        return key;
    }
}
// MultiAdd enough 1 KB values to force a page split, then MultiDelete them
// all; neither operation may throw.
public void MultiAdds_And_MultiDeletes_After_Causing_PageSplit_DoNot_Fail(int size)
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        var inputData = new List<byte[]>();
        for (int i = 0; i < size; i++)
        {
            inputData.Add(Encoding.UTF8.GetBytes(RandomString(1024)));
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "foo");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = tx.Environment.State.GetTree(tx, "foo");

            foreach (var buffer in inputData)
            {
                Assert.DoesNotThrow(() => tree.MultiAdd(tx, "ChildTreeKey", new Slice(buffer)));
            }

            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = tx.Environment.State.GetTree(tx, "foo");

            for (int i = 0; i < inputData.Count; i++)
            {
                var buffer = inputData[i];
                Assert.DoesNotThrow(() => tree.MultiDelete(tx, "ChildTreeKey", new Slice(buffer)));
            }

            tx.Commit();
        }
    }
}
// Data stored in a named (sub) tree must survive disposing and reopening the
// environment over the same retained in-memory pagers.
public void DataIsKeptAfterRestartForSubTrees()
{
    using (var pureMemoryPager = StorageEnvironmentOptions.CreateMemoryOnly())
    {
        pureMemoryPager.OwnsPagers = false; // keep pager memory alive across restarts

        using (var env = new StorageEnvironment(pureMemoryPager))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                env.CreateTree(tx, "test");
                tx.Commit();
            }

            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = tx.Environment.CreateTree(tx, "test");
                tree.Add("test", Stream.Null);
                tx.Commit();

                Assert.NotNull(tree.Read("test"));
            }
        }

        using (var env = new StorageEnvironment(pureMemoryPager))
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                // fix: result was previously assigned to an unused local
                env.CreateTree(tx, "test");
                tx.Commit();
            }

            using (var tx = env.NewTransaction(TransactionFlags.Read))
            {
                var tree = tx.Environment.CreateTree(tx, "test");
                Assert.NotNull(tree.Read("test"));
                tx.Commit();
            }
        }
    }
}
// After taking an incremental backup, simply reopening the environment must
// succeed (the reopen itself is the assertion).
public void ShouldCorrectlyLoadAfterRestartIfIncrementalBackupWasDone()
{
    var bytes = new byte[1024];
    new Random().NextBytes(bytes);

    using (var env = new StorageEnvironment(ModifyOptions(StorageEnvironmentOptions.ForPath("Data"))))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "items");
            tx.Commit();
        }

        // 100 transactions x 100 overwrites of the same keys
        for (int j = 0; j < 100; j++)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = tx.ReadTree("items");

                for (int i = 0; i < 100; i++)
                {
                    tree.Add("items/" + i, bytes);
                }

                tx.Commit();
            }
        }

        BackupMethods.Incremental.ToFile(env, IncrementalBackupTestUtils.IncrementalBackupFile(0));
    }

    // restart - must not throw
    using (var env = new StorageEnvironment(ModifyOptions(StorageEnvironmentOptions.ForPath("Data"))))
    {
    }
}
// Writes 10,000 large sequential-id records across 100 transactions (with a
// mid-run flush), validates them, then reopens the environment and validates
// that recovery reproduces the same records.
public void ShouldProperlyRecover()
{
    var sequentialLargeIds = ReadData("non-leaf-page-seq-id-large-values-2.txt");

    // fix: the enumerator is IDisposable and was never disposed
    using (var enumerator = sequentialLargeIds.GetEnumerator())
    {
        if (Directory.Exists("tests"))
            Directory.Delete("tests", true);

        var options = StorageEnvironmentOptions.ForPath("tests");
        options.ManualFlushing = true;

        using (var env = new StorageEnvironment(options))
        {
            for (var transactions = 0; transactions < 100; transactions++)
            {
                using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    for (var i = 0; i < 100; i++)
                    {
                        enumerator.MoveNext();
                        tx.Root.Add(enumerator.Current.Key.ToString("0000000000000000"),
                            new MemoryStream(enumerator.Current.Value));
                    }
                    tx.Commit();
                }

                // flush half-way so recovery has both flushed and journal-only data
                if (transactions == 50)
                    env.FlushLogToDataFile();
            }

            ValidateRecords(env, new List<string> { "Root" },
                sequentialLargeIds.Select(x => x.Key.ToString("0000000000000000")).ToList());
        }

        options = StorageEnvironmentOptions.ForPath("tests");
        options.ManualFlushing = true;

        using (var env = new StorageEnvironment(options))
        {
            ValidateRecords(env, new List<string> { "Root" },
                sequentialLargeIds.Select(x => x.Key.ToString("0000000000000000")).ToList());
        }
    }
}
// A single incremental backup restored into an empty target path must yield
// all 500 items with intact content.
public void CanBackupAndRestoreOnEmptyStorage()
{
    RequireFileBasedPager();

    var rnd = new Random();
    var payload = new byte[8192];
    rnd.NextBytes(payload);

    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        for (int i = 0; i < 500; i++)
        {
            tx.State.Root.Add("items/" + i, new MemoryStream(payload));
        }
        tx.Commit();
    }

    BackupMethods.Incremental.ToFile(Env, _incrementalBackupFile(0));

    var options = StorageEnvironmentOptions.ForPath(_restoredStoragePath);
    options.MaxLogFileSize = Env.Options.MaxLogFileSize;

    BackupMethods.Incremental.Restore(options, new[] { _incrementalBackupFile(0) });

    using (var env = new StorageEnvironment(options))
    using (var tx = env.NewTransaction(TransactionFlags.Read))
    {
        for (int i = 0; i < 500; i++)
        {
            var readResult = tx.State.Root.Read("items/" + i);
            Assert.NotNull(readResult);

            var copied = new MemoryStream();
            readResult.Reader.CopyTo(copied);
            Assert.Equal(copied.ToArray(), payload);
        }
    }
}
// Snapshot isolation: a multi-value iterator opened in a read transaction
// must not see a value written by a later transaction.
public void MultiTreeIteratorShouldBeIsolated1()
{
    var directory = "Test2";
    DeleteDirectory(directory);

    var options = StorageEnvironmentOptions.ForPath(directory);

    using (var env = new StorageEnvironment(options))
    {
        CreateTrees(env, 1, "tree");

        for (var i = 0; i < 10; i++)
            Write(env, i);

        using (var txr = env.NewTransaction(TransactionFlags.Read))
        {
            // committed after txr started, so invisible to it
            var key = Write(env, 10);

            using (var iterator = txr.ReadTree("tree0").MultiRead("key/1"))
            {
                Assert.True(iterator.Seek(Slice.BeforeAllKeys));

                var count = 0;
                do
                {
                    Assert.True(iterator.CurrentKey.ToString() != key,
                        string.Format("Key '{0}' should not be present in multi-iterator", key));
                    count++;
                } while (iterator.MoveNext());

                Assert.Equal(10, count);
            }
        }
    }
}
// Smoke test: repeatedly add (and overwrite, since x * i collides across
// iterations) zero-padded keys with a shared 100-byte payload stream.
public void ShouldWork()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        // fix: a single Random instance; previously new Random() was created per
        // outer iteration, which can reuse the same time-based seed and produce
        // identical "random" payloads
        var random = new Random();

        for (int x = 0; x < 10; x++)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var value = new byte[100];
                random.NextBytes(value);
                var ms = new MemoryStream(value);

                for (long i = 0; i < 100; i++)
                {
                    ms.Position = 0; // reuse the same stream for every Add
                    // x * i repeats across iterations (always 0 when x == 0), so many
                    // adds overwrite the same key - presumably intentional
                    tx.State.Root.Add((x * i).ToString("0000000000000000"), ms);
                }

                tx.Commit();
            }
        }
    }
}
// Snapshot isolation: a multi-value iterator opened in a read transaction
// must still see a value deleted by a later transaction.
public void MultiTreeIteratorShouldBeIsolated2()
{
    var directory = "Test2";
    DeleteDirectory(directory);

    var options = StorageEnvironmentOptions.ForPath(directory);

    using (var env = new StorageEnvironment(options))
    {
        CreateTrees(env, 1, "tree");

        for (var i = 0; i < 11; i++)
            Write(env, i);

        using (var txr = env.NewTransaction(TransactionFlags.Read))
        {
            // deleted after txr started, so still visible to it
            var key = Delete(env, 10);

            using (var iterator = txr.ReadTree("tree0").MultiRead("key/1"))
            {
                Assert.True(iterator.Seek(Slice.BeforeAllKeys));

                var keys = new List<string>();
                do
                {
                    keys.Add(iterator.CurrentKey.ToString());
                } while (iterator.MoveNext());

                Assert.Equal(11, keys.Count);
                Assert.Contains(key, keys);
            }
        }
    }
}
// Compaction must refuse to run against a storage that has incremental
// backups enabled, with a specific error message.
public void CannotCompactStorageIfIncrementalBackupEnabled()
{
    var envOptions = StorageEnvironmentOptions.ForPath(CompactionTestsData);
    envOptions.IncrementalBackupEnabled = true;

    using (var env = new StorageEnvironment(envOptions))
    using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        var tree = env.CreateTree(tx, "records");
        tree.Add("record/1", new byte[9]);
        tree.Add("record/2", new byte[9]);
        tx.Commit();
    }

    var srcOptions = StorageEnvironmentOptions.ForPath(CompactionTestsData);
    srcOptions.IncrementalBackupEnabled = true;

    var invalidOperationException = Assert.Throws<InvalidOperationException>(() =>
        StorageCompaction.Execute(srcOptions,
            (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData)));

    Assert.Equal(StorageCompaction.CannotCompactBecauseOfIncrementalBackup, invalidOperationException.Message);
}
// Compaction must invoke the progress callback with per-tree and per-record
// counts for every tree it copies.
public void ShouldReportProgress()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        var tree = env.CreateTree(tx, "fruits");
        tree.Add("apple", new byte[123]);
        tree.Add("orange", new byte[99]);

        var tree2 = env.CreateTree(tx, "vegetables");
        tree2.Add("carrot", new byte[123]);
        tree2.Add("potato", new byte[99]);

        var tree3 = env.CreateTree(tx, "multi");
        tree3.MultiAdd("fruits", "apple");
        tree3.MultiAdd("fruits", "orange");
        tree3.MultiAdd("vegetables", "carrot");
        tree3.MultiAdd("vegetables", "carrot"); // duplicate value - presumably intentional

        tx.Commit();
    }

    var progressReport = new List<string>();

    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData),
        x => progressReport.Add(string.Format("Copied {0} of {1} records in '{2}' tree. Copied {3} of {4} trees.",
            x.CopiedTreeRecords, x.TotalTreeRecordsCount, x.TreeName, x.CopiedTrees, x.TotalTreeCount)));

    Assert.NotEmpty(progressReport);
    Assert.Contains("Copied 0 of 2 records in 'fruits' tree. Copied 0 of 3 trees.", progressReport);
    Assert.Contains("Copied 2 of 2 records in 'fruits' tree. Copied 1 of 3 trees.", progressReport);
    Assert.Contains("Copied 0 of 2 records in 'multi' tree. Copied 1 of 3 trees.", progressReport);
    Assert.Contains("Copied 2 of 2 records in 'multi' tree. Copied 2 of 3 trees.", progressReport);
    Assert.Contains("Copied 0 of 2 records in 'vegetables' tree. Copied 2 of 3 trees.", progressReport);
    Assert.Contains("Copied 2 of 2 records in 'vegetables' tree. Copied 3 of 3 trees.", progressReport);
}
// Exhaustive compaction check: every record of every plain tree and every
// value of every multi-value tree must be present in the compacted copy.
public void CompactionMustNotLooseAnyData()
{
    var treeNames = new List<string>();
    var multiValueTreeNames = new List<string>();

    var random = new Random();

    var value1 = new byte[random.Next(1024 * 1024 * 2)];
    var value2 = new byte[random.Next(1024 * 1024 * 2)];
    random.NextBytes(value1);
    random.NextBytes(value2);

    const int treeCount = 5;
    const int recordCount = 6;
    const int multiValueTreeCount = 7;
    const int multiValueRecordsCount = 4;
    const int multiValuesCount = 3;

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    {
        // plain trees: alternate value1 / value2 per record index
        for (int i = 0; i < treeCount; i++)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                string name = "tree/" + i;
                treeNames.Add(name);

                var tree = env.State.GetTree(tx, name);

                for (int j = 0; j < recordCount; j++)
                {
                    tree.Add(string.Format("{0}/items/{1}", name, j), j % 2 == 0 ? value1 : value2);
                }

                tx.Commit();
            }
        }

        // multi-value trees: multiValuesCount values per record
        for (int i = 0; i < multiValueTreeCount; i++)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                var name = "multiValueTree/" + i;
                multiValueTreeNames.Add(name);

                var tree = env.CreateTree(tx, name);

                for (int j = 0; j < multiValueRecordsCount; j++)
                {
                    for (int k = 0; k < multiValuesCount; k++)
                    {
                        tree.MultiAdd("record/" + j, "value/" + k);
                    }
                }

                tx.Commit();
            }
        }
    }

    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData));

    using (var compacted = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactedData)))
    using (var tx = compacted.NewTransaction(TransactionFlags.Read))
    {
        foreach (var treeName in treeNames)
        {
            var tree = compacted.State.GetTree(tx, treeName);

            for (int i = 0; i < recordCount; i++)
            {
                var readResult = tree.Read(string.Format("{0}/items/{1}", treeName, i));
                Assert.NotNull(readResult);

                if (i % 2 == 0)
                {
                    var readBytes = new byte[value1.Length];
                    readResult.Reader.Read(readBytes, 0, readBytes.Length);
                    Assert.Equal(value1, readBytes);
                }
                else
                {
                    var readBytes = new byte[value2.Length];
                    readResult.Reader.Read(readBytes, 0, readBytes.Length);
                    Assert.Equal(value2, readBytes);
                }
            }
        }

        foreach (var treeName in multiValueTreeNames)
        {
            var tree = compacted.State.GetTree(tx, treeName);

            for (int i = 0; i < multiValueRecordsCount; i++)
            {
                var multiRead = tree.MultiRead("record/" + i);
                Assert.True(multiRead.Seek(Slice.BeforeAllKeys));

                int count = 0;
                do
                {
                    Assert.Equal("value/" + count, multiRead.CurrentKey.ToString());
                    count++;
                } while (multiRead.MoveNext());

                Assert.Equal(multiValuesCount, count);
            }
        }
    }
}
// Duplicate of ShouldOccupyLessSpace: after deleting roughly half the
// records, the compacted copy must be smaller on disk than the original.
public void ShouldOccupyLessSpace()
{
    var r = new Random();

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "records");

            for (int i = 0; i < 100; i++)
            {
                var bytes = new byte[r.Next(10, 2 * 1024 * 1024)];
                r.NextBytes(bytes);
                tree.Add("record/" + i, bytes);
            }

            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = env.CreateTree(tx, "records");

            // delete ~50 random records (duplicate picks are harmless)
            for (int i = 0; i < 50; i++)
            {
                tree.Delete("record/" + r.Next(0, 100));
            }

            tx.Commit();
        }
    }

    var oldSize = GetDirSize(new DirectoryInfo(CompactionTestsData));

    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData));

    var newSize = GetDirSize(new DirectoryInfo(CompactedData));

    Assert.True(newSize < oldSize,
        string.Format("Old size: {0:#,#;;0} MB, new size {1:#,#;;0} MB", oldSize / 1024 / 1024, newSize / 1024 / 1024));
}
// Recovery must succeed when the journal holds a committed transaction
// followed by an uncommitted one (the second tx deliberately never commits).
public void StorageRecoveryShouldWorkWhenThereAreCommitedAndUncommitedTransactions()
{
    var path = "test2.data";
    DeleteDirectory(path);

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }

        // intentionally abandoned: disposed without Commit()
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            for (var i = 0; i < 10000; i++)
            {
                tx.Environment.State.GetTree(tx, "tree").Add("a" + i, new MemoryStream());
            }
        }
    }

    // reopening must recover cleanly - this is the assertion
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path)))
    {
    }

    DeleteDirectory(path);
}