public void Can_make_multiple_min_inc_backups_and_then_restore()
{
    const int UserCount = 5000;
    _tempDir = Guid.NewGuid().ToString();
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;
    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        int index = 0;
        for (int xi = 0; xi < 5; xi++)
        {
            for (int yi = 0; yi < 2; yi++)
            {
                using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var tree = envToSnapshot.CreateTree(tx, "test");

                    for (int i = 0; i < UserCount / 10; i++)
                    {
                        tree.Add("users/" + index, "john doe/" + index);
                        index++;
                    }

                    tx.Commit();
                }
            }

            var snapshotWriter = new MinimalIncrementalBackup();
            snapshotWriter.ToFile(envToSnapshot, Path.Combine(_tempDir, xi + ".snapshot"));
        }
    }

    var incremental = new IncrementalBackup();
    var restoredOptions = StorageEnvironmentOptions.ForPath(Path.Combine(_tempDir, "restored"));
    incremental.Restore(restoredOptions, Enumerable.Range(0, 5).Select(i => Path.Combine(_tempDir, i + ".snapshot")));

    using (var snapshotRestoreEnv = new StorageEnvironment(restoredOptions))
    {
        using (var tx = snapshotRestoreEnv.NewTransaction(TransactionFlags.Read))
        {
            var tree = tx.ReadTree("test");
            Assert.NotNull(tree);

            for (int i = 0; i < UserCount; i++)
            {
                var readResult = tree.Read("users/" + i);
                Assert.NotNull(readResult);
                Assert.Equal("john doe/" + i, readResult.Reader.ToStringValue());
            }
        }
    }
}
public void Can_split_merged_transaction_to_multiple_tx()
{
    _tempDir = Guid.NewGuid().ToString();
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;
    storageEnvironmentOptions.MaxNumberOfPagesInMergedTransaction = 8;
    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        for (int xi = 0; xi < 100; xi++)
        {
            using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = envToSnapshot.CreateTree(tx, "test");

                for (int i = 0; i < 1000; i++)
                {
                    tree.Add("users/" + i, "john doe/" + i);
                }

                tx.Commit();
            }
        }

        var snapshotWriter = new MinimalIncrementalBackup();
        var backupPath = Path.Combine(_tempDir, "1.snapshot");
        snapshotWriter.ToFile(envToSnapshot, backupPath);

        using (var stream = File.OpenRead(backupPath))
        using (var zip = new ZipArchive(stream, ZipArchiveMode.Read))
        {
            Assert.True(zip.Entries.Count > 1);
        }
    }
}
public void Mixed_small_and_overflow_changes()
{
    _tempDir = Guid.NewGuid().ToString();
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;
    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = envToSnapshot.CreateTree(tx, "test");
            tree.Add("users/1", "john doe");
            tree.Add("users/2", new String('a', 5000));

            tx.Commit();
        }

        using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
        {
            var tree = envToSnapshot.CreateTree(tx, "test");
            tree.Add("users/2", "jane darling");
            tree.Add("users/3", new String('b', 5000));

            tx.Commit();
        }

        var snapshotWriter = new MinimalIncrementalBackup();
        snapshotWriter.ToFile(envToSnapshot, Path.Combine(_tempDir, "1.snapshot"));

        var restoredOptions = StorageEnvironmentOptions.ForPath(Path.Combine(_tempDir, "restored"));
        new IncrementalBackup().Restore(restoredOptions, new[] { Path.Combine(_tempDir, "1.snapshot") });

        using (var snapshotRestoreEnv = new StorageEnvironment(restoredOptions))
        {
            using (var tx = snapshotRestoreEnv.NewTransaction(TransactionFlags.Read))
            {
                var tree = tx.ReadTree("test");
                Assert.NotNull(tree);

                Assert.Equal("john doe", tree.Read("users/1").Reader.ToStringValue());
                Assert.Equal("jane darling", tree.Read("users/2").Reader.ToStringValue());
                Assert.Equal(new String('b', 5000), tree.Read("users/3").Reader.ToStringValue());
            }
        }
    }
}
public unsafe void Min_inc_backup_is_smaller_than_normal_inc_backup()
{
    const int UserCount = 5000;
    _tempDir = Guid.NewGuid().ToString();
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;
    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        for (int xi = 0; xi < 10; xi++)
        {
            using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
            {
                var tree = envToSnapshot.CreateTree(tx, "test");

                for (int i = 0; i < UserCount / 10; i++)
                {
                    tree.Add("users/" + i, "john doe/" + i);
                }

                tx.Commit();
            }
        }

        var incrementalBackupInfo = envToSnapshot.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

        var snapshotWriter = new MinimalIncrementalBackup();
        snapshotWriter.ToFile(envToSnapshot, Path.Combine(_tempDir, "1.snapshot"));

        // restore the incremental backup info, so the regular incremental backup below
        // covers the same range of transactions as the minimal one we just wrote
        envToSnapshot.HeaderAccessor.Modify(ptr => ptr->IncrementalBackup = incrementalBackupInfo);

        var incBackup = new IncrementalBackup();
        incBackup.ToFile(envToSnapshot, Path.Combine(_tempDir, "2.snapshot"));

        var incLen = new FileInfo(Path.Combine(_tempDir, "2.snapshot")).Length;
        var minIncLen = new FileInfo(Path.Combine(_tempDir, "1.snapshot")).Length;

        Assert.True(incLen > minIncLen);
    }
}
public void Can_write_minimal_incremental_backup()
{
    _tempDir = Guid.NewGuid().ToString();
    Directory.CreateDirectory(_tempDir);

    var snapshotWriter = new MinimalIncrementalBackup();
    snapshotWriter.ToFile(Env, Path.Combine(_tempDir, "1.snapshot"));

    Assert.True(File.Exists(Path.Combine(_tempDir, "1.snapshot")), "Even an empty minimal backup should create a file");

    var snapshotFileInfo = new FileInfo(Path.Combine(_tempDir, "1.snapshot"));
    Assert.True(snapshotFileInfo.Length > 0, "Even an empty minimal backup should create a file with some information");
}
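// The tests above rely on a shared Env instance and a per-test _tempDir whose declaring fixture is
// not shown here. The sketch below is only an illustration of the cleanup such a fixture might
// perform; the class name, the IDisposable-based teardown, and the idea that Env comes from a base
// test class are assumptions, not taken from the source. It assumes System and System.IO are imported.
public class MinimalIncrementalBackupTestsFixtureSketch : IDisposable
{
    // Assigned by each test before it opens a StorageEnvironment, e.g. Guid.NewGuid().ToString().
    protected string _tempDir;

    public void Dispose()
    {
        // Best-effort removal of the per-test directory so repeated runs start from a clean state.
        if (_tempDir != null && Directory.Exists(_tempDir))
            Directory.Delete(_tempDir, recursive: true);
    }
}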
public void CreateSnapshot(long index, long term, ManualResetEventSlim allowFurtherModifications)
{
    var files = Directory.GetFiles(_storageEnvironment.Options.BasePath, "*.Snapshot");
    Array.Sort(files, StringComparer.OrdinalIgnoreCase); // make sure we get them in sorted order

    if (files.Any() == false)
    {
        // we have no snapshot files, so this is the first time we create a snapshot;
        // we handle that by asking Voron to create a full backup
        DoFullBackup(index, term, allowFurtherModifications);
        return;
    }

    var fullBackupIndex = GetFullBackupIndex(files);
    if (fullBackupIndex == -1)
    {
        // this shouldn't happen, we must always have at least one full backup.
        // maybe the user deleted it? We'll do a full backup here to compensate
        DoFullBackup(index, term, allowFurtherModifications);
        return;
    }

    string lastFullBackup = files[fullBackupIndex];
    var fullBackupSize = new FileInfo(lastFullBackup).Length;
    var incrementalBackupsSize = files.Skip(fullBackupIndex + 1).Sum(f => new FileInfo(f).Length);

    // now we need to decide whether to do a full or an incremental backup. Incremental backups stop
    // making sense once they take more space than the full backup; our cutoff point is when their
    // total size exceeds 50% of the full backup size.
    // If the full backup is 1 GB and we have 25 incremental backups totaling 600 MB, we need to
    // transfer 1.6 GB to restore. If we generate a new full backup, we only need to transfer 1 GB.
    if (incrementalBackupsSize / 2 > fullBackupSize)
    {
        DoFullBackup(index, term, allowFurtherModifications);
        return;
    }

    // delete snapshots older than the current full backup
    DeleteOldSnapshots(files.Take(fullBackupIndex - 1));

    var incrementalBackup = new MinimalIncrementalBackup();
    incrementalBackup.ToFile(_storageEnvironment,
        Path.Combine(_storageEnvironment.Options.BasePath, string.Format("Inc-{0:D19}-{1:D19}.Snapshot", index, term)),
        infoNotify: Console.WriteLine,
        backupStarted: allowFurtherModifications.Set);
}
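// CreateSnapshot calls GetFullBackupIndex, DoFullBackup and DeleteOldSnapshots, none of which are
// shown here. The sketch below illustrates one way the two simpler helpers could look; the "Full-"
// file-name prefix, the exact signatures, and the error handling are assumptions, not taken from
// the source. It assumes System, System.Collections.Generic, System.IO and System.Linq are imported.
private static int GetFullBackupIndex(string[] sortedSnapshotFiles)
{
    // Walk backwards so we find the most recent full backup in the sorted file list.
    for (int i = sortedSnapshotFiles.Length - 1; i >= 0; i--)
    {
        if (Path.GetFileName(sortedSnapshotFiles[i]).StartsWith("Full-", StringComparison.OrdinalIgnoreCase))
            return i;
    }
    return -1; // no full backup found; the caller compensates by creating one
}

private static void DeleteOldSnapshots(IEnumerable<string> files)
{
    foreach (var file in files)
    {
        try
        {
            File.Delete(file);
        }
        catch (IOException)
        {
            // a snapshot that is still being read (e.g. streamed to a follower) can stay behind;
            // it will be picked up on a later cleanup pass
        }
    }
}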