/// <summary>
/// Deliberately corrupts a single byte of the given page in a journal file, to
/// simulate on-disk corruption in recovery tests. The storage options are
/// disposed and reopened first so the environment releases its journal handle.
/// </summary>
/// <param name="journal">Number of the journal file to corrupt.</param>
/// <param name="page">Zero-based page index within the journal.</param>
/// <param name="pos">Byte offset within the page to overwrite.</param>
private void CorruptPage(long journal, long page, int pos)
{
    _options.Dispose();
    _options = StorageEnvironmentOptions.ForPath("test.data");
    Configure(_options);

    var journalPath = Path.Combine("test.data", StorageEnvironmentOptions.JournalName(journal));
    using (var journalStream = new FileStream(journalPath, FileMode.Open, FileAccess.ReadWrite,
        FileShare.ReadWrite | FileShare.Delete))
    {
        var pageOffset = page * AbstractPager.PageSize;
        var pageBytes = new byte[AbstractPager.PageSize];

        // Read the full page, looping because FileStream.Read may return fewer
        // bytes than requested.
        journalStream.Position = pageOffset;
        var filled = 0;
        while (filled < pageBytes.Length)
        {
            var read = journalStream.Read(pageBytes, filled, pageBytes.Length - filled);
            if (read == 0)
                break; // end of file reached before a full page was read

            filled += read;
        }

        // Flip one byte to a known garbage value and write the page back in place.
        pageBytes[pos] = 42;
        journalStream.Position = pageOffset;
        journalStream.Write(pageBytes, 0, pageBytes.Length);
    }
}
/// <summary>
/// Resolves the journal file with the given number for an incremental backup:
/// first from the journals the environment currently holds open, otherwise by
/// reopening the journal from disk. The returned <see cref="JournalFile"/> has
/// had AddRef() called on it — the caller owns one reference and must Release() it.
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown with an explanatory message when the very first journal is missing on
/// the very first incremental backup — the usual cause being that
/// IncrementalBackupEnabled was turned on only after the storage was initialized.
/// </exception>
internal static JournalFile GetJournalFile(StorageEnvironment env, long journalNum, IncrementalBackupInfo backupInfo)
{
    var journalFile = env.Journal.Files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
    if (journalFile != null)
    {
        journalFile.AddRef();
        return (journalFile);
    }
    try
    {
        // Journal is no longer held by the environment — reopen it from disk.
        // The pager is only needed to size the writer; it is disposed right away.
        using (var pager = env.Options.OpenJournalPager(journalNum))
        {
            long journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * env.Options.PageSize);
            journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
            journalFile.AddRef();
            return (journalFile);
        }
    }
    catch (Exception e)
    {
        // NOTE(review): this relies on matching the exception's message text
        // ("No such journal") rather than a dedicated exception type — fragile
        // if the message ever changes, but kept as the established convention here.
        if (backupInfo.LastBackedUpJournal == -1 && journalNum == 0 && e.Message.StartsWith("No such journal", StringComparison.Ordinal))
        {
            throw new InvalidOperationException("The first incremental backup creation failed because the first journal file " + StorageEnvironmentOptions.JournalName(journalNum) + " was not found. " + "Did you turn on the incremental backup feature after initializing the storage? " + "In order to create backups incrementally the storage must be created with IncrementalBackupEnabled option set to 'true'.", e);
        }
        throw;
    }
}
/// <summary>
/// Overwrites one 4KB block of a journal file with garbage (every byte set to 42)
/// to simulate on-disk corruption for recovery tests. The storage options are
/// disposed and reopened first so the environment releases its journal handle.
/// </summary>
/// <param name="journal">Number of the journal file to corrupt.</param>
/// <param name="posOf4KbInJrnl">Zero-based index of the 4KB block to trash.</param>
private void CorruptJournal(long journal, long posOf4KbInJrnl)
{
    Options.Dispose();
    Options = StorageEnvironmentOptions.ForPath(DataDir);
    Configure(Options);

    var journalPath = Path.Combine(DataDir, StorageEnvironmentOptions.JournalName(journal));
    using (var journalStream = new FileStream(journalPath, FileMode.Open, FileAccess.ReadWrite,
        FileShare.ReadWrite | FileShare.Delete))
    {
        var blockOffset = posOf4KbInJrnl * Constants.Size.Kilobyte * 4;
        var block = new byte[Constants.Size.Kilobyte * 4];

        // Read the full 4KB block, looping because FileStream.Read may return
        // fewer bytes than requested.
        journalStream.Position = blockOffset;
        var filled = 0;
        while (filled < block.Length)
        {
            var read = journalStream.Read(block, filled, block.Length - filled);
            if (read == 0)
                break; // end of file reached before the block was fully read

            filled += read;
        }

        // Trash every byte of the block, then write it back in place.
        for (var i = 0; i < block.Length; i++)
            block[i] = 42;

        journalStream.Position = blockOffset;
        journalStream.Write(block, 0, block.Length);
    }
}
/// <summary>
/// Performs an incremental backup of the environment's journal files into a new
/// zip archive at <paramref name="backupPath"/>, copying only the pages written
/// since the previous incremental backup (tracked in the storage header).
/// </summary>
/// <param name="env">The environment to back up; IncrementalBackupEnabled must be set.</param>
/// <param name="backupPath">Destination file; created/overwritten.</param>
/// <param name="compression">Zip compression level for each journal entry.</param>
/// <param name="infoNotify">Optional progress callback; defaults to a no-op.</param>
/// <param name="backupStarted">Optional callback invoked once the read snapshot is taken.</param>
/// <returns>The number of journal pages written into the backup.</returns>
/// <exception cref="InvalidOperationException">If incremental backup is disabled.</exception>
public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal, Action<string> infoNotify = null, Action backupStarted = null)
{
    infoNotify = infoNotify ?? (s => { });
    if (env.Options.IncrementalBackupEnabled == false)
    {
        throw new InvalidOperationException("Incremental backup is disabled for this storage");
    }
    long numberOfBackedUpPages = 0;
    var copier = new DataCopier(env.Options.PageSize * 16);
    var backupSuccess = true;
    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;
    using (var file = new FileStream(backupPath, FileMode.Create))
    {
        using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
        {
            IncrementalBackupInfo backupInfo;
            // A write transaction is opened only to get a stable snapshot of the
            // header and the current journal write position; it is never committed.
            using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite))
            {
                backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);
                if (env.Journal.CurrentFile != null)
                {
                    lastWrittenLogFile = env.Journal.CurrentFile.Number;
                    lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
                }
                // txw.Commit(); intentionally not committing
            }
            // The read transaction keeps the snapshot view alive for the copy.
            using (env.NewLowLevelTransaction(TransactionFlags.Read))
            {
                if (backupStarted != null)
                {
                    backupStarted(); // we let caller know that we have started the backup
                }
                var usedJournals = new List<JournalFile>();
                try
                {
                    long lastBackedUpPage = -1;
                    long lastBackedUpFile = -1;
                    var firstJournalToBackup = backupInfo.LastBackedUpJournal;
                    if (firstJournalToBackup == -1)
                    {
                        firstJournalToBackup = 0; // first time that we do incremental backup
                    }
                    for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                    {
                        var num = journalNum;
                        var journalFile = GetJournalFile(env, journalNum, backupInfo);
                        journalFile.AddRef();
                        usedJournals.Add(journalFile);
                        var startBackupAt = 0L;
                        long pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                        // Resume mid-journal if the last backup stopped inside this file.
                        if (journalFile.Number == backupInfo.LastBackedUpJournal)
                        {
                            startBackupAt = backupInfo.LastBackedUpJournalPage + 1;
                            pagesToCopy -= startBackupAt;
                        }
                        if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocatedPages) // nothing to do here
                        {
                            continue;
                        }
                        var part = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalNum), compression);
                        Debug.Assert(part != null);
                        // Don't copy beyond the snapshot's write position in the active journal.
                        if (journalFile.Number == lastWrittenLogFile)
                        {
                            pagesToCopy -= (journalFile.JournalWriter.NumberOfAllocatedPages - lastWrittenLogPage);
                        }
                        using (var stream = part.Open())
                        {
                            copier.ToStream(env, journalFile, startBackupAt, pagesToCopy, stream);
                            infoNotify(string.Format("Voron Incr copy journal number {0}", num));
                        }
                        lastBackedUpFile = journalFile.Number;
                        if (journalFile.Number == backupInfo.LastCreatedJournal)
                        {
                            lastBackedUpPage = startBackupAt + pagesToCopy - 1;
                            // we used all of this file, so the next backup should start in the next file
                            if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocatedPages - 1))
                            {
                                lastBackedUpPage = -1;
                                lastBackedUpFile++;
                            }
                        }
                        numberOfBackedUpPages += pagesToCopy;
                    }
                    // Persist where the next incremental backup should resume from.
                    env.HeaderAccessor.Modify(header =>
                    {
                        header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile;
                        header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                    });
                }
                catch (Exception)
                {
                    backupSuccess = false;
                    throw;
                }
                finally
                {
                    // Always release our refs; mark fully-backed-up, fully-synced
                    // journals for deletion only when the backup succeeded.
                    var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;
                    foreach (var jrnl in usedJournals)
                    {
                        if (backupSuccess) // if backup succeeded we can remove journals
                        {
                            if (jrnl.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                                jrnl.Number < lastSyncedJournal) // prevent deletion of journals that aren't synced with the data file
                            {
                                jrnl.DeleteOnClose = true;
                            }
                        }
                        jrnl.Release();
                    }
                }
                infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages));
            }
        }
        file.Flush(true); // make sure that this is actually persisted fully to disk
        return (numberOfBackedUpPages);
    }
}
/// <summary>
/// Core of the incremental backup: copies all 4KB journal blocks written since
/// the previous incremental backup into <paramref name="package"/> under
/// <paramref name="basePath"/>, then advances the backup position persisted in
/// the storage header.
/// </summary>
/// <returns>The number of 4KB blocks written into the backup (reported as "pages").</returns>
private static long Incremental_Backup(StorageEnvironment env, CompressionLevel compression, Action<string> infoNotify, Action backupStarted, ZipArchive package, string basePath, DataCopier copier)
{
    long numberOfBackedUpPages = 0;
    long lastWrittenLogFile = -1;
    long lastWrittenLog4kb = -1;
    bool backupSuccess = true;
    IncrementalBackupInfo backupInfo;
    JournalInfo journalInfo;
    var transactionPersistentContext = new TransactionPersistentContext(true);
    // A write transaction is opened only to snapshot the header and the current
    // journal write position safely; it is never committed.
    using (var txw = env.NewLowLevelTransaction(transactionPersistentContext, TransactionFlags.ReadWrite))
    {
        backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);
        journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
        if (env.Journal.CurrentFile != null)
        {
            lastWrittenLogFile = env.Journal.CurrentFile.Number;
            lastWrittenLog4kb = env.Journal.CurrentFile.WritePosIn4KbPosition;
        }
        // txw.Commit(); intentionally not committing
    }
    // The read transaction keeps the snapshot view alive while copying.
    using (env.NewLowLevelTransaction(transactionPersistentContext, TransactionFlags.Read))
    {
        backupStarted?.Invoke(); // we let caller know that we have started the backup
        var usedJournals = new List<JournalFile>();
        try
        {
            long lastBackedUpPage = -1;
            long lastBackedUpFile = -1;
            var firstJournalToBackup = backupInfo.LastBackedUpJournal;
            if (firstJournalToBackup == -1)
            {
                firstJournalToBackup = 0; // first time that we do incremental backup
            }
            for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
            {
                var num = journalNum;
                var journalFile = GetJournalFile(env, journalNum, backupInfo, journalInfo);
                journalFile.AddRef();
                usedJournals.Add(journalFile);
                var startBackupAt = 0L;
                long numberOf4KbsToCopy = journalFile.JournalWriter.NumberOfAllocated4Kb;
                // Resume mid-journal if the last backup stopped inside this file.
                if (journalFile.Number == backupInfo.LastBackedUpJournal)
                {
                    startBackupAt = backupInfo.LastBackedUpJournalPage + 1;
                    numberOf4KbsToCopy -= startBackupAt;
                }
                if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocated4Kb) // nothing to do here
                {
                    continue;
                }
                var part = package.CreateEntry(Path.Combine(basePath, StorageEnvironmentOptions.JournalName(journalNum)), compression);
                Debug.Assert(part != null);
                // Don't copy beyond the snapshot's write position in the active journal.
                if (journalFile.Number == lastWrittenLogFile)
                {
                    numberOf4KbsToCopy -= (journalFile.JournalWriter.NumberOfAllocated4Kb - lastWrittenLog4kb);
                }
                using (var stream = part.Open())
                {
                    copier.ToStream(env, journalFile, startBackupAt, numberOf4KbsToCopy, stream);
                    infoNotify(string.Format("Voron Incr copy journal number {0}", num));
                }
                lastBackedUpFile = journalFile.Number;
                if (journalFile.Number == backupInfo.LastCreatedJournal)
                {
                    lastBackedUpPage = startBackupAt + numberOf4KbsToCopy - 1;
                    // we used all of this file, so the next backup should start in the next file
                    if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocated4Kb - 1))
                    {
                        lastBackedUpPage = -1;
                        lastBackedUpFile++;
                    }
                }
                numberOfBackedUpPages += numberOf4KbsToCopy;
            }
            // Persist where the next incremental backup should resume from.
            env.HeaderAccessor.Modify(header =>
            {
                header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile;
                header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
            });
        }
        catch (Exception)
        {
            backupSuccess = false;
            throw;
        }
        finally
        {
            // Always release our refs; mark fully-backed-up, fully-synced
            // journals for deletion only when the backup succeeded.
            var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;
            foreach (var jrnl in usedJournals)
            {
                if (backupSuccess) // if backup succeeded we can remove journals
                {
                    if (jrnl.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                        jrnl.Number < lastSyncedJournal) // prevent deletion of journals that aren't synced with the data file
                    {
                        jrnl.DeleteOnClose = true;
                    }
                }
                jrnl.Release();
            }
        }
        infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages));
    }
    return (numberOfBackedUpPages);
}
// Verifies that attempting the very first incremental backup on a storage that
// was initialized WITHOUT IncrementalBackupEnabled fails with a clear,
// actionable error message (journal 0 was never created, so it can't be found).
public void ShouldExplicitlyErrorThatTurningOnIncrementalBackupAfterInitializingTheStorageIsntAllowed()
{
    RequireFileBasedPager();
    var random = new Random();
    var buffer = new byte[4000];
    random.NextBytes(buffer);
    // Write enough data that journals get created, flushed and synced away.
    for (int i = 0; i < 300; i++)
    {
        using (var tx = Env.WriteTransaction())
        {
            var tree = tx.CreateTree("foo");
            tree.Add("items/" + i, new MemoryStream(buffer));
            tx.Commit();
        }
    }
    Env.FlushLogToDataFile();
    using (var op = new WriteAheadJournal.JournalApplicator.SyncOperation(Env.Journal.Applicator))
    {
        op.SyncDataFile();
    }
    // Turning the flag on only now is exactly the misuse being tested.
    Env.Options.IncrementalBackupEnabled = true;
    var exception = Assert.Throws<InvalidOperationException>(() => BackupMethods.Incremental.ToFile(Env, _incrementalBackupTestUtils.IncrementalBackupFile(0)));
    // The message must name the missing journal and explain the remedy.
    Assert.Equal("The first incremental backup creation failed because the first journal file " + StorageEnvironmentOptions.JournalName(0) + " was not found. Did you turn on the incremental backup feature after initializing the storage? In order to create backups incrementally the storage must be created with IncrementalBackupEnabled option set to 'true'.", exception.Message);
}
/// <summary>
/// Performs a full backup: copies the storage headers, the data file, and every
/// live journal into <paramref name="package"/> under <paramref name="basePath"/>.
/// A never-committed write transaction plus a long-lived read transaction give a
/// consistent snapshot for the whole copy.
/// </summary>
private static void Backup(StorageEnvironment env, CompressionLevel compression, Action<string> infoNotify, Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath, DataCopier copier)
{
    var usedJournals = new List<JournalFile>();
    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;
    LowLevelTransaction txr = null;
    try
    {
        long allocatedPages;
        var writePesistentContext = new TransactionPersistentContext(true);
        var readPesistentContext = new TransactionPersistentContext(true);
        using (var txw = env.NewLowLevelTransaction(writePesistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
        {
            txr = env.NewLowLevelTransaction(readPesistentContext, TransactionFlags.Read); // now have snapshot view
            allocatedPages = dataPager.NumberOfAllocatedPages;
            Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
            infoNotify("Voron copy headers for " + basePath);
            VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);
            // journal files snapshot
            var files = env.Journal.Files; // thread safety copy
            JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
            // Collect (and AddRef) every journal named in the header; reopen from
            // disk any journal the environment no longer holds in memory.
            for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1; journalNum <= journalInfo.CurrentJournal; journalNum++)
            {
                var journalFile = files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                if (journalFile == null)
                {
                    long journalSize;
                    using (var pager = env.Options.OpenJournalPager(journalNum))
                    {
                        journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * env.Options.PageSize);
                    }
                    journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                }
                journalFile.AddRef();
                usedJournals.Add(journalFile);
            }
            if (env.Journal.CurrentFile != null)
            {
                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
            }
            // txw.Commit(); intentionally not committing
        }
        backupStarted?.Invoke();
        // data file backup
        var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
        Debug.Assert(dataPart != null);
        if (allocatedPages > 0) // only false if dataPager is still empty at backup start
        {
            using (var dataStream = dataPart.Open())
            {
                // now can copy everything else
                copier.ToStream(dataPager, 0, allocatedPages, dataStream);
            }
        }
        try
        {
            foreach (JournalFile journalFile in usedJournals)
            {
                var entryName = Path.Combine(basePath, StorageEnvironmentOptions.JournalName(journalFile.Number));
                var journalPart = package.CreateEntry(entryName, compression);
                Debug.Assert(journalPart != null);
                long pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                // Only copy up to the snapshot's write position in the active journal.
                if (journalFile.Number == lastWrittenLogFile)
                {
                    pagesToCopy = lastWrittenLogPage + 1;
                }
                using (var stream = journalPart.Open())
                {
                    copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                    infoNotify(string.Format("Voron copy journal file {0}", entryName));
                }
            }
        }
        finally
        {
            // Balance the AddRef taken while collecting the journal snapshot.
            foreach (var journalFile in usedJournals)
            {
                journalFile.Release();
            }
        }
    }
    finally
    {
        txr?.Dispose();
    }
}
/// <summary>
/// Performs an incremental backup of the journal files into a new zip archive at
/// <paramref name="backupPath"/>, copying only pages written since the previous
/// incremental backup (tracked in the storage header).
/// </summary>
/// <returns>The number of journal pages written into the backup.</returns>
/// <exception cref="InvalidOperationException">
/// If incremental backup is disabled, or a recovered journal's size reaches the
/// configured maximum log file size.
/// </exception>
public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal)
{
    if (env.Options.IncrementalBackupEnabled == false)
    {
        throw new InvalidOperationException("Incremental backup is disabled for this storage");
    }
    long numberOfBackedUpPages = 0;
    var copier = new DataCopier(AbstractPager.PageSize * 16);
    var backupSuccess = true;
    IncrementalBackupInfo backupInfo;
    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;
    // A write transaction is opened only to snapshot the header and the current
    // journal write position safely; it is never committed.
    using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);
        if (env.Journal.CurrentFile != null)
        {
            lastWrittenLogFile = env.Journal.CurrentFile.Number;
            lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
        }
        // txw.Commit(); intentionally not committing
    }
    // The read transaction keeps the snapshot view alive while copying.
    using (env.NewTransaction(TransactionFlags.Read))
    {
        var usedJournals = new List<JournalFile>();
        try
        {
            using (var file = new FileStream(backupPath, FileMode.Create))
            using (var package = new ZipArchive(file, ZipArchiveMode.Create))
            {
                long lastBackedUpPage = -1;
                long lastBackedUpFile = -1;
                var firstJournalToBackup = backupInfo.LastBackedUpJournal;
                if (firstJournalToBackup == -1)
                {
                    firstJournalToBackup = 0; // first time that we do incremental backup
                }
                for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                {
                    var journalFile = env.Journal.Files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                    if (journalFile == null)
                    {
                        // Journal no longer held in memory — reopen it from disk.
                        long journalSize;
                        using (var pager = env.Options.OpenJournalPager(journalNum))
                        {
                            journalSize = Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                            if (journalSize >= env.Options.MaxLogFileSize) // can't set for more than the max log file size
                            {
                                throw new InvalidOperationException("Recovered journal size is " + journalSize + ", while the maximum journal size can be " + env.Options.MaxLogFileSize);
                            }
                        }
                        journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                    }
                    journalFile.AddRef();
                    usedJournals.Add(journalFile);
                    var startBackupAt = 0L;
                    var pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                    // Resume mid-journal if the last backup stopped inside this file.
                    if (journalFile.Number == backupInfo.LastBackedUpJournal)
                    {
                        startBackupAt = backupInfo.LastBackedUpJournalPage + 1;
                        pagesToCopy -= startBackupAt;
                    }
                    if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocatedPages) // nothing to do here
                    {
                        continue;
                    }
                    var part = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalNum), compression);
                    Debug.Assert(part != null);
                    // Don't copy beyond the snapshot's write position in the active journal.
                    if (journalFile.Number == lastWrittenLogFile)
                    {
                        pagesToCopy -= (journalFile.JournalWriter.NumberOfAllocatedPages - lastWrittenLogPage);
                    }
                    using (var stream = part.Open())
                    {
                        copier.ToStream(journalFile, startBackupAt, pagesToCopy, stream);
                    }
                    lastBackedUpFile = journalFile.Number;
                    if (journalFile.Number == backupInfo.LastCreatedJournal)
                    {
                        lastBackedUpPage = startBackupAt + pagesToCopy - 1;
                        // we used all of this file, so the next backup should start in the next file
                        if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocatedPages - 1))
                        {
                            lastBackedUpPage = -1;
                            lastBackedUpFile++;
                        }
                    }
                    numberOfBackedUpPages += pagesToCopy;
                }
                //Debug.Assert(lastBackedUpPage != -1);
                // Persist where the next incremental backup should resume from.
                env.HeaderAccessor.Modify(header =>
                {
                    header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile;
                    header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                });
            }
        }
        catch (Exception)
        {
            backupSuccess = false;
            throw;
        }
        finally
        {
            // Always release our refs; on success mark non-current journals for deletion.
            foreach (var file in usedJournals)
            {
                if (backupSuccess) // if backup succeeded we can remove journals
                {
                    if (file.Number != lastWrittenLogFile) // prevent deletion of the current journal
                    {
                        file.DeleteOnClose = true;
                    }
                }
                file.Release();
            }
        }
        return (numberOfBackedUpPages);
    }
}
/// <summary>
/// Performs a full backup of the environment — headers, data file, and all
/// in-memory journal files — into a new zip archive at <paramref name="backupPath"/>.
/// A never-committed write transaction plus a read transaction give a consistent
/// snapshot for the whole copy.
/// </summary>
public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal, Action<string> infoNotify = null)
{
    infoNotify = infoNotify ?? (s => { });
    var dataPager = env.Options.DataPager;
    var copier = new DataCopier(AbstractPager.PageSize * 16);
    Transaction txr = null;
    try
    {
        infoNotify("Voron copy headers");
        using (var file = new FileStream(backupPath, FileMode.Create))
        using (var package = new ZipArchive(file, ZipArchiveMode.Create))
        {
            long allocatedPages;
            ImmutableAppendOnlyList<JournalFile> files; // thread safety copy
            long lastWrittenLogPage = -1;
            long lastWrittenLogFile = -1;
            using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
            {
                txr = env.NewTransaction(TransactionFlags.Read); // now have snapshot view
                allocatedPages = dataPager.NumberOfAllocatedPages;
                Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
                VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options);
                // journal files snapshot — AddRef each so none is deleted mid-copy
                files = env.Journal.Files;
                foreach (var journalFile in files)
                {
                    journalFile.AddRef();
                }
                if (env.Journal.CurrentFile != null)
                {
                    lastWrittenLogFile = env.Journal.CurrentFile.Number;
                    lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
                }
                // txw.Commit(); intentionally not committing
            }
            // data file backup
            var dataPart = package.CreateEntry(Constants.DatabaseFilename, compression);
            Debug.Assert(dataPart != null);
            if (allocatedPages > 0) // only false if dataPager is still empty at backup start
            {
                using (var dataStream = dataPart.Open())
                {
                    // now can copy everything else
                    var firstDataPage = dataPager.Read(0);
                    copier.ToStream(firstDataPage.Base, AbstractPager.PageSize * allocatedPages, dataStream);
                }
            }
            try
            {
                foreach (var journalFile in files)
                {
                    var journalPart = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalFile.Number), compression);
                    Debug.Assert(journalPart != null);
                    var pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                    // Only copy up to the snapshot's write position in the active journal.
                    if (journalFile.Number == lastWrittenLogFile)
                    {
                        pagesToCopy = lastWrittenLogPage + 1;
                    }
                    using (var stream = journalPart.Open())
                    {
                        copier.ToStream(journalFile, 0, pagesToCopy, stream);
                        infoNotify(string.Format("Voron copy journal file {0} ", journalFile));
                    }
                }
            }
            finally
            {
                // Balance the AddRef taken while snapshotting the journal list.
                foreach (var journalFile in files)
                {
                    journalFile.Release();
                }
            }
        }
    }
    finally
    {
        if (txr != null)
        {
            txr.Dispose();
        }
    }
    infoNotify(string.Format("Voron backup db finished"));
}
/// <summary>
/// Performs a full backup — headers, data file, and all journals named in the
/// header — into <paramref name="package"/> under <paramref name="basePath"/>.
/// On success it also resets the incremental-backup position in the header (when
/// incremental backup is enabled) and marks fully-synced, non-current journals
/// for deletion.
/// </summary>
private static void Backup(StorageEnvironment env, CompressionLevel compression, Action<string> infoNotify, Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath, DataCopier copier)
{
    var usedJournals = new List<JournalFile>();
    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;
    LowLevelTransaction txr = null;
    var backupSuccess = false;
    try
    {
        long allocatedPages;
        var writePesistentContext = new TransactionPersistentContext(true);
        var readPesistentContext = new TransactionPersistentContext(true);
        using (var txw = env.NewLowLevelTransaction(writePesistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
        {
            txr = env.NewLowLevelTransaction(readPesistentContext, TransactionFlags.Read); // now have snapshot view
            allocatedPages = dataPager.NumberOfAllocatedPages;
            Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
            infoNotify("Voron copy headers for " + basePath);
            VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);
            // journal files snapshot
            var files = env.Journal.Files; // thread safety copy
            JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
            // Collect (and AddRef) every journal named in the header; reopen from
            // disk any journal the environment no longer holds in memory.
            for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1; journalNum <= journalInfo.CurrentJournal; journalNum++)
            {
                var journalFile = files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                if (journalFile == null)
                {
                    long journalSize;
                    using (var pager = env.Options.OpenJournalPager(journalNum))
                    {
                        journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * Constants.Storage.PageSize);
                    }
                    journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                }
                journalFile.AddRef();
                usedJournals.Add(journalFile);
            }
            if (env.Journal.CurrentFile != null)
            {
                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                lastWrittenLogPage = env.Journal.CurrentFile.WritePosIn4KbPosition - 1;
            }
            // txw.Commit(); intentionally not committing
        }
        backupStarted?.Invoke();
        // data file backup
        var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
        Debug.Assert(dataPart != null);
        if (allocatedPages > 0) // only false if dataPager is still empty at backup start
        {
            using (var dataStream = dataPart.Open())
            {
                // now can copy everything else
                copier.ToStream(dataPager, 0, allocatedPages, dataStream);
            }
        }
        try
        {
            long lastBackedupJournal = 0;
            foreach (var journalFile in usedJournals)
            {
                var entryName = StorageEnvironmentOptions.JournalName(journalFile.Number);
                var journalPart = package.CreateEntry(Path.Combine(basePath, entryName), compression);
                Debug.Assert(journalPart != null);
                long pagesToCopy = journalFile.JournalWriter.NumberOfAllocated4Kb;
                // Only copy up to the snapshot's write position in the active journal.
                if (journalFile.Number == lastWrittenLogFile)
                {
                    pagesToCopy = lastWrittenLogPage + 1;
                }
                using (var stream = journalPart.Open())
                {
                    copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                    infoNotify(string.Format("Voron copy journal file {0}", entryName));
                }
                lastBackedupJournal = journalFile.Number;
            }
            if (env.Options.IncrementalBackupEnabled)
            {
                env.HeaderAccessor.Modify(header =>
                {
                    header->IncrementalBackup.LastBackedUpJournal = lastBackedupJournal;
                    // since we backed-up everything, no need to start next incremental backup from the middle
                    header->IncrementalBackup.LastBackedUpJournalPage = -1;
                });
            }
            backupSuccess = true;
        }
        catch (Exception)
        {
            backupSuccess = false;
            throw;
        }
        finally
        {
            // Always release our refs; on success mark fully-backed-up,
            // fully-synced journals for deletion.
            var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;
            foreach (var journalFile in usedJournals)
            {
                if (backupSuccess) // if backup succeeded we can remove journals
                {
                    if (journalFile.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                        journalFile.Number < lastSyncedJournal) // prevent deletion of journals that aren't synced with the data file
                    {
                        journalFile.DeleteOnClose = true;
                    }
                }
                journalFile.Release();
            }
        }
    }
    finally
    {
        txr?.Dispose();
    }
}
/// <summary>
/// Performs a full backup of the environment — headers, data file, and all
/// journals named in the header — into a new zip archive at
/// <paramref name="backupPath"/>, flushing the archive fully to disk before
/// returning. A never-committed write transaction plus a read transaction give a
/// consistent snapshot for the whole copy.
/// </summary>
public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal, Action<string> infoNotify = null, Action backupStarted = null)
{
    infoNotify = infoNotify ?? (s => { });
    var dataPager = env.Options.DataPager;
    var copier = new DataCopier(AbstractPager.PageSize * 16);
    Transaction txr = null;
    try
    {
        infoNotify("Voron copy headers");
        using (var file = new FileStream(backupPath, FileMode.Create))
        {
            using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
            {
                long allocatedPages;
                ImmutableAppendOnlyList<JournalFile> files; // thread safety copy
                var usedJournals = new List<JournalFile>();
                long lastWrittenLogPage = -1;
                long lastWrittenLogFile = -1;
                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
                {
                    txr = env.NewTransaction(TransactionFlags.Read); // now have snapshot view
                    allocatedPages = dataPager.NumberOfAllocatedPages;
                    Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
                    VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options);
                    // journal files snapshot
                    files = env.Journal.Files;
                    JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
                    // Collect (and AddRef) every journal named in the header; reopen
                    // from disk any journal the environment no longer holds in memory.
                    for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1; journalNum <= journalInfo.CurrentJournal; journalNum++)
                    {
                        var journalFile = files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                        if (journalFile == null)
                        {
                            long journalSize;
                            using (var pager = env.Options.OpenJournalPager(journalNum))
                            {
                                journalSize = Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                            }
                            journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                        }
                        journalFile.AddRef();
                        usedJournals.Add(journalFile);
                    }
                    if (env.Journal.CurrentFile != null)
                    {
                        lastWrittenLogFile = env.Journal.CurrentFile.Number;
                        lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
                    }
                    // txw.Commit(); intentionally not committing
                }
                if (backupStarted != null)
                {
                    backupStarted();
                }
                // data file backup
                var dataPart = package.CreateEntry(Constants.DatabaseFilename, compression);
                Debug.Assert(dataPart != null);
                if (allocatedPages > 0) // only false if dataPager is still empty at backup start
                {
                    using (var dataStream = dataPart.Open())
                    {
                        // now can copy everything else
                        var firstDataPage = dataPager.Read(null, 0);
                        copier.ToStream(firstDataPage.Base, AbstractPager.PageSize * allocatedPages, dataStream);
                    }
                }
                try
                {
                    foreach (var journalFile in usedJournals)
                    {
                        var journalPart = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalFile.Number), compression);
                        Debug.Assert(journalPart != null);
                        var pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                        // Only copy up to the snapshot's write position in the active journal.
                        if (journalFile.Number == lastWrittenLogFile)
                        {
                            pagesToCopy = lastWrittenLogPage + 1;
                        }
                        using (var stream = journalPart.Open())
                        {
                            copier.ToStream(journalFile, 0, pagesToCopy, stream);
                            infoNotify(string.Format("Voron copy journal file {0} ", journalFile));
                        }
                    }
                }
                finally
                {
                    // Balance the AddRef taken while collecting the journal snapshot.
                    foreach (var journalFile in usedJournals)
                    {
                        journalFile.Release();
                    }
                }
            }
            file.Flush(true); // make sure that we fully flushed to disk
        }
    }
    finally
    {
        if (txr != null)
        {
            txr.Dispose();
        }
    }
    infoNotify(string.Format("Voron backup db finished"));
}
/// <summary>
/// Performs an incremental backup of the journal files into a new zip archive at
/// <paramref name="backupPath"/>, copying only pages written since the previous
/// incremental backup (tracked in the storage header).
/// </summary>
/// <param name="env">The environment to back up; IncrementalBackupEnabled must be set.</param>
/// <param name="backupPath">Destination file; created/overwritten.</param>
/// <param name="compression">Zip compression level for each journal entry.</param>
/// <param name="infoNotify">Optional progress callback; defaults to a no-op.</param>
/// <returns>The number of journal pages written into the backup.</returns>
/// <exception cref="InvalidOperationException">
/// If incremental backup is disabled, or on the first incremental backup when
/// journal 0 does not exist (the feature was enabled after initialization).
/// </exception>
public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal, Action<string> infoNotify = null)
{
    infoNotify = infoNotify ?? (s => { });
    if (env.Options.IncrementalBackupEnabled == false)
    {
        throw new InvalidOperationException("Incremental backup is disabled for this storage");
    }
    long numberOfBackedUpPages = 0;
    var copier = new DataCopier(AbstractPager.PageSize * 16);
    var backupSuccess = true;
    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;
    using (var file = new FileStream(backupPath, FileMode.Create))
    using (var package = new ZipArchive(file, ZipArchiveMode.Create))
    {
        IncrementalBackupInfo backupInfo;
        // A write transaction is opened only to snapshot the header and the
        // current journal write position safely; it is never committed.
        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);
            if (env.Journal.CurrentFile != null)
            {
                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
            }
            // txw.Commit(); intentionally not committing
        }
        // The read transaction keeps the snapshot view alive while copying.
        using (env.NewTransaction(TransactionFlags.Read))
        {
            var usedJournals = new List<JournalFile>();
            try
            {
                long lastBackedUpPage = -1;
                long lastBackedUpFile = -1;
                var firstJournalToBackup = backupInfo.LastBackedUpJournal;
                if (firstJournalToBackup == -1)
                {
                    firstJournalToBackup = 0; // first time that we do incremental backup
                }
                for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                {
                    var num = journalNum;
                    var journalFile = env.Journal.Files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                    if (journalFile == null)
                    {
                        // Journal no longer held in memory — reopen it from disk.
                        long journalSize;
                        try
                        {
                            using (var pager = env.Options.OpenJournalPager(journalNum))
                            {
                                journalSize = Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                            }
                        }
                        catch (Exception e)
                        {
                            // Fix: use an ordinal comparison (CA1310) so the "No such
                            // journal" prefix check cannot be affected by the current
                            // culture — matches the sibling GetJournalFile helper.
                            if (backupInfo.LastBackedUpJournal == -1 && journalNum == 0 && e.Message.StartsWith("No such journal", StringComparison.Ordinal))
                            {
                                throw new InvalidOperationException("The first incremental backup creation failed because the first journal file " + StorageEnvironmentOptions.JournalName(journalNum) + " was not found. " + "Did you turn on the incremental backup feature after initializing the storage? " + "In order to create backups incrementally the storage must be created with IncrementalBackupEnabled option set to 'true'.", e);
                            }
                            throw;
                        }
                        journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                    }
                    journalFile.AddRef();
                    usedJournals.Add(journalFile);
                    var startBackupAt = 0L;
                    var pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                    // Resume mid-journal if the last backup stopped inside this file.
                    if (journalFile.Number == backupInfo.LastBackedUpJournal)
                    {
                        startBackupAt = backupInfo.LastBackedUpJournalPage + 1;
                        pagesToCopy -= startBackupAt;
                    }
                    if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocatedPages) // nothing to do here
                    {
                        continue;
                    }
                    var part = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalNum), compression);
                    Debug.Assert(part != null);
                    // Don't copy beyond the snapshot's write position in the active journal.
                    if (journalFile.Number == lastWrittenLogFile)
                    {
                        pagesToCopy -= (journalFile.JournalWriter.NumberOfAllocatedPages - lastWrittenLogPage);
                    }
                    using (var stream = part.Open())
                    {
                        copier.ToStream(journalFile, startBackupAt, pagesToCopy, stream);
                        infoNotify(string.Format("Voron Incr copy journal number {0}", num));
                    }
                    lastBackedUpFile = journalFile.Number;
                    if (journalFile.Number == backupInfo.LastCreatedJournal)
                    {
                        lastBackedUpPage = startBackupAt + pagesToCopy - 1;
                        // we used all of this file, so the next backup should start in the next file
                        if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocatedPages - 1))
                        {
                            lastBackedUpPage = -1;
                            lastBackedUpFile++;
                        }
                    }
                    numberOfBackedUpPages += pagesToCopy;
                }
                // Persist where the next incremental backup should resume from.
                env.HeaderAccessor.Modify(header =>
                {
                    header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile;
                    header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                });
            }
            catch (Exception)
            {
                backupSuccess = false;
                throw;
            }
            finally
            {
                // Always release our refs; on success mark non-current journals
                // for deletion.
                foreach (var jrnl in usedJournals)
                {
                    if (backupSuccess) // if backup succeeded we can remove journals
                    {
                        if (jrnl.Number < lastWrittenLogFile) // prevent deletion of the current journal and journals with a greater number
                        {
                            jrnl.DeleteOnClose = true;
                        }
                    }
                    jrnl.Release();
                }
            }
            infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages));
            return (numberOfBackedUpPages);
        }
    }
}