public void Cleanup()
{
    if (_recycleArea.Count == 0 && _scratchBuffers.Count == 1)
        return;

    long txIdAllowingToReleaseOldScratches = -1;

    // ReSharper disable once LoopCanBeConvertedToQuery
    foreach (var scratchBufferItem in _scratchBuffers)
    {
        if (scratchBufferItem.Value == _current)
            continue;

        txIdAllowingToReleaseOldScratches = Math.Max(txIdAllowingToReleaseOldScratches,
            scratchBufferItem.Value.File.TxIdAfterWhichLatestFreePagesBecomeAvailable);
    }

    while (_env.CurrentReadTransactionId <= txIdAllowingToReleaseOldScratches)
    {
        // we've just flushed and had no more writes after that, so let's bump the id of the next read transactions to ensure
        // that nobody will attempt to read the old scratches, allowing us to release more files
        try
        {
            using (var tx = _env.NewLowLevelTransaction(new TransactionPersistentContext(),
                TransactionFlags.ReadWrite, timeout: TimeSpan.FromMilliseconds(500)))
            {
                tx.ModifyPage(0);
                tx.Commit();
            }
        }
        catch (TimeoutException) { break; }
        catch (DiskFullException) { break; }
    }

    // we need to ensure that no access to _recycleArea and _scratchBuffers takes place at the same time,
    // and the only methods that access them are used within a write transaction
    try
    {
        using (_env.WriteTransaction())
        {
            RemoveInactiveScratches(_current);
            RemoveInactiveRecycledScratches();
        }
    }
    catch (TimeoutException) { }
}
private void Restore(StorageEnvironment env, string singleBackupFile)
{
    using (env.Journal.Applicator.TakeFlushingLock())
    {
        env.FlushLogToDataFile();

        var transactionPersistentContext = new TransactionPersistentContext(true);
        using (var txw = env.NewLowLevelTransaction(transactionPersistentContext, TransactionFlags.ReadWrite))
        {
            using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read, System.Text.Encoding.UTF8))
            {
                if (package.Entries.Count == 0)
                    return;

                var toDispose = new List<IDisposable>();

                var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;
                var tempDirSettings = new VoronPathSetting(tempDir);

                Restore(env, package.Entries, tempDirSettings, toDispose, txw);
            }
        }
    }
}
private void Restore(StorageEnvironment env, IEnumerable<ZipArchiveEntry> entries)
{
    using (env.Journal.Applicator.TakeFlushingLock())
    {
        env.FlushLogToDataFile();

        var transactionPersistentContext = new TransactionPersistentContext(true);
        using (var txw = env.NewLowLevelTransaction(transactionPersistentContext, TransactionFlags.ReadWrite))
        {
            var toDispose = new List<IDisposable>();

            var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;

            Restore(env, entries, tempDir, toDispose, txw);
        }
    }
}
public long ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
    Action<string> infoNotify = null, Action backupStarted = null)
{
    infoNotify = infoNotify ?? (s => { });

    if (env.Options.IncrementalBackupEnabled == false)
        throw new InvalidOperationException("Incremental backup is disabled for this storage");

    long numberOfBackedUpPages = 0;

    var copier = new DataCopier(env.Options.PageSize * 16);
    var backupSuccess = true;

    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;

    using (var file = new FileStream(backupPath, FileMode.Create))
    {
        using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
        {
            IncrementalBackupInfo backupInfo;
            using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite))
            {
                backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

                if (env.Journal.CurrentFile != null)
                {
                    lastWrittenLogFile = env.Journal.CurrentFile.Number;
                    lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
                }

                // txw.Commit(); intentionally not committing
            }

            using (env.NewLowLevelTransaction(TransactionFlags.Read))
            {
                if (backupStarted != null)
                    backupStarted(); // we let the caller know that we have started the backup

                var usedJournals = new List<JournalFile>();

                try
                {
                    long lastBackedUpPage = -1;
                    long lastBackedUpFile = -1;

                    var firstJournalToBackup = backupInfo.LastBackedUpJournal;
                    if (firstJournalToBackup == -1)
                        firstJournalToBackup = 0; // first time that we do incremental backup

                    for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                    {
                        var num = journalNum;

                        var journalFile = GetJournalFile(env, journalNum, backupInfo);
                        journalFile.AddRef();
                        usedJournals.Add(journalFile);

                        var startBackupAt = 0L;
                        long pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                        if (journalFile.Number == backupInfo.LastBackedUpJournal)
                        {
                            startBackupAt = backupInfo.LastBackedUpJournalPage + 1;
                            pagesToCopy -= startBackupAt;
                        }

                        if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocatedPages) // nothing to do here
                            continue;

                        var part = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalNum), compression);
                        Debug.Assert(part != null);

                        if (journalFile.Number == lastWrittenLogFile)
                            pagesToCopy -= (journalFile.JournalWriter.NumberOfAllocatedPages - lastWrittenLogPage);

                        using (var stream = part.Open())
                        {
                            copier.ToStream(env, journalFile, startBackupAt, pagesToCopy, stream);
                            infoNotify(string.Format("Voron Incr copy journal number {0}", num));
                        }

                        lastBackedUpFile = journalFile.Number;
                        if (journalFile.Number == backupInfo.LastCreatedJournal)
                        {
                            lastBackedUpPage = startBackupAt + pagesToCopy - 1;

                            // we used all of this file, so the next backup should start in the next file
                            if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocatedPages - 1))
                            {
                                lastBackedUpPage = -1;
                                lastBackedUpFile++;
                            }
                        }

                        numberOfBackedUpPages += pagesToCopy;
                    }

                    env.HeaderAccessor.Modify(header =>
                    {
                        header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile;
                        header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                    });
                }
                catch (Exception)
                {
                    backupSuccess = false;
                    throw;
                }
                finally
                {
                    var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;

                    foreach (var jrnl in usedJournals)
                    {
                        if (backupSuccess) // if the backup succeeded we can remove journals
                        {
                            if (jrnl.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                                jrnl.Number < lastSyncedJournal) // prevent deletion of journals that aren't synced with the data file
                            {
                                jrnl.DeleteOnClose = true;
                            }
                        }

                        jrnl.Release();
                    }
                }

                infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages));
            }
        }

        file.Flush(true); // make sure that this is actually persisted fully to disk
        return numberOfBackedUpPages;
    }
}
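// Usage sketch (not part of the original source): invoking the incremental ToFile backup above.
// The enclosing backup class name (IncrementalBackup), its parameterless constructor, and the target path
// are assumptions for illustration only; only the ToFile signature comes from the code above.
private static long RunIncrementalBackupExample(StorageEnvironment env, string backupPath)
{
    var incrementalBackup = new IncrementalBackup();

    // Copies the journal pages written since the previous incremental backup into a zip archive
    // and returns how many pages were backed up.
    return incrementalBackup.ToFile(env, backupPath,
        CompressionLevel.Optimal,
        infoNotify: msg => Console.WriteLine(msg),
        backupStarted: () => Console.WriteLine("Incremental backup started"));
}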
private void Restore(StorageEnvironment env, string singleBackupFile)
{
    using (env.Journal.Applicator.TakeFlushingLock())
    {
        using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite))
        {
            using (env.Options.AllowManualFlushing())
            {
                env.FlushLogToDataFile(txw);
            }

            using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read, System.Text.Encoding.UTF8))
            {
                if (package.Entries.Count == 0)
                    return;

                var toDispose = new List<IDisposable>();

                var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;

                try
                {
                    TransactionHeader* lastTxHeader = null;

                    var pagesToWrite = new Dictionary<long, TreePage>();

                    long journalNumber = -1;
                    foreach (var entry in package.Entries)
                    {
                        switch (Path.GetExtension(entry.Name))
                        {
                            case ".merged-journal":
                            case ".journal":
                                var journalFileName = Path.Combine(tempDir, entry.Name);
                                using (var output = new FileStream(journalFileName, FileMode.Create))
                                using (var input = entry.Open())
                                {
                                    output.Position = output.Length;
                                    input.CopyTo(output);
                                }

                                var pager = env.Options.OpenPager(journalFileName);
                                toDispose.Add(pager);

                                if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
                                    throw new InvalidOperationException("Cannot parse journal file number");

                                var recoveryPager = env.Options.CreateScratchPager(Path.Combine(tempDir,
                                    StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
                                toDispose.Add(recoveryPager);

                                var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);

                                while (reader.ReadOneTransaction(env.Options))
                                {
                                    lastTxHeader = reader.LastTransactionHeader;
                                }

                                foreach (var translation in reader.TransactionPageTranslation)
                                {
                                    var pageInJournal = translation.Value.JournalPos;
                                    var page = recoveryPager.Read(null, pageInJournal);
                                    pagesToWrite[translation.Key] = page;

                                    if (page.IsOverflow)
                                    {
                                        var numberOfOverflowPages = recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
                                        for (int i = 1; i < numberOfOverflowPages; i++)
                                        {
                                            pagesToWrite.Remove(translation.Key + i);
                                        }
                                    }
                                }

                                break;

                            default:
                                throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
                        }
                    }

                    var sortedPages = pagesToWrite.OrderBy(x => x.Key)
                        .Select(x => x.Value)
                        .ToList();

                    if (sortedPages.Count == 0)
                        return;

                    var last = sortedPages.Last();

                    var numberOfPages = last.IsOverflow
                        ? env.Options.DataPager.GetNumberOfOverflowPages(last.OverflowSize)
                        : 1;

                    var pagerState = env.Options.DataPager.EnsureContinuous(last.PageNumber, numberOfPages);
                    txw.EnsurePagerStateReference(pagerState);

                    foreach (var page in sortedPages)
                    {
                        env.Options.DataPager.Write(page);
                    }

                    env.Options.DataPager.Sync();

                    var root = Tree.Open(txw, null, &lastTxHeader->Root);
                    root.Name = Constants.RootTreeName;

                    txw.UpdateRootsIfNeeded(root);

                    txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

                    env.Journal.Clear(txw);

                    txw.Commit();

                    env.HeaderAccessor.Modify(header =>
                    {
                        header->TransactionId = lastTxHeader->TransactionId;
                        header->LastPageNumber = lastTxHeader->LastPageNumber;

                        header->Journal.LastSyncedJournal = journalNumber;
                        header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

                        header->Root = lastTxHeader->Root;

                        header->Journal.CurrentJournal = journalNumber + 1;
                        header->Journal.JournalFilesCount = 0;
                    });
                }
                finally
                {
                    toDispose.ForEach(x => x.Dispose());

                    try
                    {
                        Directory.Delete(tempDir, true);
                    }
                    catch
                    {
                        // this is just a temporary directory; the worst case scenario is that we don't reclaim the space from the OS temp directory.
                        // if for some reason we cannot delete it, we are safe to ignore it.
                    }
                }
            }
        }
    }
}
private static long Incremental_Backup(StorageEnvironment env, CompressionLevel compression, Action<string> infoNotify,
    Action backupStarted, ZipArchive package, string basePath, DataCopier copier)
{
    long numberOfBackedUpPages = 0;

    long lastWrittenLogFile = -1;
    long lastWrittenLog4kb = -1;

    bool backupSuccess = true;

    IncrementalBackupInfo backupInfo;
    JournalInfo journalInfo;

    var transactionPersistentContext = new TransactionPersistentContext(true);
    using (var txw = env.NewLowLevelTransaction(transactionPersistentContext, TransactionFlags.ReadWrite))
    {
        backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);
        journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);

        if (env.Journal.CurrentFile != null)
        {
            lastWrittenLogFile = env.Journal.CurrentFile.Number;
            lastWrittenLog4kb = env.Journal.CurrentFile.WritePosIn4KbPosition;
        }

        // txw.Commit(); intentionally not committing
    }

    using (env.NewLowLevelTransaction(transactionPersistentContext, TransactionFlags.Read))
    {
        backupStarted?.Invoke(); // we let the caller know that we have started the backup

        var usedJournals = new List<JournalFile>();

        try
        {
            long lastBackedUpPage = -1;
            long lastBackedUpFile = -1;

            var firstJournalToBackup = backupInfo.LastBackedUpJournal;
            if (firstJournalToBackup == -1)
                firstJournalToBackup = 0; // first time that we do incremental backup

            for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
            {
                var num = journalNum;

                var journalFile = GetJournalFile(env, journalNum, backupInfo, journalInfo);
                journalFile.AddRef();
                usedJournals.Add(journalFile);

                var startBackupAt = 0L;
                long numberOf4KbsToCopy = journalFile.JournalWriter.NumberOfAllocated4Kb;
                if (journalFile.Number == backupInfo.LastBackedUpJournal)
                {
                    startBackupAt = backupInfo.LastBackedUpJournalPage + 1;
                    numberOf4KbsToCopy -= startBackupAt;
                }

                if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocated4Kb) // nothing to do here
                    continue;

                var part = package.CreateEntry(Path.Combine(basePath, StorageEnvironmentOptions.JournalName(journalNum)), compression);
                Debug.Assert(part != null);

                if (journalFile.Number == lastWrittenLogFile)
                    numberOf4KbsToCopy -= (journalFile.JournalWriter.NumberOfAllocated4Kb - lastWrittenLog4kb);

                using (var stream = part.Open())
                {
                    copier.ToStream(env, journalFile, startBackupAt, numberOf4KbsToCopy, stream);
                    infoNotify(string.Format("Voron Incr copy journal number {0}", num));
                }

                lastBackedUpFile = journalFile.Number;
                if (journalFile.Number == backupInfo.LastCreatedJournal)
                {
                    lastBackedUpPage = startBackupAt + numberOf4KbsToCopy - 1;

                    // we used all of this file, so the next backup should start in the next file
                    if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocated4Kb - 1))
                    {
                        lastBackedUpPage = -1;
                        lastBackedUpFile++;
                    }
                }

                numberOfBackedUpPages += numberOf4KbsToCopy;
            }

            env.HeaderAccessor.Modify(header =>
            {
                header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile;
                header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
            });
        }
        catch (Exception)
        {
            backupSuccess = false;
            throw;
        }
        finally
        {
            var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;

            foreach (var jrnl in usedJournals)
            {
                if (backupSuccess) // if the backup succeeded we can remove journals
                {
                    if (jrnl.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                        jrnl.Number < lastSyncedJournal) // prevent deletion of journals that aren't synced with the data file
                    {
                        jrnl.DeleteOnClose = true;
                    }
                }

                jrnl.Release();
            }
        }

        infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages));
    }

    return numberOfBackedUpPages;
}
private static void Backup(
    StorageEnvironment env, CompressionLevel compression, Action<string> infoNotify,
    Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath, DataCopier copier)
{
    var usedJournals = new List<JournalFile>();
    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;
    LowLevelTransaction txr = null;
    try
    {
        long allocatedPages;

        var writePersistentContext = new TransactionPersistentContext(true);
        var readPersistentContext = new TransactionPersistentContext(true);
        using (var txw = env.NewLowLevelTransaction(writePersistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
        {
            txr = env.NewLowLevelTransaction(readPersistentContext, TransactionFlags.Read); // now have snapshot view
            allocatedPages = dataPager.NumberOfAllocatedPages;

            Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
            infoNotify("Voron copy headers for " + basePath);
            VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);

            // journal files snapshot
            var files = env.Journal.Files; // thread safety copy

            JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
            for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1;
                journalNum <= journalInfo.CurrentJournal;
                journalNum++)
            {
                var journalFile = files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                if (journalFile == null)
                {
                    long journalSize;
                    using (var pager = env.Options.OpenJournalPager(journalNum))
                    {
                        journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * env.Options.PageSize);
                    }

                    journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                }

                journalFile.AddRef();
                usedJournals.Add(journalFile);
            }

            if (env.Journal.CurrentFile != null)
            {
                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
            }

            // txw.Commit(); intentionally not committing
        }

        backupStarted?.Invoke();

        // data file backup
        var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
        Debug.Assert(dataPart != null);

        if (allocatedPages > 0) // false only if dataPager is still empty at backup start
        {
            using (var dataStream = dataPart.Open())
            {
                // now can copy everything else
                copier.ToStream(dataPager, 0, allocatedPages, dataStream);
            }
        }

        try
        {
            foreach (JournalFile journalFile in usedJournals)
            {
                var entryName = Path.Combine(basePath, StorageEnvironmentOptions.JournalName(journalFile.Number));
                var journalPart = package.CreateEntry(entryName, compression);

                Debug.Assert(journalPart != null);

                long pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                if (journalFile.Number == lastWrittenLogFile)
                    pagesToCopy = lastWrittenLogPage + 1;

                using (var stream = journalPart.Open())
                {
                    copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                    infoNotify(string.Format("Voron copy journal file {0}", entryName));
                }
            }
        }
        finally
        {
            foreach (var journalFile in usedJournals)
            {
                journalFile.Release();
            }
        }
    }
    finally
    {
        txr?.Dispose();
    }
}
public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
    Action<string> infoNotify = null, Action backupStarted = null)
{
    if (env.Options.IncrementalBackupEnabled == false)
        throw new InvalidOperationException("Incremental backup is disabled for this storage");

    var pageNumberToPageInScratch = new Dictionary<long, long>();
    if (infoNotify == null)
        infoNotify = str => { };

    var toDispose = new List<IDisposable>();
    try
    {
        IncrementalBackupInfo backupInfo;
        long lastWrittenLogPage = -1;
        long lastWrittenLogFile = -1;

        using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite))
        {
            backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

            if (env.Journal.CurrentFile != null)
            {
                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
            }

            // txw.Commit(); intentionally not committing
        }

        if (backupStarted != null)
            backupStarted();

        infoNotify("Voron - reading storage journals for snapshot pages");

        var lastBackedUpFile = backupInfo.LastBackedUpJournal;
        var lastBackedUpPage = backupInfo.LastBackedUpJournalPage;
        var firstJournalToBackup = backupInfo.LastBackedUpJournal;

        if (firstJournalToBackup == -1)
            firstJournalToBackup = 0; // first time that we do incremental backup

        var lastTransaction = new TransactionHeader { TransactionId = -1 };

        var recoveryPager = env.Options.CreateScratchPager("min-inc-backup.scratch");
        toDispose.Add(recoveryPager);
        int recoveryPage = 0;

        for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
        {
            lastBackedUpFile = journalNum;
            var journalFile = IncrementalBackup.GetJournalFile(env, journalNum, backupInfo);
            try
            {
                using (var filePager = env.Options.OpenJournalPager(journalNum))
                {
                    var reader = new JournalReader(filePager, recoveryPager, 0, null, recoveryPage);
                    reader.MaxPageToRead = lastBackedUpPage = journalFile.JournalWriter.NumberOfAllocatedPages;
                    if (journalNum == lastWrittenLogFile) // set the last part of the log file we'll be reading
                        reader.MaxPageToRead = lastBackedUpPage = lastWrittenLogPage;

                    if (lastBackedUpPage == journalFile.JournalWriter.NumberOfAllocatedPages) // past the file size
                    {
                        // move to the next
                        lastBackedUpPage = -1;
                        lastBackedUpFile++;
                    }

                    if (journalNum == backupInfo.LastBackedUpJournal) // continue from last backup
                        reader.SetStartPage(backupInfo.LastBackedUpJournalPage);

                    TransactionHeader* lastJournalTxHeader = null;
                    while (reader.ReadOneTransaction(env.Options))
                    {
                        // read all transactions here
                        lastJournalTxHeader = reader.LastTransactionHeader;
                    }

                    if (lastJournalTxHeader != null)
                        lastTransaction = *lastJournalTxHeader;

                    recoveryPage = reader.RecoveryPage;

                    foreach (var pagePosition in reader.TransactionPageTranslation)
                    {
                        var pageInJournal = pagePosition.Value.JournalPos;
                        var page = recoveryPager.Read(null, pageInJournal);
                        pageNumberToPageInScratch[pagePosition.Key] = pageInJournal;
                        if (page.IsOverflow)
                        {
                            var numberOfOverflowPages = recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
                            for (int i = 1; i < numberOfOverflowPages; i++)
                            {
                                pageNumberToPageInScratch.Remove(page.PageNumber + i);
                            }
                        }
                    }
                }
            }
            finally
            {
                journalFile.Release();
            }
        }

        if (pageNumberToPageInScratch.Count == 0)
        {
            infoNotify("Voron - no changes since last backup, nothing to do");
            return;
        }

        infoNotify("Voron - started writing snapshot file.");

        if (lastTransaction.TransactionId == -1)
            throw new InvalidOperationException("Could not find any transactions in the journals, but found pages to write? That ain't right.");

        // it is possible that we merged enough transactions so the _merged_ output is too large for us.
        // Voron limits transactions to about 4GB each. That means that we can't just merge all transactions
        // blindly, for fear of hitting this limit. So we need to split things.
        // We are also limited to about 8 TB of data in general before we literally can't fit the number of pages into
        // pageNumberToPageInScratch even theoretically.
        // We're fine with saying that you need to run min inc backup before you hit 8 TB in your increment, so that works for now.
        // We are also going to use env.Options.MaxScratchBufferSize to set the actual transaction limit here, to avoid issues
        // down the road and to limit how big a single transaction can be before the theoretical 4GB limit.
        var nextJournalNum = lastBackedUpFile;
        using (var file = new FileStream(backupPath, FileMode.Create))
        {
            using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
            {
                var copier = new DataCopier(env.Options.PageSize * 16);
                var finalPager = env.Options.CreateScratchPager("min-inc-backup-final.scratch");
                toDispose.Add(finalPager);
                finalPager.EnsureContinuous(0, 1); // txHeader

                foreach (var partition in Partition(pageNumberToPageInScratch.Values, env.Options.MaxNumberOfPagesInMergedTransaction))
                {
                    int totalNumberOfPages = 0;
                    int overflowPages = 0;
                    int start = 1;
                    foreach (var pageNum in partition)
                    {
                        var p = recoveryPager.Read(null, pageNum);
                        var size = 1;
                        if (p.IsOverflow)
                        {
                            size = recoveryPager.GetNumberOfOverflowPages(p.OverflowSize);
                            overflowPages += (size - 1);
                        }

                        totalNumberOfPages += size;
                        finalPager.EnsureContinuous(start, size); // maybe increase size

                        Memory.Copy(finalPager.AcquirePagePointer(null, start), p.Base, size * env.Options.PageSize);

                        start += size;
                    }

                    var txPage = finalPager.AcquirePagePointer(null, 0);
                    UnmanagedMemory.Set(txPage, 0, env.Options.PageSize);
                    var txHeader = (TransactionHeader*)txPage;
                    txHeader->HeaderMarker = Constants.TransactionHeaderMarker;
                    txHeader->Root = lastTransaction.Root;
                    txHeader->OverflowPageCount = overflowPages;
                    txHeader->PageCount = totalNumberOfPages - overflowPages;
                    txHeader->TransactionId = lastTransaction.TransactionId;
                    txHeader->NextPageNumber = lastTransaction.NextPageNumber;
                    txHeader->LastPageNumber = lastTransaction.LastPageNumber;
                    txHeader->TxMarker = TransactionMarker.Commit | TransactionMarker.Merged;
                    txHeader->Compressed = false;
                    txHeader->UncompressedSize = txHeader->CompressedSize = totalNumberOfPages * env.Options.PageSize;
                    txHeader->Hash = Hashing.XXHash64.Calculate(finalPager.AcquirePagePointer(null, 1), totalNumberOfPages * env.Options.PageSize);

                    var entry = package.CreateEntry(string.Format("{0:D19}.merged-journal", nextJournalNum), compression);
                    nextJournalNum++;
                    using (var stream = entry.Open())
                    {
                        copier.ToStream(finalPager.AcquirePagePointer(null, 0), (totalNumberOfPages + 1) * env.Options.PageSize, stream);
                    }
                }
            }

            file.Flush(true); // make sure we hit the disk and stay there
        }

        env.HeaderAccessor.Modify(header =>
        {
            header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile;
            header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
        });
    }
    finally
    {
        foreach (var disposable in toDispose)
        {
            disposable.Dispose();
        }
    }
}
private static void Backup(
    StorageEnvironment env, CompressionLevel compression, Action<string> infoNotify,
    Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath, DataCopier copier)
{
    var usedJournals = new List<JournalFile>();
    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;
    LowLevelTransaction txr = null;
    var backupSuccess = false;
    try
    {
        long allocatedPages;

        var writePersistentContext = new TransactionPersistentContext(true);
        var readPersistentContext = new TransactionPersistentContext(true);
        using (var txw = env.NewLowLevelTransaction(writePersistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
        {
            txr = env.NewLowLevelTransaction(readPersistentContext, TransactionFlags.Read); // now have snapshot view
            allocatedPages = dataPager.NumberOfAllocatedPages;

            Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
            infoNotify("Voron copy headers for " + basePath);
            VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);

            // journal files snapshot
            var files = env.Journal.Files; // thread safety copy

            JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
            for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1;
                journalNum <= journalInfo.CurrentJournal;
                journalNum++)
            {
                var journalFile = files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                if (journalFile == null)
                {
                    long journalSize;
                    using (var pager = env.Options.OpenJournalPager(journalNum))
                    {
                        journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * Constants.Storage.PageSize);
                    }

                    journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                }

                journalFile.AddRef();
                usedJournals.Add(journalFile);
            }

            if (env.Journal.CurrentFile != null)
            {
                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                lastWrittenLogPage = env.Journal.CurrentFile.WritePosIn4KbPosition - 1;
            }

            // txw.Commit(); intentionally not committing
        }

        backupStarted?.Invoke();

        // data file backup
        var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
        Debug.Assert(dataPart != null);

        if (allocatedPages > 0) // false only if dataPager is still empty at backup start
        {
            using (var dataStream = dataPart.Open())
            {
                // now can copy everything else
                copier.ToStream(dataPager, 0, allocatedPages, dataStream);
            }
        }

        try
        {
            long lastBackedupJournal = 0;
            foreach (var journalFile in usedJournals)
            {
                var entryName = StorageEnvironmentOptions.JournalName(journalFile.Number);
                var journalPart = package.CreateEntry(Path.Combine(basePath, entryName), compression);

                Debug.Assert(journalPart != null);

                long pagesToCopy = journalFile.JournalWriter.NumberOfAllocated4Kb;
                if (journalFile.Number == lastWrittenLogFile)
                    pagesToCopy = lastWrittenLogPage + 1;

                using (var stream = journalPart.Open())
                {
                    copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                    infoNotify(string.Format("Voron copy journal file {0}", entryName));
                }

                lastBackedupJournal = journalFile.Number;
            }

            if (env.Options.IncrementalBackupEnabled)
            {
                env.HeaderAccessor.Modify(header =>
                {
                    header->IncrementalBackup.LastBackedUpJournal = lastBackedupJournal;

                    // since we backed up everything, there is no need to start the next incremental backup from the middle
                    header->IncrementalBackup.LastBackedUpJournalPage = -1;
                });
            }

            backupSuccess = true;
        }
        catch (Exception)
        {
            backupSuccess = false;
            throw;
        }
        finally
        {
            var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;

            foreach (var journalFile in usedJournals)
            {
                if (backupSuccess) // if the backup succeeded we can remove journals
                {
                    if (journalFile.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                        journalFile.Number < lastSyncedJournal) // prevent deletion of journals that aren't synced with the data file
                    {
                        journalFile.DeleteOnClose = true;
                    }
                }

                journalFile.Release();
            }
        }
    }
    finally
    {
        txr?.Dispose();
    }
}
public void Cleanup()
{
    if (_recycleArea.Count == 0 && _scratchBuffers.Count == 1)
        return;

    long txIdAllowingToReleaseOldScratches = -1;

    // ReSharper disable once LoopCanBeConvertedToQuery
    foreach (var scratchBufferItem in _scratchBuffers)
    {
        if (scratchBufferItem.Value == _current)
            continue;

        txIdAllowingToReleaseOldScratches = Math.Max(txIdAllowingToReleaseOldScratches,
            scratchBufferItem.Value.File.TxIdAfterWhichLatestFreePagesBecomeAvailable);
    }

    ByteStringContext byteStringContext;
    try
    {
        byteStringContext = new ByteStringContext(SharedMultipleUseFlag.None);
    }
    catch (Exception e) when (e is OutOfMemoryException || e is EarlyOutOfMemoryException)
    {
        return;
    }

    try
    {
        while (_env.CurrentReadTransactionId <= txIdAllowingToReleaseOldScratches)
        {
            // we've just flushed and had no more writes after that, so let's bump the id of the next read transactions to ensure
            // that nobody will attempt to read the old scratches, allowing us to release more files
            try
            {
                using (var tx = _env.NewLowLevelTransaction(new TransactionPersistentContext(),
                    TransactionFlags.ReadWrite, timeout: TimeSpan.FromMilliseconds(500), context: byteStringContext))
                {
                    tx.ModifyPage(0);
                    tx.Commit();
                }
            }
            catch (TimeoutException) { break; }
            catch (DiskFullException) { break; }
        }

        // we need to ensure that no access to _recycleArea and _scratchBuffers takes place at the same time,
        // and the only methods that access them are used within a write transaction
        try
        {
            using (_env.WriteTransaction(context: byteStringContext))
            {
                var removedInactive = RemoveInactiveScratches(_current);
                var removedInactiveRecycled = RemoveInactiveRecycledScratches();

                if (_logger.IsInfoEnabled)
                {
                    _logger.Info(
                        $"Cleanup of {nameof(ScratchBufferPool)} removed: {removedInactive} inactive scratches and {removedInactiveRecycled} inactive from the recycle area");
                }
            }
        }
        catch (TimeoutException) { }
        catch (DiskFullException) { }
    }
    finally
    {
        byteStringContext.Dispose();
    }
}
public void Cleanup()
{
    if (_recycleArea.Count == 0 && _scratchBuffers.Count == 1)
        return;

    long txIdAllowingToReleaseOldScratches = -1;

    // ReSharper disable once LoopCanBeConvertedToQuery
    foreach (var scratchBufferItem in _scratchBuffers)
    {
        if (scratchBufferItem.Value == _current)
            continue;

        txIdAllowingToReleaseOldScratches = Math.Max(txIdAllowingToReleaseOldScratches,
            scratchBufferItem.Value.File.TxIdAfterWhichLatestFreePagesBecomeAvailable);
    }

    ByteStringContext byteStringContext;
    try
    {
        byteStringContext = new ByteStringContext(SharedMultipleUseFlag.None);
    }
    catch (Exception e) when (e is OutOfMemoryException || e is EarlyOutOfMemoryException)
    {
        return;
    }

    try
    {
        while (_env.CurrentReadTransactionId <= txIdAllowingToReleaseOldScratches)
        {
            // we've just flushed and had no more writes after that, so let's bump the id of the next read transactions to ensure
            // that nobody will attempt to read the old scratches, allowing us to release more files
            try
            {
                using (var tx = _env.NewLowLevelTransaction(new TransactionPersistentContext(),
                    TransactionFlags.ReadWrite, timeout: TimeSpan.FromMilliseconds(500), context: byteStringContext))
                {
                    tx.ModifyPage(0);
                    tx.Commit();
                }
            }
            catch (TimeoutException) { break; }
            catch (DiskFullException) { break; }
        }

        IDisposable exitPreventNewTransactions = null;
        try
        {
            // we need to ensure that no access to _recycleArea and _scratchBuffers takes place at the same time,
            // and the only methods that access them are used within a write transaction
            using (_env.WriteTransaction())
            {
                // additionally we must not allow any transaction (even a read one) to start, because it uses GetPagerStatesOfAllScratches()
                // which returns _pagerStatesAllScratchesCache that we're updating here
                if (_env.TryPreventNewTransactions(TimeSpan.Zero, out exitPreventNewTransactions))
                {
                    var removedInactive = RemoveInactiveScratches(_current, updateCacheBeforeDisposingScratch: false); // no need to update the cache because we're going to do it here anyway
                    var removedInactiveRecycled = RemoveInactiveRecycledScratches();

                    if (_logger.IsInfoEnabled)
                    {
                        _logger.Info(
                            $"Cleanup of {nameof(ScratchBufferPool)} removed: {removedInactive} inactive scratches and {removedInactiveRecycled} inactive from the recycle area");
                    }

                    _forTestingPurposes?.ActionToCallDuringCleanupRightAfterRemovingInactiveScratches?.Invoke();

                    UpdateCacheForPagerStatesOfAllScratches(); // it's going to be called by Rollback() of the write tx but let's call it explicitly so we can easily find this usage
                }
            }
        }
        catch (TimeoutException) { }
        catch (DiskFullException) { }
        finally
        {
            exitPreventNewTransactions?.Dispose();
        }
    }
    finally
    {
        byteStringContext.Dispose();
    }
}
public void OpenRead()
{
    var readPersistentContext = new TransactionPersistentContext(true);
    Read = new Transaction(_env.NewLowLevelTransaction(readPersistentContext, TransactionFlags.Read));
}
public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal,
    Action<string> infoNotify = null, Action backupStarted = null)
{
    infoNotify = infoNotify ?? (s => { });

    var dataPager = env.Options.DataPager;
    var copier = new DataCopier(env.Options.PageSize * 16);
    LowLevelTransaction txr = null;
    try
    {
        infoNotify("Voron copy headers");

        using (var file = new FileStream(backupPath, FileMode.Create))
        {
            using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
            {
                long allocatedPages;

                ImmutableAppendOnlyList<JournalFile> files; // thread safety copy
                var usedJournals = new List<JournalFile>();
                long lastWrittenLogPage = -1;
                long lastWrittenLogFile = -1;

                using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
                {
                    txr = env.NewLowLevelTransaction(TransactionFlags.Read); // now have snapshot view
                    allocatedPages = dataPager.NumberOfAllocatedPages;

                    Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
                    VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options);

                    // journal files snapshot
                    files = env.Journal.Files;

                    JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
                    for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1;
                        journalNum <= journalInfo.CurrentJournal;
                        journalNum++)
                    {
                        var journalFile = files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                        if (journalFile == null)
                        {
                            long journalSize;
                            using (var pager = env.Options.OpenJournalPager(journalNum))
                            {
                                journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * env.Options.PageSize);
                            }

                            journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                        }

                        journalFile.AddRef();
                        usedJournals.Add(journalFile);
                    }

                    if (env.Journal.CurrentFile != null)
                    {
                        lastWrittenLogFile = env.Journal.CurrentFile.Number;
                        lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition - 1;
                    }

                    // txw.Commit(); intentionally not committing
                }

                if (backupStarted != null)
                    backupStarted();

                // data file backup
                var dataPart = package.CreateEntry(Constants.DatabaseFilename, compression);
                Debug.Assert(dataPart != null);

                if (allocatedPages > 0) // false only if dataPager is still empty at backup start
                {
                    using (var dataStream = dataPart.Open())
                    {
                        // now can copy everything else
                        var firstDataPage = dataPager.Read(null, 0);
                        copier.ToStream(firstDataPage.Base, env.Options.PageSize * allocatedPages, dataStream);
                    }
                }

                try
                {
                    foreach (var journalFile in usedJournals)
                    {
                        var journalPart = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalFile.Number), compression);

                        Debug.Assert(journalPart != null);

                        long pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                        if (journalFile.Number == lastWrittenLogFile)
                            pagesToCopy = lastWrittenLogPage + 1;

                        using (var stream = journalPart.Open())
                        {
                            copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                            infoNotify(string.Format("Voron copy journal file {0}", journalFile));
                        }
                    }
                }
                finally
                {
                    foreach (var journalFile in usedJournals)
                    {
                        journalFile.Release();
                    }
                }
            }

            file.Flush(true); // make sure that we fully flushed to disk
        }
    }
    finally
    {
        if (txr != null)
            txr.Dispose();
    }

    infoNotify("Voron backup db finished");
}
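// Usage sketch (not part of the original source): invoking the full-backup ToFile above.
// The enclosing backup class name (FullBackup), its parameterless constructor, and the target path
// are assumptions for illustration only; only the ToFile signature comes from the code above.
private static void RunFullBackupExample(StorageEnvironment env, string backupPath)
{
    var fullBackup = new FullBackup();

    // Snapshots the data file plus the journals needed to recover it into a single zip archive.
    fullBackup.ToFile(env, backupPath,
        CompressionLevel.Optimal,
        infoNotify: msg => Console.WriteLine(msg),
        backupStarted: () => Console.WriteLine("Full backup started"));
}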