// Restores a full backup from a single zip archive into this environment.
// Every ".journal"/".merged-journal" entry is extracted to a temp directory,
// replayed through a recovery scratch pager, and the resulting pages are
// written directly to the data file; finally the storage header is rewritten
// to point at the last restored transaction.
private void Restore(StorageEnvironment env, string singleBackupFile)
{
    // hold the flushing lock for the whole restore so the journal applicator
    // cannot race us while we rewrite the data file and header
    using (env.Journal.Applicator.TakeFlushingLock())
    {
        using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite))
        {
            // push any pending journal state into the data file before replaying the backup
            using (env.Options.AllowManualFlushing())
            {
                env.FlushLogToDataFile(txw);
            }
            // UTF8 for entry names, so non-ASCII journal file names round-trip
            using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read, System.Text.Encoding.UTF8))
            {
                if (package.Entries.Count == 0)
                {
                    return;
                }

                var toDispose = new List<IDisposable>();

                var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;
                try
                {
                    TransactionHeader* lastTxHeader = null;

                    // page number -> latest page image; later journal entries overwrite
                    // earlier ones so only the most recent version of a page is written
                    var pagesToWrite = new Dictionary<long, TreePage>();

                    long journalNumber = -1;
                    foreach (var entry in package.Entries)
                    {
                        switch (Path.GetExtension(entry.Name))
                        {
                            case ".merged-journal":
                            case ".journal":
                                var jounalFileName = Path.Combine(tempDir, entry.Name);
                                using (var output = new FileStream(jounalFileName, FileMode.Create))
                                using (var input = entry.Open())
                                {
                                    // NOTE(review): Position = Length looks intended to append when the
                                    // same journal name appears in multiple entries, but FileMode.Create
                                    // truncates — so this is effectively a no-op; confirm intent
                                    output.Position = output.Length;
                                    input.CopyTo(output);
                                }

                                var pager = env.Options.OpenPager(jounalFileName);
                                toDispose.Add(pager);

                                // journal file names are the journal number, e.g. "0000000000000000003.journal"
                                if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
                                {
                                    throw new InvalidOperationException("Cannot parse journal file number");
                                }

                                var recoveryPager = env.Options.CreateScratchPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
                                toDispose.Add(recoveryPager);

                                // lastTxHeader threads through the journals so transaction ids are
                                // validated across journal boundaries
                                var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);

                                while (reader.ReadOneTransaction(env.Options))
                                {
                                    lastTxHeader = reader.LastTransactionHeader;
                                }

                                foreach (var translation in reader.TransactionPageTranslation)
                                {
                                    var pageInJournal = translation.Value.JournalPos;
                                    var page = recoveryPager.Read(null, pageInJournal);
                                    pagesToWrite[translation.Key] = page;

                                    // writing an overflow page also writes its continuation pages,
                                    // so drop any stale standalone entries for those page numbers
                                    if (page.IsOverflow)
                                    {
                                        var numberOfOverflowPages = recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);

                                        for (int i = 1; i < numberOfOverflowPages; i++)
                                        {
                                            pagesToWrite.Remove(translation.Key + i);
                                        }
                                    }
                                }

                                break;
                            default:
                                throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
                        }
                    }

                    var sortedPages = pagesToWrite.OrderBy(x => x.Key)
                                                  .Select(x => x.Value)
                                                  .ToList();

                    if (sortedPages.Count == 0)
                    {
                        return; // journals contained no committed transactions
                    }

                    var last = sortedPages.Last();

                    // grow the data file far enough to hold the highest page (plus its overflow)
                    var numberOfPages = last.IsOverflow
                        ? env.Options.DataPager.GetNumberOfOverflowPages(last.OverflowSize)
                        : 1;

                    var pagerState = env.Options.DataPager.EnsureContinuous(last.PageNumber, numberOfPages);
                    txw.EnsurePagerStateReference(pagerState);

                    foreach (var page in sortedPages)
                    {
                        env.Options.DataPager.Write(page);
                    }

                    env.Options.DataPager.Sync();

                    // rebuild the root tree from the last restored transaction header
                    var root = Tree.Open(txw, null, &lastTxHeader->Root);
                    root.Name = Constants.RootTreeName;

                    txw.UpdateRootsIfNeeded(root);

                    txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

                    // the old journals no longer apply to the restored data file
                    env.Journal.Clear(txw);

                    txw.Commit();

                    // persist the restored state to the storage header
                    env.HeaderAccessor.Modify(header =>
                    {
                        header->TransactionId = lastTxHeader->TransactionId;
                        header->LastPageNumber = lastTxHeader->LastPageNumber;

                        header->Journal.LastSyncedJournal = journalNumber;
                        header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

                        header->Root = lastTxHeader->Root;

                        header->Journal.CurrentJournal = journalNumber + 1;
                        header->Journal.JournalFilesCount = 0;
                    });
                }
                finally
                {
                    toDispose.ForEach(x => x.Dispose());

                    try
                    {
                        Directory.Delete(tempDir, true);
                    }
                    catch
                    {
                        // this is just a temporary directory, the worst case scenario is that we
                        // don't reclaim the space from the OS temp directory; if for some reason
                        // we cannot delete it we are safe to ignore it
                    }
                }
            }
        }
    }
}
// Replays the given backup archive entries (journal files) directly into the
// environment's data file and rewrites the storage header to the last restored
// transaction. Caller owns txw (commit happens here), tempDir, and adds to
// toDispose; this method disposes toDispose and deletes tempDir on exit.
private static void Restore(StorageEnvironment env, IEnumerable<ZipArchiveEntry> entries, VoronPathSetting tempDir, List<IDisposable> toDispose, LowLevelTransaction txw)
{
    try
    {
        TransactionHeader* lastTxHeader = null;
        // the reader owns the memory lastTxHeader points into; we copy the header
        // to this stack slot below so it stays valid after the reader is disposed
        var lastTxHeaderStackLocation = stackalloc TransactionHeader[1];

        long lastTxId = env.HeaderAccessor.Get(x => x->TransactionId);

        long journalNumber = -1;
        foreach (var entry in entries)
        {
            switch (Path.GetExtension(entry.Name))
            {
                case ".merged-journal":
                case ".journal":
                    var jounalFileName = tempDir.Combine(entry.Name);
                    using (var output = SafeFileStream.Create(jounalFileName.FullPath, FileMode.Create))
                    using (var input = entry.Open())
                    {
                        // NOTE(review): with FileMode.Create the file is truncated, so
                        // Position = Length is effectively a no-op — confirm intent
                        output.Position = output.Length;
                        input.CopyTo(output);
                    }

                    var pager = env.Options.OpenPager(jounalFileName);
                    toDispose.Add(pager);

                    // journal file names are the journal number
                    if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
                    {
                        throw new InvalidOperationException("Cannot parse journal file number");
                    }

                    var recoveryPager = env.Options.CreateTemporaryBufferPager(Path.Combine(tempDir.Combine(StorageEnvironmentOptions.JournalRecoveryName(journalNumber)).FullPath), env.Options.InitialFileSize ?? env.Options.InitialLogFileSize);
                    toDispose.Add(recoveryPager);

                    // lastTxId / lastTxHeader thread through journals so transactions
                    // are applied in order and already-synced ones are skipped
                    using (var reader = new JournalReader(pager, env.Options.DataPager, recoveryPager, new HashSet<long>(), new JournalInfo { LastSyncedTransactionId = lastTxId }, lastTxHeader))
                    {
                        while (reader.ReadOneTransactionToDataFile(env.Options))
                        {
                            lastTxHeader = reader.LastTransactionHeader;
                        }
                        reader.ZeroRecoveryBufferIfNeeded(reader, env.Options);

                        if (lastTxHeader != null)
                        {
                            // copy out of the reader's buffer before it is disposed
                            *lastTxHeaderStackLocation = *lastTxHeader;
                            lastTxHeader = lastTxHeaderStackLocation;
                            lastTxId = lastTxHeader->TransactionId;
                        }
                    }
                    break;
                default:
                    throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
            }
        }

        if (lastTxHeader == null)
        {
            return; // there was no valid transactions, nothing to do
        }

        env.Options.DataPager.Sync(0);

        // rebuild the root tree from the last restored transaction header
        var root = Tree.Open(txw, null, Constants.RootTreeNameSlice, &lastTxHeader->Root);

        txw.UpdateRootsIfNeeded(root);

        txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

        txw.Commit();

        // persist the restored state to the storage header
        env.HeaderAccessor.Modify(header =>
        {
            header->TransactionId = lastTxHeader->TransactionId;
            header->LastPageNumber = lastTxHeader->LastPageNumber;

            header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

            header->Root = lastTxHeader->Root;

            // reset journal bookkeeping: the restored data file starts clean
            Sparrow.Memory.Set(header->Journal.Reserved, 0, JournalInfo.NumberOfReservedBytes);
            header->Journal.Flags = JournalInfoFlags.None;
        });
    }
    finally
    {
        toDispose.ForEach(x => x.Dispose());

        try
        {
            Directory.Delete(tempDir.FullPath, true);
        }
        catch
        {
            // this is just a temporary directory, the worst case scenario is that we
            // don't reclaim the space from the OS temp directory; if for some reason
            // we cannot delete it we are safe to ignore it
        }
    }
}
// Restores a full backup from a single zip archive. Journal entries are
// extracted to a temp directory, replayed through a recovery pager, and the
// latest image of each page is written to the data file; the root and
// free-space trees and the storage header are then rebuilt from the last
// restored transaction header.
private void Restore(StorageEnvironment env, string singleBackupFile)
{
    // block the journal applicator for the duration of the restore
    using (env.Journal.Applicator.TakeFlushingLock())
    {
        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            // push pending journal state to the data file before replaying the backup
            using (env.Options.AllowManualFlushing())
            {
                env.FlushLogToDataFile(txw);
            }
            using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read))
            {
                if (package.Entries.Count == 0)
                {
                    return;
                }

                var toDispose = new List<IDisposable>();

                var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;
                try
                {
                    TransactionHeader* lastTxHeader = null;

                    // page number -> deferred read of the latest image of that page;
                    // later journals overwrite earlier entries for the same page
                    var pagesToWrite = new Dictionary<long, Func<Page>>();

                    long journalNumber = -1;
                    foreach (var entry in package.Entries)
                    {
                        switch (Path.GetExtension(entry.Name))
                        {
                            case ".journal":
                                var jounalFileName = Path.Combine(tempDir, entry.Name);
                                using (var output = new FileStream(jounalFileName, FileMode.Create))
                                using (var input = entry.Open())
                                {
                                    // NOTE(review): FileMode.Create truncates, so this
                                    // Position = Length is effectively a no-op — confirm intent
                                    output.Position = output.Length;
                                    input.CopyTo(output);
                                }

                                var pager = new Win32MemoryMapPager(jounalFileName);
                                toDispose.Add(pager);

                                // journal file names are the journal number
                                if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
                                {
                                    throw new InvalidOperationException("Cannot parse journal file number");
                                }

                                var recoveryPager = new Win32MemoryMapPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
                                toDispose.Add(recoveryPager);

                                // lastTxHeader threads through journals for cross-journal validation
                                var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);

                                while (reader.ReadOneTransaction(env.Options))
                                {
                                    lastTxHeader = reader.LastTransactionHeader;
                                }

                                foreach (var translation in reader.TransactionPageTranslation)
                                {
                                    // capture JournalPos in a loop-local so each closure
                                    // reads its own page (deferred until materialized below,
                                    // which happens before the pagers are disposed)
                                    var pageInJournal = translation.Value.JournalPos;
                                    pagesToWrite[translation.Key] = () => recoveryPager.Read(pageInJournal);
                                }

                                break;
                            default:
                                throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
                        }
                    }

                    var sortedPages = pagesToWrite.OrderBy(x => x.Key)
                                                  .Select(x => x.Value())
                                                  .ToList();

                    if (sortedPages.Count == 0)
                    {
                        return; // journals contained no committed transactions
                    }

                    var last = sortedPages.Last();

                    // grow the data file far enough to hold the highest page (plus overflow)
                    env.Options.DataPager.EnsureContinuous(txw, last.PageNumber,
                        last.IsOverflow
                            ? env.Options.DataPager.GetNumberOfOverflowPages(last.OverflowSize)
                            : 1);

                    foreach (var page in sortedPages)
                    {
                        env.Options.DataPager.Write(page);
                    }

                    env.Options.DataPager.Sync();

                    // rebuild root and free-space trees from the restored transaction header
                    txw.State.Root = Tree.Open(txw, &lastTxHeader->Root);
                    txw.State.FreeSpaceRoot = Tree.Open(txw, &lastTxHeader->FreeSpace);

                    txw.State.FreeSpaceRoot.Name = Constants.FreeSpaceTreeName;
                    txw.State.Root.Name = Constants.RootTreeName;

                    txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

                    // the old journals no longer apply to the restored data file
                    env.Journal.Clear(txw);

                    txw.Commit();

                    // persist the restored state to the storage header
                    env.HeaderAccessor.Modify(header =>
                    {
                        header->TransactionId = lastTxHeader->TransactionId;
                        header->LastPageNumber = lastTxHeader->LastPageNumber;

                        header->Journal.LastSyncedJournal = journalNumber;
                        header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

                        header->Root = lastTxHeader->Root;
                        header->FreeSpace = lastTxHeader->FreeSpace;

                        header->Journal.CurrentJournal = journalNumber + 1;
                        header->Journal.JournalFilesCount = 0;
                    });
                }
                finally
                {
                    toDispose.ForEach(x => x.Dispose());

                    try
                    {
                        Directory.Delete(tempDir, true);
                    }
                    catch (Exception)
                    {
                        // just temp dir - ignore it
                    }
                }
            }
        }
    }
}
// Replays unsynced journal files into the data file on database startup.
// txHeader is an in/out parameter: on entry it may hold the last known
// transaction header (TransactionId == 0 means "none"); on exit it holds the
// last transaction header successfully recovered. Returns true when the
// storage header needs updating because a journal was found to be invalid
// or partially written.
public bool RecoverDatabase(TransactionHeader* txHeader)
{
    // note, we don't need to do any concurrency here, happens as a single threaded
    // fashion on db startup
    var requireHeaderUpdate = false;

    var logInfo = _headerAccessor.Get(ptr => ptr->Journal);

    if (logInfo.JournalFilesCount == 0)
    {
        // nothing to recover; just pick up where the header says we stopped
        _journalIndex = logInfo.LastSyncedJournal;
        return (false);
    }

    var oldestLogFileStillInUse = logInfo.CurrentJournal - logInfo.JournalFilesCount + 1;
    if (_env.Options.IncrementalBackupEnabled == false)
    {
        // we want to check that we cleanup old log files if they aren't needed
        // this is more just to be safe than anything else, they shouldn't be there.
        var unusedfiles = oldestLogFileStillInUse;
        while (true)
        {
            unusedfiles--;
            if (_env.Options.TryDeleteJournal(unusedfiles) == false)
            {
                break;
            }
        }
    }

    var lastSyncedTransactionId = logInfo.LastSyncedTransactionId;

    var journalFiles = new List<JournalFile>();
    long lastSyncedTxId = -1;
    long lastSyncedJournal = logInfo.LastSyncedJournal;
    // walk every journal that may contain unsynced transactions, oldest first
    for (var journalNumber = oldestLogFileStillInUse; journalNumber <= logInfo.CurrentJournal; journalNumber++)
    {
        using (var recoveryPager = _env.Options.CreateScratchPager(StorageEnvironmentOptions.JournalRecoveryName(journalNumber)))
        using (var pager = _env.Options.OpenJournalPager(journalNumber))
        {
            RecoverCurrentJournalSize(pager);

            // TransactionId == 0 means the caller had no previous header to validate against
            var transactionHeader = txHeader->TransactionId == 0 ? null : txHeader;
            var journalReader = new JournalReader(pager, recoveryPager, lastSyncedTransactionId, transactionHeader);
            journalReader.RecoverAndValidate(_env.Options);

            var pagesToWrite = journalReader
                .TransactionPageTranslation
                .Select(kvp => recoveryPager.Read(kvp.Value.JournalPos))
                .OrderBy(x => x.PageNumber)
                .ToList();

            var lastReadHeaderPtr = journalReader.LastTransactionHeader;

            if (lastReadHeaderPtr != null)
            {
                if (pagesToWrite.Count > 0)
                {
                    ApplyPagesToDataFileFromJournal(pagesToWrite);
                }

                // copy into the caller-owned header before the reader's memory goes away
                *txHeader = *lastReadHeaderPtr;
                lastSyncedTxId = txHeader->TransactionId;
                lastSyncedJournal = journalNumber;
            }

            // keep the journal file alive as part of the write-ahead log when it is the
            // current journal or when it ended in a torn/invalid transaction
            if (journalReader.RequireHeaderUpdate || journalNumber == logInfo.CurrentJournal)
            {
                var jrnlWriter = _env.Options.CreateJournalWriter(journalNumber, pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                var jrnlFile = new JournalFile(jrnlWriter, journalNumber);
                jrnlFile.InitFrom(journalReader);
                jrnlFile.AddRef(); // creator reference - write ahead log

                journalFiles.Add(jrnlFile);
            }

            if (journalReader.RequireHeaderUpdate) //this should prevent further loading of transactions
            {
                requireHeaderUpdate = true;
                break;
            }
        }
    }

    _files = _files.AppendRange(journalFiles);

    Debug.Assert(lastSyncedTxId >= 0);
    Debug.Assert(lastSyncedJournal >= 0);

    _journalIndex = lastSyncedJournal;

    // record what we actually recovered so the next startup resumes from here
    _headerAccessor.Modify(
        header =>
        {
            header->Journal.LastSyncedJournal = lastSyncedJournal;
            header->Journal.LastSyncedTransactionId = lastSyncedTxId;
            header->Journal.CurrentJournal = lastSyncedJournal;
            header->Journal.JournalFilesCount = _files.Count;
            header->IncrementalBackup.LastCreatedJournal = _journalIndex;
        });

    CleanupInvalidJournalFiles(lastSyncedJournal);
    CleanupUnusedJournalFiles(oldestLogFileStillInUse, lastSyncedJournal);

    if (_files.Count > 0)
    {
        var lastFile = _files.Last();
        if (lastFile.AvailablePages >= 2)
        {
            // it must have at least one page for the next transaction header and one page for data
            CurrentFile = lastFile;
        }
    }

    return (requireHeaderUpdate);
}
// Restores a backup archive (a zip of journal files) into this environment:
// extracts the journals to a temp directory, replays them through a recovery
// pager, writes the latest image of every page to the data file, then rebuilds
// the trees and storage header from the last restored transaction.
// Throws InvalidOperationException when an entry name is not a parsable
// journal number.
private void Restore(StorageEnvironment env, string backupPath)
{
    // block the journal applicator for the duration of the restore
    using (env.Journal.Applicator.TakeFlushingLock())
    {
        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            // push pending journal state to the data file before replaying the backup
            using (env.Options.AllowManualFlushing())
            {
                env.FlushLogToDataFile(txw);
            }

            List<string> journalNames;
            using (var package = ZipFile.Open(backupPath, ZipArchiveMode.Read))
            {
                journalNames = package.Entries.Select(x => x.Name).ToList();
            }

            if (journalNames.Count == 0)
            {
                return; // empty backup, nothing to restore
            }

            var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;
            var toDispose = new List<IDisposable>();
            try
            {
                ZipFile.ExtractToDirectory(backupPath, tempDir);

                TransactionHeader* lastTxHeader = null;

                // page number -> deferred read of the latest image of that page;
                // later journals overwrite earlier entries for the same page
                var pagesToWrite = new Dictionary<long, Func<Page>>();

                long journalNumber = -1;
                foreach (var journalName in journalNames)
                {
                    var pager = new Win32MemoryMapPager(Path.Combine(tempDir, journalName));
                    toDispose.Add(pager);

                    // journal file names are the journal number, e.g. "3.journal"
                    if (long.TryParse(journalName.Replace(".journal", string.Empty), out journalNumber) == false)
                    {
                        throw new InvalidOperationException("Cannot parse journal file number");
                    }

                    var recoveryPager = new Win32MemoryMapPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
                    toDispose.Add(recoveryPager);

                    // lastTxHeader threads through journals for cross-journal validation
                    var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);

                    while (reader.ReadOneTransaction(env.Options))
                    {
                        lastTxHeader = reader.LastTransactionHeader;
                    }

                    foreach (var translation in reader.TransactionPageTranslation)
                    {
                        // capture JournalPos in a loop-local so each closure reads its
                        // own page (materialized below, before the pagers are disposed)
                        var pageInJournal = translation.Value.JournalPos;
                        pagesToWrite[translation.Key] = () => recoveryPager.Read(pageInJournal);
                    }
                }

                var sortedPages = pagesToWrite.OrderBy(x => x.Key)
                                              .Select(x => x.Value())
                                              .ToList();

                // BUGFIX: guard against journals with no committed transactions;
                // previously sortedPages.Last() threw InvalidOperationException here
                // (the other Restore overloads already had this check)
                if (sortedPages.Count == 0)
                {
                    return;
                }

                var last = sortedPages.Last();

                // grow the data file far enough to hold the highest page (plus overflow)
                env.Options.DataPager.EnsureContinuous(txw, last.PageNumber,
                    last.IsOverflow
                        ? env.Options.DataPager.GetNumberOfOverflowPages(last.OverflowSize)
                        : 1);

                foreach (var page in sortedPages)
                {
                    env.Options.DataPager.Write(page);
                }

                env.Options.DataPager.Sync();

                // rebuild root and free-space trees from the restored transaction header
                txw.State.Root = Tree.Open(txw, env._sliceComparer, &lastTxHeader->Root);
                txw.State.FreeSpaceRoot = Tree.Open(txw, env._sliceComparer, &lastTxHeader->FreeSpace);

                txw.State.FreeSpaceRoot.Name = Constants.FreeSpaceTreeName;
                txw.State.Root.Name = Constants.RootTreeName;

                txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

                // the old journals no longer apply to the restored data file
                env.Journal.Clear(txw);

                txw.Commit();

                // persist the restored state to the storage header
                env.HeaderAccessor.Modify(header =>
                {
                    header->TransactionId = lastTxHeader->TransactionId;
                    header->LastPageNumber = lastTxHeader->LastPageNumber;

                    header->Journal.LastSyncedJournal = journalNumber;
                    header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

                    header->Root = lastTxHeader->Root;
                    header->FreeSpace = lastTxHeader->FreeSpace;

                    header->Journal.CurrentJournal = journalNumber + 1;
                    header->Journal.JournalFilesCount = 0;
                });
            }
            finally
            {
                toDispose.ForEach(x => x.Dispose());

                // ROBUSTNESS FIX: deleting the temp dir is best effort; a failure
                // here previously threw from the finally block and masked any
                // exception raised by the restore itself
                try
                {
                    Directory.Delete(tempDir, true);
                }
                catch
                {
                    // just a temp dir - safe to leave behind if the OS won't let us delete it
                }
            }
        }
    }
}
// Replays the given backup archive entries (journal files) directly into the
// environment's data file via JournalReader.ReadOneTransactionToDataFile, then
// rebuilds the root tree and storage header from the last restored transaction.
// Caller owns txw (commit happens here) and tempDir; this method disposes
// everything added to toDispose and deletes tempDir on exit.
private static void Restore(StorageEnvironment env, IEnumerable<ZipArchiveEntry> entries, string tempDir, List<IDisposable> toDispose, LowLevelTransaction txw)
{
    try
    {
        TransactionHeader* lastTxHeader = null;

        long journalNumber = -1;
        foreach (var entry in entries)
        {
            switch (Path.GetExtension(entry.Name))
            {
                case ".merged-journal":
                case ".journal":
                    var jounalFileName = Path.Combine(tempDir, entry.Name);
                    using (var output = new FileStream(jounalFileName, FileMode.Create))
                    using (var input = entry.Open())
                    {
                        // NOTE(review): FileMode.Create truncates, so this
                        // Position = Length is effectively a no-op — confirm intent
                        output.Position = output.Length;
                        input.CopyTo(output);
                    }

                    var pager = env.Options.OpenPager(jounalFileName);
                    toDispose.Add(pager);

                    // journal file names are the journal number
                    if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
                    {
                        throw new InvalidOperationException("Cannot parse journal file number");
                    }

                    var recoveryPager = env.Options.CreateScratchPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)), env.Options.InitialFileSize ?? env.Options.InitialLogFileSize);
                    toDispose.Add(recoveryPager);

                    using (
                        var reader = new JournalReader(pager, env.Options.DataPager, recoveryPager, 0, lastTxHeader))
                    {
                        while (reader.ReadOneTransactionToDataFile(env.Options))
                        {
                            lastTxHeader = reader.LastTransactionHeader;
                        }
                    }
                    // NOTE(review): lastTxHeader is dereferenced below after the reader
                    // that produced it is disposed; a sibling overload copies the header
                    // to a stackalloc'd slot for this reason — verify the memory behind
                    // LastTransactionHeader outlives the reader here
                    break;
                default:
                    throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
            }
        }

        if (lastTxHeader == null)
        {
            return; // there was no valid transactions, nothing to do
        }

        env.Options.DataPager.Sync();

        // rebuild the root tree from the last restored transaction header
        var root = Tree.Open(txw, null, &lastTxHeader->Root);
        root.Name = Constants.RootTreeNameSlice;

        txw.UpdateRootsIfNeeded(root);

        txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

        // the old journals no longer apply to the restored data file
        env.Journal.Clear(txw);

        txw.Commit();

        // persist the restored state to the storage header
        env.HeaderAccessor.Modify(header =>
        {
            header->TransactionId = lastTxHeader->TransactionId;
            header->LastPageNumber = lastTxHeader->LastPageNumber;

            header->Journal.LastSyncedJournal = journalNumber;
            header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

            header->Root = lastTxHeader->Root;

            header->Journal.CurrentJournal = journalNumber + 1;
            header->Journal.JournalFilesCount = 0;
        });
    }
    finally
    {
        toDispose.ForEach(x => x.Dispose());

        try
        {
            Directory.Delete(tempDir, true);
        }
        catch
        {
            // this is just a temporary directory, the worst case scenario is that we
            // don't reclaim the space from the OS temp directory; if for some reason
            // we cannot delete it we are safe to ignore it
        }
    }
}