/// <summary>
/// Creates the next write-ahead journal file, growing the target journal size
/// when journals are being exhausted quickly. Returns the new file with the
/// creator reference already taken.
/// </summary>
/// <param name="numberOfPages">Minimum number of pages the new journal must hold.</param>
private JournalFile NextFile(int numberOfPages = 1)
{
    // Advance the journal numbering and capture the creation time so we can
    // detect rapid journal turnover below.
    _journalIndex++;
    var now = DateTime.UtcNow;

    // A journal filling up in under 90 seconds suggests a high write rate:
    // double the journal size (capped at MaxLogFileSize) to roll over less often.
    if ((now - _lastFile).TotalSeconds < 90)
    {
        _currentJournalFileSize = Math.Min(_env.Options.MaxLogFileSize, _currentJournalFileSize * 2);
    }

    // Ensure the file is large enough for the requested pages, even when that
    // exceeds the current target size. Note the target size itself is not bumped.
    var minRequiredSize = numberOfPages * AbstractPager.PageSize;
    var actualLogSize = _currentJournalFileSize < minRequiredSize
        ? minRequiredSize
        : _currentJournalFileSize;

    _lastFile = now;

    var writer = _env.Options.CreateJournalWriter(_journalIndex, actualLogSize);
    var journal = new JournalFile(writer, _journalIndex);
    journal.AddRef(); // one reference added by a creator - write ahead log

    _files = _files.Append(journal);
    _headerAccessor.Modify(_updateLogInfo);

    return journal;
}
/// <summary>
/// Allocates the next journal file for the write-ahead log. When the previous
/// journal was created less than 90 seconds ago the journal size is doubled
/// (bounded by the configured maximum) before the new file is sized.
/// </summary>
/// <param name="numberOfPages">Lower bound, in pages, on the new journal's capacity.</param>
/// <returns>The freshly created journal, already holding its creator reference.</returns>
private JournalFile NextFile(int numberOfPages = 1)
{
    _journalIndex++;

    var now = DateTime.UtcNow;
    var rolledOverQuickly = (now - _lastFile).TotalSeconds < 90;

    // Fast rollover means heavy write traffic; grow the journal so we create
    // new files less frequently. Growth is capped by the environment options.
    if (rolledOverQuickly)
    {
        _currentJournalFileSize = Math.Min(_env.Options.MaxLogFileSize, _currentJournalFileSize * 2);
    }

    // The new journal must hold at least the pages the caller asked for.
    var minRequiredSize = numberOfPages * AbstractPager.PageSize;
    var actualLogSize = Math.Max(_currentJournalFileSize, minRequiredSize);

    _lastFile = now;

    var journalWriter = _env.Options.CreateJournalWriter(_journalIndex, actualLogSize);
    var journal = new JournalFile(journalWriter, _journalIndex);
    journal.AddRef(); // one reference added by a creator - write ahead log

    _files = _files.Append(journal);
    _headerAccessor.Modify(_updateLogInfo);

    return journal;
}
/// <summary>
/// Replays the journal files recorded in the header on database startup,
/// applying recovered pages to the data file and updating the header's
/// journal bookkeeping. Returns true when a journal was found to be corrupt
/// or truncated and the header therefore requires an update.
/// </summary>
/// <param name="txHeader">In/out: on entry, the last known transaction header
/// (TransactionId == 0 means "none"); on exit, the last transaction header
/// successfully recovered.</param>
public bool RecoverDatabase(TransactionHeader *txHeader)
{
    // note, we don't need to do any concurrency here, happens as a single threaded
    // fashion on db startup
    var requireHeaderUpdate = false;

    var logInfo = _headerAccessor.Get(ptr => ptr->Journal);

    // Nothing was ever written to a journal: just restore the journal counter.
    if (logInfo.JournalFilesCount == 0)
    {
        _journalIndex = logInfo.LastSyncedJournal;
        return(false);
    }

    var oldestLogFileStillInUse = logInfo.CurrentJournal - logInfo.JournalFilesCount + 1;
    if (_env.Options.IncrementalBackupEnabled == false)
    {
        // we want to check that we cleanup old log files if they aren't needed
        // this is more just to be safe than anything else, they shouldn't be there.
        var unusedfiles = oldestLogFileStillInUse;
        while (true)
        {
            unusedfiles--;
            // Walk backwards deleting stale journals until one is missing.
            if (_env.Options.TryDeleteJournal(unusedfiles) == false)
            {
                break;
            }
        }
    }

    var lastSyncedTransactionId = logInfo.LastSyncedTransactionId;

    var journalFiles = new List<JournalFile>();
    long lastSyncedTxId = -1;
    long lastSyncedJournal = logInfo.LastSyncedJournal;

    // Replay every journal from the oldest still in use up to the current one.
    for (var journalNumber = oldestLogFileStillInUse; journalNumber <= logInfo.CurrentJournal; journalNumber++)
    {
        using (var recoveryPager = _env.Options.CreateScratchPager(StorageEnvironmentOptions.JournalRecoveryName(journalNumber)))
        using (var pager = _env.Options.OpenJournalPager(journalNumber))
        {
            RecoverCurrentJournalSize(pager);

            // TransactionId == 0 means no previous transaction header is known.
            var transactionHeader = txHeader->TransactionId == 0 ? null : txHeader;
            var journalReader = new JournalReader(pager, recoveryPager, lastSyncedTransactionId, transactionHeader);
            journalReader.RecoverAndValidate(_env.Options);

            // Collect the recovered pages in page-number order before applying
            // them to the data file.
            var pagesToWrite = journalReader
                .TransactionPageTranslation
                .Select(kvp => recoveryPager.Read(kvp.Value.JournalPos))
                .OrderBy(x => x.PageNumber)
                .ToList();

            var lastReadHeaderPtr = journalReader.LastTransactionHeader;

            if (lastReadHeaderPtr != null)
            {
                if (pagesToWrite.Count > 0)
                {
                    ApplyPagesToDataFileFromJournal(pagesToWrite);
                }

                // Propagate the most recently recovered transaction header to the caller.
                *txHeader = *lastReadHeaderPtr;
                lastSyncedTxId = txHeader->TransactionId;
                lastSyncedJournal = journalNumber;
            }

            // Keep the current journal (and any journal needing a header fix-up)
            // registered in the in-memory file list for future writes.
            if (journalReader.RequireHeaderUpdate || journalNumber == logInfo.CurrentJournal)
            {
                var jrnlWriter = _env.Options.CreateJournalWriter(journalNumber, pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                var jrnlFile = new JournalFile(jrnlWriter, journalNumber);
                jrnlFile.InitFrom(journalReader);
                jrnlFile.AddRef(); // creator reference - write ahead log

                journalFiles.Add(jrnlFile);
            }

            if (journalReader.RequireHeaderUpdate) //this should prevent further loading of transactions
            {
                requireHeaderUpdate = true;
                break;
            }
        }
    }

    _files = _files.AppendRange(journalFiles);

    Debug.Assert(lastSyncedTxId >= 0);
    Debug.Assert(lastSyncedJournal >= 0);

    _journalIndex = lastSyncedJournal;

    // Persist the recovered state into the on-disk header.
    _headerAccessor.Modify(
        header =>
        {
            header->Journal.LastSyncedJournal = lastSyncedJournal;
            header->Journal.LastSyncedTransactionId = lastSyncedTxId;
            header->Journal.CurrentJournal = lastSyncedJournal;
            header->Journal.JournalFilesCount = _files.Count;
            header->IncrementalBackup.LastCreatedJournal = _journalIndex;
        });

    CleanupInvalidJournalFiles(lastSyncedJournal);
    CleanupUnusedJournalFiles(oldestLogFileStillInUse, lastSyncedJournal);

    if (_files.Count > 0)
    {
        var lastFile = _files.Last();
        if (lastFile.AvailablePages >= 2)
        {
            // it must have at least one page for the next transaction header and one page for data
            CurrentFile = lastFile;
        }
    }

    return(requireHeaderUpdate);
}
/// <summary>
/// Replays the journal files recorded in the header on database startup,
/// applying recovered pages to the data file and updating the header's
/// journal bookkeeping (including the CRC of the last recovered transaction,
/// which is handed to the shipper). Returns true when a journal was found to
/// be corrupt or truncated and the header therefore requires an update.
/// </summary>
/// <param name="txHeader">In/out: on entry, the last known transaction header
/// (TransactionId == 0 means "none"); on exit, the last transaction header
/// successfully recovered.</param>
public bool RecoverDatabase(TransactionHeader* txHeader)
{
    // note, we don't need to do any concurrency here, happens as a single threaded
    // fashion on db startup
    var requireHeaderUpdate = false;

    var logInfo = _headerAccessor.Get(ptr => ptr->Journal);

    // Nothing was ever written to a journal: just restore the journal counter.
    if (logInfo.JournalFilesCount == 0)
    {
        _journalIndex = logInfo.LastSyncedJournal;
        return false;
    }

    var oldestLogFileStillInUse = logInfo.CurrentJournal - logInfo.JournalFilesCount + 1;
    if (_env.Options.IncrementalBackupEnabled == false)
    {
        // we want to check that we cleanup old log files if they aren't needed
        // this is more just to be safe than anything else, they shouldn't be there.
        var unusedfiles = oldestLogFileStillInUse;
        while (true)
        {
            unusedfiles--;
            // Walk backwards deleting stale journals until one is missing.
            if (_env.Options.TryDeleteJournal(unusedfiles) == false)
                break;
        }
    }

    var lastSyncedTransactionId = logInfo.LastSyncedTransactionId;

    var journalFiles = new List<JournalFile>();
    long lastSyncedTxId = -1;
    long lastSyncedJournal = logInfo.LastSyncedJournal;
    // CRC of the last recovered transaction, forwarded to the shipper and
    // stored in the header so shipped transactions can be chained.
    uint lastShippedTxCrc = 0;

    // Replay every journal from the oldest still in use up to the current one.
    for (var journalNumber = oldestLogFileStillInUse; journalNumber <= logInfo.CurrentJournal; journalNumber++)
    {
        using (var recoveryPager = _env.Options.CreateScratchPager(StorageEnvironmentOptions.JournalRecoveryName(journalNumber)))
        using (var pager = _env.Options.OpenJournalPager(journalNumber))
        {
            RecoverCurrentJournalSize(pager);

            // TransactionId == 0 means no previous transaction header is known.
            var transactionHeader = txHeader->TransactionId == 0 ? null : txHeader;
            var journalReader = new JournalReader(pager, recoveryPager, lastSyncedTransactionId, transactionHeader);
            journalReader.RecoverAndValidate(_env.Options);

            // Collect the recovered pages in page-number order before applying
            // them to the data file.
            var pagesToWrite = journalReader
                .TransactionPageTranslation
                .Select(kvp => recoveryPager.Read(kvp.Value.JournalPos))
                .OrderBy(x => x.PageNumber)
                .ToList();

            var lastReadHeaderPtr = journalReader.LastTransactionHeader;

            if (lastReadHeaderPtr != null)
            {
                if (pagesToWrite.Count > 0)
                    ApplyPagesToDataFileFromJournal(pagesToWrite);

                // Propagate the most recently recovered transaction header to the caller.
                *txHeader = *lastReadHeaderPtr;
                lastSyncedTxId = txHeader->TransactionId;
                lastShippedTxCrc = txHeader->Crc;
                lastSyncedJournal = journalNumber;
            }

            // Keep the current journal (and any journal needing a header fix-up)
            // registered in the in-memory file list for future writes.
            if (journalReader.RequireHeaderUpdate || journalNumber == logInfo.CurrentJournal)
            {
                var jrnlWriter = _env.Options.CreateJournalWriter(journalNumber, pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                var jrnlFile = new JournalFile(jrnlWriter, journalNumber);
                jrnlFile.InitFrom(journalReader);
                jrnlFile.AddRef(); // creator reference - write ahead log

                journalFiles.Add(jrnlFile);
            }

            if (journalReader.RequireHeaderUpdate) //this should prevent further loading of transactions
            {
                requireHeaderUpdate = true;
                break;
            }
        }
    }

    // Tell the shipper where recovery left off so shipping resumes from the
    // last recovered transaction.
    Shipper.SetPreviousTransaction(lastSyncedTxId, lastShippedTxCrc);

    _files = _files.AppendRange(journalFiles);

    Debug.Assert(lastSyncedTxId >= 0);
    Debug.Assert(lastSyncedJournal >= 0);

    _journalIndex = lastSyncedJournal;

    // Persist the recovered state into the on-disk header.
    _headerAccessor.Modify(
        header =>
        {
            header->Journal.LastSyncedJournal = lastSyncedJournal;
            header->Journal.LastSyncedTransactionId = lastSyncedTxId;
            header->Journal.CurrentJournal = lastSyncedJournal;
            header->Journal.JournalFilesCount = _files.Count;
            header->IncrementalBackup.LastCreatedJournal = _journalIndex;
            header->PreviousTransactionCrc = lastShippedTxCrc;
        });

    CleanupInvalidJournalFiles(lastSyncedJournal);
    CleanupUnusedJournalFiles(oldestLogFileStillInUse, lastSyncedJournal);

    if (_files.Count > 0)
    {
        var lastFile = _files.Last();
        // it must have at least one page for the next transaction header and one page for data
        if (lastFile.AvailablePages >= 2)
            CurrentFile = lastFile;
    }

    return requireHeaderUpdate;
}
/// <summary>
/// Resolves the journal file with the given number for incremental backup:
/// reuses (and ref-counts) an already open journal when available, otherwise
/// opens it from disk.
/// </summary>
/// <param name="env">Storage environment whose journals are being backed up.</param>
/// <param name="journalNum">Number of the journal file to fetch.</param>
/// <param name="backupInfo">Backup bookkeeping, used to detect the "first backup,
/// journal 0 missing" case.</param>
/// <returns>The journal file, with one additional reference the caller must release.</returns>
/// <exception cref="InvalidOperationException">Thrown when the very first
/// incremental backup cannot find journal 0, which indicates the storage was
/// not created with IncrementalBackupEnabled.</exception>
internal static JournalFile GetJournalFile(StorageEnvironment env, long journalNum, IncrementalBackupInfo backupInfo)
{
    var journalFile = env.Journal.Files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
    if (journalFile != null)
    {
        journalFile.AddRef();
        return journalFile;
    }

    try
    {
        // Not held by the environment anymore; open the journal from disk.
        using (var pager = env.Options.OpenJournalPager(journalNum))
        {
            // NOTE(review): size is rounded up to a power of two - presumably to match
            // the allocation granularity used when the journal was created; confirm
            // against CreateJournalWriter.
            long journalSize = Utils.NearestPowerOfTwo(pager.NumberOfAllocatedPages * AbstractPager.PageSize);

            journalFile = new JournalFile(env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
            journalFile.AddRef();
            return journalFile;
        }
    }
    catch (Exception e)
    {
        // NOTE(review): matching on the exception message is fragile, but it is the only
        // signal available here. Use an ordinal comparison so the check cannot be broken
        // by the current culture (CA1310); the default StartsWith overload is culture-sensitive.
        if (backupInfo.LastBackedUpJournal == -1 && journalNum == 0 &&
            e.Message.StartsWith("No such journal", StringComparison.Ordinal))
        {
            throw new InvalidOperationException("The first incremental backup creation failed because the first journal file " +
                                                StorageEnvironmentOptions.JournalName(journalNum) + " was not found. " +
                                                "Did you turn on the incremental backup feature after initializing the storage? " +
                                                "In order to create backups incrementally the storage must be created with IncrementalBackupEnabled option set to 'true'.", e);
        }

        throw;
    }
}