/// <summary>
/// Reads the transaction header located at the current 4KB read position (<c>_readAt4Kb</c>)
/// of the journal and validates it: header marker, transaction id sequencing against the last
/// known transaction, and data hash (or decryption success when encryption is enabled).
/// </summary>
/// <param name="options">Storage options; used for encryption state and error callbacks.</param>
/// <param name="current">On success, points at the (possibly remapped/decrypted) header; null only at end of journal.</param>
/// <returns>
/// true when a header was consumed — either a valid transaction or already-synced data whose
/// corruption may be safely ignored; false when reading of this journal must stop
/// (end of journal, garbage/reused area, or an unrecoverable invalid transaction —
/// callers should then inspect <c>RequireHeaderUpdate</c>).
/// </returns>
private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader* current)
{
    if (_readAt4Kb > _journalPagerNumberOfAllocated4Kb)
    {
        current = null;
        return false; // end of journal
    }

    // Translate the 4KB read offset into a page number + byte offset within that page.
    const int pageTo4KbRatio = Constants.Storage.PageSize / (4 * Constants.Size.Kilobyte);
    var pageNumber = _readAt4Kb / pageTo4KbRatio;
    var positionInsidePage = (_readAt4Kb % pageTo4KbRatio) * (4 * Constants.Size.Kilobyte);

    current = (TransactionHeader*)(_journalPager.AcquirePagePointer(this, pageNumber) + positionInsidePage);

    // due to the reuse of journals we no longer can assume we have zeros in the end of the journal
    // we might have there random garbage or old transactions we can ignore, so we have the following scenarios:
    // * TxId <= current Id     :: we can ignore old transaction of the reused journal and continue
    // * TxId == current Id + 1 :: valid, but if hash is invalid, the transaction hasn't been committed
    // * TxId >  current Id + 1 :: if hash is invalid we can ignore reused/random data, but if hash is valid then we might have missed TXs

    if (current->HeaderMarker != Constants.TransactionHeaderMarker)
    {
        // not a transaction page,

        // if the header marker is zero or garbage, we are probably in the area at the end of the log file, and have no additional log records
        // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
        // this log file and move to the next one, or it might have happened because of reuse of journal file

        // note : we might encounter a "valid" TransactionHeaderMarker which is still garbage, so we will test that later on

        RequireHeaderUpdate = false;
        return false;
    }

    // Negative tx id can only be garbage — stop reading here.
    if (current->TransactionId < 0)
        return false;

    // Make sure the whole transaction (not just the first page) is addressable through `current`.
    current = EnsureTransactionMapped(current, pageNumber, positionInsidePage);

    bool hashIsValid;
    if (options.Encryption.IsEnabled)
    {
        // We use temp buffers to hold the transaction before decrypting, and release the buffers afterwards.
        var pagesSize = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;
        var size = (4 * Constants.Size.Kilobyte) * GetNumberOf4KbFor(sizeof(TransactionHeader) + pagesSize);

        var ptr = PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out var thread);
        var buffer = new EncryptionBuffer
        {
            Pointer = ptr,
            Size = size,
            AllocatingThread = thread
        };

        // Tracked so the buffer is released when recovery finishes (freed elsewhere, not here).
        _encryptionBuffers.Add(buffer);
        Memory.Copy(buffer.Pointer, (byte*)current, size);
        current = (TransactionHeader*)buffer.Pointer;

        try
        {
            // Decryption doubles as integrity validation: a tampered/garbage payload fails to decrypt.
            DecryptTransaction((byte*)current, options);
            hashIsValid = true;
        }
        catch (InvalidOperationException ex)
        {
            if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
            {
                options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                    $"Unable to decrypt data of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                    "Safely continuing the startup recovery process.", ex);

                return true;
            }

            RequireHeaderUpdate = true;
            options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " was not committed", ex);
            return false;
        }
    }
    else
    {
        hashIsValid = ValidatePagesHash(options, current);
    }

    long lastTxId;

    if (LastTransactionHeader != null)
    {
        lastTxId = LastTransactionHeader->TransactionId;
    }
    else
    {
        // this is the first transaction being processed in the recovery process

        if (_journalInfo.LastSyncedTransactionId == -1 || current->TransactionId <= _journalInfo.LastSyncedTransactionId)
        {
            if (hashIsValid == false && CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
            {
                options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                    $"Invalid hash of data of first transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                    "Safely continuing the startup recovery process.", null);

                return true;
            }

            if (hashIsValid && _firstValidTransactionHeader == null)
                _firstValidTransactionHeader = current;

            return hashIsValid;
        }

        lastTxId = _journalInfo.LastSyncedTransactionId;
    }

    var txIdDiff = current->TransactionId - lastTxId;

    // 1 is a first storage transaction which does not increment transaction counter after commit
    if (current->TransactionId != 1)
    {
        if (txIdDiff < 0)
        {
            // Older transaction than we already have — reused journal leftovers, ignorable only if already synced.
            if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
            {
                options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                    $"Encountered integrity error of transaction data which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). Negative tx id diff: {txIdDiff}. " +
                    "Safely continuing the startup recovery process.", null);

                return true;
            }

            return false;
        }

        if (txIdDiff > 1 || txIdDiff == 0)
        {
            if (hashIsValid)
            {
                // TxId is bigger than the last one by more than '1' but has a valid hash, which means we lost transactions in the middle

                if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                {
                    // when running in ignore data integrity errors mode then we could skip corrupted but already synced data
                    // so it's expected in this case that txIdDiff > 1, let it continue to work then

                    options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                        $"Encountered integrity error of transaction data which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). Tx diff is: {txIdDiff}. " +
                        $"Safely continuing the startup recovery process. Debug details - file header {_currentFileHeader}", null);

                    return true;
                }

                if (LastTransactionHeader != null)
                {
                    throw new InvalidJournalException(
                        $"Transaction has valid(!) hash with invalid transaction id {current->TransactionId}, the last valid transaction id is {LastTransactionHeader->TransactionId}. Tx diff is: {txIdDiff}." +
                        $" Journal file {_journalPager.FileName} might be corrupted. Debug details - file header {_currentFileHeader}", _journalInfo);
                }

                throw new InvalidJournalException(
                    $"The last synced transaction id was {_journalInfo.LastSyncedTransactionId} (in journal: {_journalInfo.LastSyncedJournal}) but the first transaction being read in the recovery process is {current->TransactionId} (transaction has valid hash). Tx diff is: {txIdDiff}. " +
                    $"Some journals are missing. Current journal file {_journalPager.FileName}. Debug details - file header {_currentFileHeader}", _journalInfo);
            }
            // invalid hash with a tx id gap: reused/random garbage — falls through to the hashIsValid == false handling below
        }

        // if (txIdDiff == 1) :
        if (current->LastPageNumber <= 0)
        {
            if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
            {
                options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                    $"Invalid last page number ({current->LastPageNumber}) in the header of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                    $"Safely continuing the startup recovery process. Debug details - file header {_currentFileHeader}", null);

                return true;
            }

            // Fixed: the message previously lacked the '$' interpolation prefix, so the literal text
            // "{_currentFileHeader}" was emitted instead of the actual file header debug details.
            throw new InvalidDataException($"Last page number after committed transaction must be greater than 0. Debug details - file header {_currentFileHeader}");
        }
    }

    if (hashIsValid == false)
    {
        if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
        {
            options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                $"Invalid hash of data of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                "Safely continuing the startup recovery process.", null);

            return true;
        }

        RequireHeaderUpdate = true;
        return false;
    }

    if (_firstValidTransactionHeader == null)
        _firstValidTransactionHeader = current;

    return true;
}