/// <summary>
/// Validates the XXHash64 signature of the transaction payload that immediately follows
/// the transaction header. Returns false (after reporting a recovery error and flagging
/// a header update) when the size is invalid, exceeds the remaining journal space, or
/// the computed hash does not match the stored one.
/// </summary>
private bool ValidatePagesHash(StorageEnvironmentOptions options, TransactionHeader *current)
{
    // Payload starts right after the header.
    byte *dataPtr = (byte *)current + sizeof(TransactionHeader);

    // CompressedSize == -1 marks an uncompressed transaction, in which case
    // UncompressedSize is the payload size.
    var size = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;
    if (size < 0)
    {
        RequireHeaderUpdate = true;
        // negative size is not supported
        // Fix: report the size that was actually checked (may come from UncompressedSize),
        // and correct the "Compresses" typo.
        options.InvokeRecoveryError(this, $"Compressed size {size} is negative", null);
        return false;
    }

    if (size > (_journalPagerNumberOfAllocated4Kb - _readAt4Kb) * 4 * Constants.Size.Kilobyte)
    {
        // we can't read past the end of the journal
        RequireHeaderUpdate = true;
        var compressLabel = (current->CompressedSize != -1) ? "Compressed" : "Uncompressed";
        options.InvokeRecoveryError(this,
            $"Size {size} ({compressLabel}) is too big for the journal size {_journalPagerNumberOfAllocated4Kb * 4 * Constants.Size.Kilobyte}", null);
        return false;
    }

    // The transaction id is used as the hash seed.
    ulong hash = Hashing.XXHash64.Calculate(dataPtr, (ulong)size, (ulong)current->TransactionId);
    if (hash != current->Hash)
    {
        RequireHeaderUpdate = true;
        options.InvokeRecoveryError(this, "Invalid hash signature for transaction: " + current->ToString(), null);
        return false;
    }

    return true;
}
/// <summary>
/// Allocates a single scratch page, zeroes it, and initializes the transaction
/// header that will later be written to the journal, then resets per-transaction
/// page-tracking state.
/// </summary>
private void InitTransactionHeader()
{
    var allocation = _env.ScratchBufferPool.Allocate(this, 1);
    var headerPage = _env.ScratchBufferPool.ReadPage(allocation.ScratchFileNumber, allocation.PositionInScratchBuffer);
    _transactionHeaderPage = allocation;

    // Start from a fully zeroed page so every header field has a known value.
    StdLib.memset(headerPage.Base, 0, AbstractPager.PageSize);

    _txHeader = (TransactionHeader *)headerPage.Base;
    var header = _txHeader;
    header->HeaderMarker = Constants.TransactionHeaderMarker;
    header->TransactionId = _id;
    header->NextPageNumber = _state.NextPageNumber;
    // -1 acts as "not yet known"; filled in at commit time.
    header->LastPageNumber = -1;
    header->PageCount = -1;
    header->Crc = 0;
    header->TxMarker = TransactionMarker.None;
    header->Compressed = false;
    header->CompressedSize = 0;
    header->UncompressedSize = 0;

    // Reset the per-transaction bookkeeping.
    _allocatedPagesInTransaction = 0;
    _overflowPagesInTransaction = 0;
    _scratchPagesTable.Clear();
}
/// <summary>
/// Advances the 4KB read position past the current transaction without applying it,
/// and remembers it as the last transaction header seen.
/// </summary>
private void SkipCurrentTransaction(TransactionHeader *current)
{
    _readAt4Kb += GetTransactionSizeIn4Kb(current);
    LastTransactionHeader = current;
}
// Copies pages from an existing scratch-buffer allocation into freshly allocated
// scratch pages for this transaction, registering each page as dirty and mapping
// it in the scratch pages table. Finally adopts the source transaction's
// NextPageNumber into the current state.
internal void WriteDirect(TransactionHeader *transactionHeader, PageFromScratchBuffer pages)
{
    for (int i = 0; i < pages.NumberOfPages; i++)
    {
        var page = _env.ScratchBufferPool.ReadPage(pages.ScratchFileNumber, pages.PositionInScratchBuffer + i);
        int numberOfPages = 1;
        if (page.IsOverflow)
        {
            // Overflow page: size rounded up to whole pages.
            numberOfPages = (page.OverflowSize / AbstractPager.PageSize) + (page.OverflowSize % AbstractPager.PageSize == 0 ? 0 : 1);
            // NOTE(review): combined with the loop's i++, this advances i by
            // numberOfPages + 1 — looks like it may skip one extra source page.
            // TODO confirm against the overflow layout used by the caller.
            i += numberOfPages;
            _overflowPagesInTransaction += (numberOfPages - 1);
        }
        // Allocate destination scratch space and copy the raw page bytes over.
        var pageFromScratchBuffer = _env.ScratchBufferPool.Allocate(this, numberOfPages);
        var dest = _env.ScratchBufferPool.AcquirePagePointer(pageFromScratchBuffer.ScratchFileNumber, pageFromScratchBuffer.PositionInScratchBuffer);
        StdLib.memcpy(dest, page.Base, numberOfPages * AbstractPager.PageSize);
        _allocatedPagesInTransaction++;
        _dirtyPages.Add(page.PageNumber);
        page.Dirty = true;
        if (numberOfPages > 1)
        {
            // Track the overflow continuation pages separately from the first page.
            _dirtyOverflowPages.Add(page.PageNumber + 1, numberOfPages - 1);
        }
        _scratchPagesTable[page.PageNumber] = pageFromScratchBuffer;
        _transactionPages.Add(pageFromScratchBuffer);
        // Loop-invariant assignment; kept inside the loop to preserve behavior
        // when pages.NumberOfPages == 0 (no assignment happens then).
        _state.NextPageNumber = transactionHeader->NextPageNumber;
    }
}
/// <summary>
/// Validates the XXHash64 signature of the compressed transaction payload in the journal.
/// Returns false (after reporting a recovery error and flagging a header update) when the
/// compressed size is negative, exceeds the remaining journal space, or the hash mismatches.
/// </summary>
private bool ValidatePagesHash(StorageEnvironmentOptions options, TransactionHeader *current)
{
    // The location of the data is the base pointer, plus the space reserved for the transaction header if uncompressed.
    byte *dataPtr = _journalPager.AcquirePagePointer(this, _readingPage) + sizeof(TransactionHeader);

    if (current->CompressedSize < 0)
    {
        RequireHeaderUpdate = true;
        // negative size is not supported
        // Fix: corrected "Compresses size" typo in the error message.
        options.InvokeRecoveryError(this, $"Compressed size {current->CompressedSize} is negative", null);
        return false;
    }

    if (current->CompressedSize > (_journalPager.NumberOfAllocatedPages - _readingPage) * _journalPager.PageSize)
    {
        // we can't read past the end of the journal
        RequireHeaderUpdate = true;
        options.InvokeRecoveryError(this,
            $"Compressed size {current->CompressedSize} is too big for the journal size {_journalPager.NumberOfAllocatedPages * _journalPager.PageSize}", null);
        return false;
    }

    ulong hash = Hashing.XXHash64.Calculate(dataPtr, (ulong)current->CompressedSize);
    if (hash != current->Hash)
    {
        RequireHeaderUpdate = true;
        options.InvokeRecoveryError(this, "Invalid hash signature for transaction: " + current->ToString(), null);
        return false;
    }

    return true;
}
/// <summary>
/// Allocates a single scratch page, zeroes it, and initializes the transaction
/// header (this variant stores a Hash field rather than a CRC), then resets the
/// per-transaction page-tracking state.
/// </summary>
private void InitTransactionHeader()
{
    var allocation = _env.ScratchBufferPool.Allocate(this, 1);
    var headerPage = _env.ScratchBufferPool.ReadPage(this, allocation.ScratchFileNumber, allocation.PositionInScratchBuffer);
    _transactionHeaderPage = allocation;

    // Zero the whole page first so every header field starts from a known value.
    UnmanagedMemory.Set(headerPage.Pointer, 0, Environment.Options.PageSize);

    _txHeader = (TransactionHeader *)headerPage.Pointer;
    var header = _txHeader;
    header->HeaderMarker = Constants.TransactionHeaderMarker;
    header->TransactionId = _id;
    header->NextPageNumber = _state.NextPageNumber;
    // -1 acts as "not yet known"; filled in at commit time.
    header->LastPageNumber = -1;
    header->PageCount = -1;
    header->Hash = 0;
    header->TxMarker = TransactionMarker.None;
    header->Compressed = false;
    header->CompressedSize = 0;
    header->UncompressedSize = 0;

    // Reset the per-transaction bookkeeping.
    _allocatedPagesInTransaction = 0;
    _overflowPagesInTransaction = 0;
    _scratchPagesTable.Clear();
}
/// <summary>
/// Sanity-checks a transaction header read from the journal against basic invariants
/// and (optionally) against the previous transaction's header.
/// </summary>
/// <param name="current">Header being validated; must not be null.</param>
/// <param name="previous">Header of the preceding transaction, or null for the first one.</param>
/// <exception cref="InvalidDataException">Thrown when any invariant is violated.</exception>
private void ValidateHeader(TransactionHeader *current, TransactionHeader *previous)
{
    if (current->TransactionId < 0)
    {
        throw new InvalidDataException("Transaction id cannot be less than 0 (Tx: " + current->TransactionId + " )");
    }

    // Hoisted: HasFlag was evaluated twice (and Enum.HasFlag boxes its argument).
    var isCommitted = current->TxMarker.HasFlag(TransactionMarker.Commit);
    if (isCommitted && current->LastPageNumber < 0)
    {
        throw new InvalidDataException("Last page number after committed transaction must be greater than 0");
    }
    if (isCommitted && current->PageCount > 0 && current->Crc == 0)
    {
        throw new InvalidDataException("Committed and not empty transaction checksum can't be equal to 0");
    }

    if (current->Compressed)
    {
        if (current->CompressedSize <= 0)
        {
            throw new InvalidDataException("Compression error in transaction.");
        }
    }

    if (previous == null)
    {
        return;
    }

    if (current->TransactionId != 1 && // 1 is a first storage transaction which does not increment transaction counter after commit
        current->TransactionId - previous->TransactionId != 1)
    {
        throw new InvalidDataException("Unexpected transaction id. Expected: " + (previous->TransactionId + 1) + ", got:" + current->TransactionId);
    }
}
// Reads a single transaction from the journal: validates its header (and optionally
// its CRC), decompresses its pages into the recovery pager, and merges the resulting
// page translations into _transactionPageTranslation. Returns false when there is
// nothing more to read or the transaction is invalid.
public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
{
    if (_readingPage >= _pager.NumberOfAllocatedPages)
    {
        return(false); // reached the end of the journal
    }
    TransactionHeader *current;
    if (!TryReadAndValidateHeader(options, out current))
    {
        return(false);
    }
    // Compressed payload size rounded up to whole pages.
    var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);
    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        // Already synced to the data file — just advance past it.
        LastTransactionHeader = current;
        _readingPage += compressedPages;
        return(true); // skipping
    }
    if (checkCrc && !ValidatePagesCrc(options, compressedPages, current))
    {
        return(false);
    }
    var totalPageCount = current->PageCount + current->OverflowPageCount;
    // Reserve and zero enough recovery pages for the decompressed output.
    _recoveryPager.EnsureContinuous(null, _recoveryPage, totalPageCount + 1);
    var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);
    NativeMethods.memset(dataPage, 0, totalPageCount * AbstractPager.PageSize);
    try
    {
        LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
    }
    catch (Exception e)
    {
        // Decompression failure means the journal tail is corrupt — request header update.
        options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
        RequireHeaderUpdate = true;
        return(false);
    }
    // Build the page-number -> recovery-position mapping for this transaction.
    var tempTransactionPageTranslaction = (*current).GetTransactionToPageTranslation(_recoveryPager, ref _recoveryPage);
    _readingPage += compressedPages;
    LastTransactionHeader = current;
    foreach (var pagePosition in tempTransactionPageTranslaction)
    {
        _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
    }
    return(true);
}
/// <summary>
/// Returns true when a data-integrity error in <paramref name="currentTx"/> may be
/// ignored: the options allow it and the transaction was already synced to the data file.
/// </summary>
private bool CanIgnoreDataIntegrityErrorBecauseTxWasSynced(TransactionHeader *currentTx, StorageEnvironmentOptions options)
{
    // if we have a journal which contains transactions that has been synced and this is the case for current transaction
    // then we can continue the recovery regardless encountered errors
    if (options.IgnoreDataIntegrityErrorsOfAlreadySyncedTransactions == false)
        return false;

    return IsAlreadySyncTransaction(currentTx);
}
/// <summary>
/// Returns true when a data-integrity error in <paramref name="currentTx"/> may be
/// ignored: the options allow it, the transaction was already synced, and it is not
/// a stale transaction left over from a recycled journal.
/// </summary>
private bool CanIgnoreDataIntegrityErrorBecauseTxWasSynced(TransactionHeader *currentTx, StorageEnvironmentOptions options)
{
    // if we have a journal which contains transactions that has been synced and this is the case for current transaction
    // then we can continue the recovery regardless encountered errors
    if (options.IgnoreDataIntegrityErrorsOfAlreadySyncedTransactions == false)
        return false;

    if (IsAlreadySyncTransaction(currentTx) == false)
        return false;

    // when reusing journal we might encounter a transaction with valid Id but it comes from already deleted (and reused journal)
    return _firstValidTransactionHeader == null || currentTx->TransactionId > _firstValidTransactionHeader->TransactionId;
}
/// <summary>
/// Returns the total on-disk size of a transaction (header + payload) expressed in
/// 4KB units, rounded up.
/// </summary>
private static long GetTransactionSizeIn4Kb(TransactionHeader *current)
{
    // CompressedSize == -1 marks an uncompressed transaction.
    var dataSize = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;
    var totalSize = dataSize + sizeof(TransactionHeader);
    var fourKb = 4 * Constants.Size.Kilobyte;

    // Ceiling division: one extra unit when there is a remainder.
    var sizeIn4Kb = totalSize / fourKb;
    if (totalSize % fourKb != 0)
        sizeIn4Kb++;
    return sizeIn4Kb;
}
/// <summary>
/// Creates a reader positioned at the start of the journal (or at
/// <paramref name="recoverPage"/> within the recovery pager).
/// </summary>
public JournalReader(IVirtualPager pager, IVirtualPager recoveryPager, long lastSyncedTransactionId, TransactionHeader *previous, int recoverPage = 0)
{
    _pager = pager;
    _recoveryPager = recoveryPager;
    _lastSyncedTransactionId = lastSyncedTransactionId;

    // Start reading from the beginning of the journal.
    _readingPage = 0;
    _recoveryPage = recoverPage;
    LastTransactionHeader = previous;
    RequireHeaderUpdate = false;
}
/// <summary>
/// Creates a reader over <paramref name="journalPager"/>, recovering into
/// <paramref name="recoveryPager"/> and applying to <paramref name="dataPager"/>.
/// </summary>
public JournalReader(AbstractPager journalPager, AbstractPager dataPager, AbstractPager recoveryPager, long lastSyncedTransactionId, TransactionHeader *previous)
{
    _journalPager = journalPager;
    _dataPager = dataPager;
    _recoveryPager = recoveryPager;
    _lastSyncedTransactionId = lastSyncedTransactionId;

    // Start reading from the beginning of the journal.
    _readingPage = 0;
    LastTransactionHeader = previous;
    RequireHeaderUpdate = false;
}
/// <summary>
/// Ensures the whole transaction (header + payload) is mapped in the journal pager
/// and returns a pointer to the header acquired after the mapping.
/// </summary>
private TransactionHeader *EnsureTransactionMapped(TransactionHeader *current, long pageNumber, long positionInsidePage)
{
    // CompressedSize == -1 marks an uncompressed transaction.
    var dataSize = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;
    var pagesToMap = GetNumberOfPagesFor(positionInsidePage + sizeof(TransactionHeader) + dataSize);
    _journalPager.EnsureMapped(this, pageNumber, pagesToMap);

    // Acquire the pointer only after the range is mapped.
    var headerPtr = _journalPager.AcquirePagePointer(this, pageNumber) + positionInsidePage;
    return (TransactionHeader *)headerPtr;
}
/// <summary>
/// Creates a reader positioned at the start of the journal.
/// </summary>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="pager"/> is null.</exception>
public JournalReader(IVirtualPager pager, IVirtualPager recoveryPager, long lastSyncedTransactionId, TransactionHeader *previous)
{
    if (pager == null)
    {
        // Fix: use nameof instead of a hard-coded parameter-name string (refactor-safe).
        throw new ArgumentNullException(nameof(pager));
    }
    RequireHeaderUpdate = false;
    _pager = pager;
    _recoveryPager = recoveryPager;
    _lastSyncedTransactionId = lastSyncedTransactionId;
    // Start reading from the beginning of the journal.
    _readingPage = 0;
    _recoveryPage = 0;
    LastTransactionHeader = previous;
    _previousTransactionCrc = 0;
}
/// <summary>
/// Validates the XXHash64 signature of the transaction payload. For compressed
/// transactions the payload follows the header; for uncompressed ones it starts at
/// the page base. Returns false (and reports a recovery error) on mismatch.
/// </summary>
private bool ValidatePagesHash(StorageEnvironmentOptions options, TransactionHeader *current)
{
    // The location of the data is the base pointer, plus the space reserved for the transaction header if uncompressed.
    // NOTE(review): the code actually skips the header when Compressed is TRUE, which
    // contradicts this inherited comment — confirm against the writer side.
    // Fix: removed the non-idiomatic "== true" comparisons on the bool field.
    byte *dataPtr = _pager.AcquirePagePointer(null, _readingPage) + (current->Compressed ? sizeof(TransactionHeader) : 0);

    ulong hash = Hashing.XXHash64.Calculate(dataPtr, current->Compressed ? current->CompressedSize : current->UncompressedSize);
    if (hash != current->Hash)
    {
        RequireHeaderUpdate = true;
        options.InvokeRecoveryError(this, "Invalid hash signature for transaction " + current->TransactionId, null);
        return false;
    }
    return true;
}
/// <summary>
/// Throws an <see cref="InvalidDataException"/> describing a page-checksum mismatch
/// found while reading a journal, including overflow details when applicable.
/// </summary>
private void ThrowInvalidChecksumOnPageFromJournal(long pageNumber, TransactionHeader *current, ulong expectedChecksum, ulong checksum, PageHeader *pageHeader)
{
    var message =
        $"Invalid checksum for page {pageNumber} in transaction {current->TransactionId}, journal file {_journalPager} might be corrupted, expected hash to be {expectedChecksum} but was {checksum}." +
        $"Data from journal has not been applied to data file {_dataPager} yet. " +
        $"Page flags: {pageHeader->Flags}. ";

    // Include the overflow size only when the page is an overflow page.
    if ((pageHeader->Flags & PageFlags.Overflow) == PageFlags.Overflow)
        message += $"Overflow size: {pageHeader->OverflowSize}. ";

    throw new InvalidDataException(message);
}
// Reads a single transaction from the journal and packages its still-compressed
// pages into a TransactionToShip record for replication. Returns false when there
// is nothing more to read or validation fails; already-synced transactions are
// skipped (returning true with a null record).
protected bool ReadOneTransactionForShipping(StorageEnvironmentOptions options, out TransactionToShip transactionToShipRecord)
{
    transactionToShipRecord = null;
    if (_readingPage >= _pager.NumberOfAllocatedPages)
    {
        return(false); // reached the end of the journal
    }
    TransactionHeader *current;
    if (!TryReadAndValidateHeader(options, out current))
    {
        return(false);
    }
    // Compressed payload size rounded up to whole pages.
    var compressedPageCount = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);
    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        // Already synced — just advance past it without shipping.
        LastTransactionHeader = current;
        _readingPage += compressedPageCount;
        return(true); // skipping
    }
    if (!ValidatePagesCrc(options, compressedPageCount, current))
    {
        return(false);
    }
    // Copy the raw compressed pages out of the journal into a managed buffer.
    var compressedPagesRaw = new byte[compressedPageCount * AbstractPager.PageSize];
    fixed(byte *compressedDataPtr = compressedPagesRaw)
        NativeMethods.memcpy(compressedDataPtr, _pager.AcquirePagePointer(_readingPage), compressedPageCount * AbstractPager.PageSize);
    transactionToShipRecord = new TransactionToShip(*current)
    {
        CompressedData = new MemoryStream(compressedPagesRaw), //no need to compress the pages --> after being written to Journal they are already compressed
        PreviousTransactionCrc = _previousTransactionCrc
    };
    // Chain CRCs so the receiver can verify ordering/continuity.
    _previousTransactionCrc = current->Crc;
    _readingPage += compressedPageCount;
    return(true);
}
/// <summary>
/// Creates a reader over <paramref name="journalPager"/> tracking the set of
/// modified pages; allocates encryption scratch buffers when encryption is enabled.
/// </summary>
public JournalReader(AbstractPager journalPager, AbstractPager dataPager, AbstractPager recoveryPager, HashSet <long> modifiedPages, JournalInfo journalInfo, TransactionHeader *previous)
{
    _journalPager = journalPager;
    _dataPager = dataPager;
    _recoveryPager = recoveryPager;
    _modifiedPages = modifiedPages;
    _journalInfo = journalInfo;
    LastTransactionHeader = previous;
    RequireHeaderUpdate = false;

    // Start reading from the beginning; positions are tracked in 4KB units.
    _readAt4Kb = 0;
    _journalPagerNumberOfAllocated4Kb = _journalPager.TotalAllocationSize / (4 * Constants.Size.Kilobyte);

    // Encrypted journals need temporary buffers for decryption during recovery.
    if (journalPager.Options.Encryption.IsEnabled)
        _encryptionBuffers = new List <EncryptionBuffer>();
}
// Copies pages from an existing scratch-buffer allocation into this transaction
// via WritePageDirect, then adopts the source transaction's NextPageNumber.
internal void WriteDirect(TransactionHeader *transactionHeader, PageFromScratchBuffer pages)
{
    for (int i = 0; i < pages.NumberOfPages; i++)
    {
        var page = _env.ScratchBufferPool.ReadPage(pages.ScratchFileNumber, pages.PositionInScratchBuffer + i);
        int numberOfPages = 1;
        if (page.IsOverflow)
        {
            // Overflow page: size rounded up to whole pages.
            numberOfPages = (page.OverflowSize / AbstractPager.PageSize) + (page.OverflowSize % AbstractPager.PageSize == 0 ? 0 : 1);
            // NOTE(review): combined with the loop's i++, this advances i by
            // numberOfPages + 1 — looks like it may skip one extra source page.
            // TODO confirm against the overflow layout used by the caller.
            i += numberOfPages;
            _overflowPagesInTransaction += (numberOfPages - 1);
        }
        WritePageDirect(page, numberOfPages);
        // Loop-invariant assignment; kept inside the loop to preserve behavior
        // when pages.NumberOfPages == 0 (no assignment happens then).
        _state.NextPageNumber = transactionHeader->NextPageNumber;
    }
}
/// <summary>
/// Returns true when <paramref name="currentTx"/> has an id older than either the
/// first valid or the last seen transaction — i.e. it is leftover data from a
/// recycled (reused) journal file and should be ignored.
/// </summary>
private bool IsOldTransactionFromRecycledJournal(TransactionHeader *currentTx)
{
    // when reusing journal we might encounter a transaction with valid Id but it comes from already deleted (and reused journal - recyclable one)
    var txId = currentTx->TransactionId;
    var olderThanFirstValid = _firstValidTransactionHeader != null && txId < _firstValidTransactionHeader->TransactionId;
    var olderThanLastSeen = LastTransactionHeader != null && txId < LastTransactionHeader->TransactionId;
    return olderThanFirstValid || olderThanLastSeen;
}
/// <summary>
/// Creates a reader over <paramref name="journalPager"/>; allocates encryption
/// scratch buffers when encryption is enabled.
/// </summary>
public JournalReader(AbstractPager journalPager, AbstractPager dataPager, AbstractPager recoveryPager, long lastSyncedTransactionId, TransactionHeader *previous)
{
    _journalPager = journalPager;
    _dataPager = dataPager;
    _recoveryPager = recoveryPager;
    _lastSyncedTransactionId = lastSyncedTransactionId;
    LastTransactionHeader = previous;
    RequireHeaderUpdate = false;

    // Start reading from the beginning; positions are tracked in 4KB units.
    _readAt4Kb = 0;
    _journalPagerNumberOfAllocated4Kb = _journalPager.TotalAllocationSize / (4 * Constants.Size.Kilobyte);

    // Encrypted journals need temporary buffers for decryption during recovery.
    if (journalPager.Options.EncryptionEnabled)
        _encryptionBuffers = new List <EncryptionBuffer>();
}
/// <summary>
/// Reads the transaction header at the current reading page and validates its marker,
/// invariants and commit flag. On success advances the reading position past the header
/// page and returns true with <paramref name="current"/> pointing at the header.
/// </summary>
private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader *current)
{
    current = (TransactionHeader *)_pager.Read(_readingPage).Base;
    if (current->HeaderMarker != Constants.TransactionHeaderMarker)
    {
        // not a transaction page,
        // if the header marker is zero, we are probably in the area at the end of the log file, and have no additional log records
        // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
        // this log file and move to the next one.
        RequireHeaderUpdate = current->HeaderMarker != 0;
        if (RequireHeaderUpdate)
        {
            options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " header marker was set to garbage value, file is probably corrupted", null);
        }
        return false;
    }
    ValidateHeader(current, LastTransactionHeader);
    // Fix: replaced Enum.HasFlag (which boxes) with a bitwise test, matching the
    // style used by the other TryReadAndValidateHeader overloads.
    if ((current->TxMarker & TransactionMarker.Commit) != TransactionMarker.Commit)
    {
        // uncommitted transaction, probably
        RequireHeaderUpdate = true;
        options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " was not committed", null);
        return false;
    }
    _readingPage++;
    return true;
}
/// <summary>
/// Advances the 4KB read position past the current transaction without applying it.
/// Moves LastTransactionHeader forward only for unsynced transactions, and records
/// the range of skipped transaction ids for diagnostics.
/// </summary>
private void SkipCurrentTransaction(TransactionHeader *current)
{
    _readAt4Kb += GetTransactionSizeIn4Kb(current);

    // precaution: never move LastTransactionHeader backwards
    var movesForward = LastTransactionHeader == null || LastTransactionHeader->TransactionId < current->TransactionId;
    if (movesForward && current->TransactionId > _journalInfo.LastSyncedTransactionId)
        LastTransactionHeader = current;

    // Track first/last skipped transaction ids.
    if (_firstSkippedTx == null)
        _firstSkippedTx = current->TransactionId;
    else
        _lastSkippedTx = current->TransactionId;
}
/// <summary>
/// Allocates and zeroes unmanaged memory for the transaction header and fills in
/// its initial field values for the current transaction.
/// </summary>
private void InitTransactionHeader()
{
    Allocator.Allocate(sizeof(TransactionHeader), out _txHeaderMemory);
    Memory.Set(_txHeaderMemory.Ptr, 0, sizeof(TransactionHeader));

    _txHeader = (TransactionHeader *)_txHeaderMemory.Ptr;
    var header = _txHeader;
    header->HeaderMarker = Constants.TransactionHeaderMarker;

    // Only the very first transaction may have NextPageNumber <= 1.
    if (_id > 1 && _state.NextPageNumber <= 1)
        ThrowNextPageNumberCannotBeSmallerOrEqualThanOne();

    header->TransactionId = _id;
    header->NextPageNumber = _state.NextPageNumber;
    // -1 acts as "not yet known"; filled in at commit time.
    header->LastPageNumber = -1;
    header->PageCount = -1;
    header->Hash = 0;
    header->TimeStampTicksUtc = DateTime.UtcNow.Ticks;
    header->TxMarker = TransactionMarker.None;
    header->CompressedSize = 0;
    header->UncompressedSize = 0;
}
// Reads and validates the transaction header at the current reading page, ensuring
// the full (compressed) transaction is mapped before it is used. Returns false when
// the marker is invalid or the transaction was never committed.
private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader *current)
{
    current = (TransactionHeader *)_journalPager.AcquirePagePointer(this, _readingPage);
    if (current->HeaderMarker != Constants.TransactionHeaderMarker)
    {
        // not a transaction page,
        // if the header marker is zero, we are probably in the area at the end of the log file, and have no additional log records
        // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
        // this log file and move to the next one.
        RequireHeaderUpdate = current->HeaderMarker != 0;
        if (RequireHeaderUpdate)
        {
            options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " header marker was set to garbage value, file is probably corrupted", null);
        }
        return(false);
    }
    ValidateHeader(current, LastTransactionHeader);
    // Map enough pages for header + compressed payload, then re-acquire the pointer —
    // presumably EnsureMapped can invalidate the previously acquired one (the
    // re-acquire ordering here is deliberate; confirm before changing).
    _journalPager.EnsureMapped(this, _readingPage, GetNumberOfPagesFromSize(options, sizeof(TransactionHeader) + current->CompressedSize));
    current = (TransactionHeader *)_journalPager.AcquirePagePointer(this, _readingPage);
    if ((current->TxMarker & TransactionMarker.Commit) != TransactionMarker.Commit)
    {
        // uncommitted transaction, probably
        RequireHeaderUpdate = true;
        options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " was not committed", null);
        return(false);
    }
    return(true);
}
/// <summary>
/// Allocates a single scratch page, zeroes it, and initializes the transaction
/// header, then resets the per-transaction page-tracking state.
/// </summary>
private void InitTransactionHeader()
{
    var allocation = _env.ScratchBufferPool.Allocate(this, 1);
    var headerPage = _env.ScratchBufferPool.ReadPage(allocation.ScratchFileNumber, allocation.PositionInScratchBuffer);
    _transactionHeaderPage = allocation;

    // Zero the whole page first so every header field starts from a known value.
    UnmanagedMemory.Set(headerPage.Base, 0, AbstractPager.PageSize);

    _txHeader = (TransactionHeader *)headerPage.Base;
    var header = _txHeader;
    header->HeaderMarker = Constants.TransactionHeaderMarker;
    header->TransactionId = _id;
    header->NextPageNumber = _state.NextPageNumber;
    // -1 acts as "not yet known"; filled in at commit time.
    header->LastPageNumber = -1;
    header->PageCount = -1;
    header->Crc = 0;
    header->TxMarker = TransactionMarker.None;
    header->Compressed = false;
    header->CompressedSize = 0;
    header->UncompressedSize = 0;

    // Reset the per-transaction bookkeeping.
    _allocatedPagesInTransaction = 0;
    _overflowPagesInTransaction = 0;
    _scratchPagesTable.Clear();
}
// Reads one transaction from the journal and applies its pages (full copies or diffs)
// directly to the data file. Handles: scanning past corrupt regions, skipping
// already-synced transactions, optional LZ4 decompression into the recovery pager,
// per-page checksum verification, and diff application. Returns false when there is
// nothing more to read or an unrecoverable validation error occurred.
public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options)
{
    if (_readAt4Kb >= _journalPagerNumberOfAllocated4Kb)
    {
        return(false); // reached the end of the journal
    }

    if (TryReadAndValidateHeader(options, out TransactionHeader * current) == false)
    {
        // Header invalid at this position: scan forward one 4KB unit at a time looking
        // for a later valid transaction we are allowed to skip over.
        var lastValid4Kb = _readAt4Kb;
        _readAt4Kb++;
        while (_readAt4Kb < _journalPagerNumberOfAllocated4Kb)
        {
            if (TryReadAndValidateHeader(options, out current))
            {
                if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                {
                    SkipCurrentTransaction(current);
                    return(true);
                }
                RequireHeaderUpdate = true;
                break;
            }
            _readAt4Kb++;
        }
        // Restore the position of the last valid read before reporting failure.
        _readAt4Kb = lastValid4Kb;
        return(false);
    }

    if (IsAlreadySyncTransaction(current))
    {
        // Already applied to the data file — just advance past it.
        SkipCurrentTransaction(current);
        return(true);
    }

    // CompressedSize == -1 marks an uncompressed transaction.
    var performDecompression = current->CompressedSize != -1;
    var transactionSizeIn4Kb = GetTransactionSizeIn4Kb(current);
    _readAt4Kb += transactionSizeIn4Kb;

    TransactionHeaderPageInfo *pageInfoPtr;
    byte *outputPage;
    if (performDecompression)
    {
        // Decompress the payload into page 0 of the recovery pager.
        var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
        _recoveryPager.EnsureContinuous(0, numberOfPages);
        _recoveryPager.EnsureMapped(this, 0, numberOfPages);
        outputPage = _recoveryPager.AcquirePagePointer(this, 0);
        Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);
        try
        {
            LZ4.Decode64LongBuffers((byte *)current + sizeof(TransactionHeader), current->CompressedSize, outputPage, current->UncompressedSize, true);
        }
        catch (Exception e)
        {
            options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
            RequireHeaderUpdate = true;
            return(false);
        }
        pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
    }
    else
    {
        // Uncompressed: copy the payload as-is into the recovery pager.
        var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
        _recoveryPager.EnsureContinuous(0, numberOfPages);
        _recoveryPager.EnsureMapped(this, 0, numberOfPages);
        outputPage = _recoveryPager.AcquirePagePointer(this, 0);
        Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);
        Memory.Copy(outputPage, (byte *)current + sizeof(TransactionHeader), current->UncompressedSize);
        pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
    }

    // The payload starts with PageCount TransactionHeaderPageInfo entries.
    long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;
    if (totalRead > current->UncompressedSize)
    {
        throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
    }

    // Validate all page references before touching the data file.
    for (var i = 0; i < current->PageCount; i++)
    {
        if (pageInfoPtr[i].PageNumber > current->LastPageNumber)
        {
            throw new InvalidDataException($"Transaction {current->TransactionId} contains reference to page {pageInfoPtr[i].PageNumber} which is after the last allocated page {current->LastPageNumber}");
        }
    }

    for (var i = 0; i < current->PageCount; i++)
    {
        if (totalRead > current->UncompressedSize)
        {
            throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
        }

        Debug.Assert(_journalPager.Disposed == false);
        if (performDecompression)
        {
            Debug.Assert(_recoveryPager.Disposed == false);
        }

        var numberOfPagesOnDestination = GetNumberOfPagesFor(pageInfoPtr[i].Size);
        _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
        _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

        // We are going to overwrite the page, so we don't care about its current content
        var pagePtr = _dataPager.AcquirePagePointerForNewPage(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
        _dataPager.MaybePrefetchMemory(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

        // Each page entry is prefixed with its page number; verify it matches the info record.
        var pageNumber = *(long *)(outputPage + totalRead);
        if (pageInfoPtr[i].PageNumber != pageNumber)
        {
            throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {pageNumber}");
        }
        totalRead += sizeof(long);

        _modifiedPages.Add(pageNumber);
        // The continuation pages of an overflow are covered by the first page's entry.
        for (var j = 1; j < numberOfPagesOnDestination; j++)
        {
            _modifiedPages.Remove(pageNumber + j);
        }

        _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

        if (pageInfoPtr[i].DiffSize == 0)
        {
            // Full page image (no diff).
            if (pageInfoPtr[i].Size == 0)
            {
                // diff contained no changes
                continue;
            }

            var journalPagePtr = outputPage + totalRead;
            if (options.Encryption.IsEnabled == false)
            {
                // Verify the page checksum before copying it into the data file.
                var pageHeader = (PageHeader *)journalPagePtr;
                var checksum = StorageEnvironment.CalculatePageChecksum((byte *)pageHeader, pageNumber, out var expectedChecksum);
                if (checksum != expectedChecksum)
                {
                    ThrowInvalidChecksumOnPageFromJournal(pageNumber, current, expectedChecksum, checksum, pageHeader);
                }
            }

            Memory.Copy(pagePtr, journalPagePtr, pageInfoPtr[i].Size);
            totalRead += pageInfoPtr[i].Size;

            if (options.Encryption.IsEnabled)
            {
                var pageHeader = (PageHeader *)pagePtr;
                if ((pageHeader->Flags & PageFlags.Overflow) == PageFlags.Overflow)
                {
                    // need to mark overlapped buffers as invalid for commit
                    var encryptionBuffers = ((IPagerLevelTransactionState)this).CryptoPagerTransactionState[_dataPager];
                    var numberOfPages = VirtualPagerLegacyExtensions.GetNumberOfOverflowPages(pageHeader->OverflowSize);
                    for (var j = 1; j < numberOfPages; j++)
                    {
                        if (encryptionBuffers.TryGetValue(pageNumber + j, out var buffer))
                        {
                            buffer.SkipOnTxCommit = true;
                        }
                    }
                }
            }
        }
        else
        {
            // Apply a diff on top of the existing page content.
            _diffApplier.Destination = pagePtr;
            _diffApplier.Diff = outputPage + totalRead;
            _diffApplier.Size = pageInfoPtr[i].Size;
            _diffApplier.DiffSize = pageInfoPtr[i].DiffSize;
            _diffApplier.Apply(pageInfoPtr[i].IsNewDiff);
            totalRead += pageInfoPtr[i].DiffSize;
        }

        _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
    }

    LastTransactionHeader = current;
    return(true);
}
/// <summary>
/// Reads the transaction header at the current 4KB position and validates it:
/// marker, decryption (when encryption is enabled) or payload hash, transaction-id
/// continuity, and last-page-number sanity. Because journals are reused, garbage and
/// stale transactions may legitimately appear and are tolerated where possible.
/// Returns true when the transaction may be applied (or safely skipped).
/// </summary>
private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader *current)
{
    if (_readAt4Kb > _journalPagerNumberOfAllocated4Kb)
    {
        current = null;
        return false; // end of journal
    }

    const int pageTo4KbRatio = Constants.Storage.PageSize / (4 * Constants.Size.Kilobyte);
    var pageNumber = _readAt4Kb / pageTo4KbRatio;
    var positionInsidePage = (_readAt4Kb % pageTo4KbRatio) * (4 * Constants.Size.Kilobyte);

    current = (TransactionHeader *)(_journalPager.AcquirePagePointer(this, pageNumber) + positionInsidePage);

    // due to the reuse of journals we no longer can assume we have zeros in the end of the journal
    // we might have there random garbage or old transactions we can ignore, so we have the following scenarios:
    // * TxId <= current Id :: we can ignore old transaction of the reused journal and continue
    // * TxId == current Id + 1 :: valid, but if hash is invalid. Transaction hasn't been committed
    // * TxId > current Id + 1 :: if hash is invalid we can ignore reused/random, but if hash valid then we might missed TXs

    if (current->HeaderMarker != Constants.TransactionHeaderMarker)
    {
        // not a transaction page,
        // if the header marker is zero or garbage, we are probably in the area at the end of the log file, and have no additional log records
        // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
        // this log file and move to the next one, or it might have happened because of reuse of journal file
        // note : we might encounter a "valid" TransactionHeaderMarker which is still garbage, so we will test that later on
        RequireHeaderUpdate = false;
        return false;
    }

    if (current->TransactionId < 0)
    {
        return false;
    }

    // Map the whole transaction and re-acquire the pointer before using the payload.
    current = EnsureTransactionMapped(current, pageNumber, positionInsidePage);

    bool hashIsValid;
    if (options.Encryption.IsEnabled)
    {
        // We use temp buffers to hold the transaction before decrypting, and release the buffers afterwards.
        var pagesSize = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;
        var size = (4 * Constants.Size.Kilobyte) * GetNumberOf4KbFor(sizeof(TransactionHeader) + pagesSize);

        var ptr = PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out var thread);
        var buffer = new EncryptionBuffer
        {
            Pointer = ptr,
            Size = size,
            AllocatingThread = thread
        };
        _encryptionBuffers.Add(buffer);
        Memory.Copy(buffer.Pointer, (byte *)current, size);
        current = (TransactionHeader *)buffer.Pointer;

        try
        {
            // Successful decryption implies the data is authentic; no separate hash check needed.
            DecryptTransaction((byte *)current, options);
            hashIsValid = true;
        }
        catch (InvalidOperationException ex)
        {
            if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
            {
                options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                    $"Unable to decrypt data of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                    "Safely continuing the startup recovery process.", ex);
                return true;
            }

            RequireHeaderUpdate = true;
            options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " was not committed", ex);
            return false;
        }
    }
    else
    {
        hashIsValid = ValidatePagesHash(options, current);
    }

    long lastTxId;
    if (LastTransactionHeader != null)
    {
        lastTxId = LastTransactionHeader->TransactionId;
    }
    else
    {
        // this is first transaction being processed in the recovery process
        if (_journalInfo.LastSyncedTransactionId == -1 || current->TransactionId <= _journalInfo.LastSyncedTransactionId)
        {
            if (hashIsValid == false && CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
            {
                options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                    $"Invalid hash of data of first transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                    "Safely continuing the startup recovery process.", null);
                return true;
            }

            if (hashIsValid && _firstValidTransactionHeader == null)
            {
                _firstValidTransactionHeader = current;
            }
            return hashIsValid;
        }
        lastTxId = _journalInfo.LastSyncedTransactionId;
    }

    var txIdDiff = current->TransactionId - lastTxId;

    // 1 is a first storage transaction which does not increment transaction counter after commit
    if (current->TransactionId != 1)
    {
        if (txIdDiff < 0)
        {
            if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
            {
                options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                    $"Encountered integrity error of transaction data which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). Negative tx id diff: {txIdDiff}. " +
                    "Safely continuing the startup recovery process.", null);
                return true;
            }
            return false;
        }

        if (txIdDiff > 1 || txIdDiff == 0)
        {
            if (hashIsValid)
            {
                // TxId is bigger then the last one by more than '1' but has valid hash which mean we lost transactions in the middle
                if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                {
                    // when running in ignore data integrity errors mode then we could skip corrupted but already sync data
                    // so it's expected in this case that txIdDiff > 1, let it continue to work then
                    options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                        $"Encountered integrity error of transaction data which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). Tx diff is: {txIdDiff}. " +
                        $"Safely continuing the startup recovery process. Debug details - file header {_currentFileHeader}", null);
                    return true;
                }

                if (LastTransactionHeader != null)
                {
                    throw new InvalidJournalException(
                        $"Transaction has valid(!) hash with invalid transaction id {current->TransactionId}, the last valid transaction id is {LastTransactionHeader->TransactionId}. Tx diff is: {txIdDiff}." +
                        $" Journal file {_journalPager.FileName} might be corrupted. Debug details - file header {_currentFileHeader}", _journalInfo);
                }

                throw new InvalidJournalException(
                    $"The last synced transaction id was {_journalInfo.LastSyncedTransactionId} (in journal: {_journalInfo.LastSyncedJournal}) but the first transaction being read in the recovery process is {current->TransactionId} (transaction has valid hash). Tx diff is: {txIdDiff}. " +
                    $"Some journals are missing. Current journal file {_journalPager.FileName}. Debug details - file header {_currentFileHeader}", _journalInfo);
            }
        }

        // if (txIdDiff == 1) :
        if (current->LastPageNumber <= 0)
        {
            if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
            {
                options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                    $"Invalid last page number ({current->LastPageNumber}) in the header of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                    $"Safely continuing the startup recovery process. Debug details - file header {_currentFileHeader}", null);
                return true;
            }
            // Fix: this message was missing the '$' interpolation prefix, so
            // "{_currentFileHeader}" was emitted literally.
            throw new InvalidDataException($"Last page number after committed transaction must be greater than 0. Debug details - file header {_currentFileHeader}");
        }
    }

    if (hashIsValid == false)
    {
        if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
        {
            options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                $"Invalid hash of data of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                "Safely continuing the startup recovery process.", null);
            return true;
        }

        RequireHeaderUpdate = true;
        return false;
    }

    if (_firstValidTransactionHeader == null)
    {
        _firstValidTransactionHeader = current;
    }

    return true;
}
// Reads a single committed transaction from the journal during recovery.
// Returns false when the journal is exhausted or the header/CRC is invalid
// (RequireHeaderUpdate is set when the journal header must be rewritten);
// returns true when the transaction was applied or deliberately skipped.
// checkCrc: when false, the CRC verification step is bypassed entirely.
public bool ReadOneTransaction(StorageEnvironmentOptions options,bool checkCrc = true)
{
    // Stop once every allocated journal page has been consumed.
    if (_readingPage >= _pager.NumberOfAllocatedPages)
        return false;

    TransactionHeader* current;
    if (!TryReadAndValidateHeader(options, out current))
        return false;

    // Ceiling division: number of journal pages occupied by the compressed payload.
    var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);

    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        // Transaction was already synced to the data file — remember it as the
        // last seen header and skip over its pages without decompressing.
        LastTransactionHeader = current;
        _readingPage += compressedPages;
        return true; // skipping
    }

    if (checkCrc)
    {
        // CRC covers the whole compressed payload, rounded up to page granularity.
        uint crc = Crc.Value(_pager.AcquirePagePointer(_readingPage), 0, compressedPages * AbstractPager.PageSize);

        if (crc != current->Crc)
        {
            RequireHeaderUpdate = true;
            options.InvokeRecoveryError(this, "Invalid CRC signature for transaction " + current->TransactionId, null);

            return false;
        }
    }

    // Reserve room in the recovery pager for the uncompressed pages (+1 spare page,
    // presumably headroom for the decoder — TODO confirm against LZ4.Decode64).
    _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1);
    var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);

    // Zero the destination before decoding so any tail slack is deterministic.
    NativeMethods.memset(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
    try
    {
        LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
    }
    catch (Exception e)
    {
        // Corrupt compressed data: report and let the caller decide how to proceed.
        options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
        RequireHeaderUpdate = true;
        return false;
    }

    // Build the page-number -> recovery-position map locally first, so a partial
    // failure never pollutes _transactionPageTranslation.
    var tempTransactionPageTranslaction = new Dictionary<long, JournalFile.PagePosition>();

    for (var i = 0; i < current->PageCount; i++)
    {
        Debug.Assert(_pager.Disposed == false);
        Debug.Assert(_recoveryPager.Disposed == false);

        var page = _recoveryPager.Read(_recoveryPage);

        tempTransactionPageTranslaction[page.PageNumber] = new JournalFile.PagePosition
        {
            JournalPos = _recoveryPage,
            TransactionId = current->TransactionId
        };

        if (page.IsOverflow)
        {
            // Overflow pages occupy several consecutive recovery pages.
            var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
            _recoveryPage += numOfPages;
        }
        else
        {
            _recoveryPage++;
        }
    }

    _readingPage += compressedPages;

    LastTransactionHeader = current;

    // Publish the translation entries only after the whole transaction decoded cleanly.
    foreach (var pagePosition in tempTransactionPageTranslaction)
    {
        _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
    }

    return true;
}
// True when a synced transaction exists (LastSyncedTransactionId != -1) and
// the given header's transaction id is not newer than it — i.e. this
// transaction's data has already been synced to the data file.
private bool IsAlreadySyncTransaction(TransactionHeader* current)
{
    var lastSynced = _journalInfo.LastSyncedTransactionId;

    if (lastSynced == -1)
        return false; // nothing has been synced yet

    return current->TransactionId <= lastSynced;
}
// Restores a storage environment from a single zip backup file: flushes the
// current log, replays every journal entry in the archive through a
// JournalReader, writes the resulting pages into the data file, and finally
// rewrites the storage header to point at the restored state.
// Side effects: writes to env's data file, clears its journal, commits a
// read-write transaction, and creates/deletes a temp directory on disk.
private void Restore(StorageEnvironment env, string singleBackupFile)
{
    // Hold the flushing lock for the whole restore so no concurrent flush races us.
    using (env.Journal.Applicator.TakeFlushingLock())
    {
        using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite))
        {
            // Make sure everything already in the log is on disk before overwriting pages.
            using (env.Options.AllowManualFlushing())
            {
                env.FlushLogToDataFile(txw);
            }

            using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read, System.Text.Encoding.UTF8))
            {
                if (package.Entries.Count == 0)
                {
                    return; // empty backup: nothing to restore
                }

                var toDispose = new List <IDisposable>();

                // NOTE(review): path built by string concatenation rather than
                // Path.Combine — works because GetTempPath ends with a separator,
                // but worth confirming on all target platforms.
                var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;

                try
                {
                    TransactionHeader *lastTxHeader = null;

                    var pagesToWrite = new Dictionary <long, TreePage>();

                    long journalNumber = -1;
                    foreach (var entry in package.Entries)
                    {
                        switch (Path.GetExtension(entry.Name))
                        {
                            case ".merged-journal":
                            case ".journal":

                                // Extract the journal entry to a temp file so a pager can map it.
                                var jounalFileName = Path.Combine(tempDir, entry.Name);
                                using (var output = new FileStream(jounalFileName, FileMode.Create))
                                using (var input = entry.Open())
                                {
                                    // NOTE(review): FileMode.Create truncates, so Length is 0
                                    // here and this seek is effectively a no-op — confirm intent.
                                    output.Position = output.Length;
                                    input.CopyTo(output);
                                }

                                var pager = env.Options.OpenPager(jounalFileName);
                                toDispose.Add(pager);

                                // Journal files are named by their number; anything else is corrupt.
                                if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
                                {
                                    throw new InvalidOperationException("Cannot parse journal file number");
                                }

                                var recoveryPager = env.Options.CreateScratchPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
                                toDispose.Add(recoveryPager);

                                // Replay every transaction in this journal; track the last valid header.
                                var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);
                                while (reader.ReadOneTransaction(env.Options))
                                {
                                    lastTxHeader = reader.LastTransactionHeader;
                                }

                                // Collect the final version of each page from the recovery pager.
                                foreach (var translation in reader.TransactionPageTranslation)
                                {
                                    var pageInJournal = translation.Value.JournalPos;
                                    var page = recoveryPager.Read(null, pageInJournal);
                                    pagesToWrite[translation.Key] = page;

                                    // An overflow page supersedes the pages it spans — drop any
                                    // stale standalone entries for those page numbers.
                                    if (page.IsOverflow)
                                    {
                                        var numberOfOverflowPages = recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);

                                        for (int i = 1; i < numberOfOverflowPages; i++)
                                        {
                                            pagesToWrite.Remove(translation.Key + i);
                                        }
                                    }
                                }

                                break;

                            default:
                                throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
                        }
                    }

                    // Write pages in ascending page-number order so the data file grows forward.
                    var sortedPages = pagesToWrite.OrderBy(x => x.Key)
                        .Select(x => x.Value)
                        .ToList();

                    if (sortedPages.Count == 0)
                    {
                        return; // journals contained no pages to restore
                    }

                    // Size the data file up front to fit the highest page (plus its overflow).
                    var last = sortedPages.Last();
                    var numberOfPages = last.IsOverflow
                        ? env.Options.DataPager.GetNumberOfOverflowPages(
                            last.OverflowSize)
                        : 1;

                    var pagerState = env.Options.DataPager.EnsureContinuous(last.PageNumber, numberOfPages);
                    txw.EnsurePagerStateReference(pagerState);

                    foreach (var page in sortedPages)
                    {
                        env.Options.DataPager.Write(page);
                    }

                    env.Options.DataPager.Sync();

                    // Re-open the root tree from the restored header and point the tx at it.
                    var root = Tree.Open(txw, null, &lastTxHeader->Root);

                    root.Name = Constants.RootTreeName;

                    txw.UpdateRootsIfNeeded(root);
                    txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

                    // Restored state supersedes the current journal entirely.
                    env.Journal.Clear(txw);

                    txw.Commit();

                    // Finally, rewrite the storage header to reflect the restored state.
                    env.HeaderAccessor.Modify(header =>
                    {
                        header->TransactionId = lastTxHeader->TransactionId;
                        header->LastPageNumber = lastTxHeader->LastPageNumber;

                        header->Journal.LastSyncedJournal = journalNumber;
                        header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

                        header->Root = lastTxHeader->Root;

                        header->Journal.CurrentJournal = journalNumber + 1;
                        header->Journal.JournalFilesCount = 0;
                    });
                }
                finally
                {
                    toDispose.ForEach(x => x.Dispose());

                    try
                    {
                        Directory.Delete(tempDir, true);
                    }
                    catch
                    {
                        // this is just a temporary directory, the worst case scenario is that we dont reclaim the space from the OS temp directory
                        // if for some reason we cannot delete it we are safe to ignore it.
                    }
                }
            }
        }
    }
}