Example #1
        private bool ValidatePagesHash(StorageEnvironmentOptions options, TransactionHeader *current)
        {
            // The data starts at the base pointer plus the space reserved for the transaction header.
            byte *dataPtr = _journalPager.AcquirePagePointer(this, _readingPage) + sizeof(TransactionHeader);

            if (current->CompressedSize < 0)
            {
                RequireHeaderUpdate = true;
                // negative size is not supported
                options.InvokeRecoveryError(this, $"Compresses size {current->CompressedSize} is negative", null);
                return(false);
            }
            if (current->CompressedSize >
                (_journalPager.NumberOfAllocatedPages - _readingPage) * _journalPager.PageSize)
            {
                // we can't read past the end of the journal
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this, $"Compresses size {current->CompressedSize} is too big for the journal size {_journalPager.NumberOfAllocatedPages * _journalPager.PageSize}", null);
                return(false);
            }

            ulong hash = Hashing.XXHash64.Calculate(dataPtr, (ulong)current->CompressedSize);

            if (hash != current->Hash)
            {
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this, "Invalid hash signature for transaction: " + current->ToString(), null);

                return(false);
            }
            return(true);
        }
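A minimal, self-contained sketch of the same check, assuming the standard System.IO.Hashing NuGet package as a stand-in for Voron's internal Hashing.XXHash64 helper:

        using System;
        using System.IO.Hashing; // NuGet: System.IO.Hashing (assumed stand-in)

        static class JournalHashSketch
        {
            // Recomputes the hash over the payload that follows the transaction
            // header and compares it against the hash stored in the header.
            public static bool ValidatePayload(ReadOnlySpan<byte> payload, ulong storedHash)
            {
                return XxHash64.HashToUInt64(payload) == storedHash;
            }
        }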
Example #2
        private bool ValidatePagesHash(StorageEnvironmentOptions options, TransactionHeader *current)
        {
            byte *dataPtr = (byte *)current + sizeof(TransactionHeader);

            var size = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;

            if (size < 0)
            {
                RequireHeaderUpdate = true;
                // negative size is not supported
                options.InvokeRecoveryError(this, $"Compresses size {current->CompressedSize} is negative", null);
                return(false);
            }
            if (size >
                (_journalPagerNumberOfAllocated4Kb - _readAt4Kb) * 4 * Constants.Size.Kilobyte)
            {
                // we can't read past the end of the journal
                RequireHeaderUpdate = true;
                var compressLabel = (current->CompressedSize != -1) ? "Compressed" : "Uncompressed";
                options.InvokeRecoveryError(this, $"Size {size} ({compressLabel}) is too big for the journal size {_journalPagerNumberOfAllocated4Kb * 4 * Constants.Size.Kilobyte}", null);
                return(false);
            }

            ulong hash = Hashing.XXHash64.Calculate(dataPtr, (ulong)size, (ulong)current->TransactionId);

            if (hash != current->Hash)
            {
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this, "Invalid hash signature for transaction: " + current->ToString(), null);

                return(false);
            }
            return(true);
        }
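Note the extra argument to Calculate here: the hash is seeded with the transaction id, so a payload copied from a different transaction fails validation even when its bytes are intact. A hedged equivalent with the same System.IO.Hashing stand-in:

        using System;
        using System.IO.Hashing; // NuGet: System.IO.Hashing (assumed stand-in)

        static class SeededHashSketch
        {
            // Seeding with the transaction id rejects payloads copied from other transactions.
            public static bool ValidatePayload(ReadOnlySpan<byte> payload, ulong transactionId, ulong storedHash)
            {
                return XxHash64.HashToUInt64(payload, seed: (long)transactionId) == storedHash;
            }
        }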
Example #3
        public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
        {
            if (_readingPage >= _pager.NumberOfAllocatedPages)
            {
                return(false);
            }

            TransactionHeader *current;

            if (!TryReadAndValidateHeader(options, out current))
            {
                return(false);
            }

            var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);

            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                LastTransactionHeader = current;
                _readingPage         += compressedPages;
                return(true);                // skipping
            }

            if (checkCrc && !ValidatePagesCrc(options, compressedPages, current))
            {
                return(false);
            }

            var totalPageCount = current->PageCount + current->OverflowPageCount;

            _recoveryPager.EnsureContinuous(null, _recoveryPage, totalPageCount + 1);
            var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);

            NativeMethods.memset(dataPage, 0, totalPageCount * AbstractPager.PageSize);
            try
            {
                LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
            }
            catch (Exception e)
            {
                options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                RequireHeaderUpdate = true;

                return(false);
            }

            var tempTransactionPageTranslaction = (*current).GetTransactionToPageTranslation(_recoveryPager, ref _recoveryPage);

            _readingPage += compressedPages;

            LastTransactionHeader = current;

            foreach (var pagePosition in tempTransactionPageTranslaction)
            {
                _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
            }

            return(true);
        }
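The compressedPages computation above is a ceiling division written as quotient plus a remainder check; an equivalent helper, assuming non-negative inputs:

        static class PageMathSketch
        {
            // Whole pages needed to hold sizeInBytes (assumes sizeInBytes >= 0, pageSize > 0).
            public static int PagesFor(int sizeInBytes, int pageSize)
            {
                return (sizeInBytes + pageSize - 1) / pageSize;
            }
        }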
Example #4
        private bool ValidatePagesCrc(StorageEnvironmentOptions options, int compressedPages, TransactionHeader *current)
        {
            uint crc = Crc.Value(_pager.AcquirePagePointer(_readingPage), 0, compressedPages * AbstractPager.PageSize);

            if (crc != current->Crc)
            {
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this, "Invalid CRC signature for transaction " + current->TransactionId, null);

                return(false);
            }
            return(true);
        }
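A self-contained sketch of the CRC gate, assuming System.IO.Hashing's Crc32 as a stand-in for Voron's Crc helper (the real helper may use a different polynomial or seed):

        using System;
        using System.IO.Hashing; // NuGet: System.IO.Hashing (assumed stand-in)

        static class JournalCrcSketch
        {
            // Recomputes the CRC over the compressed pages and compares it to the stored value.
            public static bool ValidateCrc(ReadOnlySpan<byte> compressedPages, uint storedCrc)
            {
                return Crc32.HashToUInt32(compressedPages) == storedCrc;
            }
        }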
Example #5
        private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader *current)
        {
            current = (TransactionHeader *)_pager.Read(_readingPage).Base;

            if (current->HeaderMarker != Constants.TransactionHeaderMarker)
            {
                // not a transaction page,

                // if the header marker is zero, we are probably in the area at the end of the log file, and have no additional log records
                // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
                // this log file and move to the next one.

                RequireHeaderUpdate = current->HeaderMarker != 0;
                if (RequireHeaderUpdate)
                {
                    options.InvokeRecoveryError(this,
                                                "Transaction " + current->TransactionId +
                                                " header marker was set to garbage value, file is probably corrupted", null);
                }

                return(false);
            }

            ValidateHeader(current, LastTransactionHeader);

            if (current->TxMarker.HasFlag(TransactionMarker.Commit) == false)
            {
                // uncommitted transaction, probably
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this,
                                            "Transaction " + current->TransactionId +
                                            " was not committed", null);
                return(false);
            }

            _readingPage++;
            return(true);
        }
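The header-marker gate reduces to a small pure function; a sketch with a hypothetical constant standing in for Constants.TransactionHeaderMarker:

        static class HeaderMarkerSketch
        {
            const ulong TransactionHeaderMarker = 0x424C4F434B484452UL; // hypothetical value

            // Returns true when the marker identifies a transaction page. On false,
            // requireHeaderUpdate distinguishes a zeroed journal tail (false) from garbage (true).
            public static bool IsTransactionPage(ulong headerMarker, out bool requireHeaderUpdate)
            {
                if (headerMarker == TransactionHeaderMarker)
                {
                    requireHeaderUpdate = false;
                    return true;
                }

                requireHeaderUpdate = headerMarker != 0;
                return false;
            }
        }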
Example #6
        private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader *current)
        {
            current = (TransactionHeader *)_journalPager.AcquirePagePointer(this, _readingPage);

            if (current->HeaderMarker != Constants.TransactionHeaderMarker)
            {
                // not a transaction page,

                // if the header marker is zero, we are probably in the area at the end of the log file, and have no additional log records
                // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
                // this log file and move to the next one.

                RequireHeaderUpdate = current->HeaderMarker != 0;
                if (RequireHeaderUpdate)
                {
                    options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " header marker was set to garbage value, file is probably corrupted", null);
                }

                return(false);
            }

            ValidateHeader(current, LastTransactionHeader);

            _journalPager.EnsureMapped(this, _readingPage,
                                       GetNumberOfPagesFromSize(options, sizeof(TransactionHeader) + current->CompressedSize));
            current = (TransactionHeader *)_journalPager.AcquirePagePointer(this, _readingPage);

            if ((current->TxMarker & TransactionMarker.Commit) != TransactionMarker.Commit)
            {
                // uncommitted transaction, probably
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " was not committed", null);
                return(false);
            }

            return(true);
        }
Example #7
        private unsafe bool TryDecompressTransactionPages(StorageEnvironmentOptions options, TransactionHeader *current, byte *dataPage)
        {
            try
            {
                LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
            }
            catch (Exception e)
            {
                options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                RequireHeaderUpdate = true;

                return(false);
            }
            return(true);
        }
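A standalone sketch of the decompress-or-fail pattern, assuming the K4os.Compression.LZ4 package as a stand-in for Voron's LZ4.Decode64 wrapper:

        using System;
        using K4os.Compression.LZ4; // NuGet: K4os.Compression.LZ4 (assumed stand-in)

        static class DecompressSketch
        {
            // Returns false instead of propagating failures, mirroring the recovery path above.
            public static bool TryDecompress(byte[] compressed, byte[] output, int uncompressedSize)
            {
                try
                {
                    int written = LZ4Codec.Decode(compressed, 0, compressed.Length, output, 0, uncompressedSize);
                    return written == uncompressedSize; // a short or negative count signals bad data
                }
                catch (Exception)
                {
                    return false;
                }
            }
        }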
Example #8
        private bool ValidatePagesHash(StorageEnvironmentOptions options, TransactionHeader *current)
        {
            // The location of the data is the base pointer, plus the space reserved for the transaction header if uncompressed.
            byte *dataPtr = _pager.AcquirePagePointer(null, _readingPage) + (current->Compressed == true ? sizeof(TransactionHeader) : 0);

            ulong hash = Hashing.XXHash64.Calculate(dataPtr, current->Compressed == true ? current->CompressedSize : current->UncompressedSize);

            if (hash != current->Hash)
            {
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this, "Invalid hash signature for transaction " + current->TransactionId, null);

                return(false);
            }
            return(true);
        }
Example #9
        public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options)
        {
            if (_readAt4Kb >= _journalPagerNumberOfAllocated4Kb)
            {
                return(false);
            }

            TransactionHeader *current;

            if (TryReadAndValidateHeader(options, out current) == false)
            {
                var lastValid4Kb = _readAt4Kb;
                _readAt4Kb++;
                while (_readAt4Kb < _journalPagerNumberOfAllocated4Kb)
                {
                    if (TryReadAndValidateHeader(options, out current))
                    {
                        RequireHeaderUpdate = true;
                        break;
                    }
                    _readAt4Kb++;
                }

                _readAt4Kb = lastValid4Kb;
                return(false);
            }
            bool performDecompression = current->CompressedSize != -1;

            var size = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;

            var transactionSizeIn4Kb =
                (size + sizeof(TransactionHeader)) / (4 * Constants.Size.Kilobyte) +
                ((size + sizeof(TransactionHeader)) % (4 * Constants.Size.Kilobyte) == 0 ? 0 : 1);


            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                _readAt4Kb           += transactionSizeIn4Kb;
                LastTransactionHeader = current;
                return(true); // skipping
            }

            _readAt4Kb += transactionSizeIn4Kb;

            TransactionHeaderPageInfo *pageInfoPtr;
            byte *outputPage;

            if (performDecompression)
            {
                var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
                _recoveryPager.EnsureContinuous(0, numberOfPages);
                _recoveryPager.EnsureMapped(this, 0, numberOfPages);
                outputPage = _recoveryPager.AcquirePagePointer(this, 0);
                UnmanagedMemory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);

                try
                {
                    LZ4.Decode64LongBuffers((byte *)current + sizeof(TransactionHeader), current->CompressedSize, outputPage,
                                            current->UncompressedSize, true);
                }
                catch (Exception e)
                {
                    options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                    RequireHeaderUpdate = true;

                    return(false);
                }
                pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
            }
            else
            {
                var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
                _recoveryPager.EnsureContinuous(0, numberOfPages);
                _recoveryPager.EnsureMapped(this, 0, numberOfPages);
                outputPage = _recoveryPager.AcquirePagePointer(this, 0);
                UnmanagedMemory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);
                Memory.Copy(outputPage, (byte *)current + sizeof(TransactionHeader), current->UncompressedSize);
                pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
            }

            long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;

            if (totalRead > current->UncompressedSize)
            {
                throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
            }

            for (var i = 0; i < current->PageCount; i++)
            {
                if (pageInfoPtr[i].PageNumber > current->LastPageNumber)
                {
                    throw new InvalidDataException($"Transaction {current->TransactionId} contains refeence to page {pageInfoPtr[i].PageNumber} which is after the last allocated page {current->LastPageNumber}");
                }
            }

            for (var i = 0; i < current->PageCount; i++)
            {
                if (totalRead > current->UncompressedSize)
                {
                    throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
                }

                Debug.Assert(_journalPager.Disposed == false);
                if (performDecompression)
                {
                    Debug.Assert(_recoveryPager.Disposed == false);
                }

                var numberOfPagesOnDestination = GetNumberOfPagesFor(pageInfoPtr[i].Size);
                _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

                // We are going to overwrite the page, so we don't care about its current content
                var pagePtr = _dataPager.AcquirePagePointerForNewPage(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

                var pageNumber = *(long *)(outputPage + totalRead);
                if (pageInfoPtr[i].PageNumber != pageNumber)
                {
                    throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {pageNumber}");
                }
                totalRead += sizeof(long);

                _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

                if (pageInfoPtr[i].DiffSize == 0)
                {
                    Memory.Copy(pagePtr, outputPage + totalRead, pageInfoPtr[i].Size);
                    totalRead += pageInfoPtr[i].Size;
                }
                else
                {
                    _diffApplier.Destination = pagePtr;
                    _diffApplier.Diff        = outputPage + totalRead;
                    _diffApplier.Size        = pageInfoPtr[i].Size;
                    _diffApplier.DiffSize    = pageInfoPtr[i].DiffSize;
                    _diffApplier.Apply(pageInfoPtr[i].IsNewDiff);
                    totalRead += pageInfoPtr[i].DiffSize;
                }

                _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
            }

            LastTransactionHeader = current;

            return(true);
        }
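The transactionSizeIn4Kb arithmetic above is again a ceiling division, this time over 4KB units and including the header; a hedged equivalent:

        static class TransactionSizeSketch
        {
            const int FourKb = 4 * 1024;

            // Whole 4KB units needed for the header plus the (compressed or uncompressed) payload.
            public static long SizeIn4KbUnits(long payloadSize, int headerSize)
            {
                return (payloadSize + headerSize + FourKb - 1) / FourKb; // assumes non-negative sizes
            }
        }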
Example #10
        private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader *current)
        {
            if (_readAt4Kb > _journalPagerNumberOfAllocated4Kb)
            {
                current = null;
                return(false); // end of journal
            }

            const int pageTo4KbRatio     = Constants.Storage.PageSize / (4 * Constants.Size.Kilobyte);
            var       pageNumber         = _readAt4Kb / pageTo4KbRatio;
            var       positionInsidePage = (_readAt4Kb % pageTo4KbRatio) * (4 * Constants.Size.Kilobyte);

            current = (TransactionHeader *)
                      (_journalPager.AcquirePagePointer(this, pageNumber) + positionInsidePage);

            // due to the reuse of journals we can no longer assume there are zeros at the end of the journal;
            // we might find random garbage or old transactions we can ignore, so we have the following scenarios:
            // * TxId <= current Id      ::  an old transaction from the reused journal; we can ignore it and continue
            // * TxId == current Id + 1  ::  valid, but if the hash is invalid the transaction wasn't committed
            // * TxId >  current Id + 1  ::  if the hash is invalid we can ignore it as reused/random data, but if the hash is valid then we might have missed TXs

            if (current->HeaderMarker != Constants.TransactionHeaderMarker)
            {
                // not a transaction page,

                // if the header marker is zero or garbage, we are probably in the area at the end of the log file, and have no additional log records
                // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
                // this log file and move to the next one, or it might have happened because of the reuse of the journal file

                // note : we might encounter a "valid" TransactionHeaderMarker which is still garbage, so we will test that later on

                RequireHeaderUpdate = false;
                return(false);
            }

            if (current->TransactionId < 0)
            {
                return(false);
            }

            current = EnsureTransactionMapped(current, pageNumber, positionInsidePage);

            if (options.EncryptionEnabled)
            {
                // We use temp buffers to hold the transaction before decrypting, and release the buffers afterwards.
                var pagesSize = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;
                var size      = (4 * Constants.Size.Kilobyte) * GetNumberOf4KbFor(sizeof(TransactionHeader) + pagesSize);

                var ptr    = NativeMemory.Allocate4KbAlignedMemory(size, out var thread);
                var buffer = new EncryptionBuffer
                {
                    Pointer          = ptr,
                    Size             = size,
                    AllocatingThread = thread
                };

                _encryptionBuffers.Add(buffer);
                Memory.Copy(buffer.Pointer, (byte *)current, size);
                current = (TransactionHeader *)buffer.Pointer;

                try
                {
                    DecryptTransaction((byte *)current, options);
                }
                catch (InvalidOperationException ex)
                {
                    RequireHeaderUpdate = true;
                    options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " was not committed", ex);
                    return(false);
                }
            }
            bool hashIsValid = ValidatePagesHash(options, current);

            if (LastTransactionHeader == null)
            {
                return(hashIsValid);
            }

            var txIdDiff = current->TransactionId - LastTransactionHeader->TransactionId;

            // 1 is the first storage transaction, which does not increment the transaction counter after commit
            if (current->TransactionId != 1)
            {
                if (txIdDiff < 0)
                {
                    return(false);
                }

                if (txIdDiff > 1 || txIdDiff == 0)
                {
                    if (hashIsValid)
                    {
                        // TxId is bigger than the last one by more than '1' but has a valid hash, which means we lost transactions in the middle
                        throw new InvalidDataException(
                                  $"Transaction has valid(!) hash with invalid transaction id {current->TransactionId}, the last valid transaction id is {LastTransactionHeader->TransactionId}." +
                                  $" Journal file {_journalPager.FileName} might be corrupted");
                    }
                }

                // if (txIdDiff == 1) :
                if (current->LastPageNumber <= 0)
                {
                    throw new InvalidDataException("Last page number after committed transaction must be greater than 0");
                }
            }

            if (hashIsValid == false)
            {
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " was not committed",
                                            null);
                return(false);
            }

            return(true);
        }
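The transaction-id bookkeeping above condenses into a small decision function. A simplified sketch of the rules spelled out in the comments (ignoring the already-synced escape hatches that later revisions add):

        using System.IO;

        static class TxGapSketch
        {
            // Returns true when recovery may accept this transaction.
            public static bool MayAccept(long currentTxId, long lastTxId, bool hashIsValid)
            {
                if (currentTxId == 1) // the first storage transaction is exempt from the gap check
                    return hashIsValid;

                long txIdDiff = currentTxId - lastTxId;
                if (txIdDiff < 0) // stale data from a reused journal
                    return false;

                if (txIdDiff != 1 && hashIsValid) // valid hash but an id gap: journals are missing
                    throw new InvalidDataException($"Valid hash with invalid transaction id {currentTxId}, the last valid transaction id is {lastTxId}");

                return hashIsValid; // txIdDiff == 1, or an invalid hash we report as uncommitted
            }
        }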
Example #11
        private bool ValidatePagesCrc(StorageEnvironmentOptions options, int compressedPages, TransactionHeader* current)
        {
            uint crc = Crc.Value(_pager.AcquirePagePointer(_readingPage), 0, compressedPages * AbstractPager.PageSize);

            if (crc != current->Crc)
            {
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this, "Invalid CRC signature for transaction " + current->TransactionId, null);

                return false;
            }
            return true;
        }
Example #12
        private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader* current)
        {
            current = (TransactionHeader*)_pager.Read(_readingPage).Base;

            if (current->HeaderMarker != Constants.TransactionHeaderMarker)
            {
                // not a transaction page,

                // if the header marker is zero, we are probably in the area at the end of the log file, and have no additional log records
                // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
                // this log file and move to the next one.

                RequireHeaderUpdate = current->HeaderMarker != 0;
                if (RequireHeaderUpdate)
                {
                    options.InvokeRecoveryError(this,
                        "Transaction " + current->TransactionId +
                        " header marker was set to garbage value, file is probably corrupted", null);
                }

                return false;
            }

            ValidateHeader(current, LastTransactionHeader);

            if (current->TxMarker.HasFlag(TransactionMarker.Commit) == false)
            {
                // uncommitted transaction, probably
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this,
                        "Transaction " + current->TransactionId +
                        " was not committed", null);
                return false;
            }

            _readingPage++;
            return true;
        }
Example #13
		private unsafe bool TryDecompressTransactionPages(StorageEnvironmentOptions options, TransactionHeader* current, byte* dataPage)
		{
			try
			{
				LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
			}
			catch (Exception e)
			{
				options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
				RequireHeaderUpdate = true;

				return false;
			}
			return true;
		}
Example #14
        public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
        {
            if (_readingPage >= _pager.NumberOfAllocatedPages)
            {
                return(false);
            }

            TransactionHeader *current;

            if (!TryReadAndValidateHeader(options, out current))
            {
                return(false);
            }

            var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);

            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                LastTransactionHeader = current;
                _readingPage         += compressedPages;
                return(true); // skipping
            }

            if (checkCrc)
            {
                uint crc = Crc.Value(_pager.AcquirePagePointer(_readingPage), 0, compressedPages * AbstractPager.PageSize);

                if (crc != current->Crc)
                {
                    RequireHeaderUpdate = true;
                    options.InvokeRecoveryError(this, "Invalid CRC signature for transaction " + current->TransactionId, null);

                    return(false);
                }
            }

            _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1);
            var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);

            NativeMethods.memset(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
            try
            {
                LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
            }
            catch (Exception e)
            {
                options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                RequireHeaderUpdate = true;

                return(false);
            }

            var tempTransactionPageTranslaction = new Dictionary<long, JournalFile.PagePosition>();

            for (var i = 0; i < current->PageCount; i++)
            {
                Debug.Assert(_pager.Disposed == false);
                Debug.Assert(_recoveryPager.Disposed == false);

                var page = _recoveryPager.Read(_recoveryPage);

                tempTransactionPageTranslaction[page.PageNumber] = new JournalFile.PagePosition
                {
                    JournalPos    = _recoveryPage,
                    TransactionId = current->TransactionId
                };

                if (page.IsOverflow)
                {
                    var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
                    _recoveryPage += numOfPages;
                }
                else
                {
                    _recoveryPage++;
                }
            }

            _readingPage += compressedPages;

            LastTransactionHeader = current;

            foreach (var pagePosition in tempTransactionPageTranslaction)
            {
                _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
            }

            return(true);
        }
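The temporary dictionary here is deliberate: translations are staged per transaction and published to the long-lived table only after the whole transaction decodes cleanly. A sketch of the publish step with simplified value types:

        using System.Collections.Generic;

        static class TranslationPublishSketch
        {
            // Publishing only after a fully successful decode keeps half-read
            // transactions out of the live page-translation table.
            public static void Publish(IDictionary<long, long> live, IReadOnlyDictionary<long, long> staged)
            {
                foreach (var entry in staged)
                    live[entry.Key] = entry.Value;
            }
        }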
Example #15
        public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options, bool checkCrc = true)
        {
            if (_readingPage >= _journalPager.NumberOfAllocatedPages)
            {
                return(false);
            }

            if (MaxPageToRead != null && _readingPage >= MaxPageToRead.Value)
            {
                return(false);
            }

            TransactionHeader *current;

            if (!TryReadAndValidateHeader(options, out current))
            {
                return(false);
            }


            var transactionSize = GetNumberOfPagesFromSize(options, current->CompressedSize + sizeof(TransactionHeader));

            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                _readingPage         += transactionSize;
                LastTransactionHeader = current;
                return(true); // skipping
            }

            if (checkCrc && !ValidatePagesHash(options, current))
            {
                return(false);
            }

            _readingPage += transactionSize;
            var numberOfPages = _recoveryPager.GetNumberOfOverflowPages(current->UncompressedSize);

            _recoveryPager.EnsureContinuous(0, numberOfPages);
            _recoveryPager.EnsureMapped(this, 0, numberOfPages);
            var outputPage = _recoveryPager.AcquirePagePointer(this, 0);

            UnmanagedMemory.Set(outputPage, 0, (long)numberOfPages * options.PageSize);

            try
            {
                LZ4.Decode64LongBuffers((byte *)current + sizeof(TransactionHeader), current->CompressedSize, outputPage,
                                        current->UncompressedSize, true);
            }
            catch (Exception e)
            {
                options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                RequireHeaderUpdate = true;

                return(false);
            }

            var pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;

            long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;

            for (var i = 0; i < current->PageCount; i++)
            {
                if (totalRead > current->UncompressedSize)
                {
                    throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
                }

                Debug.Assert(_journalPager.Disposed == false);
                Debug.Assert(_recoveryPager.Disposed == false);

                var numberOfPagesOnDestination = GetNumberOfPagesFromSize(options, pageInfoPtr[i].Size);
                _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                var pagePtr = _dataPager.AcquirePagePointer(this, pageInfoPtr[i].PageNumber);

                var diffPageNumber = *(long *)(outputPage + totalRead);
                if (pageInfoPtr[i].PageNumber != diffPageNumber)
                {
                    throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {diffPageNumber}");
                }
                totalRead += sizeof(long);

                _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

                if (pageInfoPtr[i].DiffSize == 0)
                {
                    Memory.Copy(pagePtr, outputPage + totalRead, pageInfoPtr[i].Size);
                    totalRead += pageInfoPtr[i].Size;
                }
                else
                {
                    _diffApplier.Destination = pagePtr;
                    _diffApplier.Diff        = outputPage + totalRead;
                    _diffApplier.Size        = pageInfoPtr[i].Size;
                    _diffApplier.DiffSize    = pageInfoPtr[i].DiffSize;
                    _diffApplier.Apply();
                    totalRead += pageInfoPtr[i].DiffSize;
                }

                _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
            }

            LastTransactionHeader = current;

            return(true);
        }
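The loop above walks the decoded buffer sequentially: an 8-byte page number, then either a full page image or a diff. A bounds-checked sketch of the page-number read (the original reads native-endian through a raw pointer; this uses a little-endian span read):

        using System;
        using System.Buffers.Binary;
        using System.IO;

        static class DecodedBufferSketch
        {
            public static long ReadPageNumber(ReadOnlySpan<byte> decoded, ref int totalRead)
            {
                if (totalRead + sizeof(long) > decoded.Length)
                    throw new InvalidDataException($"Attempted to read position {totalRead} past the end of the transaction data");

                long pageNumber = BinaryPrimitives.ReadInt64LittleEndian(decoded.Slice(totalRead));
                totalRead += sizeof(long);
                return pageNumber;
            }
        }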
Example #16
        private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader *current)
        {
            if (_readAt4Kb > _journalPagerNumberOfAllocated4Kb)
            {
                current = null;
                return(false); // end of journal
            }

            const int pageTo4KbRatio     = Constants.Storage.PageSize / (4 * Constants.Size.Kilobyte);
            var       pageNumber         = _readAt4Kb / pageTo4KbRatio;
            var       positionInsidePage = (_readAt4Kb % pageTo4KbRatio) * (4 * Constants.Size.Kilobyte);

            current = (TransactionHeader *)
                      (_journalPager.AcquirePagePointer(this, pageNumber) + positionInsidePage);

            // due to the reuse of journals we can no longer assume there are zeros at the end of the journal;
            // we might find random garbage or old transactions we can ignore, so we have the following scenarios:
            // * TxId <= current Id      ::  an old transaction from the reused journal; we can ignore it and continue
            // * TxId == current Id + 1  ::  valid, but if the hash is invalid the transaction wasn't committed
            // * TxId >  current Id + 1  ::  if the hash is invalid we can ignore it as reused/random data, but if the hash is valid then we might have missed TXs

            if (current->HeaderMarker != Constants.TransactionHeaderMarker)
            {
                // not a transaction page,

                // if the header marker is zero or garbage, we are probably in the area at the end of the log file, and have no additional log records
                // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
                // this log file and move to the next one, or it might have happened because of the reuse of the journal file

                // note : we might encounter a "valid" TransactionHeaderMarker which is still garbage, so we will test that later on

                RequireHeaderUpdate = false;
                return(false);
            }

            if (current->TransactionId < 0)
            {
                return(false);
            }

            current = EnsureTransactionMapped(current, pageNumber, positionInsidePage);
            bool hashIsValid;

            if (options.Encryption.IsEnabled)
            {
                // We use temp buffers to hold the transaction before decrypting, and release the buffers afterwards.
                var pagesSize = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;
                var size      = (4 * Constants.Size.Kilobyte) * GetNumberOf4KbFor(sizeof(TransactionHeader) + pagesSize);

                var ptr    = PlatformSpecific.NativeMemory.Allocate4KbAlignedMemory(size, out var thread);
                var buffer = new EncryptionBuffer
                {
                    Pointer          = ptr,
                    Size             = size,
                    AllocatingThread = thread
                };

                _encryptionBuffers.Add(buffer);
                Memory.Copy(buffer.Pointer, (byte *)current, size);
                current = (TransactionHeader *)buffer.Pointer;

                try
                {
                    DecryptTransaction((byte *)current, options);
                    hashIsValid = true;
                }
                catch (InvalidOperationException ex)
                {
                    if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                    {
                        options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                        $"Unable to decrypt data of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                                                                        "Safely continuing the startup recovery process.",
                                                                        ex);

                        return(true);
                    }

                    RequireHeaderUpdate = true;
                    options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " was not committed", ex);
                    return(false);
                }
            }
            else
            {
                hashIsValid = ValidatePagesHash(options, current);
            }

            long lastTxId;

            if (LastTransactionHeader != null)
            {
                lastTxId = LastTransactionHeader->TransactionId;
            }
            else
            {
                // this is first transaction being processed in the recovery process

                if (_journalInfo.LastSyncedTransactionId == -1 || current->TransactionId <= _journalInfo.LastSyncedTransactionId)
                {
                    if (hashIsValid == false && CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                    {
                        options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                        $"Invalid hash of data of first transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                                                                        "Safely continuing the startup recovery process.", null);

                        return(true);
                    }

                    if (hashIsValid && _firstValidTransactionHeader == null)
                    {
                        _firstValidTransactionHeader = current;
                    }

                    return(hashIsValid);
                }

                lastTxId = _journalInfo.LastSyncedTransactionId;
            }

            var txIdDiff = current->TransactionId - lastTxId;

            // 1 is the first storage transaction, which does not increment the transaction counter after commit
            if (current->TransactionId != 1)
            {
                if (txIdDiff < 0)
                {
                    if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                    {
                        options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                        $"Encountered integrity error of transaction data which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). Negative tx id diff: {txIdDiff}. " +
                                                                        "Safely continuing the startup recovery process.", null);

                        return(true);
                    }

                    return(false);
                }

                if (txIdDiff > 1 || txIdDiff == 0)
                {
                    if (hashIsValid)
                    {
                        // TxId is bigger than the last one by more than '1' but has a valid hash, which means we lost transactions in the middle

                        if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                        {
                            // when running in ignore-data-integrity-errors mode we may skip corrupted but already synced data,
                            // so txIdDiff > 1 is expected in this case; let it continue to work

                            options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                            $"Encountered integrity error of transaction data which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). Tx diff is: {txIdDiff}. " +
                                                                            $"Safely continuing the startup recovery process. Debug details - file header {_currentFileHeader}", null);

                            return(true);
                        }

                        if (LastTransactionHeader != null)
                        {
                            throw new InvalidJournalException(
                                      $"Transaction has valid(!) hash with invalid transaction id {current->TransactionId}, the last valid transaction id is {LastTransactionHeader->TransactionId}. Tx diff is: {txIdDiff}." +
                                      $" Journal file {_journalPager.FileName} might be corrupted. Debug details - file header {_currentFileHeader}", _journalInfo);
                        }

                        throw new InvalidJournalException(
                                  $"The last synced transaction id was {_journalInfo.LastSyncedTransactionId} (in journal: {_journalInfo.LastSyncedJournal}) but the first transaction being read in the recovery process is {current->TransactionId} (transaction has valid hash). Tx diff is: {txIdDiff}. " +
                                  $"Some journals are missing. Current journal file {_journalPager.FileName}. Debug details - file header {_currentFileHeader}", _journalInfo);
                    }
                }

                // if (txIdDiff == 1) :
                if (current->LastPageNumber <= 0)
                {
                    if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                    {
                        options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                        $"Invalid last page number ({current->LastPageNumber}) in the header of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                                                                        $"Safely continuing the startup recovery process. Debug details - file header {_currentFileHeader}", null);

                        return(true);
                    }

                    throw new InvalidDataException("Last page number after committed transaction must be greater than 0. Debug details - file header {_currentFileHeader}");
                }
            }

            if (hashIsValid == false)
            {
                if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                {
                    options.InvokeIntegrityErrorOfAlreadySyncedData(this,
                                                                    $"Invalid hash of data of transaction which has been already synced (tx id: {current->TransactionId}, last synced tx: {_journalInfo.LastSyncedTransactionId}, journal: {_journalInfo.CurrentJournal}). " +
                                                                    "Safely continuing the startup recovery process.", null);

                    return(true);
                }

                RequireHeaderUpdate = true;
                return(false);
            }

            if (_firstValidTransactionHeader == null)
            {
                _firstValidTransactionHeader = current;
            }

            return(true);
        }
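The encrypted path copies the transaction into a 4KB-aligned scratch buffer before decrypting it in place. A hedged sketch using .NET 6+ NativeMemory in place of the Allocate4KbAlignedMemory helper:

        using System;
        using System.Runtime.InteropServices;

        static unsafe class AlignedScratchSketch
        {
            // Runs work against a freshly allocated 4KB-aligned buffer and always frees it.
            public static void WithAlignedBuffer(nuint size, Action<IntPtr> work)
            {
                void* buffer = NativeMemory.AlignedAlloc(size, (nuint)4096);
                try
                {
                    work((IntPtr)buffer);
                }
                finally
                {
                    NativeMemory.AlignedFree(buffer);
                }
            }
        }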
Example #17
        public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options)
        {
            if (_readAt4Kb >= _journalPagerNumberOfAllocated4Kb)
            {
                return(false);
            }

            if (TryReadAndValidateHeader(options, out TransactionHeader * current) == false)
            {
                var lastValid4Kb = _readAt4Kb;
                _readAt4Kb++;

                while (_readAt4Kb < _journalPagerNumberOfAllocated4Kb)
                {
                    if (TryReadAndValidateHeader(options, out current))
                    {
                        if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                        {
                            SkipCurrentTransaction(current);
                            return(true);
                        }

                        RequireHeaderUpdate = true;
                        break;
                    }
                    _readAt4Kb++;
                }

                _readAt4Kb = lastValid4Kb;
                return(false);
            }

            if (IsAlreadySyncTransaction(current))
            {
                SkipCurrentTransaction(current);
                return(true);
            }

            var performDecompression = current->CompressedSize != -1;

            var transactionSizeIn4Kb = GetTransactionSizeIn4Kb(current);

            _readAt4Kb += transactionSizeIn4Kb;

            TransactionHeaderPageInfo *pageInfoPtr;
            byte *outputPage;

            if (performDecompression)
            {
                var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
                _recoveryPager.EnsureContinuous(0, numberOfPages);
                _recoveryPager.EnsureMapped(this, 0, numberOfPages);
                outputPage = _recoveryPager.AcquirePagePointer(this, 0);
                Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);

                try
                {
                    LZ4.Decode64LongBuffers((byte *)current + sizeof(TransactionHeader), current->CompressedSize, outputPage,
                                            current->UncompressedSize, true);
                }
                catch (Exception e)
                {
                    options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                    RequireHeaderUpdate = true;

                    return(false);
                }
                pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
            }
            else
            {
                var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
                _recoveryPager.EnsureContinuous(0, numberOfPages);
                _recoveryPager.EnsureMapped(this, 0, numberOfPages);
                outputPage = _recoveryPager.AcquirePagePointer(this, 0);
                Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);
                Memory.Copy(outputPage, (byte *)current + sizeof(TransactionHeader), current->UncompressedSize);
                pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
            }

            long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;

            if (totalRead > current->UncompressedSize)
            {
                throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
            }

            for (var i = 0; i < current->PageCount; i++)
            {
                if (pageInfoPtr[i].PageNumber > current->LastPageNumber)
                {
                    throw new InvalidDataException($"Transaction {current->TransactionId} contains reference to page {pageInfoPtr[i].PageNumber} which is after the last allocated page {current->LastPageNumber}");
                }
            }

            for (var i = 0; i < current->PageCount; i++)
            {
                if (totalRead > current->UncompressedSize)
                {
                    throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
                }

                Debug.Assert(_journalPager.Disposed == false);
                if (performDecompression)
                {
                    Debug.Assert(_recoveryPager.Disposed == false);
                }

                var numberOfPagesOnDestination = GetNumberOfPagesFor(pageInfoPtr[i].Size);
                _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);


                // We are going to overwrite the page, so we don't care about its current content
                var pagePtr = _dataPager.AcquirePagePointerForNewPage(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                _dataPager.MaybePrefetchMemory(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

                var pageNumber = *(long *)(outputPage + totalRead);
                if (pageInfoPtr[i].PageNumber != pageNumber)
                {
                    throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {pageNumber}");
                }
                totalRead += sizeof(long);

                _modifiedPages.Add(pageNumber);

                for (var j = 1; j < numberOfPagesOnDestination; j++)
                {
                    _modifiedPages.Remove(pageNumber + j);
                }

                _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

                if (pageInfoPtr[i].DiffSize == 0)
                {
                    if (pageInfoPtr[i].Size == 0)
                    {
                        // diff contained no changes
                        continue;
                    }

                    var journalPagePtr = outputPage + totalRead;

                    if (options.Encryption.IsEnabled == false)
                    {
                        var pageHeader = (PageHeader *)journalPagePtr;

                        var checksum = StorageEnvironment.CalculatePageChecksum((byte *)pageHeader, pageNumber, out var expectedChecksum);
                        if (checksum != expectedChecksum)
                        {
                            ThrowInvalidChecksumOnPageFromJournal(pageNumber, current, expectedChecksum, checksum, pageHeader);
                        }
                    }

                    Memory.Copy(pagePtr, journalPagePtr, pageInfoPtr[i].Size);
                    totalRead += pageInfoPtr[i].Size;

                    if (options.Encryption.IsEnabled)
                    {
                        var pageHeader = (PageHeader *)pagePtr;

                        if ((pageHeader->Flags & PageFlags.Overflow) == PageFlags.Overflow)
                        {
                            // need to mark overlapped buffers as invalid for commit

                            var encryptionBuffers = ((IPagerLevelTransactionState)this).CryptoPagerTransactionState[_dataPager];

                            var numberOfPages = VirtualPagerLegacyExtensions.GetNumberOfOverflowPages(pageHeader->OverflowSize);

                            for (var j = 1; j < numberOfPages; j++)
                            {
                                if (encryptionBuffers.TryGetValue(pageNumber + j, out var buffer))
                                {
                                    buffer.SkipOnTxCommit = true;
                                }
                            }
                        }
                    }
                }
                else
                {
                    _diffApplier.Destination = pagePtr;
                    _diffApplier.Diff        = outputPage + totalRead;
                    _diffApplier.Size        = pageInfoPtr[i].Size;
                    _diffApplier.DiffSize    = pageInfoPtr[i].DiffSize;
                    _diffApplier.Apply(pageInfoPtr[i].IsNewDiff);
                    totalRead += pageInfoPtr[i].DiffSize;
                }

                _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
            }

            LastTransactionHeader = current;

            return(true);
        }
Example #18
        public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
        {
            if (_readingPage >= _pager.NumberOfAllocatedPages)
                return false;

            TransactionHeader* current;
            if (!TryReadAndValidateHeader(options, out current))
                return false;

            var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);

            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                LastTransactionHeader = current;
                _readingPage += compressedPages;
                return true; // skipping
            }

            if (checkCrc && !ValidatePagesCrc(options, compressedPages, current))
                return false;

            _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1);
            var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);

            NativeMethods.memset(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
            try
            {
                LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
            }
            catch (Exception e)
            {
                options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                RequireHeaderUpdate = true;

                return false;
            }

            var tempTransactionPageTranslaction = new Dictionary<long, JournalFile.PagePosition>();

            for (var i = 0; i < current->PageCount; i++)
            {
                Debug.Assert(_pager.Disposed == false);
                Debug.Assert(_recoveryPager.Disposed == false);

                var page = _recoveryPager.Read(_recoveryPage);

                tempTransactionPageTranslaction[page.PageNumber] = new JournalFile.PagePosition
                {
                    JournalPos = _recoveryPage,
                    TransactionId = current->TransactionId
                };

                if (page.IsOverflow)
                {
                    var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
                    _recoveryPage += numOfPages;
                }
                else
                {
                    _recoveryPage++;
                }
            }

            _readingPage += compressedPages;

            LastTransactionHeader = current;

            foreach (var pagePosition in tempTransactionPageTranslaction)
            {
                _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
            }

            return true;
        }
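Taken together, these readers are driven in a loop during startup recovery. A usage sketch against a hypothetical interface (the real reader class exposes these members directly):

        // IJournalReader is a hypothetical abstraction over the readers shown above.
        interface IJournalReader
        {
            bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true);
            bool RequireHeaderUpdate { get; }
        }

        static class RecoveryDriverSketch
        {
            public static void Recover(IJournalReader reader, StorageEnvironmentOptions options)
            {
                while (reader.ReadOneTransaction(options))
                {
                    // each successful call consumes one committed (or skipped) transaction
                }

                if (reader.RequireHeaderUpdate)
                {
                    // recovery stopped on a corrupt tail; the caller repairs the journal header here
                }
            }
        }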