Example #1
0
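        // Verifies that no page tracked as read-only was modified outside of the transaction:
        // a read-only page must not appear in the dirty set, and its current checksum must
        // still match the checksum captured when it was first tracked.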
        private void ValidateReadOnlyPages()
        {
            foreach (var readOnlyKey in readOnlyPages)
            {
                long pageNumber = readOnlyKey.Key;
                if (dirtyPagesValidate.Contains(pageNumber))
                {
                    VoronUnrecoverableErrorException.Raise(_env, "Read only page is dirty (which means you are modifying a page directly in the data -- non transactionally -- ).");
                }

                var page = GetPage(pageNumber);

                ulong pageHash = StorageEnvironment.CalculatePageChecksum(page.Pointer, page.PageNumber, page.Flags, page.OverflowSize);
                if (pageHash != readOnlyKey.Value)
                {
                    VoronUnrecoverableErrorException.Raise(_env, "Read only page content is different (which means you are modifying a page directly in the data -- non transactionally -- ).");
                }
            }
        }
Example #2
0
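        // Records the checksum of a page being read so that later validation can detect
        // untracked modifications. Pages already writable in this transaction are skipped;
        // a page that was tracked before must still hash to the same value.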
        private void TrackReadOnlyPage(Page page)
        {
            if (writablePages.ContainsKey(page.PageNumber))
            {
                return;
            }

            ulong pageHash = StorageEnvironment.CalculatePageChecksum(page.Pointer, page.PageNumber, page.Flags, page.OverflowSize);

            ulong storedHash;

            if (readOnlyPages.TryGetValue(page.PageNumber, out storedHash))
            {
                if (pageHash != storedHash)
                {
                    VoronUnrecoverableErrorException.Raise(_env, "Read Only Page has change between tracking requests. Page #" + page.PageNumber);
                }
            }
            else
            {
                readOnlyPages[page.PageNumber] = pageHash;
            }
        }
Example #3
0
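        // Reads a single transaction from the journal and applies its pages to the data file.
        // Returns false when there is no further valid transaction to read at the current position.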
        public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options)
        {
            if (_readAt4Kb >= _journalPagerNumberOfAllocated4Kb)
            {
                return false;
            }

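            // If we cannot read a valid transaction header at the current position, scan forward:
            // a later valid header whose transaction was already synced to the data file can be
            // safely skipped; otherwise the journal needs a header update and reading stops here.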
            if (TryReadAndValidateHeader(options, out TransactionHeader * current) == false)
            {
                var lastValid4Kb = _readAt4Kb;
                _readAt4Kb++;

                while (_readAt4Kb < _journalPagerNumberOfAllocated4Kb)
                {
                    if (TryReadAndValidateHeader(options, out current))
                    {
                        if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                        {
                            SkipCurrentTransaction(current);
                            return true;
                        }

                        RequireHeaderUpdate = true;
                        break;
                    }
                    _readAt4Kb++;
                }

                _readAt4Kb = lastValid4Kb;
                return false;
            }

            if (IsAlreadySyncTransaction(current))
            {
                SkipCurrentTransaction(current);
                return true;
            }

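            // A CompressedSize of -1 marks a transaction whose page data was written uncompressed.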
            var performDecompression = current->CompressedSize != -1;

            var transactionSizeIn4Kb = GetTransactionSizeIn4Kb(current);

            _readAt4Kb += transactionSizeIn4Kb;

            TransactionHeaderPageInfo *pageInfoPtr;
            byte *outputPage;

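            // Materialize the transaction's page data into the recovery pager: LZ4-decode it when
            // the transaction was compressed, otherwise copy it verbatim.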
            if (performDecompression)
            {
                var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
                _recoveryPager.EnsureContinuous(0, numberOfPages);
                _recoveryPager.EnsureMapped(this, 0, numberOfPages);
                outputPage = _recoveryPager.AcquirePagePointer(this, 0);
                Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);

                try
                {
                    LZ4.Decode64LongBuffers((byte *)current + sizeof(TransactionHeader), current->CompressedSize, outputPage,
                                            current->UncompressedSize, true);
                }
                catch (Exception e)
                {
                    options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                    RequireHeaderUpdate = true;

                    return false;
                }
                pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
            }
            else
            {
                var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
                _recoveryPager.EnsureContinuous(0, numberOfPages);
                _recoveryPager.EnsureMapped(this, 0, numberOfPages);
                outputPage = _recoveryPager.AcquirePagePointer(this, 0);
                Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);
                Memory.Copy(outputPage, (byte *)current + sizeof(TransactionHeader), current->UncompressedSize);
                pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
            }

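            // The decoded buffer starts with a table of TransactionHeaderPageInfo entries,
            // one per page; make sure the table itself fits within the transaction data.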
            long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;

            if (totalRead > current->UncompressedSize)
            {
                throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
            }

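            // Validate that every referenced page falls within the range allocated by the transaction.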
            for (var i = 0; i < current->PageCount; i++)
            {
                if (pageInfoPtr[i].PageNumber > current->LastPageNumber)
                {
                    throw new InvalidDataException($"Transaction {current->TransactionId} contains reference to page {pageInfoPtr[i].PageNumber} which is after the last allocated page {current->LastPageNumber}");
                }
            }

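            // Apply each page to the data file: either copy the full page content (verifying its
            // checksum when encryption is off) or apply a diff against the existing page.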
            for (var i = 0; i < current->PageCount; i++)
            {
                if (totalRead > current->UncompressedSize)
                {
                    throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
                }

                Debug.Assert(_journalPager.Disposed == false);
                if (performDecompression)
                {
                    Debug.Assert(_recoveryPager.Disposed == false);
                }

                var numberOfPagesOnDestination = GetNumberOfPagesFor(pageInfoPtr[i].Size);
                _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);


                // We are going to overwrite the page, so we don't care about its current content
                var pagePtr = _dataPager.AcquirePagePointerForNewPage(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                _dataPager.MaybePrefetchMemory(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

                var pageNumber = *(long *)(outputPage + totalRead);
                if (pageInfoPtr[i].PageNumber != pageNumber)
                {
                    throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {pageNumber}");
                }
                totalRead += sizeof(long);

                _modifiedPages.Add(pageNumber);

                for (var j = 1; j < numberOfPagesOnDestination; j++)
                {
                    _modifiedPages.Remove(pageNumber + j);
                }

                _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

                if (pageInfoPtr[i].DiffSize == 0)
                {
                    if (pageInfoPtr[i].Size == 0)
                    {
                        // diff contained no changes
                        continue;
                    }

                    var journalPagePtr = outputPage + totalRead;

                    if (options.Encryption.IsEnabled == false)
                    {
                        var pageHeader = (PageHeader *)journalPagePtr;

                        var checksum = StorageEnvironment.CalculatePageChecksum((byte *)pageHeader, pageNumber, out var expectedChecksum);
                        if (checksum != expectedChecksum)
                        {
                            ThrowInvalidChecksumOnPageFromJournal(pageNumber, current, expectedChecksum, checksum, pageHeader);
                        }
                    }

                    Memory.Copy(pagePtr, journalPagePtr, pageInfoPtr[i].Size);
                    totalRead += pageInfoPtr[i].Size;

                    if (options.Encryption.IsEnabled)
                    {
                        var pageHeader = (PageHeader *)pagePtr;

                        if ((pageHeader->Flags & PageFlags.Overflow) == PageFlags.Overflow)
                        {
                            // need to mark overlapped buffers as invalid for commit

                            var encryptionBuffers = ((IPagerLevelTransactionState)this).CryptoPagerTransactionState[_dataPager];

                            var numberOfPages = VirtualPagerLegacyExtensions.GetNumberOfOverflowPages(pageHeader->OverflowSize);

                            for (var j = 1; j < numberOfPages; j++)
                            {
                                if (encryptionBuffers.TryGetValue(pageNumber + j, out var buffer))
                                {
                                    buffer.SkipOnTxCommit = true;
                                }
                            }
                        }
                    }
                }
                else
                {
                    _diffApplier.Destination = pagePtr;
                    _diffApplier.Diff        = outputPage + totalRead;
                    _diffApplier.Size        = pageInfoPtr[i].Size;
                    _diffApplier.DiffSize    = pageInfoPtr[i].DiffSize;
                    _diffApplier.Apply(pageInfoPtr[i].IsNewDiff);
                    totalRead += pageInfoPtr[i].DiffSize;
                }

                _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
            }

            LastTransactionHeader = current;

            return true;
        }
Example #4
0
        public RecoveryStatus Execute(CancellationToken ct)
        {
            var sw = new Stopwatch();
            StorageEnvironment se = null;

            sw.Start();
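            // In copy-on-write mode, recover the journals first by opening (and immediately
            // disposing) a storage environment over the original options.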
            if (_copyOnWrite)
            {
                Console.WriteLine("Recovering journal files, this may take a while...");
                try
                {
                    se = new StorageEnvironment(_option);
                    Console.WriteLine(
                        $"Journal recovery has completed successfully within {sw.Elapsed.TotalSeconds:N1} seconds");
                }
                catch (Exception e)
                {
                    Console.WriteLine($"Journal recovery failed, reason:{Environment.NewLine}{e}");
                }
                finally
                {
                    se?.Dispose();
                }
            }
            _option = StorageEnvironmentOptions.ForPath(Path.GetDirectoryName(_datafile));

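            // Map the data file and compute a page-aligned end-of-file pointer; recovery scans
            // the file one page at a time from here.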
            var  mem         = Pager.AcquirePagePointer(null, 0);
            long startOffset = (long)mem;
            var  fi          = new FileInfo(_datafile);
            var  fileSize    = fi.Length;
            //making sure eof is page aligned
            var      eof = mem + (fileSize / _pageSize) * _pageSize;
            DateTime lastProgressReport = DateTime.MinValue;

            using (var destinationStreamDocuments = File.OpenWrite(Path.Combine(Path.GetDirectoryName(_output), Path.GetFileNameWithoutExtension(_output) + "-2-Documents" + Path.GetExtension(_output))))
                using (var destinationStreamRevisions = File.OpenWrite(Path.Combine(Path.GetDirectoryName(_output), Path.GetFileNameWithoutExtension(_output) + "-3-Revisions" + Path.GetExtension(_output))))
                    using (var destinationStreamConflicts = File.OpenWrite(Path.Combine(Path.GetDirectoryName(_output), Path.GetFileNameWithoutExtension(_output) + "-4-Conflicts" + Path.GetExtension(_output))))
                        using (var logFile = File.CreateText(Path.Combine(Path.GetDirectoryName(_output), LogFileName)))
                            using (var gZipStreamDocuments = new GZipStream(destinationStreamDocuments, CompressionMode.Compress, true))
                                using (var gZipStreamRevisions = new GZipStream(destinationStreamRevisions, CompressionMode.Compress, true))
                                    using (var gZipStreamConflicts = new GZipStream(destinationStreamConflicts, CompressionMode.Compress, true))
                                        using (var context = new JsonOperationContext(_initialContextSize, _initialContextLongLivedSize, SharedMultipleUseFlag.None))
                                            using (var documentsWriter = new BlittableJsonTextWriter(context, gZipStreamDocuments))
                                                using (var revisionsWriter = new BlittableJsonTextWriter(context, gZipStreamRevisions))
                                                    using (var conflictsWriter = new BlittableJsonTextWriter(context, gZipStreamConflicts))
                                                    {
                                                        WriteSmugglerHeader(documentsWriter, 40018, "Docs");
                                                        WriteSmugglerHeader(revisionsWriter, 40018, "RevisionDocuments");
                                                        WriteSmugglerHeader(conflictsWriter, 40018, "ConflictDocuments");

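                                                        // Scan every page in the data file, extracting document data from
                                                        // raw data sections and overflow pages into the smuggler output streams.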
                                                        while (mem < eof)
                                                        {
                                                            try
                                                            {
                                                                if (ct.IsCancellationRequested)
                                                                {
                                                                    logFile.WriteLine(
                                                                        $"Cancellation requested while recovery was in position {GetFilePosition(startOffset, mem)}");
                                                                    _cancellationRequested = true;
                                                                    break;
                                                                }
                                                                var now = DateTime.UtcNow;
                                                                if ((now - lastProgressReport).TotalSeconds >= _progressIntervalInSec)
                                                                {
                                                                    if (lastProgressReport != DateTime.MinValue)
                                                                    {
                                                                        Console.Clear();
                                                                        Console.WriteLine("Press 'q' to quit the recovery process");
                                                                    }
                                                                    lastProgressReport = now;
                                                                    var currPos = GetFilePosition(startOffset, mem);
                                                                    var eofPos  = GetFilePosition(startOffset, eof);
                                                                    Console.WriteLine(
                                                                        $"{now:hh:MM:ss}: Recovering page at position {currPos:#,#;;0}/{eofPos:#,#;;0} ({(double)currPos / eofPos:p}) - Last recovered doc is {_lastRecoveredDocumentKey}");
                                                                }

                                                                var pageHeader = (PageHeader *)mem;

                                                                // this page is not part of a raw data section, move on
                                                                if ((pageHeader->Flags).HasFlag(PageFlags.RawData) == false)
                                                                {
                                                                    mem += _pageSize;
                                                                    continue;
                                                                }

                                                                if (pageHeader->Flags.HasFlag(PageFlags.Single) &&
                                                                    pageHeader->Flags.HasFlag(PageFlags.Overflow))
                                                                {
                                                                    var message =
                                                                        $"page #{pageHeader->PageNumber} (offset={GetFilePosition(startOffset, mem)}) has both Overflow and Single flag turned";
                                                                    mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                                                                    continue;
                                                                }
                                                                //overflow page
                                                                ulong checksum;
                                                                if (pageHeader->Flags.HasFlag(PageFlags.Overflow))
                                                                {
                                                                    var endOfOverflow = (byte *)pageHeader + VirtualPagerLegacyExtensions.GetNumberOfOverflowPages(pageHeader->OverflowSize) * _pageSize;
                                                                    // endOfOverflow can be equal to eof if the last page is an overflow page
                                                                    if (endOfOverflow > eof)
                                                                    {
                                                                        var message =
                                                                            $"Overflow page #{pageHeader->PageNumber} (offset={GetFilePosition(startOffset, mem)})" +
                                                                            $" size exceeds the end of the file ([{(long)pageHeader}:{(long)endOfOverflow}])";
                                                                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                                                                        continue;
                                                                    }

                                                                    if (pageHeader->OverflowSize <= 0)
                                                                    {
                                                                        var message =
                                                                            $"Overflow page #{pageHeader->PageNumber} (offset={GetFilePosition(startOffset, mem)})" +
                                                                            $" OverflowSize is not a positive number ({pageHeader->OverflowSize})";
                                                                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                                                                        continue;
                                                                    }
                                                                    // this can only be here if we know that the overflow size is valid
                                                                    checksum = StorageEnvironment.CalculatePageChecksum((byte *)pageHeader, pageHeader->PageNumber, pageHeader->Flags, pageHeader->OverflowSize);

                                                                    if (checksum != pageHeader->Checksum)
                                                                    {
                                                                        var message =
                                                                            $"Invalid checksum for overflow page {pageHeader->PageNumber}, expected hash to be {pageHeader->Checksum} but was {checksum}";
                                                                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                                                                        continue;
                                                                    }


                                                                    if (Write((byte *)pageHeader + PageHeader.SizeOf, pageHeader->OverflowSize, documentsWriter, revisionsWriter,
                                                                              conflictsWriter, logFile, context, startOffset, ((RawDataOverflowPageHeader *)mem)->TableType))
                                                                    {
                                                                        var numberOfPages = VirtualPagerLegacyExtensions.GetNumberOfOverflowPages(pageHeader->OverflowSize);
                                                                        mem += numberOfPages * _pageSize;
                                                                    }
                                                                    else //write document failed
                                                                    {
                                                                        mem += _pageSize;
                                                                    }
                                                                    continue;
                                                                }

                                                                checksum = StorageEnvironment.CalculatePageChecksum((byte *)pageHeader, pageHeader->PageNumber, pageHeader->Flags, 0);

                                                                if (checksum != pageHeader->Checksum)
                                                                {
                                                                    var message =
                                                                        $"Invalid checksum for page {pageHeader->PageNumber}, expected hash to be {pageHeader->Checksum} but was {checksum}";
                                                                    mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                                                                    continue;
                                                                }

                                                                // small raw data section
                                                                var rawHeader = (RawDataSmallPageHeader *)mem;

                                                                // small raw data section header
                                                                if (rawHeader->RawDataFlags.HasFlag(RawDataPageFlags.Header))
                                                                {
                                                                    mem += _pageSize;
                                                                    continue;
                                                                }
                                                                if (rawHeader->NextAllocation > _pageSize)
                                                                {
                                                                    var message =
                                                                        $"RawDataSmallPage #{rawHeader->PageNumber} at {GetFilePosition(startOffset, mem)} next allocation is larger than {_pageSize} bytes";
                                                                    mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                                                                    continue;
                                                                }

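                                                                // Walk the entries of this small raw data page, validating each
                                                                // entry's sizes before attempting to write the document it holds.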
                                                                for (var pos = PageHeader.SizeOf; pos < rawHeader->NextAllocation;)
                                                                {
                                                                    var currMem = mem + pos;
                                                                    var entry   = (RawDataSection.RawDataEntrySizes *)currMem;
                                                                    //this indicates that the current entry is invalid because it is outside the size of a page
                                                                    if (pos > _pageSize)
                                                                    {
                                                                        var message =
                                                                            $"RawDataSmallPage #{rawHeader->PageNumber} has an invalid entry at {GetFilePosition(startOffset, currMem)}";
                                                                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                                                                        // we can't retrieve entries past the invalid entry
                                                                        break;
                                                                    }
                                                                    // the allocated size of the entry exceeds the bounds of the page's next allocation
                                                                    if (entry->AllocatedSize + pos + sizeof(RawDataSection.RawDataEntrySizes) >
                                                                        rawHeader->NextAllocation)
                                                                    {
                                                                        var message =
                                                                            $"RawDataSmallPage #{rawHeader->PageNumber} has an invalid entry at {GetFilePosition(startOffset, currMem)}" +
                                                                            "the allocated entry exceed the bound of the page next allocation.";
                                                                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                                                                        // we can't retrieve entries past the invalid entry
                                                                        break;
                                                                    }
                                                                    if (entry->UsedSize > entry->AllocatedSize)
                                                                    {
                                                                        var message =
                                                                            $"RawDataSmallPage #{rawHeader->PageNumber} has an invalid entry at {GetFilePosition(startOffset, currMem)}" +
                                                                            "the size of the entry exceed the allocated size";
                                                                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                                                                        // we can't retrieve entries past the invalid entry
                                                                        break;
                                                                    }
                                                                    pos += entry->AllocatedSize + sizeof(RawDataSection.RawDataEntrySizes);
                                                                    if (entry->AllocatedSize == 0 || entry->UsedSize == -1)
                                                                    {
                                                                        continue;
                                                                    }

                                                                    if (Write(currMem + sizeof(RawDataSection.RawDataEntrySizes), entry->UsedSize, documentsWriter, revisionsWriter,
                                                                              conflictsWriter, logFile, context, startOffset, ((RawDataSmallPageHeader *)mem)->TableType) == false)
                                                                    {
                                                                        break;
                                                                    }
                                                                }
                                                                mem += _pageSize;
                                                            }
                                                            catch (Exception e)
                                                            {
                                                                var message =
                                                                    $"Unexpected exception at position {GetFilePosition(startOffset, mem)}:{Environment.NewLine} {e}";
                                                                mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                                                            }
                                                        }
                                                        documentsWriter.WriteEndArray();
                                                        conflictsWriter.WriteEndArray();
                                                        revisionsWriter.WriteEndArray();
                                                        documentsWriter.WriteEndObject();
                                                        conflictsWriter.WriteEndObject();
                                                        revisionsWriter.WriteEndObject();

                                                        logFile.WriteLine(
                                                            $"Discovered a total of {_numberOfDocumentsRetrieved:#,#;00} documents within {sw.Elapsed.TotalSeconds::#,#.#;;00} seconds.");
                                                        logFile.WriteLine($"Discovered a total of {_numberOfFaultedPages::#,#;00} faulted pages.");
                                                    }
            if (_cancellationRequested)
            {
                return RecoveryStatus.CancellationRequested;