Example #1
        private void InitTransactionHeader()
        {
            var allocation = _env.ScratchBufferPool.Allocate(this, 1);
            var page       = _env.ScratchBufferPool.ReadPage(allocation.ScratchFileNumber, allocation.PositionInScratchBuffer);

            _transactionHeaderPage = allocation;

            UnmanagedMemory.Set(page.Base, 0, AbstractPager.PageSize);
            _txHeader = (TransactionHeader *)page.Base;
            _txHeader->HeaderMarker = Constants.TransactionHeaderMarker;

            _txHeader->TransactionId    = _id;
            _txHeader->NextPageNumber   = _state.NextPageNumber;
            _txHeader->LastPageNumber   = -1;
            _txHeader->PageCount        = -1;
            _txHeader->Crc              = 0;
            _txHeader->TxMarker         = TransactionMarker.None;
            _txHeader->Compressed       = false;
            _txHeader->CompressedSize   = 0;
            _txHeader->UncompressedSize = 0;

            _allocatedPagesInTransaction = 0;
            _overflowPagesInTransaction  = 0;

            _scratchPagesTable.Clear();
        }
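
For context: UnmanagedMemory.Set in these snippets behaves like C's memset over raw pointers. A minimal managed stand-in, assuming this signature (the real Voron implementation delegates to native code; this stub is illustration only):

        // Hypothetical stand-in for UnmanagedMemory.Set: fills `count` bytes
        // at `dest` with `value`. Not the Voron source.
        public static unsafe class UnmanagedMemoryStub
        {
            public static void Set(byte* dest, byte value, long count)
            {
                for (long i = 0; i < count; i++)
                    dest[i] = value;
            }
        }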
Example #2
        private IntPtr[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager, uint previousTransactionCrc)
        {
            // numberOfPages includes the tx header page, which we don't compress
            var dataPagesCount      = numberOfPages - 1;
            var sizeInBytes         = dataPagesCount * AbstractPager.PageSize;
            var outputBufferSize    = LZ4.MaximumOutputLength(sizeInBytes);
            var outputBufferInPages = outputBufferSize / AbstractPager.PageSize +
                                      (outputBufferSize % AbstractPager.PageSize == 0 ? 0 : 1);
            var pagesRequired = (dataPagesCount + outputBufferInPages);

            compressionPager.EnsureContinuous(tx, 0, pagesRequired);
            var tempBuffer        = compressionPager.AcquirePagePointer(tx, 0);
            var compressionBuffer = compressionPager.AcquirePagePointer(tx, dataPagesCount);

            var write   = tempBuffer;
            var txPages = tx.GetTransactionPages();

            foreach (var txPage in txPages)
            {
                var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txPage.ScratchFileNumber, txPage.PositionInScratchBuffer);
                var count       = txPage.NumberOfPages * AbstractPager.PageSize;
                Memory.BulkCopy(write, scratchPage, count);
                write += count;
            }

            var len             = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBufferSize);
            var remainder       = len % AbstractPager.PageSize;
            var compressedPages = (len / AbstractPager.PageSize) + (remainder == 0 ? 0 : 1);

            if (remainder != 0)
            {
                // zero the unused tail of the last page
                UnmanagedMemory.Set(compressionBuffer + len, 0, AbstractPager.PageSize - remainder);
            }

            var pages = new IntPtr[compressedPages + 1];

            var txHeaderPage = tx.GetTransactionHeaderPage();
            var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txHeaderPage.ScratchFileNumber, txHeaderPage.PositionInScratchBuffer);
            var txHeader     = (TransactionHeader *)txHeaderBase;

            txHeader->Compressed             = true;
            txHeader->CompressedSize         = len;
            txHeader->UncompressedSize       = sizeInBytes;
            txHeader->PreviousTransactionCrc = previousTransactionCrc;

            pages[0] = new IntPtr(txHeaderBase);
            for (int index = 0; index < compressedPages; index++)
            {
                pages[index + 1] = new IntPtr(compressionBuffer + (index * AbstractPager.PageSize));
            }

            txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);

            return(pages);
        }
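
The sizing logic above rounds byte counts up to whole pages, and uses LZ4.MaximumOutputLength because LZ4's worst-case output is slightly larger than its input. A sketch of that ceiling division (PagesFor is an illustrative name, not part of the source):

        // Rounds a byte count up to whole pages, as done inline above.
        static int PagesFor(int sizeInBytes, int pageSize)
        {
            return sizeInBytes / pageSize + (sizeInBytes % pageSize == 0 ? 0 : 1);
        }
        // e.g. PagesFor(8192, 4096) == 2, PagesFor(8193, 4096) == 3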
Example #3
        public void ClearPrefixInfo()
        {
            if (KeysPrefixed == false)
            {
                return;
            }

            UnmanagedMemory.Set((byte *)_prefixSection->PrefixOffsets, 0, sizeof(ushort) * PrefixCount);
            _prefixSection->NextPrefixId = 0;
        }
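
A sketch of the prefix section shape this method implies; the field names come from the snippet, while the count, types, and layout are assumptions:

        // Implied shape only; the real Voron declaration may differ.
        public unsafe struct PrefixSection
        {
            public const int PrefixCount = 8;               // assumed constant
            public fixed ushort PrefixOffsets[PrefixCount]; // zeroed by ClearPrefixInfo
            public byte NextPrefixId;                       // reset to 0 by ClearPrefixInfo
        }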
Example #4
        public int Encode64(
            byte *input,
            byte *output,
            int inputLength,
            int outputLength)
        {
            if (inputLength < LZ4_64KLIMIT)
            {
                UnmanagedMemory.Set((byte *)_hashtable64K, 0, HASH64K_TABLESIZE * sizeof(ushort));
                return(LZ4_compress64kCtx_64(_hashtable64K, input, output, inputLength, outputLength));
            }

            UnmanagedMemory.Set((byte *)_hashtable, 0, HASH_TABLESIZE * sizeof(uint));
            return(LZ4_compressCtx_64(_hashtable, input, output, inputLength, outputLength));
        }
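
Encode64 zeroes the relevant hash table before each run so stale entries from a previous buffer cannot yield bogus match candidates; the smaller 16-bit table suffices when the whole input fits in LZ4's 64KB window. A hypothetical usage sketch, assuming the surrounding type is LZ4 and the output buffer is sized to the worst case:

        // Hypothetical caller; CompressBuffer is not part of the source.
        public static unsafe int CompressBuffer(LZ4 lz4, byte[] input, byte[] output)
        {
            fixed (byte* pIn = input)
            fixed (byte* pOut = output)
            {
                return lz4.Encode64(pIn, pOut, input.Length, output.Length);
            }
        }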
Example #5
        private void InitTransactionHeader()
        {
            var allocation = _env.ScratchBufferPool.Allocate(this, 1);
            var page       = _env.ScratchBufferPool.ReadPage(this, allocation.ScratchFileNumber, allocation.PositionInScratchBuffer);

            _transactionHeaderPage = allocation;

            UnmanagedMemory.Set(page.Pointer, 0, Environment.Options.PageSize);
            _txHeader = (TransactionHeader *)page.Pointer;
            _txHeader->HeaderMarker = Constants.TransactionHeaderMarker;

            _txHeader->TransactionId     = _id;
            _txHeader->NextPageNumber    = _state.NextPageNumber;
            _txHeader->LastPageNumber    = -1;
            _txHeader->PageCount         = -1;
            _txHeader->Hash              = 0;
            _txHeader->TimeStampTicksUtc = DateTime.UtcNow.Ticks;
            _txHeader->TxMarker          = TransactionMarker.None;
            _txHeader->CompressedSize    = 0;
            _txHeader->UncompressedSize  = 0;

            _allocatedPagesInTransaction = 0;
            _overflowPagesInTransaction  = 0;
        }
Example #6
        public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options)
        {
            if (_readAt4Kb >= _journalPagerNumberOfAllocated4Kb)
            {
                return(false);
            }

            TransactionHeader *current;

            if (TryReadAndValidateHeader(options, out current) == false)
            {
                var lastValid4Kb = _readAt4Kb;
                _readAt4Kb++;
                while (_readAt4Kb < _journalPagerNumberOfAllocated4Kb)
                {
                    if (TryReadAndValidateHeader(options, out current))
                    {
                        RequireHeaderUpdate = true;
                        break;
                    }
                    _readAt4Kb++;
                }

                _readAt4Kb = lastValid4Kb;
                return(false);
            }
            bool performDecompression = current->CompressedSize != -1;

            var size = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;

            var transactionSizeIn4Kb =
                (size + sizeof(TransactionHeader)) / (4 * Constants.Size.Kilobyte) +
                ((size + sizeof(TransactionHeader)) % (4 * Constants.Size.Kilobyte) == 0 ? 0 : 1);


            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                _readAt4Kb           += transactionSizeIn4Kb;
                LastTransactionHeader = current;
                return(true); // skipping
            }

            _readAt4Kb += transactionSizeIn4Kb;

            TransactionHeaderPageInfo *pageInfoPtr;
            byte *outputPage;

            if (performDecompression)
            {
                var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
                _recoveryPager.EnsureContinuous(0, numberOfPages);
                _recoveryPager.EnsureMapped(this, 0, numberOfPages);
                outputPage = _recoveryPager.AcquirePagePointer(this, 0);
                UnmanagedMemory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);

                try
                {
                    LZ4.Decode64LongBuffers((byte *)current + sizeof(TransactionHeader), current->CompressedSize, outputPage,
                                            current->UncompressedSize, true);
                }
                catch (Exception e)
                {
                    options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                    RequireHeaderUpdate = true;

                    return(false);
                }
                pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
            }
            else
            {
                var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
                _recoveryPager.EnsureContinuous(0, numberOfPages);
                _recoveryPager.EnsureMapped(this, 0, numberOfPages);
                outputPage = _recoveryPager.AcquirePagePointer(this, 0);
                UnmanagedMemory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);
                Memory.Copy(outputPage, (byte *)current + sizeof(TransactionHeader), current->UncompressedSize);
                pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;
            }

            long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;

            if (totalRead > current->UncompressedSize)
            {
                throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
            }

            for (var i = 0; i < current->PageCount; i++)
            {
                if (pageInfoPtr[i].PageNumber > current->LastPageNumber)
                {
                    throw new InvalidDataException($"Transaction {current->TransactionId} contains refeence to page {pageInfoPtr[i].PageNumber} which is after the last allocated page {current->LastPageNumber}");
                }
            }

            for (var i = 0; i < current->PageCount; i++)
            {
                if (totalRead > current->UncompressedSize)
                {
                    throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
                }

                Debug.Assert(_journalPager.Disposed == false);
                if (performDecompression)
                {
                    Debug.Assert(_recoveryPager.Disposed == false);
                }

                var numberOfPagesOnDestination = GetNumberOfPagesFor(pageInfoPtr[i].Size);
                _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

                // We are going to overwrite the page, so we don't care about its current content
                var pagePtr = _dataPager.AcquirePagePointerForNewPage(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

                var pageNumber = *(long *)(outputPage + totalRead);
                if (pageInfoPtr[i].PageNumber != pageNumber)
                {
                    throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {pageNumber}");
                }
                totalRead += sizeof(long);

                _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

                if (pageInfoPtr[i].DiffSize == 0)
                {
                    Memory.Copy(pagePtr, outputPage + totalRead, pageInfoPtr[i].Size);
                    totalRead += pageInfoPtr[i].Size;
                }
                else
                {
                    _diffApplier.Destination = pagePtr;
                    _diffApplier.Diff        = outputPage + totalRead;
                    _diffApplier.Size        = pageInfoPtr[i].Size;
                    _diffApplier.DiffSize    = pageInfoPtr[i].DiffSize;
                    _diffApplier.Apply(pageInfoPtr[i].IsNewDiff);
                    totalRead += pageInfoPtr[i].DiffSize;
                }

                _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
            }

            LastTransactionHeader = current;

            return(true);
        }
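
The per-page metadata is read through TransactionHeaderPageInfo, whose shape, as implied by the fields the loops touch, is roughly the following (ordering and exact types are assumptions):

        // Inferred from usage above; the real Voron layout may differ.
        public struct TransactionHeaderPageInfo
        {
            public long PageNumber; // destination page in the data file
            public long Size;       // size of the page range, in bytes
            public long DiffSize;   // 0 => a full copy follows, else a diff of this many bytes
            public bool IsNewDiff;  // selects the diff format for the applier
        }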
Example #7
        private IntPtr[] CompressPages(LowLevelTransaction tx, int numberOfPages, AbstractPager compressionPager)
        {
            // numberOfPages includes the tx header page, which we don't compress
            var dataPagesCount = numberOfPages - 1;

            int pageSize    = tx.Environment.Options.PageSize;
            var sizeInBytes = dataPagesCount * pageSize;

            // We want to include the transaction header directly in the compression buffer.
            var outputBufferSize    = LZ4.MaximumOutputLength(sizeInBytes) + sizeof(TransactionHeader);
            var outputBufferInPages = outputBufferSize / pageSize +
                                      (outputBufferSize % pageSize == 0 ? 0 : 1);

            // The pages required includes the intermediate pages and the required output pages.
            var pagesRequired = (dataPagesCount + outputBufferInPages);
            var pagerState    = compressionPager.EnsureContinuous(0, pagesRequired);

            tx.EnsurePagerStateReference(pagerState);

            // Get the pointer to the output buffer, which will hold the transaction header plus the compressed data.
            var outputBuffer = compressionPager.AcquirePagePointer(tx, dataPagesCount);

            // Where we are going to store the input data contiguously, to compress it afterwards.
            var tempBuffer = compressionPager.AcquirePagePointer(tx, 0);
            var txPages    = tx.GetTransactionPages();
            var write      = tempBuffer;

            foreach (var txPage in txPages)
            {
                var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txPage.ScratchFileNumber, txPage.PositionInScratchBuffer);
                var count       = txPage.NumberOfPages * pageSize;
                Memory.Copy(write, scratchPage, count);
                write += count;
            }

            var compressionBuffer = outputBuffer + sizeof(TransactionHeader);
            var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBufferSize);

            int totalLength     = len + sizeof(TransactionHeader); // We need to account for the transaction header as part of the total length.
            var remainder       = totalLength % pageSize;
            var compressedPages = (totalLength / pageSize) + (remainder == 0 ? 0 : 1);

            if (remainder != 0)
            {
                // zero the remainder of the page
                UnmanagedMemory.Set(outputBuffer + totalLength, 0, pageSize - remainder);
            }

            var txHeaderPage = tx.GetTransactionHeaderPage();
            var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txHeaderPage.ScratchFileNumber, txHeaderPage.PositionInScratchBuffer);
            var txHeader     = (TransactionHeader *)txHeaderBase;

            txHeader->Compressed       = true;
            txHeader->CompressedSize   = len;
            txHeader->UncompressedSize = sizeInBytes;
            txHeader->Hash             = Hashing.XXHash64.Calculate(compressionBuffer, len);

            // Copy the transaction header to the output buffer.
            Memory.Copy(outputBuffer, txHeaderBase, sizeof(TransactionHeader));

            var pages = new IntPtr[compressedPages];

            for (int index = 0; index < compressedPages; index++)
            {
                pages[index] = new IntPtr(outputBuffer + (index * pageSize));
            }

            return(pages);
        }
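
For reference, the output buffer layout this method produces (the offsets follow directly from the code above):

        // outputBuffer layout after CompressPages:
        //   [0 .. sizeof(TransactionHeader))             copied transaction header
        //   [sizeof(TransactionHeader) .. +len)          LZ4-compressed data pages
        //   [totalLength .. compressedPages * pageSize)  zero padding
        // Note that txHeader->Hash covers only the compressed payload, not the header.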
Example #8
        public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
        {
            if (_readingPage >= _pager.NumberOfAllocatedPages)
            {
                return(false);
            }

            if (MaxPageToRead != null && _readingPage >= MaxPageToRead.Value)
            {
                return(false);
            }

            TransactionHeader *current;

            if (!TryReadAndValidateHeader(options, out current))
            {
                return(false);
            }

            var transactionSize = GetNumberOfPagesFromSize(current->Compressed ? current->CompressedSize : current->UncompressedSize);

            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                LastTransactionHeader = current;
                _readingPage         += transactionSize;
                return(true);                // skipping
            }

            if (checkCrc && !ValidatePagesCrc(options, transactionSize, current))
            {
                return(false);
            }

            _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1);
            var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);

            UnmanagedMemory.Set(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
            if (current->Compressed)
            {
                if (TryDecompressTransactionPages(options, current, dataPage) == false)
                {
                    return(false);
                }
            }
            else
            {
                Memory.Copy(dataPage, _pager.AcquirePagePointer(_readingPage), (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
            }

            var tempTransactionPageTranslation = new Dictionary<long, RecoveryPagePosition>();

            for (var i = 0; i < current->PageCount; i++)
            {
                Debug.Assert(_pager.Disposed == false);
                Debug.Assert(_recoveryPager.Disposed == false);

                var page = _recoveryPager.Read(_recoveryPage);

                var pagePosition = new RecoveryPagePosition
                {
                    JournalPos    = _recoveryPage,
                    TransactionId = current->TransactionId
                };

                if (page.IsOverflow)
                {
                    var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);

                    pagePosition.IsOverflow            = true;
                    pagePosition.NumberOfOverflowPages = numOfPages;

                    _recoveryPage += numOfPages;
                }
                else
                {
                    _recoveryPage++;
                }

                tempTransactionPageTranslation[page.PageNumber] = pagePosition;
            }

            _readingPage += transactionSize;

            LastTransactionHeader = current;

            foreach (var pagePosition in tempTransactionPageTranslation)
            {
                _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;

                if (pagePosition.Value.IsOverflow)
                {
                    Debug.Assert(pagePosition.Value.NumberOfOverflowPages != -1);

                    for (int i = 1; i < pagePosition.Value.NumberOfOverflowPages; i++)
                    {
                        _transactionPageTranslation.Remove(pagePosition.Key + i);
                    }
                }
            }

            return(true);
        }
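
RecoveryPagePosition, as implied by the assignments above (the real declaration may differ):

        // Inferred from usage; -1 marks an unknown overflow count, per the Debug.Assert.
        public class RecoveryPagePosition
        {
            public long JournalPos;
            public long TransactionId;
            public bool IsOverflow;
            public int NumberOfOverflowPages = -1;
        }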
Example #9
        private Page AllocatePage(int numberOfPages, long pageNumber, Page? previousVersion, bool zeroPage)
        {
            if (_disposed)
            {
                throw new ObjectDisposedException("Transaction");
            }

            if (_env.Options.MaxStorageSize.HasValue) // check against quota
            {
                var maxAvailablePageNumber = _env.Options.MaxStorageSize / Environment.Options.PageSize;

                if (pageNumber > maxAvailablePageNumber)
                {
                    ThrowQuotaExceededException(pageNumber, maxAvailablePageNumber);
                }
            }


            Debug.Assert(pageNumber < State.NextPageNumber);

#if VALIDATE
            VerifyNoDuplicateScratchPages();
#endif
            var pageFromScratchBuffer = _env.ScratchBufferPool.Allocate(this, numberOfPages);
            pageFromScratchBuffer.PreviousVersion = previousVersion;
            _transactionPages.Add(pageFromScratchBuffer);

            _allocatedPagesInTransaction++;
            if (numberOfPages > 1)
            {
                _overflowPagesInTransaction += (numberOfPages - 1);
            }

            _scratchPagesTable[pageNumber] = pageFromScratchBuffer;

            _dirtyPages.Add(pageNumber);

            if (numberOfPages > 1)
            {
                _dirtyOverflowPages.Add(pageNumber + 1, numberOfPages - 1);
            }

            if (numberOfPages != 1)
            {
                _env.ScratchBufferPool.EnsureMapped(this,
                                                    pageFromScratchBuffer.ScratchFileNumber,
                                                    pageFromScratchBuffer.PositionInScratchBuffer,
                                                    numberOfPages);
            }

            var newPage = _env.ScratchBufferPool.ReadPage(this, pageFromScratchBuffer.ScratchFileNumber,
                                                          pageFromScratchBuffer.PositionInScratchBuffer);

            if (zeroPage)
            {
                UnmanagedMemory.Set(newPage.Pointer, 0, Environment.Options.PageSize * numberOfPages);
            }

            newPage.PageNumber = pageNumber;
            newPage.Flags      = PageFlags.Single;

            TrackWritablePage(newPage);

#if VALIDATE
            VerifyNoDuplicateScratchPages();
#endif

            return(newPage);
        }
Example #10
        public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal, Action<string> infoNotify = null,
                           Action backupStarted = null)
        {
            if (env.Options.IncrementalBackupEnabled == false)
            {
                throw new InvalidOperationException("Incremental backup is disabled for this storage");
            }

            var pageNumberToPageInScratch = new Dictionary<long, long>();

            if (infoNotify == null)
            {
                infoNotify = str => { };
            }

            var toDispose = new List<IDisposable>();

            try
            {
                IncrementalBackupInfo backupInfo;
                long lastWrittenLogPage = -1;
                long lastWrittenLogFile = -1;

                using (var txw = env.NewLowLevelTransaction(TransactionFlags.ReadWrite))
                {
                    backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

                    if (env.Journal.CurrentFile != null)
                    {
                        lastWrittenLogFile = env.Journal.CurrentFile.Number;
                        lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
                    }

                    //txw.Commit(); - intentionally not committing
                }

                if (backupStarted != null)
                {
                    backupStarted();
                }

                infoNotify("Voron - reading storage journals for snapshot pages");

                var lastBackedUpFile     = backupInfo.LastBackedUpJournal;
                var lastBackedUpPage     = backupInfo.LastBackedUpJournalPage;
                var firstJournalToBackup = backupInfo.LastBackedUpJournal;

                if (firstJournalToBackup == -1)
                {
                    firstJournalToBackup = 0; // first time that we do incremental backup
                }
                var lastTransaction = new TransactionHeader {
                    TransactionId = -1
                };

                var recoveryPager = env.Options.CreateScratchPager("min-inc-backup.scratch");
                toDispose.Add(recoveryPager);
                int recoveryPage = 0;
                for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                {
                    lastBackedUpFile = journalNum;
                    var journalFile = IncrementalBackup.GetJournalFile(env, journalNum, backupInfo);
                    try
                    {
                        using (var filePager = env.Options.OpenJournalPager(journalNum))
                        {
                            var reader = new JournalReader(filePager, recoveryPager, 0, null, recoveryPage);
                            reader.MaxPageToRead = lastBackedUpPage = journalFile.JournalWriter.NumberOfAllocatedPages;
                            if (journalNum == lastWrittenLogFile) // set the last part of the log file we'll be reading
                            {
                                reader.MaxPageToRead = lastBackedUpPage = lastWrittenLogPage;
                            }

                            if (lastBackedUpPage == journalFile.JournalWriter.NumberOfAllocatedPages) // past the file size
                            {
                                // move to the next
                                lastBackedUpPage = -1;
                                lastBackedUpFile++;
                            }

                            if (journalNum == backupInfo.LastBackedUpJournal) // continue from last backup
                            {
                                reader.SetStartPage(backupInfo.LastBackedUpJournalPage);
                            }
                            TransactionHeader *lastJournalTxHeader = null;
                            while (reader.ReadOneTransaction(env.Options))
                            {
                                // read all transactions here
                                lastJournalTxHeader = reader.LastTransactionHeader;
                            }

                            if (lastJournalTxHeader != null)
                            {
                                lastTransaction = *lastJournalTxHeader;
                            }

                            recoveryPage = reader.RecoveryPage;

                            foreach (var pagePosition in reader.TransactionPageTranslation)
                            {
                                var pageInJournal = pagePosition.Value.JournalPos;
                                var page          = recoveryPager.Read(null, pageInJournal);
                                pageNumberToPageInScratch[pagePosition.Key] = pageInJournal;
                                if (page.IsOverflow)
                                {
                                    var numberOfOverflowPages = recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
                                    for (int i = 1; i < numberOfOverflowPages; i++)
                                    {
                                        pageNumberToPageInScratch.Remove(page.PageNumber + i);
                                    }
                                }
                            }
                        }
                    }
                    finally
                    {
                        journalFile.Release();
                    }
                }

                if (pageNumberToPageInScratch.Count == 0)
                {
                    infoNotify("Voron - no changes since last backup, nothing to do");
                    return;
                }

                infoNotify("Voron - started writing snapshot file.");

                if (lastTransaction.TransactionId == -1)
                {
                    throw new InvalidOperationException("Could not find any transactions in the journals, but found pages to write? That ain't right.");
                }


                // it is possible that we merged enough transactions so the _merged_ output is too large for us.
                // Voron limits transactions to about 4GB each. That means that we can't just merge all transactions
                // blindly, for fear of hitting this limit. So we need to split things.
                // We are also limited to about 8 TB of data in general before we literally can't fit the number of pages into
                // pageNumberToPageInScratch even theoretically.
                // We're fine with saying that you need to run min inc backup before you hit 8 TB in your increment, so that works for now.
                // We are also going to use env.Options.MaxScratchBufferSize to set the actual transaction limit here, to avoid issues
                // down the road and to limit how big a single transaction can be before the theoretical 4GB limit.

                var nextJournalNum = lastBackedUpFile;
                using (var file = new FileStream(backupPath, FileMode.Create))
                {
                    using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
                    {
                        var copier = new DataCopier(env.Options.PageSize * 16);

                        var finalPager = env.Options.CreateScratchPager("min-inc-backup-final.scratch");
                        toDispose.Add(finalPager);
                        finalPager.EnsureContinuous(0, 1);//txHeader

                        foreach (var partition in Partition(pageNumberToPageInScratch.Values, env.Options.MaxNumberOfPagesInMergedTransaction))
                        {
                            int totalNumberOfPages = 0;
                            int overflowPages      = 0;
                            int start = 1;
                            foreach (var pageNum in partition)
                            {
                                var p    = recoveryPager.Read(null, pageNum);
                                var size = 1;
                                if (p.IsOverflow)
                                {
                                    size           = recoveryPager.GetNumberOfOverflowPages(p.OverflowSize);
                                    overflowPages += (size - 1);
                                }
                                totalNumberOfPages += size;
                                finalPager.EnsureContinuous(start, size); //maybe increase size

                                Memory.Copy(finalPager.AcquirePagePointer(null, start), p.Base, size * env.Options.PageSize);

                                start += size;
                            }


                            var txPage = finalPager.AcquirePagePointer(null, 0);
                            UnmanagedMemory.Set(txPage, 0, env.Options.PageSize);
                            var txHeader = (TransactionHeader *)txPage;
                            txHeader->HeaderMarker      = Constants.TransactionHeaderMarker;
                            txHeader->Root              = lastTransaction.Root;
                            txHeader->OverflowPageCount = overflowPages;
                            txHeader->PageCount         = totalNumberOfPages - overflowPages;
                            txHeader->TransactionId     = lastTransaction.TransactionId;
                            txHeader->NextPageNumber    = lastTransaction.NextPageNumber;
                            txHeader->LastPageNumber    = lastTransaction.LastPageNumber;
                            txHeader->TxMarker          = TransactionMarker.Commit | TransactionMarker.Merged;
                            txHeader->Compressed        = false;
                            txHeader->UncompressedSize  = txHeader->CompressedSize = totalNumberOfPages * env.Options.PageSize;
                            txHeader->Hash              = Hashing.XXHash64.Calculate(finalPager.AcquirePagePointer(null, 1), totalNumberOfPages * env.Options.PageSize);

                            var entry = package.CreateEntry(string.Format("{0:D19}.merged-journal", nextJournalNum), compression);
                            nextJournalNum++;
                            using (var stream = entry.Open())
                            {
                                copier.ToStream(finalPager.AcquirePagePointer(null, 0), (totalNumberOfPages + 1) * env.Options.PageSize, stream);
                            }
                        }
                    }
                    file.Flush(true);// make sure we hit the disk and stay there
                }

                env.HeaderAccessor.Modify(header =>
                {
                    header->IncrementalBackup.LastBackedUpJournal     = lastBackedUpFile;
                    header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                });
            }
            finally
            {
                foreach (var disposable in toDispose)
                {
                    disposable.Dispose();
                }
            }
        }
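
Partition is referenced above but not shown; presumably it splits the merged page list into chunks of at most env.Options.MaxNumberOfPagesInMergedTransaction, so no single merged transaction exceeds the limits discussed in the comments. A plausible sketch (hypothetical implementation):

        private static IEnumerable<List<long>> Partition(IEnumerable<long> items, long maxCount)
        {
            var batch = new List<long>();
            foreach (var item in items)
            {
                batch.Add(item);
                if (batch.Count >= maxCount)
                {
                    yield return batch;
                    batch = new List<long>();
                }
            }
            if (batch.Count > 0)
            {
                yield return batch;
            }
        }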
Example #11
        public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options, bool checkCrc = true)
        {
            if (_readingPage >= _journalPager.NumberOfAllocatedPages)
            {
                return(false);
            }

            if (MaxPageToRead != null && _readingPage >= MaxPageToRead.Value)
            {
                return(false);
            }

            TransactionHeader *current;

            if (!TryReadAndValidateHeader(options, out current))
            {
                return(false);
            }


            var transactionSize = GetNumberOfPagesFromSize(options, current->CompressedSize + sizeof(TransactionHeader));

            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                _readingPage         += transactionSize;
                LastTransactionHeader = current;
                return(true); // skipping
            }

            if (checkCrc && !ValidatePagesHash(options, current))
            {
                return(false);
            }

            _readingPage += transactionSize;
            var numberOfPages = _recoveryPager.GetNumberOfOverflowPages(current->UncompressedSize);

            _recoveryPager.EnsureContinuous(0, numberOfPages);
            _recoveryPager.EnsureMapped(this, 0, numberOfPages);
            var outputPage = _recoveryPager.AcquirePagePointer(this, 0);

            UnmanagedMemory.Set(outputPage, 0, (long)numberOfPages * options.PageSize);

            try
            {
                LZ4.Decode64LongBuffers((byte *)current + sizeof(TransactionHeader), current->CompressedSize, outputPage,
                                        current->UncompressedSize, true);
            }
            catch (Exception e)
            {
                options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                RequireHeaderUpdate = true;

                return(false);
            }

            var pageInfoPtr = (TransactionHeaderPageInfo *)outputPage;

            long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;

            for (var i = 0; i < current->PageCount; i++)
            {
                if (totalRead > current->UncompressedSize)
                {
                    throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
                }

                Debug.Assert(_journalPager.Disposed == false);
                Debug.Assert(_recoveryPager.Disposed == false);

                var numberOfPagesOnDestination = GetNumberOfPagesFromSize(options, pageInfoPtr[i].Size);
                _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
                var pagePtr = _dataPager.AcquirePagePointer(this, pageInfoPtr[i].PageNumber);

                var diffPageNumber = *(long *)(outputPage + totalRead);
                if (pageInfoPtr[i].PageNumber != diffPageNumber)
                {
                    throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {diffPageNumber}");
                }
                totalRead += sizeof(long);

                _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

                if (pageInfoPtr[i].DiffSize == 0)
                {
                    Memory.Copy(pagePtr, outputPage + totalRead, pageInfoPtr[i].Size);
                    totalRead += pageInfoPtr[i].Size;
                }
                else
                {
                    _diffApplier.Destination = pagePtr;
                    _diffApplier.Diff        = outputPage + totalRead;
                    _diffApplier.Size        = pageInfoPtr[i].Size;
                    _diffApplier.DiffSize    = pageInfoPtr[i].DiffSize;
                    _diffApplier.Apply();
                    totalRead += pageInfoPtr[i].DiffSize;
                }

                _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
            }

            LastTransactionHeader = current;

            return(true);
        }
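
GetNumberOfPagesFromSize is not shown in this snippet; presumably it rounds a byte count up to whole pages using the configured page size, along these lines (a presumed shape, not the actual source):

        private static int GetNumberOfPagesFromSize(StorageEnvironmentOptions options, long size)
        {
            return (int)(size / options.PageSize + (size % options.PageSize == 0 ? 0 : 1));
        }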
        }