Example #1
        public void SetLastReadTxHeader(long maxTransactionId, ref TransactionHeader lastReadTxHeader)
        {
            int low  = 0;
            int high = _transactionHeaders.Count - 1;

            while (low <= high)
            {
                int  mid        = (low + high) >> 1;
                long midValTxId = _transactionHeaders[mid].TransactionId;

                if (midValTxId < maxTransactionId)
                {
                    low = mid + 1;
                }
                else if (midValTxId > maxTransactionId)
                {
                    high = mid - 1;
                }
                else // found the max tx id
                {
                    lastReadTxHeader = _transactionHeaders[mid];
                    return;
                }
            }
            if (low == 0)
            {
                lastReadTxHeader.TransactionId = -1; // not found
                return;
            }
            if (high != _transactionHeaders.Count - 1)
            {
                throw new InvalidOperationException("Found a gap in the transaction headers held by this journal file in memory, shouldn't be possible");
            }
            lastReadTxHeader = _transactionHeaders[_transactionHeaders.Count - 1];
        }
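What makes Example #1 worth reading is what the indices mean once the loop exits without an exact hit: high lands on the largest stored id smaller than maxTransactionId. A minimal, self-contained sketch of the same search shape (illustrative names, not the original class) makes the three exit cases explicit:

    using System;
    using System.Collections.Generic;

    class BinarySearchSketch
    {
        // Same loop shape as SetLastReadTxHeader, over a plain sorted id list.
        static int FindExact(List<long> ids, long target)
        {
            int low = 0;
            int high = ids.Count - 1;
            while (low <= high)
            {
                int mid = (low + high) >> 1;
                if (ids[mid] < target) low = mid + 1;
                else if (ids[mid] > target) high = mid - 1;
                else return mid; // exact hit
            }
            // Not found: 'high' now indexes the largest id smaller than target.
            // high == -1 (i.e. low == 0)  -> every stored id is newer than target
            // high <  ids.Count - 1       -> target falls inside the range: a gap
            // high == ids.Count - 1       -> target is newer than everything stored
            return -1;
        }

        static void Main()
        {
            var ids = new List<long> { 10, 20, 30 };
            Console.WriteLine(FindExact(ids, 20)); // 1  (exact hit)
            Console.WriteLine(FindExact(ids, 5));  // -1 (low == 0 case: not found)
            Console.WriteLine(FindExact(ids, 40)); // -1 (high == Count - 1: caller takes the last header)
        }
    }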
Example #2
 public JournalReader(IVirtualPager pager, IVirtualPager recoveryPager, long lastSyncedTransactionId, TransactionHeader* previous)
 {
     RequireHeaderUpdate = false;
     _pager = pager;
     _recoveryPager = recoveryPager;
     _lastSyncedTransactionId = lastSyncedTransactionId;
     _readingPage = 0;
     _recoveryPage = 0;
     LastTransactionHeader = previous;
 }
Example #3
        internal void WriteDirect(TransactionHeader* transactionHeader, PageFromScratchBuffer pages)
        {
            for (int i = 0; i < pages.NumberOfPages; i++)
            {
                var page = _env.ScratchBufferPool.ReadPage(pages.ScratchFileNumber, pages.PositionInScratchBuffer + i);
                int numberOfPages = 1;
                if (page.IsOverflow)
                {
                    numberOfPages = (page.OverflowSize / AbstractPager.PageSize) + (page.OverflowSize % AbstractPager.PageSize == 0 ? 0 : 1);
                    i += numberOfPages - 1; // the for loop's i++ advances past the last overflow page
                    _overflowPagesInTransaction += (numberOfPages - 1);
                }

                WritePageDirect(page, numberOfPages);

                _state.NextPageNumber = transactionHeader->NextPageNumber;
            }
        }
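The overflow branch above is a ceiling division: how many whole pages an overflow of OverflowSize bytes occupies. A standalone check of that arithmetic, assuming a 4,096-byte page size for illustration:

    using System;

    class OverflowPagesSketch
    {
        const int PageSize = 4096; // assumed for illustration

        static int PagesFor(int overflowSize)
        {
            // identical to the expression in WriteDirect: floor, plus 1 if there is a remainder
            return (overflowSize / PageSize) + (overflowSize % PageSize == 0 ? 0 : 1);
        }

        static void Main()
        {
            Console.WriteLine(PagesFor(4096));  // 1
            Console.WriteLine(PagesFor(4097));  // 2
            Console.WriteLine(PagesFor(10000)); // 3
        }
    }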
Example #4
 public TransactionToShip(TransactionHeader header)
 {
     Header = header;
 }
Example #5
        public void ToFile(StorageEnvironment env, string backupPath, CompressionLevel compression = CompressionLevel.Optimal, Action<string> infoNotify = null,
            Action backupStarted = null)
        {
            if (env.Options.IncrementalBackupEnabled == false)
                throw new InvalidOperationException("Incremental backup is disabled for this storage");

            var pageNumberToPageInScratch = new Dictionary<long, long>();
            if (infoNotify == null)
                infoNotify = str => { };
            var toDispose = new List<IDisposable>();
            try
            {
                IncrementalBackupInfo backupInfo;
                long lastWrittenLogPage = -1;
                long lastWrittenLogFile = -1;

                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

                    if (env.Journal.CurrentFile != null)
                    {
                        lastWrittenLogFile = env.Journal.CurrentFile.Number;
                        lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
                    }

                    //txw.Commit(); - intentionally not committing
                }

                if (backupStarted != null)
                    backupStarted();

                infoNotify("Voron - reading storage journals for snapshot pages");

                var lastBackedUpFile = backupInfo.LastBackedUpJournal;
                var lastBackedUpPage = backupInfo.LastBackedUpJournalPage;
                var firstJournalToBackup = backupInfo.LastBackedUpJournal;

                if (firstJournalToBackup == -1)
                    firstJournalToBackup = 0; // first time that we do incremental backup

                var lastTransaction = new TransactionHeader { TransactionId = -1 };

                var recoveryPager = env.Options.CreateScratchPager("min-inc-backup.scratch");
                toDispose.Add(recoveryPager);
                int recoveryPage = 0;
                for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                {
                    lastBackedUpFile = journalNum;
                    var journalFile = IncrementalBackup.GetJournalFile(env, journalNum, backupInfo);
                    try
                    {
                        using (var filePager = env.Options.OpenJournalPager(journalNum))
                        {
                            var reader = new JournalReader(filePager, recoveryPager, 0, null, recoveryPage);
                            reader.MaxPageToRead = lastBackedUpPage = journalFile.JournalWriter.NumberOfAllocatedPages;
                            if (journalNum == lastWrittenLogFile) // set the last part of the log file we'll be reading
                                reader.MaxPageToRead = lastBackedUpPage = lastWrittenLogPage;

                            if (lastBackedUpPage == journalFile.JournalWriter.NumberOfAllocatedPages) // past the file size
                            {
                                // move to the next
                                lastBackedUpPage = -1;
                                lastBackedUpFile++;
                            }

                            if (journalNum == backupInfo.LastBackedUpJournal) // continue from last backup
                                reader.SetStartPage(backupInfo.LastBackedUpJournalPage);
                            TransactionHeader* lastJournalTxHeader = null;
                            while (reader.ReadOneTransaction(env.Options))
                            {
                                // read all transactions here
                                lastJournalTxHeader = reader.LastTransactionHeader;
                            }

                            if (lastJournalTxHeader != null)
                                lastTransaction = *lastJournalTxHeader;

                            recoveryPage = reader.RecoveryPage;

                            foreach (var pagePosition in reader.TransactionPageTranslation)
                            {
                                var pageInJournal = pagePosition.Value.JournalPos;
                                var page = recoveryPager.Read(pageInJournal);
                                pageNumberToPageInScratch[pagePosition.Key] = pageInJournal;
                                if (page.IsOverflow)
                                {
                                    var numberOfOverflowPages = recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
                                    for (int i = 1; i < numberOfOverflowPages; i++)
                                        pageNumberToPageInScratch.Remove(page.PageNumber + i);
                                }
                            }
                        }
                    }
                    finally
                    {
                        journalFile.Release();
                    }
                }

                if (pageNumberToPageInScratch.Count == 0)
                {
                    infoNotify("Voron - no changes since last backup, nothing to do");
                    return;
                }

                infoNotify("Voron - started writing snapshot file.");

                if (lastTransaction.TransactionId == -1)
                    throw new InvalidOperationException("Could not find any transactions in the journals, but found pages to write? That ain't right.");

                // It is possible that we merged enough transactions that the _merged_ output is too large for us.
                // Voron limits transactions to about 4GB each, so we can't just merge all transactions
                // blindly, for fear of hitting this limit, and we need to split things.
                // We are also limited to about 8 TB of data in general before we literally can't fit the number of pages into
                // pageNumberToPageInScratch, even theoretically.
                // We're fine with saying that you need to run min inc backup before you hit 8 TB in your increment, so that works for now.
                // We are also going to use env.Options.MaxScratchBufferSize to set the actual transaction limit here, to avoid issues
                // down the road and to limit how big a single transaction can be before the theoretical 4GB limit.

                var nextJournalNum = lastBackedUpFile;
                using (var file = new FileStream(backupPath, FileMode.Create))
                {
                    using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
                    {
                        var copier = new DataCopier(AbstractPager.PageSize * 16);

                        var finalPager = env.Options.CreateScratchPager("min-inc-backup-final.scratch");
                        toDispose.Add(finalPager);
                        finalPager.EnsureContinuous(null, 0, 1);//txHeader

                        foreach (var partition in Partition(pageNumberToPageInScratch.Values, env.Options.MaxNumberOfPagesInMergedTransaction))
                        {
                            int totalNumberOfPages = 0;
                            int overflowPages = 0;
                            int start = 1;
                            foreach (var pageNum in partition)
                            {
                                var p = recoveryPager.Read(pageNum);
                                var size = 1;
                                if (p.IsOverflow)
                                {
                                    size = recoveryPager.GetNumberOfOverflowPages(p.OverflowSize);
                                    overflowPages += (size - 1);
                                }
                                totalNumberOfPages += size;
                                finalPager.EnsureContinuous(null, start, size); //maybe increase size

                                Memory.Copy(finalPager.AcquirePagePointer(start), p.Base, size * AbstractPager.PageSize);

                                start += size;
                            }

                            var txPage = finalPager.AcquirePagePointer(0);
                            UnmanagedMemory.Set(txPage, 0, AbstractPager.PageSize);
                            var txHeader = (TransactionHeader*)txPage;
                            txHeader->HeaderMarker = Constants.TransactionHeaderMarker;
                            txHeader->FreeSpace = lastTransaction.FreeSpace;
                            txHeader->Root = lastTransaction.Root;
                            txHeader->OverflowPageCount = overflowPages;
                            txHeader->PageCount = totalNumberOfPages - overflowPages;
                            txHeader->PreviousTransactionCrc = lastTransaction.PreviousTransactionCrc;
                            txHeader->TransactionId = lastTransaction.TransactionId;
                            txHeader->NextPageNumber = lastTransaction.NextPageNumber;
                            txHeader->LastPageNumber = lastTransaction.LastPageNumber;
                            txHeader->TxMarker = TransactionMarker.Commit | TransactionMarker.Merged;
                            txHeader->Compressed = false;
                            txHeader->UncompressedSize = txHeader->CompressedSize = totalNumberOfPages * AbstractPager.PageSize;

                            txHeader->Crc = Crc.Value(finalPager.AcquirePagePointer(1), 0, totalNumberOfPages * AbstractPager.PageSize);

                            var entry = package.CreateEntry(string.Format("{0:D19}.merged-journal", nextJournalNum), compression);
                            nextJournalNum++;
                            using (var stream = entry.Open())
                            {
                                copier.ToStream(finalPager.AcquirePagePointer(0), (totalNumberOfPages + 1) * AbstractPager.PageSize, stream);
                            }
                        }

                    }
                    file.Flush(true);// make sure we hit the disk and stay there
                }

                env.HeaderAccessor.Modify(header =>
                {
                    header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile;
                    header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                });
            }
            finally
            {
                foreach (var disposable in toDispose)
                    disposable.Dispose();
            }
        }
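A sketch of a typical call site, assuming this method lives on the minimal incremental backup helper; the class name, backup path, and callbacks below are illustrative, not taken from the original API:

    // Hypothetical call site: class name, path, and callbacks are assumptions.
    var backup = new MinimalIncrementalBackup();
    backup.ToFile(env, @"backups\inc-0001.zip",
        compression: CompressionLevel.Optimal,
        infoNotify: msg => Console.WriteLine(msg),
        backupStarted: () => Console.WriteLine("backup started"));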
Example #6
 public bool ReadTransaction(long pos, TransactionHeader* txHeader)
 {
     return _journalWriter.Read(pos, (byte*)txHeader, sizeof(TransactionHeader));
 }
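Since the method fills a caller-supplied header, the caller typically points it at a stack local. A sketch, assuming an unsafe context and a valid journal position pos:

    // Sketch: read the transaction header at journal position `pos` into a stack local.
    var txHeader = new TransactionHeader();
    if (ReadTransaction(pos, &txHeader))
    {
        Console.WriteLine("read tx " + txHeader.TransactionId);
    }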
Example #7
		public bool RecoverDatabase(TransactionHeader* txHeader)
		{
			// note, we don't need to do any concurrency here, this happens in a
			// single-threaded fashion on db startup
			var requireHeaderUpdate = false;

			var logInfo = _headerAccessor.Get(ptr => ptr->Journal);

			if (logInfo.JournalFilesCount == 0)
			{
				_journalIndex = logInfo.LastSyncedJournal;
				return false;
			}

			var oldestLogFileStillInUse = logInfo.CurrentJournal - logInfo.JournalFilesCount + 1;
			if (_env.Options.IncrementalBackupEnabled == false)
			{
				// we want to check that we clean up old log files if they aren't needed
				// this is more to be safe than anything else; they shouldn't be there.
				var unusedfiles = oldestLogFileStillInUse;
				while (true)
				{
					unusedfiles--;
					if (_env.Options.TryDeleteJournal(unusedfiles) == false)
						break;
				}

			}

			var lastSyncedTransactionId = logInfo.LastSyncedTransactionId;

			var journalFiles = new List<JournalFile>();
			long lastSyncedTxId = -1;
			long lastSyncedJournal = logInfo.LastSyncedJournal;
			uint lastShippedTxCrc = 0;
			for (var journalNumber = oldestLogFileStillInUse; journalNumber <= logInfo.CurrentJournal; journalNumber++)
			{
				using (var recoveryPager = _env.Options.CreateScratchPager(StorageEnvironmentOptions.JournalRecoveryName(journalNumber)))
				using (var pager = _env.Options.OpenJournalPager(journalNumber))
				{
					RecoverCurrentJournalSize(pager);

					var transactionHeader = txHeader->TransactionId == 0 ? null : txHeader;
					var journalReader = new JournalReader(pager, recoveryPager, lastSyncedTransactionId, transactionHeader);
					journalReader.RecoverAndValidate(_env.Options);

					var pagesToWrite = journalReader
						.TransactionPageTranslation
						.Select(kvp => recoveryPager.Read(kvp.Value.JournalPos))
						.OrderBy(x => x.PageNumber)
						.ToList();

					var lastReadHeaderPtr = journalReader.LastTransactionHeader;

					if (lastReadHeaderPtr != null)
					{
						if (pagesToWrite.Count > 0)
							ApplyPagesToDataFileFromJournal(pagesToWrite);

						*txHeader = *lastReadHeaderPtr;
						lastSyncedTxId = txHeader->TransactionId;
						lastShippedTxCrc = txHeader->Crc;
						lastSyncedJournal = journalNumber;
					}

					if (journalReader.RequireHeaderUpdate || journalNumber == logInfo.CurrentJournal)
					{
						var jrnlWriter = _env.Options.CreateJournalWriter(journalNumber, pager.NumberOfAllocatedPages * AbstractPager.PageSize);
						var jrnlFile = new JournalFile(jrnlWriter, journalNumber);
						jrnlFile.InitFrom(journalReader);
						jrnlFile.AddRef(); // creator reference - write ahead log

						journalFiles.Add(jrnlFile);
					}

					if (journalReader.RequireHeaderUpdate) //this should prevent further loading of transactions
					{
						requireHeaderUpdate = true;
						break;
					}
				}
			}

			Shipper.SetPreviousTransaction(lastSyncedTxId, lastShippedTxCrc);
			

			_files = _files.AppendRange(journalFiles);
			
			Debug.Assert(lastSyncedTxId >= 0);
			Debug.Assert(lastSyncedJournal >= 0);

			_journalIndex = lastSyncedJournal;

			_headerAccessor.Modify(
				header =>
				{
					header->Journal.LastSyncedJournal = lastSyncedJournal;
					header->Journal.LastSyncedTransactionId = lastSyncedTxId;
					header->Journal.CurrentJournal = lastSyncedJournal;
					header->Journal.JournalFilesCount = _files.Count;
					header->IncrementalBackup.LastCreatedJournal = _journalIndex;
					header->PreviousTransactionCrc = lastShippedTxCrc;
				});

			CleanupInvalidJournalFiles(lastSyncedJournal);
			CleanupUnusedJournalFiles(oldestLogFileStillInUse, lastSyncedJournal);

			if (_files.Count > 0)
			{
				var lastFile = _files.Last();
				if (lastFile.AvailablePages >= 2)
					// it must have at least one page for the next transaction header and one page for data
					CurrentFile = lastFile;
			}

			return requireHeaderUpdate;
		}
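The txHeader->TransactionId == 0 check above implies a startup call site along these lines; the names here are assumptions from context, not the original API:

    // Hypothetical startup flow: a zeroed header means "no previous transaction yet".
    var txHeader = new TransactionHeader(); // TransactionId == 0
    bool requireHeaderUpdate = journal.RecoverDatabase(&txHeader);
    if (requireHeaderUpdate)
    {
        // the last journal ended mid-transaction; recovery stopped at the last
        // fully committed transaction, and the file header must be rewritten
    }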
Example #8
        internal void WriteDirect(TransactionHeader* transactionHeader, PageFromScratchBuffer pages)
        {
            for (int i = 0; i < pages.NumberOfPages; i++)
            {
                var page = _env.ScratchBufferPool.ReadPage(pages.PositionInScratchBuffer+i);
                int numberOfPages = 1;
                if (page.IsOverflow)
                {
                    numberOfPages = (page.OverflowSize / AbstractPager.PageSize) + (page.OverflowSize % AbstractPager.PageSize == 0 ? 0 : 1);
                    i += numberOfPages - 1; // the for loop's i++ advances past the last overflow page
                    _overflowPagesInTransaction += (numberOfPages - 1);
                }

                var pageFromScratchBuffer = _env.ScratchBufferPool.Allocate(this, numberOfPages);

                var dest = _env.ScratchBufferPool.AcquirePagePointer(pageFromScratchBuffer.PositionInScratchBuffer);
                NativeMethods.memcpy(dest, page.Base, numberOfPages*AbstractPager.PageSize);

                _allocatedPagesInTransaction++;

                _dirtyPages.Add(page.PageNumber);
                page.Dirty = true;

                if (numberOfPages > 1)
                    _dirtyOverflowPages.Add(page.PageNumber + 1, numberOfPages - 1);

                _scratchPagesTable[page.PageNumber] = pageFromScratchBuffer;
                _transactionPages.Add(pageFromScratchBuffer);

                _state.NextPageNumber = transactionHeader->NextPageNumber;
            }
        }
Example #9
        private bool ValidatePagesCrc(StorageEnvironmentOptions options, int compressedPages, TransactionHeader* current)
        {
            uint crc = Crc.Value(_pager.AcquirePagePointer(_readingPage), 0, compressedPages * AbstractPager.PageSize);

            if (crc != current->Crc)
            {
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this, "Invalid CRC signature for transaction " + current->TransactionId, null);

                return false;
            }
            return true;
        }
Example #10
        private void ValidateHeader(TransactionHeader* current, TransactionHeader* previous)
        {
            if (current->TransactionId < 0)
                throw new InvalidDataException("Transaction id cannot be less than 0 (Tx: " + current->TransactionId + " )");
            if (current->TxMarker.HasFlag(TransactionMarker.Commit) && current->LastPageNumber < 0)
                throw new InvalidDataException("Last page number after committed transaction must not be negative");
            if (current->TxMarker.HasFlag(TransactionMarker.Commit) && current->PageCount > 0 && current->Crc == 0)
                throw new InvalidDataException("Checksum of a committed and non-empty transaction can't be equal to 0");
            if (current->Compressed)
            {
                if (current->CompressedSize <= 0)
                    throw new InvalidDataException("Compression error in transaction.");
            }
            else
                throw new InvalidDataException("Uncompressed transactions are not supported.");

            if (previous == null)
                return;

            if (current->TransactionId != 1 &&
                // transaction 1 is the first storage transaction and does not increment the transaction counter after commit
                current->TransactionId - previous->TransactionId != 1)
                throw new InvalidDataException("Unexpected transaction id. Expected: " + (previous->TransactionId + 1) +
                                               ", got:" + current->TransactionId);
        }
Example #11
        private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader* current)
        {
            current = (TransactionHeader*)_pager.Read(_readingPage).Base;

            if (current->HeaderMarker != Constants.TransactionHeaderMarker)
            {
                // not a transaction page,

                // if the header marker is zero, we are probably in the area at the end of the log file, and have no additional log records
                // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
                // this log file and move to the next one.

                RequireHeaderUpdate = current->HeaderMarker != 0;
                if (RequireHeaderUpdate)
                {
                    options.InvokeRecoveryError(this,
                        "Transaction " + current->TransactionId +
                        " header marker was set to garbage value, file is probably corrupted", null);
                }

                return false;
            }

            ValidateHeader(current, LastTransactionHeader);

            if (current->TxMarker.HasFlag(TransactionMarker.Commit) == false)
            {
                // uncommitted transaction, probably
                RequireHeaderUpdate = true;
                options.InvokeRecoveryError(this,
                        "Transaction " + current->TransactionId +
                        " was not committed", null);
                return false;
            }

            _readingPage++;
            return true;
        }
Example #12
		private unsafe bool TryDecompressTransactionPages(StorageEnvironmentOptions options, TransactionHeader* current, byte* dataPage)
		{
			try
			{
				LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
			}
			catch (Exception e)
			{
				options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
				RequireHeaderUpdate = true;

				return false;
			}
			return true;
		}