Example #1
0
        /// <summary>
        /// Allocates a run of scratch-buffer pages for the transaction, reusing a
        /// previously freed buffer when one of a suitable size is available.
        /// </summary>
        public PageFromScratchBuffer Allocate(Transaction tx, int numberOfPages)
        {
            // Round the request up to a power of two so freed buffers can be
            // matched against later requests of different sizes.
            var sizeInPages = Utils.NearestPowerOfTwo(numberOfPages);

            PageFromScratchBuffer allocation;
            if (TryGettingFromAllocatedBuffer(tx, numberOfPages, sizeInPages, out allocation))
                return allocation;

            // No reusable buffer - extend the scratch file and carve fresh pages off the tail.
            _scratchPager.EnsureContinuous(tx, _lastUsedPage, (int)sizeInPages);

            allocation = new PageFromScratchBuffer
            {
                PositionInScratchBuffer = _lastUsedPage,
                Size          = sizeInPages,
                NumberOfPages = numberOfPages
            };
            _allocatedPages.Add(_lastUsedPage, allocation);
            _lastUsedPage += sizeInPages;

            return allocation;
        }
Example #2
0
        /// <summary>
        /// Reads the next transaction from the journal, decompresses it into the
        /// recovery pager, and merges its page translations into the running table.
        /// </summary>
        /// <param name="options">Environment options; used to report recovery errors.</param>
        /// <param name="checkCrc">When true, the transaction pages' CRC is validated before applying.</param>
        /// <returns>
        /// True when a transaction was consumed (including an already-synced one that
        /// was skipped); false at end of journal, on an invalid header, a CRC
        /// mismatch, or a decompression failure.
        /// </returns>
        public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
        {
            // Past the end of the journal file - nothing left to read.
            if (_readingPage >= _pager.NumberOfAllocatedPages)
            {
                return(false);
            }

            TransactionHeader *current;

            if (!TryReadAndValidateHeader(options, out current))
            {
                return(false);
            }

            // Number of journal pages occupied by the compressed payload, rounded up.
            var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);

            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                // Transaction already synced to the data file - just advance past it.
                LastTransactionHeader = current;
                _readingPage         += compressedPages;
                return(true);                // skipping
            }

            if (checkCrc && !ValidatePagesCrc(options, compressedPages, current))
            {
                return(false);
            }

            var totalPageCount = current->PageCount + current->OverflowPageCount;

            // Reserve room in the recovery pager for the uncompressed pages (+1 slack page).
            _recoveryPager.EnsureContinuous(null, _recoveryPage, totalPageCount + 1);
            var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);

            // Clear the destination before decompressing into it.
            NativeMethods.memset(dataPage, 0, totalPageCount * AbstractPager.PageSize);
            try
            {
                LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
            }
            catch (Exception e)
            {
                // Corrupt payload: report it and flag that the header must be
                // updated so recovery can truncate the journal here.
                options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                RequireHeaderUpdate = true;

                return(false);
            }

            var tempTransactionPageTranslaction = (*current).GetTransactionToPageTranslation(_recoveryPager, ref _recoveryPage);

            _readingPage += compressedPages;

            LastTransactionHeader = current;

            // Merge this transaction's translations; later transactions overwrite
            // earlier entries for the same page number.
            foreach (var pagePosition in tempTransactionPageTranslaction)
            {
                _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
            }

            return(true);
        }
Example #3
0
        /// <summary>
        /// Compresses the transaction's data pages into the compression pager and
        /// returns the page pointers to write to the journal: the (uncompressed)
        /// tx header page first, followed by the compressed payload pages. The tx
        /// header is updated in place with compression metadata, the previous
        /// transaction's CRC, and the CRC of the compressed pages.
        /// </summary>
        private IntPtr[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager, uint previousTransactionCrc)
        {
            // numberOfPages include the tx header page, which we don't compress
            var dataPagesCount      = numberOfPages - 1;
            var sizeInBytes         = dataPagesCount * AbstractPager.PageSize;
            // Worst-case LZ4 output size, rounded up to whole pages.
            var outputBuffer        = LZ4.MaximumOutputLength(sizeInBytes);
            var outputBufferInPages = outputBuffer / AbstractPager.PageSize +
                                      (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
            var pagesRequired = (dataPagesCount + outputBufferInPages);

            compressionPager.EnsureContinuous(tx, 0, pagesRequired);
            // Gather (input) buffer at page 0; output buffer right after the data pages.
            var tempBuffer        = compressionPager.AcquirePagePointer(tx, 0);
            var compressionBuffer = compressionPager.AcquirePagePointer(tx, dataPagesCount);

            // Copy every modified page from the scratch buffers into one contiguous
            // buffer so they can be compressed with a single call.
            var write   = tempBuffer;
            var txPages = tx.GetTransactionPages();

            foreach (var txPage in txPages)
            {
                var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txPage.ScratchFileNumber, txPage.PositionInScratchBuffer);
                var count       = txPage.NumberOfPages * AbstractPager.PageSize;
                Memory.BulkCopy(write, scratchPage, count);
                write += count;
            }

            var len             = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
            var remainder       = len % AbstractPager.PageSize;
            var compressedPages = (len / AbstractPager.PageSize) + (remainder == 0 ? 0 : 1);

            if (remainder != 0)
            {
                // Zero the unused tail of the last page: the CRC below covers whole
                // pages, so leftover garbage would make the checksum nondeterministic.
                // FIX: the tail is (PageSize - remainder) bytes long; the previous
                // code zeroed only `remainder` bytes, leaving garbage in the
                // checksummed region whenever remainder < PageSize / 2.
                UnmanagedMemory.Set(compressionBuffer + len, 0, AbstractPager.PageSize - remainder);
            }

            // pages[0] is the tx header page; the compressed payload follows.
            var pages = new IntPtr[compressedPages + 1];

            var txHeaderPage = tx.GetTransactionHeaderPage();
            var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txHeaderPage.ScratchFileNumber, txHeaderPage.PositionInScratchBuffer);
            var txHeader     = (TransactionHeader *)txHeaderBase;

            txHeader->Compressed             = true;
            txHeader->CompressedSize         = len;
            txHeader->UncompressedSize       = sizeInBytes;
            txHeader->PreviousTransactionCrc = previousTransactionCrc;

            pages[0] = new IntPtr(txHeaderBase);
            for (int index = 0; index < compressedPages; index++)
            {
                pages[index + 1] = new IntPtr(compressionBuffer + (index * AbstractPager.PageSize));
            }

            // CRC over the whole-page-aligned compressed payload (tail is now zeroed).
            txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);

            return(pages);
        }
Example #4
0
        /// <summary>
        /// Allocates a contiguous run of <paramref name="size"/> scratch pages at the
        /// tail of this scratch file and registers the allocation.
        /// </summary>
        public PageFromScratchBuffer Allocate(Transaction tx, int numberOfPages, long size)
        {
            // Grow the scratch file so a contiguous run of `size` pages exists at the tail.
            _scratchPager.EnsureContinuous(tx, _lastUsedPage, (int)size);

            var allocation = new PageFromScratchBuffer(_scratchNumber, _lastUsedPage, size, numberOfPages);

            _allocatedPagesCount += numberOfPages;
            _allocatedPages.Add(_lastUsedPage, allocation);
            _lastUsedPage += size;

            return allocation;
        }
        /// <summary>
        /// Compresses the transaction's data pages into the compression pager and
        /// returns the page pointers to write to the journal: the (uncompressed)
        /// tx header page first, followed by the compressed payload pages. The tx
        /// header is updated in place with compression metadata and the CRC of the
        /// compressed pages.
        /// </summary>
        private byte *[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager)
        {
            // numberOfPages include the tx header page, which we don't compress
            var dataPagesCount      = numberOfPages - 1;
            var sizeInBytes         = dataPagesCount * AbstractPager.PageSize;
            // Worst-case LZ4 output size, rounded up to whole pages.
            var outputBuffer        = LZ4.MaximumOutputLength(sizeInBytes);
            var outputBufferInPages = outputBuffer / AbstractPager.PageSize +
                                      (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
            var pagesRequired = (dataPagesCount + outputBufferInPages);

            compressionPager.EnsureContinuous(tx, 0, pagesRequired);
            // Gather (input) buffer at page 0; output buffer right after the data pages.
            var tempBuffer        = compressionPager.AcquirePagePointer(0);
            var compressionBuffer = compressionPager.AcquirePagePointer(dataPagesCount);

            // Copy all data pages (txPages[0] is the header page, which is skipped)
            // into one contiguous buffer so they compress in a single call.
            var write   = tempBuffer;
            var txPages = tx.GetTransactionPages();

            for (int index = 1; index < txPages.Count; index++)
            {
                var txPage      = txPages[index];
                var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPage.PositionInScratchBuffer);
                var count       = txPage.NumberOfPages * AbstractPager.PageSize;
                NativeMethods.memcpy(write, scratchPage, count);
                write += count;
            }

            var sizeAfterCompression = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);

            var remainder       = sizeAfterCompression % AbstractPager.PageSize;
            var compressedPages = (sizeAfterCompression / AbstractPager.PageSize) + (remainder == 0 ? 0 : 1);

            if (remainder != 0)
            {
                // FIX: zero the unused tail of the last compressed page. The CRC
                // below covers whole pages, so without this the checksum included
                // leftover garbage and was nondeterministic (the sibling overload
                // already performs this zeroing).
                NativeMethods.memset(compressionBuffer + sizeAfterCompression, 0, AbstractPager.PageSize - remainder);
            }

            var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPages[0].PositionInScratchBuffer);
            var txHeader     = (TransactionHeader *)txHeaderBase;

            txHeader->Compressed       = true;
            txHeader->CompressedSize   = sizeAfterCompression;
            txHeader->UncompressedSize = sizeInBytes;

            // pages[0] is the tx header page; the compressed payload follows.
            var pages = new byte *[compressedPages + 1];

            pages[0] = txHeaderBase;

            for (int index = 0; index < compressedPages; index++)
            {
                pages[index + 1] = compressionBuffer + (index * AbstractPager.PageSize);
            }

            // CRC over the whole-page-aligned compressed payload (tail is now zeroed).
            txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);

            return(pages);
        }
Example #6
0
        /// <summary>
        /// Writes the given (sorted) journal pages to the data file and syncs it.
        /// </summary>
        private void ApplyPagesToDataFileFromJournal(List <Page> sortedPagesToWrite)
        {
            // The pages are sorted, so the last one determines how far the data
            // file must extend; an overflow page may span several physical pages.
            var lastPage = sortedPagesToWrite.Last();

            var pagesInLastPage = lastPage.IsOverflow
                ? _env.Options.DataPager.GetNumberOfOverflowPages(lastPage.OverflowSize)
                : 1;

            _dataPager.EnsureContinuous(null, lastPage.PageNumber, pagesInLastPage);

            foreach (var page in sortedPagesToWrite)
            {
                _dataPager.Write(page);
            }

            // Flush everything to durable storage before returning.
            _dataPager.Sync();
        }
Example #7
0
        /// <summary>
        /// Allocates a contiguous run of <paramref name="size"/> scratch pages at the
        /// tail of this scratch file and registers the allocation.
        /// </summary>
        public PageFromScratchBuffer Allocate(Transaction tx, int numberOfPages, long size)
        {
            // Grow the scratch file so a contiguous run of `size` pages exists at the tail.
            _scratchPager.EnsureContinuous(tx, _lastUsedPage, (int)size);

            var allocation = new PageFromScratchBuffer
            {
                ScratchFileNumber       = _scratchNumber,
                PositionInScratchBuffer = _lastUsedPage,
                Size          = size,
                NumberOfPages = numberOfPages
            };

            _allocatedPages.Add(_lastUsedPage, allocation);
            _lastUsedPage += size;

            return allocation;
        }
Example #8
0
        /// <summary>
        /// Reads a shipped transaction's compressed payload, validates its CRC and
        /// the CRC chain link to the previous transaction, decompresses it into the
        /// local pager, and records the affected page numbers.
        /// </summary>
        /// <exception cref="InvalidDataException">CRC mismatch or corrupt compressed data.</exception>
        protected void ReadFromShippedTransaction(TransactionToShip transaction)
        {
            // Compressed payload size rounded up to whole pages - the CRC is
            // computed over whole pages on the sending side.
            var compressedPages      = (transaction.Header.CompressedSize / AbstractPager.PageSize) + (transaction.Header.CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);
            var compressedDataBuffer = new byte[compressedPages * AbstractPager.PageSize];

            transaction.CompressedData.Read(compressedDataBuffer, 0, compressedPages * AbstractPager.PageSize);

            fixed(byte *compressedDataBufferPtr = compressedDataBuffer)
            {
                var crc = Crc.Value(compressedDataBufferPtr, 0, compressedPages * AbstractPager.PageSize);

                // Both the payload CRC and the chain link must match.
                if (transaction.Header.Crc != crc || _previousTransactionCrc != transaction.PreviousTransactionCrc)
                {
                    throw new InvalidDataException("Invalid CRC signature for transaction " + transaction.Header.TransactionId);
                }

                _previousTransactionCrc = crc;
                var totalPages = transaction.Header.PageCount + transaction.Header.OverflowPageCount;

                _pager.EnsureContinuous(null, currentPage, totalPages + 1);
                try
                {
                    LZ4.Decode64(compressedDataBufferPtr, transaction.Header.CompressedSize, _pager.AcquirePagePointer(currentPage), transaction.Header.UncompressedSize, true);
                }
                catch (Exception e)
                {
                    throw new InvalidDataException("Could not de-compress, invalid data", e);
                }
            }

            var lastAddedPage = currentPage + transaction.Header.PageCount;

            for (int pageNumber = currentPage; pageNumber < lastAddedPage; pageNumber++)
            {
                _pageNumbers.Add(pageNumber);
            }

            // Track the newest transaction header seen so far.
            // FIX: the previous condition required LastTransactionHeader.HasValue,
            // so the very first shipped transaction never assigned it and it
            // remained null forever.
            if (LastTransactionHeader == null || LastTransactionHeader.Value.TransactionId < transaction.Header.TransactionId)
            {
                LastTransactionHeader = transaction.Header;
            }

            currentPage = lastAddedPage;
        }
Example #9
0
		/// <summary>
		/// Compresses the transaction's data pages into the compression pager and
		/// returns the page pointers to write to the journal: the (uncompressed)
		/// tx header page first, followed by the compressed payload pages. The tx
		/// header is updated in place with compression metadata, the previous
		/// transaction's CRC, and the CRC of the compressed pages.
		/// </summary>
		private IntPtr[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager,uint previousTransactionCrc)
		{
			// numberOfPages include the tx header page, which we don't compress
			var dataPagesCount = numberOfPages - 1;
			var sizeInBytes = dataPagesCount * AbstractPager.PageSize;
			// Worst-case LZ4 output size, rounded up to whole pages.
			var outputBuffer = LZ4.MaximumOutputLength(sizeInBytes);
			var outputBufferInPages = outputBuffer / AbstractPager.PageSize +
									  (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
			var pagesRequired = (dataPagesCount + outputBufferInPages);

			compressionPager.EnsureContinuous(tx, 0, pagesRequired);
			// Gather (input) buffer at page 0; output buffer right after the data pages.
			var tempBuffer = compressionPager.AcquirePagePointer(0);
			var compressionBuffer = compressionPager.AcquirePagePointer(dataPagesCount);

			// Copy every modified page from the scratch buffers into one contiguous
			// buffer so they can be compressed with a single call.
			var write = tempBuffer;
			var txPages = tx.GetTransactionPages();

            foreach( var txPage in txPages )
            {
                var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPage.ScratchFileNumber, txPage.PositionInScratchBuffer);
                var count = txPage.NumberOfPages * AbstractPager.PageSize;
                MemoryUtils.BulkCopy(write, scratchPage, count);
                write += count;
            }

			var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
		    var remainder = len % AbstractPager.PageSize;
            var compressedPages = (len / AbstractPager.PageSize) + (remainder == 0 ? 0 : 1);

		    if (remainder != 0)
		    {
				// Zero the unused tail of the last page: the CRC below covers whole
				// pages, so leftover garbage would make the checksum nondeterministic.
				// FIX: the tail is (PageSize - remainder) bytes long; the previous
				// code zeroed only `remainder` bytes, leaving garbage in the
				// checksummed region whenever remainder < PageSize / 2.
				StdLib.memset(compressionBuffer + len, 0, AbstractPager.PageSize - remainder);
		    }

			// pages[0] is the tx header page; the compressed payload follows.
			var pages = new IntPtr[compressedPages + 1];

            var txHeaderPage = tx.GetTransactionHeaderPage();
            var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(txHeaderPage.ScratchFileNumber, txHeaderPage.PositionInScratchBuffer);
			var txHeader = (TransactionHeader*)txHeaderBase;

			txHeader->Compressed = true;
			txHeader->CompressedSize = len;
			txHeader->UncompressedSize = sizeInBytes;
			txHeader->PreviousTransactionCrc = previousTransactionCrc;

			pages[0] = new IntPtr(txHeaderBase);
			for (int index = 0; index < compressedPages; index++)
			{
				pages[index + 1] = new IntPtr(compressionBuffer + (index * AbstractPager.PageSize));
			}

			// CRC over the whole-page-aligned compressed payload (tail is now zeroed).
			txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);

			return pages;
		}
		/// <summary>
		/// Compresses the transaction's data pages into the compression pager and
		/// returns the page pointers to write to the journal: the (uncompressed)
		/// tx header page first, followed by the compressed payload pages. The tx
		/// header is updated in place with compression metadata and the CRC of the
		/// compressed pages.
		/// </summary>
		private byte*[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager)
		{
			// numberOfPages include the tx header page, which we don't compress
			var dataPagesCount = numberOfPages - 1;
			var sizeInBytes = dataPagesCount * AbstractPager.PageSize;
			// Worst-case LZ4 output size, rounded up to whole pages.
			var outputBuffer = LZ4.MaximumOutputLength(sizeInBytes);
			var outputBufferInPages = outputBuffer / AbstractPager.PageSize +
									  (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
			var pagesRequired = (dataPagesCount + outputBufferInPages);

			compressionPager.EnsureContinuous(tx, 0, pagesRequired);
			// Gather (input) buffer at page 0; output buffer right after the data pages.
			var tempBuffer = compressionPager.AcquirePagePointer(0);
			var compressionBuffer = compressionPager.AcquirePagePointer(dataPagesCount);

			// Copy all data pages (txPages[0] is the header page, which is skipped)
			// into one contiguous buffer so they compress in a single call.
			var write = tempBuffer;
			var txPages = tx.GetTransactionPages();

			for (int index = 1; index < txPages.Count; index++)
			{
				var txPage = txPages[index];
				var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPage.PositionInScratchBuffer);
				var count = txPage.NumberOfPages * AbstractPager.PageSize;
				NativeMethods.memcpy(write, scratchPage, count);
				write += count;
			}

			var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
			var remainder = len % AbstractPager.PageSize;
			var compressedPages = (len / AbstractPager.PageSize) + (remainder == 0 ? 0 : 1);

			if (remainder != 0)
			{
				// FIX: zero the unused tail of the last compressed page. The CRC
				// below covers whole pages, so without this the checksum included
				// leftover garbage and was nondeterministic (the sibling overload
				// already performs this zeroing).
				StdLib.memset(compressionBuffer + len, 0, AbstractPager.PageSize - remainder);
			}

			// pages[0] is the tx header page; the compressed payload follows.
			var pages = new byte*[compressedPages + 1];

			var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPages[0].PositionInScratchBuffer);
			var txHeader = (TransactionHeader*)txHeaderBase;

			txHeader->Compressed = true;
			txHeader->CompressedSize = len;
			txHeader->UncompressedSize = sizeInBytes;

			pages[0] = txHeaderBase;
			for (int index = 0; index < compressedPages; index++)
			{
				pages[index + 1] = compressionBuffer + (index * AbstractPager.PageSize);
			}

			// CRC over the whole-page-aligned compressed payload (tail is now zeroed).
			txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);

			return pages;
		}
Example #11
0
        /// <summary>
        /// Reads the next transaction from the journal into the recovery pager
        /// (decompressing if needed) and merges its page translations into the
        /// running table, collapsing overflow runs to their first page.
        /// </summary>
        /// <param name="options">Environment options; used to report recovery errors.</param>
        /// <param name="checkCrc">When true, the transaction pages' CRC is validated before applying.</param>
        /// <returns>
        /// True when a transaction was consumed (including an already-synced one
        /// that was skipped); false at end of journal, past MaxPageToRead, on an
        /// invalid header, a CRC mismatch, or a decompression failure.
        /// </returns>
        public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
        {
            // Past the end of the journal file - nothing left to read.
            if (_readingPage >= _pager.NumberOfAllocatedPages)
            {
                return(false);
            }

            // Honor an explicit read limit, when one is set.
            if (MaxPageToRead != null && _readingPage >= MaxPageToRead.Value)
            {
                return(false);
            }

            TransactionHeader *current;

            if (!TryReadAndValidateHeader(options, out current))
            {
                return(false);
            }

            // Journal pages occupied by the payload - compressed or raw, per the header flag.
            var transactionSize = GetNumberOfPagesFromSize(current->Compressed ? current->CompressedSize : current->UncompressedSize);

            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                // Transaction already synced to the data file - just advance past it.
                LastTransactionHeader = current;
                _readingPage         += transactionSize;
                return(true);                // skipping
            }

            if (checkCrc && !ValidatePagesCrc(options, transactionSize, current))
            {
                return(false);
            }

            // Reserve room in the recovery pager (+1 slack page) and clear the destination.
            _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1);
            var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);

            UnmanagedMemory.Set(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
            if (current->Compressed)
            {
                if (TryDecompressTransactionPages(options, current, dataPage) == false)
                {
                    return(false);
                }
            }
            else
            {
                // Uncompressed transaction: copy the raw pages straight across.
                Memory.Copy(dataPage, _pager.AcquirePagePointer(_readingPage), (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
            }

            // Build page-number -> recovery-position entries for this transaction.
            var tempTransactionPageTranslaction = new Dictionary <long, RecoveryPagePosition>();

            for (var i = 0; i < current->PageCount; i++)
            {
                Debug.Assert(_pager.Disposed == false);
                Debug.Assert(_recoveryPager.Disposed == false);

                var page = _recoveryPager.Read(_recoveryPage);

                var pagePosition = new RecoveryPagePosition
                {
                    JournalPos    = _recoveryPage,
                    TransactionId = current->TransactionId
                };

                if (page.IsOverflow)
                {
                    // An overflow page owns the following pages as well - record the
                    // run length and skip over it.
                    var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);

                    pagePosition.IsOverflow            = true;
                    pagePosition.NumberOfOverflowPages = numOfPages;

                    _recoveryPage += numOfPages;
                }
                else
                {
                    _recoveryPage++;
                }

                tempTransactionPageTranslaction[page.PageNumber] = pagePosition;
            }

            _readingPage += transactionSize;

            LastTransactionHeader = current;

            // Merge into the running table; later transactions overwrite earlier
            // entries for the same page number.
            foreach (var pagePosition in tempTransactionPageTranslaction)
            {
                _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;

                if (pagePosition.Value.IsOverflow)
                {
                    Debug.Assert(pagePosition.Value.NumberOfOverflowPages != -1);

                    // The overflow run is addressed through its first page only -
                    // drop any stale entries for the pages it now covers.
                    for (int i = 1; i < pagePosition.Value.NumberOfOverflowPages; i++)
                    {
                        _transactionPageTranslation.Remove(pagePosition.Key + i);
                    }
                }
            }

            return(true);
        }
Example #12
0
        /// <summary>
        /// Reads the next transaction from the journal, validates its CRC,
        /// decompresses it into the recovery pager, and merges its page
        /// translations into the running table.
        /// </summary>
        /// <param name="options">Environment options; used to report recovery errors.</param>
        /// <param name="checkCrc">When true, the compressed pages' CRC is validated before applying.</param>
        /// <returns>
        /// True when a transaction was consumed (including an already-synced one
        /// that was skipped); false at end of journal, on an invalid header, a CRC
        /// mismatch, or a decompression failure.
        /// </returns>
        public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
        {
            // Past the end of the journal file - nothing left to read.
            if (_readingPage >= _pager.NumberOfAllocatedPages)
            {
                return(false);
            }

            TransactionHeader *current;

            if (!TryReadAndValidateHeader(options, out current))
            {
                return(false);
            }

            // Number of journal pages occupied by the compressed payload, rounded up.
            var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);

            if (current->TransactionId <= _lastSyncedTransactionId)
            {
                // Transaction already synced to the data file - just advance past it.
                LastTransactionHeader = current;
                _readingPage         += compressedPages;
                return(true); // skipping
            }

            if (checkCrc)
            {
                // CRC covers the whole-page-aligned compressed payload.
                uint crc = Crc.Value(_pager.AcquirePagePointer(_readingPage), 0, compressedPages * AbstractPager.PageSize);

                if (crc != current->Crc)
                {
                    RequireHeaderUpdate = true;
                    options.InvokeRecoveryError(this, "Invalid CRC signature for transaction " + current->TransactionId, null);

                    return(false);
                }
            }

            // Reserve room in the recovery pager (+1 slack page) and clear the destination.
            _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1);
            var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);

            NativeMethods.memset(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
            try
            {
                LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
            }
            catch (Exception e)
            {
                // Corrupt payload: report it and flag that the header must be
                // updated so recovery can truncate the journal here.
                options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
                RequireHeaderUpdate = true;

                return(false);
            }

            // Build page-number -> journal-position entries for this transaction.
            var tempTransactionPageTranslaction = new Dictionary <long, JournalFile.PagePosition>();

            for (var i = 0; i < current->PageCount; i++)
            {
                Debug.Assert(_pager.Disposed == false);
                Debug.Assert(_recoveryPager.Disposed == false);

                var page = _recoveryPager.Read(_recoveryPage);

                tempTransactionPageTranslaction[page.PageNumber] = new JournalFile.PagePosition
                {
                    JournalPos    = _recoveryPage,
                    TransactionId = current->TransactionId
                };

                if (page.IsOverflow)
                {
                    // An overflow page owns the following pages as well - skip the run.
                    var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
                    _recoveryPage += numOfPages;
                }
                else
                {
                    _recoveryPage++;
                }
            }

            _readingPage += compressedPages;

            LastTransactionHeader = current;

            // Merge into the running table; later transactions overwrite earlier
            // entries for the same page number.
            foreach (var pagePosition in tempTransactionPageTranslaction)
            {
                _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
            }

            return(true);
        }
Example #13
0
        /// <summary>
        /// Allocates scratch-buffer pages for the transaction, reusing freed
        /// buffers when possible. When the scratch file would exceed its size
        /// limit, forces a log flush and waits (up to the configured timeout) for
        /// read transactions to release pages before giving up.
        /// </summary>
        /// <exception cref="ArgumentNullException">tx is null.</exception>
        /// <exception cref="ScratchBufferSizeLimitException">
        /// The size limit was reached and no pages were freed within the timeout.
        /// </exception>
        public PageFromScratchBuffer Allocate(Transaction tx, int numberOfPages)
        {
            if (tx == null)
            {
                throw new ArgumentNullException("tx");
            }
            // Round the request up to a power of two so freed buffers can be
            // matched against later requests of different sizes.
            var size = Utils.NearestPowerOfTwo(numberOfPages);

            PageFromScratchBuffer result;

            if (TryGettingFromAllocatedBuffer(tx, numberOfPages, size, out result))
            {
                return(result);
            }

            // Growing the scratch file past this point would exceed the configured limit.
            if ((_lastUsedPage + size) * AbstractPager.PageSize > _sizeLimit)
            {
                var sp = Stopwatch.StartNew();
                // Our problem is that we don't have any available free pages, probably because
                // there are read transactions that are holding things open. We are going to see if
                // there are any free pages that _might_ be freed for us if we wait for a bit. The idea
                // is that we let the read transactions time to complete and do their work, at which point
                // we can continue running.
                // We start this by forcing a flush, then we are waiting up to the timeout for we are waiting
                // for the read transactions to complete. It is possible that a long running read transaction
                // would in fact generate enough work for us to timeout, but hopefully we can avoid that.

                tx.Environment.ForceLogFlushToDataFile(tx);
                while (sp.ElapsedMilliseconds < tx.Environment.Options.ScratchBufferOverflowTimeout)
                {
                    // Poll for a buffer freed by a completed read transaction.
                    if (TryGettingFromAllocatedBuffer(tx, numberOfPages, size, out result))
                    {
                        return(result);
                    }
                    Thread.Sleep(32);
                }
                string message = string.Format("Cannot allocate more space for the scratch buffer.\r\n" +
                                               "Current size is:\t{0:#,#;;0} kb.\r\n" +
                                               "Limit:\t\t\t{1:#,#;;0} kb.\r\n" +
                                               "Requested Size:\t{2:#,#;;0} kb.\r\n" +
                                               "Already flushed and waited for {3:#,#;;0} ms for read transactions to complete.\r\n" +
                                               "Do you have a long running read transaction executing?",
                                               (_scratchPager.NumberOfAllocatedPages * AbstractPager.PageSize) / 1024,
                                               _sizeLimit / 1024,
                                               ((_lastUsedPage + size) * AbstractPager.PageSize) / 1024,
                                               sp.ElapsedMilliseconds);
                throw new ScratchBufferSizeLimitException(message);
            }

            // we don't have free pages to give out, need to allocate some
            _scratchPager.EnsureContinuous(tx, _lastUsedPage, (int)size);

            result = new PageFromScratchBuffer
            {
                PositionInScratchBuffer = _lastUsedPage,
                Size          = size,
                NumberOfPages = numberOfPages
            };

            _allocatedPages.Add(_lastUsedPage, result);
            _lastUsedPage += size;

            return(result);
        }