// Compresses a transaction's data pages (everything except the tx header page)
// into the compression pager and returns the list of page pointers to write to
// the journal: pages[0] is the transaction header, pages[1..] are the compressed
// data pages. Also fills in the header's compression metadata and CRC.
private IntPtr[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager, uint previousTransactionCrc)
{
    // numberOfPages include the tx header page, which we don't compress
    var dataPagesCount = numberOfPages - 1;
    var sizeInBytes = dataPagesCount * AbstractPager.PageSize;

    // LZ4's worst case output can be larger than the input, so size the output
    // region from MaximumOutputLength, rounded up to whole pages.
    var outputBuffer = LZ4.MaximumOutputLength(sizeInBytes);
    var outputBufferInPages = outputBuffer / AbstractPager.PageSize + (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
    var pagesRequired = (dataPagesCount + outputBufferInPages);
    compressionPager.EnsureContinuous(tx, 0, pagesRequired);

    // Pager layout: [0, dataPagesCount) holds the gathered uncompressed copy,
    // [dataPagesCount, ...) receives the compressed output.
    var tempBuffer = compressionPager.AcquirePagePointer(tx, 0);
    var compressionBuffer = compressionPager.AcquirePagePointer(tx, dataPagesCount);

    // Gather the transaction's scratch pages into one contiguous buffer so we
    // can compress them in a single call.
    var write = tempBuffer;
    var txPages = tx.GetTransactionPages();
    foreach (var txPage in txPages)
    {
        var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txPage.ScratchFileNumber, txPage.PositionInScratchBuffer);
        var count = txPage.NumberOfPages * AbstractPager.PageSize;
        Memory.BulkCopy(write, scratchPage, count);
        write += count;
    }

    var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
    var remainder = len % AbstractPager.PageSize;
    var compressedPages = (len / AbstractPager.PageSize) + (remainder == 0 ? 0 : 1);
    if (remainder != 0)
    {
        // Zero the unused tail of the last page. The tail is
        // (PageSize - remainder) bytes long; the previous code zeroed only
        // 'remainder' bytes, leaving stale buffer bytes inside the page when
        // remainder < PageSize / 2 — and those bytes are covered by the CRC
        // computed below, making the checksum depend on leftover garbage.
        UnmanagedMemory.Set(compressionBuffer + len, 0, AbstractPager.PageSize - remainder);
    }

    var pages = new IntPtr[compressedPages + 1];

    // Fill in the header page's compression metadata; it is written to the
    // journal uncompressed, ahead of the data pages.
    var txHeaderPage = tx.GetTransactionHeaderPage();
    var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(tx, txHeaderPage.ScratchFileNumber, txHeaderPage.PositionInScratchBuffer);
    var txHeader = (TransactionHeader *)txHeaderBase;
    txHeader->Compressed = true;
    txHeader->CompressedSize = len;
    txHeader->UncompressedSize = sizeInBytes;
    txHeader->PreviousTransactionCrc = previousTransactionCrc;

    pages[0] = new IntPtr(txHeaderBase);
    for (int index = 0; index < compressedPages; index++)
    {
        pages[index + 1] = new IntPtr(compressionBuffer + (index * AbstractPager.PageSize));
    }

    // CRC covers whole pages, including the zeroed tail of the last one.
    txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);

    return (pages);
}
// Compresses a transaction's data pages into the compression pager and returns
// the journal write list: pages[0] is the (uncompressed) transaction header
// page, pages[1..] are the compressed data pages. In this version the header
// page is txPages[0], which is why the gather loop below starts at index 1.
private byte *[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager)
{
    // numberOfPages include the tx header page, which we don't compress
    var dataPagesCount = numberOfPages - 1;
    var sizeInBytes = dataPagesCount * AbstractPager.PageSize;

    // LZ4 worst-case output may exceed the input, so reserve enough whole pages.
    var outputBuffer = LZ4.MaximumOutputLength(sizeInBytes);
    var outputBufferInPages = outputBuffer / AbstractPager.PageSize + (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
    var pagesRequired = (dataPagesCount + outputBufferInPages);
    compressionPager.EnsureContinuous(tx, 0, pagesRequired);

    // Pager layout: [0, dataPagesCount) = gathered uncompressed copy,
    // [dataPagesCount, ...) = compressed output.
    var tempBuffer = compressionPager.AcquirePagePointer(0);
    var compressionBuffer = compressionPager.AcquirePagePointer(dataPagesCount);

    // Gather the transaction's scratch pages into one contiguous buffer,
    // skipping txPages[0] (the header page).
    var write = tempBuffer;
    var txPages = tx.GetTransactionPages();
    for (int index = 1; index < txPages.Count; index++)
    {
        var txPage = txPages[index];
        var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPage.PositionInScratchBuffer);
        var count = txPage.NumberOfPages * AbstractPager.PageSize;
        NativeMethods.memcpy(write, scratchPage, count);
        write += count;
    }

    var sizeAfterCompression = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
    // Round the compressed size up to whole pages.
    var compressedPages = (sizeAfterCompression / AbstractPager.PageSize) + (sizeAfterCompression % AbstractPager.PageSize == 0 ? 0 : 1);

    // Record compression metadata in the header page before computing the CRC.
    var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPages[0].PositionInScratchBuffer);
    var txHeader = (TransactionHeader *)txHeaderBase;
    txHeader->Compressed = true;
    txHeader->CompressedSize = sizeAfterCompression;
    txHeader->UncompressedSize = sizeInBytes;

    var pages = new byte *[compressedPages + 1];
    pages[0] = txHeaderBase;
    for (int index = 0; index < compressedPages; index++)
    {
        pages[index + 1] = compressionBuffer + (index * AbstractPager.PageSize);
    }

    // NOTE(review): the unused tail of the last compressed page is not zeroed
    // here, so the CRC below includes whatever bytes the buffer already held —
    // presumably harmless because readers checksum the identical journal bytes,
    // but verify against the read path.
    txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);

    return (pages);
}
// Reads the next transaction from the journal during recovery: validates its
// header (and optionally its CRC), decompresses its pages into the recovery
// pager, and merges the resulting page-number -> recovery-position mapping
// into _transactionPageTranslation. Returns false at end of journal or on any
// validation/decompression failure; true otherwise (including when an
// already-synced transaction is skipped).
public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
{
    // Nothing left in the journal file.
    if (_readingPage >= _pager.NumberOfAllocatedPages)
    {
        return (false);
    }
    TransactionHeader *current;
    if (!TryReadAndValidateHeader(options, out current))
    {
        return (false);
    }
    // Compressed size rounded up to whole journal pages.
    var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);
    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        // Already synced to the data file — advance past it without applying.
        LastTransactionHeader = current;
        _readingPage += compressedPages;
        return (true); // skipping
    }
    if (checkCrc && !ValidatePagesCrc(options, compressedPages, current))
    {
        return (false);
    }
    // Reserve and zero enough recovery pages for the decompressed transaction.
    var totalPageCount = current->PageCount + current->OverflowPageCount;
    _recoveryPager.EnsureContinuous(null, _recoveryPage, totalPageCount + 1);
    var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);
    NativeMethods.memset(dataPage, 0, totalPageCount * AbstractPager.PageSize);
    try
    {
        LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
    }
    catch (Exception e)
    {
        // Corrupt compressed payload: report, flag the header for rewrite, stop.
        options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
        RequireHeaderUpdate = true;
        return (false);
    }
    // Build the translation for this transaction's pages, then commit state:
    // advance the read cursor, remember the header, and merge the translation.
    var tempTransactionPageTranslaction = (*current).GetTransactionToPageTranslation(_recoveryPager, ref _recoveryPage);
    _readingPage += compressedPages;
    LastTransactionHeader = current;
    foreach (var pagePosition in tempTransactionPageTranslaction)
    {
        _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
    }
    return (true);
}
// Reads the next transaction from the journal and packages its still-compressed
// pages into a TransactionToShip for replication. Returns false at end of
// journal or on validation failure; true otherwise. When an already-synced
// transaction is skipped, transactionToShipRecord remains null.
protected bool ReadOneTransactionForShipping(StorageEnvironmentOptions options, out TransactionToShip transactionToShipRecord)
{
    transactionToShipRecord = null;
    // Nothing left in the journal file.
    if (_readingPage >= _pager.NumberOfAllocatedPages)
    {
        return (false);
    }
    TransactionHeader *current;
    if (!TryReadAndValidateHeader(options, out current))
    {
        return (false);
    }
    // Compressed size rounded up to whole journal pages.
    var compressedPageCount = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);
    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        // Already synced — advance past it without shipping.
        LastTransactionHeader = current;
        _readingPage += compressedPageCount;
        return (true); // skipping
    }
    if (!ValidatePagesCrc(options, compressedPageCount, current))
    {
        return (false);
    }
    // Copy the raw compressed journal pages into a managed buffer so they can
    // be streamed to the ship target without holding on to pager memory.
    var compressedPagesRaw = new byte[compressedPageCount * AbstractPager.PageSize];
    fixed (byte *compressedDataPtr = compressedPagesRaw)
        NativeMethods.memcpy(compressedDataPtr, _pager.AcquirePagePointer(_readingPage), compressedPageCount * AbstractPager.PageSize);
    transactionToShipRecord = new TransactionToShip(*current)
    {
        CompressedData = new MemoryStream(compressedPagesRaw), //no need to compress the pages --> after being written to Journal they are already compressed
        PreviousTransactionCrc = _previousTransactionCrc
    };
    // Chain CRCs so the receiver can verify it applies transactions in order.
    _previousTransactionCrc = current->Crc;
    _readingPage += compressedPageCount;
    return (true);
}
// Thin pass-through: hands back the raw pointer for page p from the
// underlying scratch pager.
public byte* AcquirePagePointer(long p) => _scratchPager.AcquirePagePointer(p);
// Compresses a transaction's data pages (everything except the tx header page)
// into the compression pager and returns the list of page pointers to write to
// the journal: pages[0] is the transaction header, pages[1..] are the compressed
// data pages. Also fills in the header's compression metadata and CRC.
private IntPtr[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager, uint previousTransactionCrc)
{
    // numberOfPages include the tx header page, which we don't compress
    var dataPagesCount = numberOfPages - 1;
    var sizeInBytes = dataPagesCount * AbstractPager.PageSize;

    // LZ4's worst case output can be larger than the input, so size the output
    // region from MaximumOutputLength, rounded up to whole pages.
    var outputBuffer = LZ4.MaximumOutputLength(sizeInBytes);
    var outputBufferInPages = outputBuffer / AbstractPager.PageSize + (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
    var pagesRequired = (dataPagesCount + outputBufferInPages);
    compressionPager.EnsureContinuous(tx, 0, pagesRequired);

    // Pager layout: [0, dataPagesCount) holds the gathered uncompressed copy,
    // [dataPagesCount, ...) receives the compressed output.
    var tempBuffer = compressionPager.AcquirePagePointer(0);
    var compressionBuffer = compressionPager.AcquirePagePointer(dataPagesCount);

    // Gather the transaction's scratch pages into one contiguous buffer so we
    // can compress them in a single call.
    var write = tempBuffer;
    var txPages = tx.GetTransactionPages();
    foreach (var txPage in txPages)
    {
        var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPage.ScratchFileNumber, txPage.PositionInScratchBuffer);
        var count = txPage.NumberOfPages * AbstractPager.PageSize;
        MemoryUtils.BulkCopy(write, scratchPage, count);
        write += count;
    }

    var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
    var remainder = len % AbstractPager.PageSize;
    var compressedPages = (len / AbstractPager.PageSize) + (remainder == 0 ? 0 : 1);
    if (remainder != 0)
    {
        // Zero the unused tail of the last page. The tail is
        // (PageSize - remainder) bytes long; the previous code zeroed only
        // 'remainder' bytes, leaving stale buffer bytes inside the page when
        // remainder < PageSize / 2 — and those bytes are covered by the CRC
        // computed below, making the checksum depend on leftover garbage.
        StdLib.memset(compressionBuffer + len, 0, AbstractPager.PageSize - remainder);
    }

    var pages = new IntPtr[compressedPages + 1];

    // Fill in the header page's compression metadata; it is written to the
    // journal uncompressed, ahead of the data pages.
    var txHeaderPage = tx.GetTransactionHeaderPage();
    var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(txHeaderPage.ScratchFileNumber, txHeaderPage.PositionInScratchBuffer);
    var txHeader = (TransactionHeader*)txHeaderBase;
    txHeader->Compressed = true;
    txHeader->CompressedSize = len;
    txHeader->UncompressedSize = sizeInBytes;
    txHeader->PreviousTransactionCrc = previousTransactionCrc;

    pages[0] = new IntPtr(txHeaderBase);
    for (int index = 0; index < compressedPages; index++)
    {
        pages[index + 1] = new IntPtr(compressionBuffer + (index * AbstractPager.PageSize));
    }

    // CRC covers whole pages, including the zeroed tail of the last one.
    txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);

    return pages;
}
// Compresses a transaction's data pages into the compression pager and returns
// the journal write list: pages[0] is the (uncompressed) transaction header
// page, pages[1..] are the compressed data pages. In this version the header
// page is txPages[0], which is why the gather loop below starts at index 1.
private byte*[] CompressPages(Transaction tx, int numberOfPages, IVirtualPager compressionPager)
{
    // numberOfPages include the tx header page, which we don't compress
    var dataPagesCount = numberOfPages - 1;
    var sizeInBytes = dataPagesCount * AbstractPager.PageSize;

    // LZ4 worst-case output may exceed the input, so reserve enough whole pages.
    var outputBuffer = LZ4.MaximumOutputLength(sizeInBytes);
    var outputBufferInPages = outputBuffer / AbstractPager.PageSize + (outputBuffer % AbstractPager.PageSize == 0 ? 0 : 1);
    var pagesRequired = (dataPagesCount + outputBufferInPages);
    compressionPager.EnsureContinuous(tx, 0, pagesRequired);

    // Pager layout: [0, dataPagesCount) = gathered uncompressed copy,
    // [dataPagesCount, ...) = compressed output.
    var tempBuffer = compressionPager.AcquirePagePointer(0);
    var compressionBuffer = compressionPager.AcquirePagePointer(dataPagesCount);

    // Gather the transaction's scratch pages into one contiguous buffer,
    // skipping txPages[0] (the header page).
    var write = tempBuffer;
    var txPages = tx.GetTransactionPages();
    for (int index = 1; index < txPages.Count; index++)
    {
        var txPage = txPages[index];
        var scratchPage = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPage.PositionInScratchBuffer);
        var count = txPage.NumberOfPages * AbstractPager.PageSize;
        NativeMethods.memcpy(write, scratchPage, count);
        write += count;
    }

    var len = DoCompression(tempBuffer, compressionBuffer, sizeInBytes, outputBuffer);
    // Round the compressed size up to whole pages.
    var compressedPages = (len / AbstractPager.PageSize) + (len % AbstractPager.PageSize == 0 ? 0 : 1);

    var pages = new byte*[compressedPages + 1];

    // Record compression metadata in the header page before computing the CRC.
    var txHeaderBase = tx.Environment.ScratchBufferPool.AcquirePagePointer(txPages[0].PositionInScratchBuffer);
    var txHeader = (TransactionHeader*)txHeaderBase;
    txHeader->Compressed = true;
    txHeader->CompressedSize = len;
    txHeader->UncompressedSize = sizeInBytes;

    pages[0] = txHeaderBase;
    for (int index = 0; index < compressedPages; index++)
    {
        pages[index + 1] = compressionBuffer + (index * AbstractPager.PageSize);
    }

    // NOTE(review): the unused tail of the last compressed page is not zeroed
    // here, so the CRC below includes whatever bytes the buffer already held —
    // presumably harmless because readers checksum the identical journal bytes,
    // but verify against the read path.
    txHeader->Crc = Crc.Value(compressionBuffer, 0, compressedPages * AbstractPager.PageSize);

    return pages;
}
// Reads the next transaction from the journal during recovery, supporting both
// compressed and uncompressed transactions. Copies/decompresses the pages into
// the recovery pager, tracks overflow pages explicitly, and merges the
// page-number -> RecoveryPagePosition mapping into _transactionPageTranslation.
// Returns false at end of journal (or past MaxPageToRead) or on validation/
// decompression failure; true otherwise.
public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
{
    // Nothing left in the journal file.
    if (_readingPage >= _pager.NumberOfAllocatedPages)
    {
        return (false);
    }
    // Optional upper bound on how far into the journal we may read.
    if (MaxPageToRead != null && _readingPage >= MaxPageToRead.Value)
    {
        return (false);
    }
    TransactionHeader *current;
    if (!TryReadAndValidateHeader(options, out current))
    {
        return (false);
    }
    // On-disk footprint depends on whether the transaction was compressed.
    var transactionSize = GetNumberOfPagesFromSize(current->Compressed ? current->CompressedSize : current->UncompressedSize);
    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        // Already synced to the data file — advance past it without applying.
        LastTransactionHeader = current;
        _readingPage += transactionSize;
        return (true); // skipping
    }
    if (checkCrc && !ValidatePagesCrc(options, transactionSize, current))
    {
        return (false);
    }
    // Reserve and zero enough recovery pages for the full transaction payload.
    _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1);
    var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);
    UnmanagedMemory.Set(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
    if (current->Compressed)
    {
        if (TryDecompressTransactionPages(options, current, dataPage) == false)
        {
            return (false);
        }
    }
    else
    {
        // Uncompressed transaction: the journal bytes are the pages themselves.
        Memory.Copy(dataPage, _pager.AcquirePagePointer(_readingPage), (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
    }
    // Walk the recovered pages and record where each page number now lives in
    // the recovery pager; overflow pages advance the cursor by their full span.
    var tempTransactionPageTranslaction = new Dictionary<long, RecoveryPagePosition>();
    for (var i = 0; i < current->PageCount; i++)
    {
        Debug.Assert(_pager.Disposed == false);
        Debug.Assert(_recoveryPager.Disposed == false);
        var page = _recoveryPager.Read(_recoveryPage);
        var pagePosition = new RecoveryPagePosition
        {
            JournalPos = _recoveryPage,
            TransactionId = current->TransactionId
        };
        if (page.IsOverflow)
        {
            var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
            pagePosition.IsOverflow = true;
            pagePosition.NumberOfOverflowPages = numOfPages;
            _recoveryPage += numOfPages;
        }
        else
        {
            _recoveryPage++;
        }
        tempTransactionPageTranslaction[page.PageNumber] = pagePosition;
    }
    _readingPage += transactionSize;
    LastTransactionHeader = current;
    // Merge into the global translation. An overflow value supersedes any
    // stale entries for the page numbers its extra pages now cover, so those
    // are removed.
    foreach (var pagePosition in tempTransactionPageTranslaction)
    {
        _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
        if (pagePosition.Value.IsOverflow)
        {
            Debug.Assert(pagePosition.Value.NumberOfOverflowPages != -1);
            for (int i = 1; i < pagePosition.Value.NumberOfOverflowPages; i++)
            {
                _transactionPageTranslation.Remove(pagePosition.Key + i);
            }
        }
    }
    return (true);
}
// Applies a transaction received from a shipping source: verifies its CRC and
// the CRC chain against the previously applied transaction, decompresses the
// payload into the local pager, and records the newly added page numbers.
// Throws InvalidDataException when the CRC chain or the payload is invalid.
protected void ReadFromShippedTransaction(TransactionToShip transaction)
{
    // Compressed size rounded up to whole pages — the shipped stream always
    // carries full pages (see ReadOneTransactionForShipping).
    var compressedPages = (transaction.Header.CompressedSize / AbstractPager.PageSize) + (transaction.Header.CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);
    var bufferSize = compressedPages * AbstractPager.PageSize;
    var compressedDataBuffer = new byte[bufferSize];

    // Stream.Read may return fewer bytes than requested; the original single
    // Read call could silently truncate the payload for non-memory streams.
    // Loop until the whole buffer is filled, failing loudly on early EOF.
    var totalRead = 0;
    while (totalRead < bufferSize)
    {
        var bytesRead = transaction.CompressedData.Read(compressedDataBuffer, totalRead, bufferSize - totalRead);
        if (bytesRead == 0)
        {
            throw new InvalidDataException("Unexpected end of compressed data for transaction " + transaction.Header.TransactionId);
        }
        totalRead += bytesRead;
    }

    fixed (byte *compressedDataBufferPtr = compressedDataBuffer)
    {
        // Validate both the transaction's own CRC and the chain link to the
        // previous transaction — a mismatch means data loss or reordering.
        var crc = Crc.Value(compressedDataBufferPtr, 0, compressedPages * AbstractPager.PageSize);
        if (transaction.Header.Crc != crc || _previousTransactionCrc != transaction.PreviousTransactionCrc)
        {
            throw new InvalidDataException("Invalid CRC signature for transaction " + transaction.Header.TransactionId);
        }
        _previousTransactionCrc = crc;

        var totalPages = transaction.Header.PageCount + transaction.Header.OverflowPageCount;
        _pager.EnsureContinuous(null, currentPage, totalPages + 1);
        try
        {
            LZ4.Decode64(compressedDataBufferPtr, transaction.Header.CompressedSize, _pager.AcquirePagePointer(currentPage), transaction.Header.UncompressedSize, true);
        }
        catch (Exception e)
        {
            throw new InvalidDataException("Could not de-compress, invalid data", e);
        }
    }

    // Track the pages this transaction added to the local pager.
    var lastAddedPage = currentPage + transaction.Header.PageCount;
    for (int pageNumber = currentPage; pageNumber < lastAddedPage; pageNumber++)
    {
        _pageNumbers.Add(pageNumber);
    }

    // Advance the latest-seen transaction header only when this one is newer.
    if (LastTransactionHeader.HasValue && LastTransactionHeader.Value.TransactionId < transaction.Header.TransactionId)
    {
        LastTransactionHeader = transaction.Header;
    }

    currentPage = lastAddedPage;
}
// Thin pass-through: hands back the raw pointer for page p from the
// underlying scratch pager, on behalf of the given transaction.
public byte* AcquirePagePointer(Transaction tx, long p) => _scratchPager.AcquirePagePointer(tx, p);
// Reads the next transaction from the journal during recovery (older variant
// with the CRC check inlined). Decompresses the transaction's pages into the
// recovery pager, builds a page-number -> journal-position mapping, and merges
// it into _transactionPageTranslation. Returns false at end of journal or on
// validation/decompression failure; true otherwise (including when an
// already-synced transaction is skipped).
public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
{
    // Nothing left in the journal file.
    if (_readingPage >= _pager.NumberOfAllocatedPages)
    {
        return (false);
    }
    TransactionHeader *current;
    if (!TryReadAndValidateHeader(options, out current))
    {
        return (false);
    }
    // Compressed size rounded up to whole journal pages.
    var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);
    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        // Already synced to the data file — advance past it without applying.
        LastTransactionHeader = current;
        _readingPage += compressedPages;
        return (true); // skipping
    }
    if (checkCrc)
    {
        // CRC covers the full page-aligned compressed region.
        uint crc = Crc.Value(_pager.AcquirePagePointer(_readingPage), 0, compressedPages * AbstractPager.PageSize);
        if (crc != current->Crc)
        {
            RequireHeaderUpdate = true;
            options.InvokeRecoveryError(this, "Invalid CRC signature for transaction " + current->TransactionId, null);
            return (false);
        }
    }
    // Reserve and zero enough recovery pages for the decompressed transaction.
    _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1);
    var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);
    NativeMethods.memset(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
    try
    {
        LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
    }
    catch (Exception e)
    {
        // Corrupt compressed payload: report, flag the header for rewrite, stop.
        options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
        RequireHeaderUpdate = true;
        return (false);
    }
    // Walk the recovered pages and record where each page number now lives;
    // overflow pages advance the cursor by their full span.
    var tempTransactionPageTranslaction = new Dictionary<long, JournalFile.PagePosition>();
    for (var i = 0; i < current->PageCount; i++)
    {
        Debug.Assert(_pager.Disposed == false);
        Debug.Assert(_recoveryPager.Disposed == false);
        var page = _recoveryPager.Read(_recoveryPage);
        tempTransactionPageTranslaction[page.PageNumber] = new JournalFile.PagePosition
        {
            JournalPos = _recoveryPage,
            TransactionId = current->TransactionId
        };
        if (page.IsOverflow)
        {
            var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
            _recoveryPage += numOfPages;
        }
        else
        {
            _recoveryPage++;
        }
    }
    // Commit state: advance the read cursor, remember the header, and merge
    // the translation into the global map.
    _readingPage += compressedPages;
    LastTransactionHeader = current;
    foreach (var pagePosition in tempTransactionPageTranslaction)
    {
        _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
    }
    return (true);
}