private DecompressedLeafPage DecompressFromBuffer(DecompressionUsage usage, ref DecompressionInput input)
{
    var result = _llt.Environment.DecompressionBuffers.GetPage(_llt, input.DecompressedPageSize, usage, input.Page);

    var decompressedNodesOffset = (ushort)(result.PageSize - input.DecompressedSize);

    LZ4.Decode64LongBuffers(
        input.Data, input.CompressedSize,
        result.Base + decompressedNodesOffset,
        input.DecompressedSize, true);

    result.Lower += input.KeysOffsetsSize;
    result.Upper = decompressedNodesOffset;

    for (var i = 0; i < input.NumberOfEntries; i++)
    {
        result.KeysOffsets[i] = (ushort)(input.KeysOffsets[i] + result.Upper);
    }

    return result;
}
public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options)
{
    if (_readAt4Kb >= _journalPagerNumberOfAllocated4Kb)
    {
        return false;
    }

    if (TryReadAndValidateHeader(options, out TransactionHeader* current) == false)
    {
        var lastValid4Kb = _readAt4Kb;
        _readAt4Kb++;
        while (_readAt4Kb < _journalPagerNumberOfAllocated4Kb)
        {
            if (TryReadAndValidateHeader(options, out current))
            {
                if (CanIgnoreDataIntegrityErrorBecauseTxWasSynced(current, options))
                {
                    SkipCurrentTransaction(current);
                    return true;
                }

                RequireHeaderUpdate = true;
                break;
            }
            _readAt4Kb++;
        }

        _readAt4Kb = lastValid4Kb;
        return false;
    }

    if (IsAlreadySyncTransaction(current))
    {
        SkipCurrentTransaction(current);
        return true;
    }

    var performDecompression = current->CompressedSize != -1;

    var transactionSizeIn4Kb = GetTransactionSizeIn4Kb(current);
    _readAt4Kb += transactionSizeIn4Kb;

    TransactionHeaderPageInfo* pageInfoPtr;
    byte* outputPage;
    if (performDecompression)
    {
        var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
        _recoveryPager.EnsureContinuous(0, numberOfPages);
        _recoveryPager.EnsureMapped(this, 0, numberOfPages);
        outputPage = _recoveryPager.AcquirePagePointer(this, 0);
        Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);

        try
        {
            LZ4.Decode64LongBuffers((byte*)current + sizeof(TransactionHeader), current->CompressedSize, outputPage, current->UncompressedSize, true);
        }
        catch (Exception e)
        {
            options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
            RequireHeaderUpdate = true;
            return false;
        }

        pageInfoPtr = (TransactionHeaderPageInfo*)outputPage;
    }
    else
    {
        var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
        _recoveryPager.EnsureContinuous(0, numberOfPages);
        _recoveryPager.EnsureMapped(this, 0, numberOfPages);
        outputPage = _recoveryPager.AcquirePagePointer(this, 0);
        Memory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);
        Memory.Copy(outputPage, (byte*)current + sizeof(TransactionHeader), current->UncompressedSize);
        pageInfoPtr = (TransactionHeaderPageInfo*)outputPage;
    }

    long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;
    if (totalRead > current->UncompressedSize)
    {
        throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
    }

    for (var i = 0; i < current->PageCount; i++)
    {
        if (pageInfoPtr[i].PageNumber > current->LastPageNumber)
        {
            throw new InvalidDataException($"Transaction {current->TransactionId} contains reference to page {pageInfoPtr[i].PageNumber} which is after the last allocated page {current->LastPageNumber}");
        }
    }

    for (var i = 0; i < current->PageCount; i++)
    {
        if (totalRead > current->UncompressedSize)
        {
            throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
        }

        Debug.Assert(_journalPager.Disposed == false);
        if (performDecompression)
        {
            Debug.Assert(_recoveryPager.Disposed == false);
        }

        var numberOfPagesOnDestination = GetNumberOfPagesFor(pageInfoPtr[i].Size);
        _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
        _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

        // We are going to overwrite the page, so we don't care about its current content
        var pagePtr = _dataPager.AcquirePagePointerForNewPage(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
        _dataPager.MaybePrefetchMemory(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

        var pageNumber = *(long*)(outputPage + totalRead);
        if (pageInfoPtr[i].PageNumber != pageNumber)
        {
            throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {pageNumber}");
        }

        totalRead += sizeof(long);

        _modifiedPages.Add(pageNumber);
        for (var j = 1; j < numberOfPagesOnDestination; j++)
        {
            _modifiedPages.Remove(pageNumber + j);
        }

        _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

        if (pageInfoPtr[i].DiffSize == 0)
        {
            if (pageInfoPtr[i].Size == 0)
            {
                // diff contained no changes
                continue;
            }

            var journalPagePtr = outputPage + totalRead;

            if (options.Encryption.IsEnabled == false)
            {
                var pageHeader = (PageHeader*)journalPagePtr;
                var checksum = StorageEnvironment.CalculatePageChecksum((byte*)pageHeader, pageNumber, out var expectedChecksum);
                if (checksum != expectedChecksum)
                {
                    ThrowInvalidChecksumOnPageFromJournal(pageNumber, current, expectedChecksum, checksum, pageHeader);
                }
            }

            Memory.Copy(pagePtr, journalPagePtr, pageInfoPtr[i].Size);
            totalRead += pageInfoPtr[i].Size;

            if (options.Encryption.IsEnabled)
            {
                var pageHeader = (PageHeader*)pagePtr;
                if ((pageHeader->Flags & PageFlags.Overflow) == PageFlags.Overflow)
                {
                    // need to mark overlapped buffers as invalid for commit
                    var encryptionBuffers = ((IPagerLevelTransactionState)this).CryptoPagerTransactionState[_dataPager];
                    var numberOfPages = VirtualPagerLegacyExtensions.GetNumberOfOverflowPages(pageHeader->OverflowSize);
                    for (var j = 1; j < numberOfPages; j++)
                    {
                        if (encryptionBuffers.TryGetValue(pageNumber + j, out var buffer))
                        {
                            buffer.SkipOnTxCommit = true;
                        }
                    }
                }
            }
        }
        else
        {
            _diffApplier.Destination = pagePtr;
            _diffApplier.Diff = outputPage + totalRead;
            _diffApplier.Size = pageInfoPtr[i].Size;
            _diffApplier.DiffSize = pageInfoPtr[i].DiffSize;
            _diffApplier.Apply(pageInfoPtr[i].IsNewDiff);
            totalRead += pageInfoPtr[i].DiffSize;
        }

        _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
    }

    LastTransactionHeader = current;
    return true;
}
[InlineData(0)] // special case : in = Exactly 1GB, out > 1GB
public unsafe void LZ4TestAbove2GB(long divider)
{
    using (var cts = new CancellationTokenSource(TimeSpan.FromMinutes(10)))
    {
        var options = StorageEnvironmentOptions.ForPath(Path.Combine(DataDir, $"bigLz4-test-{divider}.data"));
        using (var env = new StorageEnvironment(options))
        {
            long gb = 1024 * 1024 * 1024;
            long inputSize = 3L * gb;
            var guid = Guid.NewGuid();
            using (var outputPager = CreateScratchFile($"output-{divider}-{guid}", env, inputSize, out byte* outputBuffer))
            using (var inputPager = CreateScratchFile($"input-{divider}-{guid}", env, inputSize, out byte* inputBuffer))
            using (var checkedPager = CreateScratchFile($"checked-{divider}-{guid}", env, inputSize, out byte* checkedBuffer))
            {
                var random = new Random(123);
                if (divider != 0)
                {
                    for (long p = 0; p < inputSize / divider; p++)
                    {
                        cts.Token.ThrowIfCancellationRequested();
                        (*(byte*)((long)inputBuffer + p)) = Convert.ToByte(random.Next(0, 255));
                    }
                }
                else
                {
                    inputSize = int.MaxValue / 2 - 1; // MAX_INPUT_LENGTH_PER_SEGMENT
                    for (long p = 0; p < inputSize; p++)
                    {
                        cts.Token.ThrowIfCancellationRequested();
                        (*(byte*)((long)inputBuffer + p)) = Convert.ToByte(random.Next(0, 255));
                    }
                }

                Console.WriteLine("Calculating LZ4 MaximumOutputLength...");
                var outputBufferSize = LZ4.MaximumOutputLength(inputSize);
                Console.WriteLine("...done");

                // write some data in known places in inputBuffer
                byte testNum = 0;
                for (long testPoints = 0; testPoints < inputSize; testPoints += gb)
                {
                    cts.Token.ThrowIfCancellationRequested();
                    var testPointer = (byte*)((long)inputBuffer + testPoints);
                    *testPointer = ++testNum;
                }

                Console.WriteLine("Encoding LZ4 LongBuffer...");
                // encode inputBuffer into outputBuffer
                var compressedLenTask = Task.Factory.StartNew(() => LZ4.Encode64LongBuffer(inputBuffer, outputBuffer, inputSize, outputBufferSize), cts.Token);
                compressedLenTask.Wait(cts.Token);
                var compressedLen = compressedLenTask.Result;
                Console.WriteLine("...done");

                Console.WriteLine("Decoding LZ4 LongBuffers...");
                // decode outputBuffer into checkedBuffer
                var totalOutputSizeTask = Task.Factory.StartNew(() => LZ4.Decode64LongBuffers(outputBuffer, compressedLen, checkedBuffer, inputSize, true), cts.Token);
                totalOutputSizeTask.Wait(cts.Token);
                var totalOutputSize = totalOutputSizeTask.Result;
                Console.WriteLine("...done");

                Assert.Equal(compressedLen, totalOutputSize);

                testNum = 0;
                for (long testPoints = 0; testPoints < inputSize; testPoints += gb)
                {
                    cts.Token.ThrowIfCancellationRequested();
                    var testPointer = (byte*)((long)checkedBuffer + testPoints);
                    Assert.Equal(++testNum, *testPointer);
                }
            }
        }
    }
}
public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options)
{
    if (_readAt4Kb >= _journalPagerNumberOfAllocated4Kb)
    {
        return false;
    }

    TransactionHeader* current;
    if (TryReadAndValidateHeader(options, out current) == false)
    {
        var lastValid4Kb = _readAt4Kb;
        _readAt4Kb++;
        while (_readAt4Kb < _journalPagerNumberOfAllocated4Kb)
        {
            if (TryReadAndValidateHeader(options, out current))
            {
                RequireHeaderUpdate = true;
                break;
            }
            _readAt4Kb++;
        }

        _readAt4Kb = lastValid4Kb;
        return false;
    }

    bool performDecompression = current->CompressedSize != -1;

    var size = current->CompressedSize != -1 ? current->CompressedSize : current->UncompressedSize;

    var transactionSizeIn4Kb =
        (size + sizeof(TransactionHeader)) / (4 * Constants.Size.Kilobyte) +
        ((size + sizeof(TransactionHeader)) % (4 * Constants.Size.Kilobyte) == 0 ? 0 : 1);

    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        _readAt4Kb += transactionSizeIn4Kb;
        LastTransactionHeader = current;
        return true; // skipping
    }

    _readAt4Kb += transactionSizeIn4Kb;

    TransactionHeaderPageInfo* pageInfoPtr;
    byte* outputPage;
    if (performDecompression)
    {
        var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
        _recoveryPager.EnsureContinuous(0, numberOfPages);
        _recoveryPager.EnsureMapped(this, 0, numberOfPages);
        outputPage = _recoveryPager.AcquirePagePointer(this, 0);
        UnmanagedMemory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);

        try
        {
            LZ4.Decode64LongBuffers((byte*)current + sizeof(TransactionHeader), current->CompressedSize, outputPage, current->UncompressedSize, true);
        }
        catch (Exception e)
        {
            options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
            RequireHeaderUpdate = true;
            return false;
        }

        pageInfoPtr = (TransactionHeaderPageInfo*)outputPage;
    }
    else
    {
        var numberOfPages = GetNumberOfPagesFor(current->UncompressedSize);
        _recoveryPager.EnsureContinuous(0, numberOfPages);
        _recoveryPager.EnsureMapped(this, 0, numberOfPages);
        outputPage = _recoveryPager.AcquirePagePointer(this, 0);
        UnmanagedMemory.Set(outputPage, 0, (long)numberOfPages * Constants.Storage.PageSize);
        Memory.Copy(outputPage, (byte*)current + sizeof(TransactionHeader), current->UncompressedSize);
        pageInfoPtr = (TransactionHeaderPageInfo*)outputPage;
    }

    long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;
    if (totalRead > current->UncompressedSize)
    {
        throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
    }

    for (var i = 0; i < current->PageCount; i++)
    {
        if (pageInfoPtr[i].PageNumber > current->LastPageNumber)
        {
            throw new InvalidDataException($"Transaction {current->TransactionId} contains reference to page {pageInfoPtr[i].PageNumber} which is after the last allocated page {current->LastPageNumber}");
        }
    }

    for (var i = 0; i < current->PageCount; i++)
    {
        if (totalRead > current->UncompressedSize)
        {
            throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
        }

        Debug.Assert(_journalPager.Disposed == false);
        if (performDecompression)
        {
            Debug.Assert(_recoveryPager.Disposed == false);
        }

        var numberOfPagesOnDestination = GetNumberOfPagesFor(pageInfoPtr[i].Size);
        _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
        _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

        // We are going to overwrite the page, so we don't care about its current content
        var pagePtr = _dataPager.AcquirePagePointerForNewPage(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);

        var pageNumber = *(long*)(outputPage + totalRead);
        if (pageInfoPtr[i].PageNumber != pageNumber)
        {
            throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {pageNumber}");
        }

        totalRead += sizeof(long);

        _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

        if (pageInfoPtr[i].DiffSize == 0)
        {
            Memory.Copy(pagePtr, outputPage + totalRead, pageInfoPtr[i].Size);
            totalRead += pageInfoPtr[i].Size;
        }
        else
        {
            _diffApplier.Destination = pagePtr;
            _diffApplier.Diff = outputPage + totalRead;
            _diffApplier.Size = pageInfoPtr[i].Size;
            _diffApplier.DiffSize = pageInfoPtr[i].DiffSize;
            _diffApplier.Apply(pageInfoPtr[i].IsNewDiff);
            totalRead += pageInfoPtr[i].DiffSize;
        }

        _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
    }

    LastTransactionHeader = current;
    return true;
}
[InlineData(0)] // special case : in = Exactly 1GB, out > 1GB
public unsafe void LZ4TestAbove2GB(long devider)
{
    var options = StorageEnvironmentOptions.ForPath(Path.Combine(DataDir, $"bigLz4-test-{devider}.data"));
    using (var env = new StorageEnvironment(options))
    {
        long Gb = 1024 * 1024 * 1024;
        long inputSize = 3L * Gb;
        byte* outputBuffer, inputBuffer, checkedBuffer;
        var guid = Guid.NewGuid();
        var outputPager = CreateScratchFile($"output-{devider}-{guid}", env, inputSize, out outputBuffer);
        var inputPager = CreateScratchFile($"input-{devider}-{guid}", env, inputSize, out inputBuffer);
        var checkedPager = CreateScratchFile($"checked-{devider}-{guid}", env, inputSize, out checkedBuffer);

        var random = new Random(123);
        if (devider != 0)
        {
            for (long p = 0; p < inputSize / devider; p++)
            {
                (*(byte*)((long)inputBuffer + p)) = Convert.ToByte(random.Next(0, 255));
            }
        }
        else
        {
            inputSize = int.MaxValue / 2 - 1; // MAX_INPUT_LENGTH_PER_SEGMENT
            for (long p = 0; p < inputSize; p++)
            {
                (*(byte*)((long)inputBuffer + p)) = Convert.ToByte(random.Next(0, 255));
            }
        }

        var outputBufferSize = LZ4.MaximumOutputLength(inputSize);

        // write some data in known places in inputBuffer
        long compressedLen = 0;
        byte testNum = 0;
        for (long testPoints = 0; testPoints < inputSize; testPoints += Gb)
        {
            var testPointer = (byte*)((long)inputBuffer + testPoints);
            *testPointer = ++testNum;
        }

        // encode inputBuffer into outputBuffer
        compressedLen = LZ4.Encode64LongBuffer(inputBuffer, outputBuffer, inputSize, outputBufferSize);

        // decode outputBuffer into checkedBuffer
        var totalOutputSize = LZ4.Decode64LongBuffers(outputBuffer, compressedLen, checkedBuffer, inputSize, true);
        Assert.Equal(compressedLen, totalOutputSize);

        testNum = 0;
        for (long testPoints = 0; testPoints < inputSize; testPoints += Gb)
        {
            var testPointer = (byte*)((long)checkedBuffer + testPoints);
            Assert.Equal(++testNum, *testPointer);
        }

        outputPager.Dispose();
        inputPager.Dispose();
        checkedPager.Dispose();
    }
}
public bool ReadOneTransactionToDataFile(StorageEnvironmentOptions options, bool checkCrc = true)
{
    if (_readingPage >= _journalPager.NumberOfAllocatedPages)
    {
        return false;
    }

    if (MaxPageToRead != null && _readingPage >= MaxPageToRead.Value)
    {
        return false;
    }

    TransactionHeader* current;
    if (!TryReadAndValidateHeader(options, out current))
    {
        return false;
    }

    var transactionSize = GetNumberOfPagesFromSize(options, current->CompressedSize + sizeof(TransactionHeader));

    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        _readingPage += transactionSize;
        LastTransactionHeader = current;
        return true; // skipping
    }

    if (checkCrc && !ValidatePagesHash(options, current))
    {
        return false;
    }

    _readingPage += transactionSize;

    var numberOfPages = _recoveryPager.GetNumberOfOverflowPages(current->UncompressedSize);
    _recoveryPager.EnsureContinuous(0, numberOfPages);
    _recoveryPager.EnsureMapped(this, 0, numberOfPages);
    var outputPage = _recoveryPager.AcquirePagePointer(this, 0);
    UnmanagedMemory.Set(outputPage, 0, (long)numberOfPages * options.PageSize);

    try
    {
        LZ4.Decode64LongBuffers((byte*)current + sizeof(TransactionHeader), current->CompressedSize, outputPage, current->UncompressedSize, true);
    }
    catch (Exception e)
    {
        options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
        RequireHeaderUpdate = true;
        return false;
    }

    var pageInfoPtr = (TransactionHeaderPageInfo*)outputPage;

    long totalRead = sizeof(TransactionHeaderPageInfo) * current->PageCount;

    for (var i = 0; i < current->PageCount; i++)
    {
        if (totalRead > current->UncompressedSize)
        {
            throw new InvalidDataException($"Attempted to read position {totalRead} from transaction data while the transaction is size {current->UncompressedSize}");
        }

        Debug.Assert(_journalPager.Disposed == false);
        Debug.Assert(_recoveryPager.Disposed == false);

        var numberOfPagesOnDestination = GetNumberOfPagesFromSize(options, pageInfoPtr[i].Size);
        _dataPager.EnsureContinuous(pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
        _dataPager.EnsureMapped(this, pageInfoPtr[i].PageNumber, numberOfPagesOnDestination);
        var pagePtr = _dataPager.AcquirePagePointer(this, pageInfoPtr[i].PageNumber);

        var diffPageNumber = *(long*)(outputPage + totalRead);
        if (pageInfoPtr[i].PageNumber != diffPageNumber)
        {
            throw new InvalidDataException($"Expected a diff for page {pageInfoPtr[i].PageNumber} but got one for {diffPageNumber}");
        }

        totalRead += sizeof(long);

        _dataPager.UnprotectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);

        if (pageInfoPtr[i].DiffSize == 0)
        {
            Memory.Copy(pagePtr, outputPage + totalRead, pageInfoPtr[i].Size);
            totalRead += pageInfoPtr[i].Size;
        }
        else
        {
            _diffApplier.Destination = pagePtr;
            _diffApplier.Diff = outputPage + totalRead;
            _diffApplier.Size = pageInfoPtr[i].Size;
            _diffApplier.DiffSize = pageInfoPtr[i].DiffSize;
            _diffApplier.Apply();
            totalRead += pageInfoPtr[i].DiffSize;
        }

        _dataPager.ProtectPageRange(pagePtr, (ulong)pageInfoPtr[i].Size);
    }

    LastTransactionHeader = current;
    return true;
}
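For reference, a minimal self-contained round-trip sketch using the same static LZ4 helper the snippets above rely on (MaximumOutputLength, Encode64LongBuffer, Decode64LongBuffers). The Sparrow.Compression namespace and the Marshal-allocated buffers are assumptions made for illustration only; the real code above works against Voron pagers and journal buffers instead.

// Compile with /unsafe. Assumes the LZ4 class from the snippets above is referenced.
using System;
using System.Runtime.InteropServices;
using System.Text;
using Sparrow.Compression; // assumption: namespace of the LZ4 helper used above

public static unsafe class Lz4RoundTripSketch
{
    public static void Main()
    {
        // Highly compressible sample input.
        var payload = Encoding.UTF8.GetBytes(new string('x', 64 * 1024));
        long inputSize = payload.Length;

        // Worst-case compressed size for this input, as computed in the tests above.
        long maxCompressedSize = LZ4.MaximumOutputLength(inputSize);

        IntPtr input = Marshal.AllocHGlobal((IntPtr)inputSize);
        IntPtr compressed = Marshal.AllocHGlobal((IntPtr)maxCompressedSize);
        IntPtr decompressed = Marshal.AllocHGlobal((IntPtr)inputSize);
        try
        {
            Marshal.Copy(payload, 0, input, payload.Length);

            // Encode: returns the number of bytes written to the compressed buffer.
            long compressedLen = LZ4.Encode64LongBuffer((byte*)input, (byte*)compressed, inputSize, maxCompressedSize);

            // Decode with a known output length (the trailing 'true'), mirroring the journal
            // recovery code; per the tests above the return value matches the compressed length consumed.
            long consumed = LZ4.Decode64LongBuffers((byte*)compressed, compressedLen, (byte*)decompressed, inputSize, true);

            Console.WriteLine($"original: {inputSize}, compressed: {compressedLen}, consumed on decode: {consumed}");
        }
        finally
        {
            Marshal.FreeHGlobal(input);
            Marshal.FreeHGlobal(compressed);
            Marshal.FreeHGlobal(decompressed);
        }
    }
}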