// Flushes the dictionary window bytes in the circular range [StartPtr, EndPtr)
// (indices taken modulo MaxWinMask) to the output via UnpWriteData, handling
// both the contiguous window and the fragmented-window representation.
void UnpWriteArea(size_t StartPtr, size_t EndPtr)
{
    // A non-empty range means some output was produced; a wrapped range
    // (end before start) means the circular buffer has been fully cycled.
    UnpSomeRead |= EndPtr != StartPtr;
    UnpAllBuf |= EndPtr < StartPtr;

    if (Fragmented)
    {
        // Fragmented window: emit one physically contiguous fragment at a time.
        size_t remaining = (EndPtr - StartPtr) & MaxWinMask;
        while (remaining != 0)
        {
            size_t chunk = FragWindow.GetBlockSize(StartPtr, remaining);
            //UnpWriteData(&FragWindow[StartPtr],BlockSize);
            FragWindow.GetBuffer(StartPtr, out var buf, out var bufOffset);
            UnpWriteData(buf, bufOffset, chunk);
            remaining -= chunk;
            StartPtr = (StartPtr + chunk) & MaxWinMask;
        }
        return;
    }

    if (StartPtr <= EndPtr)
    {
        // Contiguous span inside the window (zero-length when equal).
        UnpWriteData(Window, StartPtr, EndPtr - StartPtr);
    }
    else
    {
        // Range wraps past the window end: write the tail, then the head.
        UnpWriteData(Window, StartPtr, MaxWinSize - StartPtr);
        UnpWriteData(Window, 0, EndPtr);
    }
}
// later: may need Dispose() if we support thread pool
//Unpack::~Unpack()
//{
//  InitFilters30(false);
//
//  if (Window!=null)
//    free(Window);
//#if RarV2017_RAR_SMP
//  DestroyThreadPool(UnpThreadPool);
//  delete[] ReadBufMT;
//  delete[] UnpThreadData;
//#endif
//}

// Allocates, or grows, the sliding dictionary window for this stream.
// WinSize is the requested dictionary size in bytes; Solid indicates the
// file continues a solid stream (whose window data must be preserved on grow).
// On success, MaxWinSize/MaxWinMask are updated and either Window (contiguous
// byte[]) or FragWindow (with Fragmented == true) holds the dictionary.
// Throws InvalidFormatException on a zero size or an unsupported grow case.
private void Init(size_t WinSize, bool Solid)
{
    // If 32-bit RAR unpacks an archive with 4 GB dictionary, the window size
    // will be 0 because of size_t overflow. Let's issue the memory error.
    if (WinSize == 0)
    {
        //ErrHandler.MemoryError();
        throw new InvalidFormatException("invalid window size (possibly due to a rar file with a 4GB being unpacked on a 32-bit platform)");
    }

    // Minimum window size must be at least twice more than maximum possible
    // size of filter block, which is 0x10000 in RAR now. If window size is
    // smaller, we can have a block with never cleared flt->NextWindow flag
    // in UnpWriteBuf(). Minimum window size 0x20000 would be enough, but let's
    // use 0x40000 for extra safety and possible filter area size expansion.
    const size_t MinAllocSize = 0x40000;
    if (WinSize < MinAllocSize)
    {
        WinSize = MinAllocSize;
    }

    if (WinSize <= MaxWinSize) // Use the already allocated window.
    {
        return;
    }
    if ((WinSize >> 16) > 0x10000) // Window size must not exceed 4 GB.
    {
        return;
    }

    // Archiving code guarantees that window size does not grow in the same
    // solid stream. So if we are here, we are either creating a new window
    // or increasing the size of non-solid window. So we could safely reject
    // current window data without copying them to a new window, though being
    // extra cautious, we still handle the solid window grow case below.
    bool Grow = Solid && (Window != null || Fragmented);

    // We do not handle growth for existing fragmented window.
    if (Grow && Fragmented)
    {
        //throw std::bad_alloc();
        throw new InvalidFormatException("Grow && Fragmented");
    }

    // NOTE(review): in the original C++, a failed allocation returned NULL and
    // triggered the fragmented-window fallback below. In C#, `new byte[...]`
    // throws OutOfMemoryException instead of returning null, so NewWindow is
    // null only when the window was already Fragmented.
    byte[] NewWindow = Fragmented ? null : new byte[WinSize];

    if (NewWindow == null)
    {
        if (Grow || WinSize < 0x1000000)
        {
            // We do not support growth for new fragmented window.
            // Also exclude RAR4 and small dictionaries.
            //throw std::bad_alloc();
            throw new InvalidFormatException("Grow || WinSize<0x1000000");
        }
        else
        {
            if (Window != null) // If allocated by preceding files.
            {
                //free(Window);
                Window = null;
            }
            FragWindow.Init(WinSize);
            Fragmented = true;
        }
    }

    if (!Fragmented)
    {
        // Clean the window to generate the same output when unpacking corrupt
        // RAR files, which may access unused areas of sliding dictionary.
        // sharpcompress: don't need this, freshly allocated above
        //memset(NewWindow,0,WinSize);

        // If Window is not NULL, it means that window size has grown.
        // In solid streams we need to copy data to a new window in such case.
        // RAR archiving code does not allow it in solid streams now,
        // but let's implement it anyway just in case we'll change it sometimes.
        if (Grow)
        {
            // Copy the most recent MaxWinSize bytes (ending at UnpPtr) from the
            // old window into the same logical positions of the larger window.
            for (size_t I = 1; I <= MaxWinSize; I++)
            {
                NewWindow[(UnpPtr - I) & (WinSize - 1)] = Window[(UnpPtr - I) & (MaxWinSize - 1)];
            }
        }

        //if (Window!=null)
        //    free(Window);
        Window = NewWindow;
    }

    // Publish the new window geometry only after the window storage is ready.
    MaxWinSize = WinSize;
    MaxWinMask = MaxWinSize - 1;
}
// Writes the decoded window data between WrPtr and UnpPtr to the output,
// applying any pending filters whose blocks fall inside that range. Updates
// WrPtr, WrittenFileSize, and recomputes WriteBorder (the point at which
// Unpack5 must call this method again). All pointer arithmetic is performed
// modulo MaxWinMask because the dictionary is circular.
private void UnpWriteBuf()
{
    size_t WrittenBorder = WrPtr; // start of the not-yet-written region
    size_t FullWriteSize = (UnpPtr - WrittenBorder) & MaxWinMask;
    size_t WriteSizeLeft = FullWriteSize; // shrinks as filtered chunks are flushed
    bool NotAllFiltersProcessed = false;

    //for (size_t I=0;I<Filters.Count;I++)
    // sharpcompress: size_t -> int
    for (int I = 0; I < Filters.Count; I++)
    {
        // Here we apply filters to data which we need to write.
        // We always copy data to another memory block before processing.
        // We cannot process them just in place in Window buffer, because
        // these data can be used for future string matches, so we must
        // preserve them in original form.
        UnpackFilter flt = Filters[I];
        if (flt.Type == FILTER_NONE)
        {
            continue;
        }
        if (flt.NextWindow)
        {
            // Here we skip filters which have block start in current data range
            // due to address wrap around in circular dictionary, but actually
            // belong to next dictionary block. If such filter start position
            // is included to current write range, then we reset 'NextWindow' flag.
            // In fact we can reset it even without such check, because current
            // implementation seems to guarantee 'NextWindow' flag reset after
            // buffer writing for all existing filters. But let's keep this check
            // just in case. Compressor guarantees that distance between
            // filter block start and filter storing position cannot exceed
            // the dictionary size. So if we covered the filter block start with
            // our write here, we can safely assume that filter is applicable
            // to next block on no further wrap arounds is possible.
            if (((flt.BlockStart - WrPtr) & MaxWinMask) <= FullWriteSize)
            {
                flt.NextWindow = false;
            }
            continue;
        }

        uint BlockStart = flt.BlockStart;
        uint BlockLength = flt.BlockLength;

        // Filter block begins inside the region still waiting to be written?
        if (((BlockStart - WrittenBorder) & MaxWinMask) < WriteSizeLeft)
        {
            if (WrittenBorder != BlockStart)
            {
                // Flush the unfiltered data preceding this filter block.
                UnpWriteArea(WrittenBorder, BlockStart);
                WrittenBorder = BlockStart;
                WriteSizeLeft = (UnpPtr - WrittenBorder) & MaxWinMask;
            }
            if (BlockLength <= WriteSizeLeft)
            {
                if (BlockLength > 0) // We set it to 0 also for invalid filters.
                {
                    uint BlockEnd = (BlockStart + BlockLength) & MaxWinMask;

                    //x FilterSrcMemory.Alloc(BlockLength);
                    FilterSrcMemory = EnsureCapacity(FilterSrcMemory, checked ((int)BlockLength));
                    byte[] Mem = FilterSrcMemory;

                    // Copy the filter block out of the circular window into Mem,
                    // joining the two pieces when the block wraps around the end.
                    if (BlockStart < BlockEnd || BlockEnd == 0)
                    {
                        if (Fragmented)
                        {
                            FragWindow.CopyData(Mem, 0, BlockStart, BlockLength);
                        }
                        else //x memcpy(Mem,Window+BlockStart,BlockLength);
                        {
                            Utility.Copy(Window, BlockStart, Mem, 0, BlockLength);
                        }
                    }
                    else
                    {
                        size_t FirstPartLength = (size_t)(MaxWinSize - BlockStart);
                        if (Fragmented)
                        {
                            FragWindow.CopyData(Mem, 0, BlockStart, FirstPartLength);
                            FragWindow.CopyData(Mem, FirstPartLength, 0, BlockEnd);
                        }
                        else
                        {
                            //x memcpy(Mem,Window+BlockStart,FirstPartLength);
                            Utility.Copy(Window, BlockStart, Mem, 0, FirstPartLength);
                            //x memcpy(Mem+FirstPartLength,Window,BlockEnd);
                            Utility.Copy(Window, 0, Mem, FirstPartLength, BlockEnd);
                        }
                    }

                    byte[] OutMem = ApplyFilter(Mem, BlockLength, flt);

                    // Mark the filter consumed so the compaction pass below drops it.
                    Filters[I].Type = FILTER_NONE;

                    if (OutMem != null)
                    {
                        UnpIO_UnpWrite(OutMem, 0, BlockLength);
                    }

                    UnpSomeRead = true;
                    WrittenFileSize += BlockLength;
                    WrittenBorder = BlockEnd;
                    WriteSizeLeft = (UnpPtr - WrittenBorder) & MaxWinMask;
                }
            }
            else
            {
                // Current filter intersects the window write border, so we adjust
                // the window border to process this filter next time, not now.
                WrPtr = WrittenBorder;

                // Since Filter start position can only increase, we quit processing
                // all following filters for this data block and reset 'NextWindow'
                // flag for them.
                //for (size_t J=I;J<Filters.Count;J++)
                // sharpcompress: size_t -> int
                for (int J = I; J < Filters.Count; J++)
                {
                    UnpackFilter _flt = Filters[J];
                    if (_flt.Type != FILTER_NONE)
                    {
                        _flt.NextWindow = false;
                    }
                }

                // Do not write data left after current filter now.
                NotAllFiltersProcessed = true;
                break;
            }
        }
    }

    // Remove processed filters from queue.
    // Compacts the list in place: surviving entries shift left over the
    // FILTER_NONE slots, then the tail is trimmed in one RemoveRange call.
    // sharpcompress: size_t -> int
    int EmptyCount = 0;
    // sharpcompress: size_t -> int
    for (int I = 0; I < Filters.Count; I++)
    {
        if (EmptyCount > 0)
        {
            Filters[I - EmptyCount] = Filters[I];
        }
        if (Filters[I].Type == FILTER_NONE)
        {
            EmptyCount++;
        }
    }
    if (EmptyCount > 0)
    //Filters.Alloc(Filters.Count-EmptyCount);
    {
        Filters.RemoveRange(Filters.Count - EmptyCount, EmptyCount);
    }

    if (!NotAllFiltersProcessed) // Only if all filters are processed.
    {
        // Write data left after last filter.
        UnpWriteArea(WrittenBorder, UnpPtr);
        WrPtr = UnpPtr;
    }

    // We prefer to write data in blocks not exceeding UNPACK_MAX_WRITE
    // instead of potentially huge MaxWinSize blocks. It also allows us
    // to keep the size of Filters array reasonable.
    WriteBorder = (UnpPtr + Math.Min(MaxWinSize, UNPACK_MAX_WRITE)) & MaxWinMask;

    // Choose the nearest among WriteBorder and WrPtr actual written border.
    // If border is equal to UnpPtr, it means that we have MaxWinSize data ahead.
    if (WriteBorder == UnpPtr
        || WrPtr != UnpPtr
            && ((WrPtr - UnpPtr) & MaxWinMask) < ((WriteBorder - UnpPtr) & MaxWinMask))
    {
        WriteBorder = WrPtr;
    }
}
// Main RAR 5.0 decompression loop. Decodes literals and LZ matches from the
// bit input (Inp) into the sliding dictionary, reading new block headers and
// Huffman tables as block boundaries are reached, and flushing decoded data
// through UnpWriteBuf() whenever UnpPtr approaches WriteBorder. Solid selects
// solid-stream initialization; Suspended supports resumable extraction.
private void Unpack5(bool Solid)
{
    FileExtracted = true;

    if (!Suspended)
    {
        UnpInitData(Solid);
        if (!UnpReadBuf())
        {
            return;
        }

        // Check TablesRead5 to be sure that we read tables at least once
        // regardless of current block header TablePresent flag.
        // So we can safefly use these tables below.
        if (!ReadBlockHeader(Inp, ref BlockHeader)
            || !ReadTables(Inp, ref BlockHeader, ref BlockTables)
            || !TablesRead5)
        {
            return;
        }
    }

    while (true)
    {
        UnpPtr &= MaxWinMask;

        if (Inp.InAddr >= ReadBorder)
        {
            bool FileDone = false;

            // We use 'while', because for empty block containing only Huffman table,
            // we'll be on the block border once again just after reading the table.
            while (Inp.InAddr > BlockHeader.BlockStart + BlockHeader.BlockSize - 1
                   || Inp.InAddr == BlockHeader.BlockStart + BlockHeader.BlockSize - 1
                       && Inp.InBit >= BlockHeader.BlockBitSize)
            {
                if (BlockHeader.LastBlockInFile)
                {
                    FileDone = true;
                    break;
                }
                if (!ReadBlockHeader(Inp, ref BlockHeader)
                    || !ReadTables(Inp, ref BlockHeader, ref BlockTables))
                {
                    return;
                }
            }
            if (FileDone || !UnpReadBuf())
            {
                break;
            }
        }

        // Flush when fewer than MAX_LZ_MATCH + 3 bytes remain before the write
        // border, so the longest possible match cannot overrun unwritten data.
        if (((WriteBorder - UnpPtr) & MaxWinMask) < MAX_LZ_MATCH + 3 && WriteBorder != UnpPtr)
        {
            UnpWriteBuf();
            if (WrittenFileSize > DestUnpSize)
            {
                return;
            }
            if (Suspended)
            {
                FileExtracted = false;
                return;
            }
        }

        uint MainSlot = DecodeNumber(Inp, BlockTables.LD);

        // Slots 0..255: literal byte.
        if (MainSlot < 256)
        {
            if (Fragmented)
            {
                FragWindow[UnpPtr++] = (byte)MainSlot;
            }
            else
            {
                Window[UnpPtr++] = (byte)MainSlot;
            }
            continue;
        }

        // Slots >= 262: match with explicitly coded distance.
        if (MainSlot >= 262)
        {
            uint Length = SlotToLength(Inp, MainSlot - 262);

            uint DBits,
                Distance = 1,
                DistSlot = DecodeNumber(Inp, BlockTables.DD);
            if (DistSlot < 4)
            {
                DBits = 0;
                Distance += DistSlot;
            }
            else
            {
                DBits = DistSlot / 2 - 1;
                Distance += (2 | (DistSlot & 1)) << (int)DBits;
            }

            if (DBits > 0)
            {
                if (DBits >= 4)
                {
                    if (DBits > 4)
                    {
                        // Read the high extra bits directly; the low 4 bits
                        // come from the separate low-distance Huffman table.
                        Distance += ((Inp.getbits32() >> (int)(36 - DBits)) << 4);
                        Inp.addbits(DBits - 4);
                    }
                    uint LowDist = DecodeNumber(Inp, BlockTables.LDD);
                    Distance += LowDist;
                }
                else
                {
                    Distance += Inp.getbits32() >> (int)(32 - DBits);
                    Inp.addbits(DBits);
                }
            }

            // Longer distances imply a larger minimum match length.
            if (Distance > 0x100)
            {
                Length++;
                if (Distance > 0x2000)
                {
                    Length++;
                    if (Distance > 0x40000)
                    {
                        Length++;
                    }
                }
            }

            InsertOldDist(Distance);
            LastLength = Length;
            if (Fragmented)
            {
                FragWindow.CopyString(Length, Distance, ref UnpPtr, MaxWinMask);
            }
            else
            {
                CopyString(Length, Distance);
            }
            continue;
        }

        // Slot 256: filter record follows in the stream.
        if (MainSlot == 256)
        {
            UnpackFilter Filter = new UnpackFilter();
            if (!ReadFilter(Inp, Filter) || !AddFilter(Filter))
            {
                break;
            }
            continue;
        }

        // Slot 257: repeat the previous match (last length, most recent distance).
        if (MainSlot == 257)
        {
            if (LastLength != 0)
            {
                if (Fragmented)
                {
                    FragWindow.CopyString(LastLength, OldDist[0], ref UnpPtr, MaxWinMask);
                }
                else
                {
                    CopyString(LastLength, OldDist[0]);
                }
            }
            continue;
        }

        // Slots 258..261: match reusing one of the four recent distances,
        // which is then moved to the front of the OldDist history.
        if (MainSlot < 262)
        {
            uint DistNum = MainSlot - 258;
            uint Distance = OldDist[DistNum];
            for (uint I = DistNum; I > 0; I--)
            {
                OldDist[I] = OldDist[I - 1];
            }
            OldDist[0] = Distance;

            uint LengthSlot = DecodeNumber(Inp, BlockTables.RD);
            uint Length = SlotToLength(Inp, LengthSlot);
            LastLength = Length;
            if (Fragmented)
            {
                FragWindow.CopyString(Length, Distance, ref UnpPtr, MaxWinMask);
            }
            else
            {
                CopyString(Length, Distance);
            }
            continue;
        }
    }

    // Flush whatever remains in the window after the last block.
    UnpWriteBuf();
}