// Reads the chunk table of a chunked LASzip stream, populating number_chunks,
// chunk_starts (absolute stream offsets of each compressed chunk), chunk_totals
// (cumulative point counts, adaptive chunking only) and tabled_chunks (the
// number of table entries actually materialized).
// Returns true on success or when the table can be rebuilt/ignored on the fly;
// returns false only when reading cannot proceed (e.g. adaptive chunking with
// no usable table, or the 8-byte table pointer cannot be read).
// NOTE(review): relies on instance fields instream, dec, chunk_size,
// number_chunks, chunk_starts, chunk_totals, tabled_chunks set up elsewhere —
// presumably number_chunks is pre-initialized to uint.MaxValue as a "not yet
// read" sentinel (see the catch block below); verify against the caller.
bool read_chunk_table()
{
	byte[] buffer = new byte[8];

	// read the 8 bytes that store the location of the chunk table
	long chunk_table_start_position;
	try
	{
		if (instream.Read(buffer, 0, 8) != 8) { throw new EndOfStreamException(); }
		chunk_table_start_position = BitConverter.ToInt64(buffer, 0);
	}
	catch
	{
		return(false);
	}

	// this is where the chunks start
	long chunks_start = instream.Position;

	if ((chunk_table_start_position + 8) == chunks_start)
	{
		// the table pointer points right back at itself: no chunk table was written

		// no choice but to fail if adaptive chunking was used
		// (chunk_size == uint.MaxValue marks adaptive chunking throughout this method)
		if (chunk_size == uint.MaxValue)
		{
			return(false);
		}

		// otherwise we build the chunk table as we read the file
		number_chunks = 0;
		chunk_starts = new List <long>();
		chunk_starts.Add(chunks_start);
		number_chunks++;
		tabled_chunks = 1;
		return(true);
	}

	if (!instream.CanSeek)
	{
		// no choice but to fail if adaptive chunking was used
		if (chunk_size == uint.MaxValue)
		{
			return(false);
		}

		// if the stream is not seekable we cannot seek to the chunk table but won't need it anyways
		number_chunks = uint.MaxValue - 1;
		tabled_chunks = 0;
		return(true);
	}

	if (chunk_table_start_position == -1)
	{
		// the compressor was writing to a non-seekable stream and wrote the chunk table start at the end
		// NOTE(review): Stream.Seek returns the new absolute position, not a success
		// flag; this == 0 test only triggers for a stream of exactly 8 bytes. Looks
		// like a literal port of a bool-returning C++ seek — confirm intent.
		if (instream.Seek(-8, SeekOrigin.End) == 0)
		{
			return(false);
		}
		try
		{
			if (instream.Read(buffer, 0, 8) != 8) { throw new EndOfStreamException(); }
			chunk_table_start_position = BitConverter.ToInt64(buffer, 0);
		}
		catch
		{
			return(false);
		}
	}

	// read the chunk table
	// wire format: uint version (must be 0), uint number_chunks, then the table
	// entries entropy-coded as differences to the previous entry (see prefix-sum
	// pass below)
	try
	{
		instream.Seek(chunk_table_start_position, SeekOrigin.Begin);
		instream.Read(buffer, 0, 8);
		uint version = BitConverter.ToUInt32(buffer, 0);
		if (version != 0) { throw new Exception(); } // only table version 0 is understood
		number_chunks = BitConverter.ToUInt32(buffer, 4);
		chunk_totals = null;
		chunk_starts = null;
		if (chunk_size == uint.MaxValue)
		{
			// adaptive chunking also stores per-chunk point counts
			chunk_totals = new uint[number_chunks + 1];
			chunk_totals[0] = 0;
		}
		chunk_starts = new List <long>();
		chunk_starts.Add(chunks_start);
		tabled_chunks = 1;
		if (number_chunks > 0)
		{
			dec.init(instream);
			IntegerCompressor ic = new IntegerCompressor(dec, 32, 2);
			ic.initDecompressor();
			// decode the raw (differential) table entries; context 0 = chunk totals,
			// context 1 = chunk starts
			for (int i = 1; i <= number_chunks; i++)
			{
				if (chunk_size == uint.MaxValue) { chunk_totals[i] = (uint)ic.decompress((i > 1?(int)chunk_totals[i - 1]:0), 0); }
				chunk_starts.Add(ic.decompress((i > 1?(int)(chunk_starts[i - 1]):0), 1));
				tabled_chunks++;
			}
			dec.done();
			// turn the decoded differences into absolute values (prefix sums)
			for (int i = 1; i <= number_chunks; i++)
			{
				if (chunk_size == uint.MaxValue) { chunk_totals[i] += chunk_totals[i - 1]; }
				chunk_starts[i] += chunk_starts[i - 1];
			}
		}
	}
	catch
	{
		// something went wrong while reading the chunk table
		chunk_totals = null;

		// no choice but to fail if adaptive chunking was used
		if (chunk_size == uint.MaxValue)
		{
			return(false);
		}

		// did we not even read the number of chunks
		// (number_chunks still holds its uint.MaxValue sentinel in that case)
		if (number_chunks == uint.MaxValue)
		{
			// then compressor was interrupted before getting a chance to write the chunk table
			number_chunks = 0;
			chunk_starts = new List <long>();
			chunk_starts.Add(chunks_start);
			number_chunks++;
			tabled_chunks = 1;
		}
		else
		{
			// otherwise fix as many additional chunk_starts as possible
			// (apply the prefix sum only to the entries decoded before the failure)
			for (int i = 1; i < tabled_chunks; i++)
			{
				chunk_starts[i] += chunk_starts[i - 1];
			}
		}
	}

	// rewind to the first chunk so point reading can begin
	// NOTE(review): same Seek-return quirk as above — == 0 can only be true when
	// chunks_start itself is 0; presumably intended as a failure check. Verify.
	if (instream.Seek(chunks_start, SeekOrigin.Begin) == 0)
	{
		return(false);
	}
	return(true);
}