public LASreadItemCompressed_RGB12_v1(ArithmeticDecoder dec)
{
    // set decoder
    Debug.Assert(dec != null);
    this.dec = dec;

    // create models and integer compressors
    m_byte_used = dec.createSymbolModel(64);
    ic_rgb = new IntegerCompressor(dec, 8, 6);
}
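// Illustrative sketch (the bit assignment is an assumption, not taken from the
// port): the 64-symbol m_byte_used model holds a 6-bit mask, one bit per byte
// of the three 16-bit RGB channels (low/high byte of R, G, B). Only bytes
// whose bit is set are coded with ic_rgb, each under its own context 0..5;
// unset bytes repeat the previous point's value.
static bool IsByteCoded(uint byteUsedSymbol, int byteIndex) // byteIndex 0..5
{
    return (byteUsedSymbol & (1u << byteIndex)) != 0;
}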
public LASwriteItemCompressed_GPSTIME11_v2(ArithmeticEncoder enc)
{
    // set encoder
    Debug.Assert(enc != null);
    this.enc = enc;

    // create entropy models and integer compressors
    m_gpstime_multi = enc.createSymbolModel(LASZIP_GPSTIME_MULTI_TOTAL);
    m_gpstime_0diff = enc.createSymbolModel(6);
    ic_gpstime = new IntegerCompressor(enc, 32, 9); // 32 bits, 9 contexts
}
public LASreadItemCompressed_GPSTIME11_v2(ArithmeticDecoder dec)
{
    // set decoder
    Debug.Assert(dec != null);
    this.dec = dec;

    // create entropy models and integer compressors
    m_gpstime_multi = dec.createSymbolModel(LASZIP_GPSTIME_MULTI_TOTAL);
    m_gpstime_0diff = dec.createSymbolModel(6);
    ic_gpstime = new IntegerCompressor(dec, 32, 9); // 32 bits, 9 contexts
}
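// Illustrative sketch (an assumption about the scheme, not code from the
// port): GPS time is stored as a double, but ic_gpstime codes 32-bit integer
// differences, so the coder first reinterprets the 8 bytes of the double as a
// signed 64-bit integer; the 9 contexts then presumably distinguish how the
// current difference relates to the last one (with m_gpstime_multi picking a
// multiplier of the previous difference).
static long GpsTimeAsInt64(double gpstime)
{
    // same 8 bytes, viewed as an integer so differences are well-defined
    return BitConverter.DoubleToInt64Bits(gpstime);
}

static double GpsTimeFromInt64(long bits)
{
    return BitConverter.Int64BitsToDouble(bits);
}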
public LASreadItemCompressed_BYTE_v1(ArithmeticDecoder dec, uint number)
{
    // set decoder
    Debug.Assert(dec != null);
    this.dec = dec;
    Debug.Assert(number != 0);
    this.number = number;

    // create models and integer compressors
    ic_byte = new IntegerCompressor(dec, 8, number);

    // create last item
    last_item = new byte[number];
}
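// Illustrative note (the prediction scheme is an assumption reconstructed
// from the constructor): ic_byte gets one context per extra byte, and each
// byte of a new point is coded against the corresponding byte of the previous
// point, which is why last_item is kept.
static byte Prediction(byte[] last_item, int i)
{
    return last_item[i]; // context i predicts from byte i of the previous point
}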
public LASwriteItemCompressed_WAVEPACKET13_v1(ArithmeticEncoder enc)
{
    // set encoder
    Debug.Assert(enc != null);
    this.enc = enc;

    // create models and integer compressors
    m_packet_index = enc.createSymbolModel(256);
    m_offset_diff[0] = enc.createSymbolModel(4);
    m_offset_diff[1] = enc.createSymbolModel(4);
    m_offset_diff[2] = enc.createSymbolModel(4);
    m_offset_diff[3] = enc.createSymbolModel(4);
    ic_offset_diff = new IntegerCompressor(enc, 32);
    ic_packet_size = new IntegerCompressor(enc, 32);
    ic_return_point = new IntegerCompressor(enc, 32);
    ic_xyz = new IntegerCompressor(enc, 32, 3);
}
public LASreadItemCompressed_WAVEPACKET13_v1(ArithmeticDecoder dec)
{
    // set decoder
    Debug.Assert(dec != null);
    this.dec = dec;

    // create models and integer compressors
    m_packet_index = dec.createSymbolModel(256);
    m_offset_diff[0] = dec.createSymbolModel(4);
    m_offset_diff[1] = dec.createSymbolModel(4);
    m_offset_diff[2] = dec.createSymbolModel(4);
    m_offset_diff[3] = dec.createSymbolModel(4);
    ic_offset_diff = new IntegerCompressor(dec, 32);
    ic_packet_size = new IntegerCompressor(dec, 32);
    ic_return_point = new IntegerCompressor(dec, 32);
    ic_xyz = new IntegerCompressor(dec, 32, 3);
}
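// Reading aid (field layout per the LAS 1.3 spec; the mapping to the models
// above is an assumption): WAVEPACKET13 compresses the wave packet record
// field by field.
struct WavePacket13
{
    public byte descriptor_index; // -> m_packet_index (256 symbols)
    public ulong offset;          // -> m_offset_diff[] + ic_offset_diff
    public uint packet_size;      // -> ic_packet_size
    public float return_point;    // -> ic_return_point
    public float x, y, z;         // -> ic_xyz, contexts 0..2 (one per axis)
}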
public LASreadItemCompressed_POINT10_v1(ArithmeticDecoder dec)
{
    // set decoder
    Debug.Assert(dec != null);
    this.dec = dec;

    // create models and integer compressors
    ic_dx = new IntegerCompressor(dec, 32); // 32 bits, 1 context
    ic_dy = new IntegerCompressor(dec, 32, 20); // 32 bits, 20 contexts
    ic_z = new IntegerCompressor(dec, 32, 20); // 32 bits, 20 contexts
    ic_intensity = new IntegerCompressor(dec, 16);
    ic_scan_angle_rank = new IntegerCompressor(dec, 8, 2);
    ic_point_source_ID = new IntegerCompressor(dec, 16);
    m_changed_values = dec.createSymbolModel(64);
    for (int i = 0; i < 256; i++)
    {
        m_bit_byte[i] = null;
        m_classification[i] = null;
        m_user_data[i] = null;
    }
}
public LASwriteItemCompressed_POINT10_v2(ArithmeticEncoder enc)
{
    // set encoder
    Debug.Assert(enc != null);
    this.enc = enc;

    // create models and integer compressors
    ic_dx = new IntegerCompressor(enc, 32, 2); // 32 bits, 2 contexts
    ic_dy = new IntegerCompressor(enc, 32, 22); // 32 bits, 22 contexts
    ic_z = new IntegerCompressor(enc, 32, 20); // 32 bits, 20 contexts
    ic_intensity = new IntegerCompressor(enc, 16, 4);
    m_scan_angle_rank[0] = enc.createSymbolModel(256);
    m_scan_angle_rank[1] = enc.createSymbolModel(256);
    ic_point_source_ID = new IntegerCompressor(enc, 16);
    m_changed_values = enc.createSymbolModel(64);
    for (int i = 0; i < 256; i++)
    {
        m_bit_byte[i] = null;
        m_classification[i] = null;
        m_user_data[i] = null;
    }
}
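// Illustrative sketch (the exact bit order is an assumption): the 64-symbol
// m_changed_values model codes a 6-bit mask naming which of six point fields
// differ from the previous point, so unchanged fields cost almost nothing.
[Flags]
enum ChangedValues : byte
{
    PointSourceID  = 1 << 0, // hypothetical bit assignment, for illustration
    UserData       = 1 << 1,
    ScanAngleRank  = 1 << 2,
    Classification = 1 << 3,
    Intensity      = 1 << 4,
    BitByte        = 1 << 5, // the return-number/flags byte
}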
bool read_chunk_table()
{
    byte[] buffer = new byte[8];

    // read the 8 bytes that store the location of the chunk table
    long chunk_table_start_position;
    try
    {
        if (instream.Read(buffer, 0, 8) != 8) throw new EndOfStreamException();
        chunk_table_start_position = BitConverter.ToInt64(buffer, 0);
    }
    catch
    {
        return false;
    }

    // this is where the chunks start
    long chunks_start = instream.Position;

    if ((chunk_table_start_position + 8) == chunks_start)
    {
        // no choice but to fail if adaptive chunking was used
        if (chunk_size == uint.MaxValue) return false;

        // otherwise we build the chunk table as we read the file
        number_chunks = 0;
        chunk_starts = new List<long>();
        chunk_starts.Add(chunks_start);
        number_chunks++;
        tabled_chunks = 1;
        return true;
    }

    if (!instream.CanSeek)
    {
        // no choice but to fail if adaptive chunking was used
        if (chunk_size == uint.MaxValue) return false;

        // if the stream is not seekable we cannot seek to the chunk table, but we won't need it anyway
        number_chunks = uint.MaxValue - 1;
        tabled_chunks = 0;
        return true;
    }

    if (chunk_table_start_position == -1)
    {
        // the compressor was writing to a non-seekable stream and wrote the chunk table start at the end
        if (instream.Seek(-8, SeekOrigin.End) == 0) return false;
        try
        {
            if (instream.Read(buffer, 0, 8) != 8) throw new EndOfStreamException();
            chunk_table_start_position = BitConverter.ToInt64(buffer, 0);
        }
        catch
        {
            return false;
        }
    }

    // read the chunk table
    try
    {
        instream.Seek(chunk_table_start_position, SeekOrigin.Begin);
        if (instream.Read(buffer, 0, 8) != 8) throw new EndOfStreamException();
        uint version = BitConverter.ToUInt32(buffer, 0);
        if (version != 0) throw new Exception();

        number_chunks = BitConverter.ToUInt32(buffer, 4);
        chunk_totals = null;
        chunk_starts = null;
        if (chunk_size == uint.MaxValue)
        {
            chunk_totals = new uint[number_chunks + 1];
            chunk_totals[0] = 0;
        }
        chunk_starts = new List<long>();
        chunk_starts.Add(chunks_start);
        tabled_chunks = 1;

        if (number_chunks > 0)
        {
            // the table stores differences; decompress them first...
            dec.init(instream);
            IntegerCompressor ic = new IntegerCompressor(dec, 32, 2);
            ic.initDecompressor();
            for (int i = 1; i <= number_chunks; i++)
            {
                if (chunk_size == uint.MaxValue) chunk_totals[i] = (uint)ic.decompress(i > 1 ? (int)chunk_totals[i - 1] : 0, 0);
                chunk_starts.Add(ic.decompress(i > 1 ? (int)chunk_starts[i - 1] : 0, 1));
                tabled_chunks++;
            }
            dec.done();

            // ...then turn them into absolute values with a prefix sum
            for (int i = 1; i <= number_chunks; i++)
            {
                if (chunk_size == uint.MaxValue) chunk_totals[i] += chunk_totals[i - 1];
                chunk_starts[i] += chunk_starts[i - 1];
            }
        }
    }
    catch
    {
        // something went wrong while reading the chunk table
        chunk_totals = null;

        // no choice but to fail if adaptive chunking was used
        if (chunk_size == uint.MaxValue) return false;

        // did we not even get to read the number of chunks?
        if (number_chunks == uint.MaxValue)
        {
            // then the compressor was interrupted before getting a chance to write the chunk table
            number_chunks = 0;
            chunk_starts = new List<long>();
            chunk_starts.Add(chunks_start);
            number_chunks++;
            tabled_chunks = 1;
        }
        else
        {
            // otherwise fix as many additional chunk_starts as possible
            for (int i = 1; i < tabled_chunks; i++)
            {
                chunk_starts[i] += chunk_starts[i - 1];
            }
        }
    }

    if (instream.Seek(chunks_start, SeekOrigin.Begin) == 0) return false;
    return true;
}
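// Reading aid (hypothetical helper, not in the source): the chunk table
// stores entropy-coded differences, and the two loops above first decompress
// them and then prefix-sum them into absolute values. The same recovery in
// isolation:
static long[] AbsoluteStarts(long firstChunkStart, int[] deltas)
{
    long[] starts = new long[deltas.Length + 1];
    starts[0] = firstChunkStart;
    for (int i = 0; i < deltas.Length; i++)
    {
        starts[i + 1] = starts[i] + deltas[i]; // each entry is relative to its predecessor
    }
    return starts;
}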
bool write_chunk_table()
{
    long position = outstream.Position;

    if (chunk_table_start_position != -1) // stream is seekable
    {
        try
        {
            // patch the reserved 8-byte slot with the actual table position
            outstream.Seek(chunk_table_start_position, SeekOrigin.Begin);
            outstream.Write(BitConverter.GetBytes(position), 0, 8);
            outstream.Seek(position, SeekOrigin.Begin);
        }
        catch
        {
            return false;
        }
    }

    try
    {
        uint version = 0;
        outstream.Write(BitConverter.GetBytes(version), 0, 4);
        outstream.Write(BitConverter.GetBytes(chunk_bytes.Count), 0, 4);
    }
    catch
    {
        return false;
    }

    if (chunk_bytes.Count > 0)
    {
        enc.init(outstream);
        IntegerCompressor ic = new IntegerCompressor(enc, 32, 2);
        ic.initCompressor();
        for (int i = 0; i < chunk_bytes.Count; i++)
        {
            if (chunk_size == uint.MaxValue) ic.compress(i != 0 ? (int)chunk_sizes[i - 1] : 0, (int)chunk_sizes[i], 0);
            ic.compress(i != 0 ? (int)chunk_bytes[i - 1] : 0, (int)chunk_bytes[i], 1);
        }
        enc.done();
    }

    if (chunk_table_start_position == -1) // stream is not seekable
    {
        try
        {
            // append the table position at the very end so the reader can find it
            outstream.Write(BitConverter.GetBytes(position), 0, 8);
        }
        catch
        {
            return false;
        }
    }

    return true;
}
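// Companion sketch (hypothetical helper showing the writer-side convention
// this method relies on): on a seekable stream, 8 bytes are reserved up front
// and patched by write_chunk_table; on a non-seekable stream the slot holds -1
// and the real position is appended after the table instead.
static long ReserveChunkTablePointer(Stream outstream, bool seekable)
{
    long slot = outstream.Position;
    outstream.Write(BitConverter.GetBytes(seekable ? 0L : -1L), 0, 8); // placeholder
    return seekable ? slot : -1;
}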