private void OnTransferBegin(int total, TransferDirection direction, CompressionState state) {
    if (TransferBegin != null) {
        TransferBegin(total, direction, state);
    }
}
public DevILTranslator() {
    m_importer = new ImageImporter();
    m_imageState = new ImageState();
    m_compState = new CompressionState();
    m_compState.KeepDxtcData = true;
}
public override object Filter(object input, bool closing) {
    PhpBytes bInput = Core.Convert.ObjectToPhpBytes(input);
    if (bInput != null) {
        if (_state == CompressionState.Failed) {
            PhpException.Throw(PhpError.Warning, "using filter in failed state");
            return null;
        }

        if (_state == CompressionState.Finished) {
            PhpException.Throw(PhpError.Warning, "using filter in finished state");
            return null;
        }

        byte[] header = null;
        byte[] footer = null;

        if (_state == CompressionState.Header) {
            header = new byte[Zlib.GZIP_HEADER_LENGTH];
            header[0] = Zlib.GZIP_HEADER[0];
            header[1] = Zlib.GZIP_HEADER[1];
            header[2] = Zlib.Z_DEFLATED;
            header[3] = 0; // bytes 3-8 (flags, modification time, extra flags) remain zero
            header[9] = Zlib.OS_CODE;

            _crc.Init();
            _state = CompressionState.Data;
        }

        int outputOffset = 0;
        byte[] output;

        try {
            output = FilterInner(bInput.ReadonlyData, ref outputOffset, closing);
        }
        catch {
            _state = CompressionState.Failed;
            throw;
        }

        if (output == null) {
            _state = CompressionState.Failed;
            return null;
        }

        // the input should have been consumed completely
        Debug.Assert(outputOffset == bInput.Length);

        _crc.Update(bInput.ReadonlyData);

        if (closing) {
            byte[] crcBytes = _crc.Final();

            footer = new byte[Zlib.GZIP_FOOTER_LENGTH];

            // this CRC implementation yields the digest bytes in the
            // opposite order to the C implementation, so reverse them
            footer[0] = crcBytes[3];
            footer[1] = crcBytes[2];
            footer[2] = crcBytes[1];
            footer[3] = crcBytes[0];

            footer[4] = (byte)(_stream.total_in & 0xFF);
            footer[5] = (byte)((_stream.total_in >> 8) & 0xFF);
            footer[6] = (byte)((_stream.total_in >> 16) & 0xFF);
            footer[7] = (byte)((_stream.total_in >> 24) & 0xFF);

            _state = CompressionState.Finished;
        }

        if (header != null || footer != null) {
            int offset = 0;
            byte[] appended = new byte[
                (header != null ? header.Length : 0) +
                output.Length +
                (footer != null ? footer.Length : 0)];

            if (header != null) {
                Buffer.BlockCopy(header, 0, appended, 0, header.Length);
                offset += header.Length;
            }

            if (output != null && output.Length > 0) {
                Buffer.BlockCopy(output, 0, appended, offset, output.Length);
                offset += output.Length;
            }

            if (footer != null) {
                Buffer.BlockCopy(footer, 0, appended, offset, footer.Length);
            }

            return new PhpBytes(appended);
        } else {
            return new PhpBytes(output);
        }
    } else {
        Debug.Fail("GzipCompresionFilter expects chunks to be of type PhpBytes.");
        return null;
    }
}
public GzipCompresionFilter(int level, DeflateFilterMode mode)
    : base(level, mode) {
    _crc = new PhpHash.HashPhpResource.CRC32B();
    _state = CompressionState.Header;
}
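The header and footer bytes written by Filter above follow the gzip container format (RFC 1952): ten header bytes (magic, compression method, flags, modification time, extra flags, OS code) and an eight-byte trailer holding the CRC-32 of the uncompressed data followed by its length modulo 2^32, both little-endian. A minimal stand-alone sketch of building that trailer (the crc and totalIn arguments are assumed inputs, not part of the API above):

static class GzipTrailer {
    // Builds the 8-byte gzip trailer: CRC-32, then ISIZE
    // (input length mod 2^32), both little-endian per RFC 1952.
    public static byte[] Build(uint crc, uint totalIn) {
        var footer = new byte[8];
        footer[0] = (byte)(crc & 0xFF);
        footer[1] = (byte)((crc >> 8) & 0xFF);
        footer[2] = (byte)((crc >> 16) & 0xFF);
        footer[3] = (byte)((crc >> 24) & 0xFF);
        footer[4] = (byte)(totalIn & 0xFF);
        footer[5] = (byte)((totalIn >> 8) & 0xFF);
        footer[6] = (byte)((totalIn >> 16) & 0xFF);
        footer[7] = (byte)((totalIn >> 24) & 0xFF);
        return footer;
    }
}

The byte flip applied to crcBytes in Filter exists only because that particular CRC32B implementation returns its digest in the opposite byte order; with the CRC held as a uint, the little-endian write above is all that is required.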
public void SetCompressionSign(CompressionState state) {
    _asyncOp.Post(() => _compressPicture.Visible = state == CompressionState.On);
}
/**
 * Method "mainQSort3", file "blocksort.c", BZip2 1.0.2
 */
private void mainQSort3(CompressionState dataShadow, int loSt, int hiSt, int dSt) {
    int[] stack_ll = dataShadow.stack_ll;
    int[] stack_hh = dataShadow.stack_hh;
    int[] stack_dd = dataShadow.stack_dd;
    int[] fmap = dataShadow.fmap;
    byte[] block = dataShadow.block;

    stack_ll[0] = loSt;
    stack_hh[0] = hiSt;
    stack_dd[0] = dSt;

    for (int sp = 1; --sp >= 0;) {
        int lo = stack_ll[sp];
        int hi = stack_hh[sp];
        int d = stack_dd[sp];

        if ((hi - lo < SMALL_THRESH) || (d > DEPTH_THRESH)) {
            if (mainSimpleSort(dataShadow, lo, hi, d)) {
                return;
            }
        } else {
            int d1 = d + 1;
            int med = med3(block[fmap[lo] + d1],
                           block[fmap[hi] + d1],
                           block[fmap[(lo + hi) >> 1] + d1]) & 0xff;

            int unLo = lo;
            int unHi = hi;
            int ltLo = lo;
            int gtHi = hi;

            while (true) {
                while (unLo <= unHi) {
                    int n = (block[fmap[unLo] + d1] & 0xff) - med;
                    if (n == 0) {
                        int temp = fmap[unLo];
                        fmap[unLo++] = fmap[ltLo];
                        fmap[ltLo++] = temp;
                    } else if (n < 0) {
                        unLo++;
                    } else {
                        break;
                    }
                }

                while (unLo <= unHi) {
                    int n = (block[fmap[unHi] + d1] & 0xff) - med;
                    if (n == 0) {
                        int temp = fmap[unHi];
                        fmap[unHi--] = fmap[gtHi];
                        fmap[gtHi--] = temp;
                    } else if (n > 0) {
                        unHi--;
                    } else {
                        break;
                    }
                }

                if (unLo <= unHi) {
                    int temp = fmap[unLo];
                    fmap[unLo++] = fmap[unHi];
                    fmap[unHi--] = temp;
                } else {
                    break;
                }
            }

            if (gtHi < ltLo) {
                stack_ll[sp] = lo;
                stack_hh[sp] = hi;
                stack_dd[sp] = d1;
                sp++;
            } else {
                int n = ((ltLo - lo) < (unLo - ltLo)) ? (ltLo - lo) : (unLo - ltLo);
                vswap(fmap, lo, unLo - n, n);
                int m = ((hi - gtHi) < (gtHi - unHi)) ? (hi - gtHi) : (gtHi - unHi);
                vswap(fmap, unLo, hi - m + 1, m);

                n = lo + unLo - ltLo - 1;
                m = hi - (gtHi - unHi) + 1;

                stack_ll[sp] = lo;
                stack_hh[sp] = n;
                stack_dd[sp] = d;
                sp++;

                stack_ll[sp] = n + 1;
                stack_hh[sp] = m - 1;
                stack_dd[sp] = d1;
                sp++;

                stack_ll[sp] = m;
                stack_hh[sp] = hi;
                stack_dd[sp] = d;
                sp++;
            }
        }
    }
}
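mainQSort3 is the three-way-partition quicksort from blocksort.c: it partitions on the median of the first, middle, and last block bytes at depth d1, and recurses via an explicit stack instead of call recursion. The med3 helper it calls is not part of this excerpt; a minimal sketch consistent with the call site (a standard median-of-three, mirroring mmed3 in bzip2's blocksort.c):

private static byte med3(byte a, byte b, byte c) {
    // Return the median of the three byte values.
    return (a < b)
        ? (b < c ? b : (a < c ? c : a))
        : (b > c ? b : (a > c ? c : a));
}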
/**
 * This is the most hammered method of this class.
 *
 * <p>
 * This is the version using unrolled loops.
 * </p>
 */
private bool mainSimpleSort(CompressionState dataShadow, int lo, int hi, int d) {
    int bigN = hi - lo + 1;
    if (bigN < 2) {
        return this.firstAttempt && (this.workDone > this.workLimit);
    }

    int hp = 0;
    while (increments[hp] < bigN) {
        hp++;
    }

    int[] fmap = dataShadow.fmap;
    char[] quadrant = dataShadow.quadrant;
    byte[] block = dataShadow.block;
    int lastShadow = this.last;
    int lastPlus1 = lastShadow + 1;
    bool firstAttemptShadow = this.firstAttempt;
    int workLimitShadow = this.workLimit;
    int workDoneShadow = this.workDone;

    // The following block contains unrolled code which could be
    // shortened by coding it in additional loops.

    // HP:
    while (--hp >= 0) {
        int h = increments[hp];
        int mj = lo + h - 1;

        for (int i = lo + h; i <= hi;) {
            // copy
            for (int k = 3; (i <= hi) && (--k >= 0); i++) {
                int v = fmap[i];
                int vd = v + d;
                int j = i;

                // for (int a;
                //      (j > mj) && mainGtU((a = fmap[j - h]) + d, vd,
                //                          block, quadrant, lastShadow);
                //      j -= h) {
                //     fmap[j] = a;
                // }
                //
                // unrolled version:

                // start inline mainGTU
                bool onceRunned = false;
                int a = 0;

            HAMMER:
                while (true) {
                    if (onceRunned) {
                        fmap[j] = a;
                        if ((j -= h) <= mj) {
                            goto END_HAMMER;
                        }
                    } else {
                        onceRunned = true;
                    }

                    a = fmap[j - h];
                    int i1 = a + d;
                    int i2 = vd;

                    // The following could be done in a loop,
                    // but is unrolled for performance:
                    if (block[i1 + 1] == block[i2 + 1]) {
                        if (block[i1 + 2] == block[i2 + 2]) {
                            if (block[i1 + 3] == block[i2 + 3]) {
                                if (block[i1 + 4] == block[i2 + 4]) {
                                    if (block[i1 + 5] == block[i2 + 5]) {
                                        if (block[(i1 += 6)] == block[(i2 += 6)]) {
                                            int x = lastShadow;
                                        X:
                                            while (x > 0) {
                                                x -= 4;

                                                if (block[i1 + 1] == block[i2 + 1]) {
                                                    if (quadrant[i1] == quadrant[i2]) {
                                                        if (block[i1 + 2] == block[i2 + 2]) {
                                                            if (quadrant[i1 + 1] == quadrant[i2 + 1]) {
                                                                if (block[i1 + 3] == block[i2 + 3]) {
                                                                    if (quadrant[i1 + 2] == quadrant[i2 + 2]) {
                                                                        if (block[i1 + 4] == block[i2 + 4]) {
                                                                            if (quadrant[i1 + 3] == quadrant[i2 + 3]) {
                                                                                if ((i1 += 4) >= lastPlus1) {
                                                                                    i1 -= lastPlus1;
                                                                                }
                                                                                if ((i2 += 4) >= lastPlus1) {
                                                                                    i2 -= lastPlus1;
                                                                                }
                                                                                workDoneShadow++;
                                                                                goto X;
                                                                            } else if (quadrant[i1 + 3] > quadrant[i2 + 3]) {
                                                                                goto HAMMER;
                                                                            } else {
                                                                                goto END_HAMMER;
                                                                            }
                                                                        } else if ((block[i1 + 4] & 0xff) > (block[i2 + 4] & 0xff)) {
                                                                            goto HAMMER;
                                                                        } else {
                                                                            goto END_HAMMER;
                                                                        }
                                                                    } else if (quadrant[i1 + 2] > quadrant[i2 + 2]) {
                                                                        goto HAMMER;
                                                                    } else {
                                                                        goto END_HAMMER;
                                                                    }
                                                                } else if ((block[i1 + 3] & 0xff) > (block[i2 + 3] & 0xff)) {
                                                                    goto HAMMER;
                                                                } else {
                                                                    goto END_HAMMER;
                                                                }
                                                            } else if (quadrant[i1 + 1] > quadrant[i2 + 1]) {
                                                                goto HAMMER;
                                                            } else {
                                                                goto END_HAMMER;
                                                            }
                                                        } else if ((block[i1 + 2] & 0xff) > (block[i2 + 2] & 0xff)) {
                                                            goto HAMMER;
                                                        } else {
                                                            goto END_HAMMER;
                                                        }
                                                    } else if (quadrant[i1] > quadrant[i2]) {
                                                        goto HAMMER;
                                                    } else {
                                                        goto END_HAMMER;
                                                    }
                                                } else if ((block[i1 + 1] & 0xff) > (block[i2 + 1] & 0xff)) {
                                                    goto HAMMER;
                                                } else {
                                                    goto END_HAMMER;
                                                }
                                            } // while x > 0

                                            goto END_HAMMER;
                                        } else {
                                            if ((block[i1] & 0xff) > (block[i2] & 0xff)) {
                                                goto HAMMER;
                                            } else {
                                                goto END_HAMMER;
                                            }
                                        }
                                    } else if ((block[i1 + 5] & 0xff) > (block[i2 + 5] & 0xff)) {
                                        goto HAMMER;
                                    } else {
                                        goto END_HAMMER;
                                    }
                                } else if ((block[i1 + 4] & 0xff) > (block[i2 + 4] & 0xff)) {
                                    goto HAMMER;
                                } else {
                                    goto END_HAMMER;
                                }
                            } else if ((block[i1 + 3] & 0xff) > (block[i2 + 3] & 0xff)) {
                                goto HAMMER;
                            } else {
                                goto END_HAMMER;
                            }
                        } else if ((block[i1 + 2] & 0xff) > (block[i2 + 2] & 0xff)) {
                            goto HAMMER;
                        } else {
                            goto END_HAMMER;
                        }
                    } else if ((block[i1 + 1] & 0xff) > (block[i2 + 1] & 0xff)) {
                        goto HAMMER;
                    } else {
                        goto END_HAMMER;
                    }
                } // HAMMER

            END_HAMMER:
                // end inline mainGTU
                fmap[j] = v;
            }

            if (firstAttemptShadow && (i <= hi) && (workDoneShadow > workLimitShadow)) {
                goto END_HP;
            }
        }
    }

END_HP:
    this.workDone = workDoneShadow;
    return firstAttemptShadow && (workDoneShadow > workLimitShadow);
}
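mainSimpleSort is a Shell sort over the fmap suffix indices, and the increments array it walks is declared elsewhere in the class. In bzip2 and its ports this is the Knuth gap sequence h' = 3h + 1; a sketch of the expected declaration (values as in blocksort.c, the field name matching the usage above):

private static readonly int[] increments = {
    1, 4, 13, 40, 121, 364, 1093, 3280,
    9841, 29524, 88573, 265720, 797161, 2391484
};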
public BZip2Compressor(BitWriter writer, int blockSize) {
    this.blockSize100k = blockSize;
    this.bw = writer;

    // Subtracting 20 leaves a margin of slop (not to say "safety").
    // The maximum size of an encoded run in the output block is 5
    // bytes, so 5 bytes ought to suffice, but the original bzip code
    // used this larger margin, and since it is unclear whether
    // decompression depends on it, it is kept here as well.
    this.outBlockFillThreshold = (blockSize * BZip2.BlockSizeMultiple) - 20;
    this.cstate = new CompressionState(blockSize);
    Reset();
}
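A quick check of the threshold arithmetic, assuming BZip2.BlockSizeMultiple is the usual 100,000-byte bzip2 block unit (an assumption; the constant is defined elsewhere in the library):

const int BlockSizeMultiple = 100000;  // assumed value of BZip2.BlockSizeMultiple
int blockSize = 9;                     // the "900k" block-size setting
int threshold = (blockSize * BlockSizeMultiple) - 20;  // 899980 bytes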
private static void hbMakeCodeLengths(byte[] len, int[] freq, CompressionState state1, int alphaSize, int maxLen) {
    /*
     * Nodes and heap entries run from 1. Entry 0 for both the heap and
     * nodes is a sentinel.
     */
    int[] heap = state1.heap;
    int[] weight = state1.weight;
    int[] parent = state1.parent;

    for (int i = alphaSize; --i >= 0;) {
        weight[i + 1] = (freq[i] == 0 ? 1 : freq[i]) << 8;
    }

    for (bool tooLong = true; tooLong;) {
        tooLong = false;
        int nNodes = alphaSize;
        int nHeap = 0;

        heap[0] = 0;
        weight[0] = 0;
        parent[0] = -2;

        for (int i = 1; i <= alphaSize; i++) {
            parent[i] = -1;
            nHeap++;
            heap[nHeap] = i;

            int zz = nHeap;
            int tmp = heap[zz];
            while (weight[tmp] < weight[heap[zz >> 1]]) {
                heap[zz] = heap[zz >> 1];
                zz >>= 1;
            }
            heap[zz] = tmp;
        }

        while (nHeap > 1) {
            int n1 = heap[1];
            heap[1] = heap[nHeap];
            nHeap--;

            int yy = 0;
            int zz = 1;
            int tmp = heap[1];

            while (true) {
                yy = zz << 1;
                if (yy > nHeap) {
                    break;
                }
                if ((yy < nHeap) && (weight[heap[yy + 1]] < weight[heap[yy]])) {
                    yy++;
                }
                if (weight[tmp] < weight[heap[yy]]) {
                    break;
                }
                heap[zz] = heap[yy];
                zz = yy;
            }
            heap[zz] = tmp;

            int n2 = heap[1];
            heap[1] = heap[nHeap];
            nHeap--;

            yy = 0;
            zz = 1;
            tmp = heap[1];

            while (true) {
                yy = zz << 1;
                if (yy > nHeap) {
                    break;
                }
                if ((yy < nHeap) && (weight[heap[yy + 1]] < weight[heap[yy]])) {
                    yy++;
                }
                if (weight[tmp] < weight[heap[yy]]) {
                    break;
                }
                heap[zz] = heap[yy];
                zz = yy;
            }
            heap[zz] = tmp;

            nNodes++;
            parent[n1] = parent[n2] = nNodes;

            int weight_n1 = weight[n1];
            int weight_n2 = weight[n2];
            weight[nNodes] = (int)(((uint)weight_n1 & 0xffffff00U) +
                                   ((uint)weight_n2 & 0xffffff00U)) |
                             (1 + (((weight_n1 & 0x000000ff) > (weight_n2 & 0x000000ff))
                                   ? (weight_n1 & 0x000000ff)
                                   : (weight_n2 & 0x000000ff)));

            parent[nNodes] = -1;
            nHeap++;
            heap[nHeap] = nNodes;

            tmp = 0;
            zz = nHeap;
            tmp = heap[zz];
            int weight_tmp = weight[tmp];
            while (weight_tmp < weight[heap[zz >> 1]]) {
                heap[zz] = heap[zz >> 1];
                zz >>= 1;
            }
            heap[zz] = tmp;
        }

        for (int i = 1; i <= alphaSize; i++) {
            int j = 0;
            int k = i;

            for (int parent_k; (parent_k = parent[k]) >= 0;) {
                k = parent_k;
                j++;
            }

            len[i - 1] = (byte)j;
            if (j > maxLen) {
                tooLong = true;
            }
        }

        if (tooLong) {
            for (int i = 1; i < alphaSize; i++) {
                int j = weight[i] >> 8;
                j = 1 + (j >> 1);
                weight[i] = j << 8;
            }
        }
    }
}
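The dense weight[nNodes] expression packs two fields into one int: the upper 24 bits carry the summed symbol frequencies and the low 8 bits carry the tree depth, so over-long codes can be detected cheaply while the heap is ordered by frequency. Restated as a helper (illustrative only; it mirrors the ADDWEIGHTS macro in bzip2's huffman.c):

private static int AddWeights(int w1, int w2) {
    // Upper 24 bits: sum of the two frequencies.
    // Low 8 bits: 1 + max of the two depths.
    uint freqSum = ((uint)w1 & 0xffffff00U) + ((uint)w2 & 0xffffff00U);
    int depth1 = w1 & 0xff;
    int depth2 = w2 & 0xff;
    return (int)freqSum | (1 + Math.Max(depth1, depth2));
}

When any resulting code length exceeds maxLen, the outer loop halves the frequencies (keeping each at least 1) and rebuilds the tree, flattening it until every length fits.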
/// <summary>
/// Reads the input file, determines its current compression state, and reverses that state by default.
/// </summary>
/// <param name="input">Cache file to compress or decompress.</param>
/// <param name="engineDb">The engine database to use to process the cache file.</param>
/// <param name="desiredState">Optional. When set to a value other than CompressionState.Null, overrides the default reversing behavior: no action is taken if the cache file is already in that state.</param>
/// <returns>The resulting compression state of the cache file.</returns>
public static CompressionState HandleCompression(string input, EngineDatabase engineDb, CompressionState desiredState = CompressionState.Null) {
    CompressionState state;
    EngineType type;

    using (FileStream fileStream = File.OpenRead(input)) {
        var reader = new EndianReader(fileStream, Endian.BigEndian);
        state = DetermineState(reader, engineDb, out type);
    }

    if (state == desiredState) {
        return state;
    }

    switch (state) {
        default:
        case CompressionState.Null:
            return state;

        case CompressionState.Compressed:
            if (type == EngineType.SecondGeneration) {
                DecompressSecondGen(input);
                return CompressionState.Decompressed;
            }
            return state;

        case CompressionState.Decompressed:
            if (type == EngineType.SecondGeneration) {
                CompressSecondGen(input);
                return CompressionState.Compressed;
            }
            return state;
    }
}
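A minimal call-site sketch, assuming an EngineDatabase has already been loaded (the path and variable names are illustrative, not part of the API above):

// Force a cache file into the decompressed state; this is a no-op
// if DetermineState already reports Decompressed.
CompressionState result = HandleCompression(
    @"C:\maps\example.map",   // hypothetical cache file
    engineDb,                 // assumed pre-loaded EngineDatabase
    CompressionState.Decompressed);

Note that only second-generation engines are converted; for any other engine type the method returns the detected state unchanged.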
private void OnTransferBegin(int total, TransferDirection direction, CompressionState state) {
    TransferBegin?.Invoke(total, direction, state);
}
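The two OnTransferBegin variants in this listing (the explicit null check and the C# 6 null-conditional ?.Invoke) both guard against raising an event that has no subscribers. The declaration they assume is not shown; a sketch (the delegate name is illustrative):

// Hypothetical declaration matching the raise sites above.
public delegate void TransferBeginHandler(int total, TransferDirection direction, CompressionState state);
public event TransferBeginHandler TransferBegin;

Of the two raise patterns, TransferBegin?.Invoke(...) is preferable: it reads the delegate field only once, so it cannot throw if a subscriber detaches between a separate null check and the call.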