public void parseDHT()
{
    UInt32 headerLength = (uint)input.ReadInt16() - 2; // Subtract the length field itself
    while (headerLength != 0)
    {
        UInt32 b = input.ReadByte();

        UInt32 Tc = (b >> 4);
        if (Tc != 0)
        {
            throw new Exception("parseDHT: Unsupported Table class.");
        }

        UInt32 Th = b & 0xf;
        if (Th > 3)
        {
            throw new Exception("parseDHT: Invalid huffman table destination id.");
        }

        UInt32 acc = 0;
        HuffmanTable t = huff[Th];

        if (t.initialized)
        {
            throw new Exception("parseDHT: Duplicate table definition");
        }

        for (UInt32 i = 0; i < 16; i++)
        {
            t.bits[i + 1] = input.ReadByte();
            acc += t.bits[i + 1];
        }
        t.bits[0] = 0;
        //Common.memset<uint>(t.huffval, 0, sizeof(uint) * t.huffval.Length);

        if (acc > 256)
        {
            throw new Exception("parseDHT: Invalid DHT table.");
        }

        if (headerLength < 1 + 16 + acc)
        {
            throw new Exception("parseDHT: Invalid DHT table length.");
        }

        for (UInt32 i = 0; i < acc; i++)
        {
            t.huffval[i] = input.ReadByte();
        }

        createHuffmanTable(t);
        headerLength -= 1 + 16 + acc;
    }
}
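// Illustrative sketch only (hypothetical data, never referenced by the decoder): the DHT
// payload that parseDHT walks is one Tc/Th byte, sixteen per-length code counts, then
// `acc` symbol values. This example defines two 2-bit codes and two 3-bit codes for the
// symbols 0..3, so parseDHT would consume 1 + 16 + 4 bytes of it, and headerLength for
// such a segment would start at (2 + 1 + 16 + 4) - 2 = 21.
static readonly byte[] exampleDhtPayload =
{
    0x00,                        // Tc = 0 (DC/lossless class), Th = 0
    0, 2, 2, 0, 0, 0, 0, 0,      // bits[1..8]: two codes of length 2, two of length 3
    0, 0, 0, 0, 0, 0, 0, 0,      // bits[9..16]: none
    0, 1, 2, 3                   // huffval[0..3]: symbols in code order
};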
public void initTable(UInt32 huffSelect)
{
    HuffmanTable dctbl1 = huff[0];
    UInt32 acc = 0;
    for (UInt32 i = 0; i < 16; i++)
    {
        dctbl1.bits[i + 1] = nikon_tree[huffSelect][i];
        acc += dctbl1.bits[i + 1];
    }
    dctbl1.bits[0] = 0;

    for (UInt32 i = 0; i < acc; i++)
    {
        dctbl1.huffval[i] = nikon_tree[huffSelect][i + 16];
    }
    createHuffmanTable(dctbl1);
}
/*
 *--------------------------------------------------------------
 *
 * HuffDecode --
 *
 * Taken from Figure F.16: extract next coded symbol from
 * input stream. This should be a macro.
 *
 * Results:
 * Next coded symbol
 *
 * Side effects:
 * Bitstream is parsed.
 *
 *--------------------------------------------------------------
 */
public int HuffDecodeNikon(BitPumpMSB bits)
{
    int rv;
    int l, temp;
    int code, val;

    HuffmanTable dctbl1 = huff[0];

    bits.fill();
    code = (int)bits.peekBitsNoFill(14);
    val = dctbl1.bigTable[code];
    if ((val & 0xff) != 0xff)
    {
        bits.skipBitsNoFill((uint)val & 0xff);
        return(val >> 8);
    }

    rv = 0;
    code = (int)bits.peekByteNoFill();
    val = (int)dctbl1.numbits[code];
    l = val & 15;
    if (l != 0)
    {
        bits.skipBitsNoFill((uint)l);
        rv = val >> 4;
    }
    else
    {
        bits.skipBits(8);
        l = 8;
        while (code > dctbl1.maxcode[l])
        {
            temp = (int)bits.getBitNoFill();
            code = (code << 1) | temp;
            l++;
        }

        if (l > 16)
        {
            throw new Exception("Corrupt JPEG data: bad Huffman code:" + l);
        }
        else
        {
            rv = (int)dctbl1.huffval[dctbl1.valptr[l] + (code - dctbl1.minCode[l])];
        }
    }

    if (rv == 16)
    {
        return(-32768);
    }

    /*
     * Section F.2.2.1: decode the difference and
     * Figure F.12: extend sign bit
     */
    Int32 len = rv & 15;
    Int32 shl = rv >> 4;
    int diff = (int)((bits.getBits((uint)(len - shl)) << 1) + 1) << shl >> 1;
    if ((diff & (1 << (len - 1))) == 0)
    {
        //TODO optimise
        if (shl == 0)
        {
            shl = 1;
        }
        else
        {
            shl = 0;
        }
        diff -= (1 << len) - shl;
    }
    return(diff);
}
void decodeScanLeft4_2_2()
{
    /*
     * _ASSERTE(slicesW.size() < 16);            // We only have 4 bits for slice number.
     * _ASSERTE(!(slicesW.size() > 1 && skipX)); // Check if this is a valid state
     * _ASSERTE(frame.compInfo[0].superH == 2);  // Check if this is a valid state
     * _ASSERTE(frame.compInfo[0].superV == 1);  // Check if this is a valid state
     * _ASSERTE(frame.compInfo[1].superH == 1);  // Check if this is a valid state
     * _ASSERTE(frame.compInfo[1].superV == 1);  // Check if this is a valid state
     * _ASSERTE(frame.compInfo[2].superH == 1);  // Check if this is a valid state
     * _ASSERTE(frame.compInfo[2].superV == 1);  // Check if this is a valid state
     * _ASSERTE(frame.cps == COMPS);
     * _ASSERTE(skipX == 0);
     */
    int COMPS = 3;

    HuffmanTable dctbl1 = huff[frame.compInfo[0].dcTblNo];
    HuffmanTable dctbl2 = huff[frame.compInfo[1].dcTblNo];
    HuffmanTable dctbl3 = huff[frame.compInfo[2].dcTblNo];

    mRaw.metadata.subsampling.x = 2;
    mRaw.metadata.subsampling.y = 1;

    fixed (ushort* d = mRaw.rawData)
    {
        //TODO remove this hack
        byte* draw = (byte*)d;

        //Prepare slices (for CR2)
        UInt32 slices = (UInt32)slicesW.Count * (frame.h - skipY);
        offset = new UInt32[slices + 1];

        UInt32 t_y = 0;
        UInt32 t_x = 0;
        UInt32 t_s = 0;
        UInt32 slice = 0;
        slice_width = new uint[slices];

        // This is divided by comps, since comps pixels are processed at a time
        for (Int32 i = 0; i < slicesW.Count; i++)
        {
            slice_width[i] = (uint)slicesW[i] / 2;
        }

        for (slice = 0; slice < slices; slice++)
        {
            offset[slice] = (uint)((t_x + offX) * mRaw.bpp + ((offY + t_y) * mRaw.pitch)) | (t_s << 28);
            //_ASSERTE((offset[slice] & 0x0fffffff) < mRaw.pitch * mRaw.dim.y);
            t_y++;
            if (t_y >= (frame.h - skipY))
            {
                t_y = 0;
                t_x += slice_width[t_s++];
            }
        }

        // We check the final position. If bad slice sizes are given we risk writing outside the image
        if ((offset[slices - 1] & 0x0fffffff) >= mRaw.pitch * mRaw.dim.y)
        {
            throw new RawDecoderException("LJpegPlain::decodeScanLeft: Last slice out of bounds");
        }
        offset[slices] = offset[slices - 1]; // Extra offset to avoid branch in loop.

        if (skipX != 0)
        {
            slice_width[slicesW.Count - 1] -= skipX;
        }

        // Predictors for components
        UInt16* predict;                     // Prediction pointer
        UInt16* dest = (UInt16*)&draw[offset[0] & 0x0fffffff];

        // Always points to next slice
        slice = 1;
        UInt32 pixInSlice = slice_width[0];

        // Initialize predictors and decode one group.
        UInt32 x = 0;
        int p1;
        int p2;
        int p3;
        // First pixel is not predicted, all others are.
        p1 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl1);
        *dest = (ushort)p1;
        p1 += HuffDecode(ref dctbl1);
        dest[COMPS] = (ushort)p1;
        predict = dest;
        p2 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl2);
        dest[1] = (ushort)p2;
        p3 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl3);
        dest[2] = (ushort)p3;

        // Skip to next
        dest += COMPS * 2;
        x = 2;
        pixInSlice -= 2;

        UInt32 cw = (frame.w - skipX);
        for (UInt32 y = 0; y < (frame.h - skipY); y++)
        {
            for (; x < cw; x += 2)
            {
                if (0 == pixInSlice)
                { // Next slice
                    if (slice > slices)
                    {
                        throw new RawDecoderException("LJpegPlain::decodeScanLeft: Ran out of slices");
                    }
                    UInt32 o = offset[slice++];
                    dest = (UInt16*)&draw[o & 0x0fffffff]; // Adjust destination for next pixel
                    if ((o & 0x0fffffff) > mRaw.pitch * mRaw.dim.y)
                    {
                        throw new RawDecoderException("LJpegPlain::decodeScanLeft: Offset out of bounds");
                    }
                    pixInSlice = slice_width[o >> 28];

                    // If we are at the start of a new line, also update predictors.
                    if (x == 0)
                    {
                        predict = dest;
                    }
                }
                p1 += HuffDecode(ref dctbl1);
                *dest = (ushort)p1;
                p1 += HuffDecode(ref dctbl1);
                dest[COMPS] = (ushort)p1;
                p2 += HuffDecode(ref dctbl2);
                dest[1] = (ushort)p2;
                p3 += HuffDecode(ref dctbl3);
                dest[2] = (ushort)p3;
                dest += COMPS * 2;
                pixInSlice -= 2;
            }

            // Update predictors
            p1 = predict[0];
            p2 = predict[1];
            p3 = predict[2];
            predict = dest;
            x = 0;
            // Check if we are still within the file.
            bits.checkPos();
        }
    }
}
/** * CR2 Slice handling: * In the following code, canon slices are handled in-place, to avoid having to * copy the entire frame afterwards. * The "offset" array is created to easily map slice positions on to the output image. * The offset array size is the number of slices multiplied by height. * Each of these offsets are an offset into the destination image, and it also contains the * slice number (shifted up 28 bits), so it is possible to retrieve the width of each slice. * Every time "components" pixels has been processed the slice size is tested, and output offset * is adjusted if needed. This makes slice handling very "light", since it involves a single * counter, and a predictable branch. * For unsliced images, add one slice with the width of the image. **/ /* * void decodeScanLeftGeneric() * { * * //_ASSERTE(slicesW.size() < 16); // We only have 4 bits for slice number. * //_ASSERTE(!(slicesW.size() > 1 && skipX)); // Check if this is a valid state * * UInt32 comps = frame.cps; // Components * HuffmanTable[] dctbl = new HuffmanTable[4]; // Tables for up to 4 components * UInt16[] predict; // Prediction pointer * // Fast access to supersampling component settings * //this is the number of components in a given block. * * UInt32[] samplesH = new UInt32[4]; * UInt32[] samplesV = new UInt32[4]; * * //byte[] draw = mRaw.rawData; * UInt32 maxSuperH = 1; * UInt32 maxSuperV = 1; * UInt32[] samplesComp = new UInt32[4]; // How many samples per group does this component have * UInt32 pixGroup = 0; // How many pixels per group. * * for (UInt32 i = 0; i < comps; i++) * { * dctbl[i] = huff[frame.compInfo[i].dcTblNo]; * samplesH[i] = frame.compInfo[i].superH; * if (!Common.isPowerOfTwo(samplesH[i])) * throw new RawDecoderException("LJpegPlain::decodeScanLeftGeneric: Horizontal sampling is not power of two."); * maxSuperH = Math.Max(samplesH[i], maxSuperH); * samplesV[i] = frame.compInfo[i].superV; * if (!Common.isPowerOfTwo(samplesV[i])) * throw new RawDecoderException("LJpegPlain::decodeScanLeftGeneric: Vertical sampling is not power of two."); * maxSuperV = Math.Max(samplesV[i], maxSuperV); * samplesComp[i] = samplesV[i] * samplesH[i]; * pixGroup += samplesComp[i]; * } * * mRaw.metadata.subsampling.x = (int)maxSuperH; * mRaw.metadata.subsampling.y = (int)maxSuperV; * * //Prepare slices (for CR2) * UInt32 slices = (UInt32)slicesW.Count * (frame.h - skipY) / maxSuperV; * UInt16[] imagePos = new UInt16[(int)slices + 1]; * uint[] sliceWidth = new uint[(int)slices + 1]; * * UInt32 t_y = 0; * UInt32 t_x = 0; * UInt32 t_s = 0; * UInt32 slice = 0; * UInt32 pitch_s = mRaw.pitch / 2; // Pitch in shorts * slice_width = new uint[slices]; * * // This is divided by comps, since comps pixels are processed at the time * for (Int32 i = 0; i < slicesW.Count; i++) * slice_width[i] = (uint)slicesW[i] / pixGroup / maxSuperH; // This is a guess, but works for sRaw1+2. * * if (skipX != 0 && (maxSuperV > 1 || maxSuperH > 1)) * { * throw new RawDecoderException("LJpegPlain::decodeScanLeftGeneric: Cannot skip right border in subsampled mode"); * } * if (skipX != 0) * { * slice_width[slicesW.Count - 1] -= skipX; * } * * for (slice = 0; slice < slices; slice++) * { * imagePos[slice] = mRaw.getByteAt((t_x + offX) * mRaw.bpp + ((offY + t_y) * mRaw.pitch));//divide by numer of byte * sliceWidth[slice] = slice_width[t_s]; * t_y += maxSuperV; * if (t_y >= (frame.h - skipY)) * { * t_y = 0; * t_x += slice_width[t_s++]; * } * } * slice_width = null; * * // We check the final position. 
If bad slice sizes are given we risk writing outside the image * if (imagePos[slices - 1] >= mRaw.rawData[mRaw.pitch * mRaw.dim.y]) * { * throw new RawDecoderException("LJpegPlain::decodeScanLeft: Last slice out of bounds"); * } * imagePos[slices] = imagePos[slices - 1]; // Extra offset to avoid branch in loop. * sliceWidth[slices] = sliceWidth[slices - 1]; // Extra offset to avoid branch in loop. * * // Predictors for components * int[] p = new int[4]; * UInt16 dest = imagePos[0]; * int destI = 0; * // Always points to next slice * slice = 1; * UInt32 pixInSlice = sliceWidth[0]; * * // Initialize predictors and decode one group. * UInt32 x = 0; * predict = dest; * for (UInt32 i = 0; i < comps; i++) * { * for (UInt32 y2 = 0; y2 < samplesV[i]; y2++) * { * for (UInt32 x2 = 0; x2 < samplesH[i]; x2++) * { * // First pixel is not predicted, all other are. * if (y2 == 0 && x2 == 0) * { * p[i] = ((1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl[i])); * dest[destI] = (ushort)p[i]; * } * else * { * p[i] += HuffDecode(ref dctbl[i]); * // _ASSERTE(p[i] >= 0 && p[i] < 65536); * dest[x2 * comps + y2 * pitch_s] = (ushort)p[i]; * } * } * } * // Set predictor for this component * // Next component * destI++; * } * * // Increment destination to next group * destI += (int)((maxSuperH - 1) * comps); * x = maxSuperH; * pixInSlice -= maxSuperH; * * UInt32 cw = (frame.w - skipX); * for (UInt32 y = 0; y < (frame.h - skipY); y += maxSuperV) * { * for (; x < cw; x += maxSuperH) * { * * if (0 == pixInSlice) * { // Next slice * if (slice > slices) * throw new RawDecoderException("LJpegPlain::decodeScanLeft: Ran out of slices"); * pixInSlice = sliceWidth[slice]; * dest = imagePos[slice]; // Adjust destination for next pixel * * slice++; * // If new are at the start of a new line, also update predictors. * if (x == 0) * predict = dest; * } * * for (UInt32 i = 0; i < comps; i++) * { * for (UInt32 y2 = 0; y2 < samplesV[i]; y2++) * { * for (UInt32 x2 = 0; x2 < samplesH[i]; x2++) * { * p[i] += HuffDecode(ref dctbl[i]); * // _ASSERTE(p[i] >= 0 && p[i] < 65536); * dest[x2 * comps + y2 * pitch_s + destI] = (ushort)p[i]; * } * } * destI++; * } * destI += (int)((maxSuperH * comps) - comps); * pixInSlice -= maxSuperH; * } * * if (skipX != 0) * { * for (UInt32 sx = 0; sx < skipX; sx++) * { * for (UInt32 i = 0; i < comps; i++) * { * HuffDecode(ref dctbl[i]); * } * } * } * * // Update predictors * for (UInt32 i = 0; i < comps; i++) * { * p[i] = predict[i]; * // Ensure, that there is a slice shift at new line * if (!(pixInSlice == 0 || maxSuperV == 1)) * throw new RawDecoderException("LJpegPlain::decodeScanLeftGeneric: Slice not placed at new line"); * } * // Check if we are still within the file. * bits.checkPos(); * predict = dest; * x = 0; * } * }*/ /*************************************************************************/ /* These are often used compression schemes, heavily optimized to decode */ /* that specfic kind of images. */ /*************************************************************************/ void decodeScanLeft4_2_0() { /* * _ASSERTE(slicesW.size() < 16); // We only have 4 bits for slice number. 
 * _ASSERTE(!(slicesW.size() > 1 && skipX)); // Check if this is a valid state
 * _ASSERTE(frame.compInfo[0].superH == 2);  // Check if this is a valid state
 * _ASSERTE(frame.compInfo[0].superV == 2);  // Check if this is a valid state
 * _ASSERTE(frame.compInfo[1].superH == 1);  // Check if this is a valid state
 * _ASSERTE(frame.compInfo[1].superV == 1);  // Check if this is a valid state
 * _ASSERTE(frame.compInfo[2].superH == 1);  // Check if this is a valid state
 * _ASSERTE(frame.compInfo[2].superV == 1);  // Check if this is a valid state
 * _ASSERTE(frame.cps == COMPS);
 * _ASSERTE(skipX == 0);
 */
int COMPS = 3;

HuffmanTable dctbl1 = huff[frame.compInfo[0].dcTblNo];
HuffmanTable dctbl2 = huff[frame.compInfo[1].dcTblNo];
HuffmanTable dctbl3 = huff[frame.compInfo[2].dcTblNo];

mRaw.metadata.subsampling.x = 2;
mRaw.metadata.subsampling.y = 2;

// Fix for Canon 6D mRaw, which has flipped width & height
UInt32 real_h = mCanonFlipDim ? frame.w : frame.h;

fixed (ushort* d = mRaw.rawData)
{
    //TODO remove this hack
    byte* draw = (byte*)d;

    //Prepare slices (for CR2)
    UInt32 slices = (UInt32)slicesW.Count * (real_h - skipY) / 2;
    offset = new UInt32[slices + 1];

    UInt32 t_y = 0;
    UInt32 t_x = 0;
    UInt32 t_s = 0;
    UInt32 slice = 0;
    UInt32 pitch_s = mRaw.pitch / 2; // Pitch in shorts
    slice_width = new uint[slices];

    // This is divided by comps, since comps pixels are processed at a time
    for (Int32 i = 0; i < slicesW.Count; i++)
    {
        slice_width[i] = (uint)(slicesW[i] / COMPS);
    }

    for (slice = 0; slice < slices; slice++)
    {
        offset[slice] = (uint)((t_x + offX) * mRaw.bpp + ((offY + t_y) * mRaw.pitch)) | (t_s << 28);
        //_ASSERTE((offset[slice] & 0x0fffffff) < mRaw.pitch * mRaw.dim.y);
        t_y += 2;
        if (t_y >= (real_h - skipY))
        {
            t_y = 0;
            t_x += slice_width[t_s++];
        }
    }

    // We check the final position. If bad slice sizes are given we risk writing outside the image
    if ((offset[slices - 1] & 0x0fffffff) >= mRaw.pitch * mRaw.dim.y)
    {
        throw new RawDecoderException("LJpegPlain::decodeScanLeft: Last slice out of bounds");
    }
    offset[slices] = offset[slices - 1]; // Extra offset to avoid branch in loop.

    if (skipX != 0)
    {
        slice_width[slicesW.Count - 1] -= skipX;
    }

    // Predictors for components
    UInt16* predict;                     // Prediction pointer
    UInt16* dest = (UInt16*)&draw[offset[0] & 0x0fffffff];

    // Always points to next slice
    slice = 1;
    UInt32 pixInSlice = slice_width[0];

    // Initialize predictors and decode one group.
    UInt32 x = 0;
    int p1;
    int p2;
    int p3;
    // First pixel is not predicted, all others are.
    p1 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl1);
    *dest = (ushort)p1;
    p1 += HuffDecode(ref dctbl1);
    dest[COMPS] = (ushort)p1;
    p1 += HuffDecode(ref dctbl1);
    dest[pitch_s] = (ushort)p1;
    p1 += HuffDecode(ref dctbl1);
    dest[COMPS + pitch_s] = (ushort)p1;
    predict = dest;
    p2 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl2);
    dest[1] = (ushort)p2;
    p3 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl3);
    dest[2] = (ushort)p3;

    // Skip next
    dest += COMPS * 2;
    x = 2;
    pixInSlice -= 2;

    UInt32 cw = (frame.w - skipX);
    for (UInt32 y = 0; y < (frame.h - skipY); y += 2)
    {
        for (; x < cw; x += 2)
        {
            if (0 == pixInSlice)
            { // Next slice
                if (slice > slices)
                {
                    throw new RawDecoderException("LJpegPlain::decodeScanLeft: Ran out of slices");
                }
                UInt32 o = offset[slice++];
                dest = (UInt16*)&draw[o & 0x0fffffff]; // Adjust destination for next pixel
                //_ASSERTE((o & 0x0fffffff) < mRaw.pitch * mRaw.dim.y);
                if ((o & 0x0fffffff) > mRaw.pitch * mRaw.dim.y)
                {
                    throw new RawDecoderException("LJpegPlain::decodeScanLeft: Offset out of bounds");
                }
                pixInSlice = slice_width[o >> 28];

                // If we are at the start of a new line, also update predictors.
                if (x == 0)
                {
                    predict = dest;
                }
            }
            p1 += HuffDecode(ref dctbl1);
            *dest = (ushort)p1;
            p1 += HuffDecode(ref dctbl1);
            dest[COMPS] = (ushort)p1;
            p1 += HuffDecode(ref dctbl1);
            dest[pitch_s] = (ushort)p1;
            p1 += HuffDecode(ref dctbl1);
            dest[pitch_s + COMPS] = (ushort)p1;

            p2 += HuffDecode(ref dctbl2);
            dest[1] = (ushort)p2;
            p3 += HuffDecode(ref dctbl3);
            dest[2] = (ushort)p3;

            dest += COMPS * 2;
            pixInSlice -= 2;
        }

        // Update predictors
        p1 = predict[0];
        p2 = predict[1];
        p3 = predict[2];
        // _ASSERTE(pixInSlice == 0); // Ensure that there is a slice shift at a new line
        // Check if we are still within the file.
        bits.checkPos();

        x = 0;
    }
}
}
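// Sketch (illustrative only, not called by the decoder): the offset-word convention used
// by the slice loops above packs the byte offset into the destination image into the low
// 28 bits and the slice index into the top 4 bits, which is why slicesW.Count must stay
// below 16. The helpers below just make that packing explicit.
static uint PackSliceOffset(uint byteOffset, uint sliceIndex)
{
    // byteOffset must fit in 28 bits, sliceIndex in 4 bits.
    return (byteOffset & 0x0fffffff) | (sliceIndex << 28);
}

static void UnpackSliceOffset(uint packed, out uint byteOffset, out uint sliceIndex)
{
    byteOffset = packed & 0x0fffffff;   // used as "&draw[o & 0x0fffffff]"
    sliceIndex = packed >> 28;          // used as "slice_width[o >> 28]"
}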
/*
 *--------------------------------------------------------------
 *
 * HuffDecode --
 *
 * Taken from Figure F.16: extract next coded symbol from
 * input stream. This should be a macro.
 *
 * Results:
 * Next coded symbol
 *
 * Side effects:
 * Bitstream is parsed.
 *
 *--------------------------------------------------------------
 */
public int HuffDecode(ref HuffmanTable htbl)
{
    int rv;
    int temp;
    int code, val;
    UInt32 l;

    /**
     * First attempt to do complete decode, by using the first 14 bits
     */
    bits.fill();
    code = (int)bits.peekBitsNoFill(14);
    if (htbl.bigTable != null)
    {
        val = htbl.bigTable[code];
        if ((val & 0xff) != 0xff)
        {
            bits.skipBitsNoFill((uint)val & 0xff);
            return(val >> 8);
        }
    }

    /*
     * If the huffman code is less than 8 bits, we can use the fast
     * table lookup to get its value. It's more than 8 bits about
     * 3-4% of the time.
     */
    rv = 0;
    code = code >> 6;
    val = (int)htbl.numbits[code];
    l = (uint)val & 15;
    if (l != 0)
    {
        bits.skipBitsNoFill(l);
        rv = val >> 4;
    }
    else
    {
        bits.skipBitsNoFill(8);
        l = 8;
        while (code > htbl.maxcode[l])
        {
            temp = (int)bits.getBitNoFill();
            code = (code << 1) | temp;
            l++;
        }

        /*
         * With garbage input we may reach the sentinel value l = 17.
         */
        if (l > frame.prec || htbl.valptr[l] == 0xff)
        {
            throw new Exception("Corrupt JPEG data: bad Huffman code:" + l);
        }
        else
        {
            rv = (int)htbl.huffval[htbl.valptr[l] + (code - htbl.minCode[l])];
        }
    }

    if (rv == 16)
    {
        if (mDNGCompatible)
        {
            bits.skipBitsNoFill(16);
        }
        return(-32768);
    }

    // Ensure we have enough bits
    if ((rv + l) > 24)
    {
        if (rv > 16) // There are no values above 16 bits.
        {
            throw new Exception("Corrupt JPEG data: Too many bits requested.");
        }
        else
        {
            bits.fill();
        }
    }

    /*
     * Section F.2.2.1: decode the difference and
     * Figure F.12: extend sign bit
     */
    if (rv != 0)
    {
        int x = (int)bits.getBitsNoFill((uint)rv);
        if ((x & (1 << (rv - 1))) == 0)
        {
            x -= (1 << rv) - 1;
        }
        return(x);
    }
    return(0);
}
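// A minimal sketch (not used by the decoder) of the Figure F.12 "extend" step that
// HuffDecode finishes with: `len` magnitude bits are read from the stream, and if the
// top bit is clear the difference is negative and is remapped into the negative range.
static int ExtendSign(int value, int len)
{
    if (len == 0)
        return 0;
    if ((value & (1 << (len - 1))) == 0)   // top bit clear => negative difference
        value -= (1 << len) - 1;
    return value;
}
// For example, with len = 3 the raw values 0..7 map to -7, -6, -5, -4, 4, 5, 6, 7.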
/************************************
 * Bigtable creation
 *
 * This is expanding the concept of fast lookups.
 *
 * A complete table for 14 arbitrary bits will be
 * created that enables fast lookup of number of bits used,
 * and final delta result.
 * Hit rate is about 90-99% for typical LJPEGs, usually about 98%.
 *
 ************************************/
public void createBigTable(ref HuffmanTable htbl)
{
    UInt32 bits = 14;   // HuffDecode functions must be changed, if this is modified.
    UInt32 size = (uint)(1 << (int)(bits));
    int rv = 0;
    int temp;
    UInt32 l;

    if (htbl.bigTable == null)
    {
        htbl.bigTable = new int[size];
    }
    if (htbl.bigTable == null)
    {
        throw new Exception("Out of memory, failed to allocate " + size * sizeof(int) + " bytes");
    }

    for (UInt32 i = 0; i < size; i++)
    {
        UInt16 input = (ushort)((int)i << 2); // Calculate input value
        int code = input >> 8;                // Get 8 bits
        UInt32 val = htbl.numbits[code];
        l = val & 15;
        if (l != 0)
        {
            rv = (int)val >> 4;
        }
        else
        {
            l = 8;
            while (code > htbl.maxcode[l])
            {
                temp = input >> (int)(15 - l) & 1;
                code = (code << 1) | temp;
                l++;
            }

            /*
             * With garbage input we may reach the sentinel value l = 17.
             */
            if (l > frame.prec || htbl.valptr[l] == 0xff)
            {
                htbl.bigTable[i] = 0xff;
                continue;
            }
            else
            {
                rv = (int)htbl.huffval[htbl.valptr[l] + (code - htbl.minCode[l])];
            }
        }

        if (rv == 16)
        {
            if (mDNGCompatible)
            {
                htbl.bigTable[i] = (-(32768 << 8)) | (16 + (int)l);
            }
            else
            {
                htbl.bigTable[i] = (-(32768 << 8)) | (int)l;
            }
            continue;
        }

        if (rv + l > bits)
        {
            htbl.bigTable[i] = 0xff;
            continue;
        }

        if (rv != 0)
        {
            int x = input >> (int)(16 - l - rv) & ((1 << rv) - 1);
            if ((x & (1 << (rv - 1))) == 0)
            {
                x -= (1 << rv) - 1;
            }
            htbl.bigTable[i] = (x << 8) | ((int)l + rv);
        }
        else
        {
            htbl.bigTable[i] = (int)l;
        }
    }
}
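// Sketch of the bigTable entry layout produced above and consumed in HuffDecode
// (assumption: matches the packing in createBigTable). The low 8 bits hold the total
// number of bits to skip (code length plus difference length), the upper bits hold the
// already sign-extended difference, and a low byte of 0xff marks "not decodable within
// 14 bits, fall back to the per-length tables". Illustrative only, not called.
static bool TryReadBigTableEntry(int entry, out int bitsUsed, out int diff)
{
    bitsUsed = entry & 0xff;
    diff = entry >> 8;
    return bitsUsed != 0xff;    // false => caller must use the slow path instead
}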
public void createHuffmanTable(HuffmanTable htbl)
{
    int p, i, l, lastp, si;
    byte[] huffsize = new byte[257];
    UInt16[] huffcode = new ushort[257];
    UInt16 code;
    int size;
    int value, ll, ul;

    /*
     * Figure C.1: make table of Huffman code length for each symbol
     * Note that this is in code-length order.
     */
    p = 0;
    for (l = 1; l <= 16; l++)
    {
        for (i = 1; i <= (int)htbl.bits[l]; i++)
        {
            huffsize[p++] = (byte)l;
            if (p > 256)
            {
                throw new Exception("createHuffmanTable: Code length too long. Corrupt data.");
            }
        }
    }
    huffsize[p] = 0;
    lastp = p;

    /*
     * Figure C.2: generate the codes themselves
     * Note that this is in code-length order.
     */
    code = 0;
    si = huffsize[0];
    p = 0;
    while (huffsize[p] != 0)
    {
        while (huffsize[p] == si)
        {
            huffcode[p++] = code;
            code++;
        }
        code <<= 1;
        si++;
        if (p > 256)
        {
            throw new Exception("createHuffmanTable: Code length too long. Corrupt data.");
        }
    }

    /*
     * Figure F.15: generate decoding tables
     */
    htbl.minCode[0] = 0;
    htbl.maxcode[0] = 0;
    p = 0;
    for (l = 1; l <= 16; l++)
    {
        if (htbl.bits[l] != 0)
        {
            htbl.valptr[l] = (short)p;
            htbl.minCode[l] = huffcode[p];
            p += (int)htbl.bits[l];
            htbl.maxcode[l] = huffcode[p - 1];
        }
        else
        {
            htbl.valptr[l] = 0xff; // This check must be present to avoid crash on junk
            htbl.maxcode[l] = -1;
        }
        if (p > 256)
        {
            throw new Exception("createHuffmanTable: Code length too long. Corrupt data.");
        }
    }

    /*
     * We put in this value to ensure HuffDecode terminates.
     */
    htbl.maxcode[17] = (int)0xFFFFFL;

    /*
     * Build the numbits, value lookup tables.
     * These tables allow us to gather 8 bits from the bit stream,
     * and immediately look up the size and value of the huffman codes.
     * If size is zero, it means that more than 8 bits are in the huffman
     * code (this happens about 3-4% of the time).
     */
    //Common.memset<uint>(htbl.numbits, 0, sizeof(uint) * htbl.numbits.Length);
    for (p = 0; p < lastp; p++)
    {
        size = huffsize[p];
        if (size <= 8)
        {
            value = (int)htbl.huffval[p];
            code = huffcode[p];
            ll = code << (8 - size);
            if (size < 8)
            {
                ul = (int)((uint)ll | bitMask[24 + size]);
            }
            else
            {
                ul = ll;
            }
            if (ul > 256 || ll > ul)
            {
                throw new Exception("createHuffmanTable: Code length too long. Corrupt data.");
            }
            for (i = ll; i <= ul; i++)
            {
                htbl.numbits[i] = (uint)(size | (value << 4));
            }
        }
    }
    if (mUseBigtable)
    {
        createBigTable(ref htbl);
    }
    htbl.initialized = true;
}
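// A small worked example of the canonical code construction above (Figures C.1/C.2 and
// F.15), using a hypothetical table with bits = { -, 0, 2, 2, 0, ... } and
// huffval = { 0, 1, 2, 3 } (the same layout as the exampleDhtPayload sketch above).
// Codes are assigned in order of increasing length, counting upward within each length:
//
//   symbol 0 -> code 00   (length 2)
//   symbol 1 -> code 01   (length 2)
//   symbol 2 -> code 100  (length 3)
//   symbol 3 -> code 101  (length 3)
//
// which gives minCode[2] = 0b00, maxcode[2] = 0b01, valptr[2] = 0 and
// minCode[3] = 0b100, maxcode[3] = 0b101, valptr[3] = 2.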
void decodeScanLeft4Comps() { int COMPS = 4; // First line HuffmanTable dctbl1 = huff[frame.compInfo[0].dcTblNo]; HuffmanTable dctbl2 = huff[frame.compInfo[1].dcTblNo]; HuffmanTable dctbl3 = huff[frame.compInfo[2].dcTblNo]; HuffmanTable dctbl4 = huff[frame.compInfo[3].dcTblNo]; if (mCanonDoubleHeight) { frame.h *= 2; mRaw.dim = new iPoint2D((int)frame.w * 2, (int)frame.h); } fixed(ushort *d = mRaw.rawData) { //TODO remove this hack byte *draw = (byte *)d; //Prepare slices (for CR2) Int32 slices = slicesW.Count * (int)(frame.h - skipY); uint *offset = stackalloc UInt32[(slices + 1)]; UInt32 t_y = 0; UInt32 t_x = 0; UInt32 t_s = 0; UInt32 slice = 0; for (slice = 0; slice < slices; slice++) { offset[slice] = ((t_x + offX) * mRaw.bpp + ((offY + t_y) * mRaw.pitch)) | (t_s << 28); //_ASSERTE((offset[slice] & 0x0fffffff) < mRaw.pitch * mRaw.dim.y); t_y++; if (t_y == (frame.h - skipY)) { t_y = 0; t_x += (uint)slicesW[(int)t_s++]; } } // We check the final position. If bad slice sizes are given we risk writing outside the image if ((offset[slices - 1] & 0x0fffffff) >= mRaw.pitch * mRaw.dim.y) { throw new RawDecoderException("decodeScanLeft: Last slice out of bounds"); } offset[slices] = offset[slices - 1]; // Extra offset to avoid branch in loop. int *slice_width = stackalloc int[slices]; // This is divided by comps, since comps pixels are processed at the time for (Int32 i = 0; i < slicesW.Count; i++) { slice_width[i] = slicesW[i] / COMPS; } if (skipX != 0) { slice_width[slicesW.Count - 1] -= (int)skipX; } // First pixels are obviously not predicted int p1; int p2; int p3; int p4; UInt16 *dest = (UInt16 *)&draw[offset[0] & 0x0fffffff]; UInt16 *predict = dest; p1 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl1); *dest++ = (ushort)p1; p2 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl2); *dest++ = (ushort)p2; p3 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl3); *dest++ = (ushort)p3; p4 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl4); *dest++ = (ushort)p4; slice = 1; UInt32 pixInSlice = (uint)slice_width[0] - 1; UInt32 cw = (frame.w - skipX); UInt32 x = 1; // Skip first pixels on first line. if (mCanonDoubleHeight) { skipY = frame.h >> 1; } for (UInt32 y = 0; y < (frame.h - skipY); y++) { for (; x < cw; x++) { p1 += HuffDecode(ref dctbl1); *dest++ = (UInt16)p1; p2 += HuffDecode(ref dctbl2); *dest++ = (UInt16)p2; p3 += HuffDecode(ref dctbl3); *dest++ = (UInt16)p3; p4 += HuffDecode(ref dctbl4); *dest++ = (UInt16)p4; if (0 == --pixInSlice) { // Next slice if (slice > slices) { throw new RawDecoderException("decodeScanLeft: Ran out of slices"); } UInt32 o = offset[slice++]; dest = (UInt16 *)&draw[o & 0x0fffffff]; // Adjust destination for next pixel if ((o & 0x0fffffff) > mRaw.pitch * mRaw.dim.y) { throw new RawDecoderException("decodeScanLeft: Offset out of bounds"); } pixInSlice = (uint)slice_width[o >> 28]; } } if (skipX != 0) { for (UInt32 i = 0; i < skipX; i++) { HuffDecode(ref dctbl1); HuffDecode(ref dctbl2); HuffDecode(ref dctbl3); HuffDecode(ref dctbl4); } } bits.checkPos(); p1 = predict[0]; // Predictors for next row p2 = predict[1]; p3 = predict[2]; // Predictors for next row p4 = predict[3]; predict = dest; // Adjust destination for next prediction x = 0; } } }
void decodeScanLeft2Comps() { int COMPS = 2; //_ASSERTE(slicesW.Count < 16); // We only have 4 bits for slice number. //_ASSERTE(!(slicesW.Count > 1 && skipX)); // Check if this is a valid state fixed(ushort *d = mRaw.rawData) { //TODO remove this hack byte *draw = (byte *)d; // First line HuffmanTable dctbl1 = huff[frame.compInfo[0].dcTblNo]; HuffmanTable dctbl2 = huff[frame.compInfo[1].dcTblNo]; //Prepare slices (for CR2) Int32 slices = slicesW.Count * (int)(frame.h - skipY); uint *offset = stackalloc UInt32[(slices + 1)]; UInt32 t_y = 0; UInt32 t_x = 0; UInt32 t_s = 0; UInt32 slice = 0; UInt32 cw = (frame.w - skipX); for (slice = 0; slice < slices; slice++) { offset[slice] = ((t_x + offX) * mRaw.bpp + ((offY + t_y) * mRaw.pitch)) | (t_s << 28); //_ASSERTE((offset[slice] & 0x0fffffff) < mRaw.pitch * mRaw.dim.y); t_y++; if (t_y == (frame.h - skipY)) { t_y = 0; t_x += (uint)slicesW[(int)t_s++]; } } // We check the final position. If bad slice sizes are given we risk writing outside the image if ((offset[slices - 1] & 0x0fffffff) >= mRaw.pitch * mRaw.dim.y) { throw new RawDecoderException("decodeScanLeft: Last slice out of bounds"); } offset[slices] = offset[slices - 1]; // Extra offset to avoid branch in loop. int *slice_width = stackalloc int[slices]; // This is divided by comps, since comps pixels are processed at the time for (Int32 i = 0; i < slicesW.Count; i++) { slice_width[i] = slicesW[i] / COMPS; } if (skipX != 0) { slice_width[slicesW.Count - 1] -= (int)skipX; } // First pixels are obviously not predicted int p1; int p2; UInt16 *dest = (UInt16 *)&draw[offset[0] & 0x0fffffff]; UInt16 *predict = dest; p1 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl1); *dest++ = (ushort)p1; p2 = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl2); *dest++ = (ushort)p2; slice = 1; // Always points to next slice UInt32 pixInSlice = (uint)slice_width[0] - 1; // Skip first pixel UInt32 x = 1; // Skip first pixels on first line. for (UInt32 y = 0; y < (frame.h - skipY); y++) { for (; x < cw; x++) { int diff = HuffDecode(ref dctbl1); p1 += diff; *dest++ = (UInt16)p1; // //_ASSERTE(p1 >= 0 && p1 < 65536); diff = HuffDecode(ref dctbl2); p2 += diff; *dest++ = (UInt16)p2; // //_ASSERTE(p2 >= 0 && p2 < 65536); if (0 == --pixInSlice) { // Next slice if (slice > slices) { throw new RawDecoderException("decodeScanLeft: Ran out of slices"); } UInt32 o = offset[slice++]; dest = (UInt16 *)&draw[o & 0x0fffffff]; // Adjust destination for next pixel if ((o & 0x0fffffff) > mRaw.pitch * mRaw.dim.y) { throw new RawDecoderException("decodeScanLeft: Offset out of bounds"); } pixInSlice = (uint)slice_width[o >> 28]; } } if (skipX != 0) { for (UInt32 i = 0; i < skipX; i++) { HuffDecode(ref dctbl1); HuffDecode(ref dctbl2); } } p1 = predict[0]; // Predictors for next row p2 = predict[1]; predict = dest; // Adjust destination for next prediction x = 0; bits.checkPos(); } } }
/** * CR2 Slice handling: * In the following code, canon slices are handled in-place, to avoid having to * copy the entire frame afterwards. * The "offset" array is created to easily map slice positions on to the output image. * The offset array size is the number of slices multiplied by height. * Each of these offsets are an offset into the destination image, and it also contains the * slice number (shifted up 28 bits), so it is possible to retrieve the width of each slice. * Every time "components" pixels has been processed the slice size is tested, and output offset * is adjusted if needed. This makes slice handling very "light", since it involves a single * counter, and a predictable branch. * For unsliced images, add one slice with the width of the image. **/ void decodeScanLeftGeneric() { //_ASSERTE(slicesW.Count < 16); // We only have 4 bits for slice number. //_ASSERTE(!(slicesW.Count > 1 && skipX)); // Check if this is a valid state UInt32 comps = frame.cps; // Components HuffmanTable[] dctbl = new HuffmanTable[4]; // Tables for up to 4 components UInt16 * predict; // Prediction pointer /* Fast access to supersampling component settings * this is the number of components in a given block. */ UInt32[] samplesH = new UInt32[4]; UInt32[] samplesV = new uint[4]; fixed(ushort *d = mRaw.rawData) { //TODO remove this hack byte * draw = (byte *)d; UInt32 maxSuperH = 1; UInt32 maxSuperV = 1; UInt32[] samplesComp = new UInt32[4]; // How many samples per group does this component have UInt32 pixGroup = 0; // How many pixels per group. for (UInt32 i = 0; i < comps; i++) { dctbl[i] = huff[frame.compInfo[i].dcTblNo]; samplesH[i] = frame.compInfo[i].superH; if (!Common.isPowerOfTwo(samplesH[i])) { throw new RawDecoderException("decodeScanLeftGeneric: Horizontal sampling is not power of two."); } maxSuperH = Math.Max(samplesH[i], maxSuperH); samplesV[i] = frame.compInfo[i].superV; if (!Common.isPowerOfTwo(samplesV[i])) { throw new RawDecoderException("decodeScanLeftGeneric: Vertical sampling is not power of two."); } maxSuperV = Math.Max(samplesV[i], maxSuperV); samplesComp[i] = samplesV[i] * samplesH[i]; pixGroup += samplesComp[i]; } mRaw.metadata.subsampling.x = (int)maxSuperH; mRaw.metadata.subsampling.y = (int)maxSuperV; //Prepare slices (for CR2) Int32 slices = slicesW.Count * (int)((frame.h - skipY) / maxSuperV); UInt16 **imagePos = stackalloc UInt16 *[(slices + 1)]; int * sliceWidth = stackalloc int[(slices + 1)]; UInt32 t_y = 0; UInt32 t_x = 0; UInt32 t_s = 0; UInt32 slice = 0; UInt32 pitch_s = mRaw.pitch / 2; // Pitch in shorts int *slice_width = stackalloc int[slices]; // This is divided by comps, since comps pixels are processed at the time for (Int32 i = 0; i < slicesW.Count; i++) { slice_width[i] = (int)(slicesW[i] / pixGroup / maxSuperH); // This is a guess, but works for sRaw1+2. } if (skipX != 0 && (maxSuperV > 1 || maxSuperH > 1)) { throw new RawDecoderException("decodeScanLeftGeneric: Cannot skip right border in subsampled mode"); } if (skipX != 0) { slice_width[slicesW.Count - 1] -= (int)skipX; } for (slice = 0; slice < slices; slice++) { imagePos[slice] = (UInt16 *)&draw[(t_x + offX) * mRaw.bpp + ((offY + t_y) * mRaw.pitch)]; sliceWidth[slice] = slice_width[t_s]; t_y += maxSuperV; if (t_y >= (frame.h - skipY)) { t_y = 0; t_x += (uint)slice_width[t_s++]; } } slice_width = null; // We check the final position. 
// If bad slice sizes are given we risk writing outside the image
fixed (ushort* t = mRaw.rawData)
{
    if (imagePos[slices - 1] >= t + mRaw.pitch * mRaw.dim.y / 2)
    {
        throw new RawDecoderException("decodeScanLeft: Last slice out of bounds");
    }
}
imagePos[slices] = imagePos[slices - 1];    // Extra offset to avoid branch in loop.
sliceWidth[slices] = sliceWidth[slices - 1]; // Extra offset to avoid branch in loop.

// Predictors for components
int[] p = new int[4];
UInt16* dest = imagePos[0];

// Always points to next slice
slice = 1;
UInt32 pixInSlice = (uint)sliceWidth[0];

// Initialize predictors and decode one group.
UInt32 x = 0;
predict = dest;
for (UInt32 i = 0; i < comps; i++)
{
    for (UInt32 y2 = 0; y2 < samplesV[i]; y2++)
    {
        for (UInt32 x2 = 0; x2 < samplesH[i]; x2++)
        {
            // First pixel is not predicted, all others are.
            if (y2 == 0 && x2 == 0)
            {
                p[i] = (1 << (int)(frame.prec - Pt - 1)) + HuffDecode(ref dctbl[i]);
                *dest = (ushort)p[i];
            }
            else
            {
                p[i] += HuffDecode(ref dctbl[i]);
                //_ASSERTE(p[i] >= 0 && p[i] < 65536);
                dest[x2 * comps + y2 * pitch_s] = (ushort)p[i];
            }
        }
    }
    // Set predictor for this component
    // Next component
    dest++;
}

// Increment destination to next group
dest += (maxSuperH - 1) * comps;
x = maxSuperH;
pixInSlice -= maxSuperH;

UInt32 cw = (frame.w - skipX);
for (Int32 y = 0; y < (frame.h - skipY); y += (int)maxSuperV)
{
    for (; x < cw; x += maxSuperH)
    {
        if (0 == pixInSlice)
        { // Next slice
            if (slice > slices)
            {
                throw new RawDecoderException("decodeScanLeft: Ran out of slices");
            }
            pixInSlice = (uint)sliceWidth[slice];
            dest = imagePos[slice]; // Adjust destination for next pixel
            slice++;
            // If we are at the start of a new line, also update predictors.
            if (x == 0)
            {
                predict = dest;
            }
        }

        for (Int32 i = 0; i < comps; i++)
        {
            for (Int32 y2 = 0; y2 < samplesV[i]; y2++)
            {
                for (Int32 x2 = 0; x2 < samplesH[i]; x2++)
                {
                    p[i] += HuffDecode(ref dctbl[i]);
                    //_ASSERTE(p[i] >= 0 && p[i] < 65536);
                    dest[x2 * comps + y2 * pitch_s] = (ushort)p[i];
                }
            }
            dest++;
        }
        dest += (maxSuperH * comps) - comps;
        pixInSlice -= maxSuperH;
    }

    if (skipX != 0)
    {
        for (UInt32 sx = 0; sx < skipX; sx++)
        {
            for (UInt32 i = 0; i < comps; i++)
            {
                HuffDecode(ref dctbl[i]);
            }
        }
    }

    // Update predictors
    for (UInt32 i = 0; i < comps; i++)
    {
        p[i] = predict[i];
        // Ensure that there is a slice shift at a new line
        if (!(pixInSlice == 0 || maxSuperV == 1))
        {
            throw new RawDecoderException("decodeScanLeftGeneric: Slice not placed at new line");
        }
    }
    // Check if we are still within the file.
    bits.checkPos();
    predict = dest;
    x = 0;
}
}
}