/// <summary>
/// A Webp lossless image can go through four different types of transformation before being entropy encoded.
/// This will reverse the transformations, if any are present.
/// </summary>
/// <param name="decoder">The decoder holding the transformation infos.</param>
/// <param name="pixelData">The pixel data to apply the transformation.</param>
/// <param name="memoryAllocator">The memory allocator is needed to allocate memory during the predictor transform.</param>
public static void ApplyInverseTransforms(Vp8LDecoder decoder, Span<uint> pixelData, MemoryAllocator memoryAllocator)
{
    // Transforms were applied in order by the encoder, so they are undone here in reverse order.
    List<Vp8LTransform> transforms = decoder.Transforms;
    int idx = transforms.Count;
    while (idx-- > 0)
    {
        Vp8LTransform current = transforms[idx];
        switch (current.TransformType)
        {
            case Vp8LTransformType.PredictorTransform:
                // The predictor inverse needs a scratch buffer of the same length as the pixel data.
                using (IMemoryOwner<uint> scratch = memoryAllocator.Allocate<uint>(pixelData.Length, AllocationOptions.Clean))
                {
                    LosslessUtils.PredictorInverseTransform(current, pixelData, scratch.GetSpan());
                }

                break;
            case Vp8LTransformType.SubtractGreen:
                LosslessUtils.AddGreenToBlueAndRed(pixelData);
                break;
            case Vp8LTransformType.CrossColorTransform:
                LosslessUtils.ColorSpaceInverseTransform(current, pixelData);
                break;
            case Vp8LTransformType.ColorIndexingTransform:
                LosslessUtils.ColorIndexInverseTransform(current, pixelData);
                break;
        }
    }
}
/// <summary>
/// Refreshes the decoder dimensions and the derived Huffman tiling metadata
/// (tile grid width and the column mask used to detect tile changes).
/// </summary>
/// <param name="decoder">The decoder to update.</param>
/// <param name="width">The new image width.</param>
/// <param name="height">The new image height.</param>
private void UpdateDecoder(Vp8LDecoder decoder, int width, int height)
{
    decoder.Width = width;
    decoder.Height = height;

    int subSampleBits = decoder.Metadata.HuffmanSubSampleBits;
    decoder.Metadata.HuffmanXSize = LosslessUtils.SubSampleSize(width, subSampleBits);
    if (subSampleBits == 0)
    {
        // Zero sub-sample bits: a single tile covers the whole image, so every column matches the mask.
        decoder.Metadata.HuffmanMask = ~0;
    }
    else
    {
        decoder.Metadata.HuffmanMask = (1 << subSampleBits) - 1;
    }
}
/// <summary>
/// Decodes the image from the stream using the bitreader.
/// </summary>
/// <typeparam name="TPixel">The pixel format.</typeparam>
/// <param name="pixels">The pixel buffer to store the decoded data.</param>
/// <param name="width">The width of the image.</param>
/// <param name="height">The height of the image.</param>
public void Decode<TPixel>(Buffer2D<TPixel> pixels, int width, int height)
    where TPixel : unmanaged, IPixel<TPixel>
{
    // Parse the headers/transforms, decode the LZ77-encoded data, then convert to the target pixel format.
    using var decoder = new Vp8LDecoder(width, height, this.memoryAllocator);
    this.DecodeImageStream(decoder, width, height, true);
    this.DecodeImageData(decoder, decoder.Pixels.Memory.Span);
    this.DecodePixelValues(decoder, pixels, width, height);
}
/// <summary>
/// Reads the transformations, if any are present.
/// </summary>
/// <param name="xSize">The width of the image.</param>
/// <param name="ySize">The height of the image.</param>
/// <param name="decoder">Vp8LDecoder where the transformations will be stored.</param>
private void ReadTransformation(int xSize, int ySize, Vp8LDecoder decoder)
{
    var transformType = (Vp8LTransformType)this.bitReader.ReadValue(2);
    var transform = new Vp8LTransform(transformType, xSize, ySize);

    // Each transform is allowed to be used only once.
    for (int i = 0; i < decoder.Transforms.Count; i++)
    {
        if (decoder.Transforms[i].TransformType == transform.TransformType)
        {
            WebpThrowHelper.ThrowImageFormatException("Each transform can only be present once");
        }
    }

    switch (transformType)
    {
        case Vp8LTransformType.SubtractGreen:
            // There is no data associated with this transform.
            break;

        case Vp8LTransformType.ColorIndexingTransform:
        {
            // The transform data contains the color table size and the entries in the color table.
            // 8 bit value for the color table size, stored minus one.
            uint numColors = this.bitReader.ReadValue(8) + 1;

            // Small palettes allow packing multiple pixel indices into a single byte.
            int packedBits;
            if (numColors > 16)
            {
                packedBits = 0;
            }
            else if (numColors > 4)
            {
                packedBits = 1;
            }
            else if (numColors > 2)
            {
                packedBits = 2;
            }
            else
            {
                packedBits = 3;
            }

            transform.Bits = packedBits;
            using (IMemoryOwner<uint> colorMap = this.DecodeImageStream(decoder, (int)numColors, 1, false))
            {
                int finalNumColors = 1 << (8 >> transform.Bits);
                IMemoryOwner<uint> newColorMap = this.memoryAllocator.Allocate<uint>(finalNumColors, AllocationOptions.Clean);
                LosslessUtils.ExpandColorMap((int)numColors, colorMap.GetSpan(), newColorMap.GetSpan());
                transform.Data = newColorMap;
            }

            break;
        }

        case Vp8LTransformType.PredictorTransform:
        case Vp8LTransformType.CrossColorTransform:
        {
            // The first 3 bits define the block width and height in number of bits (stored minus two).
            transform.Bits = (int)this.bitReader.ReadValue(3) + 2;
            int blockWidth = LosslessUtils.SubSampleSize(transform.XSize, transform.Bits);
            int blockHeight = LosslessUtils.SubSampleSize(transform.YSize, transform.Bits);
            transform.Data = this.DecodeImageStream(decoder, blockWidth, blockHeight, false);
            break;
        }
    }

    decoder.Transforms.Add(transform);
}
/// <summary>
/// Undoes any encoder transforms on the decoded ARGB data, then converts it
/// row by row into the target pixel buffer.
/// </summary>
/// <typeparam name="TPixel">The pixel format.</typeparam>
/// <param name="decoder">The decoder holding the raw pixel data and transforms.</param>
/// <param name="pixels">The destination pixel buffer.</param>
/// <param name="width">The width of the image.</param>
/// <param name="height">The height of the image.</param>
private void DecodePixelValues<TPixel>(Vp8LDecoder decoder, Buffer2D<TPixel> pixels, int width, int height)
    where TPixel : unmanaged, IPixel<TPixel>
{
    Span<uint> pixelData = decoder.Pixels.GetSpan();

    // Apply reverse transformations, if any are present.
    ApplyInverseTransforms(decoder, pixelData, this.memoryAllocator);

    // Each uint pixel is 4 bytes; reinterpret the buffer and copy per row.
    const int BytesPerPixel = 4;
    int rowBytes = width * BytesPerPixel;
    Span<byte> sourceBytes = MemoryMarshal.Cast<uint, byte>(pixelData);
    for (int y = 0; y < height; y++)
    {
        Span<byte> sourceRow = sourceBytes.Slice(y * rowBytes, rowBytes);
        Span<TPixel> targetRow = pixels.DangerousGetRowSpan(y).Slice(0, width);
        PixelOperations<TPixel>.Instance.FromBgra32Bytes(this.configuration, sourceRow, targetRow, width);
    }
}
/// <summary>
/// The alpha channel of a lossy webp image can be compressed using the lossless webp compression.
/// This method will undo the compression.
/// </summary>
/// <param name="dec">The alpha decoder.</param>
public void DecodeAlphaData(AlphaDecoder dec)
{
    Span<uint> pixelData = dec.Vp8LDec.Pixels.Memory.Span;

    // Alpha values are single bytes, so the uint pixel buffer is reinterpreted as bytes.
    Span<byte> data = MemoryMarshal.Cast<uint, byte>(pixelData);
    int row = 0;
    int col = 0;
    Vp8LDecoder vp8LDec = dec.Vp8LDec;
    int width = vp8LDec.Width;
    int height = vp8LDec.Height;
    Vp8LMetadata hdr = vp8LDec.Metadata;
    int pos = 0; // Current position.
    int end = width * height; // End of data.
    int last = end; // Last pixel to decode.
    int lastRow = height;
    const int lenCodeLimit = WebpConstants.NumLiteralCodes + WebpConstants.NumLengthCodes;
    int mask = hdr.HuffmanMask;

    // NOTE(review): htreeGroup is only null when width * height == 0; the loop below does not run in that case.
    Span<HTreeGroup> htreeGroup = pos < last ? GetHTreeGroupForPos(hdr, col, row) : null;
    while (!this.bitReader.Eos && pos < last)
    {
        // Only update when changing tile.
        if ((col & mask) == 0)
        {
            htreeGroup = GetHTreeGroupForPos(hdr, col, row);
        }

        this.bitReader.FillBitWindow();
        int code = (int)this.ReadSymbol(htreeGroup[0].HTrees[HuffIndex.Green]);
        if (code < WebpConstants.NumLiteralCodes)
        {
            // Literal: the symbol itself is the alpha value.
            data[pos] = (byte)code;
            ++pos;
            ++col;
            if (col >= width)
            {
                col = 0;
                ++row;

                // Emit finished rows in batches of NumArgbCacheRows.
                if (row <= lastRow && row % WebpConstants.NumArgbCacheRows == 0)
                {
                    dec.ExtractPalettedAlphaRows(row);
                }
            }
        }
        else if (code < lenCodeLimit)
        {
            // Backward reference: copy 'length' previously decoded bytes from 'dist' back.
            int lengthSym = code - WebpConstants.NumLiteralCodes;
            int length = this.GetCopyLength(lengthSym);
            int distSymbol = (int)this.ReadSymbol(htreeGroup[0].HTrees[HuffIndex.Dist]);
            this.bitReader.FillBitWindow();
            int distCode = this.GetCopyDistance(distSymbol);
            int dist = PlaneCodeToDistance(width, distCode);

            // The copy must stay within already-decoded data and within the buffer end.
            if (pos >= dist && end - pos >= length)
            {
                CopyBlock8B(data, pos, dist, length);
            }
            else
            {
                WebpThrowHelper.ThrowImageFormatException("error while decoding alpha data");
            }

            pos += length;
            col += length;

            // A copy can span several rows; advance row/col accordingly and flush row batches.
            while (col >= width)
            {
                col -= width;
                ++row;
                if (row <= lastRow && row % WebpConstants.NumArgbCacheRows == 0)
                {
                    dec.ExtractPalettedAlphaRows(row);
                }
            }

            // Re-sync the tree group when the copy ended inside a tile (not on a tile boundary).
            if (pos < last && (col & mask) > 0)
            {
                htreeGroup = GetHTreeGroupForPos(hdr, col, row);
            }
        }
        else
        {
            WebpThrowHelper.ThrowImageFormatException("bitstream error while parsing alpha data");
        }

        this.bitReader.Eos = this.bitReader.IsEndOfStream();
    }

    // Process the remaining rows corresponding to last row-block.
    dec.ExtractPalettedAlphaRows(row > lastRow ? lastRow : row);
}
/// <summary>
/// Reads the Huffman code groups from the bitstream (optionally preceded by a meta Huffman
/// "entropy image") and builds the per-group decoding tables stored in the decoder metadata.
/// </summary>
/// <param name="decoder">The decoder receiving the Huffman metadata.</param>
/// <param name="xSize">The (transformed) image width.</param>
/// <param name="ySize">The (transformed) image height.</param>
/// <param name="colorCacheBits">Number of color cache bits; 0 when no color cache is used.</param>
/// <param name="allowRecursion">True only at level 0, where a meta Huffman image may be present.</param>
private void ReadHuffmanCodes(Vp8LDecoder decoder, int xSize, int ySize, int colorCacheBits, bool allowRecursion)
{
    int maxAlphabetSize = 0;
    int numHTreeGroups = 1;
    int numHTreeGroupsMax = 1;

    // If the next bit is zero, there is only one meta Huffman code used everywhere in the image. No more data is stored.
    // If this bit is one, the image uses multiple meta Huffman codes. These meta Huffman codes are stored as an entropy image.
    if (allowRecursion && this.bitReader.ReadBit())
    {
        // Use meta Huffman codes.
        int huffmanPrecision = (int)(this.bitReader.ReadValue(3) + 2);
        int huffmanXSize = LosslessUtils.SubSampleSize(xSize, huffmanPrecision);
        int huffmanYSize = LosslessUtils.SubSampleSize(ySize, huffmanPrecision);
        int huffmanPixels = huffmanXSize * huffmanYSize;
        IMemoryOwner<uint> huffmanImage = this.DecodeImageStream(decoder, huffmanXSize, huffmanYSize, false);
        Span<uint> huffmanImageSpan = huffmanImage.GetSpan();
        decoder.Metadata.HuffmanSubSampleBits = huffmanPrecision;

        // TODO: Isn't huffmanPixels the length of the span?
        for (int i = 0; i < huffmanPixels; i++)
        {
            // The huffman data is stored in red and green bytes; extract the group index
            // and track the largest one to know how many groups to build.
            uint group = (huffmanImageSpan[i] >> 8) & 0xffff;
            huffmanImageSpan[i] = group;
            if (group >= numHTreeGroupsMax)
            {
                numHTreeGroupsMax = (int)group + 1;
            }
        }

        numHTreeGroups = numHTreeGroupsMax;
        decoder.Metadata.HuffmanImage = huffmanImage;
    }

    // Find maximum alphabet size for the hTree group.
    // The green alphabet (j == 0) is extended by the color cache entries, if present.
    for (int j = 0; j < WebpConstants.HuffmanCodesPerMetaCode; j++)
    {
        int alphabetSize = WebpConstants.AlphabetSize[j];
        if (j == 0 && colorCacheBits > 0)
        {
            alphabetSize += 1 << colorCacheBits;
        }

        if (maxAlphabetSize < alphabetSize)
        {
            maxAlphabetSize = alphabetSize;
        }
    }

    // One contiguous table allocation shared by all groups; 'huffmanTable' is the
    // moving window into the not-yet-consumed part of it.
    int tableSize = TableSize[colorCacheBits];
    var huffmanTables = new HuffmanCode[numHTreeGroups * tableSize];
    var hTreeGroups = new HTreeGroup[numHTreeGroups];
    Span<HuffmanCode> huffmanTable = huffmanTables.AsSpan();
    int[] codeLengths = new int[maxAlphabetSize];

    // NOTE(review): numHTreeGroupsMax equals numHTreeGroups at this point (set above), so the
    // loop bound matches the array sizes.
    for (int i = 0; i < numHTreeGroupsMax; i++)
    {
        hTreeGroups[i] = new HTreeGroup(HuffmanUtils.HuffmanPackedTableSize);
        HTreeGroup hTreeGroup = hTreeGroups[i];
        int totalSize = 0;
        bool isTrivialLiteral = true;
        int maxBits = 0;
        codeLengths.AsSpan().Clear();

        // Read the five Huffman codes of this group (green, red, blue, alpha, distance).
        for (int j = 0; j < WebpConstants.HuffmanCodesPerMetaCode; j++)
        {
            int alphabetSize = WebpConstants.AlphabetSize[j];
            if (j == 0 && colorCacheBits > 0)
            {
                alphabetSize += 1 << colorCacheBits;
            }

            int size = this.ReadHuffmanCode(alphabetSize, codeLengths, huffmanTable);
            if (size == 0)
            {
                WebpThrowHelper.ThrowImageFormatException("Huffman table size is zero");
            }

            // TODO: Avoid allocation.
            hTreeGroup.HTrees.Add(huffmanTable.Slice(0, size).ToArray());

            HuffmanCode huffTableZero = huffmanTable[0];

            // A literal channel is "trivial" when its code uses zero bits (single-symbol alphabet).
            if (isTrivialLiteral && LiteralMap[j] == 1)
            {
                isTrivialLiteral = huffTableZero.BitsUsed == 0;
            }

            totalSize += huffTableZero.BitsUsed;

            // Advance the shared-table window past the entries consumed by this code.
            huffmanTable = huffmanTable.Slice(size);

            // Accumulate the worst-case bit count over the ARGB channels to decide
            // whether the packed lookup table can be used.
            if (j <= HuffIndex.Alpha)
            {
                int localMaxBits = codeLengths[0];
                int k;
                for (k = 1; k < alphabetSize; ++k)
                {
                    int codeLengthK = codeLengths[k];
                    if (codeLengthK > localMaxBits)
                    {
                        localMaxBits = codeLengthK;
                    }
                }

                maxBits += localMaxBits;
            }
        }

        hTreeGroup.IsTrivialLiteral = isTrivialLiteral;
        hTreeGroup.IsTrivialCode = false;
        if (isTrivialLiteral)
        {
            // All literal channels are single-symbol: precompute the constant ARGB value
            // (green is merged in only when the whole code stream is zero-bit).
            uint red = hTreeGroup.HTrees[HuffIndex.Red][0].Value;
            uint blue = hTreeGroup.HTrees[HuffIndex.Blue][0].Value;
            uint green = hTreeGroup.HTrees[HuffIndex.Green][0].Value;
            uint alpha = hTreeGroup.HTrees[HuffIndex.Alpha][0].Value;
            hTreeGroup.LiteralArb = (alpha << 24) | (red << 16) | blue;
            if (totalSize == 0 && green < WebpConstants.NumLiteralCodes)
            {
                hTreeGroup.IsTrivialCode = true;
                hTreeGroup.LiteralArb |= green << 8;
            }
        }

        hTreeGroup.UsePackedTable = !hTreeGroup.IsTrivialCode && maxBits < HuffmanUtils.HuffmanPackedBits;
        if (hTreeGroup.UsePackedTable)
        {
            this.BuildPackedTable(hTreeGroup);
        }
    }

    decoder.Metadata.NumHTreeGroups = numHTreeGroups;
    decoder.Metadata.HTreeGroups = hTreeGroups;
    decoder.Metadata.HuffmanTables = huffmanTables;
}
/// <summary>
/// Decodes the LZ77-encoded ARGB image data using the previously built Huffman trees,
/// handling literals, backward references and color cache hits.
/// </summary>
/// <param name="decoder">The decoder holding the Huffman metadata and optional color cache.</param>
/// <param name="pixelData">The destination buffer for the decoded ARGB pixels.</param>
public void DecodeImageData(Vp8LDecoder decoder, Span<uint> pixelData)
{
    int lastPixel = 0;
    int width = decoder.Width;
    int height = decoder.Height;
    int row = lastPixel / width;
    int col = lastPixel % width;
    const int lenCodeLimit = WebpConstants.NumLiteralCodes + WebpConstants.NumLengthCodes;
    int colorCacheSize = decoder.Metadata.ColorCacheSize;
    ColorCache colorCache = decoder.Metadata.ColorCache;

    // Symbol ranges: [0, NumLiteralCodes) = literals, [NumLiteralCodes, lenCodeLimit) = lengths,
    // [lenCodeLimit, colorCacheLimit) = color cache indices.
    int colorCacheLimit = lenCodeLimit + colorCacheSize;
    int mask = decoder.Metadata.HuffmanMask;
    Span<HTreeGroup> hTreeGroup = GetHTreeGroupForPos(decoder.Metadata, col, row);
    int totalPixels = width * height;
    int decodedPixels = 0;

    // Pixels in [lastCached, decodedPixels) still need to be inserted into the color cache.
    int lastCached = decodedPixels;
    while (decodedPixels < totalPixels)
    {
        int code;

        // Only look up a new tree group when entering a new Huffman tile.
        if ((col & mask) == 0)
        {
            hTreeGroup = GetHTreeGroupForPos(decoder.Metadata, col, row);
        }

        if (hTreeGroup[0].IsTrivialCode)
        {
            // The whole group decodes to a single constant pixel; no bits need to be read.
            pixelData[decodedPixels] = hTreeGroup[0].LiteralArb;
            this.AdvanceByOne(ref col, ref row, width, colorCache, ref decodedPixels, pixelData, ref lastCached);
            continue;
        }

        this.bitReader.FillBitWindow();
        if (hTreeGroup[0].UsePackedTable)
        {
            code = (int)this.ReadPackedSymbols(hTreeGroup, pixelData, decodedPixels);
            if (this.bitReader.IsEndOfStream())
            {
                break;
            }

            // PackedNonLiteralCode means ReadPackedSymbols already wrote the pixel itself.
            if (code == PackedNonLiteralCode)
            {
                this.AdvanceByOne(ref col, ref row, width, colorCache, ref decodedPixels, pixelData, ref lastCached);
                continue;
            }
        }
        else
        {
            code = (int)this.ReadSymbol(hTreeGroup[0].HTrees[HuffIndex.Green]);
        }

        if (this.bitReader.IsEndOfStream())
        {
            break;
        }

        // Literal
        if (code < WebpConstants.NumLiteralCodes)
        {
            if (hTreeGroup[0].IsTrivialLiteral)
            {
                // Red/blue/alpha are constant for this group; only green (the code) varies.
                pixelData[decodedPixels] = hTreeGroup[0].LiteralArb | ((uint)code << 8);
            }
            else
            {
                uint red = this.ReadSymbol(hTreeGroup[0].HTrees[HuffIndex.Red]);
                this.bitReader.FillBitWindow();
                uint blue = this.ReadSymbol(hTreeGroup[0].HTrees[HuffIndex.Blue]);
                uint alpha = this.ReadSymbol(hTreeGroup[0].HTrees[HuffIndex.Alpha]);
                if (this.bitReader.IsEndOfStream())
                {
                    break;
                }

                // Assemble ARGB; the byte casts truncate each channel to 8 bits.
                pixelData[decodedPixels] = (uint)(((byte)alpha << 24) | ((byte)red << 16) |
                                                  ((byte)code << 8) | (byte)blue);
            }

            this.AdvanceByOne(ref col, ref row, width, colorCache, ref decodedPixels, pixelData, ref lastCached);
        }
        else if (code < lenCodeLimit)
        {
            // Backward reference is used.
            int lengthSym = code - WebpConstants.NumLiteralCodes;
            int length = this.GetCopyLength(lengthSym);
            uint distSymbol = this.ReadSymbol(hTreeGroup[0].HTrees[HuffIndex.Dist]);
            this.bitReader.FillBitWindow();
            int distCode = this.GetCopyDistance((int)distSymbol);
            int dist = PlaneCodeToDistance(width, distCode);
            if (this.bitReader.IsEndOfStream())
            {
                break;
            }

            CopyBlock(pixelData, decodedPixels, dist, length);
            decodedPixels += length;
            col += length;

            // A copy can span multiple rows; re-normalize the position.
            while (col >= width)
            {
                col -= width;
                row++;
            }

            // Re-sync the tree group when the copy ended inside a tile (not on a tile boundary).
            if ((col & mask) != 0)
            {
                hTreeGroup = GetHTreeGroupForPos(decoder.Metadata, col, row);
            }

            // Catch up the color cache with all pixels written by the copy.
            if (colorCache != null)
            {
                while (lastCached < decodedPixels)
                {
                    colorCache.Insert(pixelData[lastCached]);
                    lastCached++;
                }
            }
        }
        else if (code < colorCacheLimit)
        {
            // Color cache should be used.
            int key = code - lenCodeLimit;

            // The cache must be up to date before the lookup.
            while (lastCached < decodedPixels)
            {
                colorCache.Insert(pixelData[lastCached]);
                lastCached++;
            }

            pixelData[decodedPixels] = colorCache.Lookup(key);
            this.AdvanceByOne(ref col, ref row, width, colorCache, ref decodedPixels, pixelData, ref lastCached);
        }
        else
        {
            WebpThrowHelper.ThrowImageFormatException("Webp parsing error");
        }
    }
}
/// <summary>
/// Decodes an image (or sub-image) stream: at level 0 it reads the transforms and leaves
/// the pixel decoding to the caller (returns null); at deeper recursion levels it also
/// decodes the pixel data and returns it.
/// </summary>
/// <param name="decoder">The decoder to populate.</param>
/// <param name="xSize">The width of the (sub-)image.</param>
/// <param name="ySize">The height of the (sub-)image.</param>
/// <param name="isLevel0">True for the top-level image stream; false for recursive sub-streams.</param>
/// <returns>The decoded pixel data, or null at level 0.</returns>
public IMemoryOwner<uint> DecodeImageStream(Vp8LDecoder decoder, int xSize, int ySize, bool isLevel0)
{
    int transformXSize = xSize;
    int transformYSize = ySize;
    int numberOfTransformsPresent = 0;
    if (isLevel0)
    {
        decoder.Transforms = new List<Vp8LTransform>(WebpConstants.MaxNumberOfTransforms);

        // Next bit indicates, if a transformation is present.
        while (this.bitReader.ReadBit())
        {
            // NOTE(review): '>' lets one extra iteration start before throwing; in practice the
            // duplicate-transform check in ReadTransformation throws first — confirm intent.
            if (numberOfTransformsPresent > WebpConstants.MaxNumberOfTransforms)
            {
                WebpThrowHelper.ThrowImageFormatException($"The maximum number of transforms of {WebpConstants.MaxNumberOfTransforms} was exceeded");
            }

            this.ReadTransformation(transformXSize, transformYSize, decoder);

            // Color indexing shrinks the effective width when pixels are packed.
            if (decoder.Transforms[numberOfTransformsPresent].TransformType == Vp8LTransformType.ColorIndexingTransform)
            {
                transformXSize = LosslessUtils.SubSampleSize(transformXSize, decoder.Transforms[numberOfTransformsPresent].Bits);
            }

            numberOfTransformsPresent++;
        }
    }
    else
    {
        decoder.Metadata = new Vp8LMetadata();
    }

    // Color cache.
    bool isColorCachePresent = this.bitReader.ReadBit();
    int colorCacheBits = 0;
    int colorCacheSize = 0;
    if (isColorCachePresent)
    {
        colorCacheBits = (int)this.bitReader.ReadValue(4);

        // Note: According to webpinfo color cache bits of 11 are valid, even though 10 is defined in the source code as maximum.
        // That is why 11 bits is also considered valid here.
        bool colorCacheBitsIsValid = colorCacheBits is >= 1 and <= WebpConstants.MaxColorCacheBits + 1;
        if (!colorCacheBitsIsValid)
        {
            WebpThrowHelper.ThrowImageFormatException("Invalid color cache bits found");
        }
    }

    // Read the Huffman codes (may recurse).
    this.ReadHuffmanCodes(decoder, transformXSize, transformYSize, colorCacheBits, isLevel0);

    // NOTE(review): colorCacheSize is still 0 here and this assignment is overwritten in both
    // branches below — it only matters for state during ReadHuffmanCodes' recursion, if at all.
    decoder.Metadata.ColorCacheSize = colorCacheSize;

    // Finish setting up the color-cache.
    if (isColorCachePresent)
    {
        decoder.Metadata.ColorCache = new ColorCache();
        colorCacheSize = 1 << colorCacheBits;
        decoder.Metadata.ColorCacheSize = colorCacheSize;
        decoder.Metadata.ColorCache.Init(colorCacheBits);
    }
    else
    {
        decoder.Metadata.ColorCacheSize = 0;
    }

    this.UpdateDecoder(decoder, transformXSize, transformYSize);
    if (isLevel0)
    {
        // level 0 complete.
        return (null);
    }

    // Use the Huffman trees to decode the LZ77 encoded data.
    IMemoryOwner<uint> pixelData = this.memoryAllocator.Allocate<uint>(decoder.Width * decoder.Height, AllocationOptions.Clean);
    this.DecodeImageData(decoder, pixelData.GetSpan());
    return (pixelData);
}