Example #1
        public static SpectralData LoadFromImageSharpDecoder(OrigJpegDecoderCore decoder)
        {
            OrigComponent[] srcComponents = decoder.Components;
            LibJpegTools.ComponentData[] destComponents = srcComponents.Select(LibJpegTools.ComponentData.Load).ToArray();

            return new SpectralData(destComponents);
        }
Example #2
        /// <summary>
        /// Initializes <see cref="SpectralBlocks"/>
        /// </summary>
        /// <param name="memoryManager">The <see cref="MemoryManager"/> to use for buffer allocations.</param>
        /// <param name="decoder">The <see cref="OrigJpegDecoderCore"/> instance</param>
        public void InitializeDerivedData(MemoryManager memoryManager, OrigJpegDecoderCore decoder)
        {
            // For 4-component images (either CMYK or YCbCrK), we only support two
            // hv vectors: [0x11 0x11 0x11 0x11] and [0x22 0x11 0x11 0x22].
            // Theoretically, 4-component JPEG images could mix and match hv values
            // but in practice, those two combinations are the only ones in use,
            // and it simplifies the applyBlack code below if we can assume that:
            // - for CMYK, the C and K channels have full samples, and if the M
            // and Y channels subsample, they subsample both horizontally and
            // vertically.
            // - for YCbCrK, the Y and K channels have full samples.
            this.SizeInBlocks = decoder.ImageSizeInMCU.MultiplyBy(this.SamplingFactors);

            if (this.Index == 0 || this.Index == 3)
            {
                this.SubSamplingDivisors = new Size(1, 1);
            }
            else
            {
                OrigComponent c0 = decoder.Components[0];
                this.SubSamplingDivisors = c0.SamplingFactors.DivideBy(this.SamplingFactors);
            }

            this.SpectralBlocks = memoryManager.Allocate2D<Block8x8>(this.SizeInBlocks.Width, this.SizeInBlocks.Height, true);
        }
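
To make the size arithmetic above concrete, here is a minimal stand-alone sketch (plain ints instead of ImageSharp's Size, all values hypothetical) for a 4:2:0 image whose MCU grid is 2x1:

// Hypothetical 4:2:0 layout: luma sampling factors (2, 2), chroma (1, 1),
// MCU grid 2x1 (e.g. a 32x16 pixel baseline image).
int mcuX = 2, mcuY = 1;
int lumaW = 2, lumaH = 2;
int chromaW = 1, chromaH = 1;

// SizeInBlocks = ImageSizeInMCU * SamplingFactors
int lumaBlocksX = mcuX * lumaW;     // 4
int lumaBlocksY = mcuY * lumaH;     // 2
int chromaBlocksX = mcuX * chromaW; // 2
int chromaBlocksY = mcuY * chromaH; // 1

// SubSamplingDivisors = c0.SamplingFactors / SamplingFactors
// (chroma components only; indices 0 and 3 get (1, 1) in the method above)
int divisorX = lumaW / chromaW;     // 2
int divisorY = lumaH / chromaH;     // 2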
Example #3
        public void PostProcess<TPixel>(TestImageProvider<TPixel> provider)
            where TPixel : struct, IPixel<TPixel>
        {
            string imageFile = provider.SourceFileOrDescription;

            using (OrigJpegDecoderCore decoder = JpegFixture.ParseStream(imageFile))
                using (var pp = new JpegImagePostProcessor(Configuration.Default.MemoryManager, decoder))
                    using (var image = new Image<Rgba32>(decoder.ImageWidth, decoder.ImageHeight))
                    {
                        pp.PostProcess(image.Frames.RootFrame);

                        image.DebugSave(provider);

                        ImagingTestCaseUtility testUtil = provider.Utility;
                        testUtil.TestGroupName = nameof(JpegDecoderTests);
                        testUtil.TestName      = JpegDecoderTests.DecodeBaselineJpegOutputName;

                        using (Image<TPixel> referenceImage =
                                   provider.GetReferenceOutputImage<TPixel>(appendPixelTypeToFileName: false))
                        {
                            ImageSimilarityReport report = ImageComparer.Exact.CompareImagesOrFrames(referenceImage, image);

                            this.Output.WriteLine($"*** {imageFile} ***");
                            this.Output.WriteLine($"Difference: " + report.DifferencePercentageString);

                            // ReSharper disable once PossibleInvalidOperationException
                            Assert.True(report.TotalNormalizedDifference.Value < 0.005f);
                        }
                    }
        }
Example #4
        /// <summary>
        /// Read Huffman data from Jpeg scans in <see cref="OrigJpegDecoderCore.InputStream"/>,
        /// and decode it as <see cref="Block8x8"/> into <see cref="OrigComponent.SpectralBlocks"/>.
        ///
        /// The blocks are traversed one MCU at a time. For 4:2:0 chroma
        /// subsampling, there are four Y 8x8 blocks in every 16x16 MCU.
        /// For a baseline 32x16 pixel image, the Y blocks visiting order is:
        /// 0 1 4 5
        /// 2 3 6 7
        /// For progressive images, the interleaved scans (those with component count &gt; 1)
        /// are traversed as above, but non-interleaved scans are traversed left
        /// to right, top to bottom:
        /// 0 1 2 3
        /// 4 5 6 7
        /// Only DC scans (zigStart == 0) can be interleaved. AC scans must have
        /// only one component.
        /// To further complicate matters, for non-interleaved scans, there is no
        /// data for any blocks that are inside the image at the MCU level but
        /// outside the image at the pixel level. For example, a 24x16 pixel 4:2:0
        /// progressive image consists of two 16x16 MCUs. The interleaved scans
        /// will process 8 Y blocks:
        /// 0 1 4 5
        /// 2 3 6 7
        /// The non-interleaved scans will process only 6 Y blocks:
        /// 0 1 2
        /// 3 4 5
        /// </summary>
        /// <param name="decoder">The <see cref="OrigJpegDecoderCore"/> instance</param>
        public void DecodeBlocks(OrigJpegDecoderCore decoder)
        {
            decoder.InputProcessor.ResetErrorState();

            this.blockCounter = 0;
            this.mcuCounter   = 0;
            this.expectedRst  = OrigJpegConstants.Markers.RST0;

            for (int my = 0; my < decoder.MCUCountY; my++)
            {
                for (int mx = 0; mx < decoder.MCUCountX; mx++)
                {
                    this.DecodeBlocksAtMcuIndex(decoder, mx, my);

                    this.mcuCounter++;

                    // Handling restart intervals
                    // Useful info: https://stackoverflow.com/a/8751802
                    if (decoder.IsAtRestartInterval(this.mcuCounter))
                    {
                        this.ProcessRSTMarker(decoder);
                        this.Reset(decoder);
                    }
                }
            }
        }
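
As a stand-alone illustration of the interleaved traversal described in the comment above: for a hypothetical 32x16 pixel 4:2:0 image, hi = vi = 2 and there are two MCUs side by side, and the bx/by formulas used by DecodeBlocksAtMcuIndex reproduce the documented 0 1 4 5 / 2 3 6 7 visiting order:

int hi = 2, vi = 2, mcuCountX = 2, mcuCountY = 1;
int[,] order = new int[vi * mcuCountY, hi * mcuCountX];

int counter = 0;
for (int my = 0; my < mcuCountY; my++)
for (int mx = 0; mx < mcuCountX; mx++)
for (int j = 0; j < hi * vi; j++)
{
    // Same block coordinates as in the interleaved branch of the decoder:
    int bx = (hi * mx) + (j % hi);
    int by = (vi * my) + (j / hi);
    order[by, bx] = counter++;
}

// Writes:
// 0 1 4 5
// 2 3 6 7
for (int row = 0; row < order.GetLength(0); row++)
{
    for (int col = 0; col < order.GetLength(1); col++)
    {
        System.Console.Write(order[row, col] + " ");
    }

    System.Console.WriteLine();
}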
Example #5
        public void ColorSpace_IsDeducedCorrectly(string imageFile, object expectedColorSpaceValue)
        {
            var expectedColorSpace = (JpegColorSpace)expectedColorSpaceValue;

            using (OrigJpegDecoderCore decoder = JpegFixture.ParseStream(imageFile, true))
            {
                Assert.Equal(expectedColorSpace, decoder.ColorSpace);
            }
        }
Example #6
        /// <inheritdoc/>
        public IImageInfo Identify(Configuration configuration, Stream stream)
        {
            Guard.NotNull(stream, "stream");

            using (var decoder = new OrigJpegDecoderCore(configuration, this))
            {
                return decoder.Identify(stream);
            }
        }
Example #7
        /// <inheritdoc/>
        public Image<TPixel> Decode<TPixel>(Configuration configuration, Stream stream)
            where TPixel : struct, IPixel<TPixel>
        {
            Guard.NotNull(stream, nameof(stream));

            using (var decoder = new OrigJpegDecoderCore(configuration, this))
            {
                return decoder.Decode<TPixel>(stream);
            }
        }
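
For context, a hedged usage sketch: callers normally reach this method through ImageSharp's public Image.Load entry points rather than invoking the decoder directly. The file name is hypothetical and the overload shape (Image.Load<TPixel>(Stream, IImageDecoder)) is assumed from the beta these internals target; it may differ between releases:

// Assumed usings: System.IO, SixLabors.ImageSharp,
// SixLabors.ImageSharp.PixelFormats, SixLabors.ImageSharp.Formats.Jpeg.
using (FileStream stream = File.OpenRead("input.jpg"))
using (Image<Rgba32> image = Image.Load<Rgba32>(stream, new JpegDecoder()))
{
    // The frame is fully decoded here; image.Width / image.Height are available.
}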
Example #8
 internal static OrigJpegDecoderCore ParseStream(string testFileName, bool metaDataOnly = false)
 {
     byte[] bytes = TestFile.Create(testFileName).Bytes;
     using (var ms = new MemoryStream(bytes))
     {
         var decoder = new OrigJpegDecoderCore(Configuration.Default, new JpegDecoder());
         decoder.ParseStream(ms, metaDataOnly);
          return decoder;
     }
 }
Example #9
        public void OriginalDecoder_ParseStream_SaveSpectralResult<TPixel>(TestImageProvider<TPixel> provider)
            where TPixel : struct, IPixel<TPixel>
        {
            OrigJpegDecoderCore decoder = new OrigJpegDecoderCore(Configuration.Default, new JpegDecoder());

            byte[] sourceBytes = TestFile.Create(provider.SourceFileOrDescription).Bytes;

            using (var ms = new MemoryStream(sourceBytes))
            {
                decoder.ParseStream(ms, false);

                var data = LibJpegTools.SpectralData.LoadFromImageSharpDecoder(decoder);
                VerifyJpeg.SaveSpectralImage(provider, data);
            }
        }
Example #10
        public void ComponentScalingIsCorrect_1ChannelJpeg()
        {
            using (OrigJpegDecoderCore decoder = JpegFixture.ParseStream(TestImages.Jpeg.Baseline.Jpeg400, true))
            {
                Assert.Equal(1, decoder.ComponentCount);
                Assert.Equal(1, decoder.Components.Length);

                Size expectedSizeInBlocks = decoder.ImageSizeInPixels.DivideRoundUp(8);

                Assert.Equal(expectedSizeInBlocks, decoder.ImageSizeInMCU);

                var           uniform1 = new Size(1, 1);
                OrigComponent c0       = decoder.Components[0];
                VerifyJpeg.VerifyComponent(c0, expectedSizeInBlocks, uniform1, uniform1);
            }
        }
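
The DivideRoundUp call above is plain ceiling division: for a single-component JPEG every 8x8 block is one MCU. A sketch of the same block count for a hypothetical 20x5 pixel grayscale image:

int width = 20, height = 5;
int blocksX = (width + 7) / 8;  // 3
int blocksY = (height + 7) / 8; // 1
// Three 8x8 MCUs in total, matching the "20x5 image is encoded in
// three 8x8 MCUs" note in InitializeCoreData further down.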
Example #11
        public void PrintComponentData(string imageFile)
        {
            StringBuilder bld = new StringBuilder();

            using (OrigJpegDecoderCore decoder = JpegFixture.ParseStream(imageFile, true))
            {
                bld.AppendLine(imageFile);
                bld.AppendLine($"Size:{decoder.ImageSizeInPixels} MCU:{decoder.ImageSizeInMCU}");
                OrigComponent c0 = decoder.Components[0];
                OrigComponent c1 = decoder.Components[1];

                bld.AppendLine($"Luma: SAMP: {c0.SamplingFactors} BLOCKS: {c0.SizeInBlocks}");
                bld.AppendLine($"Chroma: {c1.SamplingFactors} BLOCKS: {c1.SizeInBlocks}");
            }
            this.Output.WriteLine(bld.ToString());
        }
Example #12
        public void DoProcessorStep<TPixel>(TestImageProvider<TPixel> provider)
            where TPixel : struct, IPixel<TPixel>
        {
            string imageFile = provider.SourceFileOrDescription;

            using (OrigJpegDecoderCore decoder = JpegFixture.ParseStream(imageFile))
                using (var pp = new JpegImagePostProcessor(Configuration.Default.MemoryManager, decoder))
                    using (var imageFrame = new ImageFrame<Rgba32>(Configuration.Default.MemoryManager, decoder.ImageWidth, decoder.ImageHeight))
                    {
                        pp.DoPostProcessorStep(imageFrame);

                        JpegComponentPostProcessor[] cp = pp.ComponentProcessors;

                        SaveBuffer(cp[0], provider);
                        SaveBuffer(cp[1], provider);
                        SaveBuffer(cp[2], provider);
                    }
        }
Example #13
        private void DecodeBlocksAtMcuIndex(OrigJpegDecoderCore decoder, int mx, int my)
        {
            for (int scanIndex = 0; scanIndex < this.componentScanCount; scanIndex++)
            {
                this.ComponentIndex = this.pointers.ComponentScan[scanIndex].ComponentIndex;
                OrigComponent component = decoder.Components[this.ComponentIndex];

                this.hi = component.HorizontalSamplingFactor;
                int vi = component.VerticalSamplingFactor;

                for (int j = 0; j < this.hi * vi; j++)
                {
                    if (this.componentScanCount != 1)
                    {
                        this.bx = (this.hi * mx) + (j % this.hi);
                        this.by = (vi * my) + (j / this.hi);
                    }
                    else
                    {
                        int q = decoder.MCUCountX * this.hi;
                        this.bx = this.blockCounter % q;
                        this.by = this.blockCounter / q;
                        this.blockCounter++;
                        if (this.bx * 8 >= decoder.ImageWidth || this.by * 8 >= decoder.ImageHeight)
                        {
                            continue;
                        }
                    }

                    // Find the block at (bx,by) in the component's buffer:
                    ref Block8x8 blockRefOnHeap = ref component.GetBlockReference(this.bx, this.by);

                    // Copy block to stack
                    this.data.Block = blockRefOnHeap;

                    if (!decoder.InputProcessor.ReachedEOF)
                    {
                        this.DecodeBlock(decoder, scanIndex);
                    }

                    // Store the result block:
                    blockRefOnHeap = this.data.Block;
                }
            }
        }
Example #14
        public void VerifySpectralResults_OriginalDecoder<TPixel>(TestImageProvider<TPixel> provider)
            where TPixel : struct, IPixel<TPixel>
        {
            if (!TestEnvironment.IsWindows)
            {
                return;
            }

            OrigJpegDecoderCore decoder = new OrigJpegDecoderCore(Configuration.Default, new JpegDecoder());

            byte[] sourceBytes = TestFile.Create(provider.SourceFileOrDescription).Bytes;

            using (var ms = new MemoryStream(sourceBytes))
            {
                decoder.ParseStream(ms);
                var imageSharpData = LibJpegTools.SpectralData.LoadFromImageSharpDecoder(decoder);

                this.VerifySpectralCorrectness<TPixel>(provider, imageSharpData);
            }
        }
Example #15
        public void ComponentScalingIsCorrect_MultiChannelJpeg(
            string imageFile,
            int componentCount,
            object expectedLumaFactors,
            object expectedChromaFactors)
        {
            Size fLuma   = (Size)expectedLumaFactors;
            Size fChroma = (Size)expectedChromaFactors;

            using (OrigJpegDecoderCore decoder = JpegFixture.ParseStream(imageFile, true))
            {
                Assert.Equal(componentCount, decoder.ComponentCount);
                Assert.Equal(componentCount, decoder.Components.Length);

                OrigComponent c0 = decoder.Components[0];
                OrigComponent c1 = decoder.Components[1];
                OrigComponent c2 = decoder.Components[2];

                var uniform1 = new Size(1, 1);

                Size expectedLumaSizeInBlocks = decoder.ImageSizeInMCU.MultiplyBy(fLuma);

                Size divisor = fLuma.DivideBy(fChroma);

                Size expectedChromaSizeInBlocks = expectedLumaSizeInBlocks.DivideRoundUp(divisor);

                VerifyJpeg.VerifyComponent(c0, expectedLumaSizeInBlocks, fLuma, uniform1);
                VerifyJpeg.VerifyComponent(c1, expectedChromaSizeInBlocks, fChroma, divisor);
                VerifyJpeg.VerifyComponent(c2, expectedChromaSizeInBlocks, fChroma, divisor);

                if (componentCount == 4)
                {
                    OrigComponent c3 = decoder.Components[3];
                    VerifyJpeg.VerifyComponent(c3, expectedLumaSizeInBlocks, fLuma, uniform1);
                }
            }
        }
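
Worked numbers for the expectations above, assuming a hypothetical 4:2:0 input (fLuma = 2x2, fChroma = 1x1) with a 3x2 MCU grid:

int mcuX = 3, mcuY = 2;
int lumaW = 2, lumaH = 2, chromaW = 1, chromaH = 1;

int lumaBlocksX = mcuX * lumaW;                              // 6
int lumaBlocksY = mcuY * lumaH;                              // 4
int divisorX = lumaW / chromaW;                              // 2
int divisorY = lumaH / chromaH;                              // 2
int chromaBlocksX = (lumaBlocksX + divisorX - 1) / divisorX; // 3 (DivideRoundUp)
int chromaBlocksY = (lumaBlocksY + divisorY - 1) / divisorY; // 2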
Example #16
 /// <summary>
 /// Initializes a default-constructed <see cref="OrigJpegScanDecoder"/> instance for reading data from the <see cref="OrigJpegDecoderCore"/>'s stream.
 /// </summary>
 /// <param name="p">Pointer to <see cref="OrigJpegScanDecoder"/> on the stack</param>
 /// <param name="decoder">The <see cref="OrigJpegDecoderCore"/> instance</param>
 /// <param name="remaining">The remaining bytes in the segment block.</param>
 public static void InitStreamReading(OrigJpegScanDecoder* p, OrigJpegDecoderCore decoder, int remaining)
 {
     p->data     = ComputationData.Create();
     p->pointers = new DataPointers(&p->data);
     p->InitStreamReadingImpl(decoder, remaining);
 }
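
A hedged usage sketch (unsafe context assumed; the exact call site in the decoder may differ, but DecodeBlocks is the instance method shown in the surrounding examples, and InitStreamReading is assumed to live on OrigJpegScanDecoder itself):

// The scan decoder is stack-allocated and initialized through a pointer:
OrigJpegScanDecoder scan = default(OrigJpegScanDecoder);
OrigJpegScanDecoder.InitStreamReading(&scan, decoder, remaining);
scan.DecodeBlocks(decoder);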
Example #17
        /// <summary>
        /// Read Huffman data from Jpeg scans in <see cref="OrigJpegDecoderCore.InputStream"/>,
        /// and decode it as <see cref="Block8x8"/> into <see cref="OrigComponent.SpectralBlocks"/>.
        ///
        /// The blocks are traversed one MCU at a time. For 4:2:0 chroma
        /// subsampling, there are four Y 8x8 blocks in every 16x16 MCU.
        /// For a baseline 32x16 pixel image, the Y blocks visiting order is:
        /// 0 1 4 5
        /// 2 3 6 7
        /// For progressive images, the interleaved scans (those with component count &gt; 1)
        /// are traversed as above, but non-interleaved scans are traversed left
        /// to right, top to bottom:
        /// 0 1 2 3
        /// 4 5 6 7
        /// Only DC scans (zigStart == 0) can be interleaved. AC scans must have
        /// only one component.
        /// To further complicate matters, for non-interleaved scans, there is no
        /// data for any blocks that are inside the image at the MCU level but
        /// outside the image at the pixel level. For example, a 24x16 pixel 4:2:0
        /// progressive image consists of two 16x16 MCUs. The interleaved scans
        /// will process 8 Y blocks:
        /// 0 1 4 5
        /// 2 3 6 7
        /// The non-interleaved scans will process only 6 Y blocks:
        /// 0 1 2
        /// 3 4 5
        /// </summary>
        /// <param name="decoder">The <see cref="OrigJpegDecoderCore"/> instance</param>
        public void DecodeBlocks(OrigJpegDecoderCore decoder)
        {
            decoder.InputProcessor.ResetErrorState();

            int  blockCount  = 0;
            int  mcu         = 0;
            byte expectedRst = OrigJpegConstants.Markers.RST0;

            for (int my = 0; my < decoder.MCUCountY; my++)
            {
                for (int mx = 0; mx < decoder.MCUCountX; mx++)
                {
                    for (int scanIndex = 0; scanIndex < this.componentScanCount; scanIndex++)
                    {
                        this.ComponentIndex = this.pointers.ComponentScan[scanIndex].ComponentIndex;
                        OrigComponent component = decoder.Components[this.ComponentIndex];

                        this.hi = component.HorizontalSamplingFactor;
                        int vi = component.VerticalSamplingFactor;

                        for (int j = 0; j < this.hi * vi; j++)
                        {
                            if (this.componentScanCount != 1)
                            {
                                this.bx = (this.hi * mx) + (j % this.hi);
                                this.by = (vi * my) + (j / this.hi);
                            }
                            else
                            {
                                int q = decoder.MCUCountX * this.hi;
                                this.bx = blockCount % q;
                                this.by = blockCount / q;
                                blockCount++;
                                if (this.bx * 8 >= decoder.ImageWidth || this.by * 8 >= decoder.ImageHeight)
                                {
                                    continue;
                                }
                            }

                            // Find the block at (bx,by) in the component's buffer:
                            ref Block8x8 blockRefOnHeap = ref component.GetBlockReference(this.bx, this.by);

                            // Copy block to stack
                            this.data.Block = blockRefOnHeap;

                            if (!decoder.InputProcessor.ReachedEOF)
                            {
                                this.DecodeBlock(decoder, scanIndex);
                            }

                            // Store the result block:
                            blockRefOnHeap = this.data.Block;
                        }

                        // for j
                    }

                    // for scanIndex
                    mcu++;

                    if (decoder.RestartInterval > 0 && mcu % decoder.RestartInterval == 0 && mcu < decoder.TotalMCUCount)
                    {
                        // A more sophisticated decoder could use RST[0-7] markers to resynchronize from corrupt input,
                        // but this one assumes well-formed input, and hence the restart marker follows immediately.
                        if (!decoder.InputProcessor.ReachedEOF)
                        {
                            decoder.InputProcessor.ReadFullUnsafe(decoder.Temp, 0, 2);
                            if (decoder.InputProcessor.CheckEOFEnsureNoError())
                            {
                                if (decoder.Temp[0] != 0xff || decoder.Temp[1] != expectedRst)
                                {
                                    throw new ImageFormatException("Bad RST marker");
                                }

                                expectedRst++;
                                if (expectedRst == OrigJpegConstants.Markers.RST7 + 1)
                                {
                                    expectedRst = OrigJpegConstants.Markers.RST0;
                                }
                            }
                        }

                        // Reset the Huffman decoder.
                        decoder.InputProcessor.Bits = default(Bits);

                        // Reset the DC components, as per section F.2.1.3.1.
                        this.ResetDc();

                        // Reset the progressive decoder state, as per section G.1.2.2.
                        this.eobRun = 0;
                    }
                }

                // for mx
            }
        }
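
A stand-alone sketch of the expectedRst cycling above: JPEG restart markers run from 0xD0 (RST0) through 0xD7 (RST7) and repeat in that order, so the expected marker wraps back to RST0 after RST7:

byte expectedRst = 0xD0; // OrigJpegConstants.Markers.RST0
for (int interval = 0; interval < 10; interval++)
{
    System.Console.Write($"0x{expectedRst:X2} ");
    expectedRst++;
    if (expectedRst == 0xD7 + 1) // one past RST7: wrap around
    {
        expectedRst = 0xD0;
    }
}

// Writes: 0xD0 0xD1 0xD2 0xD3 0xD4 0xD5 0xD6 0xD7 0xD0 0xD1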
Example #18
        /// <summary>
        /// Initializes all component data except <see cref="SpectralBlocks"/>.
        /// </summary>
        /// <param name="decoder">The <see cref="OrigJpegDecoderCore"/> instance</param>
        public void InitializeCoreData(OrigJpegDecoderCore decoder)
        {
            // Section B.2.2 states that "the value of C_i shall be different from
            // the values of C_1 through C_(i-1)".
            int i = this.Index;

            for (int j = 0; j < this.Index; j++)
            {
                if (this.Identifier == decoder.Components[j].Identifier)
                {
                    throw new ImageFormatException("Repeated component identifier");
                }
            }

            this.QuantizationTableIndex = decoder.Temp[8 + (3 * i)];
            if (this.QuantizationTableIndex > OrigJpegDecoderCore.MaxTq)
            {
                throw new ImageFormatException("Bad Tq value");
            }

            byte hv = decoder.Temp[7 + (3 * i)];
            int  h  = hv >> 4;
            int  v  = hv & 0x0f;

            if (h < 1 || h > 4 || v < 1 || v > 4)
            {
                throw new ImageFormatException("Unsupported Luma/chroma subsampling ratio");
            }

            if (h == 3 || v == 3)
            {
                throw new ImageFormatException("Lnsupported subsampling ratio");
            }

            switch (decoder.ComponentCount)
            {
            case 1:

                // If a JPEG image has only one component, section A.2 says "this data
                // is non-interleaved by definition" and section A.2.2 says "[in this
                // case...] the order of data units within a scan shall be left-to-right
                // and top-to-bottom... regardless of the values of H_1 and V_1". Section
                // 4.8.2 also says "[for non-interleaved data], the MCU is defined to be
                // one data unit". Similarly, section A.1.1 explains that it is the ratio
                // of H_i to max_j(H_j) that matters, and similarly for V. For grayscale
                // images, H_1 is the maximum H_j for all components j, so that ratio is
                // always 1. The component's (h, v) is effectively always (1, 1): even if
                // the nominal (h, v) is (2, 1), a 20x5 image is encoded in three 8x8
                // MCUs, not two 16x8 MCUs.
                h = 1;
                v = 1;
                break;

            case 3:

                // For YCbCr images, we only support 4:4:4, 4:4:0, 4:2:2, 4:2:0,
                // 4:1:1 or 4:1:0 chroma subsampling ratios. This implies that the
                // (h, v) values for the Y component are either (1, 1), (1, 2),
                // (2, 1), (2, 2), (4, 1) or (4, 2), and the Y component's values
                // must be a multiple of the Cb and Cr component's values. We also
                // assume that the two chroma components have the same subsampling
                // ratio.
                switch (i)
                {
                case 0:
                {
                    // Y.
                    // We have already verified, above, that h and v are both
                    // either 1, 2 or 4, so invalid (h, v) combinations are those
                    // with v == 4.
                    if (v == 4)
                    {
                        throw new ImageFormatException("Unsupported subsampling ratio");
                    }

                    break;
                }

                case 1:
                {
                    // Cb.
                    Size s0 = decoder.Components[0].SamplingFactors;

                    if (s0.Width % h != 0 || s0.Height % v != 0)
                    {
                        throw new ImageFormatException("Unsupported subsampling ratio");
                    }

                    break;
                }

                case 2:
                {
                    // Cr.
                    Size s1 = decoder.Components[1].SamplingFactors;

                    if (s1.Width != h || s1.Height != v)
                    {
                        throw new ImageFormatException("Unsupported subsampling ratio");
                    }

                    break;
                }
                }

                break;

            case 4:

                // For 4-component images (either CMYK or YCbCrK), we only support two
                // hv vectors: [0x11 0x11 0x11 0x11] and [0x22 0x11 0x11 0x22].
                // Theoretically, 4-component JPEG images could mix and match hv values
                // but in practice, those two combinations are the only ones in use,
                // and it simplifies the applyBlack code below if we can assume that:
                // - for CMYK, the C and K channels have full samples, and if the M
                // and Y channels subsample, they subsample both horizontally and
                // vertically.
                // - for YCbCrK, the Y and K channels have full samples.
                switch (i)
                {
                case 0:
                    if (hv != 0x11 && hv != 0x22)
                    {
                        throw new ImageFormatException("Unsupported subsampling ratio");
                    }

                    break;

                case 1:
                case 2:
                    if (hv != 0x11)
                    {
                        throw new ImageFormatException("Unsupported subsampling ratio");
                    }

                    break;

                case 3:
                    Size s0 = decoder.Components[0].SamplingFactors;

                    if (s0.Width != h || s0.Height != v)
                    {
                        throw new ImageFormatException("Unsupported subsampling ratio");
                    }

                    break;
                }

                break;
            }

            this.SamplingFactors = new Size(h, v);
        }
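
To make the hv unpacking above concrete: in a plain 4:2:0 YCbCr image the three components carry the hv bytes 0x22, 0x11, 0x11 (the luma half of one of the two 4-component vectors the comment above allows), so the Y component unpacks as follows:

byte hv = 0x22;    // Y component of a hypothetical 4:2:0 YCbCr image
int h = hv >> 4;   // 2: horizontal sampling factor
int v = hv & 0x0F; // 2: vertical sampling factor
// SamplingFactors for Y becomes (2, 2); Cb and Cr (0x11) become (1, 1).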