Esempio n. 1
0
        /// <summary>
        /// Fast path for the common 2:1 horizontal by 2:1 vertical case.
        /// Each input sample is replicated into a 2x2 output block (box filter).
        /// </summary>
        private void H2V2UpSample(ref ComponentBuffer input_data)
        {
            var output_data = m_color_buf[m_currentComponent];

            for (int inrow = 0, outrow = 0; outrow < m_cinfo.m_maxVSampleFactor; inrow++, outrow += 2)
            {
                var sourceRow = m_upsampleRowOffset + inrow;

                // Duplicate every input sample horizontally into the output row.
                var dst = 0;
                for (var src = 0; dst < m_cinfo.outputWidth; src++)
                {
                    var sample = input_data[sourceRow][src]; /* don't need GETJSAMPLE() here */
                    output_data[outrow][dst++] = sample;
                    output_data[outrow][dst++] = sample;
                }

                // Duplicate the freshly generated row into the next output row.
                JpegUtils.jcopy_sample_rows(output_data, outrow, output_data, outrow + 1, 1, m_cinfo.outputWidth);
            }
        }
Esempio n. 2
0
    // Camera-preview callback: converts an incoming Y4 (greyscale) frame to RGB
    // via the native Convert() and uploads it to greyscaleTexture.
    // (Previous comment said "called once per frame"; in fact it only acts on
    // ComponentBuffer.Y4 frames and ignores everything else.)
    void OnPreviewUpdate(ComponentBuffer bufferType, IntPtr buffer, int width, int height, int size)
    {
        // Only the Y4 buffer is processed; other component buffers are ignored.
        if (bufferType != ComponentBuffer.Y4)
        {
            return;
        }
        // Receives the RGB output pointer from the native layer.
        IntPtr rgbBuffer;

        // Pass the buffer to the native layer for Y4 -> RGB conversion.
        // NOTE(review): ownership/lifetime of rgbBuffer is defined by the native
        // Convert(); if it allocates a new buffer per call it is never freed
        // here -- verify against the native implementation.
        Convert(out rgbBuffer, buffer, (UIntPtr)(uint)size);
        // Lazily create the destination texture on the first frame.
        if (greyscaleTexture == null)
        {
            // RGB24, no mipmaps, gamma color space.
            greyscaleTexture = new Texture2D(width, height, TextureFormat.RGB24, false, false);
            // Point the UI RawImage at the texture so updates become visible.
            RawImage.texture = greyscaleTexture;
        }
        // Copy the converted RGB data into the texture.
        // Assumes `size` is the pixel count (width*height), so RGB24 needs
        // size * 3 bytes -- TODO confirm against the caller.
        greyscaleTexture.LoadRawTextureData(rgbBuffer, size * 3);
        // Upload changes to the GPU.
        greyscaleTexture.Apply();
    }
Esempio n. 3
0
        /// <summary>
        /// This version handles any integral sampling ratios.
        /// This is not used for typical JPEG files, so it need not be fast.
        /// Nor, for that matter, is it particularly accurate: the algorithm is
        /// simple replication of the input pixel onto the corresponding output
        /// pixels.  The hi-falutin sampling literature refers to this as a
        /// "box filter".  A box filter tends to introduce visible artifacts,
        /// so if you are actually going to use 3:1 or 4:1 sampling ratios
        /// you would be well advised to improve this code.
        /// </summary>
        private void int_upsample(ref ComponentBuffer input_data)
        {
            ComponentBuffer output_data = m_color_buf[m_currentComponent];
            int             h_expand    = m_h_expand[m_currentComponent];
            int             v_expand    = m_v_expand[m_currentComponent];

            int inrow  = 0;
            int outrow = 0;

            while (outrow < m_cinfo.m_max_v_samp_factor)
            {
                /* Generate one output row with proper horizontal expansion */
                int row = m_upsampleRowOffset + inrow;

                /* BUGFIX: outIndex must persist across input columns -- each input
                 * sample contributes h_expand consecutive output samples.  It was
                 * previously reset inside the column loop, so only the first
                 * h_expand output slots were (repeatedly) written.  The loop now
                 * also terminates on the output position, matching h2v1/h2v2.
                 */
                int outIndex = 0;
                for (int col = 0; outIndex < m_cinfo.m_output_width; col++)
                {
                    byte invalue = input_data[row][col]; /* don't need GETJSAMPLE() here */
                    for (int h = h_expand; h > 0; h--)
                    {
                        output_data[outrow][outIndex] = invalue;
                        outIndex++;
                    }
                }

                /* Generate any additional output rows by duplicating the first one */
                if (v_expand > 1)
                {
                    JpegUtils.jcopy_sample_rows(output_data, outrow, output_data,
                                                outrow + 1, v_expand - 1, m_cinfo.m_output_width);
                }

                inrow++;
                outrow += v_expand;
            }
        }
Esempio n. 4
0
        /// <summary>
        /// Fast path for 2:1 horizontal and 2:1 vertical sampling.
        /// Generates one horizontally expanded row, then duplicates it
        /// vertically (box filter).
        /// </summary>
        private void h2v2_upsample(ComponentBuffer input_data)
        {
            ComponentBuffer output_data = m_color_buf[m_currentComponent];

            int inrow = 0;
            int outrow = 0;
            while (outrow < m_cinfo.m_max_v_samp_factor)
            {
                var src = input_data[m_upsampleRowOffset + inrow];
                var dst = output_data[outrow];

                // Write each input sample twice to double the width.
                int outCol = 0;
                int inCol = 0;
                while (outCol < m_cinfo.m_output_width)
                {
                    byte sample = src[inCol++]; /* don't need GETJSAMPLE() here */
                    dst[outCol++] = sample;
                    dst[outCol++] = sample;
                }

                // Duplicate the generated row into the following output row.
                JpegUtils.jcopy_sample_rows(output_data, outrow, output_data, outrow + 1, 1, m_cinfo.m_output_width);
                inrow++;
                outrow += 2;
            }
        }
Esempio n. 5
0
        /// <summary>
        /// Dispatches to the upsampling routine selected for the current
        /// component during construction.
        /// </summary>
        private void upsampleComponent(ComponentBuffer input_data)
        {
            ComponentUpsampler method = m_upsampleMethods[m_currentComponent];
            switch (method)
            {
                case ComponentUpsampler.noop_upsampler:
                    noop_upsample();
                    break;
                case ComponentUpsampler.fullsize_upsampler:
                    fullsize_upsample(input_data);
                    break;
                case ComponentUpsampler.h2v1_upsampler:
                    h2v1_upsample(input_data);
                    break;
                case ComponentUpsampler.h2v2_upsampler:
                    h2v2_upsample(input_data);
                    break;
                case ComponentUpsampler.int_upsampler:
                    int_upsample(input_data);
                    break;
                default:
                    // Unknown method: report "not implemented".
                    m_cinfo.ERREXIT(J_MESSAGE_CODE.JERR_NOTIMPL);
                    break;
            }
        }
Esempio n. 6
0
        /// <summary>
        /// Dispatches to the upsampling routine selected for the current
        /// component during construction.
        /// </summary>
        private void UpSampleComponent(ref ComponentBuffer input_data)
        {
            var method = m_upsampleMethods[m_currentComponent];
            switch (method)
            {
                case ComponentUpsampler.noop_upsampler:
                    NoOpUpSample();
                    break;
                case ComponentUpsampler.fullsize_upsampler:
                    FullSizeUpSample(ref input_data);
                    break;
                case ComponentUpsampler.h2v1_upsampler:
                    H2V1UpSample(ref input_data);
                    break;
                case ComponentUpsampler.h2v2_upsampler:
                    H2V2UpSample(ref input_data);
                    break;
                case ComponentUpsampler.int_upsampler:
                    IntUpSample(ref input_data);
                    break;
                default:
                    // Unknown method: report "not implemented".
                    m_cinfo.ErrExit(JMessageCode.JERR_NOTIMPL);
                    break;
            }
        }
Esempio n. 7
0
        /// <summary>
        /// Builds a one-row buffer with one console element per character of
        /// Text, all using this control's current colors.
        /// </summary>
        internal override ComponentBuffer CreateBuffer()
        {
            var buffer = new ComponentBuffer(new Dimension(Text.Length, 1));

            for (var index = 0; index < Text.Length; index++)
            {
                var element = new ConsoleElement(Text[index], ForegroundColor, BackgroundColor);
                buffer[index, 0] = element;
            }

            return buffer;
        }
        /// <summary>
        /// Walks every raw buffer of the TBuffer component board and forwards
        /// the keep-alive pass to the typed overload for each one.
        /// </summary>
        protected internal override void KeepAlive(Span <bool> keep, Span <GameEntity> resources)
        {
            var componentType = gameWorld.AsComponentType <TBuffer>();
            var column = gameWorld.Boards.ComponentType.ComponentBoardColumns[(int)componentType.Id];

            // Only buffer-backed component boards are relevant here.
            var bufferBoard = column as BufferComponentBoard;
            if (bufferBoard == null)
            {
                return;
            }

            // The entity span is reinterpreted as typed resources once, up front.
            var typedResources = MemoryMarshal.Cast <GameEntity, GameResource <TResource> >(resources);
            var rawBuffers = bufferBoard.AsSpan();

            for (var i = 0; i < rawBuffers.Length; i++)
            {
                KeepAlive(keep, new ComponentBuffer <TBuffer>(rawBuffers[i]).Span, typedResources);
            }
        }
Esempio n. 9
0
        /// <summary>
        /// Fast path for 2:1 horizontal and 1:1 vertical sampling.
        /// Each input sample is written twice to the output row (box filter).
        /// </summary>
        private void h2v1_upsample(ComponentBuffer input_data)
        {
            ComponentBuffer output_data = m_color_buf[m_currentComponent];

            for (int inrow = 0; inrow < m_cinfo.m_max_v_samp_factor; inrow++)
            {
                var src = input_data[m_upsampleRowOffset + inrow];
                var dst = output_data[inrow];

                int outCol = 0;
                int inCol = 0;
                while (outCol < m_cinfo.m_output_width)
                {
                    byte sample = src[inCol++]; /* don't need GETJSAMPLE() here */
                    dst[outCol++] = sample;
                    dst[outCol++] = sample;
                }
            }
        }
Esempio n. 10
0
        /// <summary>
        /// Fast path for the common 2:1 horizontal by 1:1 vertical case.
        /// Each input sample is emitted twice in the output row (box filter).
        /// </summary>
        private void H2V1UpSample(ref ComponentBuffer input_data)
        {
            var output_data = m_color_buf[m_currentComponent];

            for (var inrow = 0; inrow < m_cinfo.m_maxVSampleFactor; inrow++)
            {
                var sourceRow = m_upsampleRowOffset + inrow;

                var dst = 0;
                for (var src = 0; dst < m_cinfo.outputWidth; src++)
                {
                    var sample = input_data[sourceRow][src]; /* don't need GETJSAMPLE() here */
                    output_data[inrow][dst++] = sample;
                    output_data[inrow][dst++] = sample;
                }
            }
        }
Esempio n. 11
0
        /// <summary>
        /// Process some data.
        /// This handles the simple case where no context is required.
        /// </summary>
        private void ProcessDataSimpleMain(byte[][] output_buf, ref int out_row_ctr, int out_rows_avail)
        {
            var cb = new ComponentBuffer[JpegConstants.MAX_COMPONENTS];
            for (var i = 0; i < JpegConstants.MAX_COMPONENTS; i++)
            {
                var component = new ComponentBuffer();
                component.SetBuffer(m_buffer[i], null, 0);
                cb[i] = component;
            }

            /* Fill the main buffer from the coefficient controller if we
             * haven't done so yet. */
            if (!m_buffer_full)
            {
                var readResult = m_cinfo.m_coef.DecompressData(cb);
                if (readResult == ReadResult.JPEG_SUSPENDED)
                {
                    /* suspension forced, can do nothing more */
                    return;
                }

                /* OK, we have an iMCU row to work with */
                m_buffer_full = true;
            }

            /* An iMCU row always holds min_DCT_scaled_size row groups. */
            var rowgroups_avail = m_cinfo.min_DCT_v_scaled_size;

            /* At the bottom of the image we may pass extra garbage row groups to
             * the postprocessor; it checks for bottom of image anyway (at row
             * resolution), so no point in us doing it too.  Feed it now. */
            m_cinfo.m_post.PostProcessData(cb, ref m_rowgroup_ctr, rowgroups_avail, output_buf, ref out_row_ctr, out_rows_avail);

            /* If the postprocessor has consumed all the data, mark buffer empty. */
            if (m_rowgroup_ctr >= rowgroups_avail)
            {
                m_rowgroup_ctr = 0;
                m_buffer_full  = false;
            }
        }
        /// <summary>
        /// Color conversion for no colorspace change: just copy the data,
        /// converting from separate-planes to interleaved representation.
        /// </summary>
        private void null_convert(ComponentBuffer[] input_buf, int input_row, byte[][] output_buf, int output_row, int num_rows)
        {
            for (int row = 0; row < num_rows; row++)
            {
                byte[] outputRow = output_buf[output_row + row];

                for (int ci = 0; ci < m_cinfo.m_num_components; ci++)
                {
                    int sourceRow = input_row + m_perComponentOffsets[ci];

                    // Component ci lands at positions ci, ci + N, ci + 2N, ...
                    // where N is the number of components per pixel.
                    int outPos = ci;
                    for (int col = 0; col < m_cinfo.m_output_width; col++)
                    {
                        /* needn't bother with GETJSAMPLE() here */
                        outputRow[outPos] = input_buf[ci][sourceRow][col];
                        outPos += m_cinfo.m_num_components;
                    }
                }

                input_row++;
            }
        }
        /// <summary>
        /// Color conversion for CMYK -> RGB.
        /// </summary>
        private void cmyk_rgb_convert(ComponentBuffer[] input_buf, int input_row, byte[][] output_buf, int output_row, int num_rows)
        {
            int offset0 = m_perComponentOffsets[0];
            int offset1 = m_perComponentOffsets[1];
            int offset2 = m_perComponentOffsets[2];
            int offset3 = m_perComponentOffsets[3];

            for (int row = 0; row < num_rows; row++)
            {
                byte[] outputRow = output_buf[output_row + row];

                int columnOffset = 0;
                for (int col = 0; col < m_cinfo.m_output_width; col++)
                {
                    int c = input_buf[0][input_row + offset0][col];
                    int m = input_buf[1][input_row + offset1][col];
                    int y = input_buf[2][input_row + offset2][col];
                    int k = input_buf[3][input_row + offset3][col];

                    /* NOTE(review): the c*k/255 form presumably assumes
                     * Adobe-style inverted CMYK samples -- confirm upstream. */
                    outputRow[columnOffset + JpegConstants.RGB_RED] = (byte)((c * k) / 255);
                    outputRow[columnOffset + JpegConstants.RGB_GREEN] = (byte)((m * k) / 255);
                    outputRow[columnOffset + JpegConstants.RGB_BLUE] = (byte)((y * k) / 255);
                    columnOffset += JpegConstants.RGB_PIXELSIZE;
                }

                input_row++;
            }
        }
        /// <summary>
        /// Convert grayscale to RGB: just duplicate the graylevel three times.
        /// This is provided to support applications that don't want to cope
        /// with grayscale as a separate case.
        /// </summary>
        private void gray_rgb_convert(ComponentBuffer[] input_buf, int input_row, byte[][] output_buf, int output_row, int num_rows)
        {
            int offsetRed = m_perComponentOffsets[0];
            int offsetGreen = m_perComponentOffsets[1];
            int offsetBlue = m_perComponentOffsets[2];

            int num_cols = m_cinfo.m_output_width;
            for (int row = 0; row < num_rows; row++)
            {
                byte[] outputRow = output_buf[output_row + row];

                int columnOffset = 0;
                for (int col = 0; col < num_cols; col++)
                {
                    /* All three channels read the single grayscale plane
                     * (component 0); we can dispense with GETJSAMPLE() here. */
                    outputRow[columnOffset + JpegConstants.RGB_RED] = input_buf[0][input_row + offsetRed][col];
                    outputRow[columnOffset + JpegConstants.RGB_GREEN] = input_buf[0][input_row + offsetGreen][col];
                    outputRow[columnOffset + JpegConstants.RGB_BLUE] = input_buf[0][input_row + offsetBlue][col];
                    columnOffset += JpegConstants.RGB_PIXELSIZE;
                }

                input_row++;
            }
        }
        /*
         * [R-G,G,B-G] to grayscale conversion with modulo calculation
         * (inverse color transform).
         */
        private void rgb1_gray_convert(ComponentBuffer[] input_buf, int input_row, byte[][] output_buf, int output_row, int num_rows)
        {
            int component0RowOffset = m_perComponentOffsets[0];
            int component1RowOffset = m_perComponentOffsets[1];
            int component2RowOffset = m_perComponentOffsets[2];

            int num_cols = m_cinfo.m_output_width;

            for (int row = 0; row < num_rows; row++)
            {
                int columnOffset = 0;
                for (int col = 0; col < num_cols; col++)
                {
                    int r = input_buf[0][input_row + component0RowOffset][col];
                    int g = input_buf[1][input_row + component1RowOffset][col];
                    int b = input_buf[2][input_row + component2RowOffset][col];

                    /* Assume that MAXJSAMPLE+1 is a power of 2, so that the MOD
                     * (modulo) operator is equivalent to the bitmask operator AND.
                     */
                    r = (r + g - JpegConstants.CENTERJSAMPLE) & JpegConstants.MAXJSAMPLE;
                    b = (b + g - JpegConstants.CENTERJSAMPLE) & JpegConstants.MAXJSAMPLE;

                    /* Y */
                    output_buf[output_row + row][columnOffset++] = (byte)((rgb_y_tab[r + R_Y_OFF] + rgb_y_tab[g + G_Y_OFF] + rgb_y_tab[b + B_Y_OFF]) >> SCALEBITS);
                }

                /* BUGFIX: advance to the next input row.  Without this, every
                 * output row was converted from the same (first) input row.
                 * Matches the sibling converters cmyk_rgb_convert and
                 * gray_rgb_convert, which both advance input_row per row.
                 */
                input_row++;
            }
        }
        /*
         * Convert RGB to grayscale.
         */
        private void rgb_gray_convert(ComponentBuffer[] input_buf, int input_row, byte[][] output_buf, int output_row, int num_rows)
        {
            int component0RowOffset = m_perComponentOffsets[0];
            int component1RowOffset = m_perComponentOffsets[1];
            int component2RowOffset = m_perComponentOffsets[2];

            int num_cols = m_cinfo.m_output_width;

            for (int row = 0; row < num_rows; row++)
            {
                int columnOffset = 0;
                for (int col = 0; col < num_cols; col++)
                {
                    int r = input_buf[0][input_row + component0RowOffset][col];
                    int g = input_buf[1][input_row + component1RowOffset][col];
                    int b = input_buf[2][input_row + component2RowOffset][col];

                    /* Y */
                    output_buf[output_row + row][columnOffset++] = (byte)((rgb_y_tab[r + R_Y_OFF] + rgb_y_tab[g + G_Y_OFF] + rgb_y_tab[b + B_Y_OFF]) >> SCALEBITS);
                }

                /* BUGFIX: advance to the next input row.  Without this, every
                 * output row was converted from the same (first) input row.
                 * Matches the sibling converters cmyk_rgb_convert and
                 * gray_rgb_convert, which both advance input_row per row.
                 */
                input_row++;
            }
        }
 /// <summary>
 /// Convert some rows of samples to the output colorspace.
 ///
 /// Note that we change from noninterleaved, one-plane-per-component format
 /// to interleaved-pixel format.  The output buffer is therefore three times
 /// as wide as the input buffer.
 /// A starting row offset is provided only for the input buffer.  The caller
 /// can easily adjust the passed output_buf value to accommodate any row
 /// offset required on that side.
 /// </summary>
 /// <param name="input_buf">Per-component sample planes to read from.</param>
 /// <param name="perComponentOffsets">Additional per-component row offsets, stored for the converter.</param>
 /// <param name="input_row">First input row to convert.</param>
 /// <param name="output_buf">Destination rows in interleaved-pixel format.</param>
 /// <param name="output_row">First destination row index.</param>
 /// <param name="num_rows">Number of rows to convert.</param>
 public void color_convert(ComponentBuffer[] input_buf, int[] perComponentOffsets, int input_row, byte[][] output_buf, int output_row, int num_rows)
 {
     // Stash the offsets where the conversion routines expect them, then run
     // whichever converter was selected at setup time.
     m_perComponentOffsets = perComponentOffsets;
     m_converter(input_buf, input_row, output_buf, output_row, num_rows);
 }
Esempio n. 18
0
 /// <summary>
 /// For full-size components, we just make color_buf[ci] point at the
 /// input buffer, and thus avoid copying any data.  Note that this is
 /// safe only because sep_upsample doesn't declare the input row group
 /// "consumed" until we are done color converting and emitting it.
 /// </summary>
 /// <param name="input_data">Component plane to alias as the color buffer.</param>
 private void FullSizeUpSample(ref ComponentBuffer input_data)
 {
     // Alias, don't copy: the component's color buffer becomes the input
     // buffer itself; the row offset records where this group starts.
     m_color_buf[m_currentComponent]           = input_data;
     m_perComponentOffsets[m_currentComponent] = m_upsampleRowOffset;
 }
Esempio n. 19
0
        /// <summary>
        /// Fast path for 2:1 horizontal and 1:1 vertical sampling: every
        /// input sample appears twice in the output row (box filter).
        /// </summary>
        private void h2v1_upsample(ref ComponentBuffer input_data)
        {
            ComponentBuffer output_data = m_color_buf[m_currentComponent];

            for (int inrow = 0; inrow < m_cinfo.m_max_v_samp_factor; inrow++)
            {
                int row = m_upsampleRowOffset + inrow;

                int outIndex = 0;
                int col = 0;
                while (outIndex < m_cinfo.m_output_width)
                {
                    byte invalue = input_data[row][col]; /* don't need GETJSAMPLE() here */
                    output_data[inrow][outIndex++] = invalue;
                    output_data[inrow][outIndex++] = invalue;
                    col++;
                }
            }
        }
        /// <summary>
        /// Process some data.
        /// This handles the case where context rows must be provided.
        /// Runs a three-state machine (CTX_POSTPONED_ROW, CTX_PREPARE_FOR_IMCU,
        /// CTX_PROCESS_IMCU) so the method can suspend when either the
        /// coefficient decoder or the postprocessor cannot make progress, and
        /// resume later exactly where it left off.
        /// </summary>
        private void process_data_context_main(byte[][] output_buf, ref int out_row_ctr, int out_rows_avail)
        {
            // Wrap each component's buffer with the "funny" index/offset tables
            // for the currently active xbuffer list (context-row bookkeeping).
            ComponentBuffer[] cb = new ComponentBuffer[m_cinfo.m_num_components];
            for (int i = 0; i < m_cinfo.m_num_components; i++)
            {
                cb[i] = new ComponentBuffer();
                cb[i].SetBuffer(m_buffer[i], m_funnyIndices[m_whichFunny][i], m_funnyOffsets[i]);
            }

            /* Read input data if we haven't filled the main buffer yet */
            if (!m_buffer_full)
            {
                if (m_cinfo.m_coef.decompress_data(cb) == ReadResult.JPEG_SUSPENDED)
                {
                    /* suspension forced, can do nothing more */
                    return;
                }

                /* OK, we have an iMCU row to work with */
                m_buffer_full = true;

                /* count rows received */
                m_iMCU_row_ctr++;
            }

            /* The postprocessor typically will not swallow all the input data it
             * is handed in one call (due to filling the output buffer first).
             * Must be prepared to exit and restart.  The m_context_state checks
             * below keep track of how far we got; note that each state falls
             * through to the next on successful completion.
             */
            if (m_context_state == CTX_POSTPONED_ROW)
            {
                /* Call postprocessor using previously set pointers for postponed row */
                m_cinfo.m_post.post_process_data(cb, ref m_rowgroup_ctr,
                    m_rowgroups_avail, output_buf, ref out_row_ctr, out_rows_avail);

                if (m_rowgroup_ctr < m_rowgroups_avail)
                {
                    /* Need to suspend */
                    return;
                }

                m_context_state = CTX_PREPARE_FOR_IMCU;

                if (out_row_ctr >= out_rows_avail)
                {
                    /* Postprocessor exactly filled output buf */
                    return;
                }
            }

            if (m_context_state == CTX_PREPARE_FOR_IMCU)
            {
                /* Prepare to process first M-1 row groups of this iMCU row */
                m_rowgroup_ctr = 0;
                m_rowgroups_avail = m_cinfo.min_DCT_v_scaled_size - 1;

                /* Check for bottom of image: if so, tweak pointers to "duplicate"
                 * the last sample row, and adjust rowgroups_avail to ignore padding rows.
                 */
                if (m_iMCU_row_ctr == m_cinfo.m_total_iMCU_rows)
                    set_bottom_pointers();

                m_context_state = CTX_PROCESS_IMCU;
            }

            if (m_context_state == CTX_PROCESS_IMCU)
            {
                /* Call postprocessor using previously set pointers */
                m_cinfo.m_post.post_process_data(cb, ref m_rowgroup_ctr,
                    m_rowgroups_avail, output_buf, ref out_row_ctr, out_rows_avail);

                if (m_rowgroup_ctr < m_rowgroups_avail)
                {
                    /* Need to suspend */
                    return;
                }

                /* After the first iMCU, change wraparound pointers to normal state */
                if (m_iMCU_row_ctr == 1)
                    set_wraparound_pointers();

                /* Prepare to load new iMCU row using other xbuffer list */
                m_whichFunny ^= 1;    /* 0=>1 or 1=>0 */
                m_buffer_full = false;

                /* Still need to process last row group of this iMCU row, */
                /* which is saved at index M+1 of the other xbuffer */
                m_rowgroup_ctr = m_cinfo.min_DCT_v_scaled_size + 1;
                m_rowgroups_avail = m_cinfo.min_DCT_v_scaled_size + 2;
                m_context_state = CTX_POSTPONED_ROW;
            }
        }
        /// <summary>
        /// Process some data.
        /// This handles the simple case where no context is required.
        /// </summary>
        private void process_data_simple_main(byte[][] output_buf, ref int out_row_ctr, int out_rows_avail)
        {
            ComponentBuffer[] cb = new ComponentBuffer[JpegConstants.MAX_COMPONENTS];
            for (int i = 0; i < JpegConstants.MAX_COMPONENTS; i++)
            {
                ComponentBuffer component = new ComponentBuffer();
                component.SetBuffer(m_buffer[i], null, 0);
                cb[i] = component;
            }

            /* Fill the main buffer from the coefficient controller if needed. */
            if (!m_buffer_full)
            {
                if (m_cinfo.m_coef.decompress_data(cb) == ReadResult.JPEG_SUSPENDED)
                {
                    /* suspension forced, can do nothing more */
                    return;
                }

                /* OK, we have an iMCU row to work with */
                m_buffer_full = true;
            }

            /* An iMCU row always holds min_DCT_scaled_size row groups. */
            int rowgroups_avail = m_cinfo.min_DCT_v_scaled_size;

            /* At the bottom of the image, we may pass extra garbage row groups
             * to the postprocessor; it has to check for bottom of image anyway
             * (at row resolution), so no point in us doing it too.  Feed it. */
            m_cinfo.m_post.post_process_data(cb, ref m_rowgroup_ctr, rowgroups_avail, output_buf, ref out_row_ctr, out_rows_avail);

            /* Once the postprocessor has consumed everything, mark buffer empty. */
            if (m_rowgroup_ctr >= rowgroups_avail)
            {
                m_rowgroup_ctr = 0;
                m_buffer_full = false;
            }
        }
Esempio n. 22
0
 /// <summary>
 /// Upsample pixel data of all components to full output resolution.
 /// </summary>
 /// <param name="input_buf">Per-component sample planes to read from.</param>
 /// <param name="in_row_group_ctr">Input row-group counter; presumably advanced past
 /// consumed row groups by implementations -- confirm against concrete upsamplers.</param>
 /// <param name="in_row_groups_avail">Number of input row groups available.</param>
 /// <param name="output_buf">Destination rows for upsampled data.</param>
 /// <param name="out_row_ctr">Output row counter, advanced as rows are emitted.</param>
 /// <param name="out_rows_avail">Capacity of <paramref name="output_buf"/> in rows.</param>
 public abstract void upsample(ComponentBuffer[] input_buf, ref int in_row_group_ctr, int in_row_groups_avail, byte[][] output_buf, ref int out_row_ctr, int out_rows_avail);
Esempio n. 23
0
        /// <summary>
        /// Initializes the upsampler: verifies the sampling factors are ones we
        /// can handle, selects a per-component upsampling method, and allocates
        /// an intermediate color buffer for each component that needs real work.
        /// </summary>
        public my_upsampler(jpeg_decompress_struct cinfo)
        {
            m_cinfo = cinfo;
            m_need_context_rows = false; /* until we find out differently */

            if (cinfo.m_CCIR601_sampling)    /* this isn't supported */
                cinfo.ERREXIT(J_MESSAGE_CODE.JERR_CCIR601_NOTIMPL);

            /* Verify we can handle the sampling factors, select per-component methods,
            * and create storage as needed.
            */
            for (int ci = 0; ci < cinfo.m_num_components; ci++)
            {
                jpeg_component_info componentInfo = cinfo.Comp_info[ci];

                /* Compute size of an "input group" after IDCT scaling.  This many samples
                * are to be converted to max_h_samp_factor * max_v_samp_factor pixels.
                */
                int h_in_group = (componentInfo.H_samp_factor * componentInfo.DCT_h_scaled_size) / cinfo.min_DCT_h_scaled_size;
                int v_in_group = (componentInfo.V_samp_factor * componentInfo.DCT_v_scaled_size) / cinfo.min_DCT_v_scaled_size;
                int h_out_group = cinfo.m_max_h_samp_factor;
                int v_out_group = cinfo.m_max_v_samp_factor;

                /* save for use later */
                m_rowgroup_height[ci] = v_in_group;

                if (!componentInfo.component_needed)
                {
                    /* Don't bother to upsample an uninteresting component. */
                    m_upsampleMethods[ci] = ComponentUpsampler.noop_upsampler;
                    continue;		/* don't need to allocate buffer */
                }

                if (h_in_group == h_out_group && v_in_group == v_out_group)
                {
                    /* Fullsize components can be processed without any work. */
                    m_upsampleMethods[ci] = ComponentUpsampler.fullsize_upsampler;
                    continue;		/* don't need to allocate buffer */
                }

                if (h_in_group * 2 == h_out_group && v_in_group == v_out_group)
                {
                    /* Special case for 2h1v upsampling */
                    m_upsampleMethods[ci] = ComponentUpsampler.h2v1_upsampler;
                }
                else if (h_in_group * 2 == h_out_group && v_in_group * 2 == v_out_group)
                {
                    /* Special case for 2h2v upsampling */
                    m_upsampleMethods[ci] = ComponentUpsampler.h2v2_upsampler;
                }
                else if ((h_out_group % h_in_group) == 0 && (v_out_group % v_in_group) == 0)
                {
                    /* Generic integral-factors upsampling method */
                    m_upsampleMethods[ci] = ComponentUpsampler.int_upsampler;
                    m_h_expand[ci] = (byte)(h_out_group / h_in_group);
                    m_v_expand[ci] = (byte)(v_out_group / v_in_group);
                }
                else
                {
                    /* Fractional (non-integral) sampling ratios are not implemented. */
                    cinfo.ERREXIT(J_MESSAGE_CODE.JERR_FRACT_SAMPLE_NOTIMPL);
                }

                /* Allocate an intermediate row buffer: width padded up to a
                 * multiple of max_h_samp_factor, one output row group tall. */
                ComponentBuffer cb = new ComponentBuffer();
                cb.SetBuffer(jpeg_common_struct.AllocJpegSamples(JpegUtils.jround_up(cinfo.m_output_width, 
                    cinfo.m_max_h_samp_factor), cinfo.m_max_v_samp_factor), null, 0);

                m_color_buf[ci] = cb;
            }
        }
Esempio n. 24
0
 /// <summary>
 /// For full-size components, we just make color_buf[ci] point at the
 /// input buffer, and thus avoid copying any data.  Note that this is
 /// safe only because sep_upsample doesn't declare the input row group
 /// "consumed" until we are done color converting and emitting it.
 /// </summary>
 /// <param name="input_data">Component plane to alias as the color buffer.</param>
 private void fullsize_upsample(ComponentBuffer input_data)
 {
     // Alias, don't copy: the component's color buffer becomes the input
     // buffer itself; the row offset records where this group starts.
     m_color_buf[m_currentComponent]           = input_data;
     m_perComponentOffsets[m_currentComponent] = m_upsampleRowOffset;
 }
Esempio n. 25
0
        /// <summary>
        /// This version handles any integral sampling ratios.
        /// This is not used for typical JPEG files, so it need not be fast.
        /// Nor, for that matter, is it particularly accurate: the algorithm is
        /// simple replication of the input pixel onto the corresponding output
        /// pixels.  The hi-falutin sampling literature refers to this as a
        /// "box filter".  A box filter tends to introduce visible artifacts,
        /// so if you are actually going to use 3:1 or 4:1 sampling ratios
        /// you would be well advised to improve this code.
        /// </summary>
        private void int_upsample(ref ComponentBuffer input_data)
        {
            ComponentBuffer output_data = m_color_buf[m_currentComponent];
            int h_expand = m_h_expand[m_currentComponent];
            int v_expand = m_v_expand[m_currentComponent];

            int inrow = 0;
            int outrow = 0;
            while (outrow < m_cinfo.m_max_v_samp_factor)
            {
                /* Generate one output row with proper horizontal expansion */
                int row = m_upsampleRowOffset + inrow;

                /* BUGFIX: outIndex must persist across input columns -- each input
                 * sample contributes h_expand consecutive output samples.  It was
                 * previously reset inside the column loop, so only the first
                 * h_expand output slots were (repeatedly) written.  The loop now
                 * also terminates on the output position, matching h2v1/h2v2.
                 */
                int outIndex = 0;
                for (int col = 0; outIndex < m_cinfo.m_output_width; col++)
                {
                    byte invalue = input_data[row][col]; /* don't need GETJSAMPLE() here */
                    for (int h = h_expand; h > 0; h--)
                    {
                        output_data[outrow][outIndex] = invalue;
                        outIndex++;
                    }
                }
                
                /* Generate any additional output rows by duplicating the first one */
                if (v_expand > 1)
                {
                    JpegUtils.jcopy_sample_rows(output_data, outrow, output_data, 
                        outrow + 1, v_expand - 1, m_cinfo.m_output_width);
                }

                inrow++;
                outrow += v_expand;
            }
        }
Esempio n. 26
0
 /// <summary>
 /// Full-size component: simply alias color_buf[ci] to the input buffer
 /// instead of copying.  Safe only because sep_upsample does not mark the
 /// input row group "consumed" until color conversion/emission completes.
 /// </summary>
 private void fullsize_upsample(ref ComponentBuffer input_data)
 {
     m_perComponentOffsets[m_currentComponent] = m_upsampleRowOffset;
     m_color_buf[m_currentComponent] = input_data;
 }
Esempio n. 27
0
        /// <summary>
        /// Process some data.
        /// This handles the case where context rows must be provided.
        /// Implemented as a small resumable state machine (m_context_state)
        /// because the postprocessor may fill the output buffer before it
        /// consumes all input, forcing us to exit and restart mid-iMCU-row.
        /// </summary>
        private void ProcessDataContextMain(byte[][] output_buf, ref int out_row_ctr, int out_rows_avail)
        {
            // Wrap each component's rows with the "funny" index tables that
            // implement context-row wraparound for the current xbuffer list.
            var cb = new ComponentBuffer[m_cinfo.numComponents];

            for (var i = 0; i < m_cinfo.numComponents; i++)
            {
                cb[i] = new ComponentBuffer();
                cb[i].SetBuffer(m_buffer[i], m_funnyIndices[m_whichFunny][i], m_funnyOffsets[i]);
            }

            /* Read input data if we haven't filled the main buffer yet */
            if (!m_buffer_full)
            {
                if (m_cinfo.m_coef.DecompressData(cb) == ReadResult.JPEG_SUSPENDED)
                {
                    /* suspension forced, can do nothing more */
                    return;
                }

                /* OK, we have an iMCU row to work with */
                m_buffer_full = true;

                /* count rows received */
                m_iMCU_row_ctr++;
            }

            /* Postprocessor typically will not swallow all the input data it is handed
             * in one call (due to filling the output buffer first).  Must be prepared
             * to exit and restart.
             * The if-chain below tracks how far we got; each state falls
             * through to the next on successful completion.
             */
            if (m_context_state == CTX_POSTPONED_ROW)
            {
                /* Call postprocessor using previously set pointers for postponed row */
                m_cinfo.m_post.PostProcessData(cb, ref m_rowgroup_ctr,
                                               m_rowgroups_avail, output_buf, ref out_row_ctr, out_rows_avail);

                if (m_rowgroup_ctr < m_rowgroups_avail)
                {
                    /* Need to suspend */
                    return;
                }

                m_context_state = CTX_PREPARE_FOR_IMCU;

                if (out_row_ctr >= out_rows_avail)
                {
                    /* Postprocessor exactly filled output buf */
                    return;
                }
            }

            if (m_context_state == CTX_PREPARE_FOR_IMCU)
            {
                /* Prepare to process first M-1 row groups of this iMCU row */
                m_rowgroup_ctr    = 0;
                m_rowgroups_avail = m_cinfo.min_DCT_v_scaled_size - 1;

                /* Check for bottom of image: if so, tweak pointers to "duplicate"
                 * the last sample row, and adjust rowgroups_avail to ignore padding rows.
                 */
                if (m_iMCU_row_ctr == m_cinfo.m_total_iMCU_rows)
                {
                    SetBottomPointers();
                }

                m_context_state = CTX_PROCESS_IMCU;
            }

            if (m_context_state == CTX_PROCESS_IMCU)
            {
                /* Call postprocessor using previously set pointers */
                m_cinfo.m_post.PostProcessData(cb, ref m_rowgroup_ctr,
                                               m_rowgroups_avail, output_buf, ref out_row_ctr, out_rows_avail);

                if (m_rowgroup_ctr < m_rowgroups_avail)
                {
                    /* Need to suspend */
                    return;
                }

                /* After the first iMCU, change wraparound pointers to normal state */
                if (m_iMCU_row_ctr == 1)
                {
                    SetWraparoundPointers();
                }

                /* Prepare to load new iMCU row using other xbuffer list */
                m_whichFunny ^= 1;    /* 0=>1 or 1=>0 */
                m_buffer_full = false;

                /* Still need to process last row group of this iMCU row, */
                /* which is saved at index M+1 of the other xbuffer */
                m_rowgroup_ctr    = m_cinfo.min_DCT_v_scaled_size + 1;
                m_rowgroups_avail = m_cinfo.min_DCT_v_scaled_size + 2;
                m_context_state   = CTX_POSTPONED_ROW;
            }
        }
Esempio n. 28
0
 /// <summary>
 /// Invokes the upsampling routine selected for the current component
 /// (m_currentComponent).  Unknown selectors report JERR_NOTIMPL.
 /// </summary>
 private void upsampleComponent(ref ComponentBuffer input_data)
 {
     ComponentUpsampler method = m_upsampleMethods[m_currentComponent];
     if (method == ComponentUpsampler.noop_upsampler)
         noop_upsample();
     else if (method == ComponentUpsampler.fullsize_upsampler)
         fullsize_upsample(ref input_data);
     else if (method == ComponentUpsampler.h2v1_upsampler)
         h2v1_upsample(ref input_data);
     else if (method == ComponentUpsampler.h2v2_upsampler)
         h2v2_upsample(ref input_data);
     else if (method == ComponentUpsampler.int_upsampler)
         int_upsample(ref input_data);
     else
         m_cinfo.ERREXIT(J_MESSAGE_CODE.JERR_NOTIMPL);
 }
        /// <summary>
        /// YCbCr -> RGB color conversion using the precomputed
        /// Cr_r/Cb_g/Cr_g/Cb_b tables.
        /// </summary>
        private void ycc_rgb_convert(ComponentBuffer[] input_buf, int input_row, byte[][] output_buf, int output_row, int num_rows)
        {
            int offset0 = m_perComponentOffsets[0];
            int offset1 = m_perComponentOffsets[1];
            int offset2 = m_perComponentOffsets[2];

            byte[] rangeLimit = m_cinfo.m_sample_range_limit;
            int rangeLimitOffset = m_cinfo.m_sampleRangeLimitOffset;

            for (int row = 0; row < num_rows; row++, input_row++)
            {
                byte[] yRow = input_buf[0][input_row + offset0];
                byte[] cbRow = input_buf[1][input_row + offset1];
                byte[] crRow = input_buf[2][input_row + offset2];
                byte[] outRow = output_buf[output_row + row];

                int outPos = 0;
                for (int col = 0; col < m_cinfo.m_output_width; col++)
                {
                    int y = yRow[col];
                    int cb = cbRow[col];
                    int cr = crRow[col];

                    /* Range-limiting is essential due to noise introduced by DCT losses.
                     * for extended gamut (sYCC) and wide gamut (bg-sYCC) encodings.
                     */
                    outRow[outPos + JpegConstants.RGB_RED] = rangeLimit[rangeLimitOffset + y + m_Cr_r_tab[cr]];
                    outRow[outPos + JpegConstants.RGB_GREEN] = rangeLimit[rangeLimitOffset + y + JpegUtils.RIGHT_SHIFT(m_Cb_g_tab[cb] + m_Cr_g_tab[cr], SCALEBITS)];
                    outRow[outPos + JpegConstants.RGB_BLUE] = rangeLimit[rangeLimitOffset + y + m_Cb_b_tab[cb]];
                    outPos += JpegConstants.RGB_PIXELSIZE;
                }
            }
        }
Esempio n. 30
0
        /// <summary>
        /// Control routine to do upsampling (and color conversion).
        /// 
        /// Each component is upsampled independently into the conversion
        /// buffer (one row group at a time), then color conversion is applied
        /// a row at a time until the buffer is drained.
        /// </summary>
        public override void upsample(ComponentBuffer[] input_buf, ref int in_row_group_ctr, int in_row_groups_avail, byte[][] output_buf, ref int out_row_ctr, int out_rows_avail)
        {
            /* Refill the conversion buffer if it has been fully emitted. */
            if (m_next_row_out >= m_cinfo.m_max_v_samp_factor)
            {
                for (int ci = 0; ci < m_cinfo.m_num_components; ci++)
                {
                    m_perComponentOffsets[ci] = 0;

                    /* Invoke the per-component upsample method. */
                    m_currentComponent = ci;
                    m_upsampleRowOffset = in_row_group_ctr * m_rowgroup_height[ci];
                    upsampleComponent(ref input_buf[ci]);
                }

                m_next_row_out = 0;
            }

            /* Color-convert and emit rows. Start from how many are buffered: */
            int rowsToEmit = m_cinfo.m_max_v_samp_factor - m_next_row_out;

            /* Clamp to the distance to the end of the image, needed when the
             * image height is not a multiple of max_v_samp_factor:
             */
            if (rowsToEmit > m_rows_to_go)
                rowsToEmit = m_rows_to_go;

            /* Clamp to what the client can accept: */
            out_rows_avail -= out_row_ctr;
            if (rowsToEmit > out_rows_avail)
                rowsToEmit = out_rows_avail;

            m_cinfo.m_cconvert.color_convert(m_color_buf, m_perComponentOffsets, m_next_row_out, output_buf, out_row_ctr, rowsToEmit);

            /* Adjust counts */
            out_row_ctr += rowsToEmit;
            m_rows_to_go -= rowsToEmit;
            m_next_row_out += rowsToEmit;

            /* Once the buffer is drained, declare this input row group consumed */
            if (m_next_row_out >= m_cinfo.m_max_v_samp_factor)
                in_row_group_ctr++;
        }
        /*
         * [R-G,G,B-G] to [R,G,B] conversion with modulo calculation
         * (inverse color transform).
         * This can be seen as an adaption of the general YCbCr->RGB
         * conversion equation with Kr = Kb = 0, while replacing the
         * normalization by modulo calculation.
         */
        private void rgb1_rgb_convert(ComponentBuffer[] input_buf, int input_row, byte[][] output_buf, int output_row, int num_rows)
        {
            int component0RowOffset = m_perComponentOffsets[0];
            int component1RowOffset = m_perComponentOffsets[1];
            int component2RowOffset = m_perComponentOffsets[2];

            int num_cols = m_cinfo.m_output_width;

            /* BUG FIX: input_row must advance with each output row (as in
             * ycc_rgb_convert / ycck_cmyk_convert); previously every output
             * row was converted from the same input row.
             */
            for (int row = 0; row < num_rows; row++, input_row++)
            {
                int columnOffset = 0;
                for (int col = 0; col < num_cols; col++)
                {
                    int r = input_buf[0][input_row + component0RowOffset][col];
                    int g = input_buf[1][input_row + component1RowOffset][col];
                    int b = input_buf[2][input_row + component2RowOffset][col];

                    /* Assume that MAXJSAMPLE+1 is a power of 2, so that the MOD
                     * (modulo) operator is equivalent to the bitmask operator AND.
                     */
                    output_buf[output_row + row][columnOffset + JpegConstants.RGB_RED] = (byte)((r + g - JpegConstants.CENTERJSAMPLE) & JpegConstants.MAXJSAMPLE);
                    output_buf[output_row + row][columnOffset + JpegConstants.RGB_GREEN] = (byte)g;
                    output_buf[output_row + row][columnOffset + JpegConstants.RGB_BLUE] = (byte)((b + g - JpegConstants.CENTERJSAMPLE) & JpegConstants.MAXJSAMPLE);
                    columnOffset += JpegConstants.RGB_PIXELSIZE;
                }
            }
        }
Esempio n. 32
0
 /// <summary>
 /// Entry point: dispatch to the 1:1- or 2:1-vertical merged control routine.
 /// </summary>
 public override void upsample(ComponentBuffer[] input_buf, ref int in_row_group_ctr, int in_row_groups_avail, byte[][] output_buf, ref int out_row_ctr, int out_rows_avail)
 {
     if (!m_use_2v_upsample)
     {
         merged_1v_upsample(input_buf, ref in_row_group_ctr, output_buf, ref out_row_ctr);
         return;
     }

     merged_2v_upsample(input_buf, ref in_row_group_ctr, output_buf, ref out_row_ctr, out_rows_avail);
 }
        /// <summary>
        /// Adobe-style YCCK->CMYK conversion.
        /// We convert YCbCr to R=1-C, G=1-M, and B=1-Y using the usual YCC->RGB
        /// math, while passing K (black) through unchanged.
        /// We assume build_ycc_rgb_table has been called.
        /// </summary>
        private void ycck_cmyk_convert(ComponentBuffer[] input_buf, int input_row, byte[][] output_buf, int output_row, int num_rows)
        {
            int offset0 = m_perComponentOffsets[0];
            int offset1 = m_perComponentOffsets[1];
            int offset2 = m_perComponentOffsets[2];
            int offset3 = m_perComponentOffsets[3];

            byte[] rangeLimit = m_cinfo.m_sample_range_limit;
            int rangeLimitOffset = m_cinfo.m_sampleRangeLimitOffset;

            int width = m_cinfo.m_output_width;
            for (int row = 0; row < num_rows; row++, input_row++)
            {
                byte[] yRow = input_buf[0][input_row + offset0];
                byte[] cbRow = input_buf[1][input_row + offset1];
                byte[] crRow = input_buf[2][input_row + offset2];
                byte[] kRow = input_buf[3][input_row + offset3];
                byte[] outRow = output_buf[output_row + row];

                int outPos = 0;
                for (int col = 0; col < width; col++)
                {
                    int y = yRow[col];
                    int cb = cbRow[col];
                    int cr = crRow[col];

                    /* Range-limiting is essential due to noise introduced by DCT losses,
                     * and for extended gamut encodings (sYCC).
                     */
                    outRow[outPos] = rangeLimit[rangeLimitOffset + JpegConstants.MAXJSAMPLE - (y + m_Cr_r_tab[cr])]; /* red */
                    outRow[outPos + 1] = rangeLimit[rangeLimitOffset + JpegConstants.MAXJSAMPLE - (y + JpegUtils.RIGHT_SHIFT(m_Cb_g_tab[cb] + m_Cr_g_tab[cr], SCALEBITS))]; /* green */
                    outRow[outPos + 2] = rangeLimit[rangeLimitOffset + JpegConstants.MAXJSAMPLE - (y + m_Cb_b_tab[cb])]; /* blue */

                    /* K passes through unchanged; don't need GETJSAMPLE here */
                    outRow[outPos + 3] = kRow[col];
                    outPos += 4;
                }
            }
        }
Esempio n. 34
0
        /// <summary>
        /// Control routine to do upsampling (and color conversion).
        /// 1:1 vertical sampling: the easy case — exactly one row per call
        /// and never any need for a spare row.
        /// </summary>
        private void merged_1v_upsample(ComponentBuffer[] input_buf, ref int in_row_group_ctr, byte[][] output_buf, ref int out_row_ctr)
        {
            /* Just do the upsampling. */
            h2v1_merged_upsample(input_buf, in_row_group_ctr, output_buf, out_row_ctr);

            /* One output row emitted, one input row group consumed. */
            in_row_group_ctr++;
            out_row_ctr++;
        }
 /// <summary>
 /// Grayscale "conversion": a plain row copy.  This also serves
 /// YCC -> grayscale, where only the Y (luminance) component is copied
 /// and chrominance is ignored.
 /// </summary>
 private void grayscale_convert(ComponentBuffer[] input_buf, int input_row, byte[][] output_buf, int output_row, int num_rows)
 {
     int sourceRow = input_row + m_perComponentOffsets[0];
     JpegUtils.jcopy_sample_rows(input_buf[0], sourceRow, output_buf, output_row, num_rows, m_cinfo.m_output_width);
 }
        /// <summary>
        /// First pass of 2-pass quantization: upsample into the strip buffer
        /// and let the quantizer scan it.  Nothing is emitted, but out_row_ctr
        /// is advanced so the outer loop can tell when we're done.
        /// </summary>
        private void post_process_prepass(ComponentBuffer[] input_buf, ref int in_row_group_ctr, int in_row_groups_avail, ref int out_row_ctr)
        {
            /* At the start of a strip, reposition the virtual buffer. */
            if (m_next_row == 0)
                m_buffer = m_whole_image.Access(m_starting_row, m_strip_height);

            /* Upsample some data (up to a strip height's worth). */
            int previousRow = m_next_row;
            m_cinfo.m_upsample.upsample(input_buf, ref in_row_group_ctr, in_row_groups_avail, m_buffer, ref m_next_row, m_strip_height);

            /* Let the quantizer scan whatever new rows were produced. */
            if (m_next_row > previousRow)
            {
                int producedRows = m_next_row - previousRow;
                m_cinfo.m_cquantize.color_quantize(m_buffer, previousRow, null, 0, producedRows);
                out_row_ctr += producedRows;
            }

            /* Advance to the next strip once this one is full. */
            if (m_next_row >= m_strip_height)
            {
                m_starting_row += m_strip_height;
                m_next_row = 0;
            }
        }
        /// <summary>
        /// Color conversion for YCCK -> RGB:
        /// a hybrid of the YCCK -> CMYK and CMYK -> RGB conversions.
        /// </summary>
        private void ycck_rgb_convert(ComponentBuffer[] input_buf, int input_row, byte[][] output_buf, int output_row, int num_rows)
        {
            int offset0 = m_perComponentOffsets[0];
            int offset1 = m_perComponentOffsets[1];
            int offset2 = m_perComponentOffsets[2];
            int offset3 = m_perComponentOffsets[3];

            byte[] rangeLimit = m_cinfo.m_sample_range_limit;
            int rangeLimitOffset = m_cinfo.m_sampleRangeLimitOffset;

            int width = m_cinfo.m_output_width;
            for (int row = 0; row < num_rows; row++, input_row++)
            {
                byte[] yRow = input_buf[0][input_row + offset0];
                byte[] cbRow = input_buf[1][input_row + offset1];
                byte[] crRow = input_buf[2][input_row + offset2];
                byte[] kRow = input_buf[3][input_row + offset3];
                byte[] outRow = output_buf[output_row + row];

                int outPos = 0;
                for (int col = 0; col < width; col++)
                {
                    int y = yRow[col];
                    int cb = cbRow[col];
                    int cr = crRow[col];

                    /* YCCK -> CMYK, then CMYK -> RGB by scaling with K. */
                    int c = rangeLimit[rangeLimitOffset + JpegConstants.MAXJSAMPLE - (y + m_Cr_r_tab[cr])];
                    int m = rangeLimit[rangeLimitOffset + JpegConstants.MAXJSAMPLE - (y + JpegUtils.RIGHT_SHIFT(m_Cb_g_tab[cb] + m_Cr_g_tab[cr], SCALEBITS))];
                    int yy = rangeLimit[rangeLimitOffset + JpegConstants.MAXJSAMPLE - (y + m_Cb_b_tab[cb])];
                    int k = kRow[col];

                    outRow[outPos + JpegConstants.RGB_RED] = (byte)((c * k) / 255);
                    outRow[outPos + JpegConstants.RGB_GREEN] = (byte)((m * k) / 255);
                    outRow[outPos + JpegConstants.RGB_BLUE] = (byte)((yy * k) / 255);
                    outPos += JpegConstants.RGB_PIXELSIZE;
                }
            }
        }
Esempio n. 38
0
        /* Inverse DCT (also performs dequantization).
         * Looks up the IDCT method installed for the given component and
         * invokes it; reports JERR_NOT_COMPILED if none was installed.
         */
        public void inverse(int component_index, short[] coef_block, ComponentBuffer output_buf, int output_row, int output_col)
        {
            m_componentBuffer = output_buf;

            inverse_method idct = m_inverse_DCT_method[component_index];
            if (idct != null)
                idct(component_index, coef_block, output_row, output_col);
            else
                m_cinfo.ERREXIT(J_MESSAGE_CODE.JERR_NOT_COMPILED);
        }
Esempio n. 39
0
        /// <summary>
        /// Control routine to do upsampling (and color conversion).
        /// 2:1 vertical sampling: the upsampler always produces two rows at a
        /// time, so when the caller can accept only one, the second row is
        /// parked in the spare row and handed out on the next call.
        /// </summary>
        private void merged_2v_upsample(ComponentBuffer[] input_buf, ref int in_row_group_ctr, byte[][] output_buf, ref int out_row_ctr, int out_rows_avail)
        {
            int rowsReturned;        /* number of rows returned to caller */
            if (m_spare_full)
            {
                /* A row was parked on the previous cycle: just return it. */
                byte[][] spareWrapper = new byte[1][];
                spareWrapper[0] = m_spare_row;
                JpegUtils.jcopy_sample_rows(spareWrapper, 0, output_buf, out_row_ctr, 1, m_out_row_width);
                rowsReturned = 1;
                m_spare_full = false;
            }
            else
            {
                /* We'd like to return two rows... */
                rowsReturned = 2;

                /* ...but not more than the distance to the end of the image... */
                if (rowsReturned > m_rows_to_go)
                    rowsReturned = m_rows_to_go;

                /* ...and not more than what the client can accept: */
                out_rows_avail -= out_row_ctr;
                if (rowsReturned > out_rows_avail)
                    rowsReturned = out_rows_avail;

                /* Point the work rows at the caller's buffer; if only one row
                 * fits, the second goes to the spare row for next time. */
                byte[][] workRows = new byte[2][];
                workRows[0] = output_buf[out_row_ctr];
                if (rowsReturned > 1)
                {
                    workRows[1] = output_buf[out_row_ctr + 1];
                }
                else
                {
                    workRows[1] = m_spare_row;
                    m_spare_full = true;
                }

                /* Now do the upsampling. */
                h2v2_merged_upsample(input_buf, in_row_group_ctr, workRows);
            }

            /* Adjust counts */
            out_row_ctr += rowsReturned;
            m_rows_to_go -= rowsReturned;

            /* Only once the spare row has been drained is the group consumed */
            if (!m_spare_full)
                in_row_group_ctr++;
        }
Esempio n. 40
0
        /// <summary>
        /// Upsample and color convert for the case of 2:1 horizontal and 2:1 vertical.
        /// Each Cb/Cr chroma sample is shared by a 2x2 block of Y samples:
        /// two adjacent columns in two adjacent output rows (output_buf[0]
        /// and output_buf[1]).
        /// </summary>
        private void h2v2_merged_upsample(ComponentBuffer[] input_buf, int in_row_group_ctr, byte[][] output_buf)
        {
            // The two luma rows of this row group, with their read cursors.
            int inputRow00 = in_row_group_ctr * 2;
            int inputIndex00 = 0;

            int inputRow01 = in_row_group_ctr * 2 + 1;
            int inputIndex01 = 0;

            // Read cursors into the (half-width) Cb and Cr rows.
            int inputIndex1 = 0;
            int inputIndex2 = 0;

            // Write cursors into the two output rows.
            int outIndex0 = 0;
            int outIndex1 = 0;

            byte[] limit = m_cinfo.m_sample_range_limit;
            int limitOffset = m_cinfo.m_sampleRangeLimitOffset;

            /* Loop for each group of output pixels */
            for (int col = m_cinfo.m_output_width >> 1; col > 0; col--)
            {
                /* Do the chroma part of the calculation */
                int cb = input_buf[1][in_row_group_ctr][inputIndex1];
                inputIndex1++;

                int cr = input_buf[2][in_row_group_ctr][inputIndex2];
                inputIndex2++;

                int cred = m_Cr_r_tab[cr];
                int cgreen = JpegUtils.RIGHT_SHIFT(m_Cb_g_tab[cb] + m_Cr_g_tab[cr], SCALEBITS);
                int cblue = m_Cb_b_tab[cb];

                /* Fetch 4 Y values and emit 4 pixels */
                int y = input_buf[0][inputRow00][inputIndex00];
                inputIndex00++;

                output_buf[0][outIndex0 + JpegConstants.RGB_RED] = limit[limitOffset + y + cred];
                output_buf[0][outIndex0 + JpegConstants.RGB_GREEN] = limit[limitOffset + y + cgreen];
                output_buf[0][outIndex0 + JpegConstants.RGB_BLUE] = limit[limitOffset + y + cblue];
                outIndex0 += JpegConstants.RGB_PIXELSIZE;
                
                y = input_buf[0][inputRow00][inputIndex00];
                inputIndex00++;

                output_buf[0][outIndex0 + JpegConstants.RGB_RED] = limit[limitOffset + y + cred];
                output_buf[0][outIndex0 + JpegConstants.RGB_GREEN] = limit[limitOffset + y + cgreen];
                output_buf[0][outIndex0 + JpegConstants.RGB_BLUE] = limit[limitOffset + y + cblue];
                outIndex0 += JpegConstants.RGB_PIXELSIZE;
                
                y = input_buf[0][inputRow01][inputIndex01];
                inputIndex01++;

                output_buf[1][outIndex1 + JpegConstants.RGB_RED] = limit[limitOffset + y + cred];
                output_buf[1][outIndex1 + JpegConstants.RGB_GREEN] = limit[limitOffset + y + cgreen];
                output_buf[1][outIndex1 + JpegConstants.RGB_BLUE] = limit[limitOffset + y + cblue];
                outIndex1 += JpegConstants.RGB_PIXELSIZE;
                
                y = input_buf[0][inputRow01][inputIndex01];
                inputIndex01++;

                output_buf[1][outIndex1 + JpegConstants.RGB_RED] = limit[limitOffset + y + cred];
                output_buf[1][outIndex1 + JpegConstants.RGB_GREEN] = limit[limitOffset + y + cgreen];
                output_buf[1][outIndex1 + JpegConstants.RGB_BLUE] = limit[limitOffset + y + cblue];
                outIndex1 += JpegConstants.RGB_PIXELSIZE;
            }

            /* If image width is odd, do the last output column separately:
             * one chroma sample, but only one Y column in each row. */
            if ((m_cinfo.m_output_width & 1) != 0)
            {
                int cb = input_buf[1][in_row_group_ctr][inputIndex1];
                int cr = input_buf[2][in_row_group_ctr][inputIndex2];
                int cred = m_Cr_r_tab[cr];
                int cgreen = JpegUtils.RIGHT_SHIFT(m_Cb_g_tab[cb] + m_Cr_g_tab[cr], SCALEBITS);
                int cblue = m_Cb_b_tab[cb];

                int y = input_buf[0][inputRow00][inputIndex00];
                output_buf[0][outIndex0 + JpegConstants.RGB_RED] = limit[limitOffset + y + cred];
                output_buf[0][outIndex0 + JpegConstants.RGB_GREEN] = limit[limitOffset + y + cgreen];
                output_buf[0][outIndex0 + JpegConstants.RGB_BLUE] = limit[limitOffset + y + cblue];
                
                y = input_buf[0][inputRow01][inputIndex01];
                output_buf[1][outIndex1 + JpegConstants.RGB_RED] = limit[limitOffset + y + cred];
                output_buf[1][outIndex1 + JpegConstants.RGB_GREEN] = limit[limitOffset + y + cgreen];
                output_buf[1][outIndex1 + JpegConstants.RGB_BLUE] = limit[limitOffset + y + cblue];
            }
        }
 /// <summary>
 /// Entry point for postprocessing: routes to the routine that matches the
 /// currently selected processor mode; unknown modes report JERR_NOTIMPL.
 /// </summary>
 public void post_process_data(ComponentBuffer[] input_buf, ref int in_row_group_ctr, int in_row_groups_avail, byte[][] output_buf, ref int out_row_ctr, int out_rows_avail)
 {
     if (m_processor == ProcessorType.OnePass)
         post_process_1pass(input_buf, ref in_row_group_ctr, in_row_groups_avail, output_buf, ref out_row_ctr, out_rows_avail);
     else if (m_processor == ProcessorType.PrePass)
         post_process_prepass(input_buf, ref in_row_group_ctr, in_row_groups_avail, ref out_row_ctr);
     else if (m_processor == ProcessorType.Upsample)
         m_cinfo.m_upsample.upsample(input_buf, ref in_row_group_ctr, in_row_groups_avail, output_buf, ref out_row_ctr, out_rows_avail);
     else if (m_processor == ProcessorType.SecondPass)
         post_process_2pass(output_buf, ref out_row_ctr, out_rows_avail);
     else
         m_cinfo.ERREXIT(J_MESSAGE_CODE.JERR_NOTIMPL);
 }
Esempio n. 42
0
        /// <summary>
        /// Fast path for the common 2:1 horizontal and 2:1 vertical case.
        /// Still a box filter: each input sample is replicated into a 2x2
        /// block of output samples.
        /// </summary>
        private void h2v2_upsample(ref ComponentBuffer input_data)
        {
            ComponentBuffer output_data = m_color_buf[m_currentComponent];

            int inrow = 0;
            for (int outrow = 0; outrow < m_cinfo.m_max_v_samp_factor; outrow += 2, inrow++)
            {
                int sourceRow = m_upsampleRowOffset + inrow;

                /* Double every sample horizontally into the even output row. */
                int inCol = 0;
                int outCol = 0;
                while (outCol < m_cinfo.m_output_width)
                {
                    byte sample = input_data[sourceRow][inCol]; /* don't need GETJSAMPLE() here */
                    inCol++;
                    output_data[outrow][outCol] = sample;
                    output_data[outrow][outCol + 1] = sample;
                    outCol += 2;
                }

                /* Duplicate the even output row into the odd one. */
                JpegUtils.jcopy_sample_rows(output_data, outrow, output_data, outrow + 1, 1, m_cinfo.m_output_width);
            }
        }
Esempio n. 43
0
        /// <summary>
        /// Initialization: verify the sampling factors are supported, select a
        /// per-component upsampling method, and allocate a work buffer for
        /// each component whose method needs one.
        /// </summary>
        public my_upsampler(jpeg_decompress_struct cinfo)
        {
            m_cinfo = cinfo;
            m_need_context_rows = false;  /* until we find out differently */

            /* CCIR601 (co-sited) sampling isn't supported. */
            if (cinfo.m_CCIR601_sampling)
                cinfo.ERREXIT(J_MESSAGE_CODE.JERR_CCIR601_NOTIMPL);

            for (int ci = 0; ci < cinfo.m_num_components; ci++)
            {
                jpeg_component_info compInfo = cinfo.Comp_info[ci];

                /* Size of an "input group" after IDCT scaling: this many samples
                 * are converted to max_h_samp_factor * max_v_samp_factor pixels.
                 */
                int hInGroup = (compInfo.H_samp_factor * compInfo.DCT_h_scaled_size) / cinfo.min_DCT_h_scaled_size;
                int vInGroup = (compInfo.V_samp_factor * compInfo.DCT_v_scaled_size) / cinfo.min_DCT_v_scaled_size;
                int hOutGroup = cinfo.m_max_h_samp_factor;
                int vOutGroup = cinfo.m_max_v_samp_factor;

                /* save for use later */
                m_rowgroup_height[ci] = vInGroup;

                if (!compInfo.component_needed)
                {
                    /* Uninteresting component: never upsampled, no buffer needed. */
                    m_upsampleMethods[ci] = ComponentUpsampler.noop_upsampler;
                    continue;
                }

                if (hInGroup == hOutGroup && vInGroup == vOutGroup)
                {
                    /* Already full size: aliased in place, no buffer needed. */
                    m_upsampleMethods[ci] = ComponentUpsampler.fullsize_upsampler;
                    continue;
                }

                if (hInGroup * 2 == hOutGroup && vInGroup == vOutGroup)
                {
                    /* Special case for 2h1v upsampling */
                    m_upsampleMethods[ci] = ComponentUpsampler.h2v1_upsampler;
                }
                else if (hInGroup * 2 == hOutGroup && vInGroup * 2 == vOutGroup)
                {
                    /* Special case for 2h2v upsampling */
                    m_upsampleMethods[ci] = ComponentUpsampler.h2v2_upsampler;
                }
                else if ((hOutGroup % hInGroup) == 0 && (vOutGroup % vInGroup) == 0)
                {
                    /* Generic integral-factors replication method */
                    m_upsampleMethods[ci] = ComponentUpsampler.int_upsampler;
                    m_h_expand[ci] = (byte)(hOutGroup / hInGroup);
                    m_v_expand[ci] = (byte)(vOutGroup / vInGroup);
                }
                else
                {
                    /* Non-integral sampling ratios are not implemented. */
                    cinfo.ERREXIT(J_MESSAGE_CODE.JERR_FRACT_SAMPLE_NOTIMPL);
                }

                /* Allocate a full-width work buffer for this component. */
                ComponentBuffer workBuffer = new ComponentBuffer();
                workBuffer.SetBuffer(jpeg_common_struct.AllocJpegSamples(
                    JpegUtils.jround_up(cinfo.m_output_width, cinfo.m_max_h_samp_factor),
                    cinfo.m_max_v_samp_factor));

                m_color_buf[ci] = workBuffer;
            }
        }
        /// <summary>
        /// One-pass (strip buffer) postprocessing, used for color precision
        /// reduction as well as one-pass quantization.
        /// </summary>
        private void post_process_1pass(ComponentBuffer[] input_buf, ref int in_row_group_ctr, int in_row_groups_avail, byte[][] output_buf, ref int out_row_ctr, int out_rows_avail)
        {
            /* Fill the strip buffer, but never more than we can dump out in
             * one go; we rely on the upsampler to detect bottom of image. */
            int rowCap = out_rows_avail - out_row_ctr;
            if (rowCap > m_strip_height)
                rowCap = m_strip_height;

            int rowsProduced = 0;
            m_cinfo.m_upsample.upsample(input_buf, ref in_row_group_ctr, in_row_groups_avail, m_buffer, ref rowsProduced, rowCap);

            /* Quantize and emit whatever was produced. */
            m_cinfo.m_cquantize.color_quantize(m_buffer, 0, output_buf, out_row_ctr, rowsProduced);
            out_row_ctr += rowsProduced;
        }