// Write some scanlines of data to the JPEG compressor. // // The return value will be the number of lines actually written. // This should be less than the supplied num_lines only in case that // the data destination module has requested suspension of the compressor, // or if more than image_height scanlines are passed in. // // Note: we warn about excess calls to jpeg_write_scanlines() since // this likely signals an application programmer error. However, // excess scanlines passed in the last valid call are *silently* ignored, // so that the application need not adjust num_lines for end-of-image // when using a multiple-scanline buffer. public static uint jpeg_write_scanlines(jpeg_compress cinfo, byte[][] scanlines, uint num_lines) { if(cinfo.global_state!=STATE.CSCANNING) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state); if(cinfo.next_scanline>=cinfo.image_height) WARNMS(cinfo, J_MESSAGE_CODE.JWRN_TOO_MUCH_DATA); // Call progress monitor hook if present if(cinfo.progress!=null) { cinfo.progress.pass_counter=(int)cinfo.next_scanline; cinfo.progress.pass_limit=(int)cinfo.image_height; cinfo.progress.progress_monitor(cinfo); } // Give master control module another chance if this is first call to // jpeg_write_scanlines. This lets output of the frame/scan headers be // delayed so that application can write COM, etc, markers between // jpeg_start_compress and jpeg_write_scanlines. if(cinfo.master.call_pass_startup) cinfo.master.pass_startup(cinfo); // Ignore any extra scanlines at bottom of image. uint rows_left=cinfo.image_height-cinfo.next_scanline; if(num_lines>rows_left) num_lines=rows_left; uint row_ctr=0; cinfo.main.process_data(cinfo, scanlines, ref row_ctr, num_lines); cinfo.next_scanline+=row_ctr; return row_ctr; }
// Initialize for a processing pass. static void start_pass_diff(jpeg_compress cinfo, J_BUF_MODE pass_mode) { jpeg_lossless_c_codec losslsc=(jpeg_lossless_c_codec)cinfo.coef; c_diff_controller diff=(c_diff_controller)losslsc.diff_private; diff.iMCU_row_num=0; start_iMCU_row_c_diff(cinfo); switch(pass_mode) { case J_BUF_MODE.JBUF_PASS_THRU: if(diff.whole_image[0]!=null) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); losslsc.compress_data=compress_data_diff; break; #if FULL_SAMP_BUFFER_SUPPORTED case J_BUF_MODE.JBUF_SAVE_AND_PASS: if(diff.whole_image[0]==null) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); losslsc.compress_data=compress_first_pass_diff; break; case J_BUF_MODE.JBUF_CRANK_DEST: if(diff.whole_image[0]==null) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); losslsc.compress_data=compress_output_diff; break; #endif default: ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); break; } }
static void scaler_start_pass(jpeg_compress cinfo) { jpeg_lossless_c_codec losslsc=(jpeg_lossless_c_codec)cinfo.coef; // Set scaler function based on Pt if(cinfo.Al!=0) losslsc.scaler_scale=simple_downscale; else losslsc.scaler_scale=noscale; }
// Support routines that do various essential calculations. // Do computations that are needed before master selection phase static void initial_setup(jpeg_compress cinfo) { // Sanity check on image dimensions if(cinfo.image_height<=0||cinfo.image_width<=0||cinfo.num_components<=0||cinfo.input_components<=0) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_EMPTY_IMAGE); // Make sure image isn't bigger than I can handle if(cinfo.image_height>JPEG_MAX_DIMENSION||cinfo.image_width>JPEG_MAX_DIMENSION) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_IMAGE_TOO_BIG, JPEG_MAX_DIMENSION); // Width of an input scanline must be representable as uint. long samplesperrow=cinfo.image_width*cinfo.input_components; if(samplesperrow<0||samplesperrow>uint.MaxValue) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_WIDTH_OVERFLOW); // For now, precision must match compiled-in value... if(cinfo.data_precision!=BITS_IN_JSAMPLE) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_PRECISION, cinfo.data_precision); // Check that number of components won't exceed internal array sizes if(cinfo.num_components>MAX_COMPONENTS) ERREXIT2(cinfo, J_MESSAGE_CODE.JERR_COMPONENT_COUNT, cinfo.num_components, MAX_COMPONENTS); // Compute maximum sampling factors; check factor validity cinfo.max_h_samp_factor=1; cinfo.max_v_samp_factor=1; for(int ci=0; ci<cinfo.num_components; ci++) { jpeg_component_info compptr=cinfo.comp_info[ci]; if(compptr.h_samp_factor<=0||compptr.h_samp_factor>MAX_SAMP_FACTOR||compptr.v_samp_factor<=0||compptr.v_samp_factor>MAX_SAMP_FACTOR) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_SAMPLING); cinfo.max_h_samp_factor=Math.Max(cinfo.max_h_samp_factor, compptr.h_samp_factor); cinfo.max_v_samp_factor=Math.Max(cinfo.max_v_samp_factor, compptr.v_samp_factor); } // Compute dimensions of components uint DCT_size=cinfo.DCT_size; for(int ci=0; ci<cinfo.num_components; ci++) { jpeg_component_info compptr=cinfo.comp_info[ci]; // Fill in the correct component_index value; don't rely on application compptr.component_index=ci; // For compression, we never do any codec-based processing. compptr.DCT_scaled_size=DCT_size; // Size in blocks compptr.width_in_blocks=(uint)jdiv_round_up(cinfo.image_width*compptr.h_samp_factor, cinfo.max_h_samp_factor*DCT_size); compptr.height_in_blocks=(uint)jdiv_round_up(cinfo.image_height*compptr.v_samp_factor, cinfo.max_v_samp_factor*DCT_size); // Size in samples compptr.downsampled_width=(uint)jdiv_round_up(cinfo.image_width*compptr.h_samp_factor, cinfo.max_h_samp_factor); compptr.downsampled_height=(uint)jdiv_round_up(cinfo.image_height*compptr.v_samp_factor, cinfo.max_v_samp_factor); // Mark component needed (this flag isn't actually used for compression) compptr.component_needed=true; } // Compute number of fully interleaved MCU rows (number of times that // main controller will call coefficient controller). cinfo.total_iMCU_rows=(uint)jdiv_round_up(cinfo.image_height, cinfo.max_v_samp_factor*DCT_size); }
// Alternate entry point to write raw data. // Processes exactly one iMCU row per call, unless suspended. public static uint jpeg_write_raw_data(jpeg_compress cinfo, byte[][][] data, uint num_lines) { if (cinfo.global_state != STATE.CRAW_OK) { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state); } if (cinfo.next_scanline >= cinfo.image_height) { WARNMS(cinfo, J_MESSAGE_CODE.JWRN_TOO_MUCH_DATA); return(0); } // Call progress monitor hook if present if (cinfo.progress != null) { cinfo.progress.pass_counter = (int)cinfo.next_scanline; cinfo.progress.pass_limit = (int)cinfo.image_height; cinfo.progress.progress_monitor(cinfo); } // Give master control module another chance if this is first call to // jpeg_write_raw_data. This lets output of the frame/scan headers be // delayed so that application can write COM, etc, markers between // jpeg_start_compress and jpeg_write_raw_data. if (cinfo.master.call_pass_startup) { cinfo.master.pass_startup(cinfo); } // Verify that at least one iMCU row has been passed. uint lines_per_iMCU_row = (uint)cinfo.max_v_samp_factor * cinfo.DCT_size; if (num_lines < lines_per_iMCU_row) { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BUFFER_SIZE); } // Directly compress the row. if (!cinfo.coef.compress_data(cinfo, data)) { return(0); // If compressor did not consume the whole row, suspend processing. } // OK, we processed one iMCU row. cinfo.next_scanline += lines_per_iMCU_row; return(lines_per_iMCU_row); }
const int OUTPUT_BUF_SIZE=4096; // choose an efficiently Write'able size // Initialize destination --- called by jpeg_start_compress // before any data is actually written. static void init_destination(jpeg_compress cinfo) { my_destination_mgr dest=(my_destination_mgr)cinfo.dest; // Allocate the output buffer --- it will be released when done with image try { dest.buffer=new byte[OUTPUT_BUF_SIZE]; } catch { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_OUT_OF_MEMORY, 4); } dest.output_bytes=dest.buffer; dest.next_output_byte=0; dest.free_in_buffer=OUTPUT_BUF_SIZE; }
// Initialize preprocessing controller. static void jinit_c_prep_controller(jpeg_compress cinfo, bool need_full_buffer) { if (need_full_buffer) { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); // safety check } my_prep_controller prep = null; try { prep = new my_prep_controller(); } catch { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_OUT_OF_MEMORY, 4); } cinfo.prep = prep; prep.start_pass = start_pass_prep; // Allocate the color conversion buffer. // We make the buffer wide enough to allow the downsampler to edge-expand // horizontally within the buffer, if it so chooses. if (cinfo.downsample.need_context_rows) { // Set up to provide context rows #if CONTEXT_ROWS_SUPPORTED prep.pre_process_data = pre_process_context; create_context_buffer(cinfo); #else ERREXIT(cinfo, J_MESSAGE_CODE.JERR_NOT_COMPILED); #endif } else { // No context, just make it tall enough for one row group prep.pre_process_data = pre_process_data; for (int ci = 0; ci < cinfo.num_components; ci++) { jpeg_component_info compptr = cinfo.comp_info[ci]; prep.color_buf[ci] = alloc_sarray(cinfo, (uint)(((int)compptr.width_in_blocks * cinfo.DCT_size * cinfo.max_h_samp_factor) / compptr.h_samp_factor), (uint)cinfo.max_v_samp_factor); } } }
// Initialize for a processing pass. static void start_pass_prep(jpeg_compress cinfo, J_BUF_MODE pass_mode) { my_prep_controller prep=(my_prep_controller)cinfo.prep; if(pass_mode!=J_BUF_MODE.JBUF_PASS_THRU) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); // Initialize total-height counter for detecting bottom of image prep.rows_to_go=cinfo.image_height; // Mark the conversion buffer empty prep.next_buf_row=0; #if CONTEXT_ROWS_SUPPORTED // Preset additional state variables for context mode. // These aren't used in non-context mode, so we needn't test which mode. prep.this_row_group=0; // Set next_buf_stop to stop after two row groups have been read in. prep.next_buf_stop=2*cinfo.max_v_samp_factor; #endif }
// Process some data in the first pass of a multi-pass case. // We process the equivalent of one fully interleaved MCU row ("iMCU" row) // per call, ie, v_samp_factor rows for each component in the image. // This amount of data is read from the source buffer and saved into the arrays. // // We must also emit the data to the compressor. This is conveniently // done by calling compress_output_diff() after we've loaded the current strip // of the arrays. // // NB: input_buf contains a plane for each component in image. All components // are loaded into the arrays in this pass. However, it may be that // only a subset of the components are emitted to the compressor during // this first pass; be careful about looking at the scan-dependent variables // (MCU dimensions, etc). static bool compress_first_pass_diff(jpeg_compress cinfo, byte[][][] input_buf) { jpeg_lossless_c_codec losslsc = (jpeg_lossless_c_codec)cinfo.coef; c_diff_controller diff = (c_diff_controller)losslsc.diff_private; uint last_iMCU_row = cinfo.total_iMCU_rows - 1; for (int ci = 0; ci < cinfo.num_components; ci++) { jpeg_component_info compptr = cinfo.comp_info[ci]; // Count non-dummy sample rows in this iMCU row. int samp_rows; if (diff.iMCU_row_num < last_iMCU_row) { samp_rows = compptr.v_samp_factor; } else { // NB: can't use last_row_height here, since may not be set! samp_rows = (int)(compptr.height_in_blocks % compptr.v_samp_factor); if (samp_rows == 0) { samp_rows = compptr.v_samp_factor; } } uint samps_across = compptr.width_in_blocks; // Perform point transform scaling and prediction/differencing for all // non-dummy rows in this iMCU row. Each call on these functions // process a complete row of samples. for (int samp_row = 0; samp_row < samp_rows; samp_row++) { Array.Copy(input_buf[ci][samp_row], diff.whole_image[ci][samp_row + diff.iMCU_row_num * compptr.v_samp_factor], samps_across); } } // NB: compress_output will increment iMCU_row_num if successful. // A suspension return will result in redoing all the work above next time. // Emit data to the compressor, sharing code with subsequent passes return(compress_output_diff(cinfo, input_buf)); }
// Process some data in the simple no-context case. // // Preprocessor output data is counted in "row groups". A row group // is defined to be v_samp_factor sample rows of each component. // Downsampling will produce this much data from each max_v_samp_factor input rows. static void pre_process_data(jpeg_compress cinfo, byte[][] input_buf, ref uint in_row_ctr, uint in_rows_avail, byte[][][] output_buf, ref uint out_row_group_ctr, uint out_row_groups_avail) { my_prep_controller prep = (my_prep_controller)cinfo.prep; while (in_row_ctr < in_rows_avail && out_row_group_ctr < out_row_groups_avail) { // Do color conversion to fill the conversion buffer. uint inrows = in_rows_avail - in_row_ctr; int numrows = cinfo.max_v_samp_factor - prep.next_buf_row; numrows = (int)Math.Min((uint)numrows, inrows); cinfo.cconvert.color_convert(cinfo, input_buf, in_row_ctr, prep.color_buf, (uint)prep.next_buf_row, numrows); in_row_ctr += (uint)numrows; prep.next_buf_row += numrows; prep.rows_to_go -= (uint)numrows; // If at bottom of image, pad to fill the conversion buffer. if (prep.rows_to_go == 0 && prep.next_buf_row < cinfo.max_v_samp_factor) { for (int ci = 0; ci < cinfo.num_components; ci++) { expand_bottom_edge(prep.color_buf[ci], cinfo.image_width, prep.next_buf_row, cinfo.max_v_samp_factor); } prep.next_buf_row = cinfo.max_v_samp_factor; } // If we've filled the conversion buffer, empty it. if (prep.next_buf_row == cinfo.max_v_samp_factor) { cinfo.downsample.downsample(cinfo, prep.color_buf, 0, output_buf, out_row_group_ctr); prep.next_buf_row = 0; out_row_group_ctr++; } // If at bottom of image, pad the output to a full iMCU height. // Note we assume the caller is providing a one-iMCU-height output buffer! if (prep.rows_to_go == 0 && out_row_group_ctr < out_row_groups_avail) { for (int ci = 0; ci < cinfo.num_components; ci++) { jpeg_component_info compptr = cinfo.comp_info[ci]; expand_bottom_edge(output_buf[ci], compptr.width_in_blocks * cinfo.DCT_size, (int)(out_row_group_ctr * compptr.v_samp_factor), (int)(out_row_groups_avail * compptr.v_samp_factor)); } out_row_group_ctr = out_row_groups_avail; break; // can exit outer loop without test } } // while(...) }
// Empty the output buffer --- called whenever buffer fills up. // // In typical applications, this should write the entire output buffer // (ignoring the current state of next_output_byte & free_in_buffer), // reset the pointer & count to the start of the buffer, and return true // indicating that the buffer has been dumped. // // In applications that need to be able to suspend compression due to output // overrun, a false return indicates that the buffer cannot be emptied now. // In this situation, the compressor will return to its caller (possibly with // an indication that it has not accepted all the supplied scanlines). The // application should resume compression after it has made more room in the // output buffer. Note that there are substantial restrictions on the use of // suspension --- see the documentation. // // When suspending, the compressor will back up to a convenient restart point // (typically the start of the current MCU). next_output_byte & free_in_buffer // indicate where the restart point will be if the current call returns false. // Data beyond this point will be regenerated after resumption, so do not // write it out when emptying the buffer externally. static bool empty_output_buffer(jpeg_compress cinfo) { my_destination_mgr dest = (my_destination_mgr)cinfo.dest; try { dest.outfile.Write(dest.buffer, 0, OUTPUT_BUF_SIZE); } catch { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_FILE_WRITE); } dest.output_bytes = dest.buffer; dest.next_output_byte = 0; dest.free_in_buffer = OUTPUT_BUF_SIZE; return(true); }
// Terminate destination --- called by jpeg_finish_compress // after all data has been written. Usually needs to flush buffer. // // NB: *not* called by jpeg_abort or jpeg_destroy; surrounding // application must deal with any cleanup that should happen even // for error exit. static void term_destination(jpeg_compress cinfo) { my_destination_mgr dest = (my_destination_mgr)cinfo.dest; int datacount = OUTPUT_BUF_SIZE - (int)dest.free_in_buffer; // Write any data remaining in the buffer if (datacount > 0) { try { dest.outfile.Write(dest.buffer, 0, datacount); } catch { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_FILE_WRITE); } } dest.outfile.Flush(); }
// Empty the output buffer --- called whenever buffer fills up. // // In typical applications, this should write the entire output buffer // (ignoring the current state of next_output_byte & free_in_buffer), // reset the pointer & count to the start of the buffer, and return true // indicating that the buffer has been dumped. // // In applications that need to be able to suspend compression due to output // overrun, a false return indicates that the buffer cannot be emptied now. // In this situation, the compressor will return to its caller (possibly with // an indication that it has not accepted all the supplied scanlines). The // application should resume compression after it has made more room in the // output buffer. Note that there are substantial restrictions on the use of // suspension --- see the documentation. // // When suspending, the compressor will back up to a convenient restart point // (typically the start of the current MCU). next_output_byte & free_in_buffer // indicate where the restart point will be if the current call returns false. // Data beyond this point will be regenerated after resumption, so do not // write it out when emptying the buffer externally. static bool empty_output_buffer(jpeg_compress cinfo) { my_destination_mgr dest=(my_destination_mgr)cinfo.dest; try { dest.outfile.Write(dest.buffer, 0, OUTPUT_BUF_SIZE); } catch { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_FILE_WRITE); } dest.output_bytes=dest.buffer; dest.next_output_byte=0; dest.free_in_buffer=OUTPUT_BUF_SIZE; return true; }
// Write some scanlines of data to the JPEG compressor. // // The return value will be the number of lines actually written. // This should be less than the supplied num_lines only in case that // the data destination module has requested suspension of the compressor, // or if more than image_height scanlines are passed in. // // Note: we warn about excess calls to jpeg_write_scanlines() since // this likely signals an application programmer error. However, // excess scanlines passed in the last valid call are *silently* ignored, // so that the application need not adjust num_lines for end-of-image // when using a multiple-scanline buffer. public static uint jpeg_write_scanlines(jpeg_compress cinfo, byte[][] scanlines, uint num_lines) { if (cinfo.global_state != STATE.CSCANNING) { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state); } if (cinfo.next_scanline >= cinfo.image_height) { WARNMS(cinfo, J_MESSAGE_CODE.JWRN_TOO_MUCH_DATA); } // Call progress monitor hook if present if (cinfo.progress != null) { cinfo.progress.pass_counter = (int)cinfo.next_scanline; cinfo.progress.pass_limit = (int)cinfo.image_height; cinfo.progress.progress_monitor(cinfo); } // Give master control module another chance if this is first call to // jpeg_write_scanlines. This lets output of the frame/scan headers be // delayed so that application can write COM, etc, markers between // jpeg_start_compress and jpeg_write_scanlines. if (cinfo.master.call_pass_startup) { cinfo.master.pass_startup(cinfo); } // Ignore any extra scanlines at bottom of image. uint rows_left = cinfo.image_height - cinfo.next_scanline; if (num_lines > rows_left) { num_lines = rows_left; } uint row_ctr = 0; cinfo.main.process_data(cinfo, scanlines, ref row_ctr, num_lines); cinfo.next_scanline += row_ctr; return(row_ctr); }
// Quantization table setup routines // Define a quantization table equal to the basic_table times // a scale factor (given as a percentage). // If force_baseline is true, the computed quantization table entries // are limited to 1..255 for JPEG baseline compatibility. public static void jpeg_add_quant_table(jpeg_compress cinfo, int which_tbl, uint[] basic_table, int scale_factor, bool force_baseline) { // Safety check to ensure start_compress not called yet. if (cinfo.global_state != STATE.CSTART) { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state); } if (which_tbl < 0 || which_tbl >= NUM_QUANT_TBLS) { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_DQT_INDEX, which_tbl); } if (cinfo.quant_tbl_ptrs[which_tbl] == null) { cinfo.quant_tbl_ptrs[which_tbl] = jpeg_alloc_quant_table(cinfo); } for (int i = 0; i < DCTSIZE2; i++) { int temp = ((int)basic_table[i] * scale_factor + 50) / 100; // limit the values to the valid range if (temp <= 0) { temp = 1; } if (temp > 32767) { temp = 32767; // max quantizer needed for 12 bits } if (force_baseline && temp > 255) { temp = 255; // limit to baseline range if requested } cinfo.quant_tbl_ptrs[which_tbl].quantval[i] = (ushort)temp; } // Initialize sent_table false so table will be written to JPEG file. cinfo.quant_tbl_ptrs[which_tbl].sent_table = false; }
// Process some data in subsequent passes of a multi-pass case. // We process the equivalent of one fully interleaved MCU row ("iMCU" row) // per call, ie, v_samp_factor rows for each component in the scan. // The data is obtained from the arrays and fed to the compressor. // Returns true if the iMCU row is completed, false if suspended. // // NB: input_buf is ignored; it is likely to be a null pointer. static bool compress_output_diff(jpeg_compress cinfo, byte[][][] input_buf) { jpeg_lossless_c_codec losslsc = (jpeg_lossless_c_codec)cinfo.coef; c_diff_controller diff = (c_diff_controller)losslsc.diff_private; byte[][][] buffer = new byte[MAX_COMPONENTS][][]; int[] buffer_ind = new int[MAX_COMPONENTS]; // Align the buffers for the components used in this scan. // NB: during first pass, this is safe only because the buffers will // already be aligned properly, so jmemmgr.cs won't need to do any I/O. for (int comp = 0; comp < cinfo.comps_in_scan; comp++) { jpeg_component_info compptr = cinfo.cur_comp_info[comp]; int ci = compptr.component_index; buffer[ci] = diff.whole_image[ci]; buffer_ind[ci] = (int)diff.iMCU_row_num * compptr.v_samp_factor; } return(compress_data_diff(cinfo, buffer, buffer_ind)); }
// Downsample pixel values of a single component. // This version handles the common case of 2:1 horizontal and 1:1 vertical, // without smoothing. // // A note about the "bias" calculations: when rounding fractional values to // integer, we do not want to always round 0.5 up to the next integer. // If we did that, we'd introduce a noticeable bias towards larger values. // Instead, this code is arranged so that 0.5 will be rounded up or down at // alternate pixel locations (a simple ordered dither pattern). static void h2v1_downsample(jpeg_compress cinfo, jpeg_component_info compptr, byte[][] input_data, uint in_row_index, byte[][] output_data, uint out_row_index) { uint output_cols = compptr.width_in_blocks * cinfo.DCT_size; // Expand input data enough to let all the output samples be generated // by the standard loop. Special-casing padded output would be more // efficient. expand_right_edge(input_data, in_row_index, cinfo.max_v_samp_factor, cinfo.image_width, output_cols * 2); for (int outrow = 0; outrow < compptr.v_samp_factor; outrow++) { byte[] outptr = output_data[out_row_index + outrow]; byte[] inptr = input_data[in_row_index + outrow]; int bias = 0; // bias = 0,1,0,1,... for successive samples for (uint outcol = 0, ind = 0; outcol < output_cols; outcol++, ind += 2) { outptr[outcol] = (byte)((inptr[ind] + inptr[ind + 1] + bias) >> 1); bias ^= 1; // 0=>1, 1=>0 } } }
// Initialize for a processing pass. static void start_pass_prep(jpeg_compress cinfo, J_BUF_MODE pass_mode) { my_prep_controller prep = (my_prep_controller)cinfo.prep; if (pass_mode != J_BUF_MODE.JBUF_PASS_THRU) { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); } // Initialize total-height counter for detecting bottom of image prep.rows_to_go = cinfo.image_height; // Mark the conversion buffer empty prep.next_buf_row = 0; #if CONTEXT_ROWS_SUPPORTED // Preset additional state variables for context mode. // These aren't used in non-context mode, so we needn't test which mode. prep.this_row_group = 0; // Set next_buf_stop to stop after two row groups have been read in. prep.next_buf_stop = 2 * cinfo.max_v_samp_factor; #endif }
// Compression initialization. // Before calling this, all parameters and a data destination must be set up. // // We require a write_all_tables parameter as a failsafe check when writing // multiple datastreams from the same compression object. Since prior runs // will have left all the tables marked sent_table=true, a subsequent run // would emit an abbreviated stream (no tables) by default. This may be what // is wanted, but for safety's sake it should not be the default behavior: // programmers should have to make a deliberate choice to emit abbreviated // images. Therefore the documentation and examples should encourage people // to pass write_all_tables=true; then it will take active thought to do the // wrong thing. public static void jpeg_start_compress(jpeg_compress cinfo, bool write_all_tables) { if(cinfo.global_state!=STATE.CSTART) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state); if(write_all_tables) jpeg_suppress_tables(cinfo, false); // mark all tables to be written // (Re)initialize error mgr and destination modules cinfo.err.reset_error_mgr(cinfo); cinfo.dest.init_destination(cinfo); // Perform master selection of active modules jinit_compress_master(cinfo); // Set up for the first pass cinfo.master.prepare_for_pass(cinfo); // Ready for application to drive first pass through jpeg_write_scanlines // or jpeg_write_raw_data. cinfo.next_scanline=0; cinfo.global_state=(cinfo.raw_data_in?STATE.CRAW_OK:STATE.CSCANNING); }
// Prepare for output to a stdio stream. // The caller must have already opened the stream, and is responsible // for jpeg_destination_mgr it after finishing compression. public static void jpeg_stdio_dest(jpeg_compress cinfo, Stream outfile) { my_destination_mgr dest; // The destination object is made permanent so that multiple JPEG images // can be written to the same file without re-executing jpeg_stdio_dest. // This makes it dangerous to use this manager and a different destination // manager serially with the same JPEG object, because their private object // sizes may be different. Caveat programmer. if (cinfo.dest == null) { // first time for this JPEG object? cinfo.dest = new my_destination_mgr(); } dest = (my_destination_mgr)cinfo.dest; dest.init_destination = init_destination; dest.empty_output_buffer = empty_output_buffer; dest.term_destination = term_destination; dest.outfile = outfile; }
// Initialize for a processing pass. static void start_pass_diff(jpeg_compress cinfo, J_BUF_MODE pass_mode) { jpeg_lossless_c_codec losslsc = (jpeg_lossless_c_codec)cinfo.coef; c_diff_controller diff = (c_diff_controller)losslsc.diff_private; diff.iMCU_row_num = 0; start_iMCU_row_c_diff(cinfo); switch (pass_mode) { case J_BUF_MODE.JBUF_PASS_THRU: if (diff.whole_image[0] != null) { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); } losslsc.compress_data = compress_data_diff; break; #if FULL_SAMP_BUFFER_SUPPORTED case J_BUF_MODE.JBUF_SAVE_AND_PASS: if (diff.whole_image[0] == null) { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); } losslsc.compress_data = compress_first_pass_diff; break; case J_BUF_MODE.JBUF_CRANK_DEST: if (diff.whole_image[0] == null) { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); } losslsc.compress_data = compress_output_diff; break; #endif default: ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); break; } }
// Finish up at end of pass. static void finish_pass_master(jpeg_compress cinfo) { my_comp_master master = (my_comp_master)cinfo.master; // The entropy coder always needs an end-of-pass call, // either to analyze statistics or to flush its output buffer. cinfo.coef.entropy_finish_pass(cinfo); // Update state for next pass switch (master.pass_type) { case c_pass_type.main_pass: // next pass is either output of scan 0 (after optimization) // or output of scan 1 (if no optimization). master.pass_type = c_pass_type.output_pass; if (!cinfo.optimize_coding) { master.scan_number++; } break; case c_pass_type.huff_opt_pass: // next pass is always output of current scan master.pass_type = c_pass_type.output_pass; break; case c_pass_type.output_pass: // next pass is either optimization or output of next scan if (cinfo.optimize_coding) { master.pass_type = c_pass_type.huff_opt_pass; } master.scan_number++; break; } master.pass_number++; }
// Quantization table setup routines // Define a quantization table equal to the basic_table times // a scale factor (given as a percentage). // If force_baseline is true, the computed quantization table entries // are limited to 1..255 for JPEG baseline compatibility. public static void jpeg_add_quant_table(jpeg_compress cinfo, int which_tbl, uint[] basic_table, int scale_factor, bool force_baseline) { // Safety check to ensure start_compress not called yet. if(cinfo.global_state!=STATE.CSTART) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state); if(which_tbl<0||which_tbl>=NUM_QUANT_TBLS) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_DQT_INDEX, which_tbl); if(cinfo.quant_tbl_ptrs[which_tbl]==null) cinfo.quant_tbl_ptrs[which_tbl]=jpeg_alloc_quant_table(cinfo); for(int i=0; i<DCTSIZE2; i++) { int temp=((int)basic_table[i]*scale_factor+50)/100; // limit the values to the valid range if(temp<=0) temp=1; if(temp>32767) temp=32767; // max quantizer needed for 12 bits if(force_baseline&&temp>255) temp=255; // limit to baseline range if requested cinfo.quant_tbl_ptrs[which_tbl].quantval[i]=(ushort)temp; } // Initialize sent_table false so table will be written to JPEG file. cinfo.quant_tbl_ptrs[which_tbl].sent_table=false; }
// Reset within-iMCU-row counters for a new row static void start_iMCU_row_c_diff(jpeg_compress cinfo) { jpeg_lossless_c_codec losslsc=(jpeg_lossless_c_codec)cinfo.coef; c_diff_controller diff=(c_diff_controller)losslsc.diff_private; // In an interleaved scan, an MCU row is the same as an iMCU row. // In a noninterleaved scan, an iMCU row has v_samp_factor MCU rows. // But at the bottom of the image, process only what's left. if(cinfo.comps_in_scan>1) { diff.MCU_rows_per_iMCU_row=1; } else { if(diff.iMCU_row_num<(cinfo.total_iMCU_rows-1)) diff.MCU_rows_per_iMCU_row=cinfo.cur_comp_info[0].v_samp_factor; else diff.MCU_rows_per_iMCU_row=cinfo.cur_comp_info[0].last_row_height; } diff.mcu_ctr=0; diff.MCU_vert_offset=0; }
// Process some data in the context case. static void pre_process_context(jpeg_compress cinfo, byte[][] input_buf, ref uint in_row_ctr, uint in_rows_avail, byte[][][] output_buf, ref uint out_row_group_ctr, uint out_row_groups_avail) { my_prep_controller prep = (my_prep_controller)cinfo.prep; int buf_height = cinfo.max_v_samp_factor * 3; int rgroup_height = cinfo.max_v_samp_factor; while (out_row_group_ctr < out_row_groups_avail) { if (in_row_ctr < in_rows_avail) { // Do color conversion to fill the conversion buffer. uint inrows = in_rows_avail - in_row_ctr; int numrows = prep.next_buf_stop - prep.next_buf_row; numrows = (int)Math.Min((uint)numrows, inrows); cinfo.cconvert.color_convert(cinfo, input_buf, in_row_ctr, prep.color_buf, (uint)rgroup_height + (uint)prep.next_buf_row, numrows); // Pad at top of image, if first time through if (prep.rows_to_go == cinfo.image_height) { for (int ci = 0; ci < cinfo.num_components; ci++) { for (int row = 1; row <= cinfo.max_v_samp_factor; row++) { jcopy_sample_rows(prep.color_buf[ci], rgroup_height, prep.color_buf[ci], rgroup_height - row, 1, cinfo.image_width); } } } in_row_ctr += (uint)numrows; prep.next_buf_row += numrows; prep.rows_to_go -= (uint)numrows; } else { // Return for more data, unless we are at the bottom of the image. if (prep.rows_to_go != 0) { break; } // When at bottom of image, pad to fill the conversion buffer. if (prep.next_buf_row < prep.next_buf_stop) { for (int ci = 0; ci < cinfo.num_components; ci++) { expand_bottom_edge(prep.color_buf[ci], cinfo.image_width, rgroup_height + prep.next_buf_row, rgroup_height + prep.next_buf_stop); } prep.next_buf_row = prep.next_buf_stop; } } // If we've gotten enough data, downsample a row group. if (prep.next_buf_row == prep.next_buf_stop) { cinfo.downsample.downsample(cinfo, prep.color_buf, (uint)rgroup_height + (uint)prep.this_row_group, output_buf, out_row_group_ctr); out_row_group_ctr++; // Advance pointers with wraparound as necessary. prep.this_row_group += cinfo.max_v_samp_factor; if (prep.this_row_group >= buf_height) { prep.this_row_group = 0; } if (prep.next_buf_row >= buf_height) { prep.next_buf_row = 0; } prep.next_buf_stop = prep.next_buf_row + cinfo.max_v_samp_factor; } } // while(...) }
// Create a recommended progressive-JPEG script. // cinfo.num_components and cinfo.jpeg_color_space must be correct. public static void jpeg_simple_progression(jpeg_compress cinfo) { int ncomps = cinfo.num_components; // Safety check to ensure start_compress not called yet. if (cinfo.global_state != STATE.CSTART) { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state); } // Figure space needed for script. Calculation must match code below! int nscans; if (ncomps == 3 && cinfo.jpeg_color_space == J_COLOR_SPACE.JCS_YCbCr) { // Custom script for YCbCr color images. nscans = 10; } else { // All-purpose script for other color spaces. if (ncomps > MAX_COMPS_IN_SCAN) { nscans = 6 * ncomps; // 2 DC + 4 AC scans per component } else { nscans = 2 + 4 * ncomps; // 2 DC scans; 4 AC scans per component } } // Allocate space for script. // We need to put it in the permanent pool in case the application performs // multiple compressions without changing the settings. To avoid a memory // leak if jpeg_simple_progression is called repeatedly for the same JPEG // object, we try to re-use previously allocated space, and we allocate // enough space to handle YCbCr even if initially asked for grayscale. if (cinfo.script_space == null || cinfo.script_space_size < nscans) { cinfo.script_space_size = Math.Max(nscans, 10); try { cinfo.script_space = new jpeg_scan_info[cinfo.script_space_size]; for (int i = 0; i < cinfo.script_space_size; i++) { cinfo.script_space[i] = new jpeg_scan_info(); } } catch { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_OUT_OF_MEMORY, 4); } } jpeg_scan_info[] scanptr = cinfo.script_space; int scanptr_ind = 0; cinfo.scan_info = scanptr; cinfo.num_scans = nscans; if (ncomps == 3 && cinfo.jpeg_color_space == J_COLOR_SPACE.JCS_YCbCr) { // Custom script for YCbCr color images. // Initial DC scan scanptr_ind = fill_dc_scans(scanptr, scanptr_ind, ncomps, 0, 1); // Initial AC scan: get some luma data out in a hurry scanptr_ind = fill_a_scan(scanptr, scanptr_ind, 0, 1, 5, 0, 2); // Chroma data is too small to be worth expending many scans on scanptr_ind = fill_a_scan(scanptr, scanptr_ind, 2, 1, 63, 0, 1); scanptr_ind = fill_a_scan(scanptr, scanptr_ind, 1, 1, 63, 0, 1); // Complete spectral selection for luma AC scanptr_ind = fill_a_scan(scanptr, scanptr_ind, 0, 6, 63, 0, 2); // Refine next bit of luma AC scanptr_ind = fill_a_scan(scanptr, scanptr_ind, 0, 1, 63, 2, 1); // Finish DC successive approximation scanptr_ind = fill_dc_scans(scanptr, scanptr_ind, ncomps, 1, 0); // Finish AC successive approximation scanptr_ind = fill_a_scan(scanptr, scanptr_ind, 2, 1, 63, 1, 0); scanptr_ind = fill_a_scan(scanptr, scanptr_ind, 1, 1, 63, 1, 0); // Luma bottom bit comes last since it's usually largest scan scanptr_ind = fill_a_scan(scanptr, scanptr_ind, 0, 1, 63, 1, 0); } else { // All-purpose script for other color spaces. // Successive approximation first pass scanptr_ind = fill_dc_scans(scanptr, scanptr_ind, ncomps, 0, 1); scanptr_ind = fill_scans(scanptr, scanptr_ind, ncomps, 1, 5, 0, 2); scanptr_ind = fill_scans(scanptr, scanptr_ind, ncomps, 6, 63, 0, 2); // Successive approximation second pass scanptr_ind = fill_scans(scanptr, scanptr_ind, ncomps, 1, 63, 2, 1); // Successive approximation final pass scanptr_ind = fill_dc_scans(scanptr, scanptr_ind, ncomps, 1, 0); scanptr_ind = fill_scans(scanptr, scanptr_ind, ncomps, 1, 63, 1, 0); } }
// Set the JPEG colorspace, and choose colorspace-dependent default values. public static void jpeg_set_colorspace(jpeg_compress cinfo, J_COLOR_SPACE colorspace, J_SUBSAMPLING subsampling) { // Safety check to ensure start_compress not called yet. if (cinfo.global_state != STATE.CSTART) { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state); } // For all colorspaces, we use Q and Huff tables 0 for luminance components, // tables 1 for chrominance components. cinfo.jpeg_color_space = colorspace; cinfo.write_JFIF_header = false; // No marker for non-JFIF colorspaces cinfo.write_Adobe_marker = false; // write no Adobe marker by default switch (colorspace) { case J_COLOR_SPACE.JCS_GRAYSCALE: cinfo.write_JFIF_header = true; // Write a JFIF marker cinfo.num_components = 1; // JFIF specifies component ID 1 SET_COMP(cinfo, 0, 1, 1, 1, 0, 0, 0); break; case J_COLOR_SPACE.JCS_RGB: cinfo.write_Adobe_marker = true; // write Adobe marker to flag RGB cinfo.num_components = 3; SET_COMP(cinfo, 0, 0x52, 1, 1, 0, 0, 0); // 0x52 = 'R' SET_COMP(cinfo, 1, 0x47, 1, 1, 0, 0, 0); // 0x47 = 'G' SET_COMP(cinfo, 2, 0x42, 1, 1, 0, 0, 0); // 0x42 = 'B' break; case J_COLOR_SPACE.JCS_YCbCr: cinfo.write_JFIF_header = true; // Write a JFIF marker cinfo.num_components = 3; // JFIF specifies component IDs 1,2,3 if (cinfo.lossless || subsampling == J_SUBSAMPLING.JPEG444) { SET_COMP(cinfo, 0, 1, 1, 1, 0, 0, 0); SET_COMP(cinfo, 1, 2, 1, 1, 1, 1, 1); SET_COMP(cinfo, 2, 3, 1, 1, 1, 1, 1); } else if (subsampling == J_SUBSAMPLING.JPEG422) { // We default to 2x1 subsamples of chrominance SET_COMP(cinfo, 0, 1, 2, 1, 0, 0, 0); SET_COMP(cinfo, 1, 2, 1, 1, 1, 1, 1); SET_COMP(cinfo, 2, 3, 1, 1, 1, 1, 1); } else { // We default to 2x2 subsamples of chrominance SET_COMP(cinfo, 0, 1, 2, 2, 0, 0, 0); SET_COMP(cinfo, 1, 2, 1, 1, 1, 1, 1); SET_COMP(cinfo, 2, 3, 1, 1, 1, 1, 1); } break; case J_COLOR_SPACE.JCS_CMYK: cinfo.write_Adobe_marker = true; // write Adobe marker to flag CMYK cinfo.num_components = 4; SET_COMP(cinfo, 0, 0x43, 1, 1, 0, 0, 0); // 0x43 = 'C' SET_COMP(cinfo, 1, 0x4D, 1, 1, 0, 0, 0); // 0x4D = 'M' SET_COMP(cinfo, 2, 0x59, 1, 1, 0, 0, 0); // 0x59 = 'Y' SET_COMP(cinfo, 3, 0x4B, 1, 1, 0, 0, 0); // 0x4B = 'K' break; case J_COLOR_SPACE.JCS_YCCK: cinfo.write_Adobe_marker = true; // write Adobe marker to flag YCCK cinfo.num_components = 4; if (cinfo.lossless) { SET_COMP(cinfo, 0, 1, 1, 1, 0, 0, 0); SET_COMP(cinfo, 1, 2, 1, 1, 1, 1, 1); SET_COMP(cinfo, 2, 3, 1, 1, 1, 1, 1); SET_COMP(cinfo, 3, 4, 1, 1, 0, 0, 0); } else { SET_COMP(cinfo, 0, 1, 2, 2, 0, 0, 0); SET_COMP(cinfo, 1, 2, 1, 1, 1, 1, 1); SET_COMP(cinfo, 2, 3, 1, 1, 1, 1, 1); SET_COMP(cinfo, 3, 4, 2, 2, 0, 0, 0); } break; case J_COLOR_SPACE.JCS_UNKNOWN: cinfo.num_components = cinfo.input_components; if (cinfo.num_components < 1 || cinfo.num_components > MAX_COMPONENTS) { ERREXIT2(cinfo, J_MESSAGE_CODE.JERR_COMPONENT_COUNT, cinfo.num_components, MAX_COMPONENTS); } for (int ci = 0; ci < cinfo.num_components; ci++) { SET_COMP(cinfo, ci, ci, 1, 1, 0, 0, 0); } break; default: ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_J_COLORSPACE); break; } }
// Special start-of-pass hook. // This is called by jpeg_write_scanlines if call_pass_startup is true. // In single-pass processing, we need this hook because we don't want to // write frame/scan headers during jpeg_start_compress; we want to let the // application write COM markers etc. between jpeg_start_compress and the // jpeg_write_scanlines loop. // In multi-pass processing, this routine is not used. static void pass_startup(jpeg_compress cinfo) { cinfo.master.call_pass_startup=false; // reset flag so call only once cinfo.marker.write_frame_header(cinfo); cinfo.marker.write_scan_header(cinfo); }
// Finish up at end of pass. static void finish_pass_master(jpeg_compress cinfo) { my_comp_master master=(my_comp_master)cinfo.master; // The entropy coder always needs an end-of-pass call, // either to analyze statistics or to flush its output buffer. cinfo.coef.entropy_finish_pass(cinfo); // Update state for next pass switch(master.pass_type) { case c_pass_type.main_pass: // next pass is either output of scan 0 (after optimization) // or output of scan 1 (if no optimization). master.pass_type=c_pass_type.output_pass; if(!cinfo.optimize_coding) master.scan_number++; break; case c_pass_type.huff_opt_pass: // next pass is always output of current scan master.pass_type=c_pass_type.output_pass; break; case c_pass_type.output_pass: // next pass is either optimization or output of next scan if(cinfo.optimize_coding) master.pass_type=c_pass_type.huff_opt_pass; master.scan_number++; break; } master.pass_number++; }
// Verify that the scan script in cinfo.scan_info[] is valid; also // determine whether it uses progressive JPEG, and set cinfo.process. static void validate_script(jpeg_compress cinfo) { #if C_PROGRESSIVE_SUPPORTED int[,] last_bitpos=new int[MAX_COMPONENTS, DCTSIZE2]; // -1 until that coefficient has been seen; then last Al for it #endif if(cinfo.num_scans<=0) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_SCAN_SCRIPT, 0); #if !C_MULTISCAN_FILES_SUPPORTED if(cinfo.num_scans>1) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_NOT_COMPILED); #endif bool[] component_sent=new bool[MAX_COMPONENTS]; if(cinfo.lossless) { #if C_LOSSLESS_SUPPORTED cinfo.process=J_CODEC_PROCESS.JPROC_LOSSLESS; for(int ci=0; ci<cinfo.num_components; ci++) component_sent[ci]=false; #else ERREXIT(cinfo, J_MESSAGE_CODE.JERR_NOT_COMPILED); #endif } // For sequential JPEG, all scans must have Ss=0, Se=DCTSIZE2-1; // for progressive JPEG, no scan can have this. else if(cinfo.scan_info[0].Ss!=0||cinfo.scan_info[0].Se!=DCTSIZE2-1) { #if C_PROGRESSIVE_SUPPORTED cinfo.process=J_CODEC_PROCESS.JPROC_PROGRESSIVE; for(int ci=0; ci<cinfo.num_components; ci++) for(int coefi=0; coefi<DCTSIZE2; coefi++) last_bitpos[ci, coefi]=-1; #else ERREXIT(cinfo, J_MESSAGE_CODE.JERR_NOT_COMPILED); #endif } else { cinfo.process=J_CODEC_PROCESS.JPROC_SEQUENTIAL; for(int ci=0; ci<cinfo.num_components; ci++) component_sent[ci]=false; } for(int scanno=1; scanno<=cinfo.num_scans; scanno++) { jpeg_scan_info scan_info=cinfo.scan_info[scanno-1]; // Validate component indexes int ncomps=scan_info.comps_in_scan; if(ncomps<=0||ncomps>MAX_COMPS_IN_SCAN) ERREXIT2(cinfo, J_MESSAGE_CODE.JERR_COMPONENT_COUNT, ncomps, MAX_COMPS_IN_SCAN); for(int ci=0; ci<ncomps; ci++) { int thisi=scan_info.component_index[ci]; if(thisi<0||thisi>=cinfo.num_components) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_SCAN_SCRIPT, scanno); // Components must appear in SOF order within each scan if(ci>0&&thisi<=scan_info.component_index[ci-1]) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_SCAN_SCRIPT, scanno); } // Validate progression parameters int Ss=scan_info.Ss; int Se=scan_info.Se; int Ah=scan_info.Ah; int Al=scan_info.Al; if(cinfo.process==J_CODEC_PROCESS.JPROC_LOSSLESS) { #if C_LOSSLESS_SUPPORTED // The JPEG spec simply gives the range 0..15 for Al (Pt), but that // seems wrong: the upper bound ought to depend on data precision. // Perhaps they really meant 0..N-1 for N-bit precision, which is what // we allow here. if(Ss<1||Ss>7||Se!=0||Ah!=0||Al<0||Al>=cinfo.data_precision) // Ss predictor selector; Al point transform ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_LOSSLESS_SCRIPT, scanno); // Make sure components are not sent twice for(int ci=0; ci<ncomps; ci++) { int thisi=scan_info.component_index[ci]; if(component_sent[thisi]) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_SCAN_SCRIPT, scanno); component_sent[thisi]=true; } #endif } else if(cinfo.process==J_CODEC_PROCESS.JPROC_PROGRESSIVE) { #if C_PROGRESSIVE_SUPPORTED // The JPEG spec simply gives the ranges 0..13 for Ah and Al, but that // seems wrong: the upper bound ought to depend on data precision. // Perhaps they really meant 0..N+1 for N-bit precision. // Here we allow 0..10 for 8-bit data; Al larger than 10 results in // out-of-range reconstructed DC values during the first DC scan, // which might cause problems for some decoders. 
const int MAX_AH_AL=10; if(Ss<0||Ss>=DCTSIZE2||Se<Ss||Se>=DCTSIZE2|| Ah<0||Ah>MAX_AH_AL||Al<0||Al>MAX_AH_AL) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_PROG_SCRIPT, scanno); if(Ss==0) { if(Se!=0) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_PROG_SCRIPT, scanno); // DC and AC together not OK } else { if(ncomps!=1) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_PROG_SCRIPT, scanno); // AC scans must be for only one component } for(int ci=0; ci<ncomps; ci++) { int comp_ind=scan_info.component_index[ci]; if(Ss!=0&&last_bitpos[comp_ind, 0]<0) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_PROG_SCRIPT, scanno); // AC without prior DC scan for(int coefi=Ss; coefi<=Se; coefi++) { if(last_bitpos[comp_ind, coefi]<0) { // first scan of this coefficient if(Ah!=0) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_PROG_SCRIPT, scanno); } else { // not first scan if(Ah!=last_bitpos[comp_ind, coefi]||Al!=Ah-1) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_PROG_SCRIPT, scanno); } last_bitpos[comp_ind, coefi]=Al; } } #endif // C_PROGRESSIVE_SUPPORTED } else { // For sequential JPEG, all progression parameters must be these: if(Ss!=0||Se!=DCTSIZE2-1||Ah!=0||Al!=0) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_PROG_SCRIPT, scanno); // Make sure components are not sent twice for(int ci=0; ci<ncomps; ci++) { int thisi=scan_info.component_index[ci]; if(component_sent[thisi]) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_SCAN_SCRIPT, scanno); component_sent[thisi]=true; } } } // for(...) // Now verify that everything got sent. if(cinfo.process==J_CODEC_PROCESS.JPROC_PROGRESSIVE) { #if C_PROGRESSIVE_SUPPORTED // For progressive mode, we only check that at least some DC data // got sent for each component; the spec does not require that all bits // of all coefficients be transmitted. Would it be wiser to enforce // transmission of all coefficient bits?? for(int ci=0; ci<cinfo.num_components; ci++) { if(last_bitpos[ci, 0]<0) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_MISSING_DATA); } #endif } else { for(int ci=0; ci<cinfo.num_components; ci++) { if(!component_sent[ci]) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_MISSING_DATA); } } }
// Do computations that are needed before processing a JPEG scan // cinfo.comps_in_scan and cinfo.cur_comp_info[] are already set static void per_scan_setup(jpeg_compress cinfo) { uint DCT_size=cinfo.DCT_size; if(cinfo.comps_in_scan==1) { // Noninterleaved (single-component) scan jpeg_component_info compptr=cinfo.cur_comp_info[0]; // Overall image size in MCUs cinfo.MCUs_per_row=compptr.width_in_blocks; cinfo.MCU_rows_in_scan=compptr.height_in_blocks; // For noninterleaved scan, always one block per MCU compptr.MCU_width=1; compptr.MCU_height=1; compptr.MCU_blocks=1; compptr.MCU_sample_width=(int)DCT_size; compptr.last_col_width=1; // For noninterleaved scans, it is convenient to define last_row_height // as the number of block rows present in the last iMCU row. int tmp=(int)(compptr.height_in_blocks%compptr.v_samp_factor); if(tmp==0) tmp=compptr.v_samp_factor; compptr.last_row_height=tmp; // Prepare array describing MCU composition cinfo.block_in_MCU=1; cinfo.MCU_membership[0]=0; } else { // Interleaved (multi-component) scan if(cinfo.comps_in_scan<=0||cinfo.comps_in_scan>MAX_COMPS_IN_SCAN) ERREXIT2(cinfo, J_MESSAGE_CODE.JERR_COMPONENT_COUNT, cinfo.comps_in_scan, MAX_COMPS_IN_SCAN); // Overall image size in MCUs cinfo.MCUs_per_row=(uint)jdiv_round_up(cinfo.image_width, cinfo.max_h_samp_factor*DCT_size); cinfo.MCU_rows_in_scan=(uint)jdiv_round_up(cinfo.image_height, cinfo.max_v_samp_factor*DCT_size); cinfo.block_in_MCU=0; for(int ci=0; ci<cinfo.comps_in_scan; ci++) { jpeg_component_info compptr=cinfo.cur_comp_info[ci]; // Sampling factors give # of blocks of component in each MCU compptr.MCU_width=compptr.h_samp_factor; compptr.MCU_height=compptr.v_samp_factor; compptr.MCU_blocks=(uint)(compptr.MCU_width*compptr.MCU_height); compptr.MCU_sample_width=(int)(compptr.MCU_width*DCT_size); // Figure number of non-dummy blocks in last MCU column & row int tmp=(int)(compptr.width_in_blocks%compptr.MCU_width); if(tmp==0) tmp=compptr.MCU_width; compptr.last_col_width=tmp; tmp=(int)(compptr.height_in_blocks%compptr.MCU_height); if(tmp==0) tmp=compptr.MCU_height; compptr.last_row_height=tmp; // Prepare array describing MCU composition int mcublks=(int)compptr.MCU_blocks; if(cinfo.block_in_MCU+mcublks>C_MAX_BLOCKS_IN_MCU) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_MCU_SIZE); while((mcublks--)>0) cinfo.MCU_membership[cinfo.block_in_MCU++]=ci; } } // Convert restart specified in rows to actual MCU count. // Note that count must fit in 16 bits, so we provide limiting. if(cinfo.restart_in_rows>0) cinfo.restart_interval=(uint)Math.Min(cinfo.restart_in_rows*cinfo.MCUs_per_row, 65535); }
// Create a single-entry lossless-JPEG script containing all components. // cinfo.num_components must be correct. // predictor: 1..7 // point_transform: 0..data_precision(usally 8) // reduction of colors public static void jpeg_simple_lossless(jpeg_compress cinfo, int predictor, int point_transform) { int ncomps = cinfo.num_components; // Safety check to ensure start_compress not called yet. if (cinfo.global_state != STATE.CSTART) { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state); } cinfo.lossless = true; // Set jpeg_color_space. jpeg_default_colorspace(cinfo); // Check to ensure that all components will fit in one scan. if (cinfo.num_components > MAX_COMPS_IN_SCAN) { ERREXIT2(cinfo, J_MESSAGE_CODE.JERR_COMPONENT_COUNT, cinfo.num_components, MAX_COMPS_IN_SCAN); } // Allocate space for script. // We need to put it in the permanent pool in case the application performs // multiple compressions without changing the settings. To avoid a memory // leak if jpeg_simple_lossless is called repeatedly for the same JPEG // object, we try to re-use previously allocated space. int nscans = 1; if (cinfo.script_space == null || cinfo.script_space_size < nscans) { cinfo.script_space_size = nscans; try { cinfo.script_space = new jpeg_scan_info[cinfo.script_space_size]; for (int i = 0; i < cinfo.script_space_size; i++) { cinfo.script_space[i] = new jpeg_scan_info(); } } catch { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_OUT_OF_MEMORY, 4); } } jpeg_scan_info scanptr = cinfo.script_space[0]; cinfo.scan_info = cinfo.script_space; cinfo.num_scans = nscans; // Fill the script. scanptr.comps_in_scan = ncomps; for (int ci = 0; ci < ncomps; ci++) { scanptr.component_index[ci] = ci; } scanptr.Ss = predictor; scanptr.Se = 0; scanptr.Ah = 0; scanptr.Al = point_transform; }
static void noscale(jpeg_compress cinfo, byte[] input_buf, byte[] output_buf, uint width) { Array.Copy(input_buf, output_buf, width); }
// Terminate destination --- called by jpeg_finish_compress // after all data has been written. Usually needs to flush buffer. // // NB: *not* called by jpeg_abort or jpeg_destroy; surrounding // application must deal with any cleanup that should happen even // for error exit. static void term_destination(jpeg_compress cinfo) { my_destination_mgr dest=(my_destination_mgr)cinfo.dest; int datacount=OUTPUT_BUF_SIZE-(int)dest.free_in_buffer; // Write any data remaining in the buffer if(datacount>0) { try { dest.outfile.Write(dest.buffer, 0, datacount); } catch { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_FILE_WRITE); } } dest.outfile.Flush(); }
// Create the wrapped-around downsampling input buffer needed for context mode. static void create_context_buffer(jpeg_compress cinfo) { my_prep_controller prep=(my_prep_controller)cinfo.prep; int rgroup_height=cinfo.max_v_samp_factor; for(int ci=0; ci<cinfo.num_components; ci++) { jpeg_component_info compptr=cinfo.comp_info[ci]; // Grab enough space for fake row pointers; // we need five row groups' worth of pointers for each component. byte[][] fake_buffer=new byte[5*rgroup_height][]; // Allocate the actual buffer space (3 row groups) for this component. // We make the buffer wide enough to allow the downsampler to edge-expand // horizontally within the buffer, if it so chooses. byte[][] true_buffer=alloc_sarray(cinfo, (uint)(((int)compptr.width_in_blocks*cinfo.DCT_size*cinfo.max_h_samp_factor)/compptr.h_samp_factor), (uint)(3*rgroup_height)); // Copy true buffer row pointers into the middle of the fake row array Array.Copy(true_buffer, 0, fake_buffer, rgroup_height, 3*rgroup_height); // Fill in the above and below wraparound pointers for(int i=0; i<rgroup_height; i++) { fake_buffer[i]=true_buffer[2*rgroup_height+i]; fake_buffer[4*rgroup_height+i]=true_buffer[i]; } prep.color_buf[ci]=fake_buffer; } }
// Do downsampling for a whole row group (all components). // In this version we simply downsample each component independently. static void sep_downsample(jpeg_compress cinfo, byte[][][] input_buf, uint in_row_index, byte[][][] output_buf, uint out_row_group_index) { my_downsampler downsample=(my_downsampler)cinfo.downsample; for(int ci=0; ci<cinfo.num_components; ci++) downsample.methods[ci](cinfo, cinfo.comp_info[ci], input_buf[ci], in_row_index, output_buf[ci], (uint)(out_row_group_index*cinfo.comp_info[ci].v_samp_factor)); }
// Initialize for a downsampling pass. static void start_pass_downsample(jpeg_compress cinfo) { // no work for now }
// Module initialization routine for downsampling. // Note that we must select a routine for each component. static void jinit_downsampler(jpeg_compress cinfo) { my_downsampler downsample=null; try { downsample=new my_downsampler(); } catch { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_OUT_OF_MEMORY, 4); } cinfo.downsample=downsample; downsample.start_pass=start_pass_downsample; downsample.downsample=sep_downsample; downsample.need_context_rows=false; if(cinfo.CCIR601_sampling) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_CCIR601_NOTIMPL); bool smoothok=true; // Verify we can handle the sampling factors, and set up method pointers for(int ci=0; ci<cinfo.num_components; ci++) { jpeg_component_info compptr=cinfo.comp_info[ci]; if(compptr.h_samp_factor==cinfo.max_h_samp_factor&&compptr.v_samp_factor==cinfo.max_v_samp_factor) { #if INPUT_SMOOTHING_SUPPORTED if(cinfo.smoothing_factor!=0) { downsample.methods[ci]=fullsize_smooth_downsample; downsample.need_context_rows=true; } else #endif downsample.methods[ci]=fullsize_downsample; } else if(compptr.h_samp_factor*2==cinfo.max_h_samp_factor&&compptr.v_samp_factor==cinfo.max_v_samp_factor) { smoothok=false; downsample.methods[ci]=h2v1_downsample; } else if(compptr.h_samp_factor*2==cinfo.max_h_samp_factor&&compptr.v_samp_factor*2==cinfo.max_v_samp_factor) { #if INPUT_SMOOTHING_SUPPORTED if(cinfo.smoothing_factor!=0) { downsample.methods[ci]=h2v2_smooth_downsample; downsample.need_context_rows=true; } else #endif downsample.methods[ci]=h2v2_downsample; } else if((cinfo.max_h_samp_factor%compptr.h_samp_factor)==0&&(cinfo.max_v_samp_factor%compptr.v_samp_factor)==0) { smoothok=false; downsample.methods[ci]=int_downsample; } else ERREXIT(cinfo, J_MESSAGE_CODE.JERR_FRACT_SAMPLE_NOTIMPL); } #if INPUT_SMOOTHING_SUPPORTED if(cinfo.smoothing_factor!=0&&!smoothok) TRACEMS(cinfo, 0, J_MESSAGE_CODE.JTRC_SMOOTH_NOTIMPL); #endif }
// Expand a Huffman table definition into the derived format // Compute the derived values for a Huffman table. // This routine also performs some validation checks on the table. static void jpeg_make_c_derived_tbl(jpeg_compress cinfo, bool isDC, int tblno, ref c_derived_tbl pdtbl) { // Note that huffsize[] and huffcode[] are filled in code-length order, // paralleling the order of the symbols themselves in htbl.huffval[]. // Find the input Huffman table if (tblno < 0 || tblno >= NUM_HUFF_TBLS) { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_NO_HUFF_TABLE, tblno); } JHUFF_TBL htbl = isDC?cinfo.dc_huff_tbl_ptrs[tblno]:cinfo.ac_huff_tbl_ptrs[tblno]; if (htbl == null) { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_NO_HUFF_TABLE, tblno); } // Allocate a workspace if we haven't already done so. if (pdtbl == null) { try { pdtbl = new c_derived_tbl(); } catch { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_OUT_OF_MEMORY, 4); } } c_derived_tbl dtbl = pdtbl; // Figure C.1: make table of Huffman code length for each symbol byte[] huffsize = new byte[257]; int p = 0; for (byte l = 1; l <= 16; l++) { int i = htbl.bits[l]; // protect against table overrun if (i < 0 || (p + i) > 256) { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_HUFF_TABLE); } while ((i--) != 0) { huffsize[p++] = l; } } huffsize[p] = 0; int lastp = p; // Figure C.2: generate the codes themselves // We also validate that the counts represent a legal Huffman code tree. uint[] huffcode = new uint[257]; uint code = 0; int si = huffsize[0]; p = 0; while (huffsize[p] != 0) { while (((int)huffsize[p]) == si) { huffcode[p++] = code; code++; } // code is now 1 more than the last code used for codelength si; but // it must still fit in si bits, since no code is allowed to be all ones. if (((int)code) >= (1 << si)) { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_HUFF_TABLE); } code <<= 1; si++; } // Figure C.3: generate encoding tables // These are code and size indexed by symbol value // Set all codeless symbols to have code length 0; // this lets us detect duplicate VAL entries here, and later // allows emit_bits to detect any attempt to emit such symbols. for (int i = 0; i < 256; i++) { dtbl.ehufsi[i] = 0; } // This is also a convenient place to check for out-of-range // and duplicated VAL entries. We allow 0..255 for AC symbols // but only 0..16 for DC. (We could constrain them further // based on data depth and mode, but this seems enough.) int maxsymbol = isDC?16:255; for (p = 0; p < lastp; p++) { int i = htbl.huffval[p]; if (i < 0 || i > maxsymbol || dtbl.ehufsi[i] != 0) { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_HUFF_TABLE); } dtbl.ehufco[i] = huffcode[p]; dtbl.ehufsi[i] = huffsize[p]; } }
// Generate an optimal table definition given the specified counts // Generate the best Huffman code table for the given counts, fill htbl. // The JPEG standard requires that no symbol be assigned a codeword of all // one bits (so that padding bits added at the end of a compressed segment // can't look like a valid code). Because of the canonical ordering of // codewords, this just means that there must be an unused slot in the // longest codeword length category. Section K.2 of the JPEG spec suggests // reserving such a slot by pretending that symbol 256 is a valid symbol // with count 1. In theory that's not optimal; giving it count zero but // including it in the symbol set anyway should give a better Huffman code. // But the theoretically better code actually seems to come out worse in // practice, because it produces more all-ones bytes (which incur stuffed // zero bytes in the final file). In any case the difference is tiny. // The JPEG standard requires Huffman codes to be no more than 16 bits long. // If some symbols have a very small but nonzero probability, the Huffman tree // must be adjusted to meet the code length restriction. We currently use // the adjustment method suggested in JPEG section K.2. This method is *not* // optimal; it may not choose the best possible limited-length code. But // typically only very-low-frequency symbols will be given less-than-optimal // lengths, so the code is almost optimal. Experimental comparisons against // an optimal limited-length-code algorithm indicate that the difference is // microscopic --- usually less than a hundredth of a percent of total size. // So the extra complexity of an optimal algorithm doesn't seem worthwhile. static void jpeg_gen_optimal_table(jpeg_compress cinfo, JHUFF_TBL htbl, int[] freq) { int MAX_CLEN = 32; // assumed maximum initial code length byte[] bits = new byte[MAX_CLEN + 1]; // bits[k] = # of symbols with code length k int[] codesize = new int[257]; // codesize[k] = code length of symbol k int[] others = new int[257]; // next symbol in current branch of tree // This algorithm is explained in section K.2 of the JPEG standard for (int i = 0; i < 257; i++) { others[i] = -1; // init links to empty } freq[256] = 1; // make sure 256 has a nonzero count // Including the pseudo-symbol 256 in the Huffman procedure guarantees // that no real symbol is given code-value of all ones, because 256 // will be placed last in the largest codeword category. 
// Huffman's basic algorithm to assign optimal code lengths to symbols for (; ;) { // Find the smallest nonzero frequency, set c1 = its symbol // In case of ties, take the larger symbol number int c1 = -1; int v = 1000000000; for (int i = 0; i <= 256; i++) { if (freq[i] != 0 && freq[i] <= v) { v = freq[i]; c1 = i; } } // Find the next smallest nonzero frequency, set c2 = its symbol // In case of ties, take the larger symbol number int c2 = -1; v = 1000000000; for (int i = 0; i <= 256; i++) { if (freq[i] != 0 && freq[i] <= v && i != c1) { v = freq[i]; c2 = i; } } // Done if we've merged everything into one frequency if (c2 < 0) { break; } // Else merge the two counts/trees freq[c1] += freq[c2]; freq[c2] = 0; // Increment the codesize of everything in c1's tree branch codesize[c1]++; while (others[c1] >= 0) { c1 = others[c1]; codesize[c1]++; } others[c1] = c2; // chain c2 onto c1's tree branch // Increment the codesize of everything in c2's tree branch codesize[c2]++; while (others[c2] >= 0) { c2 = others[c2]; codesize[c2]++; } } // Now count the number of symbols of each code length for (int i = 0; i <= 256; i++) { if (codesize[i] != 0) { // The JPEG standard seems to think that this can't happen, // but I'm paranoid... if (codesize[i] > MAX_CLEN) { ERREXIT(cinfo, J_MESSAGE_CODE.JERR_HUFF_CLEN_OVERFLOW); } bits[codesize[i]]++; } } // JPEG doesn't allow symbols with code lengths over 16 bits, so if the pure // Huffman procedure assigned any such lengths, we must adjust the coding. // Here is what the JPEG spec says about how this next bit works: // Since symbols are paired for the longest Huffman code, the symbols are // removed from this length category two at a time. The prefix for the pair // (which is one bit shorter) is allocated to one of the pair; then, // skipping the BITS entry for that prefix length, a code word from the next // shortest nonzero BITS entry is converted into a prefix for two code words // one bit longer. int k; for (k = MAX_CLEN; k > 16; k--) { while (bits[k] > 0) { int j = k - 2; // find length of new prefix to be used while (bits[j] == 0) { j--; } bits[k] -= 2; // remove two symbols bits[k - 1]++; // one goes in this length bits[j + 1] += 2; // two new symbols in this length bits[j]--; // symbol of this length is now a prefix } } // Remove the count for the pseudo-symbol 256 from the largest codelength while (bits[k] == 0) { k--; // find largest codelength still in use } bits[k]--; // Return final symbol counts (only for lengths 0..16) Array.Copy(bits, htbl.bits, 17); // Return a list of the symbols sorted by code length // It's not real clear to me why we don't need to consider the codelength // changes made above, but the JPEG spec seems to think this works. for (int i = 1, p = 0; i <= MAX_CLEN; i++) { for (int j = 0; j <= 255; j++) { if (codesize[j] == i) { htbl.huffval[p] = (byte)j; p++; } } } // Set sent_table false so updated table will be written to JPEG file. htbl.sent_table = false; }
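// Illustrative sketch (not part of the library): the section K.2 length-limiting
// step from jpeg_gen_optimal_table above, applied to a standalone histogram so
// its effect can be observed in isolation. The helper name is ours; bits[k] must
// come from a valid Huffman code construction (so a shorter nonzero length always
// exists to donate a prefix), and the total symbol count is preserved because
// each iteration removes two counts and adds two.
static void DemoLimitCodeLengths(int[] bits, int maxLen)
{
	for(int k=bits.Length-1; k>maxLen; k--)
	{
		while(bits[k]>0)
		{
			int j=k-2;          // find length of new prefix to be used
			while(bits[j]==0) j--;
			bits[k]-=2;         // remove two symbols from the over-long length
			bits[k-1]++;        // one of them moves up a single level
			bits[j+1]+=2;       // the other pairs with the code promoted to a prefix
			bits[j]--;          // that code is now an interior prefix, not a leaf
		}
	}
}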
// Process some data in the context case. static void pre_process_context(jpeg_compress cinfo, byte[][] input_buf, ref uint in_row_ctr, uint in_rows_avail, byte[][][] output_buf, ref uint out_row_group_ctr, uint out_row_groups_avail) { my_prep_controller prep=(my_prep_controller)cinfo.prep; int buf_height=cinfo.max_v_samp_factor*3; int rgroup_height=cinfo.max_v_samp_factor; while(out_row_group_ctr<out_row_groups_avail) { if(in_row_ctr<in_rows_avail) { // Do color conversion to fill the conversion buffer. uint inrows=in_rows_avail-in_row_ctr; int numrows=prep.next_buf_stop-prep.next_buf_row; numrows=(int)Math.Min((uint)numrows, inrows); cinfo.cconvert.color_convert(cinfo, input_buf, in_row_ctr, prep.color_buf, (uint)rgroup_height+(uint)prep.next_buf_row, numrows); // Pad at top of image, if first time through if(prep.rows_to_go==cinfo.image_height) { for(int ci=0; ci<cinfo.num_components; ci++) for(int row=1; row<=cinfo.max_v_samp_factor; row++) jcopy_sample_rows(prep.color_buf[ci], rgroup_height, prep.color_buf[ci], rgroup_height-row, 1, cinfo.image_width); } in_row_ctr+=(uint)numrows; prep.next_buf_row+=numrows; prep.rows_to_go-=(uint)numrows; } else { // Return for more data, unless we are at the bottom of the image. if(prep.rows_to_go!=0) break; // When at bottom of image, pad to fill the conversion buffer. if(prep.next_buf_row<prep.next_buf_stop) { for(int ci=0; ci<cinfo.num_components; ci++) expand_bottom_edge(prep.color_buf[ci], cinfo.image_width, rgroup_height+prep.next_buf_row, rgroup_height+prep.next_buf_stop); prep.next_buf_row=prep.next_buf_stop; } } // If we've gotten enough data, downsample a row group. if(prep.next_buf_row==prep.next_buf_stop) { cinfo.downsample.downsample(cinfo, prep.color_buf, (uint)rgroup_height+(uint)prep.this_row_group, output_buf, out_row_group_ctr); out_row_group_ctr++; // Advance pointers with wraparound as necessary. prep.this_row_group+=cinfo.max_v_samp_factor; if(prep.this_row_group>=buf_height) prep.this_row_group=0; if(prep.next_buf_row>=buf_height) prep.next_buf_row=0; prep.next_buf_stop=prep.next_buf_row+cinfo.max_v_samp_factor; } } // while(...) }
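// Illustrative sketch (not part of the library): shows how this_row_group cycles
// through the three row-group slots of the 3*max_v_samp_factor-row circular
// buffer used by pre_process_context above. The helper name is ours and it only
// traces the index arithmetic, not the actual buffering.
static void DemoContextWraparound(int max_v_samp_factor, int row_groups)
{
	int buf_height=max_v_samp_factor*3;
	int this_row_group=0;
	for(int i=0; i<row_groups; i++)
	{
		Console.WriteLine("row group {0}: this_row_group = {1}", i, this_row_group);
		this_row_group+=max_v_samp_factor;
		if(this_row_group>=buf_height) this_row_group=0; // wrap around
	}
}
// For max_v_samp_factor=2 the trace is 0, 2, 4, 0, 2, 4, ... so each row group
// always has a previous and a next row group resident in the buffer.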
public static uint jpeg_write_image(jpeg_compress cinfo, byte[] image, bool swapChannels, bool alpha)
{
	if(cinfo.input_components!=3||cinfo.lossless||cinfo.in_color_space!=J_COLOR_SPACE.JCS_RGB||
		cinfo.num_components!=3||cinfo.jpeg_color_space!=J_COLOR_SPACE.JCS_YCbCr||
		cinfo.data_precision!=8||cinfo.DCT_size!=8||cinfo.block_in_MCU!=3||cinfo.arith_code||
		cinfo.max_h_samp_factor!=1||cinfo.max_v_samp_factor!=1||cinfo.next_scanline!=0||cinfo.num_scans!=1)
	{
		throw new Exception();
	}

	if(cinfo.global_state!=STATE.CSCANNING) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state);

	// Give master control module another chance if this is first call to
	// jpeg_write_scanlines. This lets output of the frame/scan headers be
	// delayed so that application can write COM, etc, markers between
	// jpeg_start_compress and jpeg_write_scanlines.
	if(cinfo.master.call_pass_startup) cinfo.master.pass_startup(cinfo);

	jpeg_lossy_c_codec lossyc=(jpeg_lossy_c_codec)cinfo.coef;
	c_coef_controller coef=(c_coef_controller)lossyc.coef_private;
	fdct_controller fdct=(fdct_controller)lossyc.fdct_private;

	double[] workspaceY=new double[DCTSIZE2];
	double[] workspaceCr=new double[DCTSIZE2];
	double[] workspaceCb=new double[DCTSIZE2];

	short[][] coefs=new short[][] { new short[DCTSIZE2], new short[DCTSIZE2], new short[DCTSIZE2] };
	short[] coefY=coefs[0];
	short[] coefCb=coefs[1];
	short[] coefCr=coefs[2];

	double[] divisorY=fdct.float_divisors[0];
	double[] divisorC=fdct.float_divisors[1];

	int bpp=alpha?4:3;

	for(int y=0; y<(cinfo.image_height+DCTSIZE-1)/DCTSIZE; y++)
	{
		int yFormImage=Math.Min((int)cinfo.image_height-y*DCTSIZE, DCTSIZE);
		for(int x=0; x<(cinfo.image_width+DCTSIZE-1)/DCTSIZE; x++)
		{
			int xFormImage=Math.Min((int)cinfo.image_width-x*DCTSIZE, DCTSIZE);
			int workspacepos=0;
			for(int j=0; j<yFormImage; j++)
			{
				int imagepos=((y*DCTSIZE+j)*(int)cinfo.image_width+x*DCTSIZE)*bpp;
				for(int i=0; i<xFormImage; i++, workspacepos++)
				{
					byte r=image[imagepos++];
					byte g=image[imagepos++];
					byte b=image[imagepos++];
					if(alpha) imagepos++;

					if(!swapChannels)
					{
						workspaceY[workspacepos]=0.299*r+0.587*g+0.114*b-CENTERJSAMPLE;
						workspaceCb[workspacepos]=-0.168736*r-0.331264*g+0.5*b;
						workspaceCr[workspacepos]=0.5*r-0.418688*g-0.081312*b;
					}
					else
					{
						workspaceY[workspacepos]=0.299*b+0.587*g+0.114*r-CENTERJSAMPLE;
						workspaceCb[workspacepos]=-0.168736*b-0.331264*g+0.5*r;
						workspaceCr[workspacepos]=0.5*b-0.418688*g-0.081312*r;
					}
				}

				int lastworkspacepos=workspacepos-1;
				for(int i=xFormImage; i<DCTSIZE; i++, workspacepos++)
				{
					workspaceY[workspacepos]=workspaceY[lastworkspacepos];
					workspaceCb[workspacepos]=workspaceCb[lastworkspacepos];
					workspaceCr[workspacepos]=workspaceCr[lastworkspacepos];
				}
			}

			int lastworkspacelinepos=(yFormImage-1)*DCTSIZE;
			for(int j=yFormImage; j<DCTSIZE; j++)
			{
				int lastworkspacepos=lastworkspacelinepos;
				for(int i=0; i<DCTSIZE; i++, workspacepos++, lastworkspacepos++)
				{
					workspaceY[workspacepos]=workspaceY[lastworkspacepos];
					workspaceCb[workspacepos]=workspaceCb[lastworkspacepos];
					workspaceCr[workspacepos]=workspaceCr[lastworkspacepos];
				}
			}

			// one block (3 components)
			jpeg_fdct_float(workspaceY);
			jpeg_fdct_float(workspaceCb);
			jpeg_fdct_float(workspaceCr);

			for(int i=0; i<DCTSIZE2; i++)
			{
				// Apply the quantization and scaling factor
				double tempY=workspaceY[i]*divisorY[i];
				double tempCb=workspaceCb[i]*divisorC[i];
				double tempCr=workspaceCr[i]*divisorC[i];

				// Round to nearest integer.
				// Since C does not specify the direction of rounding for negative
				// quotients, we have to force the dividend positive for portability.
				// The maximum coefficient size is +-16K (for 12-bit data), so this
				// code should work for either 16-bit or 32-bit ints.
				coefY[i]=(short)((int)(tempY+16384.5)-16384);
				coefCb[i]=(short)((int)(tempCb+16384.5)-16384);
				coefCr[i]=(short)((int)(tempCr+16384.5)-16384);
			}

			lossyc.entropy_encode_mcu(cinfo, coefs);
		}
	}

	cinfo.next_scanline=cinfo.image_height;
	return cinfo.image_height;
}
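// Illustrative sketch (not part of the library): why the quantization loop above
// adds 16384.5 before casting. A plain (int) cast truncates toward zero, which
// would bias negative coefficients; adding 16384 keeps the intermediate positive
// so the extra 0.5 always rounds to the nearest integer. Valid while |x| stays
// well below 16384, which holds for JPEG DCT coefficients. The helper name is ours.
static short DemoRoundCoef(double x)
{
	return (short)((int)(x+16384.5)-16384);
}
// DemoRoundCoef(2.6)==3 and DemoRoundCoef(-2.6)==-3, whereas (short)(-2.6) would be -2.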
// Initialize preprocessing controller. static void jinit_c_prep_controller(jpeg_compress cinfo, bool need_full_buffer) { if(need_full_buffer) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); // safety check my_prep_controller prep=null; try { prep=new my_prep_controller(); } catch { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_OUT_OF_MEMORY, 4); } cinfo.prep=prep; prep.start_pass=start_pass_prep; // Allocate the color conversion buffer. // We make the buffer wide enough to allow the downsampler to edge-expand // horizontally within the buffer, if it so chooses. if(cinfo.downsample.need_context_rows) { // Set up to provide context rows #if CONTEXT_ROWS_SUPPORTED prep.pre_process_data=pre_process_context; create_context_buffer(cinfo); #else ERREXIT(cinfo, J_MESSAGE_CODE.JERR_NOT_COMPILED); #endif } else { // No context, just make it tall enough for one row group prep.pre_process_data=pre_process_data; for(int ci=0; ci<cinfo.num_components; ci++) { jpeg_component_info compptr=cinfo.comp_info[ci]; prep.color_buf[ci]=alloc_sarray(cinfo, (uint)(((int)compptr.width_in_blocks*cinfo.DCT_size*cinfo.max_h_samp_factor)/compptr.h_samp_factor), (uint)cinfo.max_v_samp_factor); } } }
// Set the JPEG colorspace, and choose colorspace-dependent default values. public static void jpeg_set_colorspace(jpeg_compress cinfo, J_COLOR_SPACE colorspace) { jpeg_set_colorspace(cinfo, colorspace, J_SUBSAMPLING.JPEG420); }
// Process some data in the simple no-context case. // // Preprocessor output data is counted in "row groups". A row group // is defined to be v_samp_factor sample rows of each component. // Downsampling will produce this much data from each max_v_samp_factor input rows. static void pre_process_data(jpeg_compress cinfo, byte[][] input_buf, ref uint in_row_ctr, uint in_rows_avail, byte[][][] output_buf, ref uint out_row_group_ctr, uint out_row_groups_avail) { my_prep_controller prep=(my_prep_controller)cinfo.prep; while(in_row_ctr<in_rows_avail&&out_row_group_ctr<out_row_groups_avail) { // Do color conversion to fill the conversion buffer. uint inrows=in_rows_avail-in_row_ctr; int numrows=cinfo.max_v_samp_factor-prep.next_buf_row; numrows=(int)Math.Min((uint)numrows, inrows); cinfo.cconvert.color_convert(cinfo, input_buf, in_row_ctr, prep.color_buf, (uint)prep.next_buf_row, numrows); in_row_ctr+=(uint)numrows; prep.next_buf_row+=numrows; prep.rows_to_go-=(uint)numrows; // If at bottom of image, pad to fill the conversion buffer. if(prep.rows_to_go==0&&prep.next_buf_row<cinfo.max_v_samp_factor) { for(int ci=0; ci<cinfo.num_components; ci++) expand_bottom_edge(prep.color_buf[ci], cinfo.image_width, prep.next_buf_row, cinfo.max_v_samp_factor); prep.next_buf_row=cinfo.max_v_samp_factor; } // If we've filled the conversion buffer, empty it. if(prep.next_buf_row==cinfo.max_v_samp_factor) { cinfo.downsample.downsample(cinfo, prep.color_buf, 0, output_buf, out_row_group_ctr); prep.next_buf_row=0; out_row_group_ctr++; } // If at bottom of image, pad the output to a full iMCU height. // Note we assume the caller is providing a one-iMCU-height output buffer! if(prep.rows_to_go==0&&out_row_group_ctr<out_row_groups_avail) { for(int ci=0; ci<cinfo.num_components; ci++) { jpeg_component_info compptr=cinfo.comp_info[ci]; expand_bottom_edge(output_buf[ci], compptr.width_in_blocks*cinfo.DCT_size, (int)(out_row_group_ctr*compptr.v_samp_factor), (int)(out_row_groups_avail*compptr.v_samp_factor)); } out_row_group_ctr=out_row_groups_avail; break; // can exit outer loop without test } } // while(...) }
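// Illustrative arithmetic (not part of the library): for each component, one row
// group is v_samp_factor sample rows produced from max_v_samp_factor input rows,
// and one iMCU row is DCT_size row groups. The helper name and the 4:2:0 example
// values (Y sampled 2x2, chroma 1x1, 8x8 blocks) are ours.
static void DemoRowGroups()
{
	int DCT_size=8;
	int max_v=2;
	int[] v_samp={ 2, 1, 1 };        // Y, Cb, Cr
	string[] names={ "Y", "Cb", "Cr" };
	for(int ci=0; ci<v_samp.Length; ci++)
		Console.WriteLine("{0}: {1} row(s) per row group (from {2} input rows), {3} rows per iMCU row",
			names[ci], v_samp[ci], max_v, v_samp[ci]*DCT_size);
	// Prints: Y: 2 row(s) per row group (from 2 input rows), 16 rows per iMCU row
	//         Cb: 1 row(s) per row group (from 2 input rows), 8 rows per iMCU row
	//         Cr: 1 row(s) per row group (from 2 input rows), 8 rows per iMCU row
}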
// Special version of compress_data_diff with input_buf offsets.
static bool compress_data_diff(jpeg_compress cinfo, byte[][][] input_buf, int[] input_buf_ind)
{
	jpeg_lossless_c_codec losslsc = (jpeg_lossless_c_codec)cinfo.coef;
	c_diff_controller diff = (c_diff_controller)losslsc.diff_private;
	uint last_MCU_col = cinfo.MCUs_per_row - 1;
	uint last_iMCU_row = cinfo.total_iMCU_rows - 1;

	// Loop to write as much as one whole iMCU row
	for (int yoffset = diff.MCU_vert_offset; yoffset < diff.MCU_rows_per_iMCU_row; yoffset++)
	{
		uint MCU_col_num = diff.mcu_ctr; // index of current MCU within row

		// Scale and predict each scanline of the MCU-row separately.
		//
		// Note: We only do this if we are at the start of a MCU-row, ie,
		// we don't want to reprocess a row suspended by the output.
		if (MCU_col_num == 0)
		{
			for (int comp = 0; comp < cinfo.comps_in_scan; comp++)
			{
				jpeg_component_info compptr = cinfo.cur_comp_info[comp];
				int ci = compptr.component_index;
				int samp_rows;
				if (diff.iMCU_row_num < last_iMCU_row)
				{
					samp_rows = compptr.v_samp_factor;
				}
				else
				{
					// NB: can't use last_row_height here, since may not be set!
					samp_rows = (int)(compptr.height_in_blocks % compptr.v_samp_factor);
					if (samp_rows == 0)
					{
						samp_rows = compptr.v_samp_factor;
					}
					else
					{
						// Fill dummy difference rows at the bottom edge with zeros, which
						// will encode to the smallest amount of data.
						for (int samp_row = samp_rows; samp_row < compptr.v_samp_factor; samp_row++)
						{
							int c = jround_up((int)compptr.width_in_blocks, (int)compptr.h_samp_factor);
							for (int i = 0; i < c; i++)
							{
								diff.diff_buf[ci][samp_row][i] = 0;
							}
						}
					}
				}

				uint samps_across = compptr.width_in_blocks;

				for (int samp_row = 0; samp_row < samp_rows; samp_row++)
				{
					losslsc.scaler_scale(cinfo, input_buf[ci][input_buf_ind[ci] + samp_row], diff.cur_row[ci], samps_across);
					losslsc.predict_difference[ci](cinfo, ci, diff.cur_row[ci], diff.prev_row[ci], diff.diff_buf[ci][samp_row], samps_across);

					byte[] temp = diff.cur_row[ci];
					diff.cur_row[ci] = diff.prev_row[ci];
					diff.prev_row[ci] = temp;
				}
			}
		}

		// Try to write the MCU-row (or remaining portion of suspended MCU-row).
		uint MCU_count = losslsc.entropy_encode_mcus(cinfo, diff.diff_buf, (uint)yoffset, MCU_col_num, cinfo.MCUs_per_row - MCU_col_num);
		if (MCU_count != cinfo.MCUs_per_row - MCU_col_num)
		{
			// Suspension forced; update state counters and exit
			diff.MCU_vert_offset = yoffset;
			diff.mcu_ctr += MCU_count;
			return(false);
		}

		// Completed an MCU row, but perhaps not an iMCU row
		diff.mcu_ctr = 0;
	}

	// Completed the iMCU row, advance counters for next one
	diff.iMCU_row_num++;
	start_iMCU_row_c_diff(cinfo);
	return(true);
}
// Alternate entry point to write raw data. // Processes exactly one iMCU row per call, unless suspended. public static uint jpeg_write_raw_data(jpeg_compress cinfo, byte[][][] data, uint num_lines) { if(cinfo.global_state!=STATE.CRAW_OK) ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_BAD_STATE, cinfo.global_state); if(cinfo.next_scanline>=cinfo.image_height) { WARNMS(cinfo, J_MESSAGE_CODE.JWRN_TOO_MUCH_DATA); return 0; } // Call progress monitor hook if present if(cinfo.progress!=null) { cinfo.progress.pass_counter=(int)cinfo.next_scanline; cinfo.progress.pass_limit=(int)cinfo.image_height; cinfo.progress.progress_monitor(cinfo); } // Give master control module another chance if this is first call to // jpeg_write_raw_data. This lets output of the frame/scan headers be // delayed so that application can write COM, etc, markers between // jpeg_start_compress and jpeg_write_raw_data. if(cinfo.master.call_pass_startup) cinfo.master.pass_startup(cinfo); // Verify that at least one iMCU row has been passed. uint lines_per_iMCU_row=(uint)cinfo.max_v_samp_factor*cinfo.DCT_size; if(num_lines<lines_per_iMCU_row) ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BUFFER_SIZE); // Directly compress the row. if(!cinfo.coef.compress_data(cinfo, data)) return 0; // If compressor did not consume the whole row, suspend processing. // OK, we processed one iMCU row. cinfo.next_scanline+=lines_per_iMCU_row; return lines_per_iMCU_row; }
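// Illustrative sketch (not part of the library): allocates a raw-data buffer of
// the shape jpeg_write_raw_data is assumed to expect -- one byte[][] per
// component, at least v_samp_factor*DCT_size rows tall and padded to
// width_in_blocks*DCT_size samples wide. The helper name is ours; the caller
// still has to fill the rows with downsampled samples before each call.
static byte[][][] AllocRawDataBuffer(jpeg_compress cinfo)
{
	byte[][][] data=new byte[cinfo.num_components][][];
	for(int ci=0; ci<cinfo.num_components; ci++)
	{
		jpeg_component_info compptr=cinfo.comp_info[ci];
		int rows=compptr.v_samp_factor*(int)cinfo.DCT_size;
		int cols=(int)compptr.width_in_blocks*(int)cinfo.DCT_size;
		data[ci]=new byte[rows][];
		for(int row=0; row<rows; row++) data[ci][row]=new byte[cols];
	}
	return data;
}
// Each call to jpeg_write_raw_data(cinfo, data, num_lines) then needs
// num_lines>=cinfo.max_v_samp_factor*cinfo.DCT_size, per the check above.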
// Downsample pixel values of a single component. // This version handles the standard case of 2:1 horizontal and 2:1 vertical, // with smoothing. One row of context is required. static void h2v2_smooth_downsample(jpeg_compress cinfo, jpeg_component_info compptr, byte[][] input_data, uint in_row_index, byte[][] output_data, uint out_row_index) { uint output_cols = compptr.width_in_blocks * cinfo.DCT_size; // Expand input data enough to let all the output samples be generated // by the standard loop. Special-casing padded output would be more efficient. expand_right_edge(input_data, in_row_index - 1, cinfo.max_v_samp_factor + 2, cinfo.image_width, output_cols * 2); // We don't bother to form the individual "smoothed" input pixel values; // we can directly compute the output which is the average of the four // smoothed values. Each of the four member pixels contributes a fraction // (1-8*SF) to its own smoothed image and a fraction SF to each of the three // other smoothed pixels, therefore a total fraction (1-5*SF)/4 to the final // output. The four corner-adjacent neighbor pixels contribute a fraction // SF to just one smoothed pixel, or SF/4 to the final output; while the // eight edge-adjacent neighbors contribute SF to each of two smoothed // pixels, or SF/2 overall. In order to use integer arithmetic, these // factors are scaled by 2^16 = 65536. // Also recall that SF = smoothing_factor / 1024. int memberscale = 16384 - cinfo.smoothing_factor * 80; // scaled (1-5*SF)/4 int neighscale = cinfo.smoothing_factor * 16; // scaled SF/4 int inrow = (int)in_row_index; for (int outrow = 0; outrow < compptr.v_samp_factor; outrow++) { byte[] outptr = output_data[out_row_index + outrow]; byte[] inptr0 = input_data[inrow]; byte[] inptr1 = input_data[inrow + 1]; byte[] above_ptr = input_data[inrow - 1]; byte[] below_ptr = input_data[inrow + 2]; // Special case for first column: pretend column -1 is same as column 0 int membersum = inptr0[0] + inptr0[1] + inptr1[0] + inptr1[1]; int neighsum = above_ptr[0] + above_ptr[1] + below_ptr[0] + below_ptr[1] + inptr0[0] + inptr0[2] + inptr1[0] + inptr1[2]; neighsum += neighsum; neighsum += above_ptr[0] + above_ptr[2] + below_ptr[0] + below_ptr[2]; membersum = membersum * memberscale + neighsum * neighscale; outptr[0] = (byte)((membersum + 32768) >> 16); int iind = 2, oind = 1; for (uint colctr = output_cols - 2; colctr > 0; colctr--) { // sum of pixels directly mapped to this output element membersum = inptr0[iind] + inptr0[iind + 1] + inptr1[iind] + inptr1[iind + 1]; // sum of edge-neighbor pixels neighsum = above_ptr[iind] + above_ptr[iind + 1] + below_ptr[iind] + below_ptr[iind + 1] + inptr0[iind - 1] + inptr0[iind + 2] + inptr1[iind - 1] + inptr1[iind + 2]; // The edge-neighbors count twice as much as corner-neighbors neighsum += neighsum; // Add in the corner-neighbors neighsum += above_ptr[iind - 1] + above_ptr[iind + 2] + below_ptr[iind - 1] + below_ptr[iind + 2]; // form final output scaled up by 2^16 membersum = membersum * memberscale + neighsum * neighscale; // round, descale and output it outptr[oind] = (byte)((membersum + 32768) >> 16); iind += 2; oind++; } // Special case for last column membersum = inptr0[iind] + inptr0[iind + 1] + inptr1[iind] + inptr1[iind + 1]; neighsum = above_ptr[iind] + above_ptr[iind + 1] + below_ptr[iind] + below_ptr[iind + 1] + inptr0[iind - 1] + inptr0[iind + 1] + inptr1[iind - 1] + inptr1[iind + 1]; neighsum += neighsum; neighsum += above_ptr[iind - 1] + above_ptr[iind + 1] + below_ptr[iind - 1] + below_ptr[iind + 1]; 
membersum = membersum * memberscale + neighsum * neighscale; outptr[oind] = (byte)((membersum + 32768) >> 16); inrow += 2; } }
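// Illustrative check (not part of the library): the integer weights used by
// h2v2_smooth_downsample above always sum to 65536 (2^16) regardless of the
// smoothing factor, so (sum+32768)>>16 is just a rounded weighted average and
// cannot leave the 0..255 sample range. The helper name is ours.
static void DemoSmoothingWeights(int smoothing_factor)
{
	int memberscale=16384-smoothing_factor*80;   // scaled (1-5*SF)/4, one share per member pixel
	int neighscale=smoothing_factor*16;          // scaled SF/4
	int total=4*memberscale                      // four member pixels
		+8*(2*neighscale)                        // eight edge neighbors, counted twice
		+4*neighscale;                           // four corner neighbors, counted once
	Console.WriteLine("smoothing_factor={0}: weights sum to {1}", smoothing_factor, total); // always 65536
}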
// Set up the scan parameters for the current scan static void select_scan_parameters(jpeg_compress cinfo) { #if NEED_SCAN_SCRIPT if(cinfo.scan_info!=null) { // Prepare for current scan --- the script is already validated my_comp_master master=(my_comp_master)cinfo.master; jpeg_scan_info scanptr=cinfo.scan_info[master.scan_number]; cinfo.comps_in_scan=scanptr.comps_in_scan; for(int ci=0; ci<scanptr.comps_in_scan; ci++) { cinfo.cur_comp_info[ci]=cinfo.comp_info[scanptr.component_index[ci]]; } cinfo.Ss=scanptr.Ss; cinfo.Se=scanptr.Se; cinfo.Ah=scanptr.Ah; cinfo.Al=scanptr.Al; } else #endif { // Prepare for single sequential-JPEG scan containing all components if(cinfo.num_components>MAX_COMPS_IN_SCAN) ERREXIT2(cinfo, J_MESSAGE_CODE.JERR_COMPONENT_COUNT, cinfo.num_components, MAX_COMPS_IN_SCAN); cinfo.comps_in_scan=cinfo.num_components; for(int ci=0; ci<cinfo.num_components; ci++) { cinfo.cur_comp_info[ci]=cinfo.comp_info[ci]; } if(cinfo.lossless) { #if C_LOSSLESS_SUPPORTED // If we fall through to here, the user specified lossless, but did not // provide a scan script. ERREXIT(cinfo, J_MESSAGE_CODE.JERR_NO_LOSSLESS_SCRIPT); #endif } else { cinfo.process=J_CODEC_PROCESS.JPROC_SEQUENTIAL; cinfo.Ss=0; cinfo.Se=DCTSIZE2-1; cinfo.Ah=0; cinfo.Al=0; } } }
// Per-pass setup. // This is called at the beginning of each pass. We determine which modules // will be active during this pass and give them appropriate start_pass calls. // We also set is_last_pass to indicate whether any more passes will be required. static void prepare_for_pass(jpeg_compress cinfo) { my_comp_master master=(my_comp_master)cinfo.master; switch(master.pass_type) { case c_pass_type.main_pass: // Initial pass: will collect input data, and do either Huffman // optimization or data output for the first scan. select_scan_parameters(cinfo); per_scan_setup(cinfo); if(!cinfo.raw_data_in) { cinfo.cconvert.start_pass(cinfo); cinfo.downsample.start_pass(cinfo); cinfo.prep.start_pass(cinfo, J_BUF_MODE.JBUF_PASS_THRU); } cinfo.coef.entropy_start_pass(cinfo, cinfo.optimize_coding); cinfo.coef.start_pass(cinfo, (master.total_passes>1?J_BUF_MODE.JBUF_SAVE_AND_PASS:J_BUF_MODE.JBUF_PASS_THRU)); cinfo.main.start_pass(cinfo, J_BUF_MODE.JBUF_PASS_THRU); if(cinfo.optimize_coding) { // No immediate data output; postpone writing frame/scan headers master.call_pass_startup=false; } else { // Will write frame/scan headers at first jpeg_write_scanlines call master.call_pass_startup=true; } break; #if ENTROPY_OPT_SUPPORTED case c_pass_type.huff_opt_pass: // Do Huffman optimization for a scan after the first one. select_scan_parameters(cinfo); per_scan_setup(cinfo); if(cinfo.coef.need_optimization_pass(cinfo)) { cinfo.coef.entropy_start_pass(cinfo, true); cinfo.coef.start_pass(cinfo, J_BUF_MODE.JBUF_CRANK_DEST); master.call_pass_startup=false; break; } // Special case: Huffman DC refinement scans need no Huffman table // and therefore we can skip the optimization pass for them. master.pass_type=c_pass_type.output_pass; master.pass_number++; goto case c_pass_type.output_pass; // FALLTHROUGH #endif case c_pass_type.output_pass: // Do a data-output pass. // We need not repeat per-scan setup if prior optimization pass did it. if(!cinfo.optimize_coding) { select_scan_parameters(cinfo); per_scan_setup(cinfo); } cinfo.coef.entropy_start_pass(cinfo, false); cinfo.coef.start_pass(cinfo, J_BUF_MODE.JBUF_CRANK_DEST); // We emit frame/scan headers now if(master.scan_number==0) cinfo.marker.write_frame_header(cinfo); cinfo.marker.write_scan_header(cinfo); master.call_pass_startup=false; break; default: ERREXIT(cinfo, J_MESSAGE_CODE.JERR_NOT_COMPILED); break; } master.is_last_pass=(master.pass_number==master.total_passes-1); // Set up progress monitor's pass info if present if(cinfo.progress!=null) { cinfo.progress.completed_passes=master.pass_number; cinfo.progress.total_passes=master.total_passes; } }
// Initialize difference buffer controller. static void jinit_c_diff_controller(jpeg_compress cinfo, bool need_full_buffer) { jpeg_lossless_c_codec losslsc = (jpeg_lossless_c_codec)cinfo.coef; c_diff_controller diff = null; try { diff = new c_diff_controller(); } catch { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_OUT_OF_MEMORY, 4); } losslsc.diff_private = diff; losslsc.diff_start_pass = start_pass_diff; // Create the prediction row buffers. for (int ci = 0; ci < cinfo.num_components; ci++) { jpeg_component_info compptr = cinfo.comp_info[ci]; try { diff.cur_row[ci] = new byte[jround_up(compptr.width_in_blocks, compptr.h_samp_factor)]; diff.prev_row[ci] = new byte[jround_up(compptr.width_in_blocks, compptr.h_samp_factor)]; } catch { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_OUT_OF_MEMORY, 4); } } // Create the difference buffer. for (int ci = 0; ci < cinfo.num_components; ci++) { jpeg_component_info compptr = cinfo.comp_info[ci]; diff.diff_buf[ci] = alloc_darray(cinfo, (uint)jround_up(compptr.width_in_blocks, compptr.h_samp_factor), (uint)compptr.v_samp_factor); // Prefill difference rows with zeros. We do this because only actual // data is placed in the buffers during prediction/differencing, leaving // any dummy differences at the right edge as zeros, which will encode // to the smallest amount of data. for (int row = 0; row < compptr.v_samp_factor; row++) { int c = (int)jround_up(compptr.width_in_blocks, compptr.h_samp_factor); for (int i = 0; i < c; i++) { diff.diff_buf[ci][row][i] = 0; } } } // Create the sample buffer. if (need_full_buffer) { #if FULL_SAMP_BUFFER_SUPPORTED // Allocate a full-image array for each component, // padded to a multiple of samp_factor differences in each direction. for (int ci = 0; ci < cinfo.num_components; ci++) { jpeg_component_info compptr = cinfo.comp_info[ci]; diff.whole_image[ci] = alloc_sarray(cinfo, (uint)jround_up(compptr.width_in_blocks, compptr.h_samp_factor), (uint)jround_up(compptr.height_in_blocks, compptr.v_samp_factor)); } #else ERREXIT(cinfo, J_MESSAGE_CODE.JERR_BAD_BUFFER_MODE); #endif } else { diff.whole_image[0] = null; // flag for no arrays } }
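// Illustrative sketch (not part of the library): the padding rule the buffer
// sizes above rely on. This mirrors what jround_up is assumed to do in this
// code base -- round a up to the next multiple of b -- written out standalone
// so the difference-row widths are easy to reproduce. The helper name is ours.
static int DemoRoundUp(int a, int b)
{
	a+=b-1;
	return a-(a%b);
}
// DemoRoundUp(13, 4)==16, so a component 13 blocks wide with h_samp_factor 4 gets
// difference rows 16 entries long; the three extra entries stay zero, matching the prefill above.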
// Prepare for output to a stdio stream.
// The caller must have already opened the stream, and is responsible
// for closing it after finishing compression.
public static void jpeg_stdio_dest(jpeg_compress cinfo, Stream outfile)
{
	my_destination_mgr dest;

	// The destination object is made permanent so that multiple JPEG images
	// can be written to the same file without re-executing jpeg_stdio_dest.
	// This makes it dangerous to use this manager and a different destination
	// manager serially with the same JPEG object, because their private object
	// sizes may be different. Caveat programmer.
	if(cinfo.dest==null)
	{ // first time for this JPEG object?
		cinfo.dest=new my_destination_mgr();
	}

	dest=(my_destination_mgr)cinfo.dest;
	dest.init_destination=init_destination;
	dest.empty_output_buffer=empty_output_buffer;
	dest.term_destination=term_destination;
	dest.outfile=outfile;
}
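// Illustrative usage sketch (not part of the library): attaches a newly opened
// FileStream as the compression destination via jpeg_stdio_dest above. Assumes
// cinfo was created and fully configured elsewhere; only the destination hookup
// comes from this module, and the helper name is ours.
static void AttachFileDestination(jpeg_compress cinfo, string path)
{
	Stream outfile=new FileStream(path, FileMode.Create, FileAccess.Write);
	jpeg_stdio_dest(cinfo, outfile);
	// ... run the usual compression sequence, then close the stream yourself;
	// per the comment above, the destination manager does not own it.
}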
// Initialize master compression control. static void jinit_c_master_control(jpeg_compress cinfo, bool transcode_only) { my_comp_master master=null; try { master=new my_comp_master(); } catch { ERREXIT1(cinfo, J_MESSAGE_CODE.JERR_OUT_OF_MEMORY, 4); } cinfo.master=master; master.prepare_for_pass=prepare_for_pass; master.pass_startup=pass_startup; master.finish_pass=finish_pass_master; master.is_last_pass=false; cinfo.DCT_size=cinfo.lossless?1:(uint)DCTSIZE; // Validate parameters, determine derived values initial_setup(cinfo); if(cinfo.scan_info!=null) { #if NEED_SCAN_SCRIPT validate_script(cinfo); #else ERREXIT(cinfo, J_MESSAGE_CODE.JERR_NOT_COMPILED); #endif } else { cinfo.process=J_CODEC_PROCESS.JPROC_SEQUENTIAL; cinfo.num_scans=1; } if((cinfo.process==J_CODEC_PROCESS.JPROC_PROGRESSIVE||cinfo.process==J_CODEC_PROCESS.JPROC_LOSSLESS)&&!cinfo.arith_code) cinfo.optimize_coding=true; // assume default tables no good for progressive mode or lossless mode; but only in Huffman case! // Initialize my private state if(transcode_only) { // no main pass in transcoding if(cinfo.optimize_coding) master.pass_type=c_pass_type.huff_opt_pass; else master.pass_type=c_pass_type.output_pass; } else { // for normal compression, first pass is always this type: master.pass_type=c_pass_type.main_pass; } master.scan_number=master.pass_number=0; if(cinfo.optimize_coding) master.total_passes=cinfo.num_scans*2; else master.total_passes=cinfo.num_scans; }
// Initialize the scaler module.
static void jinit_c_scaler(jpeg_compress cinfo)
{
	jpeg_lossless_c_codec losslsc = (jpeg_lossless_c_codec)cinfo.coef;
	losslsc.scaler_start_pass = scaler_start_pass;
}
// Set or change the 'quality' (quantization) setting, using default tables // and a straight percentage-scaling quality scale. In most cases it's better // to use jpeg_set_quality (below); this entry point is provided for // applications that insist on a linear percentage scaling. public static void jpeg_set_linear_quality(jpeg_compress cinfo, int scale_factor, bool force_baseline) { // Set up two quantization tables using the specified scaling jpeg_add_quant_table(cinfo, 0, std_luminance_quant_tbl, scale_factor, force_baseline); jpeg_add_quant_table(cinfo, 1, std_chrominance_quant_tbl, scale_factor, force_baseline); }
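// Illustrative arithmetic (not part of the library): the linear scaling that
// jpeg_add_quant_table is assumed to apply in libjpeg-derived code -- roughly
// (basic*scale_factor+50)/100, clamped to a legal quantizer (1..255 when
// force_baseline, 1..32767 otherwise). The helper name is ours and the formula
// has not been verified against this port.
static int DemoScaleQuantEntry(int basic, int scale_factor, bool force_baseline)
{
	long temp=((long)basic*scale_factor+50L)/100L;
	if(temp<=0L) temp=1L;
	if(temp>32767L) temp=32767L;
	if(force_baseline&&temp>255L) temp=255L;
	return (int)temp;
}
// DemoScaleQuantEntry(16, 50, true)==8: a scale_factor of 50 roughly halves the
// standard luminance DC quantizer, i.e. finer quantization and higher quality.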