예제 #1
0
파일: io.cs 프로젝트: soywiz/nwebp
		// Point-sampling U/V sampler.
		static int EmitSampledRGB(VP8Io* io, WebPDecParams* p) {
		  WebPDecBuffer* output = p.output;
		  WebPRGBABuffer* buf = &output.u.RGBA;
		  byte* dst = buf.rgba + io.mb_y * buf.stride;
		  byte* y_src = io.y;
		  byte* u_src = io.u;
		  byte* v_src = io.v;
		  WebPSampleLinePairFunc sample =
			  io.a ? WebPSamplersKeepAlpha[output.colorspace]
					: WebPSamplers[output.colorspace];
		  int mb_w = io.mb_w;
		  int last = io.mb_h - 1;
		  int j;
		  for (j = 0; j < last; j += 2) {
			sample(y_src, y_src + io.y_stride, u_src, v_src,
				   dst, dst + buf.stride, mb_w);
			y_src += 2 * io.y_stride;
			u_src += io.uv_stride;
			v_src += io.uv_stride;
			dst += 2 * buf.stride;
		  }
		  if (j == last) {  // Just do the last line twice
			sample(y_src, y_src, u_src, v_src, dst, dst, mb_w);
		  }
		  return io.mb_h;
		}
예제 #2
0
파일: vp8.cs 프로젝트: soywiz/nwebp
		// Checks the decoder ABI version and, when an io object is supplied,
		// clears it to a pristine state.  Returns 1 on success, 0 on mismatch.
		int VP8InitIoInternal(VP8Io* io, int version) {
		  if (version == WEBP_DECODER_ABI_VERSION) {
			if (io != null) {
			  memset(io, 0, sizeof(*io));
			}
			return 1;
		  }
		  return 0;  // version mismatch error
		}
예제 #3
0
파일: io.cs 프로젝트: soywiz/nwebp
		//------------------------------------------------------------------------------
		// Main YUV->RGB conversion functions

		// Copies the current band of Y/U/V planes straight into the YUVA output
		// buffer, with no colorspace conversion.  Returns the number of luma
		// rows copied.
		static int EmitYUV(VP8Io* io, WebPDecParams* p) {
		  WebPDecBuffer* output = p.output;
		  WebPYUVABuffer* buf = &output.u.YUVA;
		  byte* y_dst = buf.y + io.mb_y * buf.y_stride;
		  byte* u_dst = buf.u + (io.mb_y >> 1) * buf.u_stride;
		  byte* v_dst = buf.v + (io.mb_y >> 1) * buf.v_stride;
		  int mb_w = io.mb_w;
		  int mb_h = io.mb_h;
		  int uv_w = (mb_w + 1) / 2;   // chroma is half-width, rounded up
		  int uv_h = (mb_h + 1) / 2;   // ... and half-height
		  int row;
		  for (row = 0; row < mb_h; ++row) {
			memcpy(y_dst + row * buf.y_stride, io.y + row * io.y_stride, mb_w);
		  }
		  for (row = 0; row < uv_h; ++row) {
			memcpy(u_dst + row * buf.u_stride, io.u + row * io.uv_stride, uv_w);
			memcpy(v_dst + row * buf.v_stride, io.v + row * io.uv_stride, uv_w);
		  }
		  return mb_h;
		}
예제 #4
0
파일: io.cs 프로젝트: soywiz/nwebp
		//------------------------------------------------------------------------------
		// YUV444 -> RGB conversion

		#if false   // TODO(skal): this is for future rescaling.
		static int EmitRGB(VP8Io* io, WebPDecParams* p) {
		  // Converts one band of full-resolution YUV444 samples to RGB(A), one
		  // row at a time, via the colorspace-specific converter.  Returns the
		  // number of rows emitted.  NOTE: this function sits inside an
		  // '#if false' region (see above) — dead code kept for future rescaling.
		  WebPDecBuffer* output = p.output;
		  WebPRGBABuffer* buf = &output.u.RGBA;
		  byte* dst = buf.rgba + io.mb_y * buf.stride;
		  byte* y_src = io.y;
		  byte* u_src = io.u;
		  byte* v_src = io.v;
		  WebPYUV444Converter convert = WebPYUV444Converters[output.colorspace];
		  int mb_w = io.mb_w;
		  int last = io.mb_h;
		  int j;
		  for (j = 0; j < last; ++j) {
			convert(y_src, u_src, v_src, dst, mb_w);
			y_src += io.y_stride;
			u_src += io.uv_stride;   // YUV444: chroma advances at full rate
			v_src += io.uv_stride;
			dst += buf.stride;
		  }
		  return io.mb_h;
		}
예제 #5
0
파일: io.cs 프로젝트: soywiz/nwebp
		// Sets up the three (four, with alpha) rescalers that scale the
		// Y/U/V(/A) planes into a temporary YUV444 line buffer prior to RGB
		// conversion.  One contiguous allocation in p.memory holds the
		// rescalers' work areas followed by the tmp sample rows.
		// Returns 0 on memory error, 1 on success.
		static int InitRGBRescaler(VP8Io* io, WebPDecParams* p) {
		  int has_alpha = IsAlphaMode(p.output.colorspace);
		  int out_width  = io.scaled_width;
		  int out_height = io.scaled_height;
		  int uv_in_width  = (io.mb_w + 1) >> 1;
		  int uv_in_height = (io.mb_h + 1) >> 1;
		  uint work_size = 2 * out_width;   // scratch memory for one rescaler
		  int* work;  // rescalers work area
		  byte* tmp;   // tmp storage for scaled YUV444 samples before RGB conversion
		  uint tmp_size1, tmp_size2;

		  tmp_size1 = 3 * work_size;   // Y + U + V work areas
		  tmp_size2 = 3 * out_width;   // one YUV444 output row
		  if (has_alpha) {
			tmp_size1 += work_size;    // extra work area for alpha
			tmp_size2 += out_width;    // extra tmp row for alpha
		  }
		  p.memory =
			  calloc(1, tmp_size1 * sizeof(*work) + tmp_size2 * sizeof(*tmp));
		  if (p.memory == null) {
			return 0;   // memory error
		  }
		  work = (int*)p.memory;
		  tmp = (byte*)(work + tmp_size1);   // sample rows live after the work areas
		  InitRescaler(&p.scaler_y, io.mb_w, io.mb_h,
					   tmp + 0 * out_width, out_width, out_height, 0,
					   io.mb_w, out_width, io.mb_h, out_height,
					   work + 0 * work_size);
		  // U/V are half-size in, so their output ratio is doubled (2 * out_*).
		  InitRescaler(&p.scaler_u, uv_in_width, uv_in_height,
					   tmp + 1 * out_width, out_width, out_height, 0,
					   io.mb_w, 2 * out_width, io.mb_h, 2 * out_height,
					   work + 1 * work_size);
		  InitRescaler(&p.scaler_v, uv_in_width, uv_in_height,
					   tmp + 2 * out_width, out_width, out_height, 0,
					   io.mb_w, 2 * out_width, io.mb_h, 2 * out_height,
					   work + 2 * work_size);
		  p.emit = EmitRescaledRGB;

		  if (has_alpha) {
			InitRescaler(&p.scaler_a, io.mb_w, io.mb_h,
						 tmp + 3 * out_width, out_width, out_height, 0,
						 io.mb_w, out_width, io.mb_h, out_height,
						 work + 3 * work_size);
			p.emit_alpha = EmitRescaledAlphaRGB;
		  }
		  return 1;
		}
예제 #6
0
파일: io.cs 프로젝트: soywiz/nwebp
		// Rescales the alpha plane of the current band and exports it into the
		// RGBA output (RGBA_4444 packs alpha into a nibble, hence the dedicated
		// export function).  No-op when there is no alpha plane.  Returns 0.
		static int EmitRescaledAlphaRGB(VP8Io* io, WebPDecParams* p) {
		  if (io.a != null) {
			// C-style function-pointer declaration retained from the port.
			int (* output_func)(WebPDecParams* const, int) =
				(p.output.colorspace == MODE_RGBA_4444) ? ExportAlphaRGBA4444
														  : ExportAlpha;
			WebPRescaler* scaler = &p.scaler_a;
			int j = 0, pos = 0;
			// Alternate importing source rows and exporting rescaled rows
			// until the whole band has been consumed.
			while (j < io.mb_h) {
			  j += Import(io.a + j * io.width, io.width, io.mb_h - j, scaler);
			  pos += output_func(p, pos);
			}
		  }
		  return 0;
		}
예제 #7
0
파일: io.cs 프로젝트: soywiz/nwebp
		// Feeds the Y/U/V planes of the current band through their rescalers
		// and exports the resulting RGB rows.  Returns the number of output
		// lines produced.
		static int EmitRescaledRGB(VP8Io* io, WebPDecParams* p) {
		  int mb_h = io.mb_h;
		  int uv_mb_h = (mb_h + 1) >> 1;   // chroma band is half-height
		  int j = 0, uv_j = 0;
		  int num_lines_out = 0;
		  while (j < mb_h) {
			int y_lines_in = Import(io.y + j * io.y_stride, io.y_stride,
										  mb_h - j, &p.scaler_y);
			int u_lines_in = Import(io.u + uv_j * io.uv_stride, io.uv_stride,
										  uv_mb_h - uv_j, &p.scaler_u);
			int v_lines_in = Import(io.v + uv_j * io.uv_stride, io.uv_stride,
										  uv_mb_h - uv_j, &p.scaler_v);
			(void)v_lines_in;   // remove a gcc warning
			// U and V have identical geometry, so they must consume in lockstep.
			assert(u_lines_in == v_lines_in);
			j += y_lines_in;
			uv_j += u_lines_in;
			num_lines_out += ExportRGB(p, num_lines_out);
		  }
		  return num_lines_out;
		}
예제 #8
0
파일: frame.cs 프로젝트: soywiz/nwebp
        // Leaves the critical decoding section: waits for the worker thread
        // (when multithreading is enabled) to finish, then invokes the user's
        // teardown() hook.  Returns 1 when everything completed cleanly.
        int VP8ExitCritical(VP8Io io)
        {
            int ok = this.use_threads_ ? WebPWorkerSync(&this.worker_) : 1;
            if (io.teardown)
            {
                io.teardown(io);
            }
            return ok;
        }
예제 #9
0
파일: vp8.cs 프로젝트: soywiz/nwebp
		// Topmost call: parses everything up to (and including) partition #0 of
		// a VP8 bitstream — RIFF/pre-VP8 chunks, frame tag, picture header,
		// segment/filter headers, partitions and quantizer — and primes the
		// decoder plus the default io output area.  Returns 1 on success, 0 on
		// failure (with dec.status_ set via VP8SetError).
		int VP8GetHeaders(VP8Decoder* dec, VP8Io* io) {
		  byte* buf;
		  uint buf_size;
		  byte* alpha_data_tmp;
		  uint alpha_size_tmp;
		  uint vp8_chunk_size;
		  uint bytes_skipped;
		  VP8FrameHeader* frm_hdr;
		  VP8PictureHeader* pic_hdr;
		  VP8BitReader* br;
		  VP8StatusCode status;

		  if (dec == null) {
			return 0;
		  }
		  SetOk(dec);
		  if (io == null) {
			return VP8SetError(dec, VP8_STATUS_INVALID_PARAM,
							   "null VP8Io passed to VP8GetHeaders()");
		  }

		  buf = io.data;
		  buf_size = io.data_size;

		  // Process Pre-VP8 chunks.
		  status = WebPParseHeaders(&buf, &buf_size, &vp8_chunk_size, &bytes_skipped,
									&alpha_data_tmp, &alpha_size_tmp);
		  if (status != VP8_STATUS_OK) {
			return VP8SetError(dec, status, "Incorrect/incomplete header.");
		  }
		  if (dec.alpha_data_ == null) {
			assert(dec.alpha_data_size_ == 0);
			// We have NOT set alpha data yet. Set it now.
			// (This is to ensure that dec.alpha_data_ is NOT reset to null if
			// WebPParseHeaders() is called more than once, as in incremental decoding
			// case.)
			dec.alpha_data_ = alpha_data_tmp;
			dec.alpha_data_size_ = alpha_size_tmp;
		  }

		  // Process the VP8 frame header.
		  if (buf_size < 4) {
			return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
							   "Truncated header.");
		  }

		  // Paragraph 9.1: 3-byte uncompressed frame tag.
		  {
			uint bits = buf[0] | (buf[1] << 8) | (buf[2] << 16);
			frm_hdr = &dec.frm_hdr_;
			frm_hdr.key_frame_ = !(bits & 1);
			frm_hdr.profile_ = (bits >> 1) & 7;
			frm_hdr.show_ = (bits >> 4) & 1;
			frm_hdr.partition_length_ = (bits >> 5);
			if (frm_hdr.profile_ > 3)
			  return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
								 "Incorrect keyframe parameters.");
			if (!frm_hdr.show_)
			  return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE,
								 "Frame not displayable.");
			buf += 3;
			buf_size -= 3;
		  }

		  pic_hdr = &dec.pic_hdr_;
		  if (frm_hdr.key_frame_) {
			// Paragraph 9.2: start code plus 14-bit width/height + scale bits.
			if (buf_size < 7) {
			  return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
								 "cannot parse picture header");
			}
			if (buf[0] != 0x9d || buf[1] != 0x01 || buf[2] != 0x2a) {
			  return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
								 "Bad code word");
			}
			pic_hdr.width_ = ((buf[4] << 8) | buf[3]) & 0x3fff;
			pic_hdr.xscale_ = buf[4] >> 6;   // ratio: 1, 5/4 5/3 or 2
			pic_hdr.height_ = ((buf[6] << 8) | buf[5]) & 0x3fff;
			pic_hdr.yscale_ = buf[6] >> 6;
			buf += 7;
			buf_size -= 7;

			dec.mb_w_ = (pic_hdr.width_ + 15) >> 4;
			dec.mb_h_ = (pic_hdr.height_ + 15) >> 4;
			// Setup default output area (can be later modified during io.setup())
			io.width = pic_hdr.width_;
			io.height = pic_hdr.height_;
			io.use_scaling  = 0;
			io.use_cropping = 0;
			io.crop_top  = 0;
			io.crop_left = 0;
			io.crop_right  = io.width;
			io.crop_bottom = io.height;
			io.mb_w = io.width;   // sanity check
			io.mb_h = io.height;  // ditto

			VP8ResetProba(&dec.proba_);
			ResetSegmentHeader(&dec.segment_hdr_);
			dec.segment_ = 0;    // default for intra
		  }

		  // Check if we have all the partition #0 available, and initialize dec.br_
		  // to read this partition (and this partition only).
		  if (frm_hdr.partition_length_ > buf_size) {
			return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA,
							   "bad partition length");
		  }

		  br = &dec.br_;
		  VP8InitBitReader(br, buf, buf + frm_hdr.partition_length_);
		  buf += frm_hdr.partition_length_;
		  buf_size -= frm_hdr.partition_length_;

		  if (frm_hdr.key_frame_) {
			pic_hdr.colorspace_ = VP8Get(br);
			pic_hdr.clamp_type_ = VP8Get(br);
		  }
		  if (!ParseSegmentHeader(br, &dec.segment_hdr_, &dec.proba_)) {
			return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
							   "cannot parse segment header");
		  }
		  // Filter specs
		  if (!ParseFilterHeader(br, dec)) {
			return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
							   "cannot parse filter header");
		  }
		  status = ParsePartitions(dec, buf, buf_size);
		  if (status != VP8_STATUS_OK) {
			return VP8SetError(dec, status, "cannot parse partitions");
		  }

		  // quantizer change
		  VP8ParseQuant(dec);

		  // Frame buffer marking
		  if (!frm_hdr.key_frame_) {
			// Paragraph 9.7
		#if !ONLY_KEYFRAME_CODE
			dec.buffer_flags_ = VP8Get(br) << 0;   // update golden
			dec.buffer_flags_ |= VP8Get(br) << 1;  // update alt ref
			if (!(dec.buffer_flags_ & 1)) {
			  dec.buffer_flags_ |= VP8GetValue(br, 2) << 2;
			}
			if (!(dec.buffer_flags_ & 2)) {
			  dec.buffer_flags_ |= VP8GetValue(br, 2) << 4;
			}
			dec.buffer_flags_ |= VP8Get(br) << 6;    // sign bias golden
			dec.buffer_flags_ |= VP8Get(br) << 7;    // sign bias alt ref
		#else
			return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE,
							   "Not a key frame.");
		#endif
		  } else {
			dec.buffer_flags_ = 0x003 | 0x100;
		  }

		  // Paragraph 9.8
		#if !ONLY_KEYFRAME_CODE
		  dec.update_proba_ = VP8Get(br);
		  if (!dec.update_proba_) {    // save for later restore
			dec.proba_saved_ = dec.proba_;
		  }
		  // NOTE(review): this mask keeps ONLY bit 8 and clears the buffer
		  // flags set just above — confirm against upstream libwebp whether
		  // '&= ~(1 << 8)' (clear bit 8 only) was intended here.
		  dec.buffer_flags_ &= 1 << 8;
		  dec.buffer_flags_ |=
			  (frm_hdr.key_frame_ || VP8Get(br)) << 8;    // refresh last frame
		#else
		  VP8Get(br);   // just ignore the value of update_proba_
		#endif

		  VP8ParseProba(br, dec);

		#if WEBP_EXPERIMENTAL_FEATURES
		  // Extensions
		  if (dec.pic_hdr_.colorspace_) {
			uint kTrailerSize = 8;
			byte kTrailerMarker = 0x01;
			byte* ext_buf = buf - kTrailerSize;
			uint size;

			if (frm_hdr.partition_length_ < kTrailerSize ||
				ext_buf[kTrailerSize - 1] != kTrailerMarker) {
			  return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
								 "RIFF: Inconsistent extra information.");
			}

			// Layer
			size = (ext_buf[0] << 0) | (ext_buf[1] << 8) | (ext_buf[2] << 16);
			dec.layer_data_size_ = size;
			dec.layer_data_ = null;  // will be set later
			dec.layer_colorspace_ = ext_buf[3];
		  }
		#endif

		  // sanitized state
		  dec.ready_ = 1;
		  return 1;
		}
예제 #10
0
파일: io.cs 프로젝트: soywiz/nwebp
		//------------------------------------------------------------------------------

		// VP8Io 'teardown' hook: releases the scratch memory allocated during
		// setup and resets the pointer so a second teardown is harmless.
		static void CustomTeardown(VP8Io* io) {
		  WebPDecParams* parameters = (WebPDecParams*)io.opaque;
		  free(parameters.memory);
		  parameters.memory = null;
		}
예제 #11
0
파일: io.cs 프로젝트: soywiz/nwebp
		// Writes the alpha plane (reduced to 4 bits) into the low nibble of the
		// second byte of each RGBA4444 pixel of the current band.  No-op when
		// there is no alpha plane.  Always returns 0.
		static int EmitAlphaRGBA4444(VP8Io* io, WebPDecParams* p) {
		  WebPRGBABuffer* buf = &p.output.u.RGBA;
		  byte* dst = buf.rgba + io.mb_y * buf.stride + 1;
		  byte* alpha = io.a;
		  int width = io.mb_w;
		  int height = io.mb_h;
		  int x, y;
		  if (alpha) {
			for (y = 0; y < height; ++y) {
			  for (x = 0; x < width; ++x) {
				// Fill in the alpha value (converted to 4 bits).
				uint alpha_val = clip((alpha[x] + 8) >> 4, 15);
				dst[2 * x] = (dst[2 * x] & 0xf0) | alpha_val;
			  }
			  alpha += io.width;
			  dst += buf.stride;
			}
		  }
		  return 0;
		}
예제 #12
0
파일: io.cs 프로젝트: soywiz/nwebp
		// Copies the 8-bit alpha plane into the alpha byte of every pixel of
		// the current band (byte offset 0 for ARGB, 3 for the RGBA layouts).
		// No-op when there is no alpha plane.  Always returns 0.
		static int EmitAlphaRGB(VP8Io* io, WebPDecParams* p) {
		  WebPRGBABuffer* buf = &p.output.u.RGBA;
		  byte* dst = buf.rgba + io.mb_y * buf.stride +
						 (p.output.colorspace == MODE_ARGB ? 0 : 3);
		  byte* alpha = io.a;
		  int width = io.mb_w;
		  int height = io.mb_h;
		  int x, y;
		  if (alpha) {
			for (y = 0; y < height; ++y) {
			  for (x = 0; x < width; ++x) {
				dst[4 * x] = alpha[x];
			  }
			  alpha += io.width;
			  dst += buf.stride;
			}
		  }
		  return 0;
		}
예제 #13
0
파일: io.cs 프로젝트: soywiz/nwebp
		// FANCY_UPSAMPLING 

		//------------------------------------------------------------------------------

		// Copies the decoded alpha plane of the current band straight into the
		// YUVA output buffer.  No-op when there is no alpha plane; returns 0.
		static int EmitAlphaYUV(VP8Io* io, WebPDecParams* p) {
		  byte* src = io.a;
		  if (src == null) {
			return 0;   // no alpha plane for this band
		  }
		  WebPYUVABuffer* buf = &p.output.u.YUVA;
		  byte* dst = buf.a + io.mb_y * buf.a_stride;
		  int width = io.mb_w;
		  int rows_left = io.mb_h;
		  while (rows_left-- > 0) {
			memcpy(dst, src, width * sizeof(*dst));
			src += io.width;
			dst += buf.a_stride;
		  }
		  return 0;
		}
예제 #14
0
파일: frame.cs 프로젝트: soywiz/nwebp
 /// <summary>
 /// Filters and emits one macroblock row, either synchronously or by
 /// handing the work to the worker thread.
 /// </summary>
 /// <param name="io">Decoding I/O parameters for the current row.</param>
 /// <returns>1 to continue decoding, 0 on user abort.</returns>
 int VP8ProcessRow(VP8Io io)
 {
     int ok = 1;
     VP8ThreadContext* ctx = &this.thread_ctx_;
     if (!this.use_threads_)
     {
         // ctx.id_ and ctx.f_info_ are already set
         ctx.mb_y_ = this.mb_y_;
         ctx.filter_row_ = this.filter_row_;
         // Fixed: FinishRow is an instance method taking only 'io'
         // (the stray C-style 'dec' argument was removed).
         ok = FinishRow(io);
     }
     else
     {
         WebPWorker* worker = &this.worker_;
         // Finish previous job *before* updating context
         ok &= WebPWorkerSync(worker);
         assert(worker.status_ == OK);
         if (ok)
         {   // spawn a new deblocking/output job
             // Fixed: 'io' is not a pointer here, so '*io' was invalid.
             // NOTE(review): the original C copies the VP8Io struct; if VP8Io
             // is a reference type a value copy may be needed instead — confirm.
             ctx.io_ = io;
             ctx.id_ = this.cache_id_;
             ctx.mb_y_ = this.mb_y_;
             ctx.filter_row_ = this.filter_row_;
             if (ctx.filter_row_)
             {    // just swap filter info
                 VP8FInfo* tmp = ctx.f_info_;
                 ctx.f_info_ = this.f_info_;
                 this.f_info_ = tmp;
             }
             WebPWorkerLaunch(worker);
             if (++this.cache_id_ == this.num_caches_)
             {
                 this.cache_id_ = 0;
             }
         }
     }
     return ok;
 }
예제 #15
0
파일: frame.cs 프로젝트: soywiz/nwebp
 /// <summary>
 /// Allocates memory and prepares everything needed to decode a frame.
 /// Must be called once per frame, before row processing starts.
 /// </summary>
 /// <param name="io">Decoding I/O parameters to initialize.</param>
 /// <returns>1 on success, 0 on memory error.</returns>
 int VP8InitFrame(VP8Io io)
 {
     // Fixed: these helpers are instance methods on the decoder (see the
     // visible 'void InitIo(VP8Io io)' signature), so the stray C-style
     // 'dec' arguments were removed.
     if (!InitThreadContext()) return 0;  // call first. Sets this.num_caches_.
     if (!AllocateMemory()) return 0;
     InitIo(io);
     VP8DspInit();  // Init critical function pointers and look-up tables.
     return 1;
 }
예제 #16
0
파일: io.cs 프로젝트: soywiz/nwebp
		//------------------------------------------------------------------------------
		// Default custom functions

		// Setup crop_xxx fields, mb_w and mb_h from the (optional) decoder
		// options.  Returns 0 when the cropping/scaling parameters are out of
		// bounds, 1 on success.
		static int InitFromOptions(WebPDecoderOptions* options,
								   VP8Io* io) {
		  int W = io.width;
		  int H = io.height;
		  int x = 0, y = 0, w = W, h = H;

		  // Cropping
		  io.use_cropping = (options != null) && (options.use_cropping > 0);
		  if (io.use_cropping) {
			w = options.crop_width;
			h = options.crop_height;
			// TODO(skal): take colorspace into account. Don't assume YUV420.
			x = options.crop_left & ~1;   // snap to even position (YUV420 chroma)
			y = options.crop_top & ~1;
			if (x < 0 || y < 0 || w <= 0 || h <= 0 || x + w > W || y + h > H) {
			  return 0;  // out of frame boundary error
			}
		  }
		  io.crop_left   = x;
		  io.crop_top    = y;
		  io.crop_right  = x + w;
		  io.crop_bottom = y + h;
		  io.mb_w = w;
		  io.mb_h = h;

		  // Scaling
		  io.use_scaling = (options != null) && (options.use_scaling > 0);
		  if (io.use_scaling) {
			if (options.scaled_width <= 0 || options.scaled_height <= 0) {
			  return 0;
			}
			io.scaled_width = options.scaled_width;
			io.scaled_height = options.scaled_height;
		  }

		  // Filter
		  // Fixed: explicit null/value tests, consistent with the other option
		  // reads above (the original relied on C pointer/int truthiness).
		  io.bypass_filtering = (options != null) && (options.bypass_filtering > 0);

		  // Fancy upsampler
		#if FANCY_UPSAMPLING
		  io.fancy_upsampling = (options == null) || (options.no_fancy_upsampling == 0);
		#endif

		  if (io.use_scaling) {
			// disable filter (only for large downscaling ratio).
			io.bypass_filtering = (io.scaled_width < W * 3 / 4) &&
								   (io.scaled_height < H * 3 / 4);
			io.fancy_upsampling = 0;
		  }
		  return 1;
		}
예제 #17
0
파일: io.cs 프로젝트: soywiz/nwebp
		// Rescaled-YUV emitter: pushes the current band of Y/U/V lines through
		// their respective rescalers.  Returns the number of luma lines produced.
		static int EmitRescaledYUV(VP8Io* io, WebPDecParams* p) {
		  int luma_rows = io.mb_h;
		  int chroma_rows = (luma_rows + 1) >> 1;   // chroma is half-height
		  int num_lines_out = Rescale(io.y, io.y_stride, luma_rows, &p.scaler_y);
		  Rescale(io.u, io.uv_stride, chroma_rows, &p.scaler_u);
		  Rescale(io.v, io.uv_stride, chroma_rows, &p.scaler_v);
		  return num_lines_out;
		}
예제 #18
0
파일: io.cs 프로젝트: soywiz/nwebp
		// VP8Io 'setup' hook: selects the emit/emit_alpha callbacks (and
		// allocates any scratch memory) according to the requested colorspace,
		// scaling and fancy-upsampling options.  Returns 0 on error.
		static int CustomSetup(VP8Io* io) {
		  WebPDecParams* p = (WebPDecParams*)io.opaque;
		  int is_rgb = (p.output.colorspace < MODE_YUV);

		  p.memory = null;
		  p.emit = null;
		  p.emit_alpha = null;
		  if (!InitFromOptions(p.options, io)) {
			return 0;
		  }

		  if (io.use_scaling) {
			// Rescaled output: the rescaler init routines pick the emitters.
			int ok = is_rgb ? InitRGBRescaler(io, p) : InitYUVRescaler(io, p);
			if (!ok) {
			  return 0;    // memory error
			}
		  } else {
			if (is_rgb) {
			  p.emit = EmitSampledRGB;   // default
		#if FANCY_UPSAMPLING
			  if (io.fancy_upsampling) {
				// One luma row plus two half-width chroma rows of look-behind.
				int uv_width = (io.mb_w + 1) >> 1;
				p.memory = malloc(io.mb_w + 2 * uv_width);
				if (p.memory == null) {
				  return 0;   // memory error.
				}
				p.tmp_y = (byte*)p.memory;
				p.tmp_u = p.tmp_y + io.mb_w;
				p.tmp_v = p.tmp_u + uv_width;
				p.emit = EmitFancyRGB;
				WebPInitUpsamplers();
			  }
		#endif
			} else {
			  p.emit = EmitYUV;
			}
			if (IsAlphaMode(p.output.colorspace)) {
			  // We need transparency output
			  p.emit_alpha =
				  is_rgb ? (p.output.colorspace == MODE_RGBA_4444 ?
					  EmitAlphaRGBA4444 : EmitAlphaRGB) : EmitAlphaYUV;
			}
		  }

		  if (is_rgb) {
			VP8YUVInit();
		  }
		  return 1;
		}
예제 #19
0
파일: io.cs 프로젝트: soywiz/nwebp
		// Pushes the alpha plane of the current band through the alpha rescaler
		// (when an alpha plane exists).  Always returns 0.
		static int EmitRescaledAlphaYUV(VP8Io* io, WebPDecParams* p) {
		  if (io.a == null) {
			return 0;
		  }
		  Rescale(io.a, io.width, io.mb_h, &p.scaler_a);
		  return 0;
		}
예제 #20
0
파일: io.cs 프로젝트: soywiz/nwebp
		//------------------------------------------------------------------------------

		//------------------------------------------------------------------------------

		// VP8Io 'put' hook: forwards the freshly decoded band to the configured
		// emitter (and alpha emitter), then advances the output row counter.
		// Returns 1 to continue decoding, 0 when the band is empty.
		static int CustomPut(VP8Io* io) {
		  WebPDecParams* p = (WebPDecParams*)io.opaque;
		  int num_lines_out;
		  assert(!(io.mb_y & 1));   // bands always start on an even row

		  if (io.mb_w <= 0 || io.mb_h <= 0) {
			return 0;
		  }
		  num_lines_out = p.emit(io, p);
		  if (p.emit_alpha) {
			p.emit_alpha(io, p);
		  }
		  p.last_y += num_lines_out;
		  return 1;
		}
예제 #21
0
파일: frame.cs 프로젝트: soywiz/nwebp
 /// <summary>
 /// Resets the VP8Io structure so it points at the decoder's current row
 /// cache, with default (no alpha, non-fancy) settings.
 /// </summary>
 void InitIo(VP8Io io)
 {
     io.mb_y = 0;
     io.a = null;                 // no alpha plane by default
     io.fancy_upsampling = 0;     // default
     // Plane pointers and strides come from the decoder's row cache.
     io.y = this.cache_y_;
     io.u = this.cache_u_;
     io.v = this.cache_v_;
     io.y_stride = this.cache_y_stride_;
     io.uv_stride = this.cache_uv_stride_;
 }
예제 #22
0
파일: io.cs 프로젝트: soywiz/nwebp
		//------------------------------------------------------------------------------
		// Fancy upsampling

		#if FANCY_UPSAMPLING
		static int EmitFancyRGB(VP8Io* io, WebPDecParams* p) {
		  // Fancy (bilinear) upsampling emitter.  Works on pairs of luma rows
		  // sharing one chroma row, keeping one unfinished row (p.tmp_y/u/v)
		  // between calls so adjacent bands can be blended.  Returns the number
		  // of rows actually finished for this band.
		  int num_lines_out = io.mb_h;   // a priori guess
		  WebPRGBABuffer* buf = &p.output.u.RGBA;
		  byte* dst = buf.rgba + io.mb_y * buf.stride;
		  WebPUpsampleLinePairFunc upsample =
			  io.a ? WebPUpsamplersKeepAlpha[p.output.colorspace]
					: WebPUpsamplers[p.output.colorspace];
		  byte* cur_y = io.y;
		  byte* cur_u = io.u;
		  byte* cur_v = io.v;
		  byte* top_u = p.tmp_u;   // chroma saved from the previous band
		  byte* top_v = p.tmp_v;
		  int y = io.mb_y;
		  int y_end = io.mb_y + io.mb_h;
		  int mb_w = io.mb_w;
		  int uv_w = (mb_w + 1) / 2;

		  if (y == 0) {
			// First line is special cased. We mirror the u/v samples at boundary.
			upsample(null, cur_y, cur_u, cur_v, cur_u, cur_v, null, dst, mb_w);
		  } else {
			// We can finish the left-over line from previous call.
			// Warning! Don't overwrite the alpha values (if any), as they
			// are not lagging one line behind but are already written.
			upsample(p.tmp_y, cur_y, top_u, top_v, cur_u, cur_v,
					 dst - buf.stride, dst, mb_w);
			++num_lines_out;
		  }
		  // Loop over each output pairs of row.
		  for (; y + 2 < y_end; y += 2) {
			top_u = cur_u;
			top_v = cur_v;
			cur_u += io.uv_stride;
			cur_v += io.uv_stride;
			dst += 2 * buf.stride;
			cur_y += 2 * io.y_stride;
			upsample(cur_y - io.y_stride, cur_y,
					 top_u, top_v, cur_u, cur_v,
					 dst - buf.stride, dst, mb_w);
		  }
		  // move to last row
		  cur_y += io.y_stride;
		  if (io.crop_top + y_end < io.crop_bottom) {
			// Save the unfinished samples for next call (as we're not done yet).
			memcpy(p.tmp_y, cur_y, mb_w * sizeof(*p.tmp_y));
			memcpy(p.tmp_u, cur_u, uv_w * sizeof(*p.tmp_u));
			memcpy(p.tmp_v, cur_v, uv_w * sizeof(*p.tmp_v));
			// The fancy upsampler leaves a row unfinished behind
			// (except for the very last row)
			num_lines_out--;
		  } else {
			// Process the very last row of even-sized picture
			if (!(y_end & 1)) {
			  upsample(cur_y, null, cur_u, cur_v, cur_u, cur_v,
					  dst + buf.stride, null, mb_w);
			}
		  }
		  return num_lines_out;
		}
예제 #23
0
파일: io.cs 프로젝트: soywiz/nwebp
		// Sets up the Y/U/V (and optional alpha) rescalers that write directly
		// into the YUVA output buffer.  A single allocation in p.memory holds
		// the consecutive per-plane work areas.  Returns 0 on memory error.
		static int InitYUVRescaler(VP8Io* io, WebPDecParams* p) {
		  int has_alpha = IsAlphaMode(p.output.colorspace);
		  WebPYUVABuffer* buf = &p.output.u.YUVA;
		  int out_width  = io.scaled_width;
		  int out_height = io.scaled_height;
		  int uv_out_width  = (out_width + 1) >> 1;
		  int uv_out_height = (out_height + 1) >> 1;
		  int uv_in_width  = (io.mb_w + 1) >> 1;
		  int uv_in_height = (io.mb_h + 1) >> 1;
		  uint work_size = 2 * out_width;   // scratch memory for luma rescaler
		  uint uv_work_size = 2 * uv_out_width;  // and for each u/v ones
		  uint tmp_size;
		  int* work;

		  tmp_size = work_size + 2 * uv_work_size;
		  if (has_alpha) {
			tmp_size += work_size;   // one more luma-sized area for alpha
		  }
		  p.memory = calloc(1, tmp_size * sizeof(*work));
		  if (p.memory == null) {
			return 0;   // memory error
		  }
		  work = (int*)p.memory;
		  InitRescaler(&p.scaler_y, io.mb_w, io.mb_h,
					   buf.y, out_width, out_height, buf.y_stride,
					   io.mb_w, out_width, io.mb_h, out_height,
					   work);
		  InitRescaler(&p.scaler_u, uv_in_width, uv_in_height,
					   buf.u, uv_out_width, uv_out_height, buf.u_stride,
					   uv_in_width, uv_out_width,
					   uv_in_height, uv_out_height,
					   work + work_size);
		  InitRescaler(&p.scaler_v, uv_in_width, uv_in_height,
					   buf.v, uv_out_width, uv_out_height, buf.v_stride,
					   uv_in_width, uv_out_width,
					   uv_in_height, uv_out_height,
					   work + work_size + uv_work_size);
		  p.emit = EmitRescaledYUV;
		  if (has_alpha) {
			InitRescaler(&p.scaler_a, io.mb_w, io.mb_h,
						 buf.a, out_width, out_height, buf.a_stride,
						 io.mb_w, out_width, io.mb_h, out_height,
						 work + work_size + 2 * uv_work_size);
			p.emit_alpha = EmitRescaledAlphaYUV;
		  }
		  return 1;
		}
예제 #24
0
파일: frame.cs 프로젝트: soywiz/nwebp
        /// <summary>
        /// Finalize and transmit a complete row. Return false in case of user-abort.
        /// Deblocks the row if needed, adjusts the plane pointers for cropping,
        /// decompresses the matching alpha rows, then calls the user's put() hook.
        /// </summary>
        /// <param name="io">Decoding I/O parameters (planes, crop window, hooks).</param>
        /// <returns>1 to continue, 0 on user abort or error.</returns>
        int FinishRow(VP8Io io)
        {
            int ok = 1;
            VP8ThreadContext ctx = this.thread_ctx_;
            // Number of extra rows needed above the row for in-loop filtering.
            int extra_y_rows = kFilterExtraRows[this.filter_type_];
            int ysize = extra_y_rows * this.cache_y_stride_;
            int uvsize = (extra_y_rows / 2) * this.cache_uv_stride_;
            int y_offset = ctx.id_ * 16 * this.cache_y_stride_;
            int uv_offset = ctx.id_ * 8 * this.cache_uv_stride_;
            byte* ydst = this.cache_y_ - ysize + y_offset;
            byte* udst = this.cache_u_ - uvsize + uv_offset;
            byte* vdst = this.cache_v_ - uvsize + uv_offset;
            bool first_row = (ctx.mb_y_ == 0);
            bool last_row = (ctx.mb_y_ >= this.br_mb_y_ - 1);
            int y_start = MACROBLOCK_VPOS(ctx.mb_y_);
            int y_end = MACROBLOCK_VPOS(ctx.mb_y_ + 1);

            if (ctx.filter_row_)
            {
                // NOTE(review): 'dec' here (and below) is a leftover argument
                // from the original C source — these calls presumably operate
                // on 'this'; confirm against the rest of the class.
                FilterRow(dec);
            }

            if (io.put)
            {
                if (!first_row)
                {
                    // Include the filter look-behind rows saved from last call.
                    y_start -= extra_y_rows;
                    io.y = ydst;
                    io.u = udst;
                    io.v = vdst;
                }
                else
                {
                    io.y = this.cache_y_ + y_offset;
                    io.u = this.cache_u_ + uv_offset;
                    io.v = this.cache_v_ + uv_offset;
                }

                if (!last_row)
                {
                    // Keep the trailing rows back: they still need filtering.
                    y_end -= extra_y_rows;
                }
                if (y_end > io.crop_bottom)
                {
                    y_end = io.crop_bottom;    // make sure we don't overflow on last row.
                }
                io.a = null;
                if (this.alpha_data_ && y_start < y_end)
                {
                    io.a = VP8DecompressAlphaRows(dec, y_start, y_end - y_start);
                    if (io.a == null)
                    {
                        return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR,
                                            "Could not decode alpha data.");
                    }
                }
                if (y_start < io.crop_top)
                {
                    // Skip the rows above the crop window.
                    int delta_y = io.crop_top - y_start;
                    y_start = io.crop_top;
                    assert(!(delta_y & 1));
                    io.y += this.cache_y_stride_ * delta_y;
                    io.u += this.cache_uv_stride_ * (delta_y >> 1);
                    io.v += this.cache_uv_stride_ * (delta_y >> 1);
                    if (io.a)
                    {
                        io.a += io.width * delta_y;
                    }
                }
                if (y_start < y_end)
                {
                    // Shift plane pointers to the left crop edge and emit.
                    io.y += io.crop_left;
                    io.u += io.crop_left >> 1;
                    io.v += io.crop_left >> 1;
                    if (io.a)
                    {
                        io.a += io.crop_left;
                    }
                    io.mb_y = y_start - io.crop_top;
                    io.mb_w = io.crop_right - io.crop_left;
                    io.mb_h = y_end - y_start;
                    ok = io.put(io);
                }
            }
            // rotate top samples if needed
            if (ctx.id_ + 1 == this.num_caches_)
            {
                if (!last_row)
                {
                    memcpy(this.cache_y_ - ysize, ydst + 16 * this.cache_y_stride_, ysize);
                    memcpy(this.cache_u_ - uvsize, udst + 8 * this.cache_uv_stride_, uvsize);
                    memcpy(this.cache_v_ - uvsize, vdst + 8 * this.cache_uv_stride_, uvsize);
                }
            }

            return ok;
        }
예제 #25
0
파일: frame.cs 프로젝트: soywiz/nwebp
        /// <summary>
        /// Finish setting up the decoding parameter once user's setup() is called.
        /// Computes the macroblock window that actually needs in-loop filtering
        /// given the crop rectangle and the filter type.
        /// </summary>
        /// <param name="io">Decoding I/O parameters (setup hook, crop window).</param>
        /// <returns>VP8_STATUS_OK, or the error status when setup() aborts.</returns>
        VP8StatusCode VP8EnterCritical(VP8Io io)
        {
            // Call setup() first. This may trigger additional decoding features on 'io'.
            // Note: Afterward, we must call teardown() no matter what.
            if (io.setup && !io.setup(io))
            {
                // NOTE(review): 'dec' is a leftover from the C source — this
                // call presumably targets 'this'; confirm.
                VP8SetError(dec, VP8_STATUS_USER_ABORT, "Frame setup failed");
                return this.status_;
            }

            // Disable filtering per user request
            if (io.bypass_filtering)
            {
                this.filter_type_ = 0;
            }
            // TODO(skal): filter type / strength / sharpness forcing

            // Define the area where we can skip in-loop filtering, in case of cropping.
            //
            // 'Simple' filter reads two luma samples outside of the macroblock and
            // and filters one. It doesn't filter the chroma samples. Hence, we can
            // avoid doing the in-loop filtering before crop_top/crop_left position.
            // For the 'Complex' filter, 3 samples are read and up to 3 are filtered.
            // Means: there's a dependency chain that goes all the way up to the
            // top-left corner of the picture (MB #0). We must filter all the previous
            // macroblocks.
            // TODO(skal): add an 'approximate_decoding' option, that won't produce
            // a 1:1 bit-exactness for complex filtering?
            {
                int extra_pixels = kFilterExtraRows[this.filter_type_];
                if (this.filter_type_ == 2)
                {
                    // For complex filter, we need to preserve the dependency chain.
                    this.tl_mb_x_ = 0;
                    this.tl_mb_y_ = 0;
                }
                else
                {
                    // For simple filter, we can filter only the cropped region.
                    // We include 'extra_pixels' on the other side of the boundary, since
                    // vertical or horizontal filtering of the previous macroblock can
                    // modify some abutting pixels.
                    this.tl_mb_x_ = (io.crop_left - extra_pixels) >> 4;
                    this.tl_mb_y_ = (io.crop_top - extra_pixels) >> 4;
                    if (this.tl_mb_x_ < 0) this.tl_mb_x_ = 0;
                    if (this.tl_mb_y_ < 0) this.tl_mb_y_ = 0;
                }
                // We need some 'extra' pixels on the right/bottom.
                this.br_mb_y_ = (io.crop_bottom + 15 + extra_pixels) >> 4;
                this.br_mb_x_ = (io.crop_right + 15 + extra_pixels) >> 4;
                if (this.br_mb_x_ > this.mb_w_)
                {
                    this.br_mb_x_ = this.mb_w_;
                }
                if (this.br_mb_y_ > this.mb_h_)
                {
                    this.br_mb_y_ = this.mb_h_;
                }
            }
            return VP8StatusCode.VP8_STATUS_OK;
        }