/* Builds the inter prediction for one 4x4 luma sub-block into d.predictor.
 *
 * The block's motion vector is in 1/8-pel units: the top three bits select
 * the integer-pel source position, the low three bits the sub-pel fraction.
 * Full-pel motion is a plain 4x4 copy; otherwise the supplied sub-pixel
 * interpolation function sppf does the filtered fetch. */
static void vp8_build_inter_predictors_b(BLOCKD d, int pitch, byte *base_pre, int pre_stride, vp8_subpix_fn_t sppf) {
    fixed (byte *pPredictor = d.predictor.src()) {
        byte *dst = pPredictor;
        // Integer-pel source position derived from the motion vector.
        byte *src = base_pre + d.offset
                  + (d.bmi.mv.as_mv.row >> 3) * pre_stride
                  + (d.bmi.mv.as_mv.col >> 3);
        int frac_row = d.bmi.mv.as_mv.row & 7;
        int frac_col = d.bmi.mv.as_mv.col & 7;

        if (frac_row > 0 || frac_col > 0) {
            // Sub-pel motion: interpolate through the filter callback.
            sppf(src, pre_stride, frac_col, frac_row, dst, pitch);
        } else {
            // Full-pel motion: straight 4x4 pixel copy.
            for (int r = 0; r < 4; ++r) {
                dst[0] = src[0];
                dst[1] = src[1];
                dst[2] = src[2];
                dst[3] = src[3];
                dst += pitch;
                src += pre_stride;
            }
        }
    }
}
/* Builds the inter prediction for one 4x4 sub-block directly into the
 * caller-supplied destination buffer (unlike vp8_build_inter_predictors_b,
 * which writes into d.predictor).
 *
 * The motion vector is in 1/8-pel units; full-pel motion is a 4x4 copy and
 * sub-pel motion goes through the interpolation callback sppf. */
static void build_inter_predictors_b(BLOCKD d, byte *dst, int dst_stride, byte *base_pre, int pre_stride, vp8_subpix_fn_t sppf) {
    // Integer-pel source position derived from the motion vector.
    byte *src = base_pre + d.offset
              + (d.bmi.mv.as_mv.row >> 3) * pre_stride
              + (d.bmi.mv.as_mv.col >> 3);
    int frac_row = d.bmi.mv.as_mv.row & 7;
    int frac_col = d.bmi.mv.as_mv.col & 7;

    if (frac_row > 0 || frac_col > 0) {
        // Sub-pel motion: interpolate through the filter callback.
        sppf(src, pre_stride, frac_col, frac_row, dst, dst_stride);
    } else {
        // Full-pel motion: straight 4x4 pixel copy.
        for (int r = 0; r < 4; ++r) {
            dst[0] = src[0];
            dst[1] = src[1];
            dst[2] = src[2];
            dst[3] = src[3];
            dst += dst_stride;
            src += pre_stride;
        }
    }
}
/* Dequantizes the 16 coefficients of block d:
 * dqcoeff[i] = qcoeff[i] * DQC[i], truncated to short. */
public static void vp8_dequantize_b_c(BLOCKD d, short *DQC) {
    var dq = d.dqcoeff;
    var q = d.qcoeff;
    for (int i = 0; i < 16; ++i) {
        dq.set(i, (short)(q.get(i) * DQC[i]));
    }
}
/* Builds the inter prediction for an 8x4 region (two horizontally adjacent
 * 4x4 sub-blocks that share one motion vector), writing into dst.
 *
 * The motion vector is in 1/8-pel units; full-pel motion uses the fast
 * 8x4 block copy, sub-pel motion uses the macroblock's 8x4 sub-pixel
 * predictor. */
static void build_inter_predictors2b(MACROBLOCKD x, BLOCKD d, byte *dst, int dst_stride, byte *base_pre, int pre_stride) {
    // Integer-pel source position derived from the motion vector.
    byte *src = base_pre + d.offset
              + (d.bmi.mv.as_mv.row >> 3) * pre_stride
              + (d.bmi.mv.as_mv.col >> 3);
    int frac_row = d.bmi.mv.as_mv.row & 7;
    int frac_col = d.bmi.mv.as_mv.col & 7;

    if (frac_row > 0 || frac_col > 0) {
        x.subpixel_predict8x4(src, pre_stride, frac_col, frac_row, dst, dst_stride);
    } else {
        vp8_rtcd.vp8_copy_mem8x4(src, pre_stride, dst, dst_stride);
    }
}
/* Builds the prediction for a pair of adjacent 4x4 sub-blocks (d0, d1).
 * When both sub-blocks carry the same motion vector they are predicted
 * together as a single 8x4 region; otherwise each 4x4 block is predicted
 * on its own with the macroblock's sub-pixel predictor. */
static void build_inter_predictors_pair(MACROBLOCKD x, BLOCKD d0, BLOCKD d1, byte *base_dst, byte *base_pre, int dst_stride) {
    if (d0.bmi.mv.as_int == d1.bmi.mv.as_int) {
        build_inter_predictors2b(x, d0, base_dst + d0.offset, dst_stride, base_pre, dst_stride);
    } else {
        build_inter_predictors_b(d0, base_dst + d0.offset, dst_stride, base_pre, dst_stride, x.subpixel_predict);
        build_inter_predictors_b(d1, base_dst + d1.offset, dst_stride, base_pre, dst_stride, x.subpixel_predict);
    }
}

/* Builds the inter prediction for a whole SPLITMV macroblock: the 16 luma
 * 4x4 sub-blocks (indices 0-15) into the Y destination buffer, then the
 * four U sub-blocks (16-19) and four V sub-blocks (20-23) into the chroma
 * buffers.
 *
 * For partitionings 0-2 (16x8, 8x16, 8x8) only the four corner blocks
 * 0, 2, 8 and 10 carry distinct motion vectors, so the luma plane is
 * predicted as four 8x8 regions. Partitioning 3 (4x4) predicts the luma
 * plane pair-by-pair, merging a pair into one 8x4 fetch when both blocks
 * share a motion vector.
 *
 * Note: chroma mvs are already clamped in build_4x4uvmvs(), so only the
 * luma mvs are clamped here (and only when the mode info flags it). */
static void build_inter4x4_predictors_mb(MACROBLOCKD x) {
    int i;
    byte *base_dst = x.dst.y_buffer;
    byte *base_pre = x.pre.y_buffer;

    if (x.mode_info_context.get().mbmi.partitioning < 3) {
        BLOCKD b;
        int dst_stride = x.dst.y_stride;

        // Only the four 8x8 corner blocks hold distinct motion vectors.
        x.block[0].bmi = x.mode_info_context.get().bmi[0];
        x.block[2].bmi = x.mode_info_context.get().bmi[2];
        x.block[8].bmi = x.mode_info_context.get().bmi[8];
        x.block[10].bmi = x.mode_info_context.get().bmi[10];
        if (x.mode_info_context.get().mbmi.need_to_clamp_mvs > 0) {
            clamp_mv_to_umv_border(x.block[0].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(x.block[2].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(x.block[8].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(x.block[10].bmi.mv.as_mv, x);
        }

        b = x.block[0];
        build_inter_predictors4b(x, b, base_dst + b.offset, dst_stride, base_pre, dst_stride);
        b = x.block[2];
        build_inter_predictors4b(x, b, base_dst + b.offset, dst_stride, base_pre, dst_stride);
        b = x.block[8];
        build_inter_predictors4b(x, b, base_dst + b.offset, dst_stride, base_pre, dst_stride);
        b = x.block[10];
        build_inter_predictors4b(x, b, base_dst + b.offset, dst_stride, base_pre, dst_stride);
    } else {
        // 4x4 partitioning: every sub-block has its own motion vector.
        for (i = 0; i < 16; i += 2) {
            BLOCKD d0 = x.block[i];
            BLOCKD d1 = x.block[i + 1];
            int dst_stride = x.dst.y_stride;

            x.block[i + 0].bmi = x.mode_info_context.get().bmi[i + 0];
            x.block[i + 1].bmi = x.mode_info_context.get().bmi[i + 1];
            if (x.mode_info_context.get().mbmi.need_to_clamp_mvs > 0) {
                clamp_mv_to_umv_border(x.block[i + 0].bmi.mv.as_mv, x);
                clamp_mv_to_umv_border(x.block[i + 1].bmi.mv.as_mv, x);
            }

            build_inter_predictors_pair(x, d0, d1, base_dst, base_pre, dst_stride);
        }
    }

    /* U plane. Note: uv mvs already clamped in build_4x4uvmvs() */
    base_dst = x.dst.u_buffer;
    base_pre = x.pre.u_buffer;
    for (i = 16; i < 20; i += 2) {
        build_inter_predictors_pair(x, x.block[i], x.block[i + 1], base_dst, base_pre, x.dst.uv_stride);
    }

    /* V plane. Note: uv mvs already clamped in build_4x4uvmvs() */
    base_dst = x.dst.v_buffer;
    base_pre = x.pre.v_buffer;
    for (i = 20; i < 24; i += 2) {
        build_inter_predictors_pair(x, x.block[i], x.block[i + 1], base_dst, base_pre, x.dst.uv_stride);
    }
}