Exemplo n.º 1
0
        /* Clamp a luma motion vector so it cannot point entirely outside the
         * UMV (extended) border of the reference frame.
         *
         * If the MV points so far into the UMV border that no visible pixels
         * are used for reconstruction, the subpel part of the MV can be
         * discarded and the MV limited to 16 pixels with equivalent results.
         * The limit kicks in at 19 pixels for the top/left edges (16 pixels
         * plus 3 filter taps) and at 18 pixels for the bottom/right edges
         * (16 pixels plus 2 taps).
         *
         * NOTE(review): assumes MV has reference semantics so the caller sees
         * the clamped values -- confirm against the MV declaration. */
        static void clamp_mv_to_umv_border(MV mv, MACROBLOCKD xd)
        {
            checked
            {
                /* Written in the same two-step ternary style as the chroma
                 * variant clamp_uvmv_to_umv_border below. */
                mv.col = (short)((mv.col < (xd.mb_to_left_edge - (19 << 3)))
                              ? (xd.mb_to_left_edge - (16 << 3))
                              : mv.col);
                mv.col = (short)((mv.col > xd.mb_to_right_edge + (18 << 3))
                              ? (xd.mb_to_right_edge + (16 << 3))
                              : mv.col);

                mv.row = (short)((mv.row < (xd.mb_to_top_edge - (19 << 3)))
                              ? (xd.mb_to_top_edge - (16 << 3))
                              : mv.row);
                mv.row = (short)((mv.row > xd.mb_to_bottom_edge + (18 << 3))
                              ? (xd.mb_to_bottom_edge + (16 << 3))
                              : mv.row);
            }
        }
Exemplo n.º 2
0
 /* Build all inter predictors for one macroblock: SPLITMV macroblocks get
  * per-partition prediction, everything else a single 16x16 luma plus 8x8
  * chroma prediction. */
 public static void vp8_build_inter_predictors_mb(MACROBLOCKD xd)
 {
     bool isSplit =
         xd.mode_info_context.get().mbmi.mode == (byte)MB_PREDICTION_MODE.SPLITMV;

     if (isSplit)
     {
         /* Derive the chroma block MVs first, then predict block by block. */
         build_4x4uvmvs(xd);
         build_inter4x4_predictors_mb(xd);
     }
     else
     {
         vp8_build_inter16x16_predictors_mb(xd, xd.dst.y_buffer, xd.dst.u_buffer,
                                            xd.dst.v_buffer, xd.dst.y_stride,
                                            xd.dst.uv_stride);
     }
 }
Exemplo n.º 3
0
        /* Precompute each block descriptor's pixel offset into its
         * destination plane (16 Y blocks, then 4 U and 4 V blocks). */
        public static void vp8_build_block_doffsets(MACROBLOCKD x)
        {
            /* Y: blocks 0..15 form a 4x4 grid of 4x4-pixel blocks. */
            for (int r = 0; r < 4; ++r)
            {
                for (int c = 0; c < 4; ++c)
                {
                    x.block[r * 4 + c].offset = r * 4 * x.dst.y_stride + c * 4;
                }
            }

            /* Chroma: blocks 16..19 are U, 20..23 are V. Both planes share
             * the same 2x2 layout, so each V block mirrors its U partner. */
            for (int r = 0; r < 2; ++r)
            {
                for (int c = 0; c < 2; ++c)
                {
                    int uvOffset = r * 4 * x.dst.uv_stride + c * 4;
                    int b = 16 + r * 2 + c;

                    x.block[b].offset = uvOffset;
                    x.block[b + 4].offset = uvOffset;
                }
            }
        }
Exemplo n.º 4
0
        /* For a SPLITMV macroblock, derive one motion vector for each of the
         * four 4x4 chroma (U/V) blocks by averaging the four co-located luma
         * MVs with round-to-nearest (ties away from zero), then clamp the
         * result to the UMV border when the macroblock requires it.
         */
        static void build_4x4uvmvs(MACROBLOCKD x)
        {
            int i, j;

            checked
            {
                for (i = 0; i < 2; ++i)
                {
                    for (j = 0; j < 2; ++j)
                    {
                        /* Top-left luma block of the 2x2 group covered by this
                         * chroma block (luma blocks sit on a 4-wide grid). */
                        int yoffset = i * 8 + j * 2;
                        /* Matching U block (16..19) and V block (20..23). */
                        int uoffset = 16 + i * 2 + j;
                        int voffset = 20 + i * 2 + j;

                        int temp;

                        /* Sum the row components of the four luma MVs: +0/+1
                         * are the top pair, +4/+5 the pair one grid row below. */
                        temp = x.mode_info_context.get().bmi[yoffset + 0].mv.as_mv.row +
                               x.mode_info_context.get().bmi[yoffset + 1].mv.as_mv.row +
                               x.mode_info_context.get().bmi[yoffset + 4].mv.as_mv.row +
                               x.mode_info_context.get().bmi[yoffset + 5].mv.as_mv.row;

                        /* Round before /8: the arithmetic shift reproduces the
                         * sign (-1 for negative sums), so negative sums get
                         * 4 - 8 = -4 and the truncating division rounds away
                         * from zero. */
                        temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) * 8);

                        /* NOTE(review): fullpixel_mask presumably clears the
                         * subpel bits when the frame is full-pel only -- confirm. */
                        x.block[uoffset].bmi.mv.as_mv.row = (short)((temp / 8) & x.fullpixel_mask);

                        /* Same averaging for the column components. */
                        temp = x.mode_info_context.get().bmi[yoffset + 0].mv.as_mv.col +
                               x.mode_info_context.get().bmi[yoffset + 1].mv.as_mv.col +
                               x.mode_info_context.get().bmi[yoffset + 4].mv.as_mv.col +
                               x.mode_info_context.get().bmi[yoffset + 5].mv.as_mv.col;

                        temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) * 8);

                        x.block[uoffset].bmi.mv.as_mv.col = (short)((temp / 8) & x.fullpixel_mask);

                        if (x.mode_info_context.get().mbmi.need_to_clamp_mvs > 0)
                        {
                            /* NOTE(review): assumes the clamp mutates the stored
                             * vector through as_mv (reference semantics) --
                             * confirm against the MV declaration. */
                            clamp_uvmv_to_umv_border(x.block[uoffset].bmi.mv.as_mv, x);
                        }

                        /* The V block reuses the U block's (already clamped) MV. */
                        x.block[voffset].bmi.mv.as_int = x.block[uoffset].bmi.mv.as_int;
                    }
                }
            }
        }
Exemplo n.º 5
0
        /* Chroma variant of clamp_mv_to_umv_border: chroma MVs are at half
         * luma resolution, so each component is doubled for the comparison
         * against the luma-domain limits, and the clamp value is halved
         * before being stored. */
        static void clamp_uvmv_to_umv_border(MV mv, MACROBLOCKD xd)
        {
            checked
            {
                if (2 * mv.col < (xd.mb_to_left_edge - (19 << 3)))
                {
                    mv.col = (short)((xd.mb_to_left_edge - (16 << 3)) >> 1);
                }
                if (2 * mv.col > xd.mb_to_right_edge + (18 << 3))
                {
                    mv.col = (short)((xd.mb_to_right_edge + (16 << 3)) >> 1);
                }

                if (2 * mv.row < (xd.mb_to_top_edge - (19 << 3)))
                {
                    mv.row = (short)((xd.mb_to_top_edge - (16 << 3)) >> 1);
                }
                if (2 * mv.row > xd.mb_to_bottom_edge + (18 << 3))
                {
                    mv.row = (short)((xd.mb_to_bottom_edge + (16 << 3)) >> 1);
                }
            }
        }
Exemplo n.º 6
0
        /* Predict one 8x4 region covering a pair of 4x4 blocks that share the
         * motion vector stored in d. Uses the subpel filter when either MV
         * component has fractional (1/8-pel) bits, otherwise a plain copy. */
        static void build_inter_predictors2b(MACROBLOCKD x, BLOCKD d,
                                             byte *dst, int dst_stride,
                                             byte *base_pre, int pre_stride)
        {
            short mvRow = d.bmi.mv.as_mv.row;
            short mvCol = d.bmi.mv.as_mv.col;

            /* Integer-pel source position (MV components are in 1/8 pel). */
            byte *src = base_pre + d.offset + (mvRow >> 3) * pre_stride + (mvCol >> 3);

            bool hasSubpel = (mvRow & 7) > 0 || (mvCol & 7) > 0;

            if (hasSubpel)
            {
                x.subpixel_predict8x4(src, pre_stride, mvCol & 7, mvRow & 7, dst,
                                      dst_stride);
            }
            else
            {
                vp8_rtcd.vp8_copy_mem8x4(src, pre_stride, dst, dst_stride);
            }
        }
Exemplo n.º 7
0
        /* Wire up every block descriptor to its slice of the shared
         * per-macroblock predictor and coefficient buffers. */
        public unsafe static void vp8_setup_block_dptrs(MACROBLOCKD x)
        {
            /* Y: 16 blocks of 4x4 pixels inside the 16x16 predictor area
             * (row stride 16 bytes). */
            for (int r = 0; r < 4; ++r)
            {
                for (int c = 0; c < 4; ++c)
                {
                    x.block[r * 4 + c].predictor =
                        new ArrPtr <byte>(x.predictor, r * 4 * 16 + c * 4);
                }
            }

            /* U (offset 256) and V (offset 320): 2x2 blocks each inside an
             * 8x8 predictor area (row stride 8 bytes). */
            for (int r = 0; r < 2; ++r)
            {
                for (int c = 0; c < 2; ++c)
                {
                    int within = r * 4 * 8 + c * 4;

                    x.block[16 + r * 2 + c].predictor =
                        new ArrPtr <byte>(x.predictor, 256 + within);
                    x.block[20 + r * 2 + c].predictor =
                        new ArrPtr <byte>(x.predictor, 320 + within);
                }
            }

            /* Coefficients: 16 per block; one end-of-block entry per block. */
            for (int b = 0; b < 25; ++b)
            {
                x.block[b].qcoeff = new ArrPtr <short>(x.qcoeff, b * 16);
                x.block[b].dqcoeff = new ArrPtr <short>(x.dqcoeff, b * 16);
                x.block[b].eob = new ArrPtr <sbyte>(x.eobs, b);
            }
        }
Exemplo n.º 8
0
        /* Predict a pair of adjacent 4x4 blocks (d0, d1). When both share the
         * same MV the pair is predicted as one 8x4 region; otherwise each 4x4
         * block is predicted on its own. Note: the prediction stride is also
         * used as the reference (pre) stride, as in the original code. */
        static void build_inter_predictors_b_pair(MACROBLOCKD x, BLOCKD d0, BLOCKD d1,
                                                  byte *base_dst, byte *base_pre,
                                                  int dst_stride)
        {
            if (d0.bmi.mv.as_int == d1.bmi.mv.as_int)
            {
                build_inter_predictors2b(x, d0, base_dst + d0.offset, dst_stride,
                                         base_pre, dst_stride);
            }
            else
            {
                build_inter_predictors_b(d0, base_dst + d0.offset, dst_stride,
                                         base_pre, dst_stride, x.subpixel_predict);
                build_inter_predictors_b(d1, base_dst + d1.offset, dst_stride,
                                         base_pre, dst_stride, x.subpixel_predict);
            }
        }

        /* Predict one chroma plane (4 blocks starting at 'first') as two
         * block pairs. UV MVs were already clamped in build_4x4uvmvs(). */
        static void build_inter4x4_uv_plane(MACROBLOCKD x, int first,
                                            byte *base_dst, byte *base_pre)
        {
            int dst_stride = x.dst.uv_stride;

            for (int i = first; i < first + 4; i += 2)
            {
                build_inter_predictors_b_pair(x, x.block[i], x.block[i + 1],
                                              base_dst, base_pre, dst_stride);
            }
        }

        /* Build the inter predictors for a SPLITMV macroblock: luma either as
         * four 8x8 quadrants (partitioning < 3, i.e. 16x8 / 8x16 / 8x8) or as
         * sixteen 4x4 blocks, then the U and V planes as 4x4 block pairs.
         * The previously triplicated pair-prediction logic now lives in
         * build_inter_predictors_b_pair / build_inter4x4_uv_plane. */
        static void build_inter4x4_predictors_mb(MACROBLOCKD x)
        {
            byte *base_dst = x.dst.y_buffer;
            byte *base_pre = x.pre.y_buffer;

            if (x.mode_info_context.get().mbmi.partitioning < 3)
            {
                /* Only blocks 0, 2, 8 and 10 carry distinct MVs; each one
                 * predicts an 8x8 quadrant. */
                BLOCKD b;
                int    dst_stride = x.dst.y_stride;

                x.block[0].bmi  = x.mode_info_context.get().bmi[0];
                x.block[2].bmi  = x.mode_info_context.get().bmi[2];
                x.block[8].bmi  = x.mode_info_context.get().bmi[8];
                x.block[10].bmi = x.mode_info_context.get().bmi[10];
                if (x.mode_info_context.get().mbmi.need_to_clamp_mvs > 0)
                {
                    clamp_mv_to_umv_border(x.block[0].bmi.mv.as_mv, x);
                    clamp_mv_to_umv_border(x.block[2].bmi.mv.as_mv, x);
                    clamp_mv_to_umv_border(x.block[8].bmi.mv.as_mv, x);
                    clamp_mv_to_umv_border(x.block[10].bmi.mv.as_mv, x);
                }

                b = x.block[0];
                build_inter_predictors4b(x, b, base_dst + b.offset, dst_stride, base_pre,
                                         dst_stride);
                b = x.block[2];
                build_inter_predictors4b(x, b, base_dst + b.offset, dst_stride, base_pre,
                                         dst_stride);
                b = x.block[8];
                build_inter_predictors4b(x, b, base_dst + b.offset, dst_stride, base_pre,
                                         dst_stride);
                b = x.block[10];
                build_inter_predictors4b(x, b, base_dst + b.offset, dst_stride, base_pre,
                                         dst_stride);
            }
            else
            {
                /* 4x4 partitioning: sixteen MVs, handled as 8 block pairs.
                 * NOTE(review): d0/d1 are captured before the bmi copy, as in
                 * the original -- relies on BLOCKD reference semantics so the
                 * updated bmi is visible through them; confirm. */
                for (int i = 0; i < 16; i += 2)
                {
                    BLOCKD d0         = x.block[i];
                    BLOCKD d1         = x.block[i + 1];
                    int    dst_stride = x.dst.y_stride;

                    x.block[i + 0].bmi = x.mode_info_context.get().bmi[i + 0];
                    x.block[i + 1].bmi = x.mode_info_context.get().bmi[i + 1];
                    if (x.mode_info_context.get().mbmi.need_to_clamp_mvs > 0)
                    {
                        clamp_mv_to_umv_border(x.block[i + 0].bmi.mv.as_mv, x);
                        clamp_mv_to_umv_border(x.block[i + 1].bmi.mv.as_mv, x);
                    }

                    build_inter_predictors_b_pair(x, d0, d1, base_dst, base_pre,
                                                  dst_stride);
                }
            }

            /* U plane: blocks 16..19. */
            build_inter4x4_uv_plane(x, 16, x.dst.u_buffer, x.pre.u_buffer);

            /* V plane: blocks 20..23. */
            build_inter4x4_uv_plane(x, 20, x.dst.v_buffer, x.pre.v_buffer);
        }
Exemplo n.º 9
0
        /* Build the 16x16 luma and two 8x8 chroma inter predictors for a
         * non-SPLITMV macroblock, reading from the reference (pre) buffers
         * and writing to the supplied destination planes.
         */
        static void vp8_build_inter16x16_predictors_mb(MACROBLOCKD x, byte *dst_y,
                                                       byte *dst_u,
                                                       byte *dst_v, int dst_ystride,
                                                       int dst_uvstride)
        {
            int   offset;
            byte *ptr;
            /* In C# the '*' binds to the type, so both uptr and vptr are
             * byte* (unlike the same-looking declaration in C). */
            byte *uptr, vptr;

            int_mv _16x16mv = new int_mv();

            byte *ptr_base   = x.pre.y_buffer;
            int   pre_stride = x.pre.y_stride;

            _16x16mv.as_int = x.mode_info_context.get().mbmi.mv.as_int;

            if (x.mode_info_context.get().mbmi.need_to_clamp_mvs > 0)
            {
                /* NOTE(review): assumes clamp_mv_to_umv_border mutates the
                 * vector through as_mv (reference semantics) -- confirm. */
                clamp_mv_to_umv_border(_16x16mv.as_mv, x);
            }

            /* Integer-pel source position; MV components are in 1/8-pel units. */
            ptr = ptr_base + (_16x16mv.as_mv.row >> 3) * pre_stride +
                  (_16x16mv.as_mv.col >> 3);

            /* Any subpel bits in either packed component select the filter path. */
            if ((_16x16mv.as_int & 0x00070007) > 0)
            {
                x.subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7,
                                        _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
            }
            else
            {
                vp8_rtcd.vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
            }

            /* calc uv motion vectors */
            /* Halve each component rounding away from zero: (1 | sign) is +1
             * for non-negative values and -1 for negative ones (the shift by
             * 31 reproduces the component's sign), applied before the /2. */
            _16x16mv.as_mv.row += (short)
                                  (1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1)));
            _16x16mv.as_mv.col += (short)
                                  (1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1)));
            _16x16mv.as_mv.row /= 2;
            _16x16mv.as_mv.col /= 2;
            /* NOTE(review): fullpixel_mask presumably clears the subpel bits
             * when the frame is full-pel only -- confirm. */
            _16x16mv.as_mv.row &= (short)x.fullpixel_mask;
            _16x16mv.as_mv.col &= (short)x.fullpixel_mask;

            /* If the halved MV (scaled back to luma units) lies outside the
             * clamping range, skip chroma prediction entirely, leaving the
             * chroma destinations untouched. */
            if (2 * _16x16mv.as_mv.col < (x.mb_to_left_edge - (19 << 3)) ||
                2 * _16x16mv.as_mv.col > x.mb_to_right_edge + (18 << 3) ||
                2 * _16x16mv.as_mv.row < (x.mb_to_top_edge - (19 << 3)) ||
                2 * _16x16mv.as_mv.row > x.mb_to_bottom_edge + (18 << 3))
            {
                return;
            }

            /* NOTE(review): chroma pre stride is assumed to be half the luma
             * stride -- confirm against the frame buffer layout. */
            pre_stride >>= 1;
            offset       = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
            uptr         = x.pre.u_buffer + offset;
            vptr         = x.pre.v_buffer + offset;

            if ((_16x16mv.as_int & 0x00070007) > 0)
            {
                x.subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7,
                                      _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
                x.subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7,
                                      _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
            }
            else
            {
                vp8_rtcd.vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
                vp8_rtcd.vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
            }
        }
Exemplo n.º 10
0
        /* Precompute the per-frame loop-filter level table
         * lfi.lvl[segment, ref, mode] by applying segment, reference-frame and
         * mode deltas to default_filt_lvl and clamping every entry to [0, 63].
         *
         * Fix: the fast-fill path below advanced the fixed pointer by only
         * seg * GetLength(1) (= seg * 4) bytes per segment, but lvl is indexed
         * three-dimensionally (lfi.lvl[seg, @ref, mode]), so a segment's slice
         * is GetLength(1) * GetLength(2) (= 16) bytes -- matching the original
         * C code's memset(lfi->lvl[seg][0], lvl_seg, 4 * 4). The old offset
         * overwrote entries belonging to other segments.
         */
        public static void vp8_loop_filter_frame_init(VP8_COMMON cm, MACROBLOCKD mbd, int default_filt_lvl)
        {
            int seg,  /* segment number */
                @ref, /* index in ref_lf_deltas */
                mode; /* index in mode_lf_deltas */

            loop_filter_info_n lfi = cm.lf_info;

            /* update limits if sharpness has changed */
            if (cm.last_sharpness_level != cm.sharpness_level)
            {
                vp8_loop_filter_update_sharpness(lfi, cm.sharpness_level);
                cm.last_sharpness_level = cm.sharpness_level;
            }

            for (seg = 0; seg < blockd.MAX_MB_SEGMENTS; ++seg)
            {
                int lvl_seg = default_filt_lvl;
                int lvl_ref, lvl_mode;

                /* Note the baseline filter values for each segment */
                if (mbd.segmentation_enabled > 0)
                {
                    if (mbd.mb_segement_abs_delta == blockd.SEGMENT_ABSDATA)
                    {
                        lvl_seg = mbd.segment_feature_data[(int)MB_LVL_FEATURES.MB_LVL_ALT_LF, seg];
                    }
                    else
                    { /* Delta Value */
                        lvl_seg += mbd.segment_feature_data[(int)MB_LVL_FEATURES.MB_LVL_ALT_LF, seg];
                    }
                    lvl_seg = (lvl_seg > 0) ? ((lvl_seg > 63) ? 63 : lvl_seg) : 0;
                }

                if (mbd.mode_ref_lf_delta_enabled == 0)
                {
                    /* we could get rid of this if we assume that deltas are set to
                     * zero when not in use; encoder always uses deltas
                     */
                    //memset(lfi->lvl[seg][0], lvl_seg, 4 * 4);
                    fixed(byte *plfi = lfi.lvl)
                    {
                        /* One segment spans all refs x modes entries. */
                        byte *pseg = plfi + seg * lfi.lvl.GetLength(1) * lfi.lvl.GetLength(2);

                        for (int i = 0; i < 4 * 4; i++)
                        {
                            *pseg++ = (byte)lvl_seg;
                        }
                    }

                    continue;
                }

                /* INTRA_FRAME */
                @ref = (int)MV_REFERENCE_FRAME.INTRA_FRAME;

                /* Apply delta for reference frame */
                lvl_ref = lvl_seg + mbd.ref_lf_deltas[@ref];

                /* Apply delta for Intra modes */
                mode = 0; /* B_PRED */
                /* Only the split mode BPRED has a further special case */
                lvl_mode = lvl_ref + mbd.mode_lf_deltas[mode];
                /* clamp */
                lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0;

                lfi.lvl[seg, @ref, mode] = (byte)lvl_mode;

                mode = 1; /* all the rest of Intra modes */
                /* clamp */
                lvl_mode = (lvl_ref > 0) ? (lvl_ref > 63 ? 63 : lvl_ref) : 0;
                lfi.lvl[seg, @ref, mode] = (byte)lvl_mode;

                /* LAST, GOLDEN, ALT */
                for (@ref = 1; @ref < (int)MV_REFERENCE_FRAME.MAX_REF_FRAMES; ++@ref)
                {
                    /* Apply delta for reference frame */
                    lvl_ref = lvl_seg + mbd.ref_lf_deltas[@ref];

                    /* Apply delta for Inter modes */
                    for (mode = 1; mode < 4; ++mode)
                    {
                        lvl_mode = lvl_ref + mbd.mode_lf_deltas[mode];
                        /* clamp */
                        lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0;

                        lfi.lvl[seg, @ref, mode] = (byte)lvl_mode;
                    }
                }
            }
        }