-rw-r--r--  vp8/common/entropymv.c  | 18
-rw-r--r--  vp8/common/entropymv.h  |  6
-rw-r--r--  vp8/common/findnearmv.c | 29
-rw-r--r--  vp8/decoder/decodemv.c  |  2
-rw-r--r--  vp8/encoder/encodemv.c  |  1
-rw-r--r--  vp8/encoder/mcomp.c     | 22
6 files changed, 52 insertions(+), 26 deletions(-)
diff --git a/vp8/common/entropymv.c b/vp8/common/entropymv.c
index 9d7d56651..6c31236ec 100644
--- a/vp8/common/entropymv.c
+++ b/vp8/common/entropymv.c
@@ -19,6 +19,9 @@
 #define MV_COUNT_SAT 16
 #define MV_MAX_UPDATE_FACTOR 160
 
+/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
+#define COMPANDED_MVREF_THRESH 8
+
 /* Smooth or bias the mv-counts before prob computation */
 /* #define SMOOTH_MV_COUNTS */
 
@@ -103,6 +106,14 @@ MV_CLASS_TYPE vp8_get_mv_class(int z, int *offset) {
   return c;
 }
 
+int vp8_use_nmv_hp(const MV *ref) {
+  if ((abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
+      (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH)
+    return 1;
+  else
+    return 0;
+}
+
 int vp8_get_mv_mag(MV_CLASS_TYPE c, int offset) {
   return mv_class_base(c) + offset;
 }
@@ -154,12 +165,6 @@ static void increment_nmv_component(int v,
     } else {
       mvcomp->hp[e] += incr;
     }
-  } else {  /* assume the extra bit is 1 */
-    if (c == MV_CLASS_0) {
-      mvcomp->class0_hp[1] += incr;
-    } else {
-      mvcomp->hp[1] += incr;
-    }
   }
 }
 
@@ -194,6 +199,7 @@ void vp8_increment_nmv(const MV *mv, const MV *ref,
                        nmv_context_counts *mvctx, int usehp) {
   MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
   mvctx->joints[j]++;
+  usehp = usehp && vp8_use_nmv_hp(ref);
   if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
     increment_nmv_component_count(mv->row, &mvctx->comps[0], 1, usehp);
   }
diff --git a/vp8/common/entropymv.h b/vp8/common/entropymv.h
index 20af7e461..1a193b172 100644
--- a/vp8/common/entropymv.h
+++ b/vp8/common/entropymv.h
@@ -21,11 +21,11 @@ struct VP8Common;
 void vp8_entropy_mv_init();
 void vp8_init_mv_probs(struct VP8Common *cm);
 void vp8_adapt_mv_probs(struct VP8Common *cm);
-#if CONFIG_NEWMVENTROPY
-void vp8_adapt_nmv_probs(struct VP8Common *cm, int usehp);
-#endif
 
 #if CONFIG_NEWMVENTROPY
+void vp8_adapt_nmv_probs(struct VP8Common *cm, int usehp);
+void vp8_lower_mv_precision(MV *mv);
+int vp8_use_nmv_hp(const MV *ref);
 
 #define VP8_NMV_UPDATE_PROB 255
 //#define MV_GROUP_UPDATE
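Note: MV components are stored in 1/8-pel units, so (abs(v) >> 3) is the magnitude in whole pels; vp8_use_nmv_hp() therefore keeps 1/8-pel coding available only while the reference mv lies within COMPANDED_MVREF_THRESH (8) full pels on both axes. A minimal standalone sketch of that gate (the MV struct here is a simplified stand-in, not the libvpx build):

/* Sketch of the new high-precision gate, for illustration only. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { short row, col; } MV;  /* simplified stand-in for vp8's MV */

#define COMPANDED_MVREF_THRESH 8        /* integer-pel threshold from the patch */

static int use_nmv_hp(const MV *ref) {
  /* MV units are 1/8 pel, so >> 3 converts to full pels; allow hp only
   * while both components of the reference mv are under the threshold. */
  return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
         (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;
}

int main(void) {
  MV small = { 40, -63 };  /* 5 and ~7.9 pels: hp stays on   */
  MV large = { 64, 0 };    /* 8 pels on one axis: hp is off  */
  printf("%d %d\n", use_nmv_hp(&small), use_nmv_hp(&large));  /* prints: 1 0 */
  return 0;
}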
diff --git a/vp8/common/findnearmv.c b/vp8/common/findnearmv.c
index 3363d46ca..43eb6bf4d 100644
--- a/vp8/common/findnearmv.c
+++ b/vp8/common/findnearmv.c
@@ -20,15 +20,20 @@ const unsigned char vp8_mbsplit_offset[4][16] = {
   { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
 };
 
-static void lower_mv_precision(int_mv *mv)
+static void lower_mv_precision(int_mv *mv, int usehp)
 {
-  if (mv->as_mv.row & 1)
-    mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
-  if (mv->as_mv.col & 1)
-    mv->as_mv.col += (mv->as_mv.col > 0 ? -1 : 1);
+#if CONFIG_NEWMVENTROPY
+  if (!usehp || !vp8_use_nmv_hp(&mv->as_mv)) {
+#else
+  if (!usehp) {
+#endif
+    if (mv->as_mv.row & 1)
+      mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
+    if (mv->as_mv.col & 1)
+      mv->as_mv.col += (mv->as_mv.col > 0 ? -1 : 1);
+  }
 }
 
 /* Predict motion vectors using those from already-decoded nearby blocks.
    Note that we only consider one 4x4 subblock from each candidate 16x16
    macroblock.
 */
@@ -173,11 +178,9 @@ void vp8_find_near_mvs
   /* Make sure that the 1/8th bits of the Mvs are zero if high_precision
    * is not being used, by truncating the last bit towards 0
    */
-  if (!xd->allow_high_precision_mv) {
-    lower_mv_precision(best_mv);
-    lower_mv_precision(nearest);
-    lower_mv_precision(nearby);
-  }
+  lower_mv_precision(best_mv, xd->allow_high_precision_mv);
+  lower_mv_precision(nearest, xd->allow_high_precision_mv);
+  lower_mv_precision(nearby, xd->allow_high_precision_mv);
 
   // TODO: move clamp outside findnearmv
   vp8_clamp_mv2(nearest, xd);
@@ -301,9 +304,7 @@ void vp8_find_best_ref_mvs(MACROBLOCKD *xd,
 
   // Copy back the re-ordered mv list
   vpx_memcpy(mvlist, sorted_mvs, sizeof(sorted_mvs));
-
-  if (!xd->allow_high_precision_mv)
-    lower_mv_precision(best_mv);
+  lower_mv_precision(best_mv, xd->allow_high_precision_mv);
 
   vp8_clamp_mv2(best_mv, xd);
 }
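Note: lower_mv_precision() clears the 1/8-pel bit by stepping odd components toward zero, so truncation never grows the vector's magnitude; with the usehp parameter (and, under CONFIG_NEWMVENTROPY, the vp8_use_nmv_hp() check) callers can now pass allow_high_precision_mv unconditionally. A small sketch of just the truncation rule (hypothetical helper name, not the libvpx function):

#include <stdio.h>

static short truncate_toward_zero(short v) {
  if (v & 1)                /* odd: the 1/8-pel bit is set        */
    v += (v > 0 ? -1 : 1);  /* step toward 0, never away from it  */
  return v;
}

int main(void) {
  /* 7 -> 6, -7 -> -6, 6 stays 6: the magnitude never increases */
  printf("%d %d %d\n", truncate_toward_zero(7),
         truncate_toward_zero(-7), truncate_toward_zero(6));
  return 0;
}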
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 8d0f94e65..7f7567e4e 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -244,6 +244,7 @@ static void read_nmv(vp8_reader *r, MV *mv, const MV *ref,
 static void read_nmv_fp(vp8_reader *r, MV *mv, const MV *ref,
                         const nmv_context *mvctx, int usehp) {
   MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
+  usehp = usehp && vp8_use_nmv_hp(ref);
   if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
     mv->row = read_nmv_component_fp(r, mv->row, ref->row, &mvctx->comps[0],
                                     usehp);
@@ -252,6 +253,7 @@ static void read_nmv_fp(vp8_reader *r, MV *mv, const MV *ref,
     mv->col = read_nmv_component_fp(r, mv->col, ref->col, &mvctx->comps[1],
                                     usehp);
   }
+  //printf(" %d: %d %d ref: %d %d\n", usehp, mv->row, mv-> col, ref->row, ref->col);
 }
 
 static void update_nmv(vp8_reader *bc, vp8_prob *const p,
diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c
index 217cc00c5..1289d89bb 100644
--- a/vp8/encoder/encodemv.c
+++ b/vp8/encoder/encodemv.c
@@ -526,6 +526,7 @@ void vp8_encode_nmv(vp8_writer *w, const MV *mv, const MV *ref,
 void vp8_encode_nmv_fp(vp8_writer *w, const MV *mv, const MV *ref,
                        const nmv_context *mvctx, int usehp) {
   MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
+  usehp = usehp && vp8_use_nmv_hp(ref);
   if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
     encode_nmv_component_fp(w, mv->row, ref->row, &mvctx->comps[0], usehp);
   }
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 76accd423..a6cf2f18b 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -276,6 +276,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
   int maxc, minc, maxr, minr;
   int y_stride;
   int offset;
+  int usehp = xd->allow_high_precision_mv;
 #if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
   unsigned char *y0 = *(d->base_pre) + d->pre +
                       (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
@@ -301,7 +302,6 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
   y_stride = d->pre_stride;
 #endif
 
-
   rr = ref_mv->as_mv.row;
   rc = ref_mv->as_mv.col;
   br = bestmv->as_mv.row << 3;
@@ -403,7 +403,15 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
     tc = bc;
   }
 
-  if (x->e_mbd.allow_high_precision_mv) {
+#if CONFIG_NEWMVENTROPY
+  if (xd->allow_high_precision_mv) {
+    usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
+  } else {
+    usehp = 0;
+  }
+#endif
+
+  if (usehp) {
     hstep >>= 1;
     while (--eighthiters) {
       CHECK_BETTER(left, tr, tc - hstep);
@@ -471,6 +479,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
   int thismse;
   int y_stride;
   MACROBLOCKD *xd = &x->e_mbd;
+  int usehp = xd->allow_high_precision_mv;
 #if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
   unsigned char *y0 = *(d->base_pre) + d->pre +
                       (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
@@ -762,7 +771,14 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
     *sse1 = sse;
   }
 
-  if (!x->e_mbd.allow_high_precision_mv)
+#if CONFIG_NEWMVENTROPY
+  if (x->e_mbd.allow_high_precision_mv) {
+    usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
+  } else {
+    usehp = 0;
+  }
+#endif
+  if (!usehp)
     return bestmse;
 
   /* Now do 1/8th pixel */
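Note: both subpel searches now seed usehp from xd->allow_high_precision_mv and, under CONFIG_NEWMVENTROPY, re-derive it from ref_mv before the 1/8-pel refinement, so the encoder never searches at a precision the bitstream cannot signal for that reference mv (the decoder's read_nmv_fp applies the same gate). A compilable sketch of the shared gate (names are illustrative, not the libvpx API; build with -DCONFIG_NEWMVENTROPY=1 for the new behaviour):

#include <stdlib.h>

typedef struct { short row, col; } MV;   /* stand-in for vp8's MV */
#define COMPANDED_MVREF_THRESH 8

static int use_nmv_hp(const MV *ref) {
  return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
         (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;
}

/* Returns nonzero when the 1/8-pel refinement loop may run. */
int eighth_pel_allowed(int allow_high_precision_mv, const MV *ref_mv) {
#if CONFIG_NEWMVENTROPY
  /* new gate: frame-level flag AND reference-mv magnitude check */
  return allow_high_precision_mv && use_nmv_hp(ref_mv);
#else
  return allow_high_precision_mv;        /* old behaviour */
#endif
}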