author     Deb Mukherjee <debargha@google.com>    2012-07-26 13:42:07 -0700
committer  Deb Mukherjee <debargha@google.com>    2012-09-06 08:28:21 -0700
commit     00f9eb659034cb5cef0bf063c1b72c72c6333f36 (patch)
tree       760c659d32debf97ec1c0a6ecb5c9e834be4a915 /vp8/decoder/decodemv.c
parent     de6dfa6bb0cd680ca446a15b52b2a026859eb1e6 (diff)
New motion vector entropy coding
Adds a new experiment with redesigned/refactored motion vector entropy coding. The patch also takes a first step towards separating the integer and fractional pel components of an MV. However, the fractional pel encoding still depends on the integer pel part, so the two are not yet fully independent. Further experiments are in progress to see how far they can be decoupled without affecting performance. All components, including entropy coding/decoding, costing for MV search, and forward and backward updates to the probability tables, have been implemented.

Results so far:
  derf:   +0.19%
  std-hd: +0.28%
  yt:     +0.80%
  hd:     +1.15%

Patch: simplifies the fractional pel models.
  derf:   +0.284%
  std-hd: +0.289%
  yt:     +0.849%
  hd:     +1.254%

Patch: some changes in the models, rebased.
  derf:   +0.330%
  std-hd: +0.306%
  yt:     +0.816%
  hd:     +1.225%

Change-Id: I646b3c48f3587f4cc909639b78c3798da6402678
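To make the integer/fractional split concrete, the sketch below decomposes a nonzero 1/8-pel MV component into a sign, an integer-pel magnitude, a quarter-pel fraction (the 2-bit "fp" symbol) and an eighth-pel high-precision bit, mirroring the pieces the new coder signals separately. This is an illustrative sketch only, not the patch's bitstream layout: the type and function names below are invented for illustration, and the real decoder additionally bins the integer part into magnitude classes via vp8_get_mv_class()/vp8_get_mv_mag(), as seen in the diff below.

/* Illustrative only: split a nonzero 1/8-pel MV component into the pieces
 * the new entropy coder treats separately (sign, integer pel, fractional
 * pel, high-precision bit). Names here are hypothetical, not libvpx API. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int sign;     /* 1 if the component is negative */
  int int_pel;  /* full-pel magnitude */
  int frac;     /* quarter-pel remainder, 0..3 (the 2-bit "fp" symbol) */
  int hp;       /* eighth-pel bit; only coded when high-precision MVs are on */
} mv_component_parts;

static mv_component_parts split_mv_component(int v) { /* v in 1/8-pel units */
  mv_component_parts p;
  int mag = abs(v);
  p.sign    = v < 0;
  p.int_pel = mag >> 3;       /* 8 eighth-pel units per full pel */
  p.frac    = (mag >> 1) & 3; /* half- and quarter-pel bits */
  p.hp      = mag & 1;        /* eighth-pel bit */
  return p;
}

int main(void) {
  /* -21/8 pel = -(2 full pels + 2 quarter pels + 1 eighth pel) */
  mv_component_parts p = split_mv_component(-21);
  printf("sign=%d int_pel=%d frac=%d hp=%d\n", p.sign, p.int_pel, p.frac, p.hp);
  return 0;
}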
Diffstat (limited to 'vp8/decoder/decodemv.c')
-rw-r--r--  vp8/decoder/decodemv.c  196
1 file changed, 191 insertions, 5 deletions
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 069d073d4..e8b4a1f67 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -172,6 +172,150 @@ static void vp8_kfread_modes(VP8D_COMP *pbi,
}
+#if CONFIG_NEWMVENTROPY
+static int read_nmv_component(vp8_reader *r,
+ int rv,
+ const nmv_component *mvcomp) {
+ int v, s, z, c, o, d;
+ s = vp8_read(r, mvcomp->sign);
+ c = vp8_treed_read(r, vp8_mv_class_tree, mvcomp->classes);
+ if (c == MV_CLASS_0) {
+ d = vp8_treed_read(r, vp8_mv_class0_tree, mvcomp->class0);
+ } else {
+ int i, b;
+ d = 0;
+ b = c + CLASS0_BITS - 1; /* number of bits */
+ for (i = 0; i < b; ++i)
+ d |= (vp8_read(r, mvcomp->bits[i]) << i);
+ }
+ o = d << 3;
+
+ z = vp8_get_mv_mag(c, o);
+ v = (s ? -(z + 1) : (z + 1));
+ return v;
+}
+
+static int read_nmv_component_fp(vp8_reader *r,
+ int v,
+ int rv,
+ const nmv_component *mvcomp,
+ int usehp) {
+ int s, z, c, o, d, e, f;
+ s = v < 0;
+ z = (s ? -v : v) - 1; /* magnitude - 1 */
+
+ c = vp8_get_mv_class(z, &o);
+ d = o >> 3;
+
+ if (c == MV_CLASS_0) {
+ f = vp8_treed_read(r, vp8_mv_fp_tree, mvcomp->class0_fp[d]);
+ } else {
+ f = vp8_treed_read(r, vp8_mv_fp_tree, mvcomp->fp);
+ }
+ o += (f << 1);
+
+ if (usehp) {
+ if (c == MV_CLASS_0) {
+ e = vp8_read(r, mvcomp->class0_hp);
+ } else {
+ e = vp8_read(r, mvcomp->hp);
+ }
+ o += e;
+ } else {
+ ++o; /* Note if hp is not used, the default value of the hp bit is 1 */
+ }
+ z = vp8_get_mv_mag(c, o);
+ v = (s ? -(z + 1) : (z + 1));
+ return v;
+}
+
+static void read_nmv(vp8_reader *r, MV *mv, const MV *ref,
+ const nmv_context *mvctx) {
+ MV_JOINT_TYPE j = vp8_treed_read(r, vp8_mv_joint_tree, mvctx->joints);
+ mv->row = mv->col = 0;
+ if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
+ mv->row = read_nmv_component(r, ref->row, &mvctx->comps[0]);
+ }
+ if (j == MV_JOINT_HNZVZ || j == MV_JOINT_HNZVNZ) {
+ mv->col = read_nmv_component(r, ref->col, &mvctx->comps[1]);
+ }
+}
+
+static void read_nmv_fp(vp8_reader *r, MV *mv, const MV *ref,
+ const nmv_context *mvctx, int usehp) {
+ MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
+ if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
+ mv->row = read_nmv_component_fp(r, mv->row, ref->row, &mvctx->comps[0],
+ usehp);
+ }
+ if (j == MV_JOINT_HNZVZ || j == MV_JOINT_HNZVNZ) {
+ mv->col = read_nmv_component_fp(r, mv->col, ref->col, &mvctx->comps[1],
+ usehp);
+ }
+}
+
+static void update_nmv(vp8_reader *bc, vp8_prob *const p,
+ const vp8_prob upd_p) {
+ if (vp8_read(bc, upd_p)) {
+#ifdef LOW_PRECISION_MV_UPDATE
+ *p = (vp8_read_literal(bc, 7) << 1) | 1;
+#else
+ *p = (vp8_read_literal(bc, 8));
+#endif
+ }
+}
+
+static void read_nmvprobs(vp8_reader *bc, nmv_context *mvctx,
+ int usehp) {
+ int i, j, k;
+#ifdef MV_GROUP_UPDATE
+ if (!vp8_read_bit(bc)) return;
+#endif
+ for (j = 0; j < MV_JOINTS - 1; ++j) {
+ update_nmv(bc, &mvctx->joints[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ for (i = 0; i < 2; ++i) {
+ update_nmv(bc, &mvctx->comps[i].sign,
+ VP8_NMV_UPDATE_PROB);
+ for (j = 0; j < MV_CLASSES - 1; ++j) {
+ update_nmv(bc, &mvctx->comps[i].classes[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < CLASS0_SIZE - 1; ++j) {
+ update_nmv(bc, &mvctx->comps[i].class0[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < MV_OFFSET_BITS; ++j) {
+ update_nmv(bc, &mvctx->comps[i].bits[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ for (k = 0; k < 3; ++k)
+ update_nmv(bc, &mvctx->comps[i].class0_fp[j][k],
+ VP8_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < 3; ++j) {
+ update_nmv(bc, &mvctx->comps[i].fp[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+
+ if (usehp) {
+ for (i = 0; i < 2; ++i) {
+ update_nmv(bc, &mvctx->comps[i].class0_hp,
+ VP8_NMV_UPDATE_PROB);
+ update_nmv(bc, &mvctx->comps[i].hp,
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+}
+
+#else
+
static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) {
const vp8_prob *const p = (const vp8_prob *) mvc;
int x = 0;
@@ -211,7 +355,6 @@ static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) {
#endif
}
-
static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) {
int i = 0;
@@ -287,6 +430,8 @@ static void read_mvcontexts_hp(vp8_reader *bc, MV_CONTEXT_HP *mvc) {
} while (++i < 2);
}
+#endif /* CONFIG_NEWMVENTROPY */
+
// Read the reference frame
static MV_REFERENCE_FRAME read_ref_frame(VP8D_COMP *pbi,
vp8_reader *const bc,
@@ -452,8 +597,12 @@ static void read_switchable_interp_probs(VP8D_COMP *pbi) {
static void mb_mode_mv_init(VP8D_COMP *pbi) {
VP8_COMMON *const cm = & pbi->common;
vp8_reader *const bc = & pbi->bc;
+#if CONFIG_NEWMVENTROPY
+ nmv_context *const nmvc = &pbi->common.fc.nmvc;
+#else
MV_CONTEXT *const mvc = pbi->common.fc.mvc;
MV_CONTEXT_HP *const mvc_hp = pbi->common.fc.mvc_hp;
+#endif
MACROBLOCKD *const xd = & pbi->mb;
vpx_memset(cm->mbskip_pred_probs, 0, sizeof(cm->mbskip_pred_probs));
@@ -499,10 +648,14 @@ static void mb_mode_mv_init(VP8D_COMP *pbi) {
cm->fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
} while (++i < VP8_YMODES - 1);
}
+#if CONFIG_NEWMVENTROPY
+ read_nmvprobs(bc, nmvc, xd->allow_high_precision_mv);
+#else
if (xd->allow_high_precision_mv)
read_mvcontexts_hp(bc, mvc_hp);
else
read_mvcontexts(bc, mvc);
+#endif
}
}
@@ -585,12 +738,16 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
int mb_row, int mb_col) {
VP8_COMMON *const cm = & pbi->common;
vp8_reader *const bc = & pbi->bc;
+#if CONFIG_NEWMVENTROPY
+ nmv_context *const nmvc = &pbi->common.fc.nmvc;
+#else
MV_CONTEXT *const mvc = pbi->common.fc.mvc;
MV_CONTEXT_HP *const mvc_hp = pbi->common.fc.mvc_hp;
+#endif
const int mis = pbi->common.mode_info_stride;
MACROBLOCKD *const xd = & pbi->mb;
- int_mv *const mv = & mbmi->mv[0];
+ int_mv *const mv = & mbmi->mv;
int mb_to_left_edge;
int mb_to_right_edge;
int mb_to_top_edge;
@@ -684,7 +841,6 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
// Update stats on relative distance of chosen vector to the
// possible best reference vectors.
{
- int i;
MV_REFERENCE_FRAME ref_frame = mbmi->ref_frame;
find_mv_refs(xd, mi, prev_mi,
@@ -833,8 +989,6 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
int mv_contz;
int blockmode;
- second_leftmv.as_int = 0;
- second_abovemv.as_int = 0;
k = vp8_mbsplit_offset[s][j];
leftmv.as_int = left_block_mv(mi, k);
@@ -849,6 +1003,13 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
switch (blockmode) {
case NEW4X4:
+#if CONFIG_NEWMVENTROPY
+ read_nmv(bc, &blockmv.as_mv, &best_mv.as_mv, nmvc);
+ read_nmv_fp(bc, &blockmv.as_mv, &best_mv.as_mv, nmvc,
+ xd->allow_high_precision_mv);
+ vp8_increment_nmv(&blockmv.as_mv, &best_mv.as_mv,
+ &cm->fc.NMVcount, xd->allow_high_precision_mv);
+#else
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &blockmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (blockmv.as_mv.row)]++;
@@ -858,10 +1019,18 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
cm->fc.MVcount[0][mv_max + (blockmv.as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (blockmv.as_mv.col >> 1)]++;
}
+#endif /* CONFIG_NEWMVENTROPY */
blockmv.as_mv.row += best_mv.as_mv.row;
blockmv.as_mv.col += best_mv.as_mv.col;
if (mbmi->second_ref_frame) {
+#if CONFIG_NEWMVENTROPY
+ read_nmv(bc, &secondmv.as_mv, &best_mv_second.as_mv, nmvc);
+ read_nmv_fp(bc, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
+ xd->allow_high_precision_mv);
+ vp8_increment_nmv(&secondmv.as_mv, &best_mv_second.as_mv,
+ &cm->fc.NMVcount, xd->allow_high_precision_mv);
+#else
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &secondmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (secondmv.as_mv.row)]++;
@@ -871,6 +1040,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
cm->fc.MVcount[0][mv_max + (secondmv.as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (secondmv.as_mv.col >> 1)]++;
}
+#endif /* CONFIG_NEWMVENTROPY */
secondmv.as_mv.row += best_mv_second.as_mv.row;
secondmv.as_mv.col += best_mv_second.as_mv.col;
}
@@ -975,6 +1145,13 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
break;
case NEWMV:
+#if CONFIG_NEWMVENTROPY
+ read_nmv(bc, &mv->as_mv, &best_mv.as_mv, nmvc);
+ read_nmv_fp(bc, &mv->as_mv, &best_mv.as_mv, nmvc,
+ xd->allow_high_precision_mv);
+ vp8_increment_nmv(&mv->as_mv, &best_mv.as_mv, &cm->fc.NMVcount,
+ xd->allow_high_precision_mv);
+#else
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &mv->as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (mv->as_mv.row)]++;
@@ -984,6 +1161,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
cm->fc.MVcount[0][mv_max + (mv->as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (mv->as_mv.col >> 1)]++;
}
+#endif /* CONFIG_NEWMVENTROPY */
mv->as_mv.row += best_mv.as_mv.row;
mv->as_mv.col += best_mv.as_mv.col;
@@ -998,6 +1176,13 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mb_to_top_edge,
mb_to_bottom_edge);
if (mbmi->second_ref_frame) {
+#if CONFIG_NEWMVENTROPY
+ read_nmv(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc);
+ read_nmv_fp(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc,
+ xd->allow_high_precision_mv);
+ vp8_increment_nmv(&mbmi->mv[1].as_mv, &best_mv_second.as_mv,
+ &cm->fc.NMVcount, xd->allow_high_precision_mv);
+#else
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &mbmi->mv[1].as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (mbmi->mv[1].as_mv.row)]++;
@@ -1007,6 +1192,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
cm->fc.MVcount[0][mv_max + (mbmi->mv[1].as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (mbmi->mv[1].as_mv.col >> 1)]++;
}
+#endif /* CONFIG_NEWMVENTROPY */
mbmi->mv[1].as_mv.row += best_mv_second.as_mv.row;
mbmi->mv[1].as_mv.col += best_mv_second.as_mv.col;
mbmi->need_to_clamp_secondmv |=