author	Deb Mukherjee <debargha@google.com>	2012-09-06 09:07:42 -0700
committer	Deb Mukherjee <debargha@google.com>	2012-09-12 11:51:10 -0700
commit	2b26cf17862949123618d2dbe60a661fbb7eb4a9 (patch)
tree	ef2cbadc62a86cad8c5d69a7287159367ef397f7 /vp8/encoder
parent	d406334f2712a1ec1bef890b2397927918a84e48 (diff)
Adds feature for companded MV encoding
The high-precision (1/8) pel bit is turned off if the reference MV is
larger than a threshold. The motivation for this patch is the intuition
that if motion is likely large (as indicated by the reference), there is
likely to be more motion blur, and as a result 1/8 pel precision would
be wasteful both in an RD sense and computationally. The feature is
incorporated as part of the newmventropy experiment.

There is a modest RD improvement with the patch. The overall results
for the newmventropy experiment with a threshold of 16 integer pels are:

derf:   +0.279%
std-hd: +0.617%
hd:     +1.299%
yt:     +0.822%

With a threshold of 8 integer pels:

derf:   +0.295%
std-hd: +0.623%
hd:     +1.365%
yt:     +0.847%

Patch: rebased
Patch: rebase fixes

Change-Id: I4ed14600df3c457944e6541ed407cb6e91fe428b
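For reference, the predicate vp8_use_nmv_hp called in the diff below is
defined outside this diff (in vp8/common). A minimal sketch of the
companding rule it implements, assuming MV components are stored in
1/8-pel units and a hypothetical threshold constant
COMPANDED_MVREF_THRESH in integer pels (the results above were measured
with 16 and 8):

    #include <stdlib.h>  /* abs() */

    /* Minimal stand-in for vp8's MV type, for a self-contained sketch. */
    typedef struct { short row, col; } MV;

    /* Assumed threshold in integer pels; name is illustrative. */
    #define COMPANDED_MVREF_THRESH 8

    /* Return 1 if the 1/8-pel precision bit should be used, 0 otherwise.
       MV components are in 1/8-pel units, so >> 3 converts to integer
       pels before comparing against the threshold. */
    int vp8_use_nmv_hp(const MV *ref) {
      return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
             (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;
    }

Both sides of the change consult the same predicate: encodemv.c gates
whether the 1/8-pel bit is entropy coded, and mcomp.c gates whether the
sub-pixel search descends to 1/8-pel positions, so the encoder never
searches a precision it cannot signal.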
Diffstat (limited to 'vp8/encoder')
-rw-r--r--	vp8/encoder/encodemv.c	 1
-rw-r--r--	vp8/encoder/mcomp.c	22
2 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c
index 217cc00c5..1289d89bb 100644
--- a/vp8/encoder/encodemv.c
+++ b/vp8/encoder/encodemv.c
@@ -526,6 +526,7 @@ void vp8_encode_nmv(vp8_writer *w, const MV *mv, const MV *ref,
void vp8_encode_nmv_fp(vp8_writer *w, const MV *mv, const MV *ref,
const nmv_context *mvctx, int usehp) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
+ usehp = usehp && vp8_use_nmv_hp(ref);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
encode_nmv_component_fp(w, mv->row, ref->row, &mvctx->comps[0], usehp);
}
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 76accd423..a6cf2f18b 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -276,6 +276,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int maxc, minc, maxr, minr;
int y_stride;
int offset;
+ int usehp = xd->allow_high_precision_mv;
#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
@@ -301,7 +302,6 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
y_stride = d->pre_stride;
#endif
-
rr = ref_mv->as_mv.row;
rc = ref_mv->as_mv.col;
br = bestmv->as_mv.row << 3;
@@ -403,7 +403,15 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
tc = bc;
}
- if (x->e_mbd.allow_high_precision_mv) {
+#if CONFIG_NEWMVENTROPY
+ if (xd->allow_high_precision_mv) {
+ usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
+ } else {
+ usehp = 0;
+ }
+#endif
+
+ if (usehp) {
hstep >>= 1;
while (--eighthiters) {
CHECK_BETTER(left, tr, tc - hstep);
@@ -471,6 +479,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int thismse;
int y_stride;
MACROBLOCKD *xd = &x->e_mbd;
+ int usehp = xd->allow_high_precision_mv;
#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
@@ -762,7 +771,14 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
- if (!x->e_mbd.allow_high_precision_mv)
+#if CONFIG_NEWMVENTROPY
+ if (x->e_mbd.allow_high_precision_mv) {
+ usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
+ } else {
+ usehp = 0;
+ }
+#endif
+ if (!usehp)
return bestmse;
/* Now do 1/8th pixel */