path: root/vp8/encoder
Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/bitstream.c         67
-rw-r--r--  vp8/encoder/block.h             16
-rw-r--r--  vp8/encoder/encodeframe.c        7
-rw-r--r--  vp8/encoder/encodemv.c         533
-rw-r--r--  vp8/encoder/encodemv.h          28
-rw-r--r--  vp8/encoder/firstpass.c          9
-rw-r--r--  vp8/encoder/mbgraph.c           12
-rw-r--r--  vp8/encoder/mcomp.c            359
-rw-r--r--  vp8/encoder/mcomp.h             30
-rw-r--r--  vp8/encoder/onyx_if.c           84
-rw-r--r--  vp8/encoder/onyx_int.h          11
-rw-r--r--  vp8/encoder/ratectrl.c          31
-rw-r--r--  vp8/encoder/rdopt.c            103
-rw-r--r--  vp8/encoder/temporal_filter.c    6
14 files changed, 1083 insertions, 213 deletions
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 7d94bec26..712019602 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -27,6 +27,7 @@
#include "vp8/common/seg_common.h"
#include "vp8/common/pred_common.h"
#include "vp8/common/entropy.h"
+#include "vp8/encoder/encodemv.h"
#if CONFIG_NEW_MVREF
#include "vp8/common/mvref_common.h"
@@ -36,6 +37,8 @@
unsigned __int64 Sectionbits[500];
#endif
+//int final_packing = 0;
+
#ifdef ENTROPY_STATS
int intra_mode_stats [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES];
unsigned int tree_update_hist [BLOCK_TYPES]
@@ -564,6 +567,19 @@ static void write_sub_mv_ref
vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
}
+#if CONFIG_NEWMVENTROPY
+static void write_nmv (vp8_writer *w, const MV *mv, const int_mv *ref,
+ const nmv_context *nmvc, int usehp) {
+ MV e;
+ e.row = mv->row - ref->as_mv.row;
+ e.col = mv->col - ref->as_mv.col;
+
+ vp8_encode_nmv(w, &e, &ref->as_mv, nmvc);
+ vp8_encode_nmv_fp(w, &e, &ref->as_mv, nmvc, usehp);
+}
+
+#else
+
static void write_mv
(
vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT *mvc
@@ -585,6 +601,7 @@ static void write_mv_hp
vp8_encode_motion_vector_hp(w, &e, mvc);
}
+#endif /* CONFIG_NEWMVENTROPY */
// This function writes the current macroblock's segment id to the bitstream
// It should only be called if a segment map update is indicated.
@@ -737,8 +754,12 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
int i;
VP8_COMMON *const pc = & cpi->common;
vp8_writer *const w = & cpi->bc;
+#if CONFIG_NEWMVENTROPY
+ const nmv_context *nmvc = &pc->fc.nmvc;
+#else
const MV_CONTEXT *mvc = pc->fc.mvc;
const MV_CONTEXT_HP *mvc_hp = pc->fc.mvc_hp;
+#endif
MACROBLOCKD *xd = &cpi->mb.e_mbd;
MODE_INFO *m;
MODE_INFO *prev_m;
@@ -754,6 +775,8 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = { +1, -1, +1, +1};
+ //final_packing = !cpi->dummy_packing;
+
cpi->mb.partition_info = cpi->mb.pi;
// Update the probabilities used to encode reference frame data
@@ -814,10 +837,14 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
update_mbintra_mode_probs(cpi);
+#if CONFIG_NEWMVENTROPY
+ vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv);
+#else
if (xd->allow_high_precision_mv)
vp8_write_mvprobs_hp(cpi);
else
vp8_write_mvprobs(cpi);
+#endif
mb_row = 0;
for (row = 0; row < pc->mb_rows; row += 2) {
@@ -1055,10 +1082,17 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
pick_best_mv_ref( mi->mv[0], mi->ref_mvs[rf], &best_mv);
#endif
- if (xd->allow_high_precision_mv)
+#if CONFIG_NEWMVENTROPY
+ write_nmv(w, &mi->mv[0].as_mv, &best_mv,
+ (const nmv_context*) nmvc,
+ xd->allow_high_precision_mv);
+#else
+ if (xd->allow_high_precision_mv) {
write_mv_hp(w, &mi->mv[0].as_mv, &best_mv, mvc_hp);
- else
+ } else {
write_mv(w, &mi->mv[0].as_mv, &best_mv, mvc);
+ }
+#endif
if (mi->second_ref_frame) {
#if 0 //CONFIG_NEW_MVREF
@@ -1071,10 +1105,17 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
mi->ref_mvs[mi->second_ref_frame],
&best_second_mv);
#endif
- if (xd->allow_high_precision_mv)
+#if CONFIG_NEWMVENTROPY
+ write_nmv(w, &mi->mv[1].as_mv, &best_second_mv,
+ (const nmv_context*) nmvc,
+ xd->allow_high_precision_mv);
+#else
+ if (xd->allow_high_precision_mv) {
write_mv_hp(w, &mi->mv[1].as_mv, &best_second_mv, mvc_hp);
- else
+ } else {
write_mv(w, &mi->mv[1].as_mv, &best_second_mv, mvc);
+ }
+#endif
}
break;
case SPLITMV: {
@@ -1116,6 +1157,11 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
#ifdef ENTROPY_STATS
active_section = 11;
#endif
+#if CONFIG_NEWMVENTROPY
+ write_nmv(w, &blockmv.as_mv, &best_mv,
+ (const nmv_context*) nmvc,
+ xd->allow_high_precision_mv);
+#else
if (xd->allow_high_precision_mv) {
write_mv_hp(w, &blockmv.as_mv, &best_mv,
(const MV_CONTEXT_HP *) mvc_hp);
@@ -1123,8 +1169,16 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
write_mv(w, &blockmv.as_mv, &best_mv,
(const MV_CONTEXT *) mvc);
}
+#endif
if (mi->second_ref_frame) {
+#if CONFIG_NEWMVENTROPY
+ write_nmv(w,
+ &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+ &best_second_mv,
+ (const nmv_context*) nmvc,
+ xd->allow_high_precision_mv);
+#else
if (xd->allow_high_precision_mv) {
write_mv_hp(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
&best_second_mv, (const MV_CONTEXT_HP *) mvc_hp);
@@ -1132,6 +1186,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
write_mv(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
&best_second_mv, (const MV_CONTEXT *) mvc);
}
+#endif
}
}
} while (++j < cpi->mb.partition_info->count);
@@ -2313,8 +2368,12 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
vp8_copy(cpi->common.fc.pre_sub_mv_ref_prob, cpi->common.fc.sub_mv_ref_prob);
vp8_copy(cpi->common.fc.pre_mbsplit_prob, cpi->common.fc.mbsplit_prob);
vp8_copy(cpi->common.fc.pre_i8x8_mode_prob, cpi->common.fc.i8x8_mode_prob);
+#if CONFIG_NEWMVENTROPY
+ cpi->common.fc.pre_nmvc = cpi->common.fc.nmvc;
+#else
vp8_copy(cpi->common.fc.pre_mvc, cpi->common.fc.mvc);
vp8_copy(cpi->common.fc.pre_mvc_hp, cpi->common.fc.mvc_hp);
+#endif
vp8_zero(cpi->sub_mv_ref_count);
vp8_zero(cpi->mbsplit_count);
vp8_zero(cpi->common.fc.mv_ref_ct)
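
Under CONFIG_NEWMVENTROPY each motion vector is coded differentially against its reference in two passes: vp8_encode_nmv() covers the joint symbol, sign, class and integer-pel offset, and vp8_encode_nmv_fp() the fractional-pel and optional high-precision bits (both called from write_nmv() above and defined in encodemv.c later in this diff). The joint symbol records which residual components are nonzero, so a component is only coded when its joint bit says so. A minimal sketch of that classification, assuming the usual four-symbol joint alphabet; the real vp8_get_mv_joint() lives in vp8/common/entropymv.h and may differ in detail:

/* Hedged reconstruction of the joint classification implied by
 * vp8_encode_nmv(): comps[0] (row/vertical) is coded only for
 * HZVNZ/HNZVNZ joints, comps[1] (col/horizontal) only for
 * HNZVZ/HNZVNZ. MV_JOINT_ZERO is an assumed name for the
 * both-zero case. */
static MV_JOINT_TYPE get_mv_joint_sketch(MV mv) {
  if (mv.row == 0)
    return mv.col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
  else
    return mv.col == 0 ? MV_JOINT_HZVNZ : MV_JOINT_HNZVNZ;
}
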
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 77980567d..c0cd2e783 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -117,6 +117,19 @@ typedef struct {
int *mb_norm_activity_ptr;
signed int act_zbin_adj;
+#if CONFIG_NEWMVENTROPY
+ int nmvjointcost[MV_JOINTS];
+ int nmvcosts[2][MV_VALS];
+ int *nmvcost[2];
+ int nmvcosts_hp[2][MV_VALS];
+ int *nmvcost_hp[2];
+
+ int nmvjointsadcost[MV_JOINTS];
+ int nmvsadcosts[2][MV_VALS];
+ int *nmvsadcost[2];
+ int nmvsadcosts_hp[2][MV_VALS];
+ int *nmvsadcost_hp[2];
+#else
int mvcosts[2][MVvals + 1];
int *mvcost[2];
int mvsadcosts[2][MVfpvals + 1];
@@ -125,6 +138,7 @@ typedef struct {
int *mvcost_hp[2];
int mvsadcosts_hp[2][MVfpvals_hp + 1];
int *mvsadcost_hp[2];
+#endif /* CONFIG_NEWMVENTROPY */
int mbmode_cost[2][MB_MODE_COUNT];
int intra_uv_mode_cost[2][MB_MODE_COUNT];
@@ -150,8 +164,6 @@ typedef struct {
signed char *gf_active_ptr;
unsigned char *active_ptr;
- MV_CONTEXT *mvc;
- MV_CONTEXT_HP *mvc_hp;
unsigned int token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
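
build_nmv_component_cost_table() in encodemv.c (later in this diff) fills both mvcost[v] and mvcost[-v] for v up to MV_MAX, so the nmvcost/nmvsadcost pointer pairs added above are presumably aimed at the midpoint of their MV_VALS-sized backing arrays to allow signed indexing. A sketch of that wiring, under the assumption MV_VALS == 2 * MV_MAX + 1 (the loop bounds suggest it, but the constants are defined in vp8/common/entropymv.h, not shown here):

/* Assumed setup (in the actual patch this presumably happens in
 * onyx_if.c): centre each cost pointer so indices -MV_MAX..MV_MAX
 * are valid. */
static void init_nmv_cost_ptrs_sketch(MACROBLOCK *x) {
  x->nmvcost[0]       = &x->nmvcosts[0][MV_MAX];
  x->nmvcost[1]       = &x->nmvcosts[1][MV_MAX];
  x->nmvcost_hp[0]    = &x->nmvcosts_hp[0][MV_MAX];
  x->nmvcost_hp[1]    = &x->nmvcosts_hp[1][MV_MAX];
  x->nmvsadcost[0]    = &x->nmvsadcosts[0][MV_MAX];
  x->nmvsadcost[1]    = &x->nmvsadcosts[1][MV_MAX];
  x->nmvsadcost_hp[0] = &x->nmvsadcosts_hp[0][MV_MAX];
  x->nmvsadcost_hp[1] = &x->nmvsadcosts_hp[1][MV_MAX];
}
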
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 41b87f2b0..4670c314d 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -1280,9 +1280,6 @@ void init_encode_frame_mb_context(VP8_COMP *cpi) {
#endif
// vp8_zero(cpi->uv_mode_count)
- x->mvc = cm->fc.mvc;
- x->mvc_hp = cm->fc.mvc_hp;
-
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
@@ -1357,8 +1354,12 @@ static void encode_frame_internal(VP8_COMP *cpi) {
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
+#if CONFIG_NEWMVENTROPY
+ vp8_zero(cpi->NMVcount);
+#else
vp8_zero(cpi->MVcount);
vp8_zero(cpi->MVcount_hp);
+#endif
vp8_zero(cpi->coef_counts);
vp8_zero(cpi->coef_counts_8x8);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c
index e5426f662..217cc00c5 100644
--- a/vp8/encoder/encodemv.c
+++ b/vp8/encoder/encodemv.c
@@ -20,6 +20,536 @@
extern unsigned int active_section;
#endif
+//extern int final_packing;
+
+#if CONFIG_NEWMVENTROPY
+
+#ifdef NMV_STATS
+nmv_context_counts tnmvcounts;
+#endif
+
+static void encode_nmv_component(vp8_writer *w,
+ int v,
+ int r,
+ const nmv_component *mvcomp) {
+ int s, z, c, o, d;
+ assert (v != 0); /* should not be zero */
+ s = v < 0;
+ vp8_write(w, s, mvcomp->sign);
+ z = (s ? -v : v) - 1; /* magnitude - 1 */
+
+ c = vp8_get_mv_class(z, &o);
+
+ vp8_write_token(w, vp8_mv_class_tree, mvcomp->classes,
+ vp8_mv_class_encodings + c);
+
+ d = (o >> 3); /* int mv data */
+
+ if (c == MV_CLASS_0) {
+ vp8_write_token(w, vp8_mv_class0_tree, mvcomp->class0,
+ vp8_mv_class0_encodings + d);
+ } else {
+ int i, b;
+ b = c + CLASS0_BITS - 1; /* number of bits */
+ for (i = 0; i < b; ++i)
+ vp8_write(w, ((d >> i) & 1), mvcomp->bits[i]);
+ }
+}
+
+static void encode_nmv_component_fp(vp8_writer *w,
+ int v,
+ int r,
+ const nmv_component *mvcomp,
+ int usehp) {
+ int s, z, c, o, d, f, e;
+ assert (v != 0); /* should not be zero */
+ s = v < 0;
+ z = (s ? -v : v) - 1; /* magnitude - 1 */
+
+ c = vp8_get_mv_class(z, &o);
+
+ d = (o >> 3); /* int mv data */
+ f = (o >> 1) & 3; /* fractional pel mv data */
+ e = (o & 1); /* high precision mv data */
+
+ /* Code the fractional pel bits */
+ if (c == MV_CLASS_0) {
+ vp8_write_token(w, vp8_mv_fp_tree, mvcomp->class0_fp[d],
+ vp8_mv_fp_encodings + f);
+ } else {
+ vp8_write_token(w, vp8_mv_fp_tree, mvcomp->fp,
+ vp8_mv_fp_encodings + f);
+ }
+ /* Code the high precision bit */
+ if (usehp) {
+ if (c == MV_CLASS_0) {
+ vp8_write(w, e, mvcomp->class0_hp);
+ } else {
+ vp8_write(w, e, mvcomp->hp);
+ }
+ }
+}
+
+static void build_nmv_component_cost_table(int *mvcost,
+ const nmv_component *mvcomp,
+ int usehp) {
+ int i, v;
+ int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
+ int bits_cost[MV_OFFSET_BITS][2];
+ int class0_fp_cost[CLASS0_SIZE][4], fp_cost[4];
+ int class0_hp_cost[2], hp_cost[2];
+
+ sign_cost[0] = vp8_cost_zero(mvcomp->sign);
+ sign_cost[1] = vp8_cost_one(mvcomp->sign);
+ vp8_cost_tokens(class_cost, mvcomp->classes, vp8_mv_class_tree);
+ vp8_cost_tokens(class0_cost, mvcomp->class0, vp8_mv_class0_tree);
+ for (i = 0; i < MV_OFFSET_BITS; ++i) {
+ bits_cost[i][0] = vp8_cost_zero(mvcomp->bits[i]);
+ bits_cost[i][1] = vp8_cost_one(mvcomp->bits[i]);
+ }
+
+ for (i = 0; i < CLASS0_SIZE; ++i)
+ vp8_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp8_mv_fp_tree);
+ vp8_cost_tokens(fp_cost, mvcomp->fp, vp8_mv_fp_tree);
+
+ if (usehp) {
+ class0_hp_cost[0] = vp8_cost_zero(mvcomp->class0_hp);
+ class0_hp_cost[1] = vp8_cost_one(mvcomp->class0_hp);
+ hp_cost[0] = vp8_cost_zero(mvcomp->hp);
+ hp_cost[1] = vp8_cost_one(mvcomp->hp);
+ }
+ mvcost[0] = 0;
+ for (v = 1; v <= MV_MAX; ++v) {
+ int z, c, o, d, e, f, cost = 0;
+ z = v - 1;
+ c = vp8_get_mv_class(z, &o);
+ cost += class_cost[c];
+ d = (o >> 3); /* int mv data */
+ f = (o >> 1) & 3; /* fractional pel mv data */
+ e = (o & 1); /* high precision mv data */
+ if (c == MV_CLASS_0) {
+ cost += class0_cost[d];
+ } else {
+ int i, b;
+ b = c + CLASS0_BITS - 1; /* number of bits */
+ for (i = 0; i < b; ++i)
+ cost += bits_cost[i][((d >> i) & 1)];
+ }
+ if (c == MV_CLASS_0) {
+ cost += class0_fp_cost[d][f];
+ } else {
+ cost += fp_cost[f];
+ }
+ if (usehp) {
+ if (c == MV_CLASS_0) {
+ cost += class0_hp_cost[e];
+ } else {
+ cost += hp_cost[e];
+ }
+ }
+ mvcost[v] = cost + sign_cost[0];
+ mvcost[-v] = cost + sign_cost[1];
+ }
+}
+
+static int update_nmv_savings(const unsigned int ct[2],
+ const vp8_prob cur_p,
+ const vp8_prob new_p,
+ const vp8_prob upd_p) {
+
+#ifdef LOW_PRECISION_MV_UPDATE
+ vp8_prob mod_p = new_p | 1;
+#else
+ vp8_prob mod_p = new_p;
+#endif
+ const int cur_b = vp8_cost_branch256(ct, cur_p);
+ const int mod_b = vp8_cost_branch256(ct, mod_p);
+ const int cost = 7 * 256 +
+#ifndef LOW_PRECISION_MV_UPDATE
+ 256 +
+#endif
+ (vp8_cost_one(upd_p) - vp8_cost_zero(upd_p));
+ if (cur_b - mod_b - cost > 0) {
+ return cur_b - mod_b - cost;
+ } else {
+ return -vp8_cost_zero(upd_p);
+ }
+}
+
+static int update_nmv(
+ vp8_writer *const w,
+ const unsigned int ct[2],
+ vp8_prob *const cur_p,
+ const vp8_prob new_p,
+ const vp8_prob upd_p) {
+
+#ifdef LOW_PRECISION_MV_UPDATE
+ vp8_prob mod_p = new_p | 1;
+#else
+ vp8_prob mod_p = new_p;
+#endif
+
+ const int cur_b = vp8_cost_branch256(ct, *cur_p);
+ const int mod_b = vp8_cost_branch256(ct, mod_p);
+ const int cost = 7 * 256 +
+#ifndef LOW_PRECISION_MV_UPDATE
+ 256 +
+#endif
+ (vp8_cost_one(upd_p) - vp8_cost_zero(upd_p));
+
+ if (cur_b - mod_b > cost) {
+ *cur_p = mod_p;
+ vp8_write(w, 1, upd_p);
+#ifdef LOW_PRECISION_MV_UPDATE
+ vp8_write_literal(w, mod_p >> 1, 7);
+#else
+ vp8_write_literal(w, mod_p, 8);
+#endif
+ return 1;
+ } else {
+ vp8_write(w, 0, upd_p);
+ return 0;
+ }
+}
+
+#ifdef NMV_STATS
+void init_nmvstats() {
+ vp8_zero(tnmvcounts);
+}
+
+void print_nmvstats() {
+ nmv_context prob;
+ unsigned int branch_ct_joint[MV_JOINTS - 1][2];
+ unsigned int branch_ct_sign[2][2];
+ unsigned int branch_ct_classes[2][MV_CLASSES - 1][2];
+ unsigned int branch_ct_class0[2][CLASS0_SIZE - 1][2];
+ unsigned int branch_ct_bits[2][MV_OFFSET_BITS][2];
+ unsigned int branch_ct_class0_fp[2][CLASS0_SIZE][4 - 1][2];
+ unsigned int branch_ct_fp[2][4 - 1][2];
+ unsigned int branch_ct_class0_hp[2][2];
+ unsigned int branch_ct_hp[2][2];
+ int i, j, k;
+ vp8_counts_to_nmv_context(&tnmvcounts, &prob, 1,
+ branch_ct_joint, branch_ct_sign, branch_ct_classes,
+ branch_ct_class0, branch_ct_bits,
+ branch_ct_class0_fp, branch_ct_fp,
+ branch_ct_class0_hp, branch_ct_hp);
+
+ printf("\nCounts =\n { ");
+ for (j = 0; j < MV_JOINTS; ++j)
+ printf("%d, ", tnmvcounts.joints[j]);
+ printf("},\n");
+ for (i = 0; i < 2; ++i) {
+ printf(" {\n");
+ printf(" %d/%d,\n", tnmvcounts.comps[i].sign[0],
+ tnmvcounts.comps[i].sign[1]);
+ printf(" { ");
+ for (j = 0; j < MV_CLASSES; ++j)
+ printf("%d, ", tnmvcounts.comps[i].classes[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < CLASS0_SIZE; ++j)
+ printf("%d, ", tnmvcounts.comps[i].class0[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < MV_OFFSET_BITS; ++j)
+ printf("%d/%d, ", tnmvcounts.comps[i].bits[j][0],
+ tnmvcounts.comps[i].bits[j][1]);
+ printf("},\n");
+
+ printf(" {");
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ printf("{");
+ for (k = 0; k < 4; ++k)
+ printf("%d, ", tnmvcounts.comps[i].class0_fp[j][k]);
+ printf("}, ");
+ }
+ printf("},\n");
+
+ printf(" { ");
+ for (j = 0; j < 4; ++j)
+ printf("%d, ", tnmvcounts.comps[i].fp[j]);
+ printf("},\n");
+
+ printf(" %d/%d,\n",
+ tnmvcounts.comps[i].class0_hp[0],
+ tnmvcounts.comps[i].class0_hp[1]);
+ printf(" %d/%d,\n",
+ tnmvcounts.comps[i].hp[0],
+ tnmvcounts.comps[i].hp[1]);
+ printf(" },\n");
+ }
+
+ printf("\nProbs =\n { ");
+ for (j = 0; j < MV_JOINTS - 1; ++j)
+ printf("%d, ", prob.joints[j]);
+ printf("},\n");
+ for (i = 0; i < 2; ++i) {
+ printf(" {\n");
+ printf(" %d,\n", prob.comps[i].sign);
+ printf(" { ");
+ for (j = 0; j < MV_CLASSES - 1; ++j)
+ printf("%d, ", prob.comps[i].classes[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < CLASS0_SIZE - 1; ++j)
+ printf("%d, ", prob.comps[i].class0[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < MV_OFFSET_BITS; ++j)
+ printf("%d, ", prob.comps[i].bits[j]);
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ printf("{");
+ for (k = 0; k < 3; ++k)
+ printf("%d, ", prob.comps[i].class0_fp[j][k]);
+ printf("}, ");
+ }
+ printf("},\n");
+ printf(" { ");
+ for (j = 0; j < 3; ++j)
+ printf("%d, ", prob.comps[i].fp[j]);
+ printf("},\n");
+
+ printf(" %d,\n", prob.comps[i].class0_hp);
+ printf(" %d,\n", prob.comps[i].hp);
+ printf(" },\n");
+ }
+}
+
+static void add_nmvcount(nmv_context_counts *dst, nmv_context_counts *src) {
+ int i, j, k;
+ for (j = 0; j < MV_JOINTS; ++j) {
+ dst->joints[j] += src->joints[j];
+ }
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < MV_VALS; ++j) {
+ dst->comps[i].mvcount[j] += src->comps[i].mvcount[j];
+ }
+ dst->comps[i].sign[0] += src->comps[i].sign[0];
+ dst->comps[i].sign[1] += src->comps[i].sign[1];
+ for (j = 0; j < MV_CLASSES; ++j) {
+ dst->comps[i].classes[j] += src->comps[i].classes[j];
+ }
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ dst->comps[i].class0[j] += src->comps[i].class0[j];
+ }
+ for (j = 0; j < MV_OFFSET_BITS; ++j) {
+ dst->comps[i].bits[j][0] += src->comps[i].bits[j][0];
+ dst->comps[i].bits[j][1] += src->comps[i].bits[j][1];
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ for (k = 0; k < 4; ++k) {
+ dst->comps[i].class0_fp[j][k] += src->comps[i].class0_fp[j][k];
+ }
+ }
+ for (j = 0; j < 4; ++j) {
+ dst->comps[i].fp[j] += src->comps[i].fp[j];
+ }
+ dst->comps[i].class0_hp[0] += src->comps[i].class0_hp[0];
+ dst->comps[i].class0_hp[1] += src->comps[i].class0_hp[1];
+ dst->comps[i].hp[0] += src->comps[i].hp[0];
+ dst->comps[i].hp[1] += src->comps[i].hp[1];
+ }
+}
+#endif
+
+void vp8_write_nmvprobs(VP8_COMP * cpi, int usehp) {
+ vp8_writer *const w = & cpi->bc;
+ int i, j;
+ nmv_context prob;
+ unsigned int branch_ct_joint[MV_JOINTS - 1][2];
+ unsigned int branch_ct_sign[2][2];
+ unsigned int branch_ct_classes[2][MV_CLASSES - 1][2];
+ unsigned int branch_ct_class0[2][CLASS0_SIZE - 1][2];
+ unsigned int branch_ct_bits[2][MV_OFFSET_BITS][2];
+ unsigned int branch_ct_class0_fp[2][CLASS0_SIZE][4 - 1][2];
+ unsigned int branch_ct_fp[2][4 - 1][2];
+ unsigned int branch_ct_class0_hp[2][2];
+ unsigned int branch_ct_hp[2][2];
+ int savings = 0;
+
+#ifdef NMV_STATS
+ if (!cpi->dummy_packing)
+ add_nmvcount(&tnmvcounts, &cpi->NMVcount);
+#endif
+ vp8_counts_to_nmv_context(&cpi->NMVcount, &prob, usehp,
+ branch_ct_joint, branch_ct_sign, branch_ct_classes,
+ branch_ct_class0, branch_ct_bits,
+ branch_ct_class0_fp, branch_ct_fp,
+ branch_ct_class0_hp, branch_ct_hp);
+ /* write updates if they help */
+#ifdef MV_GROUP_UPDATE
+ for (j = 0; j < MV_JOINTS - 1; ++j) {
+ savings += update_nmv_savings(branch_ct_joint[j],
+ cpi->common.fc.nmvc.joints[j],
+ prob.joints[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ for (i = 0; i < 2; ++i) {
+ savings += update_nmv_savings(branch_ct_sign[i],
+ cpi->common.fc.nmvc.comps[i].sign,
+ prob.comps[i].sign,
+ VP8_NMV_UPDATE_PROB);
+ for (j = 0; j < MV_CLASSES - 1; ++j) {
+ savings += update_nmv_savings(branch_ct_classes[i][j],
+ cpi->common.fc.nmvc.comps[i].classes[j],
+ prob.comps[i].classes[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < CLASS0_SIZE - 1; ++j) {
+ savings += update_nmv_savings(branch_ct_class0[i][j],
+ cpi->common.fc.nmvc.comps[i].class0[j],
+ prob.comps[i].class0[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < MV_OFFSET_BITS; ++j) {
+ savings += update_nmv_savings(branch_ct_bits[i][j],
+ cpi->common.fc.nmvc.comps[i].bits[j],
+ prob.comps[i].bits[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ int k;
+ for (k = 0; k < 3; ++k) {
+ savings += update_nmv_savings(branch_ct_class0_fp[i][j][k],
+ cpi->common.fc.nmvc.comps[i].class0_fp[j][k],
+ prob.comps[i].class0_fp[j][k],
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+ for (j = 0; j < 3; ++j) {
+ savings += update_nmv_savings(branch_ct_fp[i][j],
+ cpi->common.fc.nmvc.comps[i].fp[j],
+ prob.comps[i].fp[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+ if (usehp) {
+ for (i = 0; i < 2; ++i) {
+ savings += update_nmv_savings(branch_ct_class0_hp[i],
+ cpi->common.fc.nmvc.comps[i].class0_hp,
+ prob.comps[i].class0_hp,
+ VP8_NMV_UPDATE_PROB);
+ savings += update_nmv_savings(branch_ct_hp[i],
+ cpi->common.fc.nmvc.comps[i].hp,
+ prob.comps[i].hp,
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+ if (savings <= 0) {
+ vp8_write_bit(w, 0);
+ return;
+ }
+ vp8_write_bit(w, 1);
+#endif
+
+ for (j = 0; j < MV_JOINTS - 1; ++j) {
+ update_nmv(w, branch_ct_joint[j],
+ &cpi->common.fc.nmvc.joints[j],
+ prob.joints[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ for (i = 0; i < 2; ++i) {
+ update_nmv(w, branch_ct_sign[i],
+ &cpi->common.fc.nmvc.comps[i].sign,
+ prob.comps[i].sign,
+ VP8_NMV_UPDATE_PROB);
+ for (j = 0; j < MV_CLASSES - 1; ++j) {
+ update_nmv(w, branch_ct_classes[i][j],
+ &cpi->common.fc.nmvc.comps[i].classes[j],
+ prob.comps[i].classes[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < CLASS0_SIZE - 1; ++j) {
+ update_nmv(w, branch_ct_class0[i][j],
+ &cpi->common.fc.nmvc.comps[i].class0[j],
+ prob.comps[i].class0[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ for (j = 0; j < MV_OFFSET_BITS; ++j) {
+ update_nmv(w, branch_ct_bits[i][j],
+ &cpi->common.fc.nmvc.comps[i].bits[j],
+ prob.comps[i].bits[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < CLASS0_SIZE; ++j) {
+ int k;
+ for (k = 0; k < 3; ++k) {
+ update_nmv(w, branch_ct_class0_fp[i][j][k],
+ &cpi->common.fc.nmvc.comps[i].class0_fp[j][k],
+ prob.comps[i].class0_fp[j][k],
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+ for (j = 0; j < 3; ++j) {
+ update_nmv(w, branch_ct_fp[i][j],
+ &cpi->common.fc.nmvc.comps[i].fp[j],
+ prob.comps[i].fp[j],
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+ if (usehp) {
+ for (i = 0; i < 2; ++i) {
+ update_nmv(w, branch_ct_class0_hp[i],
+ &cpi->common.fc.nmvc.comps[i].class0_hp,
+ prob.comps[i].class0_hp,
+ VP8_NMV_UPDATE_PROB);
+ update_nmv(w, branch_ct_hp[i],
+ &cpi->common.fc.nmvc.comps[i].hp,
+ prob.comps[i].hp,
+ VP8_NMV_UPDATE_PROB);
+ }
+ }
+}
+
+void vp8_encode_nmv(vp8_writer *w, const MV *mv, const MV *ref,
+ const nmv_context *mvctx) {
+ MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
+ vp8_write_token(w, vp8_mv_joint_tree, mvctx->joints,
+ vp8_mv_joint_encodings + j);
+ if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
+ encode_nmv_component(w, mv->row, ref->row, &mvctx->comps[0]);
+ }
+ if (j == MV_JOINT_HNZVZ || j == MV_JOINT_HNZVNZ) {
+ encode_nmv_component(w, mv->col, ref->col, &mvctx->comps[1]);
+ }
+}
+
+void vp8_encode_nmv_fp(vp8_writer *w, const MV *mv, const MV *ref,
+ const nmv_context *mvctx, int usehp) {
+ MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
+ if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
+ encode_nmv_component_fp(w, mv->row, ref->row, &mvctx->comps[0], usehp);
+ }
+ if (j == MV_JOINT_HNZVZ || j == MV_JOINT_HNZVNZ) {
+ encode_nmv_component_fp(w, mv->col, ref->col, &mvctx->comps[1], usehp);
+ }
+}
+
+void vp8_build_nmv_cost_table(int *mvjoint,
+ int *mvcost[2],
+ const nmv_context *mvctx,
+ int usehp,
+ int mvc_flag_v,
+ int mvc_flag_h) {
+ vp8_clear_system_state();
+ vp8_cost_tokens(mvjoint, mvctx->joints, vp8_mv_joint_tree);
+ if (mvc_flag_v)
+ build_nmv_component_cost_table(mvcost[0], &mvctx->comps[0], usehp);
+ if (mvc_flag_h)
+ build_nmv_component_cost_table(mvcost[1], &mvctx->comps[1], usehp);
+}
+
+#else /* CONFIG_NEWMVENTROPY */
+
static void encode_mvcomponent(
vp8_writer *const w,
const int v,
@@ -596,8 +1126,9 @@ void vp8_write_mvprobs_hp(VP8_COMP *cpi) {
vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp,
(const MV_CONTEXT_HP *)
cpi->common.fc.mvc_hp, flags);
-
#ifdef ENTROPY_STATS
active_section = 5;
#endif
}
+
+#endif /* CONFIG_NEWMVENTROPY */
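
encode_nmv_component(), encode_nmv_component_fp() and build_nmv_component_cost_table() all share one decomposition of a nonzero component v in eighth-pel units: sign s, magnitude-minus-one z, a class c with in-class offset o, and within o an integer-pel part d = o >> 3, a fractional-pel part f = (o >> 1) & 3 and a high-precision bit e = o & 1. The update helpers work in 1/256-bit units, which is why update_nmv_savings() prices the 7-bit literal written by update_nmv() as 7 * 256. A hedged reconstruction of the classifier consistent with the bit loop above; the real vp8_get_mv_class() is in vp8/common/entropymv.c and may differ:

/* Assumed class layout: MV_CLASS_0 covers z in [0, CLASS0_SIZE * 8),
 * and each class c >= 1 starts at CLASS0_SIZE << (c + 2) and codes
 * c + CLASS0_BITS - 1 offset bits, matching encode_nmv_component().
 * Worked example (assuming CLASS0_SIZE == 2): v = +21 -> s = 0,
 * z = 20, c = 1 (base 16), o = 4, so d = 0, f = 2, e = 0. */
static int mv_class_base_sketch(int c) {
  return c ? (CLASS0_SIZE << (c + 2)) : 0;
}

static int get_mv_class_sketch(int z, int *offset) {
  int c = MV_CLASS_0;
  while (c + 1 < MV_CLASSES && mv_class_base_sketch(c + 1) <= z)
    ++c;
  *offset = z - mv_class_base_sketch(c);
  return c;
}
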
diff --git a/vp8/encoder/encodemv.h b/vp8/encoder/encodemv.h
index 7e33007d7..e675fe058 100644
--- a/vp8/encoder/encodemv.h
+++ b/vp8/encoder/encodemv.h
@@ -14,11 +14,31 @@
#include "onyx_int.h"
+#if CONFIG_NEWMVENTROPY
+void vp8_write_nmvprobs(VP8_COMP *, int usehp);
+void vp8_encode_nmv(vp8_writer *w, const MV *mv, const MV *ref,
+ const nmv_context *mvctx);
+void vp8_encode_nmv_fp(vp8_writer *w, const MV *mv, const MV *ref,
+ const nmv_context *mvctx, int usehp);
+void vp8_build_nmv_cost_table(int *mvjoint,
+ int *mvcost[2],
+ const nmv_context *mvctx,
+ int usehp,
+ int mvc_flag_v,
+ int mvc_flag_h);
+#else /* CONFIG_NEWMVENTROPY */
void vp8_write_mvprobs(VP8_COMP *);
-void vp8_encode_motion_vector(vp8_writer *, const MV *, const MV_CONTEXT *);
-void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int mvc_flag[2]);
+void vp8_encode_motion_vector(vp8_writer *, const MV *,
+ const MV_CONTEXT *);
+void vp8_build_component_cost_table(int *mvcost[2],
+ const MV_CONTEXT *mvc,
+ int mvc_flag[2]);
void vp8_write_mvprobs_hp(VP8_COMP *);
-void vp8_encode_motion_vector_hp(vp8_writer *, const MV *, const MV_CONTEXT_HP *);
-void vp8_build_component_cost_table_hp(int *mvcost[2], const MV_CONTEXT_HP *mvc, int mvc_flag[2]);
+void vp8_encode_motion_vector_hp(vp8_writer *, const MV *,
+ const MV_CONTEXT_HP *);
+void vp8_build_component_cost_table_hp(int *mvcost[2],
+ const MV_CONTEXT_HP *mvc,
+ int mvc_flag[2]);
+#endif /* CONFIG_NEWMVENTROPY */
#endif
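
A typical per-frame flow for the new API is to build the joint and component cost tables once from the current frame's context and let the search code price candidate residuals against them. A short usage sketch; the function name is hypothetical, while x->nmvjointcost and x->nmvcost are the MACROBLOCK fields added in block.h above:

/* Hypothetical usage: refresh MV cost tables for the current frame,
 * enabling both the vertical and horizontal component tables. */
static void refresh_mv_costs_sketch(MACROBLOCK *x, const nmv_context *nmvc,
                                    int usehp) {
  vp8_build_nmv_cost_table(x->nmvjointcost, x->nmvcost, nmvc, usehp, 1, 1);
}
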
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index c1504f2ec..1e54371f7 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -28,6 +28,7 @@
#include "rdopt.h"
#include "ratectrl.h"
#include "vp8/common/quant_common.h"
+#include "vp8/common/entropymv.h"
#include "encodemv.h"
#define OUTPUT_FPF 0
@@ -38,8 +39,6 @@
#define IF_RTCD(x) NULL
#endif
-#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
-
extern void vp8_build_block_offsets(MACROBLOCK *x);
extern void vp8_setup_block_ptrs(MACROBLOCK *x);
extern void vp8cx_frame_init_quantizer(VP8_COMP *cpi);
@@ -492,11 +491,13 @@ void vp8_first_pass(VP8_COMP *cpi) {
// if ( 0 )
{
int flag[2] = {1, 1};
+ vp8_init_mv_probs(cm);
+#if CONFIG_NEWMVENTROPY
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
- vpx_memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+#else
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
- vpx_memcpy(cm->fc.mvc_hp, vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cm->fc.mvc_hp, flag);
+#endif
}
// for each macroblock row in image
diff --git a/vp8/encoder/mbgraph.c b/vp8/encoder/mbgraph.c
index dde000af1..d512ae4bc 100644
--- a/vp8/encoder/mbgraph.c
+++ b/vp8/encoder/mbgraph.c
@@ -31,12 +31,6 @@ static unsigned int do_16x16_motion_iteration
vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
unsigned int best_err;
int step_param, further_steps;
- static int dummy_cost[2 * mv_max + 1];
- int *mvcost[2] = { &dummy_cost[mv_max + 1], &dummy_cost[mv_max + 1] };
- int *mvsadcost[2] = { &dummy_cost[mv_max + 1], &dummy_cost[mv_max + 1] };
- static int dummy_cost_hp[2 * mv_max_hp + 1];
- int *mvcost_hp[2] = { &dummy_cost_hp[mv_max_hp + 1], &dummy_cost_hp[mv_max_hp + 1] };
- int *mvsadcost_hp[2] = { &dummy_cost_hp[mv_max_hp + 1], &dummy_cost_hp[mv_max_hp + 1] };
int tmp_col_min = x->mv_col_min;
int tmp_col_max = x->mv_col_max;
@@ -65,8 +59,8 @@ static unsigned int do_16x16_motion_iteration
step_param,
x->errorperbit,
&v_fn_ptr,
- xd->allow_high_precision_mv ? mvsadcost_hp : mvsadcost,
- xd->allow_high_precision_mv ? mvcost_hp : mvcost,
+ NULLMVCOST,
+ NULLMVCOST,
ref_mv);
// Try sub-pixel MC
@@ -78,7 +72,7 @@ static unsigned int do_16x16_motion_iteration
x, b, d,
dst_mv, ref_mv,
x->errorperbit, &v_fn_ptr,
- xd->allow_high_precision_mv ? mvcost_hp : mvcost,
+ NULLMVCOST,
& distortion, &sse);
}
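
The dummy cost arrays could be dropped here because mv_err_cost() and mvsad_err_cost() in mcomp.c (next in this diff) return 0 when their cost pointers are NULL. NULLMVCOST, DEC_MVCOSTS, MVCOSTS and their SAD counterparts are presumably parameter-bundling macros from mcomp.h, which this change touches but which is not shown here; an assumed reconstruction for the CONFIG_NEWMVENTROPY case:

/* Assumed shape of the mcomp.h macros used throughout mcomp.c below:
 * the DEC_* forms declare the joint + per-component cost parameters,
 * the others forward or null them at call sites. Not the actual
 * header contents. */
#define DEC_MVCOSTS    int *mvjcost, int *mvcost[2]
#define DEC_MVSADCOSTS int *mvjsadcost, int *mvsadcost[2]
#define MVCOSTS        mvjcost, mvcost
#define MVSADCOSTS     mvjsadcost, mvsadcost
#define NULLMVCOST     NULL, NULL  /* nulls both pointers, disabling costing */
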
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 79e10a5bc..76accd423 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -42,42 +42,59 @@ void vp8_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv) {
x->mv_row_max = row_max;
}
-int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
+int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
int Weight, int ishp) {
- // MV costing is based on the distribution of vectors in the previous frame
- // and as such will tend to over state the cost of vectors. In addition
- // coding a new vector can have a knock on effect on the cost of subsequent
- // vectors and the quality of prediction from NEAR and NEAREST for subsequent
- // blocks. The "Weight" parameter allows, to a limited extent, for some
- // account to be taken of these factors.
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp == 0)] +
- mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp == 0)])
+ MV v;
+ v.row = (mv->as_mv.row - ref->as_mv.row);
+ v.col = (mv->as_mv.col - ref->as_mv.col);
+#if CONFIG_NEWMVENTROPY
+ return ((mvjcost[vp8_get_mv_joint(v)] +
+ mvcost[0][v.row] + mvcost[1][v.col]) *
+ Weight) >> 7;
+#else
+ return ((mvcost[0][v.row >> (ishp == 0)] +
+ mvcost[1][v.col >> (ishp == 0)])
* Weight) >> 7;
+#endif
}
-static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
+static int mv_err_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
int error_per_bit, int ishp) {
- // Ignore costing if mvcost is NULL
- if (mvcost)
- return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp == 0)] +
- mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp == 0)])
- * error_per_bit + 128) >> 8;
+ if (mvcost) {
+ MV v;
+ v.row = (mv->as_mv.row - ref->as_mv.row);
+ v.col = (mv->as_mv.col - ref->as_mv.col);
+#if CONFIG_NEWMVENTROPY
+ return ((mvjcost[vp8_get_mv_joint(v)] +
+ mvcost[0][v.row] + mvcost[1][v.col]) *
+ error_per_bit + 128) >> 8;
+#else
+ return ((mvcost[0][v.row >> (ishp == 0)] +
+ mvcost[1][v.col >> (ishp == 0)]) * error_per_bit + 128) >> 8;
+#endif
+ }
return 0;
}
-
-static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2],
+static int mvsad_err_cost(int_mv *mv, int_mv *ref, DEC_MVSADCOSTS,
int error_per_bit) {
- // Calculate sad error cost on full pixel basis.
- // Ignore costing if mvcost is NULL
- if (mvsadcost)
- return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
- mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
+
+ if (mvsadcost) {
+ MV v;
+ v.row = (mv->as_mv.row - ref->as_mv.row);
+ v.col = (mv->as_mv.col - ref->as_mv.col);
+#if CONFIG_NEWMVENTROPY
+ return ((mvjsadcost[vp8_get_mv_joint(v)] +
+ mvsadcost[0][v.row] + mvsadcost[1][v.col]) *
+ error_per_bit + 128) >> 8;
+#else
+ return ((mvsadcost[0][v.row] + mvsadcost[1][v.col])
* error_per_bit + 128) >> 8;
+#endif
+ }
return 0;
}
-
void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride) {
int Len;
int search_site_count = 0;
@@ -185,7 +202,6 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
x->ss[search_site_count].offset = Len * stride + Len;
search_site_count++;
-
// Contract.
Len /= 2;
}
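
In the rewritten cost functions above, the table lookups are presumably in 1/256-bit units (consistent with the 7 * 256 literal pricing in encodemv.c), the joint-symbol cost is added to the two component costs, and the (x * error_per_bit + 128) >> 8 step rescales to the RD units the search loops compare, rounding to nearest. A worked sketch with made-up table values:

/* Hedged arithmetic example for mv_err_cost() above. The table values
 * and error_per_bit are hypothetical; only the scaling comes from the
 * code. */
static int mv_err_cost_example(void) {
  const int jcost = 300, rowcost = 900, colcost = 700; /* 1/256-bit units */
  const int error_per_bit = 100;
  const int bits_x256 = jcost + rowcost + colcost;     /* == 1900 */
  return (bits_x256 * error_per_bit + 128) >> 8;       /* == 742 */
}
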
@@ -204,18 +220,35 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
* could reduce the area.
*/
-#define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset))) // pointer to predictor base of a motionvector
-#define SP(x) (((x)&3)<<2) // convert motion vector component to offset for svf calc
-#define MVC(r,c) (mvcost ? ((mvcost[0][(r)-rr] + mvcost[1][(c)-rc]) * error_per_bit + 128 )>>8 : 0) // estimated cost of a motion vector (r,c)
-#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
-#define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
-#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
+#if CONFIG_NEWMVENTROPY
+/* estimated cost of a motion vector (r,c) */
+#define MVC(r,c) \
+ (mvcost ? \
+ ((mvjcost[((r)!=rr)*2 + ((c)!=rc)] + \
+ mvcost[0][((r)-rr)] + mvcost[1][((c)-rc)]) * error_per_bit + 128 )>>8 : 0)
+#else
+#define MVC(r,c) \
+ (mvcost ? \
+ ((mvcost[0][((r)-rr)>>(xd->allow_high_precision_mv==0)] + \
+ mvcost[1][((c)-rc)>>(xd->allow_high_precision_mv==0)]) * \
+ error_per_bit + 128 )>>8 : 0)
+#endif /* CONFIG_NEWMVENTROPY */
+
+#define SP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
+
+#define IFMVCV(r,c,s,e) \
+ if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
+
+/* pointer to predictor base of a motionvector */
+#define PRE(r,c) (y + (((r)>>3) * y_stride + ((c)>>3) -(offset)))
-#define PREHP(r,c) (y + (((r)>>3) * y_stride + ((c)>>3) -(offset))) // pointer to predictor base of a motionvector
-#define SPHP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
-#define DISTHP(r,c) vfp->svf( PREHP(r,c), y_stride, SPHP(c),SPHP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
-#define ERRHP(r,c) (MVC(r,c)+DISTHP(r,c)) // returns distortion + motion vector cost
-#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = ((xd->allow_high_precision_mv)?DISTHP(r,c):DIST(r,c)); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
+/* returns subpixel variance error function */
+#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse)
+
+/* checks if (r,c) has better score than previous best */
+#define CHECK_BETTER(v,r,c) \
+ IFMVCV(r,c,{thismse = (DIST(r,c)); if((v = MVC(r,c)+thismse) < besterr) \
+ { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
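
With everything held in eighth-pel units, PRE() takes the full-pel part of a candidate (r,c) with a >>3 shift and SP() expands the remaining three fractional bits to the phase argument of the sub-pixel variance functions, presumably on a sixteenth-pel scale (even phases 0, 2, ..., 14, the same range the old quarter-pel SP() reached via ((x)&3)<<2). A small sketch of the split:

/* Hypothetical helper mirroring the PRE()/SP() arithmetic above.
 * For v = 13 eighth-pels (1 + 5/8 pel): fullpel == 1, phase == 10. */
static void split_eighth_pel_sketch(int v, int *fullpel, int *phase) {
  *fullpel = v >> 3;        /* what PRE() uses to address the predictor */
  *phase   = (v & 7) << 1;  /* what SP() hands to vfp->svf() */
}
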
@@ -224,7 +257,8 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
- int *mvcost[2], int *distortion,
+ DEC_MVCOSTS,
+ int *distortion,
unsigned int *sse1) {
unsigned char *z = (*(b->base_src) + b->src);
MACROBLOCKD *xd = &x->e_mbd;
@@ -268,27 +302,22 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
#endif
- if (xd->allow_high_precision_mv) {
- rr = ref_mv->as_mv.row;
- rc = ref_mv->as_mv.col;
- br = bestmv->as_mv.row << 3;
- bc = bestmv->as_mv.col << 3;
- hstep = 4;
- minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width_hp) - 1));
- maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
- minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
- maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
- } else {
- rr = ref_mv->as_mv.row >> 1;
- rc = ref_mv->as_mv.col >> 1;
- br = bestmv->as_mv.row << 2;
- bc = bestmv->as_mv.col << 2;
- hstep = 2;
- minc = MAX(x->mv_col_min << 2, (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
- maxc = MIN(x->mv_col_max << 2, (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
- minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
- maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
- }
+ rr = ref_mv->as_mv.row;
+ rc = ref_mv->as_mv.col;
+ br = bestmv->as_mv.row << 3;
+ bc = bestmv->as_mv.col << 3;
+ hstep = 4;
+#if CONFIG_NEWMVENTROPY
+ minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << MV_MAX_BITS) - 1));
+ maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << MV_MAX_BITS) - 1));
+ minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << MV_MAX_BITS) - 1));
+ maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << MV_MAX_BITS) - 1));
+#else
+ minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width_hp) - 1));
+ maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
+ minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
+ maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
+#endif
tr = br;
tc = bc;
@@ -303,8 +332,8 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// calculate central point error
besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = besterr;
- besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit,
- xd->allow_high_precision_mv);
+ besterr += mv_err_cost(bestmv, ref_mv, MVCOSTS,
+ error_per_bit, xd->allow_high_precision_mv);
// TODO: Each subsequent iteration checks at least one point in
// common with the last iteration could be 2 ( if diag selected)
@@ -407,13 +436,8 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
tc = bc;
}
}
- if (x->e_mbd.allow_high_precision_mv) {
- bestmv->as_mv.row = br;
- bestmv->as_mv.col = bc;
- } else {
- bestmv->as_mv.row = br << 1;
- bestmv->as_mv.col = bc << 1;
- }
+ bestmv->as_mv.row = br;
+ bestmv->as_mv.col = bc;
if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
(abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3)))
@@ -423,25 +447,17 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
#undef MVC
#undef PRE
-#undef SP
#undef DIST
#undef IFMVCV
-#undef ERR
#undef CHECK_BETTER
#undef MIN
#undef MAX
-#undef PREHP
-#undef DPHP
-#undef DISTHP
-#undef ERRHP
-
-#define SP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
- int *mvcost[2], int *distortion,
+ DEC_MVCOSTS, int *distortion,
unsigned int *sse1) {
int bestmse = INT_MAX;
int_mv startmv;
@@ -478,14 +494,14 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// calculate central point error
bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
- bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit,
+ bestmse += mv_err_cost(bestmv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ left = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (left < bestmse) {
@@ -497,7 +513,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ right = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (right < bestmse) {
@@ -511,7 +527,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ up = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (up < bestmse) {
@@ -523,7 +539,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.row += 8;
thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ down = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (down < bestmse) {
@@ -564,7 +580,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
break;
}
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (diag < bestmse) {
@@ -606,7 +622,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
b->src_stride, &sse);
}
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ left = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (left < bestmse) {
@@ -620,7 +636,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
thismse = vfp->svf(y, y_stride,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
z, b->src_stride, &sse);
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ right = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (right < bestmse) {
@@ -644,7 +660,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
z, b->src_stride, &sse);
}
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ up = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (up < bestmse) {
@@ -657,7 +673,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.row += 4;
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
z, b->src_stride, &sse);
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ down = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (down < bestmse) {
@@ -736,7 +752,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
break;
}
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (diag < bestmse) {
@@ -776,7 +792,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
z, b->src_stride, &sse);
}
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ left = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (left < bestmse) {
@@ -787,8 +803,10 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
this_mv.as_mv.col += 2;
- thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
+ z, b->src_stride, &sse);
+ right = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
+ xd->allow_high_precision_mv);
if (right < bestmse) {
*bestmv = this_mv;
@@ -808,7 +826,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(7), z, b->src_stride, &sse);
}
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ up = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
+ xd->allow_high_precision_mv);
if (up < bestmse) {
*bestmv = this_mv;
@@ -819,7 +838,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.row += 2;
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ down = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
+ xd->allow_high_precision_mv);
if (down < bestmse) {
*bestmv = this_mv;
@@ -892,7 +912,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
break;
}
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
+ xd->allow_high_precision_mv);
if (diag < bestmse) {
*bestmv = this_mv;
@@ -910,7 +931,8 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *bestmv, int_mv *ref_mv,
int error_per_bit,
const vp8_variance_fn_ptr_t *vfp,
- int *mvcost[2], int *distortion,
+ DEC_MVCOSTS,
+ int *distortion,
unsigned int *sse1) {
int bestmse = INT_MAX;
int_mv startmv;
@@ -946,14 +968,14 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// calculate central point error
bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
- bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit,
+ bestmse += mv_err_cost(bestmv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
- left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ left = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (left < bestmse) {
@@ -965,7 +987,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
- right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ right = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (right < bestmse) {
@@ -979,7 +1001,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
- up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ up = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (up < bestmse) {
@@ -991,7 +1013,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.row += 8;
thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
- down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ down = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (down < bestmse) {
@@ -1029,7 +1051,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
break;
}
- diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
+ diag = thismse + mv_err_cost(&this_mv, ref_mv, MVCOSTS, error_per_bit,
xd->allow_high_precision_mv);
if (diag < bestmse) {
@@ -1063,7 +1085,7 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
{\
if (thissad < bestsad)\
{\
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);\
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, MVSADCOSTS, sad_per_bit);\
if (thissad < bestsad)\
{\
bestsad = thissad;\
@@ -1091,8 +1113,8 @@ int vp8_hex_search
int search_param,
int sad_per_bit,
const vp8_variance_fn_ptr_t *vfp,
- int *mvsadcost[2],
- int *mvcost[2],
+ DEC_MVSADCOSTS,
+ DEC_MVCOSTS,
int_mv *center_mv
) {
MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} };
@@ -1128,7 +1150,7 @@ int vp8_hex_search
this_mv.as_mv.col = bc;
bestsad = vfp->sdf(what, what_stride, this_offset,
in_what_stride, 0x7fffffff)
- + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+ + mvsad_err_cost(&this_mv, &fcenter_mv, MVSADCOSTS, sad_per_bit);
// hex search
// j=0
@@ -1240,7 +1262,7 @@ cal_neighbors:
int vp8_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *ref_mv, int_mv *best_mv,
int search_param, int sad_per_bit, int *num00,
- vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ vp8_variance_fn_ptr_t *fn_ptr, DEC_MVCOSTS,
int_mv *center_mv) {
int i, j, step;
@@ -1264,14 +1286,19 @@ int vp8_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
unsigned char *check_here;
int thissad;
MACROBLOCKD *xd = &x->e_mbd;
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
+#if CONFIG_NEWMVENTROPY
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+#else
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
+#endif
+
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1289,7 +1316,7 @@ int vp8_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// Check the starting position
bestsad = fn_ptr->sdf(what, what_stride, in_what,
in_what_stride, 0x7fffffff)
- + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+ + mvsad_err_cost(best_mv, &fcenter_mv, MVSADCOSTS, sad_per_bit);
// search_param determines the length of the initial step and hence the number of iterations
// 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
@@ -1315,7 +1342,7 @@ int vp8_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.row = this_row_offset;
this_mv.as_mv.col = this_col_offset;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
+ MVSADCOSTS, sad_per_bit);
if (thissad < bestsad) {
bestsad = thissad;
@@ -1345,7 +1372,7 @@ int vp8_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
- mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ mv_err_cost(&this_mv, center_mv, MVCOSTS, x->errorperbit,
xd->allow_high_precision_mv);
}
@@ -1353,7 +1380,7 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *ref_mv, int_mv *best_mv, int search_param,
int sad_per_bit, int *num00,
vp8_variance_fn_ptr_t *fn_ptr,
- int *mvcost[2], int_mv *center_mv) {
+ DEC_MVCOSTS, int_mv *center_mv) {
int i, j, step;
unsigned char *what = (*(b->base_src) + b->src);
@@ -1378,14 +1405,19 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
unsigned char *check_here;
unsigned int thissad;
MACROBLOCKD *xd = &x->e_mbd;
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
+#if CONFIG_NEWMVENTROPY
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+#else
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
+#endif
+
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1403,7 +1435,7 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// Check the starting position
bestsad = fn_ptr->sdf(what, what_stride,
in_what, in_what_stride, 0x7fffffff)
- + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+ + mvsad_err_cost(best_mv, &fcenter_mv, MVSADCOSTS, sad_per_bit);
// search_param determines the length of the initial step and hence the number of iterations
// 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
@@ -1438,7 +1470,7 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
+ MVSADCOSTS, sad_per_bit);
if (sad_array[t] < bestsad) {
bestsad = sad_array[t];
@@ -1462,7 +1494,7 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.row = this_row_offset;
this_mv.as_mv.col = this_col_offset;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
+ MVSADCOSTS, sad_per_bit);
if (thissad < bestsad) {
bestsad = thissad;
@@ -1492,11 +1524,10 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
- mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ mv_err_cost(&this_mv, center_mv, MVCOSTS, x->errorperbit,
xd->allow_high_precision_mv);
}
-#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
/* do_refine: If last step (1-away) of n-step search doesn't pick the center
point as the best match, we will do a final 1-away diamond
refining search */
@@ -1558,7 +1589,7 @@ int vp8_full_pixel_diamond(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *b,
int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
- vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ vp8_variance_fn_ptr_t *fn_ptr, DEC_MVCOSTS,
int_mv *center_mv) {
unsigned char *what = (*(b->base_src) + b->src);
int what_stride = b->src_stride;
@@ -1582,14 +1613,19 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int row_max = ref_row + distance;
int col_min = ref_col - distance;
int col_max = ref_col + distance;
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
+#if CONFIG_NEWMVENTROPY
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+#else
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
+#endif
+
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1603,7 +1639,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
// Baseline value at the centre
bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
in_what_stride, 0x7fffffff)
- + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+ + mvsad_err_cost(best_mv, &fcenter_mv, MVSADCOSTS, sad_per_bit);
// Apply further limits to prevent us from using vectors that stretch beyond the UMV border
if (col_min < x->mv_col_min)
@@ -1627,7 +1663,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
+ MVSADCOSTS, sad_per_bit);
if (thissad < bestsad) {
bestsad = thissad;
@@ -1647,7 +1683,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
return
fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
(unsigned int *)(&thissad)) +
- mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ mv_err_cost(&this_mv, center_mv, MVCOSTS, x->errorperbit,
xd->allow_high_precision_mv);
else
return INT_MAX;
@@ -1655,7 +1691,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
- vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ vp8_variance_fn_ptr_t *fn_ptr, DEC_MVCOSTS,
int_mv *center_mv) {
unsigned char *what = (*(b->base_src) + b->src);
int what_stride = b->src_stride;
@@ -1681,14 +1717,19 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int col_max = ref_col + distance;
unsigned int sad_array[3];
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
+#if CONFIG_NEWMVENTROPY
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+#else
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
+#endif
+
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1702,7 +1743,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
// Baseline value at the centre
bestsad = fn_ptr->sdf(what, what_stride,
bestaddress, in_what_stride, 0x7fffffff)
- + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+ + mvsad_err_cost(best_mv, &fcenter_mv, MVSADCOSTS, sad_per_bit);
// Apply further limits to prevent us from using vectors that stretch beyond the UMV border
if (col_min < x->mv_col_min)
@@ -1733,7 +1774,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
if (thissad < bestsad) {
this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
+ MVSADCOSTS, sad_per_bit);
if (thissad < bestsad) {
bestsad = thissad;
@@ -1754,7 +1795,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
if (thissad < bestsad) {
this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
+ MVSADCOSTS, sad_per_bit);
if (thissad < bestsad) {
bestsad = thissad;
@@ -1777,7 +1818,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
return
fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
(unsigned int *)(&thissad)) +
- mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ mv_err_cost(&this_mv, center_mv, MVCOSTS, x->errorperbit,
xd->allow_high_precision_mv);
else
return INT_MAX;
@@ -1785,7 +1826,8 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
- vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ vp8_variance_fn_ptr_t *fn_ptr,
+ DEC_MVCOSTS,
int_mv *center_mv) {
unsigned char *what = (*(b->base_src) + b->src);
int what_stride = b->src_stride;
@@ -1812,14 +1854,19 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
unsigned int sad_array[3];
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
+#if CONFIG_NEWMVENTROPY
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+#else
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
+#endif
+
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1833,7 +1880,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
// Baseline value at the centre
bestsad = fn_ptr->sdf(what, what_stride,
bestaddress, in_what_stride, 0x7fffffff)
- + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
+ + mvsad_err_cost(best_mv, &fcenter_mv, MVSADCOSTS, sad_per_bit);
  // Apply further limits to prevent us from using vectors that stretch beyond the UMV border
if (col_min < x->mv_col_min)
@@ -1864,7 +1911,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
if (thissad < bestsad) {
this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
+ MVSADCOSTS, sad_per_bit);
if (thissad < bestsad) {
bestsad = thissad;
@@ -1890,7 +1937,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
if (thissad < bestsad) {
this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
+ MVSADCOSTS, sad_per_bit);
if (thissad < bestsad) {
bestsad = thissad;
@@ -1911,7 +1958,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
if (thissad < bestsad) {
this_mv.as_mv.col = c;
thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
- mvsadcost, sad_per_bit);
+ MVSADCOSTS, sad_per_bit);
if (thissad < bestsad) {
bestsad = thissad;
@@ -1933,7 +1980,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
return
fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
(unsigned int *)(&thissad)) +
- mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ mv_err_cost(&this_mv, center_mv, MVCOSTS, x->errorperbit,
xd->allow_high_precision_mv);
else
return INT_MAX;
@@ -1941,7 +1988,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int error_per_bit, int search_range,
- vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
+ vp8_variance_fn_ptr_t *fn_ptr, DEC_MVCOSTS,
int_mv *center_mv) {
MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
int i, j;
@@ -1957,19 +2004,24 @@ int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int_mv this_mv;
unsigned int bestsad = INT_MAX;
MACROBLOCKD *xd = &x->e_mbd;
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
+#if CONFIG_NEWMVENTROPY
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+#else
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
+#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
- bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
+ bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) +
+ mvsad_err_cost(ref_mv, &fcenter_mv, MVSADCOSTS, error_per_bit);
for (i = 0; i < search_range; i++) {
int best_site = -1;
@@ -1986,7 +2038,7 @@ int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
if (thissad < bestsad) {
this_mv.as_mv.row = this_row_offset;
this_mv.as_mv.col = this_col_offset;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, MVSADCOSTS, error_per_bit);
if (thissad < bestsad) {
bestsad = thissad;
@@ -2012,7 +2064,7 @@ int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
- mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ mv_err_cost(&this_mv, center_mv, MVCOSTS, x->errorperbit,
xd->allow_high_precision_mv);
else
return INT_MAX;
@@ -2021,7 +2073,7 @@ int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv *ref_mv, int error_per_bit,
int search_range, vp8_variance_fn_ptr_t *fn_ptr,
- int *mvcost[2], int_mv *center_mv) {
+ DEC_MVCOSTS, int_mv *center_mv) {
MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}};
int i, j;
short this_row_offset, this_col_offset;
@@ -2036,19 +2088,24 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int_mv this_mv;
unsigned int bestsad = INT_MAX;
MACROBLOCKD *xd = &x->e_mbd;
-
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;
+#if CONFIG_NEWMVENTROPY
+ int *mvjsadcost = x->nmvjointsadcost;
+ int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
+#else
+ int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
+#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
- bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
+ bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) +
+ mvsad_err_cost(ref_mv, &fcenter_mv, MVSADCOSTS, error_per_bit);
for (i = 0; i < search_range; i++) {
int best_site = -1;
@@ -2073,7 +2130,7 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (sad_array[j] < bestsad) {
this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
- sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
+ sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, MVSADCOSTS, error_per_bit);
if (sad_array[j] < bestsad) {
bestsad = sad_array[j];
@@ -2094,7 +2151,7 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
if (thissad < bestsad) {
this_mv.as_mv.row = this_row_offset;
this_mv.as_mv.col = this_col_offset;
- thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
+ thissad += mvsad_err_cost(&this_mv, &fcenter_mv, MVSADCOSTS, error_per_bit);
if (thissad < bestsad) {
bestsad = thissad;
@@ -2121,7 +2178,7 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
- mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
+ mv_err_cost(&this_mv, center_mv, MVCOSTS, x->errorperbit,
xd->allow_high_precision_mv);
else
return INT_MAX;
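
Note on the search functions above: every rate term now funnels through mvsad_err_cost() / mv_err_cost() via the MVSADCOSTS / MVCOSTS argument packs. Below is a minimal, self-contained sketch of how such a cost could combine a joint-type cost with the two per-component tables; apart from the mvjsadcost/mvsadcost names taken from the declarations above, everything here (the joint classes, the scaling) is an assumption for illustration, not the library's implementation.

/* Sketch only: a joint-coded MV SAD cost. */
typedef struct { short row, col; } mv_sketch;

enum { JT_ZERO, JT_COL_ONLY, JT_ROW_ONLY, JT_BOTH };

static int jt_class(const mv_sketch *d) {
  if (d->row == 0 && d->col == 0) return JT_ZERO;
  if (d->row == 0) return JT_COL_ONLY;   /* only the column differs */
  if (d->col == 0) return JT_ROW_ONLY;   /* only the row differs */
  return JT_BOTH;
}

static int sketch_mvsad_err_cost(const mv_sketch *mv, const mv_sketch *ref,
                                 const int *mvjsadcost, int *mvsadcost[2],
                                 int sad_per_bit) {
  mv_sketch d;
  int bits;
  d.row = (short)(mv->row - ref->row);
  d.col = (short)(mv->col - ref->col);
  /* The component tables are centered pointers, so a negative diff is a
     legal index (cf. the &...[MV_MAX] setup in onyx_if.c below). */
  bits = mvjsadcost[jt_class(&d)] + mvsadcost[0][d.row] + mvsadcost[1][d.col];
  /* Fold the bit cost into the SAD domain; this rounding is one plausible
     choice, not necessarily the encoder's. */
  return (bits * sad_per_bit + 128) >> 8;
}
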
diff --git a/vp8/encoder/mcomp.h b/vp8/encoder/mcomp.h
index c27f3bf75..afca58084 100644
--- a/vp8/encoder/mcomp.h
+++ b/vp8/encoder/mcomp.h
@@ -15,6 +15,22 @@
#include "block.h"
#include "variance.h"
+#if CONFIG_NEWMVENTROPY
+#define MVCOSTS mvjcost, mvcost
+#define MVSADCOSTS mvjsadcost, mvsadcost
+#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
+#define DEC_MVSADCOSTS int *mvjsadcost, int *mvsadcost[2]
+#define NULLMVCOST NULL, NULL
+#define XMVCOST x->nmvjointcost, (x->e_mbd.allow_high_precision_mv ? x->nmvcost_hp : x->nmvcost)
+#else
+#define MVCOSTS mvcost
+#define MVSADCOSTS mvsadcost
+#define DEC_MVCOSTS int *mvcost[2]
+#define DEC_MVSADCOSTS int *mvsadcost[2]
+#define NULLMVCOST NULL
+#define XMVCOST (x->e_mbd.allow_high_precision_mv ? x->mvcost_hp : x->mvcost)
+#endif /* CONFIG_NEWMVENTROPY */
+
#ifdef ENTROPY_STATS
extern void init_mv_ref_counts();
extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
@@ -26,7 +42,7 @@ extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) // Maximum size of the first step in full pel units
extern void vp8_clamp_mv_min_max(MACROBLOCK *x, int_mv *ref_mv);
-extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
+extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
int Weight, int ishp);
extern void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride);
extern void vp8_init3smotion_compensation(MACROBLOCK *x, int stride);
@@ -48,14 +64,14 @@ extern int vp8_hex_search
int search_param,
int error_per_bit,
const vp8_variance_fn_ptr_t *vf,
- int *mvsadcost[2],
- int *mvcost[2],
+ DEC_MVSADCOSTS,
+ DEC_MVCOSTS,
int_mv *center_mv
);
typedef int (fractional_mv_step_fp)
(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *bestmv, int_mv *ref_mv,
- int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2],
+ int error_per_bit, const vp8_variance_fn_ptr_t *vfp, DEC_MVCOSTS,
int *distortion, unsigned int *sse);
extern fractional_mv_step_fp vp8_find_best_sub_pixel_step_iteratively;
extern fractional_mv_step_fp vp8_find_best_sub_pixel_step;
@@ -72,7 +88,7 @@ extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
int sad_per_bit, \
int distance, \
vp8_variance_fn_ptr_t *fn_ptr, \
- int *mvcost[2], \
+ DEC_MVSADCOSTS, \
int_mv *center_mv \
)
@@ -86,7 +102,7 @@ extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
int sad_per_bit, \
int distance, \
vp8_variance_fn_ptr_t *fn_ptr, \
- int *mvcost[2], \
+ DEC_MVSADCOSTS, \
int_mv *center_mv \
)
@@ -102,7 +118,7 @@ extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
int sad_per_bit, \
int *num00, \
vp8_variance_fn_ptr_t *fn_ptr, \
- int *mvcost[2], \
+ DEC_MVSADCOSTS, \
int_mv *center_mv \
)
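
The DEC_MVCOSTS / MVCOSTS pairs (and their SAD twins) let one function definition and all of its call sites compile under either configuration: the DEC_ macro expands to the parameter list, the bare macro to the matching argument list. A toy illustration of the idiom, assuming nothing beyond the macros themselves:

/* Pick either setting; the point is that neither the signature nor the
   call below needs an #if. */
#define CONFIG_NEWMVENTROPY 1

#if CONFIG_NEWMVENTROPY
#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
#define MVCOSTS mvjcost, mvcost
#else
#define DEC_MVCOSTS int *mvcost[2]
#define MVCOSTS mvcost
#endif

static int cost_at_zero(DEC_MVCOSTS) {
  return mvcost[0][0] + mvcost[1][0];  /* mvjcost unused in this toy */
}

static int forward(DEC_MVCOSTS) {
  return cost_at_zero(MVCOSTS);  /* forwards whichever arity was declared */
}
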
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 68c28c3f2..fdb3fa196 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -149,6 +149,13 @@ extern int skip_false_count;
extern int intra_mode_stats[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES];
#endif
+#if CONFIG_NEWMVENTROPY
+#ifdef NMV_STATS
+extern void init_nmvstats();
+extern void print_nmvstats();
+#endif
+#endif
+
#ifdef SPEEDSTATS
unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int tot_pm = 0;
@@ -1697,6 +1704,48 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
#define M_LOG2_E 0.693147180559945309417
#define log2f(x) (log (x) / (float) M_LOG2_E)
+
+#if CONFIG_NEWMVENTROPY
+
+static void cal_nmvjointsadcost(int *mvjointsadcost) {
+ mvjointsadcost[0] = 600;
+ mvjointsadcost[1] = 300;
+ mvjointsadcost[2] = 300;
+  mvjointsadcost[3] = 300;
+}
+
+static void cal_nmvsadcosts(int *mvsadcost[2]) {
+ int i = 1;
+
+  mvsadcost[0][0] = 0;
+  mvsadcost[1][0] = 0;
+
+  do {
+    double z = 256 * (2 * (log2f(8 * i) + .6));
+    mvsadcost[0][i] = (int)z;
+    mvsadcost[1][i] = (int)z;
+    mvsadcost[0][-i] = (int)z;
+    mvsadcost[1][-i] = (int)z;
+  } while (++i <= MV_MAX);
+}
+
+static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
+ int i = 1;
+
+  mvsadcost[0][0] = 0;
+  mvsadcost[1][0] = 0;
+
+  do {
+    double z = 256 * (2 * (log2f(8 * i) + .6));
+    mvsadcost[0][i] = (int)z;
+    mvsadcost[1][i] = (int)z;
+    mvsadcost[0][-i] = (int)z;
+    mvsadcost[1][-i] = (int)z;
+  } while (++i <= MV_MAX);
+}
+
+#else
+
static void cal_mvsadcosts(int *mvsadcost[2]) {
int i = 1;
@@ -1727,6 +1776,8 @@ static void cal_mvsadcosts_hp(int *mvsadcost[2]) {
} while (++i <= mvfp_max_hp);
}
+#endif /* CONFIG_NEWMVENTROPY */
+
VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
int i;
volatile union {
@@ -1838,6 +1889,11 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
vp8_zero(inter_uv_modes);
vp8_zero(inter_b_modes);
#endif
+#if CONFIG_NEWMVENTROPY
+#ifdef NMV_STATS
+ init_nmvstats();
+#endif
+#endif
/*Initialize the feed-forward activity masking.*/
cpi->activity_avg = 90 << 12;
@@ -1903,19 +1959,32 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->gf_rate_correction_factor = 1.0;
cpi->twopass.est_max_qcorrection_factor = 1.0;
+#if CONFIG_NEWMVENTROPY
+ cal_nmvjointsadcost(cpi->mb.nmvjointsadcost);
+ cpi->mb.nmvcost[0] = &cpi->mb.nmvcosts[0][MV_MAX];
+ cpi->mb.nmvcost[1] = &cpi->mb.nmvcosts[1][MV_MAX];
+ cpi->mb.nmvsadcost[0] = &cpi->mb.nmvsadcosts[0][MV_MAX];
+ cpi->mb.nmvsadcost[1] = &cpi->mb.nmvsadcosts[1][MV_MAX];
+ cal_nmvsadcosts(cpi->mb.nmvsadcost);
+
+ cpi->mb.nmvcost_hp[0] = &cpi->mb.nmvcosts_hp[0][MV_MAX];
+ cpi->mb.nmvcost_hp[1] = &cpi->mb.nmvcosts_hp[1][MV_MAX];
+ cpi->mb.nmvsadcost_hp[0] = &cpi->mb.nmvsadcosts_hp[0][MV_MAX];
+ cpi->mb.nmvsadcost_hp[1] = &cpi->mb.nmvsadcosts_hp[1][MV_MAX];
+ cal_nmvsadcosts_hp(cpi->mb.nmvsadcost_hp);
+#else
cpi->mb.mvcost[0] = &cpi->mb.mvcosts[0][mv_max + 1];
cpi->mb.mvcost[1] = &cpi->mb.mvcosts[1][mv_max + 1];
cpi->mb.mvsadcost[0] = &cpi->mb.mvsadcosts[0][mvfp_max + 1];
cpi->mb.mvsadcost[1] = &cpi->mb.mvsadcosts[1][mvfp_max + 1];
-
cal_mvsadcosts(cpi->mb.mvsadcost);
cpi->mb.mvcost_hp[0] = &cpi->mb.mvcosts_hp[0][mv_max_hp + 1];
cpi->mb.mvcost_hp[1] = &cpi->mb.mvcosts_hp[1][mv_max_hp + 1];
cpi->mb.mvsadcost_hp[0] = &cpi->mb.mvsadcosts_hp[0][mvfp_max_hp + 1];
cpi->mb.mvsadcost_hp[1] = &cpi->mb.mvsadcosts_hp[1][mvfp_max_hp + 1];
-
cal_mvsadcosts_hp(cpi->mb.mvsadcost_hp);
+#endif /* CONFIG_NEWMVENTROPY */
for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
@@ -2068,6 +2137,12 @@ void vp8_remove_compressor(VP8_PTR *ptr) {
print_mode_context();
}
#endif
+#if CONFIG_NEWMVENTROPY
+#ifdef NMV_STATS
+ if (cpi->pass != 1)
+ print_nmvstats();
+#endif
+#endif
#if CONFIG_INTERNAL_STATS
@@ -3697,9 +3772,14 @@ static void encode_frame_to_data_rate
vp8_copy(cpi->common.fc.mbsplit_counts, cpi->mbsplit_count);
vp8_adapt_mode_probs(&cpi->common);
+#if CONFIG_NEWMVENTROPY
+ cpi->common.fc.NMVcount = cpi->NMVcount;
+ vp8_adapt_nmv_probs(&cpi->common, cpi->mb.e_mbd.allow_high_precision_mv);
+#else
vp8_copy(cpi->common.fc.MVcount, cpi->MVcount);
vp8_copy(cpi->common.fc.MVcount_hp, cpi->MVcount_hp);
vp8_adapt_mv_probs(&cpi->common);
+#endif /* CONFIG_NEWMVENTROPY */
vp8_update_mode_context(&cpi->common);
}
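
The nmvcost/nmvsadcost pointers above are aimed at the middle of their backing arrays, so a signed MV difference indexes a table directly, and cal_nmvsadcosts() fills both halves with a cost that grows with the log of the magnitude. A standalone sketch of the idiom; the MV_MAX value is assumed purely so the example compiles on its own (link with -lm):

#include <math.h>

#define MV_MAX 1023  /* assumed bound, for illustration only */

static int storage[2][2 * MV_MAX + 1];

static void fill_centered_sadcosts(int *sadcost[2]) {
  int i;
  sadcost[0][0] = sadcost[1][0] = 0;
  for (i = 1; i <= MV_MAX; i++) {
    /* Same curve as cal_nmvsadcosts(): roughly 2*log2(8*i) + 1.2,
       expressed in the encoder's fixed-point cost units. */
    int z = (int)(256 * (2 * (log2(8 * i) + .6)));
    sadcost[0][i] = sadcost[0][-i] = z;
    sadcost[1][i] = sadcost[1][-i] = z;
  }
}

int main(void) {
  /* Point at the centre so indices run from -MV_MAX to +MV_MAX. */
  int *sadcost[2] = { &storage[0][MV_MAX], &storage[1][MV_MAX] };
  fill_centered_sadcosts(sadcost);
  return 0;
}
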
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 04c7ad4ed..0bdc07fc2 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -67,10 +67,17 @@
#endif
typedef struct {
+#if CONFIG_NEWMVENTROPY
+ nmv_context nmvc;
+ int nmvjointcost[MV_JOINTS];
+ int nmvcosts[2][MV_VALS];
+ int nmvcosts_hp[2][MV_VALS];
+#else
MV_CONTEXT mvc[2];
int mvcosts[2][MVvals + 1];
MV_CONTEXT_HP mvc_hp[2];
int mvcosts_hp[2][MVvals_hp + 1];
+#endif
#ifdef MODE_STATS
// Stats
@@ -549,8 +556,12 @@ typedef struct VP8_COMP {
// int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];
+#if CONFIG_NEWMVENTROPY
+ nmv_context_counts NMVcount;
+#else
unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */
unsigned int MVcount_hp [2] [MVvals_hp]; /* (row,col) MV cts this frame */
+#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
// DECLARE_ALIGNED(16, int, coef_counts_backup [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]); //not used any more
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index 809279e7d..e985748da 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -132,10 +132,17 @@ void vp8_save_coding_context(VP8_COMP *cpi) {
// intended for use in a re-code loop in vp8_compress_frame where the
// quantizer value is adjusted between loop iterations.
+#if CONFIG_NEWMVENTROPY
+ cc->nmvc = cm->fc.nmvc;
+ vp8_copy(cc->nmvjointcost, cpi->mb.nmvjointcost);
+ vp8_copy(cc->nmvcosts, cpi->mb.nmvcosts);
+ vp8_copy(cc->nmvcosts_hp, cpi->mb.nmvcosts_hp);
+#else
vp8_copy(cc->mvc, cm->fc.mvc);
vp8_copy(cc->mvcosts, cpi->mb.mvcosts);
vp8_copy(cc->mvc_hp, cm->fc.mvc_hp);
vp8_copy(cc->mvcosts_hp, cpi->mb.mvcosts_hp);
+#endif
vp8_copy(cc->mv_ref_ct, cm->fc.mv_ref_ct);
vp8_copy(cc->mode_context, cm->fc.mode_context);
@@ -188,10 +195,17 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {
// Restore key state variables to the snapshot state stored in the
// previous call to vp8_save_coding_context.
+#if CONFIG_NEWMVENTROPY
+ cm->fc.nmvc = cc->nmvc;
+ vp8_copy(cpi->mb.nmvjointcost, cc->nmvjointcost);
+ vp8_copy(cpi->mb.nmvcosts, cc->nmvcosts);
+ vp8_copy(cpi->mb.nmvcosts_hp, cc->nmvcosts_hp);
+#else
vp8_copy(cm->fc.mvc, cc->mvc);
vp8_copy(cpi->mb.mvcosts, cc->mvcosts);
vp8_copy(cm->fc.mvc_hp, cc->mvc_hp);
vp8_copy(cpi->mb.mvcosts_hp, cc->mvcosts_hp);
+#endif
vp8_copy(cm->fc.mv_ref_ct, cc->mv_ref_ct);
vp8_copy(cm->fc.mode_context, cc->mode_context);
@@ -244,17 +258,17 @@ void vp8_setup_key_frame(VP8_COMP *cpi) {
vp8_kf_default_bmode_probs(cpi->common.kf_bmode_prob);
vp8_init_mbmode_probs(& cpi->common);
- vpx_memcpy(cpi->common.fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
- {
- int flag[2] = {1, 1};
- vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
- }
- vpx_memcpy(cpi->common.fc.mvc_hp, vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
+ vp8_init_mv_probs(& cpi->common);
+#if CONFIG_NEWMVENTROPY == 0
+ /* this is not really required */
{
int flag[2] = {1, 1};
- vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cpi->common.fc.mvc_hp, flag);
+ vp8_build_component_cost_table(
+ cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
+ vp8_build_component_cost_table_hp(
+ cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cpi->common.fc.mvc_hp, flag);
}
-
+#endif
cpi->common.txfm_mode = ALLOW_8X8;
@@ -285,6 +299,7 @@ void vp8_setup_key_frame(VP8_COMP *cpi) {
sizeof(default_vp8_mode_contexts));
}
+
void vp8_setup_inter_frame(VP8_COMP *cpi) {
cpi->common.txfm_mode = ALLOW_8X8;
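
vp8_save_coding_context() / vp8_restore_coding_context() snapshot the new entropy state with plain struct assignment for the probability context and vp8_copy for the cost arrays; this works because the members are true arrays, so sizeof yields their full extent. A reduced sketch of the pattern, with a hypothetical stand-in for nmv_context (whose real layout is not shown in this patch):

#include <string.h>

#define MV_JOINTS 4  /* matches nmvjointcost[MV_JOINTS] in onyx_int.h */

typedef struct { unsigned char probs[8]; } nmv_context_sketch;

typedef struct {
  nmv_context_sketch nmvc;
  int nmvjointcost[MV_JOINTS];
} coding_context_sketch;

static void save_ctx(coding_context_sketch *cc,
                     const nmv_context_sketch *fc_nmvc,
                     const int jointcost[MV_JOINTS]) {
  cc->nmvc = *fc_nmvc;  /* struct assignment, as in cc->nmvc = cm->fc.nmvc */
  /* vp8_copy amounts to a size-checked memcpy over an array member. */
  memcpy(cc->nmvjointcost, jointcost, sizeof(cc->nmvjointcost));
}

static void restore_ctx(const coding_context_sketch *cc,
                        nmv_context_sketch *fc_nmvc,
                        int jointcost[MV_JOINTS]) {
  *fc_nmvc = cc->nmvc;
  memcpy(jointcost, cc->nmvjointcost, sizeof(cc->nmvjointcost));
}
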
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 391254bf2..a2b234ebe 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -37,6 +37,7 @@
#include "vpx_mem/vpx_mem.h"
#include "dct.h"
#include "vp8/common/systemdependent.h"
+#include "vp8/encoder/encodemv.h"
#include "vp8/common/seg_common.h"
#include "vp8/common/pred_common.h"
@@ -58,8 +59,6 @@ extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
extern void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d);
#endif
-#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
-
#define MAXF(a,b) (((a) > (b)) ? (a) : (b))
#define INVALID_MV 0x80008000
@@ -376,6 +375,17 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
vp8_init_mode_costs(cpi);
+  if (cpi->common.frame_type != KEY_FRAME) {
+#if CONFIG_NEWMVENTROPY
+ vp8_build_nmv_cost_table(
+ cpi->mb.nmvjointcost,
+ cpi->mb.e_mbd.allow_high_precision_mv ?
+ cpi->mb.nmvcost_hp : cpi->mb.nmvcost,
+ &cpi->common.fc.nmvc,
+ cpi->mb.e_mbd.allow_high_precision_mv, 1, 1);
+#endif
+ }
}
void vp8_auto_select_speed(VP8_COMP *cpi) {
@@ -404,7 +414,9 @@ void vp8_auto_select_speed(VP8_COMP *cpi) {
cpi->oxcf.cpu_used = -16;
*/
- if (cpi->avg_pick_mode_time < milliseconds_for_compress && (cpi->avg_encode_time - cpi->avg_pick_mode_time) < milliseconds_for_compress) {
+ if (cpi->avg_pick_mode_time < milliseconds_for_compress &&
+ (cpi->avg_encode_time - cpi->avg_pick_mode_time) <
+ milliseconds_for_compress) {
if (cpi->avg_pick_mode_time == 0) {
cpi->Speed = 4;
} else {
@@ -418,7 +430,8 @@ void vp8_auto_select_speed(VP8_COMP *cpi) {
}
}
- if (milliseconds_for_compress * 100 > cpi->avg_encode_time * auto_speed_thresh[cpi->Speed]) {
+ if (milliseconds_for_compress * 100 >
+ cpi->avg_encode_time * auto_speed_thresh[cpi->Speed]) {
cpi->Speed -= 1;
cpi->avg_pick_mode_time = 0;
cpi->avg_encode_time = 0;
@@ -1883,11 +1896,15 @@ void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
x->e_mbd.mode_info_context->mbmi.mv[0].as_int = mv->as_int;
}
-static int labels2mode(MACROBLOCK *x, int const *labelings, int which_label,
- B_PREDICTION_MODE this_mode,
- int_mv *this_mv, int_mv *this_second_mv,
- int_mv seg_mvs[MAX_REF_FRAMES - 1], int_mv *best_ref_mv,
- int_mv *second_best_ref_mv, int *mvcost[2]) {
+static int labels2mode(
+ MACROBLOCK *x,
+ int const *labelings, int which_label,
+ B_PREDICTION_MODE this_mode,
+ int_mv *this_mv, int_mv *this_second_mv,
+ int_mv seg_mvs[MAX_REF_FRAMES - 1],
+ int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv,
+ DEC_MVCOSTS) {
MACROBLOCKD *const xd = & x->e_mbd;
MODE_INFO *const mic = xd->mode_info_context;
MB_MODE_INFO * mbmi = &mic->mbmi;
@@ -1922,11 +1939,11 @@ static int labels2mode(MACROBLOCK *x, int const *labelings, int which_label,
seg_mvs[mbmi->second_ref_frame - 1].as_int;
}
- thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost,
+ thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, MVCOSTS,
102, xd->allow_high_precision_mv);
if (mbmi->second_ref_frame) {
thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv,
- mvcost, 102,
+ MVCOSTS, 102,
xd->allow_high_precision_mv);
}
break;
@@ -2656,8 +2673,10 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
-
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
+#if CONFIG_NEWMVENTROPY
+ MV mv;
+#endif
if (mbmi->mode == SPLITMV) {
int i;
@@ -2665,6 +2684,21 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
for (i = 0; i < x->partition_info->count; i++) {
if (x->partition_info->bmi[i].mode == NEW4X4) {
if (x->e_mbd.allow_high_precision_mv) {
+#if CONFIG_NEWMVENTROPY
+ mv.row = (x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
+ - second_best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
+ - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
+ &cpi->NMVcount, 1);
+ }
+#else
cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row)]++;
cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.col
@@ -2675,8 +2709,23 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.col
- second_best_ref_mv->as_mv.col)]++;
}
- } else
- {
+#endif
+ } else {
+#if CONFIG_NEWMVENTROPY
+ mv.row = (x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
+ - second_best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
+ - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
+ &cpi->NMVcount, 0);
+ }
+#else
cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].mv.as_mv.col
@@ -2687,11 +2736,22 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.col
- second_best_ref_mv->as_mv.col) >> 1)]++;
}
+#endif
}
}
}
} else if (mbmi->mode == NEWMV) {
if (x->e_mbd.allow_high_precision_mv) {
+#if CONFIG_NEWMVENTROPY
+ mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
+ if (mbmi->second_ref_frame) {
+ mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
+ }
+#else
cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv[0].as_mv.row
- best_ref_mv->as_mv.row)]++;
cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv[0].as_mv.col
@@ -2702,8 +2762,18 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv[1].as_mv.col
- second_best_ref_mv->as_mv.col)]++;
}
- } else
- {
+#endif
+ } else {
+#if CONFIG_NEWMVENTROPY
+ mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
+ if (mbmi->second_ref_frame) {
+ mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
+ }
+#else
cpi->MVcount[0][mv_max + ((mbmi->mv[0].as_mv.row
- best_ref_mv->as_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max + ((mbmi->mv[0].as_mv.col
@@ -2714,6 +2784,7 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
cpi->MVcount[1][mv_max + ((mbmi->mv[1].as_mv.col
- second_best_ref_mv->as_mv.col) >> 1)]++;
}
+#endif
}
}
}
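
rd_update_mvcount() above replaces the old per-component histogram bumps, indexed as mv_max + diff (halved for non-hp vectors), with a single call per vector: form the raw (row,col) difference and hand it to vp8_increment_nmv() with the reference and a precision flag. A toy accumulator in the same shape; the counts layout is invented for the sketch, since nmv_context_counts is not shown in this patch:

typedef struct { short row, col; } mv_diff_sketch;

typedef struct {
  unsigned joints[4];              /* zero / col-only / row-only / both */
  unsigned row_nonzero, col_nonzero;
} nmv_counts_sketch;

/* Stand-in for vp8_increment_nmv(): classify the difference jointly,
   then update per-component tallies. In the real coder, usehp selects
   whether eighth-pel fractional bins are counted. */
static void increment_nmv_sketch(const mv_diff_sketch *d,
                                 nmv_counts_sketch *c, int usehp) {
  (void)usehp;
  c->joints[((d->row != 0) << 1) | (d->col != 0)]++;
  if (d->row) c->row_nonzero++;
  if (d->col) c->col_nonzero++;
}
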
diff --git a/vp8/encoder/temporal_filter.c b/vp8/encoder/temporal_filter.c
index 035eebb98..d57613bd5 100644
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -187,7 +187,8 @@ static int vp8_temporal_filter_find_matching_mb_c
// Ignore mv costing by sending NULL pointer instead of cost arrays
bestsme = vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.as_mv.first,
step_param, sadpb, &cpi->fn_ptr[BLOCK_16X16],
- NULL, NULL, &best_ref_mv1);
+ NULLMVCOST, NULLMVCOST,
+ &best_ref_mv1);
#if ALT_REF_SUBPEL_ENABLED
// Try sub-pixel MC?
@@ -200,7 +201,8 @@ static int vp8_temporal_filter_find_matching_mb_c
&best_ref_mv1,
x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16],
- NULL, &distortion, &sse);
+ NULLMVCOST,
+ &distortion, &sse);
}
#endif
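
As the comment in vp8_temporal_filter_find_matching_mb_c notes, the temporal filter opts out of MV rate entirely by passing NULLMVCOST, which expands to NULL pointers under either configuration. Any cost helper on that path therefore needs a guard of roughly this shape; the sketch below is an assumption about the guard, not the library's code:

/* Sketch: treat NULL cost tables as "motion vectors are free". */
static int guarded_mv_cost(const int *mvjcost, int *mvcost[2],
                           int drow, int dcol, int weight) {
  int bits;
  if (mvjcost == NULL || mvcost == NULL)
    return 0;  /* NULLMVCOST path: ignore mv rate */
  bits = mvjcost[((drow != 0) << 1) | (dcol != 0)] +
         mvcost[0][drow] + mvcost[1][dcol];
  return (bits * weight) >> 7;  /* fixed-point weighting, assumed from
                                   vp8_mv_bit_cost's Weight parameter */
}
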