Diffstat (limited to 'vp8')
-rw-r--r--  vp8/common/blockd.h        |  12
-rw-r--r--  vp8/common/entropymv.c     |   5
-rw-r--r--  vp8/common/findnearmv.c    |  38
-rw-r--r--  vp8/decoder/decodemv.c     |  74
-rw-r--r--  vp8/encoder/bitstream.c    | 306
-rw-r--r--  vp8/encoder/block.h        |   3
-rw-r--r--  vp8/encoder/encodeframe.c  |   2
-rw-r--r--  vp8/encoder/onyx_if.c      |   2
-rw-r--r--  vp8/encoder/onyx_int.h     |   2
-rw-r--r--  vp8/encoder/rdopt.c        | 103
-rw-r--r--  vp8/encoder/rdopt.h        |   5
11 files changed, 307 insertions(+), 245 deletions(-)
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index cb546e74b..5d919f1a9 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -216,10 +216,8 @@ typedef struct {
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
TX_SIZE txfm_size;
int_mv mv[2]; // for each reference frame used
-#if CONFIG_NEWBESTREFMV
- int_mv ref_mv, second_ref_mv;
+#if CONFIG_NEWBESTREFMV || CONFIG_NEW_MVREF
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REFS];
- int mv_ref_index[MAX_REF_FRAMES];
#endif
SPLITMV_PARTITIONING_TYPE partitioning;
@@ -325,6 +323,9 @@ typedef struct MacroBlockD {
// Probability Tree used to code Segment number
vp8_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS];
+#if CONFIG_NEW_MVREF
+ vp8_prob mb_mv_ref_id_probs[MAX_REF_FRAMES][3];
+#endif
// Segment features
signed char segment_feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
@@ -379,11 +380,6 @@ typedef struct MacroBlockD {
#endif
int mb_index; // Index of the MB in the SB (0..3)
-
-#if CONFIG_NEWBESTREFMV
- int_mv ref_mv[MAX_MV_REFS];
-#endif
-
int q_index;
} MACROBLOCKD;
diff --git a/vp8/common/entropymv.c b/vp8/common/entropymv.c
index a442a2438..eaa8b750c 100644
--- a/vp8/common/entropymv.c
+++ b/vp8/common/entropymv.c
@@ -17,8 +17,13 @@
#define MV_COUNT_SAT 16
#define MV_MAX_UPDATE_FACTOR 160
+#if CONFIG_NEW_MVREF
+/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
+#define COMPANDED_MVREF_THRESH 1000000
+#else
/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
#define COMPANDED_MVREF_THRESH 8
+#endif
/* Smooth or bias the mv-counts before prob computation */
/* #define SMOOTH_MV_COUNTS */
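With CONFIG_NEW_MVREF the threshold is raised from 8 to 1,000,000 integer pels, which in practice removes the cut-off: 1/8-pel coding of the residual stays available however large the reference vector is. A minimal sketch of how such a companding threshold is typically consulted is shown below; the helper name and the exact scaling are assumptions for illustration only, not code from this patch.

    /* Illustrative sketch only (not part of this patch). Assumes the check
     * converts the 1/8-pel reference vector to integer pels and compares
     * it against COMPANDED_MVREF_THRESH before enabling 1/8-pel coding. */
    static int use_high_precision_mv(const MV *ref, int allow_hp) {
      return allow_hp &&
             (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
             (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;
    }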
diff --git a/vp8/common/findnearmv.c b/vp8/common/findnearmv.c
index 5fc135090..339458d58 100644
--- a/vp8/common/findnearmv.c
+++ b/vp8/common/findnearmv.c
@@ -54,19 +54,9 @@ void vp8_find_near_mvs
int *cntx = cnt;
enum {CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV};
-#if CONFIG_NEWBESTREFMV
- int_mv *ref_mv = xd->ref_mv;
-#endif
-
/* Zero accumulators */
mv[0].as_int = mv[1].as_int = mv[2].as_int = 0;
cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;
-#if CONFIG_NEWBESTREFMV
- ref_mv[0].as_int = ref_mv[1].as_int
- = ref_mv[2].as_int
- = ref_mv[3].as_int
- = 0;
-#endif
/* Process above */
if (above->mbmi.ref_frame != INTRA_FRAME) {
@@ -75,9 +65,6 @@ void vp8_find_near_mvs
mv->as_int = above->mbmi.mv[0].as_int;
mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame],
refframe, mv, ref_frame_sign_bias);
-#if CONFIG_NEWBESTREFMV
- ref_mv[0].as_int = mv->as_int;
-#endif
++cntx;
}
*cntx += 2;
@@ -90,9 +77,7 @@ void vp8_find_near_mvs
this_mv.as_int = left->mbmi.mv[0].as_int;
mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame],
refframe, &this_mv, ref_frame_sign_bias);
-#if CONFIG_NEWBESTREFMV
- ref_mv[1].as_int = this_mv.as_int;
-#endif
+
if (this_mv.as_int != mv->as_int) {
++ mv;
mv->as_int = this_mv.as_int;
@@ -107,21 +92,9 @@ void vp8_find_near_mvs
(lf_here->mbmi.ref_frame == LAST_FRAME && refframe == LAST_FRAME)) {
if (aboveleft->mbmi.mv[0].as_int) {
third = aboveleft;
-#if CONFIG_NEWBESTREFMV
- ref_mv[2].as_int = aboveleft->mbmi.mv[0].as_int;
- mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame],
- refframe, (ref_mv+2), ref_frame_sign_bias);
-#endif
} else if (lf_here->mbmi.mv[0].as_int) {
third = lf_here;
}
-#if CONFIG_NEWBESTREFMV
- if (lf_here->mbmi.mv[0].as_int) {
- ref_mv[3].as_int = lf_here->mbmi.mv[0].as_int;
- mv_bias(ref_frame_sign_bias[lf_here->mbmi.ref_frame],
- refframe, (ref_mv+3), ref_frame_sign_bias);
- }
-#endif
if (third) {
int_mv this_mv;
this_mv.as_int = third->mbmi.mv[0].as_int;
@@ -294,6 +267,12 @@ void vp8_find_best_ref_mvs(MACROBLOCKD *xd,
}
}
+ // Make sure all the candidates are properly clamped etc
+ for (i = 0; i < 4; ++i) {
+ lower_mv_precision(&sorted_mvs[i], xd->allow_high_precision_mv);
+ vp8_clamp_mv2(&sorted_mvs[i], xd);
+ }
+
// Set the best mv to the first entry in the sorted list
best_mv->as_int = sorted_mvs[0].as_int;
@@ -316,9 +295,6 @@ void vp8_find_best_ref_mvs(MACROBLOCKD *xd,
// Copy back the re-ordered mv list
vpx_memcpy(mvlist, sorted_mvs, sizeof(sorted_mvs));
- lower_mv_precision(best_mv, xd->allow_high_precision_mv);
-
- vp8_clamp_mv2(best_mv, xd);
}
#endif // CONFIG_NEWBESTREFMV
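The hunk above moves precision-lowering and clamping from the single returned best_mv onto every entry of sorted_mvs, so all candidates copied back into mvlist are already legal. Below is a rough sketch of the kind of rounding lower_mv_precision applies; it is an assumption about that helper's behaviour, shown only to make the intent of the change concrete.

    /* Rough sketch (assumption, not the patch's code): when high-precision
     * MVs are not allowed, odd 1/8-pel components are nudged toward zero
     * so the vector lands on a representable 1/4-pel position. */
    static void lower_mv_precision_sketch(int_mv *mv, int allow_hp) {
      if (!allow_hp) {
        if (mv->as_mv.row & 1)
          mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
        if (mv->as_mv.col & 1)
          mv->as_mv.col += (mv->as_mv.col > 0 ? -1 : 1);
      }
    }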
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 0adc3333e..991fb9103 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -72,6 +72,23 @@ static void vp8_read_mb_segid(vp8_reader *r, MB_MODE_INFO *mi,
}
}
+#if CONFIG_NEW_MVREF
+int vp8_read_mv_ref_id(vp8_reader *r,
+ vp8_prob * ref_id_probs) {
+ int ref_index = 0;
+
+ if (vp8_read(r, ref_id_probs[0])) {
+ ref_index++;
+ if (vp8_read(r, ref_id_probs[1])) {
+ ref_index++;
+ if (vp8_read(r, ref_id_probs[2]))
+ ref_index++;
+ }
+ }
+ return ref_index;
+}
+#endif
+
extern const int vp8_i8x8_block[4];
static void kfread_modes(VP8D_COMP *pbi,
MODE_INFO *m,
@@ -530,6 +547,12 @@ static void mb_mode_mv_init(VP8D_COMP *pbi, vp8_reader *bc) {
cm->fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
} while (++i < VP8_YMODES - 1);
}
+
+#if CONFIG_NEW_MVREF
+ // Temporary default probabilities for coding the MV ref id signal
+ vpx_memset(xd->mb_mv_ref_id_probs, 192, sizeof(xd->mb_mv_ref_id_probs));
+#endif
+
read_nmvprobs(bc, nmvc, xd->allow_high_precision_mv);
}
}
@@ -708,13 +731,9 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
xd->pre.u_buffer = cm->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cm->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
- // Update stats on relative distance of chosen vector to the
- // possible best reference vectors.
- {
- find_mv_refs(xd, mi, prev_mi,
- ref_frame, mbmi->ref_mvs[ref_frame],
- cm->ref_frame_sign_bias );
- }
+ find_mv_refs(xd, mi, prev_mi,
+ ref_frame, mbmi->ref_mvs[ref_frame],
+ cm->ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd,
xd->pre.y_buffer,
@@ -799,15 +818,10 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mbmi->second_ref_frame,
cm->ref_frame_sign_bias);
- // Update stats on relative distance of chosen vector to the
- // possible best reference vectors.
- {
- MV_REFERENCE_FRAME ref_frame = mbmi->second_ref_frame;
-
- find_mv_refs(xd, mi, prev_mi,
- ref_frame, mbmi->ref_mvs[ref_frame],
- cm->ref_frame_sign_bias );
- }
+ find_mv_refs(xd, mi, prev_mi,
+ mbmi->second_ref_frame,
+ mbmi->ref_mvs[mbmi->second_ref_frame],
+ cm->ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd,
xd->second_pre.y_buffer,
@@ -977,11 +991,26 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
break;
case NEWMV:
+
+#if CONFIG_NEW_MVREF
+ {
+ int best_index;
+ MV_REFERENCE_FRAME ref_frame = mbmi->ref_frame;
+
+ // Decode the index of the choice.
+ best_index =
+ vp8_read_mv_ref_id(bc, xd->mb_mv_ref_id_probs[ref_frame]);
+
+ best_mv.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
+ }
+#endif
+
read_nmv(bc, &mv->as_mv, &best_mv.as_mv, nmvc);
read_nmv_fp(bc, &mv->as_mv, &best_mv.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&mv->as_mv, &best_mv.as_mv, &cm->fc.NMVcount,
xd->allow_high_precision_mv);
+
mv->as_mv.row += best_mv.as_mv.row;
mv->as_mv.col += best_mv.as_mv.col;
@@ -995,7 +1024,20 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mb_to_right_edge,
mb_to_top_edge,
mb_to_bottom_edge);
+
if (mbmi->second_ref_frame) {
+#if CONFIG_NEW_MVREF
+ {
+ int best_index;
+ MV_REFERENCE_FRAME ref_frame = mbmi->second_ref_frame;
+
+ // Decode the index of the choice.
+ best_index =
+ vp8_read_mv_ref_id(bc, xd->mb_mv_ref_id_probs[ref_frame]);
+ best_mv_second.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
+ }
+#endif
+
read_nmv(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc);
read_nmv_fp(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc,
xd->allow_high_precision_mv);
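The new vp8_read_mv_ref_id() above is a truncated unary read over three node probabilities, mirrored by vp8_write_mv_ref_id() on the encoder side, so indices 0..3 map to the bitstrings "0", "10", "110", "111". An equivalent compact loop form, shown purely for illustration and not part of the patch:

    /* Illustration only: equivalent loop form of vp8_read_mv_ref_id(). */
    static int read_mv_ref_id_sketch(vp8_reader *r, const vp8_prob probs[3]) {
      int id = 0;
      while (id < 3 && vp8_read(r, probs[id]))
        ++id;
      return id;
    }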
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 758c9736c..3c7e37762 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -28,6 +28,7 @@
#include "vp8/common/pred_common.h"
#include "vp8/common/entropy.h"
#include "vp8/encoder/encodemv.h"
+#include "vp8/common/entropymv.h"
#if CONFIG_NEWBESTREFMV
#include "vp8/common/mvref_common.h"
@@ -116,69 +117,6 @@ static int prob_diff_update_cost(vp8_prob newp, vp8_prob oldp) {
return update_bits[delp] * 256;
}
-#if CONFIG_NEW_MVREF
-// Estimate the cost of each coding the vector using each reference candidate
-unsigned int pick_best_mv_ref( MACROBLOCK *x,
- int_mv target_mv,
- int_mv * mv_ref_list,
- int_mv * best_ref ) {
-
- int i;
- int best_index = 0;
- int cost, cost2;
- int index_cost[MAX_MV_REFS];
- MACROBLOCKD *xd = &x->e_mbd;
-
- /*unsigned int distance, distance2;
-
- distance = mv_distance(&target_mv, &mv_ref_list[0]);
-
- for (i = 1; i < MAX_MV_REFS; ++i ) {
- distance2 =
- mv_distance(&target_mv, &mv_ref_list[i]);
- if (distance2 < distance) {
- distance = distance2;
- best_index = i;
- }
- }*/
-
- // For now estimate the cost of selecting a given ref index
- // as index * 1 bits (but here 1 bit is scaled to 256)
- for (i = 0; i < MAX_MV_REFS; ++i ) {
- index_cost[i] = i << 8;
- }
- index_cost[0] = vp8_cost_zero(205);
- index_cost[1] = vp8_cost_zero(40);
- index_cost[2] = vp8_cost_zero(8);
- index_cost[3] = vp8_cost_zero(2);
-
- cost = index_cost[0] +
- vp8_mv_bit_cost(&target_mv,
- &mv_ref_list[0],
- XMVCOST, 96,
- xd->allow_high_precision_mv);
-
-
- //for (i = 1; i < MAX_MV_REFS; ++i ) {
- for (i = 1; i < 4; ++i ) {
- cost2 = index_cost[i] +
- vp8_mv_bit_cost(&target_mv,
- &mv_ref_list[i],
- XMVCOST, 96,
- xd->allow_high_precision_mv);
-
- if (cost2 < cost) {
- cost = cost2;
- best_index = i;
- }
- }
-
- (*best_ref).as_int = mv_ref_list[best_index].as_int;
-
- return best_index;
-}
-#endif
-
static void update_mode(
vp8_writer *const bc,
int n,
@@ -321,6 +259,70 @@ static void update_refpred_stats(VP8_COMP *cpi) {
}
}
+static void update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
+ MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
+ MV mv;
+
+ if (mbmi->mode == SPLITMV) {
+ int i;
+
+ for (i = 0; i < x->partition_info->count; i++) {
+ if (x->partition_info->bmi[i].mode == NEW4X4) {
+ if (x->e_mbd.allow_high_precision_mv) {
+ mv.row = (x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
+ - second_best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
+ - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
+ &cpi->NMVcount, 1);
+ }
+ } else {
+ mv.row = (x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
+ mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
+ - second_best_ref_mv->as_mv.row);
+ mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
+ - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
+ &cpi->NMVcount, 0);
+ }
+ }
+ }
+ }
+ } else if (mbmi->mode == NEWMV) {
+ if (x->e_mbd.allow_high_precision_mv) {
+ mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
+ if (mbmi->second_ref_frame) {
+ mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
+ }
+ } else {
+ mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
+ if (mbmi->second_ref_frame) {
+ mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
+ mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
+ vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
+ }
+ }
+ }
+}
+
static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
}
@@ -619,6 +621,124 @@ static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
vp8_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
}
+#if CONFIG_NEW_MVREF
+static int vp8_cost_mv_ref_id(vp8_prob * ref_id_probs, int mv_ref_id) {
+ int cost;
+
+ // Encode the index for the MV reference.
+ switch (mv_ref_id) {
+ case 0:
+ cost = vp8_cost_zero(ref_id_probs[0]);
+ break;
+ case 1:
+ cost = vp8_cost_one(ref_id_probs[0]);
+ cost += vp8_cost_zero(ref_id_probs[1]);
+ break;
+ case 2:
+ cost = vp8_cost_one(ref_id_probs[0]);
+ cost += vp8_cost_one(ref_id_probs[1]);
+ cost += vp8_cost_zero(ref_id_probs[2]);
+ break;
+ case 3:
+ cost = vp8_cost_one(ref_id_probs[0]);
+ cost += vp8_cost_one(ref_id_probs[1]);
+ cost += vp8_cost_one(ref_id_probs[2]);
+ break;
+
+ // TRAP.. This should not happen
+ default:
+ assert(0);
+ break;
+ }
+
+ return cost;
+}
+
+static void vp8_write_mv_ref_id(vp8_writer *w,
+ vp8_prob * ref_id_probs,
+ int mv_ref_id) {
+ // Encode the index for the MV reference.
+ switch (mv_ref_id) {
+ case 0:
+ vp8_write(w, 0, ref_id_probs[0]);
+ break;
+ case 1:
+ vp8_write(w, 1, ref_id_probs[0]);
+ vp8_write(w, 0, ref_id_probs[1]);
+ break;
+ case 2:
+ vp8_write(w, 1, ref_id_probs[0]);
+ vp8_write(w, 1, ref_id_probs[1]);
+ vp8_write(w, 0, ref_id_probs[2]);
+ break;
+ case 3:
+ vp8_write(w, 1, ref_id_probs[0]);
+ vp8_write(w, 1, ref_id_probs[1]);
+ vp8_write(w, 1, ref_id_probs[2]);
+ break;
+
+ // TRAP.. This should not happen
+ default:
+ assert(0);
+ break;
+ }
+}
+
+// Estimate the cost of coding the vector using each reference candidate
+static unsigned int pick_best_mv_ref(MACROBLOCK *x,
+ MV_REFERENCE_FRAME ref_frame,
+ int_mv target_mv,
+ int_mv * mv_ref_list,
+ int_mv * best_ref) {
+ int i;
+ int best_index = 0;
+ int cost, cost2;
+ int zero_seen = (mv_ref_list[0].as_int) ? FALSE : TRUE;
+ MACROBLOCKD *xd = &x->e_mbd;
+ int max_mv = MV_MAX;
+
+ cost = vp8_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], 0) +
+ vp8_mv_bit_cost(&target_mv,
+ &mv_ref_list[0],
+ XMVCOST, 96,
+ xd->allow_high_precision_mv);
+
+
+ // Use 4 for now : for (i = 1; i < MAX_MV_REFS; ++i ) {
+ for (i = 1; i < 4; ++i) {
+ // If we see a 0,0 reference vector for a second time we have reached
+ // the end of the list of valid candidate vectors.
+ if (!mv_ref_list[i].as_int)
+ if (zero_seen)
+ break;
+ else
+ zero_seen = TRUE;
+
+ // Check for cases where the reference choice would give rise to an
+ // uncodable/out of range residual for row or col.
+ if ((abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv) ||
+ (abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)) {
+ continue;
+ }
+
+ cost2 = vp8_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], i) +
+ vp8_mv_bit_cost(&target_mv,
+ &mv_ref_list[i],
+ XMVCOST, 96,
+ xd->allow_high_precision_mv);
+
+ if (cost2 < cost) {
+ cost = cost2;
+ best_index = i;
+ }
+ }
+
+ (*best_ref).as_int = mv_ref_list[best_index].as_int;
+
+ return best_index;
+}
+#endif
+
// This function writes the current macro block's segnment id to the bitstream
// It should only be called if a segment map update is indicated.
static void write_mb_segid(vp8_writer *bc,
@@ -931,11 +1051,13 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
{
int_mv n1, n2;
+ // Only used for context just now and soon to be deprecated.
vp8_find_near_mvs(xd, m, prev_m, &n1, &n2, &best_mv, ct,
rf, cpi->common.ref_frame_sign_bias);
#if CONFIG_NEWBESTREFMV
- best_mv.as_int = mi->ref_mv.as_int;
+ best_mv.as_int = mi->ref_mvs[rf][0].as_int;
#endif
+
vp8_mv_ref_probs(&cpi->common, mv_ref_p, ct);
#ifdef ENTROPY_STATS
@@ -988,13 +1110,15 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
(mode == NEWMV || mode == SPLITMV)) {
int_mv n1, n2;
- vp8_find_near_mvs(xd, m,
- prev_m,
+ // Only used for context just now and soon to be deprecated.
+ vp8_find_near_mvs(xd, m, prev_m,
&n1, &n2, &best_second_mv, ct,
mi->second_ref_frame,
cpi->common.ref_frame_sign_bias);
+
#if CONFIG_NEWBESTREFMV
- best_second_mv.as_int = mi->second_ref_mv.as_int;
+ best_second_mv.as_int =
+ mi->ref_mvs[mi->second_ref_frame][0].as_int;
#endif
}
@@ -1012,38 +1136,43 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
active_section = 5;
#endif
-#if 0 //CONFIG_NEW_MVREF
+#if CONFIG_NEW_MVREF
{
unsigned int best_index;
- /*find_mv_refs(xd, m, prev_m,
- m->mbmi.ref_frame,
- mi->ref_mvs[rf],
- cpi->common.ref_frame_sign_bias );*/
- best_index = pick_best_mv_ref(x, mi->mv[0],
+ // Choose the best mv reference
+ best_index = pick_best_mv_ref(x, rf, mi->mv[0],
mi->ref_mvs[rf], &best_mv);
- cpi->best_ref_index_counts[best_index]++;
+
+ // Encode the index of the choice.
+ vp8_write_mv_ref_id(bc,
+ xd->mb_mv_ref_id_probs[rf], best_index);
+
+ cpi->best_ref_index_counts[rf][best_index]++;
}
#endif
+
write_nmv(bc, &mi->mv[0].as_mv, &best_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
if (mi->second_ref_frame) {
-#if 0 //CONFIG_NEW_MVREF
+#if CONFIG_NEW_MVREF
unsigned int best_index;
-
- /*find_mv_refs(xd, m, prev_m,
- m->mbmi.second_ref_frame,
- mi->ref_mvs[mi->second_ref_frame],
- cpi->common.ref_frame_sign_bias );*/
+ MV_REFERENCE_FRAME sec_ref_frame = mi->second_ref_frame;
best_index =
- pick_best_mv_ref(x, mi->mv[1],
- mi->ref_mvs[mi->second_ref_frame],
+ pick_best_mv_ref(x, sec_ref_frame, mi->mv[1],
+ mi->ref_mvs[sec_ref_frame],
&best_second_mv);
- cpi->best_ref_index_counts[best_index]++;
+
+ // Encode the index of the choice.
+ vp8_write_mv_ref_id(bc,
+ xd->mb_mv_ref_id_probs[sec_ref_frame],
+ best_index);
+
+ cpi->best_ref_index_counts[sec_ref_frame][best_index]++;
#endif
write_nmv(bc, &mi->mv[1].as_mv, &best_second_mv,
(const nmv_context*) nmvc,
@@ -1108,6 +1237,12 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
break;
}
}
+
+ // Update the mvcounts used to tune mv probs but only if this is
+ // the real pack run.
+ if ( !cpi->dummy_packing ) {
+ update_mvcount(cpi, x, &best_mv, &best_second_mv);
+ }
}
if (((rf == INTRA_FRAME && mode <= I8X8_PRED) ||
@@ -2160,6 +2295,11 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
update_mbintra_mode_probs(cpi, &header_bc);
+#if CONFIG_NEW_MVREF
+ // Temporary default probabilities for encoding the MV ref id signal
+ vpx_memset(xd->mb_mv_ref_id_probs, 192, sizeof(xd->mb_mv_ref_id_probs));
+#endif
+
vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv, &header_bc);
}
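pick_best_mv_ref() above trades the cost of signalling a later index against the bits saved on the MV residual. A worked example under the temporary default node probability of 192/256 used for mb_mv_ref_id_probs (approximate figures, for illustration only):

    /* With p = 192/256 at every node, a "0" decision costs about
     * -log2(192/256) ~= 0.42 bits and a "1" decision -log2(64/256) = 2 bits,
     * so the truncated unary index costs roughly:
     *   index 0: 0.42 bits ("0")
     *   index 1: 2.42 bits ("10")
     *   index 2: 4.42 bits ("110")
     *   index 3: 6.00 bits ("111")
     * A later candidate is therefore only picked when vp8_mv_bit_cost() of
     * target_mv - mv_ref_list[i] drops by more than the extra index bits;
     * candidates whose row/col residual would exceed MV_MAX are skipped,
     * and a second zero vector marks the end of the valid candidate list. */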
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 48623be8c..df74a3fcf 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -70,6 +70,9 @@ typedef struct {
PARTITION_INFO partition_info;
int_mv best_ref_mv;
int_mv second_best_ref_mv;
+#if CONFIG_NEWBESTREFMV || CONFIG_NEW_MVREF
+ int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REFS];
+#endif
int rate;
int distortion;
int64_t intra_error;
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 893a33817..7cb52a815 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -470,8 +470,6 @@ static void update_state(VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
// Note how often each mode chosen as best
cpi->mode_chosen_counts[mb_mode_index]++;
- rd_update_mvcount(cpi, x, &ctx->best_ref_mv, &ctx->second_best_ref_mv);
-
cpi->prediction_error += ctx->distortion;
cpi->intra_error += ctx->intra_error;
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index f11ff5936..25d865484 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -3761,7 +3761,7 @@ static void encode_frame_to_data_rate
FILE *f = fopen("mv_ref_dist.stt", "a");
unsigned int i;
for (i = 0; i < MAX_MV_REFS; ++i) {
- fprintf(f, "%10d", cpi->best_ref_index_counts[i] );
+ fprintf(f, "%10d", cpi->best_ref_index_counts[0][i]);
}
fprintf(f, "\n" );
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index ab6802509..e78339c42 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -755,7 +755,7 @@ typedef struct VP8_COMP {
[VP8_SWITCHABLE_FILTERS];
#if CONFIG_NEW_MVREF
- unsigned int best_ref_index_counts[MAX_MV_REFS];
+ unsigned int best_ref_index_counts[MAX_REF_FRAMES][MAX_MV_REFS];
#endif
} VP8_COMP;
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index b2207cb1f..f72697728 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -3011,70 +3011,6 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
}
}
-void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
- int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
- MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
- MV mv;
-
- if (mbmi->mode == SPLITMV) {
- int i;
-
- for (i = 0; i < x->partition_info->count; i++) {
- if (x->partition_info->bmi[i].mode == NEW4X4) {
- if (x->e_mbd.allow_high_precision_mv) {
- mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
- mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
- &cpi->NMVcount, 1);
- }
- } else {
- mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
- if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
- mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row);
- mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
- &cpi->NMVcount, 0);
- }
- }
- }
- }
- } else if (mbmi->mode == NEWMV) {
- if (x->e_mbd.allow_high_precision_mv) {
- mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
- if (mbmi->second_ref_frame) {
- mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
- }
- } else {
- mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
- if (mbmi->second_ref_frame) {
- mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
- mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
- vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
- }
- }
- }
-}
-
static void set_i8x8_block_modes(MACROBLOCK *x, int modes[2][4]) {
int i;
MACROBLOCKD *xd = &x->e_mbd;
@@ -3297,9 +3233,6 @@ void setup_buffer_inter(VP8_COMP *cpi, MACROBLOCK *x, int idx, int frame_type,
int recon_yoffset, int recon_uvoffset,
int_mv frame_nearest_mv[4], int_mv frame_near_mv[4],
int_mv frame_best_ref_mv[4],
-#if CONFIG_NEWBESTREFMV
- int_mv ref_mv[MAX_REF_FRAMES],
-#endif
int frame_mdcounts[4][4],
unsigned char *y_buffer[4], unsigned char *u_buffer[4],
unsigned char *v_buffer[4]) {
@@ -3319,15 +3252,11 @@ void setup_buffer_inter(VP8_COMP *cpi, MACROBLOCK *x, int idx, int frame_type,
v_buffer[frame_type] = yv12->v_buffer + recon_uvoffset;
#if CONFIG_NEWBESTREFMV
- // Update stats on relative distance of chosen vector to the
- // possible best reference vectors.
- {
- find_mv_refs(xd, xd->mode_info_context,
- xd->prev_mode_info_context,
- frame_type,
- mbmi->ref_mvs[frame_type],
- cpi->common.ref_frame_sign_bias );
- }
+ find_mv_refs(xd, xd->mode_info_context,
+ xd->prev_mode_info_context,
+ frame_type,
+ mbmi->ref_mvs[frame_type],
+ cpi->common.ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd, y_buffer[frame_type],
yv12->y_stride,
@@ -3335,7 +3264,6 @@ void setup_buffer_inter(VP8_COMP *cpi, MACROBLOCK *x, int idx, int frame_type,
&frame_best_ref_mv[frame_type],
&frame_nearest_mv[frame_type],
&frame_near_mv[frame_type]);
- ref_mv[frame_type].as_int = frame_best_ref_mv[frame_type].as_int;
#endif
}
@@ -3377,10 +3305,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
#if CONFIG_PRED_FILTER
int best_filter_state;
#endif
-#if CONFIG_NEWBESTREFMV
- int_mv ref_mv[MAX_REF_FRAMES] = {{0}};
-#endif
-
int switchable_filter_index = 0;
MB_PREDICTION_MODE uv_intra_mode;
@@ -3424,9 +3348,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
setup_buffer_inter(cpi, x, cpi->common.lst_fb_idx, LAST_FRAME,
recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
frame_mv[NEARMV], frame_best_ref_mv,
-#if CONFIG_NEWBESTREFMV
- ref_mv,
-#endif
frame_mdcounts, y_buffer, u_buffer, v_buffer);
}
@@ -3434,9 +3355,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
setup_buffer_inter(cpi, x, cpi->common.gld_fb_idx, GOLDEN_FRAME,
recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
frame_mv[NEARMV], frame_best_ref_mv,
-#if CONFIG_NEWBESTREFMV
- ref_mv,
-#endif
frame_mdcounts, y_buffer, u_buffer, v_buffer);
}
@@ -3444,9 +3362,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
setup_buffer_inter(cpi, x, cpi->common.alt_fb_idx, ALTREF_FRAME,
recon_yoffset, recon_uvoffset, frame_mv[NEARESTMV],
frame_mv[NEARMV], frame_best_ref_mv,
-#if CONFIG_NEWBESTREFMV
- ref_mv,
-#endif
frame_mdcounts, y_buffer, u_buffer, v_buffer);
}
@@ -3500,10 +3415,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mbmi->ref_frame = vp8_mode_order[mode_index].ref_frame;
mbmi->second_ref_frame = vp8_mode_order[mode_index].second_ref_frame;
is_comp_pred = x->e_mbd.mode_info_context->mbmi.second_ref_frame != 0;
-#if CONFIG_NEWBESTREFMV
- mbmi->ref_mv = ref_mv[mbmi->ref_frame];
- mbmi->second_ref_mv = ref_mv[mbmi->second_ref_frame];
-#endif
#if CONFIG_PRED_FILTER
mbmi->pred_filter_enabled = 0;
#endif
@@ -4237,10 +4148,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// macroblock modes
vpx_memcpy(mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
-#if CONFIG_NEWBESTREFMV
- mbmi->ref_mv = ref_mv[best_mbmode.ref_frame];
- mbmi->second_ref_mv = ref_mv[best_mbmode.second_ref_frame];
-#endif
if (best_mbmode.mode == B_PRED) {
for (i = 0; i < 16; i++) {
xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
diff --git a/vp8/encoder/rdopt.h b/vp8/encoder/rdopt.h
index 0e36a519d..448a70244 100644
--- a/vp8/encoder/rdopt.h
+++ b/vp8/encoder/rdopt.h
@@ -35,9 +35,4 @@ extern void vp8_mv_pred
extern void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[]);
extern void vp8_init_me_luts();
extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
-extern void rd_update_mvcount(VP8_COMP *cpi,
- MACROBLOCK *x,
- int_mv *best_ref_mv,
- int_mv *second_best_ref_mv);
-
#endif