author     Ronald S. Bultje <rbultje@google.com>    2012-04-18 13:51:58 -0700
committer  Ronald S. Bultje <rbultje@google.com>    2012-04-18 14:05:39 -0700
commit     18433aef17d9c4674de98a329e4e46e5677f846e (patch)
tree       d92072b825a7def3e5392dd00cca05feadfeb3b9 /vp8/encoder
parent     1cc406ab4a16549fc3b44c0b20f7e81dfc2b649c (diff)
Compound prediction for splitmv macroblocks.
Change-Id: I0af3395500b1cb0ed629249eb6636a0c9322cb18
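For context: with compound prediction, the final predictor for a block is the rounded average of two motion-compensated predictions, one per reference frame, and this change extends that to the per-partition vectors of SPLITMV macroblocks. A minimal sketch of the averaging step (illustrative only; in the tree this is performed by the subpixel_predict_avg* RTCD functions wired up in encodeframe.c below):

    /* Illustrative sketch, not the in-tree implementation: average a
     * second prediction into an existing one, rounding to nearest. */
    static void average_second_prediction(unsigned char *pred,
                                          const unsigned char *second_pred,
                                          int n)
    {
        int i;
        for (i = 0; i < n; i++)
            pred[i] = (pred[i] + second_pred[i] + 1) >> 1;
    }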
Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/bitstream.c        60
-rw-r--r--  vp8/encoder/block.h             2
-rw-r--r--  vp8/encoder/encodeframe.c       6
-rw-r--r--  vp8/encoder/firstpass.c        67
-rw-r--r--  vp8/encoder/mcomp.c             6
-rw-r--r--  vp8/encoder/onyx_if.c          29
-rw-r--r--  vp8/encoder/onyx_int.h          6
-rw-r--r--  vp8/encoder/rdopt.c           366
-rw-r--r--  vp8/encoder/temporal_filter.c  12
9 files changed, 375 insertions, 179 deletions
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 198e1c324..033487e51 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -963,7 +963,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
}
else
{
- int_mv best_mv;
+ int_mv best_mv, best_second_mv;
int ct[4];
vp8_prob mv_ref_p [VP8_MVREFS-1];
@@ -975,7 +975,6 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
rf, cpi->common.ref_frame_sign_bias);
vp8_mv_ref_probs(&cpi->common, mv_ref_p, ct);
-
#ifdef ENTROPY_STATS
accum_mv_refs(mode, ct);
#endif
@@ -992,6 +991,25 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
vp8_accum_mv_refs(&cpi->common, mode, ct);
}
+ if (mi->second_ref_frame &&
+ (mode == NEWMV || mode == SPLITMV))
+ {
+ int_mv n1, n2;
+
+ vp8_find_near_mvs(xd, m,
+ prev_m,
+ &n1, &n2, &best_second_mv, ct,
+ mi->second_ref_frame, cpi->common.ref_frame_sign_bias);
+ }
+
+ // Does this macroblock use compound prediction?
+ // (Coded here only if not already decided at the frame/segment level.)
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
+ {
+ vp8_write(w, mi->second_ref_frame != INTRA_FRAME,
+ get_pred_prob( pc, xd, PRED_COMP ) );
+ }
+
{
switch (mode) /* new, split require MVs */
{
@@ -1007,30 +1025,16 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
#endif
write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
- {
- vp8_write(w,
- mi->second_ref_frame != INTRA_FRAME,
- get_pred_prob( pc, xd, PRED_COMP ) );
- }
if (mi->second_ref_frame)
{
- const int second_rf = mi->second_ref_frame;
- int_mv n1, n2;
- int ct[4];
- vp8_find_near_mvs(xd, m,
- prev_m,
- &n1, &n2, &best_mv,
- ct, second_rf,
- cpi->common.ref_frame_sign_bias);
#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv)
write_mv_hp(w, &mi->second_mv.as_mv,
- &best_mv, mvc_hp);
+ &best_second_mv, mvc_hp);
else
#endif
- write_mv(w, &mi->second_mv.as_mv, &best_mv,
- mvc);
+ write_mv(w, &mi->second_mv.as_mv,
+ &best_second_mv, mvc);
}
break;
case SPLITMV:
@@ -1082,18 +1086,24 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
#endif
write_mv(w, &blockmv.as_mv, &best_mv,
(const MV_CONTEXT *) mvc);
+
+ if (mi->second_ref_frame)
+ {
+#if CONFIG_HIGH_PRECISION_MV
+ if (xd->allow_high_precision_mv)
+ write_mv_hp(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+ &best_second_mv, (const MV_CONTEXT_HP *) mvc_hp);
+ else
+#endif
+ write_mv(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+ &best_second_mv, (const MV_CONTEXT *) mvc);
+ }
}
}
while (++j < cpi->mb.partition_info->count);
}
break;
default:
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
- {
- vp8_write(w,
- mi->second_ref_frame != INTRA_FRAME,
- get_pred_prob( pc, xd, PRED_COMP ) );
- }
break;
}
}
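The net effect of the bitstream.c hunks: the compound-prediction flag is hoisted out of the per-mode cases and written once per inter macroblock, the near-MV lookup for the second reference happens once up front, and the second motion vector is then coded after the first for NEWMV and per partition for SPLITMV. A condensed outline (a simplification of the code above, with the CONFIG_HIGH_PRECISION_MV branches omitted):

    if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
        vp8_write(w, mi->second_ref_frame != INTRA_FRAME,
                  get_pred_prob(pc, xd, PRED_COMP));
    switch (mode)
    {
    case NEWMV:
        write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
        if (mi->second_ref_frame)
            write_mv(w, &mi->second_mv.as_mv, &best_second_mv, mvc);
        break;
    case SPLITMV:   /* for each coded partition j */
        write_mv(w, &blockmv.as_mv, &best_mv, mvc);
        if (mi->second_ref_frame)
            write_mv(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
                     &best_second_mv, mvc);
        break;
    }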
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 83dddf9fa..cca32f31f 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -45,6 +45,7 @@ typedef struct
short zbin_extra;
unsigned char **base_src;
+ unsigned char **base_second_src;
int src;
int src_stride;
@@ -60,6 +61,7 @@ typedef struct
{
B_PREDICTION_MODE mode;
int_mv mv;
+ int_mv second_mv;
} bmi[16];
} PARTITION_INFO;
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index bc17cd4bc..15a131412 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -428,6 +428,8 @@ static void update_state (VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx)
xd->mode_info_context->mbmi.mv.as_int =
x->partition_info->bmi[15].mv.as_int;
+ xd->mode_info_context->mbmi.second_mv.as_int =
+ x->partition_info->bmi[15].second_mv.as_int;
}
if (cpi->common.frame_type == KEY_FRAME)
@@ -1085,6 +1087,8 @@ static void encode_frame_internal(VP8_COMP *cpi)
&cpi->common.rtcd.subpix, sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
@@ -1132,6 +1136,8 @@ static void encode_frame_internal(VP8_COMP *cpi)
&cpi->common.rtcd.subpix, bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear16x16);
+ xd->subpixel_predict_avg = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
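These encodeframe.c hunks fill in the previously missing 4x4 averaging predictor (sixtap_avg4x4 / bilinear_avg4x4), which the compound SPLITMV path needs because vp8_build_2nd_inter_predictors_b (used in rdopt.c below) predicts per 4x4 sub-block. An *_avg predictor filters as usual but averages the result into the destination instead of overwriting it, roughly like this (filtered[][] is a hypothetical name for the filter output):

    for (r = 0; r < 4; r++)
        for (c = 0; c < 4; c++)
            dst[r * dst_stride + c] =
                (dst[r * dst_stride + c] + filtered[r][c] + 1) >> 1;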
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 5212007d8..2022302ab 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -583,56 +583,43 @@ void vp8_first_pass(VP8_COMP *cpi)
// Other than for the first frame do a motion search
if (cm->current_video_frame > 0)
{
- BLOCKD *d = &x->e_mbd.block[0];
- MV tmp_mv = {0, 0};
int tmp_err;
int motion_error = INT_MAX;
+ int_mv mv, tmp_mv;
// Simple 0,0 motion with no mv overhead
zz_motion_search( cpi, x, lst_yv12, &motion_error, recon_yoffset );
- d->bmi.mv.as_mv.row = 0;
- d->bmi.mv.as_mv.col = 0;
+ mv.as_int = tmp_mv.as_int = 0;
// Test last reference frame using the previous best mv as the
// starting point (best reference) for the search
first_pass_motion_search(cpi, x, &best_ref_mv,
- &d->bmi.mv.as_mv, lst_yv12,
+ &mv.as_mv, lst_yv12,
&motion_error, recon_yoffset);
// If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
if (best_ref_mv.as_int)
{
tmp_err = INT_MAX;
- first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv,
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv.as_mv,
lst_yv12, &tmp_err, recon_yoffset);
if ( tmp_err < motion_error )
{
motion_error = tmp_err;
- d->bmi.mv.as_mv.row = tmp_mv.row;
- d->bmi.mv.as_mv.col = tmp_mv.col;
+ mv.as_int = tmp_mv.as_int;
}
}
// Experimental search in a second reference frame ((0,0) based only)
if (cm->current_video_frame > 1)
{
- first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, gld_yv12, &gf_motion_error, recon_yoffset);
+ first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv.as_mv, gld_yv12, &gf_motion_error, recon_yoffset);
if ((gf_motion_error < motion_error) && (gf_motion_error < this_error))
{
second_ref_count++;
- //motion_error = gf_motion_error;
- //d->bmi.mv.as_mv.row = tmp_mv.row;
- //d->bmi.mv.as_mv.col = tmp_mv.col;
}
- /*else
- {
- xd->pre.y_buffer = cm->last_frame.y_buffer + recon_yoffset;
- xd->pre.u_buffer = cm->last_frame.u_buffer + recon_uvoffset;
- xd->pre.v_buffer = cm->last_frame.v_buffer + recon_uvoffset;
- }*/
-
// Reset to last frame as reference buffer
xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset;
@@ -656,60 +643,60 @@ void vp8_first_pass(VP8_COMP *cpi)
neutral_count++;
}
- d->bmi.mv.as_mv.row <<= 3;
- d->bmi.mv.as_mv.col <<= 3;
+ mv.as_mv.row <<= 3;
+ mv.as_mv.col <<= 3;
this_error = motion_error;
- vp8_set_mbmode_and_mvs(x, NEWMV, &d->bmi.mv);
+ vp8_set_mbmode_and_mvs(x, NEWMV, &mv);
vp8_encode_inter16x16y(IF_RTCD(&cpi->rtcd), x);
- sum_mvr += d->bmi.mv.as_mv.row;
- sum_mvr_abs += abs(d->bmi.mv.as_mv.row);
- sum_mvc += d->bmi.mv.as_mv.col;
- sum_mvc_abs += abs(d->bmi.mv.as_mv.col);
- sum_mvrs += d->bmi.mv.as_mv.row * d->bmi.mv.as_mv.row;
- sum_mvcs += d->bmi.mv.as_mv.col * d->bmi.mv.as_mv.col;
+ sum_mvr += mv.as_mv.row;
+ sum_mvr_abs += abs(mv.as_mv.row);
+ sum_mvc += mv.as_mv.col;
+ sum_mvc_abs += abs(mv.as_mv.col);
+ sum_mvrs += mv.as_mv.row * mv.as_mv.row;
+ sum_mvcs += mv.as_mv.col * mv.as_mv.col;
intercount++;
- best_ref_mv.as_int = d->bmi.mv.as_int;
+ best_ref_mv.as_int = mv.as_int;
// Was the vector non-zero?
- if (d->bmi.mv.as_int)
+ if (mv.as_int)
{
mvcount++;
// Was it different from the last non-zero vector?
- if ( d->bmi.mv.as_int != lastmv_as_int )
+ if ( mv.as_int != lastmv_as_int )
new_mv_count++;
- lastmv_as_int = d->bmi.mv.as_int;
+ lastmv_as_int = mv.as_int;
// Does the Row vector point inwards or outwards
if (mb_row < cm->mb_rows / 2)
{
- if (d->bmi.mv.as_mv.row > 0)
+ if (mv.as_mv.row > 0)
sum_in_vectors--;
- else if (d->bmi.mv.as_mv.row < 0)
+ else if (mv.as_mv.row < 0)
sum_in_vectors++;
}
else if (mb_row > cm->mb_rows / 2)
{
- if (d->bmi.mv.as_mv.row > 0)
+ if (mv.as_mv.row > 0)
sum_in_vectors++;
- else if (d->bmi.mv.as_mv.row < 0)
+ else if (mv.as_mv.row < 0)
sum_in_vectors--;
}
// Does the Column vector point inwards or outwards
if (mb_col < cm->mb_cols / 2)
{
- if (d->bmi.mv.as_mv.col > 0)
+ if (mv.as_mv.col > 0)
sum_in_vectors--;
- else if (d->bmi.mv.as_mv.col < 0)
+ else if (mv.as_mv.col < 0)
sum_in_vectors++;
}
else if (mb_col > cm->mb_cols / 2)
{
- if (d->bmi.mv.as_mv.col > 0)
+ if (mv.as_mv.col > 0)
sum_in_vectors++;
- else if (d->bmi.mv.as_mv.col < 0)
+ else if (mv.as_mv.col < 0)
sum_in_vectors--;
}
}
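The firstpass.c hunks are preparatory cleanup: the first-pass motion vector moves out of BLOCKD's bmi (whose mv field is being repurposed as a first/second pair elsewhere in this patch) and into a local int_mv. The int_mv union is what makes the whole-vector assignments and comparisons above (mv.as_int) well-defined; it is declared in vp8/common/mv.h roughly as:

    typedef struct
    {
        short row;
        short col;
    } MV;

    typedef union
    {
        uint32_t as_int;  /* both components as a single 32-bit word */
        MV       as_mv;
    } int_mv;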
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index c9058ae40..e9f85545f 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -1720,7 +1720,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
unsigned char *bestaddress;
- int_mv *best_mv = &d->bmi.mv;
+ int_mv *best_mv = &d->bmi.as_mv.first;
int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
@@ -1825,7 +1825,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
unsigned char *bestaddress;
- int_mv *best_mv = &d->bmi.mv;
+ int_mv *best_mv = &d->bmi.as_mv.first;
int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
@@ -1968,7 +1968,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int in_what_stride = d->pre_stride;
int mv_stride = d->pre_stride;
unsigned char *bestaddress;
- int_mv *best_mv = &d->bmi.mv;
+ int_mv *best_mv = &d->bmi.as_mv.first;
int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
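The mcomp.c hunks are mechanical renames from d->bmi.mv to d->bmi.as_mv.first. This presumes a companion change on the common side (not included in this vp8/encoder-only diff) that turns b_mode_info's single vector into a pair, presumably along these lines:

    /* Assumed shape of the companion vp8/common change; not shown in
     * this diff. */
    union b_mode_info
    {
        B_PREDICTION_MODE as_mode;
        struct
        {
            int_mv first;   /* vector for the first reference frame */
            int_mv second;  /* vector for the second reference frame */
        } as_mv;
    };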
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index e2fed8ed1..becbe9b42 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -780,6 +780,10 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->thresh_mult[THR_COMP_NEWLA ] = 1000;
sf->thresh_mult[THR_COMP_NEWGA ] = 1000;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 2500;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 5000;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 5000;
+
sf->first_step = 0;
sf->max_step_search_steps = MAX_MVSEARCH_STEPS;
break;
@@ -834,6 +838,10 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->thresh_mult[THR_COMP_NEWLA ] = 1000;
sf->thresh_mult[THR_COMP_NEWGA ] = 1000;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 1700;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 4500;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 4500;
+
if (Speed > 0)
{
/* Disable coefficient optimization above speed 0 */
@@ -845,6 +853,10 @@ void vp8_set_speed_features(VP8_COMP *cpi)
cpi->mode_check_freq[THR_SPLITG] = 2;
cpi->mode_check_freq[THR_SPLITA] = 2;
cpi->mode_check_freq[THR_SPLITMV] = 0;
+
+ cpi->mode_check_freq[THR_COMP_SPLITGA] = 2;
+ cpi->mode_check_freq[THR_COMP_SPLITLG] = 2;
+ cpi->mode_check_freq[THR_COMP_SPLITLA] = 0;
}
if (Speed > 1)
@@ -853,6 +865,10 @@ void vp8_set_speed_features(VP8_COMP *cpi)
cpi->mode_check_freq[THR_SPLITA] = 4;
cpi->mode_check_freq[THR_SPLITMV] = 2;
+ cpi->mode_check_freq[THR_COMP_SPLITGA] = 4;
+ cpi->mode_check_freq[THR_COMP_SPLITLG] = 4;
+ cpi->mode_check_freq[THR_COMP_SPLITLA] = 2;
+
sf->thresh_mult[THR_TM ] = 1500;
sf->thresh_mult[THR_V_PRED ] = 1500;
sf->thresh_mult[THR_H_PRED ] = 1500;
@@ -863,6 +879,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
{
sf->thresh_mult[THR_NEWMV ] = 2000;
sf->thresh_mult[THR_SPLITMV ] = 10000;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 20000;
}
if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
@@ -872,6 +889,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->thresh_mult[THR_NEARG ] = 1500;
sf->thresh_mult[THR_NEWG ] = 2000;
sf->thresh_mult[THR_SPLITG ] = 20000;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 20000;
}
if (cpi->ref_frame_flags & VP8_ALT_FLAG)
@@ -881,6 +899,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->thresh_mult[THR_NEARA ] = 1500;
sf->thresh_mult[THR_NEWA ] = 2000;
sf->thresh_mult[THR_SPLITA ] = 20000;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 10000;
}
sf->thresh_mult[THR_COMP_ZEROLG ] = 1500;
@@ -904,6 +923,10 @@ void vp8_set_speed_features(VP8_COMP *cpi)
cpi->mode_check_freq[THR_SPLITA] = 15;
cpi->mode_check_freq[THR_SPLITMV] = 7;
+ cpi->mode_check_freq[THR_COMP_SPLITGA] = 15;
+ cpi->mode_check_freq[THR_COMP_SPLITLG] = 15;
+ cpi->mode_check_freq[THR_COMP_SPLITLA] = 7;
+
sf->thresh_mult[THR_TM ] = 2000;
sf->thresh_mult[THR_V_PRED ] = 2000;
sf->thresh_mult[THR_H_PRED ] = 2000;
@@ -914,6 +937,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
{
sf->thresh_mult[THR_NEWMV ] = 2000;
sf->thresh_mult[THR_SPLITMV ] = 25000;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = 50000;
}
if (cpi->ref_frame_flags & VP8_GOLD_FLAG)
@@ -923,6 +947,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->thresh_mult[THR_NEARG ] = 2000;
sf->thresh_mult[THR_NEWG ] = 2500;
sf->thresh_mult[THR_SPLITG ] = 50000;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = 50000;
}
if (cpi->ref_frame_flags & VP8_ALT_FLAG)
@@ -932,6 +957,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->thresh_mult[THR_NEARA ] = 2000;
sf->thresh_mult[THR_NEWA ] = 2500;
sf->thresh_mult[THR_SPLITA ] = 50000;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = 25000;
}
sf->thresh_mult[THR_COMP_ZEROLG ] = 2000;
@@ -994,6 +1020,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->thresh_mult[THR_COMP_NEARESTLG] = INT_MAX;
sf->thresh_mult[THR_COMP_NEARLG ] = INT_MAX;
sf->thresh_mult[THR_COMP_NEWLG ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITLG ] = INT_MAX;
}
if ((cpi->ref_frame_flags & (VP8_LAST_FLAG | VP8_ALT_FLAG)) != (VP8_LAST_FLAG | VP8_ALT_FLAG))
@@ -1002,6 +1029,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->thresh_mult[THR_COMP_NEARESTLA] = INT_MAX;
sf->thresh_mult[THR_COMP_NEARLA ] = INT_MAX;
sf->thresh_mult[THR_COMP_NEWLA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITLA ] = INT_MAX;
}
if ((cpi->ref_frame_flags & (VP8_GOLD_FLAG | VP8_ALT_FLAG)) != (VP8_GOLD_FLAG | VP8_ALT_FLAG))
@@ -1010,6 +1038,7 @@ void vp8_set_speed_features(VP8_COMP *cpi)
sf->thresh_mult[THR_COMP_NEARESTGA] = INT_MAX;
sf->thresh_mult[THR_COMP_NEARGA ] = INT_MAX;
sf->thresh_mult[THR_COMP_NEWGA ] = INT_MAX;
+ sf->thresh_mult[THR_COMP_SPLITGA ] = INT_MAX;
}
// Slow quant, dct and trellis not worthwhile for first pass
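The onyx_if.c hunks register the three compound-SPLITMV modes with the existing speed controls: thresh_mult scales the rate-distortion threshold a mode must beat before it is evaluated at all (INT_MAX, as in the last three hunks, disables a mode when one of its reference frames is unavailable), and mode_check_freq makes higher speed levels test a mode only on a fraction of opportunities. The gate that thresh_mult ultimately feeds is visible in the rdopt.c diff below:

    /* A mode is skipped outright when the best rd so far already beats
     * its thresh_mult-scaled threshold. */
    if (best_rd <= cpi->rd_threshes[mode_index])
        continue;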
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 703235c33..f5088b92b 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -41,7 +41,7 @@
#define AF_THRESH 25
#define AF_THRESH2 100
#define ARF_DECAY_THRESH 12
-#define MAX_MODES 33
+#define MAX_MODES 36
#define MIN_THRESHMULT 32
#define MAX_THRESHMULT 512
@@ -204,6 +204,10 @@ typedef enum
THR_COMP_NEWLG = 30,
THR_COMP_NEWLA = 31,
THR_COMP_NEWGA = 32,
+
+ THR_COMP_SPLITLG = 33,
+ THR_COMP_SPLITLA = 34,
+ THR_COMP_SPLITGA = 35,
}
THR_MODES;
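The three new THR_COMP_SPLIT* indices must stay in step with the parallel mode tables extended in rdopt.c below; lining the tables up gives, for the new entries:

    /* index  vp8_mode_order  vp8_ref_frame_order  vp8_second_ref_frame_order */
    /*  33    SPLITMV         LAST_FRAME           GOLDEN_FRAME   (THR_COMP_SPLITLG) */
    /*  34    SPLITMV         ALTREF_FRAME         LAST_FRAME     (THR_COMP_SPLITLA) */
    /*  35    SPLITMV         GOLDEN_FRAME         ALTREF_FRAME   (THR_COMP_SPLITGA) */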
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index e3604fe5e..8acf8777b 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -58,6 +58,8 @@ extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
#define MAXF(a,b) (((a) > (b)) ? (a) : (b))
+#define INVALID_MV 0x80008000
+
static const int auto_speed_thresh[17] =
{
1000,
@@ -127,6 +129,10 @@ const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES] =
NEWMV,
NEWMV,
NEWMV,
+
+ SPLITMV,
+ SPLITMV,
+ SPLITMV,
};
const MV_REFERENCE_FRAME vp8_ref_frame_order[MAX_MODES] =
@@ -177,6 +183,10 @@ const MV_REFERENCE_FRAME vp8_ref_frame_order[MAX_MODES] =
LAST_FRAME,
ALTREF_FRAME,
GOLDEN_FRAME,
+
+ LAST_FRAME,
+ ALTREF_FRAME,
+ GOLDEN_FRAME,
};
const MV_REFERENCE_FRAME vp8_second_ref_frame_order[MAX_MODES] =
@@ -201,6 +211,10 @@ const MV_REFERENCE_FRAME vp8_second_ref_frame_order[MAX_MODES] =
GOLDEN_FRAME,
LAST_FRAME,
ALTREF_FRAME,
+
+ GOLDEN_FRAME,
+ LAST_FRAME,
+ ALTREF_FRAME,
};
static void fill_token_costs(
@@ -1554,7 +1568,10 @@ static int labels2mode(
MACROBLOCK *x,
int const *labelings, int which_label,
B_PREDICTION_MODE this_mode,
- int_mv *this_mv, int_mv *best_ref_mv,
+ int_mv *this_mv, int_mv *this_second_mv,
+ int_mv seg_mvs[MAX_REF_FRAMES - 1],
+ int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv,
int *mvcost[2]
)
{
@@ -1592,21 +1609,42 @@ static int labels2mode(
switch (m = this_mode)
{
case NEW4X4 :
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ {
+ this_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.ref_frame - 1].as_int;
+ this_second_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.second_ref_frame - 1].as_int;
+ }
+
#if CONFIG_HIGH_PRECISION_MV
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost,
102, xd->allow_high_precision_mv);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ {
+ thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost,
+ 102, xd->allow_high_precision_mv);
+ }
#else
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ {
+ thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost, 102);
+ }
#endif
break;
case LEFT4X4:
- this_mv->as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i);
+ this_mv->as_int = col ? d[-1].bmi.as_mv.first.as_int : left_block_mv(mic, i);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ this_second_mv->as_int = col ? d[-1].bmi.as_mv.second.as_int : left_block_second_mv(mic, i);
break;
case ABOVE4X4:
- this_mv->as_int = row ? d[-4].bmi.mv.as_int : above_block_mv(mic, i, mis);
+ this_mv->as_int = row ? d[-4].bmi.as_mv.first.as_int : above_block_mv(mic, i, mis);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ this_second_mv->as_int = row ? d[-4].bmi.as_mv.second.as_int : above_block_second_mv(mic, i, mis);
break;
case ZERO4X4:
this_mv->as_int = 0;
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ this_second_mv->as_int = 0;
break;
default:
break;
@@ -1614,23 +1652,31 @@ static int labels2mode(
if (m == ABOVE4X4) // replace above with left if same
{
- int_mv left_mv;
+ int_mv left_mv, left_second_mv;
- left_mv.as_int = col ? d[-1].bmi.mv.as_int :
+ left_mv.as_int = col ? d[-1].bmi.as_mv.first.as_int :
left_block_mv(mic, i);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ left_second_mv.as_int = col ? d[-1].bmi.as_mv.second.as_int :
+ left_block_second_mv(mic, i);
- if (left_mv.as_int == this_mv->as_int)
+ if (left_mv.as_int == this_mv->as_int &&
+ (!xd->mode_info_context->mbmi.second_ref_frame ||
+ left_second_mv.as_int == this_second_mv->as_int))
m = LEFT4X4;
}
cost = x->inter_bmode_costs[ m];
}
- d->bmi.mv.as_int = this_mv->as_int;
+ d->bmi.as_mv.first.as_int = this_mv->as_int;
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ d->bmi.as_mv.second.as_int = this_second_mv->as_int;
x->partition_info->bmi[i].mode = m;
x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
-
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ x->partition_info->bmi[i].second_mv.as_int = this_second_mv->as_int;
}
while (++i < 16);
@@ -1673,6 +1719,8 @@ static unsigned int vp8_encode_inter_mb_segment(
int thisdistortion;
vp8_build_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict);
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ vp8_build_2nd_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict_avg);
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, bd, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
@@ -1694,7 +1742,7 @@ static const unsigned int segmentation_to_sseshift[4] = {3, 3, 2, 0};
typedef struct
{
- int_mv *ref_mv;
+ int_mv *ref_mv, *second_ref_mv;
int_mv mvp;
int segment_rd;
@@ -1703,7 +1751,7 @@ typedef struct
int d;
int segment_yrate;
B_PREDICTION_MODE modes[16];
- int_mv mvs[16];
+ int_mv mvs[16], second_mvs[16];
unsigned char eobs[16];
int mvthresh;
@@ -1716,7 +1764,8 @@ typedef struct
static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
- BEST_SEG_INFO *bsi, unsigned int segmentation)
+ BEST_SEG_INFO *bsi, unsigned int segmentation,
+ int_mv seg_mvs[16 /* n_blocks */][MAX_REF_FRAMES - 1])
{
int i;
int const *labels;
@@ -1771,7 +1820,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
for (i = 0; i < label_count; i++)
{
- int_mv mode_mv[B_MODE_COUNT];
+ int_mv mode_mv[B_MODE_COUNT], second_mode_mv[B_MODE_COUNT];
int best_label_rd = INT_MAX;
B_PREDICTION_MODE mode_selected = ZERO4X4;
int bestlabelyrate = 0;
@@ -1792,7 +1841,8 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
ta_s = (ENTROPY_CONTEXT *)&t_above_s;
tl_s = (ENTROPY_CONTEXT *)&t_left_s;
- if (this_mode == NEW4X4)
+ // motion search for newmv (single predictor case only)
+ if (!x->e_mbd.mode_info_context->mbmi.second_ref_frame && this_mode == NEW4X4)
{
int sseshift;
int num00;
@@ -1823,9 +1873,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
// use previous block's result as next block's MV predictor.
if (segmentation == BLOCK_4X4 && i>0)
{
- bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.mv.as_int;
+ bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.as_mv.first.as_int;
if (i==4 || i==8 || i==12)
- bsi->mvp.as_int = x->e_mbd.block[i-4].bmi.mv.as_int;
+ bsi->mvp.as_int = x->e_mbd.block[i-4].bmi.as_mv.first.as_int;
step_param = 2;
}
}
@@ -1894,12 +1944,12 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEW4X4].as_int = e->bmi.mv.as_int;
+ mode_mv[NEW4X4].as_int = e->bmi.as_mv.first.as_int;
}
else
{
// The full search result is actually worse so re-instate the previous best vector
- e->bmi.mv.as_int = mode_mv[NEW4X4].as_int;
+ e->bmi.as_mv.first.as_int = mode_mv[NEW4X4].as_int;
}
}
}
@@ -1911,11 +1961,23 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
bsi->ref_mv, x->errorperbit, v_fn_ptr, XMVCOST,
&distortion, &sse);
+
+ // save motion search result for use in compound prediction
+ seg_mvs[i][x->e_mbd.mode_info_context->mbmi.ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
}
} /* NEW4X4 */
+ else if (x->e_mbd.mode_info_context->mbmi.second_ref_frame && this_mode == NEW4X4)
+ {
+ // Motion search not completed for one of the references? Then skip NEW4X4 for this block under compound prediction
+ if (seg_mvs[i][x->e_mbd.mode_info_context->mbmi.second_ref_frame - 1].as_int == INVALID_MV ||
+ seg_mvs[i][x->e_mbd.mode_info_context->mbmi.ref_frame - 1].as_int == INVALID_MV)
+ {
+ continue;
+ }
+ }
rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
- bsi->ref_mv, XMVCOST);
+ &second_mode_mv[this_mode], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
// Trap vectors that reach beyond the UMV borders
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
@@ -1923,6 +1985,16 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
{
continue;
}
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ {
+ if (((second_mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
+ ((second_mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
+ ((second_mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
+ ((second_mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
+ {
+ continue;
+ }
+ }
distortion = vp8_encode_inter_mb_segment(
x, labels, i,
@@ -1951,7 +2023,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
- bsi->ref_mv, XMVCOST);
+ &second_mode_mv[mode_selected], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
br += sbr;
bd += sbd;
@@ -1979,6 +2051,8 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
BLOCKD *bd = &x->e_mbd.block[i];
bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ bsi->second_mvs[i].as_mv = x->partition_info->bmi[i].second_mv.as_mv;
bsi->modes[i] = x->partition_info->bmi[i].mode;
bsi->eobs[i] = bd->eob;
}
@@ -2000,10 +2074,11 @@ void vp8_cal_step_param(int sr, int *sp)
}
static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
- int_mv *best_ref_mv, int best_rd,
+ int_mv *best_ref_mv, int_mv *second_best_ref_mv, int best_rd,
int *mdcounts, int *returntotrate,
int *returnyrate, int *returndistortion,
- int mvthresh)
+ int mvthresh,
+ int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1])
{
int i;
BEST_SEG_INFO bsi;
@@ -2012,6 +2087,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
bsi.segment_rd = best_rd;
bsi.ref_mv = best_ref_mv;
+ bsi.second_ref_mv = second_best_ref_mv;
bsi.mvp.as_int = best_ref_mv->as_int;
bsi.mvthresh = mvthresh;
bsi.mdcounts = mdcounts;
@@ -2025,16 +2101,16 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
{
/* for now, we will keep the original segmentation order
when in best quality mode */
- rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
- rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
+ rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
+ rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
}
else
{
int sr;
- rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
if (bsi.segment_rd < best_rd)
@@ -2074,7 +2150,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3);
vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
+ rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
}
/* block 16X8 */
@@ -2085,7 +2161,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3);
vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
+ rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
}
/* If 8x8 is better than 16x8/8x16, then do 4x4 search */
@@ -2093,7 +2169,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
{
bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
- rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
+ rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
}
/* restore UMV window */
@@ -2109,7 +2185,9 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
{
BLOCKD *bd = &x->e_mbd.block[i];
- bd->bmi.mv.as_int = bsi.mvs[i].as_int;
+ bd->bmi.as_mv.first.as_int = bsi.mvs[i].as_int;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ bd->bmi.as_mv.second.as_int = bsi.second_mvs[i].as_int;
bd->eob = bsi.eobs[i];
}
@@ -2129,11 +2207,15 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
x->partition_info->bmi[i].mode = bsi.modes[j];
x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ x->partition_info->bmi[i].second_mv.as_mv = bsi.second_mvs[j].as_mv;
}
/*
* used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
*/
x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ x->partition_info->bmi[15].second_mv.as_int = bsi.second_mvs[15].as_int;
return bsi.segment_rd;
}
@@ -2564,7 +2646,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
union b_mode_info best_bmodes[16];
MB_MODE_INFO best_mbmode;
PARTITION_INFO best_partition;
- int_mv best_ref_mv;
+ int_mv best_ref_mv, second_best_ref_mv;
int_mv mode_mv[MB_MODE_COUNT];
MB_PREDICTION_MODE this_mode;
int num00;
@@ -2615,6 +2697,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
unsigned char *v_buffer[4];
unsigned int ref_costs[MAX_REF_FRAMES];
+ int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1];
vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
@@ -2622,10 +2705,24 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
for (i = 0; i < 4; i++)
{
-#define INVALID_MV 0x80008000
mc_search_result[i].as_int = INVALID_MV;
}
+ for (i = 0; i < BLOCK_MAX_SEGMENTS - 1; i++)
+ {
+ int j;
+
+ for (j = 0; j < 16; j++)
+ {
+ int k;
+
+ for (k = 0; k < MAX_REF_FRAMES - 1; k++)
+ {
+ seg_mvs[i][j][k].as_int = INVALID_MV;
+ }
+ }
+ }
+
if (cpi->ref_frame_flags & VP8_LAST_FLAG)
{
YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx];
@@ -2709,7 +2806,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// Test best rd so far against threshold for trying this mode.
if (best_rd <= cpi->rd_threshes[mode_index])
+ {
continue;
+ }
// These variables hold our rolling total cost and distortion for this mode
rate2 = 0;
@@ -2756,20 +2855,34 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
{
if (this_mode != ZEROMV ||
x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
+ {
continue;
+ }
}
}
/* everything but intra */
if (x->e_mbd.mode_info_context->mbmi.ref_frame)
{
- x->e_mbd.pre.y_buffer = y_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
- x->e_mbd.pre.u_buffer = u_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
- x->e_mbd.pre.v_buffer = v_buffer[x->e_mbd.mode_info_context->mbmi.ref_frame];
- mode_mv[NEARESTMV] = frame_nearest_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
- mode_mv[NEARMV] = frame_near_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
- best_ref_mv = frame_best_ref_mv[x->e_mbd.mode_info_context->mbmi.ref_frame];
- vpx_memcpy(mdcounts, frame_mdcounts[x->e_mbd.mode_info_context->mbmi.ref_frame], sizeof(mdcounts));
+ int ref = x->e_mbd.mode_info_context->mbmi.ref_frame;
+
+ x->e_mbd.pre.y_buffer = y_buffer[ref];
+ x->e_mbd.pre.u_buffer = u_buffer[ref];
+ x->e_mbd.pre.v_buffer = v_buffer[ref];
+ mode_mv[NEARESTMV] = frame_nearest_mv[ref];
+ mode_mv[NEARMV] = frame_near_mv[ref];
+ best_ref_mv = frame_best_ref_mv[ref];
+ vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
+ }
+
+ if (x->e_mbd.mode_info_context->mbmi.second_ref_frame)
+ {
+ int ref = x->e_mbd.mode_info_context->mbmi.second_ref_frame;
+
+ x->e_mbd.second_pre.y_buffer = y_buffer[ref];
+ x->e_mbd.second_pre.u_buffer = u_buffer[ref];
+ x->e_mbd.second_pre.v_buffer = v_buffer[ref];
+ second_best_ref_mv = frame_best_ref_mv[ref];
}
// Experimental code. Special case for gf and arf zeromv modes.
@@ -2867,9 +2980,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) ? cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) ? cpi->rd_threshes[THR_NEWG]: this_rd_thresh;
- tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
+ tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, NULL,
best_yrd, mdcounts,
- &rate, &rate_y, &distortion, this_rd_thresh) ;
+ &rate, &rate_y, &distortion, this_rd_thresh, seg_mvs) ;
rate2 += rate;
distortion2 += distortion;
@@ -2887,6 +3000,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
this_rd = INT_MAX;
disable_skip = 1;
}
+ mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
+ compmode_cost =
+ vp8_cost_bit( get_pred_prob( cm, xd, PRED_COMP ), 0 );
}
break;
case DC_PRED:
@@ -2973,11 +3089,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// Initial step/diamond search
{
- bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.mv,
+ bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.as_mv.first,
step_param, sadpb, &num00,
&cpi->fn_ptr[BLOCK_16X16],
XMVCOST, &best_ref_mv);
- mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
// Further step/diamond searches as necessary
n = 0;
@@ -2999,7 +3115,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
else
{
thissme = cpi->diamond_search_sad(x, b, d, &mvp_full,
- &d->bmi.mv, step_param + n, sadpb, &num00,
+ &d->bmi.as_mv.first, step_param + n, sadpb, &num00,
&cpi->fn_ptr[BLOCK_16X16],
XMVCOST, &best_ref_mv);
@@ -3010,11 +3126,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
}
else
{
- d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
+ d->bmi.as_mv.first.as_int = mode_mv[NEWMV].as_int;
}
}
}
@@ -3030,18 +3146,18 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
search_range = 8;
//thissme = cpi->full_search_sad(x, b, d, &d->bmi.mv.as_mv, sadpb, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
- thissme = cpi->refining_search_sad(x, b, d, &d->bmi.mv, sadpb,
+ thissme = cpi->refining_search_sad(x, b, d, &d->bmi.as_mv.first, sadpb,
search_range, &cpi->fn_ptr[BLOCK_16X16],
XMVCOST, &best_ref_mv);
if (thissme < bestsme)
{
bestsme = thissme;
- mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
}
else
{
- d->bmi.mv.as_int = mode_mv[NEWMV].as_int;
+ d->bmi.as_mv.first.as_int = mode_mv[NEWMV].as_int;
}
}
@@ -3054,14 +3170,14 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
{
int dis; /* TODO: use dis in distortion calculation later. */
unsigned int sse;
- cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv,
+ cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv.first, &best_ref_mv,
x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16],
XMVCOST, &dis, &sse);
}
- mc_search_result[x->e_mbd.mode_info_context->mbmi.ref_frame].as_int = d->bmi.mv.as_int;
+ mc_search_result[x->e_mbd.mode_info_context->mbmi.ref_frame].as_int = d->bmi.as_mv.first.as_int;
- mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
+ mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;
// Add the new motion vector cost to our rolling cost variable
#if CONFIG_HIGH_PRECISION_MV
@@ -3081,7 +3197,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// Do not bother proceeding if the vector (from newmv, nearest or near) is 0,0, as this should then be coded using the zeromv mode.
if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) && (mode_mv[this_mode].as_int == 0))
+ {
continue;
+ }
case ZEROMV:
@@ -3090,7 +3208,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// because of the lack of break statements in the previous two cases.
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
+ {
continue;
+ }
vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]);
vp8_build_inter16x16_predictors_mby(&x->e_mbd);
@@ -3216,68 +3336,104 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
break;
case NEARMV:
if (frame_near_mv[ref1].as_int == 0 || frame_near_mv[ref2].as_int == 0)
+ {
continue;
+ }
x->e_mbd.mode_info_context->mbmi.mv.as_int = frame_near_mv[ref1].as_int;
x->e_mbd.mode_info_context->mbmi.second_mv.as_int = frame_near_mv[ref2].as_int;
break;
case NEARESTMV:
if (frame_nearest_mv[ref1].as_int == 0 || frame_nearest_mv[ref2].as_int == 0)
+ {
continue;
+ }
x->e_mbd.mode_info_context->mbmi.mv.as_int = frame_nearest_mv[ref1].as_int;
x->e_mbd.mode_info_context->mbmi.second_mv.as_int = frame_nearest_mv[ref2].as_int;
break;
+ case SPLITMV:
+ {
+ int tmp_rd;
+ int this_rd_thresh;
+
+ this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) ? cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
+ this_rd_thresh = (x->e_mbd.mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) ? cpi->rd_threshes[THR_NEWG]: this_rd_thresh;
+
+ tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, &second_best_ref_mv,
+ best_yrd, mdcounts,
+ &rate, &rate_y, &distortion, this_rd_thresh, seg_mvs) ;
+
+ rate2 += rate;
+ distortion2 += distortion;
+
+ // If even the 'Y' rd value of split is higher than best so far then don't bother looking at UV
+ if (tmp_rd < best_yrd)
+ {
+ // Now work out UV cost and add it in
+ rd_inter4x4_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel);
+ rate2 += rate_uv;
+ distortion2 += distortion_uv;
+ }
+ else
+ {
+ this_rd = INT_MAX;
+ disable_skip = 1;
+ }
+ }
+ break;
default:
break;
}
- /* Add in the Mv/mode cost */
- rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
-
- vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.mv, xd);
- vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.second_mv, xd);
- if (((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) < x->mv_row_min) ||
- ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) > x->mv_row_max) ||
- ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) < x->mv_col_min) ||
- ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) > x->mv_col_max) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) < x->mv_row_min) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) > x->mv_row_max) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) < x->mv_col_min) ||
- ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) > x->mv_col_max))
- continue;
+ if (this_mode != SPLITMV)
+ {
+ /* Add in the Mv/mode cost */
+ rate2 += vp8_cost_mv_ref(cpi, this_mode, mdcounts);
+
+ vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.mv, xd);
+ vp8_clamp_mv2(&x->e_mbd.mode_info_context->mbmi.second_mv, xd);
+ if (((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) < x->mv_row_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row >> 3) > x->mv_row_max) ||
+ ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) < x->mv_col_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col >> 3) > x->mv_col_max) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) < x->mv_row_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.row >> 3) > x->mv_row_max) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) < x->mv_col_min) ||
+ ((x->e_mbd.mode_info_context->mbmi.second_mv.as_mv.col >> 3) > x->mv_col_max))
+ {
+ continue;
+ }
- /* build first and second prediction */
- vp8_build_inter16x16_predictors_mby(&x->e_mbd);
- vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
- /* do second round and average the results */
- x->e_mbd.second_pre.y_buffer = y_buffer[ref2];
- x->e_mbd.second_pre.u_buffer = u_buffer[ref2];
- x->e_mbd.second_pre.v_buffer = v_buffer[ref2];
- vp8_build_2nd_inter16x16_predictors_mb(&x->e_mbd, x->e_mbd.predictor,
- &x->e_mbd.predictor[256],
- &x->e_mbd.predictor[320], 16, 8);
-
- /* Y cost and distortion */
- if(cpi->common.txfm_mode == ALLOW_8X8)
- macro_block_yrd_8x8(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd));
- else
- macro_block_yrd(x, &rate_y, &distortion,
- IF_RTCD(&cpi->rtcd));
+ /* build first and second prediction */
+ vp8_build_inter16x16_predictors_mby(&x->e_mbd);
+ vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
+ /* do second round and average the results */
+ vp8_build_2nd_inter16x16_predictors_mb(&x->e_mbd, x->e_mbd.predictor,
+ &x->e_mbd.predictor[256],
+ &x->e_mbd.predictor[320], 16, 8);
+
+ /* Y cost and distortion */
+ if (cpi->common.txfm_mode == ALLOW_8X8)
+ macro_block_yrd_8x8(x, &rate_y, &distortion,
+ IF_RTCD(&cpi->rtcd));
+ else
+ macro_block_yrd(x, &rate_y, &distortion,
+ IF_RTCD(&cpi->rtcd));
- rate2 += rate_y;
- distortion2 += distortion;
+ rate2 += rate_y;
+ distortion2 += distortion;
- /* UV cost and distortion */
- if(cpi->common.txfm_mode == ALLOW_8X8)
- rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
- &distortion_uv,
- cpi->common.full_pixel);
- else
- rd_inter16x16_uv(cpi, x, &rate_uv,
- &distortion_uv,
- cpi->common.full_pixel);
- rate2 += rate_uv;
- distortion2 += distortion_uv;
+ /* UV cost and distortion */
+ if(cpi->common.txfm_mode == ALLOW_8X8)
+ rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
+ &distortion_uv,
+ cpi->common.full_pixel);
+ else
+ rd_inter16x16_uv(cpi, x, &rate_uv,
+ &distortion_uv,
+ cpi->common.full_pixel);
+ rate2 += rate_uv;
+ distortion2 += distortion_uv;
+ }
/* don't bother w/ skip, we would never have come here if skip were enabled */
x->e_mbd.mode_info_context->mbmi.mode = this_mode;
@@ -3399,8 +3555,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
*returnintra = distortion2 ;
}
- if (!disable_skip &&
- (this_mode == SPLITMV || x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME))
+ if (!disable_skip && x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
if (this_rd < best_comp_rd)
best_comp_rd = this_rd;
@@ -3470,8 +3625,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
/* keep record of best compound/single-only prediction */
if (!disable_skip &&
- x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
- this_mode != SPLITMV)
+ x->e_mbd.mode_info_context->mbmi.ref_frame != INTRA_FRAME)
{
int single_rd, hybrid_rd, single_rate, hybrid_rate;
@@ -3581,12 +3735,17 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if (best_mbmode.mode == SPLITMV)
{
for (i = 0; i < 16; i++)
- xd->mode_info_context->bmi[i].mv.as_int = best_bmodes[i].mv.as_int;
+ xd->mode_info_context->bmi[i].as_mv.first.as_int = best_bmodes[i].as_mv.first.as_int;
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ for (i = 0; i < 16; i++)
+ xd->mode_info_context->bmi[i].as_mv.second.as_int = best_bmodes[i].as_mv.second.as_int;
vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
x->e_mbd.mode_info_context->mbmi.mv.as_int =
x->partition_info->bmi[15].mv.as_int;
+ x->e_mbd.mode_info_context->mbmi.second_mv.as_int =
+ x->partition_info->bmi[15].second_mv.as_int;
}
if (best_single_rd == INT_MAX)
@@ -3729,8 +3888,7 @@ int vp8cx_pick_mode_inter_macroblock
cpi->rd_single_diff += single;
cpi->rd_comp_diff += compound;
cpi->rd_hybrid_diff += hybrid;
- if (xd->mode_info_context->mbmi.ref_frame &&
- xd->mode_info_context->mbmi.mode != SPLITMV)
+ if (xd->mode_info_context->mbmi.ref_frame)
{
unsigned char pred_context;
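The heart of the rdopt.c change is the seg_mvs cache, indexed as seg_mvs[segmentation][block][ref_frame - 1] and initialized to INVALID_MV. The single-prediction SPLITMV passes store each sub-block's motion search result there; the compound SPLITMV modes never search, instead pulling both vectors from the cache in labels2mode and skipping NEW4X4 for any block where either reference still holds INVALID_MV. Condensed from the labels2mode hunk above (mbmi abbreviates xd->mode_info_context->mbmi, and seg_mvs here is the per-block slice passed in):

    if (mbmi.second_ref_frame)  /* compound NEW4X4: reuse cached searches */
    {
        this_mv->as_int        = seg_mvs[mbmi.ref_frame - 1].as_int;
        this_second_mv->as_int = seg_mvs[mbmi.second_ref_frame - 1].as_int;
    }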
diff --git a/vp8/encoder/temporal_filter.c b/vp8/encoder/temporal_filter.c
index 7440883e0..982bab627 100644
--- a/vp8/encoder/temporal_filter.c
+++ b/vp8/encoder/temporal_filter.c
@@ -224,7 +224,7 @@ static int vp8_temporal_filter_find_matching_mb_c
/*cpi->sf.search_method == HEX*/
// TODO Check that the 16x16 vf & sdf are selected here
bestsme = vp8_hex_search(x, b, d,
- &best_ref_mv1_full, &d->bmi.mv,
+ &best_ref_mv1_full, &d->bmi.as_mv.first,
step_param,
sadpb,
&cpi->fn_ptr[BLOCK_16X16],
@@ -243,7 +243,7 @@ static int vp8_temporal_filter_find_matching_mb_c
int distortion;
unsigned int sse;
bestsme = cpi->find_fractional_mv_step(x, b, d,
- &d->bmi.mv, &best_ref_mv1,
+ &d->bmi.as_mv.first, &best_ref_mv1,
x->errorperbit, &cpi->fn_ptr[BLOCK_16X16],
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?mvcost_hp:mvcost,
@@ -333,8 +333,8 @@ static void vp8_temporal_filter_iterate_c
if (cpi->frames[frame] == NULL)
continue;
- mbd->block[0].bmi.mv.as_mv.row = 0;
- mbd->block[0].bmi.mv.as_mv.col = 0;
+ mbd->block[0].bmi.as_mv.first.as_mv.row = 0;
+ mbd->block[0].bmi.as_mv.first.as_mv.col = 0;
#if ALT_REF_MC_ENABLED
#define THRESH_LOW 10000
@@ -364,8 +364,8 @@ static void vp8_temporal_filter_iterate_c
cpi->frames[frame]->u_buffer + mb_uv_offset,
cpi->frames[frame]->v_buffer + mb_uv_offset,
cpi->frames[frame]->y_stride,
- mbd->block[0].bmi.mv.as_mv.row,
- mbd->block[0].bmi.mv.as_mv.col,
+ mbd->block[0].bmi.as_mv.first.as_mv.row,
+ mbd->block[0].bmi.as_mv.first.as_mv.col,
predictor);
// Apply the filter (YUV)