Diffstat (limited to 'vp8/encoder/rdopt.c')
-rw-r--r--  vp8/encoder/rdopt.c  76
1 file changed, 46 insertions(+), 30 deletions(-)
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 5bd0c0a5e..d33d18bf2 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -232,10 +232,11 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME))
{
- if (cpi->next_iiratio > 31)
+ if (cpi->twopass.next_iiratio > 31)
cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
else
- cpi->RDMULT += (cpi->RDMULT * rd_iifactor[cpi->next_iiratio]) >> 4;
+ cpi->RDMULT +=
+ (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
}
cpi->mb.errorperbit = (cpi->RDMULT / 100);
@@ -455,17 +456,21 @@ int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd)
if ((mv_row | mv_col) & 7)
{
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
+ VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
+ mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
+ VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
+ mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
sse2 += sse1;
}
else
{
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
+ int sum2, sum1;
+ VARIANCE_INVOKE(rtcd, get8x8var)(uptr, pre_stride,
+ upred_ptr, uv_stride, &sse2, &sum2);
+ VARIANCE_INVOKE(rtcd, get8x8var)(vptr, pre_stride,
+ vpred_ptr, uv_stride, &sse1, &sum1);
sse2 += sse1;
}
-
return sse2;
}
@@ -933,9 +938,11 @@ static int labels2mode(
cost = x->inter_bmode_costs[ m];
}
- d->bmi.mode = m;
d->bmi.mv.as_int = this_mv->as_int;
+ x->partition_info->bmi[i].mode = m;
+ x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
+
}
while (++i < 16);
@@ -1275,8 +1282,8 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
{
BLOCKD *bd = &x->e_mbd.block[i];
- bsi->mvs[i].as_mv = bd->bmi.mv.as_mv;
- bsi->modes[i] = bd->bmi.mode;
+ bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
+ bsi->modes[i] = x->partition_info->bmi[i].mode;
bsi->eobs[i] = bd->eob;
}
}
@@ -1406,7 +1413,6 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
BLOCKD *bd = &x->e_mbd.block[i];
bd->bmi.mv.as_mv = bsi.mvs[i].as_mv;
- bd->bmi.mode = bsi.modes[i];
bd->eob = bsi.eobs[i];
}
@@ -1424,9 +1430,13 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
j = vp8_mbsplit_offset[bsi.segment_num][i];
- x->partition_info->bmi[i].mode = x->e_mbd.block[j].bmi.mode;
- x->partition_info->bmi[i].mv.as_mv = x->e_mbd.block[j].bmi.mv.as_mv;
+ x->partition_info->bmi[i].mode = bsi.modes[j];
+ x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
}
+ /*
+ * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
+ */
+ x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
return bsi.segment_rd;
}
@@ -1686,25 +1696,29 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
}
}
-static void vp8_rd_update_mvcount(VP8_COMP *cpi, MACROBLOCKD *xd, int_mv *best_ref_mv)
+static void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv)
{
- int i;
-
- if (xd->mode_info_context->mbmi.mode == SPLITMV)
+ if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV)
{
- for (i = 0; i < 16; i++)
+ int i;
+
+ for (i = 0; i < x->partition_info->count; i++)
{
- if (xd->block[i].bmi.mode == NEW4X4)
+ if (x->partition_info->bmi[i].mode == NEW4X4)
{
- cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+ cpi->MVcount[0][mv_max+((x->partition_info->bmi[i].mv.as_mv.row
+ - best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max+((x->partition_info->bmi[i].mv.as_mv.col
+ - best_ref_mv->as_mv.col) >> 1)]++;
}
}
}
- else if (xd->mode_info_context->mbmi.mode == NEWMV)
+ else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV)
{
- cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv->as_mv.col) >> 1)]++;
+ cpi->MVcount[0][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
+ - best_ref_mv->as_mv.row) >> 1)]++;
+ cpi->MVcount[1][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col
+ - best_ref_mv->as_mv.col) >> 1)]++;
}
}
@@ -2374,9 +2388,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
}
- // Keep a record of best mode index that we chose
- cpi->last_best_mode_index = best_mode_index;
-
// Note how often each mode chosen as best
cpi->mode_chosen_counts[best_mode_index] ++;
@@ -2414,14 +2425,19 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// macroblock modes
vpx_memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
- vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
for (i = 0; i < 16; i++)
{
vpx_memcpy(&x->e_mbd.block[i].bmi, &best_bmodes[i], sizeof(B_MODE_INFO));
}
- x->e_mbd.mode_info_context->mbmi.mv.as_mv = x->e_mbd.block[15].bmi.mv.as_mv;
+ if (best_mbmode.mode == SPLITMV)
+ {
+ vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
+ x->e_mbd.mode_info_context->mbmi.mv.as_int =
+ x->partition_info->bmi[15].mv.as_int;
+ }
+
+ rd_update_mvcount(cpi, x, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
- vp8_rd_update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
}
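
For readers following the rd_update_mvcount() hunk above, here is a minimal standalone sketch of the counting pattern it uses: each newly searched partition motion vector is binned as a delta from the best reference MV into per-component histograms. The types and names below (PARTITION_SKETCH, is_new, MV_MAX = 1023) are simplified stand-ins for illustration, not the real libvpx definitions; only the binning arithmetic mirrors the code in the diff.

/*
 * Sketch of the SPLITMV motion-vector counting pattern.
 * Struct layouts are simplified stand-ins, not libvpx types.
 */
#include <stdio.h>

#define MV_MAX 1023   /* assumed placeholder for libvpx's mv_max */

typedef struct { short row, col; } MV;

typedef struct {
    int count;        /* number of used partitions (2, 4 or 16) */
    struct {
        int is_new;   /* stands in for "mode == NEW4X4" */
        MV  mv;
    } bmi[16];
} PARTITION_SKETCH;

/* Bin each newly searched partition MV, as a delta from the best
 * reference MV, into per-component histograms (index 0 = row, 1 = col). */
static void count_split_mvs(unsigned int counts[2][2 * MV_MAX + 1],
                            const PARTITION_SKETCH *p, const MV *best_ref)
{
    int i;

    for (i = 0; i < p->count; i++)
    {
        if (!p->bmi[i].is_new)
            continue;

        counts[0][MV_MAX + ((p->bmi[i].mv.row - best_ref->row) >> 1)]++;
        counts[1][MV_MAX + ((p->bmi[i].mv.col - best_ref->col) >> 1)]++;
    }
}

int main(void)
{
    static unsigned int counts[2][2 * MV_MAX + 1];  /* zero-initialized */
    PARTITION_SKETCH p = { 2, { { 1, { 8, -4 } }, { 1, { 6, -2 } } } };
    MV best_ref = { 4, 0 };

    count_split_mvs(counts, &p, &best_ref);

    /* (8 - 4) >> 1 == 2, so the first partition lands in row bin MV_MAX + 2 */
    printf("row bin %d: %u\n", MV_MAX + 2, counts[0][MV_MAX + 2]);
    return 0;
}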