summary refs log tree commit diff
path: root/vp8/encoder/encodemb.c
diff options
context:
space:
mode:
Diffstat (limited to 'vp8/encoder/encodemb.c')
-rw-r--r--  vp8/encoder/encodemb.c  78
1 file changed, 46 insertions, 32 deletions
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index 50de2f2c0..d3bd0f1dd 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -267,7 +267,7 @@ static const int plane_rd_mult[4] = {
void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- const VP8_ENCODER_RTCD *rtcd, int tx_type) {
+ const VP8_ENCODER_RTCD *rtcd, int tx_size) {
BLOCK *b;
BLOCKD *d;
vp8_token_state tokens[65][2];
@@ -298,21 +298,18 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
b = &mb->block[i];
d = &mb->e_mbd.block[i];
- switch (tx_type) {
+ switch (tx_size) {
default:
case TX_4X4:
scan = vp8_default_zig_zag1d;
bands = vp8_coef_bands;
default_eob = 16;
-#if CONFIG_HYBRIDTRANSFORM
// TODO: this isn't called (for intra4x4 modes), but will be left in
// since it could be used later
{
- int active_ht = (mb->q_index < ACTIVE_HT) &&
- (mb->e_mbd.mode_info_context->mbmi.mode == B_PRED);
-
- if((type == PLANE_TYPE_Y_WITH_DC) && active_ht) {
- switch (d->bmi.as_mode.tx_type) {
+ TX_TYPE tx_type = get_tx_type(&mb->e_mbd, d);
+ if (tx_type != DCT_DCT) {
+ switch (tx_type) {
case ADST_DCT:
scan = vp8_row_scan;
break;
@@ -325,11 +322,10 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
scan = vp8_default_zig_zag1d;
break;
}
-
- } else
+ } else {
scan = vp8_default_zig_zag1d;
+ }
}
-#endif
break;
case TX_8X8:
scan = vp8_default_zig_zag1d_8x8;
@@ -380,9 +376,9 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
band = bands[i + 1];
pt = vp8_prev_token_class[t0];
rate0 +=
- mb->token_costs[tx_type][type][band][pt][tokens[next][0].token];
+ mb->token_costs[tx_size][type][band][pt][tokens[next][0].token];
rate1 +=
- mb->token_costs[tx_type][type][band][pt][tokens[next][1].token];
+ mb->token_costs[tx_size][type][band][pt][tokens[next][1].token];
}
UPDATE_RD_COST();
/* And pick the best. */
@@ -427,12 +423,12 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
band = bands[i + 1];
if (t0 != DCT_EOB_TOKEN) {
pt = vp8_prev_token_class[t0];
- rate0 += mb->token_costs[tx_type][type][band][pt][
+ rate0 += mb->token_costs[tx_size][type][band][pt][
tokens[next][0].token];
}
if (t1 != DCT_EOB_TOKEN) {
pt = vp8_prev_token_class[t1];
- rate1 += mb->token_costs[tx_type][type][band][pt][
+ rate1 += mb->token_costs[tx_size][type][band][pt][
tokens[next][1].token];
}
}
@@ -464,11 +460,11 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
t1 = tokens[next][1].token;
/* Update the cost of each path if we're past the EOB token. */
if (t0 != DCT_EOB_TOKEN) {
- tokens[next][0].rate += mb->token_costs[tx_type][type][band][0][t0];
+ tokens[next][0].rate += mb->token_costs[tx_size][type][band][0][t0];
tokens[next][0].token = ZERO_TOKEN;
}
if (t1 != DCT_EOB_TOKEN) {
- tokens[next][1].rate += mb->token_costs[tx_type][type][band][0][t1];
+ tokens[next][1].rate += mb->token_costs[tx_size][type][band][0][t1];
tokens[next][1].token = ZERO_TOKEN;
}
/* Don't update next, because we didn't add a new node. */
@@ -484,8 +480,8 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
error1 = tokens[next][1].error;
t0 = tokens[next][0].token;
t1 = tokens[next][1].token;
- rate0 += mb->token_costs[tx_type][type][band][pt][t0];
- rate1 += mb->token_costs[tx_type][type][band][pt][t1];
+ rate0 += mb->token_costs[tx_size][type][band][pt][t0];
+ rate1 += mb->token_costs[tx_size][type][band][pt][t1];
UPDATE_RD_COST();
best = rd_cost1 < rd_cost0;
final_eob = i0 - 1;
@@ -640,6 +636,7 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
+ int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return;
@@ -649,18 +646,21 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
- type = PLANE_TYPE_Y_NO_DC;
+ type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
for (b = 0; b < 16; b += 4) {
optimize_b(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b],
+ ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
+ ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]];
+ tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]];
}
// 8x8 always has 2nd order haar block
- check_reset_8x8_2nd_coeffs(&x->e_mbd,
- ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
+ if (has_2nd_order) {
+ check_reset_8x8_2nd_coeffs(&x->e_mbd,
+ ta + vp8_block2above_8x8[24],
+ tl + vp8_block2left_8x8[24]);
+ }
}
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
@@ -682,8 +682,8 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
optimize_b(x, b, PLANE_TYPE_UV,
ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
+ ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]];
+ tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]];
}
}
@@ -900,11 +900,25 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
optimize_mb_16x16(x, rtcd);
vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), xd);
} else if (tx_size == TX_8X8) {
- vp8_transform_mb_8x8(x);
- vp8_quantize_mb_8x8(x);
- if (x->optimize)
- optimize_mb_8x8(x, rtcd);
- vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd);
+ if (xd->mode_info_context->mbmi.mode == SPLITMV) {
+ assert(xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4);
+ vp8_transform_mby_8x8(x);
+ vp8_transform_mbuv_4x4(x);
+ vp8_quantize_mby_8x8(x);
+ vp8_quantize_mbuv_4x4(x);
+ if (x->optimize) {
+ vp8_optimize_mby_8x8(x, rtcd);
+ vp8_optimize_mbuv_4x4(x, rtcd);
+ }
+ vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
+ vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
+ } else {
+ vp8_transform_mb_8x8(x);
+ vp8_quantize_mb_8x8(x);
+ if (x->optimize)
+ optimize_mb_8x8(x, rtcd);
+ vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd);
+ }
} else {
transform_mb_4x4(x);
vp8_quantize_mb_4x4(x);