-rw-r--r--  vp9/encoder/vp9_block.h        |   2
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  |   6
-rw-r--r--  vp9/encoder/vp9_rdopt.c        | 125
3 files changed, 64 insertions(+), 69 deletions(-)
diff --git a/vp9/encoder/vp9_block.h b/vp9/encoder/vp9_block.h
index 634c0b44e..800e3ba47 100644
--- a/vp9/encoder/vp9_block.h
+++ b/vp9/encoder/vp9_block.h
@@ -47,7 +47,7 @@ typedef struct {
int hybrid_pred_diff;
int comp_pred_diff;
int single_pred_diff;
- int64_t txfm_rd_diff[TX_MODES];
+ int64_t tx_rd_diff[TX_MODES];
int64_t best_filter_diff[VP9_SWITCHABLE_FILTERS + 1];
// Bit flag for each mode whether it has high error in comparison to others.
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 027caadae..762a548fe 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -364,8 +364,8 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
// (i.e. after the output_enabled)
if (bsize < BLOCK_SIZE_SB32X32) {
if (bsize < BLOCK_SIZE_MB16X16)
- ctx->txfm_rd_diff[ALLOW_16X16] = ctx->txfm_rd_diff[ALLOW_8X8];
- ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
+ ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
+ ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
}
if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_SIZE_SB8X8) {
@@ -380,7 +380,7 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
if (!vp9_segfeature_active(&xd->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
for (i = 0; i < TX_MODES; i++)
- cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
+ cpi->rd_tx_select_diff[i] += ctx->tx_rd_diff[i];
}
if (cpi->common.frame_type == KEY_FRAME) {
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 5a36b6729..b110cd985 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -866,9 +866,9 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
int (*r)[2], int *rate,
int64_t *d, int64_t *distortion,
int *s, int *skip,
- int64_t txfm_cache[TX_MODES],
+ int64_t tx_cache[TX_MODES],
BLOCK_SIZE_TYPE bs) {
- const TX_SIZE max_txfm_size = TX_32X32
+ const TX_SIZE max_tx_size = TX_32X32
- (bs < BLOCK_SIZE_SB32X32) - (bs < BLOCK_SIZE_MB16X16);
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
@@ -880,11 +880,11 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs);
- for (n = TX_4X4; n <= max_txfm_size; n++) {
+ for (n = TX_4X4; n <= max_tx_size; n++) {
r[n][1] = r[n][0];
if (r[n][0] == INT_MAX)
continue;
- for (m = 0; m <= n - (n == max_txfm_size); m++) {
+ for (m = 0; m <= n - (n == max_tx_size); m++) {
if (m == n)
r[n][1] += vp9_cost_zero(tx_probs[m]);
else
@@ -896,7 +896,7 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
s0 = vp9_cost_bit(skip_prob, 0);
s1 = vp9_cost_bit(skip_prob, 1);
- for (n = TX_4X4; n <= max_txfm_size; n++) {
+ for (n = TX_4X4; n <= max_tx_size; n++) {
if (d[n] == INT64_MAX) {
rd[n][0] = rd[n][1] = INT64_MAX;
continue;
@@ -909,13 +909,13 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
}
}
- if (max_txfm_size == TX_32X32 &&
+ if (max_tx_size == TX_32X32 &&
(cm->tx_mode == ALLOW_32X32 ||
(cm->tx_mode == TX_MODE_SELECT &&
rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
rd[TX_32X32][1] < rd[TX_4X4][1]))) {
mbmi->txfm_size = TX_32X32;
- } else if (max_txfm_size >= TX_16X16 &&
+ } else if (max_tx_size >= TX_16X16 &&
(cm->tx_mode == ALLOW_16X16 ||
cm->tx_mode == ALLOW_32X32 ||
(cm->tx_mode == TX_MODE_SELECT &&
@@ -935,34 +935,34 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
*rate = r[mbmi->txfm_size][cm->tx_mode == TX_MODE_SELECT];
*skip = s[mbmi->txfm_size];
- txfm_cache[ONLY_4X4] = rd[TX_4X4][0];
- txfm_cache[ALLOW_8X8] = rd[TX_8X8][0];
- txfm_cache[ALLOW_16X16] = rd[MIN(max_txfm_size, TX_16X16)][0];
- txfm_cache[ALLOW_32X32] = rd[MIN(max_txfm_size, TX_32X32)][0];
- if (max_txfm_size == TX_32X32 &&
+ tx_cache[ONLY_4X4] = rd[TX_4X4][0];
+ tx_cache[ALLOW_8X8] = rd[TX_8X8][0];
+ tx_cache[ALLOW_16X16] = rd[MIN(max_tx_size, TX_16X16)][0];
+ tx_cache[ALLOW_32X32] = rd[MIN(max_tx_size, TX_32X32)][0];
+ if (max_tx_size == TX_32X32 &&
rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
rd[TX_32X32][1] < rd[TX_4X4][1])
- txfm_cache[TX_MODE_SELECT] = rd[TX_32X32][1];
- else if (max_txfm_size >= TX_16X16 &&
+ tx_cache[TX_MODE_SELECT] = rd[TX_32X32][1];
+ else if (max_tx_size >= TX_16X16 &&
rd[TX_16X16][1] < rd[TX_8X8][1] && rd[TX_16X16][1] < rd[TX_4X4][1])
- txfm_cache[TX_MODE_SELECT] = rd[TX_16X16][1];
+ tx_cache[TX_MODE_SELECT] = rd[TX_16X16][1];
else
- txfm_cache[TX_MODE_SELECT] = rd[TX_4X4][1] < rd[TX_8X8][1] ?
+ tx_cache[TX_MODE_SELECT] = rd[TX_4X4][1] < rd[TX_8X8][1] ?
rd[TX_4X4][1] : rd[TX_8X8][1];
- if (max_txfm_size == TX_32X32 &&
+ if (max_tx_size == TX_32X32 &&
rd[TX_32X32][1] < rd[TX_16X16][1] &&
rd[TX_32X32][1] < rd[TX_8X8][1] &&
rd[TX_32X32][1] < rd[TX_4X4][1]) {
cpi->txfm_stepdown_count[0]++;
- } else if (max_txfm_size >= TX_16X16 &&
+ } else if (max_tx_size >= TX_16X16 &&
rd[TX_16X16][1] < rd[TX_8X8][1] &&
rd[TX_16X16][1] < rd[TX_4X4][1]) {
- cpi->txfm_stepdown_count[max_txfm_size - TX_16X16]++;
+ cpi->txfm_stepdown_count[max_tx_size - TX_16X16]++;
} else if (rd[TX_8X8][1] < rd[TX_4X4][1]) {
- cpi->txfm_stepdown_count[max_txfm_size - TX_8X8]++;
+ cpi->txfm_stepdown_count[max_tx_size - TX_8X8]++;
} else {
- cpi->txfm_stepdown_count[max_txfm_size - TX_4X4]++;
+ cpi->txfm_stepdown_count[max_tx_size - TX_4X4]++;
}
}
@@ -1383,7 +1383,7 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
int *rate, int *rate_tokenonly,
int64_t *distortion, int *skippable,
BLOCK_SIZE_TYPE bsize,
- int64_t txfm_cache[TX_MODES],
+ int64_t tx_cache[TX_MODES],
int64_t best_rd) {
MB_PREDICTION_MODE mode;
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
@@ -1394,14 +1394,13 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
int i;
int *bmode_costs = x->mbmode_cost;
- if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
+ if (cpi->sf.tx_size_search_method == USE_FULL_RD)
for (i = 0; i < TX_MODES; i++)
- txfm_cache[i] = INT64_MAX;
- }
+ tx_cache[i] = INT64_MAX;
/* Y Search for intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
- int64_t local_txfm_cache[TX_MODES];
+ int64_t local_tx_cache[TX_MODES];
MODE_INFO *const mic = xd->mode_info_context;
const int mis = xd->mode_info_stride;
@@ -1415,7 +1414,7 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
x->e_mbd.mode_info_context->mbmi.mode = mode;
super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
- bsize, local_txfm_cache, best_rd);
+ bsize, local_tx_cache, best_rd);
if (this_rate_tokenonly == INT_MAX)
continue;
@@ -1435,10 +1434,10 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (cpi->sf.tx_size_search_method == USE_FULL_RD && this_rd < INT64_MAX) {
for (i = 0; i < TX_MODES; i++) {
- int64_t adj_rd = this_rd + local_txfm_cache[i] -
- local_txfm_cache[cpi->common.tx_mode];
- if (adj_rd < txfm_cache[i]) {
- txfm_cache[i] = adj_rd;
+ const int64_t adj_rd = this_rd + local_tx_cache[i] -
+ local_tx_cache[cpi->common.tx_mode];
+ if (adj_rd < tx_cache[i]) {
+ tx_cache[i] = adj_rd;
}
}
}
@@ -2343,7 +2342,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
int_mv *ref_mv,
int_mv *second_ref_mv,
int64_t comp_pred_diff[NB_PREDICTION_TYPES],
- int64_t txfm_size_diff[TX_MODES],
+ int64_t tx_size_diff[TX_MODES],
int64_t best_filter_diff[VP9_SWITCHABLE_FILTERS + 1]) {
MACROBLOCKD *const xd = &x->e_mbd;
@@ -2365,7 +2364,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
// FIXME(rbultje) does this memcpy the whole array? I believe sizeof()
// doesn't actually work this way
- memcpy(ctx->txfm_rd_diff, txfm_size_diff, sizeof(ctx->txfm_rd_diff));
+ memcpy(ctx->tx_rd_diff, tx_size_diff, sizeof(ctx->tx_rd_diff));
memcpy(ctx->best_filter_diff, best_filter_diff,
sizeof(*best_filter_diff) * (VP9_SWITCHABLE_FILTERS + 1));
}
@@ -3131,15 +3130,13 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
MACROBLOCKD *const xd = &x->e_mbd;
int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
int y_skip = 0, uv_skip;
- int64_t dist_y = 0, dist_uv = 0, txfm_cache[TX_MODES];
-
+ int64_t dist_y = 0, dist_uv = 0, tx_cache[TX_MODES] = { 0 };
x->skip_encode = 0;
- vpx_memset(&txfm_cache, 0, sizeof(txfm_cache));
ctx->skip = 0;
xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
if (bsize >= BLOCK_SIZE_SB8X8) {
if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
- &dist_y, &y_skip, bsize, txfm_cache,
+ &dist_y, &y_skip, bsize, tx_cache,
best_rd) >= best_rd) {
*returnrate = INT_MAX;
return;
@@ -3161,17 +3158,15 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
*returnrate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), 1);
*returndist = dist_y + (dist_uv >> 2);
- memset(ctx->txfm_rd_diff, 0, sizeof(ctx->txfm_rd_diff));
+ vp9_zero(ctx->tx_rd_diff);
} else {
int i;
*returnrate = rate_y + rate_uv +
vp9_cost_bit(vp9_get_pred_prob_mbskip(cm, xd), 0);
*returndist = dist_y + (dist_uv >> 2);
- if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
- for (i = 0; i < TX_MODES; i++) {
- ctx->txfm_rd_diff[i] = txfm_cache[i] - txfm_cache[cm->tx_mode];
- }
- }
+ if (cpi->sf.tx_size_search_method == USE_FULL_RD)
+ for (i = 0; i < TX_MODES; i++)
+ ctx->tx_rd_diff[i] = tx_cache[i] - tx_cache[cm->tx_mode];
}
ctx->mic = *xd->mode_info_context;
@@ -3204,8 +3199,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
cpi->alt_fb_idx};
int64_t best_rd = best_rd_so_far;
int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise
- int64_t best_txfm_rd[TX_MODES];
- int64_t best_txfm_diff[TX_MODES];
+ int64_t best_tx_rd[TX_MODES];
+ int64_t best_tx_diff[TX_MODES];
int64_t best_pred_diff[NB_PREDICTION_TYPES];
int64_t best_pred_rd[NB_PREDICTION_TYPES];
int64_t best_filter_rd[VP9_SWITCHABLE_FILTERS + 1];
@@ -3261,7 +3256,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
for (i = 0; i < NB_PREDICTION_TYPES; ++i)
best_pred_rd[i] = INT64_MAX;
for (i = 0; i < TX_MODES; i++)
- best_txfm_rd[i] = INT64_MAX;
+ best_tx_rd[i] = INT64_MAX;
for (i = 0; i <= VP9_SWITCHABLE_FILTERS; i++)
best_filter_rd[i] = INT64_MAX;
for (i = 0; i < TX_SIZES; i++)
@@ -3320,14 +3315,14 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int rate2 = 0, rate_y = 0, rate_uv = 0;
int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
int skippable;
- int64_t txfm_cache[TX_MODES];
+ int64_t tx_cache[TX_MODES];
int i;
int this_skip2 = 0;
int64_t total_sse = INT_MAX;
int early_term = 0;
for (i = 0; i < TX_MODES; ++i)
- txfm_cache[i] = INT64_MAX;
+ tx_cache[i] = INT64_MAX;
x->skip = 0;
this_mode = vp9_mode_order[mode_index].mode;
@@ -3520,9 +3515,9 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
distortion2 += dist_uv[TX_4X4];
distortion_uv = dist_uv[TX_4X4];
mbmi->uv_mode = mode_uv[TX_4X4];
- txfm_cache[ONLY_4X4] = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+ tx_cache[ONLY_4X4] = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
for (i = 0; i < TX_MODES; ++i)
- txfm_cache[i] = txfm_cache[ONLY_4X4];
+ tx_cache[i] = tx_cache[ONLY_4X4];
} else if (ref_frame == INTRA_FRAME) {
TX_SIZE uv_tx;
// Only search the oblique modes if the best so far is
@@ -3537,7 +3532,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
continue;
}
super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL,
- bsize, txfm_cache, best_rd);
+ bsize, tx_cache, best_rd);
if (rate_y == INT_MAX)
continue;
@@ -3723,14 +3718,14 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
skippable = skippable && uv_skippable;
total_sse += uv_sse;
- txfm_cache[ONLY_4X4] = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
+ tx_cache[ONLY_4X4] = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
for (i = 0; i < TX_MODES; ++i)
- txfm_cache[i] = txfm_cache[ONLY_4X4];
+ tx_cache[i] = tx_cache[ONLY_4X4];
}
} else {
compmode_cost = vp9_cost_bit(comp_mode_p, second_ref_frame > INTRA_FRAME);
this_rd = handle_inter_mode(cpi, x, bsize,
- txfm_cache,
+ tx_cache,
&rate2, &distortion2, &skippable,
&rate_y, &distortion_y,
&rate_uv, &distortion_uv,
@@ -3954,22 +3949,22 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
if (bsize < BLOCK_SIZE_SB32X32) {
if (bsize < BLOCK_SIZE_MB16X16) {
if (this_mode == SPLITMV || this_mode == I4X4_PRED)
- txfm_cache[ALLOW_8X8] = txfm_cache[ONLY_4X4];
- txfm_cache[ALLOW_16X16] = txfm_cache[ALLOW_8X8];
+ tx_cache[ALLOW_8X8] = tx_cache[ONLY_4X4];
+ tx_cache[ALLOW_16X16] = tx_cache[ALLOW_8X8];
}
- txfm_cache[ALLOW_32X32] = txfm_cache[ALLOW_16X16];
+ tx_cache[ALLOW_32X32] = tx_cache[ALLOW_16X16];
}
if (!mode_excluded && this_rd != INT64_MAX) {
for (i = 0; i < TX_MODES; i++) {
int64_t adj_rd = INT64_MAX;
if (this_mode != I4X4_PRED) {
- adj_rd = this_rd + txfm_cache[i] - txfm_cache[cm->tx_mode];
+ adj_rd = this_rd + tx_cache[i] - tx_cache[cm->tx_mode];
} else {
adj_rd = this_rd;
}
- if (adj_rd < best_txfm_rd[i])
- best_txfm_rd[i] = adj_rd;
+ if (adj_rd < best_tx_rd[i])
+ best_tx_rd[i] = adj_rd;
}
}
@@ -4121,13 +4116,13 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
if (!x->skip) {
for (i = 0; i < TX_MODES; i++) {
- if (best_txfm_rd[i] == INT64_MAX)
- best_txfm_diff[i] = 0;
+ if (best_tx_rd[i] == INT64_MAX)
+ best_tx_diff[i] = 0;
else
- best_txfm_diff[i] = best_rd - best_txfm_rd[i];
+ best_tx_diff[i] = best_rd - best_tx_rd[i];
}
} else {
- vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
+ vpx_memset(best_tx_diff, 0, sizeof(best_tx_diff));
}
set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1],
@@ -4137,7 +4132,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
&mbmi->ref_mvs[mbmi->ref_frame[0]][0],
&mbmi->ref_mvs[mbmi->ref_frame[1] < 0 ? 0 :
mbmi->ref_frame[1]][0],
- best_pred_diff, best_txfm_diff, best_filter_diff);
+ best_pred_diff, best_tx_diff, best_filter_diff);
return best_rd;
}
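
Aside (not part of the patch): the FIXME in store_coding_context() above asks whether memcpy() with sizeof() copies the whole array. A minimal standalone C sketch follows, with a hypothetical PICK_MODE_CONTEXT_SKETCH struct and TX_MODES fixed at 5 as a stand-in for the real enum; it shows why the copy is complete here: sizeof() is taken on the struct's array member (the destination), which yields the full array size, whereas sizeof() on an array-typed function parameter only yields the size of a pointer.

    /* Standalone sketch, not part of the patch; names are illustrative. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TX_MODES 5  /* stand-in for the real enum count */

    typedef struct {
      int64_t tx_rd_diff[TX_MODES];  /* genuine array member */
    } PICK_MODE_CONTEXT_SKETCH;

    /* An array-typed parameter decays to a pointer, so sizeof(tx_size_diff)
     * inside this function is sizeof(int64_t *), not the whole array. */
    static void copy_diff(PICK_MODE_CONTEXT_SKETCH *ctx,
                          int64_t tx_size_diff[TX_MODES]) {
      /* sizeof() of the struct member (the destination) is the full array
       * size, so this memcpy copies all TX_MODES entries. */
      memcpy(ctx->tx_rd_diff, tx_size_diff, sizeof(ctx->tx_rd_diff));

      printf("sizeof(ctx->tx_rd_diff) = %zu\n", sizeof(ctx->tx_rd_diff)); /* 40 */
      printf("sizeof(tx_size_diff)    = %zu\n", sizeof(tx_size_diff));    /* 8 on a
                                                                 64-bit target */
    }

    int main(void) {
      PICK_MODE_CONTEXT_SKETCH ctx;
      int64_t diff[TX_MODES] = { 1, 2, 3, 4, 5 };
      copy_diff(&ctx, diff);
      printf("ctx.tx_rd_diff[4] = %lld\n", (long long)ctx.tx_rd_diff[4]); /* 5 */
      return 0;
    }

So the rename keeps the existing behavior; the copy would only be truncated if sizeof() were taken on the parameter instead of the destination member.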