summary | refs | log | tree | commit | diff
path: root/vp9/encoder
diff options
context:
space:
mode:
authorDmitry Kovalev <dkovalev@google.com>2013-07-23 17:02:08 -0700
committerDmitry Kovalev <dkovalev@google.com>2013-07-23 17:02:08 -0700
commit1099a436d3be3fac43248dac4a5fd57727683fbd (patch)
treee43de6625812f4a9965ad54ffdca3bdafa293ffd /vp9/encoder
parent8d13b0d1dfccf2359ea4b8e93d016b28eab538cd (diff)
downloadlibvpx-1099a436d3be3fac43248dac4a5fd57727683fbd.tar
libvpx-1099a436d3be3fac43248dac4a5fd57727683fbd.tar.gz
libvpx-1099a436d3be3fac43248dac4a5fd57727683fbd.tar.bz2
libvpx-1099a436d3be3fac43248dac4a5fd57727683fbd.zip
Moving counts from FRAME_CONTEXT to new struct FRAME_COUNTS.
Counts are separate from frame context. We have several frame contexts but need only one copy of all counts. Change-Id: I5279b0321cb450bbea7049adaa9275306a7cef7d
Diffstat (limited to 'vp9/encoder')
-rw-r--r--vp9/encoder/vp9_bitstream.c22
-rw-r--r--vp9/encoder/vp9_encodeframe.c34
-rw-r--r--vp9/encoder/vp9_onyx_if.c20
-rw-r--r--vp9/encoder/vp9_tokenize.c6
4 files changed, 42 insertions, 40 deletions
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index 72797fe0c..d7cb50372 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -225,12 +225,12 @@ static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m,
}
void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) {
- FRAME_CONTEXT *const fc = &cpi->common.fc;
+ VP9_COMMON *cm = &cpi->common;
int k;
for (k = 0; k < MBSKIP_CONTEXTS; ++k)
- vp9_cond_prob_diff_update(w, &fc->mbskip_probs[k],
- VP9_MODE_UPDATE_PROB, fc->mbskip_count[k]);
+ vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k],
+ VP9_MODE_UPDATE_PROB, cm->counts.mbskip[k]);
}
static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
@@ -248,7 +248,7 @@ static void update_switchable_interp_probs(VP9_COMP *const cpi,
vp9_tree_probs_from_distribution(
vp9_switchable_interp_tree,
new_prob[j], branch_ct[j],
- pc->fc.switchable_interp_count[j], 0);
+ pc->counts.switchable_interp[j], 0);
}
for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) {
for (i = 0; i < VP9_SWITCHABLE_FILTERS - 1; ++i) {
@@ -269,7 +269,7 @@ static void update_inter_mode_probs(VP9_COMMON *pc, vp9_writer* const bc) {
for (j = 0; j < VP9_INTER_MODES - 1; j++) {
vp9_cond_prob_diff_update(bc, &pc->fc.inter_mode_probs[i][j],
VP9_MODE_UPDATE_PROB,
- pc->fc.inter_mode_counts[i][j]);
+ pc->counts.inter_mode[i][j]);
}
}
}
@@ -740,7 +740,7 @@ static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE txfm_size) {
vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[txfm_size];
vp9_coeff_count *coef_counts = cpi->coef_counts[txfm_size];
unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] =
- cpi->common.fc.eob_branch_counts[txfm_size];
+ cpi->common.counts.eob_branch[txfm_size];
vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[txfm_size];
vp9_prob full_probs[ENTROPY_NODES];
int i, j, k, l;
@@ -1060,7 +1060,7 @@ static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- tx_counts_to_branch_counts_8x8(cm->fc.tx_counts.p8x8[i],
+ tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i],
ct_8x8p);
for (j = 0; j < TX_SIZE_MAX_SB - 3; j++)
vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j],
@@ -1068,7 +1068,7 @@ static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
}
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- tx_counts_to_branch_counts_16x16(cm->fc.tx_counts.p16x16[i],
+ tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i],
ct_16x16p);
for (j = 0; j < TX_SIZE_MAX_SB - 2; j++)
vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
@@ -1076,7 +1076,7 @@ static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
}
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
- tx_counts_to_branch_counts_32x32(cm->fc.tx_counts.p32x32[i], ct_32x32p);
+ tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
for (j = 0; j < TX_SIZE_MAX_SB - 1; j++)
vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
VP9_MODE_UPDATE_PROB, ct_32x32p[j]);
@@ -1105,7 +1105,7 @@ static void fix_mcomp_filter_type(VP9_COMP *cpi) {
for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
count[i] = 0;
for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j)
- count[i] += cm->fc.switchable_interp_count[j][i];
+ count[i] += cm->counts.switchable_interp[j][i];
c += (count[i] > 0);
}
if (c == 1) {
@@ -1392,7 +1392,7 @@ static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
#endif
update_inter_mode_probs(cm, &header_bc);
- vp9_zero(fc->inter_mode_counts);
+ vp9_zero(cm->counts.inter_mode);
if (cm->mcomp_filter_type == SWITCHABLE)
update_switchable_interp_probs(cpi, &header_bc);
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 88d0b01c8..0830949d1 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -422,7 +422,7 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
if (cpi->common.mcomp_filter_type == SWITCHABLE
&& is_inter_mode(mbmi->mode)) {
- ++cpi->common.fc.switchable_interp_count[
+ ++cpi->common.counts.switchable_interp[
vp9_get_pred_context_switchable_interp(xd)]
[vp9_switchable_interp_map[mbmi->interp_filter]];
}
@@ -1949,14 +1949,14 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
vp9_zero(cpi->y_mode_count)
vp9_zero(cpi->y_uv_mode_count)
- vp9_zero(cm->fc.inter_mode_counts)
+ vp9_zero(cm->counts.inter_mode)
vp9_zero(cpi->partition_count);
vp9_zero(cpi->intra_inter_count);
vp9_zero(cpi->comp_inter_count);
vp9_zero(cpi->single_ref_count);
vp9_zero(cpi->comp_ref_count);
- vp9_zero(cm->fc.tx_counts);
- vp9_zero(cm->fc.mbskip_count);
+ vp9_zero(cm->counts.tx);
+ vp9_zero(cm->counts.mbskip);
// Note: this memset assumes above_context[0], [1] and [2]
// are allocated as part of the same buffer.
@@ -2018,7 +2018,7 @@ static void encode_frame_internal(VP9_COMP *cpi) {
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
- vp9_zero(cm->fc.switchable_interp_count);
+ vp9_zero(cm->counts.switchable_interp);
vp9_zero(cpi->txfm_stepdown_count);
xd->mode_info_context = cm->mi;
@@ -2026,7 +2026,7 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vp9_zero(cpi->NMVcount);
vp9_zero(cpi->coef_counts);
- vp9_zero(cm->fc.eob_branch_counts);
+ vp9_zero(cm->counts.eob_branch);
cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0
&& cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -2412,17 +2412,17 @@ void vp9_encode_frame(VP9_COMP *cpi) {
int count32x32 = 0;
for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
- count4x4 += cm->fc.tx_counts.p32x32[i][TX_4X4];
- count4x4 += cm->fc.tx_counts.p16x16[i][TX_4X4];
- count4x4 += cm->fc.tx_counts.p8x8[i][TX_4X4];
+ count4x4 += cm->counts.tx.p32x32[i][TX_4X4];
+ count4x4 += cm->counts.tx.p16x16[i][TX_4X4];
+ count4x4 += cm->counts.tx.p8x8[i][TX_4X4];
- count8x8_lp += cm->fc.tx_counts.p32x32[i][TX_8X8];
- count8x8_lp += cm->fc.tx_counts.p16x16[i][TX_8X8];
- count8x8_8x8p += cm->fc.tx_counts.p8x8[i][TX_8X8];
+ count8x8_lp += cm->counts.tx.p32x32[i][TX_8X8];
+ count8x8_lp += cm->counts.tx.p16x16[i][TX_8X8];
+ count8x8_8x8p += cm->counts.tx.p8x8[i][TX_8X8];
- count16x16_16x16p += cm->fc.tx_counts.p16x16[i][TX_16X16];
- count16x16_lp += cm->fc.tx_counts.p32x32[i][TX_16X16];
- count32x32 += cm->fc.tx_counts.p32x32[i][TX_32X32];
+ count16x16_16x16p += cm->counts.tx.p16x16[i][TX_16X16];
+ count16x16_lp += cm->counts.tx.p32x32[i][TX_16X16];
+ count32x32 += cm->counts.tx.p32x32[i][TX_32X32];
}
if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0
@@ -2588,7 +2588,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
mbmi->mb_skip_coeff = 1;
if (output_enabled)
- cm->fc.mbskip_count[mb_skip_context][1]++;
+ cm->counts.mbskip[mb_skip_context][1]++;
vp9_reset_sb_tokens_context(
xd, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
}
@@ -2604,7 +2604,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
(mbmi->mb_skip_coeff ||
vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)))) {
const uint8_t context = vp9_get_pred_context_tx_size(xd);
- update_tx_counts(bsize, context, mbmi->txfm_size, &cm->fc.tx_counts);
+ update_tx_counts(bsize, context, mbmi->txfm_size, &cm->counts.tx);
} else {
int x, y;
TX_SIZE sz = (cm->tx_mode == TX_MODE_SELECT) ? TX_32X32 : cm->tx_mode;
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 7b50e076e..f9c1c830e 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -3152,7 +3152,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
update_reference_frames(cpi);
for (t = TX_4X4; t <= TX_32X32; t++)
- full_to_model_counts(cpi->common.fc.coef_counts[t],
+ full_to_model_counts(cpi->common.counts.coef[t],
cpi->coef_counts[t]);
if (!cpi->common.error_resilient_mode &&
!cpi->common.frame_parallel_decoding_mode) {
@@ -3160,14 +3160,16 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
}
if (cpi->common.frame_type != KEY_FRAME) {
- vp9_copy(cpi->common.fc.y_mode_counts, cpi->y_mode_count);
- vp9_copy(cpi->common.fc.uv_mode_counts, cpi->y_uv_mode_count);
- vp9_copy(cpi->common.fc.partition_counts, cpi->partition_count);
- vp9_copy(cm->fc.intra_inter_count, cpi->intra_inter_count);
- vp9_copy(cm->fc.comp_inter_count, cpi->comp_inter_count);
- vp9_copy(cm->fc.single_ref_count, cpi->single_ref_count);
- vp9_copy(cm->fc.comp_ref_count, cpi->comp_ref_count);
- cpi->common.fc.NMVcount = cpi->NMVcount;
+ FRAME_COUNTS *counts = &cpi->common.counts;
+
+ vp9_copy(counts->y_mode, cpi->y_mode_count);
+ vp9_copy(counts->uv_mode, cpi->y_uv_mode_count);
+ vp9_copy(counts->partition, cpi->partition_count);
+ vp9_copy(counts->intra_inter, cpi->intra_inter_count);
+ vp9_copy(counts->comp_inter, cpi->comp_inter_count);
+ vp9_copy(counts->single_ref, cpi->single_ref_count);
+ vp9_copy(counts->comp_ref, cpi->comp_ref_count);
+ counts->mv = cpi->NMVcount;
if (!cpi->common.error_resilient_mode &&
!cpi->common.frame_parallel_decoding_mode) {
vp9_adapt_mode_probs(&cpi->common);
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index 4403f9527..6b7cc47d8 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -219,7 +219,7 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
#else
if (!t->skip_eob_node)
#endif
- ++cpi->common.fc.eob_branch_counts[tx_size][type][ref][band][pt];
+ ++cpi->common.counts.eob_branch[tx_size][type][ref][band][pt];
}
token_cache[scan[c]] = vp9_pt_energy_class[token];
++t;
@@ -281,7 +281,7 @@ void vp9_tokenize_sb(VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
mbmi->mb_skip_coeff = vp9_sb_is_skippable(xd, bsize);
if (mbmi->mb_skip_coeff) {
if (!dry_run)
- cm->fc.mbskip_count[mb_skip_context][1] += skip_inc;
+ cm->counts.mbskip[mb_skip_context][1] += skip_inc;
vp9_reset_sb_tokens_context(xd, bsize);
if (dry_run)
*t = t_backup;
@@ -289,7 +289,7 @@ void vp9_tokenize_sb(VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
}
if (!dry_run)
- cm->fc.mbskip_count[mb_skip_context][0] += skip_inc;
+ cm->counts.mbskip[mb_skip_context][0] += skip_inc;
foreach_transformed_block(xd, bsize, tokenize_b, &arg);