Diffstat (limited to 'vp9/decoder/vp9_decodframe.c')
-rw-r--r--  vp9/decoder/vp9_decodframe.c  454
 1 file changed, 186 insertions(+), 268 deletions(-)
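
In brief, this patch renames the boolean decoder type and its entry points throughout the decoder (BOOL_DECODER becomes vp9_reader, vp9_start_decode becomes vp9_reader_init, bool_error becomes vp9_reader_has_error), switches reconstruction buffer access from the shared xd->dst.{y,u,v}_buffer / y_stride / uv_stride fields to the per-plane xd->plane[i].dst.buf / dst.stride fields, and replaces the CONFIG_CODE_NONZEROCOUNT code paths with their CONFIG_CODE_ZEROGROUP equivalents. A minimal sketch of the buffer-addressing change, for orientation only (row and col are hypothetical pixel offsets; xd is a MACROBLOCKD as in the diff below):

    /* before: one shared dst struct, with a common stride for both chroma planes */
    uint8_t *y = xd->dst.y_buffer + row * xd->dst.y_stride  + col;
    uint8_t *u = xd->dst.u_buffer + row * xd->dst.uv_stride + col;
    uint8_t *v = xd->dst.v_buffer + row * xd->dst.uv_stride + col;

    /* after: per-plane dst buffers (plane 0 = Y, 1 = U, 2 = V) */
    uint8_t *y = xd->plane[0].dst.buf + row * xd->plane[0].dst.stride + col;
    uint8_t *u = xd->plane[1].dst.buf + row * xd->plane[1].dst.stride + col;
    uint8_t *v = xd->plane[2].dst.buf + row * xd->plane[1].dst.stride + col;

Note that the V-plane calls in the patch keep passing xd->plane[1].dst.stride: both chroma planes of a YV12 buffer share a single stride, so plane[1] and plane[2] carry the same value after setup_dst_planes.
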
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index 4af921872..a16b6108a 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -86,31 +86,31 @@ static int inv_recenter_nonneg(int v, int m) {
return m - ((v + 1) >> 1);
}
-static int decode_uniform(BOOL_DECODER *br, int n) {
+static int decode_uniform(vp9_reader *r, int n) {
int v;
const int l = get_unsigned_bits(n);
const int m = (1 << l) - n;
if (!l)
return 0;
- v = vp9_read_literal(br, l - 1);
- return v < m ? v : (v << 1) - m + vp9_read_bit(br);
+ v = vp9_read_literal(r, l - 1);
+ return v < m ? v : (v << 1) - m + vp9_read_bit(r);
}
-static int decode_term_subexp(BOOL_DECODER *br, int k, int num_syms) {
+static int decode_term_subexp(vp9_reader *r, int k, int num_syms) {
int i = 0, mk = 0, word;
while (1) {
const int b = i ? k + i - 1 : k;
const int a = 1 << b;
if (num_syms <= mk + 3 * a) {
- word = decode_uniform(br, num_syms - mk) + mk;
+ word = decode_uniform(r, num_syms - mk) + mk;
break;
} else {
- if (vp9_read_bit(br)) {
+ if (vp9_read_bit(r)) {
i++;
mk += a;
} else {
- word = vp9_read_literal(br, b) + mk;
+ word = vp9_read_literal(r, b) + mk;
break;
}
}
@@ -118,11 +118,11 @@ static int decode_term_subexp(BOOL_DECODER *br, int k, int num_syms) {
return word;
}
-static int decode_unsigned_max(BOOL_DECODER *br, int max) {
+static int decode_unsigned_max(vp9_reader *r, int max) {
int data = 0, bit = 0, lmax = max;
while (lmax) {
- data |= vp9_read_bit(br) << bit++;
+ data |= vp9_read_bit(r) << bit++;
lmax >>= 1;
}
return data > max ? max : data;
@@ -154,8 +154,8 @@ static int inv_remap_prob(int v, int m) {
}
}
-static vp9_prob read_prob_diff_update(vp9_reader *const bc, int oldp) {
- int delp = decode_term_subexp(bc, SUBEXP_PARAM, 255);
+static vp9_prob read_prob_diff_update(vp9_reader *r, int oldp) {
+ int delp = decode_term_subexp(r, SUBEXP_PARAM, 255);
return (vp9_prob)inv_remap_prob(delp, oldp);
}
@@ -220,51 +220,23 @@ static void mb_init_dequantizer(VP9D_COMP *pbi, MACROBLOCKD *mb) {
}
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void propagate_nzcs(VP9_COMMON *cm, MACROBLOCKD *xd) {
- MODE_INFO *m = xd->mode_info_context;
- BLOCK_SIZE_TYPE sb_type = m->mbmi.sb_type;
- const int mis = cm->mode_info_stride;
- int n;
- if (sb_type == BLOCK_SIZE_SB64X64) {
- for (n = 0; n < 16; ++n) {
- int i = n >> 2;
- int j = n & 3;
- if (i == 0 && j == 0) continue;
- vpx_memcpy((m + j + mis * i)->mbmi.nzcs, m->mbmi.nzcs,
- 384 * sizeof(m->mbmi.nzcs[0]));
- }
- } else if (sb_type == BLOCK_SIZE_SB32X32) {
- for (n = 0; n < 4; ++n) {
- int i = n >> 1;
- int j = n & 1;
- if (i == 0 && j == 0) continue;
- vpx_memcpy((m + j + mis * i)->mbmi.nzcs, m->mbmi.nzcs,
- 384 * sizeof(m->mbmi.nzcs[0]));
- }
- }
-}
-#endif
-
-static void decode_16x16(VP9D_COMP *pbi, MACROBLOCKD *xd,
- BOOL_DECODER* const bc) {
+static void decode_16x16(MACROBLOCKD *xd) {
const TX_TYPE tx_type = get_tx_type_16x16(xd, 0);
vp9_dequant_iht_add_16x16_c(tx_type, xd->plane[0].qcoeff,
- xd->block[0].dequant, xd->dst.y_buffer,
- xd->dst.y_stride, xd->plane[0].eobs[0]);
+ xd->block[0].dequant, xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, xd->plane[0].eobs[0]);
vp9_dequant_idct_add_8x8(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
xd->plane[1].eobs[0]);
vp9_dequant_idct_add_8x8(xd->plane[2].qcoeff, xd->block[20].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
xd->plane[2].eobs[0]);
}
-static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
- BOOL_DECODER* const bc) {
+static void decode_8x8(MACROBLOCKD *xd) {
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
// luma
// if the first one is DCT_DCT assume all the rest are as well
@@ -277,7 +249,7 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
int16_t *q = BLOCK_OFFSET(xd->plane[0].qcoeff, idx, 16);
int16_t *dq = xd->block[0].dequant;
uint8_t *dst = *(xd->block[ib].base_dst) + xd->block[ib].dst;
- int stride = xd->dst.y_stride;
+ int stride = xd->plane[0].dst.stride;
if (mode == I8X8_PRED) {
BLOCKD *b = &xd->block[ib];
int i8x8mode = b->bmi.as_mode.first;
@@ -289,8 +261,8 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
} else {
vp9_dequant_idct_add_y_block_8x8(xd->plane[0].qcoeff,
- xd->block[0].dequant, xd->dst.y_buffer,
- xd->dst.y_stride, xd);
+ xd->block[0].dequant, xd->plane[0].dst.buf,
+ xd->plane[0].dst.stride, xd);
}
// chroma
@@ -317,16 +289,16 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
} else if (mode == SPLITMV) {
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride, xd->plane[1].eobs);
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride, xd->plane[2].eobs);
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride, xd->plane[2].eobs);
} else {
vp9_dequant_idct_add_8x8(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
xd->plane[1].eobs[0]);
vp9_dequant_idct_add_8x8(xd->plane[2].qcoeff, xd->block[16].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
xd->plane[2].eobs[0]);
}
}
@@ -347,8 +319,7 @@ static INLINE void dequant_add_y(MACROBLOCKD *xd, TX_TYPE tx_type, int idx) {
}
-static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
- BOOL_DECODER* const bc) {
+static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) {
TX_TYPE tx_type;
int i = 0;
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
@@ -386,7 +357,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->mode_info_context->bmi[i].as_mode.context = b->bmi.as_mode.context =
vp9_find_bpred_context(xd, b);
if (!xd->mode_info_context->mbmi.mb_skip_coeff)
- vp9_decode_coefs_4x4(pbi, xd, bc, PLANE_TYPE_Y_WITH_DC, i);
+ vp9_decode_coefs_4x4(pbi, xd, r, PLANE_TYPE_Y_WITH_DC, i);
#endif
vp9_intra4x4_predict(xd, b, b_mode, *(b->base_dst) + b->dst,
b->dst_stride);
@@ -395,31 +366,31 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
#if CONFIG_NEWBINTRAMODES
if (!xd->mode_info_context->mbmi.mb_skip_coeff)
- vp9_decode_mb_tokens_4x4_uv(pbi, xd, bc);
+ vp9_decode_mb_tokens_4x4_uv(pbi, xd, r);
#endif
vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16);
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride, xd->plane[1].eobs);
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride, xd->plane[2].eobs);
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride, xd->plane[2].eobs);
} else if (mode == SPLITMV || get_tx_type_4x4(xd, 0) == DCT_DCT) {
xd->itxm_add_y_block(xd->plane[0].qcoeff,
xd->block[0].dequant,
- xd->dst.y_buffer, xd->dst.y_stride, xd);
+ xd->plane[0].dst.buf, xd->plane[0].dst.stride, xd);
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride, xd->plane[1].eobs);
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride, xd->plane[2].eobs);
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride, xd->plane[2].eobs);
} else {
for (i = 0; i < 16; i++) {
tx_type = get_tx_type_4x4(xd, i);
dequant_add_y(xd, tx_type, i);
}
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
- xd->dst.u_buffer, xd->dst.uv_stride,
+ xd->plane[1].dst.buf, xd->plane[1].dst.stride,
xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
- xd->dst.v_buffer, xd->dst.uv_stride,
+ xd->plane[2].dst.buf, xd->plane[1].dst.stride,
xd->plane[2].eobs);
}
}
@@ -433,10 +404,11 @@ static INLINE void decode_sby_32x32(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < y_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 32) * mb->dst.y_stride + (x_idx * 32);
+ const int y_offset = (y_idx * 32) * mb->plane[0].dst.stride + (x_idx * 32);
vp9_dequant_idct_add_32x32(BLOCK_OFFSET(mb->plane[0].qcoeff, n, 1024),
mb->block[0].dequant ,
- mb->dst.y_buffer + y_offset, mb->dst.y_stride,
+ mb->plane[0].dst.buf + y_offset,
+ mb->plane[0].dst.stride,
mb->plane[0].eobs[n * 64]);
}
}
@@ -449,15 +421,18 @@ static INLINE void decode_sbuv_32x32(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < uv_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 32) * mb->dst.uv_stride + (x_idx * 32);
+ const int uv_offset = (y_idx * 32) * mb->plane[1].dst.stride +
+ (x_idx * 32);
vp9_dequant_idct_add_32x32(BLOCK_OFFSET(mb->plane[1].qcoeff, n, 1024),
mb->block[16].dequant,
- mb->dst.u_buffer + uv_offset,
- mb->dst.uv_stride, mb->plane[1].eobs[n * 64]);
+ mb->plane[1].dst.buf + uv_offset,
+ mb->plane[1].dst.stride,
+ mb->plane[1].eobs[n * 64]);
vp9_dequant_idct_add_32x32(BLOCK_OFFSET(mb->plane[2].qcoeff, n, 1024),
mb->block[20].dequant,
- mb->dst.v_buffer + uv_offset,
- mb->dst.uv_stride, mb->plane[2].eobs[n * 64]);
+ mb->plane[2].dst.buf + uv_offset,
+ mb->plane[1].dst.stride,
+ mb->plane[2].eobs[n * 64]);
}
}
@@ -470,14 +445,14 @@ static INLINE void decode_sby_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < y_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 16) * mb->dst.y_stride + (x_idx * 16);
+ const int y_offset = (y_idx * 16) * mb->plane[0].dst.stride + (x_idx * 16);
const TX_TYPE tx_type = get_tx_type_16x16(mb,
(y_idx * (4 * bw) + x_idx) * 4);
vp9_dequant_iht_add_16x16_c(tx_type,
BLOCK_OFFSET(mb->plane[0].qcoeff, n, 256),
mb->block[0].dequant,
- mb->dst.y_buffer + y_offset,
- mb->dst.y_stride,
+ mb->plane[0].dst.buf + y_offset,
+ mb->plane[0].dst.stride,
mb->plane[0].eobs[n * 16]);
}
}
@@ -493,14 +468,16 @@ static INLINE void decode_sbuv_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < uv_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 16) * mb->dst.uv_stride + (x_idx * 16);
+ const int uv_offset = (y_idx * 16) * mb->plane[1].dst.stride + (x_idx * 16);
vp9_dequant_idct_add_16x16(BLOCK_OFFSET(mb->plane[1].qcoeff, n, 256),
mb->block[16].dequant,
- mb->dst.u_buffer + uv_offset, mb->dst.uv_stride,
+ mb->plane[1].dst.buf + uv_offset,
+ mb->plane[1].dst.stride,
mb->plane[1].eobs[n * 16]);
vp9_dequant_idct_add_16x16(BLOCK_OFFSET(mb->plane[2].qcoeff, n, 256),
mb->block[20].dequant,
- mb->dst.v_buffer + uv_offset, mb->dst.uv_stride,
+ mb->plane[2].dst.buf + uv_offset,
+ mb->plane[1].dst.stride,
mb->plane[2].eobs[n * 16]);
}
}
@@ -515,14 +492,15 @@ static INLINE void decode_sby_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < y_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 8) * xd->dst.y_stride + (x_idx * 8);
+ const int y_offset = (y_idx * 8) * xd->plane[0].dst.stride + (x_idx * 8);
const TX_TYPE tx_type = get_tx_type_8x8(xd,
(y_idx * (2 * bw) + x_idx) * 2);
vp9_dequant_iht_add_8x8_c(tx_type,
BLOCK_OFFSET(xd->plane[0].qcoeff, n, 64),
xd->block[0].dequant,
- xd->dst.y_buffer + y_offset, xd->dst.y_stride,
+ xd->plane[0].dst.buf + y_offset,
+ xd->plane[0].dst.stride,
xd->plane[0].eobs[n * 4]);
}
}
@@ -537,14 +515,16 @@ static INLINE void decode_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < uv_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 8) * xd->dst.uv_stride + (x_idx * 8);
+ const int uv_offset = (y_idx * 8) * xd->plane[1].dst.stride + (x_idx * 8);
vp9_dequant_idct_add_8x8(BLOCK_OFFSET(xd->plane[1].qcoeff, n, 64),
xd->block[16].dequant,
- xd->dst.u_buffer + uv_offset, xd->dst.uv_stride,
+ xd->plane[1].dst.buf + uv_offset,
+ xd->plane[1].dst.stride,
xd->plane[1].eobs[n * 4]);
vp9_dequant_idct_add_8x8(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 64),
xd->block[20].dequant,
- xd->dst.v_buffer + uv_offset, xd->dst.uv_stride,
+ xd->plane[2].dst.buf + uv_offset,
+ xd->plane[1].dst.stride,
xd->plane[2].eobs[n * 4]);
}
}
@@ -558,18 +538,19 @@ static INLINE void decode_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < y_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 4) * xd->dst.y_stride + (x_idx * 4);
+ const int y_offset = (y_idx * 4) * xd->plane[0].dst.stride + (x_idx * 4);
const TX_TYPE tx_type = get_tx_type_4x4(xd, n);
if (tx_type == DCT_DCT) {
xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, n, 16),
xd->block[0].dequant,
- xd->dst.y_buffer + y_offset, xd->dst.y_stride,
+ xd->plane[0].dst.buf + y_offset, xd->plane[0].dst.stride,
xd->plane[0].eobs[n]);
} else {
vp9_dequant_iht_add_c(tx_type,
BLOCK_OFFSET(xd->plane[0].qcoeff, n, 16),
- xd->block[0].dequant, xd->dst.y_buffer + y_offset,
- xd->dst.y_stride, xd->plane[0].eobs[n]);
+ xd->block[0].dequant,
+ xd->plane[0].dst.buf + y_offset,
+ xd->plane[0].dst.stride, xd->plane[0].eobs[n]);
}
}
}
@@ -583,13 +564,15 @@ static INLINE void decode_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
for (n = 0; n < uv_count; n++) {
const int x_idx = n & (bw - 1);
const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 4) * xd->dst.uv_stride + (x_idx * 4);
+ const int uv_offset = (y_idx * 4) * xd->plane[1].dst.stride + (x_idx * 4);
xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, n, 16),
xd->block[16].dequant,
- xd->dst.u_buffer + uv_offset, xd->dst.uv_stride, xd->plane[1].eobs[n]);
+ xd->plane[1].dst.buf + uv_offset, xd->plane[1].dst.stride,
+ xd->plane[1].eobs[n]);
xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 16),
xd->block[20].dequant,
- xd->dst.v_buffer + uv_offset, xd->dst.uv_stride, xd->plane[2].eobs[n]);
+ xd->plane[2].dst.buf + uv_offset, xd->plane[1].dst.stride,
+ xd->plane[2].eobs[n]);
}
}
@@ -612,7 +595,7 @@ static void decode_sb_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
}
static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
- BOOL_DECODER* const bc, BLOCK_SIZE_TYPE bsize) {
+ vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize), bhl = mb_height_log2(bsize);
const int bw = 1 << bwl, bh = 1 << bhl;
int n, eobtotal;
@@ -635,16 +618,13 @@ static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
if (mi->mbmi.mb_skip_coeff) {
vp9_reset_sb_tokens_context(xd, bsize);
-#if CONFIG_CODE_NONZEROCOUNT
- vpx_memset(mi->mbmi.nzcs, 0, 384 * sizeof(mi->mbmi.nzcs[0]));
-#endif
} else {
// re-initialize macroblock dequantizer before detokenization
if (xd->segmentation_enabled)
mb_init_dequantizer(pbi, xd);
// dequantization and idct
- eobtotal = vp9_decode_tokens(pbi, xd, bc, bsize);
+ eobtotal = vp9_decode_tokens(pbi, xd, r, bsize);
if (eobtotal == 0) { // skip loopfilter
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
@@ -672,17 +652,13 @@ static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
}
}
}
-
-#if CONFIG_CODE_NONZEROCOUNT
- propagate_nzcs(&pbi->common, xd);
-#endif
}
// TODO(jingning): Need to merge SB and MB decoding. The MB decoding currently
// couples special handles on I8x8, B_PRED, and splitmv modes.
static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
int mb_row, int mb_col,
- BOOL_DECODER* const bc) {
+ vp9_reader *r) {
int eobtotal = 0;
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
const int tx_size = xd->mode_info_context->mbmi.txfm_size;
@@ -708,7 +684,7 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->mode_info_context->mbmi.mode, tx_size,
xd->mode_info_context->mbmi.interp_filter);
#endif
- vp9_build_inter_predictors_mb(xd, mb_row, mb_col);
+ vp9_build_inter_predictors_sb(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
}
if (xd->mode_info_context->mbmi.mb_skip_coeff) {
@@ -718,11 +694,11 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
if (xd->segmentation_enabled)
mb_init_dequantizer(pbi, xd);
- if (!bool_error(bc)) {
+ if (!vp9_reader_has_error(r)) {
#if CONFIG_NEWBINTRAMODES
if (mode != I4X4_PRED)
#endif
- eobtotal = vp9_decode_tokens(pbi, xd, bc, BLOCK_SIZE_MB16X16);
+ eobtotal = vp9_decode_tokens(pbi, xd, r, BLOCK_SIZE_MB16X16);
}
}
@@ -730,7 +706,7 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
mode != I4X4_PRED &&
mode != SPLITMV &&
mode != I8X8_PRED &&
- !bool_error(bc)) {
+ !vp9_reader_has_error(r)) {
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
} else {
#if 0 // def DEC_DEBUG
@@ -739,11 +715,11 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
#endif
if (tx_size == TX_16X16) {
- decode_16x16(pbi, xd, bc);
+ decode_16x16(xd);
} else if (tx_size == TX_8X8) {
- decode_8x8(pbi, xd, bc);
+ decode_8x8(xd);
} else {
- decode_4x4(pbi, xd, bc);
+ decode_4x4(pbi, xd, r);
}
}
@@ -761,21 +737,21 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
printf("final y\n");
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j++)
- printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
+ printf("%3d ", xd->plane[0].dst.buf[i * xd->plane[0].dst.stride + j]);
printf("\n");
}
printf("\n");
printf("final u\n");
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
- printf("%3d ", xd->dst.u_buffer[i * xd->dst.uv_stride + j]);
+ printf("%3d ", xd->plane[1].dst.buf[i * xd->plane[1].dst.stride + j]);
printf("\n");
}
printf("\n");
printf("final v\n");
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
- printf("%3d ", xd->dst.v_buffer[i * xd->dst.uv_stride + j]);
+ printf("%3d ", xd->plane[2].dst.buf[i * xd->plane[1].dst.stride + j]);
printf("\n");
}
fflush(stdout);
@@ -823,9 +799,9 @@ static void set_offsets(VP9D_COMP *pbi, BLOCK_SIZE_TYPE bsize,
set_mb_row(cm, xd, mb_row, bh);
set_mb_col(cm, xd, mb_col, bw);
- xd->dst.y_buffer = dst_fb->y_buffer + recon_yoffset;
- xd->dst.u_buffer = dst_fb->u_buffer + recon_uvoffset;
- xd->dst.v_buffer = dst_fb->v_buffer + recon_uvoffset;
+ xd->plane[0].dst.buf = dst_fb->y_buffer + recon_yoffset;
+ xd->plane[1].dst.buf = dst_fb->u_buffer + recon_uvoffset;
+ xd->plane[2].dst.buf = dst_fb->v_buffer + recon_uvoffset;
}
static void set_refs(VP9D_COMP *pbi, int mb_row, int mb_col) {
@@ -839,8 +815,8 @@ static void set_refs(VP9D_COMP *pbi, int mb_row, int mb_col) {
const YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[fb_idx];
xd->scale_factor[0] = cm->active_ref_scale[mbmi->ref_frame - 1];
xd->scale_factor_uv[0] = cm->active_ref_scale[mbmi->ref_frame - 1];
- setup_pred_block(&xd->pre, cfg, mb_row, mb_col,
- &xd->scale_factor[0], &xd->scale_factor_uv[0]);
+ setup_pre_planes(xd, cfg, NULL, mb_row, mb_col,
+ xd->scale_factor, xd->scale_factor_uv);
xd->corrupted |= cfg->corrupted;
if (mbmi->second_ref_frame > INTRA_FRAME) {
@@ -849,8 +825,8 @@ static void set_refs(VP9D_COMP *pbi, int mb_row, int mb_col) {
const YV12_BUFFER_CONFIG *second_cfg = &cm->yv12_fb[second_fb_idx];
xd->scale_factor[1] = cm->active_ref_scale[mbmi->second_ref_frame - 1];
xd->scale_factor_uv[1] = cm->active_ref_scale[mbmi->second_ref_frame - 1];
- setup_pred_block(&xd->second_pre, second_cfg, mb_row, mb_col,
- &xd->scale_factor[1], &xd->scale_factor_uv[1]);
+ setup_pre_planes(xd, NULL, second_cfg, mb_row, mb_col,
+ xd->scale_factor, xd->scale_factor_uv);
xd->corrupted |= second_cfg->corrupted;
}
}
@@ -870,7 +846,7 @@ static void decode_modes_b(VP9D_COMP *pbi, int mb_row, int mb_col,
else
decode_mb(pbi, xd, mb_row, mb_col, r);
- xd->corrupted |= bool_error(r);
+ xd->corrupted |= vp9_reader_has_error(r);
}
static void decode_modes_sb(VP9D_COMP *pbi, int mb_row, int mb_col,
@@ -961,7 +937,7 @@ static void setup_token_decoder(VP9D_COMP *pbi,
"Truncated packet or corrupt partition "
"%d length", 1);
- if (vp9_start_decode(r, data, partition_size))
+ if (vp9_reader_init(r, data, partition_size))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder %d", 1);
}
@@ -986,45 +962,35 @@ static void init_frame(VP9D_COMP *pbi) {
xd->corrupted = 0;
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void read_nzc_probs_common(VP9_COMMON *cm,
- BOOL_DECODER* const bc,
+#if CONFIG_CODE_ZEROGROUP
+static void read_zpc_probs_common(VP9_COMMON *cm,
+ vp9_reader* bc,
TX_SIZE tx_size) {
- int c, r, b, t;
- int tokens, nodes;
- vp9_prob *nzc_probs;
- vp9_prob upd;
-
- if (!get_nzc_used(tx_size)) return;
+ int r, b, p, n;
+ vp9_zpc_probs *zpc_probs;
+ vp9_prob upd = ZPC_UPDATE_PROB;
+ if (!get_zpc_used(tx_size)) return;
if (!vp9_read_bit(bc)) return;
if (tx_size == TX_32X32) {
- tokens = NZC32X32_TOKENS;
- nzc_probs = cm->fc.nzc_probs_32x32[0][0][0];
- upd = NZC_UPDATE_PROB_32X32;
+ zpc_probs = &cm->fc.zpc_probs_32x32;
} else if (tx_size == TX_16X16) {
- tokens = NZC16X16_TOKENS;
- nzc_probs = cm->fc.nzc_probs_16x16[0][0][0];
- upd = NZC_UPDATE_PROB_16X16;
+ zpc_probs = &cm->fc.zpc_probs_16x16;
} else if (tx_size == TX_8X8) {
- tokens = NZC8X8_TOKENS;
- nzc_probs = cm->fc.nzc_probs_8x8[0][0][0];
- upd = NZC_UPDATE_PROB_8X8;
+ zpc_probs = &cm->fc.zpc_probs_8x8;
} else {
- tokens = NZC4X4_TOKENS;
- nzc_probs = cm->fc.nzc_probs_4x4[0][0][0];
- upd = NZC_UPDATE_PROB_4X4;
+ zpc_probs = &cm->fc.zpc_probs_4x4;
}
- nodes = tokens - 1;
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (r = 0; r < REF_TYPES; ++r) {
- for (b = 0; b < BLOCK_TYPES; ++b) {
- int offset = c * REF_TYPES * BLOCK_TYPES + r * BLOCK_TYPES + b;
- int offset_nodes = offset * nodes;
- for (t = 0; t < nodes; ++t) {
- vp9_prob *p = &nzc_probs[offset_nodes + t];
+ for (r = 0; r < REF_TYPES; ++r) {
+ for (b = 0; b < ZPC_BANDS; ++b) {
+ for (p = 0; p < ZPC_PTOKS; ++p) {
+ for (n = 0; n < ZPC_NODES; ++n) {
+ vp9_prob *q = &(*zpc_probs)[r][b][p][n];
+#if USE_ZPC_EXTRA == 0
+ if (n == 1) continue;
+#endif
if (vp9_read(bc, upd)) {
- *p = read_prob_diff_update(bc, *p);
+ *q = read_prob_diff_update(bc, *q);
}
}
}
@@ -1032,45 +998,20 @@ static void read_nzc_probs_common(VP9_COMMON *cm,
}
}
-static void read_nzc_pcat_probs(VP9_COMMON *cm, BOOL_DECODER* const bc) {
- int c, t, b;
- vp9_prob upd = NZC_UPDATE_PROB_PCAT;
- if (!(get_nzc_used(TX_4X4) || get_nzc_used(TX_8X8) ||
- get_nzc_used(TX_16X16) || get_nzc_used(TX_32X32)))
- return;
- if (!vp9_read_bit(bc)) {
- return;
- }
- for (c = 0; c < MAX_NZC_CONTEXTS; ++c) {
- for (t = 0; t < NZC_TOKENS_EXTRA; ++t) {
- int bits = vp9_extranzcbits[t + NZC_TOKENS_NOEXTRA];
- for (b = 0; b < bits; ++b) {
- vp9_prob *p = &cm->fc.nzc_pcat_probs[c][t][b];
- if (vp9_read(bc, upd)) {
- *p = read_prob_diff_update(bc, *p);
- }
- }
- }
- }
-}
-
-static void read_nzc_probs(VP9_COMMON *cm,
- BOOL_DECODER* const bc) {
- read_nzc_probs_common(cm, bc, TX_4X4);
+static void read_zpc_probs(VP9_COMMON *cm,
+ vp9_reader* bc) {
+ read_zpc_probs_common(cm, bc, TX_4X4);
if (cm->txfm_mode != ONLY_4X4)
- read_nzc_probs_common(cm, bc, TX_8X8);
+ read_zpc_probs_common(cm, bc, TX_8X8);
if (cm->txfm_mode > ALLOW_8X8)
- read_nzc_probs_common(cm, bc, TX_16X16);
+ read_zpc_probs_common(cm, bc, TX_16X16);
if (cm->txfm_mode > ALLOW_16X16)
- read_nzc_probs_common(cm, bc, TX_32X32);
-#ifdef NZC_PCAT_UPDATE
- read_nzc_pcat_probs(cm, bc);
-#endif
+ read_zpc_probs_common(cm, bc, TX_32X32);
}
-#endif // CONFIG_CODE_NONZEROCOUNT
+#endif // CONFIG_CODE_ZEROGROUP
static void read_coef_probs_common(VP9D_COMP *pbi,
- BOOL_DECODER* const bc,
+ vp9_reader *r,
vp9_coeff_probs *coef_probs,
TX_SIZE tx_size) {
#if CONFIG_MODELCOEFPROB && MODEL_BASED_UPDATE
@@ -1081,24 +1022,20 @@ static void read_coef_probs_common(VP9D_COMP *pbi,
int i, j, k, l, m;
- if (vp9_read_bit(bc)) {
+ if (vp9_read_bit(r)) {
for (i = 0; i < BLOCK_TYPES; i++) {
for (j = 0; j < REF_TYPES; j++) {
for (k = 0; k < COEF_BANDS; k++) {
for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
-#if CONFIG_CODE_NONZEROCOUNT
- const int mstart = get_nzc_used(tx_size);
-#else
const int mstart = 0;
-#endif
if (l >= 3 && k == 0)
continue;
for (m = mstart; m < entropy_nodes_update; m++) {
vp9_prob *const p = coef_probs[i][j][k][l] + m;
- if (vp9_read(bc, vp9_coef_update_prob[m])) {
- *p = read_prob_diff_update(bc, *p);
+ if (vp9_read(r, vp9_coef_update_prob[m])) {
+ *p = read_prob_diff_update(r, *p);
#if CONFIG_MODELCOEFPROB && MODEL_BASED_UPDATE
if (m == UNCONSTRAINED_NODES - 1)
vp9_get_model_distribution(*p, coef_probs[i][j][k][l], i, j);
@@ -1112,19 +1049,20 @@ static void read_coef_probs_common(VP9D_COMP *pbi,
}
}
-static void read_coef_probs(VP9D_COMP *pbi, BOOL_DECODER* const bc) {
- VP9_COMMON *const pc = &pbi->common;
+static void read_coef_probs(VP9D_COMP *pbi, vp9_reader *r) {
+ const TXFM_MODE mode = pbi->common.txfm_mode;
+ FRAME_CONTEXT *const fc = &pbi->common.fc;
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_4x4, TX_4X4);
+ read_coef_probs_common(pbi, r, fc->coef_probs_4x4, TX_4X4);
- if (pbi->common.txfm_mode != ONLY_4X4)
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_8x8, TX_8X8);
+ if (mode != ONLY_4X4)
+ read_coef_probs_common(pbi, r, fc->coef_probs_8x8, TX_8X8);
- if (pbi->common.txfm_mode > ALLOW_8X8)
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_16x16, TX_16X16);
+ if (mode > ALLOW_8X8)
+ read_coef_probs_common(pbi, r, fc->coef_probs_16x16, TX_16X16);
- if (pbi->common.txfm_mode > ALLOW_16X16)
- read_coef_probs_common(pbi, bc, pc->fc.coef_probs_32x32, TX_32X32);
+ if (mode > ALLOW_16X16)
+ read_coef_probs_common(pbi, r, fc->coef_probs_32x32, TX_32X32);
}
static void update_frame_size(VP9D_COMP *pbi) {
@@ -1149,42 +1087,43 @@ static void update_frame_size(VP9D_COMP *pbi) {
static void setup_segmentation(VP9_COMMON *pc, MACROBLOCKD *xd, vp9_reader *r) {
int i, j;
+ xd->update_mb_segmentation_map = 0;
+ xd->update_mb_segmentation_data = 0;
+
xd->segmentation_enabled = vp9_read_bit(r);
if (xd->segmentation_enabled) {
- // Read whether or not the segmentation map is being explicitly updated
- // this frame.
+ // Segmentation map update
xd->update_mb_segmentation_map = vp9_read_bit(r);
-
if (xd->update_mb_segmentation_map) {
- // Which macro block level features are enabled. Read the probs used to
- // decode the segment id for each macro block.
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
- xd->mb_segment_tree_probs[i] = vp9_read_bit(r) ? vp9_read_prob(r) : 255;
+ xd->mb_segment_tree_probs[i] = vp9_read_bit(r) ? vp9_read_prob(r)
+ : MAX_PROB;
- // Read the prediction probs needed to decode the segment id
pc->temporal_update = vp9_read_bit(r);
if (pc->temporal_update) {
const vp9_prob *p = xd->mb_segment_tree_probs;
- vp9_prob *p_mod = xd->mb_segment_mispred_tree_probs;
+ vp9_prob *mispred_p = xd->mb_segment_mispred_tree_probs;
const int c0 = p[0] * p[1];
const int c1 = p[0] * (256 - p[1]);
const int c2 = (256 - p[0]) * p[2];
const int c3 = (256 - p[0]) * (256 - p[2]);
- p_mod[0] = get_binary_prob(c1, c2 + c3);
- p_mod[1] = get_binary_prob(c0, c2 + c3);
- p_mod[2] = get_binary_prob(c0 + c1, c3);
- p_mod[3] = get_binary_prob(c0 + c1, c2);
+ mispred_p[0] = get_binary_prob(c1, c2 + c3);
+ mispred_p[1] = get_binary_prob(c0, c2 + c3);
+ mispred_p[2] = get_binary_prob(c0 + c1, c3);
+ mispred_p[3] = get_binary_prob(c0 + c1, c2);
for (i = 0; i < PREDICTION_PROBS; i++)
- pc->segment_pred_probs[i] = vp9_read_bit(r) ? vp9_read_prob(r) : 255;
+ pc->segment_pred_probs[i] = vp9_read_bit(r) ? vp9_read_prob(r)
+ : MAX_PROB;
} else {
for (i = 0; i < PREDICTION_PROBS; i++)
- pc->segment_pred_probs[i] = 255;
+ pc->segment_pred_probs[i] = MAX_PROB;
}
}
+ // Segmentation data update
xd->update_mb_segmentation_data = vp9_read_bit(r);
if (xd->update_mb_segmentation_data) {
xd->mb_segment_abs_delta = vp9_read_bit(r);
@@ -1343,7 +1282,7 @@ static const uint8_t *setup_frame_size(VP9D_COMP *pbi, int scaling_active,
return data;
}
-static void update_frame_context(VP9D_COMP *pbi, vp9_reader *r) {
+static void update_frame_context(VP9D_COMP *pbi) {
FRAME_CONTEXT *const fc = &pbi->common.fc;
vp9_copy(fc->pre_coef_probs_4x4, fc->coef_probs_4x4);
@@ -1381,29 +1320,22 @@ static void update_frame_context(VP9D_COMP *pbi, vp9_reader *r) {
vp9_zero(fc->interintra_counts);
#endif
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_copy(fc->pre_nzc_probs_4x4, fc->nzc_probs_4x4);
- vp9_copy(fc->pre_nzc_probs_8x8, fc->nzc_probs_8x8);
- vp9_copy(fc->pre_nzc_probs_16x16, fc->nzc_probs_16x16);
- vp9_copy(fc->pre_nzc_probs_32x32, fc->nzc_probs_32x32);
- vp9_copy(fc->pre_nzc_pcat_probs, fc->nzc_pcat_probs);
-
- vp9_zero(fc->nzc_counts_4x4);
- vp9_zero(fc->nzc_counts_8x8);
- vp9_zero(fc->nzc_counts_16x16);
- vp9_zero(fc->nzc_counts_32x32);
- vp9_zero(fc->nzc_pcat_counts);
-#endif
+#if CONFIG_CODE_ZEROGROUP
+ vp9_copy(fc->pre_zpc_probs_4x4, fc->zpc_probs_4x4);
+ vp9_copy(fc->pre_zpc_probs_8x8, fc->zpc_probs_8x8);
+ vp9_copy(fc->pre_zpc_probs_16x16, fc->zpc_probs_16x16);
+ vp9_copy(fc->pre_zpc_probs_32x32, fc->zpc_probs_32x32);
- read_coef_probs(pbi, r);
-#if CONFIG_CODE_NONZEROCOUNT
- read_nzc_probs(&pbi->common, r);
+ vp9_zero(fc->zpc_counts_4x4);
+ vp9_zero(fc->zpc_counts_8x8);
+ vp9_zero(fc->zpc_counts_16x16);
+ vp9_zero(fc->zpc_counts_32x32);
#endif
}
static void decode_tiles(VP9D_COMP *pbi,
const uint8_t *data, int first_partition_size,
- BOOL_DECODER *header_bc, BOOL_DECODER *residual_bc) {
+ vp9_reader *header_bc, vp9_reader *residual_bc) {
VP9_COMMON *const pc = &pbi->common;
const uint8_t *data_ptr = data + first_partition_size;
@@ -1429,7 +1361,7 @@ static void decode_tiles(VP9D_COMP *pbi,
if (pbi->oxcf.inv_tile_order) {
const int n_cols = pc->tile_columns;
const uint8_t *data_ptr2[4][1 << 6];
- BOOL_DECODER UNINITIALIZED_IS_SAFE(bc_bak);
+ vp9_reader UNINITIALIZED_IS_SAFE(bc_bak);
// pre-initialize the offsets, we're going to read in inverse order
data_ptr2[0][0] = data_ptr;
@@ -1483,7 +1415,7 @@ static void decode_tiles(VP9D_COMP *pbi,
}
int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
- BOOL_DECODER header_bc, residual_bc;
+ vp9_reader header_bc, residual_bc;
VP9_COMMON *const pc = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
const uint8_t *data = pbi->source;
@@ -1541,7 +1473,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
pc->width, pc->height,
VP9BORDERINPIXELS);
- if (vp9_start_decode(&header_bc, data, first_partition_size))
+ if (vp9_reader_init(&header_bc, data, first_partition_size))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
@@ -1549,17 +1481,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
pc->clamp_type = (CLAMP_TYPE)vp9_read_bit(&header_bc);
pc->error_resilient_mode = vp9_read_bit(&header_bc);
- setup_segmentation(pc, xd, &header_bc);
-
- setup_pred_probs(pc, &header_bc);
-
xd->lossless = vp9_read_bit(&header_bc);
- pc->txfm_mode = xd->lossless ? ONLY_4X4 : read_txfm_mode(&header_bc);
- if (pc->txfm_mode == TX_MODE_SELECT) {
- pc->prob_tx[0] = vp9_read_prob(&header_bc);
- pc->prob_tx[1] = vp9_read_prob(&header_bc);
- pc->prob_tx[2] = vp9_read_prob(&header_bc);
- }
setup_loopfilter(pc, xd, &header_bc);
@@ -1625,6 +1547,17 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
vpx_memcpy(&pc->fc, &pc->frame_contexts[pc->frame_context_idx],
sizeof(pc->fc));
+ setup_segmentation(pc, xd, &header_bc);
+
+ setup_pred_probs(pc, &header_bc);
+
+ pc->txfm_mode = xd->lossless ? ONLY_4X4 : read_txfm_mode(&header_bc);
+ if (pc->txfm_mode == TX_MODE_SELECT) {
+ pc->prob_tx[0] = vp9_read_prob(&header_bc);
+ pc->prob_tx[1] = vp9_read_prob(&header_bc);
+ pc->prob_tx[2] = vp9_read_prob(&header_bc);
+ }
+
// Read inter mode probability context updates
if (pc->frame_type != KEY_FRAME) {
int i, j;
@@ -1639,25 +1572,6 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
}
#endif
-#if CONFIG_NEW_MVREF
- // If Key frame reset mv ref id probabilities to defaults
- if (pc->frame_type != KEY_FRAME) {
- // Read any mv_ref index probability updates
- int i, j;
-
- for (i = 0; i < MAX_REF_FRAMES; ++i) {
- // Skip the dummy entry for intra ref frame.
- if (i == INTRA_FRAME)
- continue;
-
- // Read any updates to probabilities
- for (j = 0; j < MAX_MV_REF_CANDIDATES - 1; ++j)
- if (vp9_read(&header_bc, VP9_MVREF_UPDATE_PROB))
- xd->mb_mv_ref_probs[i][j] = vp9_read_prob(&header_bc);
- }
- }
-#endif
-
if (0) {
FILE *z = fopen("decodestats.stt", "a");
fprintf(z, "%6d F:%d,R:%d,Q:%d\n",
@@ -1668,13 +1582,17 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
fclose(z);
}
- update_frame_context(pbi, &header_bc);
+ update_frame_context(pbi);
+
+ read_coef_probs(pbi, &header_bc);
+#if CONFIG_CODE_ZEROGROUP
+ read_zpc_probs(&pbi->common, &header_bc);
+#endif
// Initialize xd pointers. Any reference should do for xd->pre, so use 0.
- vpx_memcpy(&xd->pre, &pc->yv12_fb[pc->active_ref_idx[0]],
- sizeof(YV12_BUFFER_CONFIG));
- vpx_memcpy(&xd->dst, &pc->yv12_fb[pc->new_fb_idx],
- sizeof(YV12_BUFFER_CONFIG));
+ setup_pre_planes(xd, &pc->yv12_fb[pc->active_ref_idx[0]], NULL,
+ 0, 0, NULL, NULL);
+ setup_dst_planes(xd, &pc->yv12_fb[pc->new_fb_idx], 0, 0);
// Create the segmentation map structure and set to 0
if (!pc->last_frame_seg_map)
@@ -1706,7 +1624,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
// Collect information about decoder corruption.
// 1. Check first boolean decoder for errors.
// 2. Check the macroblock information
- pc->yv12_fb[pc->new_fb_idx].corrupted = bool_error(&header_bc) |
+ pc->yv12_fb[pc->new_fb_idx].corrupted = vp9_reader_has_error(&header_bc) |
corrupt_tokens;
if (!pbi->decoded_key_frame) {
@@ -1719,8 +1637,8 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
if (!pc->error_resilient_mode && !pc->frame_parallel_decoding_mode) {
vp9_adapt_coef_probs(pc);
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_adapt_nzc_probs(pc);
+#if CONFIG_CODE_ZEROGROUP
+ vp9_adapt_zpc_probs(pc);
#endif
}