Diffstat (limited to 'vp9')
-rw-r--r--  vp9/common/vp9_entropy.c       |   2
-rw-r--r--  vp9/common/vp9_entropy.h       |   5
-rw-r--r--  vp9/common/vp9_seg_common.c    |   2
-rw-r--r--  vp9/decoder/vp9_decodemv.c     |   7
-rw-r--r--  vp9/decoder/vp9_decodframe.c   | 233
-rw-r--r--  vp9/encoder/vp9_bitstream.c    |   8
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  |   3
-rw-r--r--  vp9/encoder/vp9_onyx_if.c      |   4
-rw-r--r--  vp9/encoder/vp9_tokenize.c     |  82
-rw-r--r--  vp9/encoder/vp9_tokenize.h     |   2
10 files changed, 81 insertions, 267 deletions
diff --git a/vp9/common/vp9_entropy.c b/vp9/common/vp9_entropy.c
index 16ef14fec..faa45bd5f 100644
--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -1555,6 +1555,7 @@ static void adapt_coef_probs(vp9_coeff_probs *dst_coef_probs,
}
void vp9_adapt_coef_probs(VP9_COMMON *cm) {
+#ifndef DISABLE_COEF_ADAPT
int count_sat;
int update_factor; /* denominator 256 */
@@ -1585,6 +1586,7 @@ void vp9_adapt_coef_probs(VP9_COMMON *cm) {
cm->base_qindex, cm->fc.coef_counts_32x32,
cm->fc.eob_branch_counts[TX_32X32],
count_sat, update_factor);
+#endif
}
#if CONFIG_CODE_ZEROGROUP
diff --git a/vp9/common/vp9_entropy.h b/vp9/common/vp9_entropy.h
index 34cf14552..f12ee95c3 100644
--- a/vp9/common/vp9_entropy.h
+++ b/vp9/common/vp9_entropy.h
@@ -84,6 +84,9 @@ extern vp9_extra_bit vp9_extra_bits[12]; /* indexed by token value */
/*# define DC_TOKEN_CONTEXTS 3*/ /* 00, 0!0, !0!0 */
#define PREV_COEF_CONTEXTS 6
+// Disables backward coef probs adaption
+// #define DISABLE_COEF_ADAPT
+
typedef unsigned int vp9_coeff_count[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
[MAX_ENTROPY_TOKENS];
typedef unsigned int vp9_coeff_stats[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS]
@@ -163,7 +166,7 @@ const int *vp9_get_coef_neighbors_handle(const int *scan, int *pad);
// if model-based how many nodes are unconstrained
#define UNCONSTRAINED_UPDATE_NODES 3
// whether backward updates are model-based
-#define MODEL_BASED_ADAPT 0
+#define MODEL_BASED_ADAPT 1
#define UNCONSTRAINED_ADAPT_NODES 3
typedef vp9_prob vp9_coeff_probs_model[REF_TYPES][COEF_BANDS]
diff --git a/vp9/common/vp9_seg_common.c b/vp9/common/vp9_seg_common.c
index 4c913e28f..9ed3e2d5b 100644
--- a/vp9/common/vp9_seg_common.c
+++ b/vp9/common/vp9_seg_common.c
@@ -99,7 +99,7 @@ void vp9_implicit_segment_map_update(VP9_COMMON * cm) {
mi = mi_ptr;
// Experimental use of tx size to define implicit segmentation
for (col = 0; col < cm->mb_cols; ++col, ++mi) {
- map_ptr[col] = mi->mbmi.txfm_size;
+ map_ptr[col] = 1 + mi->mbmi.txfm_size;
}
mi_ptr += cm->mode_info_stride;
map_ptr += cm->mb_cols;
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index fbb7dbd6c..c51d0b243 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -475,7 +475,7 @@ static void mb_mode_mv_init(VP9D_COMP *pbi, vp9_reader *r) {
} else {
nmv_context *const nmvc = &pbi->common.fc.nmvc;
MACROBLOCKD *const xd = &pbi->mb;
- int i;
+ int i, j;
if (cm->mcomp_filter_type == SWITCHABLE)
read_switchable_interp_probs(pbi, r);
@@ -509,6 +509,11 @@ static void mb_mode_mv_init(VP9D_COMP *pbi, vp9_reader *r) {
for (i = 0; i < VP9_I32X32_MODES - 1; ++i)
cm->fc.sb_ymode_prob[i] = vp9_read_prob(r);
+ for (j = 0; j < NUM_PARTITION_CONTEXTS; ++j)
+ if (vp9_read_bit(r))
+ for (i = 0; i < PARTITION_TYPES - 1; ++i)
+ cm->fc.partition_prob[j][i] = vp9_read_prob(r);
+
read_nmvprobs(r, nmvc, xd->allow_high_precision_mv);
}
}
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index c037d4b10..9e5c341af 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -383,183 +383,60 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) {
}
}
-static INLINE void decode_sby_32x32(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
- const int bwl = b_width_log2(bsize) - 3, bw = 1 << bwl;
- const int bhl = b_height_log2(bsize) - 3, bh = 1 << bhl;
- const int y_count = bw * bh;
- int n;
-
- for (n = 0; n < y_count; n++) {
- const int x_idx = n & (bw - 1);
- const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 32) * mb->plane[0].dst.stride + (x_idx * 32);
- vp9_idct_add_32x32(BLOCK_OFFSET(mb->plane[0].qcoeff, n, 1024),
- mb->plane[0].dst.buf + y_offset,
- mb->plane[0].dst.stride,
- mb->plane[0].eobs[n * 64]);
- }
-}
-
-static INLINE void decode_sbuv_32x32(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
- const int bwl = b_width_log2(bsize) - 3, bw = (1 << bwl) / 2;
- const int bhl = b_height_log2(bsize) - 3, bh = (1 << bhl) / 2;
- const int uv_count = bw * bh;
- int n;
- for (n = 0; n < uv_count; n++) {
- const int x_idx = n & (bw - 1);
- const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 32) * mb->plane[1].dst.stride +
- (x_idx * 32);
- vp9_idct_add_32x32(BLOCK_OFFSET(mb->plane[1].qcoeff, n, 1024),
- mb->plane[1].dst.buf + uv_offset,
- mb->plane[1].dst.stride,
- mb->plane[1].eobs[n * 64]);
- vp9_idct_add_32x32(BLOCK_OFFSET(mb->plane[2].qcoeff, n, 1024),
- mb->plane[2].dst.buf + uv_offset,
- mb->plane[1].dst.stride,
- mb->plane[2].eobs[n * 64]);
- }
-}
-
-static INLINE void decode_sby_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
- const int bwl = b_width_log2(bsize) - 2, bw = 1 << bwl;
- const int bhl = b_height_log2(bsize) - 2, bh = 1 << bhl;
- const int y_count = bw * bh;
- int n;
-
- for (n = 0; n < y_count; n++) {
- const int x_idx = n & (bw - 1);
- const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 16) * mb->plane[0].dst.stride + (x_idx * 16);
- const TX_TYPE tx_type = get_tx_type_16x16(mb,
- (y_idx * (4 * bw) + x_idx) * 4);
- vp9_iht_add_16x16_c(tx_type, BLOCK_OFFSET(mb->plane[0].qcoeff, n, 256),
- mb->plane[0].dst.buf + y_offset,
- mb->plane[0].dst.stride, mb->plane[0].eobs[n * 16]);
- }
-}
-
-static INLINE void decode_sbuv_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
- const int bwl = b_width_log2(bsize) - 2, bw = (1 << bwl) / 2;
- const int bhl = b_height_log2(bsize) - 2, bh = (1 << bhl) / 2;
- const int uv_count = bw * bh;
- int n;
-
- assert(bsize >= BLOCK_SIZE_SB32X32);
-
- for (n = 0; n < uv_count; n++) {
- const int x_idx = n & (bw - 1);
- const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 16) * mb->plane[1].dst.stride + (x_idx * 16);
- vp9_idct_add_16x16(BLOCK_OFFSET(mb->plane[1].qcoeff, n, 256),
- mb->plane[1].dst.buf + uv_offset,
- mb->plane[1].dst.stride, mb->plane[1].eobs[n * 16]);
- vp9_idct_add_16x16(BLOCK_OFFSET(mb->plane[2].qcoeff, n, 256),
- mb->plane[2].dst.buf + uv_offset,
- mb->plane[1].dst.stride, mb->plane[2].eobs[n * 16]);
- }
-}
-
-static INLINE void decode_sby_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
- const int bwl = b_width_log2(bsize) - 1, bw = 1 << bwl;
- const int bhl = b_height_log2(bsize) - 1, bh = 1 << bhl;
- const int y_count = bw * bh;
- int n;
-
- // luma
- for (n = 0; n < y_count; n++) {
- const int x_idx = n & (bw - 1);
- const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 8) * xd->plane[0].dst.stride + (x_idx * 8);
- const TX_TYPE tx_type = get_tx_type_8x8(xd,
- (y_idx * (2 * bw) + x_idx) * 2);
-
- vp9_iht_add_8x8_c(tx_type, BLOCK_OFFSET(xd->plane[0].qcoeff, n, 64),
- xd->plane[0].dst.buf + y_offset, xd->plane[0].dst.stride,
- xd->plane[0].eobs[n * 4]);
- }
-}
-
-static INLINE void decode_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
- const int bwl = b_width_log2(bsize) - 1, bw = 1 << (bwl - 1);
- const int bhl = b_height_log2(bsize) - 1, bh = 1 << (bhl - 1);
- const int uv_count = bw * bh;
- int n;
-
- // chroma
- for (n = 0; n < uv_count; n++) {
- const int x_idx = n & (bw - 1);
- const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 8) * xd->plane[1].dst.stride + (x_idx * 8);
- vp9_idct_add_8x8(BLOCK_OFFSET(xd->plane[1].qcoeff, n, 64),
- xd->plane[1].dst.buf + uv_offset, xd->plane[1].dst.stride,
- xd->plane[1].eobs[n * 4]);
- vp9_idct_add_8x8(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 64),
- xd->plane[2].dst.buf + uv_offset, xd->plane[1].dst.stride,
- xd->plane[2].eobs[n * 4]);
- }
+static int txfrm_block_to_raster_block(MACROBLOCKD *xd,
+ BLOCK_SIZE_TYPE bsize,
+ int plane, int block,
+ int ss_txfrm_size) {
+ const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+ const int txwl = ss_txfrm_size / 2;
+ const int tx_cols_lg2 = bwl - txwl;
+ const int tx_cols = 1 << tx_cols_lg2;
+ const int raster_mb = block >> ss_txfrm_size;
+ const int x = (raster_mb & (tx_cols - 1)) << (txwl);
+ const int y = raster_mb >> tx_cols_lg2 << (txwl);
+ return x + (y << bwl);
}
-static INLINE void decode_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
- const int bwl = b_width_log2(bsize), bw = 1 << bwl;
- const int bhl = b_height_log2(bsize), bh = 1 << bhl;
- const int y_count = bw * bh;
- int n;
- for (n = 0; n < y_count; n++) {
- const int x_idx = n & (bw - 1);
- const int y_idx = n >> bwl;
- const int y_offset = (y_idx * 4) * xd->plane[0].dst.stride + (x_idx * 4);
- const TX_TYPE tx_type = get_tx_type_4x4(xd, n);
- if (tx_type == DCT_DCT) {
- xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, n, 16),
- xd->plane[0].dst.buf + y_offset, xd->plane[0].dst.stride,
- xd->plane[0].eobs[n]);
- } else {
- vp9_iht_add_c(tx_type, BLOCK_OFFSET(xd->plane[0].qcoeff, n, 16),
- xd->plane[0].dst.buf + y_offset, xd->plane[0].dst.stride,
- xd->plane[0].eobs[n]);
- }
- }
-}
+static void decode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
+ int ss_txfrm_size, void *arg) {
+ MACROBLOCKD* const xd = arg;
+ int16_t* const qcoeff = BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16);
+ const int stride = xd->plane[plane].dst.stride;
+ const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
+ block, ss_txfrm_size);
+ uint8_t* const dst = raster_block_offset_uint8(xd, bsize, plane,
+ raster_block,
+ xd->plane[plane].dst.buf,
+ stride);
-static INLINE void decode_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
- const int bwl = b_width_log2(bsize), bw = 1 << (bwl - 1);
- const int bhl = b_height_log2(bsize), bh = 1 << (bhl - 1);
- const int uv_count = bw * bh;
- int n;
+ TX_TYPE tx_type;
- for (n = 0; n < uv_count; n++) {
- const int x_idx = n & (bw - 1);
- const int y_idx = n >> (bwl - 1);
- const int uv_offset = (y_idx * 4) * xd->plane[1].dst.stride + (x_idx * 4);
- xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, n, 16),
- xd->plane[1].dst.buf + uv_offset, xd->plane[1].dst.stride,
- xd->plane[1].eobs[n]);
- xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 16),
- xd->plane[2].dst.buf + uv_offset, xd->plane[1].dst.stride,
- xd->plane[2].eobs[n]);
+ switch (ss_txfrm_size / 2) {
+ case TX_4X4:
+ tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT;
+ if (tx_type == DCT_DCT)
+ xd->itxm_add(qcoeff, dst, stride, xd->plane[plane].eobs[block]);
+ else
+ vp9_iht_add_c(tx_type, qcoeff, dst, stride,
+ xd->plane[plane].eobs[block]);
+ break;
+ case TX_8X8:
+ tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT;
+ vp9_iht_add_8x8_c(tx_type, qcoeff, dst, stride,
+ xd->plane[plane].eobs[block]);
+ break;
+ case TX_16X16:
+ tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT;
+ vp9_iht_add_16x16_c(tx_type, qcoeff, dst, stride,
+ xd->plane[plane].eobs[block]);
+ break;
+ case TX_32X32:
+ vp9_idct_add_32x32(qcoeff, dst, stride, xd->plane[plane].eobs[block]);
+ break;
}
}
-// TODO(jingning): combine luma and chroma dequantization and inverse
-// transform into a single function looping over planes.
-static void decode_sb_32x32(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
- decode_sby_32x32(mb, bsize);
- if (bsize == BLOCK_SIZE_SB64X64)
- decode_sbuv_32x32(mb, bsize);
- else
- decode_sbuv_16x16(mb, bsize);
-}
-
-static void decode_sb_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
- decode_sby_16x16(mb, bsize);
- if (bsize >= BLOCK_SIZE_SB32X32)
- decode_sbuv_16x16(mb, bsize);
- else
- decode_sbuv_8x8(mb, bsize);
-}
-
static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mi_row, int mi_col,
vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
const int bwl = mi_width_log2(bsize), bhl = mi_height_log2(bsize);
@@ -600,23 +477,7 @@ static void decode_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, int mi_row, int mi_col,
mi[y_idx * mis + x_idx].mbmi.mb_skip_coeff = 1;
}
} else {
- switch (mbmi->txfm_size) {
- case TX_32X32:
- decode_sb_32x32(xd, bsize);
- break;
- case TX_16X16:
- decode_sb_16x16(xd, bsize);
- break;
- case TX_8X8:
- decode_sby_8x8(xd, bsize);
- decode_sbuv_8x8(xd, bsize);
- break;
- case TX_4X4:
- decode_sby_4x4(xd, bsize);
- decode_sbuv_4x4(xd, bsize);
- break;
- default: assert(0);
- }
+ foreach_transformed_block(xd, bsize, decode_block, xd);
}
}
}
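
Note on the vp9_decodframe.c change: the per-transform-size decode helpers are collapsed into a single decode_block() callback driven by foreach_transformed_block(). The only non-obvious piece is the index math in txfrm_block_to_raster_block(), which maps a transform-block offset (counted in 4x4 coefficient units) to the raster index of its top-left 4x4 block. The standalone sketch below is not part of the patch; it stubs b_width_log2() and the subsampling with one assumed configuration (a 64x64 luma plane, so bwl == 4, with 16x16 transforms, so ss_txfrm_size == 4) just to show the mapping.

#include <stdio.h>

/* Standalone sketch reproducing the index math of txfrm_block_to_raster_block()
 * for one assumed configuration; not part of the patch. */
static int block_to_raster(int bwl, int block, int ss_txfrm_size) {
  const int txwl = ss_txfrm_size / 2;            /* tx width, log2 of 4x4 units */
  const int tx_cols_lg2 = bwl - txwl;            /* tx columns per row, log2 */
  const int tx_cols = 1 << tx_cols_lg2;
  const int raster_mb = block >> ss_txfrm_size;  /* transform-block index */
  const int x = (raster_mb & (tx_cols - 1)) << txwl;
  const int y = (raster_mb >> tx_cols_lg2) << txwl;
  return x + (y << bwl);                         /* top-left 4x4 raster index */
}

int main(void) {
  int block;
  /* block advances by 1 << ss_txfrm_size (16 4x4 units per 16x16 transform),
   * matching how BLOCK_OFFSET(qcoeff, block, 16) addresses coefficients. */
  for (block = 0; block < 16 << 4; block += 1 << 4)
    printf("tx block %2d -> raster 4x4 index %3d\n",
           block >> 4, block_to_raster(4, block, 4));
  return 0;
}
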
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index d099960f4..245305479 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -2257,6 +2257,14 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
}
update_mbintra_mode_probs(cpi, &header_bc);
+ for (i = 0; i < NUM_PARTITION_CONTEXTS; ++i) {
+ vp9_prob Pnew[PARTITION_TYPES - 1];
+ unsigned int bct[PARTITION_TYPES - 1][2];
+ update_mode(&header_bc, PARTITION_TYPES, vp9_partition_encodings,
+ vp9_partition_tree, Pnew, pc->fc.partition_prob[i], bct,
+ (unsigned int *)cpi->partition_count[i]);
+ }
+
vp9_write_nmv_probs(cpi, xd->allow_high_precision_mv, &header_bc);
}
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 6d5583bf3..aa1f6f389 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -2167,8 +2167,7 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
}
#endif
- vp9_tokenize_mb(cpi, xd, t, !output_enabled);
-
+ vp9_tokenize_sb(cpi, xd, t, !output_enabled, BLOCK_SIZE_MB16X16);
} else {
// FIXME(rbultje): not tile-aware (mi - 1)
int mb_skip_context =
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index d557c6e25..53d03aa95 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -523,8 +523,8 @@ static void configure_implicit_segmentation(VP9_COMP *cpi) {
xd->update_mb_segmentation_data = 1;
- // Enable use of q deltas on segments
- for (i = 0; i < MAX_MB_SEGMENTS; ++i) {
+ // Enable use of q deltas on segments 1 and up
+ for (i = 1; i < MAX_MB_SEGMENTS; ++i) {
qi_delta = compute_qdelta(cpi, cpi->active_worst_quality, q_target);
vp9_set_segdata(xd, i, SEG_LVL_ALT_Q, qi_delta);
q_target *= 0.95;
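
Note on the vp9_onyx_if.c change: configure_implicit_segmentation() now leaves segment 0 without a q delta and applies deltas only to segments 1 and up, matching the vp9_seg_common.c change above that writes 1 + txfm_size into the implicit segment map. The rough sketch below only illustrates the geometric 5% per-segment shrink of the q target; the starting value of 40.0 and the use of 8 segments (MAX_MB_SEGMENTS) are assumptions, and the actual deltas come from compute_qdelta(), which is not modeled here.

#include <stdio.h>

int main(void) {
  double q_target = 40.0;        /* assumed initial target quantizer */
  int i;
  for (i = 1; i < 8; ++i) {      /* segment 0 gets no SEG_LVL_ALT_Q delta */
    printf("segment %d: q target %.2f\n", i, q_target);
    q_target *= 0.95;            /* same per-segment scaling as the patch */
  }
  return 0;
}
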
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index d1e4943f4..8fa1a3367 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -390,10 +390,7 @@ void vp9_tokenize_sb(VP9_COMP *cpi,
const int segment_id = mbmi->segment_id;
const int skip_inc = !vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
const TX_SIZE txfm_size = mbmi->txfm_size;
- const TX_SIZE uv_txfm_size = (bsize < BLOCK_SIZE_SB32X32 &&
- txfm_size == TX_16X16) ? TX_8X8 :
- (bsize < BLOCK_SIZE_SB64X64 &&
- txfm_size == TX_32X32) ? TX_16X16 : txfm_size;
+ TX_SIZE uv_txfm_size = get_uv_tx_size(xd);
int b;
const int n_y = (1 << (bwl + bhl)), n_uv = (n_y * 3) >> 1;
@@ -446,9 +443,15 @@ void vp9_tokenize_sb(VP9_COMP *cpi,
for (b = 0; b < n_y; b += 4)
tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC,
TX_8X8, n_y, dry_run);
- for (; b < n_uv; b += 4)
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
- TX_8X8, n_y, dry_run);
+ if (uv_txfm_size == TX_8X8) {
+ for (; b < n_uv; b += 4)
+ tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
+ TX_8X8, n_y, dry_run);
+ } else {
+ for (; b < n_uv; ++b)
+ tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV,
+ TX_4X4, n_y, dry_run);
+ }
break;
case TX_4X4:
for (b = 0; b < n_y; b++)
@@ -465,71 +468,6 @@ void vp9_tokenize_sb(VP9_COMP *cpi,
*t = t_backup;
}
-void vp9_tokenize_mb(VP9_COMP *cpi,
- MACROBLOCKD *xd,
- TOKENEXTRA **t,
- int dry_run) {
- int b;
- int tx_size = xd->mode_info_context->mbmi.txfm_size;
- int mb_skip_context = vp9_get_pred_context(&cpi->common, xd, PRED_MBSKIP);
- TOKENEXTRA *t_backup = *t;
-
- // If the MB is going to be skipped because of a segment level flag
- // exclude this from the skip count stats used to calculate the
- // transmitted skip probability;
- int skip_inc;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
-
- if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
- skip_inc = 1;
- } else
- skip_inc = 0;
-
- xd->mode_info_context->mbmi.mb_skip_coeff = vp9_sb_is_skippable(xd,
- BLOCK_SIZE_MB16X16);
-
- if (xd->mode_info_context->mbmi.mb_skip_coeff) {
- if (!dry_run)
- cpi->skip_true_count[mb_skip_context] += skip_inc;
- vp9_reset_sb_tokens_context(xd, BLOCK_SIZE_MB16X16);
-
- if (dry_run)
- *t = t_backup;
- return;
- }
-
- if (!dry_run)
- cpi->skip_false_count[mb_skip_context] += skip_inc;
-
- if (tx_size == TX_16X16) {
- tokenize_b(cpi, xd, 0, t, PLANE_TYPE_Y_WITH_DC, TX_16X16, 16, dry_run);
- for (b = 16; b < 24; b += 4) {
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_8X8, 16, dry_run);
- }
- } else if (tx_size == TX_8X8) {
- for (b = 0; b < 16; b += 4) {
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC, TX_8X8, 16, dry_run);
- }
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
- xd->mode_info_context->mbmi.mode == SPLITMV) {
- for (b = 16; b < 24; b++) {
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_4X4, 16, dry_run);
- }
- } else {
- for (b = 16; b < 24; b += 4) {
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_8X8, 16, dry_run);
- }
- }
- } else {
- for (b = 0; b < 16; b++)
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_Y_WITH_DC, TX_4X4, 16, dry_run);
- for (b = 16; b < 24; b++)
- tokenize_b(cpi, xd, b, t, PLANE_TYPE_UV, TX_4X4, 16, dry_run);
- }
- if (dry_run)
- *t = t_backup;
-}
-
#ifdef ENTROPY_STATS
void init_context_counters(void) {
FILE *f = fopen("context.bin", "rb");
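
Note on the vp9_tokenize.c change: vp9_tokenize_sb() now asks get_uv_tx_size() for the chroma transform size instead of the inline ternary it replaces, and the TX_8X8 case gains a 4x4 chroma fallback for whatever block sizes that helper maps down to TX_4X4. The sketch below only restates the mapping encoded by the removed expression; the enum and function names are simplified stand-ins, and get_uv_tx_size() presumably extends this with the TX_4X4 case handled in the diff.

#include <stdio.h>

typedef enum { TX_4X4_S, TX_8X8_S, TX_16X16_S, TX_32X32_S } tx_size_sketch;
typedef enum { BS_MB16X16_S, BS_SB32X32_S, BS_SB64X64_S } block_size_sketch;

/* Chroma transform size from the luma size and block size, as the removed
 * ternary had it: a 16x16 block caps chroma at 8x8, a 32x32 block at 16x16. */
static tx_size_sketch uv_tx_size_sketch(block_size_sketch bsize,
                                        tx_size_sketch y_tx) {
  if (bsize < BS_SB32X32_S && y_tx == TX_16X16_S)
    return TX_8X8_S;
  if (bsize < BS_SB64X64_S && y_tx == TX_32X32_S)
    return TX_16X16_S;
  return y_tx;
}

int main(void) {
  printf("32x32 block, 32x32 luma tx -> %d (TX_16X16_S)\n",
         uv_tx_size_sketch(BS_SB32X32_S, TX_32X32_S));
  printf("16x16 block, 16x16 luma tx -> %d (TX_8X8_S)\n",
         uv_tx_size_sketch(BS_MB16X16_S, TX_16X16_S));
  return 0;
}
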
diff --git a/vp9/encoder/vp9_tokenize.h b/vp9/encoder/vp9_tokenize.h
index 816534830..bff5cfd4b 100644
--- a/vp9/encoder/vp9_tokenize.h
+++ b/vp9/encoder/vp9_tokenize.h
@@ -39,8 +39,6 @@ int vp9_sby_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
int vp9_sbuv_is_skippable(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
struct VP9_COMP;
-void vp9_tokenize_mb(struct VP9_COMP *cpi, MACROBLOCKD *xd,
- TOKENEXTRA **t, int dry_run);
void vp9_tokenize_sb(struct VP9_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run, BLOCK_SIZE_TYPE bsize);