-rw-r--r--  vp9/common/vp9_blockd.c       | 167
-rw-r--r--  vp9/common/vp9_blockd.h       | 178
-rw-r--r--  vp9/decoder/vp9_decodeframe.c |   8
-rw-r--r--  vp9/decoder/vp9_decodemv.c    |   8
-rw-r--r--  vp9/decoder/vp9_detokenize.c  |   2
-rw-r--r--  vp9/encoder/vp9_bitstream.c   |   8
-rw-r--r--  vp9/encoder/vp9_encodeframe.c |   2
-rw-r--r--  vp9/encoder/vp9_encodemb.c    |   9
-rw-r--r--  vp9/encoder/vp9_firstpass.c   |   2
-rw-r--r--  vp9/encoder/vp9_rdopt.c       |  12
-rw-r--r--  vp9/encoder/vp9_tokenize.c    |  15
-rw-r--r--  vp9/vp9_common.mk             |   1
12 files changed, 228 insertions(+), 184 deletions(-)
diff --git a/vp9/common/vp9_blockd.c b/vp9/common/vp9_blockd.c
new file mode 100644
index 000000000..8cc657114
--- /dev/null
+++ b/vp9/common/vp9_blockd.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "vp9/common/vp9_blockd.h"
+
+MB_PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
+ const MODE_INFO *left_mi, int b) {
+ if (b == 0 || b == 2) {
+ if (!left_mi || is_inter_block(&left_mi->mbmi))
+ return DC_PRED;
+
+ return left_mi->mbmi.sb_type < BLOCK_8X8 ? left_mi->bmi[b + 1].as_mode
+ : left_mi->mbmi.mode;
+ } else {
+ assert(b == 1 || b == 3);
+ return cur_mi->bmi[b - 1].as_mode;
+ }
+}
+
+MB_PREDICTION_MODE vp9_above_block_mode(const MODE_INFO *cur_mi,
+ const MODE_INFO *above_mi, int b) {
+ if (b == 0 || b == 1) {
+ if (!above_mi || is_inter_block(&above_mi->mbmi))
+ return DC_PRED;
+
+ return above_mi->mbmi.sb_type < BLOCK_8X8 ? above_mi->bmi[b + 2].as_mode
+ : above_mi->mbmi.mode;
+ } else {
+ assert(b == 2 || b == 3);
+ return cur_mi->bmi[b - 2].as_mode;
+ }
+}
+
+void vp9_foreach_transformed_block_in_plane(
+ const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
+ foreach_transformed_block_visitor visit, void *arg) {
+ const struct macroblockd_plane *const pd = &xd->plane[plane];
+ const MB_MODE_INFO* mbmi = &xd->mi_8x8[0]->mbmi;
+ // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
+ // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
+ // transform size varies per plane, look it up in a common way.
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi)
+ : mbmi->tx_size;
+ const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+ const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+ const int step = 1 << (tx_size << 1);
+ int i;
+
+ // If mb_to_right_edge is < 0 we are in a situation in which
+ // the current block size extends into the UMV and we won't
+ // visit the sub blocks that are wholly within the UMV.
+ if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
+ int r, c;
+
+ int max_blocks_wide = num_4x4_w;
+ int max_blocks_high = num_4x4_h;
+
+ // xd->mb_to_right_edge is in units of pixels * 8. This converts
+ // it to 4x4 block sizes.
+ if (xd->mb_to_right_edge < 0)
+ max_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+
+ if (xd->mb_to_bottom_edge < 0)
+ max_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+
+ i = 0;
+ // Unlike the normal case - in here we have to keep track of the
+ // row and column of the blocks we use so that we know if we are in
+ // the unrestricted motion border.
+ for (r = 0; r < num_4x4_h; r += (1 << tx_size)) {
+ for (c = 0; c < num_4x4_w; c += (1 << tx_size)) {
+ if (r < max_blocks_high && c < max_blocks_wide)
+ visit(plane, i, plane_bsize, tx_size, arg);
+ i += step;
+ }
+ }
+ } else {
+ for (i = 0; i < num_4x4_w * num_4x4_h; i += step)
+ visit(plane, i, plane_bsize, tx_size, arg);
+ }
+}
+
+void vp9_foreach_transformed_block(const MACROBLOCKD* const xd,
+ BLOCK_SIZE bsize,
+ foreach_transformed_block_visitor visit,
+ void *arg) {
+ int plane;
+
+ for (plane = 0; plane < MAX_MB_PLANE; plane++)
+ vp9_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
+}
+
+void vp9_foreach_transformed_block_uv(const MACROBLOCKD* const xd,
+ BLOCK_SIZE bsize,
+ foreach_transformed_block_visitor visit,
+ void *arg) {
+ int plane;
+
+ for (plane = 1; plane < MAX_MB_PLANE; plane++)
+ vp9_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
+}
+
+void vp9_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+ int aoff, int loff) {
+ ENTROPY_CONTEXT *const a = pd->above_context + aoff;
+ ENTROPY_CONTEXT *const l = pd->left_context + loff;
+ const int tx_size_in_blocks = 1 << tx_size;
+
+ // above
+ if (has_eob && xd->mb_to_right_edge < 0) {
+ int i;
+ const int blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize] +
+ (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
+ int above_contexts = tx_size_in_blocks;
+ if (above_contexts + aoff > blocks_wide)
+ above_contexts = blocks_wide - aoff;
+
+ for (i = 0; i < above_contexts; ++i)
+ a[i] = has_eob;
+ for (i = above_contexts; i < tx_size_in_blocks; ++i)
+ a[i] = 0;
+ } else {
+ vpx_memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
+ }
+
+ // left
+ if (has_eob && xd->mb_to_bottom_edge < 0) {
+ int i;
+ const int blocks_high = num_4x4_blocks_high_lookup[plane_bsize] +
+ (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
+ int left_contexts = tx_size_in_blocks;
+ if (left_contexts + loff > blocks_high)
+ left_contexts = blocks_high - loff;
+
+ for (i = 0; i < left_contexts; ++i)
+ l[i] = has_eob;
+ for (i = left_contexts; i < tx_size_in_blocks; ++i)
+ l[i] = 0;
+ } else {
+ vpx_memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
+ }
+}
+
+void vp9_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
+ int i;
+
+ for (i = 0; i < MAX_MB_PLANE; i++) {
+ xd->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y;
+ xd->plane[i].subsampling_x = i ? ss_x : 0;
+ xd->plane[i].subsampling_y = i ? ss_y : 0;
+ }
+#if CONFIG_ALPHA
+ // TODO(jkoleszar): Using the Y w/h for now
+ xd->plane[3].plane_type = PLANE_TYPE_Y;
+ xd->plane[3].subsampling_x = 0;
+ xd->plane[3].subsampling_y = 0;
+#endif
+}
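
[Note] The iteration arithmetic in the new vp9_foreach_transformed_block_in_plane() can be checked in isolation: tx_size is the log2 of the transform's width in 4x4 blocks, so each transform advances the raster block index by 1 << (tx_size << 1), while mb_to_right_edge/mb_to_bottom_edge are stored in 1/8-pel units, so shifting right by (5 + subsampling) converts them to 4x4-block units. The same conversion reappears in vp9_set_contexts() below. A minimal stand-alone sketch of that arithmetic (the values and names here are illustrative, not part of the patch):

#include <assert.h>
#include <stdio.h>

/* Stand-alone check of the step/clamp arithmetic used by
 * vp9_foreach_transformed_block_in_plane(); not code from the tree. */
int main(void) {
  /* TX_4X4=0, TX_8X8=1, TX_16X16=2, TX_32X32=3 (log2 of 4x4 blocks per side) */
  const int tx_size = 2;                       /* 16x16 transform            */
  const int step = 1 << (tx_size << 1);        /* 16 4x4 blocks per transform */

  /* Example: a 64x64 luma plane block that overhangs the right frame edge
   * by 24 pixels.  mb_to_right_edge is stored in 1/8-pel units: -24 * 8.   */
  const int num_4x4_w = 16;                    /* 64 / 4                     */
  const int mb_to_right_edge = -24 * 8;
  const int subsampling_x = 0;                 /* luma plane                 */
  const int max_blocks_wide =
      num_4x4_w + (mb_to_right_edge >> (5 + subsampling_x));

  printf("step = %d, max_blocks_wide = %d\n", step, max_blocks_wide);
  assert(step == 16);
  assert(max_blocks_wide == 10);               /* 16 - 24/4 = 10 columns     */
  return 0;
}
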
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 49e336aa4..f85aa2b85 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -153,33 +153,11 @@ static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
return mbmi->ref_frame[1] > INTRA_FRAME;
}
-static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mi,
- const MODE_INFO *left_mi, int b) {
- if (b == 0 || b == 2) {
- if (!left_mi || is_inter_block(&left_mi->mbmi))
- return DC_PRED;
-
- return left_mi->mbmi.sb_type < BLOCK_8X8 ? left_mi->bmi[b + 1].as_mode
- : left_mi->mbmi.mode;
- } else {
- assert(b == 1 || b == 3);
- return cur_mi->bmi[b - 1].as_mode;
- }
-}
+MB_PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
+ const MODE_INFO *left_mi, int b);
-static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mi,
- const MODE_INFO *above_mi, int b) {
- if (b == 0 || b == 1) {
- if (!above_mi || is_inter_block(&above_mi->mbmi))
- return DC_PRED;
-
- return above_mi->mbmi.sb_type < BLOCK_8X8 ? above_mi->bmi[b + 2].as_mode
- : above_mi->mbmi.mode;
- } else {
- assert(b == 2 || b == 3);
- return cur_mi->bmi[b - 2].as_mode;
- }
-}
+MB_PREDICTION_MODE vp9_above_block_mode(const MODE_INFO *cur_mi,
+ const MODE_INFO *above_mi, int b);
enum mv_precision {
MV_PRECISION_Q3,
@@ -266,7 +244,8 @@ typedef struct macroblockd {
-static BLOCK_SIZE get_subsize(BLOCK_SIZE bsize, PARTITION_TYPE partition) {
+static INLINE BLOCK_SIZE get_subsize(BLOCK_SIZE bsize,
+ PARTITION_TYPE partition) {
const BLOCK_SIZE subsize = subsize_lookup[partition][bsize];
assert(subsize < BLOCK_SIZES);
return subsize;
@@ -298,23 +277,9 @@ static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type,
: DCT_DCT;
}
-static void setup_block_dptrs(MACROBLOCKD *xd, int ss_x, int ss_y) {
- int i;
-
- for (i = 0; i < MAX_MB_PLANE; i++) {
- xd->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y;
- xd->plane[i].subsampling_x = i ? ss_x : 0;
- xd->plane[i].subsampling_y = i ? ss_y : 0;
- }
-#if CONFIG_ALPHA
- // TODO(jkoleszar): Using the Y w/h for now
- xd->plane[3].plane_type = PLANE_TYPE_Y;
- xd->plane[3].subsampling_x = 0;
- xd->plane[3].subsampling_y = 0;
-#endif
-}
+void vp9_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
-static TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize) {
+static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize) {
if (bsize < BLOCK_8X8) {
return TX_4X4;
} else {
@@ -324,12 +289,12 @@ static TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize) {
}
}
-static TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) {
+static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi) {
return get_uv_tx_size_impl(mbmi->tx_size, mbmi->sb_type);
}
-static BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize,
- const struct macroblockd_plane *pd) {
+static INLINE BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize,
+ const struct macroblockd_plane *pd) {
BLOCK_SIZE bs = ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y];
assert(bs < BLOCK_SIZES);
return bs;
@@ -340,77 +305,23 @@ typedef void (*foreach_transformed_block_visitor)(int plane, int block,
TX_SIZE tx_size,
void *arg);
-static INLINE void foreach_transformed_block_in_plane(
+void vp9_foreach_transformed_block_in_plane(
const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
- foreach_transformed_block_visitor visit, void *arg) {
- const struct macroblockd_plane *const pd = &xd->plane[plane];
- const MB_MODE_INFO* mbmi = &xd->mi_8x8[0]->mbmi;
- // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
- // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
- // transform size varies per plane, look it up in a common way.
- const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi)
- : mbmi->tx_size;
- const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
- const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
- const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
- const int step = 1 << (tx_size << 1);
- int i;
-
- // If mb_to_right_edge is < 0 we are in a situation in which
- // the current block size extends into the UMV and we won't
- // visit the sub blocks that are wholly within the UMV.
- if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0) {
- int r, c;
-
- int max_blocks_wide = num_4x4_w;
- int max_blocks_high = num_4x4_h;
-
- // xd->mb_to_right_edge is in units of pixels * 8. This converts
- // it to 4x4 block sizes.
- if (xd->mb_to_right_edge < 0)
- max_blocks_wide += (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
-
- if (xd->mb_to_bottom_edge < 0)
- max_blocks_high += (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
-
- i = 0;
- // Unlike the normal case - in here we have to keep track of the
- // row and column of the blocks we use so that we know if we are in
- // the unrestricted motion border.
- for (r = 0; r < num_4x4_h; r += (1 << tx_size)) {
- for (c = 0; c < num_4x4_w; c += (1 << tx_size)) {
- if (r < max_blocks_high && c < max_blocks_wide)
- visit(plane, i, plane_bsize, tx_size, arg);
- i += step;
- }
- }
- } else {
- for (i = 0; i < num_4x4_w * num_4x4_h; i += step)
- visit(plane, i, plane_bsize, tx_size, arg);
- }
-}
+ foreach_transformed_block_visitor visit, void *arg);
-static INLINE void foreach_transformed_block(
+
+void vp9_foreach_transformed_block(
const MACROBLOCKD* const xd, BLOCK_SIZE bsize,
- foreach_transformed_block_visitor visit, void *arg) {
- int plane;
+ foreach_transformed_block_visitor visit, void *arg);
- for (plane = 0; plane < MAX_MB_PLANE; plane++)
- foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
-}
-static INLINE void foreach_transformed_block_uv(
+void vp9_foreach_transformed_block_uv(
const MACROBLOCKD* const xd, BLOCK_SIZE bsize,
- foreach_transformed_block_visitor visit, void *arg) {
- int plane;
-
- for (plane = 1; plane < MAX_MB_PLANE; plane++)
- foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
-}
+ foreach_transformed_block_visitor visit, void *arg);
-static void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize,
- TX_SIZE tx_size, int block,
- int *x, int *y) {
+static INLINE void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize,
+ TX_SIZE tx_size, int block,
+ int *x, int *y) {
const int bwl = b_width_log2(plane_bsize);
const int tx_cols_log2 = bwl - tx_size;
const int tx_cols = 1 << tx_cols_log2;
@@ -419,50 +330,13 @@ static void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize,
*y = (raster_mb >> tx_cols_log2) << tx_size;
}
-static void set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
- BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
- int has_eob, int aoff, int loff) {
- ENTROPY_CONTEXT *const a = pd->above_context + aoff;
- ENTROPY_CONTEXT *const l = pd->left_context + loff;
- const int tx_size_in_blocks = 1 << tx_size;
-
- // above
- if (has_eob && xd->mb_to_right_edge < 0) {
- int i;
- const int blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize] +
- (xd->mb_to_right_edge >> (5 + pd->subsampling_x));
- int above_contexts = tx_size_in_blocks;
- if (above_contexts + aoff > blocks_wide)
- above_contexts = blocks_wide - aoff;
-
- for (i = 0; i < above_contexts; ++i)
- a[i] = has_eob;
- for (i = above_contexts; i < tx_size_in_blocks; ++i)
- a[i] = 0;
- } else {
- vpx_memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
- }
+void vp9_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+ BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
+ int aoff, int loff);
- // left
- if (has_eob && xd->mb_to_bottom_edge < 0) {
- int i;
- const int blocks_high = num_4x4_blocks_high_lookup[plane_bsize] +
- (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));
- int left_contexts = tx_size_in_blocks;
- if (left_contexts + loff > blocks_high)
- left_contexts = blocks_high - loff;
-
- for (i = 0; i < left_contexts; ++i)
- l[i] = has_eob;
- for (i = left_contexts; i < tx_size_in_blocks; ++i)
- l[i] = 0;
- } else {
- vpx_memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks);
- }
-}
-static int get_tx_eob(const struct segmentation *seg, int segment_id,
- TX_SIZE tx_size) {
+static INLINE int get_tx_eob(const struct segmentation *seg, int segment_id,
+ TX_SIZE tx_size) {
const int eob_max = 16 << (tx_size << 1);
return vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max;
}
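
[Note] The helpers left in the header are pure index arithmetic: txfrm_block_to_raster_xy() maps a raster block index to 4x4-block coordinates by treating the plane block as a grid of transforms, and get_tx_eob() caps the end-of-block at 16 << (tx_size << 1) coefficients unless the segment is skipped. A small sketch re-deriving that math for one assumed case, an 8x8 transform inside a 32x32 plane block (example values only, not taken from the patch):

#include <assert.h>

/* Illustrative re-derivation of txfrm_block_to_raster_xy() for one case. */
static void block_to_xy(int bwl, int tx_size, int block, int *x, int *y) {
  const int tx_cols_log2 = bwl - tx_size;        /* transforms per row (log2) */
  const int tx_cols = 1 << tx_cols_log2;
  const int raster_mb = block >> (tx_size << 1); /* which transform this is   */
  *x = (raster_mb & (tx_cols - 1)) << tx_size;   /* 4x4-block column          */
  *y = (raster_mb >> tx_cols_log2) << tx_size;   /* 4x4-block row             */
}

int main(void) {
  int x, y;
  /* 32x32 plane block (8 4x4 columns, bwl = 3) tiled with 8x8 transforms
   * (tx_size = 1); block index 20 is the 6th transform in raster order.    */
  block_to_xy(3, 1, 20, &x, &y);
  assert(x == 2 && y == 2);                      /* i.e. pixel offset (8, 8)  */

  /* get_tx_eob(): the coefficient cap is 16 << (tx_size << 1).             */
  assert((16 << (1 << 1)) == 64);                /* 8x8 transform, 64 coeffs  */
  return 0;
}
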
diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c
index be8176a3f..93ef7503f 100644
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -407,8 +407,8 @@ static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd,
if (!is_inter_block(mbmi)) {
struct intra_args arg = { cm, xd, r };
- foreach_transformed_block(xd, bsize, predict_and_reconstruct_intra_block,
- &arg);
+ vp9_foreach_transformed_block(xd, bsize,
+ predict_and_reconstruct_intra_block, &arg);
} else {
// Setup
set_ref(cm, xd, 0, mi_row, mi_col);
@@ -424,7 +424,7 @@ static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd,
if (!mbmi->skip_coeff) {
int eobtotal = 0;
struct inter_args arg = { cm, xd, r, &eobtotal };
- foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
+ vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
if (!less8x8 && eobtotal == 0)
mbmi->skip_coeff = 1; // skip loopfilter
}
@@ -1371,7 +1371,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
set_prev_mi(cm);
setup_plane_dequants(cm, xd, cm->base_qindex);
- setup_block_dptrs(xd, cm->subsampling_x, cm->subsampling_y);
+ vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
cm->fc = cm->frame_contexts[cm->frame_context_idx];
vp9_zero(cm->counts);
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index e671f0dba..c7fb71ddf 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -175,8 +175,8 @@ static void read_intra_frame_mode_info(VP9_COMMON *const cm,
mbmi->ref_frame[1] = NONE;
if (bsize >= BLOCK_8X8) {
- const MB_PREDICTION_MODE A = above_block_mode(mi, above_mi, 0);
- const MB_PREDICTION_MODE L = left_block_mode(mi, left_mi, 0);
+ const MB_PREDICTION_MODE A = vp9_above_block_mode(mi, above_mi, 0);
+ const MB_PREDICTION_MODE L = vp9_left_block_mode(mi, left_mi, 0);
mbmi->mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]);
} else {
// Only 4x4, 4x8, 8x4 blocks
@@ -187,8 +187,8 @@ static void read_intra_frame_mode_info(VP9_COMMON *const cm,
for (idy = 0; idy < 2; idy += num_4x4_h) {
for (idx = 0; idx < 2; idx += num_4x4_w) {
const int ib = idy * 2 + idx;
- const MB_PREDICTION_MODE A = above_block_mode(mi, above_mi, ib);
- const MB_PREDICTION_MODE L = left_block_mode(mi, left_mi, ib);
+ const MB_PREDICTION_MODE A = vp9_above_block_mode(mi, above_mi, ib);
+ const MB_PREDICTION_MODE L = vp9_left_block_mode(mi, left_mi, ib);
const MB_PREDICTION_MODE b_mode = read_intra_mode(r,
vp9_kf_y_mode_prob[A][L]);
mi->bmi[ib].as_mode = b_mode;
diff --git a/vp9/decoder/vp9_detokenize.c b/vp9/decoder/vp9_detokenize.c
index 18cafbaef..52e78cd90 100644
--- a/vp9/decoder/vp9_detokenize.c
+++ b/vp9/decoder/vp9_detokenize.c
@@ -204,7 +204,7 @@ int vp9_decode_block_tokens(VP9_COMMON *cm, MACROBLOCKD *xd,
const int eob = decode_coefs(cm, xd, pd->plane_type,
BLOCK_OFFSET(pd->dqcoeff, block), tx_size,
pd->dequant, ctx, so->scan, so->neighbors, r);
- set_contexts(xd, pd, plane_bsize, tx_size, eob > 0, x, y);
+ vp9_set_contexts(xd, pd, plane_bsize, tx_size, eob > 0, x, y);
return eob;
}
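
[Note] vp9_set_contexts(), called here with eob > 0, uses the same pixel-to-block conversion as the iterator: when the block overhangs the frame edge, only the entropy contexts covering real columns/rows receive the eob flag and the remainder are zeroed. A sketch of the "above" clamp under assumed inputs (not code from the tree):

#include <assert.h>

typedef signed char ENTROPY_CONTEXT;

/* Sketch of the "above" half of vp9_set_contexts() with assumed inputs:
 * a 32x32 transform (tx_size = 3, 8 4x4 blocks wide) at aoff = 0 inside a
 * 64x64 plane block that overhangs the right frame edge by 48 pixels.      */
int main(void) {
  ENTROPY_CONTEXT a[8];
  const int tx_size_in_blocks = 1 << 3;          /* 8                         */
  const int aoff = 0;
  const int has_eob = 1;
  const int blocks_wide = 16 + ((-48 * 8) >> 5); /* 16 - 12 = 4 usable cols   */
  int above_contexts = tx_size_in_blocks;
  int i;

  if (above_contexts + aoff > blocks_wide)
    above_contexts = blocks_wide - aoff;         /* clamp to 4                */

  for (i = 0; i < above_contexts; ++i)
    a[i] = has_eob;
  for (i = above_contexts; i < tx_size_in_blocks; ++i)
    a[i] = 0;

  assert(above_contexts == 4);
  assert(a[0] == 1 && a[3] == 1 && a[4] == 0 && a[7] == 0);
  return 0;
}
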
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index ede744e7f..dc64a107c 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -393,8 +393,8 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc);
if (m->mbmi.sb_type >= BLOCK_8X8) {
- const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
- const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, 0);
+ const MB_PREDICTION_MODE A = vp9_above_block_mode(m, above_mi, 0);
+ const MB_PREDICTION_MODE L = vp9_left_block_mode(m, left_mi, 0);
write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]);
} else {
int idx, idy;
@@ -403,8 +403,8 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
int i = idy * 2 + idx;
- const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i);
- const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, i);
+ const MB_PREDICTION_MODE A = vp9_above_block_mode(m, above_mi, i);
+ const MB_PREDICTION_MODE L = vp9_left_block_mode(m, left_mi, i);
const int bm = m->bmi[i].as_mode;
write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]);
}
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 7b6da6c39..ba10d153e 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -2142,7 +2142,7 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
setup_pre_planes(xd, 0, get_ref_frame_buffer(cpi, LAST_FRAME), 0, 0, NULL);
setup_dst_planes(xd, get_frame_new_buffer(cm), 0, 0);
- setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+ vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
xd->mi_8x8[0]->mbmi.mode = DC_PRED;
xd->mi_8x8[0]->mbmi.uv_mode = DC_PRED;
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index 8ff23c79a..376a899e0 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -487,7 +487,8 @@ void vp9_encode_sby(MACROBLOCK *x, BLOCK_SIZE bsize) {
if (x->optimize)
optimize_init_b(0, bsize, &arg);
- foreach_transformed_block_in_plane(xd, bsize, 0, encode_block_pass1, &arg);
+ vp9_foreach_transformed_block_in_plane(xd, bsize, 0, encode_block_pass1,
+ &arg);
}
void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
@@ -505,7 +506,7 @@ void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
optimize_init_b(i, bsize, &arg);
}
- foreach_transformed_block(xd, bsize, encode_block, &arg);
+ vp9_foreach_transformed_block(xd, bsize, encode_block, &arg);
}
void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
@@ -648,7 +649,7 @@ void vp9_encode_intra_block_y(MACROBLOCK *x, BLOCK_SIZE bsize) {
MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi;
struct encode_b_args arg = {x, &ctx, &mbmi->skip_coeff};
- foreach_transformed_block_in_plane(xd, bsize, 0, vp9_encode_block_intra,
+ vp9_foreach_transformed_block_in_plane(xd, bsize, 0, vp9_encode_block_intra,
&arg);
}
void vp9_encode_intra_block_uv(MACROBLOCK *x, BLOCK_SIZE bsize) {
@@ -656,7 +657,7 @@ void vp9_encode_intra_block_uv(MACROBLOCK *x, BLOCK_SIZE bsize) {
struct optimize_ctx ctx;
MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi;
struct encode_b_args arg = {x, &ctx, &mbmi->skip_coeff};
- foreach_transformed_block_uv(xd, bsize, vp9_encode_block_intra, &arg);
+ vp9_foreach_transformed_block_uv(xd, bsize, vp9_encode_block_intra, &arg);
}
int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred) {
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 62e3f7f97..8ca2fcd77 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -505,7 +505,7 @@ void vp9_first_pass(VP9_COMP *cpi) {
xd->mi_8x8 = cm->mi_grid_visible;
xd->mi_8x8[0] = cm->mi; // required for vp9_frame_init_quantizer
- setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+ vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
vp9_frame_init_quantizer(cpi);
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 9cca3bd52..e5230feb4 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -742,8 +742,8 @@ static void txfm_rd_in_plane(MACROBLOCK *x,
rd_stack.scan = so->scan;
rd_stack.nb = so->neighbors;
- foreach_transformed_block_in_plane(xd, bsize, plane,
- block_rd_txfm, &rd_stack);
+ vp9_foreach_transformed_block_in_plane(xd, bsize, plane,
+ block_rd_txfm, &rd_stack);
if (rd_stack.skip) {
*rate = INT_MAX;
*distortion = INT64_MAX;
@@ -1150,8 +1150,8 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi,
int64_t d = INT64_MAX, this_rd = INT64_MAX;
i = idy * 2 + idx;
if (cpi->common.frame_type == KEY_FRAME) {
- const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, i);
- const MB_PREDICTION_MODE L = left_block_mode(mic, left_mi, i);
+ const MB_PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, i);
+ const MB_PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, i);
bmode_costs = mb->y_mode_costs[A][L];
}
@@ -1216,8 +1216,8 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
continue;
if (cpi->common.frame_type == KEY_FRAME) {
- const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, 0);
- const MB_PREDICTION_MODE L = left_block_mode(mic, left_mi, 0);
+ const MB_PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
+ const MB_PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
bmode_costs = x->y_mode_costs[A][L];
}
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index 8e646f669..ed1301a8a 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -172,7 +172,8 @@ static void set_entropy_context_b(int plane, int block, BLOCK_SIZE plane_bsize,
struct macroblockd_plane *pd = &xd->plane[plane];
int aoff, loff;
txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);
- set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, aoff, loff);
+ vp9_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0,
+ aoff, loff);
}
static INLINE void add_token(TOKENEXTRA **t, const vp9_prob *context_tree,
@@ -259,7 +260,7 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
*tp = t;
- set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
+ vp9_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff);
}
struct is_skippable_args {
@@ -277,15 +278,15 @@ static void is_skippable(int plane, int block,
static int sb_is_skippable(MACROBLOCK *x, BLOCK_SIZE bsize) {
int result = 1;
struct is_skippable_args args = {x, &result};
- foreach_transformed_block(&x->e_mbd, bsize, is_skippable, &args);
+ vp9_foreach_transformed_block(&x->e_mbd, bsize, is_skippable, &args);
return result;
}
int vp9_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
int result = 1;
struct is_skippable_args args = {x, &result};
- foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
- &args);
+ vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
+ &args);
return result;
}
@@ -310,9 +311,9 @@ void vp9_tokenize_sb(VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
if (!dry_run) {
cm->counts.skip[ctx][0] += skip_inc;
- foreach_transformed_block(xd, bsize, tokenize_b, &arg);
+ vp9_foreach_transformed_block(xd, bsize, tokenize_b, &arg);
} else {
- foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
+ vp9_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
*t = t_backup;
}
}
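
[Note] The encoder and decoder changes above are mechanical renames; the calling pattern itself is a plain visitor callback, as in vp9_foreach_transformed_block(xd, bsize, tokenize_b, &arg). A toy stand-in for that pattern, with the driver, callback, and arg struct invented for illustration (the real iterator also clamps at frame edges, which this sketch omits):

#include <assert.h>
#include <stdio.h>

/* Simplified visitor signature; the real one also passes plane_bsize.      */
typedef void (*block_visitor)(int plane, int block, int tx_size, void *arg);

struct count_args { int visited; };

static void count_block(int plane, int block, int tx_size, void *arg) {
  struct count_args *c = (struct count_args *)arg;
  (void)plane; (void)block; (void)tx_size;
  ++c->visited;
}

/* Toy driver: num_4x4_w x num_4x4_h 4x4 blocks, transforms of 1 << tx_size
 * blocks per side, raster block index advancing by 1 << (tx_size << 1).    */
static void foreach_block(int plane, int num_4x4_w, int num_4x4_h,
                          int tx_size, block_visitor visit, void *arg) {
  const int step = 1 << (tx_size << 1);
  int i;
  for (i = 0; i < num_4x4_w * num_4x4_h; i += step)
    visit(plane, i, tx_size, arg);
}

int main(void) {
  struct count_args args = { 0 };
  /* 64x64 luma block (16x16 4x4 blocks) with 16x16 transforms (tx_size 2):
   * 4x4 = 16 transforms should be visited.                                 */
  foreach_block(0, 16, 16, 2, count_block, &args);
  assert(args.visited == 16);
  printf("visited %d transform blocks\n", args.visited);
  return 0;
}
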
diff --git a/vp9/vp9_common.mk b/vp9/vp9_common.mk
index b1c029cba..78aabc7aa 100644
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -14,6 +14,7 @@ VP9_COMMON_SRCS-yes += common/vp9_pragmas.h
VP9_COMMON_SRCS-yes += common/vp9_ppflags.h
VP9_COMMON_SRCS-yes += common/vp9_onyx.h
VP9_COMMON_SRCS-yes += common/vp9_alloccommon.c
+VP9_COMMON_SRCS-yes += common/vp9_blockd.c
VP9_COMMON_SRCS-yes += common/vp9_convolve.c
VP9_COMMON_SRCS-yes += common/vp9_convolve.h
VP9_COMMON_SRCS-yes += common/vp9_debugmodes.c