author     Scott LaVarnway <slavarnway@google.com>   2016-01-19 16:40:20 -0800
committer  Scott LaVarnway <slavarnway@google.com>   2016-01-19 16:40:20 -0800
commit     5232326716af469eafa6d98fba64f8154c69d9f8 (patch)
tree       be0511ba21d18a0a2b1e389deefae092030ea9b6 /vp9/common
parent     c0307e6cea0fcd79577eaa107f76b07acaf1d4e6 (diff)
VP9: Eliminate MB_MODE_INFO
Change-Id: Ifa607dd2bb366ce09fa16dfcad3cc45a2440c185
Diffstat (limited to 'vp9/common')
-rw-r--r--  vp9/common/vp9_blockd.c        10
-rw-r--r--  vp9/common/vp9_blockd.h        31
-rw-r--r--  vp9/common/vp9_debugmodes.c    18
-rw-r--r--  vp9/common/vp9_loopfilter.c    48
-rw-r--r--  vp9/common/vp9_loopfilter.h     2
-rw-r--r--  vp9/common/vp9_mfqe.c          12
-rw-r--r--  vp9/common/vp9_mvref_common.c  29
-rw-r--r--  vp9/common/vp9_mvref_common.h  10
-rw-r--r--  vp9/common/vp9_onyxc_int.h      7
-rw-r--r--  vp9/common/vp9_pred_common.c  176
-rw-r--r--  vp9/common/vp9_pred_common.h   22
-rw-r--r--  vp9/common/vp9_reconinter.c    14
-rw-r--r--  vp9/common/vp9_scan.h           2
13 files changed, 184 insertions, 197 deletions
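
The core of the change is in vp9_blockd.h: the MB_MODE_INFO wrapper is deleted and its members are promoted into MODE_INFO itself. A condensed before/after sketch (fields abbreviated; the full member list is in the vp9_blockd.h hunk below):

    /* Before: per-block mode data lived in MB_MODE_INFO, embedded in MODE_INFO. */
    typedef struct {
      BLOCK_SIZE sb_type;
      PREDICTION_MODE mode;
      /* ... remaining per-block fields ... */
      int_mv mv[2];
    } MB_MODE_INFO;

    typedef struct MODE_INFO {
      MB_MODE_INFO mbmi;
      b_mode_info bmi[4];
    } MODE_INFO;

    /* After: the wrapper is gone; MODE_INFO carries the fields directly. */
    typedef struct MODE_INFO {
      BLOCK_SIZE sb_type;
      PREDICTION_MODE mode;
      /* ... remaining per-block fields ... */
      int_mv mv[2];
      b_mode_info bmi[4];
    } MODE_INFO;

The rest of the diff is the mechanical follow-up: call sites that used to reach through &mi->mbmi now pass or dereference the MODE_INFO pointer directly.
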
diff --git a/vp9/common/vp9_blockd.c b/vp9/common/vp9_blockd.c
index 0e104ee59..7bab27d4f 100644
--- a/vp9/common/vp9_blockd.c
+++ b/vp9/common/vp9_blockd.c
@@ -13,7 +13,7 @@
PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *left_mi, int b) {
if (b == 0 || b == 2) {
- if (!left_mi || is_inter_block(&left_mi->mbmi))
+ if (!left_mi || is_inter_block(left_mi))
return DC_PRED;
return get_y_mode(left_mi, b + 1);
@@ -26,7 +26,7 @@ PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
PREDICTION_MODE vp9_above_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *above_mi, int b) {
if (b == 0 || b == 1) {
- if (!above_mi || is_inter_block(&above_mi->mbmi))
+ if (!above_mi || is_inter_block(above_mi))
return DC_PRED;
return get_y_mode(above_mi, b + 2);
@@ -40,12 +40,12 @@ void vp9_foreach_transformed_block_in_plane(
const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
foreach_transformed_block_visitor visit, void *arg) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
- const MB_MODE_INFO* mbmi = &xd->mi[0]->mbmi;
+ const MODE_INFO* mi = xd->mi[0];
// block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
// 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
// transform size varies per plane, look it up in a common way.
- const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd)
- : mbmi->tx_size;
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd)
+ : mi->tx_size;
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 61eb59162..ae2f66a4a 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -64,7 +64,7 @@ typedef struct {
typedef int8_t MV_REFERENCE_FRAME;
// This structure now relates to 8x8 block regions.
-typedef struct {
+typedef struct MODE_INFO {
// Common for both INTER and INTRA blocks
BLOCK_SIZE sb_type;
PREDICTION_MODE mode;
@@ -82,24 +82,21 @@ typedef struct {
// TODO(slavarnway): Delete and use bmi[3].as_mv[] instead.
int_mv mv[2];
-} MB_MODE_INFO;
-typedef struct MODE_INFO {
- MB_MODE_INFO mbmi;
b_mode_info bmi[4];
} MODE_INFO;
static INLINE PREDICTION_MODE get_y_mode(const MODE_INFO *mi, int block) {
- return mi->mbmi.sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode
- : mi->mbmi.mode;
+ return mi->sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode
+ : mi->mode;
}
-static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) {
- return mbmi->ref_frame[0] > INTRA_FRAME;
+static INLINE int is_inter_block(const MODE_INFO *mi) {
+ return mi->ref_frame[0] > INTRA_FRAME;
}
-static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
- return mbmi->ref_frame[1] > INTRA_FRAME;
+static INLINE int has_second_ref(const MODE_INFO *mi) {
+ return mi->ref_frame[1] > INTRA_FRAME;
}
PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
@@ -160,8 +157,6 @@ typedef struct macroblockd {
MODE_INFO **mi;
MODE_INFO *left_mi;
MODE_INFO *above_mi;
- MB_MODE_INFO *left_mbmi;
- MB_MODE_INFO *above_mbmi;
int up_available;
int left_available;
@@ -212,19 +207,19 @@ extern const TX_TYPE intra_mode_to_tx_type_lookup[INTRA_MODES];
static INLINE TX_TYPE get_tx_type(PLANE_TYPE plane_type,
const MACROBLOCKD *xd) {
- const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+ const MODE_INFO *const mi = xd->mi[0];
- if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mbmi))
+ if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mi))
return DCT_DCT;
- return intra_mode_to_tx_type_lookup[mbmi->mode];
+ return intra_mode_to_tx_type_lookup[mi->mode];
}
static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
const MACROBLOCKD *xd, int ib) {
const MODE_INFO *const mi = xd->mi[0];
- if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(&mi->mbmi))
+ if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mi))
return DCT_DCT;
return intra_mode_to_tx_type_lookup[get_y_mode(mi, ib)];
@@ -242,9 +237,9 @@ static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize,
}
}
-static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi,
+static INLINE TX_SIZE get_uv_tx_size(const MODE_INFO *mi,
const struct macroblockd_plane *pd) {
- return get_uv_tx_size_impl(mbmi->tx_size, mbmi->sb_type, pd->subsampling_x,
+ return get_uv_tx_size_impl(mi->tx_size, mi->sb_type, pd->subsampling_x,
pd->subsampling_y);
}
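
With is_inter_block(), has_second_ref() and get_uv_tx_size() now taking const MODE_INFO *, a typical caller update is a one-line change from &xd->mi[0]->mbmi to xd->mi[0]. A minimal, hypothetical sketch (example_uv_tx_size is not part of the patch; the helper signatures match the hunk above):

    static TX_SIZE example_uv_tx_size(const MACROBLOCKD *xd, int plane) {
      const MODE_INFO *const mi = xd->mi[0];         /* was: &xd->mi[0]->mbmi */
      const struct macroblockd_plane *const pd = &xd->plane[plane];
      return plane ? get_uv_tx_size(mi, pd)          /* helper now takes MODE_INFO* */
                   : mi->tx_size;                    /* was: mbmi->tx_size */
    }
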
diff --git a/vp9/common/vp9_debugmodes.c b/vp9/common/vp9_debugmodes.c
index 3d80103d2..d9c1fd968 100644
--- a/vp9/common/vp9_debugmodes.c
+++ b/vp9/common/vp9_debugmodes.c
@@ -35,7 +35,7 @@ static void print_mi_data(VP9_COMMON *cm, FILE *file, const char *descriptor,
fprintf(file, "%c ", prefix);
for (mi_col = 0; mi_col < cols; mi_col++) {
fprintf(file, "%2d ",
- *((int*) ((char *) (&mi[0]->mbmi) +
+ *((int*) ((char *) (mi[0]) +
member_offset)));
mi++;
}
@@ -53,18 +53,18 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) {
int rows = cm->mi_rows;
int cols = cm->mi_cols;
- print_mi_data(cm, mvs, "Partitions:", offsetof(MB_MODE_INFO, sb_type));
- print_mi_data(cm, mvs, "Modes:", offsetof(MB_MODE_INFO, mode));
- print_mi_data(cm, mvs, "Ref frame:", offsetof(MB_MODE_INFO, ref_frame[0]));
- print_mi_data(cm, mvs, "Transform:", offsetof(MB_MODE_INFO, tx_size));
- print_mi_data(cm, mvs, "UV Modes:", offsetof(MB_MODE_INFO, uv_mode));
+ print_mi_data(cm, mvs, "Partitions:", offsetof(MODE_INFO, sb_type));
+ print_mi_data(cm, mvs, "Modes:", offsetof(MODE_INFO, mode));
+ print_mi_data(cm, mvs, "Ref frame:", offsetof(MODE_INFO, ref_frame[0]));
+ print_mi_data(cm, mvs, "Transform:", offsetof(MODE_INFO, tx_size));
+ print_mi_data(cm, mvs, "UV Modes:", offsetof(MODE_INFO, uv_mode));
// output skip infomation.
log_frame_info(cm, "Skips:", mvs);
for (mi_row = 0; mi_row < rows; mi_row++) {
fprintf(mvs, "S ");
for (mi_col = 0; mi_col < cols; mi_col++) {
- fprintf(mvs, "%2d ", mi[0]->mbmi.skip);
+ fprintf(mvs, "%2d ", mi[0]->skip);
mi++;
}
fprintf(mvs, "\n");
@@ -78,8 +78,8 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) {
for (mi_row = 0; mi_row < rows; mi_row++) {
fprintf(mvs, "V ");
for (mi_col = 0; mi_col < cols; mi_col++) {
- fprintf(mvs, "%4d:%4d ", mi[0]->mbmi.mv[0].as_mv.row,
- mi[0]->mbmi.mv[0].as_mv.col);
+ fprintf(mvs, "%4d:%4d ", mi[0]->mv[0].as_mv.row,
+ mi[0]->mv[0].as_mv.col);
mi++;
}
fprintf(mvs, "\n");
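
Because the fields are now direct members of MODE_INFO, the debug dump takes its offsets with offsetof(MODE_INFO, ...) and indexes from the MODE_INFO pointer itself. A standalone sketch of that pattern, assuming the MODE_INFO definition from vp9_blockd.h (illustrative only, not the libvpx code; it mirrors the int-sized read used by print_mi_data above):

    #include <stddef.h>
    #include <stdio.h>

    static void print_mi_field(const MODE_INFO *mi, size_t member_offset) {
      /* Offsets are measured from the MODE_INFO base now that MB_MODE_INFO is gone. */
      fprintf(stdout, "%2d ", *(const int *)((const char *)mi + member_offset));
    }

    /* Usage: print_mi_field(mi, offsetof(MODE_INFO, sb_type)); */
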
diff --git a/vp9/common/vp9_loopfilter.c b/vp9/common/vp9_loopfilter.c
index b8a113223..79c3c4820 100644
--- a/vp9/common/vp9_loopfilter.c
+++ b/vp9/common/vp9_loopfilter.c
@@ -232,9 +232,9 @@ static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) {
}
static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
- const MB_MODE_INFO *mbmi) {
- return lfi_n->lvl[mbmi->segment_id][mbmi->ref_frame[0]]
- [mode_lf_lut[mbmi->mode]];
+ const MODE_INFO *mi) {
+ return lfi_n->lvl[mi->segment_id][mi->ref_frame[0]]
+ [mode_lf_lut[mi->mode]];
}
void vp9_loop_filter_init(VP9_COMMON *cm) {
@@ -709,11 +709,10 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
const MODE_INFO *mi, const int shift_y,
const int shift_uv,
LOOP_FILTER_MASK *lfm) {
- const MB_MODE_INFO *mbmi = &mi->mbmi;
- const BLOCK_SIZE block_size = mbmi->sb_type;
- const TX_SIZE tx_size_y = mbmi->tx_size;
+ const BLOCK_SIZE block_size = mi->sb_type;
+ const TX_SIZE tx_size_y = mi->tx_size;
const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
- const int filter_level = get_filter_level(lfi_n, mbmi);
+ const int filter_level = get_filter_level(lfi_n, mi);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
uint64_t *const above_y = &lfm->above_y[tx_size_y];
uint64_t *const int_4x4_y = &lfm->int_4x4_y;
@@ -754,7 +753,7 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
// If the block has no coefficients and is not intra we skip applying
// the loop filter on block edges.
- if (mbmi->skip && is_inter_block(mbmi))
+ if (mi->skip && is_inter_block(mi))
return;
// Here we are adding a mask for the transform size. The transform
@@ -788,10 +787,9 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
static void build_y_mask(const loop_filter_info_n *const lfi_n,
const MODE_INFO *mi, const int shift_y,
LOOP_FILTER_MASK *lfm) {
- const MB_MODE_INFO *mbmi = &mi->mbmi;
- const BLOCK_SIZE block_size = mbmi->sb_type;
- const TX_SIZE tx_size_y = mbmi->tx_size;
- const int filter_level = get_filter_level(lfi_n, mbmi);
+ const BLOCK_SIZE block_size = mi->sb_type;
+ const TX_SIZE tx_size_y = mi->tx_size;
+ const int filter_level = get_filter_level(lfi_n, mi);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
uint64_t *const above_y = &lfm->above_y[tx_size_y];
uint64_t *const int_4x4_y = &lfm->int_4x4_y;
@@ -812,7 +810,7 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
*above_y |= above_prediction_mask[block_size] << shift_y;
*left_y |= left_prediction_mask[block_size] << shift_y;
- if (mbmi->skip && is_inter_block(mbmi))
+ if (mi->skip && is_inter_block(mi))
return;
*above_y |= (size_mask[block_size] &
@@ -980,7 +978,7 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
// TODO(jimbankoski): Try moving most of the following code into decode
// loop and storing lfm in the mbmi structure so that we don't have to go
// through the recursive loop structure multiple times.
- switch (mip[0]->mbmi.sb_type) {
+ switch (mip[0]->sb_type) {
case BLOCK_64X64:
build_masks(lfi_n, mip[0] , 0, 0, lfm);
break;
@@ -1006,7 +1004,7 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
const int mi_32_row_offset = ((idx_32 >> 1) << 2);
if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
continue;
- switch (mip[0]->mbmi.sb_type) {
+ switch (mip[0]->sb_type) {
case BLOCK_32X32:
build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
break;
@@ -1036,7 +1034,7 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
continue;
- switch (mip[0]->mbmi.sb_type) {
+ switch (mip[0]->sb_type) {
case BLOCK_16X16:
build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
break;
@@ -1186,8 +1184,8 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm,
// Determine the vertical edges that need filtering
for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
const MODE_INFO *mi = mi_8x8[c];
- const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type;
- const int skip_this = mi[0].mbmi.skip && is_inter_block(&mi[0].mbmi);
+ const BLOCK_SIZE sb_type = mi[0].sb_type;
+ const int skip_this = mi[0].skip && is_inter_block(mi);
// left edge of current unit is block/partition edge -> no skip
const int block_edge_left = (num_4x4_blocks_wide_lookup[sb_type] > 1) ?
!(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) : 1;
@@ -1196,13 +1194,13 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm,
const int block_edge_above = (num_4x4_blocks_high_lookup[sb_type] > 1) ?
!(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1;
const int skip_this_r = skip_this && !block_edge_above;
- const TX_SIZE tx_size = get_uv_tx_size(&mi[0].mbmi, plane);
+ const TX_SIZE tx_size = get_uv_tx_size(mi, plane);
const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
// Filter level can vary per MI
if (!(lfl[(r << 3) + (c >> ss_x)] =
- get_filter_level(&cm->lf_info, &mi[0].mbmi)))
+ get_filter_level(&cm->lf_info, mi)))
continue;
// Build masks based on the transform size of each block
@@ -1640,12 +1638,12 @@ static const uint8_t first_block_in_16x16[8][8] = {
// This function sets up the bit masks for a block represented
// by mi_row, mi_col in a 64x64 region.
// TODO(SJL): This function only works for yv12.
-void vp9_build_mask(VP9_COMMON *cm, const MB_MODE_INFO *mbmi, int mi_row,
+void vp9_build_mask(VP9_COMMON *cm, const MODE_INFO *mi, int mi_row,
int mi_col, int bw, int bh) {
- const BLOCK_SIZE block_size = mbmi->sb_type;
- const TX_SIZE tx_size_y = mbmi->tx_size;
+ const BLOCK_SIZE block_size = mi->sb_type;
+ const TX_SIZE tx_size_y = mi->tx_size;
const loop_filter_info_n *const lfi_n = &cm->lf_info;
- const int filter_level = get_filter_level(lfi_n, mbmi);
+ const int filter_level = get_filter_level(lfi_n, mi);
const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
LOOP_FILTER_MASK *const lfm = get_lfm(&cm->lf, mi_row, mi_col);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
@@ -1693,7 +1691,7 @@ void vp9_build_mask(VP9_COMMON *cm, const MB_MODE_INFO *mbmi, int mi_row,
// If the block has no coefficients and is not intra we skip applying
// the loop filter on block edges.
- if (mbmi->skip && is_inter_block(mbmi))
+ if (mi->skip && is_inter_block(mi))
return;
// Add a mask for the transform size. The transform size mask is set to
diff --git a/vp9/common/vp9_loopfilter.h b/vp9/common/vp9_loopfilter.h
index 7f943ea09..b2893a451 100644
--- a/vp9/common/vp9_loopfilter.h
+++ b/vp9/common/vp9_loopfilter.h
@@ -134,7 +134,7 @@ static INLINE LOOP_FILTER_MASK *get_lfm(const struct loopfilter *lf,
return &lf->lfm[(mi_col >> 3) + ((mi_row >> 3) * lf->lfm_stride)];
}
-void vp9_build_mask(struct VP9Common *cm, const MB_MODE_INFO *mbmi, int mi_row,
+void vp9_build_mask(struct VP9Common *cm, const MODE_INFO *mi, int mi_row,
int mi_col, int bw, int bh);
void vp9_adjust_mask(struct VP9Common *const cm, const int mi_row,
const int mi_col, LOOP_FILTER_MASK *lfm);
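
The loop-filter helpers follow the same pattern: get_filter_level() and vp9_build_mask() now receive the MODE_INFO directly. A hypothetical caller sketch (example_build_mask is not from the patch; the vp9_build_mask signature matches the header change above):

    static void example_build_mask(struct VP9Common *cm, const MACROBLOCKD *xd,
                                   int mi_row, int mi_col, int bw, int bh) {
      const MODE_INFO *const mi = xd->mi[0];   /* was: &xd->mi[0]->mbmi */
      vp9_build_mask(cm, mi, mi_row, mi_col, bw, bh);
    }
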
diff --git a/vp9/common/vp9_mfqe.c b/vp9/common/vp9_mfqe.c
index 6d560f438..f5264665b 100644
--- a/vp9/common/vp9_mfqe.c
+++ b/vp9/common/vp9_mfqe.c
@@ -203,12 +203,12 @@ static void mfqe_block(BLOCK_SIZE bs, const uint8_t *y, const uint8_t *u,
static int mfqe_decision(MODE_INFO *mi, BLOCK_SIZE cur_bs) {
// Check the motion in current block(for inter frame),
// or check the motion in the correlated block in last frame (for keyframe).
- const int mv_len_square = mi->mbmi.mv[0].as_mv.row *
- mi->mbmi.mv[0].as_mv.row +
- mi->mbmi.mv[0].as_mv.col *
- mi->mbmi.mv[0].as_mv.col;
+ const int mv_len_square = mi->mv[0].as_mv.row *
+ mi->mv[0].as_mv.row +
+ mi->mv[0].as_mv.col *
+ mi->mv[0].as_mv.col;
const int mv_threshold = 100;
- return mi->mbmi.mode >= NEARESTMV && // Not an intra block
+ return mi->mode >= NEARESTMV && // Not an intra block
cur_bs >= BLOCK_16X16 &&
mv_len_square <= mv_threshold;
}
@@ -220,7 +220,7 @@ static void mfqe_partition(VP9_COMMON *cm, MODE_INFO *mi, BLOCK_SIZE bs,
uint8_t *yd, uint8_t *ud, uint8_t *vd,
int yd_stride, int uvd_stride) {
int mi_offset, y_offset, uv_offset;
- const BLOCK_SIZE cur_bs = mi->mbmi.sb_type;
+ const BLOCK_SIZE cur_bs = mi->sb_type;
const int qdiff = cm->base_qindex - cm->postproc_state.last_base_qindex;
const int bsl = b_width_log2_lookup[bs];
PARTITION_TYPE partition = partition_lookup[bsl][cur_bs];
diff --git a/vp9/common/vp9_mvref_common.c b/vp9/common/vp9_mvref_common.c
index 9803ff627..0eb01a51b 100644
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -20,7 +20,7 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
uint8_t *mode_context) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
- const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
+ const POSITION *const mv_ref_search = mv_ref_blocks[mi->sb_type];
int different_ref_found = 0;
int context_counter = 0;
const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ?
@@ -38,15 +38,14 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
xd->mi_stride];
- const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
// Keep counts for entropy encoding.
- context_counter += mode_2_counter[candidate->mode];
+ context_counter += mode_2_counter[candidate_mi->mode];
different_ref_found = 1;
- if (candidate->ref_frame[0] == ref_frame)
+ if (candidate_mi->ref_frame[0] == ref_frame)
ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, block),
refmv_count, mv_ref_list, Done);
- else if (candidate->ref_frame[1] == ref_frame)
+ else if (candidate_mi->ref_frame[1] == ref_frame)
ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 1, mv_ref->col, block),
refmv_count, mv_ref_list, Done);
}
@@ -58,14 +57,14 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
for (; i < MVREF_NEIGHBOURS; ++i) {
const POSITION *const mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
- const MB_MODE_INFO *const candidate =
- &xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]->mbmi;
+ const MODE_INFO *const candidate_mi =
+ xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
different_ref_found = 1;
- if (candidate->ref_frame[0] == ref_frame)
- ADD_MV_REF_LIST(candidate->mv[0], refmv_count, mv_ref_list, Done);
- else if (candidate->ref_frame[1] == ref_frame)
- ADD_MV_REF_LIST(candidate->mv[1], refmv_count, mv_ref_list, Done);
+ if (candidate_mi->ref_frame[0] == ref_frame)
+ ADD_MV_REF_LIST(candidate_mi->mv[0], refmv_count, mv_ref_list, Done);
+ else if (candidate_mi->ref_frame[1] == ref_frame)
+ ADD_MV_REF_LIST(candidate_mi->mv[1], refmv_count, mv_ref_list, Done);
}
}
@@ -85,11 +84,11 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
const POSITION *mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
- const MB_MODE_INFO *const candidate =
- &xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]->mbmi;
+ const MODE_INFO *const candidate_mi =
+ xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
// If the candidate is INTRA we don't want to consider its mv.
- IF_DIFF_REF_FRAME_ADD_MV(candidate, ref_frame, ref_sign_bias,
+ IF_DIFF_REF_FRAME_ADD_MV(candidate_mi, ref_frame, ref_sign_bias,
refmv_count, mv_ref_list, Done);
}
}
@@ -163,7 +162,7 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
assert(MAX_MV_REF_CANDIDATES == 2);
- find_mv_refs_idx(cm, xd, mi, mi->mbmi.ref_frame[ref], mv_list, block,
+ find_mv_refs_idx(cm, xd, mi, mi->ref_frame[ref], mv_list, block,
mi_row, mi_col, mode_context);
near_mv->as_int = 0;
diff --git a/vp9/common/vp9_mvref_common.h b/vp9/common/vp9_mvref_common.h
index a6dddd01a..4380843e2 100644
--- a/vp9/common/vp9_mvref_common.h
+++ b/vp9/common/vp9_mvref_common.h
@@ -136,19 +136,19 @@ static INLINE void clamp_mv_ref(MV *mv, const MACROBLOCKD *xd) {
// on whether the block_size < 8x8 and we have check_sub_blocks set.
static INLINE int_mv get_sub_block_mv(const MODE_INFO *candidate, int which_mv,
int search_col, int block_idx) {
- return block_idx >= 0 && candidate->mbmi.sb_type < BLOCK_8X8
+ return block_idx >= 0 && candidate->sb_type < BLOCK_8X8
? candidate->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]]
.as_mv[which_mv]
- : candidate->mbmi.mv[which_mv];
+ : candidate->mv[which_mv];
}
// Performs mv sign inversion if indicated by the reference frame combination.
-static INLINE int_mv scale_mv(const MB_MODE_INFO *mbmi, int ref,
+static INLINE int_mv scale_mv(const MODE_INFO *mi, int ref,
const MV_REFERENCE_FRAME this_ref_frame,
const int *ref_sign_bias) {
- int_mv mv = mbmi->mv[ref];
- if (ref_sign_bias[mbmi->ref_frame[ref]] != ref_sign_bias[this_ref_frame]) {
+ int_mv mv = mi->mv[ref];
+ if (ref_sign_bias[mi->ref_frame[ref]] != ref_sign_bias[this_ref_frame]) {
mv.as_mv.row *= -1;
mv.as_mv.col *= -1;
}
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index ceffdedf9..fd674cbc6 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -406,22 +406,17 @@ static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
// Are edges available for intra prediction?
xd->up_available = (mi_row != 0);
xd->left_available = (mi_col > tile->mi_col_start);
+ // TODO(slavarnway): eliminate up/left available ???
if (xd->up_available) {
xd->above_mi = xd->mi[-xd->mi_stride];
- // above_mi may be NULL in VP9 encoder's first pass.
- xd->above_mbmi = xd->above_mi ? &xd->above_mi->mbmi : NULL;
} else {
xd->above_mi = NULL;
- xd->above_mbmi = NULL;
}
if (xd->left_available) {
xd->left_mi = xd->mi[-1];
- // left_mi may be NULL in VP9 encoder's first pass.
- xd->left_mbmi = xd->left_mi ? &xd->left_mi->mbmi : NULL;
} else {
xd->left_mi = NULL;
- xd->left_mbmi = NULL;
}
}
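
With the cached left_mbmi/above_mbmi pointers removed from MACROBLOCKD, neighbor data is reached through xd->left_mi / xd->above_mi, which must still be treated as possibly NULL (the removed comments note they can be NULL in the VP9 encoder's first pass, and they are set to NULL when the edge is unavailable). The pattern mirrors vp9_get_skip_context() in the vp9_pred_common.h hunk further down; this renamed copy is illustrative only:

    static int example_skip_context(const MACROBLOCKD *xd) {
      const MODE_INFO *const above_mi = xd->above_mi;
      const MODE_INFO *const left_mi = xd->left_mi;
      const int above_skip = (above_mi != NULL) ? above_mi->skip : 0;  /* NULL-safe */
      const int left_skip = (left_mi != NULL) ? left_mi->skip : 0;
      return above_skip + left_skip;
    }
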
diff --git a/vp9/common/vp9_pred_common.c b/vp9/common/vp9_pred_common.c
index 1f1632573..1bb11efdf 100644
--- a/vp9/common/vp9_pred_common.c
+++ b/vp9/common/vp9_pred_common.c
@@ -19,12 +19,12 @@ int vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
- const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
- const int left_type = xd->left_available && is_inter_block(left_mbmi) ?
- left_mbmi->interp_filter : SWITCHABLE_FILTERS;
- const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
- const int above_type = xd->up_available && is_inter_block(above_mbmi) ?
- above_mbmi->interp_filter : SWITCHABLE_FILTERS;
+ const MODE_INFO *const left_mi = xd->left_mi;
+ const int left_type = xd->left_available && is_inter_block(left_mi) ?
+ left_mi->interp_filter : SWITCHABLE_FILTERS;
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const int above_type = xd->up_available && is_inter_block(above_mi) ?
+ above_mi->interp_filter : SWITCHABLE_FILTERS;
if (left_type == above_type)
return left_type;
@@ -44,18 +44,18 @@ int vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
// 2 - intra/--, --/intra
// 3 - intra/intra
int vp9_get_intra_inter_context(const MACROBLOCKD *xd) {
- const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
- const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
if (has_above && has_left) { // both edges available
- const int above_intra = !is_inter_block(above_mbmi);
- const int left_intra = !is_inter_block(left_mbmi);
+ const int above_intra = !is_inter_block(above_mi);
+ const int left_intra = !is_inter_block(left_mi);
return left_intra && above_intra ? 3
: left_intra || above_intra;
} else if (has_above || has_left) { // one edge available
- return 2 * !is_inter_block(has_above ? above_mbmi : left_mbmi);
+ return 2 * !is_inter_block(has_above ? above_mi : left_mi);
} else {
return 0;
}
@@ -64,8 +64,8 @@ int vp9_get_intra_inter_context(const MACROBLOCKD *xd) {
int vp9_get_reference_mode_context(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int ctx;
- const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
- const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
// Note:
@@ -73,26 +73,26 @@ int vp9_get_reference_mode_context(const VP9_COMMON *cm,
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
if (has_above && has_left) { // both edges available
- if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi))
+ if (!has_second_ref(above_mi) && !has_second_ref(left_mi))
// neither edge uses comp pred (0/1)
- ctx = (above_mbmi->ref_frame[0] == cm->comp_fixed_ref) ^
- (left_mbmi->ref_frame[0] == cm->comp_fixed_ref);
- else if (!has_second_ref(above_mbmi))
+ ctx = (above_mi->ref_frame[0] == cm->comp_fixed_ref) ^
+ (left_mi->ref_frame[0] == cm->comp_fixed_ref);
+ else if (!has_second_ref(above_mi))
// one of two edges uses comp pred (2/3)
- ctx = 2 + (above_mbmi->ref_frame[0] == cm->comp_fixed_ref ||
- !is_inter_block(above_mbmi));
- else if (!has_second_ref(left_mbmi))
+ ctx = 2 + (above_mi->ref_frame[0] == cm->comp_fixed_ref ||
+ !is_inter_block(above_mi));
+ else if (!has_second_ref(left_mi))
// one of two edges uses comp pred (2/3)
- ctx = 2 + (left_mbmi->ref_frame[0] == cm->comp_fixed_ref ||
- !is_inter_block(left_mbmi));
+ ctx = 2 + (left_mi->ref_frame[0] == cm->comp_fixed_ref ||
+ !is_inter_block(left_mi));
else // both edges use comp pred (4)
ctx = 4;
} else if (has_above || has_left) { // one edge available
- const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi;
+ const MODE_INFO *edge_mi = has_above ? above_mi : left_mi;
- if (!has_second_ref(edge_mbmi))
+ if (!has_second_ref(edge_mi))
// edge does not use comp pred (0/1)
- ctx = edge_mbmi->ref_frame[0] == cm->comp_fixed_ref;
+ ctx = edge_mi->ref_frame[0] == cm->comp_fixed_ref;
else
// edge uses comp pred (3)
ctx = 3;
@@ -107,8 +107,8 @@ int vp9_get_reference_mode_context(const VP9_COMMON *cm,
int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int pred_context;
- const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
- const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
const int above_in_image = xd->up_available;
const int left_in_image = xd->left_available;
@@ -120,26 +120,26 @@ int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
const int var_ref_idx = !fix_ref_idx;
if (above_in_image && left_in_image) { // both edges available
- const int above_intra = !is_inter_block(above_mbmi);
- const int left_intra = !is_inter_block(left_mbmi);
+ const int above_intra = !is_inter_block(above_mi);
+ const int left_intra = !is_inter_block(left_mi);
if (above_intra && left_intra) { // intra/intra (2)
pred_context = 2;
} else if (above_intra || left_intra) { // intra/inter
- const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
+ const MODE_INFO *edge_mi = above_intra ? left_mi : above_mi;
- if (!has_second_ref(edge_mbmi)) // single pred (1/3)
- pred_context = 1 + 2 * (edge_mbmi->ref_frame[0] != cm->comp_var_ref[1]);
+ if (!has_second_ref(edge_mi)) // single pred (1/3)
+ pred_context = 1 + 2 * (edge_mi->ref_frame[0] != cm->comp_var_ref[1]);
else // comp pred (1/3)
- pred_context = 1 + 2 * (edge_mbmi->ref_frame[var_ref_idx]
+ pred_context = 1 + 2 * (edge_mi->ref_frame[var_ref_idx]
!= cm->comp_var_ref[1]);
} else { // inter/inter
- const int l_sg = !has_second_ref(left_mbmi);
- const int a_sg = !has_second_ref(above_mbmi);
- const MV_REFERENCE_FRAME vrfa = a_sg ? above_mbmi->ref_frame[0]
- : above_mbmi->ref_frame[var_ref_idx];
- const MV_REFERENCE_FRAME vrfl = l_sg ? left_mbmi->ref_frame[0]
- : left_mbmi->ref_frame[var_ref_idx];
+ const int l_sg = !has_second_ref(left_mi);
+ const int a_sg = !has_second_ref(above_mi);
+ const MV_REFERENCE_FRAME vrfa = a_sg ? above_mi->ref_frame[0]
+ : above_mi->ref_frame[var_ref_idx];
+ const MV_REFERENCE_FRAME vrfl = l_sg ? left_mi->ref_frame[0]
+ : left_mi->ref_frame[var_ref_idx];
if (vrfa == vrfl && cm->comp_var_ref[1] == vrfa) {
pred_context = 0;
@@ -167,16 +167,16 @@ int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
}
}
} else if (above_in_image || left_in_image) { // one edge available
- const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
+ const MODE_INFO *edge_mi = above_in_image ? above_mi : left_mi;
- if (!is_inter_block(edge_mbmi)) {
+ if (!is_inter_block(edge_mi)) {
pred_context = 2;
} else {
- if (has_second_ref(edge_mbmi))
- pred_context = 4 * (edge_mbmi->ref_frame[var_ref_idx]
+ if (has_second_ref(edge_mi))
+ pred_context = 4 * (edge_mi->ref_frame[var_ref_idx]
!= cm->comp_var_ref[1]);
else
- pred_context = 3 * (edge_mbmi->ref_frame[0] != cm->comp_var_ref[1]);
+ pred_context = 3 * (edge_mi->ref_frame[0] != cm->comp_var_ref[1]);
}
} else { // no edges available (2)
pred_context = 2;
@@ -188,8 +188,8 @@ int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
int pred_context;
- const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
- const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
// Note:
@@ -197,25 +197,25 @@ int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
if (has_above && has_left) { // both edges available
- const int above_intra = !is_inter_block(above_mbmi);
- const int left_intra = !is_inter_block(left_mbmi);
+ const int above_intra = !is_inter_block(above_mi);
+ const int left_intra = !is_inter_block(left_mi);
if (above_intra && left_intra) { // intra/intra
pred_context = 2;
} else if (above_intra || left_intra) { // intra/inter or inter/intra
- const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
- if (!has_second_ref(edge_mbmi))
- pred_context = 4 * (edge_mbmi->ref_frame[0] == LAST_FRAME);
+ const MODE_INFO *edge_mi = above_intra ? left_mi : above_mi;
+ if (!has_second_ref(edge_mi))
+ pred_context = 4 * (edge_mi->ref_frame[0] == LAST_FRAME);
else
- pred_context = 1 + (edge_mbmi->ref_frame[0] == LAST_FRAME ||
- edge_mbmi->ref_frame[1] == LAST_FRAME);
+ pred_context = 1 + (edge_mi->ref_frame[0] == LAST_FRAME ||
+ edge_mi->ref_frame[1] == LAST_FRAME);
} else { // inter/inter
- const int above_has_second = has_second_ref(above_mbmi);
- const int left_has_second = has_second_ref(left_mbmi);
- const MV_REFERENCE_FRAME above0 = above_mbmi->ref_frame[0];
- const MV_REFERENCE_FRAME above1 = above_mbmi->ref_frame[1];
- const MV_REFERENCE_FRAME left0 = left_mbmi->ref_frame[0];
- const MV_REFERENCE_FRAME left1 = left_mbmi->ref_frame[1];
+ const int above_has_second = has_second_ref(above_mi);
+ const int left_has_second = has_second_ref(left_mi);
+ const MV_REFERENCE_FRAME above0 = above_mi->ref_frame[0];
+ const MV_REFERENCE_FRAME above1 = above_mi->ref_frame[1];
+ const MV_REFERENCE_FRAME left0 = left_mi->ref_frame[0];
+ const MV_REFERENCE_FRAME left1 = left_mi->ref_frame[1];
if (above_has_second && left_has_second) {
pred_context = 1 + (above0 == LAST_FRAME || above1 == LAST_FRAME ||
@@ -234,15 +234,15 @@ int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
}
}
} else if (has_above || has_left) { // one edge available
- const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi;
- if (!is_inter_block(edge_mbmi)) { // intra
+ const MODE_INFO *edge_mi = has_above ? above_mi : left_mi;
+ if (!is_inter_block(edge_mi)) { // intra
pred_context = 2;
} else { // inter
- if (!has_second_ref(edge_mbmi))
- pred_context = 4 * (edge_mbmi->ref_frame[0] == LAST_FRAME);
+ if (!has_second_ref(edge_mi))
+ pred_context = 4 * (edge_mi->ref_frame[0] == LAST_FRAME);
else
- pred_context = 1 + (edge_mbmi->ref_frame[0] == LAST_FRAME ||
- edge_mbmi->ref_frame[1] == LAST_FRAME);
+ pred_context = 1 + (edge_mi->ref_frame[0] == LAST_FRAME ||
+ edge_mi->ref_frame[1] == LAST_FRAME);
}
} else { // no edges available
pred_context = 2;
@@ -254,8 +254,8 @@ int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
int pred_context;
- const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
- const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
@@ -264,29 +264,29 @@ int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
if (has_above && has_left) { // both edges available
- const int above_intra = !is_inter_block(above_mbmi);
- const int left_intra = !is_inter_block(left_mbmi);
+ const int above_intra = !is_inter_block(above_mi);
+ const int left_intra = !is_inter_block(left_mi);
if (above_intra && left_intra) { // intra/intra
pred_context = 2;
} else if (above_intra || left_intra) { // intra/inter or inter/intra
- const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
- if (!has_second_ref(edge_mbmi)) {
- if (edge_mbmi->ref_frame[0] == LAST_FRAME)
+ const MODE_INFO *edge_mi = above_intra ? left_mi : above_mi;
+ if (!has_second_ref(edge_mi)) {
+ if (edge_mi->ref_frame[0] == LAST_FRAME)
pred_context = 3;
else
- pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME);
+ pred_context = 4 * (edge_mi->ref_frame[0] == GOLDEN_FRAME);
} else {
- pred_context = 1 + 2 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME ||
- edge_mbmi->ref_frame[1] == GOLDEN_FRAME);
+ pred_context = 1 + 2 * (edge_mi->ref_frame[0] == GOLDEN_FRAME ||
+ edge_mi->ref_frame[1] == GOLDEN_FRAME);
}
} else { // inter/inter
- const int above_has_second = has_second_ref(above_mbmi);
- const int left_has_second = has_second_ref(left_mbmi);
- const MV_REFERENCE_FRAME above0 = above_mbmi->ref_frame[0];
- const MV_REFERENCE_FRAME above1 = above_mbmi->ref_frame[1];
- const MV_REFERENCE_FRAME left0 = left_mbmi->ref_frame[0];
- const MV_REFERENCE_FRAME left1 = left_mbmi->ref_frame[1];
+ const int above_has_second = has_second_ref(above_mi);
+ const int left_has_second = has_second_ref(left_mi);
+ const MV_REFERENCE_FRAME above0 = above_mi->ref_frame[0];
+ const MV_REFERENCE_FRAME above1 = above_mi->ref_frame[1];
+ const MV_REFERENCE_FRAME left0 = left_mi->ref_frame[0];
+ const MV_REFERENCE_FRAME left1 = left_mi->ref_frame[1];
if (above_has_second && left_has_second) {
if (above0 == left0 && above1 == left1)
@@ -321,16 +321,16 @@ int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
}
}
} else if (has_above || has_left) { // one edge available
- const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi;
+ const MODE_INFO *edge_mi = has_above ? above_mi : left_mi;
- if (!is_inter_block(edge_mbmi) ||
- (edge_mbmi->ref_frame[0] == LAST_FRAME && !has_second_ref(edge_mbmi)))
+ if (!is_inter_block(edge_mi) ||
+ (edge_mi->ref_frame[0] == LAST_FRAME && !has_second_ref(edge_mi)))
pred_context = 2;
- else if (!has_second_ref(edge_mbmi))
- pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME);
+ else if (!has_second_ref(edge_mi))
+ pred_context = 4 * (edge_mi->ref_frame[0] == GOLDEN_FRAME);
else
- pred_context = 3 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME ||
- edge_mbmi->ref_frame[1] == GOLDEN_FRAME);
+ pred_context = 3 * (edge_mi->ref_frame[0] == GOLDEN_FRAME ||
+ edge_mi->ref_frame[1] == GOLDEN_FRAME);
} else { // no edges available (2)
pred_context = 2;
}
diff --git a/vp9/common/vp9_pred_common.h b/vp9/common/vp9_pred_common.h
index 6f7af4a50..254cb8b74 100644
--- a/vp9/common/vp9_pred_common.h
+++ b/vp9/common/vp9_pred_common.h
@@ -42,8 +42,8 @@ static INLINE int vp9_get_pred_context_seg_id(const MACROBLOCKD *xd) {
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int above_sip = (above_mi != NULL) ?
- above_mi->mbmi.seg_id_predicted : 0;
- const int left_sip = (left_mi != NULL) ? left_mi->mbmi.seg_id_predicted : 0;
+ above_mi->seg_id_predicted : 0;
+ const int left_sip = (left_mi != NULL) ? left_mi->seg_id_predicted : 0;
return above_sip + left_sip;
}
@@ -56,8 +56,8 @@ static INLINE vpx_prob vp9_get_pred_prob_seg_id(const struct segmentation *seg,
static INLINE int vp9_get_skip_context(const MACROBLOCKD *xd) {
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
- const int above_skip = (above_mi != NULL) ? above_mi->mbmi.skip : 0;
- const int left_skip = (left_mi != NULL) ? left_mi->mbmi.skip : 0;
+ const int above_skip = (above_mi != NULL) ? above_mi->skip : 0;
+ const int left_skip = (left_mi != NULL) ? left_mi->skip : 0;
return above_skip + left_skip;
}
@@ -110,15 +110,15 @@ static INLINE vpx_prob vp9_get_pred_prob_single_ref_p2(const VP9_COMMON *cm,
// left of the entries corresponding to real blocks.
// The prediction flags in these dummy entries are initialized to 0.
static INLINE int get_tx_size_context(const MACROBLOCKD *xd) {
- const int max_tx_size = max_txsize_lookup[xd->mi[0]->mbmi.sb_type];
- const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
- const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
+ const int max_tx_size = max_txsize_lookup[xd->mi[0]->sb_type];
+ const MODE_INFO *const above_mi = xd->above_mi;
+ const MODE_INFO *const left_mi = xd->left_mi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
- int above_ctx = (has_above && !above_mbmi->skip) ? (int)above_mbmi->tx_size
- : max_tx_size;
- int left_ctx = (has_left && !left_mbmi->skip) ? (int)left_mbmi->tx_size
- : max_tx_size;
+ int above_ctx = (has_above && !above_mi->skip) ? (int)above_mi->tx_size
+ : max_tx_size;
+ int left_ctx = (has_left && !left_mi->skip) ? (int)left_mi->tx_size
+ : max_tx_size;
if (!has_left)
left_ctx = above_ctx;
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 37658dc94..74bc1d23e 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -159,8 +159,8 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
int mi_x, int mi_y) {
struct macroblockd_plane *const pd = &xd->plane[plane];
const MODE_INFO *mi = xd->mi[0];
- const int is_compound = has_second_ref(&mi->mbmi);
- const InterpKernel *kernel = vp9_filter_kernels[mi->mbmi.interp_filter];
+ const int is_compound = has_second_ref(mi);
+ const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
int ref;
for (ref = 0; ref < 1 + is_compound; ++ref) {
@@ -168,9 +168,9 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
struct buf_2d *const pre_buf = &pd->pre[ref];
struct buf_2d *const dst_buf = &pd->dst;
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
- const MV mv = mi->mbmi.sb_type < BLOCK_8X8
+ const MV mv = mi->sb_type < BLOCK_8X8
? average_split_mvs(pd, mi, ref, block)
- : mi->mbmi.mv[ref].as_mv;
+ : mi->mv[ref].as_mv;
// TODO(jkoleszar): This clamping is done in the incorrect place for the
// scaling case. It needs to be done on the scaled MV, not the pre-scaling
@@ -191,8 +191,8 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
const int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
const int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
#if CONFIG_BETTER_HW_COMPATIBILITY
- assert(xd->mi[0]->mbmi.sb_type != BLOCK_4X8 &&
- xd->mi[0]->mbmi.sb_type != BLOCK_8X4);
+ assert(xd->mi[0]->sb_type != BLOCK_4X8 &&
+ xd->mi[0]->sb_type != BLOCK_8X4);
assert(mv_q4.row == mv.row * (1 << (1 - pd->subsampling_y)) &&
mv_q4.col == mv.col * (1 << (1 - pd->subsampling_x)));
#endif
@@ -250,7 +250,7 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
const int bw = 4 * num_4x4_w;
const int bh = 4 * num_4x4_h;
- if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
+ if (xd->mi[0]->sb_type < BLOCK_8X8) {
int i = 0, x, y;
assert(bsize == BLOCK_8X8);
for (y = 0; y < num_4x4_h; ++y)
diff --git a/vp9/common/vp9_scan.h b/vp9/common/vp9_scan.h
index 1d86b5cfe..4c1ee8107 100644
--- a/vp9/common/vp9_scan.h
+++ b/vp9/common/vp9_scan.h
@@ -42,7 +42,7 @@ static INLINE const scan_order *get_scan(const MACROBLOCKD *xd, TX_SIZE tx_size,
PLANE_TYPE type, int block_idx) {
const MODE_INFO *const mi = xd->mi[0];
- if (is_inter_block(&mi->mbmi) || type != PLANE_TYPE_Y || xd->lossless) {
+ if (is_inter_block(mi) || type != PLANE_TYPE_Y || xd->lossless) {
return &vp9_default_scan_orders[tx_size];
} else {
const PREDICTION_MODE mode = get_y_mode(mi, block_idx);