Diffstat (limited to 'vp9/encoder')
-rw-r--r--   vp9/encoder/vp9_bitstream.c          72
-rw-r--r--   vp9/encoder/vp9_encodeframe.c       241
-rw-r--r--   vp9/encoder/vp9_encodeintra.c         2
-rw-r--r--   vp9/encoder/vp9_encodemb.c           10
-rw-r--r--   vp9/encoder/vp9_encodemv.c            4
-rw-r--r--   vp9/encoder/vp9_firstpass.c          37
-rw-r--r--   vp9/encoder/vp9_mbgraph.c             4
-rw-r--r--   vp9/encoder/vp9_mcomp.c               6
-rw-r--r--   vp9/encoder/vp9_onyx_if.c            17
-rw-r--r--   vp9/encoder/vp9_quantize.c            2
-rw-r--r--   vp9/encoder/vp9_rdopt.c             148
-rw-r--r--   vp9/encoder/vp9_segmentation.c       35
-rw-r--r--   vp9/encoder/vp9_temporal_filter.c    10
-rw-r--r--   vp9/encoder/vp9_tokenize.c            4
14 files changed, 278 insertions, 314 deletions
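
The hunks below drop the encoder's grid-of-pointers view of the mode-info map (MODE_INFO **mi_8x8 together with the xd->this_mi / xd->last_mi aliases) and return to indexing the contiguous MODE_INFO array directly through xd->mode_info_context and cm->mode_info_stride. The following is a minimal sketch, not libvpx code, of the two addressing schemes; mbmi_from_grid and mbmi_from_array are invented names, while the types are the ones declared in vp9/common/vp9_blockd.h.

/* '-' side: every 8x8 grid cell holds a pointer into the MODE_INFO pool, so
 * one block's info is shared by aliasing the same pointer into all of the
 * cells it covers. */
static MB_MODE_INFO *mbmi_from_grid(MODE_INFO **mi_8x8, int mis,
                                    int mi_row, int mi_col) {
  return &mi_8x8[mi_row * mis + mi_col]->mbmi;
}

/* '+' side: mode_info_context points straight into a contiguous MODE_INFO
 * array; neighbours are reached with stride arithmetic, and per-block info
 * has to be copied into every covered cell instead of aliased. */
static MB_MODE_INFO *mbmi_from_array(MODE_INFO *mi, int mis,
                                     int mi_row, int mi_col) {
  return &mi[mi_row * mis + mi_col].mbmi;
}
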
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index 635891efb..f3bbc17ce 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -198,11 +198,10 @@ static void update_mbintra_mode_probs(VP9_COMP* const cpi,
(unsigned int *)cpi->y_mode_count[j]);
}
-static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m,
- TX_SIZE tx_size, BLOCK_SIZE bsize,
- vp9_writer *w) {
+static void write_selected_tx_size(const VP9_COMP *cpi, TX_SIZE tx_size,
+ BLOCK_SIZE bsize, vp9_writer *w) {
const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs, m);
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs);
vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
if (bsize >= BLOCK_16X16 && tx_size != TX_4X4) {
vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
@@ -360,7 +359,7 @@ static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *mi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *mi = &xd->mode_info_context->mbmi;
const int segment_id = mi->segment_id;
int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
SEG_LVL_REF_FRAME);
@@ -438,7 +437,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
!(rf != INTRA_FRAME &&
(skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
- write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc);
+ write_selected_tx_size(cpi, mi->tx_size, bsize, bc);
}
if (rf == INTRA_FRAME) {
@@ -532,16 +531,14 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
}
}
-static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
+static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m,
vp9_writer *bc) {
const VP9_COMMON *const cm = &cpi->common;
const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
const struct segmentation *const seg = &cm->seg;
- MODE_INFO *m = mi_8x8[0];
const int ym = m->mbmi.mode;
+ const int mis = cm->mode_info_stride;
const int segment_id = m->mbmi.segment_id;
- MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride];
- MODE_INFO *left_mi = mi_8x8[-1];
if (seg->update_map)
write_segment_id(bc, seg, m->mbmi.segment_id);
@@ -549,12 +546,12 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
write_skip_coeff(cpi, segment_id, m, bc);
if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
- write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc);
+ write_selected_tx_size(cpi, m->mbmi.tx_size, m->mbmi.sb_type, bc);
if (m->mbmi.sb_type >= BLOCK_8X8) {
- const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
+ const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
const MB_PREDICTION_MODE L = xd->left_available ?
- left_block_mode(m, left_mi, 0) : DC_PRED;
+ left_block_mode(m, 0) : DC_PRED;
write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]);
} else {
int idx, idy;
@@ -562,10 +559,10 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type];
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
- int i = idy * 2 + idx;
- const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i);
+ const int i = idy * 2 + idx;
+ const MB_PREDICTION_MODE A = above_block_mode(m, i, mis);
const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
- left_block_mode(m, left_mi, i) : DC_PRED;
+ left_block_mode(m, i) : DC_PRED;
const int bm = m->bmi[i].as_mode;
#ifdef ENTROPY_STATS
++intra_mode_stats[A][L][bm];
@@ -578,25 +575,23 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]);
}
-static void write_modes_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
+static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end,
int mi_row, int mi_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- MODE_INFO *m = mi_8x8[0];
if (m->mbmi.sb_type < BLOCK_8X8)
if (xd->ab_index > 0)
return;
- xd->this_mi = mi_8x8[0];
- xd->mi_8x8 = mi_8x8;
-
+ xd->mode_info_context = m;
set_mi_row_col(&cpi->common, xd,
mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type]);
- if ((cm->frame_type == KEY_FRAME) || cm->intra_only) {
- write_mb_modes_kf(cpi, mi_8x8, bc);
+
+ if (cm->frame_type == KEY_FRAME || cm->intra_only) {
+ write_mb_modes_kf(cpi, m, bc);
#ifdef ENTROPY_STATS
active_section = 8;
#endif
@@ -611,7 +606,7 @@ static void write_modes_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
pack_mb_tokens(bc, tok, tok_end);
}
-static void write_modes_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
+static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end,
int mi_row, int mi_col, BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
@@ -622,7 +617,6 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
int n;
PARTITION_TYPE partition = PARTITION_NONE;
BLOCK_SIZE subsize;
- MODE_INFO *m = mi_8x8[0];
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
@@ -654,26 +648,25 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
switch (partition) {
case PARTITION_NONE:
- write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
break;
case PARTITION_HORZ:
- write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
*(get_sb_index(xd, subsize)) = 1;
if ((mi_row + bs) < cm->mi_rows)
- write_modes_b(cpi, mi_8x8 + bs * mis, bc, tok, tok_end, mi_row + bs,
- mi_col);
+ write_modes_b(cpi, m + bs * mis, bc, tok, tok_end, mi_row + bs, mi_col);
break;
case PARTITION_VERT:
- write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
*(get_sb_index(xd, subsize)) = 1;
if ((mi_col + bs) < cm->mi_cols)
- write_modes_b(cpi, mi_8x8 + bs, bc, tok, tok_end, mi_row, mi_col + bs);
+ write_modes_b(cpi, m + bs, bc, tok, tok_end, mi_row, mi_col + bs);
break;
case PARTITION_SPLIT:
for (n = 0; n < 4; n++) {
int j = n >> 1, i = n & 0x01;
*(get_sb_index(xd, subsize)) = n;
- write_modes_sb(cpi, mi_8x8 + j * bs * mis + i * bs, bc, tok, tok_end,
+ write_modes_sb(cpi, m + j * bs * mis + i * bs, bc, tok, tok_end,
mi_row + j * bs, mi_col + i * bs, subsize);
}
break;
@@ -693,21 +686,18 @@ static void write_modes(VP9_COMP *cpi, vp9_writer* const bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
+ MODE_INFO *m, *m_ptr = cm->mi;
int mi_row, mi_col;
- MODE_INFO **mi_8x8 = cm->mi_grid_visible;
- MODE_INFO **m_8x8;
- mi_8x8 += cm->cur_tile_mi_col_start + cm->cur_tile_mi_row_start * mis;
+ m_ptr += cm->cur_tile_mi_col_start + cm->cur_tile_mi_row_start * mis;
for (mi_row = cm->cur_tile_mi_row_start; mi_row < cm->cur_tile_mi_row_end;
- mi_row += 8, mi_8x8 += 8 * mis) {
- m_8x8 = mi_8x8;
+ mi_row += 8, m_ptr += 8 * mis) {
+ m = m_ptr;
vp9_zero(cm->left_seg_context);
for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
- mi_col += MI_BLOCK_SIZE, m_8x8 += MI_BLOCK_SIZE) {
- write_modes_sb(cpi, m_8x8, bc, tok, tok_end, mi_row, mi_col,
- BLOCK_64X64);
- }
+ mi_col += MI_BLOCK_SIZE, m += MI_BLOCK_SIZE)
+ write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
}
}
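
With the grid gone, write_mb_modes_kf() above no longer receives explicit above/left MODE_INFO pointers; above_block_mode() / left_block_mode() take the mode-info stride and step to the neighbouring entry themselves. A rough sketch of that lookup, assuming the helpers keep the pre-grid shape from vp9/common/vp9_blockd.h; sketch_above_block_mode is an invented name, and the left-neighbour variant is analogous with cur -= 1 and a (b & 1) test.

/* Top row of 4x4 sub-blocks reads its neighbour from the MODE_INFO one
 * stride above, interior sub-blocks stay inside the current MODE_INFO, and
 * inter-coded neighbours count as DC_PRED. */
static MB_PREDICTION_MODE sketch_above_block_mode(const MODE_INFO *cur,
                                                  int b, int mis) {
  if (b >> 1)                          /* bottom row: in-block neighbour */
    return cur->bmi[b - 2].as_mode;
  cur -= mis;                          /* top row: step to the MI above */
  if (is_inter_block(&cur->mbmi))
    return DC_PRED;
  return cur->mbmi.sb_type < BLOCK_8X8 ? cur->bmi[b + 2].as_mode
                                       : cur->mbmi.mode;
}
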
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 983ac9a38..45758e7cb 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -342,8 +342,7 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = &ctx->mic;
- MB_MODE_INFO * const mbmi = &xd->this_mi->mbmi;
- MODE_INFO *mi_addr = xd->this_mi;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
int mb_mode_index = ctx->best_mode_index;
const int mis = cm->mode_info_stride;
@@ -356,15 +355,13 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES);
assert(mi->mbmi.sb_type == bsize);
- *mi_addr = *mi;
-
// Restore the coding context of the MB to that that was in place
// when the mode was picked for it
for (y = 0; y < mi_height; y++)
for (x_idx = 0; x_idx < mi_width; x_idx++)
if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
&& (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y)
- xd->mi_8x8[x_idx + y * mis] = mi_addr;
+ xd->mode_info_context[x_idx + y * mis] = *mi;
// FIXME(rbultje) I'm pretty sure this should go to the end of this block
// (i.e. after the output_enabled)
@@ -433,6 +430,15 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
}
+ if (bsize > BLOCK_8X8 && mbmi->mode == NEWMV) {
+ int i, j;
+ for (j = 0; j < mi_height; ++j)
+ for (i = 0; i < mi_width; ++i)
+ if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > i
+ && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > j)
+ xd->mode_info_context[mis * j + i].mbmi = *mbmi;
+ }
+
if (cm->mcomp_filter_type == SWITCHABLE && is_inter_mode(mbmi->mode)) {
const int ctx = vp9_get_pred_context_switchable_interp(xd);
++cm->counts.switchable_interp[ctx][mbmi->interp_filter];
@@ -486,18 +492,11 @@ static void set_offsets(VP9_COMP *cpi, int mi_row, int mi_col,
/* pointers to mode info contexts */
x->partition_info = x->pi + idx_str;
-
- xd->mi_8x8 = cm->mi_grid_visible + idx_str;
- xd->prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
-
+ xd->mode_info_context = cm->mi + idx_str;
+ mbmi = &xd->mode_info_context->mbmi;
// Special case: if prev_mi is NULL, the previous mode info context
// cannot be used.
- xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;
-
- xd->this_mi =
- xd->mi_8x8[0] = cm->mi + idx_str;
-
- mbmi = &xd->this_mi->mbmi;
+ xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + idx_str : NULL;
// Set up destination pointers
setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col);
@@ -574,10 +573,10 @@ static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
}
set_offsets(cpi, mi_row, mi_col, bsize);
- xd->this_mi->mbmi.sb_type = bsize;
+ xd->mode_info_context->mbmi.sb_type = bsize;
// Set to zero to make sure we do not use the previous encoded frame stats
- xd->this_mi->mbmi.skip_coeff = 0;
+ xd->mode_info_context->mbmi.skip_coeff = 0;
x->source_variance = get_sby_perpixel_variance(cpi, x, bsize);
@@ -598,7 +597,7 @@ static void update_stats(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *mi = xd->this_mi;
+ MODE_INFO *mi = xd->mode_info_context;
MB_MODE_INFO *const mbmi = &mi->mbmi;
if (cm->frame_type != KEY_FRAME) {
@@ -877,7 +876,7 @@ static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
// However, at the bottom and right borders of the image the requested size
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
-static void set_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m,
int mi_row, int mi_col) {
VP9_COMMON *const cm = &cpi->common;
BLOCK_SIZE bsize = cpi->sf.always_this_block_size;
@@ -893,7 +892,7 @@ static void set_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
(row8x8_remaining >= MI_BLOCK_SIZE)) {
for (block_row = 0; block_row < MI_BLOCK_SIZE; ++block_row) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; ++block_col) {
- mi_8x8[block_row * mis + block_col]->mbmi.sb_type = bsize;
+ m[block_row * mis + block_col].mbmi.sb_type = bsize;
}
}
} else {
@@ -917,41 +916,36 @@ static void set_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
for (sub_block_col = 0; sub_block_col < bw; ++sub_block_col) {
row_index = block_row + sub_block_row;
col_index = block_col + sub_block_col;
- mi_8x8[row_index * mis + col_index]->mbmi.sb_type = bsize;
+ m[row_index * mis + col_index].mbmi.sb_type = bsize;
}
}
}
}
}
}
-static void copy_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
- MODE_INFO **prev_mi_8x8) {
+static void copy_partitioning(VP9_COMP *cpi, MODE_INFO *m, MODE_INFO *p) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
int block_row, block_col;
-
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
- MODE_INFO * prev_mi = prev_mi_8x8[block_row * mis + block_col];
- MODE_INFO * mi = mi_8x8[block_row * mis + block_col];
- BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
- if (mi)
- mi->mbmi.sb_type = sb_type;
+ m[block_row * mis + block_col].mbmi.sb_type =
+ p[block_row * mis + block_col].mbmi.sb_type;
}
}
}
-static void set_block_size(VP9_COMMON * const cm, MODE_INFO **mi_8x8,
+static void set_block_size(VP9_COMMON * const cm, MODE_INFO *mi,
BLOCK_SIZE bsize, int mis, int mi_row,
int mi_col) {
int r, c;
const int bs = MAX(num_8x8_blocks_wide_lookup[bsize],
num_8x8_blocks_high_lookup[bsize]);
- MODE_INFO **const mi2 = &mi_8x8[mi_row * mis + mi_col];
+ MODE_INFO *const mi2 = &mi[mi_row * mis + mi_col];
for (r = 0; r < bs; r++)
for (c = 0; c < bs; c++)
if (mi_row + r < cm->mi_rows && mi_col + c < cm->mi_cols)
- mi2[r * mis + c]->mbmi.sb_type = bsize;
+ mi2[r * mis + c].mbmi.sb_type = bsize;
}
typedef struct {
@@ -1097,7 +1091,7 @@ static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
#else // !PERFORM_RANDOM_PARTITIONING
-static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO **m,
+static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
BLOCK_SIZE bsize, int mi_row,
int mi_col, int mi_size) {
VP9_COMMON * const cm = &cpi->common;
@@ -1135,8 +1129,8 @@ static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO **m,
}
#endif // PERFORM_RANDOM_PARTITIONING
-static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
- int mi_row, int mi_col) {
+static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
+ int mi_col) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -1174,21 +1168,18 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, LAST_FRAME)];
YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[idx];
YV12_BUFFER_CONFIG *second_ref_fb = NULL;
- MB_MODE_INFO * const mbmi = &xd->this_mi->mbmi;
setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col,
&xd->scale_factor[0]);
setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
&xd->scale_factor[1]);
-
- mbmi->ref_frame[0] = LAST_FRAME;
- mbmi->sb_type = BLOCK_64X64;
- vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[mbmi->ref_frame[0]],
+ xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
+ xd->mode_info_context->mbmi.sb_type = BLOCK_64X64;
+ vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
&nearest_mv, &near_mv);
- mbmi->mv[0] = nearest_mv;
+ xd->mode_info_context->mbmi.mv[0] = nearest_mv;
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);
-
d = xd->plane[0].dst.buf;
dp = xd->plane[0].dst.stride;
}
@@ -1225,24 +1216,24 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold, or we
// hit 8x8.
- if (!set_vt_partitioning(cpi, &vt, mi_8x8, BLOCK_64X64, mi_row, mi_col,
+ if (!set_vt_partitioning(cpi, &vt, m, BLOCK_64X64, mi_row, mi_col,
4)) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
- if (!set_vt_partitioning(cpi, &vt.split[i], mi_8x8, BLOCK_32X32,
+ if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_32X32,
(mi_row + y32_idx), (mi_col + x32_idx), 2)) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
- if (!set_vt_partitioning(cpi, &vt.split[i].split[j], mi_8x8,
+ if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m,
BLOCK_16X16,
(mi_row + y32_idx + y16_idx),
(mi_col + x32_idx + x16_idx), 1)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
- set_block_size(cm, mi_8x8, BLOCK_8X8, mis,
+ set_block_size(cm, m, BLOCK_8X8, mis,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx));
}
@@ -1253,10 +1244,9 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
}
}
-static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8,
- TOKENEXTRA **tp, int mi_row, int mi_col,
- BLOCK_SIZE bsize, int *rate, int64_t *dist,
- int do_recon) {
+static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ int *rate, int64_t *dist, int do_recon) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -1282,7 +1272,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8,
int64_t chosen_dist = INT_MAX;
BLOCK_SIZE sub_subsize = BLOCK_4X4;
int splits_below = 0;
- BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
+ BLOCK_SIZE bs_type = m->mbmi.sb_type;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
@@ -1315,8 +1305,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8,
splits_below = 1;
for (i = 0; i < 4; i++) {
int jj = i >> 1, ii = i & 0x01;
- MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss];
- if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
+ if (m[jj * bss * mis + ii * bss].mbmi.sb_type >= sub_subsize) {
splits_below = 0;
}
}
@@ -1336,7 +1325,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8,
none_rate += x->partition_cost[pl][PARTITION_NONE];
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
- mi_8x8[0]->mbmi.sb_type = bs_type;
+ m->mbmi.sb_type = bs_type;
*(get_sb_partitioning(x, bsize)) = subsize;
}
}
@@ -1407,9 +1396,8 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8,
*get_sb_index(xd, subsize) = i;
- rd_use_partition(cpi, mi_8x8 + jj * bss * mis + ii * bss, tp,
- mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
- i != 3);
+ rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
+ mi_col + x_idx, subsize, &rt, &dt, i != 3);
if (rt == INT_MAX || dt == INT_MAX) {
last_part_rate = INT_MAX;
last_part_dist = INT_MAX;
@@ -1490,7 +1478,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8,
// If last_part is better set the partitioning to that...
if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
< RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
- mi_8x8[0]->mbmi.sb_type = bsize;
+ m->mbmi.sb_type = bsize;
if (bsize >= BLOCK_8X8)
*(get_sb_partitioning(x, bsize)) = subsize;
chosen_rate = last_part_rate;
@@ -1538,9 +1526,9 @@ static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
//
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one sb64.
-static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8,
- BLOCK_SIZE * min_block_size,
- BLOCK_SIZE * max_block_size ) {
+static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO * mi,
+ BLOCK_SIZE *min_block_size,
+ BLOCK_SIZE *max_block_size ) {
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
int sb_width_in_blocks = MI_BLOCK_SIZE;
int sb_height_in_blocks = MI_BLOCK_SIZE;
@@ -1550,10 +1538,8 @@ static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8,
// Check the sb_type for each block that belongs to this region.
for (i = 0; i < sb_height_in_blocks; ++i) {
for (j = 0; j < sb_width_in_blocks; ++j) {
- MODE_INFO * mi = mi_8x8[index+j];
- BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
- *min_block_size = MIN(*min_block_size, sb_type);
- *max_block_size = MAX(*max_block_size, sb_type);
+ *min_block_size = MIN(*min_block_size, mi[index + j].mbmi.sb_type);
+ *max_block_size = MAX(*max_block_size, mi[index + j].mbmi.sb_type);
}
index += xd->mode_info_stride;
}
@@ -1565,12 +1551,13 @@ static void rd_auto_partition_range(VP9_COMP *cpi,
BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- MODE_INFO ** mi_8x8 = xd->mi_8x8;
- const int left_in_image = xd->left_available && mi_8x8[-1];
- const int above_in_image = xd->up_available &&
- mi_8x8[-xd->mode_info_stride];
- MODE_INFO ** above_sb64_mi_8x8;
- MODE_INFO ** left_sb64_mi_8x8;
+ MODE_INFO *mi = xd->mode_info_context;
+ MODE_INFO *above_sb64_mi;
+ MODE_INFO *left_sb64_mi;
+ const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
+ const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
+ const int left_in_image = xd->left_available && left_mbmi->in_image;
+ const int above_in_image = xd->up_available && above_mbmi->in_image;
// Frequency check
if (cpi->sf.auto_min_max_partition_count <= 0) {
@@ -1578,7 +1565,6 @@ static void rd_auto_partition_range(VP9_COMP *cpi,
cpi->sf.auto_min_max_partition_interval;
*min_block_size = BLOCK_4X4;
*max_block_size = BLOCK_64X64;
-
return;
} else {
--cpi->sf.auto_min_max_partition_count;
@@ -1595,16 +1581,16 @@ static void rd_auto_partition_range(VP9_COMP *cpi,
// Find the min and max partition sizes used in the left SB64
if (left_in_image) {
- left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE];
- get_sb_partition_size_range(cpi, left_sb64_mi_8x8,
+ left_sb64_mi = &mi[-MI_BLOCK_SIZE];
+ get_sb_partition_size_range(cpi, left_sb64_mi,
min_block_size, max_block_size);
}
// Find the min and max partition sizes used in the above SB64 taking
// the values found for left as a starting point.
if (above_in_image) {
- above_sb64_mi_8x8 = &mi_8x8[-xd->mode_info_stride * MI_BLOCK_SIZE];
- get_sb_partition_size_range(cpi, above_sb64_mi_8x8,
+ above_sb64_mi = &mi[-xd->mode_info_stride * MI_BLOCK_SIZE];
+ get_sb_partition_size_range(cpi, above_sb64_mi,
min_block_size, max_block_size);
}
@@ -2016,18 +2002,18 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning ||
cpi->sf.use_one_partition_size_always ) {
const int idx_str = cm->mode_info_stride * mi_row + mi_col;
- MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
- MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
+ MODE_INFO *m = cm->mi + idx_str;
+ MODE_INFO *p = cm->prev_mi + idx_str;
cpi->mb.source_variance = UINT_MAX;
if (cpi->sf.use_one_partition_size_always) {
set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
- set_partitioning(cpi, mi_8x8, mi_row, mi_col);
- rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+ set_partitioning(cpi, m, mi_row, mi_col);
+ rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
} else if (cpi->sf.partition_by_variance) {
- choose_partitioning(cpi, cm->mi_grid_visible, mi_row, mi_col);
- rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+ choose_partitioning(cpi, cm->mi, mi_row, mi_col);
+ rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
} else {
if ((cpi->common.current_video_frame
@@ -2046,8 +2032,8 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
} else {
- copy_partitioning(cpi, mi_8x8, prev_mi_8x8);
- rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+ copy_partitioning(cpi, m, p);
+ rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
}
}
@@ -2058,6 +2044,7 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
rd_auto_partition_range(cpi, &cpi->sf.min_partition_size,
&cpi->sf.max_partition_size);
}
+
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
}
@@ -2089,8 +2076,8 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
- xd->this_mi->mbmi.mode = DC_PRED;
- xd->this_mi->mbmi.uv_mode = DC_PRED;
+ xd->mode_info_context->mbmi.mode = DC_PRED;
+ xd->mode_info_context->mbmi.uv_mode = DC_PRED;
vp9_zero(cpi->y_mode_count)
vp9_zero(cpi->y_uv_mode_count)
@@ -2166,14 +2153,8 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vp9_zero(cm->counts.switchable_interp);
vp9_zero(cpi->txfm_stepdown_count);
- xd->mi_8x8 = cm->mi_grid_visible;
- // required for vp9_frame_init_quantizer
- xd->this_mi =
- xd->mi_8x8[0] = cm->mi;
- xd->mic_stream_ptr = cm->mi;
-
- xd->last_mi = cm->prev_mi;
-
+ xd->mode_info_context = cm->mi;
+ xd->prev_mode_info_context = cm->prev_mi;
vp9_zero(cpi->NMVcount);
vp9_zero(cpi->coef_counts);
@@ -2276,12 +2257,12 @@ static int check_dual_ref_flags(VP9_COMP *cpi) {
}
}
-static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) {
+static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++) {
- if (!mi_8x8[y * mis + x]->mbmi.skip_coeff)
+ if (!mi[y * mis + x].mbmi.skip_coeff)
return 0;
}
}
@@ -2289,39 +2270,42 @@ static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) {
return 1;
}
-static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs,
+static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs,
TX_SIZE tx_size) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++)
- mi_8x8[y * mis + x]->mbmi.tx_size = tx_size;
+ mi[y * mis + x].mbmi.tx_size = tx_size;
}
}
-static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO **mi_8x8,
- int mis, TX_SIZE max_tx_size, int bw, int bh,
+static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi, int mis,
+ TX_SIZE max_tx_size, int bw, int bh,
int mi_row, int mi_col, BLOCK_SIZE bsize) {
- VP9_COMMON * const cm = &cpi->common;
- MB_MODE_INFO * const mbmi = &mi_8x8[0]->mbmi;
+ VP9_COMMON *const cm = &cpi->common;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (mbmi->tx_size > max_tx_size) {
+ MACROBLOCK * const x = &cpi->mb;
+ MACROBLOCKD * const xd = &x->e_mbd;
const int ymbs = MIN(bh, cm->mi_rows - mi_row);
const int xmbs = MIN(bw, cm->mi_cols - mi_col);
+ xd->mode_info_context = mi;
assert(vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) ||
- get_skip_flag(mi_8x8, mis, ymbs, xmbs));
- set_txfm_flag(mi_8x8, mis, ymbs, xmbs, max_tx_size);
+ get_skip_flag(mi, mis, ymbs, xmbs));
+ set_txfm_flag(mi, mis, ymbs, xmbs, max_tx_size);
}
}
-static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
TX_SIZE max_tx_size, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
- VP9_COMMON * const cm = &cpi->common;
+ const VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
int bw, bh;
const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;
@@ -2329,23 +2313,22 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type];
- bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type];
+ bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
+ bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
if (bw == bs && bh == bs) {
- reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, bs, mi_row,
+ reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, bs, mi_row,
mi_col, bsize);
} else if (bw == bs && bh < bs) {
- reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, hbs, mi_row,
- mi_col, bsize);
- reset_skip_txfm_size_b(cpi, mi_8x8 + hbs * mis, mis, max_tx_size, bs, hbs,
+ reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, hbs, mi_row, mi_col,
+ bsize);
+ reset_skip_txfm_size_b(cpi, mi + hbs * mis, mis, max_tx_size, bs, hbs,
mi_row + hbs, mi_col, bsize);
} else if (bw < bs && bh == bs) {
- reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, hbs, bs, mi_row,
- mi_col, bsize);
- reset_skip_txfm_size_b(cpi, mi_8x8 + hbs, mis, max_tx_size, hbs, bs, mi_row,
+ reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, hbs, bs, mi_row, mi_col,
+ bsize);
+ reset_skip_txfm_size_b(cpi, mi + hbs, mis, max_tx_size, hbs, bs, mi_row,
mi_col + hbs, bsize);
-
} else {
const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
int n;
@@ -2356,7 +2339,7 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
const int mi_dc = hbs * (n & 1);
const int mi_dr = hbs * (n >> 1);
- reset_skip_txfm_size_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size,
+ reset_skip_txfm_size_sb(cpi, &mi[mi_dr * mis + mi_dc], max_tx_size,
mi_row + mi_dr, mi_col + mi_dc, subsize);
}
}
@@ -2366,15 +2349,12 @@ static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
VP9_COMMON * const cm = &cpi->common;
int mi_row, mi_col;
const int mis = cm->mode_info_stride;
-// MODE_INFO *mi, *mi_ptr = cm->mi;
- MODE_INFO **mi_8x8, **mi_ptr = cm->mi_grid_visible;
+ MODE_INFO *mi, *mi_ptr = cm->mi;
for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
- mi_8x8 = mi_ptr;
- for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) {
- reset_skip_txfm_size_sb(cpi, mi_8x8, txfm_max, mi_row, mi_col,
- BLOCK_64X64);
- }
+ mi = mi_ptr;
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8)
+ reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col, BLOCK_64X64);
}
}
@@ -2631,8 +2611,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
- MODE_INFO **mi_8x8 = xd->mi_8x8;
- MODE_INFO *mi = mi_8x8[0];
+ MODE_INFO *mi = xd->mode_info_context;
MB_MODE_INFO *mbmi = &mi->mbmi;
unsigned int segment_id = mbmi->segment_id;
const int mis = cm->mode_info_stride;
@@ -2711,8 +2690,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
} else {
- int mb_skip_context = xd->left_available ? mi_8x8[-1]->mbmi.skip_coeff : 0;
- mb_skip_context += mi_8x8[-mis] ? mi_8x8[-mis]->mbmi.skip_coeff : 0;
+ int mb_skip_context = xd->left_available ? (mi - 1)->mbmi.skip_coeff : 0;
+ mb_skip_context += (mi - mis)->mbmi.skip_coeff;
mbmi->skip_coeff = 1;
if (output_enabled)
@@ -2720,6 +2699,10 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
reset_skip_context(xd, MAX(bsize, BLOCK_8X8));
}
+ // copy skip flag on all mb_mode_info contexts in this SB
+ // if this was a skip at this txfm size
+ vp9_set_pred_flag_mbskip(cm, bsize, mi_row, mi_col, mi->mbmi.skip_coeff);
+
if (output_enabled) {
if (cm->tx_mode == TX_MODE_SELECT &&
mbmi->sb_type >= BLOCK_8X8 &&
@@ -2748,7 +2731,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
for (y = 0; y < mi_height; y++)
for (x = 0; x < mi_width; x++)
if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
- mi_8x8[mis * y + x]->mbmi.tx_size = sz;
+ mi[mis * y + x].mbmi.tx_size = sz;
}
}
}
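
A consequence visible in update_state(), copy_partitioning() and set_block_size() above: without a pointer grid to alias, the mode info chosen for a block must be copied into every 8x8 cell the block covers. A hedged sketch of that replication follows; sketch_replicate_mi and its clamp parameters are invented, whereas the real loop derives the clamp from xd->mb_to_right_edge / xd->mb_to_bottom_edge.

/* Replicate the picked MODE_INFO into each covered cell, skipping cells that
 * fall outside the visible frame.  'mis' is cm->mode_info_stride. */
static void sketch_replicate_mi(MODE_INFO *mi_ctx, const MODE_INFO *picked,
                                int mis, int mi_width, int mi_height,
                                int cells_right_in_frame,
                                int cells_below_in_frame) {
  int x, y;
  for (y = 0; y < mi_height; ++y)
    for (x = 0; x < mi_width; ++x)
      if (x < cells_right_in_frame && y < cells_below_in_frame)
        mi_ctx[y * mis + x] = *picked;
}
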
diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c
index c5e5dff08..588b77421 100644
--- a/vp9/encoder/vp9_encodeintra.c
+++ b/vp9/encoder/vp9_encodeintra.c
@@ -16,7 +16,7 @@
#include "vp9/encoder/vp9_encodeintra.h"
int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred) {
- MB_MODE_INFO * mbmi = &x->e_mbd.mi_8x8[0]->mbmi;
+ MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
x->skip_encode = 0;
mbmi->mode = DC_PRED;
mbmi->ref_frame[0] = INTRA_FRAME;
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index 13287f4f1..da9a3bda0 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -147,7 +147,7 @@ static void optimize_b(MACROBLOCK *mb,
TX_SIZE tx_size) {
MACROBLOCKD *const xd = &mb->e_mbd;
struct macroblockd_plane *pd = &xd->plane[plane];
- const int ref = is_inter_block(&xd->this_mi->mbmi);
+ const int ref = is_inter_block(&xd->mode_info_context->mbmi);
vp9_token_state tokens[1025][2];
unsigned best_index[1025][2];
const int16_t *coeff_ptr = BLOCK_OFFSET(mb->plane[plane].coeff, block);
@@ -199,7 +199,7 @@ static void optimize_b(MACROBLOCK *mb,
/* Now set up a Viterbi trellis to evaluate alternative roundings. */
rdmult = mb->rdmult * err_mult;
- if (mb->e_mbd.mi_8x8[0]->mbmi.ref_frame[0] == INTRA_FRAME)
+ if (mb->e_mbd.mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME)
rdmult = (rdmult * 9) >> 4;
rddiv = mb->rddiv;
/* Initialize the sentinel node of the trellis. */
@@ -385,7 +385,7 @@ static void optimize_init_b(int plane, BLOCK_SIZE bsize,
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
- const MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi) : mbmi->tx_size;
int i;
@@ -569,7 +569,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
struct encode_b_args* const args = arg;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
struct macroblock_plane *const p = &x->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
int16_t *coeff = BLOCK_OFFSET(p->coeff, block);
@@ -679,7 +679,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
scan = get_scan_4x4(tx_type);
iscan = get_iscan_4x4(tx_type);
if (mbmi->sb_type < BLOCK_8X8 && plane == 0)
- mode = xd->this_mi->bmi[block].as_mode;
+ mode = xd->mode_info_context->bmi[block].as_mode;
else
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
diff --git a/vp9/encoder/vp9_encodemv.c b/vp9/encoder/vp9_encodemv.c
index ed3a2bb64..997728930 100644
--- a/vp9/encoder/vp9_encodemv.c
+++ b/vp9/encoder/vp9_encodemv.c
@@ -316,7 +316,7 @@ void vp9_build_nmv_cost_table(int *mvjoint,
void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
- MODE_INFO *mi = x->e_mbd.mi_8x8[0];
+ MODE_INFO *mi = x->e_mbd.mode_info_context;
MB_MODE_INFO *const mbmi = &mi->mbmi;
MV diff;
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
@@ -333,7 +333,7 @@ void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
diff.col = mi->bmi[i].as_mv[0].as_mv.col - best_ref_mv->as_mv.col;
vp9_inc_mv(&diff, &cpi->NMVcount);
- if (mi->mbmi.ref_frame[1] > INTRA_FRAME) {
+ if (x->e_mbd.mode_info_context->mbmi.ref_frame[1] > INTRA_FRAME) {
diff.row = mi->bmi[i].as_mv[1].as_mv.row -
second_best_ref_mv->as_mv.row;
diff.col = mi->bmi[i].as_mv[1].as_mv.col -
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 3eaa9f8b6..92485f934 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -346,7 +346,7 @@ static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *r
// Set up pointers for this macro block recon buffer
xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
- switch (xd->this_mi->mbmi.sb_type) {
+ switch (xd->mode_info_context->mbmi.sb_type) {
case BLOCK_8X8:
vp9_mse8x8(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
@@ -385,7 +385,7 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
int n;
vp9_variance_fn_ptr_t v_fn_ptr =
- cpi->fn_ptr[xd->this_mi->mbmi.sb_type];
+ cpi->fn_ptr[xd->mode_info_context->mbmi.sb_type];
int new_mv_mode_penalty = 256;
int sr = 0;
@@ -402,7 +402,7 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
further_steps -= sr;
// override the default variance function to use MSE
- switch (xd->this_mi->mbmi.sb_type) {
+ switch (xd->mode_info_context->mbmi.sb_type) {
case BLOCK_8X8:
v_fn_ptr.vf = vp9_mse8x8;
break;
@@ -505,11 +505,8 @@ void vp9_first_pass(VP9_COMP *cpi) {
setup_dst_planes(xd, new_yv12, 0, 0);
x->partition_info = x->pi;
- xd->mi_8x8 = cm->mi_grid_visible;
- // required for vp9_frame_init_quantizer
- xd->this_mi =
- xd->mi_8x8[0] = cm->mi;
- xd->mic_stream_ptr = cm->mi;
+
+ xd->mode_info_context = cm->mi;
setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
@@ -552,23 +549,23 @@ void vp9_first_pass(VP9_COMP *cpi) {
if (mb_col * 2 + 1 < cm->mi_cols) {
if (mb_row * 2 + 1 < cm->mi_rows) {
- xd->this_mi->mbmi.sb_type = BLOCK_16X16;
+ xd->mode_info_context->mbmi.sb_type = BLOCK_16X16;
} else {
- xd->this_mi->mbmi.sb_type = BLOCK_16X8;
+ xd->mode_info_context->mbmi.sb_type = BLOCK_16X8;
}
} else {
if (mb_row * 2 + 1 < cm->mi_rows) {
- xd->this_mi->mbmi.sb_type = BLOCK_8X16;
+ xd->mode_info_context->mbmi.sb_type = BLOCK_8X16;
} else {
- xd->this_mi->mbmi.sb_type = BLOCK_8X8;
+ xd->mode_info_context->mbmi.sb_type = BLOCK_8X8;
}
}
- xd->this_mi->mbmi.ref_frame[0] = INTRA_FRAME;
+ xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
set_mi_row_col(cm, xd,
mb_row << 1,
- 1 << mi_height_log2(xd->this_mi->mbmi.sb_type),
+ 1 << mi_height_log2(xd->mode_info_context->mbmi.sb_type),
mb_col << 1,
- 1 << mi_height_log2(xd->this_mi->mbmi.sb_type));
+ 1 << mi_height_log2(xd->mode_info_context->mbmi.sb_type));
// do intra 16x16 prediction
this_error = vp9_encode_intra(x, use_dc_pred);
@@ -664,13 +661,13 @@ void vp9_first_pass(VP9_COMP *cpi) {
mv.as_mv.col <<= 3;
this_error = motion_error;
vp9_set_mbmode_and_mvs(x, NEWMV, &mv);
- xd->this_mi->mbmi.tx_size = TX_4X4;
- xd->this_mi->mbmi.ref_frame[0] = LAST_FRAME;
- xd->this_mi->mbmi.ref_frame[1] = NONE;
+ xd->mode_info_context->mbmi.tx_size = TX_4X4;
+ xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
+ xd->mode_info_context->mbmi.ref_frame[1] = NONE;
vp9_build_inter_predictors_sby(xd, mb_row << 1,
mb_col << 1,
- xd->this_mi->mbmi.sb_type);
- vp9_encode_sby(x, xd->this_mi->mbmi.sb_type);
+ xd->mode_info_context->mbmi.sb_type);
+ vp9_encode_sby(x, xd->mode_info_context->mbmi.sb_type);
sum_mvr += mv.as_mv.row;
sum_mvr_abs += abs(mv.as_mv.row);
sum_mvc += mv.as_mv.col;
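
The first-pass hunks above keep the existing border handling and only rename the mode-info accessor; for reference, the block-size clamp they perform before vp9_encode_intra() amounts to the sketch below (sketch_first_pass_bsize is an invented name).

/* A full 16x16 is kept only when both the second mi column and the second mi
 * row still lie inside the frame; otherwise the block shrinks in the
 * dimension that would spill over the border. */
static BLOCK_SIZE sketch_first_pass_bsize(int mb_col, int mb_row,
                                          int mi_cols, int mi_rows) {
  const int fits_right = 2 * mb_col + 1 < mi_cols;
  const int fits_below = 2 * mb_row + 1 < mi_rows;
  if (fits_right)
    return fits_below ? BLOCK_16X16 : BLOCK_16X8;
  return fits_below ? BLOCK_8X16 : BLOCK_8X8;
}
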
diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c
index 5a671f201..1baea643d 100644
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -145,7 +145,7 @@ static int find_best_16x16_intra(VP9_COMP *cpi,
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
unsigned int err;
- xd->this_mi->mbmi.mode = mode;
+ xd->mode_info_context->mbmi.mode = mode;
vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode,
x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride);
@@ -253,7 +253,7 @@ static void update_mbgraph_frame_stats(VP9_COMP *cpi,
xd->plane[0].dst.stride = buf->y_stride;
xd->plane[0].pre[0].stride = buf->y_stride;
xd->plane[1].dst.stride = buf->uv_stride;
- xd->this_mi = &mi_local;
+ xd->mode_info_context = &mi_local;
mi_local.mbmi.sb_type = BLOCK_16X16;
mi_local.mbmi.ref_frame[0] = LAST_FRAME;
mi_local.mbmi.ref_frame[1] = NONE;
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index 136008847..55e4c36de 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -1537,7 +1537,7 @@ int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv,
int in_what_stride = xd->plane[0].pre[0].stride;
int mv_stride = xd->plane[0].pre[0].stride;
uint8_t *bestaddress;
- int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
+ int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
@@ -1625,7 +1625,7 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv,
int in_what_stride = xd->plane[0].pre[0].stride;
int mv_stride = xd->plane[0].pre[0].stride;
uint8_t *bestaddress;
- int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
+ int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
int_mv this_mv;
unsigned int bestsad = INT_MAX;
int r, c;
@@ -1747,7 +1747,7 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
int in_what_stride = xd->plane[0].pre[0].stride;
int mv_stride = xd->plane[0].pre[0].stride;
uint8_t *bestaddress;
- int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
+ int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
int_mv this_mv;
unsigned int bestsad = INT_MAX;
int r, c;
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 1ba304904..d35b739fb 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -527,15 +527,15 @@ static void print_seg_map(VP9_COMP *cpi) {
static void update_reference_segmentation_map(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
int row, col;
- MODE_INFO **mi_8x8, **mi_8x8_ptr = cm->mi_grid_visible;
+ MODE_INFO *mi, *mi_ptr = cm->mi;
uint8_t *cache_ptr = cm->last_frame_seg_map, *cache;
for (row = 0; row < cm->mi_rows; row++) {
- mi_8x8 = mi_8x8_ptr;
+ mi = mi_ptr;
cache = cache_ptr;
- for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++)
- cache[0] = mi_8x8[0]->mbmi.segment_id;
- mi_8x8_ptr += cm->mode_info_stride;
+ for (col = 0; col < cm->mi_cols; col++, mi++, cache++)
+ cache[0] = mi->mbmi.segment_id;
+ mi_ptr += cm->mode_info_stride;
cache_ptr += cm->mi_cols;
}
}
@@ -3528,15 +3528,11 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
if (cm->show_frame) {
// current mip will be the prev_mip for the next frame
MODE_INFO *temp = cm->prev_mip;
- MODE_INFO **temp2 = cm->prev_mi_grid_base;
cm->prev_mip = cm->mip;
cm->mip = temp;
- cm->prev_mi_grid_base = cm->mi_grid_base;
- cm->mi_grid_base = temp2;
// update the upper left visible macroblock ptrs
cm->mi = cm->mip + cm->mode_info_stride + 1;
- cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1;
// Don't increment frame counters if this was an altref buffer
// update not a real frame
@@ -3545,9 +3541,8 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
}
// restore prev_mi
cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
- cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1;
- #if 0
+#if 0
{
char filename[512];
FILE *recon_file;
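
The vp9_onyx_if.c hunks drop the parallel swap of mi_grid_base / prev_mi_grid_base, so after a shown frame only the two MODE_INFO pools are double-buffered. A sketch of what remains, folding in the later prev_mi restore; sketch_swap_mi_buffers is an invented name, the fields are the VP9_COMMON members used above.

/* After a shown frame the current and previous MODE_INFO pools swap and the
 * visible pointers are re-derived with the usual one-row/one-column border
 * offset; no pointer-grid swap is needed any more. */
static void sketch_swap_mi_buffers(VP9_COMMON *cm) {
  MODE_INFO *tmp = cm->prev_mip;
  cm->prev_mip = cm->mip;
  cm->mip = tmp;
  cm->mi = cm->mip + cm->mode_info_stride + 1;
  cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
}
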
diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c
index 81e14265d..96abeff38 100644
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -278,7 +278,7 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
int i;
MACROBLOCKD *xd = &x->e_mbd;
int zbin_extra;
- int segment_id = xd->this_mi->mbmi.segment_id;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
const int qindex = vp9_get_qindex(&cpi->common.seg, segment_id,
cpi->common.base_qindex);
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index e219442c1..647265bf6 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -474,7 +474,7 @@ static INLINE int cost_coeffs(MACROBLOCK *mb,
TX_SIZE tx_size,
const int16_t *scan, const int16_t *nb) {
MACROBLOCKD *const xd = &mb->e_mbd;
- MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
struct macroblockd_plane *pd = &xd->plane[plane];
const PLANE_TYPE type = pd->plane_type;
const int16_t *band_count = &band_counts[tx_size][1];
@@ -567,7 +567,7 @@ static void dist_block(int plane, int block, TX_SIZE tx_size, void *arg) {
args->sse += this_sse >> shift;
if (x->skip_encode &&
- xd->this_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
+ xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME) {
// TODO(jingning): tune the model to better capture the distortion.
int64_t p = (pd->dequant[1] * pd->dequant[1] *
(1 << ss_txfrm_size)) >> shift;
@@ -610,7 +610,7 @@ static void block_yrd_txfm(int plane, int block, BLOCK_SIZE plane_bsize,
return;
}
- if (!is_inter_block(&xd->this_mi->mbmi))
+ if (!is_inter_block(&xd->mode_info_context->mbmi))
vp9_encode_block_intra(plane, block, plane_bsize, tx_size, &encode_args);
else
vp9_xform_quant(plane, block, plane_bsize, tx_size, &encode_args);
@@ -634,7 +634,7 @@ static void txfm_rd_in_plane(MACROBLOCK *x,
num_4x4_blocks_wide, num_4x4_blocks_high,
0, 0, 0, ref_best_rd, 0 };
if (plane == 0)
- xd->this_mi->mbmi.tx_size = tx_size;
+ xd->mode_info_context->mbmi.tx_size = tx_size;
switch (tx_size) {
case TX_4X4:
@@ -688,7 +688,7 @@ static void choose_largest_txfm_size(VP9_COMP *cpi, MACROBLOCK *x,
const TX_SIZE max_txfm_size = max_txsize_lookup[bs];
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
if (max_txfm_size == TX_32X32 &&
(cm->tx_mode == ALLOW_32X32 ||
cm->tx_mode == TX_MODE_SELECT)) {
@@ -718,13 +718,13 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd);
int64_t rd[TX_SIZES][2];
int n, m;
int s0, s1;
- const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs, xd->this_mi);
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs);
for (n = TX_4X4; n <= max_tx_size; n++) {
r[n][1] = r[n][0];
@@ -821,7 +821,7 @@ static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x,
const TX_SIZE max_txfm_size = max_txsize_lookup[bs];
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd);
int64_t rd[TX_SIZES][2];
int n, m;
@@ -829,7 +829,7 @@ static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x,
double scale_rd[TX_SIZES] = {1.73, 1.44, 1.20, 1.00};
// double scale_r[TX_SIZES] = {2.82, 2.00, 1.41, 1.00};
- const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs, xd->this_mi);
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs);
// for (n = TX_4X4; n <= max_txfm_size; n++)
// r[n][0] = (r[n][0] * scale_r[n]);
@@ -914,7 +914,7 @@ static void super_block_yrd(VP9_COMP *cpi,
int r[TX_SIZES][2], s[TX_SIZES];
int64_t d[TX_SIZES], sse[TX_SIZES];
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
assert(bs == mbmi->sb_type);
if (mbmi->ref_frame[0] > INTRA_FRAME)
@@ -1021,7 +1021,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
vpx_memcpy(ta, a, sizeof(ta));
vpx_memcpy(tl, l, sizeof(tl));
- xd->this_mi->mbmi.tx_size = TX_4X4;
+ xd->mode_info_context->mbmi.tx_size = TX_4X4;
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
int64_t this_rd;
@@ -1051,7 +1051,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
uint8_t *dst = dst_init + idx * 4 + idy * 4 * dst_stride;
block = ib + idy * 2 + idx;
- xd->this_mi->bmi[block].as_mode = mode;
+ xd->mode_info_context->bmi[block].as_mode = mode;
src_diff = raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
vp9_predict_intra_block(xd, block, 1,
@@ -1127,10 +1127,7 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi,
int64_t best_rd) {
int i, j;
MACROBLOCKD *const xd = &mb->e_mbd;
- MODE_INFO *const mic = xd->this_mi;
- const MODE_INFO *above_mi = xd->mi_8x8[-xd->mode_info_stride];
- const MODE_INFO *left_mi = xd->mi_8x8[-1];
- const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type;
+ const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type;
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
@@ -1140,6 +1137,7 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi,
int64_t total_rd = 0;
ENTROPY_CONTEXT t_above[4], t_left[4];
int *bmode_costs;
+ MODE_INFO *const mic = xd->mode_info_context;
vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
@@ -1149,15 +1147,15 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi,
// Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ const int mis = xd->mode_info_stride;
MB_PREDICTION_MODE best_mode = DC_PRED;
int r = INT_MAX, ry = INT_MAX;
int64_t d = INT64_MAX, this_rd = INT64_MAX;
i = idy * 2 + idx;
if (cpi->common.frame_type == KEY_FRAME) {
- const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, i);
+ const MB_PREDICTION_MODE A = above_block_mode(mic, i, mis);
const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
- left_block_mode(mic, left_mi, i) :
- DC_PRED;
+ left_block_mode(mic, i) : DC_PRED;
bmode_costs = mb->y_mode_costs[A][L];
}
@@ -1187,7 +1185,7 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi,
*rate = cost;
*rate_y = tot_rate_y;
*distortion = total_distortion;
- mic->mbmi.mode = mic->bmi[3].as_mode;
+ xd->mode_info_context->mbmi.mode = mic->bmi[3].as_mode;
return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
}
@@ -1201,7 +1199,7 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
MB_PREDICTION_MODE mode;
MB_PREDICTION_MODE mode_selected = DC_PRED;
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *const mic = xd->this_mi;
+ MODE_INFO *const mic = xd->mode_info_context;
int this_rate, this_rate_tokenonly, s;
int64_t this_distortion, this_rd;
TX_SIZE best_tx = TX_4X4;
@@ -1215,16 +1213,15 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
/* Y Search for intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
int64_t local_tx_cache[TX_MODES];
- MODE_INFO *above_mi = xd->mi_8x8[-xd->mode_info_stride];
- MODE_INFO *left_mi = xd->mi_8x8[-1];
+ const int mis = xd->mode_info_stride;
if (!(cpi->sf.intra_y_mode_mask & (1 << mode)))
continue;
if (cpi->common.frame_type == KEY_FRAME) {
- const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, 0);
+ const MB_PREDICTION_MODE A = above_block_mode(mic, 0, mis);
const MB_PREDICTION_MODE L = xd->left_available ?
- left_block_mode(mic, left_mi, 0) : DC_PRED;
+ left_block_mode(mic, 0) : DC_PRED;
bmode_costs = x->y_mode_costs[A][L];
}
@@ -1271,7 +1268,7 @@ static void super_block_uvrd(VP9_COMMON *const cm, MACROBLOCK *x,
int64_t *sse, BLOCK_SIZE bsize,
int64_t ref_best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
TX_SIZE uv_txfm_size = get_uv_tx_size(mbmi);
int plane;
int pnrate = 0, pnskip = 1;
@@ -1326,8 +1323,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (!(cpi->sf.intra_uv_mode_mask & (1 << mode)))
continue;
- x->e_mbd.mi_8x8[0]->mbmi.uv_mode = mode;
-
+ x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
super_block_uvrd(&cpi->common, x, &this_rate_tokenonly,
&this_distortion, &s, &this_sse, bsize, best_rd);
if (this_rate_tokenonly == INT_MAX)
@@ -1346,7 +1342,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
}
- x->e_mbd.mi_8x8[0]->mbmi.uv_mode = mode_selected;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
return best_rd;
}
@@ -1358,7 +1354,7 @@ static int64_t rd_sbuv_dcpred(VP9_COMP *cpi, MACROBLOCK *x,
int64_t this_rd;
int64_t this_sse;
- x->e_mbd.mi_8x8[0]->mbmi.uv_mode = DC_PRED;
+ x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
super_block_uvrd(&cpi->common, x, rate_tokenonly,
distortion, skippable, &this_sse, bsize, INT64_MAX);
*rate = *rate_tokenonly +
@@ -1386,14 +1382,14 @@ static void choose_intra_uv_mode(VP9_COMP *cpi, BLOCK_SIZE bsize,
rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
}
- *mode_uv = x->e_mbd.mi_8x8[0]->mbmi.uv_mode;
+ *mode_uv = x->e_mbd.mode_info_context->mbmi.uv_mode;
}
static int cost_mv_ref(VP9_COMP *cpi, MB_PREDICTION_MODE mode,
int mode_context) {
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- const int segment_id = xd->this_mi->mbmi.segment_id;
+ const int segment_id = xd->mode_info_context->mbmi.segment_id;
// Don't account for mode here if segment skip is enabled.
if (!vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
@@ -1405,8 +1401,8 @@ static int cost_mv_ref(VP9_COMP *cpi, MB_PREDICTION_MODE mode,
}
void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
- x->e_mbd.mi_8x8[0]->mbmi.mode = mb;
- x->e_mbd.mi_8x8[0]->mbmi.mv[0].as_int = mv->as_int;
+ x->e_mbd.mode_info_context->mbmi.mode = mb;
+ x->e_mbd.mode_info_context->mbmi.mv[0].as_int = mv->as_int;
}
static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
@@ -1429,7 +1425,7 @@ static int labels2mode(MACROBLOCK *x, int i,
int_mv *second_best_ref_mv,
int *mvjcost, int *mvcost[2], VP9_COMP *cpi) {
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *const mic = xd->this_mi;
+ MODE_INFO *const mic = xd->mode_info_context;
MB_MODE_INFO *mbmi = &mic->mbmi;
int cost = 0, thismvcost = 0;
int idx, idy;
@@ -1503,7 +1499,7 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
int k;
MACROBLOCKD *xd = &x->e_mbd;
struct macroblockd_plane *const pd = &xd->plane[0];
- MODE_INFO *const mi = xd->this_mi;
+ MODE_INFO *const mi = xd->mode_info_context;
const BLOCK_SIZE bsize = mi->mbmi.sb_type;
const int width = plane_block_width(bsize, pd);
const int height = plane_block_height(bsize, pd);
@@ -1603,7 +1599,7 @@ static INLINE int mv_check_bounds(MACROBLOCK *x, int_mv *mv) {
}
static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
- MB_MODE_INFO *const mbmi = &x->e_mbd.mi_8x8[0]->mbmi;
+ MB_MODE_INFO *const mbmi = &x->e_mbd.mode_info_context->mbmi;
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
@@ -1619,7 +1615,7 @@ static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
struct buf_2d orig_pre[2]) {
- MB_MODE_INFO *mbmi = &x->e_mbd.mi_8x8[0]->mbmi;
+ MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
x->plane[0].src = orig_src;
x->e_mbd.plane[0].pre[0] = orig_pre[0];
if (mbmi->ref_frame[1])
@@ -1633,7 +1629,7 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
int i, j, br = 0, idx, idy;
int64_t bd = 0, block_sse = 0;
MB_PREDICTION_MODE this_mode;
- MODE_INFO *mi = x->e_mbd.mi_8x8[0];
+ MODE_INFO *mi = x->e_mbd.mode_info_context;
MB_MODE_INFO *const mbmi = &mi->mbmi;
const int label_count = 4;
int64_t this_segment_rd = 0;
@@ -1751,9 +1747,11 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
if (cpi->compressor_speed) {
// use previous block's result as next block's MV predictor.
if (i > 0) {
- bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
+ bsi->mvp.as_int =
+ x->e_mbd.mode_info_context->bmi[i - 1].as_mv[0].as_int;
if (i == 2)
- bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
+ bsi->mvp.as_int =
+ x->e_mbd.mode_info_context->bmi[i - 2].as_mv[0].as_int;
}
}
if (i == 0)
@@ -1811,11 +1809,13 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
if (thissme < bestsme) {
bestsme = thissme;
- mode_mv[NEWMV].as_int = mi->bmi[i].as_mv[0].as_int;
+ mode_mv[NEWMV].as_int =
+ x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int;
} else {
/* The full search result is actually worse so re-instate the
* previous best vector */
- mi->bmi[i].as_mv[0].as_int = mode_mv[NEWMV].as_int;
+ x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int =
+ mode_mv[NEWMV].as_int;
}
}
@@ -2016,7 +2016,7 @@ static int64_t rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
int i;
BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
MACROBLOCKD *xd = &x->e_mbd;
- MODE_INFO *mi = xd->this_mi;
+ MODE_INFO *mi = xd->mode_info_context;
MB_MODE_INFO *mbmi = &mi->mbmi;
int mode_idx;
@@ -2062,7 +2062,7 @@ static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
uint8_t *ref_y_buffer, int ref_y_stride,
int ref_frame, BLOCK_SIZE block_size ) {
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
int_mv this_mv;
int i;
int zero_seen = 0;
@@ -2182,7 +2182,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
// restored if we decide to encode this way
ctx->skip = x->skip;
ctx->best_mode_index = mode_index;
- ctx->mic = *xd->this_mi;
+ ctx->mic = *xd->mode_info_context;
if (partition)
ctx->partition_info = *partition;
@@ -2238,7 +2238,7 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
VP9_COMMON *cm = &cpi->common;
YV12_BUFFER_CONFIG *yv12 = &cm->yv12_fb[cpi->common.ref_frame_map[idx]];
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
// set up scaling factors
scale[frame_type] = cpi->common.active_ref_scale[frame_type - 1];
@@ -2256,8 +2256,8 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
&scale[frame_type], &scale[frame_type]);
// Gets an initial list of candidate vectors from neighbours and orders them
- vp9_find_mv_refs(&cpi->common, xd, xd->this_mi,
- xd->last_mi,
+ vp9_find_mv_refs(&cpi->common, xd, xd->mode_info_context,
+ xd->prev_mode_info_context,
frame_type,
mbmi->ref_mvs[frame_type], mi_row, mi_col);
@@ -2285,7 +2285,7 @@ static YV12_BUFFER_CONFIG *get_scaled_ref_frame(VP9_COMP *cpi, int ref_frame) {
static INLINE int get_switchable_rate(const MACROBLOCK *x) {
const MACROBLOCKD *const xd = &x->e_mbd;
- const MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ const MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
const int ctx = vp9_get_pred_context_switchable_interp(xd);
return SWITCHABLE_INTERP_RATE_FACTOR *
x->switchable_interp_costs[ctx][mbmi->interp_filter];
@@ -2297,7 +2297,7 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
int_mv *tmp_mv, int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
VP9_COMMON *cm = &cpi->common;
- MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}};
int bestsme = INT_MAX;
int further_steps, step_param;
@@ -2425,7 +2425,7 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
int *rate_mv) {
int pw = 4 << b_width_log2(bsize), ph = 4 << b_height_log2(bsize);
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
int refs[2] = { mbmi->ref_frame[0],
(mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
int_mv ref_mv[2];
@@ -2590,7 +2590,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
const int64_t ref_best_rd) {
VP9_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
const int is_comp_pred = (mbmi->ref_frame[1] > 0);
const int num_refs = is_comp_pred ? 2 : 1;
const int this_mode = mbmi->mode;
@@ -2636,7 +2636,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
*rate2 += rate_mv;
frame_mv[refs[0]].as_int =
- xd->this_mi->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+ xd->mode_info_context->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
single_newmv[refs[0]].as_int = tmp_mv.as_int;
}
}
@@ -2991,11 +2991,11 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
- int y_skip = 0, uv_skip = 0;
+ int y_skip = 0, uv_skip;
int64_t dist_y = 0, dist_uv = 0, tx_cache[TX_MODES] = { 0 };
x->skip_encode = 0;
ctx->skip = 0;
- xd->this_mi->mbmi.ref_frame[0] = INTRA_FRAME;
+ xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
if (bsize >= BLOCK_8X8) {
if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
&dist_y, &y_skip, bsize, tx_cache,
@@ -3031,7 +3031,7 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
ctx->tx_rd_diff[i] = tx_cache[i] - tx_cache[cm->tx_mode];
}
- ctx->mic = *xd->this_mi;
+ ctx->mic = *xd->mode_info_context;
}
int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
@@ -3043,12 +3043,12 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int64_t best_rd_so_far) {
VP9_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
const struct segmentation *seg = &cm->seg;
const BLOCK_SIZE block_size = get_plane_block_size(bsize, &xd->plane[0]);
MB_PREDICTION_MODE this_mode;
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
- unsigned char segment_id = mbmi->segment_id;
+ unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
int comp_pred, i;
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
struct buf_2d yv12_mb[4][MAX_MB_PLANE];
@@ -3458,7 +3458,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
cpi->rd_threshes[bsize][THR_NEWA];
this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
cpi->rd_threshes[bsize][THR_NEWG] : this_rd_thresh;
- xd->this_mi->mbmi.tx_size = TX_4X4;
+ xd->mode_info_context->mbmi.tx_size = TX_4X4;
cpi->rd_filter_cache[SWITCHABLE_FILTERS] = INT64_MAX;
if (cm->mcomp_filter_type != BILINEAR) {
@@ -3514,7 +3514,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
tmp_best_mbmode = *mbmi;
tmp_best_partition = *x->partition_info;
for (i = 0; i < 4; i++)
- tmp_best_bmodes[i] = xd->this_mi->bmi[i];
+ tmp_best_bmodes[i] = xd->mode_info_context->bmi[i];
pred_exists = 1;
if (switchable_filter_index == 0 &&
cpi->sf.use_rd_breakout &&
@@ -3566,7 +3566,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
*mbmi = tmp_best_mbmode;
*x->partition_info = tmp_best_partition;
for (i = 0; i < 4; i++)
- xd->this_mi->bmi[i] = tmp_best_bmodes[i];
+ xd->mode_info_context->bmi[i] = tmp_best_bmodes[i];
}
rate2 += rate;
@@ -3690,20 +3690,20 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
}
// Keep record of best intra rd
- if (xd->this_mi->mbmi.ref_frame[0] == INTRA_FRAME &&
- is_intra_mode(xd->this_mi->mbmi.mode) &&
+ if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME &&
+ is_intra_mode(xd->mode_info_context->mbmi.mode) &&
this_rd < best_intra_rd) {
best_intra_rd = this_rd;
- best_intra_mode = xd->this_mi->mbmi.mode;
+ best_intra_mode = xd->mode_info_context->mbmi.mode;
}
// Keep record of best inter rd with single reference
- if (xd->this_mi->mbmi.ref_frame[0] > INTRA_FRAME &&
- xd->this_mi->mbmi.ref_frame[1] == NONE &&
+ if (xd->mode_info_context->mbmi.ref_frame[0] > INTRA_FRAME &&
+ xd->mode_info_context->mbmi.ref_frame[1] == NONE &&
!mode_excluded &&
this_rd < best_inter_rd) {
best_inter_rd = this_rd;
best_inter_ref_frame = ref_frame;
- // best_inter_mode = xd->this_mi->mbmi.mode;
+ // best_inter_mode = xd->mode_info_context->mbmi.mode;
}
if (!disable_skip && ref_frame == INTRA_FRAME) {
@@ -3747,7 +3747,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
if (this_mode == I4X4_PRED || this_mode == SPLITMV)
for (i = 0; i < 4; i++)
- best_bmodes[i] = xd->this_mi->bmi[i];
+ best_bmodes[i] = xd->mode_info_context->bmi[i];
// TODO(debargha): enhance this test with a better distortion prediction
// based on qp, activity mask and history
@@ -3926,24 +3926,24 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
if (best_mbmode.ref_frame[0] == INTRA_FRAME &&
best_mbmode.sb_type < BLOCK_8X8) {
for (i = 0; i < 4; i++)
- xd->this_mi->bmi[i].as_mode = best_bmodes[i].as_mode;
+ xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
}
if (best_mbmode.ref_frame[0] != INTRA_FRAME &&
best_mbmode.sb_type < BLOCK_8X8) {
for (i = 0; i < 4; i++)
- xd->this_mi->bmi[i].as_mv[0].as_int =
+ xd->mode_info_context->bmi[i].as_mv[0].as_int =
best_bmodes[i].as_mv[0].as_int;
if (mbmi->ref_frame[1] > 0)
for (i = 0; i < 4; i++)
- xd->this_mi->bmi[i].as_mv[1].as_int =
+ xd->mode_info_context->bmi[i].as_mv[1].as_int =
best_bmodes[i].as_mv[1].as_int;
*x->partition_info = best_partition;
- mbmi->mv[0].as_int = xd->this_mi->bmi[3].as_mv[0].as_int;
- mbmi->mv[1].as_int = xd->this_mi->bmi[3].as_mv[1].as_int;
+ mbmi->mv[0].as_int = xd->mode_info_context->bmi[3].as_mv[0].as_int;
+ mbmi->mv[1].as_int = xd->mode_info_context->bmi[3].as_mv[1].as_int;
}
for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
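[Editor's sketch, not part of the patch] The vp9_rdopt.c hunks above consistently swap the `xd->this_mi` / `mi_8x8[0]` pointer-grid access for a single `xd->mode_info_context` pointer into a flat, stride-addressed MODE_INFO array. A minimal sketch of that addressing pattern follows; the `*_SKETCH` types and the `mi_at()` helper are simplified stand-ins for illustration only — just the `mode_info_context` / `mode_info_stride` names come from the diff.

/* Illustrative sketch only, not part of the patch: simplified stand-in
 * types showing how a flat MODE_INFO array is addressed through a single
 * context pointer plus a row stride. */
typedef struct { int mode; int segment_id; } MB_MODE_INFO_SKETCH;
typedef struct { MB_MODE_INFO_SKETCH mbmi; } MODE_INFO_SKETCH;

typedef struct {
  MODE_INFO_SKETCH *mode_info_context;  /* current block's mode info */
  int mode_info_stride;                 /* row stride of the mi array */
} MACROBLOCKD_SKETCH;

/* Hypothetical helper: point the context at block (mi_row, mi_col). */
static MODE_INFO_SKETCH *mi_at(MACROBLOCKD_SKETCH *xd,
                               MODE_INFO_SKETCH *mi_base,
                               int mi_row, int mi_col) {
  xd->mode_info_context = mi_base + mi_row * xd->mode_info_stride + mi_col;
  return xd->mode_info_context;
}

With this layout, nearby blocks are reached by plain pointer arithmetic on the same array, which is also the pattern the vp9_segmentation.c hunks below rely on (`mi + hbs`, `mi + hbs * mis`).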
diff --git a/vp9/encoder/vp9_segmentation.c b/vp9/encoder/vp9_segmentation.c
index 10655e8a7..0a6d2abe2 100644
--- a/vp9/encoder/vp9_segmentation.c
+++ b/vp9/encoder/vp9_segmentation.c
@@ -117,7 +117,7 @@ static int cost_segmap(int *segcounts, vp9_prob *probs) {
return cost;
}
-static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+static void count_segs(VP9_COMP *cpi, MODE_INFO *mi,
int *no_pred_segcounts,
int (*temporal_predictor_count)[2],
int *t_unpred_seg_counts,
@@ -129,8 +129,8 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- segment_id = mi_8x8[0]->mbmi.segment_id;
-
+ segment_id = mi->mbmi.segment_id;
+ xd->mode_info_context = mi;
set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);
// Count the number of hits on each segment with no prediction
@@ -138,7 +138,7 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8,
// Temporal prediction not allowed on key frames
if (cm->frame_type != KEY_FRAME) {
- const BLOCK_SIZE bsize = mi_8x8[0]->mbmi.sb_type;
+ const BLOCK_SIZE bsize = mi->mbmi.sb_type;
// Test to see if the segment id matches the predicted value.
const int pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
bsize, mi_row, mi_col);
@@ -147,7 +147,7 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8,
// Store the prediction status for this mb and update counts
// as appropriate
- vp9_set_pred_flag_seg_id(xd, pred_flag);
+ vp9_set_pred_flag_seg_id(cm, bsize, mi_row, mi_col, pred_flag);
temporal_predictor_count[pred_context][pred_flag]++;
if (!pred_flag)
@@ -156,7 +156,7 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8,
}
}
-static void count_segs_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+static void count_segs_sb(VP9_COMP *cpi, MODE_INFO *mi,
int *no_pred_segcounts,
int (*temporal_predictor_count)[2],
int *t_unpred_seg_counts,
@@ -170,22 +170,21 @@ static void count_segs_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type];
- bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type];
+ bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
+ bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
if (bw == bs && bh == bs) {
- count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count,
+ count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, bs, bs, mi_row, mi_col);
} else if (bw == bs && bh < bs) {
- count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count,
+ count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
- count_segs(cpi, mi_8x8 + hbs * mis, no_pred_segcounts,
- temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
- mi_row + hbs, mi_col);
+ count_segs(cpi, mi + hbs * mis, no_pred_segcounts, temporal_predictor_count,
+ t_unpred_seg_counts, bs, hbs, mi_row + hbs, mi_col);
} else if (bw < bs && bh == bs) {
- count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count,
+ count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
- count_segs(cpi, mi_8x8 + hbs, no_pred_segcounts, temporal_predictor_count,
+ count_segs(cpi, mi + hbs, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, hbs, bs, mi_row, mi_col + hbs);
} else {
const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
@@ -197,7 +196,7 @@ static void count_segs_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
const int mi_dc = hbs * (n & 1);
const int mi_dr = hbs * (n >> 1);
- count_segs_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc],
+ count_segs_sb(cpi, &mi[mi_dr * mis + mi_dc],
no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts,
mi_row + mi_dr, mi_col + mi_dc, subsize);
@@ -223,7 +222,7 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
vp9_prob t_nopred_prob[PREDICTION_PROBS];
const int mis = cm->mode_info_stride;
- MODE_INFO **mi_ptr, **mi;
+ MODE_INFO *mi_ptr, *mi;
// Set default state for the segment tree probabilities and the
// temporal coding probabilities
@@ -234,7 +233,7 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
// predicts this one
for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) {
vp9_get_tile_col_offsets(cm, tile_col);
- mi_ptr = cm->mi_grid_visible + cm->cur_tile_mi_col_start;
+ mi_ptr = cm->mi + cm->cur_tile_mi_col_start;
for (mi_row = 0; mi_row < cm->mi_rows;
mi_row += 8, mi_ptr += 8 * mis) {
mi = mi_ptr;
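[Editor's sketch, not part of the patch] The rewritten count_segs_sb() recursion above walks the flat MODE_INFO array with that same stride arithmetic: `mi + hbs` steps half a block to the right (visited at `mi_col + hbs`), `mi + hbs * mis` half a block down (`mi_row + hbs`), and `mi[mi_dr * mis + mi_dc]` selects one quadrant when the block is split. A minimal sketch of the quadrant walk is below; the stand-in type and `visit()` callback are hypothetical, not libvpx API.

/* Illustrative sketch only, not part of the patch: the quadrant offsets
 * used when a block is split into four, mirroring the split loop in
 * count_segs_sb(). `mis` is the row stride of the mi array and `hbs`
 * is half the current block size in 8x8 units. */
typedef struct { int sb_type; } MI_SKETCH;
typedef void (*visit_fn)(MI_SKETCH *mi, int mi_row, int mi_col);

static void walk_quadrants(MI_SKETCH *mi, int mis, int hbs,
                           int mi_row, int mi_col, visit_fn visit) {
  int n;
  for (n = 0; n < 4; n++) {
    const int mi_dc = hbs * (n & 1);   /* column offset within the block */
    const int mi_dr = hbs * (n >> 1);  /* row offset within the block */
    visit(&mi[mi_dr * mis + mi_dc], mi_row + mi_dr, mi_col + mi_dc);
  }
}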
diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c
index 63826eea5..3052e8f70 100644
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -153,7 +153,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
/*cpi->sf.search_method == HEX*/
// TODO Check that the 16x16 vf & sdf are selected here
// Ignore mv costing by sending NULL pointer instead of cost arrays
- ref_mv = &x->e_mbd.mi_8x8[0]->bmi[0].as_mv[0];
+ ref_mv = &x->e_mbd.mode_info_context->bmi[0].as_mv[0];
bestsme = vp9_hex_search(x, &best_ref_mv1_full,
step_param, sadpb, 1,
&cpi->fn_ptr[BLOCK_16X16],
@@ -245,8 +245,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
if (cpi->frames[frame] == NULL)
continue;
- mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row = 0;
- mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col = 0;
+ mbd->mode_info_context->bmi[0].as_mv[0].as_mv.row = 0;
+ mbd->mode_info_context->bmi[0].as_mv[0].as_mv.col = 0;
if (frame == alt_ref_index) {
filter_weight = 2;
@@ -279,8 +279,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
cpi->frames[frame]->u_buffer + mb_uv_offset,
cpi->frames[frame]->v_buffer + mb_uv_offset,
cpi->frames[frame]->y_stride,
- mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row,
- mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col,
+ mbd->mode_info_context->bmi[0].as_mv[0].as_mv.row,
+ mbd->mode_info_context->bmi[0].as_mv[0].as_mv.col,
predictor);
// Apply the filter (YUV)
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index 0c9bf9db2..03bf14716 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -114,7 +114,7 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
MACROBLOCKD *xd = args->xd;
TOKENEXTRA **tp = args->tp;
struct macroblockd_plane *pd = &xd->plane[plane];
- MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
int pt; /* near block/prev token context index */
int c = 0, rc = 0;
TOKENEXTRA *t = *tp; /* store tokens starting here */
@@ -210,7 +210,7 @@ void vp9_tokenize_sb(VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
TOKENEXTRA *t_backup = *t;
const int mb_skip_context = vp9_get_pred_context_mbskip(xd);
const int skip_inc = !vp9_segfeature_active(&cm->seg, mbmi->segment_id,