summary | refs | log | tree | commit | diff
path: root/vp9/encoder/vp9_encodeframe.c
diff options
context:
space:
mode:
author    Scott LaVarnway <slavarnway@google.com>  2013-09-13 08:56:20 -0700
committer Gerrit Code Review <gerrit@gerrit.golo.chromium.org>  2013-09-13 08:56:20 -0700
commit    8fc95a1b119b7e9e34ef989e3d5ced0e358bc2c4 (patch)
tree      3bf68c324d58bb8b8aad9eaa50f423ef2c20171b /vp9/encoder/vp9_encodeframe.c
parent    9c9a3b277568a58c8fcefa2c4031590a39b08e49 (diff)
parent    ac6093d179d8488f90bf10c6d0abcc48be327d28 (diff)
download  libvpx-8fc95a1b119b7e9e34ef989e3d5ced0e358bc2c4.tar
          libvpx-8fc95a1b119b7e9e34ef989e3d5ced0e358bc2c4.tar.gz
          libvpx-8fc95a1b119b7e9e34ef989e3d5ced0e358bc2c4.tar.bz2
          libvpx-8fc95a1b119b7e9e34ef989e3d5ced0e358bc2c4.zip
Merge "New mode_info_context storage -- undo revert"
Diffstat (limited to 'vp9/encoder/vp9_encodeframe.c')
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  276
1 files changed, 147 insertions, 129 deletions
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index ea1a245b4..45bf87238 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -351,7 +351,8 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = &ctx->mic;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO * const mbmi = &xd->this_mi->mbmi;
+ MODE_INFO *mi_addr = xd->this_mi;
int mb_mode_index = ctx->best_mode_index;
const int mis = cm->mode_info_stride;
@@ -364,13 +365,15 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES);
assert(mi->mbmi.sb_type == bsize);
+ *mi_addr = *mi;
+
// Restore the coding context of the MB to that that was in place
// when the mode was picked for it
for (y = 0; y < mi_height; y++)
for (x_idx = 0; x_idx < mi_width; x_idx++)
if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
&& (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y)
- xd->mode_info_context[x_idx + y * mis] = *mi;
+ xd->mi_8x8[x_idx + y * mis] = mi_addr;
// FIXME(rbultje) I'm pretty sure this should go to the end of this block
// (i.e. after the output_enabled)
@@ -439,15 +442,6 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
}
- if (bsize > BLOCK_8X8 && mbmi->mode == NEWMV) {
- int i, j;
- for (j = 0; j < mi_height; ++j)
- for (i = 0; i < mi_width; ++i)
- if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > i
- && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > j)
- xd->mode_info_context[mis * j + i].mbmi = *mbmi;
- }
-
if (cm->mcomp_filter_type == SWITCHABLE && is_inter_mode(mbmi->mode)) {
const int ctx = vp9_get_pred_context_switchable_interp(xd);
++cm->counts.switchable_interp[ctx][mbmi->interp_filter];
@@ -501,11 +495,18 @@ static void set_offsets(VP9_COMP *cpi, int mi_row, int mi_col,
/* pointers to mode info contexts */
x->partition_info = x->pi + idx_str;
- xd->mode_info_context = cm->mi + idx_str;
- mbmi = &xd->mode_info_context->mbmi;
+
+ xd->mi_8x8 = cm->mi_grid_visible + idx_str;
+ xd->prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
+
// Special case: if prev_mi is NULL, the previous mode info context
// cannot be used.
- xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + idx_str : NULL;
+ xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;
+
+ xd->this_mi =
+ xd->mi_8x8[0] = cm->mi + idx_str;
+
+ mbmi = &xd->this_mi->mbmi;
// Set up destination pointers
setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col);
@@ -582,10 +583,10 @@ static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
}
set_offsets(cpi, mi_row, mi_col, bsize);
- xd->mode_info_context->mbmi.sb_type = bsize;
+ xd->this_mi->mbmi.sb_type = bsize;
// Set to zero to make sure we do not use the previous encoded frame stats
- xd->mode_info_context->mbmi.skip_coeff = 0;
+ xd->this_mi->mbmi.skip_coeff = 0;
x->source_variance = get_sby_perpixel_variance(cpi, x, bsize);
@@ -606,7 +607,7 @@ static void update_stats(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *mi = xd->mode_info_context;
+ MODE_INFO *mi = xd->this_mi;
MB_MODE_INFO *const mbmi = &mi->mbmi;
if (cm->frame_type != KEY_FRAME) {
@@ -885,7 +886,7 @@ static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
// However, at the bottom and right borders of the image the requested size
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
-static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m,
+static void set_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
int mi_row, int mi_col) {
VP9_COMMON *const cm = &cpi->common;
BLOCK_SIZE bsize = cpi->sf.always_this_block_size;
@@ -893,68 +894,75 @@ static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m,
int row8x8_remaining = cm->cur_tile_mi_row_end - mi_row;
int col8x8_remaining = cm->cur_tile_mi_col_end - mi_col;
int block_row, block_col;
+ MODE_INFO * mi_upper_left = cm->mi + mi_row * mis + mi_col;
+ int bh = num_8x8_blocks_high_lookup[bsize];
+ int bw = num_8x8_blocks_wide_lookup[bsize];
assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
// Apply the requested partition size to the SB64 if it is all "in image"
if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
(row8x8_remaining >= MI_BLOCK_SIZE)) {
- for (block_row = 0; block_row < MI_BLOCK_SIZE; ++block_row) {
- for (block_col = 0; block_col < MI_BLOCK_SIZE; ++block_col) {
- m[block_row * mis + block_col].mbmi.sb_type = bsize;
+ for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
+ for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
+ int index = block_row * mis + block_col;
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = bsize;
}
}
} else {
// Else this is a partial SB64.
- int bh = num_8x8_blocks_high_lookup[bsize];
- int bw = num_8x8_blocks_wide_lookup[bsize];
- int sub_block_row;
- int sub_block_col;
- int row_index;
- int col_index;
-
for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
+ int index = block_row * mis + block_col;
// Find a partition size that fits
bsize = find_partition_size(cpi->sf.always_this_block_size,
(row8x8_remaining - block_row),
(col8x8_remaining - block_col), &bh, &bw);
-
- // Set the mi entries for all 8x8 blocks within the selected size
- for (sub_block_row = 0; sub_block_row < bh; ++sub_block_row) {
- for (sub_block_col = 0; sub_block_col < bw; ++sub_block_col) {
- row_index = block_row + sub_block_row;
- col_index = block_col + sub_block_col;
- m[row_index * mis + col_index].mbmi.sb_type = bsize;
- }
- }
+ mi_8x8[index] = mi_upper_left + index;
+ mi_8x8[index]->mbmi.sb_type = bsize;
}
}
}
}
-static void copy_partitioning(VP9_COMP *cpi, MODE_INFO *m, MODE_INFO *p) {
+
+static void copy_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ MODE_INFO **prev_mi_8x8) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
int block_row, block_col;
+
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
- m[block_row * mis + block_col].mbmi.sb_type =
- p[block_row * mis + block_col].mbmi.sb_type;
+ MODE_INFO * prev_mi = prev_mi_8x8[block_row * mis + block_col];
+ BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
+ int offset;
+
+ if (prev_mi) {
+ offset = prev_mi - cm->prev_mi;
+ mi_8x8[block_row * mis + block_col] = cm->mi + offset;
+ mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type;
+ }
}
}
}
-static void set_block_size(VP9_COMMON * const cm, MODE_INFO *mi,
+static void set_block_size(VP9_COMMON * const cm, MODE_INFO **mi_8x8,
BLOCK_SIZE bsize, int mis, int mi_row,
int mi_col) {
int r, c;
const int bs = MAX(num_8x8_blocks_wide_lookup[bsize],
num_8x8_blocks_high_lookup[bsize]);
- MODE_INFO *const mi2 = &mi[mi_row * mis + mi_col];
+ const int idx_str = mis * mi_row + mi_col;
+ MODE_INFO **const mi2 = &mi_8x8[idx_str];
+
+ mi2[0] = cm->mi + idx_str;
+ mi2[0]->mbmi.sb_type = bsize;
+
for (r = 0; r < bs; r++)
for (c = 0; c < bs; c++)
if (mi_row + r < cm->mi_rows && mi_col + c < cm->mi_cols)
- mi2[r * mis + c].mbmi.sb_type = bsize;
+ mi2[r * mis + c] = mi2[0];
}
typedef struct {
@@ -1100,7 +1108,7 @@ static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
#else // !PERFORM_RANDOM_PARTITIONING
-static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
+static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO **m,
BLOCK_SIZE bsize, int mi_row,
int mi_col, int mi_size) {
VP9_COMMON * const cm = &cpi->common;
@@ -1138,8 +1146,8 @@ static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
}
#endif // PERFORM_RANDOM_PARTITIONING
-static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
- int mi_col) {
+static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ int mi_row, int mi_col) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -1182,13 +1190,16 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
&xd->scale_factor[0]);
setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
&xd->scale_factor[1]);
- xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
- xd->mode_info_context->mbmi.sb_type = BLOCK_64X64;
- vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
+
+ xd->this_mi->mbmi.ref_frame[0] = LAST_FRAME;
+ xd->this_mi->mbmi.sb_type = BLOCK_64X64;
+ vp9_find_best_ref_mvs(xd,
+ mi_8x8[0]->mbmi.ref_mvs[mi_8x8[0]->mbmi.ref_frame[0]],
&nearest_mv, &near_mv);
- xd->mode_info_context->mbmi.mv[0] = nearest_mv;
+ xd->this_mi->mbmi.mv[0] = nearest_mv;
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);
+
d = xd->plane[0].dst.buf;
dp = xd->plane[0].dst.stride;
}
@@ -1225,24 +1236,24 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold, or we
// hit 8x8.
- if (!set_vt_partitioning(cpi, &vt, m, BLOCK_64X64, mi_row, mi_col,
+ if (!set_vt_partitioning(cpi, &vt, mi_8x8, BLOCK_64X64, mi_row, mi_col,
4)) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
- if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_32X32,
+ if (!set_vt_partitioning(cpi, &vt.split[i], mi_8x8, BLOCK_32X32,
(mi_row + y32_idx), (mi_col + x32_idx), 2)) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
- if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m,
+ if (!set_vt_partitioning(cpi, &vt.split[i].split[j], mi_8x8,
BLOCK_16X16,
(mi_row + y32_idx + y16_idx),
(mi_col + x32_idx + x16_idx), 1)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
- set_block_size(cm, m, BLOCK_8X8, mis,
+ set_block_size(cm, mi_8x8, BLOCK_8X8, mis,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx));
}
@@ -1253,9 +1264,10 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
}
}
-static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
- int *rate, int64_t *dist, int do_recon) {
+static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ TOKENEXTRA **tp, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, int *rate, int64_t *dist,
+ int do_recon) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -1281,7 +1293,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
int64_t chosen_dist = INT_MAX;
BLOCK_SIZE sub_subsize = BLOCK_4X4;
int splits_below = 0;
- BLOCK_SIZE bs_type = m->mbmi.sb_type;
+ BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
@@ -1314,7 +1326,8 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
splits_below = 1;
for (i = 0; i < 4; i++) {
int jj = i >> 1, ii = i & 0x01;
- if (m[jj * bss * mis + ii * bss].mbmi.sb_type >= sub_subsize) {
+ MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss];
+ if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
splits_below = 0;
}
}
@@ -1334,7 +1347,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
none_rate += x->partition_cost[pl][PARTITION_NONE];
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
- m->mbmi.sb_type = bs_type;
+ mi_8x8[0]->mbmi.sb_type = bs_type;
*(get_sb_partitioning(x, bsize)) = subsize;
}
}
@@ -1405,8 +1418,9 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
*get_sb_index(xd, subsize) = i;
- rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
- mi_col + x_idx, subsize, &rt, &dt, i != 3);
+ rd_use_partition(cpi, mi_8x8 + jj * bss * mis + ii * bss, tp,
+ mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
+ i != 3);
if (rt == INT_MAX || dt == INT_MAX) {
last_part_rate = INT_MAX;
last_part_dist = INT_MAX;
@@ -1487,7 +1501,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
// If last_part is better set the partitioning to that...
if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
< RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
- m->mbmi.sb_type = bsize;
+ mi_8x8[0]->mbmi.sb_type = bsize;
if (bsize >= BLOCK_8X8)
*(get_sb_partitioning(x, bsize)) = subsize;
chosen_rate = last_part_rate;
@@ -1535,9 +1549,9 @@ static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
//
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one sb64.
-static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO * mi,
- BLOCK_SIZE *min_block_size,
- BLOCK_SIZE *max_block_size ) {
+static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8,
+ BLOCK_SIZE * min_block_size,
+ BLOCK_SIZE * max_block_size ) {
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
int sb_width_in_blocks = MI_BLOCK_SIZE;
int sb_height_in_blocks = MI_BLOCK_SIZE;
@@ -1547,8 +1561,10 @@ static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO * mi,
// Check the sb_type for each block that belongs to this region.
for (i = 0; i < sb_height_in_blocks; ++i) {
for (j = 0; j < sb_width_in_blocks; ++j) {
- *min_block_size = MIN(*min_block_size, mi[index + j].mbmi.sb_type);
- *max_block_size = MAX(*max_block_size, mi[index + j].mbmi.sb_type);
+ MODE_INFO * mi = mi_8x8[index+j];
+ BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
+ *min_block_size = MIN(*min_block_size, sb_type);
+ *max_block_size = MAX(*max_block_size, sb_type);
}
index += xd->mode_info_stride;
}
@@ -1560,13 +1576,12 @@ static void rd_auto_partition_range(VP9_COMP *cpi, int row, int col,
BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- MODE_INFO *mi = xd->mode_info_context;
- MODE_INFO *above_sb64_mi;
- MODE_INFO *left_sb64_mi;
- const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
- const int left_in_image = xd->left_available && left_mbmi->in_image;
- const int above_in_image = xd->up_available && above_mbmi->in_image;
+ MODE_INFO ** mi_8x8 = xd->mi_8x8;
+ const int left_in_image = xd->left_available && mi_8x8[-1];
+ const int above_in_image = xd->up_available &&
+ mi_8x8[-xd->mode_info_stride];
+ MODE_INFO ** above_sb64_mi_8x8;
+ MODE_INFO ** left_sb64_mi_8x8;
// Frequency check
if (cpi->sf.auto_min_max_partition_count <= 0) {
@@ -1593,16 +1608,16 @@ static void rd_auto_partition_range(VP9_COMP *cpi, int row, int col,
// Find the min and max partition sizes used in the left SB64
if (left_in_image) {
- left_sb64_mi = &mi[-MI_BLOCK_SIZE];
- get_sb_partition_size_range(cpi, left_sb64_mi,
+ left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE];
+ get_sb_partition_size_range(cpi, left_sb64_mi_8x8,
min_block_size, max_block_size);
}
// Find the min and max partition sizes used in the above SB64 taking
// the values found for left as a starting point.
if (above_in_image) {
- above_sb64_mi = &mi[-xd->mode_info_stride * MI_BLOCK_SIZE];
- get_sb_partition_size_range(cpi, above_sb64_mi,
+ above_sb64_mi_8x8 = &mi_8x8[-xd->mode_info_stride * MI_BLOCK_SIZE];
+ get_sb_partition_size_range(cpi, above_sb64_mi_8x8,
min_block_size, max_block_size);
}
@@ -2027,18 +2042,18 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning ||
cpi->sf.use_one_partition_size_always ) {
const int idx_str = cm->mode_info_stride * mi_row + mi_col;
- MODE_INFO *m = cm->mi + idx_str;
- MODE_INFO *p = cm->prev_mi + idx_str;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
+ MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
cpi->mb.source_variance = UINT_MAX;
if (cpi->sf.use_one_partition_size_always) {
set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
- set_partitioning(cpi, m, mi_row, mi_col);
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
+ set_partitioning(cpi, mi_8x8, mi_row, mi_col);
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
} else if (cpi->sf.partition_by_variance) {
- choose_partitioning(cpi, cm->mi, mi_row, mi_col);
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
+ choose_partitioning(cpi, cm->mi_grid_visible, mi_row, mi_col);
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
} else {
if ((cpi->common.current_video_frame
@@ -2057,8 +2072,8 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
} else {
- copy_partitioning(cpi, m, p);
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
+ copy_partitioning(cpi, mi_8x8, prev_mi_8x8);
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
}
}
@@ -2070,7 +2085,6 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
&cpi->sf.min_partition_size,
&cpi->sf.max_partition_size);
}
-
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
}
@@ -2102,8 +2116,8 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
- xd->mode_info_context->mbmi.mode = DC_PRED;
- xd->mode_info_context->mbmi.uv_mode = DC_PRED;
+ xd->this_mi->mbmi.mode = DC_PRED;
+ xd->this_mi->mbmi.uv_mode = DC_PRED;
vp9_zero(cpi->y_mode_count)
vp9_zero(cpi->y_uv_mode_count)
@@ -2179,8 +2193,14 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vp9_zero(cm->counts.switchable_interp);
vp9_zero(cpi->txfm_stepdown_count);
- xd->mode_info_context = cm->mi;
- xd->prev_mode_info_context = cm->prev_mi;
+ xd->mi_8x8 = cm->mi_grid_visible;
+ // required for vp9_frame_init_quantizer
+ xd->this_mi =
+ xd->mi_8x8[0] = cm->mi;
+ xd->mic_stream_ptr = cm->mi;
+
+ xd->last_mi = cm->prev_mi;
+
vp9_zero(cpi->NMVcount);
vp9_zero(cpi->coef_counts);
@@ -2283,12 +2303,12 @@ static int check_dual_ref_flags(VP9_COMP *cpi) {
}
}
-static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
+static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++) {
- if (!mi[y * mis + x].mbmi.skip_coeff)
+ if (!mi_8x8[y * mis + x]->mbmi.skip_coeff)
return 0;
}
}
@@ -2296,42 +2316,39 @@ static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
return 1;
}
-static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs,
+static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs,
TX_SIZE tx_size) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++)
- mi[y * mis + x].mbmi.tx_size = tx_size;
+ mi_8x8[y * mis + x]->mbmi.tx_size = tx_size;
}
}
-static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi, int mis,
- TX_SIZE max_tx_size, int bw, int bh,
+static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ int mis, TX_SIZE max_tx_size, int bw, int bh,
int mi_row, int mi_col, BLOCK_SIZE bsize) {
- VP9_COMMON *const cm = &cpi->common;
- MB_MODE_INFO *const mbmi = &mi->mbmi;
+ VP9_COMMON * const cm = &cpi->common;
+ MB_MODE_INFO * const mbmi = &mi_8x8[0]->mbmi;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (mbmi->tx_size > max_tx_size) {
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD * const xd = &x->e_mbd;
const int ymbs = MIN(bh, cm->mi_rows - mi_row);
const int xmbs = MIN(bw, cm->mi_cols - mi_col);
- xd->mode_info_context = mi;
assert(vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) ||
- get_skip_flag(mi, mis, ymbs, xmbs));
- set_txfm_flag(mi, mis, ymbs, xmbs, max_tx_size);
+ get_skip_flag(mi_8x8, mis, ymbs, xmbs));
+ set_txfm_flag(mi_8x8, mis, ymbs, xmbs, max_tx_size);
}
}
-static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
+static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
TX_SIZE max_tx_size, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
- const VP9_COMMON *const cm = &cpi->common;
+ VP9_COMMON * const cm = &cpi->common;
const int mis = cm->mode_info_stride;
int bw, bh;
const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;
@@ -2339,22 +2356,23 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
- bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
+ bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type];
+ bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type];
if (bw == bs && bh == bs) {
- reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, bs, mi_row,
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, bs, mi_row,
mi_col, bsize);
} else if (bw == bs && bh < bs) {
- reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, hbs, mi_row, mi_col,
- bsize);
- reset_skip_txfm_size_b(cpi, mi + hbs * mis, mis, max_tx_size, bs, hbs,
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, hbs, mi_row,
+ mi_col, bsize);
+ reset_skip_txfm_size_b(cpi, mi_8x8 + hbs * mis, mis, max_tx_size, bs, hbs,
mi_row + hbs, mi_col, bsize);
} else if (bw < bs && bh == bs) {
- reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, hbs, bs, mi_row, mi_col,
- bsize);
- reset_skip_txfm_size_b(cpi, mi + hbs, mis, max_tx_size, hbs, bs, mi_row,
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, hbs, bs, mi_row,
+ mi_col, bsize);
+ reset_skip_txfm_size_b(cpi, mi_8x8 + hbs, mis, max_tx_size, hbs, bs, mi_row,
mi_col + hbs, bsize);
+
} else {
const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
int n;
@@ -2365,7 +2383,7 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
const int mi_dc = hbs * (n & 1);
const int mi_dr = hbs * (n >> 1);
- reset_skip_txfm_size_sb(cpi, &mi[mi_dr * mis + mi_dc], max_tx_size,
+ reset_skip_txfm_size_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size,
mi_row + mi_dr, mi_col + mi_dc, subsize);
}
}
@@ -2375,12 +2393,15 @@ static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
VP9_COMMON * const cm = &cpi->common;
int mi_row, mi_col;
const int mis = cm->mode_info_stride;
- MODE_INFO *mi, *mi_ptr = cm->mi;
+// MODE_INFO *mi, *mi_ptr = cm->mi;
+ MODE_INFO **mi_8x8, **mi_ptr = cm->mi_grid_visible;
for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
- mi = mi_ptr;
- for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8)
- reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col, BLOCK_64X64);
+ mi_8x8 = mi_ptr;
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) {
+ reset_skip_txfm_size_sb(cpi, mi_8x8, txfm_max, mi_row, mi_col,
+ BLOCK_64X64);
+ }
}
}
@@ -2636,7 +2657,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
- MODE_INFO *mi = xd->mode_info_context;
+ MODE_INFO **mi_8x8 = xd->mi_8x8;
+ MODE_INFO *mi = mi_8x8[0];
MB_MODE_INFO *mbmi = &mi->mbmi;
unsigned int segment_id = mbmi->segment_id;
const int mis = cm->mode_info_stride;
@@ -2715,8 +2737,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
} else {
- int mb_skip_context = xd->left_available ? (mi - 1)->mbmi.skip_coeff : 0;
- mb_skip_context += (mi - mis)->mbmi.skip_coeff;
+ int mb_skip_context = xd->left_available ? mi_8x8[-1]->mbmi.skip_coeff : 0;
+ mb_skip_context += mi_8x8[-mis] ? mi_8x8[-mis]->mbmi.skip_coeff : 0;
mbmi->skip_coeff = 1;
if (output_enabled)
@@ -2724,10 +2746,6 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
reset_skip_context(xd, MAX(bsize, BLOCK_8X8));
}
- // copy skip flag on all mb_mode_info contexts in this SB
- // if this was a skip at this txfm size
- vp9_set_pred_flag_mbskip(cm, bsize, mi_row, mi_col, mi->mbmi.skip_coeff);
-
if (output_enabled) {
if (cm->tx_mode == TX_MODE_SELECT &&
mbmi->sb_type >= BLOCK_8X8 &&
@@ -2758,7 +2776,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
for (y = 0; y < mi_height; y++)
for (x = 0; x < mi_width; x++)
if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
- mi[mis * y + x].mbmi.tx_size = sz;
+ mi_8x8[mis * y + x]->mbmi.tx_size = sz;
}
}
}