Diffstat (limited to 'vp9')
-rw-r--r--  vp9/common/vp9_alloccommon.c      |  33
-rw-r--r--  vp9/common/vp9_alloccommon.h      |   1
-rw-r--r--  vp9/common/vp9_blockd.h           |  22
-rw-r--r--  vp9/common/vp9_debugmodes.c       |  11
-rw-r--r--  vp9/common/vp9_entropymode.c      |   3
-rw-r--r--  vp9/common/vp9_findnearmv.c       |   4
-rw-r--r--  vp9/common/vp9_findnearmv.h       |  37
-rw-r--r--  vp9/common/vp9_loopfilter.c       |  88
-rw-r--r--  vp9/common/vp9_mvref_common.c     |  15
-rw-r--r--  vp9/common/vp9_onyxc_int.h        |   4
-rw-r--r--  vp9/common/vp9_pred_common.c      | 135
-rw-r--r--  vp9/common/vp9_pred_common.h      |  34
-rw-r--r--  vp9/common/vp9_reconinter.c       |  10
-rw-r--r--  vp9/decoder/vp9_decodemv.c        |  34
-rw-r--r--  vp9/decoder/vp9_decodframe.c      |  29
-rw-r--r--  vp9/decoder/vp9_detokenize.c      |   4
-rw-r--r--  vp9/decoder/vp9_onyxd_if.c        |   5
-rw-r--r--  vp9/encoder/vp9_bitstream.c       |  72
-rw-r--r--  vp9/encoder/vp9_encodeframe.c     | 241
-rw-r--r--  vp9/encoder/vp9_encodeintra.c     |   2
-rw-r--r--  vp9/encoder/vp9_encodemb.c        |  10
-rw-r--r--  vp9/encoder/vp9_encodemv.c        |   4
-rw-r--r--  vp9/encoder/vp9_firstpass.c       |  37
-rw-r--r--  vp9/encoder/vp9_mbgraph.c         |   4
-rw-r--r--  vp9/encoder/vp9_mcomp.c           |   6
-rw-r--r--  vp9/encoder/vp9_onyx_if.c         |  17
-rw-r--r--  vp9/encoder/vp9_quantize.c        |   2
-rw-r--r--  vp9/encoder/vp9_rdopt.c           | 148
-rw-r--r--  vp9/encoder/vp9_segmentation.c    |  35
-rw-r--r--  vp9/encoder/vp9_temporal_filter.c |  10
-rw-r--r--  vp9/encoder/vp9_tokenize.c        |   4
31 files changed, 558 insertions(+), 503 deletions(-)
diff --git a/vp9/common/vp9_alloccommon.c b/vp9/common/vp9_alloccommon.c
index e89fea8a4..4937fc899 100644
--- a/vp9/common/vp9_alloccommon.c
+++ b/vp9/common/vp9_alloccommon.c
@@ -31,22 +31,6 @@ void vp9_update_mode_info_border(VP9_COMMON *cm, MODE_INFO *mi) {
vpx_memset(&mi[i * stride], 0, sizeof(MODE_INFO));
}
-void vp9_update_mode_info_in_image(VP9_COMMON *cm, MODE_INFO *mi) {
- int i, j;
-
- // For each in image mode_info element set the in image flag to 1
- for (i = 0; i < cm->mi_rows; i++) {
- MODE_INFO *ptr = mi;
- for (j = 0; j < cm->mi_cols; j++) {
- ptr->mbmi.in_image = 1;
- ptr++; // Next element in the row
- }
-
- // Step over border element at start of next row
- mi += cm->mode_info_stride;
- }
-}
-
void vp9_free_frame_buffers(VP9_COMMON *cm) {
int i;
@@ -82,15 +66,18 @@ static void set_mb_mi(VP9_COMMON *cm, int aligned_width, int aligned_height) {
static void setup_mi(VP9_COMMON *cm) {
cm->mi = cm->mip + cm->mode_info_stride + 1;
cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
+ cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1;
+ cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1;
vpx_memset(cm->mip, 0,
cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO));
- vp9_update_mode_info_border(cm, cm->mip);
- vp9_update_mode_info_in_image(cm, cm->mi);
+ vpx_memset(cm->mi_grid_base, 0,
+ cm->mode_info_stride * (cm->mi_rows + 1) *
+ sizeof(*cm->mi_grid_base));
+ vp9_update_mode_info_border(cm, cm->mip);
vp9_update_mode_info_border(cm, cm->prev_mip);
- vp9_update_mode_info_in_image(cm, cm->prev_mi);
}
int vp9_alloc_frame_buffers(VP9_COMMON *cm, int width, int height) {
@@ -139,6 +126,14 @@ int vp9_alloc_frame_buffers(VP9_COMMON *cm, int width, int height) {
if (!cm->prev_mip)
goto fail;
+ cm->mi_grid_base = vpx_calloc(mi_size, sizeof(*cm->mi_grid_base));
+ if (!cm->mi_grid_base)
+ goto fail;
+
+ cm->prev_mi_grid_base = vpx_calloc(mi_size, sizeof(*cm->prev_mi_grid_base));
+ if (!cm->prev_mi_grid_base)
+ goto fail;
+
setup_mi(cm);
// FIXME(jkoleszar): allocate subsampled arrays for U/V once subsampling
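
A minimal sketch (not part of the patch) of the border-offset layout that setup_mi() relies on above: the new MODE_INFO ** grid is allocated with a one-element border on the top and left, every entry starts out NULL, and the *_grid_visible pointer skips the border by pointing at base + stride + 1. The names and the sizing below are illustrative stand-ins, not the actual VP9_COMMON fields or allocation sizes.

/* Illustrative sketch of the grid layout used by setup_mi(). */
#include <stdlib.h>

typedef struct mode_info_sketch { int dummy; } MI_SKETCH;

typedef struct {
  int mi_rows, mi_cols;
  int stride;                 /* mi_cols plus the border column (assumed) */
  MI_SKETCH **grid_base;      /* includes border entries, all start NULL  */
  MI_SKETCH **grid_visible;   /* first in-image entry                     */
} mi_grid_sketch;

static int alloc_mi_grid(mi_grid_sketch *g, int mi_rows, int mi_cols) {
  size_t mi_size;
  g->mi_rows = mi_rows;
  g->mi_cols = mi_cols;
  g->stride = mi_cols + 1;                       /* one border column */
  mi_size = (size_t)g->stride * (mi_rows + 1);   /* one border row    */
  g->grid_base = calloc(mi_size, sizeof(*g->grid_base));  /* NULL = border */
  if (!g->grid_base)
    return -1;
  /* Skip one border row and one border column, as in setup_mi(). */
  g->grid_visible = g->grid_base + g->stride + 1;
  return 0;
}
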
diff --git a/vp9/common/vp9_alloccommon.h b/vp9/common/vp9_alloccommon.h
index b7d7eba72..5d5fae993 100644
--- a/vp9/common/vp9_alloccommon.h
+++ b/vp9/common/vp9_alloccommon.h
@@ -17,7 +17,6 @@
void vp9_initialize_common();
void vp9_update_mode_info_border(VP9_COMMON *cm, MODE_INFO *mi);
-void vp9_update_mode_info_in_image(VP9_COMMON *cm, MODE_INFO *mi);
void vp9_create_common(VP9_COMMON *cm);
void vp9_remove_common(VP9_COMMON *cm);
diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 5ba784671..c8d677fb9 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -147,10 +147,6 @@ typedef struct {
// Flags used for prediction status of various bit-stream signals
unsigned char seg_id_predicted;
- // Indicates if the block is part of the image (1) vs border (0)
- // This can be useful in determining whether it provides a valid predictor
- unsigned char in_image;
-
INTERPOLATIONFILTERTYPE interp_filter;
BLOCK_SIZE sb_type;
@@ -206,10 +202,16 @@ typedef struct macroblockd {
struct scale_factors scale_factor[2];
- MODE_INFO *prev_mode_info_context;
- MODE_INFO *mode_info_context;
+ MODE_INFO *last_mi;
+ MODE_INFO *this_mi;
int mode_info_stride;
+ MODE_INFO *mic_stream_ptr;
+
+ // A NULL indicates that the 8x8 is not part of the image
+ MODE_INFO **mi_8x8;
+ MODE_INFO **prev_mi_8x8;
+
int up_available;
int left_available;
int right_available;
@@ -319,7 +321,7 @@ extern const TX_TYPE mode2txfm_map[MB_MODE_COUNT];
static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
const MACROBLOCKD *xd, int ib) {
- const MODE_INFO *const mi = xd->mode_info_context;
+ const MODE_INFO *const mi = xd->this_mi;
const MB_MODE_INFO *const mbmi = &mi->mbmi;
if (plane_type != PLANE_TYPE_Y_WITH_DC ||
@@ -334,13 +336,13 @@ static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
static INLINE TX_TYPE get_tx_type_8x8(PLANE_TYPE plane_type,
const MACROBLOCKD *xd) {
return plane_type == PLANE_TYPE_Y_WITH_DC ?
- mode2txfm_map[xd->mode_info_context->mbmi.mode] : DCT_DCT;
+ mode2txfm_map[xd->this_mi->mbmi.mode] : DCT_DCT;
}
static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type,
const MACROBLOCKD *xd) {
return plane_type == PLANE_TYPE_Y_WITH_DC ?
- mode2txfm_map[xd->mode_info_context->mbmi.mode] : DCT_DCT;
+ mode2txfm_map[xd->this_mi->mbmi.mode] : DCT_DCT;
}
static void setup_block_dptrs(MACROBLOCKD *xd, int ss_x, int ss_y) {
@@ -389,7 +391,7 @@ static INLINE void foreach_transformed_block_in_plane(
const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
foreach_transformed_block_visitor visit, void *arg) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
- const MB_MODE_INFO* mbmi = &xd->mode_info_context->mbmi;
+ const MB_MODE_INFO* mbmi = &xd->this_mi->mbmi;
// block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
// 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
// transform size varies per plane, look it up in a common way.
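
A minimal sketch (not part of the patch) of the convention this header change introduces: the per-block in_image flag is gone, and an 8x8 that lies outside the image is simply a NULL entry in the MODE_INFO ** grid, so neighbour lookups test the pointer and fall back to a default mode, much as left_block_mode()/above_block_mode() do further down in this diff. Types and names here are simplified stand-ins.

/* Sketch: NULL grid entry == 8x8 outside the image. */
#include <stddef.h>

typedef struct { int mode; } mi_sketch_t;    /* stand-in for MODE_INFO */

enum { SKETCH_DC_PRED = 0 };                 /* stand-in default mode  */

/* The grid has a one-element border, so mi_8x8[-1] and
 * mi_8x8[-stride] are always addressable. */
static int left_mode(mi_sketch_t **mi_8x8) {
  const mi_sketch_t *left = mi_8x8[-1];
  return left ? left->mode : SKETCH_DC_PRED;   /* NULL => out of image */
}

static int above_mode(mi_sketch_t **mi_8x8, int stride) {
  const mi_sketch_t *above = mi_8x8[-stride];
  return above ? above->mode : SKETCH_DC_PRED;
}
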
diff --git a/vp9/common/vp9_debugmodes.c b/vp9/common/vp9_debugmodes.c
index 4de50aacf..79f769e40 100644
--- a/vp9/common/vp9_debugmodes.c
+++ b/vp9/common/vp9_debugmodes.c
@@ -27,7 +27,7 @@ static void print_mi_data(VP9_COMMON *cm, FILE *file, char *descriptor,
int mi_row;
int mi_col;
int mi_index = 0;
- MODE_INFO *mi = cm->mi;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible;
int rows = cm->mi_rows;
int cols = cm->mi_cols;
char prefix = descriptor[0];
@@ -38,7 +38,8 @@ static void print_mi_data(VP9_COMMON *cm, FILE *file, char *descriptor,
fprintf(file, "%c ", prefix);
for (mi_col = 0; mi_col < cols; mi_col++) {
fprintf(file, "%2d ",
- *((int*) ((char *) (&mi[mi_index].mbmi) + member_offset)));
+ *((int*) ((char *) (&mi_8x8[mi_index]->mbmi) +
+ member_offset)));
mi_index++;
}
fprintf(file, "\n");
@@ -51,7 +52,7 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, char *file) {
int mi_col;
int mi_index = 0;
FILE *mvs = fopen(file, "a");
- MODE_INFO *mi = cm->mi;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible;
int rows = cm->mi_rows;
int cols = cm->mi_cols;
@@ -66,8 +67,8 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, char *file) {
for (mi_row = 0; mi_row < rows; mi_row++) {
fprintf(mvs,"V ");
for (mi_col = 0; mi_col < cols; mi_col++) {
- fprintf(mvs, "%4d:%4d ", mi[mi_index].mbmi.mv[0].as_mv.row,
- mi[mi_index].mbmi.mv[0].as_mv.col);
+ fprintf(mvs, "%4d:%4d ", mi_8x8[mi_index]->mbmi.mv[0].as_mv.row,
+ mi_8x8[mi_index]->mbmi.mv[0].as_mv.col);
mi_index++;
}
fprintf(mvs, "\n");
diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c
index a75d1a9a4..93c89b03a 100644
--- a/vp9/common/vp9_entropymode.c
+++ b/vp9/common/vp9_entropymode.c
@@ -510,10 +510,7 @@ void vp9_setup_past_independence(VP9_COMMON *cm) {
cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO));
vp9_update_mode_info_border(cm, cm->mip);
- vp9_update_mode_info_in_image(cm, cm->mi);
-
vp9_update_mode_info_border(cm, cm->prev_mip);
- vp9_update_mode_info_in_image(cm, cm->prev_mi);
vp9_zero(cm->ref_frame_sign_bias);
diff --git a/vp9/common/vp9_findnearmv.c b/vp9/common/vp9_findnearmv.c
index c158c92dc..49a731fdb 100644
--- a/vp9/common/vp9_findnearmv.c
+++ b/vp9/common/vp9_findnearmv.c
@@ -43,12 +43,12 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col) {
int_mv dst_list[MAX_MV_REF_CANDIDATES];
int_mv mv_list[MAX_MV_REF_CANDIDATES];
- MODE_INFO *const mi = xd->mode_info_context;
+ MODE_INFO *const mi = xd->this_mi;
assert(ref_idx == 0 || ref_idx == 1);
assert(MAX_MV_REF_CANDIDATES == 2); // makes code here slightly easier
- vp9_find_mv_refs_idx(cm, xd, mi, xd->prev_mode_info_context,
+ vp9_find_mv_refs_idx(cm, xd, mi, xd->last_mi,
mi->mbmi.ref_frame[ref_idx],
mv_list, block_idx, mi_row, mi_col);
diff --git a/vp9/common/vp9_findnearmv.h b/vp9/common/vp9_findnearmv.h
index 72572dfb1..ad0d882b9 100644
--- a/vp9/common/vp9_findnearmv.h
+++ b/vp9/common/vp9_findnearmv.h
@@ -43,41 +43,50 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm,
int block_idx, int ref_idx,
int mi_row, int mi_col);
-static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b) {
+static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb,
+ const MODE_INFO *left_mb, int b) {
// FIXME(rbultje, jingning): temporary hack because jenkins doesn't
// understand this condition. This will go away soon.
+ const MODE_INFO *mi = cur_mb;
+
if (b == 0 || b == 2) {
/* On L edge, get from MB to left of us */
- --cur_mb;
+ mi = left_mb;
+ if (!mi)
+ return DC_PRED;
- if (is_inter_block(&cur_mb->mbmi)) {
+ if (mi->mbmi.ref_frame[0] != INTRA_FRAME) {
return DC_PRED;
- } else if (cur_mb->mbmi.sb_type < BLOCK_8X8) {
- return (cur_mb->bmi + 1 + b)->as_mode;
+ } else if (mi->mbmi.sb_type < BLOCK_8X8) {
+ return ((mi->bmi + 1 + b)->as_mode);
} else {
- return cur_mb->mbmi.mode;
+ return mi->mbmi.mode;
}
}
assert(b == 1 || b == 3);
- return (cur_mb->bmi + b - 1)->as_mode;
+ return (mi->bmi + b - 1)->as_mode;
}
static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb,
- int b, int mi_stride) {
+ const MODE_INFO *above_mb, int b) {
+ const MODE_INFO *mi = cur_mb;
+
if (!(b >> 1)) {
/* On top edge, get from MB above us */
- cur_mb -= mi_stride;
+ mi = above_mb;
+ if (!mi)
+ return DC_PRED;
- if (is_inter_block(&cur_mb->mbmi)) {
+ if (mi->mbmi.ref_frame[0] != INTRA_FRAME) {
return DC_PRED;
- } else if (cur_mb->mbmi.sb_type < BLOCK_8X8) {
- return (cur_mb->bmi + 2 + b)->as_mode;
+ } else if (mi->mbmi.sb_type < BLOCK_8X8) {
+ return ((mi->bmi + 2 + b)->as_mode);
} else {
- return cur_mb->mbmi.mode;
+ return mi->mbmi.mode;
}
}
- return (cur_mb->bmi + b - 2)->as_mode;
+ return (mi->bmi + b - 2)->as_mode;
}
#endif // VP9_COMMON_VP9_FINDNEARMV_H_
diff --git a/vp9/common/vp9_loopfilter.c b/vp9/common/vp9_loopfilter.c
index 0d883ab9b..cfb5cd4a3 100644
--- a/vp9/common/vp9_loopfilter.c
+++ b/vp9/common/vp9_loopfilter.c
@@ -559,12 +559,12 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
// by mi_row, mi_col.
// TODO(JBB): This function only works for yv12.
static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
- const MODE_INFO *mi, const int mode_info_stride,
+ MODE_INFO **mi_8x8, const int mode_info_stride,
LOOP_FILTER_MASK *lfm) {
int idx_32, idx_16, idx_8;
const loop_filter_info_n *const lfi_n = &cm->lf_info;
- const MODE_INFO *mip = mi;
- const MODE_INFO *mip2 = mi;
+ MODE_INFO **mip = mi_8x8;
+ MODE_INFO **mip2 = mi_8x8;
// These are offsets to the next mi in the 64x64 block. It is what gets
// added to the mi ptr as we go through each loop. It helps us to avoids
@@ -596,23 +596,23 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
// TODO(jimbankoski): Try moving most of the following code into decode
// loop and storing lfm in the mbmi structure so that we don't have to go
// through the recursive loop structure multiple times.
- switch (mip->mbmi.sb_type) {
+ switch (mip[0]->mbmi.sb_type) {
case BLOCK_64X64:
- build_masks(lfi_n, mip , 0, 0, lfm);
+ build_masks(lfi_n, mip[0] , 0, 0, lfm);
break;
case BLOCK_64X32:
- build_masks(lfi_n, mip, 0, 0, lfm);
+ build_masks(lfi_n, mip[0], 0, 0, lfm);
mip2 = mip + mode_info_stride * 4;
if (4 >= max_rows)
break;
- build_masks(lfi_n, mip2 , 32, 8, lfm);
+ build_masks(lfi_n, mip2[0], 32, 8, lfm);
break;
case BLOCK_32X64:
- build_masks(lfi_n, mip, 0, 0, lfm);
+ build_masks(lfi_n, mip[0], 0, 0, lfm);
mip2 = mip + 4;
if (4 >= max_cols)
break;
- build_masks(lfi_n, mip2, 4, 2, lfm);
+ build_masks(lfi_n, mip2[0], 4, 2, lfm);
break;
default:
for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) {
@@ -622,23 +622,23 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
const int mi_32_row_offset = ((idx_32 >> 1) << 2);
if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
continue;
- switch (mip->mbmi.sb_type) {
+ switch (mip[0]->mbmi.sb_type) {
case BLOCK_32X32:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
break;
case BLOCK_32X16:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
if (mi_32_row_offset + 2 >= max_rows)
continue;
mip2 = mip + mode_info_stride * 2;
- build_masks(lfi_n, mip2, shift_y + 16, shift_uv + 4, lfm);
+ build_masks(lfi_n, mip2[0], shift_y + 16, shift_uv + 4, lfm);
break;
case BLOCK_16X32:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
if (mi_32_col_offset + 2 >= max_cols)
continue;
mip2 = mip + 2;
- build_masks(lfi_n, mip2, shift_y + 2, shift_uv + 1, lfm);
+ build_masks(lfi_n, mip2[0], shift_y + 2, shift_uv + 1, lfm);
break;
default:
for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
@@ -652,29 +652,29 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
continue;
- switch (mip->mbmi.sb_type) {
+ switch (mip[0]->mbmi.sb_type) {
case BLOCK_16X16:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
break;
case BLOCK_16X8:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
if (mi_16_row_offset + 1 >= max_rows)
continue;
mip2 = mip + mode_info_stride;
- build_y_mask(lfi_n, mip2, shift_y+8, lfm);
+ build_y_mask(lfi_n, mip2[0], shift_y+8, lfm);
break;
case BLOCK_8X16:
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
if (mi_16_col_offset +1 >= max_cols)
continue;
mip2 = mip + 1;
- build_y_mask(lfi_n, mip2, shift_y+1, lfm);
+ build_y_mask(lfi_n, mip2[0], shift_y+1, lfm);
break;
default: {
const int shift_y = shift_32_y[idx_32] +
shift_16_y[idx_16] +
shift_8_y[0];
- build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+ build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
mip += offset[0];
for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
const int shift_y = shift_32_y[idx_32] +
@@ -688,7 +688,7 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
if (mi_8_col_offset >= max_cols ||
mi_8_row_offset >= max_rows)
continue;
- build_y_mask(lfi_n, mip, shift_y, lfm);
+ build_y_mask(lfi_n, mip[0], shift_y, lfm);
}
break;
}
@@ -792,7 +792,7 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
#if CONFIG_NON420
static void filter_block_plane_non420(VP9_COMMON *cm,
struct macroblockd_plane *plane,
- const MODE_INFO *mi,
+ MODE_INFO **mi_8x8,
int mi_row, int mi_col) {
const int ss_x = plane->subsampling_x;
const int ss_y = plane->subsampling_y;
@@ -816,24 +816,25 @@ static void filter_block_plane_non420(VP9_COMMON *cm,
// Determine the vertical edges that need filtering
for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
- const int skip_this = mi[c].mbmi.skip_coeff
- && is_inter_block(&mi[c].mbmi);
+ const MODE_INFO *mi = mi_8x8[c];
+ const int skip_this = mi[0].mbmi.skip_coeff
+ && is_inter_block(&mi[0].mbmi);
// left edge of current unit is block/partition edge -> no skip
- const int block_edge_left = b_width_log2(mi[c].mbmi.sb_type) ?
- !(c & ((1 << (b_width_log2(mi[c].mbmi.sb_type)-1)) - 1)) : 1;
+ const int block_edge_left = b_width_log2(mi[0].mbmi.sb_type) ?
+ !(c & ((1 << (b_width_log2(mi[0].mbmi.sb_type)-1)) - 1)) : 1;
const int skip_this_c = skip_this && !block_edge_left;
// top edge of current unit is block/partition edge -> no skip
- const int block_edge_above = b_height_log2(mi[c].mbmi.sb_type) ?
- !(r & ((1 << (b_height_log2(mi[c].mbmi.sb_type)-1)) - 1)) : 1;
+ const int block_edge_above = b_height_log2(mi[0].mbmi.sb_type) ?
+ !(r & ((1 << (b_height_log2(mi[0].mbmi.sb_type)-1)) - 1)) : 1;
const int skip_this_r = skip_this && !block_edge_above;
const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV)
- ? get_uv_tx_size(&mi[c].mbmi)
- : mi[c].mbmi.tx_size;
+ ? get_uv_tx_size(&mi[0].mbmi)
+ : mi[0].mbmi.tx_size;
const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
// Filter level can vary per MI
- if (!build_lfi(&cm->lf_info, &mi[c].mbmi, lfi[r] + (c >> ss_x)))
+ if (!build_lfi(&cm->lf_info, &mi[0].mbmi, lfi[r] + (c >> ss_x)))
continue;
// Build masks based on the transform size of each block
@@ -892,7 +893,7 @@ static void filter_block_plane_non420(VP9_COMMON *cm,
mask_4x4_c & border_mask,
mask_4x4_int[r], lfi[r]);
dst->buf += 8 * dst->stride;
- mi += row_step_stride;
+ mi_8x8 += row_step_stride;
}
// Now do horizontal pass
@@ -913,7 +914,7 @@ static void filter_block_plane_non420(VP9_COMMON *cm,
static void filter_block_plane(VP9_COMMON *const cm,
struct macroblockd_plane *const plane,
- const MODE_INFO *mi,
+ MODE_INFO **mi_8x8,
int mi_row, int mi_col,
LOOP_FILTER_MASK *lfm) {
const int ss_x = plane->subsampling_x;
@@ -936,7 +937,8 @@ static void filter_block_plane(VP9_COMMON *const cm,
// Determine the vertical edges that need filtering
for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
- if (!build_lfi(&cm->lf_info, &mi[c].mbmi, lfi[r] + (c >> ss_x)))
+ const MODE_INFO *mi = mi_8x8[c];
+ if (!build_lfi(&cm->lf_info, &mi[0].mbmi, lfi[r] + (c >> ss_x)))
continue;
}
if (!plane->plane_type) {
@@ -959,7 +961,7 @@ static void filter_block_plane(VP9_COMMON *const cm,
lfi[r]);
}
dst->buf += 8 * dst->stride;
- mi += row_step_stride;
+ mi_8x8 += row_step_stride;
}
// Now do horizontal pass
@@ -1001,7 +1003,7 @@ void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer,
#endif
for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) {
- MODE_INFO* const mi = cm->mi + mi_row * cm->mode_info_stride;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible + mi_row * cm->mode_info_stride;
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
int plane;
@@ -1012,17 +1014,18 @@ void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer,
#if CONFIG_NON420
if (use_420)
#endif
- setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mode_info_stride, &lfm);
+ setup_mask(cm, mi_row, mi_col, mi_8x8 + mi_col, cm->mode_info_stride,
+ &lfm);
for (plane = 0; plane < num_planes; ++plane) {
#if CONFIG_NON420
if (use_420)
#endif
- filter_block_plane(cm, &xd->plane[plane], mi + mi_col, mi_row, mi_col,
- &lfm);
+ filter_block_plane(cm, &xd->plane[plane], mi_8x8 + mi_col, mi_row,
+ mi_col, &lfm);
#if CONFIG_NON420
else
- filter_block_plane_non420(cm, &xd->plane[plane], mi + mi_col,
+ filter_block_plane_non420(cm, &xd->plane[plane], mi_8x8 + mi_col,
mi_row, mi_col);
#endif
}
@@ -1035,7 +1038,6 @@ void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd,
int y_only, int partial) {
int start_mi_row, end_mi_row, mi_rows_to_filter;
if (!frame_filter_level) return;
-
start_mi_row = 0;
mi_rows_to_filter = cm->mi_rows;
if (partial && cm->mi_rows > 8) {
diff --git a/vp9/common/vp9_mvref_common.c b/vp9/common/vp9_mvref_common.c
index d8381ec48..bfeeb57bf 100644
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -1,3 +1,4 @@
+
/*
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
*
@@ -203,8 +204,8 @@ void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
const MV *const mv_ref = &mv_ref_search[i];
if (is_inside(cm, mi_col, mi_row, mv_ref)) {
const int check_sub_blocks = block_idx >= 0;
- const MODE_INFO *const candidate_mi = &mi[mv_ref->col + mv_ref->row
- * xd->mode_info_stride];
+ const MODE_INFO *const candidate_mi = xd->mi_8x8[mv_ref->col + mv_ref->row
+ * xd->mode_info_stride];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
// Keep counts for entropy encoding.
context_counter += mode_2_counter[candidate->mode];
@@ -230,8 +231,9 @@ void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
for (; i < MVREF_NEIGHBOURS; ++i) {
const MV *const mv_ref = &mv_ref_search[i];
if (is_inside(cm, mi_col, mi_row, mv_ref)) {
- const MB_MODE_INFO *const candidate = &mi[mv_ref->col + mv_ref->row
- * xd->mode_info_stride].mbmi;
+ const MB_MODE_INFO *const candidate = &xd->mi_8x8[mv_ref->col +
+ mv_ref->row
+ * xd->mode_info_stride]->mbmi;
if (candidate->ref_frame[0] == ref_frame) {
ADD_MV_REF_LIST(candidate->mv[0]);
@@ -259,8 +261,9 @@ void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
const MV *mv_ref = &mv_ref_search[i];
if (is_inside(cm, mi_col, mi_row, mv_ref)) {
- const MB_MODE_INFO *const candidate = &mi[mv_ref->col + mv_ref->row
- * xd->mode_info_stride].mbmi;
+ const MB_MODE_INFO *const candidate = &xd->mi_8x8[mv_ref->col +
+ mv_ref->row
+ * xd->mode_info_stride]->mbmi;
// If the candidate is INTRA we don't want to consider its mv.
if (is_inter_block(candidate))
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index f0bc063f2..0431e146f 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -164,6 +164,10 @@ typedef struct VP9Common {
MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
+ MODE_INFO **mi_grid_base;
+ MODE_INFO **mi_grid_visible;
+ MODE_INFO **prev_mi_grid_base;
+ MODE_INFO **prev_mi_grid_visible;
// Persistent mb segment id map used in prediction.
unsigned char *last_frame_seg_map;
diff --git a/vp9/common/vp9_pred_common.c b/vp9/common/vp9_pred_common.c
index 494cea7c5..81fbf1f26 100644
--- a/vp9/common/vp9_pred_common.c
+++ b/vp9/common/vp9_pred_common.c
@@ -18,28 +18,28 @@
// Returns a context number for the given MB prediction signal
unsigned char vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
- const MODE_INFO *const mi = xd->mode_info_context;
- const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
- const int left_in_image = xd->left_available && left_mbmi->in_image;
- const int above_in_image = xd->up_available && above_mbmi->in_image;
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
// left
- const int left_mv_pred = is_inter_mode(left_mbmi->mode);
+ const int left_mv_pred = left_in_image ? is_inter_mode(left_mi->mbmi.mode)
+ : 0;
const int left_interp = left_in_image && left_mv_pred
- ? left_mbmi->interp_filter
+ ? left_mi->mbmi.interp_filter
: SWITCHABLE_FILTERS;
// above
- const int above_mv_pred = is_inter_mode(above_mbmi->mode);
+ const int above_mv_pred = above_in_image ? is_inter_mode(above_mi->mbmi.mode)
+ : 0;
const int above_interp = above_in_image && above_mv_pred
- ? above_mbmi->interp_filter
+ ? above_mi->mbmi.interp_filter
: SWITCHABLE_FILTERS;
-
if (left_interp == above_interp)
return left_interp;
else if (left_interp == SWITCHABLE_FILTERS &&
@@ -53,13 +53,14 @@ unsigned char vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
}
// Returns a context number for the given MB prediction signal
unsigned char vp9_get_pred_context_intra_inter(const MACROBLOCKD *xd) {
- const MODE_INFO *const mi = xd->mode_info_context;
- const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
- const int left_in_image = xd->left_available && left_mbmi->in_image;
- const int above_in_image = xd->up_available && above_mbmi->in_image;
- const int left_intra = !is_inter_block(left_mbmi);
- const int above_intra = !is_inter_block(above_mbmi);
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1;
+ const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1;
// The mode info data structure has a one element border above and to the
// left of the entries corresponding to real macroblocks.
@@ -80,11 +81,12 @@ unsigned char vp9_get_pred_context_intra_inter(const MACROBLOCKD *xd) {
unsigned char vp9_get_pred_context_comp_inter_inter(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int pred_context;
- const MODE_INFO *const mi = xd->mode_info_context;
- const MB_MODE_INFO *const above_mbmi = &mi[-cm->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
- const int left_in_image = xd->left_available && left_mbmi->in_image;
- const int above_in_image = xd->up_available && above_mbmi->in_image;
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
@@ -124,14 +126,14 @@ unsigned char vp9_get_pred_context_comp_inter_inter(const VP9_COMMON *cm,
unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int pred_context;
- const MODE_INFO *const mi = xd->mode_info_context;
- const MB_MODE_INFO *const above_mbmi = &mi[-cm->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
- const int left_in_image = xd->left_available && left_mbmi->in_image;
- const int above_in_image = xd->up_available && above_mbmi->in_image;
- const int left_intra = !is_inter_block(left_mbmi);
- const int above_intra = !is_inter_block(above_mbmi);
-
+ const MODE_INFO * const above_mi = xd->mi_8x8[-cm->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1;
+ const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
@@ -204,14 +206,14 @@ unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
}
unsigned char vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
int pred_context;
- const MODE_INFO *const mi = xd->mode_info_context;
- const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
- const int left_in_image = xd->left_available && left_mbmi->in_image;
- const int above_in_image = xd->up_available && above_mbmi->in_image;
- const int left_intra = !is_inter_block(left_mbmi);
- const int above_intra = !is_inter_block(above_mbmi);
-
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1;
+ const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1;
// Note:
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
@@ -270,13 +272,14 @@ unsigned char vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
int pred_context;
- const MODE_INFO *const mi = xd->mode_info_context;
- const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
- const int left_in_image = xd->left_available && left_mbmi->in_image;
- const int above_in_image = xd->up_available && above_mbmi->in_image;
- const int left_intra = !is_inter_block(left_mbmi);
- const int above_intra = !is_inter_block(above_mbmi);
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1;
+ const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1;
// Note:
// The mode info data structure has a one element border above and to the
@@ -358,12 +361,13 @@ unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
// left of the entries corresponding to real blocks.
// The prediction flags in these dummy entries are initialized to 0.
unsigned char vp9_get_pred_context_tx_size(const MACROBLOCKD *xd) {
- const MODE_INFO *const mi = xd->mode_info_context;
- const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
- const int left_in_image = xd->left_available && left_mbmi->in_image;
- const int above_in_image = xd->up_available && above_mbmi->in_image;
- const int max_tx_size = max_txsize_lookup[mi->mbmi.sb_type];
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0;
+ const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0;
+ const int left_in_image = xd->left_available && left_mi;
+ const int above_in_image = xd->up_available && above_mi;
+ const int max_tx_size = max_txsize_lookup[xd->mi_8x8[0]->mbmi.sb_type];
int above_context = max_tx_size;
int left_context = max_tx_size;
@@ -384,32 +388,13 @@ unsigned char vp9_get_pred_context_tx_size(const MACROBLOCKD *xd) {
return above_context + left_context > max_tx_size;
}
-void vp9_set_pred_flag_seg_id(VP9_COMMON *cm, BLOCK_SIZE bsize,
- int mi_row, int mi_col, uint8_t pred_flag) {
- MODE_INFO *mi = &cm->mi[mi_row * cm->mode_info_stride + mi_col];
- const int bw = 1 << mi_width_log2(bsize);
- const int bh = 1 << mi_height_log2(bsize);
- const int xmis = MIN(cm->mi_cols - mi_col, bw);
- const int ymis = MIN(cm->mi_rows - mi_row, bh);
- int x, y;
-
- for (y = 0; y < ymis; y++)
- for (x = 0; x < xmis; x++)
- mi[y * cm->mode_info_stride + x].mbmi.seg_id_predicted = pred_flag;
+void vp9_set_pred_flag_seg_id(MACROBLOCKD *xd, uint8_t pred_flag) {
+ xd->this_mi->mbmi.seg_id_predicted = pred_flag;
}
-void vp9_set_pred_flag_mbskip(VP9_COMMON *cm, BLOCK_SIZE bsize,
- int mi_row, int mi_col, uint8_t pred_flag) {
- MODE_INFO *mi = &cm->mi[mi_row * cm->mode_info_stride + mi_col];
- const int bw = 1 << mi_width_log2(bsize);
- const int bh = 1 << mi_height_log2(bsize);
- const int xmis = MIN(cm->mi_cols - mi_col, bw);
- const int ymis = MIN(cm->mi_rows - mi_row, bh);
- int x, y;
-
- for (y = 0; y < ymis; y++)
- for (x = 0; x < xmis; x++)
- mi[y * cm->mode_info_stride + x].mbmi.skip_coeff = pred_flag;
+void vp9_set_pred_flag_mbskip(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+ uint8_t pred_flag) {
+ xd->this_mi->mbmi.skip_coeff = pred_flag;
}
int vp9_get_segment_id(VP9_COMMON *cm, const uint8_t *segment_ids,
diff --git a/vp9/common/vp9_pred_common.h b/vp9/common/vp9_pred_common.h
index 89e1356d7..47ca8abd8 100644
--- a/vp9/common/vp9_pred_common.h
+++ b/vp9/common/vp9_pred_common.h
@@ -19,12 +19,12 @@ int vp9_get_segment_id(VP9_COMMON *cm, const uint8_t *segment_ids,
static INLINE int vp9_get_pred_context_seg_id(const MACROBLOCKD *xd) {
- const MODE_INFO *const mi = xd->mode_info_context;
- const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const int above_sip = above_mi ? above_mi->mbmi.seg_id_predicted : 0;
+ const int left_sip = left_mi ? left_mi->mbmi.seg_id_predicted : 0;
- return above_mbmi->seg_id_predicted +
- (xd->left_available ? left_mbmi->seg_id_predicted : 0);
+ return above_sip + (xd->left_available ? left_sip : 0);
}
static INLINE vp9_prob vp9_get_pred_prob_seg_id(struct segmentation *seg,
@@ -32,16 +32,15 @@ static INLINE vp9_prob vp9_get_pred_prob_seg_id(struct segmentation *seg,
return seg->pred_probs[vp9_get_pred_context_seg_id(xd)];
}
-void vp9_set_pred_flag_seg_id(VP9_COMMON *cm, BLOCK_SIZE bsize,
- int mi_row, int mi_col, uint8_t pred_flag);
+void vp9_set_pred_flag_seg_id(MACROBLOCKD *xd, uint8_t pred_flag);
static INLINE int vp9_get_pred_context_mbskip(const MACROBLOCKD *xd) {
- const MODE_INFO *const mi = xd->mode_info_context;
- const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
+ const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO * const left_mi = xd->mi_8x8[-1];
+ const int above_skip_coeff = above_mi ? above_mi->mbmi.skip_coeff : 0;
+ const int left_skip_coeff = left_mi ? left_mi->mbmi.skip_coeff : 0;
- return above_mbmi->skip_coeff +
- (xd->left_available ? left_mbmi->skip_coeff : 0);
+ return above_skip_coeff + (xd->left_available ? left_skip_coeff : 0);
}
static INLINE vp9_prob vp9_get_pred_prob_mbskip(const VP9_COMMON *cm,
@@ -50,11 +49,11 @@ static INLINE vp9_prob vp9_get_pred_prob_mbskip(const VP9_COMMON *cm,
}
static INLINE unsigned char vp9_get_pred_flag_mbskip(const MACROBLOCKD *xd) {
- return xd->mode_info_context->mbmi.skip_coeff;
+ return xd->this_mi->mbmi.skip_coeff;
}
-void vp9_set_pred_flag_mbskip(VP9_COMMON *cm, BLOCK_SIZE bsize,
- int mi_row, int mi_col, uint8_t pred_flag);
+void vp9_set_pred_flag_mbskip(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+ uint8_t pred_flag);
unsigned char vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
@@ -114,8 +113,9 @@ static const vp9_prob *get_tx_probs(BLOCK_SIZE bsize, uint8_t context,
}
static const vp9_prob *get_tx_probs2(const MACROBLOCKD *xd,
- const struct tx_probs *tx_probs) {
- const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type;
+ const struct tx_probs *tx_probs,
+ const MODE_INFO *m) {
+ const BLOCK_SIZE bsize = m->mbmi.sb_type;
const int context = vp9_get_pred_context_tx_size(xd);
return get_tx_probs(bsize, context, tx_probs);
}
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 88bba3a60..dc1d46caa 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -24,11 +24,13 @@
void vp9_setup_interp_filters(MACROBLOCKD *xd,
INTERPOLATIONFILTERTYPE mcomp_filter_type,
VP9_COMMON *cm) {
- if (xd->mode_info_context) {
- MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ if (xd->mi_8x8 && xd->this_mi) {
+ MB_MODE_INFO * mbmi = &xd->this_mi->mbmi;
set_scale_factors(xd, mbmi->ref_frame[0] - 1, mbmi->ref_frame[1] - 1,
cm->active_ref_scale);
+ } else {
+ set_scale_factors(xd, -1, -1, cm->active_ref_scale);
}
switch (mcomp_filter_type) {
@@ -128,7 +130,7 @@ static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize,
const int bh = plane_block_height(bsize, pd);
const int x = 4 * (block & ((1 << bwl) - 1));
const int y = 4 * (block >> bwl);
- const MODE_INFO *const mi = xd->mode_info_context;
+ const MODE_INFO *mi = xd->this_mi;
const int use_second_ref = mi->mbmi.ref_frame[1] > 0;
int ref;
@@ -193,7 +195,7 @@ static INLINE void foreach_predicted_block_in_plane(
// size of the predictor to use.
int pred_w, pred_h;
- if (xd->mode_info_context->mbmi.sb_type < BLOCK_8X8) {
+ if (xd->this_mi->mbmi.sb_type < BLOCK_8X8) {
assert(bsize == BLOCK_8X8);
pred_w = 0;
pred_h = 0;
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index 7f23dc190..84a29b17a 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -95,7 +95,7 @@ static int read_intra_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
vp9_reader *r) {
MACROBLOCKD *const xd = &pbi->mb;
struct segmentation *const seg = &pbi->common.seg;
- const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type;
+ const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type;
int segment_id;
if (!seg->enabled)
@@ -114,7 +114,7 @@ static int read_inter_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
struct segmentation *const seg = &cm->seg;
- const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type;
+ const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type;
int pred_segment_id, segment_id;
if (!seg->enabled)
@@ -128,7 +128,7 @@ static int read_inter_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col,
if (seg->temporal_update) {
const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
const int pred_flag = vp9_read(r, pred_prob);
- vp9_set_pred_flag_seg_id(cm, bsize, mi_row, mi_col, pred_flag);
+ vp9_set_pred_flag_seg_id(xd, pred_flag);
segment_id = pred_flag ? pred_segment_id
: read_segment_id(r, seg);
} else {
@@ -156,7 +156,8 @@ static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m,
MACROBLOCKD *const xd = &pbi->mb;
MB_MODE_INFO *const mbmi = &m->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
- const int mis = cm->mode_info_stride;
+ const MODE_INFO *above_mi = xd->mi_8x8[-cm->mode_info_stride];
+ const MODE_INFO *left_mi = xd->mi_8x8[-1];
mbmi->segment_id = read_intra_segment_id(pbi, mi_row, mi_col, r);
mbmi->skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r);
@@ -165,9 +166,9 @@ static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m,
mbmi->ref_frame[1] = NONE;
if (bsize >= BLOCK_8X8) {
- const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
+ const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
const MB_PREDICTION_MODE L = xd->left_available ?
- left_block_mode(m, 0) : DC_PRED;
+ left_block_mode(m, left_mi, 0) : DC_PRED;
mbmi->mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]);
} else {
// Only 4x4, 4x8, 8x4 blocks
@@ -178,9 +179,9 @@ static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m,
for (idy = 0; idy < 2; idy += num_4x4_h) {
for (idx = 0; idx < 2; idx += num_4x4_w) {
const int ib = idy * 2 + idx;
- const MB_PREDICTION_MODE A = above_block_mode(m, ib, mis);
+ const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, ib);
const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
- left_block_mode(m, ib) : DC_PRED;
+ left_block_mode(m, left_mi, ib) : DC_PRED;
const MB_PREDICTION_MODE b_mode = read_intra_mode(r,
vp9_kf_y_mode_prob[A][L]);
m->bmi[ib].as_mode = b_mode;
@@ -453,8 +454,8 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
ref0 = mbmi->ref_frame[0];
is_compound = has_second_ref(mbmi);
- vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context,
- ref0, mbmi->ref_mvs[ref0], mi_row, mi_col);
+ vp9_find_mv_refs(cm, xd, mi, xd->last_mi, ref0, mbmi->ref_mvs[ref0],
+ mi_row, mi_col);
inter_mode_ctx = mbmi->mode_context[ref0];
@@ -474,7 +475,7 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
if (is_compound) {
const MV_REFERENCE_FRAME ref1 = mbmi->ref_frame[1];
- vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context,
+ vp9_find_mv_refs(cm, xd, mi, xd->last_mi,
ref1, mbmi->ref_mvs[ref1], mi_row, mi_col);
if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
@@ -669,20 +670,21 @@ void vp9_prepare_read_mode_info(VP9D_COMP* pbi, vp9_reader *r) {
void vp9_read_mode_info(VP9D_COMP* pbi, int mi_row, int mi_col, vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
- MODE_INFO *mi = xd->mode_info_context;
+ MODE_INFO *mi = xd->this_mi;
const BLOCK_SIZE bsize = mi->mbmi.sb_type;
const int bw = 1 << mi_width_log2(bsize);
const int bh = 1 << mi_height_log2(bsize);
const int y_mis = MIN(bh, cm->mi_rows - mi_row);
const int x_mis = MIN(bw, cm->mi_cols - mi_col);
- int x, y;
+ int x, y, z;
if (cm->frame_type == KEY_FRAME || cm->intra_only)
read_intra_frame_mode_info(pbi, mi, mi_row, mi_col, r);
else
read_inter_frame_mode_info(pbi, mi, mi_row, mi_col, r);
- for (y = 0; y < y_mis; y++)
- for (x = !y; x < x_mis; x++)
- mi[y * cm->mode_info_stride + x] = *mi;
+ for (y = 0, z = 0; y < y_mis; y++, z += cm->mode_info_stride)
+ for (x = !y; x < x_mis; x++) {
+ xd->mi_8x8[z + x] = mi;
+ }
}
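
A minimal sketch (not part of the patch) of what the rewritten loop at the end of vp9_read_mode_info() now does: instead of copying the decoded MODE_INFO struct into every covered 8x8 cell, each grid cell stores the same pointer, so the block's mode info exists once and is merely aliased. Names are illustrative, and the real loop also skips the cell that set_offsets() already filled.

/* Sketch: alias one MODE_INFO across all 8x8 cells the block covers. */
typedef struct { int dummy; } mi_fill_sketch_t;   /* stand-in for MODE_INFO */

static void fill_mi_grid(mi_fill_sketch_t **mi_8x8, mi_fill_sketch_t *mi,
                         int x_mis, int y_mis, int stride) {
  int x, y;
  for (y = 0; y < y_mis; y++)
    for (x = 0; x < x_mis; x++)
      mi_8x8[y * stride + x] = mi;   /* pointer assignment, no struct copy */
}
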
diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c
index 6cb7c094b..34ed0c759 100644
--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -80,6 +80,7 @@ static void read_tx_probs(struct tx_probs *tx_probs, vp9_reader *r) {
static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) {
int i;
xd->plane[0].dequant = cm->y_dequant[q_index];
+
for (i = 1; i < MAX_MB_PLANE; i++)
xd->plane[i].dequant = cm->uv_dequant[q_index];
}
@@ -124,7 +125,7 @@ static void decode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
TX_SIZE tx_size, void *arg) {
MACROBLOCKD* const xd = arg;
struct macroblockd_plane *const pd = &xd->plane[plane];
- MODE_INFO *const mi = xd->mode_info_context;
+ MODE_INFO *const mi = xd->this_mi;
const int raster_block = txfrm_block_to_raster_block(plane_bsize, tx_size,
block);
uint8_t* const dst = raster_block_offset_uint8(plane_bsize, raster_block,
@@ -148,7 +149,7 @@ static void decode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
static int decode_tokens(VP9D_COMP *pbi, BLOCK_SIZE bsize, vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
if (mbmi->skip_coeff) {
reset_skip_context(xd, bsize);
@@ -171,12 +172,20 @@ static void set_offsets(VP9D_COMP *pbi, BLOCK_SIZE bsize,
const int bw = num_8x8_blocks_wide_lookup[bsize];
const int offset = mi_row * cm->mode_info_stride + mi_col;
- xd->mode_info_context = cm->mi + offset;
- xd->mode_info_context->mbmi.sb_type = bsize;
xd->mode_info_stride = cm->mode_info_stride;
+
+ xd->mi_8x8 = cm->mi_grid_visible + offset;
+ xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset;
+
+ // we are using the mode info context stream here
+ xd->this_mi =
+ xd->mi_8x8[0] = xd->mic_stream_ptr;
+ xd->this_mi->mbmi.sb_type = bsize;
+ xd->mic_stream_ptr++;
+
// Special case: if prev_mi is NULL, the previous mode info context
// cannot be used.
- xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + offset : NULL;
+ xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;
set_skip_context(cm, xd, mi_row, mi_col);
set_partition_seg_context(cm, xd, mi_row, mi_col);
@@ -191,7 +200,7 @@ static void set_offsets(VP9D_COMP *pbi, BLOCK_SIZE bsize,
static void set_ref(VP9D_COMP *pbi, int i, int mi_row, int mi_col) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
const int ref = mbmi->ref_frame[i] - LAST_FRAME;
const YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->active_ref_idx[ref]];
const struct scale_factors *sf = &cm->active_ref_scale[ref];
@@ -222,7 +231,7 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
bsize = BLOCK_8X8;
// Has to be called after set_offsets
- mbmi = &xd->mode_info_context->mbmi;
+ mbmi = &xd->this_mi->mbmi;
if (!is_inter_block(mbmi)) {
// Intra reconstruction
@@ -246,7 +255,7 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
assert(mbmi->sb_type == bsize);
if (eobtotal == 0)
// skip loopfilter
- vp9_set_pred_flag_mbskip(cm, bsize, mi_row, mi_col, 1);
+ vp9_set_pred_flag_mbskip(xd, bsize, 1);
else if (eobtotal > 0)
foreach_transformed_block(xd, bsize, decode_block, xd);
}
@@ -960,6 +969,10 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
setup_plane_dequants(cm, &pbi->mb, cm->base_qindex);
+ xd->mi_8x8 = cm->mi_grid_visible;
+ xd->mic_stream_ptr = cm->mi;
+ xd->mode_info_stride = cm->mode_info_stride;
+
cm->fc = cm->frame_contexts[cm->frame_context_idx];
vp9_zero(cm->counts);
diff --git a/vp9/decoder/vp9_detokenize.c b/vp9/decoder/vp9_detokenize.c
index c119093df..cd74a0b00 100644
--- a/vp9/decoder/vp9_detokenize.c
+++ b/vp9/decoder/vp9_detokenize.c
@@ -94,7 +94,7 @@ static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd,
ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L) {
FRAME_CONTEXT *const fc = &cm->fc;
FRAME_COUNTS *const counts = &cm->counts;
- const int ref = is_inter_block(&xd->mode_info_context->mbmi);
+ const int ref = is_inter_block(&xd->this_mi->mbmi);
int band, c = 0;
vp9_prob (*coef_probs)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size][type][ref];
@@ -224,7 +224,7 @@ static void decode_block(int plane, int block, BLOCK_SIZE plane_bsize,
MACROBLOCKD *xd = &arg->pbi->mb;
struct segmentation *seg = &arg->pbi->common.seg;
struct macroblockd_plane* pd = &xd->plane[plane];
- const int segment_id = xd->mode_info_context->mbmi.segment_id;
+ const int segment_id = xd->this_mi->mbmi.segment_id;
const int seg_eob = get_tx_eob(seg, segment_id, tx_size);
int aoff, loff, eob;
diff --git a/vp9/decoder/vp9_onyxd_if.c b/vp9/decoder/vp9_onyxd_if.c
index 505e9dc5d..17d5def33 100644
--- a/vp9/decoder/vp9_onyxd_if.c
+++ b/vp9/decoder/vp9_onyxd_if.c
@@ -386,12 +386,17 @@ int vp9_receive_compressed_data(VP9D_PTR ptr,
if (cm->show_frame) {
// current mip will be the prev_mip for the next frame
MODE_INFO *temp = cm->prev_mip;
+ MODE_INFO **temp2 = cm->prev_mi_grid_base;
cm->prev_mip = cm->mip;
cm->mip = temp;
+ cm->prev_mi_grid_base = cm->mi_grid_base;
+ cm->mi_grid_base = temp2;
// update the upper left visible macroblock ptrs
cm->mi = cm->mip + cm->mode_info_stride + 1;
cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
+ cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1;
+ cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1;
cm->current_video_frame++;
}
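
A minimal sketch (not part of the patch) of the swap added above for the show-frame case: the current and previous grid base pointers exchange places, and the visible pointers are then recomputed from the bases with the usual one-row-plus-one-column border offset. Field names are simplified stand-ins for the VP9_COMMON members.

/* Sketch: swap current/previous MODE_INFO grids when a frame is shown. */
typedef struct { int dummy; } mi_swap_sketch_t;

typedef struct {
  int stride;
  mi_swap_sketch_t **mi_grid_base, **mi_grid_visible;
  mi_swap_sketch_t **prev_mi_grid_base, **prev_mi_grid_visible;
} grids_sketch_t;

static void swap_mi_grids(grids_sketch_t *cm) {
  mi_swap_sketch_t **tmp = cm->prev_mi_grid_base;
  cm->prev_mi_grid_base = cm->mi_grid_base;    /* current becomes previous */
  cm->mi_grid_base = tmp;                      /* old previous is reused   */
  cm->mi_grid_visible = cm->mi_grid_base + cm->stride + 1;
  cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->stride + 1;
}
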
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index f3bbc17ce..635891efb 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -198,10 +198,11 @@ static void update_mbintra_mode_probs(VP9_COMP* const cpi,
(unsigned int *)cpi->y_mode_count[j]);
}
-static void write_selected_tx_size(const VP9_COMP *cpi, TX_SIZE tx_size,
- BLOCK_SIZE bsize, vp9_writer *w) {
+static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m,
+ TX_SIZE tx_size, BLOCK_SIZE bsize,
+ vp9_writer *w) {
const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs);
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs, m);
vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
if (bsize >= BLOCK_16X16 && tx_size != TX_4X4) {
vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
@@ -359,7 +360,7 @@ static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *mi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *mi = &xd->this_mi->mbmi;
const int segment_id = mi->segment_id;
int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
SEG_LVL_REF_FRAME);
@@ -437,7 +438,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
!(rf != INTRA_FRAME &&
(skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
- write_selected_tx_size(cpi, mi->tx_size, bsize, bc);
+ write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc);
}
if (rf == INTRA_FRAME) {
@@ -531,14 +532,16 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
}
}
-static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m,
+static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
vp9_writer *bc) {
const VP9_COMMON *const cm = &cpi->common;
const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
const struct segmentation *const seg = &cm->seg;
+ MODE_INFO *m = mi_8x8[0];
const int ym = m->mbmi.mode;
- const int mis = cm->mode_info_stride;
const int segment_id = m->mbmi.segment_id;
+ MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride];
+ MODE_INFO *left_mi = mi_8x8[-1];
if (seg->update_map)
write_segment_id(bc, seg, m->mbmi.segment_id);
@@ -546,12 +549,12 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m,
write_skip_coeff(cpi, segment_id, m, bc);
if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
- write_selected_tx_size(cpi, m->mbmi.tx_size, m->mbmi.sb_type, bc);
+ write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc);
if (m->mbmi.sb_type >= BLOCK_8X8) {
- const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
+ const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
const MB_PREDICTION_MODE L = xd->left_available ?
- left_block_mode(m, 0) : DC_PRED;
+ left_block_mode(m, left_mi, 0) : DC_PRED;
write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]);
} else {
int idx, idy;
@@ -559,10 +562,10 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m,
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type];
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
- const int i = idy * 2 + idx;
- const MB_PREDICTION_MODE A = above_block_mode(m, i, mis);
+ int i = idy * 2 + idx;
+ const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i);
const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
- left_block_mode(m, i) : DC_PRED;
+ left_block_mode(m, left_mi, i) : DC_PRED;
const int bm = m->bmi[i].as_mode;
#ifdef ENTROPY_STATS
++intra_mode_stats[A][L][bm];
@@ -575,23 +578,25 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m,
write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]);
}
-static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
+static void write_modes_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end,
int mi_row, int mi_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+ MODE_INFO *m = mi_8x8[0];
if (m->mbmi.sb_type < BLOCK_8X8)
if (xd->ab_index > 0)
return;
- xd->mode_info_context = m;
+ xd->this_mi = mi_8x8[0];
+ xd->mi_8x8 = mi_8x8;
+
set_mi_row_col(&cpi->common, xd,
mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type]);
-
- if (cm->frame_type == KEY_FRAME || cm->intra_only) {
- write_mb_modes_kf(cpi, m, bc);
+ if ((cm->frame_type == KEY_FRAME) || cm->intra_only) {
+ write_mb_modes_kf(cpi, mi_8x8, bc);
#ifdef ENTROPY_STATS
active_section = 8;
#endif
@@ -606,7 +611,7 @@ static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
pack_mb_tokens(bc, tok, tok_end);
}
-static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
+static void write_modes_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end,
int mi_row, int mi_col, BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
@@ -617,6 +622,7 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
int n;
PARTITION_TYPE partition = PARTITION_NONE;
BLOCK_SIZE subsize;
+ MODE_INFO *m = mi_8x8[0];
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
@@ -648,25 +654,26 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
switch (partition) {
case PARTITION_NONE:
- write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
break;
case PARTITION_HORZ:
- write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
*(get_sb_index(xd, subsize)) = 1;
if ((mi_row + bs) < cm->mi_rows)
- write_modes_b(cpi, m + bs * mis, bc, tok, tok_end, mi_row + bs, mi_col);
+ write_modes_b(cpi, mi_8x8 + bs * mis, bc, tok, tok_end, mi_row + bs,
+ mi_col);
break;
case PARTITION_VERT:
- write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
+ write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col);
*(get_sb_index(xd, subsize)) = 1;
if ((mi_col + bs) < cm->mi_cols)
- write_modes_b(cpi, m + bs, bc, tok, tok_end, mi_row, mi_col + bs);
+ write_modes_b(cpi, mi_8x8 + bs, bc, tok, tok_end, mi_row, mi_col + bs);
break;
case PARTITION_SPLIT:
for (n = 0; n < 4; n++) {
int j = n >> 1, i = n & 0x01;
*(get_sb_index(xd, subsize)) = n;
- write_modes_sb(cpi, m + j * bs * mis + i * bs, bc, tok, tok_end,
+ write_modes_sb(cpi, mi_8x8 + j * bs * mis + i * bs, bc, tok, tok_end,
mi_row + j * bs, mi_col + i * bs, subsize);
}
break;
@@ -686,18 +693,21 @@ static void write_modes(VP9_COMP *cpi, vp9_writer* const bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
- MODE_INFO *m, *m_ptr = cm->mi;
int mi_row, mi_col;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible;
+ MODE_INFO **m_8x8;
- m_ptr += cm->cur_tile_mi_col_start + cm->cur_tile_mi_row_start * mis;
+ mi_8x8 += cm->cur_tile_mi_col_start + cm->cur_tile_mi_row_start * mis;
for (mi_row = cm->cur_tile_mi_row_start; mi_row < cm->cur_tile_mi_row_end;
- mi_row += 8, m_ptr += 8 * mis) {
- m = m_ptr;
+ mi_row += 8, mi_8x8 += 8 * mis) {
+ m_8x8 = mi_8x8;
vp9_zero(cm->left_seg_context);
for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
- mi_col += MI_BLOCK_SIZE, m += MI_BLOCK_SIZE)
- write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
+ mi_col += MI_BLOCK_SIZE, m_8x8 += MI_BLOCK_SIZE) {
+ write_modes_sb(cpi, m_8x8, bc, tok, tok_end, mi_row, mi_col,
+ BLOCK_64X64);
+ }
}
}
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 45758e7cb..983ac9a38 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -342,7 +342,8 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = &ctx->mic;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO * const mbmi = &xd->this_mi->mbmi;
+ MODE_INFO *mi_addr = xd->this_mi;
int mb_mode_index = ctx->best_mode_index;
const int mis = cm->mode_info_stride;
@@ -355,13 +356,15 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES);
assert(mi->mbmi.sb_type == bsize);
+ *mi_addr = *mi;
+
// Restore the coding context of the MB to that that was in place
// when the mode was picked for it
for (y = 0; y < mi_height; y++)
for (x_idx = 0; x_idx < mi_width; x_idx++)
if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
&& (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y)
- xd->mode_info_context[x_idx + y * mis] = *mi;
+ xd->mi_8x8[x_idx + y * mis] = mi_addr;
// FIXME(rbultje) I'm pretty sure this should go to the end of this block
// (i.e. after the output_enabled)
@@ -430,15 +433,6 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
}
- if (bsize > BLOCK_8X8 && mbmi->mode == NEWMV) {
- int i, j;
- for (j = 0; j < mi_height; ++j)
- for (i = 0; i < mi_width; ++i)
- if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > i
- && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > j)
- xd->mode_info_context[mis * j + i].mbmi = *mbmi;
- }
-
if (cm->mcomp_filter_type == SWITCHABLE && is_inter_mode(mbmi->mode)) {
const int ctx = vp9_get_pred_context_switchable_interp(xd);
++cm->counts.switchable_interp[ctx][mbmi->interp_filter];
@@ -492,11 +486,18 @@ static void set_offsets(VP9_COMP *cpi, int mi_row, int mi_col,
/* pointers to mode info contexts */
x->partition_info = x->pi + idx_str;
- xd->mode_info_context = cm->mi + idx_str;
- mbmi = &xd->mode_info_context->mbmi;
+
+ xd->mi_8x8 = cm->mi_grid_visible + idx_str;
+ xd->prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
+
// Special case: if prev_mi is NULL, the previous mode info context
// cannot be used.
- xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + idx_str : NULL;
+ xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;
+
+ xd->this_mi =
+ xd->mi_8x8[0] = cm->mi + idx_str;
+
+ mbmi = &xd->this_mi->mbmi;
// Set up destination pointers
setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col);
@@ -573,10 +574,10 @@ static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
}
set_offsets(cpi, mi_row, mi_col, bsize);
- xd->mode_info_context->mbmi.sb_type = bsize;
+ xd->this_mi->mbmi.sb_type = bsize;
// Set to zero to make sure we do not use the previous encoded frame stats
- xd->mode_info_context->mbmi.skip_coeff = 0;
+ xd->this_mi->mbmi.skip_coeff = 0;
x->source_variance = get_sby_perpixel_variance(cpi, x, bsize);
@@ -597,7 +598,7 @@ static void update_stats(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *mi = xd->mode_info_context;
+ MODE_INFO *mi = xd->this_mi;
MB_MODE_INFO *const mbmi = &mi->mbmi;
if (cm->frame_type != KEY_FRAME) {
@@ -876,7 +877,7 @@ static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
// However, at the bottom and right borders of the image the requested size
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
-static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m,
+static void set_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
int mi_row, int mi_col) {
VP9_COMMON *const cm = &cpi->common;
BLOCK_SIZE bsize = cpi->sf.always_this_block_size;
@@ -892,7 +893,7 @@ static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m,
(row8x8_remaining >= MI_BLOCK_SIZE)) {
for (block_row = 0; block_row < MI_BLOCK_SIZE; ++block_row) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; ++block_col) {
- m[block_row * mis + block_col].mbmi.sb_type = bsize;
+ mi_8x8[block_row * mis + block_col]->mbmi.sb_type = bsize;
}
}
} else {
@@ -916,36 +917,41 @@ static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m,
for (sub_block_col = 0; sub_block_col < bw; ++sub_block_col) {
row_index = block_row + sub_block_row;
col_index = block_col + sub_block_col;
- m[row_index * mis + col_index].mbmi.sb_type = bsize;
+ mi_8x8[row_index * mis + col_index]->mbmi.sb_type = bsize;
}
}
}
}
}
}
-static void copy_partitioning(VP9_COMP *cpi, MODE_INFO *m, MODE_INFO *p) {
+static void copy_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ MODE_INFO **prev_mi_8x8) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
int block_row, block_col;
+
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
- m[block_row * mis + block_col].mbmi.sb_type =
- p[block_row * mis + block_col].mbmi.sb_type;
+ MODE_INFO * prev_mi = prev_mi_8x8[block_row * mis + block_col];
+ MODE_INFO * mi = mi_8x8[block_row * mis + block_col];
+ BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
+ if (mi)
+ mi->mbmi.sb_type = sb_type;
}
}
}
-static void set_block_size(VP9_COMMON * const cm, MODE_INFO *mi,
+static void set_block_size(VP9_COMMON * const cm, MODE_INFO **mi_8x8,
BLOCK_SIZE bsize, int mis, int mi_row,
int mi_col) {
int r, c;
const int bs = MAX(num_8x8_blocks_wide_lookup[bsize],
num_8x8_blocks_high_lookup[bsize]);
- MODE_INFO *const mi2 = &mi[mi_row * mis + mi_col];
+ MODE_INFO **const mi2 = &mi_8x8[mi_row * mis + mi_col];
for (r = 0; r < bs; r++)
for (c = 0; c < bs; c++)
if (mi_row + r < cm->mi_rows && mi_col + c < cm->mi_cols)
- mi2[r * mis + c].mbmi.sb_type = bsize;
+ mi2[r * mis + c]->mbmi.sb_type = bsize;
}
typedef struct {
@@ -1091,7 +1097,7 @@ static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
#else // !PERFORM_RANDOM_PARTITIONING
-static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
+static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO **m,
BLOCK_SIZE bsize, int mi_row,
int mi_col, int mi_size) {
VP9_COMMON * const cm = &cpi->common;
@@ -1129,8 +1135,8 @@ static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
}
#endif // PERFORM_RANDOM_PARTITIONING
-static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
- int mi_col) {
+static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ int mi_row, int mi_col) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -1168,18 +1174,21 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, LAST_FRAME)];
YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[idx];
YV12_BUFFER_CONFIG *second_ref_fb = NULL;
+ MB_MODE_INFO * const mbmi = &xd->this_mi->mbmi;
setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col,
&xd->scale_factor[0]);
setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
&xd->scale_factor[1]);
- xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
- xd->mode_info_context->mbmi.sb_type = BLOCK_64X64;
- vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
+
+ mbmi->ref_frame[0] = LAST_FRAME;
+ mbmi->sb_type = BLOCK_64X64;
+ vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[mbmi->ref_frame[0]],
&nearest_mv, &near_mv);
- xd->mode_info_context->mbmi.mv[0] = nearest_mv;
+ mbmi->mv[0] = nearest_mv;
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);
+
d = xd->plane[0].dst.buf;
dp = xd->plane[0].dst.stride;
}
@@ -1216,24 +1225,24 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold, or we
// hit 8x8.
- if (!set_vt_partitioning(cpi, &vt, m, BLOCK_64X64, mi_row, mi_col,
+ if (!set_vt_partitioning(cpi, &vt, mi_8x8, BLOCK_64X64, mi_row, mi_col,
4)) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
- if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_32X32,
+ if (!set_vt_partitioning(cpi, &vt.split[i], mi_8x8, BLOCK_32X32,
(mi_row + y32_idx), (mi_col + x32_idx), 2)) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
- if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m,
+ if (!set_vt_partitioning(cpi, &vt.split[i].split[j], mi_8x8,
BLOCK_16X16,
(mi_row + y32_idx + y16_idx),
(mi_col + x32_idx + x16_idx), 1)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
- set_block_size(cm, m, BLOCK_8X8, mis,
+ set_block_size(cm, mi_8x8, BLOCK_8X8, mis,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx));
}
@@ -1244,9 +1253,10 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
}
}
-static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
- int mi_row, int mi_col, BLOCK_SIZE bsize,
- int *rate, int64_t *dist, int do_recon) {
+static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ TOKENEXTRA **tp, int mi_row, int mi_col,
+ BLOCK_SIZE bsize, int *rate, int64_t *dist,
+ int do_recon) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -1272,7 +1282,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
int64_t chosen_dist = INT_MAX;
BLOCK_SIZE sub_subsize = BLOCK_4X4;
int splits_below = 0;
- BLOCK_SIZE bs_type = m->mbmi.sb_type;
+ BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
@@ -1305,7 +1315,8 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
splits_below = 1;
for (i = 0; i < 4; i++) {
int jj = i >> 1, ii = i & 0x01;
- if (m[jj * bss * mis + ii * bss].mbmi.sb_type >= sub_subsize) {
+ MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss];
+ if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
splits_below = 0;
}
}
@@ -1325,7 +1336,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
none_rate += x->partition_cost[pl][PARTITION_NONE];
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
- m->mbmi.sb_type = bs_type;
+ mi_8x8[0]->mbmi.sb_type = bs_type;
*(get_sb_partitioning(x, bsize)) = subsize;
}
}
@@ -1396,8 +1407,9 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
*get_sb_index(xd, subsize) = i;
- rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
- mi_col + x_idx, subsize, &rt, &dt, i != 3);
+ rd_use_partition(cpi, mi_8x8 + jj * bss * mis + ii * bss, tp,
+ mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
+ i != 3);
if (rt == INT_MAX || dt == INT_MAX) {
last_part_rate = INT_MAX;
last_part_dist = INT_MAX;
@@ -1478,7 +1490,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
// If last_part is better set the partitioning to that...
if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
< RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
- m->mbmi.sb_type = bsize;
+ mi_8x8[0]->mbmi.sb_type = bsize;
if (bsize >= BLOCK_8X8)
*(get_sb_partitioning(x, bsize)) = subsize;
chosen_rate = last_part_rate;
@@ -1526,9 +1538,9 @@ static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
//
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one sb64.
-static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO * mi,
- BLOCK_SIZE *min_block_size,
- BLOCK_SIZE *max_block_size ) {
+static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8,
+ BLOCK_SIZE * min_block_size,
+ BLOCK_SIZE * max_block_size ) {
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
int sb_width_in_blocks = MI_BLOCK_SIZE;
int sb_height_in_blocks = MI_BLOCK_SIZE;
@@ -1538,8 +1550,10 @@ static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO * mi,
// Check the sb_type for each block that belongs to this region.
for (i = 0; i < sb_height_in_blocks; ++i) {
for (j = 0; j < sb_width_in_blocks; ++j) {
- *min_block_size = MIN(*min_block_size, mi[index + j].mbmi.sb_type);
- *max_block_size = MAX(*max_block_size, mi[index + j].mbmi.sb_type);
+ MODE_INFO * mi = mi_8x8[index+j];
+ BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
+ *min_block_size = MIN(*min_block_size, sb_type);
+ *max_block_size = MAX(*max_block_size, sb_type);
}
index += xd->mode_info_stride;
}
@@ -1551,13 +1565,12 @@ static void rd_auto_partition_range(VP9_COMP *cpi,
BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- MODE_INFO *mi = xd->mode_info_context;
- MODE_INFO *above_sb64_mi;
- MODE_INFO *left_sb64_mi;
- const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi;
- const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi;
- const int left_in_image = xd->left_available && left_mbmi->in_image;
- const int above_in_image = xd->up_available && above_mbmi->in_image;
+ MODE_INFO ** mi_8x8 = xd->mi_8x8;
+ const int left_in_image = xd->left_available && mi_8x8[-1];
+ const int above_in_image = xd->up_available &&
+ mi_8x8[-xd->mode_info_stride];
+ MODE_INFO ** above_sb64_mi_8x8;
+ MODE_INFO ** left_sb64_mi_8x8;
// Frequency check
if (cpi->sf.auto_min_max_partition_count <= 0) {
@@ -1565,6 +1578,7 @@ static void rd_auto_partition_range(VP9_COMP *cpi,
cpi->sf.auto_min_max_partition_interval;
*min_block_size = BLOCK_4X4;
*max_block_size = BLOCK_64X64;
+
return;
} else {
--cpi->sf.auto_min_max_partition_count;
@@ -1581,16 +1595,16 @@ static void rd_auto_partition_range(VP9_COMP *cpi,
// Find the min and max partition sizes used in the left SB64
if (left_in_image) {
- left_sb64_mi = &mi[-MI_BLOCK_SIZE];
- get_sb_partition_size_range(cpi, left_sb64_mi,
+ left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE];
+ get_sb_partition_size_range(cpi, left_sb64_mi_8x8,
min_block_size, max_block_size);
}
// Find the min and max partition sizes used in the above SB64 taking
// the values found for left as a starting point.
if (above_in_image) {
- above_sb64_mi = &mi[-xd->mode_info_stride * MI_BLOCK_SIZE];
- get_sb_partition_size_range(cpi, above_sb64_mi,
+ above_sb64_mi_8x8 = &mi_8x8[-xd->mode_info_stride * MI_BLOCK_SIZE];
+ get_sb_partition_size_range(cpi, above_sb64_mi_8x8,
min_block_size, max_block_size);
}
@@ -2002,18 +2016,18 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning ||
cpi->sf.use_one_partition_size_always ) {
const int idx_str = cm->mode_info_stride * mi_row + mi_col;
- MODE_INFO *m = cm->mi + idx_str;
- MODE_INFO *p = cm->prev_mi + idx_str;
+ MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
+ MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
cpi->mb.source_variance = UINT_MAX;
if (cpi->sf.use_one_partition_size_always) {
set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
- set_partitioning(cpi, m, mi_row, mi_col);
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
+ set_partitioning(cpi, mi_8x8, mi_row, mi_col);
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
} else if (cpi->sf.partition_by_variance) {
- choose_partitioning(cpi, cm->mi, mi_row, mi_col);
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
+ choose_partitioning(cpi, cm->mi_grid_visible, mi_row, mi_col);
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
} else {
if ((cpi->common.current_video_frame
@@ -2032,8 +2046,8 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
} else {
- copy_partitioning(cpi, m, p);
- rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
+ copy_partitioning(cpi, mi_8x8, prev_mi_8x8);
+ rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
}
}
@@ -2044,7 +2058,6 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
rd_auto_partition_range(cpi, &cpi->sf.min_partition_size,
&cpi->sf.max_partition_size);
}
-
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
}
@@ -2076,8 +2089,8 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
- xd->mode_info_context->mbmi.mode = DC_PRED;
- xd->mode_info_context->mbmi.uv_mode = DC_PRED;
+ xd->this_mi->mbmi.mode = DC_PRED;
+ xd->this_mi->mbmi.uv_mode = DC_PRED;
vp9_zero(cpi->y_mode_count)
vp9_zero(cpi->y_uv_mode_count)
@@ -2153,8 +2166,14 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vp9_zero(cm->counts.switchable_interp);
vp9_zero(cpi->txfm_stepdown_count);
- xd->mode_info_context = cm->mi;
- xd->prev_mode_info_context = cm->prev_mi;
+ xd->mi_8x8 = cm->mi_grid_visible;
+ // required for vp9_frame_init_quantizer
+ xd->this_mi =
+ xd->mi_8x8[0] = cm->mi;
+ xd->mic_stream_ptr = cm->mi;
+
+ xd->last_mi = cm->prev_mi;
+
vp9_zero(cpi->NMVcount);
vp9_zero(cpi->coef_counts);
@@ -2257,12 +2276,12 @@ static int check_dual_ref_flags(VP9_COMP *cpi) {
}
}
-static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
+static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++) {
- if (!mi[y * mis + x].mbmi.skip_coeff)
+ if (!mi_8x8[y * mis + x]->mbmi.skip_coeff)
return 0;
}
}
@@ -2270,42 +2289,39 @@ static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
return 1;
}
-static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs,
+static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs,
TX_SIZE tx_size) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++)
- mi[y * mis + x].mbmi.tx_size = tx_size;
+ mi_8x8[y * mis + x]->mbmi.tx_size = tx_size;
}
}
-static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi, int mis,
- TX_SIZE max_tx_size, int bw, int bh,
+static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO **mi_8x8,
+ int mis, TX_SIZE max_tx_size, int bw, int bh,
int mi_row, int mi_col, BLOCK_SIZE bsize) {
- VP9_COMMON *const cm = &cpi->common;
- MB_MODE_INFO *const mbmi = &mi->mbmi;
+ VP9_COMMON * const cm = &cpi->common;
+ MB_MODE_INFO * const mbmi = &mi_8x8[0]->mbmi;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (mbmi->tx_size > max_tx_size) {
- MACROBLOCK * const x = &cpi->mb;
- MACROBLOCKD * const xd = &x->e_mbd;
const int ymbs = MIN(bh, cm->mi_rows - mi_row);
const int xmbs = MIN(bw, cm->mi_cols - mi_col);
- xd->mode_info_context = mi;
assert(vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) ||
- get_skip_flag(mi, mis, ymbs, xmbs));
- set_txfm_flag(mi, mis, ymbs, xmbs, max_tx_size);
+ get_skip_flag(mi_8x8, mis, ymbs, xmbs));
+ set_txfm_flag(mi_8x8, mis, ymbs, xmbs, max_tx_size);
}
}
-static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
+static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
TX_SIZE max_tx_size, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
- const VP9_COMMON *const cm = &cpi->common;
+ VP9_COMMON * const cm = &cpi->common;
const int mis = cm->mode_info_stride;
int bw, bh;
const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;
@@ -2313,22 +2329,23 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
- bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
+ bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type];
+ bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type];
if (bw == bs && bh == bs) {
- reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, bs, mi_row,
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, bs, mi_row,
mi_col, bsize);
} else if (bw == bs && bh < bs) {
- reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, hbs, mi_row, mi_col,
- bsize);
- reset_skip_txfm_size_b(cpi, mi + hbs * mis, mis, max_tx_size, bs, hbs,
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, hbs, mi_row,
+ mi_col, bsize);
+ reset_skip_txfm_size_b(cpi, mi_8x8 + hbs * mis, mis, max_tx_size, bs, hbs,
mi_row + hbs, mi_col, bsize);
} else if (bw < bs && bh == bs) {
- reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, hbs, bs, mi_row, mi_col,
- bsize);
- reset_skip_txfm_size_b(cpi, mi + hbs, mis, max_tx_size, hbs, bs, mi_row,
+ reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, hbs, bs, mi_row,
+ mi_col, bsize);
+ reset_skip_txfm_size_b(cpi, mi_8x8 + hbs, mis, max_tx_size, hbs, bs, mi_row,
mi_col + hbs, bsize);
+
} else {
const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
int n;
@@ -2339,7 +2356,7 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
const int mi_dc = hbs * (n & 1);
const int mi_dr = hbs * (n >> 1);
- reset_skip_txfm_size_sb(cpi, &mi[mi_dr * mis + mi_dc], max_tx_size,
+ reset_skip_txfm_size_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size,
mi_row + mi_dr, mi_col + mi_dc, subsize);
}
}
@@ -2349,12 +2366,15 @@ static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
VP9_COMMON * const cm = &cpi->common;
int mi_row, mi_col;
const int mis = cm->mode_info_stride;
- MODE_INFO *mi, *mi_ptr = cm->mi;
+// MODE_INFO *mi, *mi_ptr = cm->mi;
+ MODE_INFO **mi_8x8, **mi_ptr = cm->mi_grid_visible;
for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
- mi = mi_ptr;
- for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8)
- reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col, BLOCK_64X64);
+ mi_8x8 = mi_ptr;
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) {
+ reset_skip_txfm_size_sb(cpi, mi_8x8, txfm_max, mi_row, mi_col,
+ BLOCK_64X64);
+ }
}
}
@@ -2611,7 +2631,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
- MODE_INFO *mi = xd->mode_info_context;
+ MODE_INFO **mi_8x8 = xd->mi_8x8;
+ MODE_INFO *mi = mi_8x8[0];
MB_MODE_INFO *mbmi = &mi->mbmi;
unsigned int segment_id = mbmi->segment_id;
const int mis = cm->mode_info_stride;
@@ -2690,8 +2711,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
} else {
- int mb_skip_context = xd->left_available ? (mi - 1)->mbmi.skip_coeff : 0;
- mb_skip_context += (mi - mis)->mbmi.skip_coeff;
+ int mb_skip_context = xd->left_available ? mi_8x8[-1]->mbmi.skip_coeff : 0;
+ mb_skip_context += mi_8x8[-mis] ? mi_8x8[-mis]->mbmi.skip_coeff : 0;
mbmi->skip_coeff = 1;
if (output_enabled)
@@ -2699,10 +2720,6 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
reset_skip_context(xd, MAX(bsize, BLOCK_8X8));
}
- // copy skip flag on all mb_mode_info contexts in this SB
- // if this was a skip at this txfm size
- vp9_set_pred_flag_mbskip(cm, bsize, mi_row, mi_col, mi->mbmi.skip_coeff);
-
if (output_enabled) {
if (cm->tx_mode == TX_MODE_SELECT &&
mbmi->sb_type >= BLOCK_8X8 &&
@@ -2731,7 +2748,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
for (y = 0; y < mi_height; y++)
for (x = 0; x < mi_width; x++)
if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
- mi[mis * y + x].mbmi.tx_size = sz;
+ mi_8x8[mis * y + x]->mbmi.tx_size = sz;
}
}
}
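
Note on the update_state() hunk near the top of this file: the chosen MODE_INFO is written once to the visible position (*mi_addr = *mi) and every 8x8 cell covered by the block then stores a pointer to that single struct, instead of receiving its own copy. A minimal sketch of that fill, under simplified placeholder types; the edge clipping mirrors the mb_to_right_edge / mb_to_bottom_edge checks in the diff.

/* Share one MODE_INFO across all grid cells covered by a block. */
typedef struct { int sb_type; } MODE_INFO;   /* placeholder struct */

static void share_mi_over_block(MODE_INFO **mi_8x8, int stride,
                                MODE_INFO *mi_addr,
                                int mi_width, int mi_height,
                                int cols_left, int rows_left) {
  int x, y;
  for (y = 0; y < mi_height; y++)
    for (x = 0; x < mi_width; x++)
      if (x < cols_left && y < rows_left)   /* stay inside the frame */
        mi_8x8[x + y * stride] = mi_addr;   /* share the pointer, do not copy */
}

This is why the old "copy mbmi into every covered cell for NEWMV" loop can be deleted: once the cells alias one struct, updating it once updates the whole block.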
diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c
index 588b77421..c5e5dff08 100644
--- a/vp9/encoder/vp9_encodeintra.c
+++ b/vp9/encoder/vp9_encodeintra.c
@@ -16,7 +16,7 @@
#include "vp9/encoder/vp9_encodeintra.h"
int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred) {
- MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
+ MB_MODE_INFO * mbmi = &x->e_mbd.mi_8x8[0]->mbmi;
x->skip_encode = 0;
mbmi->mode = DC_PRED;
mbmi->ref_frame[0] = INTRA_FRAME;
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index da9a3bda0..13287f4f1 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -147,7 +147,7 @@ static void optimize_b(MACROBLOCK *mb,
TX_SIZE tx_size) {
MACROBLOCKD *const xd = &mb->e_mbd;
struct macroblockd_plane *pd = &xd->plane[plane];
- const int ref = is_inter_block(&xd->mode_info_context->mbmi);
+ const int ref = is_inter_block(&xd->this_mi->mbmi);
vp9_token_state tokens[1025][2];
unsigned best_index[1025][2];
const int16_t *coeff_ptr = BLOCK_OFFSET(mb->plane[plane].coeff, block);
@@ -199,7 +199,7 @@ static void optimize_b(MACROBLOCK *mb,
/* Now set up a Viterbi trellis to evaluate alternative roundings. */
rdmult = mb->rdmult * err_mult;
- if (mb->e_mbd.mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME)
+ if (mb->e_mbd.mi_8x8[0]->mbmi.ref_frame[0] == INTRA_FRAME)
rdmult = (rdmult * 9) >> 4;
rddiv = mb->rddiv;
/* Initialize the sentinel node of the trellis. */
@@ -385,7 +385,7 @@ static void optimize_init_b(int plane, BLOCK_SIZE bsize,
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
- const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ const MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi) : mbmi->tx_size;
int i;
@@ -569,7 +569,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
struct encode_b_args* const args = arg;
MACROBLOCK *const x = args->x;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
struct macroblock_plane *const p = &x->plane[plane];
struct macroblockd_plane *const pd = &xd->plane[plane];
int16_t *coeff = BLOCK_OFFSET(p->coeff, block);
@@ -679,7 +679,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
scan = get_scan_4x4(tx_type);
iscan = get_iscan_4x4(tx_type);
if (mbmi->sb_type < BLOCK_8X8 && plane == 0)
- mode = xd->mode_info_context->bmi[block].as_mode;
+ mode = xd->this_mi->bmi[block].as_mode;
else
mode = plane == 0 ? mbmi->mode : mbmi->uv_mode;
diff --git a/vp9/encoder/vp9_encodemv.c b/vp9/encoder/vp9_encodemv.c
index 997728930..ed3a2bb64 100644
--- a/vp9/encoder/vp9_encodemv.c
+++ b/vp9/encoder/vp9_encodemv.c
@@ -316,7 +316,7 @@ void vp9_build_nmv_cost_table(int *mvjoint,
void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
- MODE_INFO *mi = x->e_mbd.mode_info_context;
+ MODE_INFO *mi = x->e_mbd.mi_8x8[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
MV diff;
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
@@ -333,7 +333,7 @@ void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
diff.col = mi->bmi[i].as_mv[0].as_mv.col - best_ref_mv->as_mv.col;
vp9_inc_mv(&diff, &cpi->NMVcount);
- if (x->e_mbd.mode_info_context->mbmi.ref_frame[1] > INTRA_FRAME) {
+ if (mi->mbmi.ref_frame[1] > INTRA_FRAME) {
diff.row = mi->bmi[i].as_mv[1].as_mv.row -
second_best_ref_mv->as_mv.row;
diff.col = mi->bmi[i].as_mv[1].as_mv.col -
diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c
index 92485f934..3eaa9f8b6 100644
--- a/vp9/encoder/vp9_firstpass.c
+++ b/vp9/encoder/vp9_firstpass.c
@@ -346,7 +346,7 @@ static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *r
// Set up pointers for this macro block recon buffer
xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
- switch (xd->mode_info_context->mbmi.sb_type) {
+ switch (xd->this_mi->mbmi.sb_type) {
case BLOCK_8X8:
vp9_mse8x8(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
@@ -385,7 +385,7 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
int n;
vp9_variance_fn_ptr_t v_fn_ptr =
- cpi->fn_ptr[xd->mode_info_context->mbmi.sb_type];
+ cpi->fn_ptr[xd->this_mi->mbmi.sb_type];
int new_mv_mode_penalty = 256;
int sr = 0;
@@ -402,7 +402,7 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
further_steps -= sr;
// override the default variance function to use MSE
- switch (xd->mode_info_context->mbmi.sb_type) {
+ switch (xd->this_mi->mbmi.sb_type) {
case BLOCK_8X8:
v_fn_ptr.vf = vp9_mse8x8;
break;
@@ -505,8 +505,11 @@ void vp9_first_pass(VP9_COMP *cpi) {
setup_dst_planes(xd, new_yv12, 0, 0);
x->partition_info = x->pi;
-
- xd->mode_info_context = cm->mi;
+ xd->mi_8x8 = cm->mi_grid_visible;
+ // required for vp9_frame_init_quantizer
+ xd->this_mi =
+ xd->mi_8x8[0] = cm->mi;
+ xd->mic_stream_ptr = cm->mi;
setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
@@ -549,23 +552,23 @@ void vp9_first_pass(VP9_COMP *cpi) {
if (mb_col * 2 + 1 < cm->mi_cols) {
if (mb_row * 2 + 1 < cm->mi_rows) {
- xd->mode_info_context->mbmi.sb_type = BLOCK_16X16;
+ xd->this_mi->mbmi.sb_type = BLOCK_16X16;
} else {
- xd->mode_info_context->mbmi.sb_type = BLOCK_16X8;
+ xd->this_mi->mbmi.sb_type = BLOCK_16X8;
}
} else {
if (mb_row * 2 + 1 < cm->mi_rows) {
- xd->mode_info_context->mbmi.sb_type = BLOCK_8X16;
+ xd->this_mi->mbmi.sb_type = BLOCK_8X16;
} else {
- xd->mode_info_context->mbmi.sb_type = BLOCK_8X8;
+ xd->this_mi->mbmi.sb_type = BLOCK_8X8;
}
}
- xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
+ xd->this_mi->mbmi.ref_frame[0] = INTRA_FRAME;
set_mi_row_col(cm, xd,
mb_row << 1,
- 1 << mi_height_log2(xd->mode_info_context->mbmi.sb_type),
+ 1 << mi_height_log2(xd->this_mi->mbmi.sb_type),
mb_col << 1,
- 1 << mi_height_log2(xd->mode_info_context->mbmi.sb_type));
+ 1 << mi_height_log2(xd->this_mi->mbmi.sb_type));
// do intra 16x16 prediction
this_error = vp9_encode_intra(x, use_dc_pred);
@@ -661,13 +664,13 @@ void vp9_first_pass(VP9_COMP *cpi) {
mv.as_mv.col <<= 3;
this_error = motion_error;
vp9_set_mbmode_and_mvs(x, NEWMV, &mv);
- xd->mode_info_context->mbmi.tx_size = TX_4X4;
- xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
- xd->mode_info_context->mbmi.ref_frame[1] = NONE;
+ xd->this_mi->mbmi.tx_size = TX_4X4;
+ xd->this_mi->mbmi.ref_frame[0] = LAST_FRAME;
+ xd->this_mi->mbmi.ref_frame[1] = NONE;
vp9_build_inter_predictors_sby(xd, mb_row << 1,
mb_col << 1,
- xd->mode_info_context->mbmi.sb_type);
- vp9_encode_sby(x, xd->mode_info_context->mbmi.sb_type);
+ xd->this_mi->mbmi.sb_type);
+ vp9_encode_sby(x, xd->this_mi->mbmi.sb_type);
sum_mvr += mv.as_mv.row;
sum_mvr_abs += abs(mv.as_mv.row);
sum_mvc += mv.as_mv.col;
diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c
index 1baea643d..5a671f201 100644
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -145,7 +145,7 @@ static int find_best_16x16_intra(VP9_COMP *cpi,
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
unsigned int err;
- xd->mode_info_context->mbmi.mode = mode;
+ xd->this_mi->mbmi.mode = mode;
vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode,
x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride);
@@ -253,7 +253,7 @@ static void update_mbgraph_frame_stats(VP9_COMP *cpi,
xd->plane[0].dst.stride = buf->y_stride;
xd->plane[0].pre[0].stride = buf->y_stride;
xd->plane[1].dst.stride = buf->uv_stride;
- xd->mode_info_context = &mi_local;
+ xd->this_mi = &mi_local;
mi_local.mbmi.sb_type = BLOCK_16X16;
mi_local.mbmi.ref_frame[0] = LAST_FRAME;
mi_local.mbmi.ref_frame[1] = NONE;
diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c
index 55e4c36de..136008847 100644
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -1537,7 +1537,7 @@ int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv,
int in_what_stride = xd->plane[0].pre[0].stride;
int mv_stride = xd->plane[0].pre[0].stride;
uint8_t *bestaddress;
- int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
+ int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
@@ -1625,7 +1625,7 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv,
int in_what_stride = xd->plane[0].pre[0].stride;
int mv_stride = xd->plane[0].pre[0].stride;
uint8_t *bestaddress;
- int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
+ int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
int_mv this_mv;
unsigned int bestsad = INT_MAX;
int r, c;
@@ -1747,7 +1747,7 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
int in_what_stride = xd->plane[0].pre[0].stride;
int mv_stride = xd->plane[0].pre[0].stride;
uint8_t *bestaddress;
- int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
+ int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0];
int_mv this_mv;
unsigned int bestsad = INT_MAX;
int r, c;
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index d35b739fb..1ba304904 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -527,15 +527,15 @@ static void print_seg_map(VP9_COMP *cpi) {
static void update_reference_segmentation_map(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
int row, col;
- MODE_INFO *mi, *mi_ptr = cm->mi;
+ MODE_INFO **mi_8x8, **mi_8x8_ptr = cm->mi_grid_visible;
uint8_t *cache_ptr = cm->last_frame_seg_map, *cache;
for (row = 0; row < cm->mi_rows; row++) {
- mi = mi_ptr;
+ mi_8x8 = mi_8x8_ptr;
cache = cache_ptr;
- for (col = 0; col < cm->mi_cols; col++, mi++, cache++)
- cache[0] = mi->mbmi.segment_id;
- mi_ptr += cm->mode_info_stride;
+ for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++)
+ cache[0] = mi_8x8[0]->mbmi.segment_id;
+ mi_8x8_ptr += cm->mode_info_stride;
cache_ptr += cm->mi_cols;
}
}
@@ -3528,11 +3528,15 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
if (cm->show_frame) {
// current mip will be the prev_mip for the next frame
MODE_INFO *temp = cm->prev_mip;
+ MODE_INFO **temp2 = cm->prev_mi_grid_base;
cm->prev_mip = cm->mip;
cm->mip = temp;
+ cm->prev_mi_grid_base = cm->mi_grid_base;
+ cm->mi_grid_base = temp2;
// update the upper left visible macroblock ptrs
cm->mi = cm->mip + cm->mode_info_stride + 1;
+ cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1;
// Don't increment frame counters if this was an altref buffer
// update not a real frame
@@ -3541,8 +3545,9 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
}
// restore prev_mi
cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1;
+ cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1;
-#if 0
+ #if 0
{
char filename[512];
FILE *recon_file;
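
Note on the show_frame hunk above: both the bordered MODE_INFO arrays and the new bordered pointer grids are double-buffered and swapped per displayed frame, after which the "visible" pointers are re-derived as base + stride + 1 (skipping the one-element border). A simplified bookkeeping sketch, with placeholder types rather than the VP9_COMMON fields:

typedef struct { int dummy; } MODE_INFO;     /* placeholder struct */

typedef struct {
  MODE_INFO *mip, *prev_mip;                       /* bordered struct arrays  */
  MODE_INFO **mi_grid_base, **prev_mi_grid_base;   /* bordered pointer grids  */
  MODE_INFO *mi, *prev_mi;                         /* visible, border skipped */
  MODE_INFO **mi_grid_visible, **prev_mi_grid_visible;
  int mode_info_stride;
} MI_STATE;

static void swap_mi_buffers(MI_STATE *s) {
  MODE_INFO *tmp = s->prev_mip;
  MODE_INFO **tmp2 = s->prev_mi_grid_base;

  /* current becomes previous for the next frame, and vice versa */
  s->prev_mip = s->mip;
  s->mip = tmp;
  s->prev_mi_grid_base = s->mi_grid_base;
  s->mi_grid_base = tmp2;

  /* visible pointers always sit one row and one column past the border */
  s->mi = s->mip + s->mode_info_stride + 1;
  s->prev_mi = s->prev_mip + s->mode_info_stride + 1;
  s->mi_grid_visible = s->mi_grid_base + s->mode_info_stride + 1;
  s->prev_mi_grid_visible = s->prev_mi_grid_base + s->mode_info_stride + 1;
}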
diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c
index 96abeff38..81e14265d 100644
--- a/vp9/encoder/vp9_quantize.c
+++ b/vp9/encoder/vp9_quantize.c
@@ -278,7 +278,7 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
int i;
MACROBLOCKD *xd = &x->e_mbd;
int zbin_extra;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
+ int segment_id = xd->this_mi->mbmi.segment_id;
const int qindex = vp9_get_qindex(&cpi->common.seg, segment_id,
cpi->common.base_qindex);
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 647265bf6..e219442c1 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -474,7 +474,7 @@ static INLINE int cost_coeffs(MACROBLOCK *mb,
TX_SIZE tx_size,
const int16_t *scan, const int16_t *nb) {
MACROBLOCKD *const xd = &mb->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
struct macroblockd_plane *pd = &xd->plane[plane];
const PLANE_TYPE type = pd->plane_type;
const int16_t *band_count = &band_counts[tx_size][1];
@@ -567,7 +567,7 @@ static void dist_block(int plane, int block, TX_SIZE tx_size, void *arg) {
args->sse += this_sse >> shift;
if (x->skip_encode &&
- xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME) {
+ xd->this_mi->mbmi.ref_frame[0] == INTRA_FRAME) {
// TODO(jingning): tune the model to better capture the distortion.
int64_t p = (pd->dequant[1] * pd->dequant[1] *
(1 << ss_txfrm_size)) >> shift;
@@ -610,7 +610,7 @@ static void block_yrd_txfm(int plane, int block, BLOCK_SIZE plane_bsize,
return;
}
- if (!is_inter_block(&xd->mode_info_context->mbmi))
+ if (!is_inter_block(&xd->this_mi->mbmi))
vp9_encode_block_intra(plane, block, plane_bsize, tx_size, &encode_args);
else
vp9_xform_quant(plane, block, plane_bsize, tx_size, &encode_args);
@@ -634,7 +634,7 @@ static void txfm_rd_in_plane(MACROBLOCK *x,
num_4x4_blocks_wide, num_4x4_blocks_high,
0, 0, 0, ref_best_rd, 0 };
if (plane == 0)
- xd->mode_info_context->mbmi.tx_size = tx_size;
+ xd->this_mi->mbmi.tx_size = tx_size;
switch (tx_size) {
case TX_4X4:
@@ -688,7 +688,7 @@ static void choose_largest_txfm_size(VP9_COMP *cpi, MACROBLOCK *x,
const TX_SIZE max_txfm_size = max_txsize_lookup[bs];
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
if (max_txfm_size == TX_32X32 &&
(cm->tx_mode == ALLOW_32X32 ||
cm->tx_mode == TX_MODE_SELECT)) {
@@ -718,13 +718,13 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd);
int64_t rd[TX_SIZES][2];
int n, m;
int s0, s1;
- const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs);
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs, xd->this_mi);
for (n = TX_4X4; n <= max_tx_size; n++) {
r[n][1] = r[n][0];
@@ -821,7 +821,7 @@ static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x,
const TX_SIZE max_txfm_size = max_txsize_lookup[bs];
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd);
int64_t rd[TX_SIZES][2];
int n, m;
@@ -829,7 +829,7 @@ static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x,
double scale_rd[TX_SIZES] = {1.73, 1.44, 1.20, 1.00};
// double scale_r[TX_SIZES] = {2.82, 2.00, 1.41, 1.00};
- const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs);
+ const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs, xd->this_mi);
// for (n = TX_4X4; n <= max_txfm_size; n++)
// r[n][0] = (r[n][0] * scale_r[n]);
@@ -914,7 +914,7 @@ static void super_block_yrd(VP9_COMP *cpi,
int r[TX_SIZES][2], s[TX_SIZES];
int64_t d[TX_SIZES], sse[TX_SIZES];
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
assert(bs == mbmi->sb_type);
if (mbmi->ref_frame[0] > INTRA_FRAME)
@@ -1021,7 +1021,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
vpx_memcpy(ta, a, sizeof(ta));
vpx_memcpy(tl, l, sizeof(tl));
- xd->mode_info_context->mbmi.tx_size = TX_4X4;
+ xd->this_mi->mbmi.tx_size = TX_4X4;
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
int64_t this_rd;
@@ -1051,7 +1051,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
uint8_t *dst = dst_init + idx * 4 + idy * 4 * dst_stride;
block = ib + idy * 2 + idx;
- xd->mode_info_context->bmi[block].as_mode = mode;
+ xd->this_mi->bmi[block].as_mode = mode;
src_diff = raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
vp9_predict_intra_block(xd, block, 1,
@@ -1127,7 +1127,10 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi,
int64_t best_rd) {
int i, j;
MACROBLOCKD *const xd = &mb->e_mbd;
- const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type;
+ MODE_INFO *const mic = xd->this_mi;
+ const MODE_INFO *above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ const MODE_INFO *left_mi = xd->mi_8x8[-1];
+ const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type;
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
@@ -1137,7 +1140,6 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi,
int64_t total_rd = 0;
ENTROPY_CONTEXT t_above[4], t_left[4];
int *bmode_costs;
- MODE_INFO *const mic = xd->mode_info_context;
vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));
@@ -1147,15 +1149,15 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi,
// Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
- const int mis = xd->mode_info_stride;
MB_PREDICTION_MODE best_mode = DC_PRED;
int r = INT_MAX, ry = INT_MAX;
int64_t d = INT64_MAX, this_rd = INT64_MAX;
i = idy * 2 + idx;
if (cpi->common.frame_type == KEY_FRAME) {
- const MB_PREDICTION_MODE A = above_block_mode(mic, i, mis);
+ const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, i);
const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
- left_block_mode(mic, i) : DC_PRED;
+ left_block_mode(mic, left_mi, i) :
+ DC_PRED;
bmode_costs = mb->y_mode_costs[A][L];
}
@@ -1185,7 +1187,7 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi,
*rate = cost;
*rate_y = tot_rate_y;
*distortion = total_distortion;
- xd->mode_info_context->mbmi.mode = mic->bmi[3].as_mode;
+ mic->mbmi.mode = mic->bmi[3].as_mode;
return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
}
@@ -1199,7 +1201,7 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
MB_PREDICTION_MODE mode;
MB_PREDICTION_MODE mode_selected = DC_PRED;
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *const mic = xd->mode_info_context;
+ MODE_INFO *const mic = xd->this_mi;
int this_rate, this_rate_tokenonly, s;
int64_t this_distortion, this_rd;
TX_SIZE best_tx = TX_4X4;
@@ -1213,15 +1215,16 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
/* Y Search for intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
int64_t local_tx_cache[TX_MODES];
- const int mis = xd->mode_info_stride;
+ MODE_INFO *above_mi = xd->mi_8x8[-xd->mode_info_stride];
+ MODE_INFO *left_mi = xd->mi_8x8[-1];
if (!(cpi->sf.intra_y_mode_mask & (1 << mode)))
continue;
if (cpi->common.frame_type == KEY_FRAME) {
- const MB_PREDICTION_MODE A = above_block_mode(mic, 0, mis);
+ const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, 0);
const MB_PREDICTION_MODE L = xd->left_available ?
- left_block_mode(mic, 0) : DC_PRED;
+ left_block_mode(mic, left_mi, 0) : DC_PRED;
bmode_costs = x->y_mode_costs[A][L];
}
@@ -1268,7 +1271,7 @@ static void super_block_uvrd(VP9_COMMON *const cm, MACROBLOCK *x,
int64_t *sse, BLOCK_SIZE bsize,
int64_t ref_best_rd) {
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
TX_SIZE uv_txfm_size = get_uv_tx_size(mbmi);
int plane;
int pnrate = 0, pnskip = 1;
@@ -1323,7 +1326,8 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (!(cpi->sf.intra_uv_mode_mask & (1 << mode)))
continue;
- x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
+ x->e_mbd.mi_8x8[0]->mbmi.uv_mode = mode;
+
super_block_uvrd(&cpi->common, x, &this_rate_tokenonly,
&this_distortion, &s, &this_sse, bsize, best_rd);
if (this_rate_tokenonly == INT_MAX)
@@ -1342,7 +1346,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
}
- x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
+ x->e_mbd.mi_8x8[0]->mbmi.uv_mode = mode_selected;
return best_rd;
}
@@ -1354,7 +1358,7 @@ static int64_t rd_sbuv_dcpred(VP9_COMP *cpi, MACROBLOCK *x,
int64_t this_rd;
int64_t this_sse;
- x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
+ x->e_mbd.mi_8x8[0]->mbmi.uv_mode = DC_PRED;
super_block_uvrd(&cpi->common, x, rate_tokenonly,
distortion, skippable, &this_sse, bsize, INT64_MAX);
*rate = *rate_tokenonly +
@@ -1382,14 +1386,14 @@ static void choose_intra_uv_mode(VP9_COMP *cpi, BLOCK_SIZE bsize,
rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
}
- *mode_uv = x->e_mbd.mode_info_context->mbmi.uv_mode;
+ *mode_uv = x->e_mbd.mi_8x8[0]->mbmi.uv_mode;
}
static int cost_mv_ref(VP9_COMP *cpi, MB_PREDICTION_MODE mode,
int mode_context) {
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
- const int segment_id = xd->mode_info_context->mbmi.segment_id;
+ const int segment_id = xd->this_mi->mbmi.segment_id;
// Don't account for mode here if segment skip is enabled.
if (!vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
@@ -1401,8 +1405,8 @@ static int cost_mv_ref(VP9_COMP *cpi, MB_PREDICTION_MODE mode,
}
void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
- x->e_mbd.mode_info_context->mbmi.mode = mb;
- x->e_mbd.mode_info_context->mbmi.mv[0].as_int = mv->as_int;
+ x->e_mbd.mi_8x8[0]->mbmi.mode = mb;
+ x->e_mbd.mi_8x8[0]->mbmi.mv[0].as_int = mv->as_int;
}
static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
@@ -1425,7 +1429,7 @@ static int labels2mode(MACROBLOCK *x, int i,
int_mv *second_best_ref_mv,
int *mvjcost, int *mvcost[2], VP9_COMP *cpi) {
MACROBLOCKD *const xd = &x->e_mbd;
- MODE_INFO *const mic = xd->mode_info_context;
+ MODE_INFO *const mic = xd->this_mi;
MB_MODE_INFO *mbmi = &mic->mbmi;
int cost = 0, thismvcost = 0;
int idx, idy;
@@ -1499,7 +1503,7 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
int k;
MACROBLOCKD *xd = &x->e_mbd;
struct macroblockd_plane *const pd = &xd->plane[0];
- MODE_INFO *const mi = xd->mode_info_context;
+ MODE_INFO *const mi = xd->this_mi;
const BLOCK_SIZE bsize = mi->mbmi.sb_type;
const int width = plane_block_width(bsize, pd);
const int height = plane_block_height(bsize, pd);
@@ -1599,7 +1603,7 @@ static INLINE int mv_check_bounds(MACROBLOCK *x, int_mv *mv) {
}
static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
- MB_MODE_INFO *const mbmi = &x->e_mbd.mode_info_context->mbmi;
+ MB_MODE_INFO *const mbmi = &x->e_mbd.mi_8x8[0]->mbmi;
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
@@ -1615,7 +1619,7 @@ static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
struct buf_2d orig_pre[2]) {
- MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
+ MB_MODE_INFO *mbmi = &x->e_mbd.mi_8x8[0]->mbmi;
x->plane[0].src = orig_src;
x->e_mbd.plane[0].pre[0] = orig_pre[0];
if (mbmi->ref_frame[1])
@@ -1629,7 +1633,7 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
int i, j, br = 0, idx, idy;
int64_t bd = 0, block_sse = 0;
MB_PREDICTION_MODE this_mode;
- MODE_INFO *mi = x->e_mbd.mode_info_context;
+ MODE_INFO *mi = x->e_mbd.mi_8x8[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
const int label_count = 4;
int64_t this_segment_rd = 0;
@@ -1747,11 +1751,9 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
if (cpi->compressor_speed) {
// use previous block's result as next block's MV predictor.
if (i > 0) {
- bsi->mvp.as_int =
- x->e_mbd.mode_info_context->bmi[i - 1].as_mv[0].as_int;
+ bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
if (i == 2)
- bsi->mvp.as_int =
- x->e_mbd.mode_info_context->bmi[i - 2].as_mv[0].as_int;
+ bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
}
}
if (i == 0)
@@ -1809,13 +1811,11 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
if (thissme < bestsme) {
bestsme = thissme;
- mode_mv[NEWMV].as_int =
- x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int;
+ mode_mv[NEWMV].as_int = mi->bmi[i].as_mv[0].as_int;
} else {
/* The full search result is actually worse so re-instate the
* previous best vector */
- x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int =
- mode_mv[NEWMV].as_int;
+ mi->bmi[i].as_mv[0].as_int = mode_mv[NEWMV].as_int;
}
}
@@ -2016,7 +2016,7 @@ static int64_t rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
int i;
BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
MACROBLOCKD *xd = &x->e_mbd;
- MODE_INFO *mi = xd->mode_info_context;
+ MODE_INFO *mi = xd->this_mi;
MB_MODE_INFO *mbmi = &mi->mbmi;
int mode_idx;
@@ -2062,7 +2062,7 @@ static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
uint8_t *ref_y_buffer, int ref_y_stride,
int ref_frame, BLOCK_SIZE block_size ) {
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
int_mv this_mv;
int i;
int zero_seen = 0;
@@ -2182,7 +2182,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
// restored if we decide to encode this way
ctx->skip = x->skip;
ctx->best_mode_index = mode_index;
- ctx->mic = *xd->mode_info_context;
+ ctx->mic = *xd->this_mi;
if (partition)
ctx->partition_info = *partition;
@@ -2238,7 +2238,7 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
VP9_COMMON *cm = &cpi->common;
YV12_BUFFER_CONFIG *yv12 = &cm->yv12_fb[cpi->common.ref_frame_map[idx]];
MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
// set up scaling factors
scale[frame_type] = cpi->common.active_ref_scale[frame_type - 1];
@@ -2256,8 +2256,8 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
&scale[frame_type], &scale[frame_type]);
// Gets an initial list of candidate vectors from neighbours and orders them
- vp9_find_mv_refs(&cpi->common, xd, xd->mode_info_context,
- xd->prev_mode_info_context,
+ vp9_find_mv_refs(&cpi->common, xd, xd->this_mi,
+ xd->last_mi,
frame_type,
mbmi->ref_mvs[frame_type], mi_row, mi_col);
@@ -2285,7 +2285,7 @@ static YV12_BUFFER_CONFIG *get_scaled_ref_frame(VP9_COMP *cpi, int ref_frame) {
static INLINE int get_switchable_rate(const MACROBLOCK *x) {
const MACROBLOCKD *const xd = &x->e_mbd;
- const MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ const MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
const int ctx = vp9_get_pred_context_switchable_interp(xd);
return SWITCHABLE_INTERP_RATE_FACTOR *
x->switchable_interp_costs[ctx][mbmi->interp_filter];
@@ -2297,7 +2297,7 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
int_mv *tmp_mv, int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
VP9_COMMON *cm = &cpi->common;
- MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}};
int bestsme = INT_MAX;
int further_steps, step_param;
@@ -2425,7 +2425,7 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
int *rate_mv) {
int pw = 4 << b_width_log2(bsize), ph = 4 << b_height_log2(bsize);
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
int refs[2] = { mbmi->ref_frame[0],
(mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
int_mv ref_mv[2];
@@ -2590,7 +2590,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
const int64_t ref_best_rd) {
VP9_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
const int is_comp_pred = (mbmi->ref_frame[1] > 0);
const int num_refs = is_comp_pred ? 2 : 1;
const int this_mode = mbmi->mode;
@@ -2636,7 +2636,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
*rate2 += rate_mv;
frame_mv[refs[0]].as_int =
- xd->mode_info_context->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
+ xd->this_mi->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
single_newmv[refs[0]].as_int = tmp_mv.as_int;
}
}
@@ -2991,11 +2991,11 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
- int y_skip = 0, uv_skip;
+ int y_skip = 0, uv_skip = 0;
int64_t dist_y = 0, dist_uv = 0, tx_cache[TX_MODES] = { 0 };
x->skip_encode = 0;
ctx->skip = 0;
- xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
+ xd->this_mi->mbmi.ref_frame[0] = INTRA_FRAME;
if (bsize >= BLOCK_8X8) {
if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
&dist_y, &y_skip, bsize, tx_cache,
@@ -3031,7 +3031,7 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
ctx->tx_rd_diff[i] = tx_cache[i] - tx_cache[cm->tx_mode];
}
- ctx->mic = *xd->mode_info_context;
+ ctx->mic = *xd->this_mi;
}
int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
@@ -3043,12 +3043,12 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int64_t best_rd_so_far) {
VP9_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &x->e_mbd;
- MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
const struct segmentation *seg = &cm->seg;
const BLOCK_SIZE block_size = get_plane_block_size(bsize, &xd->plane[0]);
MB_PREDICTION_MODE this_mode;
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
- unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
+ unsigned char segment_id = mbmi->segment_id;
int comp_pred, i;
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
struct buf_2d yv12_mb[4][MAX_MB_PLANE];
@@ -3458,7 +3458,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
cpi->rd_threshes[bsize][THR_NEWA];
this_rd_thresh = (ref_frame == GOLDEN_FRAME) ?
cpi->rd_threshes[bsize][THR_NEWG] : this_rd_thresh;
- xd->mode_info_context->mbmi.tx_size = TX_4X4;
+ xd->this_mi->mbmi.tx_size = TX_4X4;
cpi->rd_filter_cache[SWITCHABLE_FILTERS] = INT64_MAX;
if (cm->mcomp_filter_type != BILINEAR) {
@@ -3514,7 +3514,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
tmp_best_mbmode = *mbmi;
tmp_best_partition = *x->partition_info;
for (i = 0; i < 4; i++)
- tmp_best_bmodes[i] = xd->mode_info_context->bmi[i];
+ tmp_best_bmodes[i] = xd->this_mi->bmi[i];
pred_exists = 1;
if (switchable_filter_index == 0 &&
cpi->sf.use_rd_breakout &&
@@ -3566,7 +3566,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
*mbmi = tmp_best_mbmode;
*x->partition_info = tmp_best_partition;
for (i = 0; i < 4; i++)
- xd->mode_info_context->bmi[i] = tmp_best_bmodes[i];
+ xd->this_mi->bmi[i] = tmp_best_bmodes[i];
}
rate2 += rate;
@@ -3690,20 +3690,20 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
}
// Keep record of best intra rd
- if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME &&
- is_intra_mode(xd->mode_info_context->mbmi.mode) &&
+ if (xd->this_mi->mbmi.ref_frame[0] == INTRA_FRAME &&
+ is_intra_mode(xd->this_mi->mbmi.mode) &&
this_rd < best_intra_rd) {
best_intra_rd = this_rd;
- best_intra_mode = xd->mode_info_context->mbmi.mode;
+ best_intra_mode = xd->this_mi->mbmi.mode;
}
// Keep record of best inter rd with single reference
- if (xd->mode_info_context->mbmi.ref_frame[0] > INTRA_FRAME &&
- xd->mode_info_context->mbmi.ref_frame[1] == NONE &&
+ if (xd->this_mi->mbmi.ref_frame[0] > INTRA_FRAME &&
+ xd->this_mi->mbmi.ref_frame[1] == NONE &&
!mode_excluded &&
this_rd < best_inter_rd) {
best_inter_rd = this_rd;
best_inter_ref_frame = ref_frame;
- // best_inter_mode = xd->mode_info_context->mbmi.mode;
+ // best_inter_mode = xd->this_mi->mbmi.mode;
}
if (!disable_skip && ref_frame == INTRA_FRAME) {
@@ -3747,7 +3747,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
if (this_mode == I4X4_PRED || this_mode == SPLITMV)
for (i = 0; i < 4; i++)
- best_bmodes[i] = xd->mode_info_context->bmi[i];
+ best_bmodes[i] = xd->this_mi->bmi[i];
// TODO(debargha): enhance this test with a better distortion prediction
// based on qp, activity mask and history
@@ -3926,24 +3926,24 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
if (best_mbmode.ref_frame[0] == INTRA_FRAME &&
best_mbmode.sb_type < BLOCK_8X8) {
for (i = 0; i < 4; i++)
- xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
+ xd->this_mi->bmi[i].as_mode = best_bmodes[i].as_mode;
}
if (best_mbmode.ref_frame[0] != INTRA_FRAME &&
best_mbmode.sb_type < BLOCK_8X8) {
for (i = 0; i < 4; i++)
- xd->mode_info_context->bmi[i].as_mv[0].as_int =
+ xd->this_mi->bmi[i].as_mv[0].as_int =
best_bmodes[i].as_mv[0].as_int;
if (mbmi->ref_frame[1] > 0)
for (i = 0; i < 4; i++)
- xd->mode_info_context->bmi[i].as_mv[1].as_int =
+ xd->this_mi->bmi[i].as_mv[1].as_int =
best_bmodes[i].as_mv[1].as_int;
*x->partition_info = best_partition;
- mbmi->mv[0].as_int = xd->mode_info_context->bmi[3].as_mv[0].as_int;
- mbmi->mv[1].as_int = xd->mode_info_context->bmi[3].as_mv[1].as_int;
+ mbmi->mv[0].as_int = xd->this_mi->bmi[3].as_mv[0].as_int;
+ mbmi->mv[1].as_int = xd->this_mi->bmi[3].as_mv[1].as_int;
}
for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
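
Note on the intra mode costing hunks above: with the pointer grid, the above and left neighbours come from xd->mi_8x8[-mode_info_stride] and xd->mi_8x8[-1], and either entry may be absent at the frame border. A minimal sketch of that lookup with a hypothetical helper (not the real above_block_mode()/left_block_mode() declarations), using placeholder types; the caller must pass a pointer into the interior of the grid so the negative offsets stay in bounds.

typedef enum { DC_PRED = 0 } MB_PREDICTION_MODE;   /* only the fallback value */
typedef struct { MB_PREDICTION_MODE mode; } MODE_INFO;  /* placeholder struct */

static MB_PREDICTION_MODE neighbour_mode(MODE_INFO **mi_8x8, int stride,
                                         int want_above, int in_image) {
  MODE_INFO *nb = want_above ? mi_8x8[-stride] : mi_8x8[-1];
  /* Fall back to DC_PRED when the neighbour is off-frame or unset. */
  return (in_image && nb) ? nb->mode : DC_PRED;
}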
diff --git a/vp9/encoder/vp9_segmentation.c b/vp9/encoder/vp9_segmentation.c
index 0a6d2abe2..10655e8a7 100644
--- a/vp9/encoder/vp9_segmentation.c
+++ b/vp9/encoder/vp9_segmentation.c
@@ -117,7 +117,7 @@ static int cost_segmap(int *segcounts, vp9_prob *probs) {
return cost;
}
-static void count_segs(VP9_COMP *cpi, MODE_INFO *mi,
+static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8,
int *no_pred_segcounts,
int (*temporal_predictor_count)[2],
int *t_unpred_seg_counts,
@@ -129,8 +129,8 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO *mi,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- segment_id = mi->mbmi.segment_id;
- xd->mode_info_context = mi;
+ segment_id = mi_8x8[0]->mbmi.segment_id;
+
set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);
// Count the number of hits on each segment with no prediction
@@ -138,7 +138,7 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO *mi,
// Temporal prediction not allowed on key frames
if (cm->frame_type != KEY_FRAME) {
- const BLOCK_SIZE bsize = mi->mbmi.sb_type;
+ const BLOCK_SIZE bsize = mi_8x8[0]->mbmi.sb_type;
// Test to see if the segment id matches the predicted value.
const int pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
bsize, mi_row, mi_col);
@@ -147,7 +147,7 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO *mi,
// Store the prediction status for this mb and update counts
// as appropriate
- vp9_set_pred_flag_seg_id(cm, bsize, mi_row, mi_col, pred_flag);
+ vp9_set_pred_flag_seg_id(xd, pred_flag);
temporal_predictor_count[pred_context][pred_flag]++;
if (!pred_flag)
@@ -156,7 +156,7 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO *mi,
}
}
-static void count_segs_sb(VP9_COMP *cpi, MODE_INFO *mi,
+static void count_segs_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8,
int *no_pred_segcounts,
int (*temporal_predictor_count)[2],
int *t_unpred_seg_counts,
@@ -170,21 +170,22 @@ static void count_segs_sb(VP9_COMP *cpi, MODE_INFO *mi,
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
- bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
- bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
+ bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type];
+ bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type];
if (bw == bs && bh == bs) {
- count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
+ count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, bs, bs, mi_row, mi_col);
} else if (bw == bs && bh < bs) {
- count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
+ count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
- count_segs(cpi, mi + hbs * mis, no_pred_segcounts, temporal_predictor_count,
- t_unpred_seg_counts, bs, hbs, mi_row + hbs, mi_col);
+ count_segs(cpi, mi_8x8 + hbs * mis, no_pred_segcounts,
+ temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
+ mi_row + hbs, mi_col);
} else if (bw < bs && bh == bs) {
- count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count,
+ count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
- count_segs(cpi, mi + hbs, no_pred_segcounts, temporal_predictor_count,
+ count_segs(cpi, mi_8x8 + hbs, no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts, hbs, bs, mi_row, mi_col + hbs);
} else {
const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
@@ -196,7 +197,7 @@ static void count_segs_sb(VP9_COMP *cpi, MODE_INFO *mi,
const int mi_dc = hbs * (n & 1);
const int mi_dr = hbs * (n >> 1);
- count_segs_sb(cpi, &mi[mi_dr * mis + mi_dc],
+ count_segs_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc],
no_pred_segcounts, temporal_predictor_count,
t_unpred_seg_counts,
mi_row + mi_dr, mi_col + mi_dc, subsize);
@@ -222,7 +223,7 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
vp9_prob t_nopred_prob[PREDICTION_PROBS];
const int mis = cm->mode_info_stride;
- MODE_INFO *mi_ptr, *mi;
+ MODE_INFO **mi_ptr, **mi;
// Set default state for the segment tree probabilities and the
// temporal coding probabilities
@@ -233,7 +234,7 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
// predicts this one
for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) {
vp9_get_tile_col_offsets(cm, tile_col);
- mi_ptr = cm->mi + cm->cur_tile_mi_col_start;
+ mi_ptr = cm->mi_grid_visible + cm->cur_tile_mi_col_start;
for (mi_row = 0; mi_row < cm->mi_rows;
mi_row += 8, mi_ptr += 8 * mis) {
mi = mi_ptr;
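The quadrant recursion that count_segs_sb() keeps is unchanged apart from the
element type: the grid walked from cm->mi_grid_visible now holds MODE_INFO
pointers, so sub-blocks are still reached with the same hbs/mis offset
arithmetic. A condensed, hypothetical sketch of that loop (walk_quadrants(),
visit() and the stand-in type are not part of the commit):

typedef struct mode_info_sk mode_info_sk;  /* stand-in for MODE_INFO */

/* Visit the four quadrants of a split block in the pointer grid; 'mis' is the
 * mode-info stride and 'hbs' is half the block size in 8x8 units, as in
 * count_segs_sb() above. */
static void walk_quadrants(mode_info_sk **mi_8x8, int mis, int hbs,
                           void (*visit)(mode_info_sk **block)) {
  int n;
  for (n = 0; n < 4; ++n) {
    const int mi_dc = hbs * (n & 1);   /* column offset of quadrant n */
    const int mi_dr = hbs * (n >> 1);  /* row offset of quadrant n */
    visit(&mi_8x8[mi_dr * mis + mi_dc]);
  }
}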
diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c
index 3052e8f70..63826eea5 100644
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -153,7 +153,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
/*cpi->sf.search_method == HEX*/
// TODO Check that the 16x16 vf & sdf are selected here
// Ignore mv costing by sending NULL pointer instead of cost arrays
- ref_mv = &x->e_mbd.mode_info_context->bmi[0].as_mv[0];
+ ref_mv = &x->e_mbd.mi_8x8[0]->bmi[0].as_mv[0];
bestsme = vp9_hex_search(x, &best_ref_mv1_full,
step_param, sadpb, 1,
&cpi->fn_ptr[BLOCK_16X16],
@@ -245,8 +245,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
if (cpi->frames[frame] == NULL)
continue;
- mbd->mode_info_context->bmi[0].as_mv[0].as_mv.row = 0;
- mbd->mode_info_context->bmi[0].as_mv[0].as_mv.col = 0;
+ mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row = 0;
+ mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col = 0;
if (frame == alt_ref_index) {
filter_weight = 2;
@@ -279,8 +279,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
cpi->frames[frame]->u_buffer + mb_uv_offset,
cpi->frames[frame]->v_buffer + mb_uv_offset,
cpi->frames[frame]->y_stride,
- mbd->mode_info_context->bmi[0].as_mv[0].as_mv.row,
- mbd->mode_info_context->bmi[0].as_mv[0].as_mv.col,
+ mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row,
+ mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col,
predictor);
// Apply the filter (YUV)
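The temporal filter hunks follow the same pattern, reading and resetting the
block-0 reference MV through the first grid pointer instead of an embedded
mode_info_context. A tiny sketch with the same kind of simplified stand-in
types (reset_ref_mv() and the union layout are illustrative, not the real
int_mv):

typedef union {
  struct { short row, col; } as_mv;
  int as_int;
} int_mv_sk;

typedef struct { int_mv_sk as_mv[2]; } b_mode_info_sk;
typedef struct { b_mode_info_sk bmi[4]; } mode_info_sk;
typedef struct { mode_info_sk **mi_8x8; } macroblockd_sk;

/* Zero the block-0 reference MV before each frame's motion search, as the
 * temporal_filter_iterate_c() hunk does via mbd->mi_8x8[0]. */
static void reset_ref_mv(macroblockd_sk *mbd) {
  mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row = 0;
  mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col = 0;
}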
diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c
index 03bf14716..0c9bf9db2 100644
--- a/vp9/encoder/vp9_tokenize.c
+++ b/vp9/encoder/vp9_tokenize.c
@@ -114,7 +114,7 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize,
MACROBLOCKD *xd = args->xd;
TOKENEXTRA **tp = args->tp;
struct macroblockd_plane *pd = &xd->plane[plane];
- MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *mbmi = &xd->this_mi->mbmi;
int pt; /* near block/prev token context index */
int c = 0, rc = 0;
TOKENEXTRA *t = *tp; /* store tokens starting here */
@@ -210,7 +210,7 @@ void vp9_tokenize_sb(VP9_COMP *cpi, TOKENEXTRA **t, int dry_run,
BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
+ MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi;
TOKENEXTRA *t_backup = *t;
const int mb_skip_context = vp9_get_pred_context_mbskip(xd);
const int skip_inc = !vp9_segfeature_active(&cm->seg, mbmi->segment_id,