From 54a03e20ddffd1fd9abf960daa74572cf173a5c8 Mon Sep 17 00:00:00 2001 From: James Zern Date: Mon, 9 Sep 2013 13:37:01 -0700 Subject: Revert "New mode_info_context storage" This reverts commit dae17734ece414091ba1184f7becd0aa6c0004f1 Encode crashes, leaks and increases integer overflow errors. Change-Id: I595aa2649bb8d0b6552ff91652837a74c103fda2 --- vp9/common/vp9_alloccommon.c | 33 +++--- vp9/common/vp9_alloccommon.h | 1 + vp9/common/vp9_blockd.h | 22 ++-- vp9/common/vp9_debugmodes.c | 11 +- vp9/common/vp9_entropymode.c | 3 + vp9/common/vp9_findnearmv.c | 4 +- vp9/common/vp9_findnearmv.h | 37 +++--- vp9/common/vp9_loopfilter.c | 88 +++++++------- vp9/common/vp9_mvref_common.c | 15 +-- vp9/common/vp9_onyxc_int.h | 4 - vp9/common/vp9_pred_common.c | 135 +++++++++++---------- vp9/common/vp9_pred_common.h | 34 +++--- vp9/common/vp9_reconinter.c | 10 +- vp9/decoder/vp9_decodemv.c | 34 +++--- vp9/decoder/vp9_decodframe.c | 29 ++--- vp9/decoder/vp9_detokenize.c | 4 +- vp9/decoder/vp9_onyxd_if.c | 5 - vp9/encoder/vp9_bitstream.c | 72 +++++------- vp9/encoder/vp9_encodeframe.c | 241 ++++++++++++++++++-------------------- vp9/encoder/vp9_encodeintra.c | 2 +- vp9/encoder/vp9_encodemb.c | 10 +- vp9/encoder/vp9_encodemv.c | 4 +- vp9/encoder/vp9_firstpass.c | 37 +++--- vp9/encoder/vp9_mbgraph.c | 4 +- vp9/encoder/vp9_mcomp.c | 6 +- vp9/encoder/vp9_onyx_if.c | 17 +-- vp9/encoder/vp9_quantize.c | 2 +- vp9/encoder/vp9_rdopt.c | 148 +++++++++++------------ vp9/encoder/vp9_segmentation.c | 35 +++--- vp9/encoder/vp9_temporal_filter.c | 10 +- vp9/encoder/vp9_tokenize.c | 4 +- 31 files changed, 503 insertions(+), 558 deletions(-) (limited to 'vp9') diff --git a/vp9/common/vp9_alloccommon.c b/vp9/common/vp9_alloccommon.c index 4937fc899..e89fea8a4 100644 --- a/vp9/common/vp9_alloccommon.c +++ b/vp9/common/vp9_alloccommon.c @@ -31,6 +31,22 @@ void vp9_update_mode_info_border(VP9_COMMON *cm, MODE_INFO *mi) { vpx_memset(&mi[i * stride], 0, sizeof(MODE_INFO)); } +void vp9_update_mode_info_in_image(VP9_COMMON *cm, MODE_INFO *mi) { + int i, j; + + // For each in image mode_info element set the in image flag to 1 + for (i = 0; i < cm->mi_rows; i++) { + MODE_INFO *ptr = mi; + for (j = 0; j < cm->mi_cols; j++) { + ptr->mbmi.in_image = 1; + ptr++; // Next element in the row + } + + // Step over border element at start of next row + mi += cm->mode_info_stride; + } +} + void vp9_free_frame_buffers(VP9_COMMON *cm) { int i; @@ -66,18 +82,15 @@ static void set_mb_mi(VP9_COMMON *cm, int aligned_width, int aligned_height) { static void setup_mi(VP9_COMMON *cm) { cm->mi = cm->mip + cm->mode_info_stride + 1; cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1; - cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1; - cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1; vpx_memset(cm->mip, 0, cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO)); - vpx_memset(cm->mi_grid_base, 0, - cm->mode_info_stride * (cm->mi_rows + 1) * - sizeof(*cm->mi_grid_base)); - vp9_update_mode_info_border(cm, cm->mip); + vp9_update_mode_info_in_image(cm, cm->mi); + vp9_update_mode_info_border(cm, cm->prev_mip); + vp9_update_mode_info_in_image(cm, cm->prev_mi); } int vp9_alloc_frame_buffers(VP9_COMMON *cm, int width, int height) { @@ -126,14 +139,6 @@ int vp9_alloc_frame_buffers(VP9_COMMON *cm, int width, int height) { if (!cm->prev_mip) goto fail; - cm->mi_grid_base = vpx_calloc(mi_size, sizeof(*cm->mi_grid_base)); - if (!cm->mi_grid_base) - goto fail; - - cm->prev_mi_grid_base = vpx_calloc(mi_size, 
sizeof(*cm->prev_mi_grid_base)); - if (!cm->prev_mi_grid_base) - goto fail; - setup_mi(cm); // FIXME(jkoleszar): allocate subsampled arrays for U/V once subsampling diff --git a/vp9/common/vp9_alloccommon.h b/vp9/common/vp9_alloccommon.h index 5d5fae993..b7d7eba72 100644 --- a/vp9/common/vp9_alloccommon.h +++ b/vp9/common/vp9_alloccommon.h @@ -17,6 +17,7 @@ void vp9_initialize_common(); void vp9_update_mode_info_border(VP9_COMMON *cm, MODE_INFO *mi); +void vp9_update_mode_info_in_image(VP9_COMMON *cm, MODE_INFO *mi); void vp9_create_common(VP9_COMMON *cm); void vp9_remove_common(VP9_COMMON *cm); diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h index c8d677fb9..5ba784671 100644 --- a/vp9/common/vp9_blockd.h +++ b/vp9/common/vp9_blockd.h @@ -147,6 +147,10 @@ typedef struct { // Flags used for prediction status of various bit-stream signals unsigned char seg_id_predicted; + // Indicates if the block is part of the image (1) vs border (0) + // This can be useful in determining whether it provides a valid predictor + unsigned char in_image; + INTERPOLATIONFILTERTYPE interp_filter; BLOCK_SIZE sb_type; @@ -202,16 +206,10 @@ typedef struct macroblockd { struct scale_factors scale_factor[2]; - MODE_INFO *last_mi; - MODE_INFO *this_mi; + MODE_INFO *prev_mode_info_context; + MODE_INFO *mode_info_context; int mode_info_stride; - MODE_INFO *mic_stream_ptr; - - // A NULL indicates that the 8x8 is not part of the image - MODE_INFO **mi_8x8; - MODE_INFO **prev_mi_8x8; - int up_available; int left_available; int right_available; @@ -321,7 +319,7 @@ extern const TX_TYPE mode2txfm_map[MB_MODE_COUNT]; static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type, const MACROBLOCKD *xd, int ib) { - const MODE_INFO *const mi = xd->this_mi; + const MODE_INFO *const mi = xd->mode_info_context; const MB_MODE_INFO *const mbmi = &mi->mbmi; if (plane_type != PLANE_TYPE_Y_WITH_DC || @@ -336,13 +334,13 @@ static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type, static INLINE TX_TYPE get_tx_type_8x8(PLANE_TYPE plane_type, const MACROBLOCKD *xd) { return plane_type == PLANE_TYPE_Y_WITH_DC ? - mode2txfm_map[xd->this_mi->mbmi.mode] : DCT_DCT; + mode2txfm_map[xd->mode_info_context->mbmi.mode] : DCT_DCT; } static INLINE TX_TYPE get_tx_type_16x16(PLANE_TYPE plane_type, const MACROBLOCKD *xd) { return plane_type == PLANE_TYPE_Y_WITH_DC ? - mode2txfm_map[xd->this_mi->mbmi.mode] : DCT_DCT; + mode2txfm_map[xd->mode_info_context->mbmi.mode] : DCT_DCT; } static void setup_block_dptrs(MACROBLOCKD *xd, int ss_x, int ss_y) { @@ -391,7 +389,7 @@ static INLINE void foreach_transformed_block_in_plane( const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane, foreach_transformed_block_visitor visit, void *arg) { const struct macroblockd_plane *const pd = &xd->plane[plane]; - const MB_MODE_INFO* mbmi = &xd->this_mi->mbmi; + const MB_MODE_INFO* mbmi = &xd->mode_info_context->mbmi; // block and transform sizes, in number of 4x4 blocks log 2 ("*_b") // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8 // transform size varies per plane, look it up in a common way. 
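The hunks above (vp9_alloccommon.c, vp9_alloccommon.h, vp9_blockd.h) restore the pre-grid storage model that the reverted change had replaced: MACROBLOCKD again carries a direct mode_info_context pointer into the contiguous cm->mi array (and prev_mode_info_context into cm->prev_mi), and MB_MODE_INFO regains an in_image flag so real blocks can be told apart from the zero-filled one-element border, instead of the mi_8x8/mi_grid_visible pointer grid in which a NULL entry marked off-image positions. The sketch below only illustrates that layout under simplified, hypothetical types (MiEntry, MiGrid and alloc_mode_info are not vp9 symbols); the real allocator also aligns sizes and keeps separate mip/prev_mip buffers.

#include <stdlib.h>

/* Hypothetical, cut-down stand-ins for MODE_INFO and the VP9_COMMON
 * fields involved; only the border/in_image bookkeeping is shown.   */
typedef struct { unsigned char in_image; } MiEntry;

typedef struct {
  MiEntry *mip;      /* allocated base, includes the border ring      */
  MiEntry *mi;       /* first in-image entry: mip + stride + 1        */
  int stride;        /* mi_cols plus one border column                */
  int mi_rows, mi_cols;
} MiGrid;

/* Same idea as setup_mi() plus vp9_update_mode_info_in_image():
 * zero everything (border entries then read as "flag = 0"), offset
 * the visible pointer past the border row and column, and tag the
 * in-image entries.                                                  */
static int alloc_mode_info(MiGrid *g, int mi_rows, int mi_cols) {
  int r, c;
  g->mi_rows = mi_rows;
  g->mi_cols = mi_cols;
  g->stride = mi_cols + 1;
  g->mip = calloc((size_t)(mi_rows + 1) * g->stride, sizeof(*g->mip));
  if (!g->mip) return -1;
  g->mi = g->mip + g->stride + 1;
  for (r = 0; r < mi_rows; ++r)
    for (c = 0; c < mi_cols; ++c)
      g->mi[r * g->stride + c].in_image = 1;
  return 0;
}

With this arrangement mi[-1] and mi[-stride] are always addressable for an in-image entry, and because the border is zero-filled its prediction flags (and in_image bit) read as 0, which is what the vp9_pred_common.c context code further down relies on.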
diff --git a/vp9/common/vp9_debugmodes.c b/vp9/common/vp9_debugmodes.c index 79f769e40..4de50aacf 100644 --- a/vp9/common/vp9_debugmodes.c +++ b/vp9/common/vp9_debugmodes.c @@ -27,7 +27,7 @@ static void print_mi_data(VP9_COMMON *cm, FILE *file, char *descriptor, int mi_row; int mi_col; int mi_index = 0; - MODE_INFO **mi_8x8 = cm->mi_grid_visible; + MODE_INFO *mi = cm->mi; int rows = cm->mi_rows; int cols = cm->mi_cols; char prefix = descriptor[0]; @@ -38,8 +38,7 @@ static void print_mi_data(VP9_COMMON *cm, FILE *file, char *descriptor, fprintf(file, "%c ", prefix); for (mi_col = 0; mi_col < cols; mi_col++) { fprintf(file, "%2d ", - *((int*) ((char *) (&mi_8x8[mi_index]->mbmi) + - member_offset))); + *((int*) ((char *) (&mi[mi_index].mbmi) + member_offset))); mi_index++; } fprintf(file, "\n"); @@ -52,7 +51,7 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, char *file) { int mi_col; int mi_index = 0; FILE *mvs = fopen(file, "a"); - MODE_INFO **mi_8x8 = cm->mi_grid_visible; + MODE_INFO *mi = cm->mi; int rows = cm->mi_rows; int cols = cm->mi_cols; @@ -67,8 +66,8 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, char *file) { for (mi_row = 0; mi_row < rows; mi_row++) { fprintf(mvs,"V "); for (mi_col = 0; mi_col < cols; mi_col++) { - fprintf(mvs, "%4d:%4d ", mi_8x8[mi_index]->mbmi.mv[0].as_mv.row, - mi_8x8[mi_index]->mbmi.mv[0].as_mv.col); + fprintf(mvs, "%4d:%4d ", mi[mi_index].mbmi.mv[0].as_mv.row, + mi[mi_index].mbmi.mv[0].as_mv.col); mi_index++; } fprintf(mvs, "\n"); diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c index 93c89b03a..a75d1a9a4 100644 --- a/vp9/common/vp9_entropymode.c +++ b/vp9/common/vp9_entropymode.c @@ -510,7 +510,10 @@ void vp9_setup_past_independence(VP9_COMMON *cm) { cm->mode_info_stride * (cm->mi_rows + 1) * sizeof(MODE_INFO)); vp9_update_mode_info_border(cm, cm->mip); + vp9_update_mode_info_in_image(cm, cm->mi); + vp9_update_mode_info_border(cm, cm->prev_mip); + vp9_update_mode_info_in_image(cm, cm->prev_mi); vp9_zero(cm->ref_frame_sign_bias); diff --git a/vp9/common/vp9_findnearmv.c b/vp9/common/vp9_findnearmv.c index 49a731fdb..c158c92dc 100644 --- a/vp9/common/vp9_findnearmv.c +++ b/vp9/common/vp9_findnearmv.c @@ -43,12 +43,12 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, int mi_row, int mi_col) { int_mv dst_list[MAX_MV_REF_CANDIDATES]; int_mv mv_list[MAX_MV_REF_CANDIDATES]; - MODE_INFO *const mi = xd->this_mi; + MODE_INFO *const mi = xd->mode_info_context; assert(ref_idx == 0 || ref_idx == 1); assert(MAX_MV_REF_CANDIDATES == 2); // makes code here slightly easier - vp9_find_mv_refs_idx(cm, xd, mi, xd->last_mi, + vp9_find_mv_refs_idx(cm, xd, mi, xd->prev_mode_info_context, mi->mbmi.ref_frame[ref_idx], mv_list, block_idx, mi_row, mi_col); diff --git a/vp9/common/vp9_findnearmv.h b/vp9/common/vp9_findnearmv.h index ad0d882b9..72572dfb1 100644 --- a/vp9/common/vp9_findnearmv.h +++ b/vp9/common/vp9_findnearmv.h @@ -43,50 +43,41 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, int block_idx, int ref_idx, int mi_row, int mi_col); -static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, - const MODE_INFO *left_mb, int b) { +static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b) { // FIXME(rbultje, jingning): temporary hack because jenkins doesn't // understand this condition. This will go away soon. 
- const MODE_INFO *mi = cur_mb; - if (b == 0 || b == 2) { /* On L edge, get from MB to left of us */ - mi = left_mb; - if (!mi) - return DC_PRED; + --cur_mb; - if (mi->mbmi.ref_frame[0] != INTRA_FRAME) { + if (is_inter_block(&cur_mb->mbmi)) { return DC_PRED; - } else if (mi->mbmi.sb_type < BLOCK_8X8) { - return ((mi->bmi + 1 + b)->as_mode); + } else if (cur_mb->mbmi.sb_type < BLOCK_8X8) { + return (cur_mb->bmi + 1 + b)->as_mode; } else { - return mi->mbmi.mode; + return cur_mb->mbmi.mode; } } assert(b == 1 || b == 3); - return (mi->bmi + b - 1)->as_mode; + return (cur_mb->bmi + b - 1)->as_mode; } static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb, - const MODE_INFO *above_mb, int b) { - const MODE_INFO *mi = cur_mb; - + int b, int mi_stride) { if (!(b >> 1)) { /* On top edge, get from MB above us */ - mi = above_mb; - if (!mi) - return DC_PRED; + cur_mb -= mi_stride; - if (mi->mbmi.ref_frame[0] != INTRA_FRAME) { + if (is_inter_block(&cur_mb->mbmi)) { return DC_PRED; - } else if (mi->mbmi.sb_type < BLOCK_8X8) { - return ((mi->bmi + 2 + b)->as_mode); + } else if (cur_mb->mbmi.sb_type < BLOCK_8X8) { + return (cur_mb->bmi + 2 + b)->as_mode; } else { - return mi->mbmi.mode; + return cur_mb->mbmi.mode; } } - return (mi->bmi + b - 2)->as_mode; + return (cur_mb->bmi + b - 2)->as_mode; } #endif // VP9_COMMON_VP9_FINDNEARMV_H_ diff --git a/vp9/common/vp9_loopfilter.c b/vp9/common/vp9_loopfilter.c index cfb5cd4a3..0d883ab9b 100644 --- a/vp9/common/vp9_loopfilter.c +++ b/vp9/common/vp9_loopfilter.c @@ -559,12 +559,12 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n, // by mi_row, mi_col. // TODO(JBB): This function only works for yv12. static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, - MODE_INFO **mi_8x8, const int mode_info_stride, + const MODE_INFO *mi, const int mode_info_stride, LOOP_FILTER_MASK *lfm) { int idx_32, idx_16, idx_8; const loop_filter_info_n *const lfi_n = &cm->lf_info; - MODE_INFO **mip = mi_8x8; - MODE_INFO **mip2 = mi_8x8; + const MODE_INFO *mip = mi; + const MODE_INFO *mip2 = mi; // These are offsets to the next mi in the 64x64 block. It is what gets // added to the mi ptr as we go through each loop. It helps us to avoids @@ -596,23 +596,23 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, // TODO(jimbankoski): Try moving most of the following code into decode // loop and storing lfm in the mbmi structure so that we don't have to go // through the recursive loop structure multiple times. 
- switch (mip[0]->mbmi.sb_type) { + switch (mip->mbmi.sb_type) { case BLOCK_64X64: - build_masks(lfi_n, mip[0] , 0, 0, lfm); + build_masks(lfi_n, mip , 0, 0, lfm); break; case BLOCK_64X32: - build_masks(lfi_n, mip[0], 0, 0, lfm); + build_masks(lfi_n, mip, 0, 0, lfm); mip2 = mip + mode_info_stride * 4; if (4 >= max_rows) break; - build_masks(lfi_n, mip2[0], 32, 8, lfm); + build_masks(lfi_n, mip2 , 32, 8, lfm); break; case BLOCK_32X64: - build_masks(lfi_n, mip[0], 0, 0, lfm); + build_masks(lfi_n, mip, 0, 0, lfm); mip2 = mip + 4; if (4 >= max_cols) break; - build_masks(lfi_n, mip2[0], 4, 2, lfm); + build_masks(lfi_n, mip2, 4, 2, lfm); break; default: for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) { @@ -622,23 +622,23 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, const int mi_32_row_offset = ((idx_32 >> 1) << 2); if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows) continue; - switch (mip[0]->mbmi.sb_type) { + switch (mip->mbmi.sb_type) { case BLOCK_32X32: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); + build_masks(lfi_n, mip, shift_y, shift_uv, lfm); break; case BLOCK_32X16: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); + build_masks(lfi_n, mip, shift_y, shift_uv, lfm); if (mi_32_row_offset + 2 >= max_rows) continue; mip2 = mip + mode_info_stride * 2; - build_masks(lfi_n, mip2[0], shift_y + 16, shift_uv + 4, lfm); + build_masks(lfi_n, mip2, shift_y + 16, shift_uv + 4, lfm); break; case BLOCK_16X32: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); + build_masks(lfi_n, mip, shift_y, shift_uv, lfm); if (mi_32_col_offset + 2 >= max_cols) continue; mip2 = mip + 2; - build_masks(lfi_n, mip2[0], shift_y + 2, shift_uv + 1, lfm); + build_masks(lfi_n, mip2, shift_y + 2, shift_uv + 1, lfm); break; default: for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) { @@ -652,29 +652,29 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows) continue; - switch (mip[0]->mbmi.sb_type) { + switch (mip->mbmi.sb_type) { case BLOCK_16X16: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); + build_masks(lfi_n, mip, shift_y, shift_uv, lfm); break; case BLOCK_16X8: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); + build_masks(lfi_n, mip, shift_y, shift_uv, lfm); if (mi_16_row_offset + 1 >= max_rows) continue; mip2 = mip + mode_info_stride; - build_y_mask(lfi_n, mip2[0], shift_y+8, lfm); + build_y_mask(lfi_n, mip2, shift_y+8, lfm); break; case BLOCK_8X16: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); + build_masks(lfi_n, mip, shift_y, shift_uv, lfm); if (mi_16_col_offset +1 >= max_cols) continue; mip2 = mip + 1; - build_y_mask(lfi_n, mip2[0], shift_y+1, lfm); + build_y_mask(lfi_n, mip2, shift_y+1, lfm); break; default: { const int shift_y = shift_32_y[idx_32] + shift_16_y[idx_16] + shift_8_y[0]; - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); + build_masks(lfi_n, mip, shift_y, shift_uv, lfm); mip += offset[0]; for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) { const int shift_y = shift_32_y[idx_32] + @@ -688,7 +688,7 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, if (mi_8_col_offset >= max_cols || mi_8_row_offset >= max_rows) continue; - build_y_mask(lfi_n, mip[0], shift_y, lfm); + build_y_mask(lfi_n, mip, shift_y, lfm); } break; } @@ -792,7 +792,7 @@ static void setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, #if CONFIG_NON420 static void 
filter_block_plane_non420(VP9_COMMON *cm, struct macroblockd_plane *plane, - MODE_INFO **mi_8x8, + const MODE_INFO *mi, int mi_row, int mi_col) { const int ss_x = plane->subsampling_x; const int ss_y = plane->subsampling_y; @@ -816,25 +816,24 @@ static void filter_block_plane_non420(VP9_COMMON *cm, // Determine the vertical edges that need filtering for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) { - const MODE_INFO *mi = mi_8x8[c]; - const int skip_this = mi[0].mbmi.skip_coeff - && is_inter_block(&mi[0].mbmi); + const int skip_this = mi[c].mbmi.skip_coeff + && is_inter_block(&mi[c].mbmi); // left edge of current unit is block/partition edge -> no skip - const int block_edge_left = b_width_log2(mi[0].mbmi.sb_type) ? - !(c & ((1 << (b_width_log2(mi[0].mbmi.sb_type)-1)) - 1)) : 1; + const int block_edge_left = b_width_log2(mi[c].mbmi.sb_type) ? + !(c & ((1 << (b_width_log2(mi[c].mbmi.sb_type)-1)) - 1)) : 1; const int skip_this_c = skip_this && !block_edge_left; // top edge of current unit is block/partition edge -> no skip - const int block_edge_above = b_height_log2(mi[0].mbmi.sb_type) ? - !(r & ((1 << (b_height_log2(mi[0].mbmi.sb_type)-1)) - 1)) : 1; + const int block_edge_above = b_height_log2(mi[c].mbmi.sb_type) ? + !(r & ((1 << (b_height_log2(mi[c].mbmi.sb_type)-1)) - 1)) : 1; const int skip_this_r = skip_this && !block_edge_above; const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV) - ? get_uv_tx_size(&mi[0].mbmi) - : mi[0].mbmi.tx_size; + ? get_uv_tx_size(&mi[c].mbmi) + : mi[c].mbmi.tx_size; const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1; const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1; // Filter level can vary per MI - if (!build_lfi(&cm->lf_info, &mi[0].mbmi, lfi[r] + (c >> ss_x))) + if (!build_lfi(&cm->lf_info, &mi[c].mbmi, lfi[r] + (c >> ss_x))) continue; // Build masks based on the transform size of each block @@ -893,7 +892,7 @@ static void filter_block_plane_non420(VP9_COMMON *cm, mask_4x4_c & border_mask, mask_4x4_int[r], lfi[r]); dst->buf += 8 * dst->stride; - mi_8x8 += row_step_stride; + mi += row_step_stride; } // Now do horizontal pass @@ -914,7 +913,7 @@ static void filter_block_plane_non420(VP9_COMMON *cm, static void filter_block_plane(VP9_COMMON *const cm, struct macroblockd_plane *const plane, - MODE_INFO **mi_8x8, + const MODE_INFO *mi, int mi_row, int mi_col, LOOP_FILTER_MASK *lfm) { const int ss_x = plane->subsampling_x; @@ -937,8 +936,7 @@ static void filter_block_plane(VP9_COMMON *const cm, // Determine the vertical edges that need filtering for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) { - const MODE_INFO *mi = mi_8x8[c]; - if (!build_lfi(&cm->lf_info, &mi[0].mbmi, lfi[r] + (c >> ss_x))) + if (!build_lfi(&cm->lf_info, &mi[c].mbmi, lfi[r] + (c >> ss_x))) continue; } if (!plane->plane_type) { @@ -961,7 +959,7 @@ static void filter_block_plane(VP9_COMMON *const cm, lfi[r]); } dst->buf += 8 * dst->stride; - mi_8x8 += row_step_stride; + mi += row_step_stride; } // Now do horizontal pass @@ -1003,7 +1001,7 @@ void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer, #endif for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) { - MODE_INFO **mi_8x8 = cm->mi_grid_visible + mi_row * cm->mode_info_stride; + MODE_INFO* const mi = cm->mi + mi_row * cm->mode_info_stride; for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) { int plane; @@ -1014,18 +1012,17 @@ void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer, #if CONFIG_NON420 if 
(use_420) #endif - setup_mask(cm, mi_row, mi_col, mi_8x8 + mi_col, cm->mode_info_stride, - &lfm); + setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mode_info_stride, &lfm); for (plane = 0; plane < num_planes; ++plane) { #if CONFIG_NON420 if (use_420) #endif - filter_block_plane(cm, &xd->plane[plane], mi_8x8 + mi_col, mi_row, - mi_col, &lfm); + filter_block_plane(cm, &xd->plane[plane], mi + mi_col, mi_row, mi_col, + &lfm); #if CONFIG_NON420 else - filter_block_plane_non420(cm, &xd->plane[plane], mi_8x8 + mi_col, + filter_block_plane_non420(cm, &xd->plane[plane], mi + mi_col, mi_row, mi_col); #endif } @@ -1038,6 +1035,7 @@ void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd, int y_only, int partial) { int start_mi_row, end_mi_row, mi_rows_to_filter; if (!frame_filter_level) return; + start_mi_row = 0; mi_rows_to_filter = cm->mi_rows; if (partial && cm->mi_rows > 8) { diff --git a/vp9/common/vp9_mvref_common.c b/vp9/common/vp9_mvref_common.c index bfeeb57bf..d8381ec48 100644 --- a/vp9/common/vp9_mvref_common.c +++ b/vp9/common/vp9_mvref_common.c @@ -1,4 +1,3 @@ - /* * Copyright (c) 2012 The WebM project authors. All Rights Reserved. * @@ -204,8 +203,8 @@ void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, const MV *const mv_ref = &mv_ref_search[i]; if (is_inside(cm, mi_col, mi_row, mv_ref)) { const int check_sub_blocks = block_idx >= 0; - const MODE_INFO *const candidate_mi = xd->mi_8x8[mv_ref->col + mv_ref->row - * xd->mode_info_stride]; + const MODE_INFO *const candidate_mi = &mi[mv_ref->col + mv_ref->row + * xd->mode_info_stride]; const MB_MODE_INFO *const candidate = &candidate_mi->mbmi; // Keep counts for entropy encoding. context_counter += mode_2_counter[candidate->mode]; @@ -231,9 +230,8 @@ void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, for (; i < MVREF_NEIGHBOURS; ++i) { const MV *const mv_ref = &mv_ref_search[i]; if (is_inside(cm, mi_col, mi_row, mv_ref)) { - const MB_MODE_INFO *const candidate = &xd->mi_8x8[mv_ref->col + - mv_ref->row - * xd->mode_info_stride]->mbmi; + const MB_MODE_INFO *const candidate = &mi[mv_ref->col + mv_ref->row + * xd->mode_info_stride].mbmi; if (candidate->ref_frame[0] == ref_frame) { ADD_MV_REF_LIST(candidate->mv[0]); @@ -261,9 +259,8 @@ void vp9_find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, for (i = 0; i < MVREF_NEIGHBOURS; ++i) { const MV *mv_ref = &mv_ref_search[i]; if (is_inside(cm, mi_col, mi_row, mv_ref)) { - const MB_MODE_INFO *const candidate = &xd->mi_8x8[mv_ref->col + - mv_ref->row - * xd->mode_info_stride]->mbmi; + const MB_MODE_INFO *const candidate = &mi[mv_ref->col + mv_ref->row + * xd->mode_info_stride].mbmi; // If the candidate is INTRA we don't want to consider its mv. if (is_inter_block(candidate)) diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h index 0431e146f..f0bc063f2 100644 --- a/vp9/common/vp9_onyxc_int.h +++ b/vp9/common/vp9_onyxc_int.h @@ -164,10 +164,6 @@ typedef struct VP9Common { MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */ MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */ - MODE_INFO **mi_grid_base; - MODE_INFO **mi_grid_visible; - MODE_INFO **prev_mi_grid_base; - MODE_INFO **prev_mi_grid_visible; // Persistent mb segment id map used in prediction. 
unsigned char *last_frame_seg_map; diff --git a/vp9/common/vp9_pred_common.c b/vp9/common/vp9_pred_common.c index 81fbf1f26..494cea7c5 100644 --- a/vp9/common/vp9_pred_common.c +++ b/vp9/common/vp9_pred_common.c @@ -18,28 +18,28 @@ // Returns a context number for the given MB prediction signal unsigned char vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) { - const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride]; - const MODE_INFO * const left_mi = xd->mi_8x8[-1]; - const int left_in_image = xd->left_available && left_mi; - const int above_in_image = xd->up_available && above_mi; + const MODE_INFO *const mi = xd->mode_info_context; + const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi; + const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; + const int left_in_image = xd->left_available && left_mbmi->in_image; + const int above_in_image = xd->up_available && above_mbmi->in_image; // Note: // The mode info data structure has a one element border above and to the // left of the entries correpsonding to real macroblocks. // The prediction flags in these dummy entries are initialised to 0. // left - const int left_mv_pred = left_in_image ? is_inter_mode(left_mi->mbmi.mode) - : 0; + const int left_mv_pred = is_inter_mode(left_mbmi->mode); const int left_interp = left_in_image && left_mv_pred - ? left_mi->mbmi.interp_filter + ? left_mbmi->interp_filter : SWITCHABLE_FILTERS; // above - const int above_mv_pred = above_in_image ? is_inter_mode(above_mi->mbmi.mode) - : 0; + const int above_mv_pred = is_inter_mode(above_mbmi->mode); const int above_interp = above_in_image && above_mv_pred - ? above_mi->mbmi.interp_filter + ? above_mbmi->interp_filter : SWITCHABLE_FILTERS; + if (left_interp == above_interp) return left_interp; else if (left_interp == SWITCHABLE_FILTERS && @@ -53,14 +53,13 @@ unsigned char vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) { } // Returns a context number for the given MB prediction signal unsigned char vp9_get_pred_context_intra_inter(const MACROBLOCKD *xd) { - const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride]; - const MODE_INFO * const left_mi = xd->mi_8x8[-1]; - const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0; - const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0; - const int left_in_image = xd->left_available && left_mi; - const int above_in_image = xd->up_available && above_mi; - const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1; - const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1; + const MODE_INFO *const mi = xd->mode_info_context; + const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi; + const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; + const int left_in_image = xd->left_available && left_mbmi->in_image; + const int above_in_image = xd->up_available && above_mbmi->in_image; + const int left_intra = !is_inter_block(left_mbmi); + const int above_intra = !is_inter_block(above_mbmi); // The mode info data structure has a one element border above and to the // left of the entries corresponding to real macroblocks. @@ -81,12 +80,11 @@ unsigned char vp9_get_pred_context_intra_inter(const MACROBLOCKD *xd) { unsigned char vp9_get_pred_context_comp_inter_inter(const VP9_COMMON *cm, const MACROBLOCKD *xd) { int pred_context; - const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride]; - const MODE_INFO * const left_mi = xd->mi_8x8[-1]; - const MB_MODE_INFO *const above_mbmi = above_mi ? 
&above_mi->mbmi : 0; - const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0; - const int left_in_image = xd->left_available && left_mi; - const int above_in_image = xd->up_available && above_mi; + const MODE_INFO *const mi = xd->mode_info_context; + const MB_MODE_INFO *const above_mbmi = &mi[-cm->mode_info_stride].mbmi; + const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; + const int left_in_image = xd->left_available && left_mbmi->in_image; + const int above_in_image = xd->up_available && above_mbmi->in_image; // Note: // The mode info data structure has a one element border above and to the // left of the entries correpsonding to real macroblocks. @@ -126,14 +124,14 @@ unsigned char vp9_get_pred_context_comp_inter_inter(const VP9_COMMON *cm, unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, const MACROBLOCKD *xd) { int pred_context; - const MODE_INFO * const above_mi = xd->mi_8x8[-cm->mode_info_stride]; - const MODE_INFO * const left_mi = xd->mi_8x8[-1]; - const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0; - const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0; - const int left_in_image = xd->left_available && left_mi; - const int above_in_image = xd->up_available && above_mi; - const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1; - const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1; + const MODE_INFO *const mi = xd->mode_info_context; + const MB_MODE_INFO *const above_mbmi = &mi[-cm->mode_info_stride].mbmi; + const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; + const int left_in_image = xd->left_available && left_mbmi->in_image; + const int above_in_image = xd->up_available && above_mbmi->in_image; + const int left_intra = !is_inter_block(left_mbmi); + const int above_intra = !is_inter_block(above_mbmi); + // Note: // The mode info data structure has a one element border above and to the // left of the entries correpsonding to real macroblocks. @@ -206,14 +204,14 @@ unsigned char vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, } unsigned char vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) { int pred_context; - const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride]; - const MODE_INFO * const left_mi = xd->mi_8x8[-1]; - const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0; - const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0; - const int left_in_image = xd->left_available && left_mi; - const int above_in_image = xd->up_available && above_mi; - const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1; - const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1; + const MODE_INFO *const mi = xd->mode_info_context; + const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi; + const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; + const int left_in_image = xd->left_available && left_mbmi->in_image; + const int above_in_image = xd->up_available && above_mbmi->in_image; + const int left_intra = !is_inter_block(left_mbmi); + const int above_intra = !is_inter_block(above_mbmi); + // Note: // The mode info data structure has a one element border above and to the // left of the entries correpsonding to real macroblocks. 
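Once the pointer grid is gone, the context functions in vp9_pred_common.c/.h all share one pattern: the above and left MB_MODE_INFO are reached as mi[-xd->mode_info_stride] and mi[-1] from xd->mode_info_context, and left_available/up_available together with the neighbour's in_image flag (or simply the zero-initialised border entries) decide whether that neighbour contributes to the context. A minimal sketch of the pattern, modelled on the skip context, follows; MiFlags, NeighborCtx and skip_context are illustrative names, not vp9 types.

/* Illustrative stand-ins for the few MACROBLOCKD pieces used here. */
typedef struct {
  unsigned char skip_coeff;   /* example prediction flag              */
  unsigned char in_image;     /* 1 for real blocks, 0 on the border   */
} MiFlags;

typedef struct {
  const MiFlags *mi;  /* current block's entry in the contiguous array */
  int stride;         /* cm->mode_info_stride                          */
  int left_available;
} NeighborCtx;

static int skip_context(const NeighborCtx *xd) {
  /* Above is read unconditionally: on the top edge mi[-stride] lands
   * in the zero-filled border row, so it contributes 0.  Left is
   * gated by left_available, matching vp9_get_pred_context_mbskip(). */
  const int above_skip = xd->mi[-xd->stride].skip_coeff;
  const int left_skip  = xd->left_available ? xd->mi[-1].skip_coeff : 0;
  return above_skip + left_skip;   /* context value in {0, 1, 2} */
}

Contexts that need more than a presence bit (switchable interp, intra/inter, comp/single ref, tx size) extend the same lookup with the in_image and is_inter_block() checks seen in the hunks above and below.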
@@ -272,14 +270,13 @@ unsigned char vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) { unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { int pred_context; - const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride]; - const MODE_INFO * const left_mi = xd->mi_8x8[-1]; - const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0; - const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0; - const int left_in_image = xd->left_available && left_mi; - const int above_in_image = xd->up_available && above_mi; - const int left_intra = left_in_image ? !is_inter_block(left_mbmi) : 1; - const int above_intra = above_in_image ? !is_inter_block(above_mbmi) : 1; + const MODE_INFO *const mi = xd->mode_info_context; + const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi; + const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; + const int left_in_image = xd->left_available && left_mbmi->in_image; + const int above_in_image = xd->up_available && above_mbmi->in_image; + const int left_intra = !is_inter_block(left_mbmi); + const int above_intra = !is_inter_block(above_mbmi); // Note: // The mode info data structure has a one element border above and to the @@ -361,13 +358,12 @@ unsigned char vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { // left of the entries corresponding to real blocks. // The prediction flags in these dummy entries are initialized to 0. unsigned char vp9_get_pred_context_tx_size(const MACROBLOCKD *xd) { - const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride]; - const MODE_INFO * const left_mi = xd->mi_8x8[-1]; - const MB_MODE_INFO *const above_mbmi = above_mi ? &above_mi->mbmi : 0; - const MB_MODE_INFO *const left_mbmi = left_mi ? &left_mi->mbmi : 0; - const int left_in_image = xd->left_available && left_mi; - const int above_in_image = xd->up_available && above_mi; - const int max_tx_size = max_txsize_lookup[xd->mi_8x8[0]->mbmi.sb_type]; + const MODE_INFO *const mi = xd->mode_info_context; + const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi; + const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; + const int left_in_image = xd->left_available && left_mbmi->in_image; + const int above_in_image = xd->up_available && above_mbmi->in_image; + const int max_tx_size = max_txsize_lookup[mi->mbmi.sb_type]; int above_context = max_tx_size; int left_context = max_tx_size; @@ -388,13 +384,32 @@ unsigned char vp9_get_pred_context_tx_size(const MACROBLOCKD *xd) { return above_context + left_context > max_tx_size; } -void vp9_set_pred_flag_seg_id(MACROBLOCKD *xd, uint8_t pred_flag) { - xd->this_mi->mbmi.seg_id_predicted = pred_flag; +void vp9_set_pred_flag_seg_id(VP9_COMMON *cm, BLOCK_SIZE bsize, + int mi_row, int mi_col, uint8_t pred_flag) { + MODE_INFO *mi = &cm->mi[mi_row * cm->mode_info_stride + mi_col]; + const int bw = 1 << mi_width_log2(bsize); + const int bh = 1 << mi_height_log2(bsize); + const int xmis = MIN(cm->mi_cols - mi_col, bw); + const int ymis = MIN(cm->mi_rows - mi_row, bh); + int x, y; + + for (y = 0; y < ymis; y++) + for (x = 0; x < xmis; x++) + mi[y * cm->mode_info_stride + x].mbmi.seg_id_predicted = pred_flag; } -void vp9_set_pred_flag_mbskip(MACROBLOCKD *xd, BLOCK_SIZE bsize, - uint8_t pred_flag) { - xd->this_mi->mbmi.skip_coeff = pred_flag; +void vp9_set_pred_flag_mbskip(VP9_COMMON *cm, BLOCK_SIZE bsize, + int mi_row, int mi_col, uint8_t pred_flag) { + MODE_INFO *mi = &cm->mi[mi_row * cm->mode_info_stride + mi_col]; + const int bw = 1 << 
mi_width_log2(bsize); + const int bh = 1 << mi_height_log2(bsize); + const int xmis = MIN(cm->mi_cols - mi_col, bw); + const int ymis = MIN(cm->mi_rows - mi_row, bh); + int x, y; + + for (y = 0; y < ymis; y++) + for (x = 0; x < xmis; x++) + mi[y * cm->mode_info_stride + x].mbmi.skip_coeff = pred_flag; } int vp9_get_segment_id(VP9_COMMON *cm, const uint8_t *segment_ids, diff --git a/vp9/common/vp9_pred_common.h b/vp9/common/vp9_pred_common.h index 47ca8abd8..89e1356d7 100644 --- a/vp9/common/vp9_pred_common.h +++ b/vp9/common/vp9_pred_common.h @@ -19,12 +19,12 @@ int vp9_get_segment_id(VP9_COMMON *cm, const uint8_t *segment_ids, static INLINE int vp9_get_pred_context_seg_id(const MACROBLOCKD *xd) { - const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride]; - const MODE_INFO * const left_mi = xd->mi_8x8[-1]; - const int above_sip = above_mi ? above_mi->mbmi.seg_id_predicted : 0; - const int left_sip = left_mi ? left_mi->mbmi.seg_id_predicted : 0; + const MODE_INFO *const mi = xd->mode_info_context; + const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi; + const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; - return above_sip + (xd->left_available ? left_sip : 0); + return above_mbmi->seg_id_predicted + + (xd->left_available ? left_mbmi->seg_id_predicted : 0); } static INLINE vp9_prob vp9_get_pred_prob_seg_id(struct segmentation *seg, @@ -32,15 +32,16 @@ static INLINE vp9_prob vp9_get_pred_prob_seg_id(struct segmentation *seg, return seg->pred_probs[vp9_get_pred_context_seg_id(xd)]; } -void vp9_set_pred_flag_seg_id(MACROBLOCKD *xd, uint8_t pred_flag); +void vp9_set_pred_flag_seg_id(VP9_COMMON *cm, BLOCK_SIZE bsize, + int mi_row, int mi_col, uint8_t pred_flag); static INLINE int vp9_get_pred_context_mbskip(const MACROBLOCKD *xd) { - const MODE_INFO * const above_mi = xd->mi_8x8[-xd->mode_info_stride]; - const MODE_INFO * const left_mi = xd->mi_8x8[-1]; - const int above_skip_coeff = above_mi ? above_mi->mbmi.skip_coeff : 0; - const int left_skip_coeff = left_mi ? left_mi->mbmi.skip_coeff : 0; + const MODE_INFO *const mi = xd->mode_info_context; + const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi; + const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; - return above_skip_coeff + (xd->left_available ? left_skip_coeff : 0); + return above_mbmi->skip_coeff + + (xd->left_available ? 
left_mbmi->skip_coeff : 0); } static INLINE vp9_prob vp9_get_pred_prob_mbskip(const VP9_COMMON *cm, @@ -49,11 +50,11 @@ static INLINE vp9_prob vp9_get_pred_prob_mbskip(const VP9_COMMON *cm, } static INLINE unsigned char vp9_get_pred_flag_mbskip(const MACROBLOCKD *xd) { - return xd->this_mi->mbmi.skip_coeff; + return xd->mode_info_context->mbmi.skip_coeff; } -void vp9_set_pred_flag_mbskip(MACROBLOCKD *xd, BLOCK_SIZE bsize, - uint8_t pred_flag); +void vp9_set_pred_flag_mbskip(VP9_COMMON *cm, BLOCK_SIZE bsize, + int mi_row, int mi_col, uint8_t pred_flag); unsigned char vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd); @@ -113,9 +114,8 @@ static const vp9_prob *get_tx_probs(BLOCK_SIZE bsize, uint8_t context, } static const vp9_prob *get_tx_probs2(const MACROBLOCKD *xd, - const struct tx_probs *tx_probs, - const MODE_INFO *m) { - const BLOCK_SIZE bsize = m->mbmi.sb_type; + const struct tx_probs *tx_probs) { + const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type; const int context = vp9_get_pred_context_tx_size(xd); return get_tx_probs(bsize, context, tx_probs); } diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c index dc1d46caa..88bba3a60 100644 --- a/vp9/common/vp9_reconinter.c +++ b/vp9/common/vp9_reconinter.c @@ -24,13 +24,11 @@ void vp9_setup_interp_filters(MACROBLOCKD *xd, INTERPOLATIONFILTERTYPE mcomp_filter_type, VP9_COMMON *cm) { - if (xd->mi_8x8 && xd->this_mi) { - MB_MODE_INFO * mbmi = &xd->this_mi->mbmi; + if (xd->mode_info_context) { + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; set_scale_factors(xd, mbmi->ref_frame[0] - 1, mbmi->ref_frame[1] - 1, cm->active_ref_scale); - } else { - set_scale_factors(xd, -1, -1, cm->active_ref_scale); } switch (mcomp_filter_type) { @@ -130,7 +128,7 @@ static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize, const int bh = plane_block_height(bsize, pd); const int x = 4 * (block & ((1 << bwl) - 1)); const int y = 4 * (block >> bwl); - const MODE_INFO *mi = xd->this_mi; + const MODE_INFO *const mi = xd->mode_info_context; const int use_second_ref = mi->mbmi.ref_frame[1] > 0; int ref; @@ -195,7 +193,7 @@ static INLINE void foreach_predicted_block_in_plane( // size of the predictor to use. 
int pred_w, pred_h; - if (xd->this_mi->mbmi.sb_type < BLOCK_8X8) { + if (xd->mode_info_context->mbmi.sb_type < BLOCK_8X8) { assert(bsize == BLOCK_8X8); pred_w = 0; pred_h = 0; diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c index 84a29b17a..7f23dc190 100644 --- a/vp9/decoder/vp9_decodemv.c +++ b/vp9/decoder/vp9_decodemv.c @@ -95,7 +95,7 @@ static int read_intra_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col, vp9_reader *r) { MACROBLOCKD *const xd = &pbi->mb; struct segmentation *const seg = &pbi->common.seg; - const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type; + const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type; int segment_id; if (!seg->enabled) @@ -114,7 +114,7 @@ static int read_inter_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col, VP9_COMMON *const cm = &pbi->common; MACROBLOCKD *const xd = &pbi->mb; struct segmentation *const seg = &cm->seg; - const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type; + const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type; int pred_segment_id, segment_id; if (!seg->enabled) @@ -128,7 +128,7 @@ static int read_inter_segment_id(VP9D_COMP *pbi, int mi_row, int mi_col, if (seg->temporal_update) { const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); const int pred_flag = vp9_read(r, pred_prob); - vp9_set_pred_flag_seg_id(xd, pred_flag); + vp9_set_pred_flag_seg_id(cm, bsize, mi_row, mi_col, pred_flag); segment_id = pred_flag ? pred_segment_id : read_segment_id(r, seg); } else { @@ -156,8 +156,7 @@ static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m, MACROBLOCKD *const xd = &pbi->mb; MB_MODE_INFO *const mbmi = &m->mbmi; const BLOCK_SIZE bsize = mbmi->sb_type; - const MODE_INFO *above_mi = xd->mi_8x8[-cm->mode_info_stride]; - const MODE_INFO *left_mi = xd->mi_8x8[-1]; + const int mis = cm->mode_info_stride; mbmi->segment_id = read_intra_segment_id(pbi, mi_row, mi_col, r); mbmi->skip_coeff = read_skip_coeff(pbi, mbmi->segment_id, r); @@ -166,9 +165,9 @@ static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m, mbmi->ref_frame[1] = NONE; if (bsize >= BLOCK_8X8) { - const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0); + const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis); const MB_PREDICTION_MODE L = xd->left_available ? - left_block_mode(m, left_mi, 0) : DC_PRED; + left_block_mode(m, 0) : DC_PRED; mbmi->mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]); } else { // Only 4x4, 4x8, 8x4 blocks @@ -179,9 +178,9 @@ static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m, for (idy = 0; idy < 2; idy += num_4x4_h) { for (idx = 0; idx < 2; idx += num_4x4_w) { const int ib = idy * 2 + idx; - const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, ib); + const MB_PREDICTION_MODE A = above_block_mode(m, ib, mis); const MB_PREDICTION_MODE L = (xd->left_available || idx) ? 
- left_block_mode(m, left_mi, ib) : DC_PRED; + left_block_mode(m, ib) : DC_PRED; const MB_PREDICTION_MODE b_mode = read_intra_mode(r, vp9_kf_y_mode_prob[A][L]); m->bmi[ib].as_mode = b_mode; @@ -454,8 +453,8 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi, ref0 = mbmi->ref_frame[0]; is_compound = has_second_ref(mbmi); - vp9_find_mv_refs(cm, xd, mi, xd->last_mi, ref0, mbmi->ref_mvs[ref0], - mi_row, mi_col); + vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context, + ref0, mbmi->ref_mvs[ref0], mi_row, mi_col); inter_mode_ctx = mbmi->mode_context[ref0]; @@ -475,7 +474,7 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi, if (is_compound) { const MV_REFERENCE_FRAME ref1 = mbmi->ref_frame[1]; - vp9_find_mv_refs(cm, xd, mi, xd->last_mi, + vp9_find_mv_refs(cm, xd, mi, xd->prev_mode_info_context, ref1, mbmi->ref_mvs[ref1], mi_row, mi_col); if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) { @@ -670,21 +669,20 @@ void vp9_prepare_read_mode_info(VP9D_COMP* pbi, vp9_reader *r) { void vp9_read_mode_info(VP9D_COMP* pbi, int mi_row, int mi_col, vp9_reader *r) { VP9_COMMON *const cm = &pbi->common; MACROBLOCKD *const xd = &pbi->mb; - MODE_INFO *mi = xd->this_mi; + MODE_INFO *mi = xd->mode_info_context; const BLOCK_SIZE bsize = mi->mbmi.sb_type; const int bw = 1 << mi_width_log2(bsize); const int bh = 1 << mi_height_log2(bsize); const int y_mis = MIN(bh, cm->mi_rows - mi_row); const int x_mis = MIN(bw, cm->mi_cols - mi_col); - int x, y, z; + int x, y; if (cm->frame_type == KEY_FRAME || cm->intra_only) read_intra_frame_mode_info(pbi, mi, mi_row, mi_col, r); else read_inter_frame_mode_info(pbi, mi, mi_row, mi_col, r); - for (y = 0, z = 0; y < y_mis; y++, z += cm->mode_info_stride) - for (x = !y; x < x_mis; x++) { - xd->mi_8x8[z + x] = mi; - } + for (y = 0; y < y_mis; y++) + for (x = !y; x < x_mis; x++) + mi[y * cm->mode_info_stride + x] = *mi; } diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c index 34ed0c759..6cb7c094b 100644 --- a/vp9/decoder/vp9_decodframe.c +++ b/vp9/decoder/vp9_decodframe.c @@ -80,7 +80,6 @@ static void read_tx_probs(struct tx_probs *tx_probs, vp9_reader *r) { static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) { int i; xd->plane[0].dequant = cm->y_dequant[q_index]; - for (i = 1; i < MAX_MB_PLANE; i++) xd->plane[i].dequant = cm->uv_dequant[q_index]; } @@ -125,7 +124,7 @@ static void decode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) { MACROBLOCKD* const xd = arg; struct macroblockd_plane *const pd = &xd->plane[plane]; - MODE_INFO *const mi = xd->this_mi; + MODE_INFO *const mi = xd->mode_info_context; const int raster_block = txfrm_block_to_raster_block(plane_bsize, tx_size, block); uint8_t* const dst = raster_block_offset_uint8(plane_bsize, raster_block, @@ -149,7 +148,7 @@ static void decode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, static int decode_tokens(VP9D_COMP *pbi, BLOCK_SIZE bsize, vp9_reader *r) { VP9_COMMON *const cm = &pbi->common; MACROBLOCKD *const xd = &pbi->mb; - MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; if (mbmi->skip_coeff) { reset_skip_context(xd, bsize); @@ -172,20 +171,12 @@ static void set_offsets(VP9D_COMP *pbi, BLOCK_SIZE bsize, const int bw = num_8x8_blocks_wide_lookup[bsize]; const int offset = mi_row * cm->mode_info_stride + mi_col; + xd->mode_info_context = cm->mi + offset; + xd->mode_info_context->mbmi.sb_type = bsize; xd->mode_info_stride = 
cm->mode_info_stride; - - xd->mi_8x8 = cm->mi_grid_visible + offset; - xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset; - - // we are using the mode info context stream here - xd->this_mi = - xd->mi_8x8[0] = xd->mic_stream_ptr; - xd->this_mi->mbmi.sb_type = bsize; - xd->mic_stream_ptr++; - // Special case: if prev_mi is NULL, the previous mode info context // cannot be used. - xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL; + xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + offset : NULL; set_skip_context(cm, xd, mi_row, mi_col); set_partition_seg_context(cm, xd, mi_row, mi_col); @@ -200,7 +191,7 @@ static void set_offsets(VP9D_COMP *pbi, BLOCK_SIZE bsize, static void set_ref(VP9D_COMP *pbi, int i, int mi_row, int mi_col) { VP9_COMMON *const cm = &pbi->common; MACROBLOCKD *const xd = &pbi->mb; - MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; const int ref = mbmi->ref_frame[i] - LAST_FRAME; const YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->active_ref_idx[ref]]; const struct scale_factors *sf = &cm->active_ref_scale[ref]; @@ -231,7 +222,7 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col, bsize = BLOCK_8X8; // Has to be called after set_offsets - mbmi = &xd->this_mi->mbmi; + mbmi = &xd->mode_info_context->mbmi; if (!is_inter_block(mbmi)) { // Intra reconstruction @@ -255,7 +246,7 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col, assert(mbmi->sb_type == bsize); if (eobtotal == 0) // skip loopfilter - vp9_set_pred_flag_mbskip(xd, bsize, 1); + vp9_set_pred_flag_mbskip(cm, bsize, mi_row, mi_col, 1); else if (eobtotal > 0) foreach_transformed_block(xd, bsize, decode_block, xd); } @@ -969,10 +960,6 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) { setup_plane_dequants(cm, &pbi->mb, cm->base_qindex); - xd->mi_8x8 = cm->mi_grid_visible; - xd->mic_stream_ptr = cm->mi; - xd->mode_info_stride = cm->mode_info_stride; - cm->fc = cm->frame_contexts[cm->frame_context_idx]; vp9_zero(cm->counts); diff --git a/vp9/decoder/vp9_detokenize.c b/vp9/decoder/vp9_detokenize.c index cd74a0b00..c119093df 100644 --- a/vp9/decoder/vp9_detokenize.c +++ b/vp9/decoder/vp9_detokenize.c @@ -94,7 +94,7 @@ static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd, ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L) { FRAME_CONTEXT *const fc = &cm->fc; FRAME_COUNTS *const counts = &cm->counts; - const int ref = is_inter_block(&xd->this_mi->mbmi); + const int ref = is_inter_block(&xd->mode_info_context->mbmi); int band, c = 0; vp9_prob (*coef_probs)[PREV_COEF_CONTEXTS][UNCONSTRAINED_NODES] = fc->coef_probs[tx_size][type][ref]; @@ -224,7 +224,7 @@ static void decode_block(int plane, int block, BLOCK_SIZE plane_bsize, MACROBLOCKD *xd = &arg->pbi->mb; struct segmentation *seg = &arg->pbi->common.seg; struct macroblockd_plane* pd = &xd->plane[plane]; - const int segment_id = xd->this_mi->mbmi.segment_id; + const int segment_id = xd->mode_info_context->mbmi.segment_id; const int seg_eob = get_tx_eob(seg, segment_id, tx_size); int aoff, loff, eob; diff --git a/vp9/decoder/vp9_onyxd_if.c b/vp9/decoder/vp9_onyxd_if.c index 17d5def33..505e9dc5d 100644 --- a/vp9/decoder/vp9_onyxd_if.c +++ b/vp9/decoder/vp9_onyxd_if.c @@ -386,17 +386,12 @@ int vp9_receive_compressed_data(VP9D_PTR ptr, if (cm->show_frame) { // current mip will be the prev_mip for the next frame MODE_INFO *temp = cm->prev_mip; - MODE_INFO **temp2 = cm->prev_mi_grid_base; cm->prev_mip = cm->mip; cm->mip = temp; - cm->prev_mi_grid_base = 
cm->mi_grid_base; - cm->mi_grid_base = temp2; // update the upper left visible macroblock ptrs cm->mi = cm->mip + cm->mode_info_stride + 1; cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1; - cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1; - cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1; cm->current_video_frame++; } diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c index 635891efb..f3bbc17ce 100644 --- a/vp9/encoder/vp9_bitstream.c +++ b/vp9/encoder/vp9_bitstream.c @@ -198,11 +198,10 @@ static void update_mbintra_mode_probs(VP9_COMP* const cpi, (unsigned int *)cpi->y_mode_count[j]); } -static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m, - TX_SIZE tx_size, BLOCK_SIZE bsize, - vp9_writer *w) { +static void write_selected_tx_size(const VP9_COMP *cpi, TX_SIZE tx_size, + BLOCK_SIZE bsize, vp9_writer *w) { const MACROBLOCKD *const xd = &cpi->mb.e_mbd; - const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs, m); + const vp9_prob *tx_probs = get_tx_probs2(xd, &cpi->common.fc.tx_probs); vp9_write(w, tx_size != TX_4X4, tx_probs[0]); if (bsize >= BLOCK_16X16 && tx_size != TX_4X4) { vp9_write(w, tx_size != TX_8X8, tx_probs[1]); @@ -360,7 +359,7 @@ static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) { VP9_COMMON *const cm = &cpi->common; MACROBLOCK *const x = &cpi->mb; MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *mi = &xd->this_mi->mbmi; + MB_MODE_INFO *mi = &xd->mode_info_context->mbmi; const int segment_id = mi->segment_id; int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME); @@ -438,7 +437,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) { if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && !(rf != INTRA_FRAME && (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) { - write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc); + write_selected_tx_size(cpi, mi->tx_size, bsize, bc); } if (rf == INTRA_FRAME) { @@ -532,16 +531,14 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) { } } -static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, +static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) { const VP9_COMMON *const cm = &cpi->common; const MACROBLOCKD *const xd = &cpi->mb.e_mbd; const struct segmentation *const seg = &cm->seg; - MODE_INFO *m = mi_8x8[0]; const int ym = m->mbmi.mode; + const int mis = cm->mode_info_stride; const int segment_id = m->mbmi.segment_id; - MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride]; - MODE_INFO *left_mi = mi_8x8[-1]; if (seg->update_map) write_segment_id(bc, seg, m->mbmi.segment_id); @@ -549,12 +546,12 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, write_skip_coeff(cpi, segment_id, m, bc); if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT) - write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc); + write_selected_tx_size(cpi, m->mbmi.tx_size, m->mbmi.sb_type, bc); if (m->mbmi.sb_type >= BLOCK_8X8) { - const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0); + const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis); const MB_PREDICTION_MODE L = xd->left_available ? 
- left_block_mode(m, left_mi, 0) : DC_PRED; + left_block_mode(m, 0) : DC_PRED; write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]); } else { int idx, idy; @@ -562,10 +559,10 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type]; for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { - int i = idy * 2 + idx; - const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i); + const int i = idy * 2 + idx; + const MB_PREDICTION_MODE A = above_block_mode(m, i, mis); const MB_PREDICTION_MODE L = (xd->left_available || idx) ? - left_block_mode(m, left_mi, i) : DC_PRED; + left_block_mode(m, i) : DC_PRED; const int bm = m->bmi[i].as_mode; #ifdef ENTROPY_STATS ++intra_mode_stats[A][L][bm]; @@ -578,25 +575,23 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8, write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]); } -static void write_modes_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc, +static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc, TOKENEXTRA **tok, TOKENEXTRA *tok_end, int mi_row, int mi_col) { VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &cpi->mb.e_mbd; - MODE_INFO *m = mi_8x8[0]; if (m->mbmi.sb_type < BLOCK_8X8) if (xd->ab_index > 0) return; - xd->this_mi = mi_8x8[0]; - xd->mi_8x8 = mi_8x8; - + xd->mode_info_context = m; set_mi_row_col(&cpi->common, xd, mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type], mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type]); - if ((cm->frame_type == KEY_FRAME) || cm->intra_only) { - write_mb_modes_kf(cpi, mi_8x8, bc); + + if (cm->frame_type == KEY_FRAME || cm->intra_only) { + write_mb_modes_kf(cpi, m, bc); #ifdef ENTROPY_STATS active_section = 8; #endif @@ -611,7 +606,7 @@ static void write_modes_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc, pack_mb_tokens(bc, tok, tok_end); } -static void write_modes_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc, +static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc, TOKENEXTRA **tok, TOKENEXTRA *tok_end, int mi_row, int mi_col, BLOCK_SIZE bsize) { VP9_COMMON *const cm = &cpi->common; @@ -622,7 +617,6 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc, int n; PARTITION_TYPE partition = PARTITION_NONE; BLOCK_SIZE subsize; - MODE_INFO *m = mi_8x8[0]; if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; @@ -654,26 +648,25 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, vp9_writer *bc, switch (partition) { case PARTITION_NONE: - write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col); + write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col); break; case PARTITION_HORZ: - write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col); + write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col); *(get_sb_index(xd, subsize)) = 1; if ((mi_row + bs) < cm->mi_rows) - write_modes_b(cpi, mi_8x8 + bs * mis, bc, tok, tok_end, mi_row + bs, - mi_col); + write_modes_b(cpi, m + bs * mis, bc, tok, tok_end, mi_row + bs, mi_col); break; case PARTITION_VERT: - write_modes_b(cpi, mi_8x8, bc, tok, tok_end, mi_row, mi_col); + write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col); *(get_sb_index(xd, subsize)) = 1; if ((mi_col + bs) < cm->mi_cols) - write_modes_b(cpi, mi_8x8 + bs, bc, tok, tok_end, mi_row, mi_col + bs); + write_modes_b(cpi, m + bs, bc, tok, tok_end, mi_row, mi_col + bs); break; case PARTITION_SPLIT: for (n = 0; n < 4; n++) { int j = 
n >> 1, i = n & 0x01; *(get_sb_index(xd, subsize)) = n; - write_modes_sb(cpi, mi_8x8 + j * bs * mis + i * bs, bc, tok, tok_end, + write_modes_sb(cpi, m + j * bs * mis + i * bs, bc, tok, tok_end, mi_row + j * bs, mi_col + i * bs, subsize); } break; @@ -693,21 +686,18 @@ static void write_modes(VP9_COMP *cpi, vp9_writer* const bc, TOKENEXTRA **tok, TOKENEXTRA *tok_end) { VP9_COMMON *const cm = &cpi->common; const int mis = cm->mode_info_stride; + MODE_INFO *m, *m_ptr = cm->mi; int mi_row, mi_col; - MODE_INFO **mi_8x8 = cm->mi_grid_visible; - MODE_INFO **m_8x8; - mi_8x8 += cm->cur_tile_mi_col_start + cm->cur_tile_mi_row_start * mis; + m_ptr += cm->cur_tile_mi_col_start + cm->cur_tile_mi_row_start * mis; for (mi_row = cm->cur_tile_mi_row_start; mi_row < cm->cur_tile_mi_row_end; - mi_row += 8, mi_8x8 += 8 * mis) { - m_8x8 = mi_8x8; + mi_row += 8, m_ptr += 8 * mis) { + m = m_ptr; vp9_zero(cm->left_seg_context); for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end; - mi_col += MI_BLOCK_SIZE, m_8x8 += MI_BLOCK_SIZE) { - write_modes_sb(cpi, m_8x8, bc, tok, tok_end, mi_row, mi_col, - BLOCK_64X64); - } + mi_col += MI_BLOCK_SIZE, m += MI_BLOCK_SIZE) + write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col, BLOCK_64X64); } } diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c index 983ac9a38..45758e7cb 100644 --- a/vp9/encoder/vp9_encodeframe.c +++ b/vp9/encoder/vp9_encodeframe.c @@ -342,8 +342,7 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, MACROBLOCK *const x = &cpi->mb; MACROBLOCKD *const xd = &x->e_mbd; MODE_INFO *mi = &ctx->mic; - MB_MODE_INFO * const mbmi = &xd->this_mi->mbmi; - MODE_INFO *mi_addr = xd->this_mi; + MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; int mb_mode_index = ctx->best_mode_index; const int mis = cm->mode_info_stride; @@ -356,15 +355,13 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, assert(mi->mbmi.ref_frame[1] < MAX_REF_FRAMES); assert(mi->mbmi.sb_type == bsize); - *mi_addr = *mi; - // Restore the coding context of the MB to that that was in place // when the mode was picked for it for (y = 0; y < mi_height; y++) for (x_idx = 0; x_idx < mi_width; x_idx++) if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) - xd->mi_8x8[x_idx + y * mis] = mi_addr; + xd->mode_info_context[x_idx + y * mis] = *mi; // FIXME(rbultje) I'm pretty sure this should go to the end of this block // (i.e. 
after the output_enabled) @@ -433,6 +430,15 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv); } + if (bsize > BLOCK_8X8 && mbmi->mode == NEWMV) { + int i, j; + for (j = 0; j < mi_height; ++j) + for (i = 0; i < mi_width; ++i) + if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > i + && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > j) + xd->mode_info_context[mis * j + i].mbmi = *mbmi; + } + if (cm->mcomp_filter_type == SWITCHABLE && is_inter_mode(mbmi->mode)) { const int ctx = vp9_get_pred_context_switchable_interp(xd); ++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; @@ -486,18 +492,11 @@ static void set_offsets(VP9_COMP *cpi, int mi_row, int mi_col, /* pointers to mode info contexts */ x->partition_info = x->pi + idx_str; - - xd->mi_8x8 = cm->mi_grid_visible + idx_str; - xd->prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str; - + xd->mode_info_context = cm->mi + idx_str; + mbmi = &xd->mode_info_context->mbmi; // Special case: if prev_mi is NULL, the previous mode info context // cannot be used. - xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL; - - xd->this_mi = - xd->mi_8x8[0] = cm->mi + idx_str; - - mbmi = &xd->this_mi->mbmi; + xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + idx_str : NULL; // Set up destination pointers setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mi_row, mi_col); @@ -574,10 +573,10 @@ static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col, } set_offsets(cpi, mi_row, mi_col, bsize); - xd->this_mi->mbmi.sb_type = bsize; + xd->mode_info_context->mbmi.sb_type = bsize; // Set to zero to make sure we do not use the previous encoded frame stats - xd->this_mi->mbmi.skip_coeff = 0; + xd->mode_info_context->mbmi.skip_coeff = 0; x->source_variance = get_sby_perpixel_variance(cpi, x, bsize); @@ -598,7 +597,7 @@ static void update_stats(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; MACROBLOCK *const x = &cpi->mb; MACROBLOCKD *const xd = &x->e_mbd; - MODE_INFO *mi = xd->this_mi; + MODE_INFO *mi = xd->mode_info_context; MB_MODE_INFO *const mbmi = &mi->mbmi; if (cm->frame_type != KEY_FRAME) { @@ -877,7 +876,7 @@ static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, // However, at the bottom and right borders of the image the requested size // may not be allowed in which case this code attempts to choose the largest // allowable partition. 
-static void set_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, +static void set_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row, int mi_col) { VP9_COMMON *const cm = &cpi->common; BLOCK_SIZE bsize = cpi->sf.always_this_block_size; @@ -893,7 +892,7 @@ static void set_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, (row8x8_remaining >= MI_BLOCK_SIZE)) { for (block_row = 0; block_row < MI_BLOCK_SIZE; ++block_row) { for (block_col = 0; block_col < MI_BLOCK_SIZE; ++block_col) { - mi_8x8[block_row * mis + block_col]->mbmi.sb_type = bsize; + m[block_row * mis + block_col].mbmi.sb_type = bsize; } } } else { @@ -917,41 +916,36 @@ static void set_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, for (sub_block_col = 0; sub_block_col < bw; ++sub_block_col) { row_index = block_row + sub_block_row; col_index = block_col + sub_block_col; - mi_8x8[row_index * mis + col_index]->mbmi.sb_type = bsize; + m[row_index * mis + col_index].mbmi.sb_type = bsize; } } } } } } -static void copy_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, - MODE_INFO **prev_mi_8x8) { +static void copy_partitioning(VP9_COMP *cpi, MODE_INFO *m, MODE_INFO *p) { VP9_COMMON *const cm = &cpi->common; const int mis = cm->mode_info_stride; int block_row, block_col; - for (block_row = 0; block_row < 8; ++block_row) { for (block_col = 0; block_col < 8; ++block_col) { - MODE_INFO * prev_mi = prev_mi_8x8[block_row * mis + block_col]; - MODE_INFO * mi = mi_8x8[block_row * mis + block_col]; - BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0; - if (mi) - mi->mbmi.sb_type = sb_type; + m[block_row * mis + block_col].mbmi.sb_type = + p[block_row * mis + block_col].mbmi.sb_type; } } } -static void set_block_size(VP9_COMMON * const cm, MODE_INFO **mi_8x8, +static void set_block_size(VP9_COMMON * const cm, MODE_INFO *mi, BLOCK_SIZE bsize, int mis, int mi_row, int mi_col) { int r, c; const int bs = MAX(num_8x8_blocks_wide_lookup[bsize], num_8x8_blocks_high_lookup[bsize]); - MODE_INFO **const mi2 = &mi_8x8[mi_row * mis + mi_col]; + MODE_INFO *const mi2 = &mi[mi_row * mis + mi_col]; for (r = 0; r < bs; r++) for (c = 0; c < bs; c++) if (mi_row + r < cm->mi_rows && mi_col + c < cm->mi_cols) - mi2[r * mis + c]->mbmi.sb_type = bsize; + mi2[r * mis + c].mbmi.sb_type = bsize; } typedef struct { @@ -1097,7 +1091,7 @@ static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m, #else // !PERFORM_RANDOM_PARTITIONING -static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO **m, +static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m, BLOCK_SIZE bsize, int mi_row, int mi_col, int mi_size) { VP9_COMMON * const cm = &cpi->common; @@ -1135,8 +1129,8 @@ static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO **m, } #endif // PERFORM_RANDOM_PARTITIONING -static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, - int mi_row, int mi_col) { +static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row, + int mi_col) { VP9_COMMON * const cm = &cpi->common; MACROBLOCK *x = &cpi->mb; MACROBLOCKD *xd = &cpi->mb.e_mbd; @@ -1174,21 +1168,18 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, LAST_FRAME)]; YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[idx]; YV12_BUFFER_CONFIG *second_ref_fb = NULL; - MB_MODE_INFO * const mbmi = &xd->this_mi->mbmi; setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col, &xd->scale_factor[0]); setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col, &xd->scale_factor[1]); - - mbmi->ref_frame[0] = 
LAST_FRAME; - mbmi->sb_type = BLOCK_64X64; - vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[mbmi->ref_frame[0]], + xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME; + xd->mode_info_context->mbmi.sb_type = BLOCK_64X64; + vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]], &nearest_mv, &near_mv); - mbmi->mv[0] = nearest_mv; + xd->mode_info_context->mbmi.mv[0] = nearest_mv; vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64); - d = xd->plane[0].dst.buf; dp = xd->plane[0].dst.stride; } @@ -1225,24 +1216,24 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, // Now go through the entire structure, splitting every block size until // we get to one that's got a variance lower than our threshold, or we // hit 8x8. - if (!set_vt_partitioning(cpi, &vt, mi_8x8, BLOCK_64X64, mi_row, mi_col, + if (!set_vt_partitioning(cpi, &vt, m, BLOCK_64X64, mi_row, mi_col, 4)) { for (i = 0; i < 4; ++i) { const int x32_idx = ((i & 1) << 2); const int y32_idx = ((i >> 1) << 2); - if (!set_vt_partitioning(cpi, &vt.split[i], mi_8x8, BLOCK_32X32, + if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_32X32, (mi_row + y32_idx), (mi_col + x32_idx), 2)) { for (j = 0; j < 4; ++j) { const int x16_idx = ((j & 1) << 1); const int y16_idx = ((j >> 1) << 1); - if (!set_vt_partitioning(cpi, &vt.split[i].split[j], mi_8x8, + if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m, BLOCK_16X16, (mi_row + y32_idx + y16_idx), (mi_col + x32_idx + x16_idx), 1)) { for (k = 0; k < 4; ++k) { const int x8_idx = (k & 1); const int y8_idx = (k >> 1); - set_block_size(cm, mi_8x8, BLOCK_8X8, mis, + set_block_size(cm, m, BLOCK_8X8, mis, (mi_row + y32_idx + y16_idx + y8_idx), (mi_col + x32_idx + x16_idx + x8_idx)); } @@ -1253,10 +1244,9 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO **mi_8x8, } } -static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8, - TOKENEXTRA **tp, int mi_row, int mi_col, - BLOCK_SIZE bsize, int *rate, int64_t *dist, - int do_recon) { +static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp, + int mi_row, int mi_col, BLOCK_SIZE bsize, + int *rate, int64_t *dist, int do_recon) { VP9_COMMON * const cm = &cpi->common; MACROBLOCK * const x = &cpi->mb; MACROBLOCKD *xd = &cpi->mb.e_mbd; @@ -1282,7 +1272,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8, int64_t chosen_dist = INT_MAX; BLOCK_SIZE sub_subsize = BLOCK_4X4; int splits_below = 0; - BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type; + BLOCK_SIZE bs_type = m->mbmi.sb_type; if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; @@ -1315,8 +1305,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8, splits_below = 1; for (i = 0; i < 4; i++) { int jj = i >> 1, ii = i & 0x01; - MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss]; - if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) { + if (m[jj * bss * mis + ii * bss].mbmi.sb_type >= sub_subsize) { splits_below = 0; } } @@ -1336,7 +1325,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8, none_rate += x->partition_cost[pl][PARTITION_NONE]; restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); - mi_8x8[0]->mbmi.sb_type = bs_type; + m->mbmi.sb_type = bs_type; *(get_sb_partitioning(x, bsize)) = subsize; } } @@ -1407,9 +1396,8 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8, *get_sb_index(xd, subsize) = i; - rd_use_partition(cpi, mi_8x8 + jj * bss * mis + ii * bss, tp, - mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt, - i != 3); + rd_use_partition(cpi, m + jj * bss * mis + 
ii * bss, tp, mi_row + y_idx, + mi_col + x_idx, subsize, &rt, &dt, i != 3); if (rt == INT_MAX || dt == INT_MAX) { last_part_rate = INT_MAX; last_part_dist = INT_MAX; @@ -1490,7 +1478,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO **mi_8x8, // If last_part is better set the partitioning to that... if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist) < RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) { - mi_8x8[0]->mbmi.sb_type = bsize; + m->mbmi.sb_type = bsize; if (bsize >= BLOCK_8X8) *(get_sb_partitioning(x, bsize)) = subsize; chosen_rate = last_part_rate; @@ -1538,9 +1526,9 @@ static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = { // // The min and max are assumed to have been initialized prior to calling this // function so repeat calls can accumulate a min and max of more than one sb64. -static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8, - BLOCK_SIZE * min_block_size, - BLOCK_SIZE * max_block_size ) { +static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO * mi, + BLOCK_SIZE *min_block_size, + BLOCK_SIZE *max_block_size ) { MACROBLOCKD *const xd = &cpi->mb.e_mbd; int sb_width_in_blocks = MI_BLOCK_SIZE; int sb_height_in_blocks = MI_BLOCK_SIZE; @@ -1550,10 +1538,8 @@ static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8, // Check the sb_type for each block that belongs to this region. for (i = 0; i < sb_height_in_blocks; ++i) { for (j = 0; j < sb_width_in_blocks; ++j) { - MODE_INFO * mi = mi_8x8[index+j]; - BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0; - *min_block_size = MIN(*min_block_size, sb_type); - *max_block_size = MAX(*max_block_size, sb_type); + *min_block_size = MIN(*min_block_size, mi[index + j].mbmi.sb_type); + *max_block_size = MAX(*max_block_size, mi[index + j].mbmi.sb_type); } index += xd->mode_info_stride; } @@ -1565,12 +1551,13 @@ static void rd_auto_partition_range(VP9_COMP *cpi, BLOCK_SIZE *min_block_size, BLOCK_SIZE *max_block_size) { MACROBLOCKD *const xd = &cpi->mb.e_mbd; - MODE_INFO ** mi_8x8 = xd->mi_8x8; - const int left_in_image = xd->left_available && mi_8x8[-1]; - const int above_in_image = xd->up_available && - mi_8x8[-xd->mode_info_stride]; - MODE_INFO ** above_sb64_mi_8x8; - MODE_INFO ** left_sb64_mi_8x8; + MODE_INFO *mi = xd->mode_info_context; + MODE_INFO *above_sb64_mi; + MODE_INFO *left_sb64_mi; + const MB_MODE_INFO *const above_mbmi = &mi[-xd->mode_info_stride].mbmi; + const MB_MODE_INFO *const left_mbmi = &mi[-1].mbmi; + const int left_in_image = xd->left_available && left_mbmi->in_image; + const int above_in_image = xd->up_available && above_mbmi->in_image; // Frequency check if (cpi->sf.auto_min_max_partition_count <= 0) { @@ -1578,7 +1565,6 @@ static void rd_auto_partition_range(VP9_COMP *cpi, cpi->sf.auto_min_max_partition_interval; *min_block_size = BLOCK_4X4; *max_block_size = BLOCK_64X64; - return; } else { --cpi->sf.auto_min_max_partition_count; @@ -1595,16 +1581,16 @@ static void rd_auto_partition_range(VP9_COMP *cpi, // Find the min and max partition sizes used in the left SB64 if (left_in_image) { - left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE]; - get_sb_partition_size_range(cpi, left_sb64_mi_8x8, + left_sb64_mi = &mi[-MI_BLOCK_SIZE]; + get_sb_partition_size_range(cpi, left_sb64_mi, min_block_size, max_block_size); } // Find the min and max partition sizes used in the above SB64 taking // the values found for left as a starting point. 
if (above_in_image) { - above_sb64_mi_8x8 = &mi_8x8[-xd->mode_info_stride * MI_BLOCK_SIZE]; - get_sb_partition_size_range(cpi, above_sb64_mi_8x8, + above_sb64_mi = &mi[-xd->mode_info_stride * MI_BLOCK_SIZE]; + get_sb_partition_size_range(cpi, above_sb64_mi, min_block_size, max_block_size); } @@ -2016,18 +2002,18 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp, if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning || cpi->sf.use_one_partition_size_always ) { const int idx_str = cm->mode_info_stride * mi_row + mi_col; - MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str; - MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str; + MODE_INFO *m = cm->mi + idx_str; + MODE_INFO *p = cm->prev_mi + idx_str; cpi->mb.source_variance = UINT_MAX; if (cpi->sf.use_one_partition_size_always) { set_offsets(cpi, mi_row, mi_col, BLOCK_64X64); - set_partitioning(cpi, mi_8x8, mi_row, mi_col); - rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, + set_partitioning(cpi, m, mi_row, mi_col); + rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64, &dummy_rate, &dummy_dist, 1); } else if (cpi->sf.partition_by_variance) { - choose_partitioning(cpi, cm->mi_grid_visible, mi_row, mi_col); - rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, + choose_partitioning(cpi, cm->mi, mi_row, mi_col); + rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64, &dummy_rate, &dummy_dist, 1); } else { if ((cpi->common.current_video_frame @@ -2046,8 +2032,8 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp, rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64, &dummy_rate, &dummy_dist, 1, INT64_MAX); } else { - copy_partitioning(cpi, mi_8x8, prev_mi_8x8); - rd_use_partition(cpi, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, + copy_partitioning(cpi, m, p); + rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64, &dummy_rate, &dummy_dist, 1); } } @@ -2058,6 +2044,7 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp, rd_auto_partition_range(cpi, &cpi->sf.min_partition_size, &cpi->sf.max_partition_size); } + rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64, &dummy_rate, &dummy_dist, 1, INT64_MAX); } @@ -2089,8 +2076,8 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) { setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); - xd->this_mi->mbmi.mode = DC_PRED; - xd->this_mi->mbmi.uv_mode = DC_PRED; + xd->mode_info_context->mbmi.mode = DC_PRED; + xd->mode_info_context->mbmi.uv_mode = DC_PRED; vp9_zero(cpi->y_mode_count) vp9_zero(cpi->y_uv_mode_count) @@ -2166,14 +2153,8 @@ static void encode_frame_internal(VP9_COMP *cpi) { vp9_zero(cm->counts.switchable_interp); vp9_zero(cpi->txfm_stepdown_count); - xd->mi_8x8 = cm->mi_grid_visible; - // required for vp9_frame_init_quantizer - xd->this_mi = - xd->mi_8x8[0] = cm->mi; - xd->mic_stream_ptr = cm->mi; - - xd->last_mi = cm->prev_mi; - + xd->mode_info_context = cm->mi; + xd->prev_mode_info_context = cm->prev_mi; vp9_zero(cpi->NMVcount); vp9_zero(cpi->coef_counts); @@ -2276,12 +2257,12 @@ static int check_dual_ref_flags(VP9_COMP *cpi) { } } -static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) { +static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) { int x, y; for (y = 0; y < ymbs; y++) { for (x = 0; x < xmbs; x++) { - if (!mi_8x8[y * mis + x]->mbmi.skip_coeff) + if (!mi[y * mis + x].mbmi.skip_coeff) return 0; } } @@ -2289,39 +2270,42 @@ static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) { 
return 1; } -static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs, +static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs, TX_SIZE tx_size) { int x, y; for (y = 0; y < ymbs; y++) { for (x = 0; x < xmbs; x++) - mi_8x8[y * mis + x]->mbmi.tx_size = tx_size; + mi[y * mis + x].mbmi.tx_size = tx_size; } } -static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO **mi_8x8, - int mis, TX_SIZE max_tx_size, int bw, int bh, +static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi, int mis, + TX_SIZE max_tx_size, int bw, int bh, int mi_row, int mi_col, BLOCK_SIZE bsize) { - VP9_COMMON * const cm = &cpi->common; - MB_MODE_INFO * const mbmi = &mi_8x8[0]->mbmi; + VP9_COMMON *const cm = &cpi->common; + MB_MODE_INFO *const mbmi = &mi->mbmi; if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; if (mbmi->tx_size > max_tx_size) { + MACROBLOCK * const x = &cpi->mb; + MACROBLOCKD * const xd = &x->e_mbd; const int ymbs = MIN(bh, cm->mi_rows - mi_row); const int xmbs = MIN(bw, cm->mi_cols - mi_col); + xd->mode_info_context = mi; assert(vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) || - get_skip_flag(mi_8x8, mis, ymbs, xmbs)); - set_txfm_flag(mi_8x8, mis, ymbs, xmbs, max_tx_size); + get_skip_flag(mi, mis, ymbs, xmbs)); + set_txfm_flag(mi, mis, ymbs, xmbs, max_tx_size); } } -static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, +static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi, TX_SIZE max_tx_size, int mi_row, int mi_col, BLOCK_SIZE bsize) { - VP9_COMMON * const cm = &cpi->common; + const VP9_COMMON *const cm = &cpi->common; const int mis = cm->mode_info_stride; int bw, bh; const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2; @@ -2329,23 +2313,22 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; - bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type]; - bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type]; + bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type]; + bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type]; if (bw == bs && bh == bs) { - reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, bs, mi_row, + reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, bs, mi_row, mi_col, bsize); } else if (bw == bs && bh < bs) { - reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, bs, hbs, mi_row, - mi_col, bsize); - reset_skip_txfm_size_b(cpi, mi_8x8 + hbs * mis, mis, max_tx_size, bs, hbs, + reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, bs, hbs, mi_row, mi_col, + bsize); + reset_skip_txfm_size_b(cpi, mi + hbs * mis, mis, max_tx_size, bs, hbs, mi_row + hbs, mi_col, bsize); } else if (bw < bs && bh == bs) { - reset_skip_txfm_size_b(cpi, mi_8x8, mis, max_tx_size, hbs, bs, mi_row, - mi_col, bsize); - reset_skip_txfm_size_b(cpi, mi_8x8 + hbs, mis, max_tx_size, hbs, bs, mi_row, + reset_skip_txfm_size_b(cpi, mi, mis, max_tx_size, hbs, bs, mi_row, mi_col, + bsize); + reset_skip_txfm_size_b(cpi, mi + hbs, mis, max_tx_size, hbs, bs, mi_row, mi_col + hbs, bsize); - } else { const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize]; int n; @@ -2356,7 +2339,7 @@ static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, const int mi_dc = hbs * (n & 1); const int mi_dr = hbs * (n >> 1); - reset_skip_txfm_size_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size, + reset_skip_txfm_size_sb(cpi, &mi[mi_dr * mis + mi_dc], max_tx_size, mi_row + mi_dr, mi_col + mi_dc, subsize); } } @@ -2366,15 +2349,12 @@ static 
void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) { VP9_COMMON * const cm = &cpi->common; int mi_row, mi_col; const int mis = cm->mode_info_stride; -// MODE_INFO *mi, *mi_ptr = cm->mi; - MODE_INFO **mi_8x8, **mi_ptr = cm->mi_grid_visible; + MODE_INFO *mi, *mi_ptr = cm->mi; for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) { - mi_8x8 = mi_ptr; - for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) { - reset_skip_txfm_size_sb(cpi, mi_8x8, txfm_max, mi_row, mi_col, - BLOCK_64X64); - } + mi = mi_ptr; + for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8) + reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col, BLOCK_64X64); } } @@ -2631,8 +2611,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, VP9_COMMON * const cm = &cpi->common; MACROBLOCK * const x = &cpi->mb; MACROBLOCKD * const xd = &x->e_mbd; - MODE_INFO **mi_8x8 = xd->mi_8x8; - MODE_INFO *mi = mi_8x8[0]; + MODE_INFO *mi = xd->mode_info_context; MB_MODE_INFO *mbmi = &mi->mbmi; unsigned int segment_id = mbmi->segment_id; const int mis = cm->mode_info_stride; @@ -2711,8 +2690,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, vp9_encode_sb(x, MAX(bsize, BLOCK_8X8)); vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); } else { - int mb_skip_context = xd->left_available ? mi_8x8[-1]->mbmi.skip_coeff : 0; - mb_skip_context += mi_8x8[-mis] ? mi_8x8[-mis]->mbmi.skip_coeff : 0; + int mb_skip_context = xd->left_available ? (mi - 1)->mbmi.skip_coeff : 0; + mb_skip_context += (mi - mis)->mbmi.skip_coeff; mbmi->skip_coeff = 1; if (output_enabled) @@ -2720,6 +2699,10 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, reset_skip_context(xd, MAX(bsize, BLOCK_8X8)); } + // copy skip flag on all mb_mode_info contexts in this SB + // if this was a skip at this txfm size + vp9_set_pred_flag_mbskip(cm, bsize, mi_row, mi_col, mi->mbmi.skip_coeff); + if (output_enabled) { if (cm->tx_mode == TX_MODE_SELECT && mbmi->sb_type >= BLOCK_8X8 && @@ -2748,7 +2731,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, for (y = 0; y < mi_height; y++) for (x = 0; x < mi_width; x++) if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) - mi_8x8[mis * y + x]->mbmi.tx_size = sz; + mi[mis * y + x].mbmi.tx_size = sz; } } } diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c index c5e5dff08..588b77421 100644 --- a/vp9/encoder/vp9_encodeintra.c +++ b/vp9/encoder/vp9_encodeintra.c @@ -16,7 +16,7 @@ #include "vp9/encoder/vp9_encodeintra.h" int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred) { - MB_MODE_INFO * mbmi = &x->e_mbd.mi_8x8[0]->mbmi; + MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi; x->skip_encode = 0; mbmi->mode = DC_PRED; mbmi->ref_frame[0] = INTRA_FRAME; diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c index 13287f4f1..da9a3bda0 100644 --- a/vp9/encoder/vp9_encodemb.c +++ b/vp9/encoder/vp9_encodemb.c @@ -147,7 +147,7 @@ static void optimize_b(MACROBLOCK *mb, TX_SIZE tx_size) { MACROBLOCKD *const xd = &mb->e_mbd; struct macroblockd_plane *pd = &xd->plane[plane]; - const int ref = is_inter_block(&xd->this_mi->mbmi); + const int ref = is_inter_block(&xd->mode_info_context->mbmi); vp9_token_state tokens[1025][2]; unsigned best_index[1025][2]; const int16_t *coeff_ptr = BLOCK_OFFSET(mb->plane[plane].coeff, block); @@ -199,7 +199,7 @@ static void optimize_b(MACROBLOCK *mb, /* Now set up a Viterbi trellis to evaluate 
alternative roundings. */ rdmult = mb->rdmult * err_mult; - if (mb->e_mbd.mi_8x8[0]->mbmi.ref_frame[0] == INTRA_FRAME) + if (mb->e_mbd.mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME) rdmult = (rdmult * 9) >> 4; rddiv = mb->rddiv; /* Initialize the sentinel node of the trellis. */ @@ -385,7 +385,7 @@ static void optimize_init_b(int plane, BLOCK_SIZE bsize, const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; - const MB_MODE_INFO *mbmi = &xd->this_mi->mbmi; + const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi) : mbmi->tx_size; int i; @@ -569,7 +569,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, struct encode_b_args* const args = arg; MACROBLOCK *const x = args->x; MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; struct macroblock_plane *const p = &x->plane[plane]; struct macroblockd_plane *const pd = &xd->plane[plane]; int16_t *coeff = BLOCK_OFFSET(p->coeff, block); @@ -679,7 +679,7 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, scan = get_scan_4x4(tx_type); iscan = get_iscan_4x4(tx_type); if (mbmi->sb_type < BLOCK_8X8 && plane == 0) - mode = xd->this_mi->bmi[block].as_mode; + mode = xd->mode_info_context->bmi[block].as_mode; else mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; diff --git a/vp9/encoder/vp9_encodemv.c b/vp9/encoder/vp9_encodemv.c index ed3a2bb64..997728930 100644 --- a/vp9/encoder/vp9_encodemv.c +++ b/vp9/encoder/vp9_encodemv.c @@ -316,7 +316,7 @@ void vp9_build_nmv_cost_table(int *mvjoint, void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv, int_mv *second_best_ref_mv) { - MODE_INFO *mi = x->e_mbd.mi_8x8[0]; + MODE_INFO *mi = x->e_mbd.mode_info_context; MB_MODE_INFO *const mbmi = &mi->mbmi; MV diff; const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type]; @@ -333,7 +333,7 @@ void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x, diff.col = mi->bmi[i].as_mv[0].as_mv.col - best_ref_mv->as_mv.col; vp9_inc_mv(&diff, &cpi->NMVcount); - if (mi->mbmi.ref_frame[1] > INTRA_FRAME) { + if (x->e_mbd.mode_info_context->mbmi.ref_frame[1] > INTRA_FRAME) { diff.row = mi->bmi[i].as_mv[1].as_mv.row - second_best_ref_mv->as_mv.row; diff.col = mi->bmi[i].as_mv[1].as_mv.col - diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c index 3eaa9f8b6..92485f934 100644 --- a/vp9/encoder/vp9_firstpass.c +++ b/vp9/encoder/vp9_firstpass.c @@ -346,7 +346,7 @@ static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *r // Set up pointers for this macro block recon buffer xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset; - switch (xd->this_mi->mbmi.sb_type) { + switch (xd->mode_info_context->mbmi.sb_type) { case BLOCK_8X8: vp9_mse8x8(x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride, @@ -385,7 +385,7 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x, int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; int n; vp9_variance_fn_ptr_t v_fn_ptr = - cpi->fn_ptr[xd->this_mi->mbmi.sb_type]; + cpi->fn_ptr[xd->mode_info_context->mbmi.sb_type]; int new_mv_mode_penalty = 256; int sr = 0; @@ -402,7 +402,7 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x, further_steps -= sr; // override the default 
variance function to use MSE - switch (xd->this_mi->mbmi.sb_type) { + switch (xd->mode_info_context->mbmi.sb_type) { case BLOCK_8X8: v_fn_ptr.vf = vp9_mse8x8; break; @@ -505,11 +505,8 @@ void vp9_first_pass(VP9_COMP *cpi) { setup_dst_planes(xd, new_yv12, 0, 0); x->partition_info = x->pi; - xd->mi_8x8 = cm->mi_grid_visible; - // required for vp9_frame_init_quantizer - xd->this_mi = - xd->mi_8x8[0] = cm->mi; - xd->mic_stream_ptr = cm->mi; + + xd->mode_info_context = cm->mi; setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); @@ -552,23 +549,23 @@ void vp9_first_pass(VP9_COMP *cpi) { if (mb_col * 2 + 1 < cm->mi_cols) { if (mb_row * 2 + 1 < cm->mi_rows) { - xd->this_mi->mbmi.sb_type = BLOCK_16X16; + xd->mode_info_context->mbmi.sb_type = BLOCK_16X16; } else { - xd->this_mi->mbmi.sb_type = BLOCK_16X8; + xd->mode_info_context->mbmi.sb_type = BLOCK_16X8; } } else { if (mb_row * 2 + 1 < cm->mi_rows) { - xd->this_mi->mbmi.sb_type = BLOCK_8X16; + xd->mode_info_context->mbmi.sb_type = BLOCK_8X16; } else { - xd->this_mi->mbmi.sb_type = BLOCK_8X8; + xd->mode_info_context->mbmi.sb_type = BLOCK_8X8; } } - xd->this_mi->mbmi.ref_frame[0] = INTRA_FRAME; + xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME; set_mi_row_col(cm, xd, mb_row << 1, - 1 << mi_height_log2(xd->this_mi->mbmi.sb_type), + 1 << mi_height_log2(xd->mode_info_context->mbmi.sb_type), mb_col << 1, - 1 << mi_height_log2(xd->this_mi->mbmi.sb_type)); + 1 << mi_height_log2(xd->mode_info_context->mbmi.sb_type)); // do intra 16x16 prediction this_error = vp9_encode_intra(x, use_dc_pred); @@ -664,13 +661,13 @@ void vp9_first_pass(VP9_COMP *cpi) { mv.as_mv.col <<= 3; this_error = motion_error; vp9_set_mbmode_and_mvs(x, NEWMV, &mv); - xd->this_mi->mbmi.tx_size = TX_4X4; - xd->this_mi->mbmi.ref_frame[0] = LAST_FRAME; - xd->this_mi->mbmi.ref_frame[1] = NONE; + xd->mode_info_context->mbmi.tx_size = TX_4X4; + xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME; + xd->mode_info_context->mbmi.ref_frame[1] = NONE; vp9_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, - xd->this_mi->mbmi.sb_type); - vp9_encode_sby(x, xd->this_mi->mbmi.sb_type); + xd->mode_info_context->mbmi.sb_type); + vp9_encode_sby(x, xd->mode_info_context->mbmi.sb_type); sum_mvr += mv.as_mv.row; sum_mvr_abs += abs(mv.as_mv.row); sum_mvc += mv.as_mv.col; diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c index 5a671f201..1baea643d 100644 --- a/vp9/encoder/vp9_mbgraph.c +++ b/vp9/encoder/vp9_mbgraph.c @@ -145,7 +145,7 @@ static int find_best_16x16_intra(VP9_COMP *cpi, for (mode = DC_PRED; mode <= TM_PRED; mode++) { unsigned int err; - xd->this_mi->mbmi.mode = mode; + xd->mode_info_context->mbmi.mode = mode; vp9_predict_intra_block(xd, 0, 2, TX_16X16, mode, x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].dst.buf, xd->plane[0].dst.stride); @@ -253,7 +253,7 @@ static void update_mbgraph_frame_stats(VP9_COMP *cpi, xd->plane[0].dst.stride = buf->y_stride; xd->plane[0].pre[0].stride = buf->y_stride; xd->plane[1].dst.stride = buf->uv_stride; - xd->this_mi = &mi_local; + xd->mode_info_context = &mi_local; mi_local.mbmi.sb_type = BLOCK_16X16; mi_local.mbmi.ref_frame[0] = LAST_FRAME; mi_local.mbmi.ref_frame[1] = NONE; diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c index 136008847..55e4c36de 100644 --- a/vp9/encoder/vp9_mcomp.c +++ b/vp9/encoder/vp9_mcomp.c @@ -1537,7 +1537,7 @@ int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv, int in_what_stride = xd->plane[0].pre[0].stride; int mv_stride = xd->plane[0].pre[0].stride; uint8_t 
*bestaddress; - int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0]; + int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0]; int_mv this_mv; int bestsad = INT_MAX; int r, c; @@ -1625,7 +1625,7 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv, int in_what_stride = xd->plane[0].pre[0].stride; int mv_stride = xd->plane[0].pre[0].stride; uint8_t *bestaddress; - int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0]; + int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0]; int_mv this_mv; unsigned int bestsad = INT_MAX; int r, c; @@ -1747,7 +1747,7 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv, int in_what_stride = xd->plane[0].pre[0].stride; int mv_stride = xd->plane[0].pre[0].stride; uint8_t *bestaddress; - int_mv *best_mv = &x->e_mbd.mi_8x8[0]->bmi[n].as_mv[0]; + int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0]; int_mv this_mv; unsigned int bestsad = INT_MAX; int r, c; diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c index 1ba304904..d35b739fb 100644 --- a/vp9/encoder/vp9_onyx_if.c +++ b/vp9/encoder/vp9_onyx_if.c @@ -527,15 +527,15 @@ static void print_seg_map(VP9_COMP *cpi) { static void update_reference_segmentation_map(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; int row, col; - MODE_INFO **mi_8x8, **mi_8x8_ptr = cm->mi_grid_visible; + MODE_INFO *mi, *mi_ptr = cm->mi; uint8_t *cache_ptr = cm->last_frame_seg_map, *cache; for (row = 0; row < cm->mi_rows; row++) { - mi_8x8 = mi_8x8_ptr; + mi = mi_ptr; cache = cache_ptr; - for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++) - cache[0] = mi_8x8[0]->mbmi.segment_id; - mi_8x8_ptr += cm->mode_info_stride; + for (col = 0; col < cm->mi_cols; col++, mi++, cache++) + cache[0] = mi->mbmi.segment_id; + mi_ptr += cm->mode_info_stride; cache_ptr += cm->mi_cols; } } @@ -3528,15 +3528,11 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, if (cm->show_frame) { // current mip will be the prev_mip for the next frame MODE_INFO *temp = cm->prev_mip; - MODE_INFO **temp2 = cm->prev_mi_grid_base; cm->prev_mip = cm->mip; cm->mip = temp; - cm->prev_mi_grid_base = cm->mi_grid_base; - cm->mi_grid_base = temp2; // update the upper left visible macroblock ptrs cm->mi = cm->mip + cm->mode_info_stride + 1; - cm->mi_grid_visible = cm->mi_grid_base + cm->mode_info_stride + 1; // Don't increment frame counters if this was an altref buffer // update not a real frame @@ -3545,9 +3541,8 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, } // restore prev_mi cm->prev_mi = cm->prev_mip + cm->mode_info_stride + 1; - cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mode_info_stride + 1; - #if 0 +#if 0 { char filename[512]; FILE *recon_file; diff --git a/vp9/encoder/vp9_quantize.c b/vp9/encoder/vp9_quantize.c index 81e14265d..96abeff38 100644 --- a/vp9/encoder/vp9_quantize.c +++ b/vp9/encoder/vp9_quantize.c @@ -278,7 +278,7 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) { int i; MACROBLOCKD *xd = &x->e_mbd; int zbin_extra; - int segment_id = xd->this_mi->mbmi.segment_id; + int segment_id = xd->mode_info_context->mbmi.segment_id; const int qindex = vp9_get_qindex(&cpi->common.seg, segment_id, cpi->common.base_qindex); diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c index e219442c1..647265bf6 100644 --- a/vp9/encoder/vp9_rdopt.c +++ b/vp9/encoder/vp9_rdopt.c @@ -474,7 +474,7 @@ static INLINE int cost_coeffs(MACROBLOCK *mb, TX_SIZE tx_size, const int16_t *scan, const int16_t *nb) { MACROBLOCKD *const xd = &mb->e_mbd; - MB_MODE_INFO *mbmi = 
&xd->this_mi->mbmi; + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; struct macroblockd_plane *pd = &xd->plane[plane]; const PLANE_TYPE type = pd->plane_type; const int16_t *band_count = &band_counts[tx_size][1]; @@ -567,7 +567,7 @@ static void dist_block(int plane, int block, TX_SIZE tx_size, void *arg) { args->sse += this_sse >> shift; if (x->skip_encode && - xd->this_mi->mbmi.ref_frame[0] == INTRA_FRAME) { + xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME) { // TODO(jingning): tune the model to better capture the distortion. int64_t p = (pd->dequant[1] * pd->dequant[1] * (1 << ss_txfrm_size)) >> shift; @@ -610,7 +610,7 @@ static void block_yrd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, return; } - if (!is_inter_block(&xd->this_mi->mbmi)) + if (!is_inter_block(&xd->mode_info_context->mbmi)) vp9_encode_block_intra(plane, block, plane_bsize, tx_size, &encode_args); else vp9_xform_quant(plane, block, plane_bsize, tx_size, &encode_args); @@ -634,7 +634,7 @@ static void txfm_rd_in_plane(MACROBLOCK *x, num_4x4_blocks_wide, num_4x4_blocks_high, 0, 0, 0, ref_best_rd, 0 }; if (plane == 0) - xd->this_mi->mbmi.tx_size = tx_size; + xd->mode_info_context->mbmi.tx_size = tx_size; switch (tx_size) { case TX_4X4: @@ -688,7 +688,7 @@ static void choose_largest_txfm_size(VP9_COMP *cpi, MACROBLOCK *x, const TX_SIZE max_txfm_size = max_txsize_lookup[bs]; VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; if (max_txfm_size == TX_32X32 && (cm->tx_mode == ALLOW_32X32 || cm->tx_mode == TX_MODE_SELECT)) { @@ -718,13 +718,13 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, const TX_SIZE max_tx_size = max_txsize_lookup[bs]; VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd); int64_t rd[TX_SIZES][2]; int n, m; int s0, s1; - const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs, xd->this_mi); + const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs); for (n = TX_4X4; n <= max_tx_size; n++) { r[n][1] = r[n][0]; @@ -821,7 +821,7 @@ static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x, const TX_SIZE max_txfm_size = max_txsize_lookup[bs]; VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd); int64_t rd[TX_SIZES][2]; int n, m; @@ -829,7 +829,7 @@ static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x, double scale_rd[TX_SIZES] = {1.73, 1.44, 1.20, 1.00}; // double scale_r[TX_SIZES] = {2.82, 2.00, 1.41, 1.00}; - const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs, xd->this_mi); + const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs); // for (n = TX_4X4; n <= max_txfm_size; n++) // r[n][0] = (r[n][0] * scale_r[n]); @@ -914,7 +914,7 @@ static void super_block_yrd(VP9_COMP *cpi, int r[TX_SIZES][2], s[TX_SIZES]; int64_t d[TX_SIZES], sse[TX_SIZES]; MACROBLOCKD *xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; assert(bs == mbmi->sb_type); if (mbmi->ref_frame[0] > INTRA_FRAME) @@ -1021,7 +1021,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, 
int ib, vpx_memcpy(ta, a, sizeof(ta)); vpx_memcpy(tl, l, sizeof(tl)); - xd->this_mi->mbmi.tx_size = TX_4X4; + xd->mode_info_context->mbmi.tx_size = TX_4X4; for (mode = DC_PRED; mode <= TM_PRED; ++mode) { int64_t this_rd; @@ -1051,7 +1051,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, uint8_t *dst = dst_init + idx * 4 + idy * 4 * dst_stride; block = ib + idy * 2 + idx; - xd->this_mi->bmi[block].as_mode = mode; + xd->mode_info_context->bmi[block].as_mode = mode; src_diff = raster_block_offset_int16(BLOCK_8X8, block, p->src_diff); coeff = BLOCK_OFFSET(x->plane[0].coeff, block); vp9_predict_intra_block(xd, block, 1, @@ -1127,10 +1127,7 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi, int64_t best_rd) { int i, j; MACROBLOCKD *const xd = &mb->e_mbd; - MODE_INFO *const mic = xd->this_mi; - const MODE_INFO *above_mi = xd->mi_8x8[-xd->mode_info_stride]; - const MODE_INFO *left_mi = xd->mi_8x8[-1]; - const BLOCK_SIZE bsize = xd->this_mi->mbmi.sb_type; + const BLOCK_SIZE bsize = xd->mode_info_context->mbmi.sb_type; const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; int idx, idy; @@ -1140,6 +1137,7 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi, int64_t total_rd = 0; ENTROPY_CONTEXT t_above[4], t_left[4]; int *bmode_costs; + MODE_INFO *const mic = xd->mode_info_context; vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above)); vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left)); @@ -1149,15 +1147,15 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi, // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block. for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { + const int mis = xd->mode_info_stride; MB_PREDICTION_MODE best_mode = DC_PRED; int r = INT_MAX, ry = INT_MAX; int64_t d = INT64_MAX, this_rd = INT64_MAX; i = idy * 2 + idx; if (cpi->common.frame_type == KEY_FRAME) { - const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, i); + const MB_PREDICTION_MODE A = above_block_mode(mic, i, mis); const MB_PREDICTION_MODE L = (xd->left_available || idx) ? 
- left_block_mode(mic, left_mi, i) : - DC_PRED; + left_block_mode(mic, i) : DC_PRED; bmode_costs = mb->y_mode_costs[A][L]; } @@ -1187,7 +1185,7 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP * const cpi, *rate = cost; *rate_y = tot_rate_y; *distortion = total_distortion; - mic->mbmi.mode = mic->bmi[3].as_mode; + xd->mode_info_context->mbmi.mode = mic->bmi[3].as_mode; return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion); } @@ -1201,7 +1199,7 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, MB_PREDICTION_MODE mode; MB_PREDICTION_MODE mode_selected = DC_PRED; MACROBLOCKD *const xd = &x->e_mbd; - MODE_INFO *const mic = xd->this_mi; + MODE_INFO *const mic = xd->mode_info_context; int this_rate, this_rate_tokenonly, s; int64_t this_distortion, this_rd; TX_SIZE best_tx = TX_4X4; @@ -1215,16 +1213,15 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, /* Y Search for intra prediction mode */ for (mode = DC_PRED; mode <= TM_PRED; mode++) { int64_t local_tx_cache[TX_MODES]; - MODE_INFO *above_mi = xd->mi_8x8[-xd->mode_info_stride]; - MODE_INFO *left_mi = xd->mi_8x8[-1]; + const int mis = xd->mode_info_stride; if (!(cpi->sf.intra_y_mode_mask & (1 << mode))) continue; if (cpi->common.frame_type == KEY_FRAME) { - const MB_PREDICTION_MODE A = above_block_mode(mic, above_mi, 0); + const MB_PREDICTION_MODE A = above_block_mode(mic, 0, mis); const MB_PREDICTION_MODE L = xd->left_available ? - left_block_mode(mic, left_mi, 0) : DC_PRED; + left_block_mode(mic, 0) : DC_PRED; bmode_costs = x->y_mode_costs[A][L]; } @@ -1271,7 +1268,7 @@ static void super_block_uvrd(VP9_COMMON *const cm, MACROBLOCK *x, int64_t *sse, BLOCK_SIZE bsize, int64_t ref_best_rd) { MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; TX_SIZE uv_txfm_size = get_uv_tx_size(mbmi); int plane; int pnrate = 0, pnskip = 1; @@ -1326,8 +1323,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x, if (!(cpi->sf.intra_uv_mode_mask & (1 << mode))) continue; - x->e_mbd.mi_8x8[0]->mbmi.uv_mode = mode; - + x->e_mbd.mode_info_context->mbmi.uv_mode = mode; super_block_uvrd(&cpi->common, x, &this_rate_tokenonly, &this_distortion, &s, &this_sse, bsize, best_rd); if (this_rate_tokenonly == INT_MAX) @@ -1346,7 +1342,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x, } } - x->e_mbd.mi_8x8[0]->mbmi.uv_mode = mode_selected; + x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected; return best_rd; } @@ -1358,7 +1354,7 @@ static int64_t rd_sbuv_dcpred(VP9_COMP *cpi, MACROBLOCK *x, int64_t this_rd; int64_t this_sse; - x->e_mbd.mi_8x8[0]->mbmi.uv_mode = DC_PRED; + x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; super_block_uvrd(&cpi->common, x, rate_tokenonly, distortion, skippable, &this_sse, bsize, INT64_MAX); *rate = *rate_tokenonly + @@ -1386,14 +1382,14 @@ static void choose_intra_uv_mode(VP9_COMP *cpi, BLOCK_SIZE bsize, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize); } - *mode_uv = x->e_mbd.mi_8x8[0]->mbmi.uv_mode; + *mode_uv = x->e_mbd.mode_info_context->mbmi.uv_mode; } static int cost_mv_ref(VP9_COMP *cpi, MB_PREDICTION_MODE mode, int mode_context) { MACROBLOCK *const x = &cpi->mb; MACROBLOCKD *const xd = &x->e_mbd; - const int segment_id = xd->this_mi->mbmi.segment_id; + const int segment_id = xd->mode_info_context->mbmi.segment_id; // Don't account for mode here if segment skip is enabled. 
if (!vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) { @@ -1405,8 +1401,8 @@ static int cost_mv_ref(VP9_COMP *cpi, MB_PREDICTION_MODE mode, } void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) { - x->e_mbd.mi_8x8[0]->mbmi.mode = mb; - x->e_mbd.mi_8x8[0]->mbmi.mv[0].as_int = mv->as_int; + x->e_mbd.mode_info_context->mbmi.mode = mb; + x->e_mbd.mode_info_context->mbmi.mv[0].as_int = mv->as_int; } static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, @@ -1429,7 +1425,7 @@ static int labels2mode(MACROBLOCK *x, int i, int_mv *second_best_ref_mv, int *mvjcost, int *mvcost[2], VP9_COMP *cpi) { MACROBLOCKD *const xd = &x->e_mbd; - MODE_INFO *const mic = xd->this_mi; + MODE_INFO *const mic = xd->mode_info_context; MB_MODE_INFO *mbmi = &mic->mbmi; int cost = 0, thismvcost = 0; int idx, idy; @@ -1503,7 +1499,7 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi, int k; MACROBLOCKD *xd = &x->e_mbd; struct macroblockd_plane *const pd = &xd->plane[0]; - MODE_INFO *const mi = xd->this_mi; + MODE_INFO *const mi = xd->mode_info_context; const BLOCK_SIZE bsize = mi->mbmi.sb_type; const int width = plane_block_width(bsize, pd); const int height = plane_block_height(bsize, pd); @@ -1603,7 +1599,7 @@ static INLINE int mv_check_bounds(MACROBLOCK *x, int_mv *mv) { } static INLINE void mi_buf_shift(MACROBLOCK *x, int i) { - MB_MODE_INFO *const mbmi = &x->e_mbd.mi_8x8[0]->mbmi; + MB_MODE_INFO *const mbmi = &x->e_mbd.mode_info_context->mbmi; struct macroblock_plane *const p = &x->plane[0]; struct macroblockd_plane *const pd = &x->e_mbd.plane[0]; @@ -1619,7 +1615,7 @@ static INLINE void mi_buf_shift(MACROBLOCK *x, int i) { static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src, struct buf_2d orig_pre[2]) { - MB_MODE_INFO *mbmi = &x->e_mbd.mi_8x8[0]->mbmi; + MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi; x->plane[0].src = orig_src; x->e_mbd.plane[0].pre[0] = orig_pre[0]; if (mbmi->ref_frame[1]) @@ -1633,7 +1629,7 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, int i, j, br = 0, idx, idy; int64_t bd = 0, block_sse = 0; MB_PREDICTION_MODE this_mode; - MODE_INFO *mi = x->e_mbd.mi_8x8[0]; + MODE_INFO *mi = x->e_mbd.mode_info_context; MB_MODE_INFO *const mbmi = &mi->mbmi; const int label_count = 4; int64_t this_segment_rd = 0; @@ -1751,9 +1747,11 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, if (cpi->compressor_speed) { // use previous block's result as next block's MV predictor. 
if (i > 0) { - bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int; + bsi->mvp.as_int = + x->e_mbd.mode_info_context->bmi[i - 1].as_mv[0].as_int; if (i == 2) - bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int; + bsi->mvp.as_int = + x->e_mbd.mode_info_context->bmi[i - 2].as_mv[0].as_int; } } if (i == 0) @@ -1811,11 +1809,13 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, if (thissme < bestsme) { bestsme = thissme; - mode_mv[NEWMV].as_int = mi->bmi[i].as_mv[0].as_int; + mode_mv[NEWMV].as_int = + x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int; } else { /* The full search result is actually worse so re-instate the * previous best vector */ - mi->bmi[i].as_mv[0].as_int = mode_mv[NEWMV].as_int; + x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int = + mode_mv[NEWMV].as_int; } } @@ -2016,7 +2016,7 @@ static int64_t rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x, int i; BEST_SEG_INFO *bsi = bsi_buf + filter_idx; MACROBLOCKD *xd = &x->e_mbd; - MODE_INFO *mi = xd->this_mi; + MODE_INFO *mi = xd->mode_info_context; MB_MODE_INFO *mbmi = &mi->mbmi; int mode_idx; @@ -2062,7 +2062,7 @@ static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer, int ref_y_stride, int ref_frame, BLOCK_SIZE block_size ) { MACROBLOCKD *xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; int_mv this_mv; int i; int zero_seen = 0; @@ -2182,7 +2182,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, // restored if we decide to encode this way ctx->skip = x->skip; ctx->best_mode_index = mode_index; - ctx->mic = *xd->this_mi; + ctx->mic = *xd->mode_info_context; if (partition) ctx->partition_info = *partition; @@ -2238,7 +2238,7 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x, VP9_COMMON *cm = &cpi->common; YV12_BUFFER_CONFIG *yv12 = &cm->yv12_fb[cpi->common.ref_frame_map[idx]]; MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; // set up scaling factors scale[frame_type] = cpi->common.active_ref_scale[frame_type - 1]; @@ -2256,8 +2256,8 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x, &scale[frame_type], &scale[frame_type]); // Gets an initial list of candidate vectors from neighbours and orders them - vp9_find_mv_refs(&cpi->common, xd, xd->this_mi, - xd->last_mi, + vp9_find_mv_refs(&cpi->common, xd, xd->mode_info_context, + xd->prev_mode_info_context, frame_type, mbmi->ref_mvs[frame_type], mi_row, mi_col); @@ -2285,7 +2285,7 @@ static YV12_BUFFER_CONFIG *get_scaled_ref_frame(VP9_COMP *cpi, int ref_frame) { static INLINE int get_switchable_rate(const MACROBLOCK *x) { const MACROBLOCKD *const xd = &x->e_mbd; - const MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; + const MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; const int ctx = vp9_get_pred_context_switchable_interp(xd); return SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs[ctx][mbmi->interp_filter]; @@ -2297,7 +2297,7 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, int_mv *tmp_mv, int *rate_mv) { MACROBLOCKD *xd = &x->e_mbd; VP9_COMMON *cm = &cpi->common; - MB_MODE_INFO *mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}}; int bestsme = INT_MAX; int further_steps, step_param; @@ -2425,7 +2425,7 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, int *rate_mv) { int pw = 4 << b_width_log2(bsize), ph = 4 
<< b_height_log2(bsize); MACROBLOCKD *xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; int refs[2] = { mbmi->ref_frame[0], (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) }; int_mv ref_mv[2]; @@ -2590,7 +2590,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, const int64_t ref_best_rd) { VP9_COMMON *cm = &cpi->common; MACROBLOCKD *xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; const int is_comp_pred = (mbmi->ref_frame[1] > 0); const int num_refs = is_comp_pred ? 2 : 1; const int this_mode = mbmi->mode; @@ -2636,7 +2636,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv); *rate2 += rate_mv; frame_mv[refs[0]].as_int = - xd->this_mi->bmi[0].as_mv[0].as_int = tmp_mv.as_int; + xd->mode_info_context->bmi[0].as_mv[0].as_int = tmp_mv.as_int; single_newmv[refs[0]].as_int = tmp_mv.as_int; } } @@ -2991,11 +2991,11 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0; - int y_skip = 0, uv_skip = 0; + int y_skip = 0, uv_skip; int64_t dist_y = 0, dist_uv = 0, tx_cache[TX_MODES] = { 0 }; x->skip_encode = 0; ctx->skip = 0; - xd->this_mi->mbmi.ref_frame[0] = INTRA_FRAME; + xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME; if (bsize >= BLOCK_8X8) { if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y, &y_skip, bsize, tx_cache, @@ -3031,7 +3031,7 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, ctx->tx_rd_diff[i] = tx_cache[i] - tx_cache[cm->tx_mode]; } - ctx->mic = *xd->this_mi; + ctx->mic = *xd->mode_info_context; } int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, @@ -3043,12 +3043,12 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, int64_t best_rd_so_far) { VP9_COMMON *cm = &cpi->common; MACROBLOCKD *xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; const struct segmentation *seg = &cm->seg; const BLOCK_SIZE block_size = get_plane_block_size(bsize, &xd->plane[0]); MB_PREDICTION_MODE this_mode; MV_REFERENCE_FRAME ref_frame, second_ref_frame; - unsigned char segment_id = mbmi->segment_id; + unsigned char segment_id = xd->mode_info_context->mbmi.segment_id; int comp_pred, i; int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES]; struct buf_2d yv12_mb[4][MAX_MB_PLANE]; @@ -3458,7 +3458,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, cpi->rd_threshes[bsize][THR_NEWA]; this_rd_thresh = (ref_frame == GOLDEN_FRAME) ? 
cpi->rd_threshes[bsize][THR_NEWG] : this_rd_thresh; - xd->this_mi->mbmi.tx_size = TX_4X4; + xd->mode_info_context->mbmi.tx_size = TX_4X4; cpi->rd_filter_cache[SWITCHABLE_FILTERS] = INT64_MAX; if (cm->mcomp_filter_type != BILINEAR) { @@ -3514,7 +3514,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, tmp_best_mbmode = *mbmi; tmp_best_partition = *x->partition_info; for (i = 0; i < 4; i++) - tmp_best_bmodes[i] = xd->this_mi->bmi[i]; + tmp_best_bmodes[i] = xd->mode_info_context->bmi[i]; pred_exists = 1; if (switchable_filter_index == 0 && cpi->sf.use_rd_breakout && @@ -3566,7 +3566,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, *mbmi = tmp_best_mbmode; *x->partition_info = tmp_best_partition; for (i = 0; i < 4; i++) - xd->this_mi->bmi[i] = tmp_best_bmodes[i]; + xd->mode_info_context->bmi[i] = tmp_best_bmodes[i]; } rate2 += rate; @@ -3690,20 +3690,20 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, } // Keep record of best intra rd - if (xd->this_mi->mbmi.ref_frame[0] == INTRA_FRAME && - is_intra_mode(xd->this_mi->mbmi.mode) && + if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME && + is_intra_mode(xd->mode_info_context->mbmi.mode) && this_rd < best_intra_rd) { best_intra_rd = this_rd; - best_intra_mode = xd->this_mi->mbmi.mode; + best_intra_mode = xd->mode_info_context->mbmi.mode; } // Keep record of best inter rd with single reference - if (xd->this_mi->mbmi.ref_frame[0] > INTRA_FRAME && - xd->this_mi->mbmi.ref_frame[1] == NONE && + if (xd->mode_info_context->mbmi.ref_frame[0] > INTRA_FRAME && + xd->mode_info_context->mbmi.ref_frame[1] == NONE && !mode_excluded && this_rd < best_inter_rd) { best_inter_rd = this_rd; best_inter_ref_frame = ref_frame; - // best_inter_mode = xd->this_mi->mbmi.mode; + // best_inter_mode = xd->mode_info_context->mbmi.mode; } if (!disable_skip && ref_frame == INTRA_FRAME) { @@ -3747,7 +3747,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, if (this_mode == I4X4_PRED || this_mode == SPLITMV) for (i = 0; i < 4; i++) - best_bmodes[i] = xd->this_mi->bmi[i]; + best_bmodes[i] = xd->mode_info_context->bmi[i]; // TODO(debargha): enhance this test with a better distortion prediction // based on qp, activity mask and history @@ -3926,24 +3926,24 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, if (best_mbmode.ref_frame[0] == INTRA_FRAME && best_mbmode.sb_type < BLOCK_8X8) { for (i = 0; i < 4; i++) - xd->this_mi->bmi[i].as_mode = best_bmodes[i].as_mode; + xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode; } if (best_mbmode.ref_frame[0] != INTRA_FRAME && best_mbmode.sb_type < BLOCK_8X8) { for (i = 0; i < 4; i++) - xd->this_mi->bmi[i].as_mv[0].as_int = + xd->mode_info_context->bmi[i].as_mv[0].as_int = best_bmodes[i].as_mv[0].as_int; if (mbmi->ref_frame[1] > 0) for (i = 0; i < 4; i++) - xd->this_mi->bmi[i].as_mv[1].as_int = + xd->mode_info_context->bmi[i].as_mv[1].as_int = best_bmodes[i].as_mv[1].as_int; *x->partition_info = best_partition; - mbmi->mv[0].as_int = xd->this_mi->bmi[3].as_mv[0].as_int; - mbmi->mv[1].as_int = xd->this_mi->bmi[3].as_mv[1].as_int; + mbmi->mv[0].as_int = xd->mode_info_context->bmi[3].as_mv[0].as_int; + mbmi->mv[1].as_int = xd->mode_info_context->bmi[3].as_mv[1].as_int; } for (i = 0; i < NB_PREDICTION_TYPES; ++i) { diff --git a/vp9/encoder/vp9_segmentation.c b/vp9/encoder/vp9_segmentation.c index 10655e8a7..0a6d2abe2 100644 --- a/vp9/encoder/vp9_segmentation.c +++ b/vp9/encoder/vp9_segmentation.c @@ -117,7 +117,7 @@ static int 
cost_segmap(int *segcounts, vp9_prob *probs) { return cost; } -static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8, +static void count_segs(VP9_COMP *cpi, MODE_INFO *mi, int *no_pred_segcounts, int (*temporal_predictor_count)[2], int *t_unpred_seg_counts, @@ -129,8 +129,8 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8, if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; - segment_id = mi_8x8[0]->mbmi.segment_id; - + segment_id = mi->mbmi.segment_id; + xd->mode_info_context = mi; set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw); // Count the number of hits on each segment with no prediction @@ -138,7 +138,7 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8, // Temporal prediction not allowed on key frames if (cm->frame_type != KEY_FRAME) { - const BLOCK_SIZE bsize = mi_8x8[0]->mbmi.sb_type; + const BLOCK_SIZE bsize = mi->mbmi.sb_type; // Test to see if the segment id matches the predicted value. const int pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col); @@ -147,7 +147,7 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8, // Store the prediction status for this mb and update counts // as appropriate - vp9_set_pred_flag_seg_id(xd, pred_flag); + vp9_set_pred_flag_seg_id(cm, bsize, mi_row, mi_col, pred_flag); temporal_predictor_count[pred_context][pred_flag]++; if (!pred_flag) @@ -156,7 +156,7 @@ static void count_segs(VP9_COMP *cpi, MODE_INFO **mi_8x8, } } -static void count_segs_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, +static void count_segs_sb(VP9_COMP *cpi, MODE_INFO *mi, int *no_pred_segcounts, int (*temporal_predictor_count)[2], int *t_unpred_seg_counts, @@ -170,22 +170,21 @@ static void count_segs_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; - bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type]; - bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type]; + bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type]; + bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type]; if (bw == bs && bh == bs) { - count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count, + count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts, bs, bs, mi_row, mi_col); } else if (bw == bs && bh < bs) { - count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count, + count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts, bs, hbs, mi_row, mi_col); - count_segs(cpi, mi_8x8 + hbs * mis, no_pred_segcounts, - temporal_predictor_count, t_unpred_seg_counts, bs, hbs, - mi_row + hbs, mi_col); + count_segs(cpi, mi + hbs * mis, no_pred_segcounts, temporal_predictor_count, + t_unpred_seg_counts, bs, hbs, mi_row + hbs, mi_col); } else if (bw < bs && bh == bs) { - count_segs(cpi, mi_8x8, no_pred_segcounts, temporal_predictor_count, + count_segs(cpi, mi, no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row, mi_col); - count_segs(cpi, mi_8x8 + hbs, no_pred_segcounts, temporal_predictor_count, + count_segs(cpi, mi + hbs, no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row, mi_col + hbs); } else { const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize]; @@ -197,7 +196,7 @@ static void count_segs_sb(VP9_COMP *cpi, MODE_INFO **mi_8x8, const int mi_dc = hbs * (n & 1); const int mi_dr = hbs * (n >> 1); - count_segs_sb(cpi, &mi_8x8[mi_dr * mis + mi_dc], + count_segs_sb(cpi, &mi[mi_dr * mis + mi_dc], no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts, mi_row 
+ mi_dr, mi_col + mi_dc, subsize); @@ -223,7 +222,7 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) { vp9_prob t_nopred_prob[PREDICTION_PROBS]; const int mis = cm->mode_info_stride; - MODE_INFO **mi_ptr, **mi; + MODE_INFO *mi_ptr, *mi; // Set default state for the segment tree probabilities and the // temporal coding probabilities @@ -234,7 +233,7 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) { // predicts this one for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) { vp9_get_tile_col_offsets(cm, tile_col); - mi_ptr = cm->mi_grid_visible + cm->cur_tile_mi_col_start; + mi_ptr = cm->mi + cm->cur_tile_mi_col_start; for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) { mi = mi_ptr; diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c index 63826eea5..3052e8f70 100644 --- a/vp9/encoder/vp9_temporal_filter.c +++ b/vp9/encoder/vp9_temporal_filter.c @@ -153,7 +153,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, /*cpi->sf.search_method == HEX*/ // TODO Check that the 16x16 vf & sdf are selected here // Ignore mv costing by sending NULL pointer instead of cost arrays - ref_mv = &x->e_mbd.mi_8x8[0]->bmi[0].as_mv[0]; + ref_mv = &x->e_mbd.mode_info_context->bmi[0].as_mv[0]; bestsme = vp9_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1, &cpi->fn_ptr[BLOCK_16X16], @@ -245,8 +245,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, if (cpi->frames[frame] == NULL) continue; - mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row = 0; - mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col = 0; + mbd->mode_info_context->bmi[0].as_mv[0].as_mv.row = 0; + mbd->mode_info_context->bmi[0].as_mv[0].as_mv.col = 0; if (frame == alt_ref_index) { filter_weight = 2; @@ -279,8 +279,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, cpi->frames[frame]->u_buffer + mb_uv_offset, cpi->frames[frame]->v_buffer + mb_uv_offset, cpi->frames[frame]->y_stride, - mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row, - mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col, + mbd->mode_info_context->bmi[0].as_mv[0].as_mv.row, + mbd->mode_info_context->bmi[0].as_mv[0].as_mv.col, predictor); // Apply the filter (YUV) diff --git a/vp9/encoder/vp9_tokenize.c b/vp9/encoder/vp9_tokenize.c index 0c9bf9db2..03bf14716 100644 --- a/vp9/encoder/vp9_tokenize.c +++ b/vp9/encoder/vp9_tokenize.c @@ -114,7 +114,7 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize, MACROBLOCKD *xd = args->xd; TOKENEXTRA **tp = args->tp; struct macroblockd_plane *pd = &xd->plane[plane]; - MB_MODE_INFO *mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; int pt; /* near block/prev token context index */ int c = 0, rc = 0; TOKENEXTRA *t = *tp; /* store tokens starting here */ @@ -210,7 +210,7 @@ void vp9_tokenize_sb(VP9_COMP *cpi, TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize) { VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &cpi->mb.e_mbd; - MB_MODE_INFO *const mbmi = &xd->this_mi->mbmi; + MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; TOKENEXTRA *t_backup = *t; const int mb_skip_context = vp9_get_pred_context_mbskip(xd); const int skip_inc = !vp9_segfeature_active(&cm->seg, mbmi->segment_id, -- cgit v1.2.3
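
For orientation, and not part of the change itself: the hunks above repeatedly replace accesses through a MODE_INFO ** grid (mi_8x8[offset]->mbmi...) with direct indexing into a flat MODE_INFO array (mi[offset].mbmi..., where offset = mi_row * mode_info_stride + mi_col). The stand-alone C sketch below illustrates that addressing pattern under simplified assumptions; ModeInfoSketch, stamp_block_size and the numeric sb_type value are illustrative stand-ins, not libvpx definitions, and the real allocator's border handling is ignored.

/* Stand-alone sketch of the mi[row * stride + col] addressing used above.
 * ModeInfoSketch is a simplified stand-in, not libvpx's MODE_INFO. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int sb_type;   /* size of the block covering this 8x8 cell */
  int in_image;  /* 1 if the cell lies inside the coded frame */
} ModeInfoSketch;

/* Stamp a block size over a bs x bs cell region, clipped to the frame,
 * mirroring the loop shape of set_block_size() in the diff above. */
static void stamp_block_size(ModeInfoSketch *mi, int stride,
                             int mi_rows, int mi_cols,
                             int mi_row, int mi_col, int bs, int sb_type) {
  ModeInfoSketch *base = &mi[mi_row * stride + mi_col];
  int r, c;
  for (r = 0; r < bs; r++)
    for (c = 0; c < bs; c++)
      if (mi_row + r < mi_rows && mi_col + c < mi_cols)
        base[r * stride + c].sb_type = sb_type;
}

int main(void) {
  const int mi_rows = 8, mi_cols = 8, stride = mi_cols;
  const int mi_row = 4, mi_col = 4;
  ModeInfoSketch *mi = calloc((size_t)mi_rows * stride, sizeof(*mi));
  ModeInfoSketch *cur;
  int i, left_in_image;

  if (!mi) return 1;

  /* In this sketch every cell is inside the frame; the reverted code leaves
   * in_image at 0 for cells outside the visible area. */
  for (i = 0; i < mi_rows * stride; i++)
    mi[i].in_image = 1;

  /* A 64x64 superblock covers 8x8 cells of 8x8 pixels each; 12 is an
   * illustrative stand-in for BLOCK_64X64. */
  stamp_block_size(mi, stride, mi_rows, mi_cols, 0, 0, 8, 12);

  /* Neighbour lookup in the style of rd_auto_partition_range(): check
   * availability before reading mi[-1] (left) or mi[-stride] (above). */
  cur = &mi[mi_row * stride + mi_col];
  left_in_image = (mi_col > 0) && cur[-1].in_image;
  printf("left neighbour: in_image=%d sb_type=%d\n",
         left_in_image, left_in_image ? cur[-1].sb_type : -1);

  free(mi);
  return 0;
}

Read with this pattern in mind, the update_state() and set_partitioning() hunks above write the chosen mode data into every 8x8 cell a block covers, which is why the grid version's NULL-entry checks (for example in copy_partitioning() and get_sb_partition_size_range()) are deleted, and why the in_image flag guards neighbour reads such as mi[-1] and mi[-mode_info_stride] in rd_auto_partition_range().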