From 4f55c5618afb8ef860744c870ac4e33b5583be22 Mon Sep 17 00:00:00 2001 From: John Koleszar Date: Thu, 25 Apr 2013 11:15:38 -0700 Subject: Remove destination pointers from BLOCKD Access these members from MACROBLOCKD instead. Change-Id: I7907230dd473ff12ebe182b9280d8b7f12a888c4 --- vp9/common/vp9_blockd.h | 7 +--- vp9/common/vp9_mbpitch.c | 50 -------------------------- vp9/common/vp9_reconintra.c | 20 ++++------- vp9/common/vp9_reconintra.h | 3 +- vp9/common/vp9_reconintra4x4.c | 18 +++++----- vp9/common/vp9_rtcd_defs.sh | 4 +-- vp9/decoder/vp9_decodframe.c | 79 +++++++++++++++++++++++++----------------- vp9/encoder/vp9_encodeframe.c | 1 - vp9/encoder/vp9_encodeintra.c | 45 ++++++++++++++---------- vp9/encoder/vp9_rdopt.c | 57 +++++++++++++++++++----------- 10 files changed, 130 insertions(+), 154 deletions(-) diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h index b4f7c93a5..844892abc 100644 --- a/vp9/common/vp9_blockd.h +++ b/vp9/common/vp9_blockd.h @@ -279,11 +279,7 @@ typedef struct { } MODE_INFO; typedef struct blockd { - uint8_t **base_dst; - int dst; - int dst_stride; - -// union b_mode_info bmi; + int unused; } BLOCKD; struct scale_factors { @@ -743,7 +739,6 @@ static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, int ib) { return tx_type; } -void vp9_build_block_doffsets(MACROBLOCKD *xd); void vp9_setup_block_dptrs(MACROBLOCKD *xd); static TX_SIZE get_uv_tx_size(const MACROBLOCKD *xd) { diff --git a/vp9/common/vp9_mbpitch.c b/vp9/common/vp9_mbpitch.c index 3a918522b..8c05a34e5 100644 --- a/vp9/common/vp9_mbpitch.c +++ b/vp9/common/vp9_mbpitch.c @@ -11,51 +11,6 @@ #include "vp9/common/vp9_blockd.h" -typedef enum { - PRED = 0, - DEST = 1 -} BLOCKSET; - -static void setup_block(BLOCKD *b, uint8_t **base, uint8_t **base2, - int stride, int offset, BLOCKSET bs) { - if (bs == DEST) { - b->dst_stride = stride; - b->dst = offset; - b->base_dst = base; - } -} - -static void setup_macroblock(MACROBLOCKD *mb, BLOCKSET bs) { - BLOCKD *blockd = mb->block; - uint8_t **y, **u, **v, **y2, **u2, **v2; - int i, stride; - - if (bs == DEST) { - y = &mb->plane[0].dst.buf; - u = &mb->plane[1].dst.buf; - v = &mb->plane[2].dst.buf; - - y2 = NULL; - u2 = NULL; - v2 = NULL; - } - - // luma - stride = mb->plane[0].dst.stride; - for (i = 0; i < 16; ++i) { - const int offset = (i >> 2) * 4 * stride + (i & 3) * 4; - setup_block(&blockd[i], y, y2, stride, offset, bs); - } - - // chroma - stride = mb->plane[1].dst.stride; - for (i = 16; i < 20; i++) { - const int offset = ((i - 16) >> 1) * 4 * stride + (i & 1) * 4; - setup_block(&blockd[i], u, u2, stride, offset, bs); - setup_block(&blockd[i + 4], v, v2, stride, offset, bs); - } -} - void vp9_setup_block_dptrs(MACROBLOCKD *mb) { int i; @@ -65,8 +20,3 @@ void vp9_setup_block_dptrs(MACROBLOCKD *mb) { mb->plane[i].subsampling_y = !!i; } } - -void vp9_build_block_doffsets(MACROBLOCKD *mb) { - // handle the destination pitch features - setup_macroblock(mb, DEST); -} diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c index 23fc55205..a66e78255 100644 --- a/vp9/common/vp9_reconintra.c +++ b/vp9/common/vp9_reconintra.c @@ -611,37 +611,31 @@ void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd, } void vp9_intra8x8_predict(MACROBLOCKD *xd, - BLOCKD *b, + int block4x4_idx, int mode, uint8_t *predictor, int pre_stride) { - const int block4x4_idx = (b - xd->block); const int block_idx = (block4x4_idx >> 2) | !!(block4x4_idx & 2); const int have_top = (block_idx >> 1) || xd->up_available; const int have_left = (block_idx & 1) || 
xd->left_available; const int have_right = !(block_idx & 1) || xd->right_available; - vp9_build_intra_predictors(*(b->base_dst) + b->dst, - b->dst_stride, predictor, pre_stride, + vp9_build_intra_predictors(predictor, pre_stride, + predictor, pre_stride, mode, 8, 8, have_top, have_left, have_right); } void vp9_intra_uv4x4_predict(MACROBLOCKD *xd, - BLOCKD *b, + int block4x4_idx, int mode, uint8_t *predictor, int pre_stride) { - const int block_idx = (b - xd->block) & 3; + const int block_idx = block4x4_idx & 3; const int have_top = (block_idx >> 1) || xd->up_available; const int have_left = (block_idx & 1) || xd->left_available; const int have_right = !(block_idx & 1) || xd->right_available; - vp9_build_intra_predictors(*(b->base_dst) + b->dst, - b->dst_stride, predictor, pre_stride, + vp9_build_intra_predictors(predictor, pre_stride, + predictor, pre_stride, mode, 4, 4, have_top, have_left, have_right); } - -/* TODO: try different ways of use Y-UV mode correlation - Current code assumes that a uv 4x4 block use same mode - as corresponding Y 8x8 area - */ diff --git a/vp9/common/vp9_reconintra.h b/vp9/common/vp9_reconintra.h index e94359683..21cd7abbe 100644 --- a/vp9/common/vp9_reconintra.h +++ b/vp9/common/vp9_reconintra.h @@ -18,7 +18,8 @@ B_PREDICTION_MODE vp9_find_dominant_direction(uint8_t *ptr, int stride, int n, int tx, int ty); -B_PREDICTION_MODE vp9_find_bpred_context(MACROBLOCKD *xd, BLOCKD *x); +B_PREDICTION_MODE vp9_find_bpred_context(MACROBLOCKD *xd, int block, + uint8_t *ptr, int stride); #if CONFIG_COMP_INTERINTRA_PRED void vp9_build_interintra_predictors(MACROBLOCKD *xd, diff --git a/vp9/common/vp9_reconintra4x4.c b/vp9/common/vp9_reconintra4x4.c index 779422011..5d0bdecb3 100644 --- a/vp9/common/vp9_reconintra4x4.c +++ b/vp9/common/vp9_reconintra4x4.c @@ -147,12 +147,10 @@ B_PREDICTION_MODE vp9_find_dominant_direction(uint8_t *ptr, } #endif -B_PREDICTION_MODE vp9_find_bpred_context(MACROBLOCKD *xd, BLOCKD *x) { - const int block_idx = x - xd->block; +B_PREDICTION_MODE vp9_find_bpred_context(MACROBLOCKD *xd, int block_idx, + uint8_t *ptr, int stride) { const int have_top = (block_idx >> 2) || xd->up_available; const int have_left = (block_idx & 3) || xd->left_available; - uint8_t *ptr = *(x->base_dst) + x->dst; - int stride = x->dst_stride; int tx = have_left ? 4 : 0; int ty = have_top ? 4 : 0; if (!have_left && !have_top) @@ -182,8 +180,8 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd, */ if (have_left) { - uint8_t *left_ptr = *(x->base_dst) + x->dst - 1; - const int stride = x->dst_stride; + uint8_t *left_ptr = predictor - 1; + const int stride = ps; left[0] = left_ptr[0 * stride]; left[1] = left_ptr[1 * stride]; @@ -194,7 +192,7 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd, } if (have_top) { - uint8_t *above_ptr = *(x->base_dst) + x->dst - x->dst_stride; + uint8_t *above_ptr = predictor - ps; top_left = have_left ? 
above_ptr[-1] : 127; above[0] = above_ptr[0]; @@ -213,10 +211,10 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd, uint8_t *above_right = above_ptr + 4; if (xd->sb_index == 3 && (xd->mb_index & 1)) - above_right -= 32 * x->dst_stride; + above_right -= 32 * ps; if (xd->mb_index == 3) - above_right -= 16 * x->dst_stride; - above_right -= (block_idx & ~3) * x->dst_stride; + above_right -= 16 * ps; + above_right -= (block_idx & ~3) * ps; /* use a more distant above-right (from closest available top-right * corner), but with a "localized DC" (similar'ish to TM-pred): diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh index ec0b2913b..3621dca60 100644 --- a/vp9/common/vp9_rtcd_defs.sh +++ b/vp9/common/vp9_rtcd_defs.sh @@ -98,10 +98,10 @@ specialize vp9_build_intra_predictors_sbuv_s prototype void vp9_intra4x4_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor, int pre_stride" specialize vp9_intra4x4_predict; -prototype void vp9_intra8x8_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor, int pre_stride" +prototype void vp9_intra8x8_predict "struct macroblockd *xd, int block, int b_mode, uint8_t *predictor, int pre_stride" specialize vp9_intra8x8_predict; -prototype void vp9_intra_uv4x4_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor, int pre_stride" +prototype void vp9_intra_uv4x4_predict "struct macroblockd *xd, int block, int b_mode, uint8_t *predictor, int pre_stride" specialize vp9_intra_uv4x4_predict; if [ "$CONFIG_VP9_DECODER" = "yes" ]; then diff --git a/vp9/decoder/vp9_decodframe.c b/vp9/decoder/vp9_decodframe.c index 26f341a49..53af0079a 100644 --- a/vp9/decoder/vp9_decodframe.c +++ b/vp9/decoder/vp9_decodframe.c @@ -250,12 +250,14 @@ static void decode_8x8(MACROBLOCKD *xd) { int ib = vp9_i8x8_block[i]; int idx = (ib & 0x02) ? 
(ib + 2) : ib; int16_t *q = BLOCK_OFFSET(xd->plane[0].qcoeff, idx, 16); - uint8_t *dst = *(xd->block[ib].base_dst) + xd->block[ib].dst; + uint8_t* const dst = + raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, + xd->plane[0].dst.buf, + xd->plane[0].dst.stride); int stride = xd->plane[0].dst.stride; if (mode == I8X8_PRED) { - BLOCKD *b = &xd->block[ib]; int i8x8mode = xd->mode_info_context->bmi[ib].as_mode.first; - vp9_intra8x8_predict(xd, b, i8x8mode, dst, stride); + vp9_intra8x8_predict(xd, ib, i8x8mode, dst, stride); } tx_type = get_tx_type_8x8(xd, ib); vp9_iht_add_8x8_c(tx_type, q, dst, stride, xd->plane[0].eobs[idx]); @@ -270,21 +272,25 @@ static void decode_8x8(MACROBLOCKD *xd) { int i; for (i = 0; i < 4; i++) { int ib = vp9_i8x8_block[i]; - BLOCKD *b = &xd->block[ib]; int i8x8mode = xd->mode_info_context->bmi[ib].as_mode.first; + uint8_t* dst; - b = &xd->block[16 + i]; - vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst, - b->dst_stride); + dst = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 1, i, + xd->plane[1].dst.buf, + xd->plane[1].dst.stride); + vp9_intra_uv4x4_predict(xd, 16 + i, i8x8mode, + dst, xd->plane[1].dst.stride); xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, i, 16), - *(b->base_dst) + b->dst, b->dst_stride, + dst, xd->plane[1].dst.stride, xd->plane[1].eobs[i]); - b = &xd->block[20 + i]; - vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst, - b->dst_stride); + dst = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 2, i, + xd->plane[2].dst.buf, + xd->plane[1].dst.stride); + vp9_intra_uv4x4_predict(xd, 20 + i, i8x8mode, + dst, xd->plane[1].dst.stride); xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, i, 16), - *(b->base_dst) + b->dst, b->dst_stride, + dst, xd->plane[1].dst.stride, xd->plane[2].eobs[i]); } } else if (mode == SPLITMV) { @@ -302,14 +308,16 @@ static void decode_8x8(MACROBLOCKD *xd) { } static INLINE void dequant_add_y(MACROBLOCKD *xd, TX_TYPE tx_type, int idx) { - BLOCKD *const b = &xd->block[idx]; struct macroblockd_plane *const y = &xd->plane[0]; + uint8_t* const dst = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, idx, + xd->plane[0].dst.buf, + xd->plane[0].dst.stride); if (tx_type != DCT_DCT) { vp9_iht_add_c(tx_type, BLOCK_OFFSET(y->qcoeff, idx, 16), - *(b->base_dst) + b->dst, b->dst_stride, y->eobs[idx]); + dst, xd->plane[0].dst.stride, y->eobs[idx]); } else { - xd->itxm_add(BLOCK_OFFSET(y->qcoeff, idx, 16), *(b->base_dst) + b->dst, - b->dst_stride, y->eobs[idx]); + xd->itxm_add(BLOCK_OFFSET(y->qcoeff, idx, 16), + dst, xd->plane[0].dst.stride, y->eobs[idx]); } } @@ -323,39 +331,49 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) { int ib = vp9_i8x8_block[i]; const int iblock[4] = {0, 1, 4, 5}; int j; - BLOCKD *b = &xd->block[ib]; + uint8_t* dst; int i8x8mode = xd->mode_info_context->bmi[ib].as_mode.first; - vp9_intra8x8_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst, - b->dst_stride); + + dst = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, + xd->plane[0].dst.buf, + xd->plane[0].dst.stride); + vp9_intra8x8_predict(xd, ib, i8x8mode, dst, xd->plane[0].dst.stride); for (j = 0; j < 4; j++) { tx_type = get_tx_type_4x4(xd, ib + iblock[j]); dequant_add_y(xd, tx_type, ib + iblock[j]); } - b = &xd->block[16 + i]; - vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst, - b->dst_stride); + dst = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 1, i, + xd->plane[1].dst.buf, + xd->plane[1].dst.stride); + vp9_intra_uv4x4_predict(xd, 16 + i, i8x8mode, + dst, 
xd->plane[1].dst.stride); xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, i, 16), - *(b->base_dst) + b->dst, b->dst_stride, + dst, xd->plane[1].dst.stride, xd->plane[1].eobs[i]); - b = &xd->block[20 + i]; - vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst, - b->dst_stride); + dst = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 2, i, + xd->plane[2].dst.buf, + xd->plane[2].dst.stride); + vp9_intra_uv4x4_predict(xd, 20 + i, i8x8mode, + dst, xd->plane[1].dst.stride); xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, i, 16), - *(b->base_dst) + b->dst, b->dst_stride, + dst, xd->plane[1].dst.stride, xd->plane[2].eobs[i]); } } else if (mode == I4X4_PRED) { for (i = 0; i < 16; i++) { BLOCKD *b = &xd->block[i]; int b_mode = xd->mode_info_context->bmi[i].as_mode.first; + uint8_t* dst; + dst = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, i, + xd->plane[0].dst.buf, + xd->plane[0].dst.stride); #if CONFIG_NEWBINTRAMODES xd->mode_info_context->bmi[i].as_mode.context = - vp9_find_bpred_context(xd, b); + vp9_find_bpred_context(xd, i, dst, xd->plane[0].dst.stride); if (!xd->mode_info_context->mbmi.mb_skip_coeff) vp9_decode_coefs_4x4(pbi, xd, r, PLANE_TYPE_Y_WITH_DC, i); #endif - vp9_intra4x4_predict(xd, b, b_mode, *(b->base_dst) + b->dst, - b->dst_stride); + vp9_intra4x4_predict(xd, b, b_mode, dst, xd->plane[0].dst.stride); tx_type = get_tx_type_4x4(xd, i); dequant_add_y(xd, tx_type, i); } @@ -1554,7 +1572,6 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) { vp9_setup_intra_recon(new_fb); vp9_setup_block_dptrs(xd); - vp9_build_block_doffsets(xd); // clear out the coeff buffer vpx_memset(xd->plane[0].qcoeff, 0, sizeof(xd->plane[0].qcoeff)); diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c index ca2579efa..143656681 100644 --- a/vp9/encoder/vp9_encodeframe.c +++ b/vp9/encoder/vp9_encodeframe.c @@ -1779,7 +1779,6 @@ void vp9_encode_frame(VP9_COMP *cpi) { } void vp9_build_block_offsets(MACROBLOCK *x) { - vp9_build_block_doffsets(&x->e_mbd); } static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) { diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c index 6c17e4892..ee82628b6 100644 --- a/vp9/encoder/vp9_encodeintra.c +++ b/vp9/encoder/vp9_encodeintra.c @@ -47,6 +47,9 @@ static void encode_intra4x4block(MACROBLOCK *x, int ib) { uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, x->plane[0].src.buf, x->plane[0].src.stride); + uint8_t* const dst = + raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, + xd->plane[0].dst.buf, xd->plane[0].dst.stride); int16_t* const src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, ib, x->plane[0].src_diff); @@ -59,15 +62,15 @@ static void encode_intra4x4block(MACROBLOCK *x, int ib) { #if CONFIG_NEWBINTRAMODES xd->mode_info_context->bmi[ib].as_mode.context = - vp9_find_bpred_context(&x->e_mbd, b); + vp9_find_bpred_context(&x->e_mbd, ib, dst, xd->plane[0].dst.stride); #endif vp9_intra4x4_predict(&x->e_mbd, b, xd->mode_info_context->bmi[ib].as_mode.first, - *(b->base_dst) + b->dst, b->dst_stride); + dst, xd->plane[0].dst.stride); vp9_subtract_block(4, 4, src_diff, 16, src, x->plane[0].src.stride, - *(b->base_dst) + b->dst, b->dst_stride); + dst, xd->plane[0].dst.stride); tx_type = get_tx_type_4x4(&x->e_mbd, ib); if (tx_type != DCT_DCT) { @@ -83,8 +86,7 @@ static void encode_intra4x4block(MACROBLOCK *x, int ib) { diff, 32); } - vp9_recon_b(*(b->base_dst) + b->dst, diff, - *(b->base_dst) + b->dst, b->dst_stride); + vp9_recon_b(dst, diff, dst, 
xd->plane[0].dst.stride); } void vp9_encode_intra4x4mby(MACROBLOCK *mb) { @@ -157,7 +159,6 @@ void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x) { void vp9_encode_intra8x8(MACROBLOCK *x, int ib) { MACROBLOCKD *xd = &x->e_mbd; - BLOCKD *b = &xd->block[ib]; uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, x->plane[0].src.buf, x->plane[0].src.stride); @@ -167,16 +168,19 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) { int16_t* const diff = raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, ib, xd->plane[0].diff); + uint8_t* const dst = + raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, + xd->plane[0].dst.buf, xd->plane[0].dst.stride); const int iblock[4] = {0, 1, 4, 5}; int i; TX_TYPE tx_type; - vp9_intra8x8_predict(xd, b, xd->mode_info_context->bmi[ib].as_mode.first, - *(b->base_dst) + b->dst, b->dst_stride); + vp9_intra8x8_predict(xd, ib, xd->mode_info_context->bmi[ib].as_mode.first, + dst, xd->plane[0].dst.stride); // generate residual blocks vp9_subtract_block(8, 8, src_diff, 16, src, x->plane[0].src.stride, - *(b->base_dst) + b->dst, b->dst_stride); + dst, xd->plane[0].dst.stride); if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) { int idx = (ib & 0x02) ? (ib + 2) : ib; @@ -207,7 +211,6 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) { xd->plane[0].diff); assert(idx < 16); - b = &xd->block[ib + iblock[i]]; tx_type = get_tx_type_4x4(xd, ib + iblock[i]); if (tx_type != DCT_DCT) { vp9_short_fht4x4(src_diff, coeff, 16, tx_type); @@ -236,9 +239,11 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) { int16_t* const diff = raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, ib + iblock[i], xd->plane[0].diff); - b = &xd->block[ib + iblock[i]]; - vp9_recon_b_c(*(b->base_dst) + b->dst, diff, *(b->base_dst) + b->dst, - b->dst_stride); + uint8_t* const dst = + raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib + iblock[i], + xd->plane[0].dst.buf, + xd->plane[0].dst.stride); + vp9_recon_b_c(dst, diff, dst, xd->plane[0].dst.stride); } } @@ -251,7 +256,6 @@ void vp9_encode_intra8x8mby(MACROBLOCK *x) { static void encode_intra_uv4x4(MACROBLOCK *x, int ib, int mode) { MACROBLOCKD * const xd = &x->e_mbd; - BLOCKD *b = &x->e_mbd.block[ib]; int16_t * const dqcoeff = MB_SUBBLOCK_FIELD(xd, dqcoeff, ib); int16_t* const coeff = MB_SUBBLOCK_FIELD(x, coeff, ib); const int plane = ib < 20 ? 
1 : 2; @@ -266,23 +270,26 @@ static void encode_intra_uv4x4(MACROBLOCK *x, int ib, int mode) { int16_t* const diff = raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, plane, block, xd->plane[plane].diff); + uint8_t* const dst = + raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, plane, block, + xd->plane[plane].dst.buf, + xd->plane[plane].dst.stride); assert(ib >= 16 && ib < 24); - vp9_intra_uv4x4_predict(&x->e_mbd, b, mode, - *(b->base_dst) + b->dst, b->dst_stride); + vp9_intra_uv4x4_predict(&x->e_mbd, ib, mode, + dst, xd->plane[plane].dst.stride); assert(xd->plane[1].subsampling_x == 1); vp9_subtract_block(4, 4, src_diff, 8, src, x->plane[plane].src.stride, - *(b->base_dst) + b->dst, b->dst_stride); + dst, xd->plane[plane].dst.stride); x->fwd_txm4x4(src_diff, coeff, 16); x->quantize_b_4x4(x, ib, DCT_DCT, 16); vp9_inverse_transform_b_4x4(&x->e_mbd, xd->plane[plane].eobs[block], dqcoeff, diff, 16); - vp9_recon_uv_b_c(*(b->base_dst) + b->dst, diff, *(b->base_dst) + b->dst, - b->dst_stride); + vp9_recon_uv_b_c(dst, diff, dst, xd->plane[plane].dst.stride); } void vp9_encode_intra8x8mbuv(MACROBLOCK *x) { diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c index 7643e2169..3859fe3ec 100644 --- a/vp9/encoder/vp9_rdopt.c +++ b/vp9/encoder/vp9_rdopt.c @@ -862,6 +862,9 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, ib, xd->plane[0].diff); int16_t* const coeff = BLOCK_OFFSET(x->plane[0].coeff, ib, 16); + uint8_t* const dst = + raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, + xd->plane[0].dst.buf, xd->plane[0].dst.stride); ENTROPY_CONTEXT ta = *a, tempa = *a; ENTROPY_CONTEXT tl = *l, templ = *l; TX_TYPE tx_type = DCT_DCT; @@ -876,7 +879,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, assert(ib < 16); #if CONFIG_NEWBINTRAMODES xd->mode_info_context->bmi[ib].as_mode.context = - vp9_find_bpred_context(xd, b); + vp9_find_bpred_context(xd, ib, dst, xd->plane[0].dst.stride); #endif xd->mode_info_context->mbmi.txfm_size = TX_4X4; for (mode = B_DC_PRED; mode < LEFT4X4; mode++) { @@ -901,10 +904,10 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, rate = bmode_costs[mode]; #endif - vp9_intra4x4_predict(xd, b, mode, *(b->base_dst) + b->dst, b->dst_stride); + vp9_intra4x4_predict(xd, b, mode, dst, xd->plane[0].dst.stride); vp9_subtract_block(4, 4, src_diff, 16, src, src_stride, - *(b->base_dst) + b->dst, b->dst_stride); + dst, xd->plane[0].dst.stride); xd->mode_info_context->bmi[ib].as_mode.first = mode; tx_type = get_tx_type_4x4(xd, ib); @@ -950,9 +953,9 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, xd->inv_txm4x4(best_dqcoeff, diff, 32); vp9_intra4x4_predict(xd, b, *best_mode, - *(b->base_dst) + b->dst, b->dst_stride); - vp9_recon_b(*(b->base_dst) + b->dst, diff, - *(b->base_dst) + b->dst, b->dst_stride); + dst, xd->plane[0].dst.stride); + vp9_recon_b(dst, diff, + dst, xd->plane[0].dst.stride); return best_rd; } @@ -986,6 +989,12 @@ static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb, const int mis = xd->mode_info_stride; B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode); int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d); +#if CONFIG_NEWBINTRAMODES + uint8_t* const dst = + raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, i, + xd->plane[0].dst.buf, + xd->plane[0].dst.stride); +#endif if (xd->frame_type == KEY_FRAME) { const B_PREDICTION_MODE A = 
above_block_mode(mic, i, mis); @@ -994,7 +1003,8 @@ static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb, bmode_costs = mb->bmode_costs[A][L]; } #if CONFIG_NEWBINTRAMODES - mic->bmi[i].as_mode.context = vp9_find_bpred_context(xd, xd->block + i); + mic->bmi[i].as_mode.context = vp9_find_bpred_context(xd, i, dst, + xd->plane[0].dst.stride); #endif total_rd += rd_pick_intra4x4block( @@ -1090,7 +1100,6 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib, MACROBLOCKD *xd = &x->e_mbd; int64_t best_rd = INT64_MAX; int distortion = 0, rate = 0; - BLOCKD *b = xd->block + ib; ENTROPY_CONTEXT_PLANES ta, tl; ENTROPY_CONTEXT *ta0, *ta1, besta0 = 0, besta1 = 0; ENTROPY_CONTEXT *tl0, *tl1, bestl0 = 0, bestl1 = 0; @@ -1105,6 +1114,9 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib, raster_block_offset_int16(xd, BLOCK_SIZE_MB16X16, 0, ib, x->plane[0].src_diff); int16_t* const coeff = BLOCK_OFFSET(x->plane[0].coeff, idx, 16); + uint8_t* const dst = + raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, + xd->plane[0].dst.buf, xd->plane[0].dst.stride); assert(ib < 16); for (mode = DC_PRED; mode <= TM_PRED; mode++) { @@ -1115,11 +1127,11 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib, rate = mode_costs[mode]; xd->mode_info_context->bmi[ib].as_mode.first = mode; - vp9_intra8x8_predict(xd, b, mode, *(b->base_dst) + b->dst, b->dst_stride); + vp9_intra8x8_predict(xd, ib, mode, dst, xd->plane[0].dst.stride); vp9_subtract_block(8, 8, src_diff, 16, src, src_stride, - *(b->base_dst) + b->dst, b->dst_stride); + dst, xd->plane[0].dst.stride); if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) { TX_TYPE tx_type = get_tx_type_8x8(xd, ib); @@ -1165,7 +1177,6 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib, int16_t* const coeff = BLOCK_OFFSET(x->plane[0].coeff, ib + iblock[i], 16); int do_two = 0; - b = &xd->block[ib + iblock[i]]; tx_type = get_tx_type_4x4(xd, ib + iblock[i]); if (tx_type != DCT_DCT) { vp9_short_fht4x4(src_diff, coeff, 16, tx_type); @@ -1192,7 +1203,6 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib, TX_4X4, 16); } } - b = &xd->block[ib]; rate += rate_t; } @@ -1715,7 +1725,6 @@ static int64_t encode_inter_mb_segment(VP9_COMMON *const cm, *distortion = 0; for (i = 0; i < 16; i++) { if (labels[i] == which_label) { - BLOCKD *bd = &x->e_mbd.block[i]; const int src_stride = x->plane[0].src.stride; uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, i, @@ -1728,12 +1737,16 @@ static int64_t encode_inter_mb_segment(VP9_COMMON *const cm, raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, i, xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride); + uint8_t* const dst = + raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, i, + xd->plane[0].dst.buf, + xd->plane[0].dst.stride); int thisdistortion; vp9_build_inter_predictor(pre, xd->plane[0].pre[0].stride, - *(bd->base_dst) + bd->dst, - bd->dst_stride, + dst, + xd->plane[0].dst.stride, &xd->mode_info_context->bmi[i].as_mv[0], &xd->scale_factor[0], 4, 4, 0 /* no avg */, &xd->subpix); @@ -1748,7 +1761,7 @@ static int64_t encode_inter_mb_segment(VP9_COMMON *const cm, xd->plane[0].pre[1].stride); vp9_build_inter_predictor( second_pre, xd->plane[0].pre[1].stride, - *(bd->base_dst) + bd->dst, bd->dst_stride, + dst, xd->plane[0].dst.stride, &xd->mode_info_context->bmi[i].as_mv[1], &xd->scale_factor[1], 4, 4, 1, &xd->subpix); @@ -1756,7 +1769,7 @@ static int64_t 
encode_inter_mb_segment(VP9_COMMON *const cm, vp9_subtract_block(4, 4, src_diff, 16, src, src_stride, - *(bd->base_dst) + bd->dst, bd->dst_stride); + dst, xd->plane[0].dst.stride); x->fwd_txm4x4(src_diff, coeff, 32); x->quantize_b_4x4(x, i, DCT_DCT, 16); thisdistortion = vp9_block_error(coeff, @@ -1803,7 +1816,6 @@ static int64_t encode_inter_mb_segment_8x8(VP9_COMMON *const cm, xd->mode_info_context->mbmi.second_ref_frame > 0; int which_mv; const int idx = (ib & 8) + ((ib & 2) << 1); - BLOCKD *bd = &xd->block[ib]; const int src_stride = x->plane[0].src.stride; uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, @@ -1813,6 +1825,10 @@ static int64_t encode_inter_mb_segment_8x8(VP9_COMMON *const cm, x->plane[0].src_diff); int16_t* const coeff = BLOCK_OFFSET(x->plane[0].coeff, idx, 16); int thisdistortion; + uint8_t* const dst = + raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, + xd->plane[0].dst.buf, + xd->plane[0].dst.stride); assert(idx < 16); for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) { @@ -1826,7 +1842,7 @@ static int64_t encode_inter_mb_segment_8x8(VP9_COMMON *const cm, // weighting for splitmv modes is turned on. vp9_build_inter_predictor( pre, xd->plane[0].pre[which_mv].stride, - *(bd->base_dst) + bd->dst, bd->dst_stride, + dst, xd->plane[0].dst.stride, &xd->mode_info_context->bmi[ib].as_mv[which_mv], &xd->scale_factor[which_mv], 8, 8, which_mv, &xd->subpix); @@ -1834,7 +1850,7 @@ static int64_t encode_inter_mb_segment_8x8(VP9_COMMON *const cm, vp9_subtract_block(8, 8, src_diff, 16, src, src_stride, - *(bd->base_dst) + bd->dst, bd->dst_stride); + dst, xd->plane[0].dst.stride); if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) { if (otherrd) { @@ -1857,7 +1873,6 @@ static int64_t encode_inter_mb_segment_8x8(VP9_COMMON *const cm, x->plane[0].src_diff); int16_t* const coeff = BLOCK_OFFSET(x->plane[0].coeff, ib + iblock[j], 16); - bd = &xd->block[ib + iblock[j]]; x->fwd_txm8x4(src_diff, coeff, 32); x->quantize_b_4x4_pair(x, ib + iblock[j], ib + iblock[j] + 1, 16); thisdistortion = vp9_block_error_c(coeff, -- cgit v1.2.3 From bb41ab4a0c2944e34b727d45ef026f5b91e57e85 Mon Sep 17 00:00:00 2001 From: John Koleszar Date: Tue, 23 Apr 2013 16:22:47 -0700 Subject: Remove BLOCKD structure All members can be referenced from their per-plane counterparts, and removes assumptions about 24 blocks per macroblock. Change-Id: I7ff2fa72d22c29163eb558981c8193765a8113d9 --- vp9/common/vp9_blockd.h | 7 ------- vp9/common/vp9_reconintra4x4.c | 3 +-- vp9/common/vp9_rtcd_defs.sh | 9 ++++----- vp9/decoder/vp9_decodframe.c | 3 +-- vp9/encoder/vp9_encodeintra.c | 3 +-- vp9/encoder/vp9_firstpass.c | 5 ++--- vp9/encoder/vp9_mbgraph.c | 6 ++---- vp9/encoder/vp9_mcomp.c | 29 ++++++++++++++--------------- vp9/encoder/vp9_mcomp.h | 12 ++++++------ vp9/encoder/vp9_rdopt.c | 20 ++++++++------------ vp9/encoder/vp9_temporal_filter.c | 5 ++--- 11 files changed, 41 insertions(+), 61 deletions(-) diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h index 844892abc..3d156240b 100644 --- a/vp9/common/vp9_blockd.h +++ b/vp9/common/vp9_blockd.h @@ -278,10 +278,6 @@ typedef struct { union b_mode_info bmi[16]; } MODE_INFO; -typedef struct blockd { - int unused; -} BLOCKD; - struct scale_factors { int x_num; int x_den; @@ -335,9 +331,6 @@ struct macroblockd_plane { typedef struct macroblockd { struct macroblockd_plane plane[MAX_MB_PLANE]; - /* 16 Y blocks, 4 U, 4 V, each with 16 entries. 
*/ - BLOCKD block[24]; - struct scale_factors scale_factor[2]; struct scale_factors scale_factor_uv[2]; diff --git a/vp9/common/vp9_reconintra4x4.c b/vp9/common/vp9_reconintra4x4.c index 5d0bdecb3..08a5fac2b 100644 --- a/vp9/common/vp9_reconintra4x4.c +++ b/vp9/common/vp9_reconintra4x4.c @@ -160,12 +160,11 @@ B_PREDICTION_MODE vp9_find_bpred_context(MACROBLOCKD *xd, int block_idx, #endif void vp9_intra4x4_predict(MACROBLOCKD *xd, - BLOCKD *x, + int block_idx, int b_mode, uint8_t *predictor, int ps) { int i, r, c; - const int block_idx = x - xd->block; const int have_top = (block_idx >> 2) || xd->up_available; const int have_left = (block_idx & 3) || xd->left_available; const int have_right = (block_idx & 3) != 3 || xd->right_available; diff --git a/vp9/common/vp9_rtcd_defs.sh b/vp9/common/vp9_rtcd_defs.sh index 3621dca60..745cc6984 100644 --- a/vp9/common/vp9_rtcd_defs.sh +++ b/vp9/common/vp9_rtcd_defs.sh @@ -8,7 +8,6 @@ cat <block[i]; int b_mode = xd->mode_info_context->bmi[i].as_mode.first; uint8_t* dst; dst = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, i, @@ -373,7 +372,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) { if (!xd->mode_info_context->mbmi.mb_skip_coeff) vp9_decode_coefs_4x4(pbi, xd, r, PLANE_TYPE_Y_WITH_DC, i); #endif - vp9_intra4x4_predict(xd, b, b_mode, dst, xd->plane[0].dst.stride); + vp9_intra4x4_predict(xd, i, b_mode, dst, xd->plane[0].dst.stride); tx_type = get_tx_type_4x4(xd, i); dequant_add_y(xd, tx_type, i); } diff --git a/vp9/encoder/vp9_encodeintra.c b/vp9/encoder/vp9_encodeintra.c index ee82628b6..54c4f3635 100644 --- a/vp9/encoder/vp9_encodeintra.c +++ b/vp9/encoder/vp9_encodeintra.c @@ -41,7 +41,6 @@ int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) { } static void encode_intra4x4block(MACROBLOCK *x, int ib) { - BLOCKD *b = &x->e_mbd.block[ib]; MACROBLOCKD * const xd = &x->e_mbd; TX_TYPE tx_type; uint8_t* const src = @@ -65,7 +64,7 @@ static void encode_intra4x4block(MACROBLOCK *x, int ib) { vp9_find_bpred_context(&x->e_mbd, ib, dst, xd->plane[0].dst.stride); #endif - vp9_intra4x4_predict(&x->e_mbd, b, + vp9_intra4x4_predict(&x->e_mbd, ib, xd->mode_info_context->bmi[ib].as_mode.first, dst, xd->plane[0].dst.stride); vp9_subtract_block(4, 4, src_diff, 16, diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c index a83669ab5..607cd9923 100644 --- a/vp9/encoder/vp9_firstpass.c +++ b/vp9/encoder/vp9_firstpass.c @@ -363,7 +363,6 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *recon_buffer, int *best_motion_err, int recon_yoffset) { MACROBLOCKD *const xd = &x->e_mbd; - BLOCKD *d = &x->e_mbd.block[0]; int num00; int_mv tmp_mv; @@ -399,7 +398,7 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x, tmp_mv.as_int = 0; ref_mv_full.as_mv.col = ref_mv->as_mv.col >> 3; ref_mv_full.as_mv.row = ref_mv->as_mv.row >> 3; - tmp_err = cpi->diamond_search_sad(x, d, &ref_mv_full, &tmp_mv, step_param, + tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv, step_param, x->sadperbit16, &num00, &v_fn_ptr, x->nmvjointcost, x->mvcost, ref_mv); @@ -422,7 +421,7 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x, if (num00) num00--; else { - tmp_err = cpi->diamond_search_sad(x, d, &ref_mv_full, &tmp_mv, + tmp_err = cpi->diamond_search_sad(x, &ref_mv_full, &tmp_mv, step_param + n, x->sadperbit16, &num00, &v_fn_ptr, x->nmvjointcost, diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c index 7e67b6b38..27e9eceb0 100644 --- 
a/vp9/encoder/vp9_mbgraph.c +++ b/vp9/encoder/vp9_mbgraph.c @@ -25,11 +25,9 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, int mb_col) { MACROBLOCK *const x = &cpi->mb; MACROBLOCKD *const xd = &x->e_mbd; - BLOCKD *d = &xd->block[0]; vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16]; unsigned int best_err; - int tmp_col_min = x->mv_col_min; int tmp_col_max = x->mv_col_max; int tmp_row_min = x->mv_row_min; @@ -47,7 +45,7 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, /*cpi->sf.search_method == HEX*/ best_err = vp9_hex_search( - x, d, + x, &ref_full, dst_mv, step_param, x->errorperbit, @@ -62,7 +60,7 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, int distortion; unsigned int sse; best_err = cpi->find_fractional_mv_step( - x, d, + x, dst_mv, ref_mv, x->errorperbit, &v_fn_ptr, NULL, NULL, diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c index 447a338f6..74caba5a0 100644 --- a/vp9/encoder/vp9_mcomp.c +++ b/vp9/encoder/vp9_mcomp.c @@ -239,7 +239,7 @@ void vp9_init3smotion_compensation(MACROBLOCK *x, int stride) { }, \ v = INT_MAX;) -int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCKD *d, +int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, int_mv *bestmv, int_mv *ref_mv, int error_per_bit, const vp9_variance_fn_ptr_t *vfp, @@ -421,7 +421,7 @@ int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCKD *d, #undef MIN #undef MAX -int vp9_find_best_sub_pixel_step(MACROBLOCK *x, BLOCKD *d, +int vp9_find_best_sub_pixel_step(MACROBLOCK *x, int_mv *bestmv, int_mv *ref_mv, int error_per_bit, const vp9_variance_fn_ptr_t *vfp, @@ -924,7 +924,7 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x, BLOCKD *d, #undef SP -int vp9_find_best_half_pixel_step(MACROBLOCK *x, BLOCKD *d, +int vp9_find_best_half_pixel_step(MACROBLOCK *x, int_mv *bestmv, int_mv *ref_mv, int error_per_bit, const vp9_variance_fn_ptr_t *vfp, @@ -1096,7 +1096,6 @@ static const MV next_chkpts[6][3] = { int vp9_hex_search ( MACROBLOCK *x, - BLOCKD *d, int_mv *ref_mv, int_mv *best_mv, int search_param, @@ -1250,7 +1249,7 @@ cal_neighbors: #undef CHECK_POINT #undef CHECK_BETTER -int vp9_diamond_search_sad_c(MACROBLOCK *x, BLOCKD *d, +int vp9_diamond_search_sad_c(MACROBLOCK *x, int_mv *ref_mv, int_mv *best_mv, int search_param, int sad_per_bit, int *num00, vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, @@ -1361,7 +1360,7 @@ int vp9_diamond_search_sad_c(MACROBLOCK *x, BLOCKD *d, xd->allow_high_precision_mv); } -int vp9_diamond_search_sadx4(MACROBLOCK *x, BLOCKD *d, +int vp9_diamond_search_sadx4(MACROBLOCK *x, int_mv *ref_mv, int_mv *best_mv, int search_param, int sad_per_bit, int *num00, vp9_variance_fn_ptr_t *fn_ptr, @@ -1512,13 +1511,13 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x, BLOCKD *d, point as the best match, we will do a final 1-away diamond refining search */ int vp9_full_pixel_diamond(VP9_COMP *cpi, MACROBLOCK *x, - BLOCKD *d, int_mv *mvp_full, int step_param, + int_mv *mvp_full, int step_param, int sadpb, int further_steps, int do_refine, vp9_variance_fn_ptr_t *fn_ptr, int_mv *ref_mv, int_mv *dst_mv) { int_mv temp_mv; int thissme, n, num00; - int bestsme = cpi->diamond_search_sad(x, d, mvp_full, &temp_mv, + int bestsme = cpi->diamond_search_sad(x, mvp_full, &temp_mv, step_param, sadpb, &num00, fn_ptr, x->nmvjointcost, x->mvcost, ref_mv); @@ -1537,7 +1536,7 @@ int vp9_full_pixel_diamond(VP9_COMP *cpi, MACROBLOCK *x, if (num00) num00--; else { - thissme = cpi->diamond_search_sad(x, d, mvp_full, &temp_mv, + thissme = 
cpi->diamond_search_sad(x, mvp_full, &temp_mv, step_param + n, sadpb, &num00, fn_ptr, x->nmvjointcost, x->mvcost, ref_mv); @@ -1558,7 +1557,7 @@ int vp9_full_pixel_diamond(VP9_COMP *cpi, MACROBLOCK *x, int search_range = 8; int_mv best_mv; best_mv.as_int = dst_mv->as_int; - thissme = cpi->refining_search_sad(x, d, &best_mv, sadpb, search_range, + thissme = cpi->refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr, x->nmvjointcost, x->mvcost, ref_mv); @@ -1570,7 +1569,7 @@ int vp9_full_pixel_diamond(VP9_COMP *cpi, MACROBLOCK *x, return bestsme; } -int vp9_full_search_sad_c(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv, +int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv, int sad_per_bit, int distance, vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], @@ -1666,7 +1665,7 @@ int vp9_full_search_sad_c(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv, return INT_MAX; } -int vp9_full_search_sadx3(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv, +int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv, int sad_per_bit, int distance, vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], int_mv *center_mv, int n) { @@ -1794,7 +1793,7 @@ int vp9_full_search_sadx3(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv, return INT_MAX; } -int vp9_full_search_sadx8(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv, +int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv, int sad_per_bit, int distance, vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], @@ -1948,7 +1947,7 @@ int vp9_full_search_sadx8(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv, else return INT_MAX; } -int vp9_refining_search_sad_c(MACROBLOCK *x, BLOCKD *d, +int vp9_refining_search_sad_c(MACROBLOCK *x, int_mv *ref_mv, int error_per_bit, int search_range, vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], int_mv *center_mv) { @@ -2026,7 +2025,7 @@ int vp9_refining_search_sad_c(MACROBLOCK *x, BLOCKD *d, return INT_MAX; } -int vp9_refining_search_sadx4(MACROBLOCK *x, BLOCKD *d, +int vp9_refining_search_sadx4(MACROBLOCK *x, int_mv *ref_mv, int error_per_bit, int search_range, vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], int_mv *center_mv) { diff --git a/vp9/encoder/vp9_mcomp.h b/vp9/encoder/vp9_mcomp.h index 558663cca..e1ba7fd9d 100644 --- a/vp9/encoder/vp9_mcomp.h +++ b/vp9/encoder/vp9_mcomp.h @@ -38,12 +38,12 @@ void vp9_init3smotion_compensation(MACROBLOCK *x, int stride); // Runs sequence of diamond searches in smaller steps for RD struct VP9_COMP; int vp9_full_pixel_diamond(struct VP9_COMP *cpi, MACROBLOCK *x, - BLOCKD *d, int_mv *mvp_full, int step_param, + int_mv *mvp_full, int step_param, int sadpb, int further_steps, int do_refine, vp9_variance_fn_ptr_t *fn_ptr, int_mv *ref_mv, int_mv *dst_mv); -int vp9_hex_search(MACROBLOCK *x, BLOCKD *d, +int vp9_hex_search(MACROBLOCK *x, int_mv *ref_mv, int_mv *best_mv, int search_param, int error_per_bit, const vp9_variance_fn_ptr_t *vf, @@ -51,27 +51,27 @@ int vp9_hex_search(MACROBLOCK *x, BLOCKD *d, int *mvjcost, int *mvcost[2], int_mv *center_mv); -typedef int (fractional_mv_step_fp) (MACROBLOCK *x, BLOCKD *d, int_mv +typedef int (fractional_mv_step_fp) (MACROBLOCK *x, int_mv *bestmv, int_mv *ref_mv, int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse); extern fractional_mv_step_fp vp9_find_best_sub_pixel_step_iteratively; extern fractional_mv_step_fp vp9_find_best_sub_pixel_step; extern fractional_mv_step_fp vp9_find_best_half_pixel_step; -typedef int (*vp9_full_search_fn_t)(MACROBLOCK *x, BLOCKD *d, 
+typedef int (*vp9_full_search_fn_t)(MACROBLOCK *x, int_mv *ref_mv, int sad_per_bit, int distance, vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], int_mv *center_mv, int n); -typedef int (*vp9_refining_search_fn_t)(MACROBLOCK *x, BLOCKD *d, +typedef int (*vp9_refining_search_fn_t)(MACROBLOCK *x, int_mv *ref_mv, int sad_per_bit, int distance, vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost, int *mvcost[2], int_mv *center_mv); -typedef int (*vp9_diamond_search_fn_t)(MACROBLOCK *x, BLOCKD *d, +typedef int (*vp9_diamond_search_fn_t)(MACROBLOCK *x, int_mv *ref_mv, int_mv *best_mv, int search_param, int sad_per_bit, int *num00, diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c index 3859fe3ec..7de5bf786 100644 --- a/vp9/encoder/vp9_rdopt.c +++ b/vp9/encoder/vp9_rdopt.c @@ -850,7 +850,6 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, int rate = 0; int distortion; VP9_COMMON *const cm = &cpi->common; - BLOCKD *b = xd->block + ib; const int src_stride = x->plane[0].src.stride; uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_SIZE_MB16X16, 0, ib, @@ -904,7 +903,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, rate = bmode_costs[mode]; #endif - vp9_intra4x4_predict(xd, b, mode, dst, xd->plane[0].dst.stride); + vp9_intra4x4_predict(xd, ib, mode, dst, xd->plane[0].dst.stride); vp9_subtract_block(4, 4, src_diff, 16, src, src_stride, dst, xd->plane[0].dst.stride); @@ -922,7 +921,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, tempa = ta; templ = tl; - ratey = cost_coeffs(cm, x, b - xd->block, + ratey = cost_coeffs(cm, x, ib, PLANE_TYPE_Y_WITH_DC, &tempa, &templ, TX_4X4, 16); rate += ratey; distortion = vp9_block_error(coeff, @@ -952,7 +951,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib, else xd->inv_txm4x4(best_dqcoeff, diff, 32); - vp9_intra4x4_predict(xd, b, *best_mode, + vp9_intra4x4_predict(xd, ib, *best_mode, dst, xd->plane[0].dst.stride); vp9_recon_b(dst, diff, dst, xd->plane[0].dst.stride); @@ -2057,7 +2056,6 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, int step_param = 0; int further_steps; int thissme, bestsme = INT_MAX; - BLOCKD *e; const struct buf_2d orig_src = x->plane[0].src; const struct buf_2d orig_pre = x->e_mbd.plane[0].pre[0]; @@ -2109,9 +2107,8 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_MB16X16, 0, n, x->e_mbd.plane[0].pre[0].buf, x->e_mbd.plane[0].pre[0].stride); - e = &x->e_mbd.block[n]; - bestsme = vp9_full_pixel_diamond(cpi, x, e, &mvp_full, step_param, + bestsme = vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param, sadpb, further_steps, 0, v_fn_ptr, bsi->ref_mv, &mode_mv[NEW4X4]); @@ -2123,7 +2120,7 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); - thissme = cpi->full_search_sad(x, e, &mvp_full, + thissme = cpi->full_search_sad(x, &mvp_full, sadpb, 16, v_fn_ptr, x->nmvjointcost, x->mvcost, bsi->ref_mv, @@ -2145,7 +2142,7 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x, if (bestsme < INT_MAX) { int distortion; unsigned int sse; - cpi->find_fractional_mv_step(x, e, &mode_mv[NEW4X4], + cpi->find_fractional_mv_step(x, &mode_mv[NEW4X4], bsi->ref_mv, x->errorperbit, v_fn_ptr, x->nmvjointcost, x->mvcost, &distortion, &sse); @@ -2883,7 +2880,6 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, 
VP9_COMMON *cm = &cpi->common; MACROBLOCKD *xd = &x->e_mbd; MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; - BLOCKD *d = &xd->block[0]; const int is_comp_pred = (mbmi->second_ref_frame > 0); #if CONFIG_COMP_INTERINTRA_PRED const int is_comp_interintra_pred = (mbmi->second_ref_frame == INTRA_FRAME); @@ -2964,7 +2960,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, // Further step/diamond searches as necessary further_steps = (cpi->sf.max_step_search_steps - 1) - step_param; - bestsme = vp9_full_pixel_diamond(cpi, x, d, &mvp_full, step_param, + bestsme = vp9_full_pixel_diamond(cpi, x, &mvp_full, step_param, sadpb, further_steps, 1, &cpi->fn_ptr[block_size], &ref_mv[0], &tmp_mv); @@ -2977,7 +2973,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, if (bestsme < INT_MAX) { int dis; /* TODO: use dis in distortion calculation later. */ unsigned int sse; - cpi->find_fractional_mv_step(x, d, &tmp_mv, + cpi->find_fractional_mv_step(x, &tmp_mv, &ref_mv[0], x->errorperbit, &cpi->fn_ptr[block_size], diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c index ef868fce4..1e6b9840b 100644 --- a/vp9/encoder/vp9_temporal_filter.c +++ b/vp9/encoder/vp9_temporal_filter.c @@ -129,7 +129,6 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, int sadpb = x->sadperbit16; int bestsme = INT_MAX; - BLOCKD *d = &x->e_mbd.block[0]; int_mv best_ref_mv1; int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */ int_mv *ref_mv; @@ -160,7 +159,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, // TODO Check that the 16x16 vf & sdf are selected here // Ignore mv costing by sending NULL pointer instead of cost arrays ref_mv = &x->e_mbd.mode_info_context->bmi[0].as_mv[0]; - bestsme = vp9_hex_search(x, d, &best_ref_mv1_full, ref_mv, + bestsme = vp9_hex_search(x, &best_ref_mv1_full, ref_mv, step_param, sadpb, &cpi->fn_ptr[BLOCK_16X16], NULL, NULL, NULL, NULL, &best_ref_mv1); @@ -172,7 +171,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, int distortion; unsigned int sse; // Ignore mv costing by sending NULL pointer instead of cost array - bestsme = cpi->find_fractional_mv_step(x, d, ref_mv, + bestsme = cpi->find_fractional_mv_step(x, ref_mv, &best_ref_mv1, x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], -- cgit v1.2.3
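
Editor's note on the refactoring pattern: the mechanical change repeated in every hunk of these two patches is the same — instead of dereferencing the cached BLOCKD fields (*(b->base_dst) + b->dst with b->dst_stride), callers now derive the destination pointer on demand from the per-plane data in MACROBLOCKD (xd->plane[n].dst.buf and xd->plane[n].dst.stride), via raster_block_offset_uint8() in the real tree. The standalone C sketch below mirrors that pointer math for the 16 luma 4x4 blocks of a 16x16 macroblock, reusing the same offset formula that the removed setup_macroblock() computed. It is an illustration only, not libvpx code: the helper name raster_block_dst() is invented for the example, and the real raster_block_offset_uint8() also handles chroma subsampling and larger block sizes.

/*
 * Sketch (not libvpx code): why the cached BLOCKD destination pointers
 * were redundant.  Both the old cached form and the new on-demand form
 * resolve to the same address, so the per-block copies could be removed.
 */
#include <stdint.h>
#include <stdio.h>

struct plane_dst {
  uint8_t *buf;   /* top-left of the macroblock in the destination buffer */
  int stride;     /* destination stride in bytes */
};

/* Old style: each BLOCKD cached a base pointer, an offset and a stride
 * (the fields the first patch deletes). */
struct old_blockd {
  uint8_t **base_dst;
  int dst;
  int dst_stride;
};

/* New style: derive the pointer from the plane info when needed.
 * For luma, raster block i of a 16x16 MB is the 4x4 block at
 * row (i >> 2), column (i & 3) -- the same formula the removed
 * setup_macroblock() used to fill in BLOCKD::dst. */
static uint8_t *raster_block_dst(const struct plane_dst *p, int i) {
  return p->buf + (i >> 2) * 4 * p->stride + (i & 3) * 4;
}

int main(void) {
  uint8_t frame[64 * 64] = {0};
  struct plane_dst y = { frame, 64 };

  /* Old BLOCKD setup, as vp9_build_block_doffsets() used to compute it. */
  struct old_blockd b5 = { &y.buf,
                           (5 >> 2) * 4 * y.stride + (5 & 3) * 4,
                           y.stride };

  /* Both forms address the same pixel of block 5. */
  printf("old: %p  new: %p\n",
         (void *)(*b5.base_dst + b5.dst),
         (void *)raster_block_dst(&y, 5));
  return 0;
}

Because the pointer is cheap to recompute, dropping the cached copies lets the second patch delete BLOCKD entirely and, in the motion-search and RD paths, remove the BLOCKD *d / BLOCKD *b parameters that were only ever passed through to recover an index or a destination pointer.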