From 773768ae27bfe427f153c8e6fadb3912b8f94c1f Mon Sep 17 00:00:00 2001
From: Scott LaVarnway
Date: Thu, 2 Jun 2011 13:46:41 -0400
Subject: Removed B_MODE_INFO

Declared the bmi in BLOCKD as a union instead of B_MODE_INFO.
Then removed B_MODE_INFO completely.

Change-Id: Ieb7469899e265892c66f7aeac87b7f2bf38e7a67
---
 vp8/common/blockd.h       | 27 +++++++--------------------
 vp8/decoder/decodemv.c    | 20 ++++++++++----------
 vp8/decoder/decodframe.c  |  4 +---
 vp8/decoder/threading.c   |  4 +++-
 vp8/encoder/bitstream.c   | 12 ++++++++----
 vp8/encoder/block.h       |  6 +++++-
 vp8/encoder/encodeframe.c | 22 ++++------------------
 vp8/encoder/encodeintra.c | 12 ++++++------
 vp8/encoder/ethreading.c  | 20 +++-----------------
 vp8/encoder/firstpass.c   |  2 +-
 vp8/encoder/pickinter.c   | 16 +++++++++++-------
 vp8/encoder/rdopt.c       | 20 +++++++++++---------
 12 files changed, 68 insertions(+), 97 deletions(-)
(limited to 'vp8')

diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 3a70b1803..be99256a4 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -137,12 +137,6 @@ typedef enum
    modes for the Y blocks to the left and above us; for interframes, there is
    a single probability table. */
 
-typedef struct
-{
-    B_PREDICTION_MODE mode;
-    int_mv mv;
-} B_MODE_INFO;
-
 union b_mode_info
 {
     B_PREDICTION_MODE as_mode;
@@ -182,8 +176,6 @@ typedef struct
     short *dqcoeff;
     unsigned char  *predictor;
     short *diff;
-    short *reference;
-
     short *dequant;
 
     /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */
@@ -197,14 +189,13 @@ typedef struct
 
     int eob;
 
-    B_MODE_INFO bmi;
+    union b_mode_info bmi;
 } BLOCKD;
 
 typedef struct
 {
     DECLARE_ALIGNED(16, short, diff[400]);      /* from idct diff */
     DECLARE_ALIGNED(16, unsigned char, predictor[384]);
-/* not used    DECLARE_ALIGNED(16, short, reference[384]); */
     DECLARE_ALIGNED(16, short, qcoeff[400]);
     DECLARE_ALIGNED(16, short, dqcoeff[400]);
     DECLARE_ALIGNED(16, char,  eobs[25]);
@@ -284,19 +275,15 @@ extern void vp8_setup_block_dptrs(MACROBLOCKD *x);
 static void update_blockd_bmi(MACROBLOCKD *xd)
 {
     int i;
-    if (xd->mode_info_context->mbmi.mode == SPLITMV)
-    {
-        for (i = 0; i < 16; i++)
-        {
-            BLOCKD *d = &xd->block[i];
-            d->bmi.mv.as_int = xd->mode_info_context->bmi[i].mv.as_int;
-        }
-    }else if (xd->mode_info_context->mbmi.mode == B_PRED)
+    int is_4x4;
+    is_4x4 = (xd->mode_info_context->mbmi.mode == SPLITMV) ||
+             (xd->mode_info_context->mbmi.mode == B_PRED);
+
+    if (is_4x4)
     {
         for (i = 0; i < 16; i++)
         {
-            BLOCKD *d = &xd->block[i];
-            d->bmi.mode = xd->mode_info_context->bmi[i].as_mode;
+            xd->block[i].bmi = xd->mode_info_context->bmi[i];
         }
     }
 }
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 5defa75ca..d569d8815 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -355,7 +355,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
             do  /* for each subset j */
             {
                 int_mv leftmv, abovemv;
-                B_MODE_INFO bmi;
+                int_mv blockmv;
                 int k;  /* first block in subset j */
                 int mv_contz;
                 k = vp8_mbsplit_offset[s][j];
@@ -364,30 +364,30 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
                 abovemv.as_int = above_block_mv(mi, k, mis);
                 mv_contz = vp8_mv_cont(&leftmv, &abovemv);
 
-                switch (bmi.mode = (B_PREDICTION_MODE) sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) /*pc->fc.sub_mv_ref_prob))*/
+                switch ((B_PREDICTION_MODE) sub_mv_ref(bc, vp8_sub_mv_ref_prob2 [mv_contz])) /*pc->fc.sub_mv_ref_prob))*/
                 {
                 case NEW4X4:
-                    read_mv(bc, &bmi.mv.as_mv, (const MV_CONTEXT *) mvc);
-                    bmi.mv.as_mv.row += best_mv.as_mv.row;
-                    bmi.mv.as_mv.col += best_mv.as_mv.col;
+                    read_mv(bc, &blockmv.as_mv, (const MV_CONTEXT *) mvc);
+                    blockmv.as_mv.row += best_mv.as_mv.row;
+                    blockmv.as_mv.col += best_mv.as_mv.col;
 #ifdef VPX_MODE_COUNT
                     vp8_mv_cont_count[mv_contz][3]++;
 #endif
                     break;
                 case LEFT4X4:
-                    bmi.mv.as_int = leftmv.as_int;
+                    blockmv.as_int = leftmv.as_int;
 #ifdef VPX_MODE_COUNT
                     vp8_mv_cont_count[mv_contz][0]++;
 #endif
                     break;
                 case ABOVE4X4:
-                    bmi.mv.as_int = abovemv.as_int;
+                    blockmv.as_int = abovemv.as_int;
 #ifdef VPX_MODE_COUNT
                     vp8_mv_cont_count[mv_contz][1]++;
 #endif
                     break;
                 case ZERO4X4:
-                    bmi.mv.as_int = 0;
+                    blockmv.as_int = 0;
 #ifdef VPX_MODE_COUNT
                     vp8_mv_cont_count[mv_contz][2]++;
 #endif
@@ -396,7 +396,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
                     break;
                 }
 
-                mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(&bmi.mv,
+                mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(&blockmv,
                                                           mb_to_left_edge,
                                                           mb_to_right_edge,
                                                           mb_to_top_edge,
@@ -412,7 +412,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
                     fill_offset = &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];
 
                     do {
-                        mi->bmi[ *fill_offset].mv.as_int = bmi.mv.as_int;
+                        mi->bmi[ *fill_offset].mv.as_int = blockmv.as_int;
                         fill_offset++;
                     }while (--fill_count);
                 }
diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index 80e8723d6..fec517228 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -288,7 +288,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
             BLOCKD *b = &xd->block[i];
 
             RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
-                          (b, b->bmi.mode, b->predictor);
+                          (b, b->bmi.as_mode, b->predictor);
 
             if (xd->eobs[i] > 1)
             {
@@ -974,8 +974,6 @@ int vp8_decode_frame(VP8D_COMP *pbi)
 
     vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
 
-    vpx_memcpy(&xd->block[0].bmi, &xd->mode_info_context->bmi[0], sizeof(B_MODE_INFO));
-
 #if CONFIG_MULTITHREAD
     if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION)
     {
diff --git a/vp8/decoder/threading.c b/vp8/decoder/threading.c
index 4ca53fbe4..77c3f1732 100644
--- a/vp8/decoder/threading.c
+++ b/vp8/decoder/threading.c
@@ -186,7 +186,9 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m
         for (i = 0; i < 16; i++)
         {
             BLOCKD *b = &xd->block[i];
-            vp8mt_predict_intra4x4(pbi, xd, b->bmi.mode, b->predictor, mb_row, mb_col, i);
+
+            vp8mt_predict_intra4x4(pbi, xd, b->bmi.as_mode, b->predictor, mb_row, mb_col, i);
+
             if (xd->eobs[i] > 1)
             {
                 DEQUANT_INVOKE(&pbi->dequant, idct_add)
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index b10f8c01f..ced963559 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -1008,28 +1008,32 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
 
                     do
                     {
-                        const B_MODE_INFO *const b = cpi->mb.partition_info->bmi + j;
+                        B_PREDICTION_MODE blockmode;
+                        int_mv blockmv;
                         const int *const L = vp8_mbsplits [mi->partitioning];
                         int k = -1;  /* first block in subset j */
                         int mv_contz;
                         int_mv leftmv, abovemv;
 
+                        blockmode = cpi->mb.partition_info->bmi[j].mode;
+                        blockmv = cpi->mb.partition_info->bmi[j].mv;
                         while (j != L[++k])
                             if (k >= 16)
                                 assert(0);
+
                         leftmv.as_int = left_block_mv(m, k);
                         abovemv.as_int = above_block_mv(m, k, mis);
                         mv_contz = vp8_mv_cont(&leftmv, &abovemv);
 
-                        write_sub_mv_ref(w, b->mode, vp8_sub_mv_ref_prob2 [mv_contz]); //pc->fc.sub_mv_ref_prob);
+                        write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2 [mv_contz]);
 
-                        if (b->mode == NEW4X4)
+                        if (blockmode == NEW4X4)
                         {
 #ifdef ENTROPY_STATS
                             active_section = 11;
 #endif
-                            write_mv(w, &b->mv.as_mv, &best_mv, (const MV_CONTEXT *) mvc);
+                            write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *) mvc);
                         }
                     }
                     while (++j < cpi->mb.partition_info->count);
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index fbdc89e87..bc6eeeb14 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -54,7 +54,11 @@ typedef struct
 typedef struct
 {
     int count;
-    B_MODE_INFO bmi[16];
+    struct
+    {
+        B_PREDICTION_MODE mode;
+        int_mv mv;
+    } bmi[16];
 } PARTITION_INFO;
 
 typedef struct
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 1bb026048..f72fccc7c 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -272,6 +272,7 @@ static void build_activity_map( VP8_COMP *cpi )
 // Activity masking based on Tim T's original code
 void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
 {
+    unsigned int a;
     unsigned int b;
     unsigned int act = *(x->mb_activity_ptr);
 
@@ -477,24 +478,9 @@ void encode_mb_row(VP8_COMP *cpi,
         x->mb_activity_ptr++;
         x->mb_norm_activity_ptr++;
 
-        if(cm->frame_type != INTRA_FRAME)
-        {
-            if (xd->mode_info_context->mbmi.mode != B_PRED)
-            {
-                for (i = 0; i < 16; i++)
-                    xd->mode_info_context->bmi[i].mv.as_int = xd->block[i].bmi.mv.as_int;
-            }else
-            {
-                for (i = 0; i < 16; i++)
-                    xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
-            }
-        }
-        else
-        {
-            if(xd->mode_info_context->mbmi.mode != B_PRED)
-                for (i = 0; i < 16; i++)
-                    xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
-        }
+        /* save the block info */
+        for (i = 0; i < 16; i++)
+            xd->mode_info_context->bmi[i] = xd->block[i].bmi;
 
         // adjust to the next column of macroblocks
         x->src.y_buffer += 16;
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 835c80d28..5da69bc59 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -36,7 +36,7 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
     BLOCK *be = &x->block[ib];
 
     RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
-                (b, b->bmi.mode, b->predictor);
+                (b, b->bmi.as_mode, b->predictor);
 
     ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
 
@@ -89,19 +89,19 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
         switch (x->e_mbd.mode_info_context->mbmi.mode)
         {
         case DC_PRED:
-            d->bmi.mode = B_DC_PRED;
+            d->bmi.as_mode = B_DC_PRED;
             break;
         case V_PRED:
-            d->bmi.mode = B_VE_PRED;
+            d->bmi.as_mode = B_VE_PRED;
             break;
         case H_PRED:
-            d->bmi.mode = B_HE_PRED;
+            d->bmi.as_mode = B_HE_PRED;
             break;
         case TM_PRED:
-            d->bmi.mode = B_TM_PRED;
+            d->bmi.as_mode = B_TM_PRED;
             break;
         default:
-            d->bmi.mode = B_DC_PRED;
+            d->bmi.as_mode = B_DC_PRED;
             break;
         }
     }
diff --git a/vp8/encoder/ethreading.c b/vp8/encoder/ethreading.c
index 1d92f20af..3e6ed2a9d 100644
--- a/vp8/encoder/ethreading.c
+++ b/vp8/encoder/ethreading.c
@@ -232,23 +232,9 @@ THREAD_FUNCTION thread_encoding_proc(void *p_data)
                 x->mb_activity_ptr++;
                 x->mb_norm_activity_ptr++;
 
-                if(cm->frame_type != INTRA_FRAME)
-                {
-                    if (xd->mode_info_context->mbmi.mode != B_PRED)
-                    {
-                        for (i = 0; i < 16; i++)
-                            xd->mode_info_context->bmi[i].mv.as_int = xd->block[i].bmi.mv.as_int;
-                    }else
-                    {
-                        for (i = 0; i < 16; i++)
-                            xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
-                    }
-                }
-                else {
-                    if(xd->mode_info_context->mbmi.mode != B_PRED)
-                        for (i = 0; i < 16; i++)
-                            xd->mode_info_context->bmi[i].as_mode = xd->block[i].bmi.mode;
-                }
+                /* save the block info */
+                for (i = 0; i < 16; i++)
+                    xd->mode_info_context->bmi[i] = xd->block[i].bmi;
 
                 // adjust to the next column of macroblocks
                 x->src.y_buffer += 16;
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 7cf50415d..e9fc07de2 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -100,7 +100,7 @@ static int encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
     {
         for (i = 0; i < 16; i++)
         {
-            x->e_mbd.block[i].bmi.mode = B_DC_PRED;
+            x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;
             vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
         }
     }
diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index 7bc3b5260..a96908490 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -47,7 +47,6 @@ extern unsigned int (*vp8_get16x16pred_error)(unsigned char *src_ptr, int src_st
 extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);
 extern int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *best_ref_mv, int best_rd, int *, int *, int *, int, int *mvcost[2], int, int fullpixel);
 extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
-extern void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv);
 
 
 int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
@@ -215,7 +214,8 @@ static int pick_intra4x4block(
             *best_mode = mode;
         }
     }
-    b->bmi.mode = (B_PREDICTION_MODE)(*best_mode);
+
+    b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode);
     vp8_encode_intra4x4block(rtcd, x, ib);
     return best_rd;
 }
@@ -251,7 +251,7 @@ int vp8_pick_intra4x4mby_modes
         cost += r;
         distortion += d;
 
-        mic->bmi[i].as_mode = xd->block[i].bmi.mode = best_mode;
+        mic->bmi[i].as_mode = best_mode;
 
         // Break out case where we have already exceeded best so far value
         // that was passed in
@@ -443,7 +443,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
     BLOCK *b = &x->block[0];
     BLOCKD *d = &x->e_mbd.block[0];
     MACROBLOCKD *xd = &x->e_mbd;
-    B_MODE_INFO best_bmodes[16];
+    union b_mode_info best_bmodes[16];
     MB_MODE_INFO best_mbmode;
     int_mv best_ref_mv;
 
@@ -485,6 +485,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
     vpx_memset(nearest_mv, 0, sizeof(nearest_mv));
     vpx_memset(near_mv, 0, sizeof(near_mv));
     vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
+    vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
 
     // set up all the refframe dependent pointers.
 
@@ -885,7 +886,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
             if (this_mode == B_PRED)
                 for (i = 0; i < 16; i++)
                 {
-                    vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
+                    best_bmodes[i].as_mode = x->e_mbd.block[i].bmi.as_mode;
                 }
 
             // Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
@@ -953,10 +954,11 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
     }
 
     if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
+    {
         for (i = 0; i < 16; i++)
         {
-            x->e_mbd.block[i].bmi.mode = best_bmodes[i].mode;
+            x->e_mbd.block[i].bmi.as_mode = best_bmodes[i].as_mode;
         }
-
+    }
     update_mvcount(cpi, &x->e_mbd, &frame_best_ref_mv[xd->mode_info_context->mbmi.ref_frame]);
 }
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index bed2bb5f6..3ab8ea300 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -650,7 +650,7 @@ static int rd_pick_intra4x4block(
             vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
         }
     }
-    b->bmi.mode = (B_PREDICTION_MODE)(*best_mode);
+    b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode);
 
     IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff, b->diff, 32);
     RECON_INVOKE(IF_RTCD(&cpi->rtcd.common->recon), recon)(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -1398,8 +1398,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
     {
         BLOCKD *bd = &x->e_mbd.block[i];
 
-        bd->bmi.mv.as_mv = bsi.mvs[i].as_mv;
-        bd->bmi.mode = bsi.modes[i];
+        bd->bmi.mv.as_int = bsi.mvs[i].as_int;
         bd->eob = bsi.eobs[i];
     }
 
@@ -1714,7 +1713,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
     BLOCK *b = &x->block[0];
     BLOCKD *d = &x->e_mbd.block[0];
     MACROBLOCKD *xd = &x->e_mbd;
-    B_MODE_INFO best_bmodes[16];
+    union b_mode_info best_bmodes[16];
     MB_MODE_INFO best_mbmode;
     PARTITION_INFO best_partition;
     int_mv best_ref_mv;
@@ -1758,6 +1757,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
     unsigned char *v_buffer[4];
 
     vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
+    vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
 
     if (cpi->ref_frame_flags & VP8_LAST_FLAG)
     {
@@ -2319,10 +2319,12 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
             vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO));
             vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
 
-            for (i = 0; i < 16; i++)
-            {
-                vpx_memcpy(&best_bmodes[i], &x->e_mbd.block[i].bmi, sizeof(B_MODE_INFO));
-            }
+            if ((this_mode == B_PRED) || (this_mode == SPLITMV))
+                for (i = 0; i < 16; i++)
+                {
+                    best_bmodes[i] = x->e_mbd.block[i].bmi;
+                }
+
             // Testing this mode gave rise to an improvement in best error score. Lower threshold a bit for next time
             cpi->rd_thresh_mult[mode_index] = (cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
@@ -2396,7 +2398,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
     if (best_mbmode.mode == B_PRED)
     {
         for (i = 0; i < 16; i++)
-            x->e_mbd.block[i].bmi.mode = best_bmodes[i].mode;
+            x->e_mbd.block[i].bmi.as_mode = best_bmodes[i].as_mode;
     }
 
     if (best_mbmode.mode == SPLITMV)
-- 
cgit v1.2.3

From e5c236c21011c668c6b521dc4e554dbf91a161fe Mon Sep 17 00:00:00 2001
From: Yunqing Wang
Date: Thu, 2 Jun 2011 17:33:17 -0400
Subject: Adjust bounds checking for hex search in real-time mode

Currently, hex search couldn't guarantee that the motion vector (MV) it
found was within the limit of the maximum MV. Therefore, very large
motion vectors resulting from big motion in the video could cause
encoding artifacts. This change adjusts the hex search bounds checking
to make sure the resulting motion vector won't go out of range.

James Berry, thank you for finding the bug.
Change-Id: If2c55edd9019e72444ad9b4b8688969eef610c55
---
 vp8/encoder/pickinter.c | 41 +++++++++++++++++++----------------------
 1 file changed, 19 insertions(+), 22 deletions(-)
(limited to 'vp8')

diff --git a/vp8/encoder/pickinter.c b/vp8/encoder/pickinter.c
index 77e188e30..f4bcabe79 100644
--- a/vp8/encoder/pickinter.c
+++ b/vp8/encoder/pickinter.c
@@ -736,26 +736,26 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
             //adjust search range according to sr from mv prediction
             if(sr > step_param)
                 step_param = sr;
-
-            col_min = (best_ref_mv.as_mv.col - MAX_FULL_PEL_VAL) >>3;
-            col_max = (best_ref_mv.as_mv.col + MAX_FULL_PEL_VAL) >>3;
-            row_min = (best_ref_mv.as_mv.row - MAX_FULL_PEL_VAL) >>3;
-            row_max = (best_ref_mv.as_mv.row + MAX_FULL_PEL_VAL) >>3;
-
-            // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
-            if (x->mv_col_min < col_min )
-                x->mv_col_min = col_min;
-            if (x->mv_col_max > col_max )
-                x->mv_col_max = col_max;
-            if (x->mv_row_min < row_min )
-                x->mv_row_min = row_min;
-            if (x->mv_row_max > row_max )
-                x->mv_row_max = row_max;
         }else
         {
             mvp.as_int = best_ref_mv.as_int;
         }
 
+        col_min = (best_ref_mv.as_mv.col - MAX_FULL_PEL_VAL) >>3;
+        col_max = (best_ref_mv.as_mv.col + MAX_FULL_PEL_VAL) >>3;
+        row_min = (best_ref_mv.as_mv.row - MAX_FULL_PEL_VAL) >>3;
+        row_max = (best_ref_mv.as_mv.row + MAX_FULL_PEL_VAL) >>3;
+
+        // Get intersection of UMV window and valid MV window to reduce # of checks in diamond search.
+        if (x->mv_col_min < col_min )
+            x->mv_col_min = col_min;
+        if (x->mv_col_max > col_max )
+            x->mv_col_max = col_max;
+        if (x->mv_row_min < row_min )
+            x->mv_row_min = row_min;
+        if (x->mv_row_max > row_max )
+            x->mv_row_max = row_max;
+
         further_steps = (cpi->Speed >= 8)? 0: (cpi->sf.max_step_search_steps - 1 - step_param);
 
         if (cpi->sf.search_method == HEX)
@@ -808,13 +808,10 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
             }
         }
 
-        if(cpi->sf.improved_mv_pred)
-        {
-            x->mv_col_min = tmp_col_min;
-            x->mv_col_max = tmp_col_max;
-            x->mv_row_min = tmp_row_min;
-            x->mv_row_max = tmp_row_max;
-        }
+        x->mv_col_min = tmp_col_min;
+        x->mv_col_max = tmp_col_max;
+        x->mv_row_min = tmp_row_min;
+        x->mv_row_max = tmp_row_max;
 
         if (bestsme < INT_MAX)
             cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv,
-- 
cgit v1.2.3