diff options
Diffstat (limited to 'vp8/encoder')
-rw-r--r-- | vp8/encoder/bitstream.c | 28 | ||||
-rw-r--r-- | vp8/encoder/encodeframe.c | 4 | ||||
-rw-r--r-- | vp8/encoder/encodeintra.c | 4 | ||||
-rw-r--r-- | vp8/encoder/encodemb.c | 8 | ||||
-rw-r--r-- | vp8/encoder/temporal_filter.c | 14 | ||||
-rw-r--r-- | vp8/encoder/tokenize.c | 186 | ||||
-rw-r--r-- | vp8/encoder/tokenize.h | 23 |
7 files changed, 135 insertions, 132 deletions
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c index 6a08eb8ba..76aed7e2d 100644 --- a/vp8/encoder/bitstream.c +++ b/vp8/encoder/bitstream.c @@ -138,7 +138,7 @@ static void update_mode( } static void update_mbintra_mode_probs(VP8_COMP *cpi) { - VP8_COMMON *const x = & cpi->common; + VP8_COMMON *const cm = & cpi->common; vp8_writer *const w = & cpi->bc; @@ -148,7 +148,7 @@ static void update_mbintra_mode_probs(VP8_COMP *cpi) { update_mode( w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree, - Pnew, x->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count + Pnew, cm->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count ); } } @@ -569,31 +569,31 @@ static void write_mv_hp // This function writes the current macro block's segnment id to the bitstream // It should only be called if a segment map update is indicated. static void write_mb_segid(vp8_writer *w, - const MB_MODE_INFO *mi, const MACROBLOCKD *x) { + const MB_MODE_INFO *mi, const MACROBLOCKD *xd) { // Encode the MB segment id. 
- if (x->segmentation_enabled && x->update_mb_segmentation_map) { + if (xd->segmentation_enabled && xd->update_mb_segmentation_map) { switch (mi->segment_id) { case 0: - vp8_write(w, 0, x->mb_segment_tree_probs[0]); - vp8_write(w, 0, x->mb_segment_tree_probs[1]); + vp8_write(w, 0, xd->mb_segment_tree_probs[0]); + vp8_write(w, 0, xd->mb_segment_tree_probs[1]); break; case 1: - vp8_write(w, 0, x->mb_segment_tree_probs[0]); - vp8_write(w, 1, x->mb_segment_tree_probs[1]); + vp8_write(w, 0, xd->mb_segment_tree_probs[0]); + vp8_write(w, 1, xd->mb_segment_tree_probs[1]); break; case 2: - vp8_write(w, 1, x->mb_segment_tree_probs[0]); - vp8_write(w, 0, x->mb_segment_tree_probs[2]); + vp8_write(w, 1, xd->mb_segment_tree_probs[0]); + vp8_write(w, 0, xd->mb_segment_tree_probs[2]); break; case 3: - vp8_write(w, 1, x->mb_segment_tree_probs[0]); - vp8_write(w, 1, x->mb_segment_tree_probs[2]); + vp8_write(w, 1, xd->mb_segment_tree_probs[0]); + vp8_write(w, 1, xd->mb_segment_tree_probs[2]); break; // TRAP.. 
This should not happen default: - vp8_write(w, 0, x->mb_segment_tree_probs[0]); - vp8_write(w, 0, x->mb_segment_tree_probs[1]); + vp8_write(w, 0, xd->mb_segment_tree_probs[0]); + vp8_write(w, 0, xd->mb_segment_tree_probs[1]); break; } } diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c index d3e0e4535..e58c852a7 100644 --- a/vp8/encoder/encodeframe.c +++ b/vp8/encoder/encodeframe.c @@ -47,7 +47,7 @@ int enc_debug = 0; int mb_row_debug, mb_col_debug; #endif -extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, +extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run); extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex); @@ -1471,7 +1471,7 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi, extern int cnt_pm; #endif -extern void vp8_fix_contexts(MACROBLOCKD *x); +extern void vp8_fix_contexts(MACROBLOCKD *xd); void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c index 70ef55a1b..053639b9d 100644 --- a/vp8/encoder/encodeintra.c +++ b/vp8/encoder/encodeintra.c @@ -113,9 +113,9 @@ void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) { int i; #if 0 - MACROBLOCKD *x = &mb->e_mbd; + MACROBLOCKD *xd = &mb->e_mbd; // Intra modes requiring top-right MB reconstructed data have been disabled - vp8_intra_prediction_down_copy(x); + vp8_intra_prediction_down_copy(xd); #endif for (i = 0; i < 16; i++) diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c index d137603d1..e03b47e2c 100644 --- a/vp8/encoder/encodemb.c +++ b/vp8/encoder/encodemb.c @@ -607,11 +607,11 @@ fall between -65 and +65. 
**************************************************************************/ #define SUM_2ND_COEFF_THRESH 65 -static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type, +static void check_reset_2nd_coeffs(MACROBLOCKD *xd, int type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) { int sum = 0; int i; - BLOCKD *bd = &x->block[24]; + BLOCKD *bd = &xd->block[24]; if (bd->dequant[0] >= SUM_2ND_COEFF_THRESH && bd->dequant[1] >= SUM_2ND_COEFF_THRESH) return; @@ -634,10 +634,10 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type, } } #define SUM_2ND_COEFF_THRESH_8X8 32 -static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *x, int type, +static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd, int type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) { int sum = 0; - BLOCKD *bd = &x->block[24]; + BLOCKD *bd = &xd->block[24]; int coef; coef = bd->dqcoeff[0]; diff --git a/vp8/encoder/temporal_filter.c b/vp8/encoder/temporal_filter.c index 0e86a0ca4..035eebb98 100644 --- a/vp8/encoder/temporal_filter.c +++ b/vp8/encoder/temporal_filter.c @@ -39,7 +39,7 @@ static void vp8_temporal_filter_predictors_mb_c ( - MACROBLOCKD *x, + MACROBLOCKD *xd, unsigned char *y_mb_ptr, unsigned char *u_mb_ptr, unsigned char *v_mb_ptr, @@ -56,10 +56,10 @@ static void vp8_temporal_filter_predictors_mb_c yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3); if ((mv_row | mv_col) & 7) { - x->subpixel_predict16x16(yptr, stride, + xd->subpixel_predict16x16(yptr, stride, (mv_col & 7) << 1, (mv_row & 7) << 1, &pred[0], 16); } else { - RECON_INVOKE(&x->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16); + RECON_INVOKE(&xd->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16); } // U & V @@ -73,14 +73,14 @@ static void vp8_temporal_filter_predictors_mb_c vptr = v_mb_ptr + offset; if ((omv_row | omv_col) & 15) { - x->subpixel_predict8x8(uptr, stride, + xd->subpixel_predict8x8(uptr, stride, (omv_col & 15), (omv_row & 15), &pred[256], 8); - x->subpixel_predict8x8(vptr, stride, + xd->subpixel_predict8x8(vptr, 
stride, (omv_col & 15), (omv_row & 15), &pred[320], 8); } else { - RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8); - RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8); + RECON_INVOKE(&xd->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8); + RECON_INVOKE(&xd->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8); } } void vp8_temporal_filter_apply_c diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c index 9b848f3d5..5072b1967 100644 --- a/vp8/encoder/tokenize.c +++ b/vp8/encoder/tokenize.c @@ -39,14 +39,14 @@ extern unsigned int tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS] #endif #endif void vp8_stuff_mb(VP8_COMP *cpi, - MACROBLOCKD *x, TOKENEXTRA **t, int dry_run); + MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run); void vp8_stuff_mb_8x8(VP8_COMP *cpi, - MACROBLOCKD *x, TOKENEXTRA **t, int dry_run); + MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run); #if CONFIG_TX16X16 -void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *x, +void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run); #endif -void vp8_fix_contexts(MACROBLOCKD *x); +void vp8_fix_contexts(MACROBLOCKD *xd); static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2]; const TOKENVALUE *vp8_dct_value_tokens_ptr; @@ -717,100 +717,100 @@ static void tokenize1st_order_b } -int mby_is_skippable(MACROBLOCKD *x, int has_y2_block) { +int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block) { int skip = 1; int i = 0; if (has_y2_block) { for (i = 0; i < 16; i++) - skip &= (x->block[i].eob < 2); - skip &= (!x->block[24].eob); + skip &= (xd->block[i].eob < 2); + skip &= (!xd->block[24].eob); } else { for (i = 0; i < 16; i++) - skip &= (!x->block[i].eob); + skip &= (!xd->block[i].eob); } return skip; } -int mbuv_is_skippable(MACROBLOCKD *x) { +int mbuv_is_skippable(MACROBLOCKD *xd) { int skip = 1; int i; for (i = 16; i < 24; i++) - skip &= (!x->block[i].eob); + skip &= (!xd->block[i].eob); return skip; } -int mb_is_skippable(MACROBLOCKD *x, int 
has_y2_block) { - return (mby_is_skippable(x, has_y2_block) & - mbuv_is_skippable(x)); +int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block) { + return (mby_is_skippable(xd, has_y2_block) & + mbuv_is_skippable(xd)); } -int mby_is_skippable_8x8(MACROBLOCKD *x) { +int mby_is_skippable_8x8(MACROBLOCKD *xd) { int skip = 1; int i = 0; for (i = 0; i < 16; i += 4) - skip &= (x->block[i].eob < 2); - skip &= (!x->block[24].eob); + skip &= (xd->block[i].eob < 2); + skip &= (!xd->block[24].eob); return skip; } -int mbuv_is_skippable_8x8(MACROBLOCKD *x) { - return (!x->block[16].eob) & (!x->block[20].eob); +int mbuv_is_skippable_8x8(MACROBLOCKD *xd) { + return (!xd->block[16].eob) & (!xd->block[20].eob); } -int mb_is_skippable_8x8(MACROBLOCKD *x) { - return (mby_is_skippable_8x8(x) & mbuv_is_skippable_8x8(x)); +int mb_is_skippable_8x8(MACROBLOCKD *xd) { + return (mby_is_skippable_8x8(xd) & mbuv_is_skippable_8x8(xd)); } #if CONFIG_TX16X16 -int mby_is_skippable_16x16(MACROBLOCKD *x) { +int mby_is_skippable_16x16(MACROBLOCKD *xd) { int skip = 1; - //skip &= (x->block[0].eob < 2); // I think this should be commented? No second order == DC must be coded - //skip &= (x->block[0].eob < 1); - //skip &= (!x->block[24].eob); - skip &= !x->block[0].eob; + //skip &= (xd->block[0].eob < 2); // I think this should be commented? 
No second order == DC must be coded + //skip &= (xd->block[0].eob < 1); + //skip &= (!xd->block[24].eob); + skip &= !xd->block[0].eob; return skip; } -int mb_is_skippable_16x16(MACROBLOCKD *x) { - return (mby_is_skippable_16x16(x) & mbuv_is_skippable_8x8(x)); +int mb_is_skippable_16x16(MACROBLOCKD *xd) { + return (mby_is_skippable_16x16(xd) & mbuv_is_skippable_8x8(xd)); } #endif void vp8_tokenize_mb(VP8_COMP *cpi, - MACROBLOCKD *x, + MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) { int plane_type; int has_y2_block; int b; - int tx_type = x->mode_info_context->mbmi.txfm_size; - int mb_skip_context = get_pred_context(&cpi->common, x, PRED_MBSKIP); + int tx_type = xd->mode_info_context->mbmi.txfm_size; + int mb_skip_context = get_pred_context(&cpi->common, xd, PRED_MBSKIP); TOKENEXTRA *t_backup = *t; // If the MB is going to be skipped because of a segment level flag // exclude this from the skip count stats used to calculate the // transmitted skip probability; int skip_inc; - int segment_id = x->mode_info_context->mbmi.segment_id; + int segment_id = xd->mode_info_context->mbmi.segment_id; #if CONFIG_HYBRIDTRANSFORM int QIndex = cpi->mb.q_index; int active_ht = (QIndex < ACTIVE_HT) && - (x->mode_info_context->mbmi.mode == B_PRED); + (xd->mode_info_context->mbmi.mode == B_PRED); #endif - if (!segfeature_active(x, segment_id, SEG_LVL_EOB) || - (get_segdata(x, segment_id, SEG_LVL_EOB) != 0)) { + if (!segfeature_active(xd, segment_id, SEG_LVL_EOB) || + (get_segdata(xd, segment_id, SEG_LVL_EOB) != 0)) { skip_inc = 1; } else skip_inc = 0; - has_y2_block = (x->mode_info_context->mbmi.mode != B_PRED - && x->mode_info_context->mbmi.mode != I8X8_PRED - && x->mode_info_context->mbmi.mode != SPLITMV); + has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED + && xd->mode_info_context->mbmi.mode != I8X8_PRED + && xd->mode_info_context->mbmi.mode != SPLITMV); #if CONFIG_TX16X16 if (tx_type == TX_16X16) has_y2_block = 0; // Because of inter frames #endif @@ -818,18 +818,18 @@ void vp8_tokenize_mb(VP8_COMP *cpi, switch (tx_type) { #if 
CONFIG_TX16X16 case TX_16X16: - x->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_16x16(x); + xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_16x16(xd); break; #endif case TX_8X8: - x->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(x); + xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd); break; default: - x->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(x, has_y2_block); + xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(xd, has_y2_block); break; } - if (x->mode_info_context->mbmi.mb_skip_coeff) { + if (xd->mode_info_context->mbmi.mb_skip_coeff) { if (!dry_run) cpi->skip_true_count[mb_skip_context] += skip_inc; if (!cpi->common.mb_no_coeff_skip) { @@ -839,11 +839,11 @@ void vp8_tokenize_mb(VP8_COMP *cpi, else #endif if (tx_type == TX_8X8) - vp8_stuff_mb_8x8(cpi, x, t, dry_run); + vp8_stuff_mb_8x8(cpi, xd, t, dry_run); else - vp8_stuff_mb(cpi, x, t, dry_run); + vp8_stuff_mb(cpi, xd, t, dry_run); } else { - vp8_fix_contexts(x); + vp8_fix_contexts(xd); } if (dry_run) *t = t_backup; @@ -856,31 +856,31 @@ void vp8_tokenize_mb(VP8_COMP *cpi, plane_type = 3; if (has_y2_block) { if (tx_type == TX_8X8) { - ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context; - ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context; - tokenize2nd_order_b_8x8(x, - x->block + 24, t, 1, x->frame_type, + ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context; + ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context; + tokenize2nd_order_b_8x8(xd, + xd->block + 24, t, 1, xd->frame_type, A + vp8_block2above_8x8[24], L + vp8_block2left_8x8[24], cpi, dry_run); } else - tokenize2nd_order_b(x, t, cpi, dry_run); + tokenize2nd_order_b(xd, t, cpi, dry_run); plane_type = 0; } #if CONFIG_TX16X16 if (tx_type == TX_16X16) { - ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context; - ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)x->left_context; - tokenize1st_order_b_16x16(x, x->block, t, 3, - x->frame_type, A, L, cpi, dry_run); + 
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context; + ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context; + tokenize1st_order_b_16x16(xd, xd->block, t, 3, + xd->frame_type, A, L, cpi, dry_run); for (b = 1; b < 16; b++) { *(A + vp8_block2above[b]) = *(A); *(L + vp8_block2left[b] ) = *(L); } for (b = 16; b < 24; b += 4) { - tokenize1st_order_b_8x8(x, x->block + b, t, 2, x->frame_type, + tokenize1st_order_b_8x8(xd, xd->block + b, t, 2, xd->frame_type, A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], cpi, dry_run); *(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]); *(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]); @@ -891,11 +891,11 @@ void vp8_tokenize_mb(VP8_COMP *cpi, else #endif if (tx_type == TX_8X8) { - ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context; - ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context; + ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context; + ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context; for (b = 0; b < 16; b += 4) { - tokenize1st_order_b_8x8(x, - x->block + b, t, plane_type, x->frame_type, + tokenize1st_order_b_8x8(xd, + xd->block + b, t, plane_type, xd->frame_type, A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], cpi, dry_run); @@ -903,8 +903,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi, *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]); } for (b = 16; b < 24; b += 4) { - tokenize1st_order_b_8x8(x, - x->block + b, t, 2, x->frame_type, + tokenize1st_order_b_8x8(xd, + xd->block + b, t, 2, xd->frame_type, A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], cpi, dry_run); @@ -914,34 +914,34 @@ void vp8_tokenize_mb(VP8_COMP *cpi, } else { #if CONFIG_HYBRIDTRANSFORM if(active_ht) { - tokenize1st_order_ht(x, t, plane_type, cpi, dry_run); + tokenize1st_order_ht(xd, t, plane_type, cpi, dry_run); } else { #if CONFIG_HYBRIDTRANSFORM8X8 - if (x->mode_info_context->mbmi.mode == I8X8_PRED) { - ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context; - 
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context; + if (xd->mode_info_context->mbmi.mode == I8X8_PRED) { + ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context; + ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context; for (b = 0; b < 16; b += 4) { - tokenize1st_order_b_8x8(x, - x->block + b, t, PLANE_TYPE_Y_WITH_DC, - x->frame_type, + tokenize1st_order_b_8x8(xd, + xd->block + b, t, PLANE_TYPE_Y_WITH_DC, + xd->frame_type, A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], cpi, dry_run); *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]); *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]); } - tokenize1st_order_chroma(x, t, PLANE_TYPE_UV, cpi, dry_run); + tokenize1st_order_chroma(xd, t, PLANE_TYPE_UV, cpi, dry_run); } else { - tokenize1st_order_b(x, t, plane_type, cpi, dry_run); + tokenize1st_order_b(xd, t, plane_type, cpi, dry_run); } #else - tokenize1st_order_b(x, t, plane_type, cpi, dry_run); + tokenize1st_order_b(xd, t, plane_type, cpi, dry_run); #endif } #else - tokenize1st_order_b(x, t, plane_type, cpi, dry_run); + tokenize1st_order_b(xd, t, plane_type, cpi, dry_run); #endif } if (dry_run) @@ -1304,22 +1304,22 @@ void stuff1st_order_buv_8x8 } void vp8_stuff_mb_8x8(VP8_COMP *cpi, - MACROBLOCKD *x, + MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) { - ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context; - ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context; + ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context; + ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context; int plane_type; int b; TOKENEXTRA *t_backup = *t; - stuff2nd_order_b_8x8(x->block + 24, t, 1, x->frame_type, + stuff2nd_order_b_8x8(xd->block + 24, t, 1, xd->frame_type, A + vp8_block2above_8x8[24], L + vp8_block2left_8x8[24], cpi, dry_run); plane_type = 0; for (b = 0; b < 16; b += 4) { - stuff1st_order_b_8x8(x->block + b, t, plane_type, x->frame_type, + stuff1st_order_b_8x8(xd->block + b, t, plane_type, xd->frame_type, A + 
vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], cpi, dry_run); @@ -1328,7 +1328,7 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi, } for (b = 16; b < 24; b += 4) { - stuff1st_order_buv_8x8(x->block + b, t, 2, x->frame_type, + stuff1st_order_buv_8x8(xd->block + b, t, 2, xd->frame_type, A + vp8_block2above[b], L + vp8_block2left[b], cpi, dry_run); @@ -1367,21 +1367,21 @@ void stuff1st_order_b_16x16(const BLOCKD *const b, } void vp8_stuff_mb_16x16(VP8_COMP *cpi, - MACROBLOCKD *x, + MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) { - ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context; - ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)x->left_context; + ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context; + ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context; int b, i; TOKENEXTRA *t_backup = *t; - stuff1st_order_b_16x16(x->block, t, x->frame_type, A, L, cpi, dry_run); + stuff1st_order_b_16x16(xd->block, t, xd->frame_type, A, L, cpi, dry_run); for (i = 1; i < 16; i++) { *(A + vp8_block2above[i]) = *(A); *(L + vp8_block2left[i]) = *(L); } for (b = 16; b < 24; b += 4) { - stuff1st_order_buv_8x8(x->block + b, t, 2, x->frame_type, + stuff1st_order_buv_8x8(xd->block + b, t, 2, xd->frame_type, A + vp8_block2above[b], L + vp8_block2left[b], cpi, dry_run); @@ -1462,10 +1462,10 @@ void stuff1st_order_buv *a = *l = pt; } -void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, +void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) { - ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context; - ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context; + ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context; + ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context; int plane_type; int b; TOKENEXTRA *t_backup = *t; @@ -1491,19 +1491,19 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, if (dry_run) *t = t_backup; } -void vp8_fix_contexts(MACROBLOCKD *x) { +void vp8_fix_contexts(MACROBLOCKD *xd) { /* Clear entropy contexts for Y2 blocks */ - if 
((x->mode_info_context->mbmi.mode != B_PRED - && x->mode_info_context->mbmi.mode != I8X8_PRED - && x->mode_info_context->mbmi.mode != SPLITMV) + if ((xd->mode_info_context->mbmi.mode != B_PRED + && xd->mode_info_context->mbmi.mode != I8X8_PRED + && xd->mode_info_context->mbmi.mode != SPLITMV) #if CONFIG_TX16X16 - || x->mode_info_context->mbmi.txfm_size == TX_16X16 + || xd->mode_info_context->mbmi.txfm_size == TX_16X16 #endif ) { - vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); - vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); + vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); + vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); } else { - vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); - vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); + vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); + vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); } } diff --git a/vp8/encoder/tokenize.h b/vp8/encoder/tokenize.h index baff4c2b0..ca5d49722 100644 --- a/vp8/encoder/tokenize.h +++ b/vp8/encoder/tokenize.h @@ -31,22 +31,25 @@ typedef struct { int rd_cost_mby(MACROBLOCKD *); -extern int mby_is_skippable(MACROBLOCKD *x, int has_y2_block); -extern int mbuv_is_skippable(MACROBLOCKD *x); -extern int mb_is_skippable(MACROBLOCKD *x, int has_y2_block); -extern int mby_is_skippable_8x8(MACROBLOCKD *x); -extern int mbuv_is_skippable_8x8(MACROBLOCKD *x); -extern int mb_is_skippable_8x8(MACROBLOCKD *x); -extern int mb_is_skippable_16x16(MACROBLOCKD *x); +extern int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block); +extern int mbuv_is_skippable(MACROBLOCKD *xd); +extern int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block); +extern int mby_is_skippable_8x8(MACROBLOCKD *xd); +extern int mbuv_is_skippable_8x8(MACROBLOCKD *xd); +extern int mb_is_skippable_8x8(MACROBLOCKD *xd); +extern int mb_is_skippable_16x16(MACROBLOCKD *xd); #ifdef ENTROPY_STATS 
void init_context_counters(); void print_context_counters(); -extern INT64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; -extern INT64 context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; +extern INT64 context_counters[BLOCK_TYPES][COEF_BANDS] + [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS]; +extern INT64 context_counters_8x8[BLOCK_TYPES_8X8][COEF_BANDS] + [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS]; #if CONFIG_TX16X16 -extern INT64 context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; +extern INT64 context_counters_16x16[BLOCK_TYPES_16X16][COEF_BANDS] + [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS]; #endif #endif |