Diffstat (limited to 'vp9/encoder/vp9_encodeframe.c')
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  366
1 file changed, 51 insertions(+), 315 deletions(-)
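
The hunks below migrate the encoder from the legacy monolithic xd->dst YV12 buffer (y_buffer/u_buffer/v_buffer with shared y/uv strides) to per-plane destination pointers, xd->plane[i].dst.buf with a per-plane stride, set up via setup_dst_planes()/setup_pre_planes(). The following is a minimal standalone sketch of that access pattern; the struct shapes, the helper name sketch_setup_dst_planes(), and the offset arithmetic are simplified stand-ins rather than the real libvpx definitions. Only the plane[i].dst.buf / plane[i].dst.stride field layout is taken from the diff itself.

/*
 * Illustrative sketch only: simplified stand-ins for the MACROBLOCKD types.
 * Assumes 4:2:0 chroma subsampling and a 16x16 macroblock grid, matching how
 * the encoder indexes mb_row/mb_col in this file.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_MB_PLANE 3  /* Y, U, V */

struct buf_2d {
  uint8_t *buf;  /* top-left sample of this plane's destination block */
  int stride;    /* distance in samples between vertically adjacent rows */
};

struct macroblockd_plane {
  struct buf_2d dst;  /* reconstruction destination for this plane */
};

typedef struct {
  struct macroblockd_plane plane[MAX_MB_PLANE];
} MACROBLOCKD;

/* Old-style access (removed by the diff): xd->dst.y_buffer, xd->dst.u_buffer, ...
 * New-style access (added by the diff):   xd->plane[0].dst.buf, xd->plane[1].dst.buf, ...
 * A helper in the spirit of setup_dst_planes(): point each plane at the
 * macroblock's region of a frame buffer. */
static void sketch_setup_dst_planes(MACROBLOCKD *xd,
                                    uint8_t *y, int y_stride,
                                    uint8_t *u, uint8_t *v, int uv_stride,
                                    int mb_row, int mb_col) {
  xd->plane[0].dst.buf = y + 16 * mb_row * y_stride + 16 * mb_col;
  xd->plane[0].dst.stride = y_stride;
  xd->plane[1].dst.buf = u + 8 * mb_row * uv_stride + 8 * mb_col;
  xd->plane[1].dst.stride = uv_stride;
  xd->plane[2].dst.buf = v + 8 * mb_row * uv_stride + 8 * mb_col;
  xd->plane[2].dst.stride = uv_stride;
}

int main(void) {
  enum { W = 64, H = 64 };
  static uint8_t y[W * H], u[(W / 2) * (H / 2)], v[(W / 2) * (H / 2)];
  MACROBLOCKD xd;
  memset(&xd, 0, sizeof(xd));

  /* Point the three planes at macroblock (row 1, col 2) of the frame. */
  sketch_setup_dst_planes(&xd, y, W, u, v, W / 2, 1, 2);

  /* All per-plane writes now go through plane[i].dst, e.g. the top-left
   * luma sample of this macroblock: */
  xd.plane[0].dst.buf[0] = 128;
  printf("luma dst offset: %ld, stride: %d\n",
         (long)(xd.plane[0].dst.buf - y), xd.plane[0].dst.stride);
  return 0;
}
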
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 5f29c2770..1f20d5e1c 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -278,7 +278,7 @@ static void build_activity_map(VP9_COMP *cpi) {
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
#if ALT_ACT_MEASURE
- xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
+ xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
#endif
@@ -305,8 +305,8 @@ static void build_activity_map(VP9_COMP *cpi) {
#if ALT_ACT_MEASURE
// extend the recon for intra prediction
- vp9_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
- xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
+ vp9_extend_mb_row(new_yv12, xd->plane[0].dst.buf + 16,
+ xd->plane[1].dst.buf + 8, xd->plane[2].dst.buf + 8);
#endif
}
@@ -345,87 +345,6 @@ void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
adjust_act_zbin(cpi, x);
}
-#if CONFIG_NEW_MVREF
-static int vp9_cost_mv_ref_id(vp9_prob * ref_id_probs, int mv_ref_id) {
- int cost;
-
- // Encode the index for the MV reference.
- switch (mv_ref_id) {
- case 0:
- cost = vp9_cost_zero(ref_id_probs[0]);
- break;
- case 1:
- cost = vp9_cost_one(ref_id_probs[0]);
- cost += vp9_cost_zero(ref_id_probs[1]);
- break;
- case 2:
- cost = vp9_cost_one(ref_id_probs[0]);
- cost += vp9_cost_one(ref_id_probs[1]);
- cost += vp9_cost_zero(ref_id_probs[2]);
- break;
- case 3:
- cost = vp9_cost_one(ref_id_probs[0]);
- cost += vp9_cost_one(ref_id_probs[1]);
- cost += vp9_cost_one(ref_id_probs[2]);
- break;
-
- // TRAP.. This should not happen
- default:
- assert(0);
- break;
- }
- return cost;
-}
-
-// Estimate the cost of each coding the vector using each reference candidate
-static unsigned int pick_best_mv_ref(MACROBLOCK *x,
- MV_REFERENCE_FRAME ref_frame,
- int_mv target_mv,
- int_mv * mv_ref_list,
- int_mv * best_ref) {
- int i;
- int best_index = 0;
- int cost, cost2;
- int zero_seen = (mv_ref_list[0].as_int) ? 0 : 1;
- MACROBLOCKD *xd = &x->e_mbd;
- int max_mv = MV_MAX;
-
- cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_probs[ref_frame], 0) +
- vp9_mv_bit_cost(&target_mv, &mv_ref_list[0], x->nmvjointcost,
- x->mvcost, 96, xd->allow_high_precision_mv);
-
- for (i = 1; i < MAX_MV_REF_CANDIDATES; ++i) {
- // If we see a 0,0 reference vector for a second time we have reached
- // the end of the list of valid candidate vectors.
- if (!mv_ref_list[i].as_int) {
- if (zero_seen)
- break;
- else
- zero_seen = 1;
- }
-
- // Check for cases where the reference choice would give rise to an
- // uncodable/out of range residual for row or col.
- if ((abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv) ||
- (abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)) {
- continue;
- }
-
- cost2 = vp9_cost_mv_ref_id(xd->mb_mv_ref_probs[ref_frame], i) +
- vp9_mv_bit_cost(&target_mv, &mv_ref_list[i], x->nmvjointcost,
- x->mvcost, 96, xd->allow_high_precision_mv);
-
- if (cost2 < cost) {
- cost = cost2;
- best_index = i;
- }
- }
- best_ref->as_int = mv_ref_list[best_index].as_int;
-
- return best_index;
-}
-#endif
-
static void update_state(VP9_COMP *cpi,
PICK_MODE_CONTEXT *ctx,
BLOCK_SIZE_TYPE bsize,
@@ -561,31 +480,11 @@ static void update_state(VP9_COMP *cpi,
if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) {
int_mv best_mv, best_second_mv;
MV_REFERENCE_FRAME rf = mbmi->ref_frame;
-#if CONFIG_NEW_MVREF
- unsigned int best_index;
- MV_REFERENCE_FRAME sec_ref_frame = mbmi->second_ref_frame;
-#endif
best_mv.as_int = ctx->best_ref_mv.as_int;
best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
if (mbmi->mode == NEWMV) {
best_mv.as_int = mbmi->ref_mvs[rf][0].as_int;
best_second_mv.as_int = mbmi->ref_mvs[mbmi->second_ref_frame][0].as_int;
-#if CONFIG_NEW_MVREF
- best_index = pick_best_mv_ref(x, rf, mbmi->mv[0],
- mbmi->ref_mvs[rf], &best_mv);
- mbmi->best_index = best_index;
- ++cpi->mb_mv_ref_count[rf][best_index];
-
- if (mbmi->second_ref_frame > 0) {
- unsigned int best_index;
- best_index =
- pick_best_mv_ref(x, sec_ref_frame, mbmi->mv[1],
- mbmi->ref_mvs[sec_ref_frame],
- &best_second_mv);
- mbmi->best_second_index = best_index;
- ++cpi->mb_mv_ref_count[sec_ref_frame][best_index];
- }
-#endif
}
mbmi->best_mv.as_int = best_mv.as_int;
mbmi->best_second_mv.as_int = best_second_mv.as_int;
@@ -652,9 +551,6 @@ static void set_offsets(VP9_COMP *cpi,
xd->above_context = cm->above_context + mb_col;
xd->left_context = cm->left_context + (mb_row & 3);
- // GF active flags data structure
- x->gf_active_ptr = (signed char *)&cpi->gf_active_flags[idx_map];
-
// Activity map pointer
x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
x->active_ptr = cpi->active_map + idx_map;
@@ -666,9 +562,7 @@ static void set_offsets(VP9_COMP *cpi,
xd->prev_mode_info_context = cm->prev_mi + idx_str;
// Set up destination pointers
- setup_pred_block(&xd->dst,
- &cm->yv12_fb[dst_fb_idx],
- mb_row, mb_col, NULL, NULL);
+ setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mb_row, mb_col);
/* Set up limit values for MV components to prevent them from
* extending beyond the UMV borders assuming 16x16 block size */
@@ -844,9 +738,6 @@ static void update_stats(VP9_COMP *cpi, int mb_row, int mb_col) {
if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
cpi->inter_zz_count++;
}
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_update_nzc_counts(&cpi->common, xd, mb_row, mb_col);
-#endif
}
static void encode_sb(VP9_COMP *cpi,
@@ -870,7 +761,7 @@ static void encode_sb(VP9_COMP *cpi,
update_stats(cpi, mb_row, mb_col);
cpi->partition_count[partition_plane(bsize)][PARTITION_NONE]++;
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
#if CONFIG_SBSEGMENT
@@ -888,7 +779,7 @@ static void encode_sb(VP9_COMP *cpi,
if (output_enabled) {
update_stats(cpi, mb_row, mb_col + i);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
@@ -906,7 +797,7 @@ static void encode_sb(VP9_COMP *cpi,
if (output_enabled) {
update_stats(cpi, mb_row + i, mb_col);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
@@ -937,7 +828,7 @@ static void encode_sb(VP9_COMP *cpi,
if (output_enabled) {
update_stats(cpi, mb_row + y_idx, mb_col + x_idx);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
@@ -970,7 +861,7 @@ static void encode_sb64(VP9_COMP *cpi,
1, mb_row, mb_col, bsize);
update_stats(cpi, mb_row, mb_col);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
cpi->partition_count[partition_plane(bsize)][PARTITION_NONE]++;
#if CONFIG_SBSEGMENT
@@ -985,7 +876,7 @@ static void encode_sb64(VP9_COMP *cpi,
1, mb_row, mb_col + i * 2, BLOCK_SIZE_SB32X64);
update_stats(cpi, mb_row, mb_col + i * 2);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
} else if (is_sb[0] == BLOCK_SIZE_SB64X32) {
@@ -999,7 +890,7 @@ static void encode_sb64(VP9_COMP *cpi,
1, mb_row + i * 2, mb_col, BLOCK_SIZE_SB64X32);
update_stats(cpi, mb_row + i * 2, mb_col);
- (*tp)->Token = EOSB_TOKEN;
+ (*tp)->token = EOSB_TOKEN;
(*tp)++;
}
#endif
@@ -1344,8 +1235,11 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
// Copy data over into macro block data structures.
x->src = *cpi->Source;
- xd->pre = cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]];
- xd->dst = cm->yv12_fb[cm->new_fb_idx];
+
+ // TODO(jkoleszar): are these initializations required?
+ setup_pre_planes(xd, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]], NULL,
+ 0, 0, NULL, NULL);
+ setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);
// set up frame for intra coded blocks
vp9_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
@@ -1444,15 +1338,11 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vp9_zero(cpi->coef_counts_16x16);
vp9_zero(cpi->coef_counts_32x32);
vp9_zero(cm->fc.eob_branch_counts);
-#if CONFIG_CODE_NONZEROCOUNT
- vp9_zero(cm->fc.nzc_counts_4x4);
- vp9_zero(cm->fc.nzc_counts_8x8);
- vp9_zero(cm->fc.nzc_counts_16x16);
- vp9_zero(cm->fc.nzc_counts_32x32);
- vp9_zero(cm->fc.nzc_pcat_counts);
-#endif
-#if CONFIG_NEW_MVREF
- vp9_zero(cpi->mb_mv_ref_count);
+#if CONFIG_CODE_ZEROGROUP
+ vp9_zero(cm->fc.zpc_counts_4x4);
+ vp9_zero(cm->fc.zpc_counts_8x8);
+ vp9_zero(cm->fc.zpc_counts_16x16);
+ vp9_zero(cm->fc.zpc_counts_32x32);
#endif
cpi->mb.e_mbd.lossless = (cm->base_qindex == 0 &&
@@ -1506,6 +1396,8 @@ static void encode_frame_internal(VP9_COMP *cpi) {
encode_sb_row(cpi, mb_row, &tp, &totalrate);
}
cpi->tok_count[tile_col] = (unsigned int)(tp - tp_old);
+ assert(tp - cpi->tok <=
+ get_token_alloc(cm->mb_rows, cm->mb_cols));
}
}
}
@@ -1982,137 +1874,6 @@ static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
#endif
}
-#if CONFIG_CODE_NONZEROCOUNT
-static void gather_nzcs_mb16(VP9_COMMON *const cm,
- MACROBLOCKD *xd) {
- int i;
- vpx_memset(xd->mode_info_context->mbmi.nzcs, 0,
- 384 * sizeof(xd->mode_info_context->mbmi.nzcs[0]));
- switch (xd->mode_info_context->mbmi.txfm_size) {
- case TX_4X4:
- for (i = 0; i < 24; ++i) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_8X8:
- for (i = 0; i < 16; i += 4) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
- xd->mode_info_context->mbmi.mode == SPLITMV) {
- for (i = 16; i < 24; ++i) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- } else {
- for (i = 16; i < 24; i += 4) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- }
- break;
-
- case TX_16X16:
- xd->mode_info_context->mbmi.nzcs[0] = xd->nzcs[0];
- for (i = 16; i < 24; i += 4) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- default:
- break;
- }
-}
-
-static void gather_nzcs_sb32(VP9_COMMON *const cm,
- MACROBLOCKD *xd) {
- MODE_INFO *m = xd->mode_info_context;
- int mis = cm->mode_info_stride;
- int i, j;
-
- vpx_memset(m->mbmi.nzcs, 0,
- 384 * sizeof(xd->mode_info_context->mbmi.nzcs[0]));
- switch (xd->mode_info_context->mbmi.txfm_size) {
- case TX_4X4:
- for (i = 0; i < 96; ++i) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_8X8:
- for (i = 0; i < 96; i += 4) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_16X16:
- for (i = 0; i < 96; i += 16) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_32X32:
- xd->mode_info_context->mbmi.nzcs[0] = xd->nzcs[0];
- for (i = 64; i < 96; i += 16) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- default:
- break;
- }
- for (i = 0; i < 2; ++i)
- for (j = 0; j < 2; ++j) {
- if (i == 0 && j == 0) continue;
- vpx_memcpy((m + j + mis * i)->mbmi.nzcs, m->mbmi.nzcs,
- 384 * sizeof(m->mbmi.nzcs[0]));
- }
-}
-
-static void gather_nzcs_sb64(VP9_COMMON *const cm,
- MACROBLOCKD *xd) {
- MODE_INFO *m = xd->mode_info_context;
- int mis = cm->mode_info_stride;
- int i, j;
-
- vpx_memset(xd->mode_info_context->mbmi.nzcs, 0,
- 384 * sizeof(xd->mode_info_context->mbmi.nzcs[0]));
- switch (xd->mode_info_context->mbmi.txfm_size) {
- case TX_4X4:
- for (i = 0; i < 384; ++i) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_8X8:
- for (i = 0; i < 384; i += 4) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_16X16:
- for (i = 0; i < 384; i += 16) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- case TX_32X32:
- for (i = 0; i < 384; i += 64) {
- xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
- }
- break;
-
- default:
- break;
- }
- for (i = 0; i < 4; ++i)
- for (j = 0; j < 4; ++j) {
- if (i == 0 && j == 0) continue;
- vpx_memcpy((m + j + mis * i)->mbmi.nzcs, m->mbmi.nzcs,
- 384 * sizeof(m->mbmi.nzcs[0]));
- }
-}
-#endif
-
static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled,
int mb_row, int mb_col) {
@@ -2188,7 +1949,7 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
- int ref_fb_idx;
+ int ref_fb_idx, second_ref_fb_idx;
#ifdef ENC_DEBUG
if (enc_debug)
printf("Mode %d skip %d tx_size %d ref %d ref2 %d mv %d %d interp %d\n",
@@ -2207,27 +1968,21 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
else
ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
- setup_pred_block(&xd->pre,
- &cpi->common.yv12_fb[ref_fb_idx],
- mb_row, mb_col,
- &xd->scale_factor[0], &xd->scale_factor_uv[0]);
-
if (mbmi->second_ref_frame > 0) {
- int second_ref_fb_idx;
-
if (mbmi->second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->lst_fb_idx];
else if (mbmi->second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->gld_fb_idx];
else
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
-
- setup_pred_block(&xd->second_pre,
- &cpi->common.yv12_fb[second_ref_fb_idx],
- mb_row, mb_col,
- &xd->scale_factor[1], &xd->scale_factor_uv[1]);
}
+ setup_pre_planes(xd,
+ &cpi->common.yv12_fb[ref_fb_idx],
+ mbmi->second_ref_frame > 0 ? &cpi->common.yv12_fb[second_ref_fb_idx]
+ : NULL,
+ mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
+
if (!x->skip) {
vp9_encode_inter16x16(cm, x, mb_row, mb_col);
} else {
@@ -2235,11 +1990,11 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
#if CONFIG_COMP_INTERINTRA_PRED
if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
vp9_build_interintra_16x16_predictors_mb(xd,
- xd->dst.y_buffer,
- xd->dst.u_buffer,
- xd->dst.v_buffer,
- xd->dst.y_stride,
- xd->dst.uv_stride);
+ xd->plane[0].dst.buf,
+ xd->plane[1].dst.buf,
+ xd->plane[2].dst.buf,
+ xd->plane[0].dst.stride,
+ xd->plane[1].dst.stride);
}
#endif
}
@@ -2277,30 +2032,27 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
printf("final y\n");
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j++)
- printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
+ printf("%3d ", xd->plane[0].dst.buf[i * xd->plane[0].dst.stride + j]);
printf("\n");
}
printf("\n");
printf("final u\n");
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
- printf("%3d ", xd->dst.u_buffer[i * xd->dst.uv_stride + j]);
+ printf("%3d ", xd->plane[1].dst.buf[i * xd->plane[1].dst.stride + j]);
printf("\n");
}
printf("\n");
printf("final v\n");
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
- printf("%3d ", xd->dst.v_buffer[i * xd->dst.uv_stride + j]);
+ printf("%3d ", xd->plane[2].dst.buf[i * xd->plane[1].dst.stride + j]);
printf("\n");
}
fflush(stdout);
}
#endif
-#if CONFIG_CODE_NONZEROCOUNT
- gather_nzcs_mb16(cm, xd);
-#endif
vp9_tokenize_mb(cpi, xd, t, !output_enabled);
} else {
@@ -2342,8 +2094,6 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
}
}
-void __attribute__((noinline)) hi(void) { }
-
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled, int mb_row, int mb_col,
BLOCK_SIZE_TYPE bsize) {
@@ -2351,13 +2101,13 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const uint8_t *src = x->src.y_buffer;
- uint8_t *dst = xd->dst.y_buffer;
+ uint8_t *dst = xd->plane[0].dst.buf;
const uint8_t *usrc = x->src.u_buffer;
- uint8_t *udst = xd->dst.u_buffer;
+ uint8_t *udst = xd->plane[1].dst.buf;
const uint8_t *vsrc = x->src.v_buffer;
- uint8_t *vdst = xd->dst.v_buffer;
- int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
- int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
+ uint8_t *vdst = xd->plane[2].dst.buf;
+ int src_y_stride = x->src.y_stride, dst_y_stride = xd->plane[0].dst.stride;
+ int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->plane[1].dst.stride;
int n;
MODE_INFO *mi = x->e_mbd.mode_info_context;
unsigned int segment_id = mi->mbmi.segment_id;
@@ -2407,7 +2157,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
- int ref_fb_idx;
+ int ref_fb_idx, second_ref_fb_idx;
assert(cm->frame_type != KEY_FRAME);
@@ -2418,27 +2168,21 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
else
ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
- setup_pred_block(&xd->pre,
- &cpi->common.yv12_fb[ref_fb_idx],
- mb_row, mb_col,
- &xd->scale_factor[0], &xd->scale_factor_uv[0]);
-
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
- int second_ref_fb_idx;
-
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->lst_fb_idx];
else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->gld_fb_idx];
else
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
-
- setup_pred_block(&xd->second_pre,
- &cpi->common.yv12_fb[second_ref_fb_idx],
- mb_row, mb_col,
- &xd->scale_factor[1], &xd->scale_factor_uv[1]);
}
+ setup_pre_planes(xd,
+ &cpi->common.yv12_fb[ref_fb_idx],
+ xd->mode_info_context->mbmi.second_ref_frame > 0
+ ? &cpi->common.yv12_fb[second_ref_fb_idx] : NULL,
+ mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
+
vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
}
@@ -2521,15 +2265,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
break;
default: assert(0);
}
- vp9_recon_sby_s_c(xd, dst, bsize);
- vp9_recon_sbuv_s_c(&x->e_mbd, udst, vdst, bsize);
-#if CONFIG_CODE_NONZEROCOUNT
- if (bsize == BLOCK_SIZE_SB32X32) {
- gather_nzcs_sb32(cm, &x->e_mbd);
- } else {
- gather_nzcs_sb64(cm, &x->e_mbd);
- }
-#endif
+ vp9_recon_sb_c(xd, bsize);
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled, bsize);
} else {
// FIXME(rbultje): not tile-aware (mi - 1)