summary | refs | log | tree | commit | diff
path: root/vp8/encoder/encodeframe.c
diff options
context:
space:
mode:
Diffstat (limited to 'vp8/encoder/encodeframe.c')
-rw-r--r--  vp8/encoder/encodeframe.c | 756
1 files changed, 567 insertions, 189 deletions
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index b9b100b8f..e742809c8 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -56,10 +56,16 @@ extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
MB_ROW_COMP *mbr_ei,
int mb_row,
int count);
+extern int vp8cx_pick_mode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
+ int recon_yoffset,
+ int recon_uvoffset);
void vp8_build_block_offsets(MACROBLOCK *x);
void vp8_setup_block_ptrs(MACROBLOCK *x);
-int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset);
-int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
+void vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
+ int recon_yoffset, int recon_uvoffset,
+ int output_enabled);
+void vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x,
+ TOKENEXTRA **t, int output_enabled);
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
@@ -315,7 +321,9 @@ static void build_activity_map( VP8_COMP *cpi )
recon_yoffset += 16;
#endif
//Copy current mb to a buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
+ RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
+ x->src.y_stride,
+ x->thismb, 16);
// measure activity
mb_activity = mb_activity_measure( cpi, x, mb_row, mb_col );
@@ -380,80 +388,406 @@ void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
adjust_act_zbin(cpi, x);
}
-static
-void encode_mb_row(VP8_COMP *cpi,
- VP8_COMMON *cm,
- int mb_row,
- MACROBLOCK *x,
- MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- int *totalrate)
+static void update_state (VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx)
{
+ int i;
+ MACROBLOCKD *xd = &x->e_mbd;
+ MODE_INFO *mi = &ctx->mic;
+ int mb_mode = mi->mbmi.mode;
+ int mb_mode_index = ctx->best_mode_index;
+
+#if CONFIG_DEBUG
+ assert (mb_mode < MB_MODE_COUNT);
+ assert (mb_mode_index < MAX_MODES);
+ assert (mi->mbmi.ref_frame < MAX_REF_FRAMES);
+#endif
+
+ // Restore the coding context of the MB to that that was in place
+ // when the mode was picked for it
+ vpx_memcpy(xd->mode_info_context, mi, sizeof(MODE_INFO));
+
+ if (mb_mode == B_PRED)
+ {
+ for (i = 0; i < 16; i++)
+ {
+ xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
+ assert (xd->block[i].bmi.as_mode.first < MB_MODE_COUNT);
+ }
+ }
+ else if (mb_mode == I8X8_PRED)
+ {
+ for (i = 0; i < 16; i++)
+ {
+ xd->block[i].bmi = xd->mode_info_context->bmi[i];
+ }
+ }
+ else if (mb_mode == SPLITMV)
+ {
+ vpx_memcpy(x->partition_info, &ctx->partition_info,
+ sizeof(PARTITION_INFO));
+
+ xd->mode_info_context->mbmi.mv.as_int =
+ x->partition_info->bmi[15].mv.as_int;
+ }
+
+ if (cpi->common.frame_type == KEY_FRAME)
+ {
+ // Restore the coding modes to that held in the coding context
+ //if (mb_mode == B_PRED)
+ // for (i = 0; i < 16; i++)
+ // {
+ // xd->block[i].bmi.as_mode =
+ // xd->mode_info_context->bmi[i].as_mode;
+ // assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
+ // }
+ }
+ else
+ {
+/*
+ // Reduce the activation RD thresholds for the best choice mode
+ if ((cpi->rd_baseline_thresh[mb_mode_index] > 0) &&
+ (cpi->rd_baseline_thresh[mb_mode_index] < (INT_MAX >> 2)))
+ {
+ int best_adjustment = (cpi->rd_thresh_mult[mb_mode_index] >> 2);
+
+ cpi->rd_thresh_mult[mb_mode_index] =
+ (cpi->rd_thresh_mult[mb_mode_index]
+ >= (MIN_THRESHMULT + best_adjustment)) ?
+ cpi->rd_thresh_mult[mb_mode_index] - best_adjustment :
+ MIN_THRESHMULT;
+ cpi->rd_threshes[mb_mode_index] =
+ (cpi->rd_baseline_thresh[mb_mode_index] >> 7)
+ * cpi->rd_thresh_mult[mb_mode_index];
+
+ }
+*/
+ // Note how often each mode chosen as best
+ cpi->mode_chosen_counts[mb_mode_index]++;
+
+ rd_update_mvcount(cpi, x, &ctx->best_ref_mv);
+
+ cpi->prediction_error += ctx->distortion;
+ cpi->intra_error += ctx->intra_error;
+ }
+}
+
+static void pick_mb_modes (VP8_COMP *cpi,
+ VP8_COMMON *cm,
+ int mb_row,
+ int mb_col,
+ MACROBLOCK *x,
+ MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ int *totalrate)
+{
+ int i;
+ int map_index;
int recon_yoffset, recon_uvoffset;
- int mb_col;
int ref_fb_idx = cm->lst_fb_idx;
int dst_fb_idx = cm->new_fb_idx;
int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
- int map_index = (mb_row * cpi->common.mb_cols);
+ ENTROPY_CONTEXT_PLANES left_context[2];
+ ENTROPY_CONTEXT_PLANES above_context[2];
+ ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
+ + mb_col;
+
+ // Offsets to move pointers from MB to MB within a SB in raster order
+ int row_delta[4] = { 0, +1, 0, -1};
+ int col_delta[4] = {+1, -1, +1, +1};
+
+ /* Function should not modify L & A contexts; save and restore on exit */
+ vpx_memcpy (left_context,
+ cpi->left_context,
+ sizeof(left_context));
+ vpx_memcpy (above_context,
+ initial_above_context_ptr,
+ sizeof(above_context));
+
+ /* Encode MBs in raster order within the SB */
+ for ( i=0; i<4; i++ )
+ {
+ int dy = row_delta[i];
+ int dx = col_delta[i];
+ int offset_unextended = dy * cm->mb_cols + dx;
+ int offset_extended = dy * xd->mode_info_stride + dx;
+
+ // TODO Many of the index items here can be computed more efficiently!
+
+ if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols))
+ {
+ // MB lies outside frame, move on
+ mb_row += dy;
+ mb_col += dx;
+
+ // Update pointers
+ x->src.y_buffer += 16 * (dx + dy*x->src.y_stride);
+ x->src.u_buffer += 8 * (dx + dy*x->src.uv_stride);
+ x->src.v_buffer += 8 * (dx + dy*x->src.uv_stride);
+
+ x->gf_active_ptr += offset_unextended;
+ x->partition_info += offset_extended;
+ xd->mode_info_context += offset_extended;
+ xd->prev_mode_info_context += offset_extended;
+#if CONFIG_DEBUG
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
+ (xd->mode_info_context - cpi->common.mip));
+#endif
+ continue;
+ }
+
+ // Index of the MB in the SB 0..3
+ xd->mb_index = i;
+
+ map_index = (mb_row * cpi->common.mb_cols) + mb_col;
+ x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
+
+ // set above context pointer
+ xd->above_context = cm->above_context + mb_col;
+
+ // Restore the appropriate left context depending on which
+ // row in the SB the MB is situated
+ vpx_memcpy (&cm->left_context,
+ &cpi->left_context[i>>1],
+ sizeof(ENTROPY_CONTEXT_PLANES));
+
+ // Set up distance of MB to edge of frame in 1/8th pel units
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
+ xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
+
+ // Set up limit values for MV components to prevent them from
+ // extending beyond the UMV borders assuming 16x16 block size
+ x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
+ + (VP8BORDERINPIXELS - 16);
+ x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
+ + (VP8BORDERINPIXELS - 16);
+
+ xd->up_available = (mb_row != 0);
+ xd->left_available = (mb_col != 0);
- // Reset the left context
- vp8_zero(cm->left_context)
+ recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
+ recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
- // reset above block coeffs
- xd->above_context = cm->above_context;
+ xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
+ xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
+ xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
- xd->up_available = (mb_row != 0);
- recon_yoffset = (mb_row * recon_y_stride * 16);
- recon_uvoffset = (mb_row * recon_uv_stride * 8);
+ // Copy current MB to a work buffer
+ RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
+ x->src.y_stride,
+ x->thismb, 16);
- cpi->tplist[mb_row].start = *tp;
- //printf("Main mb_row = %d\n", mb_row);
+ x->rddiv = cpi->RDDIV;
+ x->rdmult = cpi->RDMULT;
- // Distance of Mb to the top & bottom edges, specified in 1/8th pel
- // units as they are always compared to values that are in 1/8th pel units
- xd->mb_to_top_edge = -((mb_row * 16) << 3);
- xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
+ if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
+ vp8_activity_masking(cpi, x);
- // Set up limit values for vertical motion vector components
- // to prevent them extending beyond the UMV borders
- x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
- x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
- + (VP8BORDERINPIXELS - 16);
+ // Is segmentation enabled
+ if (xd->segmentation_enabled)
+ {
+ // Code to set segment id in xd->mbmi.segment_id
+ if (cpi->segmentation_map[map_index] <= 3)
+ xd->mode_info_context->mbmi.segment_id =
+ cpi->segmentation_map[map_index];
+ else
+ xd->mode_info_context->mbmi.segment_id = 0;
- // Set the mb activity pointer to the start of the row.
- x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
+ vp8cx_mb_init_quantizer(cpi, x);
+ }
+ else
+ // Set to Segment 0 by default
+ xd->mode_info_context->mbmi.segment_id = 0;
+
+ x->active_ptr = cpi->active_map + map_index;
+
+ /* force 4x4 transform for mode selection */
+ xd->mode_info_context->mbmi.txfm_size = TX_4X4; // TODO IS this right??
- // for each macroblock col in image
- for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
+ cpi->update_context = 0; // TODO Do we need this now??
+
+ // Find best coding mode & reconstruct the MB so it is available
+ // as a predictor for MBs that follow in the SB
+ if (cm->frame_type == KEY_FRAME)
+ {
+ *totalrate += vp8_rd_pick_intra_mode(cpi, x);
+
+ // Save the coding context
+ vpx_memcpy (&x->mb_context[i].mic, xd->mode_info_context,
+ sizeof(MODE_INFO));
+
+ // Dummy encode, do not do the tokenization
+ vp8cx_encode_intra_macro_block(cpi, x, tp, 0);
+ //Note the encoder may have changed the segment_id
+ }
+ else
+ {
+ *totalrate += vp8cx_pick_mode_inter_macroblock(cpi, x,
+ recon_yoffset,
+ recon_uvoffset);
+
+ // Dummy encode, do not do the tokenization
+ vp8cx_encode_inter_macroblock(cpi, x, tp,
+ recon_yoffset, recon_uvoffset, 0);
+ }
+
+ // Keep a copy of the updated left context
+ vpx_memcpy (&cpi->left_context[i>>1],
+ &cm->left_context,
+ sizeof(ENTROPY_CONTEXT_PLANES));
+
+ // Next MB
+ mb_row += dy;
+ mb_col += dx;
+
+ x->src.y_buffer += 16 * (dx + dy*x->src.y_stride);
+ x->src.u_buffer += 8 * (dx + dy*x->src.uv_stride);
+ x->src.v_buffer += 8 * (dx + dy*x->src.uv_stride);
+
+ x->gf_active_ptr += offset_unextended;
+ x->partition_info += offset_extended;
+ xd->mode_info_context += offset_extended;
+ xd->prev_mode_info_context += offset_extended;
+
+#if CONFIG_DEBUG
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
+ (xd->mode_info_context - cpi->common.mip));
+#endif
+ }
+
+ /* Restore L & A coding context to those in place on entry */
+ vpx_memcpy (cpi->left_context,
+ left_context,
+ sizeof(left_context));
+ vpx_memcpy (initial_above_context_ptr,
+ above_context,
+ sizeof(above_context));
+}
+
+static void encode_sb ( VP8_COMP *cpi,
+ VP8_COMMON *cm,
+ int mbrow,
+ int mbcol,
+ MACROBLOCK *x,
+ MACROBLOCKD *xd,
+ TOKENEXTRA **tp )
+{
+ int i, j;
+ int map_index;
+ int mb_row, mb_col;
+ int recon_yoffset, recon_uvoffset;
+ int ref_fb_idx = cm->lst_fb_idx;
+ int dst_fb_idx = cm->new_fb_idx;
+ int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
+ int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
+ int row_delta[4] = { 0, +1, 0, -1};
+ int col_delta[4] = {+1, -1, +1, +1};
+
+ mb_row = mbrow;
+ mb_col = mbcol;
+
+ /* Encode MBs in raster order within the SB */
+ for ( i=0; i<4; i++ )
{
+ int dy = row_delta[i];
+ int dx = col_delta[i];
+ int offset_extended = dy * xd->mode_info_stride + dx;
+ int offset_unextended = dy * cm->mb_cols + dx;
+
+ if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols))
+ {
+ // MB lies outside frame, move on
+ mb_row += dy;
+ mb_col += dx;
+
+ x->src.y_buffer += 16 * (dx + dy*x->src.y_stride);
+ x->src.u_buffer += 8 * (dx + dy*x->src.uv_stride);
+ x->src.v_buffer += 8 * (dx + dy*x->src.uv_stride);
+
+ x->gf_active_ptr += offset_unextended;
+ x->partition_info += offset_extended;
+ xd->mode_info_context += offset_extended;
+ xd->prev_mode_info_context += offset_extended;
+
+#if CONFIG_DEBUG
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
+ (xd->mode_info_context - cpi->common.mip));
+#endif
+ continue;
+ }
+
+ xd->mb_index = i;
+
#ifdef ENC_DEBUG
- enc_debug = (cpi->common.current_video_frame ==1 && mb_row==4 && mb_col==0);
+ enc_debug = (cpi->common.current_video_frame == 0 &&
+ mb_row==0 && mb_col==0);
mb_col_debug=mb_col;
mb_row_debug=mb_row;
#endif
- // Distance of Mb to the left & right edges, specified in
- // 1/8th pel units as they are always compared to values
- // that are in 1/8th pel units
- xd->mb_to_left_edge = -((mb_col * 16) << 3);
- xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
-
- // Set up limit values for horizontal motion vector components
- // to prevent them extending beyond the UMV borders
+
+ // Restore MB state to that when it was picked
+#if CONFIG_SUPERBLOCKS
+ if (x->encode_as_sb)
+ update_state (cpi, x, &x->sb_context[i]);
+ else
+#endif
+ update_state (cpi, x, &x->mb_context[i]);
+
+ // Copy in the appropriate left context
+ vpx_memcpy (&cm->left_context,
+ &cpi->left_context[i>>1],
+ sizeof(ENTROPY_CONTEXT_PLANES));
+
+ map_index = (mb_row * cpi->common.mb_cols) + mb_col;
+ x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
+
+ // reset above block coeffs
+ xd->above_context = cm->above_context + mb_col;
+
+ // Set up distance of MB to edge of the frame in 1/8th pel units
+ xd->mb_to_top_edge = -((mb_row * 16) << 3);
+ xd->mb_to_left_edge = -((mb_col * 16) << 3);
+ xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
+ xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
+
+ // Set up limit values for MV components to prevent them from
+ // extending beyond the UMV borders assuming 16x16 block size
+ x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
+ + (VP8BORDERINPIXELS - 16);
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
+ (VP8BORDERINPIXELS - 16);
+#if CONFIG_SUPERBLOCKS
+ // Set up limit values for MV components to prevent them from
+ // extending beyond the UMV borders assuming 32x32 block size
+ x->mv_row_min_sb = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_row_max_sb = ((cm->mb_rows - 1 - mb_row) * 16)
+ + (VP8BORDERINPIXELS - 32);
+ x->mv_col_min_sb = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
+ x->mv_col_max_sb = ((cm->mb_cols - 1 - mb_col) * 16)
+ + (VP8BORDERINPIXELS - 32);
+#endif
+
+ xd->up_available = (mb_row != 0);
+ xd->left_available = (mb_col != 0);
+
+ recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
+ recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
+
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
- xd->left_available = (mb_col != 0);
- x->rddiv = cpi->RDDIV;
- x->rdmult = cpi->RDMULT;
-
- //Copy current mb to a buffer
- RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
+ // Copy current MB to a work buffer
+ RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer,
+ x->src.y_stride,
+ x->thismb, 16);
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
@@ -462,8 +796,9 @@ void encode_mb_row(VP8_COMP *cpi,
if (xd->segmentation_enabled)
{
// Code to set segment id in xd->mbmi.segment_id
- if (cpi->segmentation_map[map_index+mb_col] <= 3)
- xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col];
+ if (cpi->segmentation_map[map_index] <= 3)
+ xd->mode_info_context->mbmi.segment_id =
+ cpi->segmentation_map[map_index];
else
xd->mode_info_context->mbmi.segment_id = 0;
@@ -473,14 +808,13 @@ void encode_mb_row(VP8_COMP *cpi,
// Set to Segment 0 by default
xd->mode_info_context->mbmi.segment_id = 0;
- x->active_ptr = cpi->active_map + map_index + mb_col;
+ x->active_ptr = cpi->active_map + map_index;
- /* force 4x4 transform for mode selection */
- xd->mode_info_context->mbmi.txfm_size = TX_4X4;
+ cpi->update_context = 0;
if (cm->frame_type == KEY_FRAME)
{
- *totalrate += vp8cx_encode_intra_macro_block(cpi, x, tp);
+ vp8cx_encode_intra_macro_block(cpi, x, tp, 1);
//Note the encoder may have changed the segment_id
#ifdef MODE_STATS
@@ -489,7 +823,8 @@ void encode_mb_row(VP8_COMP *cpi,
}
else
{
- *totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset);
+ vp8cx_encode_inter_macroblock(cpi, x, tp,
+ recon_yoffset, recon_uvoffset, 1);
//Note the encoder may have changed the segment_id
#ifdef MODE_STATS
@@ -502,63 +837,148 @@ void encode_mb_row(VP8_COMP *cpi,
for (b = 0; b < x->partition_info->count; b++)
{
inter_b_modes[x->partition_info->bmi[b].mode] ++;
- }
+ }
}
#endif
// Count of last ref frame 0,0 usage
- if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
+ if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
+ (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count ++;
}
+ // TODO Partitioning is broken!
cpi->tplist[mb_row].stop = *tp;
- // Increment pointer into gf usage flags structure.
- x->gf_active_ptr++;
-
- // Increment the activity mask pointers.
- x->mb_activity_ptr++;
+ // Copy back updated left context
+ vpx_memcpy (&cpi->left_context[i>>1],
+ &cm->left_context,
+ sizeof(ENTROPY_CONTEXT_PLANES));
- // adjust to the next column of macroblocks
- x->src.y_buffer += 16;
- x->src.u_buffer += 8;
- x->src.v_buffer += 8;
+ // Next MB
+ mb_row += dy;
+ mb_col += dx;
- recon_yoffset += 16;
- recon_uvoffset += 8;
+ x->src.y_buffer += 16 * (dx + dy*x->src.y_stride);
+ x->src.u_buffer += 8 * (dx + dy*x->src.uv_stride);
+ x->src.v_buffer += 8 * (dx + dy*x->src.uv_stride);
- // skip to next mb
- xd->mode_info_context++;
+ x->gf_active_ptr += offset_unextended;
+ x->partition_info += offset_extended;
+ xd->mode_info_context += offset_extended;
+ xd->prev_mode_info_context += offset_extended;
- xd->prev_mode_info_context++;
- assert((xd->prev_mode_info_context - cpi->common.prev_mip)
- ==(xd->mode_info_context - cpi->common.mip));
- x->partition_info++;
-
- xd->above_context++;
+#if CONFIG_DEBUG
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
+ (xd->mode_info_context - cpi->common.mip));
+#endif
}
- //extend the recon for intra prediction
- vp8_extend_mb_row(
- &cm->yv12_fb[dst_fb_idx],
- xd->dst.y_buffer + 16,
- xd->dst.u_buffer + 8,
- xd->dst.v_buffer + 8);
+ // debug output
+#if DBG_PRNT_SEGMAP
+ {
+ FILE *statsfile;
+ statsfile = fopen("segmap2.stt", "a");
+ fprintf(statsfile, "\n" );
+ fclose(statsfile);
+ }
+ #endif
+}
- // this is to account for the border
- xd->prev_mode_info_context++;
- xd->mode_info_context++;
- x->partition_info++;
+static
+void encode_sb_row ( VP8_COMP *cpi,
+ VP8_COMMON *cm,
+ int mb_row,
+ MACROBLOCK *x,
+ MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ int *totalrate )
+{
+ int mb_col;
+ int mb_cols = cm->mb_cols;
-// debug output
-#if DBG_PRNT_SEGMAP
+ // Initialize the left context for the new SB row
+ vpx_memset (cpi->left_context, 0, sizeof(cpi->left_context));
+ vpx_memset (&cm->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+
+ // Code each SB in the row
+ for (mb_col=0; mb_col<mb_cols; mb_col+=2)
{
- FILE *statsfile;
- statsfile = fopen("segmap2.stt", "a");
- fprintf(statsfile, "\n" );
- fclose(statsfile);
+ int mb_rate = 0;
+#if CONFIG_SUPERBLOCKS
+ int sb_rate = INT_MAX;
+#endif
+
+#if CONFIG_DEBUG
+ MODE_INFO *mic = xd->mode_info_context;
+ PARTITION_INFO *pi = x->partition_info;
+ signed char *gfa = x->gf_active_ptr;
+ unsigned char *yb = x->src.y_buffer;
+ unsigned char *ub = x->src.u_buffer;
+ unsigned char *vb = x->src.v_buffer;
+#endif
+
+ // Pick modes assuming the SB is coded as 4 independent MBs
+ pick_mb_modes (cpi, cm, mb_row, mb_col, x, xd, tp, &mb_rate);
+
+ x->src.y_buffer -= 32;
+ x->src.u_buffer -= 16;
+ x->src.v_buffer -= 16;
+
+ x->gf_active_ptr -= 2;
+ x->partition_info -= 2;
+ xd->mode_info_context -= 2;
+ xd->prev_mode_info_context -= 2;
+
+#if CONFIG_DEBUG
+ assert (x->gf_active_ptr == gfa);
+ assert (x->partition_info == pi);
+ assert (xd->mode_info_context == mic);
+ assert (x->src.y_buffer == yb);
+ assert (x->src.u_buffer == ub);
+ assert (x->src.v_buffer == vb);
+#endif
+
+#if CONFIG_SUPERBLOCKS
+ // Pick a mode assuming that it applies all 4 of the MBs in the SB
+ pick_sb_modes(cpi, cm, mb_row, mb_col, x, xd, &sb_rate);
+
+ // Decide whether to encode as a SB or 4xMBs
+ if(sb_rate < mb_rate)
+ {
+ x->encode_as_sb = 1;
+ *totalrate += sb_rate;
+ }
+ else
+#endif
+ {
+ x->encode_as_sb = 0;
+ *totalrate += mb_rate;
+ }
+
+ // Encode SB using best computed mode(s)
+ encode_sb (cpi, cm, mb_row, mb_col, x, xd, tp);
+
+#if CONFIG_DEBUG
+ assert (x->gf_active_ptr == gfa+2);
+ assert (x->partition_info == pi+2);
+ assert (xd->mode_info_context == mic+2);
+ assert (x->src.y_buffer == yb+32);
+ assert (x->src.u_buffer == ub+16);
+ assert (x->src.v_buffer == vb+16);
+#endif
}
+
+ // this is to account for the border
+ x->gf_active_ptr += mb_cols - (mb_cols & 0x1);
+ x->partition_info += xd->mode_info_stride + 1 - (mb_cols & 0x1);
+ xd->mode_info_context += xd->mode_info_stride + 1 - (mb_cols & 0x1);
+ xd->prev_mode_info_context += xd->mode_info_stride + 1 - (mb_cols & 0x1);
+
+#if CONFIG_DEBUG
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
+ (xd->mode_info_context - cpi->common.mip));
#endif
}
@@ -574,8 +994,6 @@ void init_encode_frame_mb_context(VP8_COMP *cpi)
// Activity map pointer
x->mb_activity_ptr = cpi->mb_activity_map;
- x->vector_range = 32;
-
x->act_zbin_adj = 0;
x->partition_info = x->pi;
@@ -593,7 +1011,7 @@ void init_encode_frame_mb_context(VP8_COMP *cpi)
if (cm->frame_type == KEY_FRAME)
vp8_init_mbmode_probs(cm);
- // Copy data over into macro block data sturctures.
+ // Copy data over into macro block data structures.
x->src = * cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
@@ -640,8 +1058,8 @@ static void encode_frame_internal(VP8_COMP *cpi)
int totalrate;
// Compute a modified set of reference frame probabilities to use when
- // prediction fails. These are based on the current genreal estimates for
- // this frame which may be updated with each itteration of the recode loop.
+ // prediction fails. These are based on the current general estimates for
+ // this frame which may be updated with each iteration of the recode loop.
compute_mod_refprobs( cm );
// debug output
@@ -740,7 +1158,6 @@ static void encode_frame_internal(VP8_COMP *cpi)
#endif
xd->mode_info_context = cm->mi;
-
xd->prev_mode_info_context = cm->prev_mi;
vp8_zero(cpi->MVcount);
@@ -775,19 +1192,20 @@ static void encode_frame_internal(VP8_COMP *cpi)
vpx_usec_timer_start(&emr_timer);
{
- // for each macroblock row in the image
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
+ // For each row of SBs in the frame
+ for (mb_row = 0; mb_row < cm->mb_rows; mb_row+=2)
{
- encode_mb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);
+ int offset = (cm->mb_cols+1) & ~0x1;
- // adjust to the next row of MBs
- x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
- x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
- x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
+ encode_sb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);
+
+ // adjust to the next row of SBs
+ x->src.y_buffer += 32 * x->src.y_stride - 16 * offset;
+ x->src.u_buffer += 16 * x->src.uv_stride - 8 * offset;
+ x->src.v_buffer += 16 * x->src.uv_stride - 8 * offset;
}
cpi->tok_count = tp - cpi->tok;
-
}
vpx_usec_timer_mark(&emr_timer);
@@ -795,8 +1213,9 @@ static void encode_frame_internal(VP8_COMP *cpi)
}
- // 256 rate units to the bit
- cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of BYTES
+ // 256 rate units to the bit,
+ // projected_frame_size in units of BYTES
+ cpi->projected_frame_size = totalrate >> 8;
// Make a note of the percentage MBs coded Intra.
if (cm->frame_type == KEY_FRAME)
@@ -813,7 +1232,8 @@ static void encode_frame_internal(VP8_COMP *cpi)
+ cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
if (tot_modes)
- cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
+ cpi->this_frame_percent_intra =
+ cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
}
@@ -1114,18 +1534,12 @@ static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
#endif
}
-int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
+void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
+ MACROBLOCK *x,
+ TOKENEXTRA **t,
+ int output_enabled)
{
- int rate, i;
- int mb_skip_context;
-
- // Non rd path deprecated in test code base
- //if (cpi->sf.RD && cpi->compressor_speed != 2)
- vp8_rd_pick_intra_mode(cpi, x, &rate);
- //else
- // vp8_pick_intra_mode(cpi, x, &rate);
-
- if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
+ if((cpi->oxcf.tuning == VP8_TUNE_SSIM) && output_enabled)
{
adjust_act_zbin( cpi, x );
vp8_update_zbin_extra(cpi, x);
@@ -1157,9 +1571,13 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
if(x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED)
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
- sum_intra_stats(cpi, x);
- vp8_tokenize_mb(cpi, &x->e_mbd, t);
- return rate;
+
+ if (output_enabled)
+ {
+ // Tokenize
+ sum_intra_stats(cpi, x);
+ vp8_tokenize_mb(cpi, &x->e_mbd, t);
+ }
}
#ifdef SPEEDSTATS
extern int cnt_pm;
@@ -1167,10 +1585,11 @@ extern int cnt_pm;
extern void vp8_fix_contexts(MACROBLOCKD *x);
-int vp8cx_encode_inter_macroblock
+void vp8cx_encode_inter_macroblock
(
VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
- int recon_yoffset, int recon_uvoffset
+ int recon_yoffset, int recon_uvoffset,
+ int output_enabled
)
{
VP8_COMMON *cm = &cpi->common;
@@ -1184,64 +1603,6 @@ int vp8cx_encode_inter_macroblock
x->skip = 0;
- if (xd->segmentation_enabled)
- x->encode_breakout = cpi->segment_encode_breakout[*segment_id];
- else
- x->encode_breakout = cpi->oxcf.encode_breakout;
-
- //if (cpi->sf.RD)
- // For now this codebase is limited to a single rd encode path
- {
- int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
- int single, compound, hybrid;
-
- vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
- &distortion, &intra_error, &single, &compound, &hybrid);
-
- cpi->rd_single_diff += single;
- cpi->rd_comp_diff += compound;
- cpi->rd_hybrid_diff += hybrid;
- if (x->e_mbd.mode_info_context->mbmi.ref_frame &&
- x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
- {
- unsigned char pred_context;
-
- pred_context = get_pred_context( cm, xd, PRED_COMP );
-
- if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME)
- cpi->single_pred_count[pred_context]++;
- else
- cpi->comp_pred_count[pred_context]++;
- }
-
-
- /* test code: set transform size based on mode selection */
- if( cpi->common.txfm_mode == ALLOW_8X8
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
- {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
- cpi->t8x8_count ++;
- }
- else
- {
- x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
- cpi->t4x4_count++;
- }
-
- /* restore cpi->zbin_mode_boost_enabled */
- cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
-
- }
- //else
- // The non rd encode path has been deleted from this code base
- // to simplify development
- // vp8_pick_inter_mode
-
- cpi->prediction_error += distortion;
- cpi->intra_error += intra_error;
-
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
// Adjust the zbin based on this MB rate.
@@ -1250,7 +1611,7 @@ int vp8cx_encode_inter_macroblock
{
// Experimental code. Special case for gf and arf zeromv modes.
- // Increase zbin size to supress noise
+ // Increase zbin size to suppress noise
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled)
{
@@ -1282,6 +1643,21 @@ int vp8cx_encode_inter_macroblock
get_pred_ref( cm, xd )) );
set_pred_flag( xd, PRED_REF, ref_pred_flag );
+ /* test code: set transform size based on mode selection */
+ if( cpi->common.txfm_mode == ALLOW_8X8
+ && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
+ {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
+ cpi->t8x8_count ++;
+ }
+ else
+ {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
+ cpi->t4x4_count++;
+ }
+
// If we have just a single reference frame coded for a segment then
// exclude from the reference frame counts used to work out
// probabilities. NOTE: At the moment we dont support custom trees
@@ -1323,7 +1699,9 @@ int vp8cx_encode_inter_macroblock
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
- sum_intra_stats(cpi, x);
+
+ if (output_enabled)
+ sum_intra_stats(cpi, x);
}
else
{
@@ -1394,7 +1772,8 @@ int vp8cx_encode_inter_macroblock
fflush(stdout);
}
#endif
- vp8_tokenize_mb(cpi, xd, t);
+ if (output_enabled)
+ vp8_tokenize_mb(cpi, xd, t);
#ifdef ENC_DEBUG
if (enc_debug) {
printf("Tokenized\n");
@@ -1432,5 +1811,4 @@ int vp8cx_encode_inter_macroblock
#endif
}
}
- return rate;
}