summaryrefslogtreecommitdiff
path: root/vp8/encoder/encodeframe.c
diff options
context:
space:
mode:
Diffstat (limited to 'vp8/encoder/encodeframe.c')
-rw-r--r--vp8/encoder/encodeframe.c644
1 files changed, 347 insertions, 297 deletions
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 46e6c7645..a59dd6324 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -9,7 +9,7 @@
*/
-#include "vpx_config.h"
+#include "vpx_ports/config.h"
#include "encodemb.h"
#include "encodemv.h"
#include "vp8/common/common.h"
@@ -22,13 +22,17 @@
#include "encodeintra.h"
#include "vp8/common/reconinter.h"
#include "rdopt.h"
-#include "pickinter.h"
#include "vp8/common/findnearmv.h"
#include "vp8/common/reconintra.h"
+#include "vp8/common/seg_common.h"
#include <stdio.h>
+#include <math.h>
#include <limits.h>
#include "vp8/common/subpixel.h"
#include "vpx_ports/vpx_timer.h"
+#include "vp8/common/pred_common.h"
+
+//#define DBG_PRNT_SEGMAP 1
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD(x) &cpi->common.rtcd.x
@@ -37,6 +41,12 @@
#define RTCD(x) NULL
#define IF_RTCD(x) NULL
#endif
+
+#ifdef ENC_DEBUG
+int enc_debug=0;
+int mb_row_debug, mb_col_debug;
+#endif
+
extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
@@ -52,13 +62,25 @@ int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
+
+
#ifdef MODE_STATS
-unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
-unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-unsigned int y_modes[5] = {0, 0, 0, 0, 0};
-unsigned int uv_modes[4] = {0, 0, 0, 0};
-unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+unsigned int inter_y_modes[MB_MODE_COUNT] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+unsigned int inter_uv_modes[VP8_UV_MODES] = {0, 0, 0, 0};
+unsigned int inter_b_modes[B_MODE_COUNT] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+unsigned int y_modes[VP8_YMODES] = {0, 0, 0, 0, 0, 0};
+unsigned int i8x8_modes[VP8_I8X8_MODES]={0 };
+unsigned int uv_modes[VP8_UV_MODES] = {0, 0, 0, 0};
+unsigned int uv_modes_y[VP8_YMODES][VP8_UV_MODES]=
+{
+{0, 0, 0, 0},
+{0, 0, 0, 0},
+{0, 0, 0, 0},
+{0, 0, 0, 0},
+{0, 0, 0, 0},
+{0, 0, 0, 0}
+};
+unsigned int b_modes[B_MODE_COUNT] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#endif
@@ -365,7 +387,6 @@ void encode_mb_row(VP8_COMP *cpi,
MACROBLOCK *x,
MACROBLOCKD *xd,
TOKENEXTRA **tp,
- int *segment_counts,
int *totalrate)
{
int recon_yoffset, recon_uvoffset;
@@ -376,16 +397,8 @@ void encode_mb_row(VP8_COMP *cpi,
int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
int map_index = (mb_row * cpi->common.mb_cols);
-#if CONFIG_MULTITHREAD
- const int nsync = cpi->mt_sync_range;
- const int rightmost_col = cm->mb_cols - 1;
- volatile const int *last_row_current_mb_col;
-
- if ((cpi->b_multi_threaded != 0) && (mb_row != 0))
- last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
- else
- last_row_current_mb_col = &rightmost_col;
-#endif
+ // Reset the left context
+ vp8_zero(cm->left_context)
// reset above block coeffs
xd->above_context = cm->above_context;
@@ -414,6 +427,11 @@ void encode_mb_row(VP8_COMP *cpi,
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
+#ifdef ENC_DEBUG
+ enc_debug = (cpi->common.current_video_frame ==1 && mb_row==4 && mb_col==0);
+ mb_col_debug=mb_col;
+ mb_row_debug=mb_row;
+#endif
// Distance of Mb to the left & right edges, specified in
// 1/8th pel units as they are always compared to values
// that are in 1/8th pel units
@@ -437,29 +455,13 @@ void encode_mb_row(VP8_COMP *cpi,
//Copy current mb to a buffer
RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
-#if CONFIG_MULTITHREAD
- if ((cpi->b_multi_threaded != 0) && (mb_row != 0))
- {
- if ((mb_col & (nsync - 1)) == 0)
- {
- while (mb_col > (*last_row_current_mb_col - nsync)
- && (*last_row_current_mb_col) != (cm->mb_cols - 1))
- {
- x86_pause_hint();
- thread_sleep(0);
- }
- }
- }
-#endif
-
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp8_activity_masking(cpi, x);
// Is segmentation enabled
- // MB level adjutment to quantizer
if (xd->segmentation_enabled)
{
- // Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
+ // Code to set segment id in xd->mbmi.segment_id
if (cpi->segmentation_map[map_index+mb_col] <= 3)
xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col];
else
@@ -468,66 +470,51 @@ void encode_mb_row(VP8_COMP *cpi,
vp8cx_mb_init_quantizer(cpi, x);
}
else
- xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
+ // Set to Segment 0 by default
+ xd->mode_info_context->mbmi.segment_id = 0;
x->active_ptr = cpi->active_map + map_index + mb_col;
+ /* force 4x4 transform for mode selection */
+ xd->mode_info_context->mbmi.txfm_size = TX_4X4;
+
if (cm->frame_type == KEY_FRAME)
{
*totalrate += vp8cx_encode_intra_macro_block(cpi, x, tp);
+ //Note the encoder may have changed the segment_id
+
#ifdef MODE_STATS
- y_modes[xd->mbmi.mode] ++;
+ y_modes[xd->mode_info_context->mbmi.mode] ++;
#endif
}
else
{
*totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset);
+ //Note the encoder may have changed the segment_id
#ifdef MODE_STATS
- inter_y_modes[xd->mbmi.mode] ++;
+ inter_y_modes[xd->mode_info_context->mbmi.mode] ++;
- if (xd->mbmi.mode == SPLITMV)
+ if (xd->mode_info_context->mbmi.mode == SPLITMV)
{
int b;
- for (b = 0; b < xd->mbmi.partition_count; b++)
+ for (b = 0; b < x->partition_info->count; b++)
{
- inter_b_modes[x->partition->bmi[b].mode] ++;
+ inter_b_modes[x->partition_info->bmi[b].mode] ++;
}
}
#endif
- // Count of last ref frame 0,0 useage
+ // Count of last ref frame 0,0 usage
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count ++;
-
- // Special case code for cyclic refresh
- // If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
- // during vp8cx_encode_inter_macroblock()) back into the global sgmentation map
- if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
- {
- cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
-
- // If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
- // Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
- // else mark it as dirty (1).
- if (xd->mode_info_context->mbmi.segment_id)
- cpi->cyclic_refresh_map[map_index+mb_col] = -1;
- else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
- {
- if (cpi->cyclic_refresh_map[map_index+mb_col] == 1)
- cpi->cyclic_refresh_map[map_index+mb_col] = 0;
- }
- else
- cpi->cyclic_refresh_map[map_index+mb_col] = 1;
-
- }
}
cpi->tplist[mb_row].stop = *tp;
- // Increment pointer into gf useage flags structure.
+ // Increment pointer into gf usage flags structure.
x->gf_active_ptr++;
// Increment the activity mask pointers.
@@ -541,20 +528,15 @@ void encode_mb_row(VP8_COMP *cpi,
recon_yoffset += 16;
recon_uvoffset += 8;
- // Keep track of segment useage
- segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
-
// skip to next mb
xd->mode_info_context++;
+
+ xd->prev_mode_info_context++;
+ assert((xd->prev_mode_info_context - cpi->common.prev_mip)
+ ==(xd->mode_info_context - cpi->common.mip));
x->partition_info++;
xd->above_context++;
-#if CONFIG_MULTITHREAD
- if (cpi->b_multi_threaded != 0)
- {
- cpi->mt_current_mb_col[mb_row] = mb_col;
- }
-#endif
}
//extend the recon for intra prediction
@@ -565,13 +547,17 @@ void encode_mb_row(VP8_COMP *cpi,
xd->dst.v_buffer + 8);
// this is to account for the border
+ xd->prev_mode_info_context++;
xd->mode_info_context++;
x->partition_info++;
-#if CONFIG_MULTITHREAD
- if ((cpi->b_multi_threaded != 0) && (mb_row == cm->mb_rows - 1))
+// debug output
+#if DBG_PRNT_SEGMAP
{
- sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */
+ FILE *statsfile;
+ statsfile = fopen("segmap2.stt", "a");
+ fprintf(statsfile, "\n" );
+ fclose(statsfile);
}
#endif
}
@@ -596,6 +582,7 @@ void init_encode_frame_mb_context(VP8_COMP *cpi)
xd->mode_info_context = cm->mi;
xd->mode_info_stride = cm->mode_info_stride;
+ xd->prev_mode_info_context = cm->prev_mi;
xd->frame_type = cm->frame_type;
@@ -630,42 +617,19 @@ void init_encode_frame_mb_context(VP8_COMP *cpi)
vp8_zero(cpi->uv_mode_count)
x->mvc = cm->fc.mvc;
+#if CONFIG_HIGH_PRECISION_MV
+ x->mvc_hp = cm->fc.mvc_hp;
+#endif
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
- xd->ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(cpi->prob_intra_coded);
-
- // Special case treatment when GF and ARF are not sensible options for reference
- if (cpi->ref_frame_flags == VP8_LAST_FLAG)
- {
- xd->ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
- + vp8_cost_zero(255);
- xd->ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
- + vp8_cost_one(255)
- + vp8_cost_zero(128);
- xd->ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
- + vp8_cost_one(255)
- + vp8_cost_one(128);
- }
- else
- {
- xd->ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
- + vp8_cost_zero(cpi->prob_last_coded);
- xd->ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
- + vp8_cost_one(cpi->prob_last_coded)
- + vp8_cost_zero(cpi->prob_gf_coded);
- xd->ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
- + vp8_cost_one(cpi->prob_last_coded)
- + vp8_cost_one(cpi->prob_gf_coded);
- }
-
xd->fullpixel_mask = 0xffffffff;
if(cm->full_pixel)
xd->fullpixel_mask = 0xfffffff8;
}
-void vp8_encode_frame(VP8_COMP *cpi)
+static void encode_frame_internal(VP8_COMP *cpi)
{
int mb_row;
MACROBLOCK *const x = & cpi->mb;
@@ -673,19 +637,24 @@ void vp8_encode_frame(VP8_COMP *cpi)
MACROBLOCKD *const xd = & x->e_mbd;
TOKENEXTRA *tp = cpi->tok;
- int segment_counts[MAX_MB_SEGMENTS];
int totalrate;
- vpx_memset(segment_counts, 0, sizeof(segment_counts));
- totalrate = 0;
+ // Compute a modified set of reference frame probabilities to use when
+ // prediction fails. These are based on the current general estimates for
+ // this frame which may be updated with each iteration of the recode loop.
+ compute_mod_refprobs( cm );
- if (cpi->compressor_speed == 2)
+// debug output
+#if DBG_PRNT_SEGMAP
{
- if (cpi->oxcf.cpu_used < 0)
- cpi->Speed = -(cpi->oxcf.cpu_used);
- else
- vp8_auto_select_speed(cpi);
+ FILE *statsfile;
+ statsfile = fopen("segmap2.stt", "a");
+ fprintf(statsfile, "\n" );
+ fclose(statsfile);
}
+#endif
+
+ totalrate = 0;
// Functions setup for all frame types so we can use MC in AltRef
if (cm->mcomp_filter_type == SIXTAP)
@@ -698,6 +667,10 @@ void vp8_encode_frame(VP8_COMP *cpi)
&cpi->common.rtcd.subpix, sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap16x16);
+ xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap_avg8x8);
+ xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, sixtap_avg16x16);
}
else
{
@@ -709,13 +682,15 @@ void vp8_encode_frame(VP8_COMP *cpi)
&cpi->common.rtcd.subpix, bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear16x16);
+ xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear_avg8x8);
+ xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
+ &cpi->common.rtcd.subpix, bilinear_avg16x16);
}
- // Reset frame count of inter 0,0 motion vector useage.
+ // Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
- vpx_memset(segment_counts, 0, sizeof(segment_counts));
-
cpi->prediction_error = 0;
cpi->intra_error = 0;
cpi->skip_true_count = 0;
@@ -729,7 +704,12 @@ void vp8_encode_frame(VP8_COMP *cpi)
xd->mode_info_context = cm->mi;
+ xd->prev_mode_info_context = cm->prev_mi;
+
vp8_zero(cpi->MVcount);
+#if CONFIG_HIGH_PRECISION_MV
+ vp8_zero(cpi->MVcount_hp);
+#endif
vp8_zero(cpi->coef_counts);
vp8cx_frame_init_quantizer(cpi);
@@ -749,86 +729,21 @@ void vp8_encode_frame(VP8_COMP *cpi)
// re-initencode frame context.
init_encode_frame_mb_context(cpi);
+ cpi->rd_single_diff = cpi->rd_comp_diff = cpi->rd_hybrid_diff = 0;
+ vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
+ vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
+
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
-#if CONFIG_MULTITHREAD
- if (cpi->b_multi_threaded)
{
- int i;
-
- vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);
-
- for (i = 0; i < cm->mb_rows; i++)
- cpi->mt_current_mb_col[i] = -1;
-
- for (i = 0; i < cpi->encoding_thread_count; i++)
- {
- sem_post(&cpi->h_event_start_encoding[i]);
- }
-
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
- {
- vp8_zero(cm->left_context)
-
- tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
-
- encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
-
- // adjust to the next row of mbs
- x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
- x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
- x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
-
- xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
- x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
- x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;
-
- }
-
- sem_wait(&cpi->h_event_end_encoding); /* wait for other threads to finish */
-
- cpi->tok_count = 0;
-
- for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
- {
- cpi->tok_count += cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start;
- }
-
- if (xd->segmentation_enabled)
- {
- int i, j;
-
- if (xd->segmentation_enabled)
- {
-
- for (i = 0; i < cpi->encoding_thread_count; i++)
- {
- for (j = 0; j < 4; j++)
- segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
- }
- }
- }
-
- for (i = 0; i < cpi->encoding_thread_count; i++)
- {
- totalrate += cpi->mb_row_ei[i].totalrate;
- }
-
- }
- else
-#endif
- {
- // for each macroblock row in image
+ // for each macroblock row in the image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
+ encode_mb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);
- vp8_zero(cm->left_context)
-
- encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
-
- // adjust to the next row of mbs
+ // adjust to the next row of MBs
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
@@ -843,43 +758,6 @@ void vp8_encode_frame(VP8_COMP *cpi)
}
-
- // Work out the segment probabilites if segmentation is enabled
- if (xd->segmentation_enabled)
- {
- int tot_count;
- int i;
-
- // Set to defaults
- vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
-
- tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
-
- if (tot_count)
- {
- xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;
-
- tot_count = segment_counts[0] + segment_counts[1];
-
- if (tot_count > 0)
- {
- xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
- }
-
- tot_count = segment_counts[2] + segment_counts[3];
-
- if (tot_count > 0)
- xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
-
- // Zero probabilities not allowed
- for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
- {
- if (xd->mb_segment_tree_probs[i] == 0)
- xd->mb_segment_tree_probs[i] = 1;
- }
- }
- }
-
// 256 rate units to the bit
cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of BYTES
@@ -932,44 +810,95 @@ void vp8_encode_frame(VP8_COMP *cpi)
}
#endif
- // Adjust the projected reference frame useage probability numbers to reflect
- // what we have just seen. This may be usefull when we make multiple itterations
- // of the recode loop rather than continuing to use values from the previous frame.
- if ((cm->frame_type != KEY_FRAME) && !cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)
- {
- const int *const rfct = cpi->count_mb_ref_frame_usage;
- const int rf_intra = rfct[INTRA_FRAME];
- const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
+#if 0
+ // Keep record of the total distortion this time around for future use
+ cpi->last_frame_distortion = cpi->frame_distortion;
+#endif
- if ((rf_intra + rf_inter) > 0)
- {
- cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter);
+}
- if (cpi->prob_intra_coded < 1)
- cpi->prob_intra_coded = 1;
+void vp8_encode_frame(VP8_COMP *cpi)
+{
+ if (cpi->sf.RD)
+ {
+ int frame_type, pred_type;
+ int redo = 0;
+ int single_diff, comp_diff, hybrid_diff;
+
+ /*
+ * This code does a single RD pass over the whole frame assuming
+ * either compound, single or hybrid prediction as per whatever has
+ * worked best for that type of frame in the past.
+ * It also predicts whether another coding mode would have worked
+ * better than this coding mode. If that is the case, it remembers
+ * that for subsequent frames. If the difference is above a certain
+ * threshold, it will actually re-encode the current frame using
+ * that different coding mode.
+ */
+ if (cpi->common.frame_type == KEY_FRAME)
+ frame_type = 0;
+ else if (cpi->is_src_frame_alt_ref && cpi->common.refresh_golden_frame)
+ frame_type = 3;
+ else if (cpi->common.refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
+ frame_type = 1;
+ else
+ frame_type = 2;
+
+ if (cpi->rd_prediction_type_threshes[frame_type][1] >
+ cpi->rd_prediction_type_threshes[frame_type][0] &&
+ cpi->rd_prediction_type_threshes[frame_type][1] >
+ cpi->rd_prediction_type_threshes[frame_type][2])
+ pred_type = COMP_PREDICTION_ONLY;
+ else if (cpi->rd_prediction_type_threshes[frame_type][0] >
+ cpi->rd_prediction_type_threshes[frame_type][1] &&
+ cpi->rd_prediction_type_threshes[frame_type][0] >
+ cpi->rd_prediction_type_threshes[frame_type][2])
+ pred_type = SINGLE_PREDICTION_ONLY;
+ else
+ pred_type = HYBRID_PREDICTION;
+
+ cpi->common.comp_pred_mode = pred_type;
+ encode_frame_internal(cpi);
+
+ single_diff = cpi->rd_single_diff / cpi->common.MBs;
+ cpi->rd_prediction_type_threshes[frame_type][0] += single_diff;
+ cpi->rd_prediction_type_threshes[frame_type][0] >>= 1;
+ comp_diff = cpi->rd_comp_diff / cpi->common.MBs;
+ cpi->rd_prediction_type_threshes[frame_type][1] += comp_diff;
+ cpi->rd_prediction_type_threshes[frame_type][1] >>= 1;
+ hybrid_diff = cpi->rd_hybrid_diff / cpi->common.MBs;
+ cpi->rd_prediction_type_threshes[frame_type][2] += hybrid_diff;
+ cpi->rd_prediction_type_threshes[frame_type][2] >>= 1;
+
+ if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
+ {
+ int single_count_zero = 0;
+ int comp_count_zero = 0;
+ int i;
- if ((cm->frames_since_golden > 0) || cpi->source_alt_ref_active)
+ for ( i = 0; i < COMP_PRED_CONTEXTS; i++ )
{
- cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
-
- if (cpi->prob_last_coded < 1)
- cpi->prob_last_coded = 1;
-
- cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
- ? (rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;
+ single_count_zero += cpi->single_pred_count[i];
+ comp_count_zero += cpi->comp_pred_count[i];
+ }
- if (cpi->prob_gf_coded < 1)
- cpi->prob_gf_coded = 1;
+ if (comp_count_zero == 0)
+ {
+ cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
+ }
+ else if (single_count_zero == 0)
+ {
+ cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
}
}
}
-
-#if 0
- // Keep record of the total distortion this time around for future use
- cpi->last_frame_distortion = cpi->frame_distortion;
-#endif
+ else
+ {
+ encode_frame_internal(cpi);
+ }
}
+
void vp8_setup_block_ptrs(MACROBLOCK *x)
{
int r, c;
@@ -1070,6 +999,7 @@ static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
const int is_key = cpi->common.frame_type == KEY_FRAME;
++ (is_key ? uv_modes : inter_uv_modes)[uvm];
+ ++ uv_modes_y[m][uvm];
if (m == B_PRED)
{
@@ -1079,11 +1009,18 @@ static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
do
{
- ++ bct[xd->block[b].bmi.mode];
+ ++ bct[xd->block[b].bmi.as_mode.first];
}
while (++b < 16);
}
+ if(m==I8X8_PRED)
+ {
+ i8x8_modes[xd->block[0].bmi.as_mode.first]++;
+ i8x8_modes[xd->block[2].bmi.as_mode.first]++;
+ i8x8_modes[xd->block[8].bmi.as_mode.first]++;
+ i8x8_modes[xd->block[10].bmi.as_mode.first]++;
+ }
#endif
++cpi->ymode_count[m];
@@ -1117,10 +1054,11 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
{
int rate;
- if (cpi->sf.RD && cpi->compressor_speed != 2)
- vp8_rd_pick_intra_mode(cpi, x, &rate);
- else
- vp8_pick_intra_mode(cpi, x, &rate);
+ // Non rd path deprecated in test code base
+ //if (cpi->sf.RD && cpi->compressor_speed != 2)
+ vp8_rd_pick_intra_mode(cpi, x, &rate);
+ //else
+ // vp8_pick_intra_mode(cpi, x, &rate);
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
@@ -1128,12 +1066,32 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
vp8_update_zbin_extra(cpi, x);
}
- if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
+ /* test code: set transform size based on mode selection */
+ if(cpi->common.txfm_mode == ALLOW_8X8
+ && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != B_PRED)
+ {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
+ cpi->t8x8_count++;
+ }
+ else
+ {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
+ cpi->t4x4_count ++;
+ }
+
+ if(x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED)
+ {
+ vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
+ vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
+ }
+ else if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
else
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
- vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
+ if(x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED)
+ vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t);
@@ -1151,21 +1109,27 @@ int vp8cx_encode_inter_macroblock
int recon_yoffset, int recon_uvoffset
)
{
+ VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
int intra_error = 0;
int rate;
int distortion;
+ unsigned char *segment_id = &xd->mode_info_context->mbmi.segment_id;
+ int seg_ref_active;
+ unsigned char ref_pred_flag;
x->skip = 0;
if (xd->segmentation_enabled)
- x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
+ x->encode_breakout = cpi->segment_encode_breakout[*segment_id];
else
x->encode_breakout = cpi->oxcf.encode_breakout;
- if (cpi->sf.RD)
+ //if (cpi->sf.RD)
+ // For now this codebase is limited to a single rd encode path
{
int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
+ int single, compound, hybrid;
/* Are we using the fast quantizer for the mode selection? */
if(cpi->sf.use_fastquant_for_pick)
@@ -1180,7 +1144,39 @@ int vp8cx_encode_inter_macroblock
cpi->zbin_mode_boost_enabled = 0;
}
vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
- &distortion, &intra_error);
+ &distortion, &intra_error, &single, &compound, &hybrid);
+
+ cpi->rd_single_diff += single;
+ cpi->rd_comp_diff += compound;
+ cpi->rd_hybrid_diff += hybrid;
+ if (x->e_mbd.mode_info_context->mbmi.ref_frame &&
+ x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
+ {
+ unsigned char pred_context;
+
+ pred_context = get_pred_context( cm, xd, PRED_COMP );
+
+ if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME)
+ cpi->single_pred_count[pred_context]++;
+ else
+ cpi->comp_pred_count[pred_context]++;
+ }
+
+
+ /* test code: set transform size based on mode selection */
+ if( cpi->common.txfm_mode == ALLOW_8X8
+ && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != B_PRED
+ && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
+ {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
+ cpi->t8x8_count ++;
+ }
+ else
+ {
+ x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
+ cpi->t4x4_count++;
+ }
/* switch back to the regular quantizer for the encode */
if (cpi->sf.improved_quant)
@@ -1190,14 +1186,14 @@ int vp8cx_encode_inter_macroblock
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
quantb_pair);
}
-
/* restore cpi->zbin_mode_boost_enabled */
cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
}
- else
- vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
- &distortion, &intra_error);
+ //else
+ // The non rd encode path has been deleted from this code base
+ // to simplify development
+ // vp8_pick_inter_mode
cpi->prediction_error += distortion;
cpi->intra_error += intra_error;
@@ -1208,30 +1204,6 @@ int vp8cx_encode_inter_macroblock
adjust_act_zbin( cpi, x );
}
-#if 0
- // Experimental RD code
- cpi->frame_distortion += distortion;
- cpi->last_mb_distortion = distortion;
-#endif
-
- // MB level adjutment to quantizer setup
- if (xd->segmentation_enabled)
- {
- // If cyclic update enabled
- if (cpi->cyclic_refresh_mode_enabled)
- {
- // Clear segment_id back to 0 if not coded (last frame 0,0)
- if ((xd->mode_info_context->mbmi.segment_id == 1) &&
- ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
- {
- xd->mode_info_context->mbmi.segment_id = 0;
-
- /* segment_id changed, so update */
- vp8cx_mb_init_quantizer(cpi, x);
- }
- }
- }
-
{
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to supress noise
@@ -1260,21 +1232,56 @@ int vp8cx_encode_inter_macroblock
vp8_update_zbin_extra(cpi, x);
}
- cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
+ seg_ref_active = segfeature_active( xd, *segment_id, SEG_LVL_REF_FRAME );
+
+ // SET VARIOUS PREDICTION FLAGS
+
+ // Did the chosen reference frame match its predicted value.
+ ref_pred_flag = ( (xd->mode_info_context->mbmi.ref_frame ==
+ get_pred_ref( cm, xd )) );
+ set_pred_flag( xd, PRED_REF, ref_pred_flag );
+
+ // If we have just a single reference frame coded for a segment then
+ // exclude from the reference frame counts used to work out
+ // probabilities. NOTE: At the moment we don't support custom trees
+ // for the reference frame coding for each segment but this is a
+ // possible future action.
+ if ( !seg_ref_active ||
+ ( ( check_segref( xd, *segment_id, INTRA_FRAME ) +
+ check_segref( xd, *segment_id, LAST_FRAME ) +
+ check_segref( xd, *segment_id, GOLDEN_FRAME ) +
+ check_segref( xd, *segment_id, ALTREF_FRAME ) ) > 1 ) )
+ {
+// TODO this may not be a good idea as it makes sample size small and means
+// the predictor functions cannot use data about most likely value only most
+// likely unpredicted value.
+//#if CONFIG_COMPRED
+// // Only update count for incorrectly predicted cases
+// if ( !ref_pred_flag )
+//#endif
+ {
+ cpi->count_mb_ref_frame_usage
+ [xd->mode_info_context->mbmi.ref_frame]++;
+ }
+ }
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
- vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
-
if (xd->mode_info_context->mbmi.mode == B_PRED)
{
+ vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
}
+ else if(xd->mode_info_context->mbmi.mode == I8X8_PRED)
+ {
+ vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
+ vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
+ }
else
{
+ vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
-
sum_intra_stats(cpi, x);
}
else
@@ -1292,6 +1299,24 @@ int vp8cx_encode_inter_macroblock
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
+ if (xd->mode_info_context->mbmi.second_ref_frame) {
+ int second_ref_fb_idx;
+
+ if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
+ second_ref_fb_idx = cpi->common.lst_fb_idx;
+ else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
+ second_ref_fb_idx = cpi->common.gld_fb_idx;
+ else
+ second_ref_fb_idx = cpi->common.alt_fb_idx;
+
+ xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
+ recon_yoffset;
+ xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
+ recon_uvoffset;
+ xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
+ recon_uvoffset;
+ }
+
if (!x->skip)
{
vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
@@ -1302,14 +1327,40 @@ int vp8cx_encode_inter_macroblock
}
else
+ {
vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
-
+ }
}
if (!x->skip)
+ {
+#ifdef ENC_DEBUG
+ if (enc_debug)
+ {
+ int i;
+ printf("Segment=%d [%d, %d]: %d %d:\n", x->e_mbd.mode_info_context->mbmi.segment_id, mb_col_debug, mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
+ for (i =0; i<400; i++) {
+ printf("%3d ", xd->qcoeff[i]);
+ if (i%16 == 15) printf("\n");
+ }
+ printf("\n");
+ printf("eobs = ");
+ for (i=0;i<25;i++)
+ printf("%d:%d ", i, xd->block[i].eob);
+ printf("\n");
+ fflush(stdout);
+ }
+#endif
vp8_tokenize_mb(cpi, xd, t);
+#ifdef ENC_DEBUG
+ if (enc_debug) {
+ printf("Tokenized\n");
+ fflush(stdout);
+ }
+#endif
+ }
else
{
if (cpi->common.mb_no_coeff_skip)
@@ -1325,6 +1376,5 @@ int vp8cx_encode_inter_macroblock
cpi->skip_false_count ++;
}
}
-
return rate;
}