author     Timothy B. Terriberry <tterribe@xiph.org>   2010-10-11 14:37:27 -0700
committer  John Koleszar <jkoleszar@google.com>        2010-10-12 08:41:03 -0400
commit     8d0f7a01e6727253df0d3c8625568bc39e83b6f4 (patch)
tree       9aaa5b72daa0ccc08c27c80efa7af24e6e11ef7a /vp8/encoder/encodeframe.c
parent     f4a8594492b7edd854ede663eb23be97efd56b6b (diff)
Add simple version of activity masking.
This uses MB variance to change the RDO weight for mode decision and
quantization. Activity is normalized against the average for the frame,
which is currently tracked using feed-forward statistics. This could
also be used to adjust the quantizer for the entire frame, but that
requires more extensive rate control changes.

This does not yet attempt to adapt the quantizer within the frame, but
the signaling cost means that will likely only be useful at very high
rates.

Change-Id: I26cd7c755cac3ff33cfe0688b1da50b2b87b9c93
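For orientation before the diff, here is a minimal standalone sketch of
the masking math this change introduces. The names mb_activity() and
masked_rdmult() are hypothetical, chosen for illustration: in the patch
itself the variance comes from the encoder's RTCD get16x16var hook and
vp8_activity_masking() scales x->rdmult in place.

    #include <stdint.h>

    #define VP8_ACTIVITY_AVG_MIN (64)

    /* Per-MB activity as in vp8_activity_masking(): the 16x16 source
     * variance, measured against a flat reference of 128s. The inline
     * loop stands in for the encoder's RTCD get16x16var routine.
     */
    static unsigned int mb_activity(const unsigned char *src, int stride)
    {
        unsigned int sse = 0, act;
        int sum = 0, i, j;

        for (i = 0; i < 16; i++)
            for (j = 0; j < 16; j++)
            {
                int d = src[i * stride + j] - 128;
                sum += d;
                sse += d * d;
            }
        /* Unnormalized variance; see the identity after the diffstat
         * for why the 128 offset cancels and why this needs a full
         * 32 bits.
         */
        act = (sse << 8) - (unsigned int)(sum * sum);
        /* Drop 4 bits for headroom, then damp near-flat blocks more. */
        act = (act + 8) >> 4;
        if (act < (8 << 12))
            act = act < (5 << 12) ? act : (5 << 12);
        return act;
    }

    /* RD weighting: rdmult is scaled by (4*act + avg) / (act + 4*avg),
     * rounded to nearest; the ratio is bounded to (1/4, 4).
     */
    static unsigned int masked_rdmult(unsigned int rdmult,
                                      unsigned int act, unsigned int avg)
    {
        uint64_t a = act + 4ull * avg;
        uint64_t b = 4ull * act + avg;
        return (unsigned int)((rdmult * b + (a >> 1)) / a);
    }

Because the ratio is bounded, a busy block can raise lambda by at most
4x (coarser decisions where texture masks the error) and a flat block
can lower it by at most 4x.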
Diffstat (limited to 'vp8/encoder/encodeframe.c')
-rw-r--r--   vp8/encoder/encodeframe.c   82
1 file changed, 79 insertions, 3 deletions
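One step worth making explicit before the diff: the "full 32 bits of
precision" comment below relies on the unnormalized variance identity.
With N = 256 pixels and d_i the difference of pixel i from the flat 128
reference,

\[ 256\cdot\mathrm{sse} - \mathrm{sum}^2
   = N\sum_i d_i^2 - \Big(\sum_i d_i\Big)^2
   = N^2\sigma^2, \]

so the constant 128 offset cancels and act starts out as 256^2 times
the true pixel variance. Since sse can reach 256 * 128^2 = 2^22, the
shifted term can reach 2^30, which is why the computation needs the
full 32-bit range.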
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index efbe2365b..b0a3c4713 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -369,6 +369,62 @@ void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
}
+/* activity_avg must be positive, or flat regions could get a zero weight
+ * (infinite lambda), which confounds analysis.
+ * This also avoids the need for divide by zero checks in
+ * vp8_activity_masking().
+ */
+#define VP8_ACTIVITY_AVG_MIN (64)
+
+/* This is used as a reference when computing the source variance for the
+ * purposes of activity masking.
+ * Eventually this should be replaced by custom no-reference routines,
+ * which will be faster.
+ */
+static const unsigned char VP8_VAR_OFFS[16]=
+{
+ 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128
+};
+
+unsigned int vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
+{
+ unsigned int act;
+ unsigned int sse;
+ int sum;
+ unsigned int a;
+ unsigned int b;
+ unsigned int d;
+ /* TODO: This could also be done over smaller areas (8x8), but that would
+ * require extensive changes elsewhere, as lambda is assumed to be fixed
+ * over an entire MB in most of the code.
+ * Another option is to compute four 8x8 variances, and pick a single
+ * lambda using a non-linear combination (e.g., the smallest, or second
+ * smallest, etc.).
+ */
+ VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16var)(x->src.y_buffer,
+ x->src.y_stride, VP8_VAR_OFFS, 0, &sse, &sum);
+ /* This requires a full 32 bits of precision. */
+ act = (sse<<8) - sum*sum;
+ /* Drop 4 to give us some headroom to work with. */
+ act = (act + 8) >> 4;
+ /* If the region is flat, lower the activity some more. */
+ if (act < 8<<12)
+ act = act < 5<<12 ? act : 5<<12;
+ /* TODO: For non-flat regions, edge regions should receive less masking
+ * than textured regions, but identifying edge regions quickly and
+ * reliably enough is still a subject of experimentation.
+ * This will be most noticeable near edges with a complex shape (e.g.,
+ * text), but the 4x4 transform size should make this less of a problem
+ * than it would be for an 8x8 transform.
+ */
+ /* Apply the masking to the RD multiplier. */
+ a = act + 4*cpi->activity_avg;
+ b = 4*act + cpi->activity_avg;
+ x->rdmult = (unsigned int)(((INT64)x->rdmult*b + (a>>1))/a);
+ return act;
+}
+
+
static
void encode_mb_row(VP8_COMP *cpi,
@@ -380,6 +436,7 @@ void encode_mb_row(VP8_COMP *cpi,
int *segment_counts,
int *totalrate)
{
+ INT64 activity_sum = 0;
int i;
int recon_yoffset, recon_uvoffset;
int mb_col;
@@ -431,6 +488,11 @@ void encode_mb_row(VP8_COMP *cpi,
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
xd->left_available = (mb_col != 0);
+ x->rddiv = cpi->RDDIV;
+ x->rdmult = cpi->RDMULT;
+
+ activity_sum += vp8_activity_masking(cpi, x);
+
// Is segmentation enabled
// MB level adjustment to quantizer
if (xd->segmentation_enabled)
@@ -537,6 +599,7 @@ void encode_mb_row(VP8_COMP *cpi,
// this is to account for the border
xd->mode_info_context++;
x->partition_info++;
+ x->activity_sum += activity_sum;
}
@@ -653,8 +716,7 @@ void vp8_encode_frame(VP8_COMP *cpi)
vp8_setup_block_ptrs(x);
- x->rddiv = cpi->RDDIV;
- x->rdmult = cpi->RDMULT;
+ x->activity_sum = 0;
#if 0
// Experimental rd code
@@ -709,11 +771,12 @@ void vp8_encode_frame(VP8_COMP *cpi)
else
{
#if CONFIG_MULTITHREAD
+ int i;
+
vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
{
- int i;
cpi->current_mb_col_main = -1;
for (i = 0; i < cpi->encoding_thread_count; i++)
@@ -791,6 +854,11 @@ void vp8_encode_frame(VP8_COMP *cpi)
totalrate += cpi->mb_row_ei[i].totalrate;
}
+ for (i = 0; i < cpi->encoding_thread_count; i++)
+ {
+ x->activity_sum += cpi->mb_row_ei[i].mb.activity_sum;
+ }
+
#endif
}
@@ -926,6 +994,14 @@ void vp8_encode_frame(VP8_COMP *cpi)
cpi->last_frame_distortion = cpi->frame_distortion;
#endif
+ /* Update the average activity for the next frame.
+ * This is feed-forward for now; it could also be saved in two-pass, or
+ * done during lookahead when that is eventually added.
+ */
+ cpi->activity_avg = (unsigned int)(x->activity_sum/cpi->common.MBs);
+ if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
+ cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
+
}
void vp8_setup_block_ptrs(MACROBLOCK *x)
{
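To see the feed-forward normalization end to end, here is a small
hedged harness: the frame total and MB count are made-up numbers, and
the clamp mirrors the VP8_ACTIVITY_AVG_MIN guard added at the end of
vp8_encode_frame().

    #include <stdint.h>
    #include <stdio.h>

    #define VP8_ACTIVITY_AVG_MIN (64)

    int main(void)
    {
        /* End-of-frame update: the per-MB activity sum becomes the
         * next frame's reference average (feed-forward), clamped so a
         * flat frame cannot drive the weight toward infinite lambda.
         */
        uint64_t activity_sum = 1200000;  /* hypothetical frame total */
        unsigned int mbs = 396;           /* e.g. 352x288 = 22x18 MBs */
        unsigned int avg = (unsigned int)(activity_sum / mbs);
        unsigned int rdmult = 1000;
        unsigned int acts[3];
        int i;

        if (avg < VP8_ACTIVITY_AVG_MIN)
            avg = VP8_ACTIVITY_AVG_MIN;

        /* The (4*act + avg)/(act + 4*avg) ratio in action: a flat, an
         * average, and a busy MB relative to the frame average.
         */
        acts[0] = avg / 16;
        acts[1] = avg;
        acts[2] = avg * 16;
        for (i = 0; i < 3; i++)
        {
            uint64_t a = acts[i] + 4ull * avg;
            uint64_t b = 4ull * acts[i] + avg;
            printf("act=%-6u rdmult=%u\n", acts[i],
                   (unsigned int)((rdmult * b + (a >> 1)) / a));
        }
        return 0;
    }

For these inputs it prints rdmult values of roughly 308, 1000, and
3250, i.e. approaching rdmult/4 for the flat block and 4*rdmult for
the busy one.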