author     Marco Paniconi <marpan@google.com>  2015-12-12 02:59:33 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2015-12-12 02:59:33 +0000
commit     e19b7df8d33c795e0daf4432315325ec445d21bd (patch)
tree       3d57b027b37159b3c9a970dfd46d30e43ddef5f1 /vp9/encoder
parent     6e87880e7f9dfb53d10c2be55a82765b076aaa76 (diff)
parent     d4440614ae4534f68d94bb88c05f35c86624b38f (diff)
Merge "Non-rd variance partition: Adjustments to reduce dragging artifact."
Diffstat (limited to 'vp9/encoder')
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  23
1 file changed, 20 insertions(+), 3 deletions(-)
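Note: the core of this change is a relative test added in choose_partitioning(): in addition to the absolute variance thresholds, a 32x32 (or 64x64) block on a non-key frame is now forced to split when its own variance is large compared with the summed variances of its four sub-blocks. The snippet below is a minimal standalone sketch of that test, not the libvpx implementation; the function and parameter names (force_split_on_relative_variance, sub_var, abs_threshold, num, shift) are illustrative and do not appear in vp9_encodeframe.c.

/* Sketch only: split when the block variance exceeds the absolute
 * threshold, or exceeds (num / 2^shift) of the summed sub-block variances.
 * The patch uses num = 3, shift = 2 at the 32x32 level (3/4 of the sum)
 * and num = 3, shift = 3 at the 64x64 level (3/8 of the sum). */
#include <stdint.h>

static int force_split_on_relative_variance(int64_t block_var,
                                            const int64_t sub_var[4],
                                            int64_t abs_threshold,
                                            int num, int shift,
                                            int is_key_frame) {
  /* Sum of the four sub-block variances; the patch accumulates this into
   * avg_16x16[i] / avg_32x32 while the sub-blocks are processed. */
  const int64_t sum = sub_var[0] + sub_var[1] + sub_var[2] + sub_var[3];
  if (block_var > abs_threshold) return 1;
  /* The relative test is applied on non-key frames only. */
  if (!is_key_frame && block_var > ((num * sum) >> shift)) return 1;
  return 0;
}

Since the sum covers four sub-blocks, 3/4 of the sum is 3x their mean and 3/8 of the sum is 1.5x their mean, so relative to its sub-block mean the 64x64 level is forced to split more readily than the 32x32 level.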
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index c643b1848..1606b16a2 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -496,6 +496,8 @@ static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q) {
threshold_base = 3 * threshold_base;
else if (noise_level == kMedium)
threshold_base = threshold_base << 1;
+ else if (noise_level < kLow)
+ threshold_base = (7 * threshold_base) >> 3;
}
if (cm->width <= 352 && cm->height <= 288) {
thresholds[0] = threshold_base >> 3;
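For scale: with the new branch above, a threshold_base of, say, 800 becomes 2400 for a high-noise source, 1600 for medium noise, and 700 (7/8 of 800) when the estimated noise level is below kLow; the lower threshold on very clean content makes the variance-based partition split more readily.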
@@ -668,6 +670,8 @@ static int choose_partitioning(VP9_COMP *cpi,
v64x64 vt;
v16x16 vt2[16];
int force_split[21];
+ int avg_32x32;
+ int avg_16x16[4];
uint8_t *s;
const uint8_t *d;
int sp;
@@ -819,6 +823,7 @@ static int choose_partitioning(VP9_COMP *cpi,
const int y32_idx = ((i >> 1) << 5);
const int i2 = i << 2;
force_split[i + 1] = 0;
+ avg_16x16[i] = 0;
for (j = 0; j < 4; j++) {
const int x16_idx = x32_idx + ((j & 1) << 4);
const int y16_idx = y32_idx + ((j >> 1) << 4);
@@ -836,6 +841,7 @@ static int choose_partitioning(VP9_COMP *cpi,
is_key_frame);
fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
get_variance(&vt.split[i].split[j].part_variances.none);
+ avg_16x16[i] += vt.split[i].split[j].part_variances.none.variance;
if (vt.split[i].split[j].part_variances.none.variance >
thresholds[2]) {
// 16X16 variance is above threshold for split, so force split to 8x8
@@ -888,6 +894,7 @@ static int choose_partitioning(VP9_COMP *cpi,
}
// Fill the rest of the variance tree by summing split partition values.
+ avg_32x32 = 0;
for (i = 0; i < 4; i++) {
const int i2 = i << 2;
for (j = 0; j < 4; j++) {
@@ -908,19 +915,29 @@ static int choose_partitioning(VP9_COMP *cpi,
}
}
fill_variance_tree(&vt.split[i], BLOCK_32X32);
- // If variance of this 32x32 block is above the threshold, force the block
- // to split. This also forces a split on the upper (64x64) level.
+ // If variance of this 32x32 block is above the threshold, or if it is above
+ // (some threshold of) the average variance over the sub-16x16 blocks, then
+ // force this block to split. This also forces a split on the upper
+ // (64x64) level.
if (!force_split[i + 1]) {
get_variance(&vt.split[i].part_variances.none);
- if (vt.split[i].part_variances.none.variance > thresholds[1]) {
+ if (vt.split[i].part_variances.none.variance > thresholds[1] ||
+ (!is_key_frame &&
+ vt.split[i].part_variances.none.variance > (3 * avg_16x16[i]) >> 2)) {
force_split[i + 1] = 1;
force_split[0] = 1;
}
+ avg_32x32 += vt.split[i].part_variances.none.variance;
}
}
if (!force_split[0]) {
fill_variance_tree(&vt, BLOCK_64X64);
get_variance(&vt.part_variances.none);
+ // If variance of this 64x64 block is above (some threshold of) the average
+ // variance over the sub-32x32 blocks, then force this block to split.
+ if (!is_key_frame &&
+ vt.part_variances.none.variance > (3 * avg_32x32) >> 3)
+ force_split[0] = 1;
}
// Now go through the entire structure, splitting every block size until