author    Ronald S. Bultje <rbultje@google.com>    2012-12-07 14:45:05 -0800
committer Ronald S. Bultje <rbultje@google.com>    2012-12-07 14:45:05 -0800
commit    c456b35fdf1b1e9fd3c964b822e9de05437544e2 (patch)
tree      9eb0624d58ae710964ba4a388ad8a1ad740ec49d /vp9/encoder/vp9_encodeframe.c
parent    a36d9a4a150c2f7e7eaa825d7ca0289aeac6c5b1 (diff)
download  libvpx-c456b35fdf1b1e9fd3c964b822e9de05437544e2.tar
          libvpx-c456b35fdf1b1e9fd3c964b822e9de05437544e2.tar.gz
          libvpx-c456b35fdf1b1e9fd3c964b822e9de05437544e2.tar.bz2
          libvpx-c456b35fdf1b1e9fd3c964b822e9de05437544e2.zip
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds code
all over the place to wrap that in the bitstream/encoder/decoder/RD.

Some implementation notes (these probably need careful review):
- The token range is extended by 1 bit, since the value range out of this
  transform is [-16384,16383].
- The coefficients coming out of the FDCT are manually scaled back by 1 bit,
  or else they won't fit in int16_t (they are 17 bits). Because of this, the
  RD error scoring does not right-shift the MSE score by two (unlike for
  4x4/8x8/16x16).
- To compensate for this loss in precision, the quantizer is also halved.
  This is currently a little hacky.
- FDCT and IDCT are double-only right now; they need a fixed-point
  implementation.
- There are no default probabilities for the 32x32 transform yet; I'm simply
  using the 16x16 luma ones. A future commit will add newly generated
  probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an ADST is
  desired, transform-size selection can scale back to 16x16 or lower and use
  an ADST at that level.

Additional notes specific to Debargha's DWT/DCT hybrid:
- The coefficient scale is different for the top-left 16x16 (DCT-over-DWT)
  block than for the rest (DWT pixel differences) of the block. Therefore,
  RD error scoring isn't easily scalable between the coefficient and pixel
  domains; unfortunately, we need to compute the RD distortion in the pixel
  domain until we figure out how to scale these appropriately.

Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
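As a rough sketch of the coefficient-scaling note above (illustrative C only,
not the libvpx code; scale_and_quantize_32x32, raw_fdct_out, and quant_step
are hypothetical names): the raw 32x32 FDCT output needs 17 bits, so each
coefficient is dropped by one bit to fit in int16_t, and the quantizer step
is halved so that the coefficient/step ratio, and hence the decoded value,
stays the same.

    /* Illustrative sketch only -- not the libvpx implementation. All names
     * (scale_and_quantize_32x32, raw_fdct_out, quant_step) are made up. */
    #include <stdint.h>

    #define SB_COEFFS (32 * 32)

    static void scale_and_quantize_32x32(const int32_t raw_fdct_out[SB_COEFFS],
                                         int quant_step, /* step a 16x16 would use */
                                         int16_t coeff[SB_COEFFS],
                                         int16_t qcoeff[SB_COEFFS]) {
      /* Halving the quantizer compensates for the 1-bit coefficient
       * scale-down: coeff[i] / half_step ~= raw_fdct_out[i] / quant_step. */
      const int half_step = (quant_step >> 1) ? (quant_step >> 1) : 1;
      int i;

      for (i = 0; i < SB_COEFFS; i++) {
        /* Drop one bit so the 17-bit FDCT output fits in int16_t. */
        coeff[i] = (int16_t)(raw_fdct_out[i] / 2);
        /* Plain truncating quantizer; the real code also applies zbin/round. */
        qcoeff[i] = (int16_t)(coeff[i] / half_step);
      }
    }

Because each coefficient has already lost one bit, squared errors measured in
this scaled domain come out a factor of four smaller than unscaled, which is
why the 32x32 RD scoring can skip the usual right-shift of the MSE by two.
For the DWT/DCT hybrid, where the top-left 16x16 subband carries a different
scale than the rest of the block, no single shift reconciles the two, hence
the fallback to pixel-domain distortion.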
Diffstat (limited to 'vp9/encoder/vp9_encodeframe.c')
-rw-r--r--  vp9/encoder/vp9_encodeframe.c  127
1 file changed, 111 insertions(+), 16 deletions(-)
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 6ab23cae0..f504fc53c 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -456,6 +456,10 @@ static void update_state(VP9_COMP *cpi, MACROBLOCK *x,
if (xd->mb_to_right_edge >= 0)
vpx_memcpy(xd->mode_info_context + mis + 1, mi, sizeof(MODE_INFO));
}
+#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+ } else {
+ ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
+#endif
}
#endif
@@ -1487,6 +1491,9 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vp9_zero(cpi->hybrid_coef_counts_8x8);
vp9_zero(cpi->coef_counts_16x16);
vp9_zero(cpi->hybrid_coef_counts_16x16);
+#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+ vp9_zero(cpi->coef_counts_32x32);
+#endif
vp9_frame_init_quantizer(cpi);
@@ -1507,7 +1514,8 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
- vpx_memset(cpi->txfm_count, 0, sizeof(cpi->txfm_count));
+ vpx_memset(cpi->txfm_count_32x32p, 0, sizeof(cpi->txfm_count_32x32p));
+ vpx_memset(cpi->txfm_count_16x16p, 0, sizeof(cpi->txfm_count_16x16p));
vpx_memset(cpi->txfm_count_8x8p, 0, sizeof(cpi->txfm_count_8x8p));
vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
{
@@ -1700,7 +1708,11 @@ void vp9_encode_frame(VP9_COMP *cpi) {
* keyframe's probabilities as an estimate of what the current keyframe's
* coefficient cost distributions may look like. */
if (frame_type == 0) {
+#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+ txfm_type = ALLOW_32X32;
+#else
txfm_type = ALLOW_16X16;
+#endif
} else
#if 0
/* FIXME (rbultje)
@@ -1731,10 +1743,16 @@ void vp9_encode_frame(VP9_COMP *cpi) {
} else
txfm_type = ALLOW_8X8;
#else
- txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
+#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+ txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] >=
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
+ ALLOW_32X32 : TX_MODE_SELECT;
+#else
+ txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
+ cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_16X16 : TX_MODE_SELECT;
#endif
+#endif
cpi->common.txfm_mode = txfm_type;
if (txfm_type != TX_MODE_SELECT) {
cpi->common.prob_tx[0] = 128;
@@ -1753,7 +1771,8 @@ void vp9_encode_frame(VP9_COMP *cpi) {
int64_t pd = cpi->rd_tx_select_diff[i];
int diff;
if (i == TX_MODE_SELECT)
- pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, 2048 * (TX_SIZE_MAX - 1), 0);
+ pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv,
+ 2048 * (TX_SIZE_MAX_SB - 1), 0);
diff = (int)(pd / cpi->common.MBs);
cpi->rd_tx_select_threshes[frame_type][i] += diff;
cpi->rd_tx_select_threshes[frame_type][i] /= 2;
@@ -1776,19 +1795,37 @@ void vp9_encode_frame(VP9_COMP *cpi) {
}
if (cpi->common.txfm_mode == TX_MODE_SELECT) {
- const int count4x4 = cpi->txfm_count[TX_4X4] + cpi->txfm_count_8x8p[TX_4X4];
- const int count8x8 = cpi->txfm_count[TX_8X8];
+ const int count4x4 = cpi->txfm_count_16x16p[TX_4X4] +
+ cpi->txfm_count_32x32p[TX_4X4] +
+ cpi->txfm_count_8x8p[TX_4X4];
+ const int count8x8_lp = cpi->txfm_count_32x32p[TX_8X8] +
+ cpi->txfm_count_16x16p[TX_8X8];
const int count8x8_8x8p = cpi->txfm_count_8x8p[TX_8X8];
- const int count16x16 = cpi->txfm_count[TX_16X16];
+ const int count16x16_16x16p = cpi->txfm_count_16x16p[TX_16X16];
+ const int count16x16_lp = cpi->txfm_count_32x32p[TX_16X16];
+#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+ const int count32x32 = cpi->txfm_count_32x32p[TX_32X32];
+#else
+ const int count32x32 = 0;
+#endif
- if (count4x4 == 0 && count16x16 == 0) {
+ if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
+ count32x32 == 0) {
cpi->common.txfm_mode = ALLOW_8X8;
reset_skip_txfm_size(cpi, TX_8X8);
- } else if (count8x8 == 0 && count16x16 == 0 && count8x8_8x8p == 0) {
+ } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
+ count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
cpi->common.txfm_mode = ONLY_4X4;
reset_skip_txfm_size(cpi, TX_4X4);
- } else if (count8x8 == 0 && count4x4 == 0) {
+#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+ } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
+ cpi->common.txfm_mode = ALLOW_32X32;
+#endif
+ } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
cpi->common.txfm_mode = ALLOW_16X16;
+#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
+ reset_skip_txfm_size(cpi, TX_16X16);
+#endif
}
}
} else {
@@ -2087,6 +2124,7 @@ static void encode_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
}
+ assert(mbmi->txfm_size <= TX_16X16);
if (mbmi->ref_frame == INTRA_FRAME) {
#ifdef ENC_DEBUG
if (enc_debug) {
@@ -2266,7 +2304,7 @@ static void encode_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
vp9_get_segdata(&x->e_mbd, segment_id, SEG_LVL_EOB) == 0))) {
if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV) {
- cpi->txfm_count[mbmi->txfm_size]++;
+ cpi->txfm_count_16x16p[mbmi->txfm_size]++;
} else if (mbmi->mode == I8X8_PRED ||
(mbmi->mode == SPLITMV &&
mbmi->partitioning != PARTITIONING_4X4)) {
@@ -2308,6 +2346,7 @@ static void encode_superblock(VP9_COMP *cpi, MACROBLOCK *x,
MODE_INFO *mi = x->e_mbd.mode_info_context;
unsigned int segment_id = mi->mbmi.segment_id;
ENTROPY_CONTEXT_PLANES ta[4], tl[4];
+ const int mis = cm->mode_info_stride;
x->skip = 0;
@@ -2397,6 +2436,53 @@ static void encode_superblock(VP9_COMP *cpi, MACROBLOCK *x,
xd->dst.y_stride, xd->dst.uv_stride);
}
+#if CONFIG_TX32X32
+ if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {
+ vp9_subtract_sby_s_c(x->sb_coeff_data.src_diff, src, src_y_stride,
+ dst, dst_y_stride);
+ vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
+ usrc, vsrc, src_uv_stride,
+ udst, vdst, dst_uv_stride);
+ vp9_transform_sby_32x32(x);
+ vp9_transform_sbuv_16x16(x);
+ vp9_quantize_sby_32x32(x);
+ vp9_quantize_sbuv_16x16(x);
+ // TODO(rbultje): trellis optimize
+ vp9_inverse_transform_sbuv_16x16(&x->e_mbd.sb_coeff_data);
+ vp9_inverse_transform_sby_32x32(&x->e_mbd.sb_coeff_data);
+ vp9_recon_sby_s_c(&x->e_mbd, dst);
+ vp9_recon_sbuv_s_c(&x->e_mbd, udst, vdst);
+
+ if (!x->skip) {
+ vp9_tokenize_sb(cpi, &x->e_mbd, t, 0);
+ } else {
+ int mb_skip_context =
+ cpi->common.mb_no_coeff_skip ?
+ (mi - 1)->mbmi.mb_skip_coeff +
+ (mi - mis)->mbmi.mb_skip_coeff :
+ 0;
+ mi->mbmi.mb_skip_coeff = 1;
+ if (cm->mb_no_coeff_skip) {
+ cpi->skip_true_count[mb_skip_context]++;
+ vp9_fix_contexts_sb(xd);
+ } else {
+ vp9_stuff_sb(cpi, xd, t, 0);
+ cpi->skip_false_count[mb_skip_context]++;
+ }
+ }
+
+ // copy skip flag on all mb_mode_info contexts in this SB
+ // if this was a skip at this txfm size
+ if (mb_col < cm->mb_cols - 1)
+ mi[1].mbmi.mb_skip_coeff = mi->mbmi.mb_skip_coeff;
+ if (mb_row < cm->mb_rows - 1) {
+ mi[mis].mbmi.mb_skip_coeff = mi->mbmi.mb_skip_coeff;
+ if (mb_col < cm->mb_cols - 1)
+ mi[mis + 1].mbmi.mb_skip_coeff = mi->mbmi.mb_skip_coeff;
+ }
+ skip[0] = skip[2] = skip[1] = skip[3] = mi->mbmi.mb_skip_coeff;
+ } else {
+#endif
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;
@@ -2405,7 +2491,7 @@ static void encode_superblock(VP9_COMP *cpi, MACROBLOCK *x,
memcpy(&ta[n], xd->above_context, sizeof(ta[n]));
memcpy(&tl[n], xd->left_context, sizeof(tl[n]));
tp[n] = *t;
- xd->mode_info_context = mi + x_idx + y_idx * cm->mode_info_stride;
+ xd->mode_info_context = mi + x_idx + y_idx * mis;
vp9_subtract_mby_s_c(x->src_diff,
src + x_idx * 16 + y_idx * 16 * src_y_stride,
@@ -2433,7 +2519,7 @@ static void encode_superblock(VP9_COMP *cpi, MACROBLOCK *x,
int mb_skip_context =
cpi->common.mb_no_coeff_skip ?
(x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
- (x->e_mbd.mode_info_context - cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
+ (x->e_mbd.mode_info_context - mis)->mbmi.mb_skip_coeff :
0;
xd->mode_info_context->mbmi.mb_skip_coeff = skip[n] = 1;
if (cpi->common.mb_no_coeff_skip) {
@@ -2450,20 +2536,29 @@ static void encode_superblock(VP9_COMP *cpi, MACROBLOCK *x,
xd->mode_info_context = mi;
update_sb_skip_coeff_state(cpi, x, ta, tl, tp, t, skip);
+#if CONFIG_TX32X32
+ }
+#endif
if (cm->txfm_mode == TX_MODE_SELECT &&
!((cm->mb_no_coeff_skip && skip[0] && skip[1] && skip[2] && skip[3]) ||
(vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
- cpi->txfm_count[mi->mbmi.txfm_size]++;
+ cpi->txfm_count_32x32p[mi->mbmi.txfm_size]++;
} else {
- TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ? TX_16X16 : cm->txfm_mode;
+ TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ?
+#if CONFIG_TX32X32
+ TX_32X32 :
+#else
+ TX_16X16 :
+#endif
+ cm->txfm_mode;
mi->mbmi.txfm_size = sz;
if (mb_col < cm->mb_cols - 1)
mi[1].mbmi.txfm_size = sz;
if (mb_row < cm->mb_rows - 1) {
- mi[cm->mode_info_stride].mbmi.txfm_size = sz;
+ mi[mis].mbmi.txfm_size = sz;
if (mb_col < cm->mb_cols - 1)
- mi[cm->mode_info_stride + 1].mbmi.txfm_size = sz;
+ mi[mis + 1].mbmi.txfm_size = sz;
}
}
}