summary | refs | log | tree | commit | diff
path: root/vp8/encoder/encodeintra.c
diff options
context:
space:
mode:
author: Deb Mukherjee <debargha@google.com> 2012-09-09 22:42:35 -0700
committer: Deb Mukherjee <debargha@google.com> 2012-09-27 11:21:39 -0700
commit3f5d60b38442e7f0ba37883c4b636150305dfae0 (patch)
tree0987351ddb373538b16073599e3f8bde1fa9a6ce /vp8/encoder/encodeintra.c
parent70e21afb3fac6bb5cdfa4c8063521f5b634c0379 (diff)
downloadlibvpx-3f5d60b38442e7f0ba37883c4b636150305dfae0.tar
libvpx-3f5d60b38442e7f0ba37883c4b636150305dfae0.tar.gz
libvpx-3f5d60b38442e7f0ba37883c4b636150305dfae0.tar.bz2
libvpx-3f5d60b38442e7f0ba37883c4b636150305dfae0.zip
Entropy coding for hybrid transform
Separates the entropy coding context models for 4x4, 8x8 and 16x16 ADST variants. There is a small improvement for HD (hd/std-hd) by about 0.1-0.2%. Results on derf/yt are about the same, probably because there is not enough statistics. Results may improve somewhat once the initial probability tables are updated for the hybrid transforms which is coming soon. Change-Id: Ic7c0c62dacc68ef551054fdb575be8b8507d32a8
Diffstat (limited to 'vp8/encoder/encodeintra.c')
-rw-r--r-- vp8/encoder/encodeintra.c | 29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index c2f123c92..d8757c531 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -85,21 +85,21 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
#if CONFIG_HYBRIDTRANSFORM
- if(active_ht) {
- b->bmi.as_mode.test = b->bmi.as_mode.first;
- txfm_map(b, b->bmi.as_mode.first);
- vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
- vp8_ht_quantize_b(be, b);
- vp8_inverse_htransform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
- } else {
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
- x->quantize_b(be, b) ;
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
- }
+ if (active_ht) {
+ b->bmi.as_mode.test = b->bmi.as_mode.first;
+ txfm_map(b, b->bmi.as_mode.first);
+ vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
+ vp8_ht_quantize_b(be, b);
+ vp8_inverse_htransform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
+ } else {
+ x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
+ x->quantize_b(be, b) ;
+ vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
+ }
#else
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
+ x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
+ x->quantize_b(be, b);
+ vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
#endif
RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -298,7 +298,6 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
}
}
-extern const int vp8_i8x8_block[4];
void vp8_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
int i, ib;