author    Deb Mukherjee <debargha@google.com>                      2012-10-22 12:18:46 -0700
committer Gerrit Code Review <gerrit@gerrit.golo.chromium.org>    2012-10-22 12:18:46 -0700
commit    7906ed091ab5d16a7fbdc21109a034644aa6b9a3 (patch)
tree      f8fc3e6687ce6f5e6532778df19ca7f0b14fc7d2 /vp8/encoder
parent    372e001d68c3a6c58613d8418933047e868a1e98 (diff)
parent    f10465634bbbdc2cd251d8e12d965b91b6cecb83 (diff)
Merge "Merging the hybrid transform experiments" into experimental
Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/bitstream.c     25
-rw-r--r--  vp8/encoder/block.h          2
-rw-r--r--  vp8/encoder/dct.c            7
-rw-r--r--  vp8/encoder/dct.h            2
-rw-r--r--  vp8/encoder/encodeframe.c    6
-rw-r--r--  vp8/encoder/encodeintra.c   20
-rw-r--r--  vp8/encoder/encodemb.c       2
-rw-r--r--  vp8/encoder/onyx_if.c        6
-rw-r--r--  vp8/encoder/onyx_int.h      12
-rw-r--r--  vp8/encoder/quantize.c       2
-rw-r--r--  vp8/encoder/quantize.h       2
-rw-r--r--  vp8/encoder/ratectrl.c      12
-rw-r--r--  vp8/encoder/rdopt.c         45
-rw-r--r--  vp8/encoder/tokenize.c      48
14 files changed, 3 insertions, 188 deletions
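
Note: this merge drops the CONFIG_HYBRIDTRANSFORM, CONFIG_HYBRIDTRANSFORM8X8 and
CONFIG_HYBRIDTRANSFORM16X16 preprocessor guards throughout the encoder, so the
hybrid (ADST/DCT) transform paths are always compiled in rather than being
experiment-gated; hence the diff is almost entirely deletions. As a minimal
sketch of the resulting control flow, the 4x4 intra block path now reads roughly
as below (condensed from the encodeintra.c hunk further down; declarations and
the CONFIG_COMP_INTRA_PRED handling are omitted):

    TX_TYPE tx_type = get_tx_type(&x->e_mbd, b);  /* DCT_DCT or a hybrid (ADST-based) type */
    if (tx_type != DCT_DCT) {
      /* hybrid path: ADST/DCT forward transform, matching quantizer and inverse */
      vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 4);
      vp8_ht_quantize_b_4x4(be, b, tx_type);
      vp8_ihtllm_c(b->dqcoeff, b->diff, 32, tx_type, 4);
    } else {
      /* plain DCT path, unchanged by this merge */
      x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
      x->quantize_b_4x4(be, b);
      vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
    }

The 8x8 and 16x16 paths follow the same pattern with their respective
quantizers, as shown in the hunks below.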
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index f74f85fad..512057ea7 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -45,32 +45,26 @@ unsigned int tree_update_hist [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM
unsigned int hybrid_tree_update_hist [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES][2];
-#endif
unsigned int tree_update_hist_8x8 [BLOCK_TYPES_8X8]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM8X8
unsigned int hybrid_tree_update_hist_8x8 [BLOCK_TYPES_8X8]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#endif
unsigned int tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#endif
extern unsigned int active_section;
#endif
@@ -1498,7 +1492,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM
for (i = 0; i < BLOCK_TYPES; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -1519,8 +1512,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#endif
-
if (cpi->common.txfm_mode != ONLY_4X4) {
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
@@ -1547,7 +1538,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM8X8
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -1572,7 +1562,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#endif
}
if (cpi->common.txfm_mode > ALLOW_8X8) {
@@ -1595,7 +1584,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM16X16
for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -1614,7 +1602,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#endif
}
#if 0
@@ -1887,7 +1874,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) {
}
}
-#if CONFIG_HYBRIDTRANSFORM
savings = 0;
update[0] = update[1] = 0;
for (i = 0; i < BLOCK_TYPES; ++i) {
@@ -1976,7 +1962,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) {
}
}
}
-#endif
/* do not do this if not even allowed */
if (cpi->common.txfm_mode != ONLY_4X4) {
@@ -2054,7 +2039,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM8X8
update[0] = update[1] = 0;
savings = 0;
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
@@ -2128,7 +2112,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) {
}
}
}
-#endif
}
if (cpi->common.txfm_mode > ALLOW_8X8) {
@@ -2206,7 +2189,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM16X16
update[0] = update[1] = 0;
savings = 0;
for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
@@ -2280,7 +2262,6 @@ static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) {
}
}
}
-#endif
}
}
@@ -2731,17 +2712,11 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
vp8_clear_system_state(); // __asm emms;
vp8_copy(cpi->common.fc.pre_coef_probs, cpi->common.fc.coef_probs);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs, cpi->common.fc.hybrid_coef_probs);
-#endif
vp8_copy(cpi->common.fc.pre_coef_probs_8x8, cpi->common.fc.coef_probs_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_8x8, cpi->common.fc.hybrid_coef_probs_8x8);
-#endif
vp8_copy(cpi->common.fc.pre_coef_probs_16x16, cpi->common.fc.coef_probs_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_16x16, cpi->common.fc.hybrid_coef_probs_16x16);
-#endif
vp8_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
vp8_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
vp8_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob);
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 80f9b75b8..292011f81 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -164,10 +164,8 @@ typedef struct {
unsigned int token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
-#endif
int optimize;
diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c
index cd13fec7c..0983b1c0a 100644
--- a/vp8/encoder/dct.c
+++ b/vp8/encoder/dct.c
@@ -14,8 +14,6 @@
#include "vp8/common/idct.h"
#include "vp8/common/systemdependent.h"
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
-
#include "vp8/common/blockd.h"
// TODO: these transforms can be converted into integer forms to reduce
@@ -71,9 +69,7 @@ float adst_8[64] = {
0.175227946595735, -0.326790388032145, 0.434217976756762, -0.483002021635509,
0.466553967085785, -0.387095214016348, 0.255357107325376, -0.089131608307532
};
-#endif
-#if CONFIG_HYBRIDTRANSFORM16X16 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8
float dct_16[256] = {
0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000,
0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000,
@@ -143,7 +139,6 @@ float adst_16[256] = {
0.065889, -0.129396, 0.188227, -0.240255, 0.283599, -0.316693, 0.338341, -0.347761,
0.344612, -0.329007, 0.301511, -0.263118, 0.215215, -0.159534, 0.098087, -0.033094
};
-#endif
static const int xC1S7 = 16069;
static const int xC2S6 = 15137;
@@ -400,7 +395,6 @@ void vp8_short_fhaar2x2_c(short *input, short *output, int pitch) { // pitch = 8
}
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
void vp8_fht_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim) {
@@ -518,7 +512,6 @@ void vp8_fht_c(short *input, short *output, int pitch,
}
vp8_clear_system_state(); // Make it simd safe : __asm emms;
}
-#endif
void vp8_short_fdct4x4_c(short *input, short *output, int pitch) {
int i;
diff --git a/vp8/encoder/dct.h b/vp8/encoder/dct.h
index 180192bbb..4ad1fe85d 100644
--- a/vp8/encoder/dct.h
+++ b/vp8/encoder/dct.h
@@ -26,10 +26,8 @@
#endif
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
void vp8_fht_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim);
-#endif
#ifndef vp8_fdct_short16x16
#define vp8_fdct_short16x16 vp8_short_fdct16x16_c
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index adfbfc79b..4658a7946 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -1362,17 +1362,11 @@ static void encode_frame_internal(VP8_COMP *cpi) {
vp8_zero(cpi->MVcount_hp);
#endif
vp8_zero(cpi->coef_counts);
-#if CONFIG_HYBRIDTRANSFORM
vp8_zero(cpi->hybrid_coef_counts);
-#endif
vp8_zero(cpi->coef_counts_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_zero(cpi->hybrid_coef_counts_8x8);
-#endif
vp8_zero(cpi->coef_counts_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_zero(cpi->hybrid_coef_counts_16x16);
-#endif
vp8cx_frame_init_quantizer(cpi);
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index ff5395f2d..f44df22ea 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -57,9 +57,7 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
MACROBLOCK *x, int ib) {
BLOCKD *b = &x->e_mbd.block[ib];
BLOCK *be = &x->block[ib];
-#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type;
-#endif
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
@@ -74,15 +72,12 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
-#if CONFIG_HYBRIDTRANSFORM
tx_type = get_tx_type(&x->e_mbd, b);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 4);
vp8_ht_quantize_b_4x4(be, b, tx_type);
vp8_ihtllm_c(b->dqcoeff, b->diff, 32, tx_type, 4);
- } else
-#endif
- {
+ } else {
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
x->quantize_b_4x4(be, b) ;
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
@@ -103,9 +98,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type;
-#endif
#if CONFIG_COMP_INTRA_PRED
if (xd->mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
@@ -120,7 +113,6 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
xd->predictor, b->src_stride);
if (tx_size == TX_16X16) {
-#if CONFIG_HYBRIDTRANSFORM16X16
BLOCKD *bd = &xd->block[0];
tx_type = get_tx_type(xd, bd);
if (tx_type != DCT_DCT) {
@@ -129,9 +121,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
if (x->optimize)
vp8_optimize_mby_16x16(x, rtcd);
vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, tx_type, 16);
- } else
-#endif
- {
+ } else {
vp8_transform_mby_16x16(x);
vp8_quantize_mby_16x16(x);
if (x->optimize)
@@ -196,9 +186,7 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
BLOCK *be = &x->block[ib];
const int iblock[4] = {0, 1, 4, 5};
int i;
-#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type;
-#endif
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
@@ -217,7 +205,6 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
// generate residual blocks
vp8_subtract_4b_c(be, b, 16);
-#if CONFIG_HYBRIDTRANSFORM8X8
tx_type = get_tx_type(xd, xd->block + idx);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32,
@@ -226,13 +213,10 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
vp8_ihtllm_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32,
tx_type, 8);
} else {
-#endif
x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
x->quantize_b_8x8(x->block + idx, xd->block + idx);
vp8_idct_idct8(xd->block[idx].dqcoeff, xd->block[ib].diff, 32);
-#if CONFIG_HYBRIDTRANSFORM8X8
}
-#endif
} else {
for (i = 0; i < 4; i++) {
b = &xd->block[ib + iblock[i]];
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index dc54d05a2..0272a5cb6 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -304,7 +304,6 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
scan = vp8_default_zig_zag1d;
bands = vp8_coef_bands;
default_eob = 16;
-#if CONFIG_HYBRIDTRANSFORM
// TODO: this isn't called (for intra4x4 modes), but will be left in
// since it could be used later
{
@@ -327,7 +326,6 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
scan = vp8_default_zig_zag1d;
}
}
-#endif
break;
case TX_8X8:
scan = vp8_default_zig_zag1d_8x8;
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 0e46071a9..8f0a52be8 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -3712,18 +3712,12 @@ static void encode_frame_to_data_rate
update_reference_frames(cm);
vp8_copy(cpi->common.fc.coef_counts, cpi->coef_counts);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cpi->common.fc.hybrid_coef_counts, cpi->hybrid_coef_counts);
-#endif
vp8_copy(cpi->common.fc.coef_counts_8x8, cpi->coef_counts_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cpi->common.fc.hybrid_coef_counts_8x8, cpi->hybrid_coef_counts_8x8);
-#endif
vp8_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cpi->common.fc.hybrid_coef_counts_16x16,
cpi->hybrid_coef_counts_16x16);
-#endif
vp8_adapt_coef_probs(&cpi->common);
if (cpi->common.frame_type != KEY_FRAME) {
vp8_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 79287e5fa..8746ab07c 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -97,24 +97,18 @@ typedef struct {
vp8_prob coef_probs[BLOCK_TYPES]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM
vp8_prob hybrid_coef_probs[BLOCK_TYPES]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#endif
vp8_prob coef_probs_8x8[BLOCK_TYPES_8X8]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_prob hybrid_coef_probs_8x8[BLOCK_TYPES_8X8]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#endif
vp8_prob coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob hybrid_coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#endif
vp8_prob ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
vp8_prob uv_mode_prob [VP8_YMODES][VP8_UV_MODES - 1];
@@ -572,29 +566,23 @@ typedef struct VP8_COMP {
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM
unsigned int hybrid_coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
unsigned int coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM8X8
unsigned int hybrid_coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
int gfu_boost;
int last_boost;
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index b6a1f27f8..16b4e6e1d 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -21,7 +21,6 @@
extern int enc_debug;
#endif
-#if CONFIG_HYBRIDTRANSFORM
void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
int i, rc, eob;
int zbin;
@@ -85,7 +84,6 @@ void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
d->eob = eob + 1;
}
-#endif
void vp8_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
diff --git a/vp8/encoder/quantize.h b/vp8/encoder/quantize.h
index 1375ed0b0..e39433fc3 100644
--- a/vp8/encoder/quantize.h
+++ b/vp8/encoder/quantize.h
@@ -30,11 +30,9 @@
#include "arm/quantize_arm.h"
#endif
-#if CONFIG_HYBRIDTRANSFORM
#define prototype_quantize_block_type(sym) \
void (sym)(BLOCK *b, BLOCKD *d, TX_TYPE type)
extern prototype_quantize_block_type(vp8_ht_quantize_b_4x4);
-#endif
#ifndef vp8_quantize_quantb_4x4
#define vp8_quantize_quantb_4x4 vp8_regular_quantize_b_4x4
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index 570bedfe9..e0e2a4e5b 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -178,17 +178,11 @@ void vp8_save_coding_context(VP8_COMP *cpi) {
vp8_copy(cc->last_mode_lf_deltas, xd->last_mode_lf_deltas);
vp8_copy(cc->coef_probs, cm->fc.coef_probs);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cc->hybrid_coef_probs, cm->fc.hybrid_coef_probs);
-#endif
vp8_copy(cc->coef_probs_8x8, cm->fc.coef_probs_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8);
-#endif
vp8_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16);
-#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
#endif
@@ -249,17 +243,11 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {
vp8_copy(xd->last_mode_lf_deltas, cc->last_mode_lf_deltas);
vp8_copy(cm->fc.coef_probs, cc->coef_probs);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cm->fc.hybrid_coef_probs, cc->hybrid_coef_probs);
-#endif
vp8_copy(cm->fc.coef_probs_8x8, cc->coef_probs_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8);
-#endif
vp8_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16);
-#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
#endif
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index e3f989acd..30f7e705e 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -355,37 +355,31 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
cpi->mb.token_costs[TX_4X4],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs,
BLOCK_TYPES);
-#if CONFIG_HYBRIDTRANSFORM
fill_token_costs(
cpi->mb.hybrid_token_costs[TX_4X4],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11])
cpi->common.fc.hybrid_coef_probs,
BLOCK_TYPES);
-#endif
fill_token_costs(
cpi->mb.token_costs[TX_8X8],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_8x8,
BLOCK_TYPES_8X8);
-#if CONFIG_HYBRIDTRANSFORM8X8
fill_token_costs(
cpi->mb.hybrid_token_costs[TX_8X8],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11])
cpi->common.fc.hybrid_coef_probs_8x8,
BLOCK_TYPES_8X8);
-#endif
fill_token_costs(
cpi->mb.token_costs[TX_16X16],
(const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_16x16,
BLOCK_TYPES_16X16);
-#if CONFIG_HYBRIDTRANSFORM16X16
fill_token_costs(
cpi->mb.hybrid_token_costs[TX_16X16],
(const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11])
cpi->common.fc.hybrid_coef_probs_16x16,
BLOCK_TYPES_16X16);
-#endif
/*rough estimate for costing*/
cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
@@ -605,9 +599,7 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
short *qcoeff_ptr = b->qcoeff;
MACROBLOCKD *xd = &mb->e_mbd;
MB_MODE_INFO *mbmi = &mb->e_mbd.mode_info_context->mbmi;
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = DCT_DCT;
-#endif
int segment_id = mbmi->segment_id;
switch (tx_size) {
@@ -615,7 +607,6 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
scan = vp8_default_zig_zag1d;
band = vp8_coef_bands;
default_eob = 16;
-#if CONFIG_HYBRIDTRANSFORM
if (type == PLANE_TYPE_Y_WITH_DC) {
tx_type = get_tx_type_4x4(xd, b);
if (tx_type != DCT_DCT) {
@@ -634,14 +625,12 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
}
}
}
-#endif
break;
case TX_8X8:
scan = vp8_default_zig_zag1d_8x8;
band = vp8_coef_bands_8x8;
default_eob = 64;
-#if CONFIG_HYBRIDTRANSFORM8X8
if (type == PLANE_TYPE_Y_WITH_DC) {
BLOCKD *bb;
int ib = (b - xd->block);
@@ -651,17 +640,14 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
tx_type = get_tx_type_8x8(xd, bb);
}
}
-#endif
break;
case TX_16X16:
scan = vp8_default_zig_zag1d_16x16;
band = vp8_coef_bands_16x16;
default_eob = 256;
-#if CONFIG_HYBRIDTRANSFORM16X16
if (type == PLANE_TYPE_Y_WITH_DC) {
tx_type = get_tx_type_16x16(xd, b);
}
-#endif
break;
default:
break;
@@ -673,7 +659,6 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT) {
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
@@ -685,9 +670,7 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
if (c < seg_eob)
cost += mb->hybrid_token_costs[tx_size][type][band[c]]
[pt][DCT_EOB_TOKEN];
- } else
-#endif
- {
+ } else {
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
int t = vp8_dct_value_tokens_ptr[v].Token;
@@ -868,9 +851,7 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
MACROBLOCKD *xd = &mb->e_mbd;
BLOCKD *b = &mb->e_mbd.block[0];
BLOCK *be = &mb->block[0];
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type;
-#endif
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
mb->src_diff,
@@ -878,24 +859,18 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
mb->e_mbd.predictor,
mb->block[0].src_stride);
-#if CONFIG_HYBRIDTRANSFORM16X16
tx_type = get_tx_type_16x16(xd, b);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 16);
} else
vp8_transform_mby_16x16(mb);
-#else
- vp8_transform_mby_16x16(mb);
-#endif
vp8_quantize_mby_16x16(mb);
-#if CONFIG_HYBRIDTRANSFORM16X16
// TODO(jingning) is it possible to quickly determine whether to force
// trailing coefficients to be zero, instead of running trellis
// optimization in the rate-distortion optimization loop?
if (mb->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED)
vp8_optimize_mby_16x16(mb, rtcd);
-#endif
d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
@@ -1153,10 +1128,8 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
ENTROPY_CONTEXT ta = *a, tempa = *a;
ENTROPY_CONTEXT tl = *l, templ = *l;
-#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type = DCT_DCT;
TX_TYPE best_tx_type = DCT_DCT;
-#endif
/*
* The predictor buffer is a 2d buffer with a stride of 16. Create
* a temp buffer that meets the stride requirements, but we are only
@@ -1189,7 +1162,6 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
b->bmi.as_mode.first = mode;
-#if CONFIG_HYBRIDTRANSFORM
tx_type = get_tx_type_4x4(xd, b);
if (tx_type != DCT_DCT) {
vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 4);
@@ -1198,10 +1170,6 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, b);
}
-#else
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4(be, b);
-#endif
tempa = ta;
templ = tl;
@@ -1219,9 +1187,7 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
*bestdistortion = distortion;
best_rd = this_rd;
*best_mode = mode;
-#if CONFIG_HYBRIDTRANSFORM
best_tx_type = tx_type;
-#endif
#if CONFIG_COMP_INTRA_PRED
*best_second_mode = mode2;
@@ -1240,17 +1206,12 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
b->bmi.as_mode.second = (B_PREDICTION_MODE)(*best_second_mode);
#endif
-#if CONFIG_HYBRIDTRANSFORM
// inverse transform
if (best_tx_type != DCT_DCT)
vp8_ihtllm_c(best_dqcoeff, b->diff, 32, best_tx_type, 4);
else
IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(
best_dqcoeff, b->diff, 32);
-#else
- IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(
- best_dqcoeff, b->diff, 32);
-#endif
vp8_recon_b(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -1533,15 +1494,11 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
vp8_subtract_4b_c(be, b, 16);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
-#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type = get_tx_type_8x8(xd, b);
if (tx_type != DCT_DCT)
vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32, tx_type, 8);
else
x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
-#else
- x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
-#endif
x->quantize_b_8x8(x->block + idx, xd->block + idx);
// compute quantization mse of 8x8 block
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index d46637a3e..575c7acf0 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -26,38 +26,26 @@
#ifdef ENTROPY_STATS
INT64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM
INT64 hybrid_context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
INT64 context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM8X8
INT64 hybrid_context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
INT64 context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM16X16
INT64 hybrid_context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
extern unsigned int tree_update_hist[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM
extern unsigned int hybrid_tree_update_hist[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
-#endif
extern unsigned int tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM8X8
extern unsigned int hybrid_tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#endif
extern unsigned int tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM16X16
extern unsigned int hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#endif
#endif /* ENTROPY_STATS */
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
@@ -134,9 +122,7 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
const int eob = b->eob; /* one beyond last nonzero coeff */
TOKENEXTRA *t = *tp; /* store tokens starting here */
const short *qcoeff_ptr = b->qcoeff;
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
int seg_eob = 256;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -162,22 +148,18 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
}
t->Token = x;
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
(band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_16x16[type][band][pt][x];
else
-#endif
++cpi->coef_counts_16x16[type][band][pt][x];
}
pt = vp8_prev_token_class[x];
@@ -310,9 +292,7 @@ static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0; /* start at DC unless type 0 */
TOKENEXTRA *t = *tp; /* store tokens starting here */
const short *qcoeff_ptr = b->qcoeff;
-#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
const int eob = b->eob;
int seg_eob = 64;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -338,11 +318,9 @@ static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
x = DCT_EOB_TOKEN;
t->Token = x;
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
@@ -350,11 +328,9 @@ static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_8x8[type][band][pt][x];
else
-#endif
++cpi->coef_counts_8x8[type][band][pt][x];
}
pt = vp8_prev_token_class[x];
@@ -451,7 +427,6 @@ static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
const int16_t *qcoeff_ptr = b->qcoeff;
int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
-#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type = get_tx_type(xd, &xd->block[block]);
switch (tx_type) {
case ADST_DCT:
@@ -464,7 +439,6 @@ static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
pt_scan = vp8_default_zig_zag1d;
break;
}
-#endif
a = (ENTROPY_CONTEXT *)xd->above_context + vp8_block2above[block];
l = (ENTROPY_CONTEXT *)xd->left_context + vp8_block2left[block];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
@@ -485,22 +459,18 @@ static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
token = DCT_EOB_TOKEN;
t->Token = token;
-#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
(band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts[type][band][pt][token];
else
-#endif
++cpi->coef_counts[type][band][pt][token];
}
pt = vp8_prev_token_class[token];
@@ -995,30 +965,24 @@ static __inline void stuff1st_order_b_8x8(MACROBLOCKD *xd,
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
const int band = vp8_coef_bands_8x8[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
(void) b;
t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
// t->section = 8;
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type == DCT_DCT)
++cpi->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
else
-#endif
++cpi->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
@@ -1100,29 +1064,23 @@ static __inline void stuff1st_order_b_16x16(MACROBLOCKD *xd,
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
const int band = vp8_coef_bands_16x16[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
(void) b;
t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
else
-#endif
++cpi->coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
@@ -1189,28 +1147,22 @@ static __inline void stuff1st_order_b_4x4(MACROBLOCKD *xd,
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
const int band = vp8_coef_bands[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts[type][band][pt][DCT_EOB_TOKEN];
else
-#endif
++cpi->coef_counts[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */