summaryrefslogtreecommitdiff
path: root/vp8/encoder
diff options
context:
space:
mode:
Diffstat (limited to 'vp8/encoder')
-rw-r--r--vp8/encoder/bitstream.c15
-rw-r--r--vp8/encoder/block.h7
-rw-r--r--vp8/encoder/dct.c4
-rw-r--r--vp8/encoder/dct.h5
-rw-r--r--vp8/encoder/defaultcoefcounts.h3
-rw-r--r--vp8/encoder/encodeframe.c189
-rw-r--r--vp8/encoder/encodeintra.c26
-rw-r--r--vp8/encoder/encodemb.c28
-rw-r--r--vp8/encoder/encodemb.h2
-rw-r--r--vp8/encoder/generic/csystemdependent.c4
-rw-r--r--vp8/encoder/onyx_if.c13
-rw-r--r--vp8/encoder/onyx_int.h4
-rw-r--r--vp8/encoder/picklpf.c4
-rw-r--r--vp8/encoder/quantize.c346
-rw-r--r--vp8/encoder/quantize.h8
-rw-r--r--vp8/encoder/ratectrl.c5
-rw-r--r--vp8/encoder/rdopt.c26
-rw-r--r--vp8/encoder/tokenize.c37
-rw-r--r--vp8/encoder/tokenize.h2
19 files changed, 33 insertions, 695 deletions
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 5be404167..8e22926e1 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -35,9 +35,7 @@ unsigned __int64 Sectionbits[500];
#ifdef ENTROPY_STATS
int intra_mode_stats[10][10][10];
static unsigned int tree_update_hist [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2];
-#if CONFIG_T8X8
static unsigned int tree_update_hist_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2];
-#endif
extern unsigned int active_section;
#endif
@@ -1685,9 +1683,7 @@ static int default_coef_context_savings(VP8_COMP *cpi)
int vp8_estimate_entropy_savings(VP8_COMP *cpi)
{
int savings = 0;
-#if CONFIG_T8X8
int i=0;
-#endif
VP8_COMMON *const cm = & cpi->common;
const int *const rfct = cpi->count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
@@ -1761,7 +1757,7 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi)
savings += default_coef_context_savings(cpi);
-#if CONFIG_T8X8
+
/* do not do this if not evena allowed */
if(cpi->common.txfm_mode == ALLOW_8X8)
{
@@ -1820,8 +1816,6 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi)
savings += savings8x8 >> 8;
}
-#endif
-
return savings;
}
@@ -1955,7 +1949,6 @@ static void update_coef_probs(VP8_COMP *cpi)
}
-#if CONFIG_T8X8
/* do not do this if not evena allowed */
if(cpi->common.txfm_mode == ALLOW_8X8)
{
@@ -2090,8 +2083,6 @@ static void update_coef_probs(VP8_COMP *cpi)
while (++i < BLOCK_TYPES);
}
}
-
-#endif
}
#ifdef PACKET_TESTING
FILE *vpxlogc = 0;
@@ -2400,9 +2391,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
}
}
-#if CONFIG_T8X8
vp8_write_bit(bc, pc->txfm_mode);
-#endif
// Encode the loop filter level and type
vp8_write_bit(bc, pc->filter_type);
@@ -2636,7 +2625,6 @@ void print_tree_update_probs()
fprintf(f, "};\n");
-#if CONFIG_T8X8
fprintf(f, "const vp8_prob tree_update_probs_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {\n");
for (i = 0; i < BLOCK_TYPES; i++)
@@ -2674,7 +2662,6 @@ void print_tree_update_probs()
fprintf(f, " },\n");
}
-#endif
fclose(f);
}
#endif
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 54f38590c..25d2398ce 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -47,9 +47,7 @@ typedef struct
int src_stride;
int eob_max_offset;
-#if CONFIG_T8X8
int eob_max_offset_8x8;
-#endif
} BLOCK;
@@ -131,11 +129,8 @@ typedef struct
unsigned int token_costs[BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
-
-#if CONFIG_T8X8
unsigned int token_costs_8x8[BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
int optimize;
int q_index;
@@ -145,12 +140,10 @@ typedef struct
void (*short_walsh4x4)(short *input, short *output, int pitch);
void (*quantize_b)(BLOCK *b, BLOCKD *d);
void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
- #if CONFIG_T8X8
void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
void (*short_fhaar2x2)(short *input, short *output, int pitch);
void (*quantize_b_8x8)(BLOCK *b, BLOCKD *d);
void (*quantize_b_2x2)(BLOCK *b, BLOCKD *d);
-#endif
} MACROBLOCK;
diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c
index b1e461e87..c2f2d1117 100644
--- a/vp8/encoder/dct.c
+++ b/vp8/encoder/dct.c
@@ -16,7 +16,7 @@
-#if CONFIG_T8X8
+
void vp8_short_fdct8x8_c(short *block, short *coefs, int pitch)
{
int j1, i, j, k;
@@ -126,7 +126,7 @@ void vp8_short_fhaar2x2_c(short *input, short *output, int pitch) //pitch = 8
op1[8]=(ip1[0] - ip1[1] - ip1[4] + ip1[8])>>1;
}
-#endif
+
void vp8_short_fdct4x4_c(short *input, short *output, int pitch)
{
int i;
diff --git a/vp8/encoder/dct.h b/vp8/encoder/dct.h
index c37d47aca..7ab525c0b 100644
--- a/vp8/encoder/dct.h
+++ b/vp8/encoder/dct.h
@@ -22,7 +22,7 @@
#include "arm/dct_arm.h"
#endif
-#if CONFIG_T8X8
+
#ifndef vp8_fdct_short8x8
#define vp8_fdct_short8x8 vp8_short_fdct8x8_c
@@ -34,7 +34,6 @@ extern prototype_fdct(vp8_fdct_short8x8);
#endif
extern prototype_fdct(vp8_fhaar_short2x2);
-#endif
#ifndef vp8_fdct_short4x4
#define vp8_fdct_short4x4 vp8_short_fdct4x4_c
@@ -63,10 +62,8 @@ extern prototype_fdct(vp8_fdct_walsh_short4x4);
typedef prototype_fdct(*vp8_fdct_fn_t);
typedef struct
{
-#if CONFIG_T8X8
vp8_fdct_fn_t short8x8;
vp8_fdct_fn_t haar_short2x2;
-#endif
vp8_fdct_fn_t short4x4;
vp8_fdct_fn_t short8x4;
vp8_fdct_fn_t fast4x4;
diff --git a/vp8/encoder/defaultcoefcounts.h b/vp8/encoder/defaultcoefcounts.h
index 3b54c823c..f2729d9ce 100644
--- a/vp8/encoder/defaultcoefcounts.h
+++ b/vp8/encoder/defaultcoefcounts.h
@@ -223,7 +223,6 @@ static const unsigned int default_coef_counts[BLOCK_TYPES]
};
-#if CONFIG_T8X8
const unsigned int vp8_default_coef_counts_8x8[BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
@@ -399,4 +398,4 @@ const unsigned int vp8_default_coef_counts_8x8[BLOCK_TYPES]
}
}
};
-#endif
+
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index d6910677f..114c7b648 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -102,187 +102,6 @@ static const unsigned char VP8_VAR_OFFS[16]=
};
-
-#if CONFIG_T8X8
-
-//INTRA mode transform size
-//When all three criteria are off the default is 4x4
-//#define INTRA_VARIANCE_ENTROPY_CRITERIA
-#define INTRA_WTD_SSE_ENTROPY_CRITERIA
-//#define INTRA_TEST_8X8_ONLY
-//
-//INTER mode transform size
-//When all three criteria are off the default is 4x4
-//#define INTER_VARIANCE_ENTROPY_CRITERIA
-#define INTER_WTD_SSE_ENTROPY_CRITERIA
-//#define INTER_TEST_8X8_ONLY
-
-double variance_Block(short *b1, int pitch, int dimension)
-{
- short ip[8][8]={{0}};
- short *b = b1;
- int i, j = 0;
- double mean = 0.0, variance = 0.0;
- for (i = 0; i < dimension; i++)
- {
- for (j = 0; j < dimension; j++)
- {
- ip[i][j] = b[j];
- mean += ip[i][j];
- }
- b += pitch;
- }
- mean /= (dimension*dimension);
-
- for (i = 0; i < dimension; i++)
- {
- for (j = 0; j < dimension; j++)
- {
- variance += (ip[i][j]-mean)*(ip[i][j]-mean);
- }
- }
- variance /= (dimension*dimension);
- return variance;
-}
-
-double mean_Block(short *b, int pitch, int dimension)
-{
- short ip[8][8]={{0}};
- int i, j = 0;
- double mean = 0;
- for (i = 0; i < dimension; i++)
- {
- for (j = 0; j < dimension; j++)
- {
- ip[i][j] = b[j];
- mean += ip[i][j];
- }
- b += pitch;
- }
- mean /= (dimension*dimension);
-
- return mean;
-}
-
-int SSE_Block(short *b, int pitch, int dimension)
-{
- int i, j, sse_block = 0;
- for (i = 0; i < dimension; i++)
- {
- for (j = 0; j < dimension; j++)
- {
- sse_block += b[j]*b[j];
- }
- b += pitch;
- }
- return sse_block;
-}
-
-double Compute_Variance_Entropy(MACROBLOCK *x)
-{
- double variance_8[4] = {0.0, 0.0, 0.0, 0.0}, sum_var = 0.0, all_entropy = 0.0;
- variance_8[0] = variance_Block(x->block[0].src_diff, 16, 8);
- variance_8[1] = variance_Block(x->block[2].src_diff, 16, 8);
- variance_8[2] = variance_Block(x->block[8].src_diff, 16, 8);
- variance_8[3] = variance_Block(x->block[10].src_diff, 16, 8);
- sum_var = variance_8[0] + variance_8[1] + variance_8[2] + variance_8[3];
- if(sum_var)
- {
- int i;
- for(i = 0; i <4; i++)
- {
- if(variance_8[i])
- {
- variance_8[i] /= sum_var;
- all_entropy -= variance_8[i]*log(variance_8[i]);
- }
- }
- }
- return (all_entropy /log(2));
-}
-
-double Compute_Wtd_SSE_SubEntropy(MACROBLOCK *x)
-{
- double variance_8[4] = {0.0, 0.0, 0.0, 0.0};
- double entropy_8[4] = {0.0, 0.0, 0.0, 0.0};
- double sse_1, sse_2, sse_3, sse_4, sse_0;
- int i;
- for (i=0;i<3;i+=2)
- {
- sse_0 = SSE_Block(x->block[i].src_diff, 16, 8);
- if(sse_0)
- {
- sse_1 = SSE_Block(x->block[i].src_diff, 16, 4)/sse_0;
- sse_2 = SSE_Block(x->block[i+1].src_diff, 16, 4)/sse_0;
- sse_3 = SSE_Block(x->block[i+4].src_diff, 16, 4)/sse_0;
- sse_4 = SSE_Block(x->block[i+5].src_diff, 16, 4)/sse_0;
- variance_8[i]= variance_Block(x->block[i].src_diff, 16, 8);
- if(sse_1 && sse_2 && sse_3 && sse_4)
- entropy_8[i]= (-sse_1*log(sse_1)
- -sse_2*log(sse_2)
- -sse_3*log(sse_3)
- -sse_4*log(sse_4))/log(2);
- }
- }
- for (i=8;i<11;i+=2)
- {
- if(sse_0)
- {
- sse_0 = SSE_Block(x->block[i].src_diff, 16, 8);
- sse_1 = SSE_Block(x->block[i].src_diff, 16, 4)/sse_0;
- sse_2 = SSE_Block(x->block[i+1].src_diff, 16, 4)/sse_0;
- sse_3 = SSE_Block(x->block[i+4].src_diff, 16, 4)/sse_0;
- sse_4 = SSE_Block(x->block[i+5].src_diff, 16, 4)/sse_0;
- variance_8[i-7]= variance_Block(x->block[i].src_diff, 16, 8);
- if(sse_1 && sse_2 && sse_3 && sse_4)
- entropy_8[i-7]= (-sse_1*log(sse_1)
- -sse_2*log(sse_2)
- -sse_3*log(sse_3)
- -sse_4*log(sse_4))/log(2);
- }
- }
-
- if(variance_8[0]+variance_8[1]+variance_8[2]+variance_8[3])
- return (entropy_8[0]*variance_8[0]+
- entropy_8[1]*variance_8[1]+
- entropy_8[2]*variance_8[2]+
- entropy_8[3]*variance_8[3])/
- (variance_8[0]+
- variance_8[1]+
- variance_8[2]+
- variance_8[3]);
- else
- return 0;
-}
-
-int vp8_8x8_selection_intra(MACROBLOCK *x)
-{
-#ifdef INTRA_VARIANCE_ENTROPY_CRITERIA
- return (Compute_Variance_Entropy(x) > 1.2);
-#elif defined(INTRA_WTD_SSE_ENTROPY_CRITERIA)
- return (Compute_Wtd_SSE_SubEntropy(x) > 1.2);
-#elif defined(INTRA_TEST_8X8_ONLY)
- return 1;
-#else
- return 0; //when all criteria are off use the default 4x4 only
-#endif
-}
-
-int vp8_8x8_selection_inter(MACROBLOCK *x)
-{
-#ifdef INTER_VARIANCE_ENTROPY_CRITERIA
- return (Compute_Variance_Entropy(x) > 1.5);
-#elif defined(INTER_WTD_SSE_ENTROPY_CRITERIA)
- return (Compute_Wtd_SSE_SubEntropy(x) > 1.5);
-#elif defined(INTER_TEST_8X8_ONLY)
- return 1;
-#else
- return 0; //when all criteria are off use the default 4x4 only
-#endif
-}
-
-#endif
-
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
@@ -876,10 +695,8 @@ void encode_mb_row(VP8_COMP *cpi,
x->active_ptr = cpi->active_map + map_index + mb_col;
-#if CONFIG_T8X8
/* force 4x4 transform for mode selection */
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
-#endif
if (cm->frame_type == KEY_FRAME)
{
@@ -1485,7 +1302,6 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
vp8_update_zbin_extra(cpi, x);
}
-#if CONFIG_T8X8
/* test code: set transform size based on mode selection */
if(cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
@@ -1499,7 +1315,6 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count ++;
}
-#endif
if(x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED)
{
@@ -1583,7 +1398,7 @@ int vp8cx_encode_inter_macroblock
cpi->comp_pred_count[pred_context]++;
}
-#if CONFIG_T8X8
+
/* test code: set transform size based on mode selection */
if( cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
@@ -1598,7 +1413,7 @@ int vp8cx_encode_inter_macroblock
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count++;
}
-#endif
+
/* switch back to the regular quantizer for the encode */
if (cpi->sf.improved_quant)
{
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 63a146551..c791762ad 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -114,9 +114,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
BLOCK *b = &x->block[0];
-#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-#endif
#if CONFIG_COMP_INTRA_PRED
if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE) (DC_PRED - 1))
@@ -129,35 +127,27 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_transform_intra_mby_8x8(x);
else
-#endif
- vp8_transform_intra_mby(x);
+ vp8_transform_intra_mby(x);
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_quantize_mby_8x8(x);
else
-#endif
vp8_quantize_mby(x);
if (x->optimize)
{
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_optimize_mby_8x8(x, rtcd);
else
-#endif
vp8_optimize_mby(x, rtcd);
}
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
-#endif
vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
#ifdef ENC_DEBUG
@@ -198,9 +188,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
-#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-#endif
#if CONFIG_COMP_INTRA_PRED
if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE) (DC_PRED - 1))
{
@@ -215,18 +203,14 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
#endif
ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_transform_mbuv_8x8(x);
else
-#endif
vp8_transform_mbuv(x);
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_quantize_mbuv_8x8(x);
else
-#endif
vp8_quantize_mbuv(x);
#ifdef ENC_DEBUG
@@ -262,20 +246,16 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
#endif
if (x->optimize)
{
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_optimize_mbuv_8x8(x, rtcd);
else
-#endif
vp8_optimize_mbuv(x, rtcd);
}
-#if CONFIG_T8X8
if(tx_type == TX_8X8)
- vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
-#endif
- vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index 22bf92b07..ab0f1c13e 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -142,7 +142,6 @@ static void build_dcblock(MACROBLOCK *x)
src_diff_ptr[i] = x->coeff[i * 16];
}
}
-#if CONFIG_T8X8
void vp8_build_dcblock_8x8(MACROBLOCK *x)
{
short *src_diff_ptr = &x->src_diff[384];
@@ -156,7 +155,7 @@ void vp8_build_dcblock_8x8(MACROBLOCK *x)
src_diff_ptr[4] = x->coeff[8 * 16];
src_diff_ptr[8] = x->coeff[12 * 16];
}
-#endif
+
void vp8_transform_mbuv(MACROBLOCK *x)
{
int i;
@@ -236,8 +235,6 @@ static void transform_mby(MACROBLOCK *x)
}
}
-#if CONFIG_T8X8
-
void vp8_transform_mbuv_8x8(MACROBLOCK *x)
{
int i;
@@ -338,7 +335,6 @@ void vp8_transform_mby_8x8(MACROBLOCK *x)
}
}
-#endif
#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
#define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
@@ -798,7 +794,6 @@ void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
}
}
-#if CONFIG_T8X8
void optimize_b_8x8(MACROBLOCK *mb, int i, int type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
const VP8_ENCODER_RTCD *rtcd)
@@ -1150,50 +1145,37 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
}
}
-#endif
void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
-#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-#endif
-
vp8_build_inter_predictors_mb(&x->e_mbd);
vp8_subtract_mb(rtcd, x);
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_transform_mb_8x8(x);
else
-#endif
transform_mb(x);
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_quantize_mb_8x8(x);
else
-#endif
vp8_quantize_mb(x);
if (x->optimize)
{
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
optimize_mb_8x8(x, rtcd);
else
-#endif
optimize_mb(x, rtcd);
}
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
-#endif
vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
{
#ifdef ENC_DEBUG
@@ -1225,7 +1207,6 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
}
#endif
}
-#endif
RECON_INVOKE(&rtcd->common->recon, recon_mb)
(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
@@ -1251,9 +1232,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
/* this function is used by first pass only */
void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
-#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-#endif
BLOCK *b = &x->block[0];
@@ -1261,19 +1240,16 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
-#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_transform_mby_8x8(x);
else
-#endif
transform_mby(x);
vp8_quantize_mby(x);
-#if CONFIG_T8X8
+
if( tx_type == TX_8X8 )
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
-#endif
vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
RECON_INVOKE(&rtcd->common->recon, recon_mby)
diff --git a/vp8/encoder/encodemb.h b/vp8/encoder/encodemb.h
index 995ce4f0a..e211eea65 100644
--- a/vp8/encoder/encodemb.h
+++ b/vp8/encoder/encodemb.h
@@ -104,7 +104,6 @@ void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
-#if CONFIG_T8X8
void vp8_transform_mb_8x8(MACROBLOCK *mb);
void vp8_transform_mby_8x8(MACROBLOCK *x);
void vp8_transform_mbuv_8x8(MACROBLOCK *x);
@@ -112,7 +111,6 @@ void vp8_transform_intra_mby_8x8(MACROBLOCK *x);
void vp8_build_dcblock_8x8(MACROBLOCK *b);
void vp8_optimize_mby_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
-#endif
void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch);
diff --git a/vp8/encoder/generic/csystemdependent.c b/vp8/encoder/generic/csystemdependent.c
index b6fc0dc9b..ebb16e3f0 100644
--- a/vp8/encoder/generic/csystemdependent.c
+++ b/vp8/encoder/generic/csystemdependent.c
@@ -69,10 +69,8 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
-#if CONFIG_T8X8
cpi->rtcd.fdct.short8x8 = vp8_short_fdct8x8_c;
cpi->rtcd.fdct.haar_short2x2 = vp8_short_fhaar2x2_c;
-#endif
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
@@ -90,12 +88,10 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
cpi->rtcd.quantize.quantb_pair = vp8_regular_quantize_b_pair;
cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_c;
cpi->rtcd.quantize.fastquantb_pair = vp8_fast_quantize_b_pair_c;
-#if CONFIG_T8X8
cpi->rtcd.quantize.quantb_8x8 = vp8_regular_quantize_b_8x8;
cpi->rtcd.quantize.fastquantb_8x8 = vp8_fast_quantize_b_8x8_c;
cpi->rtcd.quantize.quantb_2x2 = vp8_regular_quantize_b_2x2;
cpi->rtcd.quantize.fastquantb_2x2 = vp8_fast_quantize_b_2x2_c;
-#endif
cpi->rtcd.search.full_search = vp8_full_search_sad;
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 3184932d6..925e24755 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -964,25 +964,19 @@ void vp8_set_speed_features(VP8_COMP *cpi)
if (cpi->sf.improved_dct)
{
-#if CONFIG_T8X8
cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
-#endif
cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4);
cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4);
}
else
{
-#if CONFIG_T8X8
cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
-#endif
cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4);
cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4);
}
cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4);
-#if CONFIG_T8X8
cpi->mb.short_fhaar2x2 = FDCT_INVOKE(&cpi->rtcd.fdct, haar_short2x2);
-#endif
if (cpi->sf.improved_quant)
{
@@ -990,10 +984,8 @@ void vp8_set_speed_features(VP8_COMP *cpi)
quantb);
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
quantb_pair);
-#if CONFIG_T8X8
cpi->mb.quantize_b_8x8 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb_8x8);
cpi->mb.quantize_b_2x2 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb_2x2);
-#endif
}
else
{
@@ -1001,10 +993,8 @@ void vp8_set_speed_features(VP8_COMP *cpi)
fastquantb);
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
fastquantb_pair);
-#if CONFIG_T8X8
cpi->mb.quantize_b_8x8 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb_8x8);
cpi->mb.quantize_b_2x2 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb_2x2);
-#endif
}
if (cpi->sf.improved_quant != last_improved_quant)
vp8cx_init_quantizer(cpi);
@@ -1863,9 +1853,8 @@ void vp8_remove_compressor(VP8_PTR *ptr)
#if CONFIG_INTERNAL_STATS
vp8_clear_system_state();
-#if CONFIG_T8X8
+
printf("\n8x8-4x4:%d-%d\n", cpi->t8x8_count, cpi->t4x4_count);
-#endif
if (cpi->pass != 1)
{
FILE *f = fopen("opsnr.stt", "a");
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index fa2da1dd6..9c655b845 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -453,11 +453,9 @@ typedef struct VP8_COMP
//save vp8_tree_probs_from_distribution result for each frame to avoid repeat calculation
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_T8X8
unsigned int coef_counts_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
int gfu_boost;
int kf_boost;
@@ -514,10 +512,8 @@ typedef struct VP8_COMP
int gf_update_recommended;
int skip_true_count;
int skip_false_count;
-#if CONFIG_T8X8
int t4x4_count;
int t8x8_count;
-#endif
#if CONFIG_UVINTRA
int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];
diff --git a/vp8/encoder/picklpf.c b/vp8/encoder/picklpf.c
index 54a50fb08..171ec3a4d 100644
--- a/vp8/encoder/picklpf.c
+++ b/vp8/encoder/picklpf.c
@@ -354,11 +354,9 @@ void vp8cx_pick_filter_level_sg(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi, int segme
if (cpi->twopass.section_intra_rating < 20)
Bias = Bias * cpi->twopass.section_intra_rating / 20;
-#if CONFIG_T8X8
// yx, bias less for large block size
if(cpi->common.txfm_mode == ALLOW_8X8)
Bias >>= 1;
-#endif
filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
@@ -578,11 +576,9 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
if (cpi->twopass.section_intra_rating < 20)
Bias = Bias * cpi->twopass.section_intra_rating / 20;
-#if CONFIG_T8X8
// yx, bias less for large block size
if(cpi->common.txfm_mode == ALLOW_8X8)
Bias >>= 1;
-#endif
filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index 3d1c2ff35..636ed3a67 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -23,56 +23,6 @@ extern int enc_debug;
#endif
#define EXACT_QUANT
-
-#ifdef EXACT_FASTQUANT
-void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant_fast;
- unsigned char *quant_shift_ptr = b->quant_shift;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
-
- vpx_memset(qcoeff_ptr, 0, 32);
- vpx_memset(dqcoeff_ptr, 0, 32);
-
- eob = -1;
-
- for (i = 0; i < 16; i++)
- {
- rc = vp8_default_zig_zag1d[i];
- z = coeff_ptr[rc];
- zbin = zbin_ptr[rc] ;
-
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- x += round_ptr[rc];
- y = (((x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- }
- }
- }
- d->eob = eob + 1;
-}
-
-#else
-
void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
{
int i, rc, eob, nonzeros;
@@ -83,11 +33,10 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
-#if CONFIG_T8X8
vpx_memset(qcoeff_ptr, 0, 32);
vpx_memset(dqcoeff_ptr, 0, 32);
-#endif
+
eob = -1;
for (i = 0; i < 16; i++)
{
@@ -110,7 +59,7 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
-#endif
+
#ifdef EXACT_QUANT
void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d)
@@ -277,7 +226,8 @@ void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
-#endif //EXACT_QUANT
+#endif
+//EXACT_QUANT
void vp8_quantize_mby_c(MACROBLOCK *x)
@@ -314,113 +264,8 @@ void vp8_quantize_mbuv_c(MACROBLOCK *x)
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
-#if CONFIG_T8X8
-
-#ifdef EXACT_FASTQUANT
-void vp8_fast_quantize_b_2x2_c(BLOCK *b, BLOCKD *d)
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant;
- short *quant_shift_ptr = b->quant_shift;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
- //double q2nd = 4;
- vpx_memset(qcoeff_ptr, 0, 32);
- vpx_memset(dqcoeff_ptr, 0, 32);
-
- eob = -1;
-
- for (i = 0; i < 4; i++)
- {
- rc = vp8_default_zig_zag1d[i];
- z = coeff_ptr[rc];
- //zbin = zbin_ptr[rc]/q2nd ;
- zbin = zbin_ptr[rc] ;
-
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- //x += (round_ptr[rc]/q2nd);
- x += (round_ptr[rc]);
- //y = ((int)((int)(x * quant_ptr[rc] * q2nd) >> 16) + x)
- // >> quant_shift_ptr[rc]; // quantize (x)
- y = ((int)((int)(x * quant_ptr[rc]) >> 16) + x)
- >> quant_shift_ptr[rc]; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- }
- }
- }
- d->eob = eob + 1;
-}
-
-void vp8_fast_quantize_b_8x8_c(BLOCK *b, BLOCKD *d)// only ac and dc difference, no difference among ac
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant;
- short *quant_shift_ptr = b->quant_shift;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
- //double q1st = 2;
- vpx_memset(qcoeff_ptr, 0, 64*sizeof(short));
- vpx_memset(dqcoeff_ptr, 0, 64*sizeof(short));
-
- eob = -1;
-
- for (i = 0; i < 64; i++)
- {
- rc = vp8_default_zig_zag1d_8x8[i];
- z = coeff_ptr[rc];
- //zbin = zbin_ptr[rc!=0]/q1st ;
- zbin = zbin_ptr[rc!=0] ;
-
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- //x += round_ptr[rc]/q1st;
- //y = ((int)(((int)((x * quant_ptr[rc!=0] * q1st)) >> 16) + x))
- // >> quant_shift_ptr[rc!=0]; // quantize (x)
- x += round_ptr[rc];
- y = ((int)(((int)((x * quant_ptr[rc!=0])) >> 16) + x))
- >> quant_shift_ptr[rc!=0]; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- //dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0] / q1st; // dequantized value
- dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- }
- }
- }
- d->eob = eob + 1;
-}
-
-#else
void vp8_fast_quantize_b_2x2_c(BLOCK *b, BLOCKD *d)
{
@@ -520,9 +365,9 @@ void vp8_fast_quantize_b_8x8_c(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
-#endif //EXACT_FASTQUANT
-#ifdef EXACT_QUANT
+
+
void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
{
int i, rc, eob;
@@ -757,107 +602,7 @@ void vp8_strict_quantize_b_8x8(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
-#else
-void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *zbin_boost_ptr = b->zrun_zbin_boost;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
- short zbin_oq_value = b->zbin_extra;
- //double q2nd = 4;
- vpx_memset(qcoeff_ptr, 0, 32);
- vpx_memset(dqcoeff_ptr, 0, 32);
-
- eob = -1;
- for (i = 0; i < 4; i++)
- {
- rc = vp8_default_zig_zag1d[i];
- z = coeff_ptr[rc];
- //zbin = (zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value)/q2nd;
- zbin = (zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value);
- zbin_boost_ptr ++;
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- //y = (((x + round_ptr[rc]/q2nd) * quant_ptr[rc]*q2nd)) >> 16; // quantize (x)
- y = (((x + round_ptr[rc]) * quant_ptr[rc])) >> 16; // quantize (x)
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- //dqcoeff_ptr[rc] = x * dequant_ptr[rc]/q2nd; // dequantized value
- dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- zbin_boost_ptr = &b->zrun_zbin_boost[0]; // reset zero runlength
- }
- }
- }
-
- d->eob = eob + 1;
-}
-
-void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d)
-{
- int i, rc, eob;
- int zbin;
- int x, y, z, sz;
- short *zbin_boost_ptr = b->zrun_zbin_boost;
- short *coeff_ptr = b->coeff;
- short *zbin_ptr = b->zbin;
- short *round_ptr = b->round;
- short *quant_ptr = b->quant;
- short *qcoeff_ptr = d->qcoeff;
- short *dqcoeff_ptr = d->dqcoeff;
- short *dequant_ptr = d->dequant;
- short zbin_oq_value = b->zbin_extra;
- //double q1st = 2;
- vpx_memset(qcoeff_ptr, 0, 64*sizeof(short));
- vpx_memset(dqcoeff_ptr, 0, 64*sizeof(short));
-
- eob = -1;
- for (i = 0; i < 64; i++)
- {
-
- rc = vp8_default_zig_zag1d_8x8[i];
- z = coeff_ptr[rc];
- //zbin = (zbin_ptr[rc!=0] + *zbin_boost_ptr + zbin_oq_value)/q1st;
- zbin = (zbin_ptr[rc!=0] + *zbin_boost_ptr + zbin_oq_value);
- zbin_boost_ptr ++;
- sz = (z >> 31); // sign of z
- x = (z ^ sz) - sz; // x = abs(z)
-
- if (x >= zbin)
- {
- //y = ((x + round_ptr[rc!=0]/q1st) * quant_ptr[rc!=0] * q1st) >> 16;
- y = ((x + round_ptr[rc!=0]) * quant_ptr[rc!=0]) >> 16;
- x = (y ^ sz) - sz; // get the sign back
- qcoeff_ptr[rc] = x; // write to destination
- //dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0]/q1st; // dequantized value
- dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0]; // dequantized value
-
- if (y)
- {
- eob = i; // last nonzero coeffs
- zbin_boost_ptr = &b->zrun_zbin_boost[0]; // reset zero runlength
- }
- }
- }
- d->eob = eob + 1;
-}
-
-#endif //EXACT_QUANT
void vp8_quantize_mby_8x8(MACROBLOCK *x)
{
@@ -905,7 +650,7 @@ void vp8_quantize_mbuv_8x8(MACROBLOCK *x)
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
}
-#endif //CONFIG_T8X8
+
/* quantize_b_pair function pointer in MACROBLOCK structure is set to one of
* these two C functions if corresponding optimized routine is not available.
@@ -1023,69 +768,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
}
}
}
-#else
-void vp8cx_init_quantizer(VP8_COMP *cpi)
-{
- int i;
- int quant_val;
- int Q;
- int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
- int qrounding_factor = 48;
-
- for (Q = 0; Q < QINDEX_RANGE; Q++)
- {
- int qzbin_factor = (vp8_dc_quant(Q,0) < 148 ) ? 84: 80;
-
- // dc values
- quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
- cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
- cpi->Y1zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->Y1round[Q][0] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y1dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
- cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
- cpi->Y2zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->Y2round[Q][0] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y2dequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
- cpi->UVquant[Q][0] = (1 << 16) / quant_val;
- cpi->UVzbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->UVround[Q][0] = (qrounding_factor * quant_val) >> 7;
- cpi->common.UVdequant[Q][0] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
-
- // all the ac values = ;
- for (i = 1; i < 16; i++)
- {
- int rc = vp8_default_zig_zag1d[i];
-
- quant_val = vp8_ac_yquant(Q);
- cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
- cpi->Y1zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->Y1round[Q][rc] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y1dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
- cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
- cpi->Y2zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->Y2round[Q][rc] = (qrounding_factor * quant_val) >> 7;
- cpi->common.Y2dequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
-
- quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
- cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
- cpi->UVzbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
- cpi->UVround[Q][rc] = (qrounding_factor * quant_val) >> 7;
- cpi->common.UVdequant[Q][rc] = quant_val;
- cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
- }
- }
-}
#endif
@@ -1139,17 +821,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
x->block[i].eob_max_offset =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#endif
}
else
{
x->block[i].eob_max_offset = 16;
-#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 = 64;
-#endif
}
}
@@ -1175,19 +853,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
x->block[i].eob_max_offset =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#endif
-
}
else
{
x->block[i].eob_max_offset = 16;
-#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 = 64;
-#endif
-
}
}
@@ -1212,17 +884,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
x->block[24].eob_max_offset =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#if CONFIG_T8X8
x->block[24].eob_max_offset_8x8 =
get_segdata( xd, segment_id, SEG_LVL_EOB );
-#endif
}
else
{
x->block[24].eob_max_offset = 16;
-#if CONFIG_T8X8
x->block[24].eob_max_offset_8x8 = 4;
-#endif
}
/* save this macroblock QIndex for vp8_update_zbin_extra() */
diff --git a/vp8/encoder/quantize.h b/vp8/encoder/quantize.h
index 1a2bad667..93a1b714c 100644
--- a/vp8/encoder/quantize.h
+++ b/vp8/encoder/quantize.h
@@ -45,7 +45,7 @@ extern prototype_quantize_block_pair(vp8_quantize_quantb_pair);
#define vp8_quantize_fastquantb vp8_fast_quantize_b_c
#endif
extern prototype_quantize_block(vp8_quantize_fastquantb);
-#if CONFIG_T8X8
+
#ifndef vp8_quantize_quantb_8x8
#define vp8_quantize_quantb_8x8 vp8_regular_quantize_b_8x8
#endif
@@ -65,7 +65,7 @@ extern prototype_quantize_block(vp8_quantize_quantb_2x2);
#define vp8_quantize_fastquantb_2x2 vp8_fast_quantize_b_2x2_c
#endif
extern prototype_quantize_block(vp8_quantize_fastquantb_2x2);
-#endif
+
#ifndef vp8_quantize_fastquantb_pair
#define vp8_quantize_fastquantb_pair vp8_fast_quantize_b_pair_c
@@ -77,12 +77,10 @@ typedef struct
prototype_quantize_block(*quantb);
prototype_quantize_block_pair(*quantb_pair);
prototype_quantize_block(*fastquantb);
-#if CONFIG_T8X8
prototype_quantize_block(*quantb_8x8);
prototype_quantize_block(*fastquantb_8x8);
prototype_quantize_block(*quantb_2x2);
prototype_quantize_block(*fastquantb_2x2);
-#endif
prototype_quantize_block_pair(*fastquantb_pair);
} vp8_quantize_rtcd_vtable_t;
@@ -108,10 +106,8 @@ extern prototype_quantize_mb(vp8_quantize_mby);
#endif
extern void vp8_strict_quantize_b(BLOCK *b,BLOCKD *d);
-#if CONFIG_T8X8
extern void vp8_strict_quantize_b_8x8(BLOCK *b,BLOCKD *d);
extern void vp8_strict_quantize_b_2x2(BLOCK *b,BLOCKD *d);
-#endif
struct VP8_COMP;
extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);
extern void vp8cx_frame_init_quantizer(struct VP8_COMP *cpi);
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index 47609306c..bf06f32e1 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -242,9 +242,7 @@ void vp8_setup_key_frame(VP8_COMP *cpi)
#endif
-#if CONFIG_T8X8
cpi->common.txfm_mode = ONLY_4X4;
-#endif
//cpi->common.filter_level = 0; // Reset every key frame.
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8 ;
@@ -268,13 +266,12 @@ void vp8_setup_key_frame(VP8_COMP *cpi)
}
void vp8_setup_inter_frame(VP8_COMP *cpi)
{
-#if CONFIG_T8X8
+
if(cpi->common.Width * cpi->common.Height > 640*360)
//||cpi->this_frame_target < 7 * cpi->common.MBs)
cpi->common.txfm_mode = ALLOW_8X8;
else
cpi->common.txfm_mode = ONLY_4X4;
-#endif
if(cpi->common.refresh_alt_ref_frame)
{
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 609b70f8e..58b3216d6 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -353,12 +353,10 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex)
(const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs
);
-#if CONFIG_T8X8
fill_token_costs(
cpi->mb.token_costs_8x8,
(const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs_8x8
);
-#endif
#if CONFIG_QIMODE
//rough estimate for costing
cpi->common.kf_ymode_probs_index = cpi->common.base_qindex>>4;
@@ -664,7 +662,6 @@ static void macro_block_yrd( MACROBLOCK *mb,
*Rate = vp8_rdcost_mby(mb);
}
-#if CONFIG_T8X8
static int cost_coeffs_2x2(MACROBLOCK *mb,
BLOCKD *b, int type,
@@ -794,7 +791,6 @@ static void macro_block_yrd_8x8( MACROBLOCK *mb,
// rate
*Rate = vp8_rdcost_mby_8x8(mb);
}
-#endif
static void copy_predictor(unsigned char *dst, const unsigned char *predictor)
{
@@ -1311,7 +1307,7 @@ static int rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
-#if CONFIG_T8X8
+
static int rd_cost_mbuv_8x8(MACROBLOCK *mb)
{
int b;
@@ -1351,7 +1347,7 @@ static int rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
-#endif
+
static int rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int fullpixel)
@@ -2470,10 +2466,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
int rate2, distortion2;
int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly;
int uv_intra_tteob = 0;
-#if CONFIG_T8X8
int uv_intra_rate_8x8, uv_intra_distortion_8x8, uv_intra_rate_tokenonly_8x8;
int uv_intra_tteob_8x8=0;
-#endif
int rate_y, UNINITIALIZED_IS_SAFE(rate_uv);
int distortion_uv;
int best_yrd = INT_MAX;
@@ -2564,9 +2558,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
for(i=16; i<24; i++)
uv_intra_tteob += x->e_mbd.block[i].eob;
-#if CONFIG_T8X8
uv_intra_tteob_8x8 = uv_intra_tteob;
-#endif
// Get estimates of reference frame costs for each reference frame
// that depend on the current prediction etc.
@@ -2770,12 +2762,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// FIXME compound intra prediction
RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
(&x->e_mbd);
-#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
macro_block_yrd_8x8(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd)) ;
else
-#endif
macro_block_yrd(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd.encodemb)) ;
rate2 += rate_y;
@@ -3014,12 +3004,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
rate2 += vp8_cost_mv_ref(&cpi->common, this_mode, mdcounts);
// Y cost and distortion
-#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
macro_block_yrd_8x8(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd));
else
-#endif
macro_block_yrd(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd.encodemb));
@@ -3029,13 +3017,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// UV cost and distortion
vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
-#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
else
-#endif
rd_inter16x16_uv(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
@@ -3126,12 +3112,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
&x->e_mbd.predictor[320], 16, 8);
/* Y cost and distortion */
-#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
macro_block_yrd_8x8(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd));
else
-#endif
macro_block_yrd(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd.encodemb));
@@ -3139,13 +3123,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
distortion2 += distortion;
/* UV cost and distortion */
-#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
else
-#endif
rd_inter16x16_uv(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
@@ -3195,7 +3177,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if(has_y2_block)
tteob += x->e_mbd.block[24].eob;
-#if CONFIG_T8X8
if(cpi->common.txfm_mode ==ALLOW_8X8 && has_y2_block)
{
for (i = 0; i < 16; i+=4)
@@ -3211,7 +3192,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
}
}
else
-#endif
{
for (i = 0; i < 16; i++)
tteob += (x->e_mbd.block[i].eob > has_y2_block);
@@ -3467,9 +3447,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate_)
{
-#if CONFIG_T8X8
MACROBLOCKD *xd = &x->e_mbd;
-#endif
int error4x4, error16x16;
int rate4x4, rate16x16 = 0, rateuv;
int dist4x4, dist16x16, distuv;
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index d496bbde1..e2c9dc9a7 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -24,14 +24,10 @@
#ifdef ENTROPY_STATS
_int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_T8X8
_int64 context_counters_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
-#endif
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
-#if CONFIG_T8X8
void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
-#endif
void vp8_fix_contexts(MACROBLOCKD *x);
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE*2];
@@ -104,7 +100,6 @@ static void fill_value_tokens()
vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
}
-#if CONFIG_T8X8
static void tokenize2nd_order_b_8x8
(
MACROBLOCKD *xd,
@@ -176,7 +171,6 @@ static void tokenize2nd_order_b_8x8
*a = *l = pt;
}
-#endif
static void tokenize2nd_order_b
(
@@ -247,7 +241,7 @@ static void tokenize2nd_order_b
*a = *l = pt;
}
-#if CONFIG_T8X8
+
static void tokenize1st_order_b_8x8
(
MACROBLOCKD *xd,
@@ -313,7 +307,7 @@ static void tokenize1st_order_b_8x8
*a = *l = pt;
}
-#endif
+
static void tokenize1st_order_b
@@ -465,7 +459,7 @@ static int mb_is_skippable(MACROBLOCKD *x, int has_y2_block)
return skip;
}
-#if CONFIG_T8X8
+
static int mb_is_skippable_8x8(MACROBLOCKD *x)
{
int has_y2_block;
@@ -485,17 +479,14 @@ static int mb_is_skippable_8x8(MACROBLOCKD *x)
return skip;
}
-#endif
+
void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
{
int plane_type;
int has_y2_block;
int b;
-
-#if CONFIG_T8X8
int tx_type = x->mode_info_context->mbmi.txfm_size;
-#endif
// If the MB is going to be skipped because of a segment level flag
// exclude this from the skip count stats used to calculate the
@@ -516,13 +507,9 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
&& x->mode_info_context->mbmi.mode != SPLITMV);
x->mode_info_context->mbmi.mb_skip_coeff =
-#if CONFIG_T8X8
(( tx_type == TX_8X8 ) ?
mb_is_skippable_8x8(x) :
mb_is_skippable(x, has_y2_block));
-#else
- mb_is_skippable(x, has_y2_block);
-#endif
if (x->mode_info_context->mbmi.mb_skip_coeff)
{
@@ -530,11 +517,9 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
if (!cpi->common.mb_no_coeff_skip)
{
-#if CONFIG_T8X8
if ( tx_type == TX_8X8 )
vp8_stuff_mb_8x8(cpi, x, t) ;
else
-#endif
vp8_stuff_mb(cpi, x, t) ;
}
else
@@ -550,7 +535,6 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
plane_type = 3;
if(has_y2_block)
{
-#if CONFIG_T8X8
if ( tx_type == TX_8X8 )
{
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
@@ -561,13 +545,12 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
L + vp8_block2left_8x8[24], cpi);
}
else
-#endif
tokenize2nd_order_b(x, t, cpi);
plane_type = 0;
}
-#if CONFIG_T8X8
+
if ( tx_type == TX_8X8 )
{
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
@@ -594,7 +577,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
}
}
else
-#endif
+
tokenize1st_order_b(x, t, plane_type, cpi);
}
@@ -604,9 +587,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
void init_context_counters(void)
{
vpx_memset(context_counters, 0, sizeof(context_counters));
-#if CONFIG_T8X8
vpx_memset(context_counters_8x8, 0, sizeof(context_counters_8x8));
-#endif
}
void print_context_counters()
@@ -670,7 +651,6 @@ void print_context_counters()
}
while (++type < BLOCK_TYPES);
-#if CONFIG_T8X8
fprintf(f, "int Contexts_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];\n\n");
fprintf(f, "const int default_contexts_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {");
@@ -718,7 +698,6 @@ void print_context_counters()
fprintf(f, "\n }");
}
while (++type < BLOCK_TYPES);
-#endif
fprintf(f, "\n};\n");
fclose(f);
@@ -731,7 +710,7 @@ void vp8_tokenize_initialize()
fill_value_tokens();
}
-#if CONFIG_T8X8
+
static __inline void stuff2nd_order_b_8x8
(
const BLOCKD *const b,
@@ -857,7 +836,7 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
*(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
}
}
-#endif
+
static __inline void stuff2nd_order_b
(
diff --git a/vp8/encoder/tokenize.h b/vp8/encoder/tokenize.h
index cd122f19c..545c5d045 100644
--- a/vp8/encoder/tokenize.h
+++ b/vp8/encoder/tokenize.h
@@ -38,10 +38,8 @@ void init_context_counters();
void print_context_counters();
extern _int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_T8X8
extern _int64 context_counters_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
-#endif
extern const int *vp8_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to
* improve cache locality, since it's needed for costing when the rest of the