summaryrefslogtreecommitdiff
path: root/vp8
diff options
context:
space:
mode:
Diffstat (limited to 'vp8')
-rw-r--r--vp8/common/blockd.h10
-rw-r--r--vp8/common/invtrans.c146
-rw-r--r--vp8/common/invtrans.h13
-rw-r--r--vp8/decoder/detokenize.c25
-rw-r--r--vp8/encoder/block.h4
-rw-r--r--vp8/encoder/encodeframe.c29
-rw-r--r--vp8/encoder/encodeintra.c178
-rw-r--r--vp8/encoder/encodemb.c483
-rw-r--r--vp8/encoder/encodemb.h14
-rw-r--r--vp8/encoder/firstpass.c1
-rw-r--r--vp8/encoder/onyx_if.c10
-rw-r--r--vp8/encoder/onyx_int.h1
-rw-r--r--vp8/encoder/quantize.c92
-rw-r--r--vp8/encoder/quantize.h36
-rw-r--r--vp8/encoder/rdopt.c50
-rw-r--r--vp8/encoder/tokenize.c1024
-rw-r--r--vp8/encoder/tokenize.h6
17 files changed, 710 insertions, 1412 deletions
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index a9f6f49d8..a4c618534 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -52,10 +52,12 @@ typedef struct {
int r, c;
} POS;
-#define PLANE_TYPE_Y_NO_DC 0
-#define PLANE_TYPE_Y2 1
-#define PLANE_TYPE_UV 2
-#define PLANE_TYPE_Y_WITH_DC 3
+typedef enum PlaneType {
+ PLANE_TYPE_Y_NO_DC = 0,
+ PLANE_TYPE_Y2,
+ PLANE_TYPE_UV,
+ PLANE_TYPE_Y_WITH_DC,
+} PLANE_TYPE;
typedef char ENTROPY_CONTEXT;
typedef struct {
diff --git a/vp8/common/invtrans.c b/vp8/common/invtrans.c
index 3aba58ccf..e0f4d6f19 100644
--- a/vp8/common/invtrans.c
+++ b/vp8/common/invtrans.c
@@ -8,11 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "invtrans.h"
-
-
static void recon_dcblock(MACROBLOCKD *xd) {
BLOCKD *b = &xd->block[24];
int i;
@@ -20,103 +17,78 @@ static void recon_dcblock(MACROBLOCKD *xd) {
for (i = 0; i < 16; i++) {
xd->block[i].dqcoeff[0] = b->diff[i];
}
-
}
+
static void recon_dcblock_8x8(MACROBLOCKD *xd) {
BLOCKD *b = &xd->block[24]; // for coeff 0, 2, 8, 10
+
xd->block[0].dqcoeff[0] = b->diff[0];
xd->block[4].dqcoeff[0] = b->diff[1];
xd->block[8].dqcoeff[0] = b->diff[4];
xd->block[12].dqcoeff[0] = b->diff[8];
-
-}
-
-#if CONFIG_HYBRIDTRANSFORM
-void vp8_inverse_htransform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch) {
- vp8_ihtllm_c(b->dqcoeff, b->diff, pitch, b->bmi.as_mode.tx_type, 4);
}
-#endif
-void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch) {
+void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+ BLOCKD *b, int pitch) {
if (b->eob <= 1)
IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
else
IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->diff, pitch);
}
-
-void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd) {
- int i;
- BLOCKD *blockd = xd->block;
-
- /* do 2nd order transform on the dc block */
- IDCT_INVOKE(rtcd, iwalsh16)(blockd[24].dqcoeff, blockd[24].diff);
-
- recon_dcblock(xd);
-
- for (i = 0; i < 16; i++) {
- vp8_inverse_transform_b(rtcd, &blockd[i], 32);
- }
-
-}
-void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd) {
- int i;
- BLOCKD *blockd = xd->block;
-
- for (i = 16; i < 24; i++) {
- vp8_inverse_transform_b(rtcd, &blockd[i], 16);
- }
-
-}
-
-
-void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd) {
+void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+ MACROBLOCKD *xd) {
int i;
BLOCKD *blockd = xd->block;
- if (xd->mode_info_context->mbmi.mode != B_PRED &&
- xd->mode_info_context->mbmi.mode != I8X8_PRED &&
- xd->mode_info_context->mbmi.mode != SPLITMV) {
+ if (xd->mode_info_context->mbmi.mode != SPLITMV) {
/* do 2nd order transform on the dc block */
-
- IDCT_INVOKE(rtcd, iwalsh16)(&blockd[24].dqcoeff[0], blockd[24].diff);
+ IDCT_INVOKE(rtcd, iwalsh16)(blockd[24].dqcoeff, blockd[24].diff);
recon_dcblock(xd);
}
for (i = 0; i < 16; i++) {
- vp8_inverse_transform_b(rtcd, &blockd[i], 32);
+ vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 32);
}
+}
+void vp8_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+ MACROBLOCKD *xd) {
+ int i;
+ BLOCKD *blockd = xd->block;
for (i = 16; i < 24; i++) {
- vp8_inverse_transform_b(rtcd, &blockd[i], 16);
+ vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 16);
}
-
}
+void vp8_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+ MACROBLOCKD *xd) {
+ vp8_inverse_transform_mby_4x4(rtcd, xd);
+ vp8_inverse_transform_mbuv_4x4(rtcd, xd);
+}
-void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch) { // pay attention to use when 8x8
+void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
+ short *input_dqcoeff, short *output_coeff,
+ int pitch) {
// int b,i;
// if (b->eob > 1)
IDCT_INVOKE(rtcd, idct8)(input_dqcoeff, output_coeff, pitch);
// else
// IDCT_INVOKE(rtcd, idct8_1)(b->dqcoeff, b->diff, pitch);//pitch
-
}
-
void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
BLOCKD *blockd = xd->block;
- // do 2nd order transform on the dc block
- IDCT_INVOKE(rtcd, ihaar2)(blockd[24].dqcoeff, blockd[24].diff, 8);
+ if (xd->mode_info_context->mbmi.mode != SPLITMV) {
+ // do 2nd order transform on the dc block
+ IDCT_INVOKE(rtcd, ihaar2)(blockd[24].dqcoeff, blockd[24].diff, 8);
+ recon_dcblock_8x8(xd); // need to change for 8x8
+ }
- recon_dcblock_8x8(xd); // need to change for 8x8
for (i = 0; i < 9; i += 8) {
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 32);
@@ -125,8 +97,8 @@ void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
vp8_inverse_transform_b_8x8(rtcd, &blockd[i + 2].dqcoeff[0],
&blockd[i].diff[0], 32);
}
-
}
+
void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
@@ -136,39 +108,12 @@ void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 16);
}
-
}
-
void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
- int i;
- BLOCKD *blockd = xd->block;
-
- if (xd->mode_info_context->mbmi.mode != B_PRED &&
- xd->mode_info_context->mbmi.mode != SPLITMV) {
- // do 2nd order transform on the dc block
-
- IDCT_INVOKE(rtcd, ihaar2)(&blockd[24].dqcoeff[0],
- blockd[24].diff, 8);// dqcoeff[0]
- recon_dcblock_8x8(xd); // need to change for 8x8
-
- }
-
- for (i = 0; i < 9; i += 8) {
- vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
- &blockd[i].diff[0], 32);
- }
- for (i = 2; i < 11; i += 8) {
- vp8_inverse_transform_b_8x8(rtcd, &blockd[i + 2].dqcoeff[0],
- &blockd[i].diff[0], 32);
- }
-
- for (i = 16; i < 24; i += 4) {
- vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
- &blockd[i].diff[0], 16);
- }
-
+ vp8_inverse_transform_mby_8x8(rtcd, xd);
+ vp8_inverse_transform_mbuv_8x8(rtcd, xd);
}
void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
@@ -179,33 +124,12 @@ void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
- vp8_inverse_transform_b_16x16(rtcd, &xd->block[0].dqcoeff[0],
- &xd->block[0].diff[0], 32);
-}
-
-// U,V blocks are 8x8 per macroblock, so just run 8x8
-void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
- MACROBLOCKD *xd) {
- int i;
- BLOCKD *blockd = xd->block;
-
- for (i = 16; i < 24; i += 4)
- vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
- &blockd[i].diff[0], 16);
+ vp8_inverse_transform_b_16x16(rtcd, &xd->block[0].dqcoeff[0],
+ &xd->block[0].diff[0], 32);
}
void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
- int i;
- BLOCKD *blockd = xd->block;
-
- // Luma
- vp8_inverse_transform_b_16x16(rtcd, &blockd[0].dqcoeff[0],
- &blockd[0].diff[0], 32);
-
- // U, V
- // Chroma blocks are downscaled, so run an 8x8 on them.
- for (i = 16; i < 24; i+= 4)
- vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
- &blockd[i].diff[0], 16);
+ vp8_inverse_transform_mby_16x16(rtcd, xd);
+ vp8_inverse_transform_mbuv_8x8(rtcd, xd);
}
diff --git a/vp8/common/invtrans.h b/vp8/common/invtrans.h
index df12226c5..47a9caaf2 100644
--- a/vp8/common/invtrans.h
+++ b/vp8/common/invtrans.h
@@ -16,14 +16,10 @@
#include "idct.h"
#include "blockd.h"
-#if CONFIG_HYBRIDTRANSFORM
-extern void vp8_inverse_htransform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
-#endif
-
-extern void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
-extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
+extern void vp8_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
extern void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch);
extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
@@ -35,5 +31,4 @@ extern void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
int pitch);
extern void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
extern void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-extern void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
#endif
diff --git a/vp8/decoder/detokenize.c b/vp8/decoder/detokenize.c
index 50e9a7dea..5b5ec7e2a 100644
--- a/vp8/decoder/detokenize.c
+++ b/vp8/decoder/detokenize.c
@@ -137,7 +137,7 @@ int get_token(int v) {
#if CONFIG_HYBRIDTRANSFORM
void static count_tokens_adaptive_scan(const MACROBLOCKD *xd, INT16 *qcoeff_ptr,
- int block, int type,
+ int block, PLANE_TYPE type,
TX_TYPE tx_type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
int eob, int seg_eob,
@@ -182,7 +182,7 @@ void static count_tokens_adaptive_scan(const MACROBLOCKD *xd, INT16 *qcoeff_ptr,
}
#endif
-void static count_tokens(INT16 *qcoeff_ptr, int block, int type,
+void static count_tokens(INT16 *qcoeff_ptr, int block, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
int eob, int seg_eob, FRAME_CONTEXT *const fc) {
int c, pt, token, band;
@@ -201,7 +201,7 @@ void static count_tokens(INT16 *qcoeff_ptr, int block, int type,
}
}
-void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, int type,
+void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, PLANE_TYPE type,
#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type,
#endif
@@ -233,7 +233,7 @@ void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, int type,
}
}
-void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, int type,
+void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, PLANE_TYPE type,
#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type,
#endif
@@ -303,7 +303,8 @@ static int vp8_get_signed(BOOL_DECODER *br, int value_to_sign) {
} while (0);
static int vp8_decode_coefs(VP8D_COMP *dx, const MACROBLOCKD *xd,
- ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, int type,
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
+ PLANE_TYPE type,
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type,
#endif
@@ -312,7 +313,7 @@ static int vp8_decode_coefs(VP8D_COMP *dx, const MACROBLOCKD *xd,
const int *coef_bands) {
FRAME_CONTEXT *const fc = &dx->common.fc;
BOOL_DECODER *br = xd->current_bc;
- int tmp, c = (type == 0);
+ int tmp, c = (type == PLANE_TYPE_Y_NO_DC);
const vp8_prob *prob, *coef_probs;
switch (block_type) {
@@ -450,7 +451,8 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd) {
ENTROPY_CONTEXT* const L = (ENTROPY_CONTEXT *)xd->left_context;
char* const eobs = xd->eobs;
- int c, i, type, eobtotal = 0, seg_eob;
+ PLANE_TYPE type;
+ int c, i, eobtotal = 0, seg_eob;
const int segment_id = xd->mode_info_context->mbmi.segment_id;
const int seg_active = segfeature_active(xd, segment_id, SEG_LVL_EOB);
INT16 *qcoeff_ptr = &xd->qcoeff[0];
@@ -471,7 +473,6 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd) {
// Luma block
{
const int* const scan = vp8_default_zig_zag1d_16x16;
- //printf("16: %d\n", tx_type);
c = vp8_decode_coefs(pbi, xd, A, L, type,
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type,
@@ -502,7 +503,6 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd) {
ENTROPY_CONTEXT* const l = L + vp8_block2left_8x8[i];
const int* const scan = vp8_default_zig_zag1d_8x8;
- //printf("8: %d\n", tx_type);
c = vp8_decode_coefs(pbi, xd, a, l, type,
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
tx_type,
@@ -526,7 +526,8 @@ int vp8_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd) {
ENTROPY_CONTEXT *const L = (ENTROPY_CONTEXT *)xd->left_context;
char *const eobs = xd->eobs;
- int c, i, type, eobtotal = 0, seg_eob;
+ PLANE_TYPE type;
+ int c, i, eobtotal = 0, seg_eob;
const int segment_id = xd->mode_info_context->mbmi.segment_id;
const int seg_active = segfeature_active(xd, segment_id, SEG_LVL_EOB);
INT16 *qcoeff_ptr = &xd->qcoeff[0];
@@ -633,8 +634,8 @@ int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd) {
char *const eobs = xd->eobs;
const int *scan = vp8_default_zig_zag1d;
-
- int c, i, type, eobtotal = 0, seg_eob = 16;
+ PLANE_TYPE type;
+ int c, i, eobtotal = 0, seg_eob = 16;
INT16 *qcoeff_ptr = &xd->qcoeff[0];
int segment_id = xd->mode_info_context->mbmi.segment_id;
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index e8c4c31ad..861700409 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -183,8 +183,8 @@ typedef struct {
void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
void (*short_walsh4x4)(short *input, short *output, int pitch);
- void (*quantize_b)(BLOCK *b, BLOCKD *d);
- void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
+ void (*quantize_b_4x4)(BLOCK *b, BLOCKD *d);
+ void (*quantize_b_4x4_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
void (*vp8_short_fdct16x16)(short *input, short *output, int pitch);
void (*short_fhaar2x2)(short *input, short *output, int pitch);
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 4f2437fac..ac22456d1 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -52,9 +52,6 @@ int enc_debug = 0;
int mb_row_debug, mb_col_debug;
#endif
-extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
- TOKENEXTRA **t, int dry_run);
-
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
@@ -81,8 +78,6 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
MACROBLOCK *x,
TOKENEXTRA **t, int mb_col);
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x);
-extern void vp8_stuff_mb_8x8(VP8_COMP *cpi,
- MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
#ifdef MODE_STATS
unsigned int inter_y_modes[MB_MODE_COUNT];
@@ -1843,7 +1838,7 @@ static void update_sb_skip_coeff_state(VP8_COMP *cpi,
if (skip[n]) {
x->e_mbd.above_context = &ta[n];
x->e_mbd.left_context = &tl[n];
- vp8_stuff_mb_8x8(cpi, &x->e_mbd, tp, 0);
+ vp8_stuff_mb(cpi, &x->e_mbd, tp, 0);
} else {
if (n_tokens[n]) {
memcpy(*tp, tokens[n], sizeof(*t[0]) * n_tokens[n]);
@@ -1903,16 +1898,13 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
- vp8_transform_intra_mby_8x8(x);
- vp8_transform_mbuv_8x8(x);
- vp8_quantize_mby_8x8(x);
- vp8_quantize_mbuv_8x8(x);
+ vp8_transform_mb_8x8(x);
+ vp8_quantize_mb_8x8(x);
if (x->optimize) {
vp8_optimize_mby_8x8(x, rtcd);
vp8_optimize_mbuv_8x8(x, rtcd);
}
- vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_mby_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
vp8_recon_mbuv_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
@@ -2305,20 +2297,13 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
dst_uv_stride);
- if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
- vp8_transform_intra_mby_8x8(x);
- } else {
- vp8_transform_mby_8x8(x);
- }
- vp8_transform_mbuv_8x8(x);
- vp8_quantize_mby_8x8(x);
- vp8_quantize_mbuv_8x8(x);
+ vp8_transform_mb_8x8(x);
+ vp8_quantize_mb_8x8(x);
if (x->optimize) {
vp8_optimize_mby_8x8(x, rtcd);
vp8_optimize_mbuv_8x8(x, rtcd);
}
- vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_mby_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
vp8_recon_mbuv_s_c(IF_RTCD(&rtcd->common->recon), &x->e_mbd,
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 323df005b..eacae81d6 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "vpx_ports/config.h"
#include "vp8/common/idct.h"
#include "quantize.h"
@@ -21,17 +20,12 @@
#include "vp8/common/g_common.h"
#include "encodeintra.h"
-
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
#define IF_RTCD(x) NULL
#endif
-#if CONFIG_HYBRIDTRANSFORM
-extern void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d);
-#endif
-
int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
int i;
int intra_pred_var = 0;
@@ -64,12 +58,6 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
BLOCKD *b = &x->e_mbd.block[ib];
BLOCK *be = &x->block[ib];
-#if CONFIG_HYBRIDTRANSFORM
- int QIndex = x->q_index;
- int active_ht = (QIndex < ACTIVE_HT);
-#endif
-
-
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
@@ -85,22 +73,19 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
#if CONFIG_HYBRIDTRANSFORM
- if (active_ht) {
+ if (x->q_index < ACTIVE_HT) {
b->bmi.as_mode.test = b->bmi.as_mode.first;
txfm_map(b, b->bmi.as_mode.first);
vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
- vp8_ht_quantize_b(be, b);
- vp8_inverse_htransform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
- } else {
+ vp8_ht_quantize_b_4x4(be, b);
+ vp8_ihtllm_c(b->dqcoeff, b->diff, 32, b->bmi.as_mode.tx_type, 4);
+ } else
+#endif
+ {
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
- x->quantize_b(be, b) ;
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
+ x->quantize_b_4x4(be, b) ;
+ vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
}
-#else
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
-#endif
RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
@@ -120,120 +105,96 @@ void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
}
void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
+ MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
-
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
+ TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
#if CONFIG_HYBRIDTRANSFORM16X16
- TX_TYPE txfm_type = x->e_mbd.mode_info_context->bmi[0].as_mode.tx_type;
+ TX_TYPE txfm_type = xd->mode_info_context->bmi[0].as_mode.tx_type;
#endif
#if CONFIG_COMP_INTRA_PRED
- if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
+ if (xd->mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
#endif
- RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(xd);
#if CONFIG_COMP_INTRA_PRED
else
- RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mby)(xd);
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
+ xd->predictor, b->src_stride);
- if (tx_type == TX_16X16)
+ if (tx_size == TX_16X16) {
#if CONFIG_HYBRIDTRANSFORM16X16
- {
- if ((x->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED) &&
+ if ((xd->mode_info_context->mbmi.mode < I8X8_PRED) &&
(x->q_index < ACTIVE_HT16)) {
- BLOCKD *bd = &x->e_mbd.block[0];
- txfm_map(bd, pred_mode_conv(x->e_mbd.mode_info_context->mbmi.mode));
+ BLOCKD *bd = &xd->block[0];
+ txfm_map(bd, pred_mode_conv(xd->mode_info_context->mbmi.mode));
txfm_type = bd->bmi.as_mode.tx_type;
vp8_fht_c(b->src_diff, b->coeff, 32, txfm_type, 16);
+ vp8_quantize_mby_16x16(x);
+ if (x->optimize)
+ vp8_optimize_mby_16x16(x, rtcd);
+ vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, txfm_type, 16);
} else
- vp8_transform_intra_mby_16x16(x);
- }
-#else
- vp8_transform_intra_mby_16x16(x);
#endif
- else if (tx_type == TX_8X8)
- vp8_transform_intra_mby_8x8(x);
- else
- vp8_transform_intra_mby(x);
-
- if (tx_type == TX_16X16)
- vp8_quantize_mby_16x16(x);
- else if (tx_type == TX_8X8)
+ {
+ vp8_transform_mby_16x16(x);
+ vp8_quantize_mby_16x16(x);
+ if (x->optimize)
+ vp8_optimize_mby_16x16(x, rtcd);
+ vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), xd);
+ }
+ } else if (tx_size == TX_8X8) {
+ vp8_transform_mby_8x8(x);
vp8_quantize_mby_8x8(x);
- else
- vp8_quantize_mby(x);
-
- if (x->optimize) {
- if (tx_type == TX_16X16)
- vp8_optimize_mby_16x16(x, rtcd);
- else if (tx_type == TX_8X8)
+ if (x->optimize)
vp8_optimize_mby_8x8(x, rtcd);
- else
- vp8_optimize_mby(x, rtcd);
- }
-
- if (tx_type == TX_16X16)
-#if CONFIG_HYBRIDTRANSFORM16X16
- {
- if ((x->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED) &&
- (x->q_index < ACTIVE_HT16)) {
- BLOCKD *bd = &x->e_mbd.block[0];
- vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, txfm_type, 16);
- } else
- vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+ vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
+ } else {
+ vp8_transform_mby_4x4(x);
+ vp8_quantize_mby_4x4(x);
+ if (x->optimize)
+ vp8_optimize_mby_4x4(x, rtcd);
+ vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
}
-#else
- vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-#endif
- else if (tx_type == TX_8X8)
- vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
- RECON_INVOKE(&rtcd->common->recon, recon_mby)
- (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, recon_mby)(IF_RTCD(&rtcd->common->recon),
+ xd);
}
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
- if (tx_type == TX_16X16) tx_type = TX_8X8; // 16x16 for U and V should default to 8x8 behavior.
+ MACROBLOCKD *xd = &x->e_mbd;
+ TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
+
#if CONFIG_COMP_INTRA_PRED
- if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
+ if (xd->mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif
- RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv)(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv)(xd);
#if CONFIG_COMP_INTRA_PRED
} else {
- RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(&x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, build_comp_intra_predictors_mbuv)(xd);
}
#endif
- ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
- if (tx_type == TX_8X8)
+ ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff,
+ x->src.u_buffer, x->src.v_buffer,
+ xd->predictor, x->src.uv_stride);
+ if (tx_size == TX_4X4) {
+ vp8_transform_mbuv_4x4(x);
+ vp8_quantize_mbuv_4x4(x);
+ if (x->optimize)
+ vp8_optimize_mbuv_4x4(x, rtcd);
+ vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
+ } else /* 16x16 or 8x8 */ {
vp8_transform_mbuv_8x8(x);
- else
- vp8_transform_mbuv(x);
-
- if (tx_type == TX_8X8)
vp8_quantize_mbuv_8x8(x);
- else
- vp8_quantize_mbuv(x);
-
- if (x->optimize) {
- if (tx_type == TX_8X8)
+ if (x->optimize)
vp8_optimize_mbuv_8x8(x, rtcd);
- else
- vp8_optimize_mbuv(x, rtcd);
+ vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), xd);
}
- if (tx_type == TX_8X8)
- vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
- vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+ vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), xd);
}
void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
@@ -256,7 +217,7 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
}
#endif
- if (x->e_mbd.mode_info_context->mbmi.txfm_size == TX_8X8) {
+ if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
int idx = (ib & 0x02) ? (ib + 2) : ib;
// generate residual blocks
@@ -280,8 +241,8 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
be = &x->block[ib + iblock[i]];
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
+ x->quantize_b_4x4(be, b);
+ vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
}
}
@@ -323,20 +284,17 @@ void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 8);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
-
- x->quantize_b(be, b);
-
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 16);
+ x->quantize_b_4x4(be, b);
+ vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 16);
RECON_INVOKE(&rtcd->common->recon, recon_uv)(b->predictor,
b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
-
-
void vp8_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
int i, ib, mode, second;
BLOCKD *b;
+
for (i = 0; i < 4; i++) {
ib = vp8_i8x8_block[i];
b = &x->e_mbd.block[ib];
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index 0b753b5e6..5abf69d2f 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "vpx_ports/config.h"
#include "encodemb.h"
#include "vp8/common/reconinter.h"
@@ -57,6 +56,7 @@ void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch) {
unsigned char *pred_ptr = bd->predictor;
int src_stride = be->src_stride;
int r, c;
+
for (r = 0; r < 8; r++) {
for (c = 0; c < 8; c++) {
diff_ptr[c] = src_ptr[c] - pred_ptr[c];
@@ -73,7 +73,6 @@ void vp8_subtract_mbuv_s_c(short *diff, const unsigned char *usrc,
const unsigned char *vpred, int dst_stride) {
short *udiff = diff + 256;
short *vdiff = diff + 320;
-
int r, c;
for (r = 0; r < 8; r++) {
@@ -132,7 +131,7 @@ static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
}
-static void build_dcblock(MACROBLOCK *x) {
+static void build_dcblock_4x4(MACROBLOCK *x) {
short *src_diff_ptr = &x->src_diff[384];
int i;
@@ -140,149 +139,55 @@ static void build_dcblock(MACROBLOCK *x) {
src_diff_ptr[i] = x->coeff[i * 16];
}
}
-void vp8_build_dcblock_8x8(MACROBLOCK *x) {
- short *src_diff_ptr = &x->src_diff[384];
- int i;
- for (i = 0; i < 16; i++) {
- src_diff_ptr[i] = 0;
- }
- src_diff_ptr[0] = x->coeff[0 * 16];
- src_diff_ptr[1] = x->coeff[4 * 16];
- src_diff_ptr[4] = x->coeff[8 * 16];
- src_diff_ptr[8] = x->coeff[12 * 16];
-}
-
-void vp8_transform_mbuv(MACROBLOCK *x) {
- int i;
-
- for (i = 16; i < 24; i += 2) {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
- }
-}
-
-
-void vp8_transform_intra_mby(MACROBLOCK *x) {
- int i;
-
- for (i = 0; i < 16; i += 2) {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
-
- // build dc block from 16 y dc values
- build_dcblock(x);
-
- // do 2nd order transform on the dc block
- x->short_walsh4x4(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
-
-}
-
-static void transform_mb(MACROBLOCK *x) {
+void vp8_transform_mby_4x4(MACROBLOCK *x) {
int i;
- MB_PREDICTION_MODE mode = x->e_mbd.mode_info_context->mbmi.mode;
for (i = 0; i < 16; i += 2) {
x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
&x->block[i].coeff[0], 32);
}
- // build dc block from 16 y dc values
- if (mode != SPLITMV)
- build_dcblock(x);
-
- for (i = 16; i < 24; i += 2) {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
- }
-
- // do 2nd order transform on the dc block
- if (mode != SPLITMV)
- x->short_walsh4x4(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
-
-}
-
-
-static void transform_mby(MACROBLOCK *x) {
- int i;
-
- for (i = 0; i < 16; i += 2) {
- x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
-
- // build dc block from 16 y dc values
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
- build_dcblock(x);
+ // build dc block from 16 y dc values
+ build_dcblock_4x4(x);
+
+ // do 2nd order transform on the dc block
x->short_walsh4x4(&x->block[24].src_diff[0],
&x->block[24].coeff[0], 8);
}
}
-void vp8_transform_mbuv_8x8(MACROBLOCK *x) {
+void vp8_transform_mbuv_4x4(MACROBLOCK *x) {
int i;
- for (i = 16; i < 24; i += 4) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
+ for (i = 16; i < 24; i += 2) {
+ x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
&x->block[i].coeff[0], 16);
}
}
-
-void vp8_transform_intra_mby_8x8(MACROBLOCK *x) { // changed
- int i;
- for (i = 0; i < 9; i += 8) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
- for (i = 2; i < 11; i += 8) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i + 2].coeff[0], 32);
- }
- // build dc block from 16 y dc values
- vp8_build_dcblock_8x8(x);
- // vp8_build_dcblock(x);
-
- // do 2nd order transform on the dc block
- x->short_fhaar2x2(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
-
+static void transform_mb_4x4(MACROBLOCK *x) {
+ vp8_transform_mby_4x4(x);
+ vp8_transform_mbuv_4x4(x);
}
-
-void vp8_transform_mb_8x8(MACROBLOCK *x) {
+void vp8_build_dcblock_8x8(MACROBLOCK *x) {
+ int16_t *src_diff_ptr = x->block[24].src_diff;
int i;
- MB_PREDICTION_MODE mode = x->e_mbd.mode_info_context->mbmi.mode;
- for (i = 0; i < 9; i += 8) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 32);
- }
- for (i = 2; i < 11; i += 8) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i + 2].coeff[0], 32);
- }
- // build dc block from 16 y dc values
- if (mode != B_PRED && mode != SPLITMV)
- vp8_build_dcblock_8x8(x);
- // vp8_build_dcblock(x);
-
- for (i = 16; i < 24; i += 4) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
+ for (i = 0; i < 16; i++) {
+ src_diff_ptr[i] = 0;
}
-
- // do 2nd order transform on the dc block
- if (mode != B_PRED && mode != SPLITMV)
- x->short_fhaar2x2(&x->block[24].src_diff[0],
- &x->block[24].coeff[0], 8);
+ src_diff_ptr[0] = x->coeff[0 * 16];
+ src_diff_ptr[1] = x->coeff[4 * 16];
+ src_diff_ptr[4] = x->coeff[8 * 16];
+ src_diff_ptr[8] = x->coeff[12 * 16];
}
void vp8_transform_mby_8x8(MACROBLOCK *x) {
int i;
+
for (i = 0; i < 9; i += 8) {
x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
&x->block[i].coeff[0], 32);
@@ -291,48 +196,40 @@ void vp8_transform_mby_8x8(MACROBLOCK *x) {
x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
&x->block[i + 2].coeff[0], 32);
}
- // build dc block from 16 y dc values
+
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
- // vp8_build_dcblock(x);
+ // build dc block from 2x2 y dc values
vp8_build_dcblock_8x8(x);
+
+ // do 2nd order transform on the dc block
x->short_fhaar2x2(&x->block[24].src_diff[0],
&x->block[24].coeff[0], 8);
}
}
-void vp8_transform_mbuv_16x16(MACROBLOCK *x) {
+void vp8_transform_mbuv_8x8(MACROBLOCK *x) {
int i;
- vp8_clear_system_state();
- // Default to the 8x8
- for (i = 16; i < 24; i += 4)
+ for (i = 16; i < 24; i += 4) {
x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
+ &x->block[i].coeff[0], 16);
+ }
}
-
-void vp8_transform_intra_mby_16x16(MACROBLOCK *x) {
- vp8_clear_system_state();
- x->vp8_short_fdct16x16(&x->block[0].src_diff[0],
- &x->block[0].coeff[0], 32);
+void vp8_transform_mb_8x8(MACROBLOCK *x) {
+ vp8_transform_mby_8x8(x);
+ vp8_transform_mbuv_8x8(x);
}
-
-void vp8_transform_mb_16x16(MACROBLOCK *x) {
- int i;
+void vp8_transform_mby_16x16(MACROBLOCK *x) {
vp8_clear_system_state();
x->vp8_short_fdct16x16(&x->block[0].src_diff[0],
- &x->block[0].coeff[0], 32);
-
- for (i = 16; i < 24; i += 4) {
- x->vp8_short_fdct8x8(&x->block[i].src_diff[0],
- &x->block[i].coeff[0], 16);
- }
+ &x->block[0].coeff[0], 32);
}
-void vp8_transform_mby_16x16(MACROBLOCK *x) {
- vp8_clear_system_state();
- x->vp8_short_fdct16x16(&x->block[0].src_diff[0], &x->block[0].coeff[0], 32);
+void vp8_transform_mb_16x16(MACROBLOCK *x) {
+ vp8_transform_mby_16x16(x);
+ vp8_transform_mbuv_8x8(x);
}
#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
@@ -369,7 +266,7 @@ static const int plane_rd_mult[4] = {
}\
}
-void optimize_b(MACROBLOCK *mb, int i, int type,
+void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
const VP8_ENCODER_RTCD *rtcd, int tx_type) {
BLOCK *b;
@@ -446,7 +343,7 @@ void optimize_b(MACROBLOCK *mb, int i, int type,
coeff_ptr = b->coeff;
qcoeff_ptr = d->qcoeff;
dqcoeff_ptr = d->dqcoeff;
- i0 = !type;
+ i0 = (type == PLANE_TYPE_Y_NO_DC);
eob = d->eob;
/* Now set up a Viterbi trellis to evaluate alternative roundings. */
@@ -620,7 +517,7 @@ fall between -65 and +65.
**************************************************************************/
#define SUM_2ND_COEFF_THRESH 65
-static void check_reset_2nd_coeffs(MACROBLOCKD *xd, int type,
+static void check_reset_2nd_coeffs(MACROBLOCKD *xd,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
int sum = 0;
int i;
@@ -643,11 +540,12 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *xd, int type,
bd->dqcoeff[rc] = 0;
}
bd->eob = 0;
- *a = *l = (bd->eob != !type);
+ *a = *l = (bd->eob != 0);
}
}
+
#define SUM_2ND_COEFF_THRESH_8X8 32
-static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd, int type,
+static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
int sum = 0;
BLOCKD *bd = &xd->block[24];
@@ -672,62 +570,20 @@ static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd, int type,
bd->qcoeff[8] = 0;
bd->dqcoeff[8] = 0;
bd->eob = 0;
- *a = *l = (bd->eob != !type);
+ *a = *l = (bd->eob != 0);
}
}
-static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+void vp8_optimize_mby_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
int b;
- int type;
+ PLANE_TYPE type;
int has_2nd_order;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
MB_PREDICTION_MODE mode = x->e_mbd.mode_info_context->mbmi.mode;
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
-
- has_2nd_order = (mode != B_PRED && mode != I8X8_PRED && mode != SPLITMV);
- type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
-
- for (b = 0; b < 16; b++) {
- optimize_b(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
- }
-
- for (b = 16; b < 24; b++) {
- optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
- }
-
- if (has_2nd_order) {
- b = 24;
- optimize_b(x, b, PLANE_TYPE_Y2,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
- check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
- ta + vp8_block2above[b], tl + vp8_block2left[b]);
- }
-}
-
-
-void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
- int b;
- int type;
- int has_2nd_order;
-
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
- MB_PREDICTION_MODE mode = x->e_mbd.mode_info_context->mbmi.mode;
-
- if (!x->e_mbd.above_context)
- return;
-
- if (!x->e_mbd.left_context)
+ if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return;
vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
@@ -744,26 +600,22 @@ void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
}
-
if (has_2nd_order) {
b = 24;
optimize_b(x, b, PLANE_TYPE_Y2,
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
- check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
+ check_reset_2nd_coeffs(&x->e_mbd,
ta + vp8_block2above[b], tl + vp8_block2left[b]);
}
}
-void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+void vp8_optimize_mbuv_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
int b;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
- if (!x->e_mbd.above_context)
- return;
-
- if (!x->e_mbd.left_context)
+ if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return;
vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
@@ -778,55 +630,19 @@ void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
}
}
-void optimize_mb_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
- int b;
- int type;
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta;
- ENTROPY_CONTEXT *tl;
-
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
-
- type = 0;
- for (b = 0; b < 16; b += 4) {
- optimize_b(x, b, type,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
- rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
- }
-
- for (b = 16; b < 24; b += 4) {
- optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
- rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
- }
-
- // 8x8 always have 2nd roder haar block
- check_reset_8x8_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
- ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
-
+static void optimize_mb_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ vp8_optimize_mby_4x4(x, rtcd);
+ vp8_optimize_mbuv_4x4(x, rtcd);
}
void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
int b;
- int type;
-
+ PLANE_TYPE type;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
-
- if (!x->e_mbd.above_context)
- return;
-
- if (!x->e_mbd.left_context)
+ if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return;
vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
@@ -834,7 +650,7 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
- type = 0;
+ type = PLANE_TYPE_Y_NO_DC;
for (b = 0; b < 16; b += 4) {
optimize_b(x, b, type,
ta + vp8_block2above[b], tl + vp8_block2left[b],
@@ -842,10 +658,10 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
*(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
*(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
}
+
// 8x8 always have 2nd roder haar block
- check_reset_8x8_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
+ check_reset_8x8_2nd_coeffs(&x->e_mbd,
ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
-
}
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
@@ -854,10 +670,7 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
- if (!x->e_mbd.above_context)
- return;
-
- if (!x->e_mbd.left_context)
+ if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return;
vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
@@ -873,12 +686,14 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
*(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
*(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
}
-
}
+void optimize_mb_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ vp8_optimize_mby_8x8(x, rtcd);
+ vp8_optimize_mbuv_8x8(x, rtcd);
+}
-
-void optimize_b_16x16(MACROBLOCK *mb, int i, int type,
+void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
const VP8_ENCODER_RTCD *rtcd) {
BLOCK *b = &mb->block[i];
@@ -1053,169 +868,75 @@ void optimize_b_16x16(MACROBLOCK *mb, int i, int type,
}
void vp8_optimize_mby_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
- ENTROPY_CONTEXT_PLANES t_above, t_left;
- ENTROPY_CONTEXT *ta, *tl;
-
- if (!x->e_mbd.above_context)
- return;
- if (!x->e_mbd.left_context)
- return;
-
- vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
- vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
-
- ta = (ENTROPY_CONTEXT *)&t_above;
- tl = (ENTROPY_CONTEXT *)&t_left;
- optimize_b_16x16(x, 0, PLANE_TYPE_Y_WITH_DC, ta, tl, rtcd);
- *(ta + 1) = *ta;
- *(tl + 1) = *tl;
-}
-
-void optimize_mb_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
- int b;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta, *tl;
+ if (!x->e_mbd.above_context || !x->e_mbd.left_context)
+ return;
+
vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
-
optimize_b_16x16(x, 0, PLANE_TYPE_Y_WITH_DC, ta, tl, rtcd);
- *(ta + 1) = *ta;
- *(tl + 1) = *tl;
+}
- for (b = 16; b < 24; b += 4) {
- optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
- rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
- }
+static void optimize_mb_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+ vp8_optimize_mby_16x16(x, rtcd);
+ vp8_optimize_mbuv_8x8(x, rtcd);
}
void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
- vp8_build_inter_predictors_mb(&x->e_mbd);
+ MACROBLOCKD *xd = &x->e_mbd;
+ TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
+ vp8_build_inter_predictors_mb(xd);
vp8_subtract_mb(rtcd, x);
- if (tx_type == TX_16X16)
+ if (tx_size == TX_16X16) {
vp8_transform_mb_16x16(x);
- else if (tx_type == TX_8X8)
- vp8_transform_mb_8x8(x);
- else
- transform_mb(x);
-
- if (tx_type == TX_16X16)
vp8_quantize_mb_16x16(x);
- else if (tx_type == TX_8X8)
- vp8_quantize_mb_8x8(x);
- else
- vp8_quantize_mb(x);
-
- if (x->optimize) {
- if (tx_type == TX_16X16)
+ if (x->optimize)
optimize_mb_16x16(x, rtcd);
- else if (tx_type == TX_8X8)
+ vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), xd);
+ } else if (tx_size == TX_8X8) {
+ vp8_transform_mb_8x8(x);
+ vp8_quantize_mb_8x8(x);
+ if (x->optimize)
optimize_mb_8x8(x, rtcd);
- else
- optimize_mb(x, rtcd);
- }
-
- if (tx_type == TX_16X16)
- vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- if (tx_type == TX_8X8)
- vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
- if (tx_type == TX_8X8) {
-#ifdef ENC_DEBUG
- if (enc_debug) {
- int i;
- printf("qcoeff:\n");
- printf("%d %d:\n", x->e_mbd.mb_to_left_edge, x->e_mbd.mb_to_top_edge);
- for (i = 0; i < 400; i++) {
- printf("%3d ", x->e_mbd.qcoeff[i]);
- if (i % 16 == 15) printf("\n");
- }
- printf("dqcoeff:\n");
- for (i = 0; i < 400; i++) {
- printf("%3d ", x->e_mbd.dqcoeff[i]);
- if (i % 16 == 15) printf("\n");
- }
- printf("diff:\n");
- for (i = 0; i < 400; i++) {
- printf("%3d ", x->e_mbd.diff[i]);
- if (i % 16 == 15) printf("\n");
- }
- printf("predictor:\n");
- for (i = 0; i < 400; i++) {
- printf("%3d ", x->e_mbd.predictor[i]);
- if (i % 16 == 15) printf("\n");
- }
- printf("\n");
- }
-#endif
+ vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd);
+ } else {
+ transform_mb_4x4(x);
+ vp8_quantize_mb_4x4(x);
+ if (x->optimize)
+ optimize_mb_4x4(x, rtcd);
+ vp8_inverse_transform_mb_4x4(IF_RTCD(&rtcd->common->idct), xd);
}
- RECON_INVOKE(&rtcd->common->recon, recon_mb)
- (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
-#ifdef ENC_DEBUG
- if (enc_debug) {
- int i, j, k;
- printf("Final Reconstruction\n");
- for (i = 0; i < 16; i += 4) {
- BLOCKD *b = &x->e_mbd.block[i];
- unsigned char *d = *(b->base_dst) + b->dst;
- for (k = 0; k < 4; k++) {
- for (j = 0; j < 16; j++)
- printf("%3d ", d[j]);
- printf("\n");
- d += b->dst_stride;
- }
- }
- }
-#endif
+ RECON_INVOKE(&rtcd->common->recon, recon_mb)(IF_RTCD(&rtcd->common->recon),
+ xd);
}
-
/* this function is used by first pass only */
void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
- int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
-
+ MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
#if CONFIG_PRED_FILTER
// Disable the prediction filter for firstpass
- x->e_mbd.mode_info_context->mbmi.pred_filter_enabled = 0;
+ xd->mode_info_context->mbmi.pred_filter_enabled = 0;
#endif
- vp8_build_1st_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.predictor,
- 16, 0);
+ vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
+ xd->predictor, b->src_stride);
+
+ vp8_transform_mby_4x4(x);
+ vp8_quantize_mby_4x4(x);
+ vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), xd);
- if (tx_type == TX_16X16)
- vp8_transform_mby_16x16(x);
- else if (tx_type == TX_8X8)
- vp8_transform_mby_8x8(x);
- else
- transform_mby(x);
-
- vp8_quantize_mby(x);
-
- if (tx_type == TX_16X16)
- vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- if (tx_type == TX_8X8)
- vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
- else
- vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
- RECON_INVOKE(&rtcd->common->recon, recon_mby)
- (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
+ RECON_INVOKE(&rtcd->common->recon, recon_mby)(IF_RTCD(&rtcd->common->recon),
+ xd);
}
diff --git a/vp8/encoder/encodemb.h b/vp8/encoder/encodemb.h
index dde66cb99..6c28ea025 100644
--- a/vp8/encoder/encodemb.h
+++ b/vp8/encoder/encodemb.h
@@ -105,27 +105,23 @@ struct VP8_ENCODER_RTCD;
void vp8_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_build_dcblock(MACROBLOCK *b);
-void vp8_transform_mb(MACROBLOCK *mb);
-void vp8_transform_mbuv(MACROBLOCK *x);
-void vp8_transform_intra_mby(MACROBLOCK *x);
+void vp8_transform_mb_4x4(MACROBLOCK *mb);
+void vp8_transform_mbuv_4x4(MACROBLOCK *x);
+void vp8_transform_mby_4x4(MACROBLOCK *x);
-void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
-void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
+void vp8_optimize_mby_4x4(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
+void vp8_optimize_mbuv_4x4(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
void vp8_transform_mb_8x8(MACROBLOCK *mb);
void vp8_transform_mby_8x8(MACROBLOCK *x);
void vp8_transform_mbuv_8x8(MACROBLOCK *x);
-void vp8_transform_intra_mby_8x8(MACROBLOCK *x);
void vp8_build_dcblock_8x8(MACROBLOCK *b);
void vp8_optimize_mby_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_transform_mb_16x16(MACROBLOCK *mb);
void vp8_transform_mby_16x16(MACROBLOCK *x);
-void vp8_transform_mbuv_16x16(MACROBLOCK *x);
-void vp8_transform_intra_mby_16x16(MACROBLOCK *x);
-void vp8_build_dcblock_16x16(MACROBLOCK *b);
void vp8_optimize_mby_16x16(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch);
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 18333e64c..bad50b606 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -623,6 +623,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
mv.as_mv.col <<= 3;
this_error = motion_error;
vp8_set_mbmode_and_mvs(x, NEWMV, &mv);
+ xd->mode_info_context->mbmi.txfm_size = TX_4X4;
vp8_encode_inter16x16y(IF_RTCD(&cpi->rtcd), x);
sum_mvr += mv.as_mv.row;
sum_mvr_abs += abs(mv.as_mv.row);
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index e8f9adc5e..21fff7408 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -1218,11 +1218,11 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
cpi->mb.short_fhaar2x2 = FDCT_INVOKE(&cpi->rtcd.fdct, haar_short2x2);
- cpi->mb.quantize_b = vp8_regular_quantize_b;
- cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
- cpi->mb.quantize_b_8x8 = vp8_regular_quantize_b_8x8;
- cpi->mb.quantize_b_16x16= vp8_regular_quantize_b_16x16;
- cpi->mb.quantize_b_2x2 = vp8_regular_quantize_b_2x2;
+ cpi->mb.quantize_b_4x4 = vp8_regular_quantize_b_4x4;
+ cpi->mb.quantize_b_4x4_pair = vp8_regular_quantize_b_4x4_pair;
+ cpi->mb.quantize_b_8x8 = vp8_regular_quantize_b_8x8;
+ cpi->mb.quantize_b_16x16 = vp8_regular_quantize_b_16x16;
+ cpi->mb.quantize_b_2x2 = vp8_regular_quantize_b_2x2;
vp8cx_init_quantizer(cpi);
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 696da14ed..1e7494039 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -805,6 +805,7 @@ void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x);
int rd_cost_intra_mb(MACROBLOCKD *x);
void vp8_tokenize_mb(VP8_COMP *, MACROBLOCKD *, TOKENEXTRA **, int dry_run);
+void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_set_speed_features(VP8_COMP *cpi);
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index 03f868e56..8ae3029ee 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <math.h>
#include "vpx_mem/vpx_mem.h"
@@ -23,7 +22,7 @@ extern int enc_debug;
#endif
#if CONFIG_HYBRIDTRANSFORM
-void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d) {
+void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -88,7 +87,7 @@ void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d) {
}
#endif
-void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d) {
+void vp8_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -137,39 +136,28 @@ void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d) {
d->eob = eob + 1;
}
-void vp8_quantize_mby_c(MACROBLOCK *x) {
+void vp8_quantize_mby_4x4_c(MACROBLOCK *x) {
int i;
- int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+ int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
for (i = 0; i < 16; i++)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
if (has_2nd_order)
- x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
-}
-
-void vp8_quantize_mb_c(MACROBLOCK *x) {
- int i;
- int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
-
- for (i = 0; i < 24 + has_2nd_order; i++)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ x->quantize_b_4x4(&x->block[24], &x->e_mbd.block[24]);
}
-
-void vp8_quantize_mbuv_c(MACROBLOCK *x) {
+void vp8_quantize_mbuv_4x4_c(MACROBLOCK *x) {
int i;
for (i = 16; i < 24; i++)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
}
-
-
+void vp8_quantize_mb_4x4_c(MACROBLOCK *x) {
+ vp8_quantize_mby_4x4_c(x);
+ vp8_quantize_mbuv_4x4_c(x);
+}
void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
@@ -271,8 +259,8 @@ void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) {
void vp8_quantize_mby_8x8(MACROBLOCK *x) {
int i;
- int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
+ int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
+
for (i = 0; i < 16; i ++) {
x->e_mbd.block[i].eob = 0;
}
@@ -282,21 +270,6 @@ void vp8_quantize_mby_8x8(MACROBLOCK *x) {
if (has_2nd_order)
x->quantize_b_2x2(&x->block[24], &x->e_mbd.block[24]);
-
-}
-
-void vp8_quantize_mb_8x8(MACROBLOCK *x) {
- int i;
- int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- for (i = 0; i < 25; i ++) {
- x->e_mbd.block[i].eob = 0;
- }
- for (i = 0; i < 24; i += 4)
- x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
-
- if (has_2nd_order)
- x->quantize_b_2x2(&x->block[24], &x->e_mbd.block[24]);
}
void vp8_quantize_mbuv_8x8(MACROBLOCK *x) {
@@ -308,11 +281,14 @@ void vp8_quantize_mbuv_8x8(MACROBLOCK *x) {
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
}
-
-
+void vp8_quantize_mb_8x8(MACROBLOCK *x) {
+ vp8_quantize_mby_8x8(x);
+ vp8_quantize_mbuv_8x8(x);
+}
void vp8_quantize_mby_16x16(MACROBLOCK *x) {
int i;
+
for (i = 0; i < 16; i++)
x->e_mbd.block[i].eob = 0;
x->e_mbd.block[24].eob = 0;
@@ -320,22 +296,8 @@ void vp8_quantize_mby_16x16(MACROBLOCK *x) {
}
void vp8_quantize_mb_16x16(MACROBLOCK *x) {
- int i;
- for(i = 0; i < 25; i++)
- x->e_mbd.block[i].eob = 0;
- x->quantize_b_16x16(&x->block[0], &x->e_mbd.block[0]);
- for (i = 16; i < 24; i += 4)
- x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
-}
-
-// U and V should use 8x8
-void vp8_quantize_mbuv_16x16(MACROBLOCK *x) {
- int i;
-
- for(i = 16; i < 24; i++)
- x->e_mbd.block[i].eob = 0;
- for (i = 16; i < 24; i += 4)
- x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
+ vp8_quantize_mby_16x16(x);
+ vp8_quantize_mbuv_8x8(x);
}
void vp8_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
@@ -385,15 +347,13 @@ void vp8_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
d->eob = eob + 1;
}
-
-
/* quantize_b_pair function pointer in MACROBLOCK structure is set to one of
* these two C functions if corresponding optimized routine is not available.
* NEON optimized version implements currently the fast quantization for pair
* of blocks. */
-void vp8_regular_quantize_b_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2) {
- vp8_regular_quantize_b(b1, d1);
- vp8_regular_quantize_b(b2, d2);
+void vp8_regular_quantize_b_4x4_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2) {
+ vp8_regular_quantize_b_4x4(b1, d1);
+ vp8_regular_quantize_b_4x4(b2, d2);
}
static void invert_quant(short *quant,
@@ -408,7 +368,6 @@ static void invert_quant(short *quant,
*shift = l;
}
-
void vp8cx_init_quantizer(VP8_COMP *cpi) {
int i;
int quant_val;
@@ -697,7 +656,6 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
x->q_index = QIndex;
}
-
void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
int i;
int QIndex = x->q_index;
@@ -731,7 +689,6 @@ void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
x->block[24].zbin_extra = (short)zbin_extra;
}
-
void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
// Clear Zbin mode boost for default case
cpi->zbin_mode_boost = 0;
@@ -740,7 +697,6 @@ void vp8cx_frame_init_quantizer(VP8_COMP *cpi) {
vp8cx_mb_init_quantizer(cpi, &cpi->mb);
}
-
void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
VP8_COMMON *cm = &cpi->common;
diff --git a/vp8/encoder/quantize.h b/vp8/encoder/quantize.h
index 6aafafc76..ad3a3fc0e 100644
--- a/vp8/encoder/quantize.h
+++ b/vp8/encoder/quantize.h
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#ifndef __INC_QUANTIZE_H
#define __INC_QUANTIZE_H
@@ -31,15 +30,19 @@
#include "arm/quantize_arm.h"
#endif
-#ifndef vp8_quantize_quantb
-#define vp8_quantize_quantb vp8_regular_quantize_b
+#if CONFIG_HYBRIDTRANSFORM
+extern prototype_quantize_block(vp8_ht_quantize_b_4x4);
+#endif
+
+#ifndef vp8_quantize_quantb_4x4
+#define vp8_quantize_quantb_4x4 vp8_regular_quantize_b_4x4
#endif
-extern prototype_quantize_block(vp8_quantize_quantb);
+extern prototype_quantize_block(vp8_quantize_quantb_4x4);
-#ifndef vp8_quantize_quantb_pair
-#define vp8_quantize_quantb_pair vp8_regular_quantize_b_pair
+#ifndef vp8_quantize_quantb_4x4_pair
+#define vp8_quantize_quantb_4x4_pair vp8_regular_quantize_b_4x4_pair
#endif
-extern prototype_quantize_block_pair(vp8_quantize_quantb_pair);
+extern prototype_quantize_block_pair(vp8_quantize_quantb_4x4_pair);
#ifndef vp8_quantize_quantb_8x8
#define vp8_quantize_quantb_8x8 vp8_regular_quantize_b_8x8
@@ -56,21 +59,21 @@ extern prototype_quantize_block(vp8_quantize_quantb_16x16);
#endif
extern prototype_quantize_block(vp8_quantize_quantb_2x2);
-#ifndef vp8_quantize_mb
-#define vp8_quantize_mb vp8_quantize_mb_c
+#ifndef vp8_quantize_mb_4x4
+#define vp8_quantize_mb_4x4 vp8_quantize_mb_4x4_c
#endif
-extern prototype_quantize_mb(vp8_quantize_mb);
+extern prototype_quantize_mb(vp8_quantize_mb_4x4);
void vp8_quantize_mb_8x8(MACROBLOCK *x);
-#ifndef vp8_quantize_mbuv
-#define vp8_quantize_mbuv vp8_quantize_mbuv_c
+#ifndef vp8_quantize_mbuv_4x4
+#define vp8_quantize_mbuv_4x4 vp8_quantize_mbuv_4x4_c
#endif
-extern prototype_quantize_mb(vp8_quantize_mbuv);
+extern prototype_quantize_mb(vp8_quantize_mbuv_4x4);
-#ifndef vp8_quantize_mby
-#define vp8_quantize_mby vp8_quantize_mby_c
+#ifndef vp8_quantize_mby_4x4
+#define vp8_quantize_mby_4x4 vp8_quantize_mby_4x4_c
#endif
-extern prototype_quantize_mb(vp8_quantize_mby);
+extern prototype_quantize_mb(vp8_quantize_mby_4x4);
extern prototype_quantize_mb(vp8_quantize_mby_8x8);
extern prototype_quantize_mb(vp8_quantize_mbuv_8x8);
@@ -78,7 +81,6 @@ extern prototype_quantize_mb(vp8_quantize_mbuv_8x8);
void vp8_quantize_mb_16x16(MACROBLOCK *x);
extern prototype_quantize_block(vp8_quantize_quantb_16x16);
extern prototype_quantize_mb(vp8_quantize_mby_16x16);
-extern prototype_quantize_mb(vp8_quantize_mbuv_16x16);
struct VP8_COMP;
extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 467cb313d..6be2d3521 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -56,10 +56,6 @@
extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
-#if CONFIG_HYBRIDTRANSFORM
-extern void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d);
-#endif
-
#define MAXF(a,b) (((a) > (b)) ? (a) : (b))
#define INVALID_MV 0x80008000
@@ -572,9 +568,9 @@ int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd) {
}
static int cost_coeffs_2x2(MACROBLOCK *mb,
- BLOCKD *b, int type,
+ BLOCKD *b, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
- int c = !type; /* start at coef 0, unless Y with Y2 */
+ int c = (type == PLANE_TYPE_Y_NO_DC); /* start at coef 0, unless Y with Y2 */
int eob = b->eob;
int pt; /* surrounding block/prev coef predictor */
int cost = 0;
@@ -600,11 +596,11 @@ static int cost_coeffs_2x2(MACROBLOCK *mb,
return cost;
}
-static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type,
+static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
int tx_size) {
const int eob = b->eob;
- int c = !type; /* start at coef 0, unless Y with Y2 */
+ int c = (type == PLANE_TYPE_Y_NO_DC); /* start at coef 0, unless Y with Y2 */
int cost = 0, default_eob, seg_eob;
int pt; /* surrounding block/prev coef predictor */
int const *scan, *band;
@@ -771,11 +767,11 @@ static void macro_block_yrd_4x4(MACROBLOCK *mb,
// Quantization
for (b = 0; b < 16; b++) {
- mb->quantize_b(&mb->block[b], &xd->block[b]);
+ mb->quantize_b_4x4(&mb->block[b], &xd->block[b]);
}
// DC predication and Quantization of 2nd Order block
- mb->quantize_b(mb_y2, x_y2);
+ mb->quantize_b_4x4(mb_y2, x_y2);
// Distortion
d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 1);
@@ -785,7 +781,7 @@ static void macro_block_yrd_4x4(MACROBLOCK *mb,
*Distortion = (d >> 2);
// rate
*Rate = vp8_rdcost_mby(mb);
- *skippable = mby_is_skippable(&mb->e_mbd, 1);
+ *skippable = mby_is_skippable_4x4(&mb->e_mbd, 1);
}
static int vp8_rdcost_mby_8x8(MACROBLOCK *mb, int backup) {
@@ -1206,14 +1202,14 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
b->bmi.as_mode.test = mode;
txfm_map(b, mode);
vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
- vp8_ht_quantize_b(be, b);
+ vp8_ht_quantize_b_4x4(be, b);
} else {
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
+ x->quantize_b_4x4(be, b);
}
#else
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
+ x->quantize_b_4x4(be, b);
#endif
tempa = ta;
@@ -1592,10 +1588,10 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
x->vp8_short_fdct8x4(be->src_diff, be->coeff, 32);
x->vp8_short_fdct8x4((be + 4)->src_diff, (be + 4)->coeff, 32);
- x->quantize_b_pair(x->block + ib, x->block + ib + 1,
- xd->block + ib, xd->block + ib + 1);
- x->quantize_b_pair(x->block + ib + 4, x->block + ib + 5,
- xd->block + ib + 4, xd->block + ib + 5);
+ x->quantize_b_4x4_pair(x->block + ib, x->block + ib + 1,
+ xd->block + ib, xd->block + ib + 1);
+ x->quantize_b_4x4_pair(x->block + ib + 4, x->block + ib + 5,
+ xd->block + ib + 4, xd->block + ib + 5);
distortion = vp8_block_error_c((x->block + ib)->coeff,
(xd->block + ib)->dqcoeff, 16);
@@ -1745,12 +1741,12 @@ static int64_t rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
x->e_mbd.predictor,
x->src.uv_stride);
- vp8_transform_mbuv(x);
- vp8_quantize_mbuv(x);
+ vp8_transform_mbuv_4x4(x);
+ vp8_quantize_mbuv_4x4(x);
*rate = rd_cost_mbuv(x);
*distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
- *skip = mbuv_is_skippable(&x->e_mbd);
+ *skip = mbuv_is_skippable_4x4(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
@@ -1855,8 +1851,8 @@ static int64_t rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
- vp8_transform_mbuv(x);
- vp8_quantize_mbuv(x);
+ vp8_transform_mbuv_4x4(x);
+ vp8_quantize_mbuv_4x4(x);
*rate = rd_cost_mbuv(x);
*distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
@@ -1908,8 +1904,8 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
x->src.uv_stride);
- vp8_transform_mbuv(x);
- vp8_quantize_mbuv(x);
+ vp8_transform_mbuv_4x4(x);
+ vp8_quantize_mbuv_4x4(x);
rate_to = rd_cost_mbuv(x);
rate = rate_to
@@ -1920,7 +1916,7 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
if (this_rd < best_rd) {
- skip = mbuv_is_skippable(xd);
+ skip = mbuv_is_skippable_4x4(xd);
best_rd = this_rd;
d = distortion;
r = rate;
@@ -2252,7 +2248,7 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x,
// set to 0 no way to account for 2nd order DC so discount
// be->coeff[0] = 0;
- x->quantize_b(be, bd);
+ x->quantize_b_4x4(be, bd);
thisdistortion = ENCODEMB_INVOKE(&rtcd->encodemb, berr)(
be->coeff, bd->dqcoeff, 16) / 4;
distortion += thisdistortion;
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index f359d2528..c72c1e7e7 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -60,14 +60,7 @@ extern unsigned int hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
#endif
#endif /* ENTROPY_STATS */
-void vp8_stuff_mb(VP8_COMP *cpi,
- MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
-void vp8_stuff_mb_8x8(VP8_COMP *cpi,
- MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
-void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
- MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
-void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
- TOKENEXTRA **t, int dry_run);
+void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_fix_contexts(MACROBLOCKD *xd);
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
@@ -131,27 +124,22 @@ static void fill_value_tokens() {
static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
const BLOCKD *const b,
TOKENEXTRA **tp,
- const int type,
- const FRAME_TYPE frametype,
+ PLANE_TYPE type,
ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l,
VP8_COMP *cpi,
int dry_run) {
int pt; /* near block/prev token context index */
- int c = 0; /* start at DC unless type 0 */
+ int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
const int eob = b->eob; /* one beyond last nonzero coeff */
TOKENEXTRA *t = *tp; /* store tokens starting here */
- int x;
const short *qcoeff_ptr = b->qcoeff;
#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = get_tx_type(xd, b);
#endif
-
int seg_eob = 256;
int segment_id = xd->mode_info_context->mbmi.segment_id;
- //if (!dry_run) printf("16: %d\n", tx_type);
-
if (segfeature_active(xd, segment_id, SEG_LVL_EOB))
seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
@@ -159,17 +147,18 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
do {
const int band = vp8_coef_bands_16x16[c];
- int v;
+ int x;
- x = DCT_EOB_TOKEN;
if (c < eob) {
- int rc = vp8_default_zig_zag1d_16x16[c];
- v = qcoeff_ptr[rc];
+ const int rc = vp8_default_zig_zag1d_16x16[c];
+ const int v = qcoeff_ptr[rc];
assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
x = vp8_dct_value_tokens_ptr[v].Token;
+ } else {
+ x = DCT_EOB_TOKEN;
}
t->Token = x;
@@ -180,7 +169,8 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
#endif
t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
- t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
+ t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
+ (band > 1 && type == PLANE_TYPE_Y_NO_DC));
if (!dry_run) {
#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
@@ -189,31 +179,27 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
#endif
++cpi->coef_counts_16x16[type][band][pt][x];
}
- } while (pt = vp8_prev_token_class[x], ++t, c < eob && ++c < seg_eob);
+ pt = vp8_prev_token_class[x];
+ ++t;
+ } while (c < eob && ++c < seg_eob);
*tp = t;
pt = (c != !type); /* 0 <-> all coeff data is zero */
*a = *l = pt;
}
-static void tokenize2nd_order_b_8x8
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static void tokenize2nd_order_b_8x8(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
int c = 0; /* start at DC */
const int eob = b->eob; /* one beyond last nonzero coeff */
TOKENEXTRA *t = *tp; /* store tokens starting here */
- int x;
const short *qcoeff_ptr = b->qcoeff;
-
int seg_eob = 4;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -227,125 +213,107 @@ static void tokenize2nd_order_b_8x8
do {
const int band = vp8_coef_bands[c];
- int v = 0;
+ int x;
if (c < eob) {
- int rc = vp8_default_zig_zag1d[c];
- v = qcoeff_ptr[rc];
+ const int rc = vp8_default_zig_zag1d[c];
+ const int v = qcoeff_ptr[rc];
assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
x = vp8_dct_value_tokens_ptr[v].Token;
- } else
+ } else {
x = DCT_EOB_TOKEN;
+ }
t->Token = x;
- // printf("Token : %d\n", x);
- t->context_tree = cpi->common.fc.coef_probs_8x8 [type] [band] [pt];
+ t->context_tree = cpi->common.fc.coef_probs_8x8[PLANE_TYPE_Y2][band][pt];
- t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
+ t->skip_eob_node = ((pt == 0) && (band > 0));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run)
- ++cpi->coef_counts_8x8 [type] [band] [pt] [x];
- } while (pt = vp8_prev_token_class[x], ++t, c < eob && ++c < seg_eob);
-
+ ++cpi->coef_counts_8x8[PLANE_TYPE_Y2][band][pt][x];
+ pt = vp8_prev_token_class[x];
+ ++t;
+ } while (c < eob && ++c < seg_eob);
*tp = t;
- pt = (c != !type); /* 0 <-> all coeff data is zero */
+ pt = (c != 0); /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
}
-static void tokenize2nd_order_b(MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- VP8_COMP *cpi,
- int dry_run) {
+static void tokenize2nd_order_b_4x4(MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
- int c; /* start at DC */
+ int c = 0; /* start at DC */
TOKENEXTRA *t = *tp;/* store tokens starting here */
- const BLOCKD *b;
- const short *qcoeff_ptr;
+ const BLOCKD *b = xd->block + 24;
+ const short *qcoeff_ptr = b->qcoeff;
ENTROPY_CONTEXT *a;
ENTROPY_CONTEXT *l;
- int band, rc, v, token;
-
+ const int eob = b->eob;
int seg_eob = 16;
int segment_id = xd->mode_info_context->mbmi.segment_id;
if (segfeature_active(xd, segment_id, SEG_LVL_EOB))
seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
- b = xd->block + 24;
- qcoeff_ptr = b->qcoeff;
a = (ENTROPY_CONTEXT *)xd->above_context + 8;
l = (ENTROPY_CONTEXT *)xd->left_context + 8;
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- for (c = 0; c < b->eob; c++) {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
+ do {
+ const int band = vp8_coef_bands[c];
+ int token;
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
+ if (c < eob) {
+ const int rc = vp8_default_zig_zag1d[c];
+ const int v = qcoeff_ptr[rc];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ token = DCT_EOB_TOKEN;
t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [1] [band] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[PLANE_TYPE_Y2][band][pt];
t->skip_eob_node = ((pt == 0) && (band > 0));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run)
- ++cpi->coef_counts [1] [band] [pt] [token];
-
+ ++cpi->coef_counts[PLANE_TYPE_Y2][band][pt][token];
pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [1] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [1] [band] [pt] [DCT_EOB_TOKEN];
-
- t++;
- }
-
+ ++t;
+ } while (c < eob && ++c < seg_eob);
*tp = t;
pt = (c != 0); /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
}
-static void tokenize1st_order_b_8x8
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
- int c = type ? 0 : 1; /* start at DC unless type 0 */
+ int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0; /* start at DC unless type 0 */
TOKENEXTRA *t = *tp; /* store tokens starting here */
const short *qcoeff_ptr = b->qcoeff;
#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type = xd->mode_info_context->mbmi.mode == I8X8_PRED ?
get_tx_type(xd, b) : DCT_DCT;
#endif
-
+ const int eob = b->eob;
int seg_eob = 64;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -354,16 +322,20 @@ static void tokenize1st_order_b_8x8
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- //if (!dry_run) printf("8: %d\n", tx_type);
- for (; c < b->eob; ++c) {
+ do {
const int band = vp8_coef_bands_8x8[c];
- int rc = vp8_default_zig_zag1d_8x8[c];
- int v = qcoeff_ptr[rc], x;
+ int x;
- assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
+ if (c < eob) {
+ const int rc = vp8_default_zig_zag1d_8x8[c];
+ const int v = qcoeff_ptr[rc];
+
+ assert(-DCT_MAX_VALUE <= v && v < (DCT_MAX_VALUE));
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- x = vp8_dct_value_tokens_ptr[v].Token;
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ x = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ x = DCT_EOB_TOKEN;
t->Token = x;
#if CONFIG_HYBRIDTRANSFORM8X8
@@ -373,7 +345,8 @@ static void tokenize1st_order_b_8x8
#endif
t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
- t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
+ t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
+ (band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
@@ -384,65 +357,89 @@ static void tokenize1st_order_b_8x8
#endif
++cpi->coef_counts_8x8[type][band][pt][x];
}
-
pt = vp8_prev_token_class[x];
++t;
+ } while (c < eob && ++c < seg_eob);
+
+ *tp = t;
+ pt = (c != !type); /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
+}
+
+static void tokenize1st_order_chroma_4x4(MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ VP8_COMP *cpi,
+ int dry_run) {
+ unsigned int block;
+ const BLOCKD *b = xd->block + 16;
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp;/* store tokens starting here */
+ ENTROPY_CONTEXT *a;
+ ENTROPY_CONTEXT *l;
+ int seg_eob = 16;
+ int segment_id = xd->mode_info_context->mbmi.segment_id;
+
+ if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
+ seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
}
- if (c < seg_eob) {
- const int band = vp8_coef_bands_8x8[c];
- t->Token = DCT_EOB_TOKEN;
+ /* Chroma */
+ for (block = 16; block < 24; block++, b++) {
+ const int eob = b->eob;
+ const int tmp1 = vp8_block2above[block];
+ const int tmp2 = vp8_block2left[block];
+ const int16_t *qcoeff_ptr = b->qcoeff;
+ int c = 0;
-#if CONFIG_HYBRIDTRANSFORM8X8
- if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8 [type] [band] [pt];
- else
-#endif
- t->context_tree = cpi->common.fc.coef_probs_8x8 [type] [band] [pt];
+ a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
+ l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
- t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
- if (vp8_coef_encodings[t->Token].Len - t->skip_eob_node <= 0) {
- printf("type %d, seg-eob %d, eob %d, pt %d, c %d band %d\n", type, seg_eob, b->eob, pt, c, band);
- fflush(stdout);
- }
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM8X8
- if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
- else
-#endif
- ++cpi->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
- }
- ++t;
- }
+ do {
+ const int band = vp8_coef_bands[c];
+ int token;
- *tp = t;
- pt = (c != !type); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ if (c < eob) {
+ const int rc = vp8_default_zig_zag1d[c];
+ const int v = qcoeff_ptr[rc];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ token = DCT_EOB_TOKEN;
+
+ t->Token = token;
+ t->context_tree = cpi->common.fc.coef_probs[PLANE_TYPE_UV][band][pt];
+
+ t->skip_eob_node = ((pt == 0) && (band > 0));
+ assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
+
+ if (!dry_run)
+ ++cpi->coef_counts[PLANE_TYPE_UV][band][pt][token];
+ pt = vp8_prev_token_class[token];
+ ++t;
+ } while (c < eob && ++c < seg_eob);
+
+ *tp = t;
+ pt = (c != 0); /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
+ }
}
#if CONFIG_HYBRIDTRANSFORM
-static void tokenize1st_order_ht( MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- int type,
- VP8_COMP *cpi,
- int dry_run) {
+static void tokenize1st_order_ht_4x4(MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ VP8_COMP *cpi,
+ int dry_run) {
unsigned int block;
- const BLOCKD *b;
+ const BLOCKD *b = xd->block;
int pt; /* near block/prev token context index */
- int c;
- int token;
TOKENEXTRA *t = *tp;/* store tokens starting here */
- const short *qcoeff_ptr;
ENTROPY_CONTEXT * a;
ENTROPY_CONTEXT * l;
- int band, rc, v;
- int tmp1, tmp2;
-
int const *pt_scan ;
-
int seg_eob = 16;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -450,33 +447,33 @@ static void tokenize1st_order_ht( MACROBLOCKD *xd,
seg_eob = get_segdata( xd, segment_id, SEG_LVL_EOB );
}
- b = xd->block;
-
/* Luma */
for (block = 0; block < 16; block++, b++) {
- B_PREDICTION_MODE b_mode;
+ const int eob = b->eob;
TX_TYPE tx_type = DCT_DCT;
+ const int tmp1 = vp8_block2above[block];
+ const int tmp2 = vp8_block2left[block];
+ const int16_t *qcoeff_ptr = b->qcoeff;
+ int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
+
+ a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
+ l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
if( xd->mode_info_context->mbmi.mode == B_PRED ) {
- b_mode = b->bmi.as_mode.first;
tx_type = get_tx_type(xd, b);
}
// assign scanning order for luma components coded in intra4x4 mode
- if( (xd->mode_info_context->mbmi.mode == B_PRED) &&
- (type == PLANE_TYPE_Y_WITH_DC) ) {
- switch(b_mode) {
- case B_VE_PRED :
- case B_VR_PRED :
+ if ((xd->mode_info_context->mbmi.mode == B_PRED) &&
+ (type == PLANE_TYPE_Y_WITH_DC)) {
+ switch (tx_type) {
+ case ADST_DCT:
pt_scan = vp8_row_scan;
break;
-
- case B_HE_PRED :
- case B_HD_PRED :
- case B_HU_PRED :
+ case DCT_ADST:
pt_scan = vp8_col_scan;
break;
-
default :
pt_scan = vp8_default_zig_zag1d;
break;
@@ -485,218 +482,58 @@ static void tokenize1st_order_ht( MACROBLOCKD *xd,
pt_scan = vp8_default_zig_zag1d;
}
- tmp1 = vp8_block2above[block];
- tmp2 = vp8_block2left[block];
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
- l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- c = type ? 0 : 1;
+ do {
+ const int band = vp8_coef_bands[c];
+ int token;
- for (; c < b->eob; c++) {
- rc = pt_scan[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
+ if (c < eob) {
+ const int rc = pt_scan[c];
+ const int v = qcoeff_ptr[rc];
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ token = DCT_EOB_TOKEN;
t->Token = token;
if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs [type] [band] [pt];
+ t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
else
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
- t->skip_eob_node = pt == 0 &&
- ((band > 0 && type > 0) || (band > 1 && type == 0));
+ t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
+ (band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts[type] [band] [pt] [token];
+ ++cpi->hybrid_coef_counts[type][band][pt][token];
else
- ++cpi->coef_counts [type] [band] [pt] [token];
+ ++cpi->coef_counts [type][band][pt][token];
}
-
pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs [type] [band] [pt];
- else
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
-
- t->skip_eob_node = pt == 0 &&
- ((band > 0 && type > 0) || (band > 1 && type == 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
- if (!dry_run) {
- if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts[type] [band] [pt] [DCT_EOB_TOKEN];
- else
- ++cpi->coef_counts [type] [band] [pt] [DCT_EOB_TOKEN];
- }
-
- t++;
- }
+ ++t;
+ } while (c < eob && ++c < seg_eob);
*tp = t;
pt = (c != !type); /* 0 <-> all coeff data is zero */
*a = *l = pt;
}
- // reset scanning order for chroma components
- pt_scan = vp8_default_zig_zag1d ;
-
- /* Chroma */
- for (block = 16; block < 24; block++, b++) {
- tmp1 = vp8_block2above[block];
- tmp2 = vp8_block2left[block];
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
- l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
-
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- for (c = 0; c < b->eob; c++) {
- rc = pt_scan[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
-
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
-
- t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [token];
-
- pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [DCT_EOB_TOKEN];
- t++;
- }
-
- *tp = t;
- pt = (c != 0); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
- }
+ tokenize1st_order_chroma_4x4(xd, tp, cpi, dry_run);
}
#endif
-
-static void tokenize1st_order_chroma
-(
- MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- VP8_COMP *cpi,
- int dry_run) {
- unsigned int block;
- const BLOCKD *b;
- int pt; /* near block/prev token context index */
- int c;
- int token;
- TOKENEXTRA *t = *tp;/* store tokens starting here */
- const short *qcoeff_ptr;
- ENTROPY_CONTEXT *a;
- ENTROPY_CONTEXT *l;
- int band, rc, v;
- int tmp1, tmp2;
-
- int seg_eob = 16;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
-
- if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
- seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
- }
-
- b = xd->block;
- b += 16;
-
- /* Chroma */
- for (block = 16; block < 24; block++, b++) {
- tmp1 = vp8_block2above[block];
- tmp2 = vp8_block2left[block];
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
- l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
-
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- for (c = 0; c < b->eob; c++) {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
-
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
-
- t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [token];
-
- pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [DCT_EOB_TOKEN];
-
- t++;
- }
- *tp = t;
- pt = (c != 0); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
- }
-}
-
-static void tokenize1st_order_b
-(
- MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- VP8_COMP *cpi,
- int dry_run) {
+static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ VP8_COMP *cpi,
+ int dry_run) {
unsigned int block;
- const BLOCKD *b;
+ const BLOCKD *b = xd->block;
int pt; /* near block/prev token context index */
- int band, rc, v, c, token;
TOKENEXTRA *t = *tp;/* store tokens starting here */
- const short *qcoeff_ptr;
ENTROPY_CONTEXT *a, *l;
-
int seg_eob = 16;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -704,108 +541,52 @@ static void tokenize1st_order_b
seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
}
- b = xd->block;
/* Luma */
for (block = 0; block < 16; block++, b++) {
- qcoeff_ptr = b->qcoeff;
+ const int eob = b->eob;
+ const int16_t *qcoeff_ptr = b->qcoeff;
+ int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
+
a = (ENTROPY_CONTEXT *)xd->above_context + vp8_block2above[block];
l = (ENTROPY_CONTEXT *)xd->left_context + vp8_block2left[block];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- c = type ? 0 : 1;
-
assert(b->eob <= 16);
- for (; c < b->eob; c++) {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
+ do {
+ const int band = vp8_coef_bands[c];
+ int token;
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
+ if (c < eob) {
+ const int rc = vp8_default_zig_zag1d[c];
+ const int v = qcoeff_ptr[rc];
+
+ t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
+ token = vp8_dct_value_tokens_ptr[v].Token;
+ } else
+ token = DCT_EOB_TOKEN;
t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
- t->skip_eob_node = pt == 0 &&
- ((band > 0 && type > 0) || (band > 1 && type == 0));
+ t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
+ (band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run)
- ++cpi->coef_counts [type] [band] [pt] [token];
-
+ ++cpi->coef_counts[type][band][pt][token];
pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
-
- t->skip_eob_node = pt == 0 &&
- ((band > 0 && type > 0) || (band > 1 && type == 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
- if (!dry_run)
- ++cpi->coef_counts [type] [band] [pt] [DCT_EOB_TOKEN];
+ ++t;
+ } while (c < eob && ++c < seg_eob);
- t++;
- }
*tp = t;
pt = (c != !type); /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
}
- /* Chroma */
- for (block = 16; block < 24; block++, b++) {
- qcoeff_ptr = b->qcoeff;
- a = (ENTROPY_CONTEXT *)xd->above_context + vp8_block2above[block];
- l = (ENTROPY_CONTEXT *)xd->left_context + vp8_block2left[block];
-
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- assert(b->eob <= 16);
- for (c = 0; c < b->eob; c++) {
- rc = vp8_default_zig_zag1d[c];
- band = vp8_coef_bands[c];
- v = qcoeff_ptr[rc];
-
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
-
- t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [token];
-
- pt = vp8_prev_token_class[token];
- t++;
- }
-
- if (c < seg_eob) {
- band = vp8_coef_bands[c];
- t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt];
-
- t->skip_eob_node = ((pt == 0) && (band > 0));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run)
- ++cpi->coef_counts [2] [band] [pt] [DCT_EOB_TOKEN];
-
- t++;
- }
- *tp = t;
- pt = (c != 0); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
- }
+ tokenize1st_order_chroma_4x4(xd, tp, cpi, dry_run);
}
-
-int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
+int mby_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block) {
int skip = 1;
int i = 0;
@@ -820,7 +601,7 @@ int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
return skip;
}
-int mbuv_is_skippable(MACROBLOCKD *xd) {
+int mbuv_is_skippable_4x4(MACROBLOCKD *xd) {
int skip = 1;
int i;
@@ -829,9 +610,9 @@ int mbuv_is_skippable(MACROBLOCKD *xd) {
return skip;
}
-int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
- return (mby_is_skippable(xd, has_y2_block) &
- mbuv_is_skippable(xd));
+int mb_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block) {
+ return (mby_is_skippable_4x4(xd, has_y2_block) &
+ mbuv_is_skippable_4x4(xd));
}
int mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block) {
@@ -860,7 +641,7 @@ int mb_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block) {
int mb_is_skippable_8x8_4x4uv(MACROBLOCKD *xd, int has_y2_block) {
return (mby_is_skippable_8x8(xd, has_y2_block) &
- mbuv_is_skippable(xd));
+ mbuv_is_skippable_4x4(xd));
}
int mby_is_skippable_16x16(MACROBLOCKD *xd) {
@@ -880,7 +661,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
MACROBLOCKD *xd,
TOKENEXTRA **t,
int dry_run) {
- int plane_type;
+ PLANE_TYPE plane_type;
int has_y2_block;
int b;
int tx_size = xd->mode_info_context->mbmi.txfm_size;
@@ -905,10 +686,10 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
} else
skip_inc = 0;
- has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED
+ has_y2_block = (tx_size != TX_16X16
+ && xd->mode_info_context->mbmi.mode != B_PRED
&& xd->mode_info_context->mbmi.mode != I8X8_PRED
&& xd->mode_info_context->mbmi.mode != SPLITMV);
- if (tx_size == TX_16X16) has_y2_block = 0; // Because of inter frames
switch (tx_size) {
case TX_16X16:
@@ -922,7 +703,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
break;
default:
- xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(xd, has_y2_block);
+ xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_4x4(xd, has_y2_block);
break;
}
@@ -930,16 +711,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
if (!dry_run)
cpi->skip_true_count[mb_skip_context] += skip_inc;
if (!cpi->common.mb_no_coeff_skip) {
- if (tx_size == TX_16X16)
- vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
- else
- if (tx_size == TX_8X8) {
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
- vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
- else
- vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
- } else
- vp8_stuff_mb(cpi, xd, t, dry_run);
+ vp8_stuff_mb(cpi, xd, t, dry_run);
} else {
vp8_fix_contexts(xd);
}
@@ -951,36 +723,37 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
if (!dry_run)
cpi->skip_false_count[mb_skip_context] += skip_inc;
- plane_type = 3;
if (has_y2_block) {
if (tx_size == TX_8X8) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
tokenize2nd_order_b_8x8(xd,
- xd->block + 24, t, 1, xd->frame_type,
+ xd->block + 24, t,
A + vp8_block2above_8x8[24],
L + vp8_block2left_8x8[24],
cpi, dry_run);
} else
- tokenize2nd_order_b(xd, t, cpi, dry_run);
+ tokenize2nd_order_b_4x4(xd, t, cpi, dry_run);
- plane_type = 0;
- }
+ plane_type = PLANE_TYPE_Y_NO_DC;
+ } else
+ plane_type = PLANE_TYPE_Y_WITH_DC;
if (tx_size == TX_16X16) {
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context;
- tokenize1st_order_b_16x16(xd, xd->block, t, 3,
- xd->frame_type, A, L, cpi, dry_run);
+ tokenize1st_order_b_16x16(xd, xd->block, t, PLANE_TYPE_Y_WITH_DC,
+ A, L, cpi, dry_run);
for (b = 1; b < 16; b++) {
*(A + vp8_block2above[b]) = *(A);
*(L + vp8_block2left[b] ) = *(L);
}
for (b = 16; b < 24; b += 4) {
- tokenize1st_order_b_8x8(xd, xd->block + b, t, 2, xd->frame_type,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], cpi, dry_run);
+ tokenize1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_UV,
+ A + vp8_block2above_8x8[b],
+ L + vp8_block2left_8x8[b], cpi, dry_run);
*(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
}
@@ -990,13 +763,9 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
else if (tx_size == TX_8X8) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
- plane_type = PLANE_TYPE_Y_WITH_DC;
- }
for (b = 0; b < 16; b += 4) {
tokenize1st_order_b_8x8(xd,
- xd->block + b,
- t, plane_type, xd->frame_type,
+ xd->block + b, t, plane_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@@ -1004,14 +773,12 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
}
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
- tokenize1st_order_chroma(xd, t, PLANE_TYPE_UV, cpi, dry_run);
+ tokenize1st_order_chroma_4x4(xd, t, cpi, dry_run);
} else {
for (b = 16; b < 24; b += 4) {
- tokenize1st_order_b_8x8(xd,
- xd->block + b, t, 2, xd->frame_type,
+ tokenize1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_UV,
A + vp8_block2above_8x8[b],
- L + vp8_block2left_8x8[b],
- cpi, dry_run);
+ L + vp8_block2left_8x8[b], cpi, dry_run);
*(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
}
@@ -1019,10 +786,10 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
} else {
#if CONFIG_HYBRIDTRANSFORM
if (active_ht)
- tokenize1st_order_ht(xd, t, plane_type, cpi, dry_run);
+ tokenize1st_order_ht_4x4(xd, t, plane_type, cpi, dry_run);
else
#endif
- tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
+ tokenize1st_order_b_4x4(xd, t, plane_type, cpi, dry_run);
}
if (dry_run)
*t = t_backup;
@@ -1271,73 +1038,60 @@ void print_context_counters() {
}
#endif
-
void vp8_tokenize_initialize() {
fill_value_tokens();
}
-
-static __inline void stuff2nd_order_b_8x8(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static __inline void stuff2nd_order_b_8x8(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) type;
(void) b;
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs_8x8 [1] [0] [pt];
+ t->context_tree = cpi->common.fc.coef_probs_8x8[PLANE_TYPE_Y2][0][pt];
// t->section = 11;
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run)
- ++cpi->coef_counts_8x8 [1] [0] [pt] [DCT_EOB_TOKEN];
+ ++cpi->coef_counts_8x8[PLANE_TYPE_Y2][0][pt][DCT_EOB_TOKEN];
pt = 0;
*a = *l = pt;
-
}
-static __inline void stuff1st_order_b_8x8
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static __inline void stuff1st_order_b_8x8(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
#if CONFIG_HYBRIDTRANSFORM8X8
TX_TYPE tx_type = xd->mode_info_context->mbmi.mode == I8X8_PRED ?
get_tx_type(xd, b) : DCT_DCT;
#endif
-
+ const int band = vp8_coef_bands_8x8[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) type;
(void) b;
t->Token = DCT_EOB_TOKEN;
#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8 [0] [1] [pt];
+ t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
else
#endif
- t->context_tree = cpi->common.fc.coef_probs_8x8 [0] [1] [pt];
+ t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
// t->section = 8;
t->skip_eob_node = 0;
++t;
@@ -1345,66 +1099,61 @@ static __inline void stuff1st_order_b_8x8
if (!dry_run) {
#if CONFIG_HYBRIDTRANSFORM8X8
-    if (tx_type == DCT_DCT)
+    if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts_8x8[0] [1] [pt] [DCT_EOB_TOKEN];
+ ++cpi->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
else
#endif
- ++cpi->coef_counts_8x8[0] [1] [pt] [DCT_EOB_TOKEN];
+ ++cpi->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
-
}
-static __inline
-void stuff1st_order_buv_8x8
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static __inline void stuff1st_order_buv_8x8(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) type;
(void) b;
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs_8x8 [2] [0] [pt];
+ t->context_tree = cpi->common.fc.coef_probs_8x8[PLANE_TYPE_UV][0][pt];
// t->section = 13;
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run)
- ++cpi->coef_counts_8x8[2] [0] [pt] [DCT_EOB_TOKEN];
+ ++cpi->coef_counts_8x8[PLANE_TYPE_UV][0][pt][DCT_EOB_TOKEN];
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
}
-void vp8_stuff_mb_8x8(VP8_COMP *cpi,
- MACROBLOCKD *xd,
- TOKENEXTRA **t,
- int dry_run) {
+static void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd,
+ TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
- int plane_type;
+ PLANE_TYPE plane_type;
int b;
TOKENEXTRA *t_backup = *t;
+ const int has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED
+ && xd->mode_info_context->mbmi.mode != I8X8_PRED
+ && xd->mode_info_context->mbmi.mode != SPLITMV);
- stuff2nd_order_b_8x8(xd, xd->block + 24, t, 1, xd->frame_type,
- A + vp8_block2above_8x8[24],
- L + vp8_block2left_8x8[24], cpi, dry_run);
- plane_type = 0;
+ if (has_y2_block) {
+ stuff2nd_order_b_8x8(xd, xd->block + 24, t,
+ A + vp8_block2above_8x8[24],
+ L + vp8_block2left_8x8[24], cpi, dry_run);
+ plane_type = PLANE_TYPE_Y_NO_DC;
+ } else {
+ plane_type = PLANE_TYPE_Y_WITH_DC;
+ }
for (b = 0; b < 16; b += 4) {
- stuff1st_order_b_8x8(xd, xd->block + b, t, plane_type, xd->frame_type,
+ stuff1st_order_b_8x8(xd, xd->block + b, t, plane_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@@ -1413,7 +1162,7 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
}
for (b = 16; b < 24; b += 4) {
- stuff1st_order_buv_8x8(xd, xd->block + b, t, 2, xd->frame_type,
+ stuff1st_order_buv_8x8(xd, xd->block + b, t,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
@@ -1424,62 +1173,60 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
*t = t_backup;
}
-static __inline
-void stuff1st_order_b_16x16(MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- const FRAME_TYPE frametype,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run){
- int pt; /* near block/prev token context index */
- TOKENEXTRA *t = *tp; /* store tokens starting here */
+static __inline void stuff1st_order_b_16x16(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
+ int pt; /* near block/prev token context index */
+ TOKENEXTRA *t = *tp; /* store tokens starting here */
#if CONFIG_HYBRIDTRANSFORM16X16
- TX_TYPE tx_type = get_tx_type(xd, b);
+ TX_TYPE tx_type = get_tx_type(xd, b);
#endif
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- (void) frametype;
- (void) b;
+ const int band = vp8_coef_bands_16x16[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ (void) b;
- t->Token = DCT_EOB_TOKEN;
+ t->Token = DCT_EOB_TOKEN;
#if CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[3][1][pt];
- else
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
+ else
#endif
- t->context_tree = cpi->common.fc.coef_probs_16x16[3][1][pt];
- t->skip_eob_node = 0;
- ++t;
- *tp = t;
- if (!dry_run) {
+ t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
+ t->skip_eob_node = 0;
+ ++t;
+ *tp = t;
+ if (!dry_run) {
#if CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts_16x16[3][1][pt][DCT_EOB_TOKEN];
- else
+ if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
+ else
#endif
- ++cpi->coef_counts_16x16[3][1][pt][DCT_EOB_TOKEN];
- }
- pt = 0; /* 0 <-> all coeff data is zero */
- *a = *l = pt;
+ ++cpi->coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
+ }
+ pt = 0; /* 0 <-> all coeff data is zero */
+ *a = *l = pt;
}
-void vp8_stuff_mb_16x16(VP8_COMP *cpi,
- MACROBLOCKD *xd,
- TOKENEXTRA **t,
- int dry_run) {
+static void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
+ TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context;
int b, i;
TOKENEXTRA *t_backup = *t;
- stuff1st_order_b_16x16(xd, xd->block, t, xd->frame_type, A, L, cpi, dry_run);
+ stuff1st_order_b_16x16(xd, xd->block, t, PLANE_TYPE_Y_WITH_DC,
+ A, L, cpi, dry_run);
for (i = 1; i < 16; i++) {
*(A + vp8_block2above[i]) = *(A);
*(L + vp8_block2left[i]) = *(L);
}
for (b = 16; b < 24; b += 4) {
- stuff1st_order_buv_8x8(xd, xd->block + b, t, 2, xd->frame_type,
+ stuff1st_order_buv_8x8(xd, xd->block + b, t,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
@@ -1492,138 +1239,135 @@ void vp8_stuff_mb_16x16(VP8_COMP *cpi,
*t = t_backup;
}
-static __inline void stuff2nd_order_b
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static __inline void stuff2nd_order_b_4x4(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [1] [0] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[PLANE_TYPE_Y2][0][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run)
- ++cpi->coef_counts [1] [0] [pt] [DCT_EOB_TOKEN];
+      ++cpi->coef_counts[PLANE_TYPE_Y2][0][pt][DCT_EOB_TOKEN];
pt = 0;
*a = *l = pt;
-
}
-static __inline void stuff1st_order_b(MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+static __inline void stuff1st_order_b_4x4(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ PLANE_TYPE type,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type = get_tx_type(xd, b);
#endif
+ const int band = vp8_coef_bands[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
t->Token = DCT_EOB_TOKEN;
#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs [0] [1] [pt];
+ t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
else
#endif
- t->context_tree = cpi->common.fc.coef_probs [0] [1] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts[0] [1] [pt] [DCT_EOB_TOKEN];
+ ++cpi->hybrid_coef_counts[type][band][pt][DCT_EOB_TOKEN];
else
#endif
- ++cpi->coef_counts[0] [1] [pt] [DCT_EOB_TOKEN];
+ ++cpi->coef_counts[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
-
}
-static __inline
-void stuff1st_order_buv
-(
- MACROBLOCKD *xd,
- const BLOCKD *const b,
- TOKENEXTRA **tp,
- ENTROPY_CONTEXT *a,
- ENTROPY_CONTEXT *l,
- VP8_COMP *cpi,
- int dry_run) {
+
+static __inline void stuff1st_order_buv_4x4(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
+ ENTROPY_CONTEXT *a,
+ ENTROPY_CONTEXT *l,
+ VP8_COMP *cpi,
+ int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [2] [0] [pt];
+ t->context_tree = cpi->common.fc.coef_probs[PLANE_TYPE_UV][0][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run)
- ++cpi->coef_counts[2] [0] [pt] [DCT_EOB_TOKEN];
+ ++cpi->coef_counts[PLANE_TYPE_UV][0][pt][DCT_EOB_TOKEN];
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
}
-void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
- TOKENEXTRA **t, int dry_run) {
+static void vp8_stuff_mb_4x4(VP8_COMP *cpi, MACROBLOCKD *xd,
+ TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
int b;
TOKENEXTRA *t_backup = *t;
+ PLANE_TYPE plane_type;
+ const int has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED
+ && xd->mode_info_context->mbmi.mode != I8X8_PRED
+ && xd->mode_info_context->mbmi.mode != SPLITMV);
- stuff2nd_order_b(xd, xd->block + 24, t,
- A + vp8_block2above[24],
- L + vp8_block2left[24],
- cpi, dry_run);
+ if (has_y2_block) {
+ stuff2nd_order_b_4x4(xd, xd->block + 24, t,
+ A + vp8_block2above[24],
+ L + vp8_block2left[24],
+ cpi, dry_run);
+ plane_type = PLANE_TYPE_Y_NO_DC;
+ } else {
+ plane_type = PLANE_TYPE_Y_WITH_DC;
+ }
for (b = 0; b < 16; b++)
- stuff1st_order_b(xd, xd->block + b, t,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- cpi, dry_run);
+ stuff1st_order_b_4x4(xd, xd->block + b, t, plane_type,
+ A + vp8_block2above[b],
+ L + vp8_block2left[b],
+ cpi, dry_run);
for (b = 16; b < 24; b++)
- stuff1st_order_buv(xd, xd->block + b, t,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- cpi, dry_run);
+ stuff1st_order_buv_4x4(xd, xd->block + b, t,
+ A + vp8_block2above[b],
+ L + vp8_block2left[b],
+ cpi, dry_run);
if (dry_run)
*t = t_backup;
}
-void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
- MACROBLOCKD *xd,
- TOKENEXTRA **t,
- int dry_run) {
+static void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd,
+ TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
- int plane_type;
int b;
TOKENEXTRA *t_backup = *t;
- stuff2nd_order_b_8x8(xd, xd->block + 24, t, 1, xd->frame_type,
- A + vp8_block2above_8x8[24],
- L + vp8_block2left_8x8[24], cpi, dry_run);
- plane_type = 3;
-
for (b = 0; b < 16; b += 4) {
- stuff1st_order_b_8x8(xd, xd->block + b, t, plane_type, xd->frame_type,
+ stuff1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_Y_WITH_DC,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@@ -1632,15 +1376,31 @@ void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
}
for (b = 16; b < 24; b++)
- stuff1st_order_buv(xd, xd->block + b, t,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- cpi, dry_run);
+ stuff1st_order_buv_4x4(xd, xd->block + b, t,
+ A + vp8_block2above[b],
+ L + vp8_block2left[b],
+ cpi, dry_run);
if (dry_run)
*t = t_backup;
}
+void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
+ TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
+
+ if (tx_size == TX_16X16) {
+ vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
+ } else if (tx_size == TX_8X8) {
+ if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
+ vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
+ } else {
+ vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
+ }
+ } else {
+ vp8_stuff_mb_4x4(cpi, xd, t, dry_run);
+ }
+}
+
void vp8_fix_contexts(MACROBLOCKD *xd) {
/* Clear entropy contexts for Y2 blocks */
if ((xd->mode_info_context->mbmi.mode != B_PRED
diff --git a/vp8/encoder/tokenize.h b/vp8/encoder/tokenize.h
index 7d1b62a79..87cb44bdc 100644
--- a/vp8/encoder/tokenize.h
+++ b/vp8/encoder/tokenize.h
@@ -31,9 +31,9 @@ typedef struct {
int rd_cost_mby(MACROBLOCKD *);
-extern int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block);
-extern int mbuv_is_skippable(MACROBLOCKD *xd);
-extern int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block);
+extern int mby_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block);
+extern int mbuv_is_skippable_4x4(MACROBLOCKD *xd);
+extern int mb_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block);
extern int mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
extern int mbuv_is_skippable_8x8(MACROBLOCKD *xd);
extern int mb_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);