authorYaowu Xu <yaowu@google.com>2012-01-20 15:30:31 -0800
committerYaowu Xu <yaowu@google.com>2012-02-16 15:00:10 -0800
commitb92a96d8adfcdb381d559c65aaa65805e1bc9321 (patch)
treea9b17483944bcf36450a10d8f828b908f380b301 /vp8
parentf90983e16748f8f3a4c18a4c6f8f13e0de4aa4ac (diff)
Reworked context conversion between 8x8 and 4x4
This commit rationalized and simplified the entropy context conversion between MBs using the 8x8 transform and MBs using the 4x4 transform. The old version had a number of oddities in how a 4x4-transform MB's context was used for 8x8 blocks other than the first 8x8 within an MB. Tests showed the change gives a gain of ~0.1% for average PSNR, global PSNR, and SSIM on the limited HD set. Change-Id: I774536c416baa6845aa741f956d8a69fa40e5d47
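
For reference, below is a minimal standalone sketch of the reworked context handling. The table values, the two-input VP8_COMBINEENTROPYCONTEXTS macro, and the copy-to-adjacent-entry update are taken from the diff that follows; the ENTROPY_CONTEXT typedef, the 9-entry plane arrays, and the main() driver are illustrative assumptions, not libvpx code.

#include <stdio.h>

/* Sketch only (not libvpx itself): the per-MB context plane is modeled as a
 * flat array of 9 entries (4 Y + 2 U + 2 V + 1 Y2).  The typedef and driver
 * loop are assumptions; the tables and updates mirror the diff below. */
typedef unsigned char ENTROPY_CONTEXT;

/* New lookup tables from vp8/common/blockd.c: each 8x8 block (b = 0, 4, 8,
 * 12 for luma; 16, 20 for chroma) maps to an even context slot; block 24 is
 * the second-order (Y2) block. */
static const unsigned char vp8_block2left_8x8[25] = {
    0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2,
    4, 4, 4, 4, 6, 6, 6, 6, 8
};
static const unsigned char vp8_block2above_8x8[25] = {
    0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2,
    4, 4, 4, 4, 6, 6, 6, 6, 8
};

/* The ordinary two-input combiner now serves 8x8 blocks as well,
 * replacing the old four-input VP8_COMBINEENTROPYCONTEXTS_8x8. */
#define VP8_COMBINEENTROPYCONTEXTS(Dest, A, B) \
    Dest = ((A) != 0) + ((B) != 0);

int main(void) {
    ENTROPY_CONTEXT above[9] = {0}, left[9] = {0};
    int b, pt;

    /* Walk the four luma 8x8 blocks the way the reworked loops do. */
    for (b = 0; b < 16; b += 4) {
        ENTROPY_CONTEXT *a = above + vp8_block2above_8x8[b];
        ENTROPY_CONTEXT *l = left + vp8_block2left_8x8[b];

        VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
        printf("block %2d: context %d\n", b, pt);

        *a = *l = 1;    /* pretend the block had nonzero coefficients */
        *(a + 1) = *a;  /* propagate to the second 4x4 column/row so a   */
        *(l + 1) = *l;  /* 4x4-coded neighbour sees the same context     */
    }
    return 0;
}

This single copy to the adjacent entry replaces the old per-block special cases (b == 0, 4, 8, 12) that scattered the result across the 4x4 index tables.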
Diffstat (limited to 'vp8')
-rw-r--r--vp8/common/blockd.c14
-rw-r--r--vp8/common/blockd.h3
-rw-r--r--vp8/decoder/detokenize.c56
-rw-r--r--vp8/encoder/bitstream.c1
-rw-r--r--vp8/encoder/encodemb.c143
-rw-r--r--vp8/encoder/rdopt.c14
-rw-r--r--vp8/encoder/tokenize.c162
7 files changed, 84 insertions, 309 deletions
diff --git a/vp8/common/blockd.c b/vp8/common/blockd.c
index 1fc3cd0ca..661a77fe5 100644
--- a/vp8/common/blockd.c
+++ b/vp8/common/blockd.c
@@ -12,6 +12,7 @@
#include "blockd.h"
#include "vpx_mem/vpx_mem.h"
+
const unsigned char vp8_block2left[25] =
{
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
@@ -20,3 +21,16 @@ const unsigned char vp8_block2above[25] =
{
0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8
};
+
+
+const unsigned char vp8_block2left_8x8[25] =
+{
+ 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8
+};
+const unsigned char vp8_block2above_8x8[25] =
+{
+ 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8
+};
+
+
+
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index cadc44229..7850fb065 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -65,6 +65,9 @@ typedef struct
extern const unsigned char vp8_block2left[25];
extern const unsigned char vp8_block2above[25];
+extern const unsigned char vp8_block2left_8x8[25];
+extern const unsigned char vp8_block2above_8x8[25];
+
#define VP8_COMBINEENTROPYCONTEXTS( Dest, A, B) \
Dest = ((A)!=0) + ((B)!=0);
diff --git a/vp8/decoder/detokenize.c b/vp8/decoder/detokenize.c
index 905c29ec4..d7dc143c7 100644
--- a/vp8/decoder/detokenize.c
+++ b/vp8/decoder/detokenize.c
@@ -321,20 +321,9 @@ int vp8_decode_mb_tokens_8x8(VP8D_COMP *dx, MACROBLOCKD *x)
coef_probs = oc->fc.coef_probs_8x8 [type] [ 0 ] [0];
BLOCK_LOOP_8x8:
- a = A + vp8_block2above[i];
- l = L + vp8_block2left[i];
-
- if(i < 16)
- {
- a1 = A + vp8_block2above[i+1];
- l1 = L + vp8_block2left[i+4];
- }
- else if(i<24)
- {
- a1 = A + vp8_block2above[i+1];
- l1 = L + vp8_block2left[i+2];
+ a = A + vp8_block2above_8x8[i];
+ l = L + vp8_block2left_8x8[i];
- }
c = (INT16)(!type);
// Dest = ((A)!=0) + ((B)!=0);
@@ -350,7 +339,7 @@ BLOCK_LOOP_8x8:
}
else
{
- VP8_COMBINEENTROPYCONTEXTS_8x8(v, *a, *l, *a1, *l1);
+ VP8_COMBINEENTROPYCONTEXTS(v, *a, *l);
if ( segfeature_active( x, segment_id, SEG_LVL_EOB ) )
{
seg_eob = get_segdata( x, segment_id, SEG_LVL_EOB );
@@ -549,45 +538,10 @@ ONE_CONTEXT_NODE_0_8x8_:
BLOCK_FINISHED_8x8:
*a = *l = ((eobs[i] = c) != !type); // any nonzero data?
- /*if (i!=24) {
- *(A + vp8_block2above[i+1]) = *(A + vp8_block2above[i+2]) = *(A + vp8_block2above[i+3]) = *a;
- *(L + vp8_block2left[i+1]) = *(L + vp8_block2left[i+2]) = *(L + vp8_block2left[i+3]) = *l;
- }*/
-
if (i!=24)
{
- if(i==0)
- {
- *(A + vp8_block2above[1]) = *(A + vp8_block2above[4]) = *(A + vp8_block2above[5]) = *a;
- *(L + vp8_block2left[1]) = *(L + vp8_block2left[4]) = *(L + vp8_block2left[5]) = *l;
- }
- else if(i==4)
- {
- *(A + vp8_block2above[2]) = *(A + vp8_block2above[3]) = *(A + vp8_block2above[6]) = *(A + vp8_block2above[7]) = *a;
- *(L + vp8_block2left[2]) = *(L + vp8_block2left[3]) = *(L + vp8_block2left[6]) = *(L + vp8_block2left[7]) = *l;
- *(A + vp8_block2above[4]) = *(A + vp8_block2above[1]);
- *(L + vp8_block2left[4]) = *(L + vp8_block2left[1]);
- }
- else if(i==8)
- {
- *(A + vp8_block2above[9]) = *(A + vp8_block2above[12]) = *(A + vp8_block2above[13]) = *a;
- *(L + vp8_block2left[9]) = *(L + vp8_block2left[12]) = *(L + vp8_block2left[13]) = *l;
-
- }
- else if(i==12)
- {
- *(A + vp8_block2above[10]) = *(A + vp8_block2above[11]) = *(A + vp8_block2above[14]) = *(A + vp8_block2above[15]) = *a;
- *(L + vp8_block2left[10]) = *(L + vp8_block2left[11]) = *(L + vp8_block2left[14]) = *(L + vp8_block2left[15]) = *l;
- *(A + vp8_block2above[12]) = *(A + vp8_block2above[8]);
- *(L + vp8_block2left[12]) = *(L + vp8_block2left[8]);
-
- }
- else
- {
- *(A + vp8_block2above[i+1]) = *(A + vp8_block2above[i+2]) = *(A + vp8_block2above[i+3]) = *a;
- *(L + vp8_block2left[i+1]) = *(L + vp8_block2left[i+2]) = *(L + vp8_block2left[i+3]) = *l;
-
- }
+ *(a + 1) = *a;
+ *(l + 1) = *l;
}
eobtotal += c;
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 078a29791..c2613bfbd 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -2429,6 +2429,7 @@ static void update_coef_probs(VP8_COMP *cpi)
((vp8_cost_one(upd) - vp8_cost_zero(upd)) >> 8);
const int s = old_b - new_b - update_b;
const int u = s > 0 ? 1 : 0;
+
#ifdef ENTROPY_STATS
++ tree_update_hist_8x8 [i][j][k][t] [u];
#endif
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index b8f92c970..22bf92b07 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -656,7 +656,6 @@ static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *x, int type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
{
int sum=0;
- int i;
BLOCKD *bd = &x->block[24];
int coef;
@@ -802,7 +801,6 @@ void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
#if CONFIG_T8X8
void optimize_b_8x8(MACROBLOCK *mb, int i, int type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- ENTROPY_CONTEXT *a1, ENTROPY_CONTEXT *l1,
const VP8_ENCODER_RTCD *rtcd)
{
BLOCK *b;
@@ -1010,7 +1008,7 @@ void optimize_b_8x8(MACROBLOCK *mb, int i, int type,
/* Now pick the best path through the whole trellis. */
band = vp8_coef_bands_8x8[i + 1];
- VP8_COMBINEENTROPYCONTEXTS_8x8(pt, *a, *l, *a1, *l1);
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
error0 = tokens[next][0].error;
@@ -1051,7 +1049,6 @@ void optimize_mb_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
{
int b;
int type;
- int has_2nd_order;
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
@@ -1062,79 +1059,28 @@ void optimize_mb_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
- has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- &&x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- type = has_2nd_order ? 0 : 3;
-
+ type = 0;
for (b = 0; b < 16; b+=4)
{
optimize_b_8x8(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b],
- ta + vp8_block2above[b+1], tl + vp8_block2left[b+4],
- rtcd);
-
- if(b==0)
- {
- *(ta + vp8_block2above[1]) = *(ta + vp8_block2above[4]) = *(ta + vp8_block2above[5]) = *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[1]) = *(tl + vp8_block2left[4]) = *(tl + vp8_block2left[5]) = *(tl + vp8_block2left[b]);
- }
- else if(b==4)
- {
- *(ta + vp8_block2above[2]) = *(ta + vp8_block2above[3]) = *(ta + vp8_block2above[6]) = *(ta + vp8_block2above[7]) = *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[2]) = *(tl + vp8_block2left[3]) = *(tl + vp8_block2left[6]) = *(tl + vp8_block2left[7]) = *(tl + vp8_block2left[b]);
- *(ta + vp8_block2above[4]) = *(ta + vp8_block2above[1]);
- *(tl + vp8_block2left[4]) = *(tl + vp8_block2left[1]);
- }
- else if(b==8)
- {
- *(ta + vp8_block2above[9]) = *(ta + vp8_block2above[12]) = *(ta + vp8_block2above[13]) = *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[9]) = *(tl + vp8_block2left[12]) = *(tl + vp8_block2left[13]) = *(tl + vp8_block2left[b]);
-
- }
- else if(b==12)
- {
- *(ta + vp8_block2above[10]) = *(ta + vp8_block2above[11]) = *(ta + vp8_block2above[14]) = *(ta + vp8_block2above[15]) = *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[10]) = *(tl + vp8_block2left[11]) = *(tl + vp8_block2left[14]) = *(tl + vp8_block2left[15]) = *(tl + vp8_block2left[b]);
- *(ta + vp8_block2above[12]) = *(ta + vp8_block2above[8]);
- *(tl + vp8_block2left[12]) = *(tl + vp8_block2left[8]);
-
- }
-
-
-
- }
-
- for (b = 16; b < 20; b+=4)
- {
- optimize_b_8x8(x, b, PLANE_TYPE_UV, //vp8_block2type[b],
- ta + vp8_block2above[b], tl + vp8_block2left[b],
- ta + vp8_block2above[b+1], tl + vp8_block2left[b+2],
+ ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
rtcd);
- *(ta + vp8_block2above[b+1]) = *(ta + vp8_block2above[b+2]) = *(ta + vp8_block2above[b+3]) =
- *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[b+1]) = *(tl + vp8_block2left[b+2]) = *(tl + vp8_block2left[b+3]) =
- *(tl + vp8_block2left[b]);
-
+ *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
+ *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b] );
}
- for (b = 20; b < 24; b+=4)
+ for (b = 16; b < 24; b+=4)
{
- optimize_b_8x8(x, b, PLANE_TYPE_UV, //vp8_block2type[b],
- ta + vp8_block2above[b], tl + vp8_block2left[b],
- ta + vp8_block2above[b+1], tl + vp8_block2left[b+2],
+ optimize_b_8x8(x, b, PLANE_TYPE_UV,
+ ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
rtcd);
- *(ta + vp8_block2above[b+1]) = *(ta + vp8_block2above[b+2]) = *(ta + vp8_block2above[b+3]) =
- *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[b+1]) = *(tl + vp8_block2left[b+2]) = *(tl + vp8_block2left[b+3]) =
- *(tl + vp8_block2left[b]);
-
+ *(ta + vp8_block2above_8x8[b]+1) = *(ta + vp8_block2above_8x8[b]);
+ *(tl + vp8_block2left_8x8[b]+1 ) = *(tl + vp8_block2left_8x8[b]);
}
//8x8 always have 2nd roder haar block
check_reset_8x8_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
- ta + vp8_block2above[24], tl + vp8_block2left[24]);
-
+ ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
}
@@ -1160,50 +1106,18 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
-
- has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
- && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
- type = has_2nd_order ? 0 : 3;
-
+ type = 0;
for (b = 0; b < 16; b+=4)
{
optimize_b_8x8(x, b, type,
ta + vp8_block2above[b], tl + vp8_block2left[b],
- ta + vp8_block2above[b+1], tl + vp8_block2left[b+4],
rtcd);
- if(b==0)
- {
- *(ta + vp8_block2above[1]) = *(ta + vp8_block2above[4]) = *(ta + vp8_block2above[5]) = *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[1]) = *(tl + vp8_block2left[4]) = *(tl + vp8_block2left[5]) = *(tl + vp8_block2left[b]);
- }
- else if(b==4)
- {
- *(ta + vp8_block2above[2]) = *(ta + vp8_block2above[3]) = *(ta + vp8_block2above[6]) = *(ta + vp8_block2above[7]) = *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[2]) = *(tl + vp8_block2left[3]) = *(tl + vp8_block2left[6]) = *(tl + vp8_block2left[7]) = *(tl + vp8_block2left[b]);
- *(ta + vp8_block2above[4]) = *(ta + vp8_block2above[1]);
- *(tl + vp8_block2left[4]) = *(tl + vp8_block2left[1]);
- }
- else if(b==8)
- {
- *(ta + vp8_block2above[9]) = *(ta + vp8_block2above[12]) = *(ta + vp8_block2above[13]) = *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[9]) = *(tl + vp8_block2left[12]) = *(tl + vp8_block2left[13]) = *(tl + vp8_block2left[b]);
-
- }
- else if(b==12)
- {
- *(ta + vp8_block2above[10]) = *(ta + vp8_block2above[11]) = *(ta + vp8_block2above[14]) = *(ta + vp8_block2above[15]) = *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[10]) = *(tl + vp8_block2left[11]) = *(tl + vp8_block2left[14]) = *(tl + vp8_block2left[15]) = *(tl + vp8_block2left[b]);
- *(ta + vp8_block2above[12]) = *(ta + vp8_block2above[8]);
- *(tl + vp8_block2left[12]) = *(tl + vp8_block2left[8]);
-
- }
-
-
+ *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
+ *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b] );
}
//8x8 always have 2nd roder haar block
check_reset_8x8_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
- ta + vp8_block2above[24], tl + vp8_block2left[24]);
+ ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
}
@@ -1226,30 +1140,13 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
- for (b = 16; b < 20; b+=4)
+ for (b = 16; b < 24; b+=4)
{
- optimize_b_8x8(x, b, PLANE_TYPE_UV, //vp8_block2type[b],
- ta + vp8_block2above[b], tl + vp8_block2left[b],
- ta + vp8_block2above[b+1], tl + vp8_block2left[b+2],
+ optimize_b_8x8(x, b, PLANE_TYPE_UV,
+ ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
rtcd);
- *(ta + vp8_block2above[b+1]) = *(ta + vp8_block2above[b+2]) = *(ta + vp8_block2above[b+3]) =
- *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[b+1]) = *(tl + vp8_block2left[b+2]) = *(tl + vp8_block2left[b+3]) =
- *(tl + vp8_block2left[b]);
-
- }
-
- for (b = 20; b < 24; b+=4)
- {
- optimize_b_8x8(x, b, PLANE_TYPE_UV, //vp8_block2type[b],
- ta + vp8_block2above[b], tl + vp8_block2left[b],
- ta + vp8_block2above[b+1], tl + vp8_block2left[b+2],
- rtcd);
- *(ta + vp8_block2above[b+1]) = *(ta + vp8_block2above[b+2]) = *(ta + vp8_block2above[b+3]) =
- *(ta + vp8_block2above[b]);
- *(tl + vp8_block2left[b+1]) = *(tl + vp8_block2left[b+2]) = *(tl + vp8_block2left[b+3]) =
- *(tl + vp8_block2left[b]);
-
+ *(ta + vp8_block2above_8x8[b]+1) = *(ta + vp8_block2above_8x8[b]);
+ *(tl + vp8_block2left_8x8[b]+1 ) = *(tl + vp8_block2left_8x8[b]);
}
}
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 61ee7fe5c..73cd21e2a 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -690,8 +690,7 @@ static int cost_coeffs_2x2(MACROBLOCK *mb,
static int cost_coeffs_8x8(MACROBLOCK *mb,
BLOCKD *b, int type,
- ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- ENTROPY_CONTEXT *a1, ENTROPY_CONTEXT *l1)
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
{
int c = !type; /* start at coef 0, unless Y with Y2 */
int eob = b->eob;
@@ -699,7 +698,7 @@ static int cost_coeffs_8x8(MACROBLOCK *mb,
int cost = 0;
short *qcoeff_ptr = b->qcoeff;
- VP8_COMBINEENTROPYCONTEXTS_8x8(pt, *a, *l, *a1, *l1);
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
# define QC8X8( I) ( qcoeff_ptr [vp8_default_zig_zag1d_8x8[I]] )
@@ -738,8 +737,7 @@ static int vp8_rdcost_mby_8x8(MACROBLOCK *mb)
for (b = 0; b < 16; b+=4)
cost += cost_coeffs_8x8(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
- ta + vp8_block2above[b], tl + vp8_block2left[b],
- ta + vp8_block2above[b+1], tl + vp8_block2left[b+4]);
+ ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b]);
cost += cost_coeffs_2x2(mb, x->block + 24, PLANE_TYPE_Y2,
ta + vp8_block2above[24], tl + vp8_block2left[24]);
@@ -1204,10 +1202,8 @@ static int rd_cost_mbuv_8x8(MACROBLOCK *mb)
for (b = 16; b < 24; b+=4)
cost += cost_coeffs_8x8(mb, x->block + b, PLANE_TYPE_UV,
- ta + vp8_block2above[b],
- tl + vp8_block2left[b],
- ta + vp8_block2above[b+1],
- tl + vp8_block2left[b+2]);
+ ta + vp8_block2above_8x8[b],
+ tl + vp8_block2left_8x8[b]);
return cost;
}
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index aa9dd8ee8..d496bbde1 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -257,8 +257,6 @@ static void tokenize1st_order_b_8x8
const FRAME_TYPE frametype,
ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l,
- ENTROPY_CONTEXT *a1,
- ENTROPY_CONTEXT *l1,
VP8_COMP *cpi
)
{
@@ -277,7 +275,7 @@ static void tokenize1st_order_b_8x8
seg_eob = get_segdata( xd, segment_id, SEG_LVL_EOB );
}
- VP8_COMBINEENTROPYCONTEXTS_8x8(pt, *a, *l, *a1, *l1);
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
do
{
@@ -559,7 +557,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)x->left_context;
tokenize2nd_order_b_8x8(x,
x->block + 24, t, 1, x->frame_type,
- A + vp8_block2above[24], L + vp8_block2left[24], cpi);
+ A + vp8_block2above_8x8[24],
+ L + vp8_block2left_8x8[24], cpi);
}
else
#endif
@@ -576,56 +575,22 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
for (b = 0; b < 16; b+=4)
{
tokenize1st_order_b_8x8(x,
- x->block + b, t, plane_type, x->frame_type,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- A + vp8_block2above[b+1],
- L + vp8_block2left[b+4],
- cpi);
- /* *(A + vp8_block2above[b+1]) = *(A + vp8_block2above[b+2]) = *(A + vp8_block2above[b+3]) =
- *(A + vp8_block2above[b]);
- *(L + vp8_block2left[b+1]) = *(L + vp8_block2left[b+2]) = *(L + vp8_block2left[b+3]) =
- *(L + vp8_block2left[b]);*/
- // build coeff context for 8x8 transform
- if(b==0)
- {
- *(A + vp8_block2above[1]) = *(A + vp8_block2above[4]) = *(A + vp8_block2above[5]) = *(A + vp8_block2above[b]);
- *(L + vp8_block2left[1]) = *(L + vp8_block2left[4]) = *(L + vp8_block2left[5]) = *(L + vp8_block2left[b]);
- }
- else if(b==4)
- {
- *(A + vp8_block2above[2]) = *(A + vp8_block2above[3]) = *(A + vp8_block2above[6]) = *(A + vp8_block2above[7]) = *(A + vp8_block2above[b]);
- *(L + vp8_block2left[2]) = *(L + vp8_block2left[3]) = *(L + vp8_block2left[6]) = *(L + vp8_block2left[7]) = *(L + vp8_block2left[b]);
- *(A + vp8_block2above[4]) = *(A + vp8_block2above[1]);
- *(L + vp8_block2left[4]) = *(L + vp8_block2left[1]);
- }
- else if(b==8)
- {
- *(A + vp8_block2above[9]) = *(A + vp8_block2above[12]) = *(A + vp8_block2above[13]) = *(A + vp8_block2above[b]);
- *(L + vp8_block2left[9]) = *(L + vp8_block2left[12]) = *(L + vp8_block2left[13]) = *(L + vp8_block2left[b]);
- }
- else if(b==12)
- {
- *(A + vp8_block2above[10]) = *(A + vp8_block2above[11]) = *(A + vp8_block2above[14]) = *(A + vp8_block2above[15]) = *(A + vp8_block2above[b]);
- *(L + vp8_block2left[10]) = *(L + vp8_block2left[11]) = *(L + vp8_block2left[14]) = *(L + vp8_block2left[15]) = *(L + vp8_block2left[b]);
- *(A + vp8_block2above[12]) = *(A + vp8_block2above[8]);
- *(L + vp8_block2left[12]) = *(L + vp8_block2left[8]);
- }
-
+ x->block + b, t, plane_type, x->frame_type,
+ A + vp8_block2above_8x8[b],
+ L + vp8_block2left_8x8[b],
+ cpi);
+ *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
+ *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b] );
}
-
- for (b = 16; b < 24; b+=4) {
+ for (b = 16; b < 24; b+=4)
+ {
tokenize1st_order_b_8x8(x,
- x->block + b, t, 2, x->frame_type,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- A + vp8_block2above[b+1],
- L + vp8_block2left[b+2],
- cpi);
- *(A + vp8_block2above[b+1]) = *(A + vp8_block2above[b+2]) = *(A + vp8_block2above[b+3]) =
- *(A + vp8_block2above[b]);
- *(L + vp8_block2left[b+1]) = *(L + vp8_block2left[b+2]) = *(L + vp8_block2left[b+3]) =
- *(L + vp8_block2left[b]);
+ x->block + b, t, 2, x->frame_type,
+ A + vp8_block2above_8x8[b],
+ L + vp8_block2left_8x8[b],
+ cpi);
+ *(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
+ *(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
}
}
else
@@ -806,14 +771,12 @@ static __inline void stuff1st_order_b_8x8
const FRAME_TYPE frametype,
ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l,
- ENTROPY_CONTEXT *a1,
- ENTROPY_CONTEXT *l1,
VP8_COMP *cpi
)
{
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
- VP8_COMBINEENTROPYCONTEXTS_8x8(pt, *a, *l, *a1, *l1);
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
(void) frametype;
(void) type;
(void) b;
@@ -840,14 +803,12 @@ void stuff1st_order_buv_8x8
const FRAME_TYPE frametype,
ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l,
- ENTROPY_CONTEXT *a1,
- ENTROPY_CONTEXT *l1,
VP8_COMP *cpi
)
{
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
- VP8_COMBINEENTROPYCONTEXTS_8x8(pt, *a, *l, *a1, *l1);
+ VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
(void) frametype;
(void) type;
(void) b;
@@ -872,80 +833,29 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
int b;
stuff2nd_order_b_8x8(x->block + 24, t, 1, x->frame_type,
- A + vp8_block2above[24], L + vp8_block2left[24], cpi);
+ A + vp8_block2above_8x8[24],
+ L + vp8_block2left_8x8[24], cpi);
plane_type = 0;
- for (b = 0; b < 16; b+=4) {
- stuff1st_order_b_8x8(x->block + b, t, plane_type, x->frame_type,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- A + vp8_block2above[b+1],
- L + vp8_block2left[b+4],
- cpi);
- // build coeff context for 8x8 transform
- if(b==0)
- {
- *(A + vp8_block2above[1]) = *(A + vp8_block2above[4]) = *(A + vp8_block2above[5]) = *(A + vp8_block2above[b]);
- *(L + vp8_block2left[1]) = *(L + vp8_block2left[4]) = *(L + vp8_block2left[5]) = *(L + vp8_block2left[b]);
- }
- else if(b==4)
- {
- *(A + vp8_block2above[2]) = *(A + vp8_block2above[3]) = *(A + vp8_block2above[6]) = *(A + vp8_block2above[7]) = *(A + vp8_block2above[b]);
- *(L + vp8_block2left[2]) = *(L + vp8_block2left[3]) = *(L + vp8_block2left[6]) = *(L + vp8_block2left[7]) = *(L + vp8_block2left[b]);
- *(A + vp8_block2above[4]) = *(A + vp8_block2above[1]);
- *(L + vp8_block2left[4]) = *(L + vp8_block2left[1]);
- }
- else if(b==8)
- {
- *(A + vp8_block2above[9]) = *(A + vp8_block2above[12]) = *(A + vp8_block2above[13]) = *(A + vp8_block2above[b]);
- *(L + vp8_block2left[9]) = *(L + vp8_block2left[12]) = *(L + vp8_block2left[13]) = *(L + vp8_block2left[b]);
-
- }
- else if(b==12)
- {
- *(A + vp8_block2above[10]) = *(A + vp8_block2above[11]) = *(A + vp8_block2above[14]) = *(A + vp8_block2above[15]) = *(A + vp8_block2above[b]);
- *(L + vp8_block2left[10]) = *(L + vp8_block2left[11]) = *(L + vp8_block2left[14]) = *(L + vp8_block2left[15]) = *(L + vp8_block2left[b]);
- *(A + vp8_block2above[12]) = *(A + vp8_block2above[8]);
- *(L + vp8_block2left[12]) = *(L + vp8_block2left[8]);
-
- }
-
- }
- /*
- for (b = 0; b < 16; b+=4) {
+ for (b = 0; b < 16; b+=4)
+ {
stuff1st_order_b_8x8(x->block + b, t, plane_type, x->frame_type,
- A + vp8_block2above[b],
- L + vp8_block2left[b], cpi);
- *(A + vp8_block2above[b+1]) = *(A + vp8_block2above[b+2]) = *(A + vp8_block2above[b+3]) =
- *(A + vp8_block2above[b]);
- *(L + vp8_block2left[b+1]) = *(L + vp8_block2left[b+2]) = *(L + vp8_block2left[b+3]) =
- *(L + vp8_block2left[b]);
- }
- */
-
- for (b = 16; b < 24; b+=4) {
- stuff1st_order_buv_8x8(x->block + b, t, 2, x->frame_type,
- A + vp8_block2above[b],
- L + vp8_block2left[b],
- A + vp8_block2above[b+1],
- L + vp8_block2left[b+2],
- cpi);
- *(A + vp8_block2above[b+1]) = *(A + vp8_block2above[b+2]) = *(A + vp8_block2above[b+3]) =
- *(A + vp8_block2above[b]);
- *(L + vp8_block2left[b+1]) = *(L + vp8_block2left[b+2]) = *(L + vp8_block2left[b+3]) =
- *(L + vp8_block2left[b]);
+ A + vp8_block2above_8x8[b],
+ L + vp8_block2left_8x8[b],
+ cpi);
+ *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
+ *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b] );
}
- /*
- for (b = 16; b < 24; b+=4) {
+
+ for (b = 16; b < 24; b+=4)
+ {
stuff1st_order_buv_8x8(x->block + b, t, 2, x->frame_type,
- A + vp8_block2above[b],
- L + vp8_block2left[b], cpi);
- *(A + vp8_block2above[b+1]) = *(A + vp8_block2above[b+2]) = *(A + vp8_block2above[b+3]) =
- *(A + vp8_block2above[b]);
- *(L + vp8_block2left[b+1]) = *(L + vp8_block2left[b+2]) = *(L + vp8_block2left[b+3]) =
- *(L + vp8_block2left[b]);
+ A + vp8_block2above[b],
+ L + vp8_block2left[b],
+ cpi);
+ *(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
+ *(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
}
- */
}
#endif