summaryrefslogtreecommitdiff
path: root/vp8/encoder
diff options
context:
space:
mode:
Diffstat (limited to 'vp8/encoder')
-rw-r--r--vp8/encoder/bitstream.c350
-rw-r--r--vp8/encoder/block.h4
-rw-r--r--vp8/encoder/dct.c3
-rw-r--r--vp8/encoder/encodeframe.c13
-rw-r--r--vp8/encoder/encodeintra.c29
-rw-r--r--vp8/encoder/encodemb.c1
-rw-r--r--vp8/encoder/onyx_if.c16
-rw-r--r--vp8/encoder/onyx_int.h37
-rw-r--r--vp8/encoder/ratectrl.c30
-rw-r--r--vp8/encoder/rdopt.c127
-rw-r--r--vp8/encoder/tokenize.c243
11 files changed, 726 insertions, 127 deletions
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index a4df16fee..2f748015f 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -45,15 +45,33 @@ unsigned int tree_update_hist [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES][2];
+#if CONFIG_HYBRIDTRANSFORM
+unsigned int hybrid_tree_update_hist [BLOCK_TYPES]
+ [COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES][2];
+#endif
unsigned int tree_update_hist_8x8 [BLOCK_TYPES_8X8]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_HYBRIDTRANSFORM8X8
+unsigned int hybrid_tree_update_hist_8x8 [BLOCK_TYPES_8X8]
+ [COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES] [2];
+#endif
+#if CONFIG_TX16X16
unsigned int tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
+#if CONFIG_HYBRIDTRANSFORM16X16
+unsigned int hybrid_tree_update_hist_16x16 [BLOCK_TYPES_16X16]
+ [COEF_BANDS]
+ [PREV_COEF_CONTEXTS]
+ [ENTROPY_NODES] [2];
+#endif
#endif
extern unsigned int active_section;
@@ -1522,6 +1540,28 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
+#if CONFIG_HYBRIDTRANSFORM
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = 0; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
+ vp8_tree_probs_from_distribution(
+ MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ cpi->frame_hybrid_coef_probs [i][j][k],
+ cpi->frame_hybrid_branch_ct [i][j][k],
+ cpi->hybrid_coef_counts [i][j][k],
+ 256, 1
+ );
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
+ hybrid_context_counters[i][j][k][t] += cpi->hybrid_coef_counts[i][j][k][t];
+#endif
+ }
+ }
+ }
+#endif
if (cpi->common.txfm_mode == ALLOW_8X8) {
@@ -1549,9 +1589,35 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
+#if CONFIG_HYBRIDTRANSFORM8X8
+ for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
+ for (j = 0; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ /* at every context */
+ /* calc probs and branch cts for this frame only */
+ // vp8_prob new_p [ENTROPY_NODES];
+ // unsigned int branch_ct [ENTROPY_NODES] [2];
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
+ vp8_tree_probs_from_distribution(
+ MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ cpi->frame_hybrid_coef_probs_8x8 [i][j][k],
+ cpi->frame_hybrid_branch_ct_8x8 [i][j][k],
+ cpi->hybrid_coef_counts_8x8 [i][j][k],
+ 256, 1
+ );
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
+ hybrid_context_counters_8x8[i][j][k][t] += cpi->hybrid_coef_counts_8x8[i][j][k][t];
+#endif
+ }
+ }
+ }
+#endif
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_TX16X16
//16x16
for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
@@ -1571,9 +1637,30 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
+#if CONFIG_HYBRIDTRANSFORM16X16
+ for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
+ for (j = 0; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
+ vp8_tree_probs_from_distribution(
+ MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ cpi->frame_hybrid_coef_probs_16x16[i][j][k],
+ cpi->frame_hybrid_branch_ct_16x16[i][j][k],
+ cpi->hybrid_coef_counts_16x16[i][j][k], 256, 1);
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
+ hybrid_context_counters_16x16[i][j][k][t] += cpi->hybrid_coef_counts_16x16[i][j][k][t];
+#endif
+ }
+ }
+ }
+#endif
#endif
}
+#if 0
static void update_coef_probs2(VP8_COMP *cpi) {
const vp8_prob grpupd = 192;
int i, j, k, t;
@@ -1741,6 +1828,7 @@ static void update_coef_probs2(VP8_COMP *cpi) {
}
}
}
+#endif
static void update_coef_probs(VP8_COMP *cpi) {
int i, j, k, t;
@@ -1844,6 +1932,96 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
+#if CONFIG_HYBRIDTRANSFORM
+ savings = 0;
+ update[0] = update[1] = 0;
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = !i; j < COEF_BANDS; ++j) {
+ int prev_coef_savings[ENTROPY_NODES] = {0};
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+ vp8_prob newp = cpi->frame_hybrid_coef_probs [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.hybrid_coef_probs [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB;
+ int s = prev_coef_savings[t];
+ int u = 0;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
+#if defined(SEARCH_NEWP)
+ s = prob_diff_update_savings_search(
+ cpi->frame_hybrid_branch_ct [i][j][k][t],
+ *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold)
+ u = 1;
+ if (u)
+ savings += s - (int)(vp8_cost_zero(upd));
+ else
+ savings -= (int)(vp8_cost_zero(upd));
+#else
+ s = prob_update_savings(
+ cpi->frame_hybrid_branch_ct [i][j][k][t],
+ *Pold, newp, upd);
+ if (s > 0)
+ u = 1;
+ if (u)
+ savings += s;
+#endif
+
+ update[u]++;
+ }
+ }
+ }
+ }
+
+ // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
+ /* Is coef updated at all */
+ if (update[1] == 0 || savings < 0)
+ vp8_write_bit(w, 0);
+ else {
+ vp8_write_bit(w, 1);
+ for (i = 0; i < BLOCK_TYPES; ++i) {
+ for (j = !i; j < COEF_BANDS; ++j) {
+ int prev_coef_savings[ENTROPY_NODES] = {0};
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ // calc probs and branch cts for this frame only
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+ vp8_prob newp = cpi->frame_hybrid_coef_probs [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.hybrid_coef_probs [i][j][k] + t;
+ const vp8_prob upd = COEF_UPDATE_PROB;
+ int s = prev_coef_savings[t];
+ int u = 0;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
+
+#if defined(SEARCH_NEWP)
+ s = prob_diff_update_savings_search(
+ cpi->frame_hybrid_branch_ct [i][j][k][t],
+ *Pold, &newp, upd);
+ if (s > 0 && newp != *Pold)
+ u = 1;
+#else
+ s = prob_update_savings(
+ cpi->frame_hybrid_branch_ct [i][j][k][t],
+ *Pold, newp, upd);
+ if (s > 0)
+ u = 1;
+#endif
+ vp8_write(w, u, upd);
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ ++ hybrid_tree_update_hist [i][j][k][t] [u];
+#endif
+ if (u) {
+ /* send/use new probability */
+ write_prob_diff_update(w, newp, *Pold);
+ *Pold = newp;
+ }
+ }
+ }
+ }
+ }
+ }
+#endif
/* do not do this if not even allowed */
if (cpi->common.txfm_mode == ALLOW_8X8) {
@@ -1921,9 +2099,84 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
}
+#if CONFIG_HYBRIDTRANSFORM8X8
+ update[0] = update[1] = 0;
+ savings = 0;
+ for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ // calc probs and branch cts for this frame only
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+ const unsigned int *ct = cpi->frame_hybrid_branch_ct_8x8 [i][j][k][t];
+ vp8_prob newp = cpi->frame_hybrid_coef_probs_8x8 [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.hybrid_coef_probs_8x8 [i][j][k] + t;
+ const vp8_prob oldp = *Pold;
+ int s, u;
+ const vp8_prob upd = COEF_UPDATE_PROB_8X8;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
+#if defined(SEARCH_NEWP)
+ s = prob_diff_update_savings_search(ct, oldp, &newp, upd);
+ u = s > 0 && newp != oldp ? 1 : 0;
+ if (u)
+ savings += s - (int)(vp8_cost_zero(upd));
+ else
+ savings -= (int)(vp8_cost_zero(upd));
+#else
+ s = prob_update_savings(ct, oldp, newp, upd);
+ u = s > 0 ? 1 : 0;
+ if (u)
+ savings += s;
+#endif
+ update[u]++;
+ }
+ }
+ }
+ }
+
+ if (update[1] == 0 || savings < 0)
+ vp8_write_bit(w, 0);
+ else {
+ vp8_write_bit(w, 1);
+ for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+ const unsigned int *ct = cpi->frame_hybrid_branch_ct_8x8 [i][j][k][t];
+ vp8_prob newp = cpi->frame_hybrid_coef_probs_8x8 [i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.hybrid_coef_probs_8x8 [i][j][k] + t;
+ const vp8_prob oldp = *Pold;
+ const vp8_prob upd = COEF_UPDATE_PROB_8X8;
+ int s, u;
+ if (k >= 3 && ((i == 0 && j == 1) ||
+ (i > 0 && j == 0)))
+ continue;
+#if defined(SEARCH_NEWP)
+ s = prob_diff_update_savings_search(ct, oldp, &newp, upd);
+ u = s > 0 && newp != oldp ? 1 : 0;
+#else
+ s = prob_update_savings(ct, oldp, newp, upd);
+ u = s > 0 ? 1 : 0;
+#endif
+ vp8_write(w, u, upd);
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ ++ hybrid_tree_update_hist_8x8 [i][j][k][t] [u];
+#endif
+ if (u) {
+ /* send/use new probability */
+ write_prob_diff_update(w, newp, oldp);
+ *Pold = newp;
+ }
+ }
+ }
+ }
+ }
+ }
+#endif
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_TX16X16
// 16x16
/* dry run to see if update is necessary */
update[0] = update[1] = 0;
@@ -1999,6 +2252,81 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
}
+#if CONFIG_HYBRIDTRANSFORM16X16
+ update[0] = update[1] = 0;
+ savings = 0;
+ for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ // calc probs and branch cts for this frame only
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+ const unsigned int *ct = cpi->frame_hybrid_branch_ct_16x16[i][j][k][t];
+ vp8_prob newp = cpi->frame_hybrid_coef_probs_16x16[i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.hybrid_coef_probs_16x16[i][j][k] + t;
+ const vp8_prob oldp = *Pold;
+ int s, u;
+ const vp8_prob upd = COEF_UPDATE_PROB_16X16;
+ if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
+ continue;
+#if defined(SEARCH_NEWP)
+ s = prob_diff_update_savings_search(ct, oldp, &newp, upd);
+ u = s > 0 && newp != oldp ? 1 : 0;
+ if (u)
+ savings += s - (int)(vp8_cost_zero(upd));
+ else
+ savings -= (int)(vp8_cost_zero(upd));
+#else
+ s = prob_update_savings(ct, oldp, newp, upd);
+ u = s > 0 ? 1 : 0;
+ if (u)
+ savings += s;
+#endif
+ update[u]++;
+ }
+ }
+ }
+ }
+
+ if (update[1] == 0 || savings < 0)
+ vp8_write_bit(w, 0);
+ else {
+ vp8_write_bit(w, 1);
+ for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
+ for (j = !i; j < COEF_BANDS; ++j) {
+ for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
+ for (t = 0; t < ENTROPY_NODES; ++t) {
+ const unsigned int *ct = cpi->frame_hybrid_branch_ct_16x16[i][j][k][t];
+ vp8_prob newp = cpi->frame_hybrid_coef_probs_16x16[i][j][k][t];
+ vp8_prob *Pold = cpi->common.fc.hybrid_coef_probs_16x16[i][j][k] + t;
+ const vp8_prob oldp = *Pold;
+ const vp8_prob upd = COEF_UPDATE_PROB_16X16;
+ int s, u;
+ if (k >= 3 && ((i == 0 && j == 1) ||
+ (i > 0 && j == 0)))
+ continue;
+#if defined(SEARCH_NEWP)
+ s = prob_diff_update_savings_search(ct, oldp, &newp, upd);
+ u = s > 0 && newp != oldp ? 1 : 0;
+#else
+ s = prob_update_savings(ct, oldp, newp, upd);
+ u = s > 0 ? 1 : 0;
+#endif
+ vp8_write(w, u, upd);
+#ifdef ENTROPY_STATS
+ if (!cpi->dummy_packing)
+ ++hybrid_tree_update_hist_16x16[i][j][k][t][u];
+#endif
+ if (u) {
+ /* send/use new probability */
+ write_prob_diff_update(w, newp, oldp);
+ *Pold = newp;
+ }
+ }
+ }
+ }
+ }
+ }
+#endif
#endif
}
@@ -2432,9 +2760,18 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
vp8_clear_system_state(); // __asm emms;
vp8_copy(cpi->common.fc.pre_coef_probs, cpi->common.fc.coef_probs);
+#if CONFIG_HYBRIDTRANSFORM
+ vp8_copy(cpi->common.fc.pre_hybrid_coef_probs, cpi->common.fc.hybrid_coef_probs);
+#endif
vp8_copy(cpi->common.fc.pre_coef_probs_8x8, cpi->common.fc.coef_probs_8x8);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_HYBRIDTRANSFORM8X8
+ vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_8x8, cpi->common.fc.hybrid_coef_probs_8x8);
+#endif
+#if CONFIG_TX16X16
vp8_copy(cpi->common.fc.pre_coef_probs_16x16, cpi->common.fc.coef_probs_16x16);
+#if CONFIG_HYBRIDTRANSFORM16X16
+ vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_16x16, cpi->common.fc.hybrid_coef_probs_16x16);
+#endif
#endif
vp8_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
vp8_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
@@ -2452,11 +2789,8 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
vp8_zero(cpi->mbsplit_count);
vp8_zero(cpi->common.fc.mv_ref_ct)
vp8_zero(cpi->common.fc.mv_ref_ct_a)
-#if COEFUPDATETYPE == 2
- update_coef_probs2(cpi);
-#else
+
update_coef_probs(cpi);
-#endif
#ifdef ENTROPY_STATS
active_section = 2;
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index c0cd2e783..a204c8b60 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -167,6 +167,10 @@ typedef struct {
unsigned int token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
+#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
+ unsigned int hybrid_token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS]
+ [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
+#endif
int optimize;
int q_index;
diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c
index f5c666e24..209ba20b5 100644
--- a/vp8/encoder/dct.c
+++ b/vp8/encoder/dct.c
@@ -478,10 +478,11 @@ void vp8_fht_c(short *input, short *output, int pitch,
pfa[i] += pfb[k] * pth[k];
}
pth += tx_dim;
- }
+ }
pfa += tx_dim;
pfb += tx_dim;
+ // pth -= tx_dim * tx_dim;
switch(tx_type) {
case ADST_ADST :
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 9b793e20a..584570da9 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -1297,6 +1297,8 @@ static void encode_frame_internal(VP8_COMP *cpi) {
TOKENEXTRA *tp = cpi->tok;
int totalrate;
+ //printf("encode_frame_internal\n");
+
// Compute a modified set of reference frame probabilities to use when
// prediction fails. These are based on the current general estimates for
// this frame which may be updated with each iteration of the recode loop.
@@ -1360,9 +1362,18 @@ static void encode_frame_internal(VP8_COMP *cpi) {
vp8_zero(cpi->MVcount_hp);
#endif
vp8_zero(cpi->coef_counts);
+#if CONFIG_HYBRIDTRANSFORM
+ vp8_zero(cpi->hybrid_coef_counts);
+#endif
vp8_zero(cpi->coef_counts_8x8);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_HYBRIDTRANSFORM8X8
+ vp8_zero(cpi->hybrid_coef_counts_8x8);
+#endif
+#if CONFIG_TX16X16
vp8_zero(cpi->coef_counts_16x16);
+#if CONFIG_HYBRIDTRANSFORM16X16
+ vp8_zero(cpi->hybrid_coef_counts_16x16);
+#endif
#endif
vp8cx_frame_init_quantizer(cpi);
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index c2f123c92..d8757c531 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -85,21 +85,21 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
#if CONFIG_HYBRIDTRANSFORM
- if(active_ht) {
- b->bmi.as_mode.test = b->bmi.as_mode.first;
- txfm_map(b, b->bmi.as_mode.first);
- vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
- vp8_ht_quantize_b(be, b);
- vp8_inverse_htransform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
- } else {
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
- x->quantize_b(be, b) ;
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
- }
+ if (active_ht) {
+ b->bmi.as_mode.test = b->bmi.as_mode.first;
+ txfm_map(b, b->bmi.as_mode.first);
+ vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
+ vp8_ht_quantize_b(be, b);
+ vp8_inverse_htransform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
+ } else {
+ x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
+ x->quantize_b(be, b) ;
+ vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
+ }
#else
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b(be, b);
- vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
+ x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
+ x->quantize_b(be, b);
+ vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
#endif
RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -298,7 +298,6 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
}
}
-extern const int vp8_i8x8_block[4];
void vp8_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
int i, ib;
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index a66dbe884..66b9fb970 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -1239,4 +1239,3 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
RECON_INVOKE(&rtcd->common->recon, recon_mby)
(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}
-
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index ca6570e68..9a88eddb9 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -3048,6 +3048,7 @@ static void encode_frame_to_data_rate
// Clear down mmx registers to allow floating point in what follows
vp8_clear_system_state();
+
// For an alt ref frame in 2 pass we skip the call to the second
// pass function that sets the target bandwidth so must set it here
if (cpi->common.refresh_alt_ref_frame) {
@@ -3758,9 +3759,19 @@ static void encode_frame_to_data_rate
update_reference_frames(cm);
vp8_copy(cpi->common.fc.coef_counts, cpi->coef_counts);
+#if CONFIG_HYBRIDTRANSFORM
+ vp8_copy(cpi->common.fc.hybrid_coef_counts, cpi->hybrid_coef_counts);
+#endif
vp8_copy(cpi->common.fc.coef_counts_8x8, cpi->coef_counts_8x8);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_HYBRIDTRANSFORM8X8
+ vp8_copy(cpi->common.fc.hybrid_coef_counts_8x8, cpi->hybrid_coef_counts_8x8);
+#endif
+#if CONFIG_TX16X16
vp8_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16);
+#if CONFIG_HYBRIDTRANSFORM16X16
+ vp8_copy(cpi->common.fc.hybrid_coef_counts_16x16,
+ cpi->hybrid_coef_counts_16x16);
+#endif
#endif
vp8_adapt_coef_probs(&cpi->common);
if (cpi->common.frame_type != KEY_FRAME) {
@@ -4330,8 +4341,9 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
Pass1Encode(cpi, size, dest, frame_flags);
} else if (cpi->pass == 2) {
Pass2Encode(cpi, size, dest, frame_flags);
- } else
+ } else {
encode_frame_to_data_rate(cpi, size, dest, frame_flags);
+ }
if (cm->refresh_entropy_probs) {
if (cm->refresh_alt_ref_frame)
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 159cb8527..5cc87d7a9 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -97,11 +97,25 @@ typedef struct {
vp8_prob coef_probs[BLOCK_TYPES]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+#if CONFIG_HYBRIDTRANSFORM
+ vp8_prob hybrid_coef_probs[BLOCK_TYPES]
+ [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+#endif
+
vp8_prob coef_probs_8x8[BLOCK_TYPES_8X8]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_HYBRIDTRANSFORM8X8
+ vp8_prob hybrid_coef_probs_8x8[BLOCK_TYPES_8X8]
+ [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+#endif
+
+#if CONFIG_TX16X16
vp8_prob coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+#if CONFIG_HYBRIDTRANSFORM16X16
+ vp8_prob hybrid_coef_probs_16x16[BLOCK_TYPES_16X16]
+ [COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
+#endif
#endif
vp8_prob ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
@@ -557,17 +571,32 @@ typedef struct VP8_COMP {
#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
- // DECLARE_ALIGNED(16, int, coef_counts_backup [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]); //not used any more
- // save vp8_tree_probs_from_distribution result for each frame to avoid repeat calculation
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
+#if CONFIG_HYBRIDTRANSFORM
+ unsigned int hybrid_coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
+ vp8_prob frame_hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
+ unsigned int frame_hybrid_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
+#endif
+
unsigned int coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_HYBRIDTRANSFORM8X8
+ unsigned int hybrid_coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
+ vp8_prob frame_hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
+ unsigned int frame_hybrid_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
+#endif
+
+#if CONFIG_TX16X16
unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
+#if CONFIG_HYBRIDTRANSFORM16X16
+ unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
+ vp8_prob frame_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
+ unsigned int frame_hybrid_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
+#endif
#endif
int gfu_boost;
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index b7fd07e82..e059a10e2 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -178,12 +178,21 @@ void vp8_save_coding_context(VP8_COMP *cpi) {
vp8_copy(cc->last_mode_lf_deltas, xd->last_mode_lf_deltas);
vp8_copy(cc->coef_probs, cm->fc.coef_probs);
+#if CONFIG_HYBRIDTRANSFORM
+ vp8_copy(cc->hybrid_coef_probs, cm->fc.hybrid_coef_probs);
+#endif
vp8_copy(cc->coef_probs_8x8, cm->fc.coef_probs_8x8);
-#if CONFIG_SWITCHABLE_INTERP
- vp8_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
+#if CONFIG_HYBRIDTRANSFORM8X8
+ vp8_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8);
#endif
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_TX16X16
vp8_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16);
+#if CONFIG_HYBRIDTRANSFORM16X16
+ vp8_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16);
+#endif
+#endif
+#if CONFIG_SWITCHABLE_INTERP
+ vp8_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
#endif
}
@@ -242,12 +251,21 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {
vp8_copy(xd->last_mode_lf_deltas, cc->last_mode_lf_deltas);
vp8_copy(cm->fc.coef_probs, cc->coef_probs);
+#if CONFIG_HYBRIDTRANSFORM
+ vp8_copy(cm->fc.hybrid_coef_probs, cc->hybrid_coef_probs);
+#endif
vp8_copy(cm->fc.coef_probs_8x8, cc->coef_probs_8x8);
-#if CONFIG_SWITCHABLE_INTERP
- vp8_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
+#if CONFIG_HYBRIDTRANSFORM8X8
+ vp8_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8);
#endif
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_TX16X16
vp8_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16);
+#if CONFIG_HYBRIDTRANSFORM16X16
+ vp8_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16);
+#endif
+#endif
+#if CONFIG_SWITCHABLE_INTERP
+ vp8_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
#endif
}
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index fd03fdb40..0613355fc 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -41,6 +41,7 @@
#include "vp8/common/seg_common.h"
#include "vp8/common/pred_common.h"
+#include "vp8/common/entropy.h"
#if CONFIG_NEWBESTREFMV
#include "vp8/common/mvref_common.h"
@@ -358,17 +359,38 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
cpi->mb.token_costs[TX_4X4],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs,
BLOCK_TYPES);
+#if CONFIG_HYBRIDTRANSFORM
+ fill_token_costs(
+ cpi->mb.hybrid_token_costs[TX_4X4],
+ (const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11])
+ cpi->common.fc.hybrid_coef_probs,
+ BLOCK_TYPES);
+#endif
fill_token_costs(
cpi->mb.token_costs[TX_8X8],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_8x8,
BLOCK_TYPES_8X8);
+#if CONFIG_HYBRIDTRANSFORM8X8
+ fill_token_costs(
+ cpi->mb.hybrid_token_costs[TX_8X8],
+ (const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11])
+ cpi->common.fc.hybrid_coef_probs_8x8,
+ BLOCK_TYPES_8X8);
+#endif
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_TX16X16
fill_token_costs(
cpi->mb.token_costs[TX_16X16],
(const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_16x16,
BLOCK_TYPES_16X16);
+#if CONFIG_HYBRIDTRANSFORM16X16
+ fill_token_costs(
+ cpi->mb.hybrid_token_costs[TX_16X16],
+ (const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11])
+ cpi->common.fc.hybrid_coef_probs_16x16,
+ BLOCK_TYPES_16X16);
+#endif
#endif
/*rough estimate for costing*/
@@ -582,44 +604,44 @@ static int cost_coeffs_2x2(MACROBLOCK *mb,
static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- int tx_type) {
+ int tx_size) {
const int eob = b->eob;
int c = !type; /* start at coef 0, unless Y with Y2 */
int cost = 0, default_eob, seg_eob;
int pt; /* surrounding block/prev coef predictor */
int const *scan, *band;
short *qcoeff_ptr = b->qcoeff;
- MB_MODE_INFO * mbmi = &mb->e_mbd.mode_info_context->mbmi;
-
+ MACROBLOCKD *xd = &mb->e_mbd;
+ MB_MODE_INFO *mbmi = &mb->e_mbd.mode_info_context->mbmi;
+#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
+ TX_TYPE tx_type = DCT_DCT;
+#endif
int segment_id = mbmi->segment_id;
- switch (tx_type) {
+ switch (tx_size) {
case TX_4X4:
scan = vp8_default_zig_zag1d;
band = vp8_coef_bands;
default_eob = 16;
#if CONFIG_HYBRIDTRANSFORM
- {
- int active_ht = (mb->q_index < ACTIVE_HT) &&
- (mbmi->mode_rdopt == B_PRED);
-
- if((type == PLANE_TYPE_Y_WITH_DC) && active_ht) {
- switch (b->bmi.as_mode.tx_type) {
- case ADST_DCT:
- scan = vp8_row_scan;
- break;
-
- case DCT_ADST:
- scan = vp8_col_scan;
- break;
-
- default:
- scan = vp8_default_zig_zag1d;
- break;
- }
+ if (type == PLANE_TYPE_Y_WITH_DC &&
+ mb->q_index < ACTIVE_HT &&
+ mbmi->mode_rdopt == B_PRED) {
+ tx_type = b->bmi.as_mode.tx_type;
+ switch (tx_type) {
+ case ADST_DCT:
+ scan = vp8_row_scan;
+ break;
+
+ case DCT_ADST:
+ scan = vp8_col_scan;
+ break;
+
+ default:
+ scan = vp8_default_zig_zag1d;
+ break;
+ }
- } else
- scan = vp8_default_zig_zag1d;
}
#endif
break;
@@ -627,12 +649,29 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type,
scan = vp8_default_zig_zag1d_8x8;
band = vp8_coef_bands_8x8;
default_eob = 64;
+#if CONFIG_HYBRIDTRANSFORM8X8
+ {
+ BLOCKD *bb;
+ int ib = (b - xd->block);
+ if (ib >= 16) tx_type = DCT_DCT;
+ ib = (ib & 8) + ((ib & 4) >> 1);
+ bb = xd->block + ib;
+ if (mbmi->mode_rdopt == I8X8_PRED)
+ tx_type = bb->bmi.as_mode.tx_type;
+ }
+#endif
break;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
case TX_16X16:
scan = vp8_default_zig_zag1d_16x16;
band = vp8_coef_bands_16x16;
default_eob = 256;
+#if CONFIG_HYBRIDTRANSFORM16X16
+ if (type == PLANE_TYPE_Y_WITH_DC &&
+ mbmi->mode_rdopt < I8X8_PRED &&
+ mb->q_index < ACTIVE_HT16)
+ tx_type = b->bmi.as_mode.tx_type;
+#endif
break;
#endif
default:
@@ -643,21 +682,37 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type,
else
seg_eob = default_eob;
+ //mbmi->mode = mode;
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- for (; c < eob; c++) {
- int v = qcoeff_ptr[scan[c]];
- int t = vp8_dct_value_tokens_ptr[v].Token;
- cost += mb->token_costs[tx_type][type][band[c]][pt][t];
- cost += vp8_dct_value_cost_ptr[v];
- pt = vp8_prev_token_class[t];
+#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
+ if (tx_type != DCT_DCT) {
+ for (; c < eob; c++) {
+ int v = qcoeff_ptr[scan[c]];
+ int t = vp8_dct_value_tokens_ptr[v].Token;
+ cost += mb->hybrid_token_costs[tx_size][type][band[c]][pt][t];
+ cost += vp8_dct_value_cost_ptr[v];
+ pt = vp8_prev_token_class[t];
+ }
+ if (c < seg_eob)
+ cost += mb->hybrid_token_costs[tx_size][type][band[c]]
+ [pt][DCT_EOB_TOKEN];
+ } else
+#endif
+ {
+ for (; c < eob; c++) {
+ int v = qcoeff_ptr[scan[c]];
+ int t = vp8_dct_value_tokens_ptr[v].Token;
+ cost += mb->token_costs[tx_size][type][band[c]][pt][t];
+ cost += vp8_dct_value_cost_ptr[v];
+ pt = vp8_prev_token_class[t];
+ }
+ if (c < seg_eob)
+ cost += mb->token_costs[tx_size][type][band[c]]
+ [pt][DCT_EOB_TOKEN];
}
- if (c < seg_eob)
- cost += mb->token_costs[tx_type][type][band[c]]
- [pt][DCT_EOB_TOKEN];
-
pt = (c != !type); // is eob first coefficient;
*a = *l = pt;
return cost;
@@ -816,6 +871,7 @@ static int vp8_rdcost_mby_16x16(MACROBLOCK *mb) {
cost = cost_coeffs(mb, xd->block, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_16X16);
return cost;
}
+
static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
const VP8_ENCODER_RTCD *rtcd) {
int d;
@@ -1427,7 +1483,6 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
return best_rd;
}
-const int vp8_i8x8_block[4] = {0, 2, 8, 10};
int64_t rd_pick_intra8x8mby_modes(VP8_COMP *cpi, MACROBLOCK *mb,
int *Rate, int *rate_y,
int *Distortion, int64_t best_rd) {
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index d992e1fe6..91fafd088 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -19,23 +19,48 @@
#include "vp8/common/pred_common.h"
#include "vp8/common/seg_common.h"
+#include "vp8/common/entropy.h"
/* Global event counters used for accumulating statistics across several
compressions, then generating context.c = initial stats. */
#ifdef ENTROPY_STATS
INT64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
+#if CONFIG_HYBRIDTRANSFORM
+INT64 hybrid_context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
+#endif
+
INT64 context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_HYBRIDTRANSFORM8X8
+INT64 hybrid_context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
+#endif
+
+#if CONFIG_TX16X16
INT64 context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
+#if CONFIG_HYBRIDTRANSFORM16X16
+INT64 hybrid_context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
+#endif
+
extern unsigned int tree_update_hist[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
+#if CONFIG_HYBRIDTRANSFORM
+extern unsigned int hybrid_tree_update_hist[BLOCK_TYPES][COEF_BANDS]
+ [PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
+#endif
extern unsigned int tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_HYBRIDTRANSFORM8X8
+extern unsigned int hybrid_tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
+ [PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
+#endif
+#if CONFIG_TX16X16
extern unsigned int tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
+#if CONFIG_HYBRIDTRANSFORM16X16
+extern unsigned int hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
+ [PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
+#endif
#endif
#endif
void vp8_stuff_mb(VP8_COMP *cpi,
@@ -44,7 +69,7 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_TX16X16
void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run);
#endif
@@ -108,7 +133,7 @@ static void fill_value_tokens() {
vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_TX16X16
static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
const BLOCKD *const b,
TOKENEXTRA **tp,
@@ -124,10 +149,15 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
TOKENEXTRA *t = *tp; /* store tokens starting here */
int x;
const short *qcoeff_ptr = b->qcoeff;
+#if CONFIG_HYBRIDTRANSFORM16X16
+ TX_TYPE tx_type = get_tx_type(xd, b);
+#endif
int seg_eob = 256;
int segment_id = xd->mode_info_context->mbmi.segment_id;
+ //if (!dry_run) printf("16: %d\n", tx_type);
+
if (segfeature_active(xd, segment_id, SEG_LVL_EOB))
seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
@@ -149,11 +179,22 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
}
t->Token = x;
- t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
+#if CONFIG_HYBRIDTRANSFORM16X16
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
+ else
+#endif
+ t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
- if (!dry_run)
- ++cpi->coef_counts_16x16[type][band][pt][x];
+ if (!dry_run) {
+#if CONFIG_HYBRIDTRANSFORM16X16
+ if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts_16x16[type][band][pt][x];
+ else
+#endif
+ ++cpi->coef_counts_16x16[type][band][pt][x];
+ }
} while (pt = vp8_prev_token_class[x], ++t, c < eob && ++c < seg_eob);
*tp = t;
@@ -304,6 +345,9 @@ static void tokenize1st_order_b_8x8
int c = type ? 0 : 1; /* start at DC unless type 0 */
TOKENEXTRA *t = *tp; /* store tokens starting here */
const short *qcoeff_ptr = b->qcoeff;
+#if CONFIG_HYBRIDTRANSFORM8X8
+ TX_TYPE tx_type = type == 3 ? get_tx_type(xd, b) : DCT_DCT;
+#endif
int seg_eob = 64;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -313,6 +357,7 @@ static void tokenize1st_order_b_8x8
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
+ //if (!dry_run) printf("8: %d\n", tx_type);
for (; c < b->eob; ++c) {
const int band = vp8_coef_bands_8x8[c];
int rc = vp8_default_zig_zag1d_8x8[c];
@@ -324,12 +369,23 @@ static void tokenize1st_order_b_8x8
x = vp8_dct_value_tokens_ptr[v].Token;
t->Token = x;
- t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
+#if CONFIG_HYBRIDTRANSFORM8X8
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
+ else
+#endif
+ t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
- if (!dry_run)
- ++cpi->coef_counts_8x8[type][band][pt][x];
+ if (!dry_run) {
+#if CONFIG_HYBRIDTRANSFORM8X8
+ if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts_8x8[type][band][pt][x];
+ else
+#endif
+ ++cpi->coef_counts_8x8[type][band][pt][x];
+ }
pt = vp8_prev_token_class[x];
++t;
@@ -339,11 +395,23 @@ static void tokenize1st_order_b_8x8
const int band = vp8_coef_bands_8x8[c];
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs_8x8 [type] [band] [pt];
+#if CONFIG_HYBRIDTRANSFORM8X8
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8 [type] [band] [pt];
+ else
+#endif
+ t->context_tree = cpi->common.fc.coef_probs_8x8 [type] [band] [pt];
+
t->skip_eob_node = pt == 0 && ((band > 0 && type > 0) || (band > 1 && type == 0));
- if (!dry_run)
- ++cpi->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
+ if (!dry_run) {
+#if CONFIG_HYBRIDTRANSFORM8X8
+ if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
+ else
+#endif
+ ++cpi->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
+ }
++t;
}
@@ -384,10 +452,13 @@ static void tokenize1st_order_ht( MACROBLOCKD *xd,
/* Luma */
for (block = 0; block < 16; block++, b++) {
B_PREDICTION_MODE b_mode;
+ TX_TYPE tx_type = DCT_DCT;
if( xd->mode_info_context->mbmi.mode == B_PRED ) {
b_mode = b->bmi.as_mode.first;
+ tx_type = get_tx_type(xd, b);
}
+ //if (!dry_run) printf("4: %d\n", tx_type);
// assign scanning order for luma components coded in intra4x4 mode
if( (xd->mode_info_context->mbmi.mode == B_PRED) &&
@@ -430,13 +501,20 @@ static void tokenize1st_order_ht( MACROBLOCKD *xd,
token = vp8_dct_value_tokens_ptr[v].Token;
t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs [type] [band] [pt];
+ else
+ t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
t->skip_eob_node = pt == 0 &&
((band > 0 && type > 0) || (band > 1 && type == 0));
- if (!dry_run)
- ++cpi->coef_counts [type] [band] [pt] [token];
+ if (!dry_run) {
+ if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts[type] [band] [pt] [token];
+ else
+ ++cpi->coef_counts [type] [band] [pt] [token];
+ }
pt = vp8_prev_token_class[token];
t++;
@@ -445,12 +523,19 @@ static void tokenize1st_order_ht( MACROBLOCKD *xd,
if (c < seg_eob) {
band = vp8_coef_bands[c];
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs [type] [band] [pt];
+ else
+ t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt];
t->skip_eob_node = pt == 0 &&
((band > 0 && type > 0) || (band > 1 && type == 0));
- if (!dry_run)
- ++cpi->coef_counts [type] [band] [pt] [DCT_EOB_TOKEN];
+ if (!dry_run) {
+ if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts[type] [band] [pt] [DCT_EOB_TOKEN];
+ else
+ ++cpi->coef_counts [type] [band] [pt] [DCT_EOB_TOKEN];
+ }
t++;
}
@@ -787,7 +872,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
int plane_type;
int has_y2_block;
int b;
- int tx_type = xd->mode_info_context->mbmi.txfm_size;
+ int tx_size = xd->mode_info_context->mbmi.txfm_size;
int mb_skip_context = get_pred_context(&cpi->common, xd, PRED_MBSKIP);
TOKENEXTRA *t_backup = *t;
@@ -813,10 +898,10 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
&& xd->mode_info_context->mbmi.mode != I8X8_PRED
&& xd->mode_info_context->mbmi.mode != SPLITMV);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16) has_y2_block = 0; // Because of inter frames
+ if (tx_size == TX_16X16) has_y2_block = 0; // Because of inter frames
#endif
- switch (tx_type) {
+ switch (tx_size) {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
case TX_16X16:
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_16x16(xd);
@@ -840,12 +925,12 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
if (!dry_run)
cpi->skip_true_count[mb_skip_context] += skip_inc;
if (!cpi->common.mb_no_coeff_skip) {
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16)
+#if CONFIG_TX16X16
+ if (tx_size == TX_16X16)
vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
else
#endif
- if (tx_type == TX_8X8) {
+ if (tx_size == TX_8X8) {
#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
@@ -867,7 +952,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
plane_type = 3;
if (has_y2_block) {
- if (tx_type == TX_8X8) {
+ if (tx_size == TX_8X8) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
tokenize2nd_order_b_8x8(xd,
@@ -881,8 +966,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
plane_type = 0;
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
- if (tx_type == TX_16X16) {
+#if CONFIG_TX16X16
+ if (tx_size == TX_16X16) {
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context;
@@ -904,7 +989,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
}
else
#endif
- if (tx_type == TX_8X8) {
+ if (tx_size == TX_8X8) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
#if CONFIG_HYBRIDTRANSFORM8X8
@@ -914,7 +999,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
#endif
for (b = 0; b < 16; b += 4) {
tokenize1st_order_b_8x8(xd,
- xd->block + b, t, plane_type, xd->frame_type,
+ xd->block + b,
+ t, plane_type, xd->frame_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@@ -945,7 +1031,6 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
#endif
tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
}
-
if (dry_run)
*t = t_backup;
}
@@ -1214,6 +1299,7 @@ void vp8_tokenize_initialize() {
static __inline void stuff2nd_order_b_8x8(
+ MACROBLOCKD *xd,
const BLOCKD *const b,
TOKENEXTRA **tp,
const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
@@ -1245,6 +1331,7 @@ static __inline void stuff2nd_order_b_8x8(
static __inline void stuff1st_order_b_8x8
(
+ MACROBLOCKD *xd,
const BLOCKD *const b,
TOKENEXTRA **tp,
const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
@@ -1255,19 +1342,34 @@ static __inline void stuff1st_order_b_8x8
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
+#if CONFIG_HYBRIDTRANSFORM8X8
+ TX_TYPE tx_type = type == 3 ? get_tx_type(xd, b) : DCT_DCT;
+#endif
+
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
(void) frametype;
(void) type;
(void) b;
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs_8x8 [0] [1] [pt];
+#if CONFIG_HYBRIDTRANSFORM8X8
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8 [0] [1] [pt];
+ else
+#endif
+ t->context_tree = cpi->common.fc.coef_probs_8x8 [0] [1] [pt];
// t->section = 8;
t->skip_eob_node = 0;
++t;
*tp = t;
- if (!dry_run)
- ++cpi->coef_counts_8x8[0] [1] [pt] [DCT_EOB_TOKEN];
+ if (!dry_run) {
+#if CONFIG_HYBRIDTRANSFORM8X8
+    if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts_8x8[0] [1] [pt] [DCT_EOB_TOKEN];
+ else
+#endif
+ ++cpi->coef_counts_8x8[0] [1] [pt] [DCT_EOB_TOKEN];
+ }
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
@@ -1277,6 +1379,7 @@ static __inline void stuff1st_order_b_8x8
static __inline
void stuff1st_order_buv_8x8
(
+ MACROBLOCKD *xd,
const BLOCKD *const b,
TOKENEXTRA **tp,
const int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
@@ -1315,13 +1418,13 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
int b;
TOKENEXTRA *t_backup = *t;
- stuff2nd_order_b_8x8(xd->block + 24, t, 1, xd->frame_type,
+ stuff2nd_order_b_8x8(xd, xd->block + 24, t, 1, xd->frame_type,
A + vp8_block2above_8x8[24],
L + vp8_block2left_8x8[24], cpi, dry_run);
plane_type = 0;
for (b = 0; b < 16; b += 4) {
- stuff1st_order_b_8x8(xd->block + b, t, plane_type, xd->frame_type,
+ stuff1st_order_b_8x8(xd, xd->block + b, t, plane_type, xd->frame_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@@ -1330,7 +1433,7 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
}
for (b = 16; b < 24; b += 4) {
- stuff1st_order_buv_8x8(xd->block + b, t, 2, xd->frame_type,
+ stuff1st_order_buv_8x8(xd, xd->block + b, t, 2, xd->frame_type,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
@@ -1341,9 +1444,10 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
*t = t_backup;
}
-#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
+#if CONFIG_TX16X16
static __inline
-void stuff1st_order_b_16x16(const BLOCKD *const b,
+void stuff1st_order_b_16x16(MACROBLOCKD *xd,
+ const BLOCKD *const b,
TOKENEXTRA **tp,
const FRAME_TYPE frametype,
ENTROPY_CONTEXT *a,
@@ -1352,17 +1456,31 @@ void stuff1st_order_b_16x16(const BLOCKD *const b,
int dry_run){
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
+#if CONFIG_HYBRIDTRANSFORM16X16
+ TX_TYPE tx_type = get_tx_type(xd, b);
+#endif
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
(void) frametype;
(void) b;
t->Token = DCT_EOB_TOKEN;
+#if CONFIG_HYBRIDTRANSFORM16X16
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[3][1][pt];
+ else
+#endif
t->context_tree = cpi->common.fc.coef_probs_16x16[3][1][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
- if (!dry_run)
+ if (!dry_run) {
+#if CONFIG_HYBRIDTRANSFORM16X16
+ if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts_16x16[3][1][pt][DCT_EOB_TOKEN];
+ else
+#endif
++cpi->coef_counts_16x16[3][1][pt][DCT_EOB_TOKEN];
+ }
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
}
@@ -1376,13 +1494,13 @@ void vp8_stuff_mb_16x16(VP8_COMP *cpi,
int b, i;
TOKENEXTRA *t_backup = *t;
- stuff1st_order_b_16x16(xd->block, t, xd->frame_type, A, L, cpi, dry_run);
+ stuff1st_order_b_16x16(xd, xd->block, t, xd->frame_type, A, L, cpi, dry_run);
for (i = 1; i < 16; i++) {
*(A + vp8_block2above[i]) = *(A);
*(L + vp8_block2left[i]) = *(L);
}
for (b = 16; b < 24; b += 4) {
- stuff1st_order_buv_8x8(xd->block + b, t, 2, xd->frame_type,
+ stuff1st_order_buv_8x8(xd, xd->block + b, t, 2, xd->frame_type,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
@@ -1398,6 +1516,8 @@ void vp8_stuff_mb_16x16(VP8_COMP *cpi,
static __inline void stuff2nd_order_b
(
+ MACROBLOCKD *xd,
+ const BLOCKD *const b,
TOKENEXTRA **tp,
ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l,
@@ -1420,22 +1540,38 @@ static __inline void stuff2nd_order_b
}
-static __inline void stuff1st_order_b(TOKENEXTRA **tp,
+static __inline void stuff1st_order_b(MACROBLOCKD *xd,
+ const BLOCKD *const b,
+ TOKENEXTRA **tp,
ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l,
VP8_COMP *cpi,
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
+#if CONFIG_HYBRIDTRANSFORM
+ TX_TYPE tx_type = get_tx_type(xd, b);
+#endif
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
t->Token = DCT_EOB_TOKEN;
- t->context_tree = cpi->common.fc.coef_probs [0] [1] [pt];
+#if CONFIG_HYBRIDTRANSFORM
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs [0] [1] [pt];
+ else
+#endif
+ t->context_tree = cpi->common.fc.coef_probs [0] [1] [pt];
t->skip_eob_node = 0;
++t;
*tp = t;
- if (!dry_run)
- ++cpi->coef_counts[0] [1] [pt] [DCT_EOB_TOKEN];
+ if (!dry_run) {
+#if CONFIG_HYBRIDTRANSFORM
+ if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts[0] [1] [pt] [DCT_EOB_TOKEN];
+ else
+#endif
+ ++cpi->coef_counts[0] [1] [pt] [DCT_EOB_TOKEN];
+ }
pt = 0; /* 0 <-> all coeff data is zero */
*a = *l = pt;
@@ -1443,6 +1579,8 @@ static __inline void stuff1st_order_b(TOKENEXTRA **tp,
static __inline
void stuff1st_order_buv
(
+ MACROBLOCKD *xd,
+ const BLOCKD *const b,
TOKENEXTRA **tp,
ENTROPY_CONTEXT *a,
ENTROPY_CONTEXT *l,
@@ -1467,23 +1605,22 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
- int plane_type;
int b;
TOKENEXTRA *t_backup = *t;
- stuff2nd_order_b(t,
+ stuff2nd_order_b(xd, xd->block + 24, t,
A + vp8_block2above[24],
L + vp8_block2left[24],
cpi, dry_run);
for (b = 0; b < 16; b++)
- stuff1st_order_b(t,
+ stuff1st_order_b(xd, xd->block + b, t,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
for (b = 16; b < 24; b++)
- stuff1st_order_buv(t,
+ stuff1st_order_buv(xd, xd->block + b, t,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
@@ -1502,13 +1639,13 @@ void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
int b;
TOKENEXTRA *t_backup = *t;
- stuff2nd_order_b_8x8(xd->block + 24, t, 1, xd->frame_type,
+ stuff2nd_order_b_8x8(xd, xd->block + 24, t, 1, xd->frame_type,
A + vp8_block2above_8x8[24],
L + vp8_block2left_8x8[24], cpi, dry_run);
plane_type = 3;
for (b = 0; b < 16; b += 4) {
- stuff1st_order_b_8x8(xd->block + b, t, plane_type, xd->frame_type,
+ stuff1st_order_b_8x8(xd, xd->block + b, t, plane_type, xd->frame_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@@ -1517,7 +1654,7 @@ void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
}
for (b = 16; b < 24; b++)
- stuff1st_order_buv(t,
+ stuff1st_order_buv(xd, xd->block + b, t,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);