path: root/vp8/encoder
Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/asm_enc_offsets.c            |    1
-rw-r--r--  vp8/encoder/bitstream.c                  | 1115
-rw-r--r--  vp8/encoder/block.h                      |   23
-rw-r--r--  vp8/encoder/dct.c                        |    8
-rw-r--r--  vp8/encoder/dct.h                        |    2
-rw-r--r--  vp8/encoder/encodeframe.c                |  104
-rw-r--r--  vp8/encoder/encodeintra.c                |   70
-rw-r--r--  vp8/encoder/encodemb.c                   |   78
-rw-r--r--  vp8/encoder/encodemv.c                   |  672
-rw-r--r--  vp8/encoder/encodemv.h                   |   26
-rw-r--r--  vp8/encoder/firstpass.c                  |   20
-rw-r--r--  vp8/encoder/generic/csystemdependent.c   |   79
-rw-r--r--  vp8/encoder/mbgraph.c                    |   26
-rw-r--r--  vp8/encoder/mcomp.c                      |  137
-rw-r--r--  vp8/encoder/mcomp.h                      |    9
-rw-r--r--  vp8/encoder/modecosts.c                  |    3
-rw-r--r--  vp8/encoder/onyx_if.c                    |  285
-rw-r--r--  vp8/encoder/onyx_int.h                   |   44
-rw-r--r--  vp8/encoder/picklpf.c                    |   28
-rw-r--r--  vp8/encoder/quantize.c                   |   10
-rw-r--r--  vp8/encoder/quantize.h                   |    6
-rw-r--r--  vp8/encoder/ratectrl.c                   |   44
-rw-r--r--  vp8/encoder/rdopt.c                      |  902
-rw-r--r--  vp8/encoder/sad_c.c                      |   44
-rw-r--r--  vp8/encoder/segmentation.c               |    4
-rw-r--r--  vp8/encoder/ssim.c                       |  109
-rw-r--r--  vp8/encoder/tokenize.c                   |  236
-rw-r--r--  vp8/encoder/variance.h                   |  568
-rw-r--r--  vp8/encoder/x86/variance_x86.h           |  328
-rw-r--r--  vp8/encoder/x86/x86_csystemdependent.c   |   85
30 files changed, 1401 insertions(+), 3665 deletions(-)
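
The main thrust of the change is in bitstream.c: the boolean coder is no longer pulled out of the encoder context inside each writer routine, but is passed in explicitly, which lets vp8_pack_bitstream() keep separate writers for the frame header and the per-macroblock data. A minimal before/after sketch, abridged from the hunks below (all identifiers taken from the diff itself):

  /* before: helpers reached into the encoder context for the writer */
  vp8_writer *const w = &cpi->bc;
  update_mbintra_mode_probs(cpi);              /* wrote through cpi->bc */

  /* after: the writer is an explicit argument, created locally */
  vp8_writer header_bc;
  vp8_start_encode(&header_bc, cx_data);
  update_mbintra_mode_probs(cpi, &header_bc);  /* header data goes to header_bc */
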
diff --git a/vp8/encoder/asm_enc_offsets.c b/vp8/encoder/asm_enc_offsets.c
index c79e915f8..8e74901b3 100644
--- a/vp8/encoder/asm_enc_offsets.c
+++ b/vp8/encoder/asm_enc_offsets.c
@@ -68,7 +68,6 @@ DEFINE(vp8_extra_bit_struct_base_val, offsetof(vp8_extra_bit_struct, b
DEFINE(vp8_comp_tplist, offsetof(VP8_COMP, tplist));
DEFINE(vp8_comp_common, offsetof(VP8_COMP, common));
-DEFINE(vp8_comp_bc2, offsetof(VP8_COMP, bc2));
DEFINE(tokenlist_start, offsetof(TOKENLIST, start));
DEFINE(tokenlist_stop, offsetof(TOKENLIST, stop));
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 812565915..36776ab21 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -45,32 +45,26 @@ unsigned int tree_update_hist [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM
unsigned int hybrid_tree_update_hist [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES][2];
-#endif
unsigned int tree_update_hist_8x8 [BLOCK_TYPES_8X8]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM8X8
unsigned int hybrid_tree_update_hist_8x8 [BLOCK_TYPES_8X8]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#endif
unsigned int tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
-#endif
extern unsigned int active_section;
#endif
@@ -111,10 +105,10 @@ static int remap_prob(int v, int m) {
return i;
}
-static void write_prob_diff_update(vp8_writer *const w,
+static void write_prob_diff_update(vp8_writer *const bc,
vp8_prob newp, vp8_prob oldp) {
int delp = remap_prob(newp, oldp);
- vp8_encode_term_subexp(w, delp, SUBEXP_PARAM, 255);
+ vp8_encode_term_subexp(bc, delp, SUBEXP_PARAM, 255);
}
static int prob_diff_update_cost(vp8_prob newp, vp8_prob oldp) {
@@ -186,7 +180,7 @@ unsigned int pick_best_mv_ref( MACROBLOCK *x,
#endif
static void update_mode(
- vp8_writer *const w,
+ vp8_writer *const bc,
int n,
vp8_token tok [/* n */],
vp8_tree tree,
@@ -212,34 +206,33 @@ static void update_mode(
if (new_b + (n << 8) < old_b) {
int i = 0;
- vp8_write_bit(w, 1);
+ vp8_write_bit(bc, 1);
do {
const vp8_prob p = Pnew[i];
- vp8_write_literal(w, Pcur[i] = p ? p : 1, 8);
+ vp8_write_literal(bc, Pcur[i] = p ? p : 1, 8);
} while (++i < n);
} else
- vp8_write_bit(w, 0);
+ vp8_write_bit(bc, 0);
}
-static void update_mbintra_mode_probs(VP8_COMP *cpi) {
- VP8_COMMON *const cm = & cpi->common;
-
- vp8_writer *const w = & cpi->bc;
+static void update_mbintra_mode_probs(VP8_COMP* const cpi,
+ vp8_writer* const bc) {
+ VP8_COMMON *const cm = &cpi->common;
{
vp8_prob Pnew [VP8_YMODES - 1];
unsigned int bct [VP8_YMODES - 1] [2];
update_mode(
- w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree,
+ bc, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree,
Pnew, cm->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count
);
}
}
-static __inline int get_prob(int num, int den) {
+static int get_prob(int num, int den) {
int p;
if (den <= 0)
return 128;
@@ -251,85 +244,42 @@ static __inline int get_prob(int num, int den) {
return p;
}
+static int get_binary_prob(int n0, int n1) {
+ return get_prob(n0, n0 + n1);
+}
+
void update_skip_probs(VP8_COMP *cpi) {
- VP8_COMMON *const pc = & cpi->common;
+ VP8_COMMON *const pc = &cpi->common;
int prob_skip_false[3] = {0, 0, 0};
int k;
for (k = 0; k < MBSKIP_CONTEXTS; ++k) {
- if ((cpi->skip_false_count[k] + cpi->skip_true_count[k])) {
- prob_skip_false[k] =
- cpi->skip_false_count[k] * 256 /
- (cpi->skip_false_count[k] + cpi->skip_true_count[k]);
-
- if (prob_skip_false[k] <= 1)
- prob_skip_false[k] = 1;
-
- if (prob_skip_false[k] > 255)
- prob_skip_false[k] = 255;
- } else
- prob_skip_false[k] = 128;
-
- pc->mbskip_pred_probs[k] = prob_skip_false[k];
+ pc->mbskip_pred_probs[k] = get_binary_prob(cpi->skip_false_count[k],
+ cpi->skip_true_count[k]);
}
}
-#if CONFIG_SWITCHABLE_INTERP
-void update_switchable_interp_probs(VP8_COMP *cpi) {
- VP8_COMMON *const pc = & cpi->common;
- vp8_writer *const w = & cpi->bc;
+void update_switchable_interp_probs(VP8_COMP *cpi, vp8_writer* const bc) {
+ VP8_COMMON *const pc = &cpi->common;
unsigned int branch_ct[32][2];
int i, j;
for (j = 0; j <= VP8_SWITCHABLE_FILTERS; ++j) {
- //for (j = 0; j <= 0; ++j) {
-/*
- if (!cpi->dummy_packing)
-#if VP8_SWITCHABLE_FILTERS == 3
- printf("HELLO %d %d %d\n", cpi->switchable_interp_count[j][0],
- cpi->switchable_interp_count[j][1], cpi->switchable_interp_count[j][2]);
-#else
- printf("HELLO %d %d\n", cpi->switchable_interp_count[j][0],
- cpi->switchable_interp_count[j][1]);
-#endif
-*/
vp8_tree_probs_from_distribution(
VP8_SWITCHABLE_FILTERS,
vp8_switchable_interp_encodings, vp8_switchable_interp_tree,
- pc->fc.switchable_interp_prob[j], branch_ct, cpi->switchable_interp_count[j],
- 256, 1
- );
+ pc->fc.switchable_interp_prob[j], branch_ct,
+ cpi->switchable_interp_count[j], 256, 1);
for (i = 0; i < VP8_SWITCHABLE_FILTERS - 1; ++i) {
if (pc->fc.switchable_interp_prob[j][i] < 1)
pc->fc.switchable_interp_prob[j][i] = 1;
- vp8_write_literal(w, pc->fc.switchable_interp_prob[j][i], 8);
-/*
- if (!cpi->dummy_packing)
-#if VP8_SWITCHABLE_FILTERS == 3
- printf("Probs %d %d [%d]\n",
- pc->fc.switchable_interp_prob[j][0],
- pc->fc.switchable_interp_prob[j][1], pc->frame_type);
-#else
- printf("Probs %d [%d]\n", pc->fc.switchable_interp_prob[j][0],
- pc->frame_type);
-#endif
-*/
+ vp8_write_literal(bc, pc->fc.switchable_interp_prob[j][i], 8);
}
}
- /*
- if (!cpi->dummy_packing)
-#if VP8_SWITCHABLE_FILTERS == 3
- printf("Probs %d %d [%d]\n",
- pc->fc.switchable_interp_prob[0], pc->fc.switchable_interp_prob[1], pc->frame_type);
-#else
- printf("Probs %d [%d]\n", pc->fc.switchable_interp_prob[0], pc->frame_type);
-#endif
- */
}
-#endif
// This function updates the reference frame prediction stats
static void update_refpred_stats(VP8_COMP *cpi) {
- VP8_COMMON *const cm = & cpi->common;
+ VP8_COMMON *const cm = &cpi->common;
int i;
int tot_count;
vp8_prob new_pred_probs[PREDICTION_PROBS];
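
Note on the new helpers: get_prob() and get_binary_prob() centralize the count-to-probability conversion that was previously open-coded in update_skip_probs(), update_refpred_stats() and update_ref_probs(). A minimal sketch of the intended behaviour, inferred from the code being replaced (only the den <= 0 early-out is visible in the hunk above; the exact rounding and clamping are assumptions):

  /* Sketch: map a count pair onto an 8-bit probability in [1, 255]. */
  static int get_prob(int num, int den) {
    int p;
    if (den <= 0)
      return 128;                          /* no observations: neutral prob */
    p = (num * 255 + (den >> 1)) / den;    /* rounded, as in the removed
                                              update_refpred_stats() code */
    return p < 1 ? 1 : p > 255 ? 255 : p;
  }

  static int get_binary_prob(int n0, int n1) {
    return get_prob(n0, n0 + n1);          /* probability of outcome 0 */
  }
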
@@ -347,15 +297,8 @@ static void update_refpred_stats(VP8_COMP *cpi) {
} else {
// From the prediction counts set the probabilities for each context
for (i = 0; i < PREDICTION_PROBS; i++) {
- tot_count = cpi->ref_pred_count[i][0] + cpi->ref_pred_count[i][1];
- if (tot_count) {
- new_pred_probs[i] =
- (cpi->ref_pred_count[i][0] * 255 + (tot_count >> 1)) / tot_count;
-
- // Clamp to minimum allowed value
- new_pred_probs[i] += !new_pred_probs[i];
- } else
- new_pred_probs[i] = 128;
+ new_pred_probs[i] = get_binary_prob(cpi->ref_pred_count[i][0],
+ cpi->ref_pred_count[i][1]);
// Decide whether or not to update the reference frame probs.
// Returned costs are in 1/256 bit units.
@@ -454,13 +397,15 @@ static int prob_diff_update_savings_search(const unsigned int *ct,
return bestsavings;
}
-static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
- const TOKENEXTRA *const stop = p + xcount;
+static void pack_mb_tokens(vp8_writer* const bc,
+ TOKENEXTRA **tp,
+ const TOKENEXTRA *const stop) {
unsigned int split;
unsigned int shift;
- int count = w->count;
- unsigned int range = w->range;
- unsigned int lowvalue = w->lowvalue;
+ int count = bc->count;
+ unsigned int range = bc->range;
+ unsigned int lowvalue = bc->lowvalue;
+ TOKENEXTRA *p = *tp;
while (p < stop) {
const int t = p->Token;
@@ -471,6 +416,12 @@ static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
int v = a->value;
int n = a->Len;
+ if (t == EOSB_TOKEN)
+ {
+ ++p;
+ break;
+ }
+
/* skip one or two nodes */
if (p->skip_eob_node) {
n -= p->skip_eob_node;
@@ -497,17 +448,17 @@ static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
int offset = shift - count;
if ((lowvalue << (offset - 1)) & 0x80000000) {
- int x = w->pos - 1;
+ int x = bc->pos - 1;
- while (x >= 0 && w->buffer[x] == 0xff) {
- w->buffer[x] = (unsigned char)0;
+ while (x >= 0 && bc->buffer[x] == 0xff) {
+ bc->buffer[x] = (unsigned char)0;
x--;
}
- w->buffer[x] += 1;
+ bc->buffer[x] += 1;
}
- w->buffer[w->pos++] = (lowvalue >> (24 - offset));
+ bc->buffer[bc->pos++] = (lowvalue >> (24 - offset));
lowvalue <<= offset;
shift = count;
lowvalue &= 0xffffff;
@@ -547,17 +498,17 @@ static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
int offset = shift - count;
if ((lowvalue << (offset - 1)) & 0x80000000) {
- int x = w->pos - 1;
+ int x = bc->pos - 1;
- while (x >= 0 && w->buffer[x] == 0xff) {
- w->buffer[x] = (unsigned char)0;
+ while (x >= 0 && bc->buffer[x] == 0xff) {
+ bc->buffer[x] = (unsigned char)0;
x--;
}
- w->buffer[x] += 1;
+ bc->buffer[x] += 1;
}
- w->buffer[w->pos++] = (lowvalue >> (24 - offset));
+ bc->buffer[bc->pos++] = (lowvalue >> (24 - offset));
lowvalue <<= offset;
shift = count;
lowvalue &= 0xffffff;
@@ -583,14 +534,14 @@ static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
range <<= 1;
if ((lowvalue & 0x80000000)) {
- int x = w->pos - 1;
+ int x = bc->pos - 1;
- while (x >= 0 && w->buffer[x] == 0xff) {
- w->buffer[x] = (unsigned char)0;
+ while (x >= 0 && bc->buffer[x] == 0xff) {
+ bc->buffer[x] = (unsigned char)0;
x--;
}
- w->buffer[x] += 1;
+ bc->buffer[x] += 1;
}
@@ -598,20 +549,19 @@ static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount) {
if (!++count) {
count = -8;
- w->buffer[w->pos++] = (lowvalue >> 24);
+ bc->buffer[bc->pos++] = (lowvalue >> 24);
lowvalue &= 0xffffff;
}
}
}
-
++p;
}
- w->count = count;
- w->lowvalue = lowvalue;
- w->range = range;
-
+ bc->count = count;
+ bc->lowvalue = lowvalue;
+ bc->range = range;
+ *tp = p;
}
static void write_partition_size(unsigned char *cx_data, int size) {
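
pack_tokens_c(), which wrote a caller-supplied number of tokens, becomes pack_mb_tokens(), which writes tokens until it consumes an EOSB_TOKEN marker and then returns the advanced read pointer through *tp. That per-macroblock terminator is what allows the residual tokens to be interleaved with each macroblock's mode and motion-vector data later in this patch. A simplified, hypothetical caller is sketched below (the real loops in pack_inter_mode_mvs() and write_kfmodes() walk macroblocks in the 2x2 superblock order):

  /* Hypothetical flat loop illustrating the new per-MB token interface. */
  static void pack_all_mb_tokens(VP8_COMP *cpi, vp8_writer *bc) {
    TOKENEXTRA *tok = cpi->tok;
    TOKENEXTRA *const tok_end = tok + cpi->tok_count;
    int mb;
    for (mb = 0; mb < cpi->common.MBs; mb++) {
      /* ... the MB's modes and MVs are written here ... */
      assert(tok < tok_end);
      pack_mb_tokens(bc, &tok, tok_end);  /* stops after this MB's EOSB_TOKEN */
    }
  }
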
@@ -628,107 +578,82 @@ static void write_partition_size(unsigned char *cx_data, int size) {
static void write_mv_ref
(
- vp8_writer *w, MB_PREDICTION_MODE m, const vp8_prob *p
+ vp8_writer *bc, MB_PREDICTION_MODE m, const vp8_prob *p
) {
#if CONFIG_DEBUG
assert(NEARESTMV <= m && m <= SPLITMV);
#endif
- vp8_write_token(w, vp8_mv_ref_tree, p,
+ vp8_write_token(bc, vp8_mv_ref_tree, p,
vp8_mv_ref_encoding_array - NEARESTMV + m);
}
#if CONFIG_SUPERBLOCKS
-static void write_sb_mv_ref(vp8_writer *w, MB_PREDICTION_MODE m, const vp8_prob *p) {
+static void write_sb_mv_ref(vp8_writer *bc, MB_PREDICTION_MODE m,
+ const vp8_prob *p) {
#if CONFIG_DEBUG
assert(NEARESTMV <= m && m < SPLITMV);
#endif
- vp8_write_token(w, vp8_sb_mv_ref_tree, p,
+ vp8_write_token(bc, vp8_sb_mv_ref_tree, p,
vp8_sb_mv_ref_encoding_array - NEARESTMV + m);
}
#endif
static void write_sub_mv_ref
(
- vp8_writer *w, B_PREDICTION_MODE m, const vp8_prob *p
+ vp8_writer *bc, B_PREDICTION_MODE m, const vp8_prob *p
) {
#if CONFIG_DEBUG
assert(LEFT4X4 <= m && m <= NEW4X4);
#endif
- vp8_write_token(w, vp8_sub_mv_ref_tree, p,
+ vp8_write_token(bc, vp8_sub_mv_ref_tree, p,
vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
}
-#if CONFIG_NEWMVENTROPY
-static void write_nmv (vp8_writer *w, const MV *mv, const int_mv *ref,
- const nmv_context *nmvc, int usehp) {
- MV e;
- e.row = mv->row - ref->as_mv.row;
- e.col = mv->col - ref->as_mv.col;
-
- vp8_encode_nmv(w, &e, &ref->as_mv, nmvc);
- vp8_encode_nmv_fp(w, &e, &ref->as_mv, nmvc, usehp);
-}
-
-#else
-
-static void write_mv
-(
- vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT *mvc
-) {
+static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
+ const nmv_context *nmvc, int usehp) {
MV e;
e.row = mv->row - ref->as_mv.row;
e.col = mv->col - ref->as_mv.col;
- vp8_encode_motion_vector(w, &e, mvc);
+ vp8_encode_nmv(bc, &e, &ref->as_mv, nmvc);
+ vp8_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
}
-static void write_mv_hp
-(
- vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT_HP *mvc
-) {
- MV e;
- e.row = mv->row - ref->as_mv.row;
- e.col = mv->col - ref->as_mv.col;
-
- vp8_encode_motion_vector_hp(w, &e, mvc);
-}
-#endif /* CONFIG_NEWMVENTROPY */
-
// This function writes the current macro block's segnment id to the bitstream
// It should only be called if a segment map update is indicated.
-static void write_mb_segid(vp8_writer *w,
+static void write_mb_segid(vp8_writer *bc,
const MB_MODE_INFO *mi, const MACROBLOCKD *xd) {
// Encode the MB segment id.
if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
switch (mi->segment_id) {
case 0:
- vp8_write(w, 0, xd->mb_segment_tree_probs[0]);
- vp8_write(w, 0, xd->mb_segment_tree_probs[1]);
+ vp8_write(bc, 0, xd->mb_segment_tree_probs[0]);
+ vp8_write(bc, 0, xd->mb_segment_tree_probs[1]);
break;
case 1:
- vp8_write(w, 0, xd->mb_segment_tree_probs[0]);
- vp8_write(w, 1, xd->mb_segment_tree_probs[1]);
+ vp8_write(bc, 0, xd->mb_segment_tree_probs[0]);
+ vp8_write(bc, 1, xd->mb_segment_tree_probs[1]);
break;
case 2:
- vp8_write(w, 1, xd->mb_segment_tree_probs[0]);
- vp8_write(w, 0, xd->mb_segment_tree_probs[2]);
+ vp8_write(bc, 1, xd->mb_segment_tree_probs[0]);
+ vp8_write(bc, 0, xd->mb_segment_tree_probs[2]);
break;
case 3:
- vp8_write(w, 1, xd->mb_segment_tree_probs[0]);
- vp8_write(w, 1, xd->mb_segment_tree_probs[2]);
+ vp8_write(bc, 1, xd->mb_segment_tree_probs[0]);
+ vp8_write(bc, 1, xd->mb_segment_tree_probs[2]);
break;
// TRAP.. This should not happen
default:
- vp8_write(w, 0, xd->mb_segment_tree_probs[0]);
- vp8_write(w, 0, xd->mb_segment_tree_probs[1]);
+ vp8_write(bc, 0, xd->mb_segment_tree_probs[0]);
+ vp8_write(bc, 0, xd->mb_segment_tree_probs[1]);
break;
}
}
}
// This function encodes the reference frame
-static void encode_ref_frame(vp8_writer *const w,
+static void encode_ref_frame(vp8_writer *const bc,
VP8_COMMON *const cm,
MACROBLOCKD *xd,
int segment_id,
@@ -765,7 +690,7 @@ static void encode_ref_frame(vp8_writer *const w,
(xd->mode_info_context->mbmi.ref_frame == pred_rf);
set_pred_flag(xd, PRED_REF, prediction_flag);
- vp8_write(w, prediction_flag, pred_prob);
+ vp8_write(bc, prediction_flag, pred_prob);
// If not predicted correctly then code value explicitly
if (!prediction_flag) {
@@ -787,18 +712,18 @@ static void encode_ref_frame(vp8_writer *const w,
}
if (mod_refprobs[0]) {
- vp8_write(w, (rf != INTRA_FRAME), mod_refprobs[0]);
+ vp8_write(bc, (rf != INTRA_FRAME), mod_refprobs[0]);
}
// Inter coded
if (rf != INTRA_FRAME) {
if (mod_refprobs[1]) {
- vp8_write(w, (rf != LAST_FRAME), mod_refprobs[1]);
+ vp8_write(bc, (rf != LAST_FRAME), mod_refprobs[1]);
}
if (rf != LAST_FRAME) {
if (mod_refprobs[2]) {
- vp8_write(w, (rf != GOLDEN_FRAME), mod_refprobs[2]);
+ vp8_write(bc, (rf != GOLDEN_FRAME), mod_refprobs[2]);
}
}
}
@@ -811,50 +736,32 @@ static void encode_ref_frame(vp8_writer *const w,
// Update the probabilities used to encode reference frame data
static void update_ref_probs(VP8_COMP *const cpi) {
- VP8_COMMON *const cm = & cpi->common;
+ VP8_COMMON *const cm = &cpi->common;
const int *const rfct = cpi->count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] +
rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
- cm->prob_intra_coded = (rf_intra + rf_inter)
- ? rf_intra * 255 / (rf_intra + rf_inter) : 1;
-
- if (!cm->prob_intra_coded)
- cm->prob_intra_coded = 1;
-
- cm->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
-
- if (!cm->prob_last_coded)
- cm->prob_last_coded = 1;
-
- cm->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
- ? (rfct[GOLDEN_FRAME] * 255) /
- (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;
-
- if (!cm->prob_gf_coded)
- cm->prob_gf_coded = 1;
+ cm->prob_intra_coded = get_binary_prob(rf_intra, rf_inter);
+ cm->prob_last_coded = get_prob(rfct[LAST_FRAME], rf_inter);
+ cm->prob_gf_coded = get_binary_prob(rfct[GOLDEN_FRAME], rfct[ALTREF_FRAME]);
// Compute a modified set of probabilities to use when prediction of the
// reference frame fails
compute_mod_refprobs(cm);
}
-static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
+static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
int i;
- VP8_COMMON *const pc = & cpi->common;
- vp8_writer *const w = & cpi->bc;
-#if CONFIG_NEWMVENTROPY
+ VP8_COMMON *const pc = &cpi->common;
const nmv_context *nmvc = &pc->fc.nmvc;
-#else
- const MV_CONTEXT *mvc = pc->fc.mvc;
- const MV_CONTEXT_HP *mvc_hp = pc->fc.mvc_hp;
-#endif
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
MODE_INFO *m;
MODE_INFO *prev_m;
+ TOKENEXTRA *tok = cpi->tok;
+ TOKENEXTRA *tok_end = tok + cpi->tok_count;
const int mis = pc->mode_info_stride;
int mb_row, mb_col;
@@ -871,73 +778,6 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
cpi->mb.partition_info = cpi->mb.pi;
- // Update the probabilities used to encode reference frame data
- update_ref_probs(cpi);
-
-#ifdef ENTROPY_STATS
- active_section = 1;
-#endif
-
- if (pc->mb_no_coeff_skip) {
- int k;
-
- update_skip_probs(cpi);
- for (k = 0; k < MBSKIP_CONTEXTS; ++k)
- vp8_write_literal(w, pc->mbskip_pred_probs[k], 8);
- }
-
-#if CONFIG_PRED_FILTER
- // Write the prediction filter mode used for this frame
- vp8_write_literal(w, pc->pred_filter_mode, 2);
-
- // Write prediction filter on/off probability if signaling at MB level
- if (pc->pred_filter_mode == 2)
- vp8_write_literal(w, pc->prob_pred_filter_off, 8);
-
- // printf("pred_filter_mode:%d prob_pred_filter_off:%d\n",
- // pc->pred_filter_mode, pc->prob_pred_filter_off);
-#endif
-#if CONFIG_SWITCHABLE_INTERP
- if (pc->mcomp_filter_type == SWITCHABLE)
- update_switchable_interp_probs(cpi);
-#endif
-
- vp8_write_literal(w, pc->prob_intra_coded, 8);
- vp8_write_literal(w, pc->prob_last_coded, 8);
- vp8_write_literal(w, pc->prob_gf_coded, 8);
-
- if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
- vp8_write(w, 1, 128);
- vp8_write(w, 1, 128);
- for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
- if (cpi->single_pred_count[i] + cpi->comp_pred_count[i]) {
- pc->prob_comppred[i] = cpi->single_pred_count[i] * 255 /
- (cpi->single_pred_count[i] + cpi->comp_pred_count[i]);
- if (pc->prob_comppred[i] < 1)
- pc->prob_comppred[i] = 1;
- } else {
- pc->prob_comppred[i] = 128;
- }
- vp8_write_literal(w, pc->prob_comppred[i], 8);
- }
- } else if (cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY) {
- vp8_write(w, 0, 128);
- } else { /* compound prediction only */
- vp8_write(w, 1, 128);
- vp8_write(w, 0, 128);
- }
-
- update_mbintra_mode_probs(cpi);
-
-#if CONFIG_NEWMVENTROPY
- vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv)
- vp8_write_mvprobs_hp(cpi);
- else
- vp8_write_mvprobs(cpi);
-#endif
-
mb_row = 0;
for (row = 0; row < pc->mb_rows; row += 2) {
m = pc->mi + row * mis;
@@ -950,7 +790,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
// Process the 4 MBs in the order:
// top-left, top-right, bottom-left, bottom-right
#if CONFIG_SUPERBLOCKS
- vp8_write(w, m->mbmi.encoded_as_sb, pc->sb_coded);
+ vp8_write(bc, m->mbmi.encoded_as_sb, pc->sb_coded);
#endif
for (i = 0; i < 4; i++) {
MB_MODE_INFO *mi;
@@ -972,7 +812,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
continue;
}
- mi = & m->mbmi;
+ mi = &m->mbmi;
rf = mi->ref_frame;
mode = mi->mode;
segment_id = mi->segment_id;
@@ -1000,14 +840,14 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
pred_prob = get_pred_prob(pc, xd, PRED_SEG_ID);
// Code the segment id prediction flag for this mb
- vp8_write(w, prediction_flag, pred_prob);
+ vp8_write(bc, prediction_flag, pred_prob);
// If the mb segment id wasn't predicted code explicitly
if (!prediction_flag)
- write_mb_segid(w, mi, &cpi->mb.e_mbd);
+ write_mb_segid(bc, mi, &cpi->mb.e_mbd);
} else {
// Normal unpredicted coding
- write_mb_segid(w, mi, &cpi->mb.e_mbd);
+ write_mb_segid(bc, mi, &cpi->mb.e_mbd);
}
}
@@ -1022,12 +862,12 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
skip_coeff &= m[mis + 1].mbmi.mb_skip_coeff;
}
#endif
- vp8_encode_bool(w, skip_coeff,
+ vp8_encode_bool(bc, skip_coeff,
get_pred_prob(pc, xd, PRED_MBSKIP));
}
// Encode the reference frame.
- encode_ref_frame(w, pc, xd, segment_id, rf);
+ encode_ref_frame(bc, pc, xd, segment_id, rf);
if (rf == INTRA_FRAME) {
#ifdef ENTROPY_STATS
@@ -1037,7 +877,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
// TODO(rbultje) write using SB tree structure
if (!segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
- write_ymode(w, mode, pc->fc.ymode_prob);
+ write_ymode(bc, mode, pc->fc.ymode_prob);
}
if (mode == B_PRED) {
@@ -1046,32 +886,40 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
int uses_second =
m->bmi[0].as_mode.second !=
(B_PREDICTION_MODE)(B_DC_PRED - 1);
- vp8_write(w, uses_second, 128);
+ vp8_write(bc, uses_second, 128);
#endif
do {
#if CONFIG_COMP_INTRA_PRED
B_PREDICTION_MODE mode2 = m->bmi[j].as_mode.second;
#endif
- write_bmode(w, m->bmi[j].as_mode.first,
+ write_bmode(bc, m->bmi[j].as_mode.first,
pc->fc.bmode_prob);
+ /*
+ if (!cpi->dummy_packing) {
+ int p;
+ for (p = 0; p < VP8_BINTRAMODES - 1; ++p)
+ printf(" %d", pc->fc.bmode_prob[p]);
+ printf("\nbmode[%d][%d]: %d\n", pc->current_video_frame, j, m->bmi[j].as_mode.first);
+ }
+ */
#if CONFIG_COMP_INTRA_PRED
if (uses_second) {
- write_bmode(w, mode2, pc->fc.bmode_prob);
+ write_bmode(bc, mode2, pc->fc.bmode_prob);
}
#endif
} while (++j < 16);
}
if (mode == I8X8_PRED) {
- write_i8x8_mode(w, m->bmi[0].as_mode.first,
+ write_i8x8_mode(bc, m->bmi[0].as_mode.first,
pc->fc.i8x8_mode_prob);
- write_i8x8_mode(w, m->bmi[2].as_mode.first,
+ write_i8x8_mode(bc, m->bmi[2].as_mode.first,
pc->fc.i8x8_mode_prob);
- write_i8x8_mode(w, m->bmi[8].as_mode.first,
+ write_i8x8_mode(bc, m->bmi[8].as_mode.first,
pc->fc.i8x8_mode_prob);
- write_i8x8_mode(w, m->bmi[10].as_mode.first,
+ write_i8x8_mode(bc, m->bmi[10].as_mode.first,
pc->fc.i8x8_mode_prob);
} else {
- write_uv_mode(w, mi->uv_mode,
+ write_uv_mode(bc, mi->uv_mode,
pc->fc.uv_mode_prob[mode]);
}
} else {
@@ -1103,11 +951,11 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
if (!segfeature_active(xd, segment_id, SEG_LVL_MODE)) {
#if CONFIG_SUPERBLOCKS
if (mi->encoded_as_sb) {
- write_sb_mv_ref(w, mode, mv_ref_p);
+ write_sb_mv_ref(bc, mode, mv_ref_p);
} else
#endif
{
- write_mv_ref(w, mode, mv_ref_p);
+ write_mv_ref(bc, mode, mv_ref_p);
}
vp8_accum_mv_refs(&cpi->common, mode, ct);
}
@@ -1116,28 +964,26 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
// Is the prediction filter enabled
if (mode >= NEARESTMV && mode < SPLITMV) {
if (cpi->common.pred_filter_mode == 2)
- vp8_write(w, mi->pred_filter_enabled,
+ vp8_write(bc, mi->pred_filter_enabled,
pc->prob_pred_filter_off);
else
assert(mi->pred_filter_enabled ==
cpi->common.pred_filter_mode);
}
#endif
-#if CONFIG_SWITCHABLE_INTERP
if (mode >= NEARESTMV && mode <= SPLITMV)
{
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
- vp8_write_token(w, vp8_switchable_interp_tree,
- get_pred_probs(&cpi->common, xd, PRED_SWITCHABLE_INTERP),
+ vp8_write_token(bc, vp8_switchable_interp_tree,
+ get_pred_probs(&cpi->common, xd,
+ PRED_SWITCHABLE_INTERP),
vp8_switchable_interp_encodings +
vp8_switchable_interp_map[mi->interp_filter]);
- //if (!cpi->dummy_packing) printf("Reading: %d\n", mi->interp_filter);
} else {
assert (mi->interp_filter ==
cpi->common.mcomp_filter_type);
}
}
-#endif
if (mi->second_ref_frame &&
(mode == NEWMV || mode == SPLITMV)) {
int_mv n1, n2;
@@ -1155,7 +1001,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
// does the feature use compound prediction or not
// (if not specified at the frame/segment level)
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
- vp8_write(w, mi->second_ref_frame != INTRA_FRAME,
+ vp8_write(bc, mi->second_ref_frame != INTRA_FRAME,
get_pred_prob(pc, xd, PRED_COMP));
}
@@ -1180,17 +1026,9 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
}
#endif
-#if CONFIG_NEWMVENTROPY
- write_nmv(w, &mi->mv[0].as_mv, &best_mv,
+ write_nmv(bc, &mi->mv[0].as_mv, &best_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- write_mv_hp(w, &mi->mv[0].as_mv, &best_mv, mvc_hp);
- } else {
- write_mv(w, &mi->mv[0].as_mv, &best_mv, mvc);
- }
-#endif
if (mi->second_ref_frame) {
#if 0 //CONFIG_NEW_MVREF
@@ -1207,17 +1045,9 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
&best_second_mv);
cpi->best_ref_index_counts[best_index]++;
#endif
-#if CONFIG_NEWMVENTROPY
- write_nmv(w, &mi->mv[1].as_mv, &best_second_mv,
+ write_nmv(bc, &mi->mv[1].as_mv, &best_second_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- write_mv_hp(w, &mi->mv[1].as_mv, &best_second_mv, mvc_hp);
- } else {
- write_mv(w, &mi->mv[1].as_mv, &best_second_mv, mvc);
- }
-#endif
}
break;
case SPLITMV: {
@@ -1227,7 +1057,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
++count_mb_seg [mi->partitioning];
#endif
- write_split(w, mi->partitioning, cpi->common.fc.mbsplit_prob);
+ write_split(bc, mi->partitioning, cpi->common.fc.mbsplit_prob);
cpi->mbsplit_count[mi->partitioning]++;
do {
@@ -1252,43 +1082,23 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
abovemv.as_int = above_block_mv(m, k, mis);
mv_contz = vp8_mv_cont(&leftmv, &abovemv);
- write_sub_mv_ref(w, blockmode,
+ write_sub_mv_ref(bc, blockmode,
cpi->common.fc.sub_mv_ref_prob [mv_contz]);
cpi->sub_mv_ref_count[mv_contz][blockmode - LEFT4X4]++;
if (blockmode == NEW4X4) {
#ifdef ENTROPY_STATS
active_section = 11;
#endif
-#if CONFIG_NEWMVENTROPY
- write_nmv(w, &blockmv.as_mv, &best_mv,
+ write_nmv(bc, &blockmv.as_mv, &best_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- write_mv_hp(w, &blockmv.as_mv, &best_mv,
- (const MV_CONTEXT_HP *) mvc_hp);
- } else {
- write_mv(w, &blockmv.as_mv, &best_mv,
- (const MV_CONTEXT *) mvc);
- }
-#endif
if (mi->second_ref_frame) {
-#if CONFIG_NEWMVENTROPY
- write_nmv(w,
+ write_nmv(bc,
&cpi->mb.partition_info->bmi[j].second_mv.as_mv,
&best_second_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- write_mv_hp(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
- &best_second_mv, (const MV_CONTEXT_HP *) mvc_hp);
- } else {
- write_mv(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
- &best_second_mv, (const MV_CONTEXT *) mvc);
- }
-#endif
}
}
} while (++j < cpi->mb.partition_info->count);
@@ -1300,20 +1110,25 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
}
}
-#if CONFIG_TX_SELECT
if (((rf == INTRA_FRAME && mode <= I8X8_PRED) ||
- (rf != INTRA_FRAME && mode != SPLITMV)) &&
+ (rf != INTRA_FRAME && !(mode == SPLITMV &&
+ mi->partitioning == PARTITIONING_4X4))) &&
pc->txfm_mode == TX_MODE_SELECT &&
!((pc->mb_no_coeff_skip && mi->mb_skip_coeff) ||
(segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
TX_SIZE sz = mi->txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
- vp8_write(w, sz != TX_4X4, pc->prob_tx[0]);
- if (sz != TX_4X4 && mode != I8X8_PRED)
- vp8_write(w, sz != TX_8X8, pc->prob_tx[1]);
+ vp8_write(bc, sz != TX_4X4, pc->prob_tx[0]);
+ if (sz != TX_4X4 && mode != I8X8_PRED && mode != SPLITMV)
+ vp8_write(bc, sz != TX_8X8, pc->prob_tx[1]);
}
+
+#ifdef ENTROPY_STATS
+ active_section = 1;
#endif
+ assert(tok < tok_end);
+ pack_mb_tokens(bc, &tok, tok_end);
#if CONFIG_SUPERBLOCKS
if (m->mbmi.encoded_as_sb) {
@@ -1348,9 +1163,108 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
}
-static void write_kfmodes(VP8_COMP *cpi) {
- vp8_writer *const bc = & cpi->bc;
- VP8_COMMON *const c = & cpi->common;
+static void write_mb_modes_kf(const VP8_COMMON *c,
+ const MACROBLOCKD *xd,
+ const MODE_INFO *m,
+ int mode_info_stride,
+ vp8_writer *const bc) {
+ const int mis = mode_info_stride;
+ int ym;
+ int segment_id;
+
+ ym = m->mbmi.mode;
+ segment_id = m->mbmi.segment_id;
+
+ if (xd->update_mb_segmentation_map) {
+ write_mb_segid(bc, &m->mbmi, xd);
+ }
+
+ if (c->mb_no_coeff_skip &&
+ (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
+ (get_segdata(xd, segment_id, SEG_LVL_EOB) != 0))) {
+ int skip_coeff = m->mbmi.mb_skip_coeff;
+#if CONFIG_SUPERBLOCKS
+ if (m->mbmi.encoded_as_sb) {
+ skip_coeff &= m[1].mbmi.mb_skip_coeff;
+ skip_coeff &= m[mis].mbmi.mb_skip_coeff;
+ skip_coeff &= m[mis + 1].mbmi.mb_skip_coeff;
+ }
+#endif
+ vp8_encode_bool(bc, skip_coeff,
+ get_pred_prob(c, xd, PRED_MBSKIP));
+ }
+
+#if CONFIG_SUPERBLOCKS
+ if (m->mbmi.encoded_as_sb) {
+ sb_kfwrite_ymode(bc, ym,
+ c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
+ } else
+#endif
+ {
+ kfwrite_ymode(bc, ym,
+ c->kf_ymode_prob[c->kf_ymode_probs_index]);
+ }
+
+ if (ym == B_PRED) {
+ const int mis = c->mode_info_stride;
+ int i = 0;
+#if CONFIG_COMP_INTRA_PRED
+ int uses_second =
+ m->bmi[0].as_mode.second !=
+ (B_PREDICTION_MODE)(B_DC_PRED - 1);
+ vp8_write(bc, uses_second, 128);
+#endif
+ do {
+ const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
+ const B_PREDICTION_MODE L = left_block_mode(m, i);
+ const int bm = m->bmi[i].as_mode.first;
+#if CONFIG_COMP_INTRA_PRED
+ const int bm2 = m->bmi[i].as_mode.second;
+#endif
+
+#ifdef ENTROPY_STATS
+ ++intra_mode_stats [A] [L] [bm];
+#endif
+
+ write_bmode(bc, bm, c->kf_bmode_prob [A] [L]);
+ // printf(" mode: %d\n", bm);
+#if CONFIG_COMP_INTRA_PRED
+ if (uses_second) {
+ write_bmode(bc, bm2, c->kf_bmode_prob [A] [L]);
+ }
+#endif
+ } while (++i < 16);
+ }
+ if (ym == I8X8_PRED) {
+ write_i8x8_mode(bc, m->bmi[0].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[0].as_mode.first); fflush(stdout);
+ write_i8x8_mode(bc, m->bmi[2].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[2].as_mode.first); fflush(stdout);
+ write_i8x8_mode(bc, m->bmi[8].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[8].as_mode.first); fflush(stdout);
+ write_i8x8_mode(bc, m->bmi[10].as_mode.first,
+ c->fc.i8x8_mode_prob);
+ // printf(" mode: %d\n", m->bmi[10].as_mode.first); fflush(stdout);
+ } else
+ write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
+
+ if (ym <= I8X8_PRED && c->txfm_mode == TX_MODE_SELECT &&
+ !((c->mb_no_coeff_skip && m->mbmi.mb_skip_coeff) ||
+ (segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
+ get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
+ TX_SIZE sz = m->mbmi.txfm_size;
+ // FIXME(rbultje) code ternary symbol once all experiments are merged
+ vp8_write(bc, sz != TX_4X4, c->prob_tx[0]);
+ if (sz != TX_4X4 && ym <= TM_PRED)
+ vp8_write(bc, sz != TX_8X8, c->prob_tx[1]);
+ }
+}
+
+static void write_kfmodes(VP8_COMP* const cpi, vp8_writer* const bc) {
+ VP8_COMMON *const c = &cpi->common;
const int mis = c->mode_info_stride;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
MODE_INFO *m;
@@ -1359,16 +1273,8 @@ static void write_kfmodes(VP8_COMP *cpi) {
int mb_row, mb_col;
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = { +1, -1, +1, +1};
-
- if (c->mb_no_coeff_skip) {
- update_skip_probs(cpi);
- for (i = 0; i < MBSKIP_CONTEXTS; ++i)
- vp8_write_literal(bc, c->mbskip_pred_probs[i], 8);
- }
-
- if (!c->kf_ymode_probs_update) {
- vp8_write_literal(bc, c->kf_ymode_probs_index, 3);
- }
+ TOKENEXTRA *tok = cpi->tok;
+ TOKENEXTRA *tok_end = tok + cpi->tok_count;
mb_row = 0;
for (row = 0; row < c->mb_rows; row += 2) {
@@ -1382,8 +1288,6 @@ static void write_kfmodes(VP8_COMP *cpi) {
// Process the 4 MBs in the order:
// top-left, top-right, bottom-left, bottom-right
for (i = 0; i < 4; i++) {
- int ym;
- int segment_id;
int dy = row_delta[i];
int dx = col_delta[i];
int offset_extended = dy * mis + dx;
@@ -1399,97 +1303,12 @@ static void write_kfmodes(VP8_COMP *cpi) {
// Make sure the MacroBlockD mode info pointer is set correctly
xd->mode_info_context = m;
- ym = m->mbmi.mode;
- segment_id = m->mbmi.segment_id;
-
- if (cpi->mb.e_mbd.update_mb_segmentation_map) {
- write_mb_segid(bc, &m->mbmi, &cpi->mb.e_mbd);
- }
-
- if (c->mb_no_coeff_skip &&
- (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
- (get_segdata(xd, segment_id, SEG_LVL_EOB) != 0))) {
- int skip_coeff = m->mbmi.mb_skip_coeff;
-#if CONFIG_SUPERBLOCKS
- if (m->mbmi.encoded_as_sb) {
- skip_coeff &= m[1].mbmi.mb_skip_coeff;
- skip_coeff &= m[mis].mbmi.mb_skip_coeff;
- skip_coeff &= m[mis + 1].mbmi.mb_skip_coeff;
- }
-#endif
- vp8_encode_bool(bc, skip_coeff,
- get_pred_prob(c, xd, PRED_MBSKIP));
- }
-
-#if CONFIG_SUPERBLOCKS
- if (m->mbmi.encoded_as_sb) {
- sb_kfwrite_ymode(bc, ym,
- c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
- } else
-#endif
- {
- kfwrite_ymode(bc, ym,
- c->kf_ymode_prob[c->kf_ymode_probs_index]);
- }
-
- if (ym == B_PRED) {
- const int mis = c->mode_info_stride;
- int i = 0;
-#if CONFIG_COMP_INTRA_PRED
- int uses_second =
- m->bmi[0].as_mode.second !=
- (B_PREDICTION_MODE)(B_DC_PRED - 1);
- vp8_write(bc, uses_second, 128);
-#endif
- do {
- const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
- const B_PREDICTION_MODE L = left_block_mode(m, i);
- const int bm = m->bmi[i].as_mode.first;
-#if CONFIG_COMP_INTRA_PRED
- const int bm2 = m->bmi[i].as_mode.second;
-#endif
-
+ write_mb_modes_kf(c, xd, m, mis, bc);
#ifdef ENTROPY_STATS
- ++intra_mode_stats [A] [L] [bm];
-#endif
-
- write_bmode(bc, bm, c->kf_bmode_prob [A] [L]);
- // printf(" mode: %d\n", bm);
-#if CONFIG_COMP_INTRA_PRED
- if (uses_second) {
- write_bmode(bc, bm2, c->kf_bmode_prob [A] [L]);
- }
-#endif
- } while (++i < 16);
- }
- if (ym == I8X8_PRED) {
- write_i8x8_mode(bc, m->bmi[0].as_mode.first,
- c->fc.i8x8_mode_prob);
- // printf(" mode: %d\n", m->bmi[0].as_mode.first); fflush(stdout);
- write_i8x8_mode(bc, m->bmi[2].as_mode.first,
- c->fc.i8x8_mode_prob);
- // printf(" mode: %d\n", m->bmi[2].as_mode.first); fflush(stdout);
- write_i8x8_mode(bc, m->bmi[8].as_mode.first,
- c->fc.i8x8_mode_prob);
- // printf(" mode: %d\n", m->bmi[8].as_mode.first); fflush(stdout);
- write_i8x8_mode(bc, m->bmi[10].as_mode.first,
- c->fc.i8x8_mode_prob);
- // printf(" mode: %d\n", m->bmi[10].as_mode.first); fflush(stdout);
- } else
- write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
-
-#if CONFIG_TX_SELECT
- if (ym <= I8X8_PRED && c->txfm_mode == TX_MODE_SELECT &&
- !((c->mb_no_coeff_skip && m->mbmi.mb_skip_coeff) ||
- (segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
- get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
- TX_SIZE sz = m->mbmi.txfm_size;
- // FIXME(rbultje) code ternary symbol once all experiments are merged
- vp8_write(bc, sz != TX_4X4, c->prob_tx[0]);
- if (sz != TX_4X4 && ym <= TM_PRED)
- vp8_write(bc, sz != TX_8X8, c->prob_tx[1]);
- }
+ active_section = 8;
#endif
+ assert(tok < tok_end);
+ pack_mb_tokens(bc, &tok, tok_end);
#if CONFIG_SUPERBLOCKS
if (m->mbmi.encoded_as_sb) {
@@ -1563,7 +1382,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM
for (i = 0; i < BLOCK_TYPES; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -1584,8 +1402,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#endif
-
if (cpi->common.txfm_mode != ONLY_4X4) {
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
@@ -1612,7 +1428,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM8X8
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -1637,7 +1452,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#endif
}
if (cpi->common.txfm_mode > ALLOW_8X8) {
@@ -1660,7 +1474,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM16X16
for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -1679,14 +1492,13 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
}
-#endif
}
#if 0
static void update_coef_probs2(VP8_COMP *cpi) {
const vp8_prob grpupd = 192;
int i, j, k, t;
- vp8_writer *const w = & cpi->bc;
+ vp8_writer *const w = &cpi->bc;
int update[2];
int savings;
@@ -1851,9 +1663,8 @@ static void update_coef_probs2(VP8_COMP *cpi) {
}
#endif
-static void update_coef_probs(VP8_COMP *cpi) {
+static void update_coef_probs(VP8_COMP* const cpi, vp8_writer* const bc) {
int i, j, k, t;
- vp8_writer *const w = & cpi->bc;
int update[2] = {0, 0};
int savings;
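
update_coef_probs() now writes to the caller-supplied bc. The per-node update pattern it repeats for every block type, band, context and node is unchanged, and the same shape recurs in every hunk below:

  /* One coefficient-probability node: a flag bit coded with probability
     `upd` says whether the node changes; if it does, the new value is sent
     as a subexponential-coded difference from the old one, and the encoder
     updates its own copy so it stays in sync with the decoder. */
  vp8_write(bc, u, upd);
  if (u) {
    write_prob_diff_update(bc, newp, *Pold);
    *Pold = newp;
  }
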
@@ -1906,10 +1717,10 @@ static void update_coef_probs(VP8_COMP *cpi) {
// printf("Update %d %d, savings %d\n", update[0], update[1], savings);
/* Is coef updated at all */
- if (update[1] == 0 || savings < 0)
- vp8_write_bit(w, 0);
- else {
- vp8_write_bit(w, 1);
+ if (update[1] == 0 || savings < 0) {
+ vp8_write_bit(bc, 0);
+ } else {
+ vp8_write_bit(bc, 1);
for (i = 0; i < BLOCK_TYPES; ++i) {
for (j = !i; j < COEF_BANDS; ++j) {
int prev_coef_savings[ENTROPY_NODES] = {0};
@@ -1937,14 +1748,14 @@ static void update_coef_probs(VP8_COMP *cpi) {
if (s > 0)
u = 1;
#endif
- vp8_write(w, u, upd);
+ vp8_write(bc, u, upd);
#ifdef ENTROPY_STATS
if (!cpi->dummy_packing)
++ tree_update_hist [i][j][k][t] [u];
#endif
if (u) {
/* send/use new probability */
- write_prob_diff_update(w, newp, *Pold);
+ write_prob_diff_update(bc, newp, *Pold);
*Pold = newp;
}
}
@@ -1953,7 +1764,6 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
-#if CONFIG_HYBRIDTRANSFORM
savings = 0;
update[0] = update[1] = 0;
for (i = 0; i < BLOCK_TYPES; ++i) {
@@ -1996,10 +1806,10 @@ static void update_coef_probs(VP8_COMP *cpi) {
// printf("Update %d %d, savings %d\n", update[0], update[1], savings);
/* Is coef updated at all */
- if (update[1] == 0 || savings < 0)
- vp8_write_bit(w, 0);
- else {
- vp8_write_bit(w, 1);
+ if (update[1] == 0 || savings < 0) {
+ vp8_write_bit(bc, 0);
+ } else {
+ vp8_write_bit(bc, 1);
for (i = 0; i < BLOCK_TYPES; ++i) {
for (j = !i; j < COEF_BANDS; ++j) {
int prev_coef_savings[ENTROPY_NODES] = {0};
@@ -2027,14 +1837,14 @@ static void update_coef_probs(VP8_COMP *cpi) {
if (s > 0)
u = 1;
#endif
- vp8_write(w, u, upd);
+ vp8_write(bc, u, upd);
#ifdef ENTROPY_STATS
if (!cpi->dummy_packing)
++ hybrid_tree_update_hist [i][j][k][t] [u];
#endif
if (u) {
/* send/use new probability */
- write_prob_diff_update(w, newp, *Pold);
+ write_prob_diff_update(bc, newp, *Pold);
*Pold = newp;
}
}
@@ -2042,7 +1852,6 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
}
-#endif
/* do not do this if not even allowed */
if (cpi->common.txfm_mode != ONLY_4X4) {
@@ -2081,10 +1890,10 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
- if (update[1] == 0 || savings < 0)
- vp8_write_bit(w, 0);
- else {
- vp8_write_bit(w, 1);
+ if (update[1] == 0 || savings < 0) {
+ vp8_write_bit(bc, 0);
+ } else {
+ vp8_write_bit(bc, 1);
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
for (j = !i; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -2105,14 +1914,14 @@ static void update_coef_probs(VP8_COMP *cpi) {
s = prob_update_savings(ct, oldp, newp, upd);
u = s > 0 ? 1 : 0;
#endif
- vp8_write(w, u, upd);
+ vp8_write(bc, u, upd);
#ifdef ENTROPY_STATS
if (!cpi->dummy_packing)
++ tree_update_hist_8x8 [i][j][k][t] [u];
#endif
if (u) {
/* send/use new probability */
- write_prob_diff_update(w, newp, oldp);
+ write_prob_diff_update(bc, newp, oldp);
*Pold = newp;
}
}
@@ -2120,7 +1929,6 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM8X8
update[0] = update[1] = 0;
savings = 0;
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
@@ -2155,10 +1963,10 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
- if (update[1] == 0 || savings < 0)
- vp8_write_bit(w, 0);
- else {
- vp8_write_bit(w, 1);
+ if (update[1] == 0 || savings < 0) {
+ vp8_write_bit(bc, 0);
+ } else {
+ vp8_write_bit(bc, 1);
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
for (j = !i; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -2179,14 +1987,14 @@ static void update_coef_probs(VP8_COMP *cpi) {
s = prob_update_savings(ct, oldp, newp, upd);
u = s > 0 ? 1 : 0;
#endif
- vp8_write(w, u, upd);
+ vp8_write(bc, u, upd);
#ifdef ENTROPY_STATS
if (!cpi->dummy_packing)
++ hybrid_tree_update_hist_8x8 [i][j][k][t] [u];
#endif
if (u) {
/* send/use new probability */
- write_prob_diff_update(w, newp, oldp);
+ write_prob_diff_update(bc, newp, oldp);
*Pold = newp;
}
}
@@ -2194,7 +2002,6 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
}
-#endif
}
if (cpi->common.txfm_mode > ALLOW_8X8) {
@@ -2233,10 +2040,10 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
- if (update[1] == 0 || savings < 0)
- vp8_write_bit(w, 0);
- else {
- vp8_write_bit(w, 1);
+ if (update[1] == 0 || savings < 0) {
+ vp8_write_bit(bc, 0);
+ } else {
+ vp8_write_bit(bc, 1);
for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
for (j = !i; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -2257,14 +2064,14 @@ static void update_coef_probs(VP8_COMP *cpi) {
s = prob_update_savings(ct, oldp, newp, upd);
u = s > 0 ? 1 : 0;
#endif
- vp8_write(w, u, upd);
+ vp8_write(bc, u, upd);
#ifdef ENTROPY_STATS
if (!cpi->dummy_packing)
++tree_update_hist_16x16[i][j][k][t][u];
#endif
if (u) {
/* send/use new probability */
- write_prob_diff_update(w, newp, oldp);
+ write_prob_diff_update(bc, newp, oldp);
*Pold = newp;
}
}
@@ -2272,7 +2079,6 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
}
-#if CONFIG_HYBRIDTRANSFORM16X16
update[0] = update[1] = 0;
savings = 0;
for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
@@ -2307,10 +2113,10 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
- if (update[1] == 0 || savings < 0)
- vp8_write_bit(w, 0);
- else {
- vp8_write_bit(w, 1);
+ if (update[1] == 0 || savings < 0) {
+ vp8_write_bit(bc, 0);
+ } else {
+ vp8_write_bit(bc, 1);
for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
for (j = !i; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -2331,14 +2137,14 @@ static void update_coef_probs(VP8_COMP *cpi) {
s = prob_update_savings(ct, oldp, newp, upd);
u = s > 0 ? 1 : 0;
#endif
- vp8_write(w, u, upd);
+ vp8_write(bc, u, upd);
#ifdef ENTROPY_STATS
if (!cpi->dummy_packing)
++hybrid_tree_update_hist_16x16[i][j][k][t][u];
#endif
if (u) {
/* send/use new probability */
- write_prob_diff_update(w, newp, oldp);
+ write_prob_diff_update(bc, newp, oldp);
*Pold = newp;
}
}
@@ -2346,7 +2152,6 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
}
}
-#endif
}
}
@@ -2402,7 +2207,7 @@ static void segment_reference_frames(VP8_COMP *cpi) {
int ref[MAX_MB_SEGMENTS] = {0};
int i, j;
int mb_index = 0;
- MACROBLOCKD *const xd = & cpi->mb.e_mbd;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
for (i = 0; i < oci->mb_rows; i++) {
for (j = 0; j < oci->mb_cols; j++, mb_index++) {
@@ -2419,9 +2224,9 @@ static void segment_reference_frames(VP8_COMP *cpi) {
void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size) {
int i, j;
VP8_HEADER oh;
- VP8_COMMON *const pc = & cpi->common;
- vp8_writer *const bc = & cpi->bc;
- MACROBLOCKD *const xd = & cpi->mb.e_mbd;
+ VP8_COMMON *const pc = &cpi->common;
+ vp8_writer header_bc, residual_bc;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
int extra_bytes_packed = 0;
unsigned char *cx_data = dest;
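
vp8_pack_bitstream() now declares two local writers instead of using cpi->bc: header_bc for the frame header and probability updates, and residual_bc (declared here; its use falls outside this excerpt) presumably for the interleaved per-macroblock modes, MVs and tokens. The visible part of the new flow, abridged:

  vp8_writer header_bc, residual_bc;

  vp8_start_encode(&header_bc, cx_data);
  /* ... segmentation, loop-filter, quantizer, reference and probability
     fields are all written through &header_bc ... */
  vp8_stop_encode(&header_bc);
  oh.first_partition_length_in_bytes = header_bc.pos;  /* size of partition 1 */
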
@@ -2464,40 +2269,65 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
extra_bytes_packed = 7;
cx_data += extra_bytes_packed;
- vp8_start_encode(bc, cx_data);
+ vp8_start_encode(&header_bc, cx_data);
// signal clr type
- vp8_write_bit(bc, pc->clr_type);
- vp8_write_bit(bc, pc->clamp_type);
+ vp8_write_bit(&header_bc, pc->clr_type);
+ vp8_write_bit(&header_bc, pc->clamp_type);
- } else
- vp8_start_encode(bc, cx_data);
+ } else {
+ vp8_start_encode(&header_bc, cx_data);
+ }
// Signal whether or not Segmentation is enabled
- vp8_write_bit(bc, (xd->segmentation_enabled) ? 1 : 0);
+ vp8_write_bit(&header_bc, (xd->segmentation_enabled) ? 1 : 0);
// Indicate which features are enabled
if (xd->segmentation_enabled) {
// Indicate whether or not the segmentation map is being updated.
- vp8_write_bit(bc, (xd->update_mb_segmentation_map) ? 1 : 0);
+ vp8_write_bit(&header_bc, (xd->update_mb_segmentation_map) ? 1 : 0);
// If it is, then indicate the method that will be used.
if (xd->update_mb_segmentation_map) {
// Select the coding strategy (temporal or spatial)
choose_segmap_coding_method(cpi);
+ // Send the tree probabilities used to decode unpredicted
+ // macro-block segments
+ for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
+ int data = xd->mb_segment_tree_probs[i];
+
+ if (data != 255) {
+ vp8_write_bit(&header_bc, 1);
+ vp8_write_literal(&header_bc, data, 8);
+ } else {
+ vp8_write_bit(&header_bc, 0);
+ }
+ }
// Write out the chosen coding method.
- vp8_write_bit(bc, (pc->temporal_update) ? 1 : 0);
+ vp8_write_bit(&header_bc, (pc->temporal_update) ? 1 : 0);
+ if (pc->temporal_update) {
+ for (i = 0; i < PREDICTION_PROBS; i++) {
+ int data = pc->segment_pred_probs[i];
+
+ if (data != 255) {
+ vp8_write_bit(&header_bc, 1);
+ vp8_write_literal(&header_bc, data, 8);
+ } else {
+ vp8_write_bit(&header_bc, 0);
+ }
+ }
+ }
}
- vp8_write_bit(bc, (xd->update_mb_segmentation_data) ? 1 : 0);
+ vp8_write_bit(&header_bc, (xd->update_mb_segmentation_data) ? 1 : 0);
// segment_reference_frames(cpi);
if (xd->update_mb_segmentation_data) {
signed char Data;
- vp8_write_bit(bc, (xd->mb_segment_abs_delta) ? 1 : 0);
+ vp8_write_bit(&header_bc, (xd->mb_segment_abs_delta) ? 1 : 0);
// For each segments id...
for (i = 0; i < MAX_MB_SEGMENTS; i++) {
@@ -2510,67 +2340,67 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
// check if there's an update
if (segfeature_changed(xd, i, j)) {
- vp8_write_bit(bc, 1);
+ vp8_write_bit(&header_bc, 1);
if (segfeature_active(xd, i, j)) {
// this bit is to say we are still
// active/ if we were inactive
// this is unnecessary
if (old_segfeature_active(xd, i, j)) {
- vp8_write_bit(bc, 1);
+ vp8_write_bit(&header_bc, 1);
}
// Is the segment data signed..
if (is_segfeature_signed(j)) {
// Encode the relevant feature data
if (Data < 0) {
Data = - Data;
- vp8_write_literal(bc, Data,
+ vp8_write_literal(&header_bc, Data,
seg_feature_data_bits(j));
- vp8_write_bit(bc, 1);
+ vp8_write_bit(&header_bc, 1);
} else {
- vp8_write_literal(bc, Data,
+ vp8_write_literal(&header_bc, Data,
seg_feature_data_bits(j));
- vp8_write_bit(bc, 0);
+ vp8_write_bit(&header_bc, 0);
}
}
// Unsigned data element so no sign bit needed
else
- vp8_write_literal(bc, Data,
+ vp8_write_literal(&header_bc, Data,
seg_feature_data_bits(j));
}
// feature is inactive now
else if (old_segfeature_active(xd, i, j)) {
- vp8_write_bit(bc, 0);
+ vp8_write_bit(&header_bc, 0);
}
} else {
- vp8_write_bit(bc, 0);
+ vp8_write_bit(&header_bc, 0);
}
#else
// If the feature is enabled...
if (segfeature_active(xd, i, j)) {
- vp8_write_bit(bc, 1);
+ vp8_write_bit(&header_bc, 1);
// Is the segment data signed..
if (is_segfeature_signed(j)) {
// Encode the relevant feature data
if (Data < 0) {
Data = - Data;
- vp8_write_literal(bc, Data,
+ vp8_write_literal(&header_bc, Data,
seg_feature_data_bits(j));
- vp8_write_bit(bc, 1);
+ vp8_write_bit(&header_bc, 1);
} else {
- vp8_write_literal(bc, Data,
+ vp8_write_literal(&header_bc, Data,
seg_feature_data_bits(j));
- vp8_write_bit(bc, 0);
+ vp8_write_bit(&header_bc, 0);
}
}
// Unsigned data element so no sign bit needed
else
- vp8_write_literal(bc, Data,
+ vp8_write_literal(&header_bc, Data,
seg_feature_data_bits(j));
} else
- vp8_write_bit(bc, 0);
+ vp8_write_bit(&header_bc, 0);
#endif
}
}
@@ -2581,33 +2411,6 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
save_segment_info(xd);
#endif
- if (xd->update_mb_segmentation_map) {
- // Send the tree probabilities used to decode unpredicted
- // macro-block segments
- for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
- int Data = xd->mb_segment_tree_probs[i];
-
- if (Data != 255) {
- vp8_write_bit(bc, 1);
- vp8_write_literal(bc, Data, 8);
- } else
- vp8_write_bit(bc, 0);
- }
-
- // If predictive coding of segment map is enabled send the
- // prediction probabilities.
- if (pc->temporal_update) {
- for (i = 0; i < PREDICTION_PROBS; i++) {
- int Data = pc->segment_pred_probs[i];
-
- if (Data != 255) {
- vp8_write_bit(bc, 1);
- vp8_write_literal(bc, Data, 8);
- } else
- vp8_write_bit(bc, 0);
- }
- }
- }
}
// Encode the common prediction model status flag probability updates for
@@ -2616,27 +2419,24 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
if (pc->frame_type != KEY_FRAME) {
for (i = 0; i < PREDICTION_PROBS; i++) {
if (cpi->ref_pred_probs_update[i]) {
- vp8_write_bit(bc, 1);
- vp8_write_literal(bc, pc->ref_pred_probs[i], 8);
- } else
- vp8_write_bit(bc, 0);
+ vp8_write_bit(&header_bc, 1);
+ vp8_write_literal(&header_bc, pc->ref_pred_probs[i], 8);
+ } else {
+ vp8_write_bit(&header_bc, 0);
+ }
}
}
#if CONFIG_SUPERBLOCKS
{
/* sb mode probability */
- int sb_coded = 256 - (cpi->sb_count << 8) / (((pc->mb_rows + 1) >> 1) * ((pc->mb_cols + 1) >> 1));
- if (sb_coded <= 0)
- sb_coded = 1;
- else if (sb_coded >= 256)
- sb_coded = 255;
- pc->sb_coded = sb_coded;
- vp8_write_literal(bc, pc->sb_coded, 8);
+ const int sb_max = (((pc->mb_rows + 1) >> 1) * ((pc->mb_cols + 1) >> 1));
+
+ pc->sb_coded = get_prob(sb_max - cpi->sb_count, sb_max);
+ vp8_write_literal(&header_bc, pc->sb_coded, 8);
}
#endif
-#if CONFIG_TX_SELECT
{
if (pc->txfm_mode == TX_MODE_SELECT) {
pc->prob_tx[0] = get_prob(cpi->txfm_count[0] + cpi->txfm_count_8x8p[0],
@@ -2647,29 +2447,26 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
pc->prob_tx[0] = 128;
pc->prob_tx[1] = 128;
}
- vp8_write_literal(bc, pc->txfm_mode, 2);
+ vp8_write_literal(&header_bc, pc->txfm_mode, 2);
if (pc->txfm_mode == TX_MODE_SELECT) {
- vp8_write_literal(bc, pc->prob_tx[0], 8);
- vp8_write_literal(bc, pc->prob_tx[1], 8);
+ vp8_write_literal(&header_bc, pc->prob_tx[0], 8);
+ vp8_write_literal(&header_bc, pc->prob_tx[1], 8);
}
}
-#else
- vp8_write_bit(bc, !!pc->txfm_mode);
-#endif
// Encode the loop filter level and type
- vp8_write_bit(bc, pc->filter_type);
- vp8_write_literal(bc, pc->filter_level, 6);
- vp8_write_literal(bc, pc->sharpness_level, 3);
+ vp8_write_bit(&header_bc, pc->filter_type);
+ vp8_write_literal(&header_bc, pc->filter_level, 6);
+ vp8_write_literal(&header_bc, pc->sharpness_level, 3);
// Write out loop filter deltas applied at the MB level based on mode or ref frame (if they are enabled).
- vp8_write_bit(bc, (xd->mode_ref_lf_delta_enabled) ? 1 : 0);
+ vp8_write_bit(&header_bc, (xd->mode_ref_lf_delta_enabled) ? 1 : 0);
if (xd->mode_ref_lf_delta_enabled) {
// Do the deltas need to be updated
int send_update = xd->mode_ref_lf_delta_update;
- vp8_write_bit(bc, send_update);
+ vp8_write_bit(&header_bc, send_update);
if (send_update) {
int Data;
@@ -2680,18 +2477,19 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
// Frame level data
if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i]) {
xd->last_ref_lf_deltas[i] = xd->ref_lf_deltas[i];
- vp8_write_bit(bc, 1);
+ vp8_write_bit(&header_bc, 1);
if (Data > 0) {
- vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 0); // sign
+ vp8_write_literal(&header_bc, (Data & 0x3F), 6);
+ vp8_write_bit(&header_bc, 0); // sign
} else {
Data = -Data;
- vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 1); // sign
+ vp8_write_literal(&header_bc, (Data & 0x3F), 6);
+ vp8_write_bit(&header_bc, 1); // sign
}
- } else
- vp8_write_bit(bc, 0);
+ } else {
+ vp8_write_bit(&header_bc, 0);
+ }
}
// Send update
@@ -2700,41 +2498,42 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
if (xd->mode_lf_deltas[i] != xd->last_mode_lf_deltas[i]) {
xd->last_mode_lf_deltas[i] = xd->mode_lf_deltas[i];
- vp8_write_bit(bc, 1);
+ vp8_write_bit(&header_bc, 1);
if (Data > 0) {
- vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 0); // sign
+ vp8_write_literal(&header_bc, (Data & 0x3F), 6);
+ vp8_write_bit(&header_bc, 0); // sign
} else {
Data = -Data;
- vp8_write_literal(bc, (Data & 0x3F), 6);
- vp8_write_bit(bc, 1); // sign
+ vp8_write_literal(&header_bc, (Data & 0x3F), 6);
+ vp8_write_bit(&header_bc, 1); // sign
}
- } else
- vp8_write_bit(bc, 0);
+ } else {
+ vp8_write_bit(&header_bc, 0);
+ }
}
}
}
// signal here is multi token partition is enabled
- // vp8_write_literal(bc, pc->multi_token_partition, 2);
- vp8_write_literal(bc, 0, 2);
+ // vp8_write_literal(&header_bc, pc->multi_token_partition, 2);
+ vp8_write_literal(&header_bc, 0, 2);
// Frame Q baseline quantizer index
- vp8_write_literal(bc, pc->base_qindex, QINDEX_BITS);
+ vp8_write_literal(&header_bc, pc->base_qindex, QINDEX_BITS);
// Transmit Dc, Second order and Uv quantizer delta information
- put_delta_q(bc, pc->y1dc_delta_q);
- put_delta_q(bc, pc->y2dc_delta_q);
- put_delta_q(bc, pc->y2ac_delta_q);
- put_delta_q(bc, pc->uvdc_delta_q);
- put_delta_q(bc, pc->uvac_delta_q);
+ put_delta_q(&header_bc, pc->y1dc_delta_q);
+ put_delta_q(&header_bc, pc->y2dc_delta_q);
+ put_delta_q(&header_bc, pc->y2ac_delta_q);
+ put_delta_q(&header_bc, pc->uvdc_delta_q);
+ put_delta_q(&header_bc, pc->uvac_delta_q);
// When there is a key frame all reference buffers are updated using the new key frame
if (pc->frame_type != KEY_FRAME) {
// Should the GF or ARF be updated using the transmitted frame or buffer
- vp8_write_bit(bc, pc->refresh_golden_frame);
- vp8_write_bit(bc, pc->refresh_alt_ref_frame);
+ vp8_write_bit(&header_bc, pc->refresh_golden_frame);
+ vp8_write_bit(&header_bc, pc->refresh_alt_ref_frame);
// For inter frames the current default behavior is that when
// cm->refresh_golden_frame is set we copy the old GF over to
@@ -2744,18 +2543,17 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
// If not being updated from current frame should either GF or ARF be updated from another buffer
if (!pc->refresh_golden_frame)
- vp8_write_literal(bc, pc->copy_buffer_to_gf, 2);
+ vp8_write_literal(&header_bc, pc->copy_buffer_to_gf, 2);
if (!pc->refresh_alt_ref_frame)
- vp8_write_literal(bc, pc->copy_buffer_to_arf, 2);
+ vp8_write_literal(&header_bc, pc->copy_buffer_to_arf, 2);
// Indicate reference frame sign bias for Golden and ARF frames (always 0 for last frame buffer)
- vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
- vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
+ vp8_write_bit(&header_bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
+ vp8_write_bit(&header_bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
// Signal whether to allow high MV precision
- vp8_write_bit(bc, (xd->allow_high_precision_mv) ? 1 : 0);
-#if CONFIG_SWITCHABLE_INTERP
+ vp8_write_bit(&header_bc, (xd->allow_high_precision_mv) ? 1 : 0);
if (pc->mcomp_filter_type == SWITCHABLE) {
/* Check to see if only one of the filters is actually used */
int count[VP8_SWITCHABLE_FILTERS];
@@ -2778,16 +2576,15 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
}
}
// Signal the type of subpel filter to use
- vp8_write_bit(bc, (pc->mcomp_filter_type == SWITCHABLE));
+ vp8_write_bit(&header_bc, (pc->mcomp_filter_type == SWITCHABLE));
if (pc->mcomp_filter_type != SWITCHABLE)
-#endif /* CONFIG_SWITCHABLE_INTERP */
- vp8_write_literal(bc, (pc->mcomp_filter_type), 2);
+ vp8_write_literal(&header_bc, (pc->mcomp_filter_type), 2);
}
- vp8_write_bit(bc, pc->refresh_entropy_probs);
+ vp8_write_bit(&header_bc, pc->refresh_entropy_probs);
if (pc->frame_type != KEY_FRAME)
- vp8_write_bit(bc, pc->refresh_last_frame);
+ vp8_write_bit(&header_bc, pc->refresh_last_frame);
#ifdef ENTROPY_STATS
if (pc->frame_type == INTER_FRAME)
@@ -2799,62 +2596,93 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
vp8_clear_system_state(); // __asm emms;
vp8_copy(cpi->common.fc.pre_coef_probs, cpi->common.fc.coef_probs);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs, cpi->common.fc.hybrid_coef_probs);
-#endif
vp8_copy(cpi->common.fc.pre_coef_probs_8x8, cpi->common.fc.coef_probs_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_8x8, cpi->common.fc.hybrid_coef_probs_8x8);
-#endif
vp8_copy(cpi->common.fc.pre_coef_probs_16x16, cpi->common.fc.coef_probs_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_16x16, cpi->common.fc.hybrid_coef_probs_16x16);
-#endif
vp8_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
vp8_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
vp8_copy(cpi->common.fc.pre_bmode_prob, cpi->common.fc.bmode_prob);
vp8_copy(cpi->common.fc.pre_sub_mv_ref_prob, cpi->common.fc.sub_mv_ref_prob);
vp8_copy(cpi->common.fc.pre_mbsplit_prob, cpi->common.fc.mbsplit_prob);
vp8_copy(cpi->common.fc.pre_i8x8_mode_prob, cpi->common.fc.i8x8_mode_prob);
-#if CONFIG_NEWMVENTROPY
cpi->common.fc.pre_nmvc = cpi->common.fc.nmvc;
-#else
- vp8_copy(cpi->common.fc.pre_mvc, cpi->common.fc.mvc);
- vp8_copy(cpi->common.fc.pre_mvc_hp, cpi->common.fc.mvc_hp);
-#endif
vp8_zero(cpi->sub_mv_ref_count);
vp8_zero(cpi->mbsplit_count);
vp8_zero(cpi->common.fc.mv_ref_ct)
vp8_zero(cpi->common.fc.mv_ref_ct_a)
- update_coef_probs(cpi);
+ update_coef_probs(cpi, &header_bc);
#ifdef ENTROPY_STATS
active_section = 2;
#endif
// Write out the mb_no_coeff_skip flag
- vp8_write_bit(bc, pc->mb_no_coeff_skip);
+ vp8_write_bit(&header_bc, pc->mb_no_coeff_skip);
+ if (pc->mb_no_coeff_skip) {
+ int k;
- if (pc->frame_type == KEY_FRAME) {
- decide_kf_ymode_entropy(cpi);
- write_kfmodes(cpi);
+ update_skip_probs(cpi);
+ for (k = 0; k < MBSKIP_CONTEXTS; ++k)
+ vp8_write_literal(&header_bc, pc->mbskip_pred_probs[k], 8);
+ }
-#ifdef ENTROPY_STATS
- active_section = 8;
-#endif
+ if (pc->frame_type == KEY_FRAME) {
+ if (!pc->kf_ymode_probs_update) {
+ vp8_write_literal(&header_bc, pc->kf_ymode_probs_index, 3);
+ }
} else {
- pack_inter_mode_mvs(cpi);
- vp8_update_mode_context(&cpi->common);
+ // Update the probabilities used to encode reference frame data
+ update_ref_probs(cpi);
#ifdef ENTROPY_STATS
active_section = 1;
#endif
+
+#if CONFIG_PRED_FILTER
+ // Write the prediction filter mode used for this frame
+ vp8_write_literal(&header_bc, pc->pred_filter_mode, 2);
+
+ // Write prediction filter on/off probability if signaling at MB level
+ if (pc->pred_filter_mode == 2)
+ vp8_write_literal(&header_bc, pc->prob_pred_filter_off, 8);
+
+#endif
+ if (pc->mcomp_filter_type == SWITCHABLE)
+ update_switchable_interp_probs(cpi, &header_bc);
+
+ vp8_write_literal(&header_bc, pc->prob_intra_coded, 8);
+ vp8_write_literal(&header_bc, pc->prob_last_coded, 8);
+ vp8_write_literal(&header_bc, pc->prob_gf_coded, 8);
+
+ {
+ const int comp_pred_mode = cpi->common.comp_pred_mode;
+ const int use_compound_pred = (comp_pred_mode != SINGLE_PREDICTION_ONLY);
+ const int use_hybrid_pred = (comp_pred_mode == HYBRID_PREDICTION);
+
+ vp8_write(&header_bc, use_compound_pred, 128);
+ if (use_compound_pred) {
+ vp8_write(&header_bc, use_hybrid_pred, 128);
+ if (use_hybrid_pred) {
+ for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
+ pc->prob_comppred[i] = get_binary_prob(cpi->single_pred_count[i],
+ cpi->comp_pred_count[i]);
+ vp8_write_literal(&header_bc, pc->prob_comppred[i], 8);
+ }
+ }
+ }
+ }
+
+ update_mbintra_mode_probs(cpi, &header_bc);
+
+ vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv, &header_bc);
}
- vp8_stop_encode(bc);
+ vp8_stop_encode(&header_bc);
- oh.first_partition_length_in_bytes = cpi->bc.pos;
+ oh.first_partition_length_in_bytes = header_bc.pos;
/* update frame tag */
{
@@ -2868,15 +2696,21 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
dest[2] = v >> 16;
}
- *size = VP8_HEADER_SIZE + extra_bytes_packed + cpi->bc.pos;
+ *size = VP8_HEADER_SIZE + extra_bytes_packed + header_bc.pos;
+ vp8_start_encode(&residual_bc, cx_data + header_bc.pos);
- vp8_start_encode(&cpi->bc2, cx_data + bc->pos);
+ if (pc->frame_type == KEY_FRAME) {
+ decide_kf_ymode_entropy(cpi);
+ write_kfmodes(cpi, &residual_bc);
+ } else {
+ pack_inter_mode_mvs(cpi, &residual_bc);
+ vp8_update_mode_context(&cpi->common);
+ }
- pack_tokens(&cpi->bc2, cpi->tok, cpi->tok_count);
- vp8_stop_encode(&cpi->bc2);
+ vp8_stop_encode(&residual_bc);
- *size += cpi->bc2.pos;
+ *size += residual_bc.pos;
}
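
The hunk above replaces the encoder-owned bc/bc2 writers with two local writers: header_bc for the compressed header partition and residual_bc for the mode/MV/token data that starts immediately after it in the output buffer. A minimal sketch of that layout, using a simplified stand-in writer rather than the real vp8_writer and treating the uncompressed frame tag as an opaque byte offset:

    #include <stddef.h>

    /* Toy bool-coder stand-in; only the byte position matters for the layout. */
    typedef struct { unsigned char *buf; size_t pos; } toy_writer;

    static void toy_start(toy_writer *w, unsigned char *buf) { w->buf = buf; w->pos = 0; }
    static void toy_stop(toy_writer *w) { (void)w; /* real coder flushes bits here */ }

    /* write_header / write_residual are caller-supplied stand-ins for the header
     * fields and the mode/MV/token packing. */
    static size_t pack_frame(unsigned char *dest, size_t frame_tag_bytes,
                             void (*write_header)(toy_writer *),
                             void (*write_residual)(toy_writer *)) {
      toy_writer header_bc, residual_bc;
      size_t size = frame_tag_bytes;                 /* uncompressed frame tag */

      toy_start(&header_bc, dest + frame_tag_bytes);
      write_header(&header_bc);                      /* quantizers, flags, probs */
      toy_stop(&header_bc);
      size += header_bc.pos;                         /* first partition length */

      /* The residual partition begins right after the header partition. */
      toy_start(&residual_bc, dest + frame_tag_bytes + header_bc.pos);
      write_residual(&residual_bc);                  /* modes, MVs, coefficients */
      toy_stop(&residual_bc);
      size += residual_bc.pos;

      return size;                                   /* total bytes in dest */
    }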
@@ -2899,14 +2733,9 @@ void print_tree_update_probs() {
for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
fprintf(f, " {");
for (l = 0; l < ENTROPY_NODES; l++) {
- Sum = tree_update_hist[i][j][k][l][0] + tree_update_hist[i][j][k][l][1];
- if (Sum > 0) {
- if (((tree_update_hist[i][j][k][l][0] * 255) / Sum) > 0)
- fprintf(f, "%3ld, ", (tree_update_hist[i][j][k][l][0] * 255) / Sum);
- else
- fprintf(f, "%3ld, ", 1);
- } else
- fprintf(f, "%3ld, ", 128);
+ fprintf(f, "%3ld, ",
+ get_binary_prob(tree_update_hist[i][j][k][l][0],
+ tree_update_hist[i][j][k][l][1]));
}
fprintf(f, "},\n");
}
@@ -2928,14 +2757,9 @@ void print_tree_update_probs() {
for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
fprintf(f, " {");
for (l = 0; l < MAX_ENTROPY_TOKENS - 1; l++) {
- Sum = tree_update_hist_8x8[i][j][k][l][0] + tree_update_hist_8x8[i][j][k][l][1];
- if (Sum > 0) {
- if (((tree_update_hist_8x8[i][j][k][l][0] * 255) / Sum) > 0)
- fprintf(f, "%3ld, ", (tree_update_hist_8x8[i][j][k][l][0] * 255) / Sum);
- else
- fprintf(f, "%3ld, ", 1);
- } else
- fprintf(f, "%3ld, ", 128);
+ fprintf(f, "%3ld, ",
+ get_binary_prob(tree_update_hist_8x8[i][j][k][l][0],
+ tree_update_hist_8x8[i][j][k][l][1]));
}
fprintf(f, "},\n");
}
@@ -2956,14 +2780,9 @@ void print_tree_update_probs() {
for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
fprintf(f, " {");
for (l = 0; l < MAX_ENTROPY_TOKENS - 1; l++) {
- Sum = tree_update_hist_16x16[i][j][k][l][0] + tree_update_hist_16x16[i][j][k][l][1];
- if (Sum > 0) {
- if (((tree_update_hist_16x16[i][j][k][l][0] * 255) / Sum) > 0)
- fprintf(f, "%3ld, ", (tree_update_hist_16x16[i][j][k][l][0] * 255) / Sum);
- else
- fprintf(f, "%3ld, ", 1);
- } else
- fprintf(f, "%3ld, ", 128);
+ fprintf(f, "%3ld, ",
+ get_binary_prob(tree_update_hist_16x16[i][j][k][l][0],
+ tree_update_hist_16x16[i][j][k][l][1]));
}
fprintf(f, "},\n");
}
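
The three print_tree_update_probs() hunks above collapse the open-coded count-to-probability arithmetic into get_binary_prob(). A sketch of such a helper, assuming it simply mirrors the arithmetic it replaces (the actual libvpx helper may round differently):

    typedef unsigned char toy_prob;

    /* Map a pair of branch counts to an 8-bit probability of the zero branch,
     * reproducing the removed code: 128 with no data, otherwise c0*255/total
     * clamped away from zero. */
    static toy_prob toy_get_binary_prob(unsigned int c0, unsigned int c1) {
      const unsigned int total = c0 + c1;
      unsigned int p;
      if (total == 0)
        return 128;                     /* no observations: stay at the midpoint */
      p = (c0 * 255) / total;           /* scale the zero-branch count */
      return (toy_prob)(p > 0 ? p : 1); /* never emit a zero probability */
    }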
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 861700409..48623be8c 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -79,9 +79,7 @@ typedef struct {
int hybrid_pred_diff;
int comp_pred_diff;
int single_pred_diff;
-#if CONFIG_TX_SELECT
int64_t txfm_rd_diff[NB_TXFM_MODES];
-#endif
} PICK_MODE_CONTEXT;
typedef struct {
@@ -114,7 +112,6 @@ typedef struct {
int *mb_norm_activity_ptr;
signed int act_zbin_adj;
-#if CONFIG_NEWMVENTROPY
int nmvjointcost[MV_JOINTS];
int nmvcosts[2][MV_VALS];
int *nmvcost[2];
@@ -126,28 +123,17 @@ typedef struct {
int *nmvsadcost[2];
int nmvsadcosts_hp[2][MV_VALS];
int *nmvsadcost_hp[2];
-#else
- int mvcosts[2][MVvals + 1];
- int *mvcost[2];
- int mvsadcosts[2][MVfpvals + 1];
- int *mvsadcost[2];
- int mvcosts_hp[2][MVvals_hp + 1];
- int *mvcost_hp[2];
- int mvsadcosts_hp[2][MVfpvals_hp + 1];
- int *mvsadcost_hp[2];
-#endif /* CONFIG_NEWMVENTROPY */
int mbmode_cost[2][MB_MODE_COUNT];
int intra_uv_mode_cost[2][MB_MODE_COUNT];
int bmode_costs[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES];
int i8x8_mode_costs[MB_MODE_COUNT];
int inter_bmode_costs[B_MODE_COUNT];
-#if CONFIG_SWITCHABLE_INTERP
- int switchable_interp_costs[VP8_SWITCHABLE_FILTERS+1]
+ int switchable_interp_costs[VP8_SWITCHABLE_FILTERS + 1]
[VP8_SWITCHABLE_FILTERS];
-#endif
- // These define limits to motion vector components to prevent them from extending outside the UMV borders
+ // These define limits to motion vector components to prevent them
+ // from extending outside the UMV borders
int mv_col_min;
int mv_col_max;
int mv_row_min;
@@ -164,13 +150,10 @@ typedef struct {
unsigned int token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_token_costs[TX_SIZE_MAX][BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
-#endif
int optimize;
- int q_index;
// Structure to hold context for each of the 4 MBs within a SB:
// when encoded as 4 independent MBs:
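
The block.h hunk above keeps the paired storage/pointer members for MV costs (nmvcosts[2][MV_VALS] next to int *nmvcost[2], and the sad variants); together with the mvcost[0][-i] accesses removed from encodemv.c further down, this suggests the usual centered cost-table pattern, sketched here with made-up sizes:

    #include <stdio.h>

    #define TOY_MV_MAX  64                       /* assumed component range */
    #define TOY_MV_VALS (2 * TOY_MV_MAX + 1)

    int main(void) {
      static int storage[2][TOY_MV_VALS];        /* backing arrays (row, col) */
      int *cost[2];
      int comp, v;

      for (comp = 0; comp < 2; ++comp) {
        cost[comp] = storage[comp] + TOY_MV_MAX; /* point at component value 0 */
        for (v = -TOY_MV_MAX; v <= TOY_MV_MAX; ++v)
          cost[comp][v] = v < 0 ? -v : v;        /* stand-in cost: |v| */
      }

      printf("cost of row = -3: %d\n", cost[0][-3]); /* negative index is valid */
      return 0;
    }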
diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c
index d81a547d2..0983b1c0a 100644
--- a/vp8/encoder/dct.c
+++ b/vp8/encoder/dct.c
@@ -14,8 +14,6 @@
#include "vp8/common/idct.h"
#include "vp8/common/systemdependent.h"
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
-
#include "vp8/common/blockd.h"
// TODO: these transforms can be converted into integer forms to reduce
@@ -71,9 +69,7 @@ float adst_8[64] = {
0.175227946595735, -0.326790388032145, 0.434217976756762, -0.483002021635509,
0.466553967085785, -0.387095214016348, 0.255357107325376, -0.089131608307532
};
-#endif
-#if CONFIG_HYBRIDTRANSFORM16X16 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8
float dct_16[256] = {
0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000,
0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000,
@@ -143,7 +139,6 @@ float adst_16[256] = {
0.065889, -0.129396, 0.188227, -0.240255, 0.283599, -0.316693, 0.338341, -0.347761,
0.344612, -0.329007, 0.301511, -0.263118, 0.215215, -0.159534, 0.098087, -0.033094
};
-#endif
static const int xC1S7 = 16069;
static const int xC2S6 = 15137;
@@ -400,7 +395,6 @@ void vp8_short_fhaar2x2_c(short *input, short *output, int pitch) { // pitch = 8
}
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
void vp8_fht_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim) {
@@ -419,6 +413,7 @@ void vp8_fht_c(short *input, short *output, int pitch,
// pointers to vertical and horizontal transforms
float *ptv, *pth;
+ assert(tx_type != DCT_DCT);
// load and convert residual array into floating-point
for(j = 0; j < tx_dim; j++) {
for(i = 0; i < tx_dim; i++) {
@@ -517,7 +512,6 @@ void vp8_fht_c(short *input, short *output, int pitch,
}
vp8_clear_system_state(); // Make it simd safe : __asm emms;
}
-#endif
void vp8_short_fdct4x4_c(short *input, short *output, int pitch) {
int i;
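
dct.c now compiles the floating-point DCT/ADST basis tables and vp8_fht_c unconditionally, and the function selects vertical and horizontal basis pointers per transform type. A rough sketch of the separable structure that implies, with placeholder 4x4 bases standing in for the real tables (the exact row/column order inside vp8_fht_c is not shown in this hunk):

    #define N 4

    /* Apply out = V * in * H^T: one basis for the columns, one for the rows. */
    static void separable_fwd_txfm(const float in[N][N],
                                   const float vert[N][N],
                                   const float horz[N][N],
                                   float out[N][N]) {
      float tmp[N][N];
      int i, j, k;

      for (i = 0; i < N; ++i)          /* tmp = vert * in (transform columns) */
        for (j = 0; j < N; ++j) {
          tmp[i][j] = 0.0f;
          for (k = 0; k < N; ++k)
            tmp[i][j] += vert[i][k] * in[k][j];
        }

      for (i = 0; i < N; ++i)          /* out = tmp * horz^T (transform rows) */
        for (j = 0; j < N; ++j) {
          out[i][j] = 0.0f;
          for (k = 0; k < N; ++k)
            out[i][j] += tmp[i][k] * horz[j][k];
        }
    }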
diff --git a/vp8/encoder/dct.h b/vp8/encoder/dct.h
index 180192bbb..4ad1fe85d 100644
--- a/vp8/encoder/dct.h
+++ b/vp8/encoder/dct.h
@@ -26,10 +26,8 @@
#endif
-#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
void vp8_fht_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim);
-#endif
#ifndef vp8_fdct_short16x16
#define vp8_fdct_short16x16 vp8_short_fdct16x16_c
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index f784cf434..0910cfd35 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -120,8 +120,8 @@ static unsigned int tt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x) {
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
- act = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
- x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
+ act = vp8_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0,
+ &sse);
act = act << 4;
/* If the region is flat, lower the activity some more. */
@@ -222,7 +222,7 @@ static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) {
#if USE_ACT_INDEX
// Calculate an activity index for each mb
static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) {
- VP8_COMMON *const cm = & cpi->common;
+ VP8_COMMON *const cm = &cpi->common;
int mb_row, mb_col;
int64_t act;
@@ -276,9 +276,9 @@ static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) {
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
static void build_activity_map(VP8_COMP *cpi) {
- MACROBLOCK *const x = & cpi->mb;
+ MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *xd = &x->e_mbd;
- VP8_COMMON *const cm = & cpi->common;
+ VP8_COMMON *const cm = &cpi->common;
#if ALT_ACT_MEASURE
YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
@@ -411,7 +411,6 @@ static void update_state(VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
}
-#if CONFIG_TX_SELECT
{
int segment_id = mbmi->segment_id;
if (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
@@ -421,7 +420,6 @@ static void update_state(VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
}
}
}
-#endif
if (cpi->common.frame_type == KEY_FRAME) {
// Restore the coding modes to that held in the coding context
@@ -1051,9 +1049,6 @@ static void encode_sb(VP8_COMP *cpi,
cpi->inter_zz_count++;
}
- // TODO Partitioning is broken!
- cpi->tplist[mb_row].stop = *tp;
-
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
x->src.y_buffer += 32;
@@ -1064,7 +1059,10 @@ static void encode_sb(VP8_COMP *cpi,
x->partition_info += 2;
xd->mode_info_context += 2;
xd->prev_mode_info_context += 2;
-
+
+ (*tp)->Token = EOSB_TOKEN;
+ (*tp)++;
+ if (mb_row < cm->mb_rows) cpi->tplist[mb_row].stop = *tp;
break;
}
#endif
@@ -1086,6 +1084,9 @@ static void encode_sb(VP8_COMP *cpi,
assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
(xd->mode_info_context - cpi->common.mip));
#endif
+ (*tp)->Token = EOSB_TOKEN;
+ (*tp)++;
+ if (mb_row < cm->mb_rows) cpi->tplist[mb_row].stop = *tp;
}
// debug output
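
The encode_sb() hunks above drop the old per-row tplist update (marked "Partitioning is broken!") and instead terminate each superblock's tokens with an EOSB_TOKEN before recording the row's stop pointer. A small sketch of that pattern, with illustrative types and an assumed sentinel value:

    enum { TOY_EOSB_TOKEN = -1 };             /* assumed sentinel value */

    typedef struct { int token; int extra; } toy_token;
    typedef struct { toy_token *start, *stop; } toy_tokenlist;

    /* Append the end-of-superblock marker and, for rows inside the frame,
     * remember where this row's token list now stops. */
    static void end_superblock(toy_token **tp, toy_tokenlist *tplist,
                               int mb_row, int mb_rows) {
      (*tp)->token = TOY_EOSB_TOKEN;
      (*tp)++;
      if (mb_row < mb_rows)
        tplist[mb_row].stop = *tp;
    }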
@@ -1216,9 +1217,9 @@ void encode_sb_row(VP8_COMP *cpi,
}
void init_encode_frame_mb_context(VP8_COMP *cpi) {
- MACROBLOCK *const x = & cpi->mb;
- VP8_COMMON *const cm = & cpi->common;
- MACROBLOCKD *const xd = & x->e_mbd;
+ MACROBLOCK *const x = &cpi->mb;
+ VP8_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
// GF active flags data structure
x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
@@ -1287,9 +1288,9 @@ void init_encode_frame_mb_context(VP8_COMP *cpi) {
static void encode_frame_internal(VP8_COMP *cpi) {
int mb_row;
- MACROBLOCK *const x = & cpi->mb;
- VP8_COMMON *const cm = & cpi->common;
- MACROBLOCKD *const xd = & x->e_mbd;
+ MACROBLOCK *const x = &cpi->mb;
+ VP8_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
TOKENEXTRA *tp = cpi->tok;
int totalrate;
@@ -1339,37 +1340,18 @@ static void encode_frame_internal(VP8_COMP *cpi) {
cpi->pred_filter_on_count = 0;
cpi->pred_filter_off_count = 0;
#endif
-#if CONFIG_SWITCHABLE_INTERP
vp8_zero(cpi->switchable_interp_count);
-#endif
-
-#if 0
- // Experimental code
- cpi->frame_distortion = 0;
- cpi->last_mb_distortion = 0;
-#endif
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
-#if CONFIG_NEWMVENTROPY
vp8_zero(cpi->NMVcount);
-#else
- vp8_zero(cpi->MVcount);
- vp8_zero(cpi->MVcount_hp);
-#endif
vp8_zero(cpi->coef_counts);
-#if CONFIG_HYBRIDTRANSFORM
vp8_zero(cpi->hybrid_coef_counts);
-#endif
vp8_zero(cpi->coef_counts_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_zero(cpi->hybrid_coef_counts_8x8);
-#endif
vp8_zero(cpi->coef_counts_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_zero(cpi->hybrid_coef_counts_16x16);
-#endif
vp8cx_frame_init_quantizer(cpi);
@@ -1390,11 +1372,9 @@ static void encode_frame_internal(VP8_COMP *cpi) {
vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
-#if CONFIG_TX_SELECT
vpx_memset(cpi->txfm_count, 0, sizeof(cpi->txfm_count));
vpx_memset(cpi->txfm_count_8x8p, 0, sizeof(cpi->txfm_count_8x8p));
vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
-#endif
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
@@ -1454,7 +1434,6 @@ static int check_dual_ref_flags(VP8_COMP *cpi) {
}
}
-#if CONFIG_TX_SELECT
static void reset_skip_txfm_size(VP8_COMP *cpi, TX_SIZE txfm_max) {
VP8_COMMON *cm = &cpi->common;
int mb_row, mb_col, mis = cm->mode_info_stride;
@@ -1478,7 +1457,6 @@ static void reset_skip_txfm_size(VP8_COMP *cpi, TX_SIZE txfm_max) {
}
}
}
-#endif
void vp8_encode_frame(VP8_COMP *cpi) {
if (cpi->sf.RD) {
@@ -1524,7 +1502,6 @@ void vp8_encode_frame(VP8_COMP *cpi) {
txfm_type = ONLY_4X4;
} else
#endif
-#if CONFIG_TX_SELECT
/* FIXME (rbultje)
* this is a hack (no really), basically to work around the complete
* nonsense coefficient cost prediction for keyframes. The probabilities
@@ -1572,16 +1549,11 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_16X16 : TX_MODE_SELECT;
#endif
-#else
- txfm_type = ALLOW_16X16;
-#endif // CONFIG_TX_SELECT
cpi->common.txfm_mode = txfm_type;
-#if CONFIG_TX_SELECT
if (txfm_type != TX_MODE_SELECT) {
cpi->common.prob_tx[0] = 128;
cpi->common.prob_tx[1] = 128;
}
-#endif
cpi->common.comp_pred_mode = pred_type;
encode_frame_internal(cpi);
@@ -1591,7 +1563,6 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
}
-#if CONFIG_TX_SELECT
for (i = 0; i < NB_TXFM_MODES; ++i) {
int64_t pd = cpi->rd_tx_select_diff[i];
int diff;
@@ -1601,7 +1572,6 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->rd_tx_select_threshes[frame_type][i] += diff;
cpi->rd_tx_select_threshes[frame_type][i] /= 2;
}
-#endif
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
int single_count_zero = 0;
@@ -1619,7 +1589,6 @@ void vp8_encode_frame(VP8_COMP *cpi) {
}
}
-#if CONFIG_TX_SELECT
if (cpi->common.txfm_mode == TX_MODE_SELECT) {
const int count4x4 = cpi->txfm_count[TX_4X4] + cpi->txfm_count_8x8p[TX_4X4];
const int count8x8 = cpi->txfm_count[TX_8X8];
@@ -1636,7 +1605,6 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->common.txfm_mode = ALLOW_16X16;
}
}
-#endif
} else {
encode_frame_internal(cpi);
}
@@ -1719,7 +1687,7 @@ void vp8_build_block_offsets(MACROBLOCK *x) {
}
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) {
- const MACROBLOCKD *xd = & x->e_mbd;
+ const MACROBLOCKD *xd = &x->e_mbd;
const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
@@ -1928,7 +1896,7 @@ void vp8cx_encode_intra_super_block(VP8_COMP *cpi,
update_sb_skip_coeff_state(cpi, x, ta, tl, tp, t, skip);
}
}
-#endif
+#endif /* CONFIG_SUPERBLOCKS */
void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
MACROBLOCK *x,
@@ -1939,29 +1907,27 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
adjust_act_zbin(cpi, x);
vp8_update_zbin_extra(cpi, x);
}
-
if (mbmi->mode == I8X8_PRED) {
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
- } else if (mbmi->mode == B_PRED)
+ } else if (mbmi->mode == B_PRED) {
+ vp8_intra_prediction_down_copy(&x->e_mbd);
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
- else
+ } else {
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
+ }
if (mbmi->mode != I8X8_PRED) {
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
}
if (output_enabled) {
-#if CONFIG_TX_SELECT
int segment_id = mbmi->segment_id;
-#endif
// Tokenize
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t, 0);
-#if CONFIG_TX_SELECT
if (cpi->common.txfm_mode == TX_MODE_SELECT &&
!((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
(segfeature_active(&x->e_mbd, segment_id, SEG_LVL_EOB) &&
@@ -1971,9 +1937,7 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
} else if (mbmi->mode == I8X8_PRED) {
cpi->txfm_count_8x8p[mbmi->txfm_size]++;
}
- } else
-#endif
- if (cpi->common.txfm_mode >= ALLOW_16X16 && mbmi->mode <= TM_PRED) {
+ } else if (cpi->common.txfm_mode >= ALLOW_16X16 && mbmi->mode <= TM_PRED) {
mbmi->txfm_size = TX_16X16;
} else
if (cpi->common.txfm_mode >= ALLOW_8X8 && mbmi->mode != B_PRED) {
@@ -2008,9 +1972,7 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
assert(!xd->mode_info_context->mbmi.encoded_as_sb);
#endif
-#if CONFIG_SWITCHABLE_INTERP
vp8_setup_interp_filters(xd, mbmi->interp_filter, cm);
-#endif
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
@@ -2047,6 +2009,7 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
if (mbmi->ref_frame == INTRA_FRAME) {
if (mbmi->mode == B_PRED) {
+ vp8_intra_prediction_down_copy(xd);
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
} else if (mbmi->mode == I8X8_PRED) {
@@ -2153,7 +2116,6 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
}
if (output_enabled) {
-#if CONFIG_TX_SELECT
int segment_id = mbmi->segment_id;
if (cpi->common.txfm_mode == TX_MODE_SELECT &&
!((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
@@ -2162,16 +2124,18 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV) {
cpi->txfm_count[mbmi->txfm_size]++;
- } else if (mbmi->mode == I8X8_PRED) {
+ } else if (mbmi->mode == I8X8_PRED ||
+ (mbmi->mode == SPLITMV &&
+ mbmi->partitioning != PARTITIONING_4X4)) {
cpi->txfm_count_8x8p[mbmi->txfm_size]++;
}
- } else
-#endif
- if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
+ } else if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV && cpi->common.txfm_mode >= ALLOW_16X16) {
mbmi->txfm_size = TX_16X16;
- } else if (mbmi->mode != B_PRED && mbmi->mode != SPLITMV &&
- cpi->common.txfm_mode >= ALLOW_8X8) {
+ } else if (mbmi->mode != B_PRED &&
+ !(mbmi->mode == SPLITMV &&
+ mbmi->partitioning == PARTITIONING_4X4) &&
+ cpi->common.txfm_mode >= ALLOW_8X8) {
mbmi->txfm_size = TX_8X8;
} else {
mbmi->txfm_size = TX_4X4;
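
Pulled together, the vp8cx_encode_intra_macro_block and vp8cx_encode_inter_macroblock hunks above settle on one transform-size rule when the frame is not in TX_MODE_SELECT: the largest size the prediction mode permits, with SPLITMV limited to 8x8 unless it uses 4x4 partitioning. A condensed sketch of that decision (enum values and names are stand-ins):

    typedef enum { TOY_TX_4X4, TOY_TX_8X8, TOY_TX_16X16 } toy_tx_size;
    typedef enum { TOY_ONLY_4X4, TOY_ALLOW_8X8, TOY_ALLOW_16X16 } toy_tx_mode;

    static toy_tx_size pick_tx_size(toy_tx_mode txfm_mode, int is_b_pred,
                                    int is_i8x8, int is_splitmv,
                                    int split_is_4x4) {
      if (txfm_mode >= TOY_ALLOW_16X16 &&
          !is_b_pred && !is_i8x8 && !is_splitmv)
        return TOY_TX_16X16;           /* whole-MB modes may use 16x16 */
      if (txfm_mode >= TOY_ALLOW_8X8 &&
          !is_b_pred && !(is_splitmv && split_is_4x4))
        return TOY_TX_8X8;             /* 8x8 unless split into 4x4 partitions */
      return TOY_TX_4X4;               /* B_PRED and 4x4-partitioned SPLITMV */
    }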
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 703a1015e..f44df22ea 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -48,7 +48,7 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
}
}
- intra_pred_var = VARIANCE_INVOKE(&cpi->rtcd.variance, getmbss)(x->src_diff);
+ intra_pred_var = vp8_get_mb_ss(x->src_diff);
return intra_pred_var;
}
@@ -57,6 +57,7 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
MACROBLOCK *x, int ib) {
BLOCKD *b = &x->e_mbd.block[ib];
BLOCK *be = &x->block[ib];
+ TX_TYPE tx_type;
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
@@ -71,15 +72,12 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
-#if CONFIG_HYBRIDTRANSFORM
- if (x->q_index < ACTIVE_HT) {
- txfm_map(b, b->bmi.as_mode.first);
- vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
- vp8_ht_quantize_b_4x4(be, b);
- vp8_ihtllm_c(b->dqcoeff, b->diff, 32, b->bmi.as_mode.tx_type, 4);
- } else
-#endif
- {
+ tx_type = get_tx_type(&x->e_mbd, b);
+ if (tx_type != DCT_DCT) {
+ vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 4);
+ vp8_ht_quantize_b_4x4(be, b, tx_type);
+ vp8_ihtllm_c(b->dqcoeff, b->diff, 32, tx_type, 4);
+ } else {
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
x->quantize_b_4x4(be, b) ;
vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
@@ -91,12 +89,6 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
int i;
-#if 0
- MACROBLOCKD *xd = &mb->e_mbd;
- // Intra modes requiring top-right MB reconstructed data have been disabled
- vp8_intra_prediction_down_copy(xd);
-#endif
-
for (i = 0; i < 16; i++)
vp8_encode_intra4x4block(rtcd, mb, i);
return;
@@ -106,9 +98,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
-#if CONFIG_HYBRIDTRANSFORM16X16
- TX_TYPE txfm_type = xd->mode_info_context->bmi[0].as_mode.tx_type;
-#endif
+ TX_TYPE tx_type;
#if CONFIG_COMP_INTRA_PRED
if (xd->mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE)(DC_PRED - 1))
@@ -123,20 +113,15 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
xd->predictor, b->src_stride);
if (tx_size == TX_16X16) {
-#if CONFIG_HYBRIDTRANSFORM16X16
- if ((xd->mode_info_context->mbmi.mode < I8X8_PRED) &&
- (x->q_index < ACTIVE_HT16)) {
- BLOCKD *bd = &xd->block[0];
- txfm_map(bd, pred_mode_conv(xd->mode_info_context->mbmi.mode));
- txfm_type = bd->bmi.as_mode.tx_type;
- vp8_fht_c(b->src_diff, b->coeff, 32, txfm_type, 16);
+ BLOCKD *bd = &xd->block[0];
+ tx_type = get_tx_type(xd, bd);
+ if (tx_type != DCT_DCT) {
+ vp8_fht_c(b->src_diff, b->coeff, 32, tx_type, 16);
vp8_quantize_mby_16x16(x);
if (x->optimize)
vp8_optimize_mby_16x16(x, rtcd);
- vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, txfm_type, 16);
- } else
-#endif
- {
+ vp8_ihtllm_c(bd->dqcoeff, bd->diff, 32, tx_type, 16);
+ } else {
vp8_transform_mby_16x16(x);
vp8_quantize_mby_16x16(x);
if (x->optimize)
@@ -201,6 +186,7 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
BLOCK *be = &x->block[ib];
const int iblock[4] = {0, 1, 4, 5};
int i;
+ TX_TYPE tx_type;
#if CONFIG_COMP_INTRA_PRED
if (b->bmi.as_mode.second == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
@@ -219,18 +205,18 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
// generate residual blocks
vp8_subtract_4b_c(be, b, 16);
-#if CONFIG_HYBRIDTRANSFORM8X8
- txfm_map(b, pred_mode_conv(b->bmi.as_mode.first));
- vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32,
- b->bmi.as_mode.tx_type, 8);
- x->quantize_b_8x8(x->block + idx, xd->block + idx);
- vp8_ihtllm_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32,
- b->bmi.as_mode.tx_type, 8);
-#else
- x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
- x->quantize_b_8x8(x->block + idx, xd->block + idx);
- vp8_idct_idct8(xd->block[idx].dqcoeff, xd->block[ib].diff, 32);
-#endif
+ tx_type = get_tx_type(xd, xd->block + idx);
+ if (tx_type != DCT_DCT) {
+ vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32,
+ tx_type, 8);
+ x->quantize_b_8x8(x->block + idx, xd->block + idx);
+ vp8_ihtllm_c(xd->block[idx].dqcoeff, xd->block[ib].diff, 32,
+ tx_type, 8);
+ } else {
+ x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
+ x->quantize_b_8x8(x->block + idx, xd->block + idx);
+ vp8_idct_idct8(xd->block[idx].dqcoeff, xd->block[ib].diff, 32);
+ }
} else {
for (i = 0; i < 4; i++) {
b = &xd->block[ib + iblock[i]];
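
Across vp8_encode_intra4x4block, vp8_encode_intra16x16mby and vp8_encode_intra8x8, the per-mode q_index/ACTIVE_HT checks are replaced by one query, get_tx_type(), that decides between the hybrid and plain DCT chains. The shape of that dispatch, with no-op stand-ins for the actual transform/quantize calls:

    typedef enum { TOY_DCT_DCT, TOY_ADST_DCT, TOY_DCT_ADST, TOY_ADST_ADST } toy_tx_type;

    /* No-op stand-ins for the libvpx transform/quantize/inverse calls. */
    static void toy_fht(void)         { /* hybrid forward transform  */ }
    static void toy_ht_quantize(void) { /* quantize with hybrid scan */ }
    static void toy_iht(void)         { /* hybrid inverse transform  */ }
    static void toy_fdct(void)        { /* plain forward DCT         */ }
    static void toy_quantize(void)    { /* plain quantize            */ }
    static void toy_idct(void)        { /* plain inverse DCT         */ }

    static void encode_intra_block(toy_tx_type tx_type) {
      if (tx_type != TOY_DCT_DCT) {   /* hybrid transform selected for this block */
        toy_fht();
        toy_ht_quantize();
        toy_iht();
      } else {                        /* default DCT chain */
        toy_fdct();
        toy_quantize();
        toy_idct();
      }
    }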
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index 50de2f2c0..d3bd0f1dd 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -267,7 +267,7 @@ static const int plane_rd_mult[4] = {
void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
- const VP8_ENCODER_RTCD *rtcd, int tx_type) {
+ const VP8_ENCODER_RTCD *rtcd, int tx_size) {
BLOCK *b;
BLOCKD *d;
vp8_token_state tokens[65][2];
@@ -298,21 +298,18 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
b = &mb->block[i];
d = &mb->e_mbd.block[i];
- switch (tx_type) {
+ switch (tx_size) {
default:
case TX_4X4:
scan = vp8_default_zig_zag1d;
bands = vp8_coef_bands;
default_eob = 16;
-#if CONFIG_HYBRIDTRANSFORM
// TODO: this isn't called (for intra4x4 modes), but will be left in
// since it could be used later
{
- int active_ht = (mb->q_index < ACTIVE_HT) &&
- (mb->e_mbd.mode_info_context->mbmi.mode == B_PRED);
-
- if((type == PLANE_TYPE_Y_WITH_DC) && active_ht) {
- switch (d->bmi.as_mode.tx_type) {
+ TX_TYPE tx_type = get_tx_type(&mb->e_mbd, d);
+ if (tx_type != DCT_DCT) {
+ switch (tx_type) {
case ADST_DCT:
scan = vp8_row_scan;
break;
@@ -325,11 +322,10 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
scan = vp8_default_zig_zag1d;
break;
}
-
- } else
+ } else {
scan = vp8_default_zig_zag1d;
+ }
}
-#endif
break;
case TX_8X8:
scan = vp8_default_zig_zag1d_8x8;
@@ -380,9 +376,9 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
band = bands[i + 1];
pt = vp8_prev_token_class[t0];
rate0 +=
- mb->token_costs[tx_type][type][band][pt][tokens[next][0].token];
+ mb->token_costs[tx_size][type][band][pt][tokens[next][0].token];
rate1 +=
- mb->token_costs[tx_type][type][band][pt][tokens[next][1].token];
+ mb->token_costs[tx_size][type][band][pt][tokens[next][1].token];
}
UPDATE_RD_COST();
/* And pick the best. */
@@ -427,12 +423,12 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
band = bands[i + 1];
if (t0 != DCT_EOB_TOKEN) {
pt = vp8_prev_token_class[t0];
- rate0 += mb->token_costs[tx_type][type][band][pt][
+ rate0 += mb->token_costs[tx_size][type][band][pt][
tokens[next][0].token];
}
if (t1 != DCT_EOB_TOKEN) {
pt = vp8_prev_token_class[t1];
- rate1 += mb->token_costs[tx_type][type][band][pt][
+ rate1 += mb->token_costs[tx_size][type][band][pt][
tokens[next][1].token];
}
}
@@ -464,11 +460,11 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
t1 = tokens[next][1].token;
/* Update the cost of each path if we're past the EOB token. */
if (t0 != DCT_EOB_TOKEN) {
- tokens[next][0].rate += mb->token_costs[tx_type][type][band][0][t0];
+ tokens[next][0].rate += mb->token_costs[tx_size][type][band][0][t0];
tokens[next][0].token = ZERO_TOKEN;
}
if (t1 != DCT_EOB_TOKEN) {
- tokens[next][1].rate += mb->token_costs[tx_type][type][band][0][t1];
+ tokens[next][1].rate += mb->token_costs[tx_size][type][band][0][t1];
tokens[next][1].token = ZERO_TOKEN;
}
/* Don't update next, because we didn't add a new node. */
@@ -484,8 +480,8 @@ void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
error1 = tokens[next][1].error;
t0 = tokens[next][0].token;
t1 = tokens[next][1].token;
- rate0 += mb->token_costs[tx_type][type][band][pt][t0];
- rate1 += mb->token_costs[tx_type][type][band][pt][t1];
+ rate0 += mb->token_costs[tx_size][type][band][pt][t0];
+ rate1 += mb->token_costs[tx_size][type][band][pt][t1];
UPDATE_RD_COST();
best = rd_cost1 < rd_cost0;
final_eob = i0 - 1;
@@ -640,6 +636,7 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
+ int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return;
@@ -649,18 +646,21 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
- type = PLANE_TYPE_Y_NO_DC;
+ type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
for (b = 0; b < 16; b += 4) {
optimize_b(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b],
+ ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
+ ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]];
+ tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]];
}
// 8x8 always has a 2nd order haar block
- check_reset_8x8_2nd_coeffs(&x->e_mbd,
- ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]);
+ if (has_2nd_order) {
+ check_reset_8x8_2nd_coeffs(&x->e_mbd,
+ ta + vp8_block2above_8x8[24],
+ tl + vp8_block2left_8x8[24]);
+ }
}
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
@@ -682,8 +682,8 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
optimize_b(x, b, PLANE_TYPE_UV,
ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
+ ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]];
+ tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]];
}
}
@@ -900,11 +900,25 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
optimize_mb_16x16(x, rtcd);
vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), xd);
} else if (tx_size == TX_8X8) {
- vp8_transform_mb_8x8(x);
- vp8_quantize_mb_8x8(x);
- if (x->optimize)
- optimize_mb_8x8(x, rtcd);
- vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd);
+ if (xd->mode_info_context->mbmi.mode == SPLITMV) {
+ assert(xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4);
+ vp8_transform_mby_8x8(x);
+ vp8_transform_mbuv_4x4(x);
+ vp8_quantize_mby_8x8(x);
+ vp8_quantize_mbuv_4x4(x);
+ if (x->optimize) {
+ vp8_optimize_mby_8x8(x, rtcd);
+ vp8_optimize_mbuv_4x4(x, rtcd);
+ }
+ vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
+ vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
+ } else {
+ vp8_transform_mb_8x8(x);
+ vp8_quantize_mb_8x8(x);
+ if (x->optimize)
+ optimize_mb_8x8(x, rtcd);
+ vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd);
+ }
} else {
transform_mb_4x4(x);
vp8_quantize_mb_4x4(x);
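
In optimize_b() the last parameter is renamed from tx_type to tx_size, and the 4x4 case now derives a real TX_TYPE from get_tx_type() to pick the coefficient scan: the excerpt shows the ADST_DCT to row-scan case, with a column scan for the opposite mixed type assumed by symmetry and zig-zag otherwise. A sketch of that selection with tiny placeholder scan tables:

    typedef enum { TOY_DCT_DCT, TOY_ADST_DCT, TOY_DCT_ADST, TOY_ADST_ADST } toy_tx_type;

    /* 2x2 placeholder scans; the real tables cover 16 coefficients. */
    static const int toy_zigzag[4]   = { 0, 1, 2, 3 };
    static const int toy_row_scan[4] = { 0, 1, 2, 3 };
    static const int toy_col_scan[4] = { 0, 2, 1, 3 };

    static const int *select_scan(toy_tx_type tx_type) {
      switch (tx_type) {
        case TOY_ADST_DCT: return toy_row_scan;  /* shown in the hunk */
        case TOY_DCT_ADST: return toy_col_scan;  /* assumed by symmetry */
        default:           return toy_zigzag;    /* DCT_DCT, ADST_ADST */
      }
    }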
diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c
index 1289d89bb..75dad2f9b 100644
--- a/vp8/encoder/encodemv.c
+++ b/vp8/encoder/encodemv.c
@@ -22,44 +22,42 @@ extern unsigned int active_section;
//extern int final_packing;
-#if CONFIG_NEWMVENTROPY
-
#ifdef NMV_STATS
nmv_context_counts tnmvcounts;
#endif
-static void encode_nmv_component(vp8_writer *w,
+static void encode_nmv_component(vp8_writer* const bc,
int v,
int r,
- const nmv_component *mvcomp) {
+ const nmv_component* const mvcomp) {
int s, z, c, o, d;
assert (v != 0); /* should not be zero */
s = v < 0;
- vp8_write(w, s, mvcomp->sign);
+ vp8_write(bc, s, mvcomp->sign);
z = (s ? -v : v) - 1; /* magnitude - 1 */
c = vp8_get_mv_class(z, &o);
- vp8_write_token(w, vp8_mv_class_tree, mvcomp->classes,
+ vp8_write_token(bc, vp8_mv_class_tree, mvcomp->classes,
vp8_mv_class_encodings + c);
d = (o >> 3); /* int mv data */
if (c == MV_CLASS_0) {
- vp8_write_token(w, vp8_mv_class0_tree, mvcomp->class0,
+ vp8_write_token(bc, vp8_mv_class0_tree, mvcomp->class0,
vp8_mv_class0_encodings + d);
} else {
int i, b;
b = c + CLASS0_BITS - 1; /* number of bits */
for (i = 0; i < b; ++i)
- vp8_write(w, ((d >> i) & 1), mvcomp->bits[i]);
+ vp8_write(bc, ((d >> i) & 1), mvcomp->bits[i]);
}
}
-static void encode_nmv_component_fp(vp8_writer *w,
+static void encode_nmv_component_fp(vp8_writer *bc,
int v,
int r,
- const nmv_component *mvcomp,
+ const nmv_component* const mvcomp,
int usehp) {
int s, z, c, o, d, f, e;
assert (v != 0); /* should not be zero */
@@ -74,24 +72,24 @@ static void encode_nmv_component_fp(vp8_writer *w,
/* Code the fractional pel bits */
if (c == MV_CLASS_0) {
- vp8_write_token(w, vp8_mv_fp_tree, mvcomp->class0_fp[d],
+ vp8_write_token(bc, vp8_mv_fp_tree, mvcomp->class0_fp[d],
vp8_mv_fp_encodings + f);
} else {
- vp8_write_token(w, vp8_mv_fp_tree, mvcomp->fp,
+ vp8_write_token(bc, vp8_mv_fp_tree, mvcomp->fp,
vp8_mv_fp_encodings + f);
}
/* Code the high precision bit */
if (usehp) {
if (c == MV_CLASS_0) {
- vp8_write(w, e, mvcomp->class0_hp);
+ vp8_write(bc, e, mvcomp->class0_hp);
} else {
- vp8_write(w, e, mvcomp->hp);
+ vp8_write(bc, e, mvcomp->hp);
}
}
}
static void build_nmv_component_cost_table(int *mvcost,
- const nmv_component *mvcomp,
+ const nmv_component* const mvcomp,
int usehp) {
int i, v;
int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE];
@@ -177,7 +175,7 @@ static int update_nmv_savings(const unsigned int ct[2],
}
static int update_nmv(
- vp8_writer *const w,
+ vp8_writer *const bc,
const unsigned int ct[2],
vp8_prob *const cur_p,
const vp8_prob new_p,
@@ -199,15 +197,15 @@ static int update_nmv(
if (cur_b - mod_b > cost) {
*cur_p = mod_p;
- vp8_write(w, 1, upd_p);
+ vp8_write(bc, 1, upd_p);
#ifdef LOW_PRECISION_MV_UPDATE
- vp8_write_literal(w, mod_p >> 1, 7);
+ vp8_write_literal(bc, mod_p >> 1, 7);
#else
- vp8_write_literal(w, mod_p, 8);
+ vp8_write_literal(bc, mod_p, 8);
#endif
return 1;
} else {
- vp8_write(w, 0, upd_p);
+ vp8_write(bc, 0, upd_p);
return 0;
}
}
@@ -318,7 +316,8 @@ void print_nmvstats() {
}
}
-static void add_nmvcount(nmv_context_counts *dst, nmv_context_counts *src) {
+static void add_nmvcount(nmv_context_counts* const dst,
+ const nmv_context_counts* const src) {
int i, j, k;
for (j = 0; j < MV_JOINTS; ++j) {
dst->joints[j] += src->joints[j];
@@ -357,8 +356,7 @@ static void add_nmvcount(nmv_context_counts *dst, nmv_context_counts *src) {
}
#endif
-void vp8_write_nmvprobs(VP8_COMP * cpi, int usehp) {
- vp8_writer *const w = & cpi->bc;
+void vp8_write_nmvprobs(VP8_COMP* const cpi, int usehp, vp8_writer* const bc) {
int i, j;
nmv_context prob;
unsigned int branch_ct_joint[MV_JOINTS - 1][2];
@@ -443,37 +441,37 @@ void vp8_write_nmvprobs(VP8_COMP * cpi, int usehp) {
}
}
if (savings <= 0) {
- vp8_write_bit(w, 0);
+ vp8_write_bit(bc, 0);
return;
}
- vp8_write_bit(w, 1);
+ vp8_write_bit(bc, 1);
#endif
for (j = 0; j < MV_JOINTS - 1; ++j) {
- update_nmv(w, branch_ct_joint[j],
+ update_nmv(bc, branch_ct_joint[j],
&cpi->common.fc.nmvc.joints[j],
prob.joints[j],
VP8_NMV_UPDATE_PROB);
}
for (i = 0; i < 2; ++i) {
- update_nmv(w, branch_ct_sign[i],
+ update_nmv(bc, branch_ct_sign[i],
&cpi->common.fc.nmvc.comps[i].sign,
prob.comps[i].sign,
VP8_NMV_UPDATE_PROB);
for (j = 0; j < MV_CLASSES - 1; ++j) {
- update_nmv(w, branch_ct_classes[i][j],
+ update_nmv(bc, branch_ct_classes[i][j],
&cpi->common.fc.nmvc.comps[i].classes[j],
prob.comps[i].classes[j],
VP8_NMV_UPDATE_PROB);
}
for (j = 0; j < CLASS0_SIZE - 1; ++j) {
- update_nmv(w, branch_ct_class0[i][j],
+ update_nmv(bc, branch_ct_class0[i][j],
&cpi->common.fc.nmvc.comps[i].class0[j],
prob.comps[i].class0[j],
VP8_NMV_UPDATE_PROB);
}
for (j = 0; j < MV_OFFSET_BITS; ++j) {
- update_nmv(w, branch_ct_bits[i][j],
+ update_nmv(bc, branch_ct_bits[i][j],
&cpi->common.fc.nmvc.comps[i].bits[j],
prob.comps[i].bits[j],
VP8_NMV_UPDATE_PROB);
@@ -483,14 +481,14 @@ void vp8_write_nmvprobs(VP8_COMP * cpi, int usehp) {
for (j = 0; j < CLASS0_SIZE; ++j) {
int k;
for (k = 0; k < 3; ++k) {
- update_nmv(w, branch_ct_class0_fp[i][j][k],
+ update_nmv(bc, branch_ct_class0_fp[i][j][k],
&cpi->common.fc.nmvc.comps[i].class0_fp[j][k],
prob.comps[i].class0_fp[j][k],
VP8_NMV_UPDATE_PROB);
}
}
for (j = 0; j < 3; ++j) {
- update_nmv(w, branch_ct_fp[i][j],
+ update_nmv(bc, branch_ct_fp[i][j],
&cpi->common.fc.nmvc.comps[i].fp[j],
prob.comps[i].fp[j],
VP8_NMV_UPDATE_PROB);
@@ -498,11 +496,11 @@ void vp8_write_nmvprobs(VP8_COMP * cpi, int usehp) {
}
if (usehp) {
for (i = 0; i < 2; ++i) {
- update_nmv(w, branch_ct_class0_hp[i],
+ update_nmv(bc, branch_ct_class0_hp[i],
&cpi->common.fc.nmvc.comps[i].class0_hp,
prob.comps[i].class0_hp,
VP8_NMV_UPDATE_PROB);
- update_nmv(w, branch_ct_hp[i],
+ update_nmv(bc, branch_ct_hp[i],
&cpi->common.fc.nmvc.comps[i].hp,
prob.comps[i].hp,
VP8_NMV_UPDATE_PROB);
@@ -510,34 +508,35 @@ void vp8_write_nmvprobs(VP8_COMP * cpi, int usehp) {
}
}
-void vp8_encode_nmv(vp8_writer *w, const MV *mv, const MV *ref,
- const nmv_context *mvctx) {
+void vp8_encode_nmv(vp8_writer* const bc, const MV* const mv,
+ const MV* const ref, const nmv_context* const mvctx) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
- vp8_write_token(w, vp8_mv_joint_tree, mvctx->joints,
+ vp8_write_token(bc, vp8_mv_joint_tree, mvctx->joints,
vp8_mv_joint_encodings + j);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
- encode_nmv_component(w, mv->row, ref->col, &mvctx->comps[0]);
+ encode_nmv_component(bc, mv->row, ref->col, &mvctx->comps[0]);
}
if (j == MV_JOINT_HNZVZ || j == MV_JOINT_HNZVNZ) {
- encode_nmv_component(w, mv->col, ref->col, &mvctx->comps[1]);
+ encode_nmv_component(bc, mv->col, ref->col, &mvctx->comps[1]);
}
}
-void vp8_encode_nmv_fp(vp8_writer *w, const MV *mv, const MV *ref,
- const nmv_context *mvctx, int usehp) {
+void vp8_encode_nmv_fp(vp8_writer* const bc, const MV* const mv,
+ const MV* const ref, const nmv_context* const mvctx,
+ int usehp) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
usehp = usehp && vp8_use_nmv_hp(ref);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
- encode_nmv_component_fp(w, mv->row, ref->row, &mvctx->comps[0], usehp);
+ encode_nmv_component_fp(bc, mv->row, ref->row, &mvctx->comps[0], usehp);
}
if (j == MV_JOINT_HNZVZ || j == MV_JOINT_HNZVNZ) {
- encode_nmv_component_fp(w, mv->col, ref->col, &mvctx->comps[1], usehp);
+ encode_nmv_component_fp(bc, mv->col, ref->col, &mvctx->comps[1], usehp);
}
}
void vp8_build_nmv_cost_table(int *mvjoint,
int *mvcost[2],
- const nmv_context *mvctx,
+ const nmv_context* const mvctx,
int usehp,
int mvc_flag_v,
int mvc_flag_h) {
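
vp8_encode_nmv above codes a joint symbol first and then only the nonzero components, each through encode_nmv_component. A compact sketch of that joint-then-components order, with stub coders in place of the tree writer:

    typedef enum {
      TOY_MV_JOINT_ZERO,     /* both components zero */
      TOY_MV_JOINT_HNZVZ,    /* col nonzero, row zero */
      TOY_MV_JOINT_HZVNZ,    /* row nonzero, col zero */
      TOY_MV_JOINT_HNZVNZ    /* both nonzero */
    } toy_mv_joint;

    typedef struct { int row, col; } toy_mv;

    static toy_mv_joint toy_get_joint(toy_mv mv) {
      if (mv.row == 0)
        return mv.col == 0 ? TOY_MV_JOINT_ZERO : TOY_MV_JOINT_HNZVZ;
      return mv.col == 0 ? TOY_MV_JOINT_HZVNZ : TOY_MV_JOINT_HNZVNZ;
    }

    static void toy_code_joint(toy_mv_joint j) { (void)j; /* tree-coded symbol   */ }
    static void toy_code_component(int v)      { (void)v; /* sign/class/bit path */ }

    static void toy_encode_mv(toy_mv mv) {
      toy_mv_joint j = toy_get_joint(mv);
      toy_code_joint(j);
      if (j == TOY_MV_JOINT_HZVNZ || j == TOY_MV_JOINT_HNZVNZ)
        toy_code_component(mv.row);            /* row coded only when nonzero */
      if (j == TOY_MV_JOINT_HNZVZ || j == TOY_MV_JOINT_HNZVNZ)
        toy_code_component(mv.col);            /* col coded only when nonzero */
    }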
@@ -548,588 +547,3 @@ void vp8_build_nmv_cost_table(int *mvjoint,
if (mvc_flag_h)
build_nmv_component_cost_table(mvcost[1], &mvctx->comps[1], usehp);
}
-
-#else /* CONFIG_NEWMVENTROPY */
-
-static void encode_mvcomponent(
- vp8_writer *const w,
- const int v,
- const struct mv_context *mvc
-) {
- const vp8_prob *p = mvc->prob;
- const int x = v < 0 ? -v : v;
-
- if (x < mvnum_short) { // Small
- vp8_write(w, 0, p [mvpis_short]);
- vp8_treed_write(w, vp8_small_mvtree, p + MVPshort, x, mvnum_short_bits);
- if (!x)
- return; // no sign bit
- } else { // Large
- int i = 0;
-
- vp8_write(w, 1, p [mvpis_short]);
-
- do
- vp8_write(w, (x >> i) & 1, p [MVPbits + i]);
-
- while (++i < mvnum_short_bits);
-
- i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
-
- do
- vp8_write(w, (x >> i) & 1, p [MVPbits + i]);
-
- while (--i > mvnum_short_bits);
-
- if (x & ~((2 << mvnum_short_bits) - 1))
- vp8_write(w, (x >> mvnum_short_bits) & 1, p [MVPbits + mvnum_short_bits]);
- }
-
- vp8_write(w, v < 0, p [MVPsign]);
-}
-
-void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, const MV_CONTEXT *mvc) {
- encode_mvcomponent(w, mv->row >> 1, &mvc[0]);
- encode_mvcomponent(w, mv->col >> 1, &mvc[1]);
-}
-
-
-static unsigned int cost_mvcomponent(const int v, const struct mv_context *mvc) {
- const vp8_prob *p = mvc->prob;
- const int x = v; // v<0? -v:v;
- unsigned int cost;
-
- if (x < mvnum_short) {
- cost = vp8_cost_zero(p [mvpis_short])
- + vp8_treed_cost(vp8_small_mvtree, p + MVPshort, x, mvnum_short_bits);
-
- if (!x)
- return cost;
- } else {
- int i = 0;
- cost = vp8_cost_one(p [mvpis_short]);
-
- do
- cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1);
-
- while (++i < mvnum_short_bits);
-
- i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
-
- do
- cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1);
-
- while (--i > mvnum_short_bits);
-
- if (x & ~((2 << mvnum_short_bits) - 1))
- cost += vp8_cost_bit(p [MVPbits + mvnum_short_bits], (x >> mvnum_short_bits) & 1);
- }
-
- return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
-}
-
-void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int mvc_flag[2]) {
- int i = 1; // -mv_max;
- unsigned int cost0 = 0;
- unsigned int cost1 = 0;
-
- vp8_clear_system_state();
-
- i = 1;
-
- if (mvc_flag[0]) {
- mvcost [0] [0] = cost_mvcomponent(0, &mvc[0]);
-
- do {
- // mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
- cost0 = cost_mvcomponent(i, &mvc[0]);
-
- mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign]);
- mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign]);
- } while (++i <= mv_max);
- }
-
- i = 1;
-
- if (mvc_flag[1]) {
- mvcost [1] [0] = cost_mvcomponent(0, &mvc[1]);
-
- do {
- // mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
- cost1 = cost_mvcomponent(i, &mvc[1]);
-
- mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign]);
- mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign]);
- } while (++i <= mv_max);
- }
-}
-
-
-// Motion vector probability table update depends on benefit.
-// Small correction allows for the fact that an update to an MV probability
-// may have benefit in subsequent frames as well as the current one.
-
-#define MV_PROB_UPDATE_CORRECTION -1
-
-
-__inline static void calc_prob(vp8_prob *p, const unsigned int ct[2]) {
- const unsigned int tot = ct[0] + ct[1];
-
- if (tot) {
- const vp8_prob x = ((ct[0] * 255) / tot) & -2;
- *p = x ? x : 1;
- }
-}
-
-static void update(
- vp8_writer *const w,
- const unsigned int ct[2],
- vp8_prob *const cur_p,
- const vp8_prob new_p,
- const vp8_prob update_p,
- int *updated
-) {
- const int cur_b = vp8_cost_branch(ct, *cur_p);
- const int new_b = vp8_cost_branch(ct, new_p);
- const int cost = 7 + MV_PROB_UPDATE_CORRECTION + ((vp8_cost_one(update_p) - vp8_cost_zero(update_p) + 128) >> 8);
-
- if (cur_b - new_b > cost) {
- *cur_p = new_p;
- vp8_write(w, 1, update_p);
- vp8_write_literal(w, new_p >> 1, 7);
- *updated = 1;
-
- } else
- vp8_write(w, 0, update_p);
-}
-
-static void write_component_probs(
- vp8_writer *const w,
- struct mv_context *cur_mvc,
- const struct mv_context *default_mvc_,
- const struct mv_context *update_mvc,
- const unsigned int events [MVvals],
- unsigned int rc,
- int *updated
-) {
- vp8_prob *Pcur = cur_mvc->prob;
- const vp8_prob *default_mvc = default_mvc_->prob;
- const vp8_prob *Pupdate = update_mvc->prob;
- unsigned int is_short_ct[2], sign_ct[2];
-
- unsigned int bit_ct [mvlong_width] [2];
-
- unsigned int short_ct [mvnum_short];
- unsigned int short_bct [mvnum_short - 1] [2];
-
- vp8_prob Pnew [MVPcount];
-
- (void) rc;
- vp8_copy_array(Pnew, default_mvc, MVPcount);
-
- vp8_zero(is_short_ct)
- vp8_zero(sign_ct)
- vp8_zero(bit_ct)
- vp8_zero(short_ct)
- vp8_zero(short_bct)
-
-
- // j=0
- {
- const int c = events [mv_max];
-
- is_short_ct [0] += c; // Short vector
- short_ct [0] += c; // Magnitude distribution
- }
-
- // j: 1 ~ mv_max (1023)
- {
- int j = 1;
-
- do {
- const int c1 = events [mv_max + j]; // positive
- const int c2 = events [mv_max - j]; // negative
- const int c = c1 + c2;
- int a = j;
-
- sign_ct [0] += c1;
- sign_ct [1] += c2;
-
- if (a < mvnum_short) {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- } else {
- int k = mvlong_width - 1;
- is_short_ct [1] += c; // Long vector
-
- /* bit 3 not always encoded. */
- do
- bit_ct [k] [(a >> k) & 1] += c;
-
- while (--k >= 0);
- }
- } while (++j <= mv_max);
- }
-
- calc_prob(Pnew + mvpis_short, is_short_ct);
-
- calc_prob(Pnew + MVPsign, sign_ct);
-
- {
- vp8_prob p [mvnum_short - 1]; /* actually only need branch ct */
- int j = 0;
-
- vp8_tree_probs_from_distribution(
- mvnum_short, vp8_small_mvencodings, vp8_small_mvtree,
- p, short_bct, short_ct,
- 256, 1
- );
-
- do
- calc_prob(Pnew + MVPshort + j, short_bct[j]);
-
- while (++j < mvnum_short - 1);
- }
-
- {
- int j = 0;
-
- do
- calc_prob(Pnew + MVPbits + j, bit_ct[j]);
-
- while (++j < mvlong_width);
- }
-
- update(w, is_short_ct, Pcur + mvpis_short, Pnew[mvpis_short], *Pupdate++, updated);
-
- update(w, sign_ct, Pcur + MVPsign, Pnew[MVPsign], *Pupdate++, updated);
-
- {
- const vp8_prob *const new_p = Pnew + MVPshort;
- vp8_prob *const cur_p = Pcur + MVPshort;
-
- int j = 0;
-
- do
-
- update(w, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
-
- while (++j < mvnum_short - 1);
- }
-
- {
- const vp8_prob *const new_p = Pnew + MVPbits;
- vp8_prob *const cur_p = Pcur + MVPbits;
-
- int j = 0;
-
- do
-
- update(w, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
-
- while (++j < mvlong_width);
- }
-}
-
-void vp8_write_mvprobs(VP8_COMP *cpi) {
- vp8_writer *const w = & cpi->bc;
- MV_CONTEXT *mvc = cpi->common.fc.mvc;
- int flags[2] = {0, 0};
-#ifdef ENTROPY_STATS
- active_section = 4;
-#endif
- write_component_probs(
- w, &mvc[0], &vp8_default_mv_context[0], &vp8_mv_update_probs[0], cpi->MVcount[0], 0, &flags[0]
- );
- write_component_probs(
- w, &mvc[1], &vp8_default_mv_context[1], &vp8_mv_update_probs[1], cpi->MVcount[1], 1, &flags[1]
- );
-
- if (flags[0] || flags[1])
- vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flags);
-
-#ifdef ENTROPY_STATS
- active_section = 5;
-#endif
-}
-
-
-static void encode_mvcomponent_hp(
- vp8_writer *const w,
- const int v,
- const struct mv_context_hp *mvc
-) {
- const vp8_prob *p = mvc->prob;
- const int x = v < 0 ? -v : v;
-
- if (x < mvnum_short_hp) { // Small
- vp8_write(w, 0, p [mvpis_short_hp]);
- vp8_treed_write(w, vp8_small_mvtree_hp, p + MVPshort_hp, x,
- mvnum_short_bits_hp);
- if (!x)
- return; // no sign bit
- } else { // Large
- int i = 0;
-
- vp8_write(w, 1, p [mvpis_short_hp]);
-
- do
- vp8_write(w, (x >> i) & 1, p [MVPbits_hp + i]);
-
- while (++i < mvnum_short_bits_hp);
-
- i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
-
- do
- vp8_write(w, (x >> i) & 1, p [MVPbits_hp + i]);
-
- while (--i > mvnum_short_bits_hp);
-
- if (x & ~((2 << mvnum_short_bits_hp) - 1))
- vp8_write(w, (x >> mvnum_short_bits_hp) & 1,
- p [MVPbits_hp + mvnum_short_bits_hp]);
- }
-
- vp8_write(w, v < 0, p [MVPsign_hp]);
-}
-
-void vp8_encode_motion_vector_hp(vp8_writer *w, const MV *mv,
- const MV_CONTEXT_HP *mvc) {
-
- encode_mvcomponent_hp(w, mv->row, &mvc[0]);
- encode_mvcomponent_hp(w, mv->col, &mvc[1]);
-}
-
-
-static unsigned int cost_mvcomponent_hp(const int v,
- const struct mv_context_hp *mvc) {
- const vp8_prob *p = mvc->prob;
- const int x = v; // v<0? -v:v;
- unsigned int cost;
-
- if (x < mvnum_short_hp) {
- cost = vp8_cost_zero(p [mvpis_short_hp])
- + vp8_treed_cost(vp8_small_mvtree_hp, p + MVPshort_hp, x,
- mvnum_short_bits_hp);
-
- if (!x)
- return cost;
- } else {
- int i = 0;
- cost = vp8_cost_one(p [mvpis_short_hp]);
-
- do
- cost += vp8_cost_bit(p [MVPbits_hp + i], (x >> i) & 1);
-
- while (++i < mvnum_short_bits_hp);
-
- i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
-
- do
- cost += vp8_cost_bit(p [MVPbits_hp + i], (x >> i) & 1);
-
- while (--i > mvnum_short_bits_hp);
-
- if (x & ~((2 << mvnum_short_bits_hp) - 1))
- cost += vp8_cost_bit(p [MVPbits_hp + mvnum_short_bits_hp],
- (x >> mvnum_short_bits_hp) & 1);
- }
-
- return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
-}
-
-void vp8_build_component_cost_table_hp(int *mvcost[2],
- const MV_CONTEXT_HP *mvc,
- int mvc_flag[2]) {
- int i = 1; // -mv_max;
- unsigned int cost0 = 0;
- unsigned int cost1 = 0;
-
- vp8_clear_system_state();
-
- i = 1;
-
- if (mvc_flag[0]) {
- mvcost [0] [0] = cost_mvcomponent_hp(0, &mvc[0]);
-
- do {
- // mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
- cost0 = cost_mvcomponent_hp(i, &mvc[0]);
-
- mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign_hp]);
- mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign_hp]);
- } while (++i <= mv_max_hp);
- }
-
- i = 1;
-
- if (mvc_flag[1]) {
- mvcost [1] [0] = cost_mvcomponent_hp(0, &mvc[1]);
-
- do {
- // mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
- cost1 = cost_mvcomponent_hp(i, &mvc[1]);
-
- mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign_hp]);
- mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign_hp]);
- } while (++i <= mv_max_hp);
- }
-}
-
-
-static void write_component_probs_hp(
- vp8_writer *const w,
- struct mv_context_hp *cur_mvc,
- const struct mv_context_hp *default_mvc_,
- const struct mv_context_hp *update_mvc,
- const unsigned int events [MVvals_hp],
- unsigned int rc,
- int *updated
-) {
- vp8_prob *Pcur = cur_mvc->prob;
- const vp8_prob *default_mvc = default_mvc_->prob;
- const vp8_prob *Pupdate = update_mvc->prob;
- unsigned int is_short_ct[2], sign_ct[2];
-
- unsigned int bit_ct [mvlong_width_hp] [2];
-
- unsigned int short_ct [mvnum_short_hp];
- unsigned int short_bct [mvnum_short_hp - 1] [2];
-
- vp8_prob Pnew [MVPcount_hp];
-
- (void) rc;
- vp8_copy_array(Pnew, default_mvc, MVPcount_hp);
-
- vp8_zero(is_short_ct)
- vp8_zero(sign_ct)
- vp8_zero(bit_ct)
- vp8_zero(short_ct)
- vp8_zero(short_bct)
-
-
- // j=0
- {
- const int c = events [mv_max_hp];
-
- is_short_ct [0] += c; // Short vector
- short_ct [0] += c; // Magnitude distribution
- }
-
- // j: 1 ~ mv_max (1023)
- {
- int j = 1;
-
- do {
- const int c1 = events [mv_max_hp + j]; // positive
- const int c2 = events [mv_max_hp - j]; // negative
- const int c = c1 + c2;
- int a = j;
-
- sign_ct [0] += c1;
- sign_ct [1] += c2;
-
- if (a < mvnum_short_hp) {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- } else {
- int k = mvlong_width_hp - 1;
- is_short_ct [1] += c; // Long vector
-
- /* bit 3 not always encoded. */
- do
- bit_ct [k] [(a >> k) & 1] += c;
-
- while (--k >= 0);
- }
- } while (++j <= mv_max_hp);
- }
-
- calc_prob(Pnew + mvpis_short_hp, is_short_ct);
-
- calc_prob(Pnew + MVPsign_hp, sign_ct);
-
- {
- vp8_prob p [mvnum_short_hp - 1]; /* actually only need branch ct */
- int j = 0;
-
- vp8_tree_probs_from_distribution(
- mvnum_short_hp, vp8_small_mvencodings_hp, vp8_small_mvtree_hp,
- p, short_bct, short_ct,
- 256, 1
- );
-
- do
- calc_prob(Pnew + MVPshort_hp + j, short_bct[j]);
-
- while (++j < mvnum_short_hp - 1);
- }
-
- {
- int j = 0;
-
- do
- calc_prob(Pnew + MVPbits_hp + j, bit_ct[j]);
-
- while (++j < mvlong_width_hp);
- }
-
- update(w, is_short_ct, Pcur + mvpis_short_hp, Pnew[mvpis_short_hp],
- *Pupdate++, updated);
-
- update(w, sign_ct, Pcur + MVPsign_hp, Pnew[MVPsign_hp], *Pupdate++,
- updated);
-
- {
- const vp8_prob *const new_p = Pnew + MVPshort_hp;
- vp8_prob *const cur_p = Pcur + MVPshort_hp;
-
- int j = 0;
-
- do
-
- update(w, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
-
- while (++j < mvnum_short_hp - 1);
- }
-
- {
- const vp8_prob *const new_p = Pnew + MVPbits_hp;
- vp8_prob *const cur_p = Pcur + MVPbits_hp;
-
- int j = 0;
-
- do
-
- update(w, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
-
- while (++j < mvlong_width_hp);
- }
-}
-
-void vp8_write_mvprobs_hp(VP8_COMP *cpi) {
- vp8_writer *const w = & cpi->bc;
- MV_CONTEXT_HP *mvc = cpi->common.fc.mvc_hp;
- int flags[2] = {0, 0};
-#ifdef ENTROPY_STATS
- active_section = 4;
-#endif
- write_component_probs_hp(
- w, &mvc[0], &vp8_default_mv_context_hp[0], &vp8_mv_update_probs_hp[0],
- cpi->MVcount_hp[0], 0, &flags[0]
- );
- write_component_probs_hp(
- w, &mvc[1], &vp8_default_mv_context_hp[1], &vp8_mv_update_probs_hp[1],
- cpi->MVcount_hp[1], 1, &flags[1]
- );
-
- if (flags[0] || flags[1])
- vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp,
- (const MV_CONTEXT_HP *)
- cpi->common.fc.mvc_hp, flags);
-#ifdef ENTROPY_STATS
- active_section = 5;
-#endif
-}
-
-#endif /* CONFIG_NEWMVENTROPY */
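
update_nmv(), kept above with the writer now passed in as bc, gates each probability update on whether the bit savings beat the signalling overhead of a flag plus a 7-bit value. A rough sketch of that decision, using a floating-point entropy estimate in place of vp8_cost_branch() and assuming the coded value is forced odd so it survives the shift-by-one round trip:

    #include <math.h>

    typedef unsigned char toy_prob;   /* probability of the zero branch, 1..255 */

    /* Approximate cost, in 1/256-bit units, of coding ct[0] zeros and ct[1]
     * ones with probability-of-zero p/256.  Stand-in for vp8_cost_branch(). */
    static int toy_cost_branch(const unsigned int ct[2], toy_prob p) {
      const double p0 = p / 256.0;
      return (int)(256.0 * (-(double)ct[0] * log2(p0)
                            - (double)ct[1] * log2(1.0 - p0)));
    }

    /* Return 1 (and update *cur_p) when switching to new_p saves more bits
     * than the update costs to signal: one flag bit plus seven value bits. */
    static int toy_maybe_update(const unsigned int ct[2],
                                toy_prob *cur_p, toy_prob new_p) {
      const toy_prob mod_p = (toy_prob)(new_p | 1);  /* assumed: keep value odd */
      const int overhead = 8 * 256;                  /* flag + 7-bit literal */
      if (toy_cost_branch(ct, *cur_p) - toy_cost_branch(ct, mod_p) > overhead) {
        *cur_p = mod_p;   /* caller then writes the flag and mod_p >> 1 */
        return 1;
      }
      return 0;           /* caller writes a zero flag only */
    }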
diff --git a/vp8/encoder/encodemv.h b/vp8/encoder/encodemv.h
index e675fe058..254536580 100644
--- a/vp8/encoder/encodemv.h
+++ b/vp8/encoder/encodemv.h
@@ -14,31 +14,17 @@
#include "onyx_int.h"
-#if CONFIG_NEWMVENTROPY
-void vp8_write_nmvprobs(VP8_COMP *, int usehp);
-void vp8_encode_nmv(vp8_writer *w, const MV *mv, const MV *ref,
- const nmv_context *mvctx);
-void vp8_encode_nmv_fp(vp8_writer *w, const MV *mv, const MV *ref,
- const nmv_context *mvctx, int usehp);
+void vp8_write_nmvprobs(VP8_COMP* const, int usehp, vp8_writer* const);
+void vp8_encode_nmv(vp8_writer* const w, const MV* const mv,
+ const MV* const ref, const nmv_context* const mvctx);
+void vp8_encode_nmv_fp(vp8_writer* const w, const MV* const mv,
+ const MV* const ref, const nmv_context *mvctx,
+ int usehp);
void vp8_build_nmv_cost_table(int *mvjoint,
int *mvcost[2],
const nmv_context *mvctx,
int usehp,
int mvc_flag_v,
int mvc_flag_h);
-#else /* CONFIG_NEWMVENTROPY */
-void vp8_write_mvprobs(VP8_COMP *);
-void vp8_encode_motion_vector(vp8_writer *, const MV *,
- const MV_CONTEXT *);
-void vp8_build_component_cost_table(int *mvcost[2],
- const MV_CONTEXT *mvc,
- int mvc_flag[2]);
-void vp8_write_mvprobs_hp(VP8_COMP *);
-void vp8_encode_motion_vector_hp(vp8_writer *, const MV *,
- const MV_CONTEXT_HP *);
-void vp8_build_component_cost_table_hp(int *mvcost[2],
- const MV_CONTEXT_HP *mvc,
- int mvc_flag[2]);
-#endif /* CONFIG_NEWMVENTROPY */
#endif

diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index bad50b606..09d5a762e 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -350,7 +350,7 @@ void vp8_end_first_pass(VP8_COMP *cpi) {
}
static void zz_motion_search(VP8_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *recon_buffer, int *best_motion_err, int recon_yoffset) {
- MACROBLOCKD *const xd = & x->e_mbd;
+ MACROBLOCKD *const xd = &x->e_mbd;
BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
@@ -364,14 +364,15 @@ static void zz_motion_search(VP8_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *r
ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre);
- VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16)(src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
+ vp8_mse16x16(src_ptr, src_stride, ref_ptr, ref_stride,
+ (unsigned int *)(best_motion_err));
}
static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
int_mv *ref_mv, MV *best_mv,
YV12_BUFFER_CONFIG *recon_buffer,
int *best_motion_err, int recon_yoffset) {
- MACROBLOCKD *const xd = & x->e_mbd;
+ MACROBLOCKD *const xd = &x->e_mbd;
BLOCK *b = &x->block[0];
BLOCKD *d = &x->e_mbd.block[0];
int num00;
@@ -387,7 +388,7 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
int new_mv_mode_penalty = 256;
// override the default variance function to use MSE
- v_fn_ptr.vf = VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16);
+ v_fn_ptr.vf = vp8_mse16x16;
// Set up pointers for this macro block recon buffer
xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
@@ -436,9 +437,9 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
void vp8_first_pass(VP8_COMP *cpi) {
int mb_row, mb_col;
- MACROBLOCK *const x = & cpi->mb;
- VP8_COMMON *const cm = & cpi->common;
- MACROBLOCKD *const xd = & x->e_mbd;
+ MACROBLOCK *const x = &cpi->mb;
+ VP8_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &x->e_mbd;
int recon_yoffset, recon_uvoffset;
YV12_BUFFER_CONFIG *lst_yv12 = &cm->yv12_fb[cm->lst_fb_idx];
@@ -492,12 +493,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
{
int flag[2] = {1, 1};
vp8_init_mv_probs(cm);
-#if CONFIG_NEWMVENTROPY
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
-#else
- vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
- vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cm->fc.mvc_hp, flag);
-#endif
}
// for each macroblock row in image
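
The firstpass hunks above drop the RTCD variance dispatch in favour of calling vp8_mse16x16 directly. For reference, below is a scalar sketch of what a 16x16 MSE routine computes; it is only an illustrative stand-in for the optimized vp8_mse16x16, shaped like the call sites in the diff (the error is also written through the sse pointer, which is how best_motion_err receives it).

    static unsigned int mse16x16_sketch(const unsigned char *src, int src_stride,
                                        const unsigned char *ref, int ref_stride,
                                        unsigned int *sse) {
      /* sum of squared differences over the 16x16 block */
      unsigned int total = 0;
      int r, c;
      for (r = 0; r < 16; r++) {
        for (c = 0; c < 16; c++) {
          const int d = src[c] - ref[c];
          total += (unsigned int)(d * d);
        }
        src += src_stride;
        ref += ref_stride;
      }
      *sse = total;   /* callers such as zz_motion_search read the error here */
      return total;
    }
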
diff --git a/vp8/encoder/generic/csystemdependent.c b/vp8/encoder/generic/csystemdependent.c
index 356e32c3f..44e83fdc7 100644
--- a/vp8/encoder/generic/csystemdependent.c
+++ b/vp8/encoder/generic/csystemdependent.c
@@ -23,80 +23,6 @@ extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER
void vp8_cmachine_specific_config(VP8_COMP *cpi) {
#if CONFIG_RUNTIME_CPU_DETECT
cpi->rtcd.common = &cpi->common.rtcd;
-#if CONFIG_SUPERBLOCKS
- cpi->rtcd.variance.sad32x32 = vp8_sad32x32_c;
-#endif
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
-
-#if CONFIG_SUPERBLOCKS
- cpi->rtcd.variance.sad32x32x3 = vp8_sad32x32x3_c;
-#endif
- cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
- cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
- cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c;
- cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_c;
- cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_c;
-
-#if CONFIG_SUPERBLOCKS
- cpi->rtcd.variance.sad32x32x8 = vp8_sad32x32x8_c;
-#endif
- cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_c;
- cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_c;
- cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_c;
- cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_c;
- cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_c;
-
-#if CONFIG_SUPERBLOCKS
- cpi->rtcd.variance.sad32x32x4d = vp8_sad32x32x4d_c;
-#endif
- cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_c;
- cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_c;
- cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_c;
- cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_c;
- cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_c;
-#if ARCH_X86 || ARCH_X86_64
- cpi->rtcd.variance.copy32xn = vp8_copy32xn_c;
-#endif
- cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_c;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_c;
-#if CONFIG_SUPERBLOCKS
- cpi->rtcd.variance.var32x32 = vp8_variance32x32_c;
-#endif
-
- cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
- cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
-#if CONFIG_SUPERBLOCKS
- cpi->rtcd.variance.subpixvar32x32 = vp8_sub_pixel_variance32x32_c;
-#endif
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
-#if CONFIG_SUPERBLOCKS
- cpi->rtcd.variance.halfpixvar32x32_h = vp8_variance_halfpixvar32x32_h_c;
-#endif
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
-#if CONFIG_SUPERBLOCKS
- cpi->rtcd.variance.halfpixvar32x32_v = vp8_variance_halfpixvar32x32_v_c;
-#endif
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
-#if CONFIG_SUPERBLOCKS
- cpi->rtcd.variance.halfpixvar32x32_hv = vp8_variance_halfpixvar32x32_hv_c;
-#endif
- cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;
-#if CONFIG_SUPERBLOCKS
- cpi->rtcd.variance.subpixmse32x32 = vp8_sub_pixel_mse32x32_c;
-#endif
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
- cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
cpi->rtcd.fdct.short8x8 = vp8_short_fdct8x8_c;
cpi->rtcd.fdct.short16x16 = vp8_short_fdct16x16_c;
@@ -118,16 +44,11 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi) {
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
- cpi->rtcd.variance.satd16x16 = vp8_satd16x16_c;
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.fast8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.walsh_short4x4 = vp8_short_walsh4x4_c;
-#if CONFIG_INTERNAL_STATS
- cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
- cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
-#endif
#endif
vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
diff --git a/vp8/encoder/mbgraph.c b/vp8/encoder/mbgraph.c
index 180ee5870..2eecfcdad 100644
--- a/vp8/encoder/mbgraph.c
+++ b/vp8/encoder/mbgraph.c
@@ -83,10 +83,8 @@ static unsigned int do_16x16_motion_iteration
vp8_set_mbmode_and_mvs(x, NEWMV, dst_mv);
vp8_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
- // VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
- best_err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
- (xd->dst.y_buffer, xd->dst.y_stride,
- xd->predictor, 16, INT_MAX);
+ best_err = vp8_sad16x16(xd->dst.y_buffer, xd->dst.y_stride,
+ xd->predictor, 16, INT_MAX);
/* restore UMV window */
x->mv_col_min = tmp_col_min;
@@ -130,11 +128,8 @@ static int do_16x16_motion_search
// FIXME should really use something like near/nearest MV and/or MV prediction
xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
xd->pre.y_stride = ref->y_stride;
- // VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
- err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
- (ref->y_buffer + mb_y_offset,
- ref->y_stride, xd->dst.y_buffer,
- xd->dst.y_stride, INT_MAX);
+ err = vp8_sad16x16(ref->y_buffer + mb_y_offset, ref->y_stride,
+ xd->dst.y_buffer, xd->dst.y_stride, INT_MAX);
dst_mv->as_int = 0;
// Test last reference frame using the previous best mv as the
@@ -193,10 +188,8 @@ static int do_16x16_zerozero_search
xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
xd->pre.y_stride = ref->y_stride;
// VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
- err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
- (ref->y_buffer + mb_y_offset,
- ref->y_stride, xd->dst.y_buffer,
- xd->dst.y_stride, INT_MAX);
+ err = vp8_sad16x16(ref->y_buffer + mb_y_offset, ref->y_stride,
+ xd->dst.y_buffer, xd->dst.y_stride, INT_MAX);
dst_mv->as_int = 0;
@@ -221,11 +214,8 @@ static int find_best_16x16_intra
xd->mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_mby(xd);
- // VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
- err = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16)
- (xd->predictor, 16,
- buf->y_buffer + mb_y_offset,
- buf->y_stride, best_err);
+ err = vp8_sad16x16(xd->predictor, 16, buf->y_buffer + mb_y_offset,
+ buf->y_stride, best_err);
// find best
if (err < best_err) {
best_err = err;
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index a6cf2f18b..210887491 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -47,15 +47,9 @@ int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
-#if CONFIG_NEWMVENTROPY
return ((mvjcost[vp8_get_mv_joint(v)] +
mvcost[0][v.row] + mvcost[1][v.col]) *
Weight) >> 7;
-#else
- return ((mvcost[0][v.row >> (ishp == 0)] +
- mvcost[1][v.col >> (ishp == 0)])
- * Weight) >> 7;
-#endif
}
static int mv_err_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
@@ -64,14 +58,9 @@ static int mv_err_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
-#if CONFIG_NEWMVENTROPY
return ((mvjcost[vp8_get_mv_joint(v)] +
mvcost[0][v.row] + mvcost[1][v.col]) *
error_per_bit + 128) >> 8;
-#else
- return ((mvcost[0][v.row >> (ishp == 0)] +
- mvcost[1][v.col >> (ishp == 0)]) * error_per_bit + 128) >> 8;
-#endif
}
return 0;
}
@@ -83,14 +72,9 @@ static int mvsad_err_cost(int_mv *mv, int_mv *ref, DEC_MVSADCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
-#if CONFIG_NEWMVENTROPY
return ((mvjsadcost[vp8_get_mv_joint(v)] +
mvsadcost[0][v.row] + mvsadcost[1][v.col]) *
error_per_bit + 128) >> 8;
-#else
- return ((mvsadcost[0][v.row] + mvsadcost[1][v.col])
- * error_per_bit + 128) >> 8;
-#endif
}
return 0;
}
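
The three cost helpers above now keep only the joint-plus-component form of the motion-vector rate estimate. The arithmetic is a fixed-point weighting: look up the bit cost of the joint class and of each component delta, multiply by the per-bit weight, and shift down by 8 with +128 added for round-to-nearest (vp8_mv_bit_cost uses a shift of 7). A self-contained sketch with toy tables, not the encoder's trained costs:

    #include <stdio.h>

    /* joint class: 0 = zero MV, 1 = col-only, 2 = row-only, 3 = both nonzero,
     * matching the mvjcost indexing used by the MVC() macro further below. */
    static int mv_err_cost_sketch(int drow, int dcol, const int *jointcost,
                                  const int *rowcost, const int *colcost,
                                  int error_per_bit) {
      const int joint = (drow != 0) * 2 + (dcol != 0);
      const int bits = jointcost[joint] + rowcost[drow] + colcost[dcol];
      return (bits * error_per_bit + 128) >> 8;  /* +128 rounds to nearest */
    }

    int main(void) {
      /* toy tables centred on index 8 so negative deltas index below it */
      static int rowtab[17], coltab[17];
      const int jointtab[4] = {0, 200, 200, 300};
      int i;
      for (i = 0; i < 17; i++)
        rowtab[i] = coltab[i] = 100 * (i > 8 ? i - 8 : 8 - i);
      printf("%d\n", mv_err_cost_sketch(-3, 2, jointtab, rowtab + 8,
                                        coltab + 8, 64));
      return 0;
    }

The real tables are built by vp8_build_nmv_cost_table and, as the onyx_if.c hunks later in this diff show, are addressed from their midpoint (nmvcosts[...][MV_MAX]) so negative deltas index below it; the toy tables imitate that with the +8 offset.
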
@@ -220,35 +204,42 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
* could reduce the area.
*/
-#if CONFIG_NEWMVENTROPY
/* estimated cost of a motion vector (r,c) */
-#define MVC(r,c) \
- (mvcost ? \
- ((mvjcost[((r)!=rr)*2 + ((c)!=rc)] + \
- mvcost[0][((r)-rr)] + mvcost[1][((c)-rc)]) * error_per_bit + 128 )>>8 : 0)
-#else
-#define MVC(r,c) \
- (mvcost ? \
- ((mvcost[0][((r)-rr)>>(xd->allow_high_precision_mv==0)] + \
- mvcost[1][((c)-rc)>>(xd->allow_high_precision_mv==0)]) * \
- error_per_bit + 128 )>>8 : 0)
-#endif /* CONFIG_NEWMVENTROPY */
+#define MVC(r, c) \
+ (mvcost ? \
+ ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + \
+ mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \
+ error_per_bit + 128) >> 8 : 0)
-#define SP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
+#define SP(x) (((x) & 7) << 1) // convert motion vector component to offset
+ // for svf calc
-#define IFMVCV(r,c,s,e) \
- if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
+#define IFMVCV(r, c, s, e) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) \
+ s \
+ else \
+ e;
/* pointer to predictor base of a motionvector */
-#define PRE(r,c) (y + (((r)>>3) * y_stride + ((c)>>3) -(offset)))
+#define PRE(r, c) (y + (((r) >> 3) * y_stride + ((c) >> 3) -(offset)))
/* returns subpixel variance error function */
-#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse)
-
-/* checks if (r,c) has better score than previous best */
-#define CHECK_BETTER(v,r,c) \
- IFMVCV(r,c,{thismse = (DIST(r,c)); if((v = MVC(r,c)+thismse) < besterr) \
- { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)
+#define DIST(r, c) \
+ vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, b->src_stride, &sse)
+
+/* checks if (r, c) has better score than previous best */
+#define CHECK_BETTER(v, r, c) \
+ IFMVCV(r, c, { \
+ thismse = (DIST(r, c)); \
+ if ((v = MVC(r, c) + thismse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ }, \
+ v = INT_MAX;)
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
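
Unrolled into plain code, the reformatted CHECK_BETTER/IFMVCV/MVC/DIST macros evaluate one sub-pixel candidate: if (r, c) lies inside the clamped window, its sub-pixel distortion is added to its vector rate cost and it replaces the current best when the sum is lower (out-of-window candidates are scored INT_MAX in the macro version so the caller's comparisons still reject them). A minimal sketch of that step, with dist standing in for vfp->svf and rate for the MVC() term:

    typedef unsigned int (*subpel_dist_fn)(int r, int c, unsigned int *sse);

    static void check_better_sketch(int r, int c, int minr, int maxr,
                                    int minc, int maxc, unsigned int rate,
                                    subpel_dist_fn dist, unsigned int *besterr,
                                    int *br, int *bc, unsigned int *distortion,
                                    unsigned int *sse1) {
      if (c >= minc && c <= maxc && r >= minr && r <= maxr) {
        unsigned int sse;
        const unsigned int thismse = dist(r, c, &sse);
        if (rate + thismse < *besterr) {
          *besterr = rate + thismse;   /* new best combined score */
          *br = r;                     /* remember best position */
          *bc = c;
          *distortion = thismse;
          *sse1 = sse;
        }
      }
    }
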
@@ -307,17 +298,10 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
br = bestmv->as_mv.row << 3;
bc = bestmv->as_mv.col << 3;
hstep = 4;
-#if CONFIG_NEWMVENTROPY
minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << MV_MAX_BITS) - 1));
maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << MV_MAX_BITS) - 1));
minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << MV_MAX_BITS) - 1));
maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << MV_MAX_BITS) - 1));
-#else
- minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width_hp) - 1));
- maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
- minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
- maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
-#endif
tr = br;
tc = bc;
@@ -403,13 +387,11 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
tc = bc;
}
-#if CONFIG_NEWMVENTROPY
if (xd->allow_high_precision_mv) {
usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
} else {
usehp = 0;
}
-#endif
if (usehp) {
hstep >>= 1;
@@ -771,13 +753,11 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
-#if CONFIG_NEWMVENTROPY
if (x->e_mbd.allow_high_precision_mv) {
usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
} else {
usehp = 0;
}
-#endif
if (!usehp)
return bestmse;
@@ -1304,16 +1284,8 @@ int vp8_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1423,16 +1395,8 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1479,7 +1443,8 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
for (t = 0; t < 4; t++)
block_offset[t] = ss[i + t].offset + best_address;
- fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
+ fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
+ sad_array);
for (t = 0; t < 4; t++, i++) {
if (sad_array[t] < bestsad) {
@@ -1631,16 +1596,8 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int col_max = ref_col + distance;
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1735,16 +1692,8 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
unsigned int sad_array[3];
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1872,16 +1821,8 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
unsigned int sad_array[3];
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -2022,16 +1963,8 @@ int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -2106,16 +2039,8 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
diff --git a/vp8/encoder/mcomp.h b/vp8/encoder/mcomp.h
index afca58084..f09106927 100644
--- a/vp8/encoder/mcomp.h
+++ b/vp8/encoder/mcomp.h
@@ -15,21 +15,12 @@
#include "block.h"
#include "variance.h"
-#if CONFIG_NEWMVENTROPY
#define MVCOSTS mvjcost, mvcost
#define MVSADCOSTS mvjsadcost, mvsadcost
#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
#define DEC_MVSADCOSTS int *mvjsadcost, int *mvsadcost[2]
#define NULLMVCOST NULL, NULL
#define XMVCOST x->nmvjointcost, (x->e_mbd.allow_high_precision_mv?x->nmvcost_hp:x->nmvcost)
-#else
-#define MVCOSTS mvcost
-#define MVSADCOSTS mvsadcost
-#define DEC_MVCOSTS int *mvcost[2]
-#define DEC_MVSADCOSTS int *mvsadcost[2]
-#define NULLMVCOST NULL
-#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
-#endif /* CONFIG_NEWMVENTROPY */
#ifdef ENTROPY_STATS
extern void init_mv_ref_counts();
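
With the old non-joint branch removed, mcomp.h keeps only one set of these helper macros. They work as a declaration/forwarding pair: DEC_MVCOSTS expands inside a parameter list, MVCOSTS expands at the matching call site so the same cost tables flow down the search call chain. A compilable toy of the pairing; the _SKETCH names and bodies are placeholders, not encoder code:

    #define MVCOSTS_SKETCH mvjcost, mvcost
    #define DEC_MVCOSTS_SKETCH int *mvjcost, int *mvcost[2]

    static int search_fn_sketch(DEC_MVCOSTS_SKETCH) {
      return mvjcost[0] + mvcost[0][0] + mvcost[1][0];
    }

    static int caller_sketch(DEC_MVCOSTS_SKETCH) {
      /* forwards the same cost tables down the call chain */
      return search_fn_sketch(MVCOSTS_SKETCH);
    }
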
diff --git a/vp8/encoder/modecosts.c b/vp8/encoder/modecosts.c
index b1abd1e2a..23b9973c3 100644
--- a/vp8/encoder/modecosts.c
+++ b/vp8/encoder/modecosts.c
@@ -46,14 +46,11 @@ void vp8_init_mode_costs(VP8_COMP *c) {
vp8_cost_tokens(c->mb.i8x8_mode_costs,
x->fc.i8x8_mode_prob, vp8_i8x8_mode_tree);
-#if CONFIG_SWITCHABLE_INTERP
{
int i;
for (i = 0; i <= VP8_SWITCHABLE_FILTERS; ++i)
- //for (i = 0; i <= 0; ++i)
vp8_cost_tokens((int *)c->mb.switchable_interp_costs[i],
x->fc.switchable_interp_prob[i],
vp8_switchable_interp_tree);
}
-#endif
}
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 717fe96ee..f11ff5936 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -77,7 +77,7 @@ extern void vp8_yv12_copy_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFF
extern void vp8_yv12_copy_src_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
#endif
-int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
+int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
@@ -85,6 +85,7 @@ static void set_default_lf_deltas(VP8_COMP *cpi);
extern const int vp8_gf_interval_table[101];
+#define DEFAULT_INTERP_FILTER EIGHTTAP /* SWITCHABLE for better performance */
#define SEARCH_BEST_FILTER 0 /* to search exhaustively for
best filter */
#define RESET_FOREACH_FILTER 0 /* whether to reset the encoder state
@@ -101,25 +102,14 @@ extern const int vp8_gf_interval_table[101];
#if CONFIG_INTERNAL_STATS
#include "math.h"
-extern double vp8_calc_ssim
-(
- YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- int lumamask,
- double *weight,
- const vp8_variance_rtcd_vtable_t *rtcd
-);
+extern double vp8_calc_ssim(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, int lumamask,
+ double *weight);
-extern double vp8_calc_ssimg
-(
- YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- double *ssim_y,
- double *ssim_u,
- double *ssim_v,
- const vp8_variance_rtcd_vtable_t *rtcd
-);
+extern double vp8_calc_ssimg(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, double *ssim_y,
+ double *ssim_u, double *ssim_v);
#endif
@@ -149,12 +139,10 @@ extern int skip_false_count;
extern int intra_mode_stats[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES];
#endif
-#if CONFIG_NEWMVENTROPY
#ifdef NMV_STATS
extern void init_nmvstats();
extern void print_nmvstats();
#endif
-#endif
#ifdef SPEEDSTATS
unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -608,7 +596,7 @@ static void init_seg_features(VP8_COMP *cpi) {
// DEBUG: Print out the segment id of each MB in the current frame.
static void print_seg_map(VP8_COMP *cpi) {
- VP8_COMMON *cm = & cpi->common;
+ VP8_COMMON *cm = &cpi->common;
int row, col;
int map_index = 0;
FILE *statsfile;
@@ -1282,7 +1270,7 @@ static int vp8_alloc_partition_data(VP8_COMP *cpi) {
}
void vp8_alloc_compressor_data(VP8_COMP *cpi) {
- VP8_COMMON *cm = & cpi->common;
+ VP8_COMMON *cm = &cpi->common;
int width = cm->Width;
int height = cm->Height;
@@ -1363,7 +1351,8 @@ void vp8_alloc_compressor_data(VP8_COMP *cpi) {
vpx_free(cpi->tplist);
- CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cpi->common.mb_rows));
+ CHECK_MEM_ERROR(cpi->tplist,
+ vpx_malloc(sizeof(TOKENLIST) * (cpi->common.mb_rows)));
}
@@ -1629,7 +1618,7 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
cpi->cq_target_quality = cpi->oxcf.cq_level;
if (!cm->use_bilinear_mc_filter)
- cm->mcomp_filter_type = EIGHTTAP;
+ cm->mcomp_filter_type = DEFAULT_INTERP_FILTER;
else
cm->mcomp_filter_type = BILINEAR;
@@ -1699,8 +1688,6 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
#define M_LOG2_E 0.693147180559945309417
#define log2f(x) (log (x) / (float) M_LOG2_E)
-#if CONFIG_NEWMVENTROPY
-
static void cal_nmvjointsadcost(int *mvjointsadcost) {
mvjointsadcost[0] = 600;
mvjointsadcost[1] = 300;
@@ -1738,40 +1725,6 @@ static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
} while (++i <= MV_MAX);
}
-#else
-
-static void cal_mvsadcosts(int *mvsadcost[2]) {
- int i = 1;
-
- mvsadcost [0] [0] = 300;
- mvsadcost [1] [0] = 300;
-
- do {
- double z = 256 * (2 * (log2f(8 * i) + .6));
- mvsadcost [0][i] = (int) z;
- mvsadcost [1][i] = (int) z;
- mvsadcost [0][-i] = (int) z;
- mvsadcost [1][-i] = (int) z;
- } while (++i <= mvfp_max);
-}
-
-static void cal_mvsadcosts_hp(int *mvsadcost[2]) {
- int i = 1;
-
- mvsadcost [0] [0] = 300;
- mvsadcost [1] [0] = 300;
-
- do {
- double z = 256 * (2 * (log2f(8 * i) + .6));
- mvsadcost [0][i] = (int) z;
- mvsadcost [1][i] = (int) z;
- mvsadcost [0][-i] = (int) z;
- mvsadcost [1][-i] = (int) z;
- } while (++i <= mvfp_max_hp);
-}
-
-#endif /* CONFIG_NEWMVENTROPY */
-
VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
int i;
volatile union {
@@ -1823,10 +1776,8 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
#endif
for (i = 0; i < COMP_PRED_CONTEXTS; i++)
cm->prob_comppred[i] = 128;
-#if CONFIG_TX_SELECT
for (i = 0; i < TX_SIZE_MAX - 1; i++)
cm->prob_tx[i] = 128;
-#endif
// Prime the recent reference frame useage counters.
// Hereafter they will be maintained as a sort of moving average
@@ -1887,11 +1838,9 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
vp8_zero(inter_uv_modes);
vp8_zero(inter_b_modes);
#endif
-#if CONFIG_NEWMVENTROPY
#ifdef NMV_STATS
init_nmvstats();
#endif
-#endif
/*Initialize the feed-forward activity masking.*/
cpi->activity_avg = 90 << 12;
@@ -1957,7 +1906,6 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->gf_rate_correction_factor = 1.0;
cpi->twopass.est_max_qcorrection_factor = 1.0;
-#if CONFIG_NEWMVENTROPY
cal_nmvjointsadcost(cpi->mb.nmvjointsadcost);
cpi->mb.nmvcost[0] = &cpi->mb.nmvcosts[0][MV_MAX];
cpi->mb.nmvcost[1] = &cpi->mb.nmvcosts[1][MV_MAX];
@@ -1970,19 +1918,6 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->mb.nmvsadcost_hp[0] = &cpi->mb.nmvsadcosts_hp[0][MV_MAX];
cpi->mb.nmvsadcost_hp[1] = &cpi->mb.nmvsadcosts_hp[1][MV_MAX];
cal_nmvsadcosts_hp(cpi->mb.nmvsadcost_hp);
-#else
- cpi->mb.mvcost[0] = &cpi->mb.mvcosts[0][mv_max + 1];
- cpi->mb.mvcost[1] = &cpi->mb.mvcosts[1][mv_max + 1];
- cpi->mb.mvsadcost[0] = &cpi->mb.mvsadcosts[0][mvfp_max + 1];
- cpi->mb.mvsadcost[1] = &cpi->mb.mvsadcosts[1][mvfp_max + 1];
- cal_mvsadcosts(cpi->mb.mvsadcost);
-
- cpi->mb.mvcost_hp[0] = &cpi->mb.mvcosts_hp[0][mv_max_hp + 1];
- cpi->mb.mvcost_hp[1] = &cpi->mb.mvcosts_hp[1][mv_max_hp + 1];
- cpi->mb.mvsadcost_hp[0] = &cpi->mb.mvsadcosts_hp[0][mvfp_max_hp + 1];
- cpi->mb.mvsadcost_hp[1] = &cpi->mb.mvsadcosts_hp[1][mvfp_max_hp + 1];
- cal_mvsadcosts_hp(cpi->mb.mvsadcost_hp);
-#endif /* CONFIG_NEWMVENTROPY */
for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
@@ -2026,74 +1961,48 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
init_mv_ref_counts();
#endif
+#define BFP(BT, SDF, VF, SVF, SVFHH, SVFHV, SVFHHV, SDX3F, SDX8F, SDX4DF) \
+ cpi->fn_ptr[BT].sdf = SDF; \
+ cpi->fn_ptr[BT].vf = VF; \
+ cpi->fn_ptr[BT].svf = SVF; \
+ cpi->fn_ptr[BT].svf_halfpix_h = SVFHH; \
+ cpi->fn_ptr[BT].svf_halfpix_v = SVFHV; \
+ cpi->fn_ptr[BT].svf_halfpix_hv = SVFHHV; \
+ cpi->fn_ptr[BT].sdx3f = SDX3F; \
+ cpi->fn_ptr[BT].sdx8f = SDX8F; \
+ cpi->fn_ptr[BT].sdx4df = SDX4DF;
+
+
#if CONFIG_SUPERBLOCKS
- cpi->fn_ptr[BLOCK_32X32].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad32x32);
- cpi->fn_ptr[BLOCK_32X32].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var32x32);
- cpi->fn_ptr[BLOCK_32X32].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar32x32);
- cpi->fn_ptr[BLOCK_32X32].svf_halfpix_h = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar32x32_h);
- cpi->fn_ptr[BLOCK_32X32].svf_halfpix_v = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar32x32_v);
- cpi->fn_ptr[BLOCK_32X32].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar32x32_hv);
- cpi->fn_ptr[BLOCK_32X32].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad32x32x3);
- cpi->fn_ptr[BLOCK_32X32].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad32x32x8);
- cpi->fn_ptr[BLOCK_32X32].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad32x32x4d);
+ BFP(BLOCK_32X32, vp8_sad32x32, vp8_variance32x32, vp8_sub_pixel_variance32x32,
+ vp8_variance_halfpixvar32x32_h, vp8_variance_halfpixvar32x32_v,
+ vp8_variance_halfpixvar32x32_hv, vp8_sad32x32x3, vp8_sad32x32x8,
+ vp8_sad32x32x4d)
#endif
- cpi->fn_ptr[BLOCK_16X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16);
- cpi->fn_ptr[BLOCK_16X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16);
- cpi->fn_ptr[BLOCK_16X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x16);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_h);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_v);
- cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_hv);
- cpi->fn_ptr[BLOCK_16X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x3);
- cpi->fn_ptr[BLOCK_16X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x8);
- cpi->fn_ptr[BLOCK_16X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x4d);
-
- cpi->fn_ptr[BLOCK_16X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8);
- cpi->fn_ptr[BLOCK_16X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x8);
- cpi->fn_ptr[BLOCK_16X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x8);
- cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_16X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x3);
- cpi->fn_ptr[BLOCK_16X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x8);
- cpi->fn_ptr[BLOCK_16X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x4d);
-
- cpi->fn_ptr[BLOCK_8X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16);
- cpi->fn_ptr[BLOCK_8X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x16);
- cpi->fn_ptr[BLOCK_8X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x16);
- cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_8X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x3);
- cpi->fn_ptr[BLOCK_8X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x8);
- cpi->fn_ptr[BLOCK_8X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x4d);
-
- cpi->fn_ptr[BLOCK_8X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8);
- cpi->fn_ptr[BLOCK_8X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x8);
- cpi->fn_ptr[BLOCK_8X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x8);
- cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_8X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x3);
- cpi->fn_ptr[BLOCK_8X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x8);
- cpi->fn_ptr[BLOCK_8X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x4d);
-
- cpi->fn_ptr[BLOCK_4X4].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4);
- cpi->fn_ptr[BLOCK_4X4].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var4x4);
- cpi->fn_ptr[BLOCK_4X4].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar4x4);
- cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL;
- cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL;
- cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL;
- cpi->fn_ptr[BLOCK_4X4].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x3);
- cpi->fn_ptr[BLOCK_4X4].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x8);
- cpi->fn_ptr[BLOCK_4X4].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x4d);
+ BFP(BLOCK_16X16, vp8_sad16x16, vp8_variance16x16, vp8_sub_pixel_variance16x16,
+ vp8_variance_halfpixvar16x16_h, vp8_variance_halfpixvar16x16_v,
+ vp8_variance_halfpixvar16x16_hv, vp8_sad16x16x3, vp8_sad16x16x8,
+ vp8_sad16x16x4d)
+
+ BFP(BLOCK_16X8, vp8_sad16x8, vp8_variance16x8, vp8_sub_pixel_variance16x8,
+ NULL, NULL, NULL, vp8_sad16x8x3, vp8_sad16x8x8, vp8_sad16x8x4d)
+
+ BFP(BLOCK_8X16, vp8_sad8x16, vp8_variance8x16, vp8_sub_pixel_variance8x16,
+ NULL, NULL, NULL, vp8_sad8x16x3, vp8_sad8x16x8, vp8_sad8x16x4d)
+
+ BFP(BLOCK_8X8, vp8_sad8x8, vp8_variance8x8, vp8_sub_pixel_variance8x8,
+ NULL, NULL, NULL, vp8_sad8x8x3, vp8_sad8x8x8, vp8_sad8x8x4d)
+
+ BFP(BLOCK_4X4, vp8_sad4x4, vp8_variance4x4, vp8_sub_pixel_variance4x4,
+ NULL, NULL, NULL, vp8_sad4x4x3, vp8_sad4x4x8, vp8_sad4x4x4d)
#if ARCH_X86 || ARCH_X86_64
- cpi->fn_ptr[BLOCK_16X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_16X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_8X16].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_8X8].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
- cpi->fn_ptr[BLOCK_4X4].copymem = VARIANCE_INVOKE(&cpi->rtcd.variance, copy32xn);
+ cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn;
+ cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn;
#endif
cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search);
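
The new BFP() macro above wires every fn_ptr entry for one block size in a single invocation, assigning the variance and SAD routines directly now that the rtcd.variance vtable is gone. A trimmed stand-alone sketch of the same pattern; the types, names, and toy_sad body below are stand-ins, not vp8 definitions:

    typedef unsigned int (*sad_fn_sketch)(const unsigned char *src, int src_stride,
                                          const unsigned char *ref, int ref_stride,
                                          unsigned int max_sad);

    typedef struct { sad_fn_sketch sdf; } fn_table_sketch;

    static unsigned int toy_sad(const unsigned char *src, int src_stride,
                                const unsigned char *ref, int ref_stride,
                                unsigned int max_sad) {
      (void)src; (void)src_stride; (void)ref; (void)ref_stride; (void)max_sad;
      return 0;  /* placeholder; a real SAD sums absolute differences */
    }

    /* one macro call per block size fills that size's entries */
    #define BFP_SKETCH(tbl, BT, SDF) (tbl)[BT].sdf = SDF;

    enum { SK_BLOCK_16X16 = 0, SK_BLOCK_8X8, SK_BLOCK_COUNT };

    static void fill_fn_tables_sketch(fn_table_sketch *tbl) {
      BFP_SKETCH(tbl, SK_BLOCK_16X16, toy_sad)
      BFP_SKETCH(tbl, SK_BLOCK_8X8, toy_sad)
    }
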
@@ -2135,12 +2044,10 @@ void vp8_remove_compressor(VP8_PTR *ptr) {
print_mode_context();
}
#endif
-#if CONFIG_NEWMVENTROPY
#ifdef NMV_STATS
if (cpi->pass != 1)
print_nmvstats();
#endif
-#endif
#if CONFIG_INTERNAL_STATS
@@ -2369,8 +2276,7 @@ void vp8_remove_compressor(VP8_PTR *ptr) {
static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
unsigned char *recon, int recon_stride,
- unsigned int cols, unsigned int rows,
- vp8_variance_rtcd_vtable_t *rtcd) {
+ unsigned int cols, unsigned int rows) {
unsigned int row, col;
uint64_t total_sse = 0;
int diff;
@@ -2379,9 +2285,7 @@ static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
for (col = 0; col + 16 <= cols; col += 16) {
unsigned int sse;
- VARIANCE_INVOKE(rtcd, mse16x16)(orig + col, orig_stride,
- recon + col, recon_stride,
- &sse);
+ vp8_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse);
total_sse += sse;
}
@@ -2433,8 +2337,7 @@ static void generate_psnr_packet(VP8_COMP *cpi) {
pkt.kind = VPX_CODEC_PSNR_PKT;
sse = calc_plane_error(orig->y_buffer, orig->y_stride,
recon->y_buffer, recon->y_stride,
- width, height,
- IF_RTCD(&cpi->rtcd.variance));
+ width, height);
pkt.data.psnr.sse[0] = sse;
pkt.data.psnr.sse[1] = sse;
pkt.data.psnr.samples[0] = width * height;
@@ -2445,8 +2348,7 @@ static void generate_psnr_packet(VP8_COMP *cpi) {
sse = calc_plane_error(orig->u_buffer, orig->uv_stride,
recon->u_buffer, recon->uv_stride,
- width, height,
- IF_RTCD(&cpi->rtcd.variance));
+ width, height);
pkt.data.psnr.sse[0] += sse;
pkt.data.psnr.sse[2] = sse;
pkt.data.psnr.samples[0] += width * height;
@@ -2454,8 +2356,7 @@ static void generate_psnr_packet(VP8_COMP *cpi) {
sse = calc_plane_error(orig->v_buffer, orig->uv_stride,
recon->v_buffer, recon->uv_stride,
- width, height,
- IF_RTCD(&cpi->rtcd.variance));
+ width, height);
pkt.data.psnr.sse[0] += sse;
pkt.data.psnr.sse[3] = sse;
pkt.data.psnr.samples[0] += width * height;
@@ -3033,13 +2934,10 @@ static void encode_frame_to_data_rate
/* list of filters to search over */
int mcomp_filters_to_search[] = {
-#if CONFIG_SWITCHABLE_INTERP
EIGHTTAP, EIGHTTAP_SHARP, SIXTAP, SWITCHABLE
-#else
- EIGHTTAP, EIGHTTAP_SHARP, SIXTAP,
-#endif
};
- int mcomp_filters = sizeof(mcomp_filters_to_search) / sizeof(*mcomp_filters_to_search);
+ int mcomp_filters = sizeof(mcomp_filters_to_search) /
+ sizeof(*mcomp_filters_to_search);
int mcomp_filter_index = 0;
INT64 mcomp_filter_cost[4];
@@ -3264,12 +3162,7 @@ static void encode_frame_to_data_rate
cm->mcomp_filter_type = mcomp_filters_to_search[0];
mcomp_filter_index = 0;
} else {
-#if CONFIG_SWITCHABLE_INTERP
- cm->mcomp_filter_type = SWITCHABLE;
-#else
- cm->mcomp_filter_type =
- (Q < SHARP_FILTER_QTHRESH ? EIGHTTAP_SHARP : EIGHTTAP);
-#endif
+ cm->mcomp_filter_type = DEFAULT_INTERP_FILTER;
}
/* TODO: Decide this more intelligently */
xd->allow_high_precision_mv = (Q < HIGH_PRECISION_MV_QTHRESH);
@@ -3427,8 +3320,7 @@ static void encode_frame_to_data_rate
if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) {
int last_q = Q;
int kf_err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
+ &cm->yv12_fb[cm->new_fb_idx]);
int high_err_target = cpi->ambient_err;
int low_err_target = (cpi->ambient_err >> 1);
@@ -3583,7 +3475,6 @@ static void encode_frame_to_data_rate
if (cpi->is_src_frame_alt_ref)
Loop = FALSE;
-#if CONFIG_SWITCHABLE_INTERP
if (cm->frame_type != KEY_FRAME &&
!sf->search_best_filter &&
cm->mcomp_filter_type == SWITCHABLE) {
@@ -3609,19 +3500,16 @@ static void encode_frame_to_data_rate
if (count[i]) {
cm->mcomp_filter_type = vp8_switchable_interp[i];
Loop = TRUE; /* Make sure to loop since the filter changed */
- //loop_count = -1;
break;
}
}
}
}
-#endif
if (Loop == FALSE && cm->frame_type != KEY_FRAME && sf->search_best_filter) {
if (mcomp_filter_index < mcomp_filters) {
INT64 err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
+ &cm->yv12_fb[cm->new_fb_idx]);
INT64 rate = cpi->projected_frame_size << 8;
mcomp_filter_cost[mcomp_filter_index] =
(RDCOST(cpi->RDMULT, cpi->RDDIV, rate, err));
@@ -3683,8 +3571,7 @@ static void encode_frame_to_data_rate
// the force key frame
if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) {
cpi->ambient_err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
+ &cm->yv12_fb[cm->new_fb_idx]);
}
// This frame's MVs are saved and will be used in next frame's MV
@@ -3757,18 +3644,12 @@ static void encode_frame_to_data_rate
update_reference_frames(cm);
vp8_copy(cpi->common.fc.coef_counts, cpi->coef_counts);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cpi->common.fc.hybrid_coef_counts, cpi->hybrid_coef_counts);
-#endif
vp8_copy(cpi->common.fc.coef_counts_8x8, cpi->coef_counts_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cpi->common.fc.hybrid_coef_counts_8x8, cpi->hybrid_coef_counts_8x8);
-#endif
vp8_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cpi->common.fc.hybrid_coef_counts_16x16,
cpi->hybrid_coef_counts_16x16);
-#endif
vp8_adapt_coef_probs(&cpi->common);
if (cpi->common.frame_type != KEY_FRAME) {
vp8_copy(cpi->common.fc.ymode_counts, cpi->ymode_count);
@@ -3779,14 +3660,8 @@ static void encode_frame_to_data_rate
vp8_copy(cpi->common.fc.mbsplit_counts, cpi->mbsplit_count);
vp8_adapt_mode_probs(&cpi->common);
-#if CONFIG_NEWMVENTROPY
cpi->common.fc.NMVcount = cpi->NMVcount;
vp8_adapt_nmv_probs(&cpi->common, cpi->mb.e_mbd.allow_high_precision_mv);
-#else
- vp8_copy(cpi->common.fc.MVcount, cpi->MVcount);
- vp8_copy(cpi->common.fc.MVcount_hp, cpi->MVcount_hp);
- vp8_adapt_mv_probs(&cpi->common);
-#endif /* CONFIG_NEWMVENTROPY */
vp8_update_mode_context(&cpi->common);
}
@@ -3902,8 +3777,7 @@ static void encode_frame_to_data_rate
vp8_clear_system_state(); // __asm emms;
recon_err = vp8_calc_ss_err(cpi->Source,
- &cm->yv12_fb[cm->new_fb_idx],
- IF_RTCD(&cpi->rtcd.variance));
+ &cm->yv12_fb[cm->new_fb_idx]);
if (cpi->twopass.total_left_stats->coded_error != 0.0)
fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d"
@@ -4389,16 +4263,16 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
int64_t sq_error;
ye = calc_plane_error(orig->y_buffer, orig->y_stride,
- recon->y_buffer, recon->y_stride, orig->y_width, orig->y_height,
- IF_RTCD(&cpi->rtcd.variance));
+ recon->y_buffer, recon->y_stride, orig->y_width,
+ orig->y_height);
ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
- recon->u_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
+ recon->u_buffer, recon->uv_stride, orig->uv_width,
+ orig->uv_height);
ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
- recon->v_buffer, recon->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
+ recon->v_buffer, recon->uv_stride, orig->uv_width,
+ orig->uv_height);
sq_error = ye + ue + ve;
@@ -4418,16 +4292,16 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
vp8_clear_system_state();
ye = calc_plane_error(orig->y_buffer, orig->y_stride,
- pp->y_buffer, pp->y_stride, orig->y_width, orig->y_height,
- IF_RTCD(&cpi->rtcd.variance));
+ pp->y_buffer, pp->y_stride, orig->y_width,
+ orig->y_height);
ue = calc_plane_error(orig->u_buffer, orig->uv_stride,
- pp->u_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
+ pp->u_buffer, pp->uv_stride, orig->uv_width,
+ orig->uv_height);
ve = calc_plane_error(orig->v_buffer, orig->uv_stride,
- pp->v_buffer, pp->uv_stride, orig->uv_width, orig->uv_height,
- IF_RTCD(&cpi->rtcd.variance));
+ pp->v_buffer, pp->uv_stride, orig->uv_width,
+ orig->uv_height);
sq_error = ye + ue + ve;
@@ -4440,8 +4314,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
cpi->totalp += frame_psnr2;
frame_ssim2 = vp8_calc_ssim(cpi->Source,
- &cm->post_proc_buffer, 1, &weight,
- IF_RTCD(&cpi->rtcd.variance));
+ &cm->post_proc_buffer, 1, &weight);
cpi->summed_quality += frame_ssim2 * weight;
cpi->summed_weights += weight;
@@ -4460,7 +4333,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon
if (cpi->b_calculate_ssimg) {
double y, u, v, frame_all;
frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show,
- &y, &u, &v, IF_RTCD(&cpi->rtcd.variance));
+ &y, &u, &v);
cpi->total_ssimg_y += y;
cpi->total_ssimg_u += u;
cpi->total_ssimg_v += v;
@@ -4603,19 +4476,19 @@ int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert
-int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd) {
+int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) {
int i, j;
int Total = 0;
unsigned char *src = source->y_buffer;
unsigned char *dst = dest->y_buffer;
- (void)rtcd;
// Loop through the Y plane raw and reconstruction data summing (square differences)
for (i = 0; i < source->y_height; i += 16) {
for (j = 0; j < source->y_width; j += 16) {
unsigned int sse;
- Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
+ Total += vp8_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
+ &sse);
}
src += 16 * source->y_stride;
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index 1e7494039..ab6802509 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -60,17 +60,10 @@
#define VP8_TEMPORAL_ALT_REF 1
typedef struct {
-#if CONFIG_NEWMVENTROPY
nmv_context nmvc;
int nmvjointcost[MV_JOINTS];
int nmvcosts[2][MV_VALS];
int nmvcosts_hp[2][MV_VALS];
-#else
- MV_CONTEXT mvc[2];
- int mvcosts[2][MVvals + 1];
- MV_CONTEXT_HP mvc_hp[2];
- int mvcosts_hp[2][MVvals_hp + 1];
-#endif
#ifdef MODE_STATS
// Stats
@@ -97,24 +90,18 @@ typedef struct {
vp8_prob coef_probs[BLOCK_TYPES]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM
vp8_prob hybrid_coef_probs[BLOCK_TYPES]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#endif
vp8_prob coef_probs_8x8[BLOCK_TYPES_8X8]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_prob hybrid_coef_probs_8x8[BLOCK_TYPES_8X8]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#endif
vp8_prob coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob hybrid_coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
-#endif
vp8_prob ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
vp8_prob uv_mode_prob [VP8_YMODES][VP8_UV_MODES - 1];
@@ -123,10 +110,8 @@ typedef struct {
vp8_prob sub_mv_ref_prob [SUBMVREF_COUNT][VP8_SUBMVREFS - 1];
vp8_prob mbsplit_prob [VP8_NUMMBSPLITS - 1];
-#if CONFIG_SWITCHABLE_INTERP
vp8_prob switchable_interp_prob[VP8_SWITCHABLE_FILTERS + 1]
[VP8_SWITCHABLE_FILTERS - 1];
-#endif
int mv_ref_ct[6][4][2];
int mode_context[6][4];
@@ -365,7 +350,6 @@ typedef struct {
typedef struct VP8_ENCODER_RTCD {
VP8_COMMON_RTCD *common;
- vp8_variance_rtcd_vtable_t variance;
vp8_fdct_rtcd_vtable_t fdct;
vp8_encodemb_rtcd_vtable_t encodemb;
vp8_search_rtcd_vtable_t search;
@@ -373,10 +357,10 @@ typedef struct VP8_ENCODER_RTCD {
} VP8_ENCODER_RTCD;
enum {
- BLOCK_16X8,
- BLOCK_8X16,
- BLOCK_8X8,
- BLOCK_4X4,
+ BLOCK_16X8 = PARTITIONING_16X8,
+ BLOCK_8X16 = PARTITIONING_8X16,
+ BLOCK_8X8 = PARTITIONING_8X8,
+ BLOCK_4X4 = PARTITIONING_4X4,
BLOCK_16X16,
BLOCK_MAX_SEGMENTS,
BLOCK_32X32 = BLOCK_MAX_SEGMENTS,
@@ -420,9 +404,6 @@ typedef struct VP8_COMP {
MACROBLOCK mb;
VP8_COMMON common;
- vp8_writer bc, bc2;
- // bool_writer *bc2;
-
VP8_CONFIG oxcf;
struct lookahead_ctx *lookahead;
@@ -468,13 +449,11 @@ typedef struct VP8_COMP {
int rd_prediction_type_threshes[4][NB_PREDICTION_TYPES];
int comp_pred_count[COMP_PRED_CONTEXTS];
int single_pred_count[COMP_PRED_CONTEXTS];
-#if CONFIG_TX_SELECT
// FIXME contextualize
int txfm_count[TX_SIZE_MAX];
int txfm_count_8x8p[TX_SIZE_MAX - 1];
int64_t rd_tx_select_diff[NB_TXFM_MODES];
int rd_tx_select_threshes[4][NB_TXFM_MODES];
-#endif
int RDMULT;
int RDDIV;
@@ -566,39 +545,28 @@ typedef struct VP8_COMP {
// int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];
-#if CONFIG_NEWMVENTROPY
nmv_context_counts NMVcount;
-#else
- unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */
- unsigned int MVcount_hp [2] [MVvals_hp]; /* (row,col) MV cts this frame */
-#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM
unsigned int hybrid_coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
unsigned int coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM8X8
unsigned int hybrid_coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
-#endif
int gfu_boost;
int last_boost;
@@ -783,10 +751,8 @@ typedef struct VP8_COMP {
int pred_filter_on_count;
int pred_filter_off_count;
#endif
-#if CONFIG_SWITCHABLE_INTERP
- unsigned int switchable_interp_count[VP8_SWITCHABLE_FILTERS+1]
+ unsigned int switchable_interp_count[VP8_SWITCHABLE_FILTERS + 1]
[VP8_SWITCHABLE_FILTERS];
-#endif
#if CONFIG_NEW_MVREF
unsigned int best_ref_index_counts[MAX_MV_REFS];
diff --git a/vp8/encoder/picklpf.c b/vp8/encoder/picklpf.c
index 954997889..57bd41468 100644
--- a/vp8/encoder/picklpf.c
+++ b/vp8/encoder/picklpf.c
@@ -21,7 +21,8 @@
#include "vpx_ports/arm.h"
#endif
-extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
+extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest);
#if HAVE_ARMV7
extern void vp8_yv12_copy_frame_yonly_no_extend_frame_borders_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc);
#endif
@@ -71,7 +72,8 @@ vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst
vpx_memcpy(dst_y, src_y, ystride * (linestocopy + 16));
}
-static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, int Fraction, const vp8_variance_rtcd_vtable_t *rtcd) {
+static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
+ YV12_BUFFER_CONFIG *dest, int Fraction) {
int i, j;
int Total = 0;
int srcoffset, dstoffset;
@@ -79,7 +81,6 @@ static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONF
unsigned char *dst = dest->y_buffer;
int linestocopy = (source->y_height >> (Fraction + 4));
- (void)rtcd;
if (linestocopy < 1)
linestocopy = 1;
@@ -97,7 +98,8 @@ static int vp8_calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONF
for (i = 0; i < linestocopy; i += 16) {
for (j = 0; j < source->y_width; j += 16) {
unsigned int sse;
- Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse);
+ Total += vp8_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride,
+ &sse);
}
src += 16 * source->y_stride;
@@ -179,7 +181,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
// Get the err using the previous frame's filter value.
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
- best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
+ best_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3);
// Re-instate the unfiltered frame
vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
@@ -192,7 +194,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
// Get the err for filtered frame
- filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3);
// Re-instate the unfiltered frame
vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
@@ -221,7 +223,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
// Get the err for filtered frame
- filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3, IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_partial_ssl_err(sd, cm->frame_to_show, 3);
// Re-instate the unfiltered frame
vp8_yv12_copy_partial_frame_ptr(&cpi->last_frame_uf, cm->frame_to_show, 3);
@@ -308,7 +310,7 @@ void vp8cx_pick_filter_level_sg(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi, int segme
vp8cx_set_alt_lf_level(cpi, filt_mid);
vp8_loop_filter_frame_segment(cm, &cpi->mb.e_mbd, filt_mid, segment);
- best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ best_err = vp8_calc_ss_err(sd, cm->frame_to_show);
filt_best = filt_mid;
// Re-instate the unfiltered frame
@@ -348,7 +350,7 @@ void vp8cx_pick_filter_level_sg(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi, int segme
vp8cx_set_alt_lf_level(cpi, filt_low);
vp8_loop_filter_frame_segment(cm, &cpi->mb.e_mbd, filt_low, segment);
- filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
// Re-instate the unfiltered frame
#if HAVE_ARMV7
@@ -383,7 +385,7 @@ void vp8cx_pick_filter_level_sg(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi, int segme
vp8cx_set_alt_lf_level(cpi, filt_high);
vp8_loop_filter_frame_segment(cm, &cpi->mb.e_mbd, filt_high, segment);
- filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
// Re-instate the unfiltered frame
#if HAVE_ARMV7
@@ -517,7 +519,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
vp8cx_set_alt_lf_level(cpi, filt_mid);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
- best_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ best_err = vp8_calc_ss_err(sd, cm->frame_to_show);
filt_best = filt_mid;
// Re-instate the unfiltered frame
@@ -557,7 +559,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
vp8cx_set_alt_lf_level(cpi, filt_low);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
- filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
// Re-instate the unfiltered frame
#if HAVE_ARMV7
@@ -592,7 +594,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
vp8cx_set_alt_lf_level(cpi, filt_high);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
- filt_err = vp8_calc_ss_err(sd, cm->frame_to_show, IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
// Re-instate the unfiltered frame
#if HAVE_ARMV7
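
All the picklpf call sites above keep their structure and only lose the rtcd argument: a candidate loop-filter level is applied, the reconstruction error against the source is measured with vp8_calc_ss_err (or the partial variant), and the search narrows toward the level with the lowest error. A simplified sketch of that probe-down/probe-up, halving-step pattern; it is illustrative only, not the exact vp8cx_pick_filter_level logic, and err_at stands in for "filter the frame, then measure the error":

    static int pick_filter_level_sketch(int filt_mid, int filt_min, int filt_max,
                                        int (*err_at)(int level)) {
      int best = filt_mid;
      int best_err = err_at(filt_mid);
      int step = (filt_max - filt_min) / 4;
      while (step > 0) {
        const int lo = best - step > filt_min ? best - step : filt_min;
        const int hi = best + step < filt_max ? best + step : filt_max;
        const int err_lo = err_at(lo);
        const int err_hi = err_at(hi);
        if (err_lo < best_err) {          /* lower level filters less */
          best_err = err_lo;
          best = lo;
        } else if (err_hi < best_err) {   /* higher level filters more */
          best_err = err_hi;
          best = hi;
        }
        step >>= 1;                       /* narrow the search window */
      }
      return best;
    }
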
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index 8ae3029ee..16b4e6e1d 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -21,8 +21,7 @@
extern int enc_debug;
#endif
-#if CONFIG_HYBRIDTRANSFORM
-void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
+void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
@@ -39,7 +38,7 @@ void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
int const *pt_scan ;
- switch(d->bmi.as_mode.tx_type) {
+ switch (tx_type) {
case ADST_DCT :
pt_scan = vp8_row_scan;
break;
@@ -85,7 +84,6 @@ void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
d->eob = eob + 1;
}
-#endif
void vp8_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
@@ -653,12 +651,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
}
/* save this macroblock QIndex for vp8_update_zbin_extra() */
- x->q_index = QIndex;
+ x->e_mbd.q_index = QIndex;
}
void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
int i;
- int QIndex = x->q_index;
+ int QIndex = x->e_mbd.q_index;
int zbin_extra;
// Y
diff --git a/vp8/encoder/quantize.h b/vp8/encoder/quantize.h
index ad3a3fc0e..e39433fc3 100644
--- a/vp8/encoder/quantize.h
+++ b/vp8/encoder/quantize.h
@@ -30,9 +30,9 @@
#include "arm/quantize_arm.h"
#endif
-#if CONFIG_HYBRIDTRANSFORM
-extern prototype_quantize_block(vp8_ht_quantize_b_4x4);
-#endif
+#define prototype_quantize_block_type(sym) \
+ void (sym)(BLOCK *b, BLOCKD *d, TX_TYPE type)
+extern prototype_quantize_block_type(vp8_ht_quantize_b_4x4);
#ifndef vp8_quantize_quantb_4x4
#define vp8_quantize_quantb_4x4 vp8_regular_quantize_b_4x4
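
Since the hybrid-transform quantizer is no longer behind a config flag, quantize.h gains a prototype macro for its three-argument form. The expansion is mechanical; for the declaration shown above it produces:

    /* extern prototype_quantize_block_type(vp8_ht_quantize_b_4x4);
       expands to the following declaration: */
    extern void (vp8_ht_quantize_b_4x4)(BLOCK *b, BLOCKD *d, TX_TYPE type);
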
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index 2b5f699b6..cc3c82e74 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -123,7 +123,7 @@ int vp8_bits_per_mb(FRAME_TYPE frame_type, int qindex) {
void vp8_save_coding_context(VP8_COMP *cpi) {
- CODING_CONTEXT *const cc = & cpi->coding_context;
+ CODING_CONTEXT *const cc = &cpi->coding_context;
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
@@ -132,17 +132,10 @@ void vp8_save_coding_context(VP8_COMP *cpi) {
// intended for use in a re-code loop in vp8_compress_frame where the
// quantizer value is adjusted between loop iterations.
-#if CONFIG_NEWMVENTROPY
cc->nmvc = cm->fc.nmvc;
vp8_copy(cc->nmvjointcost, cpi->mb.nmvjointcost);
vp8_copy(cc->nmvcosts, cpi->mb.nmvcosts);
vp8_copy(cc->nmvcosts_hp, cpi->mb.nmvcosts_hp);
-#else
- vp8_copy(cc->mvc, cm->fc.mvc);
- vp8_copy(cc->mvcosts, cpi->mb.mvcosts);
- vp8_copy(cc->mvc_hp, cm->fc.mvc_hp);
- vp8_copy(cc->mvcosts_hp, cpi->mb.mvcosts_hp);
-#endif
vp8_copy(cc->mv_ref_ct, cm->fc.mv_ref_ct);
vp8_copy(cc->mode_context, cm->fc.mode_context);
@@ -178,41 +171,26 @@ void vp8_save_coding_context(VP8_COMP *cpi) {
vp8_copy(cc->last_mode_lf_deltas, xd->last_mode_lf_deltas);
vp8_copy(cc->coef_probs, cm->fc.coef_probs);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cc->hybrid_coef_probs, cm->fc.hybrid_coef_probs);
-#endif
vp8_copy(cc->coef_probs_8x8, cm->fc.coef_probs_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8);
-#endif
vp8_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16);
-#endif
-#if CONFIG_SWITCHABLE_INTERP
vp8_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
-#endif
}
void vp8_restore_coding_context(VP8_COMP *cpi) {
- CODING_CONTEXT *const cc = & cpi->coding_context;
+ CODING_CONTEXT *const cc = &cpi->coding_context;
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
// Restore key state variables to the snapshot state stored in the
// previous call to vp8_save_coding_context.
-#if CONFIG_NEWMVENTROPY
cm->fc.nmvc = cc->nmvc;
vp8_copy(cpi->mb.nmvjointcost, cc->nmvjointcost);
vp8_copy(cpi->mb.nmvcosts, cc->nmvcosts);
vp8_copy(cpi->mb.nmvcosts_hp, cc->nmvcosts_hp);
-#else
- vp8_copy(cm->fc.mvc, cc->mvc);
- vp8_copy(cpi->mb.mvcosts, cc->mvcosts);
- vp8_copy(cm->fc.mvc_hp, cc->mvc_hp);
- vp8_copy(cpi->mb.mvcosts_hp, cc->mvcosts_hp);
-#endif
vp8_copy(cm->fc.mv_ref_ct, cc->mv_ref_ct);
vp8_copy(cm->fc.mode_context, cc->mode_context);
@@ -249,20 +227,12 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {
vp8_copy(xd->last_mode_lf_deltas, cc->last_mode_lf_deltas);
vp8_copy(cm->fc.coef_probs, cc->coef_probs);
-#if CONFIG_HYBRIDTRANSFORM
vp8_copy(cm->fc.hybrid_coef_probs, cc->hybrid_coef_probs);
-#endif
vp8_copy(cm->fc.coef_probs_8x8, cc->coef_probs_8x8);
-#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8);
-#endif
vp8_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16);
-#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16);
-#endif
-#if CONFIG_SWITCHABLE_INTERP
vp8_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
-#endif
}
@@ -275,16 +245,6 @@ void vp8_setup_key_frame(VP8_COMP *cpi) {
vp8_default_bmode_probs(cm->fc.bmode_prob);
vp8_init_mv_probs(& cpi->common);
-#if CONFIG_NEWMVENTROPY == 0
- /* this is not really required */
- {
- int flag[2] = {1, 1};
- vp8_build_component_cost_table(
- cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
- vp8_build_component_cost_table_hp(
- cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cpi->common.fc.mvc_hp, flag);
- }
-#endif
// cpi->common.filter_level = 0; // Reset every key frame.
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8;
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index c82a87d69..97a38dd8b 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -60,10 +60,8 @@ extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
#define INVALID_MV 0x80008000
-#if CONFIG_SWITCHABLE_INTERP
/* Factor to weigh the rate for switchable interp filters */
#define SWITCHABLE_INTERP_RATE_FACTOR 1
-#endif
static const int auto_speed_thresh[17] = {
1000,
@@ -355,37 +353,31 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
cpi->mb.token_costs[TX_4X4],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs,
BLOCK_TYPES);
-#if CONFIG_HYBRIDTRANSFORM
fill_token_costs(
cpi->mb.hybrid_token_costs[TX_4X4],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11])
cpi->common.fc.hybrid_coef_probs,
BLOCK_TYPES);
-#endif
fill_token_costs(
cpi->mb.token_costs[TX_8X8],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_8x8,
BLOCK_TYPES_8X8);
-#if CONFIG_HYBRIDTRANSFORM8X8
fill_token_costs(
cpi->mb.hybrid_token_costs[TX_8X8],
(const vp8_prob( *)[8][PREV_COEF_CONTEXTS][11])
cpi->common.fc.hybrid_coef_probs_8x8,
BLOCK_TYPES_8X8);
-#endif
fill_token_costs(
cpi->mb.token_costs[TX_16X16],
(const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_16x16,
BLOCK_TYPES_16X16);
-#if CONFIG_HYBRIDTRANSFORM16X16
fill_token_costs(
cpi->mb.hybrid_token_costs[TX_16X16],
(const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11])
cpi->common.fc.hybrid_coef_probs_16x16,
BLOCK_TYPES_16X16);
-#endif
/*rough estimate for costing*/
cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
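
fill_token_costs() converts each coefficient probability table (4x4, 8x8 and 16x16, plain and hybrid) into per-token bit costs once per vp8_initialize_rd_consts() call, so that cost_coeffs() can sum precomputed values instead of walking the entropy tree. A rough sketch of the probability-to-cost conversion, assuming a scaled -log2 cost in 1/256-bit units (the real encoder uses a precomputed 256-entry table and its own scaling):

    #include <math.h>

    /* Cost of coding a zero with probability prob/256, in fixed point. */
    static int toy_cost_zero(int prob /* 1..255 */) {
      return (int)(256.0 * -log2(prob / 256.0) + 0.5);
    }

    static int toy_cost_one(int prob) {
      return toy_cost_zero(256 - prob);
    }

    /* Fill a (toy) two-entry cost table, as fill_token_costs() does for every
     * block type / band / context / token combination. */
    static void toy_fill_costs(int costs[2], int prob) {
      costs[0] = toy_cost_zero(prob);
      costs[1] = toy_cost_one(prob);
    }
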
@@ -393,14 +385,12 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
if (cpi->common.frame_type != KEY_FRAME)
{
-#if CONFIG_NEWMVENTROPY
vp8_build_nmv_cost_table(
cpi->mb.nmvjointcost,
cpi->mb.e_mbd.allow_high_precision_mv ?
cpi->mb.nmvcost_hp : cpi->mb.nmvcost,
&cpi->common.fc.nmvc,
cpi->mb.e_mbd.allow_high_precision_mv, 1, 1);
-#endif
}
}
@@ -409,19 +399,6 @@ void vp8_auto_select_speed(VP8_COMP *cpi) {
milliseconds_for_compress = milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;
-#if 0
-
- if (0) {
- FILE *f;
-
- f = fopen("speed.stt", "a");
- fprintf(f, " %8ld %10ld %10ld %10ld\n",
- cpi->common.current_video_frame, cpi->Speed, milliseconds_for_compress, cpi->avg_pick_mode_time);
- fclose(f);
- }
-
-#endif
-
/*
// this is done during parameter valid check
if( cpi->oxcf.cpu_used > 16)
@@ -520,7 +497,7 @@ int vp8_mbuverror_c(MACROBLOCK *mb) {
return error;
}
-int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd) {
+int vp8_uvsse(MACROBLOCK *x) {
unsigned char *uptr, *vptr;
unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
unsigned char *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src);
@@ -551,16 +528,14 @@ int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd) {
vptr = x->e_mbd.pre.v_buffer + offset;
if ((mv_row | mv_col) & 7) {
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
- (mv_col & 7) << 1, (mv_row & 7) << 1, upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
- (mv_col & 7) << 1, (mv_row & 7) << 1, vpred_ptr, uv_stride, &sse1);
+ vp8_sub_pixel_variance8x8(uptr, pre_stride, (mv_col & 7) << 1,
+ (mv_row & 7) << 1, upred_ptr, uv_stride, &sse2);
+ vp8_sub_pixel_variance8x8(vptr, pre_stride, (mv_col & 7) << 1,
+ (mv_row & 7) << 1, vpred_ptr, uv_stride, &sse1);
sse2 += sse1;
} else {
- VARIANCE_INVOKE(rtcd, var8x8)(uptr, pre_stride,
- upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, var8x8)(vptr, pre_stride,
- vpred_ptr, uv_stride, &sse1);
+ vp8_variance8x8(uptr, pre_stride, upred_ptr, uv_stride, &sse2);
+ vp8_variance8x8(vptr, pre_stride, vpred_ptr, uv_stride, &sse1);
sse2 += sse1;
}
return sse2;
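
vp8_uvsse() (renamed from VP8_UVSSE and cut loose from the variance RTCD vtable) estimates the chroma SSE a candidate MV would produce without doing a full reconstruction: if the chroma MV has a sub-pixel component it uses 8x8 sub-pixel variance with (mv & 7) << 1 as the filter offset, otherwise plain 8x8 variance, and returns the sum of the U and V SSEs. A minimal full-pel-only sketch of that accumulation; the sub-pixel interpolation path is omitted and the toy_* names are placeholders:

    /* Toy 8x8 SSE, standing in for the sse output of vp8_variance8x8(). */
    static unsigned int toy_sse8x8(const unsigned char *src, int src_stride,
                                   const unsigned char *ref, int ref_stride) {
      unsigned int sse = 0;
      int r, c;
      for (r = 0; r < 8; r++) {
        for (c = 0; c < 8; c++) {
          const int d = src[c] - ref[c];
          sse += (unsigned int)(d * d);
        }
        src += src_stride;
        ref += ref_stride;
      }
      return sse;
    }

    static unsigned int toy_chroma_sse(const unsigned char *u_pre,
                                       const unsigned char *v_pre, int pre_stride,
                                       const unsigned char *u_pred,
                                       const unsigned char *v_pred, int uv_stride) {
      return toy_sse8x8(u_pre, pre_stride, u_pred, uv_stride) +
             toy_sse8x8(v_pre, pre_stride, v_pred, uv_stride);
    }
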
@@ -607,9 +582,7 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
short *qcoeff_ptr = b->qcoeff;
MACROBLOCKD *xd = &mb->e_mbd;
MB_MODE_INFO *mbmi = &mb->e_mbd.mode_info_context->mbmi;
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = DCT_DCT;
-#endif
int segment_id = mbmi->segment_id;
switch (tx_size) {
@@ -617,55 +590,47 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
scan = vp8_default_zig_zag1d;
band = vp8_coef_bands;
default_eob = 16;
-#if CONFIG_HYBRIDTRANSFORM
- if (type == PLANE_TYPE_Y_WITH_DC &&
- mb->q_index < ACTIVE_HT &&
- mbmi->mode == B_PRED) {
- tx_type = b->bmi.as_mode.tx_type;
- switch (tx_type) {
- case ADST_DCT:
- scan = vp8_row_scan;
- break;
-
- case DCT_ADST:
- scan = vp8_col_scan;
- break;
-
- default:
- scan = vp8_default_zig_zag1d;
- break;
- }
+ if (type == PLANE_TYPE_Y_WITH_DC) {
+ tx_type = get_tx_type_4x4(xd, b);
+ if (tx_type != DCT_DCT) {
+ switch (tx_type) {
+ case ADST_DCT:
+ scan = vp8_row_scan;
+ break;
+
+ case DCT_ADST:
+ scan = vp8_col_scan;
+ break;
+ default:
+ scan = vp8_default_zig_zag1d;
+ break;
+ }
+ }
}
-#endif
+
break;
case TX_8X8:
scan = vp8_default_zig_zag1d_8x8;
band = vp8_coef_bands_8x8;
default_eob = 64;
-#if CONFIG_HYBRIDTRANSFORM8X8
- {
+ if (type == PLANE_TYPE_Y_WITH_DC) {
BLOCKD *bb;
int ib = (b - xd->block);
if (ib < 16) {
ib = (ib & 8) + ((ib & 4) >> 1);
bb = xd->block + ib;
- if (mbmi->mode == I8X8_PRED)
- tx_type = bb->bmi.as_mode.tx_type;
+ tx_type = get_tx_type_8x8(xd, bb);
}
}
-#endif
break;
case TX_16X16:
scan = vp8_default_zig_zag1d_16x16;
band = vp8_coef_bands_16x16;
default_eob = 256;
-#if CONFIG_HYBRIDTRANSFORM16X16
- if (type == PLANE_TYPE_Y_WITH_DC &&
- mbmi->mode < I8X8_PRED &&
- mb->q_index < ACTIVE_HT16)
- tx_type = b->bmi.as_mode.tx_type;
-#endif
+ if (type == PLANE_TYPE_Y_WITH_DC) {
+ tx_type = get_tx_type_16x16(xd, b);
+ }
break;
default:
break;
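
With the per-experiment #ifdefs gone, cost_coeffs() asks get_tx_type_4x4/8x8/16x16() for the block's transform type and derives the coefficient scan order from it: ADST_DCT selects the row scan, DCT_ADST the column scan, everything else the default zig-zag (the 8x8 and 16x16 paths keep the zig-zag regardless of type here). A small sketch of that mapping, with hypothetical toy_* enums standing in for TX_TYPE and the scan tables:

    typedef enum { TOY_DCT_DCT, TOY_ADST_DCT, TOY_DCT_ADST, TOY_ADST_ADST } toy_tx_type;
    typedef enum { TOY_SCAN_ZIGZAG, TOY_SCAN_ROW, TOY_SCAN_COL } toy_scan;

    static toy_scan toy_scan_for_tx_type(toy_tx_type t) {
      switch (t) {
        case TOY_ADST_DCT: return TOY_SCAN_ROW;  /* as vp8_row_scan above */
        case TOY_DCT_ADST: return TOY_SCAN_COL;  /* as vp8_col_scan above */
        default:           return TOY_SCAN_ZIGZAG;
      }
    }
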
@@ -675,11 +640,8 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
else
seg_eob = default_eob;
- //mbmi->mode = mode;
-
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT) {
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
@@ -691,9 +653,7 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
if (c < seg_eob)
cost += mb->hybrid_token_costs[tx_size][type][band[c]]
[pt][DCT_EOB_TOKEN];
- } else
-#endif
- {
+ } else {
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
int t = vp8_dct_value_tokens_ptr[v].Token;
@@ -871,6 +831,10 @@ static int vp8_rdcost_mby_16x16(MACROBLOCK *mb) {
static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
const VP8_ENCODER_RTCD *rtcd, int *skippable) {
int d;
+ MACROBLOCKD *xd = &mb->e_mbd;
+ BLOCKD *b = &mb->e_mbd.block[0];
+ BLOCK *be = &mb->block[0];
+ TX_TYPE tx_type;
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(
mb->src_diff,
@@ -878,27 +842,18 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
mb->e_mbd.predictor,
mb->block[0].src_stride);
-#if CONFIG_HYBRIDTRANSFORM16X16
- if ((mb->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED) &&
- (mb->q_index < ACTIVE_HT16)) {
- BLOCKD *b = &mb->e_mbd.block[0];
- BLOCK *be = &mb->block[0];
- txfm_map(b, pred_mode_conv(mb->e_mbd.mode_info_context->mbmi.mode));
- vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 16);
+ tx_type = get_tx_type_16x16(xd, b);
+ if (tx_type != DCT_DCT) {
+ vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 16);
} else
vp8_transform_mby_16x16(mb);
-#else
- vp8_transform_mby_16x16(mb);
-#endif
vp8_quantize_mby_16x16(mb);
-#if CONFIG_HYBRIDTRANSFORM16X16
// TODO(jingning) is it possible to quickly determine whether to force
// trailing coefficients to be zero, instead of running trellis
// optimization in the rate-distortion optimization loop?
if (mb->e_mbd.mode_info_context->mbmi.mode < I8X8_PRED)
vp8_optimize_mby_16x16(mb, rtcd);
-#endif
d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 0);
@@ -914,8 +869,6 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
VP8_COMMON *cm = &cpi->common;
MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
-#if CONFIG_TX_SELECT
-
MACROBLOCKD *xd = &x->e_mbd;
int can_skip = cm->mb_no_coeff_skip;
vp8_prob skip_prob = can_skip ? get_pred_prob(cm, xd, PRED_MBSKIP) : 128;
@@ -1023,25 +976,6 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
else
txfm_cache[TX_MODE_SELECT] = rd4x4s < rd8x8s ? rd4x4s : rd8x8s;
-#else /* CONFIG_TX_SELECT */
-
- switch (cpi->common.txfm_mode) {
- case ALLOW_16X16:
- macro_block_yrd_16x16(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
- mbmi->txfm_size = TX_16X16;
- break;
- case ALLOW_8X8:
- macro_block_yrd_8x8(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
- mbmi->txfm_size = TX_8X8;
- break;
- default:
- case ONLY_4X4:
- macro_block_yrd_4x4(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
- mbmi->txfm_size = TX_4X4;
- break;
- }
-
-#endif /* CONFIG_TX_SELECT */
}
static void copy_predictor(unsigned char *dst, const unsigned char *predictor) {
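
macro_block_yrd() now always measures the block at every transform size and stores the resulting RD values in txfm_cache: one entry per frame-level transform mode, with TX_MODE_SELECT taking the cheapest of the per-size costs once each has been charged its selection bit (and the skip cost via the skip_prob terms above). A hedged sketch of that bookkeeping; the toy_* names and the rd*s inputs (assumed to already include signalling costs) are placeholders, and the real code also restricts which sizes a given prediction mode may use:

    enum { TOY_ONLY_4X4, TOY_ALLOW_8X8, TOY_ALLOW_16X16, TOY_MODE_SELECT,
           TOY_NB_TXFM_MODES };

    static void toy_fill_txfm_cache(long long cache[TOY_NB_TXFM_MODES],
                                    long long rd4x4, long long rd8x8,
                                    long long rd16x16,
                                    long long rd4x4s, long long rd8x8s) {
      cache[TOY_ONLY_4X4]    = rd4x4;
      cache[TOY_ALLOW_8X8]   = rd8x8;
      cache[TOY_ALLOW_16X16] = rd16x16;
      /* mirrors "txfm_cache[TX_MODE_SELECT] = rd4x4s < rd8x8s ? ..." above */
      cache[TOY_MODE_SELECT] = rd4x4s < rd8x8s ? rd4x4s : rd8x8s;
    }
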
@@ -1145,12 +1079,7 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
int *bestrate, int *bestratey,
int *bestdistortion) {
B_PREDICTION_MODE mode;
-
-#if CONFIG_HYBRIDTRANSFORM
- int QIndex = x->q_index;
- int active_ht = (QIndex < ACTIVE_HT);
- TX_TYPE best_tx_type;
-#endif
+ MACROBLOCKD *xd = &x->e_mbd;
#if CONFIG_COMP_INTRA_PRED
B_PREDICTION_MODE mode2;
@@ -1161,6 +1090,8 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
ENTROPY_CONTEXT ta = *a, tempa = *a;
ENTROPY_CONTEXT tl = *l, templ = *l;
+ TX_TYPE tx_type = DCT_DCT;
+ TX_TYPE best_tx_type = DCT_DCT;
/*
* The predictor buffer is a 2d buffer with a stride of 16. Create
* a temp buffer that meets the stride requirements, but we are only
@@ -1177,11 +1108,6 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
int64_t this_rd;
int ratey;
- // TODO Temporarily ignore modes that need the above-right data. SB
- // encoding means this data is not available for the bottom right MB
- // Do we need to do this for mode2 also?
- if (mode == B_LD_PRED || mode == B_VL_PRED)
- continue;
b->bmi.as_mode.first = mode;
rate = bmode_costs[mode];
@@ -1197,48 +1123,42 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
#endif
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
-#if CONFIG_HYBRIDTRANSFORM
- if (active_ht) {
- txfm_map(b, mode);
- vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
- vp8_ht_quantize_b_4x4(be, b);
+ b->bmi.as_mode.first = mode;
+ tx_type = get_tx_type_4x4(xd, b);
+ if (tx_type != DCT_DCT) {
+ vp8_fht_c(be->src_diff, be->coeff, 32, tx_type, 4);
+ vp8_ht_quantize_b_4x4(be, b, tx_type);
} else {
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b_4x4(be, b);
}
-#else
- x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
- x->quantize_b_4x4(be, b);
-#endif
- tempa = ta;
- templ = tl;
+ tempa = ta;
+ templ = tl;
- ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ, TX_4X4);
- rate += ratey;
- distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)(
- be->coeff, b->dqcoeff, 16) >> 2;
+ ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ, TX_4X4);
+ rate += ratey;
+ distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)(
+ be->coeff, b->dqcoeff, 16) >> 2;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
- if (this_rd < best_rd) {
- *bestrate = rate;
- *bestratey = ratey;
- *bestdistortion = distortion;
- best_rd = this_rd;
- *best_mode = mode;
-#if CONFIG_HYBRIDTRANSFORM
- best_tx_type = b->bmi.as_mode.tx_type ;
-#endif
+ if (this_rd < best_rd) {
+ *bestrate = rate;
+ *bestratey = ratey;
+ *bestdistortion = distortion;
+ best_rd = this_rd;
+ *best_mode = mode;
+ best_tx_type = tx_type;
#if CONFIG_COMP_INTRA_PRED
- *best_second_mode = mode2;
+ *best_second_mode = mode2;
#endif
- *a = tempa;
- *l = templ;
- copy_predictor(best_predictor, b->predictor);
- vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
- }
+ *a = tempa;
+ *l = templ;
+ copy_predictor(best_predictor, b->predictor);
+ vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
+ }
#if CONFIG_COMP_INTRA_PRED
}
#endif
@@ -1248,18 +1168,12 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
b->bmi.as_mode.second = (B_PREDICTION_MODE)(*best_second_mode);
#endif
-#if CONFIG_HYBRIDTRANSFORM
- b->bmi.as_mode.tx_type = best_tx_type;
-
// inverse transform
- if (active_ht)
- vp8_ihtllm_c(best_dqcoeff, b->diff, 32, b->bmi.as_mode.tx_type, 4);
+ if (best_tx_type != DCT_DCT)
+ vp8_ihtllm_c(best_dqcoeff, b->diff, 32, best_tx_type, 4);
else
- IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff,
- b->diff, 32);
-#else
- IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff, b->diff, 32);
-#endif
+ IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(
+ best_dqcoeff, b->diff, 32);
vp8_recon_b(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
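
rd_pick_intra4x4block() now runs every 4x4 intra mode through the same path: predict, subtract, forward transform (hybrid if get_tx_type_4x4() says so, otherwise DCT), quantize, then price the block as the rate from cost_coeffs() plus the block error, keeping whichever mode minimises the Lagrangian cost; only the winner's dqcoeff buffer is inverse-transformed afterwards. A compact keep-best sketch under an assumed J = D + lambda*R cost (the real RDCOST macro uses x->rdmult/x->rddiv with its own fixed-point scaling):

    #include <limits.h>

    typedef struct { int mode; int rate; int dist; long long rd; } toy_choice;

    static long long toy_rdcost(long long lambda_fp8, int rate, int dist) {
      return ((long long)dist << 8) + lambda_fp8 * rate;  /* 8-bit fixed point */
    }

    /* Pick the cheapest of n candidate modes given their rate/distortion. */
    static toy_choice toy_pick_best(const int *rate, const int *dist, int n,
                                    long long lambda_fp8) {
      toy_choice best = { -1, 0, 0, LLONG_MAX };
      int m;
      for (m = 0; m < n; m++) {
        const long long rd = toy_rdcost(lambda_fp8, rate[m], dist[m]);
        if (rd < best.rd) {   /* snapshot everything needed to rebuild later */
          best.mode = m;
          best.rate = rate[m];
          best.dist = dist[m];
          best.rd = rd;
        }
      }
      return best;
    }
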
@@ -1295,12 +1209,11 @@ static int64_t rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rat
tl = (ENTROPY_CONTEXT *)&t_left;
}
- // TODO(agrange)
- // vp8_intra_prediction_down_copy(xd);
-
xd->mode_info_context->mbmi.mode = B_PRED;
bmode_costs = mb->inter_bmode_costs;
+ vp8_intra_prediction_down_copy(xd);
+
for (i = 0; i < 16; i++) {
MODE_INFO *const mic = xd->mode_info_context;
const int mis = xd->mode_info_stride;
@@ -1413,14 +1326,9 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
int64_t this_rd;
MACROBLOCKD *xd = &x->e_mbd;
-#if CONFIG_HYBRIDTRANSFORM16X16
- int best_txtype, rd_txtype;
-#endif
-#if CONFIG_TX_SELECT
int i;
for (i = 0; i < NB_TXFM_MODES; i++)
txfm_cache[i] = INT64_MAX;
-#endif
// Y Search for 16x16 intra prediction mode
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
@@ -1449,9 +1357,6 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
-#if CONFIG_HYBRIDTRANSFORM16X16
- rd_txtype = x->e_mbd.block[0].bmi.as_mode.tx_type;
-#endif
if (this_rd < best_rd) {
mode_selected = mode;
@@ -1463,13 +1368,9 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
*Rate = rate;
*rate_y = ratey;
*Distortion = distortion;
-#if CONFIG_HYBRIDTRANSFORM16X16
- best_txtype = rd_txtype;
-#endif
*skippable = skip;
}
-#if CONFIG_TX_SELECT
for (i = 0; i < NB_TXFM_MODES; i++) {
int64_t adj_rd = this_rd + local_txfm_cache[i] -
local_txfm_cache[cpi->common.txfm_mode];
@@ -1477,7 +1378,6 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
txfm_cache[i] = adj_rd;
}
}
-#endif
#if CONFIG_COMP_INTRA_PRED
}
@@ -1486,9 +1386,6 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
mbmi->txfm_size = txfm_size;
mbmi->mode = mode_selected;
-#if CONFIG_HYBRIDTRANSFORM16X16
- x->e_mbd.block[0].bmi.as_mode.tx_type = best_txtype;
-#endif
#if CONFIG_COMP_INTRA_PRED
mbmi->second_mode = mode2_selected;
@@ -1539,6 +1436,7 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
// FIXME rate for compound mode and second intrapred mode
rate = mode_costs[mode];
+ b->bmi.as_mode.first = mode;
#if CONFIG_COMP_INTRA_PRED
if (mode2 == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
@@ -1554,21 +1452,18 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
vp8_subtract_4b_c(be, b, 16);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
-#if CONFIG_HYBRIDTRANSFORM8X8
- txfm_map(b, pred_mode_conv(mode));
- vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32,
- b->bmi.as_mode.tx_type, 8);
-
-#else
- x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
-#endif
+ TX_TYPE tx_type = get_tx_type_8x8(xd, b);
+ if (tx_type != DCT_DCT)
+ vp8_fht_c(be->src_diff, (x->block + idx)->coeff, 32, tx_type, 8);
+ else
+ x->vp8_short_fdct8x8(be->src_diff, (x->block + idx)->coeff, 32);
x->quantize_b_8x8(x->block + idx, xd->block + idx);
// compute quantization mse of 8x8 block
distortion = vp8_block_error_c((x->block + idx)->coeff,
(xd->block + idx)->dqcoeff, 64);
- ta0 = *(a + vp8_block2above_8x8[idx]);
- tl0 = *(l + vp8_block2left_8x8 [idx]);
+ ta0 = a[vp8_block2above_8x8[idx]];
+ tl0 = l[vp8_block2left_8x8[idx]];
rate_t = cost_coeffs(x, xd->block + idx, PLANE_TYPE_Y_WITH_DC,
&ta0, &tl0, TX_8X8);
@@ -1594,10 +1489,10 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
distortion += vp8_block_error_c((x->block + ib + 5)->coeff,
(xd->block + ib + 5)->dqcoeff, 16);
- ta0 = *(a + vp8_block2above[ib]);
- ta1 = *(a + vp8_block2above[ib + 1]);
- tl0 = *(l + vp8_block2above[ib]);
- tl1 = *(l + vp8_block2above[ib + 4]);
+ ta0 = a[vp8_block2above[ib]];
+ ta1 = a[vp8_block2above[ib + 1]];
+ tl0 = l[vp8_block2left[ib]];
+ tl1 = l[vp8_block2left[ib + 4]];
rate_t = cost_coeffs(x, xd->block + ib, PLANE_TYPE_Y_WITH_DC,
&ta0, &tl0, TX_4X4);
rate_t += cost_coeffs(x, xd->block + ib + 1, PLANE_TYPE_Y_WITH_DC,
@@ -1639,15 +1534,15 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
vp8_encode_intra8x8(IF_RTCD(&cpi->rtcd), x, ib);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
- *(a + vp8_block2above_8x8[idx]) = besta0;
- *(a + vp8_block2above_8x8[idx] + 1) = besta1;
- *(l + vp8_block2left_8x8 [idx]) = bestl0;
- *(l + vp8_block2left_8x8 [idx] + 1) = bestl1;
+ a[vp8_block2above_8x8[idx]] = besta0;
+ a[vp8_block2above_8x8[idx] + 1] = besta1;
+ l[vp8_block2left_8x8[idx]] = bestl0;
+ l[vp8_block2left_8x8[idx] + 1] = bestl1;
} else {
- *(a + vp8_block2above[ib]) = besta0;
- *(a + vp8_block2above[ib + 1]) = besta1;
- *(l + vp8_block2above[ib]) = bestl0;
- *(l + vp8_block2above[ib + 4]) = bestl1;
+ a[vp8_block2above[ib]] = besta0;
+ a[vp8_block2above[ib + 1]] = besta1;
+ l[vp8_block2left[ib]] = bestl0;
+ l[vp8_block2left[ib + 4]] = bestl1;
}
return best_rd;
@@ -1839,7 +1734,7 @@ static int64_t rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
static int64_t rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
- int *distortion, int fullpixel) {
+ int *distortion, int *skippable, int fullpixel) {
vp8_build_inter4x4_predictors_mbuv(&x->e_mbd);
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
@@ -1849,6 +1744,7 @@ static int64_t rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
*rate = rd_cost_mbuv(x);
*distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
+ *skippable = mbuv_is_skippable_4x4(&x->e_mbd);
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
@@ -2105,7 +2001,7 @@ static int labels2mode(
int_mv *best_ref_mv,
int_mv *second_best_ref_mv,
DEC_MVCOSTS) {
- MACROBLOCKD *const xd = & x->e_mbd;
+ MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *const mic = xd->mode_info_context;
MB_MODE_INFO * mbmi = &mic->mbmi;
const int mis = xd->mode_info_stride;
@@ -2199,30 +2095,19 @@ static int labels2mode(
return cost;
}
-static int rdcost_mbsegment_y(MACROBLOCK *mb, const int *labels,
- int which_label, ENTROPY_CONTEXT *ta,
- ENTROPY_CONTEXT *tl) {
- int b, cost = 0;
- MACROBLOCKD *xd = &mb->e_mbd;
-
- for (b = 0; b < 16; b++)
- if (labels[ b] == which_label)
- cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above[b],
- tl + vp8_block2left[b], TX_4X4);
-
- return cost;
-
-}
-
-static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x,
- int const *labels,
- int which_label,
- const VP8_ENCODER_RTCD *rtcd) {
+static int64_t encode_inter_mb_segment(MACROBLOCK *x,
+ int const *labels,
+ int which_label,
+ int *labelyrate,
+ int *distortion,
+ ENTROPY_CONTEXT *ta,
+ ENTROPY_CONTEXT *tl,
+ const VP8_ENCODER_RTCD *rtcd) {
int i;
- unsigned int distortion = 0;
MACROBLOCKD *xd = &x->e_mbd;
+ *labelyrate = 0;
+ *distortion = 0;
for (i = 0; i < 16; i++) {
if (labels[i] == which_label) {
BLOCKD *bd = &x->e_mbd.block[i];
@@ -2234,18 +2119,118 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x,
vp8_build_2nd_inter_predictors_b(bd, 16, xd->subpixel_predict_avg);
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, bd, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-
- // set to 0 no way to account for 2nd order DC so discount
- // be->coeff[0] = 0;
x->quantize_b_4x4(be, bd);
- thisdistortion = ENCODEMB_INVOKE(&rtcd->encodemb, berr)(
- be->coeff, bd->dqcoeff, 16) / 4;
- distortion += thisdistortion;
+ thisdistortion = vp8_block_error_c(be->coeff, bd->dqcoeff, 16);
+ *distortion += thisdistortion;
+ *labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
+ ta + vp8_block2above[i],
+ tl + vp8_block2left[i], TX_4X4);
}
}
- return distortion;
+ *distortion >>= 2;
+ return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
}
+static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
+ int const *labels,
+ int which_label,
+ int *labelyrate,
+ int *distortion,
+ int64_t *otherrd,
+ ENTROPY_CONTEXT *ta,
+ ENTROPY_CONTEXT *tl,
+ const VP8_ENCODER_RTCD *rtcd) {
+ int i, j;
+ MACROBLOCKD *xd = &x->e_mbd;
+ const int iblock[4] = { 0, 1, 4, 5 };
+ int othercost = 0, otherdist = 0;
+ ENTROPY_CONTEXT_PLANES tac, tlc;
+ ENTROPY_CONTEXT *tacp = (ENTROPY_CONTEXT *) &tac,
+ *tlcp = (ENTROPY_CONTEXT *) &tlc;
+
+ if (otherrd) {
+ memcpy(&tac, ta, sizeof(ENTROPY_CONTEXT_PLANES));
+ memcpy(&tlc, tl, sizeof(ENTROPY_CONTEXT_PLANES));
+ }
+
+ *distortion = 0;
+ *labelyrate = 0;
+ for (i = 0; i < 4; i++) {
+ int ib = vp8_i8x8_block[i];
+
+ if (labels[ib] == which_label) {
+ int idx = (ib & 8) + ((ib & 2) << 1);
+ BLOCKD *bd = &xd->block[ib], *bd2 = &xd->block[idx];
+ BLOCK *be = &x->block[ib], *be2 = &x->block[idx];
+ int thisdistortion;
+
+ vp8_build_inter_predictors4b(xd, bd, 16);
+ if (xd->mode_info_context->mbmi.second_ref_frame)
+ vp8_build_2nd_inter_predictors4b(xd, bd, 16);
+ vp8_subtract_4b_c(be, bd, 16);
+
+ if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
+ if (otherrd) {
+ x->vp8_short_fdct8x8(be->src_diff, be2->coeff, 32);
+ x->quantize_b_8x8(be2, bd2);
+ thisdistortion = vp8_block_error_c(be2->coeff, bd2->dqcoeff, 64);
+ otherdist += thisdistortion;
+ othercost += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
+ tacp + vp8_block2above_8x8[idx],
+ tlcp + vp8_block2left_8x8[idx], TX_8X8);
+ }
+ for (j = 0; j < 4; j += 2) {
+ bd = &xd->block[ib + iblock[j]];
+ be = &x->block[ib + iblock[j]];
+ x->vp8_short_fdct8x4(be->src_diff, be->coeff, 32);
+ x->quantize_b_4x4_pair(be, be + 1, bd, bd + 1);
+ thisdistortion = vp8_block_error_c(be->coeff, bd->dqcoeff, 32);
+ *distortion += thisdistortion;
+ *labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
+ ta + vp8_block2above[ib + iblock[j]],
+ tl + vp8_block2left[ib + iblock[j]],
+ TX_4X4);
+ *labelyrate += cost_coeffs(x, bd + 1, PLANE_TYPE_Y_WITH_DC,
+ ta + vp8_block2above[ib + iblock[j] + 1],
+ tl + vp8_block2left[ib + iblock[j]],
+ TX_4X4);
+ }
+ } else /* 8x8 */ {
+ if (otherrd) {
+ for (j = 0; j < 4; j += 2) {
+ BLOCKD *bd3 = &xd->block[ib + iblock[j]];
+ BLOCK *be3 = &x->block[ib + iblock[j]];
+ x->vp8_short_fdct8x4(be3->src_diff, be3->coeff, 32);
+ x->quantize_b_4x4_pair(be3, be3 + 1, bd3, bd3 + 1);
+ thisdistortion = vp8_block_error_c(be3->coeff, bd3->dqcoeff, 32);
+ otherdist += thisdistortion;
+ othercost += cost_coeffs(x, bd3, PLANE_TYPE_Y_WITH_DC,
+ tacp + vp8_block2above[ib + iblock[j]],
+ tlcp + vp8_block2left[ib + iblock[j]],
+ TX_4X4);
+ othercost += cost_coeffs(x, bd3 + 1, PLANE_TYPE_Y_WITH_DC,
+ tacp + vp8_block2above[ib + iblock[j] + 1],
+ tlcp + vp8_block2left[ib + iblock[j]],
+ TX_4X4);
+ }
+ }
+ x->vp8_short_fdct8x8(be->src_diff, be2->coeff, 32);
+ x->quantize_b_8x8(be2, bd2);
+ thisdistortion = vp8_block_error_c(be2->coeff, bd2->dqcoeff, 64);
+ *distortion += thisdistortion;
+ *labelyrate += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
+ ta + vp8_block2above_8x8[idx],
+ tl + vp8_block2left_8x8[idx], TX_8X8);
+ }
+ }
+ }
+ *distortion >>= 2;
+ if (otherrd) {
+ othercost >>= 2;
+ *otherrd = RDCOST(x->rdmult, x->rddiv, othercost, otherdist);
+ }
+ return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
+}
static const unsigned int segmentation_to_sseshift[4] = {3, 3, 2, 0};
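
encode_inter_mb_segment_8x8() is the 8x8-and-larger counterpart of the 4x4 routine above: each selected partition is predicted, transformed and quantized at the requested transform size, the rate comes from cost_coeffs() and the distortion from vp8_block_error (scaled by >> 2), and when otherrd is non-NULL the same partition is additionally priced at the other transform size on cloned entropy contexts so both totals fall out of a single pass. A hedged sketch of that dual costing, reusing the toy Lagrangian form from earlier (toy_* names are placeholders):

    typedef struct { int rate; int dist; } toy_rd_terms;

    static long long toy_rd(long long lambda_fp8, toy_rd_terms t) {
      return ((long long)t.dist << 8) + lambda_fp8 * t.rate;
    }

    /* Return the RD cost at the primary transform size and, optionally,
     * the cost the alternate size would have produced on the same pixels. */
    static long long toy_segment_rd(long long lambda_fp8,
                                    toy_rd_terms primary, toy_rd_terms other,
                                    long long *other_rd_out) {
      if (other_rd_out)
        *other_rd_out = toy_rd(lambda_fp8, other);
      return toy_rd(lambda_fp8, primary);
    }
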
@@ -2255,13 +2240,14 @@ typedef struct {
int_mv mvp;
int64_t segment_rd;
- int segment_num;
+ SPLITMV_PARTITIONING_TYPE segment_num;
+ TX_SIZE txfm_size;
int r;
int d;
int segment_yrate;
B_PREDICTION_MODE modes[16];
int_mv mvs[16], second_mvs[16];
- unsigned char eobs[16];
+ int eobs[16];
int mvthresh;
int *mdcounts;
@@ -2281,21 +2267,27 @@ int mv_check_bounds(MACROBLOCK *x, int_mv *mv) {
return r;
}
-static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
- BEST_SEG_INFO *bsi, unsigned int segmentation,
- int_mv seg_mvs[16 /* n_blocks */][MAX_REF_FRAMES - 1]) {
- int i;
+static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x,
+ BEST_SEG_INFO *bsi,
+ SPLITMV_PARTITIONING_TYPE segmentation,
+ TX_SIZE tx_size, int64_t *otherrds,
+ int64_t *rds, int *completed,
+ /* 16 = n_blocks */
+ int_mv seg_mvs[16 /* n_blocks */]
+ [MAX_REF_FRAMES - 1]) {
+ int i, j;
int const *labels;
int br = 0, bd = 0;
B_PREDICTION_MODE this_mode;
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
int label_count;
- int64_t this_segment_rd = 0;
+ int64_t this_segment_rd = 0, other_segment_rd;
int label_mv_thresh;
int rate = 0;
int sbr = 0, sbd = 0;
int segmentyrate = 0;
+ int best_eobs[16] = { 0 };
vp8_variance_fn_ptr_t *v_fn_ptr;
@@ -2323,20 +2315,23 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
label_mv_thresh = 1 * bsi->mvthresh / label_count;
// Segmentation method overheads
- rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + segmentation);
+ rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs,
+ vp8_mbsplit_encodings + segmentation);
rate += vp8_cost_mv_ref(cpi, SPLITMV, bsi->mdcounts);
this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
br += rate;
+ other_segment_rd = this_segment_rd;
- for (i = 0; i < label_count; i++) {
+ mbmi->txfm_size = tx_size;
+ for (i = 0; i < label_count && this_segment_rd < bsi->segment_rd; i++) {
int_mv mode_mv[B_MODE_COUNT], second_mode_mv[B_MODE_COUNT];
- int64_t best_label_rd = INT64_MAX;
+ int64_t best_label_rd = INT64_MAX, best_other_rd = INT64_MAX;
B_PREDICTION_MODE mode_selected = ZERO4X4;
int bestlabelyrate = 0;
// search for the best motion vector on this segment
for (this_mode = LEFT4X4; this_mode <= NEW4X4; this_mode ++) {
- int64_t this_rd;
+ int64_t this_rd, other_rd;
int distortion;
int labelyrate;
ENTROPY_CONTEXT_PLANES t_above_s, t_left_s;
@@ -2358,21 +2353,23 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
BLOCK *c;
BLOCKD *e;
- // Is the best so far sufficiently good that we cant justify doing and new motion search.
+      /* Is the best so far sufficiently good that we can't justify doing
+       * a new motion search? */
+      /* Is the best so far sufficiently good that we can't justify doing
+       * a new motion search? */
if (best_label_rd < label_mv_thresh)
break;
if (cpi->compressor_speed) {
- if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8) {
+ if (segmentation == PARTITIONING_8X16 ||
+ segmentation == PARTITIONING_16X8) {
bsi->mvp.as_int = bsi->sv_mvp[i].as_int;
- if (i == 1 && segmentation == BLOCK_16X8)
+ if (i == 1 && segmentation == PARTITIONING_16X8)
bsi->mvp.as_int = bsi->sv_mvp[2].as_int;
step_param = bsi->sv_istep[i];
}
// use previous block's result as next block's MV predictor.
- if (segmentation == BLOCK_4X4 && i > 0) {
+ if (segmentation == PARTITIONING_4X4 && i > 0) {
bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.as_mv.first.as_int;
if (i == 4 || i == 8 || i == 12)
bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.as_mv.first.as_int;
@@ -2404,7 +2401,8 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
// Should we do a full search (best quality only)
if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000) {
/* Check if mvp_full is within the range. */
- vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
+ vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max,
+ x->mv_row_min, x->mv_row_max);
thissme = cpi->full_search_sad(x, c, e, &mvp_full,
sadpb, 16, v_fn_ptr,
@@ -2414,7 +2412,8 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
bestsme = thissme;
mode_mv[NEW4X4].as_int = e->bmi.as_mv.first.as_int;
} else {
- // The full search result is actually worse so re-instate the previous best vector
+ /* The full search result is actually worse so re-instate the
+ * previous best vector */
e->bmi.as_mv.first.as_int = mode_mv[NEW4X4].as_int;
}
}
@@ -2424,15 +2423,16 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
int distortion;
unsigned int sse;
cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
- bsi->ref_mv, x->errorperbit, v_fn_ptr, XMVCOST,
- &distortion, &sse);
+ bsi->ref_mv, x->errorperbit, v_fn_ptr,
+ XMVCOST, &distortion, &sse);
// safe motion search result for use in compound prediction
seg_mvs[i][mbmi->ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
}
} /* NEW4X4 */
else if (mbmi->second_ref_frame && this_mode == NEW4X4) {
- // motion search not completed? Then skip newmv for this block with comppred
+      /* Motion search not completed? Then skip NEWMV for this block when
+       * using compound prediction. */
if (seg_mvs[i][mbmi->second_ref_frame - 1].as_int == INVALID_MV ||
seg_mvs[i][mbmi->ref_frame - 1].as_int == INVALID_MV) {
continue;
@@ -2454,21 +2454,39 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
mv_check_bounds(x, &second_mode_mv[this_mode]))
continue;
- distortion = vp8_encode_inter_mb_segment(
- x, labels, i,
- IF_RTCD(&cpi->rtcd));
-
- labelyrate = rdcost_mbsegment_y(x, labels, i, ta_s, tl_s);
+ if (segmentation == PARTITIONING_4X4) {
+ this_rd = encode_inter_mb_segment(x, labels, i, &labelyrate,
+ &distortion,
+ ta_s, tl_s, IF_RTCD(&cpi->rtcd));
+ other_rd = this_rd;
+ } else {
+ this_rd = encode_inter_mb_segment_8x8(x, labels, i, &labelyrate,
+ &distortion, &other_rd,
+ ta_s, tl_s, IF_RTCD(&cpi->rtcd));
+ }
+ this_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
rate += labelyrate;
- this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
-
if (this_rd < best_label_rd) {
sbr = rate;
sbd = distortion;
bestlabelyrate = labelyrate;
mode_selected = this_mode;
best_label_rd = this_rd;
+ if (x->e_mbd.mode_info_context->mbmi.txfm_size == TX_4X4) {
+ for (j = 0; j < 16; j++)
+ if (labels[j] == i)
+ best_eobs[j] = x->e_mbd.block[j].eob;
+ } else {
+ for (j = 0; j < 4; j++) {
+ int ib = vp8_i8x8_block[j], idx = j * 4;
+
+ if (labels[ib] == i)
+ best_eobs[idx] = x->e_mbd.block[idx].eob;
+ }
+ }
+ if (other_rd < best_other_rd)
+ best_other_rd = other_rd;
vpx_memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));
@@ -2480,18 +2498,18 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
- &second_mode_mv[mode_selected], seg_mvs[i], bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
+ &second_mode_mv[mode_selected], seg_mvs[i],
+ bsi->ref_mv, bsi->second_ref_mv, XMVCOST);
br += sbr;
bd += sbd;
segmentyrate += bestlabelyrate;
this_segment_rd += best_label_rd;
-
- if (this_segment_rd >= bsi->segment_rd) {
- break;
- }
-
-
+ other_segment_rd += best_other_rd;
+ if (rds)
+ rds[i] = this_segment_rd;
+ if (otherrds)
+        otherrds[i] = other_segment_rd;
} /* for each label */
if (this_segment_rd < bsi->segment_rd) {
@@ -2500,6 +2518,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
bsi->segment_yrate = segmentyrate;
bsi->segment_rd = this_segment_rd;
bsi->segment_num = segmentation;
+ bsi->txfm_size = mbmi->txfm_size;
// store everything needed to come back to this!!
for (i = 0; i < 16; i++) {
@@ -2509,7 +2528,106 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
if (mbmi->second_ref_frame)
bsi->second_mvs[i].as_mv = x->partition_info->bmi[i].second_mv.as_mv;
bsi->modes[i] = x->partition_info->bmi[i].mode;
- bsi->eobs[i] = bd->eob;
+ bsi->eobs[i] = best_eobs[i];
+ }
+ }
+
+ if (completed) {
+ *completed = i;
+ }
+}
+
+static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
+ BEST_SEG_INFO *bsi,
+ unsigned int segmentation,
+ /* 16 = n_blocks */
+ int_mv seg_mvs[16][MAX_REF_FRAMES - 1],
+ int64_t txfm_cache[NB_TXFM_MODES]) {
+ int i, n, c = vp8_mbsplit_count[segmentation];
+
+ if (segmentation == PARTITIONING_4X4) {
+ int64_t rd[16];
+
+ rd_check_segment_txsize(cpi, x, bsi, segmentation, TX_4X4, NULL,
+ rd, &n, seg_mvs);
+ if (n == c) {
+ for (i = 0; i < NB_TXFM_MODES; i++) {
+ if (rd[c - 1] < txfm_cache[i])
+ txfm_cache[i] = rd[c - 1];
+ }
+ }
+ } else {
+ int64_t diff, base_rd;
+ int cost4x4 = vp8_cost_bit(cpi->common.prob_tx[0], 0);
+ int cost8x8 = vp8_cost_bit(cpi->common.prob_tx[0], 1);
+
+ if (cpi->common.txfm_mode == TX_MODE_SELECT) {
+ int64_t rd4x4[4], rd8x8[4];
+ int n4x4, n8x8, nmin;
+ BEST_SEG_INFO bsi4x4, bsi8x8;
+
+      /* factor the cost of signalling 4x4 vs. 8x8 into the decision */
+ vpx_memcpy(&bsi4x4, bsi, sizeof(*bsi));
+ vpx_memcpy(&bsi8x8, bsi, sizeof(*bsi));
+ rd_check_segment_txsize(cpi, x, &bsi4x4, segmentation,
+ TX_4X4, NULL, rd4x4, &n4x4, seg_mvs);
+ rd_check_segment_txsize(cpi, x, &bsi8x8, segmentation,
+ TX_8X8, NULL, rd8x8, &n8x8, seg_mvs);
+ if (bsi4x4.segment_num == segmentation) {
+ bsi4x4.segment_rd += RDCOST(x->rdmult, x->rddiv, cost4x4, 0);
+ if (bsi4x4.segment_rd < bsi->segment_rd)
+ vpx_memcpy(bsi, &bsi4x4, sizeof(*bsi));
+ }
+ if (bsi8x8.segment_num == segmentation) {
+ bsi8x8.segment_rd += RDCOST(x->rdmult, x->rddiv, cost8x8, 0);
+ if (bsi8x8.segment_rd < bsi->segment_rd)
+ vpx_memcpy(bsi, &bsi8x8, sizeof(*bsi));
+ }
+ n = n4x4 > n8x8 ? n4x4 : n8x8;
+ if (n == c) {
+ nmin = n4x4 < n8x8 ? n4x4 : n8x8;
+ diff = rd8x8[nmin - 1] - rd4x4[nmin - 1];
+ if (n == n4x4) {
+ base_rd = rd4x4[c - 1];
+ } else {
+ base_rd = rd8x8[c - 1] - diff;
+ }
+ }
+ } else {
+ int64_t rd[4], otherrd[4];
+
+ if (cpi->common.txfm_mode == ONLY_4X4) {
+ rd_check_segment_txsize(cpi, x, bsi, segmentation, TX_4X4, otherrd,
+ rd, &n, seg_mvs);
+ if (n == c) {
+ base_rd = rd[c - 1];
+ diff = otherrd[c - 1] - rd[c - 1];
+ }
+ } else /* use 8x8 transform */ {
+ rd_check_segment_txsize(cpi, x, bsi, segmentation, TX_8X8, otherrd,
+ rd, &n, seg_mvs);
+ if (n == c) {
+ diff = rd[c - 1] - otherrd[c - 1];
+ base_rd = otherrd[c - 1];
+ }
+ }
+ }
+
+ if (n == c) {
+ if (base_rd < txfm_cache[ONLY_4X4]) {
+ txfm_cache[ONLY_4X4] = base_rd;
+ }
+ if (base_rd + diff < txfm_cache[1]) {
+ txfm_cache[ALLOW_8X8] = txfm_cache[ALLOW_16X16] = base_rd + diff;
+ }
+ if (diff < 0) {
+ base_rd += diff + RDCOST(x->rdmult, x->rddiv, cost8x8, 0);
+ } else {
+ base_rd += RDCOST(x->rdmult, x->rddiv, cost4x4, 0);
+ }
+ if (base_rd < txfm_cache[TX_MODE_SELECT]) {
+ txfm_cache[TX_MODE_SELECT] = base_rd;
+ }
}
}
}
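
The rd_check_segment() wrapper decides how the transform size interacts with the split search: 4x4 partitionings only ever use TX_4X4, while the larger partitionings either run both sizes under TX_MODE_SELECT (each priced with its prob_tx signalling bit before it may replace the incumbent best split) or run the single size implied by the frame's txfm_mode while tracking the other size's cost through otherrd; the resulting base_rd/diff pair then updates txfm_cache. A rough sketch of the TX_MODE_SELECT arbitration, with toy names:

    /* Return the new best RD after letting both candidate encodes, each
     * carrying its transform-size signalling cost, challenge the incumbent. */
    static long long toy_pick_split_txsize(long long incumbent_rd,
                                           long long rd_4x4, long long bit_4x4,
                                           long long rd_8x8, long long bit_8x8,
                                           int *use_8x8) {
      const long long total_4x4 = rd_4x4 + bit_4x4;
      const long long total_8x8 = rd_8x8 + bit_8x8;
      if (total_4x4 < incumbent_rd || total_8x8 < incumbent_rd) {
        *use_8x8 = total_8x8 < total_4x4;
        return *use_8x8 ? total_8x8 : total_4x4;
      }
      return incumbent_rd;   /* keep the previously best partitioning */
    }
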
@@ -2527,17 +2645,26 @@ void vp8_cal_step_param(int sr, int *sp) {
*sp = MAX_MVSEARCH_STEPS - 1 - step;
}
-static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
- int_mv *best_ref_mv, int_mv *second_best_ref_mv, int64_t best_rd,
- int *mdcounts, int *returntotrate,
- int *returnyrate, int *returndistortion,
- int mvthresh,
- int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1]) {
+static int rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
+ int_mv *best_ref_mv,
+ int_mv *second_best_ref_mv,
+ int64_t best_rd,
+ int *mdcounts,
+ int *returntotrate,
+ int *returnyrate,
+ int *returndistortion,
+ int *skippable, int mvthresh,
+ int_mv seg_mvs[NB_PARTITIONINGS]
+ [16 /* n_blocks */]
+ [MAX_REF_FRAMES - 1],
+ int64_t txfm_cache[NB_TXFM_MODES]) {
int i;
BEST_SEG_INFO bsi;
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
vpx_memset(&bsi, 0, sizeof(bsi));
+ for (i = 0; i < NB_TXFM_MODES; i++)
+ txfm_cache[i] = INT64_MAX;
bsi.segment_rd = best_rd;
bsi.ref_mv = best_ref_mv;
@@ -2545,6 +2672,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
bsi.mvp.as_int = best_ref_mv->as_int;
bsi.mvthresh = mvthresh;
bsi.mdcounts = mdcounts;
+ bsi.txfm_size = TX_4X4;
for (i = 0; i < 16; i++)
bsi.modes[i] = ZERO4X4;
@@ -2552,15 +2680,19 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
if (cpi->compressor_speed == 0) {
/* for now, we will keep the original segmentation order
when in best quality mode */
- rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
- rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_16X8,
+ seg_mvs[PARTITIONING_16X8], txfm_cache);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_8X16,
+ seg_mvs[PARTITIONING_8X16], txfm_cache);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_8X8,
+ seg_mvs[PARTITIONING_8X8], txfm_cache);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_4X4,
+ seg_mvs[PARTITIONING_4X4], txfm_cache);
} else {
int sr;
- rd_check_segment(cpi, x, &bsi, BLOCK_8X8, seg_mvs[BLOCK_8X8]);
-
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_8X8,
+ seg_mvs[PARTITIONING_8X8], txfm_cache);
if (bsi.segment_rd < best_rd) {
int tmp_col_min = x->mv_col_min;
@@ -2576,34 +2708,40 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int;
bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int;
- /* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range according to the closeness of 2 MV. */
+    /* Use the 8x8 result as the 16x8/8x16 predictor MV, and adjust the
+     * search range according to how close the two MVs are. */
/* block 8X16 */
- {
- sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row)) >> 3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col)) >> 3);
- vp8_cal_step_param(sr, &bsi.sv_istep[0]);
+ sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[0]);
- sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3, (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
- vp8_cal_step_param(sr, &bsi.sv_istep[1]);
+ sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- rd_check_segment(cpi, x, &bsi, BLOCK_8X16, seg_mvs[BLOCK_8X16]);
- }
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_8X16,
+ seg_mvs[PARTITIONING_8X16], txfm_cache);
/* block 16X8 */
- {
- sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row)) >> 3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col)) >> 3);
- vp8_cal_step_param(sr, &bsi.sv_istep[0]);
+ sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[0]);
- sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3, (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
- vp8_cal_step_param(sr, &bsi.sv_istep[1]);
+ sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3,
+ (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
+ vp8_cal_step_param(sr, &bsi.sv_istep[1]);
- rd_check_segment(cpi, x, &bsi, BLOCK_16X8, seg_mvs[BLOCK_16X8]);
- }
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_16X8,
+ seg_mvs[PARTITIONING_16X8], txfm_cache);
/* If 8x8 is better than 16x8/8x16, then do 4x4 search */
/* Not skip 4x4 if speed=0 (good quality) */
- if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) { /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
+ if (cpi->sf.no_skip_block4x4_search ||
+ bsi.segment_num == PARTITIONING_8X8) {
+ /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
- rd_check_segment(cpi, x, &bsi, BLOCK_4X4, seg_mvs[BLOCK_4X4]);
+ rd_check_segment(cpi, x, &bsi, PARTITIONING_4X4,
+ seg_mvs[PARTITIONING_4X4], txfm_cache);
}
/* restore UMV window */
@@ -2627,8 +2765,12 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
*returntotrate = bsi.r;
*returndistortion = bsi.d;
*returnyrate = bsi.segment_yrate;
+ *skippable = bsi.txfm_size == TX_4X4 ?
+ mby_is_skippable_4x4(&x->e_mbd, 0) :
+ mby_is_skippable_8x8(&x->e_mbd, 0);
/* save partitions */
+ mbmi->txfm_size = bsi.txfm_size;
mbmi->partitioning = bsi.segment_num;
x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];
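
In the speed path, rd_pick_best_mbsegmentation() searches the 8x8 partitioning first, reuses its four MVs to seed the 16x8 and 8x16 searches (with a step size derived from how far apart the MVs to be merged ended up), and only descends to 4x4 when 8x8 won or the speed feature forces it; the winning split's transform size, per-block eobs and skippability are now returned to the mode loop as well. A rough sketch of the step-size derivation behind the sr / vp8_cal_step_param() arithmetic above (TOY_MAX_STEPS and the >> 3 pel scaling are assumptions):

    #include <stdlib.h>

    #define TOY_MAX_STEPS 8

    static int toy_step_param(int row_a, int col_a, int row_b, int col_b) {
      int sr = abs(row_a - row_b) >> 3;       /* coarse spread of the two MVs */
      const int sc = abs(col_a - col_b) >> 3;
      int step = 0;
      if (sc > sr)
        sr = sc;
      while ((1 << step) < sr && step < TOY_MAX_STEPS - 2)
        step++;                               /* roughly vp8_cal_step_param() */
      return TOY_MAX_STEPS - 1 - step;        /* wider spread => earlier step */
    }
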
@@ -2874,9 +3016,7 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
-#if CONFIG_NEWMVENTROPY
MV mv;
-#endif
if (mbmi->mode == SPLITMV) {
int i;
@@ -2884,7 +3024,6 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
for (i = 0; i < x->partition_info->count; i++) {
if (x->partition_info->bmi[i].mode == NEW4X4) {
if (x->e_mbd.allow_high_precision_mv) {
-#if CONFIG_NEWMVENTROPY
mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].mv.as_mv.col
@@ -2898,20 +3037,7 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
&cpi->NMVcount, 1);
}
-#else
- cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col)]++;
- if (mbmi->second_ref_frame) {
- cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col)]++;
- }
-#endif
} else {
-#if CONFIG_NEWMVENTROPY
mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].mv.as_mv.col
@@ -2925,24 +3051,11 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
&cpi->NMVcount, 0);
}
-#else
- cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col) >> 1)]++;
- if (mbmi->second_ref_frame) {
- cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col) >> 1)]++;
- }
-#endif
}
}
}
} else if (mbmi->mode == NEWMV) {
if (x->e_mbd.allow_high_precision_mv) {
-#if CONFIG_NEWMVENTROPY
mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
@@ -2951,20 +3064,7 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
}
-#else
- cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv[0].as_mv.row
- - best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv[0].as_mv.col
- - best_ref_mv->as_mv.col)]++;
- if (mbmi->second_ref_frame) {
- cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv[1].as_mv.row
- - second_best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv[1].as_mv.col
- - second_best_ref_mv->as_mv.col)]++;
- }
-#endif
} else {
-#if CONFIG_NEWMVENTROPY
mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
@@ -2973,18 +3073,6 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
}
-#else
- cpi->MVcount[0][mv_max + ((mbmi->mv[0].as_mv.row
- - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max + ((mbmi->mv[0].as_mv.col
- - best_ref_mv->as_mv.col) >> 1)]++;
- if (mbmi->second_ref_frame) {
- cpi->MVcount[0][mv_max + ((mbmi->mv[1].as_mv.row
- - second_best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max + ((mbmi->mv[1].as_mv.col
- - second_best_ref_mv->as_mv.col) >> 1)]++;
- }
-#endif
}
}
}
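
rd_update_mvcount() now feeds the new MV entropy model unconditionally: for every NEW4X4 sub-block under SPLITMV, and once per macroblock for NEWMV, it forms the difference between the coded MV and its reference predictor and hands it to vp8_increment_nmv(), with the final argument selecting high-precision accounting. A simplified sketch of that pattern; the toy counter only tracks the joint zero/non-zero class, whereas the real statistics also cover per-component magnitude classes:

    typedef struct { int row; int col; } toy_mv;
    typedef struct { unsigned long joint[4]; } toy_nmv_counts;

    static void toy_count_mv(const toy_mv *coded, const toy_mv *ref,
                             toy_nmv_counts *counts, int usehp) {
      toy_mv diff;
      diff.row = coded->row - ref->row;
      diff.col = coded->col - ref->col;
      /* joint class: 0 = both zero, 1 = col only, 2 = row only, 3 = both */
      counts->joint[(diff.row != 0) * 2 + (diff.col != 0)]++;
      (void)usehp;   /* high-precision handling omitted in this sketch */
    }
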
@@ -3158,9 +3246,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
int hybrid_pred_diff,
int64_t txfm_size_diff[NB_TXFM_MODES]) {
MACROBLOCKD *xd = &x->e_mbd;
-#if CONFIG_TX_SELECT
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
-#endif
// Take a snapshot of the coding context so it can be
// restored if we decide to encode this way
@@ -3180,9 +3266,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
ctx->comp_pred_diff = comp_pred_diff;
ctx->hybrid_pred_diff = hybrid_pred_diff;
-#if CONFIG_TX_SELECT
memcpy(ctx->txfm_rd_diff, txfm_size_diff, sizeof(ctx->txfm_rd_diff));
-#endif
}
static void inter_mode_cost(VP8_COMP *cpi, MACROBLOCK *x, int this_mode,
@@ -3299,9 +3383,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
int_mv ref_mv[MAX_REF_FRAMES] = {{0}};
#endif
-#if CONFIG_SWITCHABLE_INTERP
int switchable_filter_index = 0;
-#endif
MB_PREDICTION_MODE uv_intra_mode;
MB_PREDICTION_MODE uv_intra_mode_8x8 = 0;
@@ -3317,11 +3399,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
unsigned char *y_buffer[4], *u_buffer[4], *v_buffer[4];
unsigned int ref_costs[MAX_REF_FRAMES];
- int_mv seg_mvs[BLOCK_MAX_SEGMENTS - 1][16 /* n_blocks */][MAX_REF_FRAMES - 1];
-
-#if CONFIG_HYBRIDTRANSFORM16X16
- int best_txtype, rd_txtype;
-#endif
+ int_mv seg_mvs[NB_PARTITIONINGS][16 /* n_blocks */][MAX_REF_FRAMES - 1];
vpx_memset(mode8x8, 0, sizeof(mode8x8));
vpx_memset(&frame_mv, 0, sizeof(frame_mv));
@@ -3336,7 +3414,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
for (i = 0; i < NB_TXFM_MODES; i++)
best_txfm_rd[i] = INT64_MAX;
- for (i = 0; i < BLOCK_MAX_SEGMENTS - 1; i++) {
+ for (i = 0; i < NB_PARTITIONINGS; i++) {
int j, k;
for (j = 0; j < 16; j++)
@@ -3402,12 +3480,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// that depend on the current prediction etc.
vp8_estimate_ref_frame_costs(cpi, segment_id, ref_costs);
-#if CONFIG_SWITCHABLE_INTERP
for (mode_index = 0; mode_index < MAX_MODES;
mode_index += (!switchable_filter_index)) {
-#else
- for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
-#endif
int64_t this_rd = INT64_MAX;
int is_comp_pred;
int disable_skip = 0, skippable = 0;
@@ -3435,19 +3509,16 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
#if CONFIG_PRED_FILTER
mbmi->pred_filter_enabled = 0;
#endif
-#if CONFIG_SWITCHABLE_INTERP
if (cpi->common.mcomp_filter_type == SWITCHABLE &&
this_mode >= NEARESTMV && this_mode <= SPLITMV) {
mbmi->interp_filter =
vp8_switchable_interp[switchable_filter_index++];
if (switchable_filter_index == VP8_SWITCHABLE_FILTERS)
switchable_filter_index = 0;
- //printf("Searching %d (%d)\n", this_mode, switchable_filter_index);
} else {
mbmi->interp_filter = cpi->common.mcomp_filter_type;
}
vp8_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
-#endif
// Test best rd so far against threshold for trying this mode.
if (best_rd <= cpi->rd_threshes[mode_index])
@@ -3546,9 +3617,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// FIXME compound intra prediction
vp8_build_intra_predictors_mby(&x->e_mbd);
macro_block_yrd(cpi, x, &rate_y, &distortion, &skippable, txfm_cache);
-#if CONFIG_HYBRIDTRANSFORM16X16
- rd_txtype = x->e_mbd.block[0].bmi.as_mode.tx_type;
-#endif
rate2 += rate_y;
distortion2 += distortion;
rate2 += x->mbmode_cost[x->e_mbd.frame_type][mbmi->mode];
@@ -3592,11 +3660,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
}
break;
case I8X8_PRED: {
-#if CONFIG_TX_SELECT
int cost0 = vp8_cost_bit(cm->prob_tx[0], 0);
int cost1 = vp8_cost_bit(cm->prob_tx[0], 1);
int64_t tmp_rd_4x4s, tmp_rd_8x8s;
-#endif
int64_t tmp_rd_4x4, tmp_rd_8x8, tmp_rd;
int r4x4, tok4x4, d4x4, r8x8, tok8x8, d8x8;
mbmi->txfm_size = TX_4X4;
@@ -3618,7 +3684,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
txfm_cache[ONLY_4X4] = tmp_rd_4x4;
txfm_cache[ALLOW_8X8] = tmp_rd_8x8;
txfm_cache[ALLOW_16X16] = tmp_rd_8x8;
-#if CONFIG_TX_SELECT
tmp_rd_4x4s = tmp_rd_4x4 + RDCOST(x->rdmult, x->rddiv, cost0, 0);
tmp_rd_8x8s = tmp_rd_8x8 + RDCOST(x->rdmult, x->rddiv, cost1, 0);
txfm_cache[TX_MODE_SELECT] = tmp_rd_4x4s < tmp_rd_8x8s ? tmp_rd_4x4s : tmp_rd_8x8s;
@@ -3647,9 +3712,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mode8x8[1][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.second;
#endif
}
- } else
-#endif
- if (cm->txfm_mode == ONLY_4X4) {
+ } else if (cm->txfm_mode == ONLY_4X4) {
rate = r4x4;
rate_y = tok4x4;
distortion = d4x4;
@@ -3705,26 +3768,29 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
(mbmi->ref_frame == GOLDEN_FRAME) ?
cpi->rd_threshes[THR_NEWG] : this_rd_thresh;
- mbmi->txfm_size = TX_4X4; // FIXME use 8x8 in case of 8x8/8x16/16x8
- tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
- second_ref, best_yrd, mdcounts,
- &rate, &rate_y, &distortion,
- this_rd_thresh, seg_mvs);
+ tmp_rd = rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
+ second_ref, best_yrd, mdcounts,
+ &rate, &rate_y, &distortion,
+ &skippable,
+ this_rd_thresh, seg_mvs,
+ txfm_cache);
rate2 += rate;
distortion2 += distortion;
-#if CONFIG_SWITCHABLE_INTERP
if (cpi->common.mcomp_filter_type == SWITCHABLE)
rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
[get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
[vp8_switchable_interp_map[mbmi->interp_filter]];
-#endif
// If even the 'Y' rd value of split is higher than best so far
// then dont bother looking at UV
if (tmp_rd < best_yrd) {
- rd_inter4x4_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel);
+ int uv_skippable;
+
+ rd_inter4x4_uv(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
+ cpi->common.full_pixel);
rate2 += rate_uv;
distortion2 += distortion_uv;
+ skippable = skippable && uv_skippable;
} else {
this_rd = INT64_MAX;
disable_skip = 1;
@@ -3852,13 +3918,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
rate2 += vp8_cost_bit(cpi->common.prob_pred_filter_off,
xd->mode_info_context->mbmi.pred_filter_enabled);
#endif
-#if CONFIG_SWITCHABLE_INTERP
if (cpi->common.mcomp_filter_type == SWITCHABLE)
rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
[get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
[vp8_switchable_interp_map[
x->e_mbd.mode_info_context->mbmi.interp_filter]];
-#endif
/* We don't include the cost of the second reference here, because there are only
* three options: Last/Golden, ARF/Last or Golden/ARF, or in other words if you
@@ -3883,8 +3947,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if (threshold < x->encode_breakout)
threshold = x->encode_breakout;
- var = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
- (*(b->base_src), b->src_stride,
+ var = vp8_variance16x16(*(b->base_src), b->src_stride,
x->e_mbd.predictor, 16, &sse);
if (sse < threshold) {
@@ -3894,7 +3957,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if ((sse - var < q2dc *q2dc >> 4) ||
(sse / 2 > var && sse - var < 64)) {
// Check u and v to make sure skip is ok
- int sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
+ int sse2 = vp8_uvsse(x);
if (sse2 * 2 < threshold) {
x->skip = 1;
distortion2 = sse + sse2;
@@ -3906,22 +3969,22 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
disable_skip = 1;
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
-
- break;
}
}
}
}
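
This is the encode-breakout shortcut: when the 16x16 luma SSE of the inter prediction is under the breakout threshold and the residual is essentially DC (or its variance is small relative to the SSE), the chroma SSE from vp8_uvsse() decides whether the macroblock can simply be marked as skip, with the summed SSE taken as its distortion; with the new if (!x->skip) guard below, the UV predictors and inter_mode_cost() are then only built for non-skipped blocks. A condensed sketch of the test, with simplified thresholds (q2dc stands in for the 2nd-order DC dequant value):

    static int toy_encode_breakout(unsigned int sse_y, unsigned int var_y,
                                   unsigned int sse_uv,
                                   unsigned int threshold, unsigned int q2dc) {
      if (sse_y >= threshold)
        return 0;
      /* residual dominated by DC, or low variance relative to its energy */
      if ((sse_y - var_y < (q2dc * q2dc) >> 4) ||
          (sse_y / 2 > var_y && sse_y - var_y < 64)) {
        if (sse_uv * 2 < threshold)
          return 1;    /* skip the MB; distortion = sse_y + sse_uv */
      }
      return 0;
    }
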
- vp8_build_1st_inter16x16_predictors_mbuv(&x->e_mbd, &xd->predictor[256],
- &xd->predictor[320], 8);
- if (is_comp_pred)
- vp8_build_2nd_inter16x16_predictors_mbuv(&x->e_mbd,
- &xd->predictor[256],
+ if (!x->skip) {
+ vp8_build_1st_inter16x16_predictors_mbuv(&x->e_mbd, &xd->predictor[256],
&xd->predictor[320], 8);
- inter_mode_cost(cpi, x, this_mode, &rate2, &distortion2,
- &rate_y, &distortion, &rate_uv, &distortion_uv,
- &skippable, txfm_cache);
+ if (is_comp_pred)
+ vp8_build_2nd_inter16x16_predictors_mbuv(&x->e_mbd,
+ &xd->predictor[256],
+ &xd->predictor[320], 8);
+ inter_mode_cost(cpi, x, this_mode, &rate2, &distortion2,
+ &rate_y, &distortion, &rate_uv, &distortion_uv,
+ &skippable, txfm_cache);
+ }
if (is_comp_pred)
mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY;
else
@@ -4019,10 +4082,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// Note index of best mode so far
best_mode_index = mode_index;
-#if CONFIG_HYBRIDTRANSFORM16X16
- best_txtype = rd_txtype;
-#endif
-
if (this_mode <= B_PRED) {
if (mbmi->txfm_size != TX_4X4
&& this_mode != B_PRED
@@ -4106,7 +4165,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if (!mode_excluded && this_rd != INT64_MAX) {
for (i = 0; i < NB_TXFM_MODES; i++) {
int64_t adj_rd;
- if (this_mode != B_PRED && this_mode != SPLITMV) {
+ if (this_mode != B_PRED) {
adj_rd = this_rd + txfm_cache[i] - txfm_cache[cm->txfm_mode];
} else {
adj_rd = this_rd;
@@ -4130,7 +4189,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
else
++cpi->pred_filter_off_count;
#endif
-#if CONFIG_SWITCHABLE_INTERP
if (cpi->common.mcomp_filter_type == SWITCHABLE &&
best_mbmode.mode >= NEARESTMV &&
best_mbmode.mode <= SPLITMV) {
@@ -4138,7 +4196,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
[get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
[vp8_switchable_interp_map[best_mbmode.interp_filter]];
}
-#endif
// Reduce the activation RD thresholds for the best choice mode
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
@@ -4164,11 +4221,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
(cpi->oxcf.arnr_max_frames == 0) &&
(best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
mbmi->mode = ZEROMV;
-#if CONFIG_TX_SELECT
if (cm->txfm_mode != TX_MODE_SELECT)
mbmi->txfm_size = cm->txfm_mode;
else
-#endif
mbmi->txfm_size = TX_16X16;
mbmi->ref_frame = ALTREF_FRAME;
mbmi->mv[0].as_int = 0;
@@ -4195,11 +4250,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
}
}
-#if CONFIG_HYBRIDTRANSFORM16X16
- if (best_mbmode.mode < I8X8_PRED)
- xd->mode_info_context->bmi[0].as_mode.tx_type = best_txtype;
-#endif
-
if (best_mbmode.mode == I8X8_PRED)
set_i8x8_block_modes(x, mode8x8);
@@ -4223,7 +4273,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
best_pred_diff[i] = best_rd - best_pred_rd[i];
}
-#if CONFIG_TX_SELECT
if (!x->skip) {
for (i = 0; i < NB_TXFM_MODES; i++) {
if (best_txfm_rd[i] == INT64_MAX)
@@ -4234,7 +4283,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
} else {
vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
}
-#endif
end:
store_coding_context(x, &x->mb_context[xd->mb_index], best_mode_index, &best_partition,
@@ -4304,10 +4352,6 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
TX_SIZE txfm_size_16x16;
int i;
-#if CONFIG_HYBRIDTRANSFORM16X16
- int best_txtype;
-#endif
-
mbmi->ref_frame = INTRA_FRAME;
rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv,
&uv_intra_skippable);
@@ -4329,10 +4373,6 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
&rate16x16_tokenonly, &dist16x16,
&y_intra16x16_skippable, txfm_cache);
mode16x16 = mbmi->mode;
-#if CONFIG_HYBRIDTRANSFORM16X16
- best_txtype = xd->block[0].bmi.as_mode.tx_type;
- xd->mode_info_context->bmi[0].as_mode.tx_type = best_txtype;
-#endif
txfm_size_16x16 = mbmi->txfm_size;
// FIXME(rbultje) support transform-size selection
@@ -4373,10 +4413,8 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 1);
dist = dist16x16 + (distuv8x8 >> 2);
mbmi->txfm_size = txfm_size_16x16;
-#if CONFIG_TX_SELECT
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
-#endif
} else if (error8x8 > error16x16) {
if (error4x4 < error16x16) {
rate = rateuv;
@@ -4393,24 +4431,16 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
mbmi->mode = B_PRED;
mbmi->txfm_size = TX_4X4;
dist = dist4x4 + (distuv >> 2);
-#if CONFIG_TX_SELECT
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
-#endif
} else {
mbmi->txfm_size = txfm_size_16x16;
mbmi->mode = mode16x16;
rate = rate16x16 + rateuv8x8;
dist = dist16x16 + (distuv8x8 >> 2);
-#if CONFIG_HYBRIDTRANSFORM16X16
- // save this into supermacroblock coding decision buffer
- xd->mode_info_context->bmi[0].as_mode.tx_type = best_txtype;
-#endif
-#if CONFIG_TX_SELECT
for (i = 0; i < NB_TXFM_MODES; i++) {
x->mb_context[xd->mb_index].txfm_rd_diff[i] = error16x16 - txfm_cache[i];
}
-#endif
}
if (cpi->common.mb_no_coeff_skip)
rate += vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 0);
@@ -4430,10 +4460,8 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
mbmi->mode = B_PRED;
mbmi->txfm_size = TX_4X4;
dist = dist4x4 + (distuv >> 2);
-#if CONFIG_TX_SELECT
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
-#endif
} else {
// FIXME(rbultje) support transform-size selection
mbmi->mode = I8X8_PRED;
@@ -4441,10 +4469,8 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
set_i8x8_block_modes(x, mode8x8);
rate = rate8x8 + rateuv;
dist = dist8x8 + (distuv >> 2);
-#if CONFIG_TX_SELECT
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
-#endif
}
if (cpi->common.mb_no_coeff_skip)
rate += vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 0);
@@ -4801,8 +4827,8 @@ int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
if (threshold < x->encode_breakout)
threshold = x->encode_breakout;
- var = VARIANCE_INVOKE(&cpi->rtcd.variance, var32x32)(*(b->base_src),
- b->src_stride, xd->dst.y_buffer, xd->dst.y_stride, &sse);
+ var = vp8_variance32x32(*(b->base_src), b->src_stride,
+ xd->dst.y_buffer, xd->dst.y_stride, &sse);
if (sse < threshold) {
unsigned int q2dc = xd->block[24].dequant[0];
@@ -4812,11 +4838,9 @@ int64_t vp8_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
(sse / 2 > var && sse - var < 64)) {
// Check u and v to make sure skip is ok
unsigned int sse2, sse3;
- var += VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
- (x->src.u_buffer, x->src.uv_stride,
+ var += vp8_variance16x16(x->src.u_buffer, x->src.uv_stride,
xd->dst.u_buffer, xd->dst.uv_stride, &sse2);
- var += VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
- (x->src.v_buffer, x->src.uv_stride,
+ var += vp8_variance16x16(x->src.v_buffer, x->src.uv_stride,
xd->dst.v_buffer, xd->dst.uv_stride, &sse3);
sse2 += sse3;
if (sse2 * 2 < threshold) {
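The hunks above call the C variance helpers directly instead of going through VARIANCE_INVOKE. The encode-breakout test they feed reduces to: skip only if the 32x32 luma SSE is under the breakout threshold and both chroma planes are quiet as well (the real code additionally requires the luma error to be essentially DC-only). A simplified sketch, with the buffers and strides passed explicitly rather than pulled from MACROBLOCK; the helper name is hypothetical:

/* Simplified sketch of the encode-breakout skip test shown above. */
static int sb_breakout_ok(const unsigned char *src_y, int src_y_stride,
                          const unsigned char *dst_y, int dst_y_stride,
                          const unsigned char *src_u, const unsigned char *src_v,
                          int src_uv_stride,
                          const unsigned char *dst_u, const unsigned char *dst_v,
                          int dst_uv_stride, unsigned int threshold) {
  unsigned int sse, sse_u, sse_v;

  vp8_variance32x32(src_y, src_y_stride, dst_y, dst_y_stride, &sse);
  if (sse >= threshold)
    return 0;                              /* luma residual too large to skip */
  vp8_variance16x16(src_u, src_uv_stride, dst_u, dst_uv_stride, &sse_u);
  vp8_variance16x16(src_v, src_uv_stride, dst_v, dst_uv_stride, &sse_v);
  return (sse_u + sse_v) * 2 < threshold;  /* chroma must be quiet too */
}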
diff --git a/vp8/encoder/sad_c.c b/vp8/encoder/sad_c.c
index 2e86a16c0..f15e687c6 100644
--- a/vp8/encoder/sad_c.c
+++ b/vp8/encoder/sad_c.c
@@ -10,33 +10,10 @@
#include <stdlib.h>
+#include "vp8/common/sadmxn.h"
#include "vpx_ports/config.h"
#include "vpx/vpx_integer.h"
-static __inline
-unsigned int sad_mx_n_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int m,
- int n) {
-
- int r, c;
- unsigned int sad = 0;
-
- for (r = 0; r < n; r++) {
- for (c = 0; c < m; c++) {
- sad += abs(src_ptr[c] - ref_ptr[c]);
- }
-
- src_ptr += src_stride;
- ref_ptr += ref_stride;
- }
-
- return sad;
-}
-
unsigned int vp8_sad32x32_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
@@ -97,25 +74,6 @@ unsigned int vp8_sad4x4_c(
return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 4, 4);
}
-#if CONFIG_NEWBESTREFMV
-unsigned int vp8_sad3x16_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad){
- return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 3, 16);
-}
-unsigned int vp8_sad16x3_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad){
- return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, 16, 3);
-}
-#endif
-
void vp8_sad32x32x3_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
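The per-size SAD wrappers in this file now share a single generic kernel; the helper deleted above presumably lives on in vp8/common/sadmxn.h in essentially this form (reproduced here for reference, with abs() coming from stdlib.h):

static __inline unsigned int sad_mx_n_c(const unsigned char *src_ptr,
                                        int src_stride,
                                        const unsigned char *ref_ptr,
                                        int ref_stride, int m, int n) {
  int r, c;
  unsigned int sad = 0;

  for (r = 0; r < n; r++) {
    for (c = 0; c < m; c++)
      sad += abs(src_ptr[c] - ref_ptr[c]);   /* SAD over an m-wide row */
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }
  return sad;
}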
diff --git a/vp8/encoder/segmentation.c b/vp8/encoder/segmentation.c
index e88b80d34..e85bb45ce 100644
--- a/vp8/encoder/segmentation.c
+++ b/vp8/encoder/segmentation.c
@@ -161,8 +161,8 @@ static int cost_segmap(MACROBLOCKD *xd,
}
void choose_segmap_coding_method(VP8_COMP *cpi) {
- VP8_COMMON *const cm = & cpi->common;
- MACROBLOCKD *const xd = & cpi->mb.e_mbd;
+ VP8_COMMON *const cm = &cpi->common;
+ MACROBLOCKD *const xd = &cpi->mb.e_mbd;
int i;
int tot_count;
diff --git a/vp8/encoder/ssim.c b/vp8/encoder/ssim.c
index d3d9711dc..865496ae2 100644
--- a/vp8/encoder/ssim.c
+++ b/vp8/encoder/ssim.c
@@ -11,18 +11,10 @@
#include "onyx_int.h"
-void vp8_ssim_parms_16x16_c
-(
- unsigned char *s,
- int sp,
- unsigned char *r,
- int rp,
- unsigned long *sum_s,
- unsigned long *sum_r,
- unsigned long *sum_sq_s,
- unsigned long *sum_sq_r,
- unsigned long *sum_sxr
-) {
+void vp8_ssim_parms_16x16_c(unsigned char *s, int sp, unsigned char *r,
+ int rp, unsigned long *sum_s, unsigned long *sum_r,
+ unsigned long *sum_sq_s, unsigned long *sum_sq_r,
+ unsigned long *sum_sxr) {
int i, j;
for (i = 0; i < 16; i++, s += sp, r += rp) {
for (j = 0; j < 16; j++) {
@@ -34,18 +26,10 @@ void vp8_ssim_parms_16x16_c
}
}
}
-void vp8_ssim_parms_8x8_c
-(
- unsigned char *s,
- int sp,
- unsigned char *r,
- int rp,
- unsigned long *sum_s,
- unsigned long *sum_r,
- unsigned long *sum_sq_s,
- unsigned long *sum_sq_r,
- unsigned long *sum_sxr
-) {
+void vp8_ssim_parms_8x8_c(unsigned char *s, int sp, unsigned char *r, int rp,
+ unsigned long *sum_s, unsigned long *sum_r,
+ unsigned long *sum_sq_s, unsigned long *sum_sq_r,
+ unsigned long *sum_sxr) {
int i, j;
for (i = 0; i < 8; i++, s += sp, r += rp) {
for (j = 0; j < 8; j++) {
@@ -61,15 +45,9 @@ void vp8_ssim_parms_8x8_c
const static int64_t cc1 = 26634; // (64^2*(.01*255)^2)
const static int64_t cc2 = 239708; // (64^2*(.03*255)^2)
-static double similarity
-(
- unsigned long sum_s,
- unsigned long sum_r,
- unsigned long sum_sq_s,
- unsigned long sum_sq_r,
- unsigned long sum_sxr,
- int count
-) {
+static double similarity(unsigned long sum_s, unsigned long sum_r,
+ unsigned long sum_sq_s, unsigned long sum_sq_r,
+ unsigned long sum_sxr, int count) {
int64_t ssim_n, ssim_d;
int64_t c1, c2;
@@ -87,23 +65,22 @@ static double similarity
return ssim_n * 1.0 / ssim_d;
}
-static double ssim_16x16(unsigned char *s, int sp, unsigned char *r, int rp,
- const vp8_variance_rtcd_vtable_t *rtcd) {
+static double ssim_16x16(unsigned char *s, int sp, unsigned char *r, int rp) {
unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
- SSIMPF_INVOKE(rtcd, 16x16)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+ vp8_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+ &sum_sxr);
return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 256);
}
-static double ssim_8x8(unsigned char *s, int sp, unsigned char *r, int rp,
- const vp8_variance_rtcd_vtable_t *rtcd) {
+static double ssim_8x8(unsigned char *s, int sp, unsigned char *r, int rp) {
unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
- SSIMPF_INVOKE(rtcd, 8x8)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+ vp8_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+ &sum_sxr);
return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
}
// TODO: (jbb) tried to scale this function such that we may be able to use it
// as a distortion metric in mode selection code (provided we do a reconstruction)
-long dssim(unsigned char *s, int sp, unsigned char *r, int rp,
- const vp8_variance_rtcd_vtable_t *rtcd) {
+long dssim(unsigned char *s, int sp, unsigned char *r, int rp) {
unsigned long sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
int64_t ssim3;
int64_t ssim_n1, ssim_n2;
@@ -115,7 +92,8 @@ long dssim(unsigned char *s, int sp, unsigned char *r, int rp,
c1 = cc1 * 16;
c2 = cc2 * 16;
- SSIMPF_INVOKE(rtcd, 16x16)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+ vp8_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r,
+ &sum_sxr);
ssim_n1 = (2 * sum_s * sum_r + c1);
ssim_n2 = ((int64_t) 2 * 256 * sum_sxr - (int64_t) 2 * sum_s * sum_r + c2);
@@ -137,16 +115,8 @@ long dssim(unsigned char *s, int sp, unsigned char *r, int rp,
// We are using an 8x8 moving window, with the starting location of each window
// on the 4x4 pixel grid. Such an arrangement allows the windows to overlap
// block boundaries to penalize blocking artifacts.
-double vp8_ssim2
-(
- unsigned char *img1,
- unsigned char *img2,
- int stride_img1,
- int stride_img2,
- int width,
- int height,
- const vp8_variance_rtcd_vtable_t *rtcd
-) {
+double vp8_ssim2(unsigned char *img1, unsigned char *img2, int stride_img1,
+ int stride_img2, int width, int height) {
int i, j;
int samples = 0;
double ssim_total = 0;
@@ -154,7 +124,7 @@ double vp8_ssim2
// sample points start at each 4x4 location
for (i = 0; i < height - 8; i += 4, img1 += stride_img1 * 4, img2 += stride_img2 * 4) {
for (j = 0; j < width - 8; j += 4) {
- double v = ssim_8x8(img1 + j, stride_img1, img2 + j, stride_img2, rtcd);
+ double v = ssim_8x8(img1 + j, stride_img1, img2 + j, stride_img2);
ssim_total += v;
samples++;
}
@@ -162,28 +132,22 @@ double vp8_ssim2
ssim_total /= samples;
return ssim_total;
}
-double vp8_calc_ssim
-(
- YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- int lumamask,
- double *weight,
- const vp8_variance_rtcd_vtable_t *rtcd
-) {
+double vp8_calc_ssim(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
+ int lumamask, double *weight) {
double a, b, c;
double ssimv;
a = vp8_ssim2(source->y_buffer, dest->y_buffer,
source->y_stride, dest->y_stride, source->y_width,
- source->y_height, rtcd);
+ source->y_height);
b = vp8_ssim2(source->u_buffer, dest->u_buffer,
source->uv_stride, dest->uv_stride, source->uv_width,
- source->uv_height, rtcd);
+ source->uv_height);
c = vp8_ssim2(source->v_buffer, dest->v_buffer,
source->uv_stride, dest->uv_stride, source->uv_width,
- source->uv_height, rtcd);
+ source->uv_height);
ssimv = a * .8 + .1 * (b + c);
@@ -192,29 +156,22 @@ double vp8_calc_ssim
return ssimv;
}
-double vp8_calc_ssimg
-(
- YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- double *ssim_y,
- double *ssim_u,
- double *ssim_v,
- const vp8_variance_rtcd_vtable_t *rtcd
-) {
+double vp8_calc_ssimg(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest,
+ double *ssim_y, double *ssim_u, double *ssim_v) {
double ssim_all = 0;
double a, b, c;
a = vp8_ssim2(source->y_buffer, dest->y_buffer,
source->y_stride, dest->y_stride, source->y_width,
- source->y_height, rtcd);
+ source->y_height);
b = vp8_ssim2(source->u_buffer, dest->u_buffer,
source->uv_stride, dest->uv_stride, source->uv_width,
- source->uv_height, rtcd);
+ source->uv_height);
c = vp8_ssim2(source->v_buffer, dest->v_buffer,
source->uv_stride, dest->uv_stride, source->uv_width,
- source->uv_height, rtcd);
+ source->uv_height);
*ssim_y = a;
*ssim_u = b;
*ssim_v = c;
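With the rtcd table parameter gone, the SSIM entry points only need the two frames; vp8_calc_ssim weights the luma score by 0.8 and each chroma plane by 0.1, per the a * .8 + .1 * (b + c) line above. A minimal usage sketch, assuming source and dest are valid YV12_BUFFER_CONFIG pointers:

/* Sketch: whole-frame SSIM via the simplified API. */
double weight, ssim_y, ssim_u, ssim_v;
double frame_ssim  = vp8_calc_ssim(source, dest, 1, &weight);
double frame_ssimg = vp8_calc_ssimg(source, dest, &ssim_y, &ssim_u, &ssim_v);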
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index c72c1e7e7..2ddae1cbd 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -26,38 +26,26 @@
#ifdef ENTROPY_STATS
INT64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM
INT64 hybrid_context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
INT64 context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM8X8
INT64 hybrid_context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
INT64 context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_HYBRIDTRANSFORM16X16
INT64 hybrid_context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#endif
extern unsigned int tree_update_hist[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
-#if CONFIG_HYBRIDTRANSFORM
extern unsigned int hybrid_tree_update_hist[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
-#endif
extern unsigned int tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM8X8
extern unsigned int hybrid_tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#endif
extern unsigned int tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#if CONFIG_HYBRIDTRANSFORM16X16
extern unsigned int hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
-#endif
#endif /* ENTROPY_STATS */
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
@@ -134,9 +122,7 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
const int eob = b->eob; /* one beyond last nonzero coeff */
TOKENEXTRA *t = *tp; /* store tokens starting here */
const short *qcoeff_ptr = b->qcoeff;
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
int seg_eob = 256;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -162,21 +148,18 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
}
t->Token = x;
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
(band > 1 && type == PLANE_TYPE_Y_NO_DC));
+ assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_16x16[type][band][pt][x];
else
-#endif
++cpi->coef_counts_16x16[type][band][pt][x];
}
pt = vp8_prev_token_class[x];
@@ -309,10 +292,7 @@ static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0; /* start at DC unless type 0 */
TOKENEXTRA *t = *tp; /* store tokens starting here */
const short *qcoeff_ptr = b->qcoeff;
-#if CONFIG_HYBRIDTRANSFORM8X8
- TX_TYPE tx_type = xd->mode_info_context->mbmi.mode == I8X8_PRED ?
- get_tx_type(xd, b) : DCT_DCT;
-#endif
+ TX_TYPE tx_type = get_tx_type(xd, b);
const int eob = b->eob;
int seg_eob = 64;
int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -338,11 +318,9 @@ static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
x = DCT_EOB_TOKEN;
t->Token = x;
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
@@ -350,11 +328,9 @@ static void tokenize1st_order_b_8x8(MACROBLOCKD *xd,
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_8x8[type][band][pt][x];
else
-#endif
++cpi->coef_counts_8x8[type][band][pt][x];
}
pt = vp8_prev_token_class[x];
@@ -427,103 +403,6 @@ static void tokenize1st_order_chroma_4x4(MACROBLOCKD *xd,
}
}
-#if CONFIG_HYBRIDTRANSFORM
-static void tokenize1st_order_ht_4x4(MACROBLOCKD *xd,
- TOKENEXTRA **tp,
- PLANE_TYPE type,
- VP8_COMP *cpi,
- int dry_run) {
- unsigned int block;
- const BLOCKD *b = xd->block;
- int pt; /* near block/prev token context index */
- TOKENEXTRA *t = *tp;/* store tokens starting here */
- ENTROPY_CONTEXT * a;
- ENTROPY_CONTEXT * l;
- int const *pt_scan ;
- int seg_eob = 16;
- int segment_id = xd->mode_info_context->mbmi.segment_id;
-
- if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) ) {
- seg_eob = get_segdata( xd, segment_id, SEG_LVL_EOB );
- }
-
- /* Luma */
- for (block = 0; block < 16; block++, b++) {
- const int eob = b->eob;
- TX_TYPE tx_type = DCT_DCT;
- const int tmp1 = vp8_block2above[block];
- const int tmp2 = vp8_block2left[block];
- const int16_t *qcoeff_ptr = b->qcoeff;
- int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
-
- a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
- l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
- VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
-
- if( xd->mode_info_context->mbmi.mode == B_PRED ) {
- tx_type = get_tx_type(xd, b);
- }
-
- // assign scanning order for luma components coded in intra4x4 mode
- if ((xd->mode_info_context->mbmi.mode == B_PRED) &&
- (type == PLANE_TYPE_Y_WITH_DC)) {
- switch (tx_type) {
- case ADST_DCT:
- pt_scan = vp8_row_scan;
- break;
- case DCT_ADST:
- pt_scan = vp8_col_scan;
- break;
- default :
- pt_scan = vp8_default_zig_zag1d;
- break;
- }
- } else {
- pt_scan = vp8_default_zig_zag1d;
- }
-
- do {
- const int band = vp8_coef_bands[c];
- int token;
-
- if (c < eob) {
- const int rc = pt_scan[c];
- const int v = qcoeff_ptr[rc];
-
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
- } else
- token = DCT_EOB_TOKEN;
-
- t->Token = token;
- if (tx_type != DCT_DCT)
- t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
- else
- t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
-
- t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
- (band > 1 && type == PLANE_TYPE_Y_NO_DC));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
-
- if (!dry_run) {
- if (tx_type != DCT_DCT)
- ++cpi->hybrid_coef_counts[type][band][pt][token];
- else
- ++cpi->coef_counts [type][band][pt][token];
- }
- pt = vp8_prev_token_class[token];
- ++t;
- } while (c < eob && ++c < seg_eob);
-
- *tp = t;
- pt = (c != !type); /* 0 <-> all coeff data is zero */
- *a = *l = pt;
- }
-
- tokenize1st_order_chroma_4x4(xd, tp, cpi, dry_run);
-}
-#endif
-
static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
TOKENEXTRA **tp,
PLANE_TYPE type,
@@ -536,6 +415,7 @@ static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
ENTROPY_CONTEXT *a, *l;
int seg_eob = 16;
int segment_id = xd->mode_info_context->mbmi.segment_id;
+ int const *pt_scan = vp8_default_zig_zag1d;
if (segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
seg_eob = get_segdata(xd, segment_id, SEG_LVL_EOB);
@@ -547,6 +427,18 @@ static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
const int16_t *qcoeff_ptr = b->qcoeff;
int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
+ TX_TYPE tx_type = get_tx_type(xd, &xd->block[block]);
+ switch (tx_type) {
+ case ADST_DCT:
+ pt_scan = vp8_row_scan;
+ break;
+ case DCT_ADST:
+ pt_scan = vp8_col_scan;
+ break;
+      default:
+ pt_scan = vp8_default_zig_zag1d;
+ break;
+ }
a = (ENTROPY_CONTEXT *)xd->above_context + vp8_block2above[block];
l = (ENTROPY_CONTEXT *)xd->left_context + vp8_block2left[block];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
@@ -558,7 +450,7 @@ static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
int token;
if (c < eob) {
- const int rc = vp8_default_zig_zag1d[c];
+ const int rc = pt_scan[c];
const int v = qcoeff_ptr[rc];
t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
@@ -567,13 +459,20 @@ static void tokenize1st_order_b_4x4(MACROBLOCKD *xd,
token = DCT_EOB_TOKEN;
t->Token = token;
- t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
+ if (tx_type != DCT_DCT)
+ t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
+ else
+ t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
t->skip_eob_node = pt == 0 && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
(band > 1 && type == PLANE_TYPE_Y_NO_DC));
assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
- if (!dry_run)
- ++cpi->coef_counts[type][band][pt][token];
+ if (!dry_run) {
+ if (tx_type != DCT_DCT)
+ ++cpi->hybrid_coef_counts[type][band][pt][token];
+ else
+ ++cpi->coef_counts[type][band][pt][token];
+ }
pt = vp8_prev_token_class[token];
++t;
} while (c < eob && ++c < seg_eob);
@@ -674,12 +573,6 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
int skip_inc;
int segment_id = xd->mode_info_context->mbmi.segment_id;
-#if CONFIG_HYBRIDTRANSFORM
- int QIndex = cpi->mb.q_index;
- int active_ht = (QIndex < ACTIVE_HT) &&
- (xd->mode_info_context->mbmi.mode == B_PRED);
-#endif
-
if (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
(get_segdata(xd, segment_id, SEG_LVL_EOB) != 0)) {
skip_inc = 1;
@@ -696,7 +589,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_16x16(xd);
break;
case TX_8X8:
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
+ if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
+ xd->mode_info_context->mbmi.mode == SPLITMV)
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8_4x4uv(xd, 0);
else
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd, has_y2_block);
@@ -745,17 +639,15 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
tokenize1st_order_b_16x16(xd, xd->block, t, PLANE_TYPE_Y_WITH_DC,
A, L, cpi, dry_run);
-      for (b = 1; b < 16; b++) {
-        *(A + vp8_block2above[b]) = *(A);
-        *(L + vp8_block2left[b] ) = *(L);
-      }
+      A[1] = A[2] = A[3] = A[0];
+      L[1] = L[2] = L[3] = L[0];
for (b = 16; b < 24; b += 4) {
tokenize1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_UV,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b], cpi, dry_run);
- *(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
vpx_memset(&A[8], 0, sizeof(A[8]));
vpx_memset(&L[8], 0, sizeof(L[8]));
@@ -769,27 +661,23 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
+ if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
+ xd->mode_info_context->mbmi.mode == SPLITMV) {
tokenize1st_order_chroma_4x4(xd, t, cpi, dry_run);
} else {
for (b = 16; b < 24; b += 4) {
tokenize1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_UV,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b], cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
}
} else {
-#if CONFIG_HYBRIDTRANSFORM
- if (active_ht)
- tokenize1st_order_ht_4x4(xd, t, plane_type, cpi, dry_run);
- else
-#endif
- tokenize1st_order_b_4x4(xd, t, plane_type, cpi, dry_run);
+ tokenize1st_order_b_4x4(xd, t, plane_type, cpi, dry_run);
}
if (dry_run)
*t = t_backup;
@@ -1077,31 +965,24 @@ static __inline void stuff1st_order_b_8x8(MACROBLOCKD *xd,
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM8X8
- TX_TYPE tx_type = xd->mode_info_context->mbmi.mode == I8X8_PRED ?
- get_tx_type(xd, b) : DCT_DCT;
-#endif
+ TX_TYPE tx_type = get_tx_type(xd, b);
const int band = vp8_coef_bands_8x8[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
(void) b;
t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_8x8[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_8x8[type][band][pt];
// t->section = 8;
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM8X8
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
else
-#endif
++cpi->coef_counts_8x8[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
@@ -1157,8 +1038,8 @@ static void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
for (b = 16; b < 24; b += 4) {
@@ -1166,8 +1047,8 @@ static void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
if (dry_run)
*t = t_backup;
@@ -1183,29 +1064,23 @@ static __inline void stuff1st_order_b_16x16(MACROBLOCKD *xd,
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
const int band = vp8_coef_bands_16x16[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
(void) b;
t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs_16x16[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs_16x16[type][band][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM16X16
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
else
-#endif
++cpi->coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
@@ -1221,17 +1096,15 @@ static void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
stuff1st_order_b_16x16(xd, xd->block, t, PLANE_TYPE_Y_WITH_DC,
A, L, cpi, dry_run);
- for (i = 1; i < 16; i++) {
- *(A + vp8_block2above[i]) = *(A);
- *(L + vp8_block2left[i]) = *(L);
- }
+ A[1] = A[2] = A[3] = A[0];
+ L[1] = L[2] = L[3] = L[0];
for (b = 16; b < 24; b += 4) {
stuff1st_order_buv_8x8(xd, xd->block + b, t,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
- *(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
vpx_memset(&A[8], 0, sizeof(A[8]));
vpx_memset(&L[8], 0, sizeof(L[8]));
@@ -1272,28 +1145,22 @@ static __inline void stuff1st_order_b_4x4(MACROBLOCKD *xd,
int dry_run) {
int pt; /* near block/prev token context index */
TOKENEXTRA *t = *tp; /* store tokens starting here */
-#if CONFIG_HYBRIDTRANSFORM
TX_TYPE tx_type = get_tx_type(xd, b);
-#endif
const int band = vp8_coef_bands[(type == PLANE_TYPE_Y_NO_DC) ? 1 : 0];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
t->Token = DCT_EOB_TOKEN;
-#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
t->context_tree = cpi->common.fc.hybrid_coef_probs[type][band][pt];
else
-#endif
t->context_tree = cpi->common.fc.coef_probs[type][band][pt];
t->skip_eob_node = 0;
++t;
*tp = t;
if (!dry_run) {
-#if CONFIG_HYBRIDTRANSFORM
if (tx_type != DCT_DCT)
++cpi->hybrid_coef_counts[type][band][pt][DCT_EOB_TOKEN];
else
-#endif
++cpi->coef_counts[type][band][pt][DCT_EOB_TOKEN];
}
pt = 0; /* 0 <-> all coeff data is zero */
@@ -1371,8 +1238,8 @@ static void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
for (b = 16; b < 24; b++)
@@ -1391,7 +1258,8 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
if (tx_size == TX_16X16) {
vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
} else if (tx_size == TX_8X8) {
- if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
+ if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
+ xd->mode_info_context->mbmi.mode == SPLITMV) {
vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
} else {
vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
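The 4x4 tokenizer above now picks its coefficient scan from the block's transform type, the same mapping the deleted hybrid-transform tokenizer used. Expressed as a small stand-alone helper (the helper name is hypothetical; the patch inlines the switch):

/* Hypothetical helper: scan order for a 4x4 block's transform type. */
static const int *pick_scan_4x4(TX_TYPE tx_type) {
  switch (tx_type) {
    case ADST_DCT: return vp8_row_scan;
    case DCT_ADST: return vp8_col_scan;
    default:       return vp8_default_zig_zag1d;
  }
}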
diff --git a/vp8/encoder/variance.h b/vp8/encoder/variance.h
index a2fadfc4c..cdeb390c3 100644
--- a/vp8/encoder/variance.h
+++ b/vp8/encoder/variance.h
@@ -12,507 +12,73 @@
#ifndef VARIANCE_H
#define VARIANCE_H
-#include "vpx_config.h"
-
-#define prototype_sad(sym)\
- unsigned int (sym)\
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride, \
- int max_sad\
- )
-
-#define prototype_sad_multi_same_address(sym)\
- void (sym)\
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride, \
- unsigned int *sad_array\
- )
-
-#define prototype_sad_multi_same_address_1(sym)\
- void (sym)\
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride, \
- unsigned short *sad_array\
- )
-
-#define prototype_sad_multi_dif_address(sym)\
- void (sym)\
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- unsigned char *ref_ptr[4], \
- int ref_stride, \
- unsigned int *sad_array\
- )
-
-#define prototype_variance(sym) \
- unsigned int (sym) \
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride, \
- unsigned int *sse\
- )
-
-#define prototype_variance2(sym) \
- unsigned int (sym) \
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride, \
- unsigned int *sse,\
- int *sum\
- )
-
-#define prototype_subpixvariance(sym) \
- unsigned int (sym) \
- ( \
- const unsigned char *src_ptr, \
- int source_stride, \
- int xoffset, \
- int yoffset, \
- const unsigned char *ref_ptr, \
- int Refstride, \
- unsigned int *sse \
- );
-
-#define prototype_ssimpf(sym) \
- void (sym) \
- ( \
- unsigned char *s, \
- int sp, \
- unsigned char *r, \
- int rp, \
- unsigned long *sum_s, \
- unsigned long *sum_r, \
- unsigned long *sum_sq_s, \
- unsigned long *sum_sq_r, \
- unsigned long *sum_sxr \
- );
-
-#define prototype_getmbss(sym) unsigned int (sym)(const short *)
-
-#define prototype_get16x16prederror(sym)\
- unsigned int (sym)\
- (\
- const unsigned char *src_ptr, \
- int source_stride, \
- const unsigned char *ref_ptr, \
- int ref_stride \
- )
-
-#if ARCH_X86 || ARCH_X86_64
-#include "x86/variance_x86.h"
-#endif
-
-#if ARCH_ARM
-#include "arm/variance_arm.h"
-#endif
-
-#ifndef vp8_variance_sad4x4
-#define vp8_variance_sad4x4 vp8_sad4x4_c
-#endif
-extern prototype_sad(vp8_variance_sad4x4);
-
-#ifndef vp8_variance_sad8x8
-#define vp8_variance_sad8x8 vp8_sad8x8_c
-#endif
-extern prototype_sad(vp8_variance_sad8x8);
-
-#ifndef vp8_variance_sad8x16
-#define vp8_variance_sad8x16 vp8_sad8x16_c
-#endif
-extern prototype_sad(vp8_variance_sad8x16);
-
-#ifndef vp8_variance_sad16x8
-#define vp8_variance_sad16x8 vp8_sad16x8_c
-#endif
-extern prototype_sad(vp8_variance_sad16x8);
-
-#ifndef vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_c
-#endif
-extern prototype_sad(vp8_variance_sad16x16);
-
-#ifndef vp8_variance_sad32x32
-#define vp8_variance_sad32x32 vp8_sad32x32_c
-#endif
-extern prototype_sad(vp8_variance_sad32x32);
-
-// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-#ifndef vp8_variance_sad32x32x3
-#define vp8_variance_sad32x32x3 vp8_sad32x32x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad32x32x3);
-
-#ifndef vp8_variance_sad16x16x3
-#define vp8_variance_sad16x16x3 vp8_sad16x16x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad16x16x3);
-
-#ifndef vp8_variance_sad16x8x3
-#define vp8_variance_sad16x8x3 vp8_sad16x8x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad16x8x3);
-
-#ifndef vp8_variance_sad8x8x3
-#define vp8_variance_sad8x8x3 vp8_sad8x8x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad8x8x3);
-
-#ifndef vp8_variance_sad8x16x3
-#define vp8_variance_sad8x16x3 vp8_sad8x16x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad8x16x3);
-
-#ifndef vp8_variance_sad4x4x3
-#define vp8_variance_sad4x4x3 vp8_sad4x4x3_c
-#endif
-extern prototype_sad_multi_same_address(vp8_variance_sad4x4x3);
-
-#ifndef vp8_variance_sad32x32x8
-#define vp8_variance_sad32x32x8 vp8_sad32x32x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad32x32x8);
-
-#ifndef vp8_variance_sad16x16x8
-#define vp8_variance_sad16x16x8 vp8_sad16x16x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad16x16x8);
-
-#ifndef vp8_variance_sad16x8x8
-#define vp8_variance_sad16x8x8 vp8_sad16x8x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad16x8x8);
-
-#ifndef vp8_variance_sad8x8x8
-#define vp8_variance_sad8x8x8 vp8_sad8x8x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad8x8x8);
-
-#ifndef vp8_variance_sad8x16x8
-#define vp8_variance_sad8x16x8 vp8_sad8x16x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad8x16x8);
-
-#ifndef vp8_variance_sad4x4x8
-#define vp8_variance_sad4x4x8 vp8_sad4x4x8_c
-#endif
-extern prototype_sad_multi_same_address_1(vp8_variance_sad4x4x8);
-
-// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-#ifndef vp8_variance_sad32x32x4d
-#define vp8_variance_sad32x32x4d vp8_sad32x32x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad32x32x4d);
-
-#ifndef vp8_variance_sad16x16x4d
-#define vp8_variance_sad16x16x4d vp8_sad16x16x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad16x16x4d);
-
-#ifndef vp8_variance_sad16x8x4d
-#define vp8_variance_sad16x8x4d vp8_sad16x8x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad16x8x4d);
-
-#ifndef vp8_variance_sad8x8x4d
-#define vp8_variance_sad8x8x4d vp8_sad8x8x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad8x8x4d);
-
-#ifndef vp8_variance_sad8x16x4d
-#define vp8_variance_sad8x16x4d vp8_sad8x16x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad8x16x4d);
-
-#ifndef vp8_variance_sad4x4x4d
-#define vp8_variance_sad4x4x4d vp8_sad4x4x4d_c
-#endif
-extern prototype_sad_multi_dif_address(vp8_variance_sad4x4x4d);
-
-#if ARCH_X86 || ARCH_X86_64
-#ifndef vp8_variance_copy32xn
-#define vp8_variance_copy32xn vp8_copy32xn_c
-#endif
-extern prototype_sad(vp8_variance_copy32xn);
-#endif
-
-// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-#ifndef vp8_variance_var4x4
-#define vp8_variance_var4x4 vp8_variance4x4_c
-#endif
-extern prototype_variance(vp8_variance_var4x4);
-
-#ifndef vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_c
-#endif
-extern prototype_variance(vp8_variance_var8x8);
-
-#ifndef vp8_variance_var8x16
-#define vp8_variance_var8x16 vp8_variance8x16_c
-#endif
-extern prototype_variance(vp8_variance_var8x16);
-
-#ifndef vp8_variance_var16x8
-#define vp8_variance_var16x8 vp8_variance16x8_c
-#endif
-extern prototype_variance(vp8_variance_var16x8);
-
-#ifndef vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_c
-#endif
-extern prototype_variance(vp8_variance_var16x16);
-
-#ifndef vp8_variance_var32x32
-#define vp8_variance_var32x32 vp8_variance32x32_c
-#endif
-extern prototype_variance(vp8_variance_var32x32);
-
-// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-#ifndef vp8_variance_subpixvar4x4
-#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar4x4);
-
-#ifndef vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar8x8);
-
-#ifndef vp8_variance_subpixvar8x16
-#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar8x16);
-
-#ifndef vp8_variance_subpixvar16x8
-#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar16x8);
-
-#ifndef vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar16x16);
-
-#ifndef vp8_variance_subpixvar32x32
-#define vp8_variance_subpixvar32x32 vp8_sub_pixel_variance32x32_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixvar32x32);
-
-#ifndef vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_c
-#endif
-extern prototype_variance(vp8_variance_halfpixvar16x16_h);
-
-#ifndef vp8_variance_halfpixvar32x32_h
-#define vp8_variance_halfpixvar32x32_h vp8_variance_halfpixvar32x32_h_c
-#endif
-extern prototype_variance(vp8_variance_halfpixvar32x32_h);
-
-#ifndef vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_c
-#endif
-extern prototype_variance(vp8_variance_halfpixvar16x16_v);
-
-#ifndef vp8_variance_halfpixvar32x32_v
-#define vp8_variance_halfpixvar32x32_v vp8_variance_halfpixvar32x32_v_c
-#endif
-extern prototype_variance(vp8_variance_halfpixvar32x32_v);
-
-#ifndef vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_c
-#endif
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv);
-
-#ifndef vp8_variance_halfpixvar32x32_hv
-#define vp8_variance_halfpixvar32x32_hv vp8_variance_halfpixvar32x32_hv_c
-#endif
-extern prototype_variance(vp8_variance_halfpixvar32x32_hv);
-
-#ifndef vp8_variance_subpixmse16x16
-#define vp8_variance_subpixmse16x16 vp8_sub_pixel_mse16x16_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixmse16x16);
-
-#ifndef vp8_variance_subpixmse32x32
-#define vp8_variance_subpixmse32x32 vp8_sub_pixel_mse32x32_c
-#endif
-extern prototype_subpixvariance(vp8_variance_subpixmse32x32);
-
-// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-#ifndef vp8_variance_getmbss
-#define vp8_variance_getmbss vp8_get_mb_ss_c
-#endif
-extern prototype_getmbss(vp8_variance_getmbss);
-
-#ifndef vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_c
-#endif
-extern prototype_variance(vp8_variance_mse16x16);
-
-#ifndef vp8_ssimpf_8x8
-#define vp8_ssimpf_8x8 vp8_ssim_parms_8x8_c
-#endif
-extern prototype_ssimpf(vp8_ssimpf_8x8)
-
-#ifndef vp8_ssimpf_16x16
-#define vp8_ssimpf_16x16 vp8_ssim_parms_16x16_c
-#endif
-extern prototype_ssimpf(vp8_ssimpf_16x16)
-
-#ifndef vp8_variance_satd16x16
-#define vp8_variance_satd16x16 vp8_satd16x16_c
-#endif
-extern prototype_variance(vp8_variance_satd16x16);
-
-typedef prototype_sad(*vp8_sad_fn_t);
-typedef prototype_sad_multi_same_address(*vp8_sad_multi_fn_t);
-typedef prototype_sad_multi_same_address_1(*vp8_sad_multi1_fn_t);
-typedef prototype_sad_multi_dif_address(*vp8_sad_multi_d_fn_t);
-typedef prototype_variance(*vp8_variance_fn_t);
-typedef prototype_variance2(*vp8_variance2_fn_t);
-typedef prototype_subpixvariance(*vp8_subpixvariance_fn_t);
-typedef prototype_getmbss(*vp8_getmbss_fn_t);
-typedef prototype_ssimpf(*vp8_ssimpf_fn_t);
-typedef prototype_get16x16prederror(*vp8_get16x16prederror_fn_t);
-
-typedef struct {
- vp8_sad_fn_t sad4x4;
- vp8_sad_fn_t sad8x8;
- vp8_sad_fn_t sad8x16;
- vp8_sad_fn_t sad16x8;
- vp8_sad_fn_t sad16x16;
-#if CONFIG_SUPERBLOCKS
- vp8_sad_fn_t sad32x32;
-#endif
-
- vp8_variance_fn_t var4x4;
- vp8_variance_fn_t var8x8;
- vp8_variance_fn_t var8x16;
- vp8_variance_fn_t var16x8;
- vp8_variance_fn_t var16x16;
-#if CONFIG_SUPERBLOCKS
- vp8_variance_fn_t var32x32;
-#endif
-
- vp8_subpixvariance_fn_t subpixvar4x4;
- vp8_subpixvariance_fn_t subpixvar8x8;
- vp8_subpixvariance_fn_t subpixvar8x16;
- vp8_subpixvariance_fn_t subpixvar16x8;
- vp8_subpixvariance_fn_t subpixvar16x16;
-#if CONFIG_SUPERBLOCKS
- vp8_subpixvariance_fn_t subpixvar32x32;
-#endif
- vp8_variance_fn_t halfpixvar16x16_h;
- vp8_variance_fn_t halfpixvar32x32_h;
- vp8_variance_fn_t halfpixvar16x16_v;
-#if CONFIG_SUPERBLOCKS
- vp8_variance_fn_t halfpixvar32x32_v;
-#endif
- vp8_variance_fn_t halfpixvar16x16_hv;
-#if CONFIG_SUPERBLOCKS
- vp8_variance_fn_t halfpixvar32x32_hv;
-#endif
- vp8_subpixvariance_fn_t subpixmse16x16;
-#if CONFIG_SUPERBLOCKS
- vp8_subpixvariance_fn_t subpixmse32x32;
-#endif
-
- vp8_getmbss_fn_t getmbss;
- vp8_variance_fn_t mse16x16;
-
-#if CONFIG_SUPERBLOCKS
- vp8_sad_multi_fn_t sad32x32x3;
-#endif
- vp8_sad_multi_fn_t sad16x16x3;
- vp8_sad_multi_fn_t sad16x8x3;
- vp8_sad_multi_fn_t sad8x16x3;
- vp8_sad_multi_fn_t sad8x8x3;
- vp8_sad_multi_fn_t sad4x4x3;
-
-#if CONFIG_SUPERBLOCKS
- vp8_sad_multi1_fn_t sad32x32x8;
-#endif
- vp8_sad_multi1_fn_t sad16x16x8;
- vp8_sad_multi1_fn_t sad16x8x8;
- vp8_sad_multi1_fn_t sad8x16x8;
- vp8_sad_multi1_fn_t sad8x8x8;
- vp8_sad_multi1_fn_t sad4x4x8;
-
-#if CONFIG_SUPERBLOCKS
- vp8_sad_multi_d_fn_t sad32x32x4d;
-#endif
- vp8_sad_multi_d_fn_t sad16x16x4d;
- vp8_sad_multi_d_fn_t sad16x8x4d;
- vp8_sad_multi_d_fn_t sad8x16x4d;
- vp8_sad_multi_d_fn_t sad8x8x4d;
- vp8_sad_multi_d_fn_t sad4x4x4d;
-
-#if ARCH_X86 || ARCH_X86_64
- vp8_sad_fn_t copy32xn;
-#endif
-
-#if CONFIG_INTERNAL_STATS
- vp8_ssimpf_fn_t ssimpf_8x8;
- vp8_ssimpf_fn_t ssimpf_16x16;
-#endif
-
- vp8_variance_fn_t satd16x16;
-} vp8_variance_rtcd_vtable_t;
-
-typedef struct {
- vp8_sad_fn_t sdf;
- vp8_variance_fn_t vf;
- vp8_subpixvariance_fn_t svf;
- vp8_variance_fn_t svf_halfpix_h;
- vp8_variance_fn_t svf_halfpix_v;
- vp8_variance_fn_t svf_halfpix_hv;
- vp8_sad_multi_fn_t sdx3f;
- vp8_sad_multi1_fn_t sdx8f;
- vp8_sad_multi_d_fn_t sdx4df;
-#if ARCH_X86 || ARCH_X86_64
- vp8_sad_fn_t copymem;
-#endif
+typedef unsigned int(*vp8_sad_fn_t)(const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned int max_sad);
+
+typedef void (*vp8_copy32xn_fn_t)(const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ int n);
+
+typedef void (*vp8_sad_multi_fn_t)(const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned int *sad_array);
+
+typedef void (*vp8_sad_multi1_fn_t)(const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned short *sad_array);
+
+typedef void (*vp8_sad_multi_d_fn_t)(const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char * const ref_ptr[],
+ int ref_stride, unsigned int *sad_array);
+
+typedef unsigned int (*vp8_variance_fn_t)(const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride,
+ unsigned int *sse);
+
+typedef unsigned int (*vp8_subpixvariance_fn_t)(const unsigned char *src_ptr,
+ int source_stride,
+ int xoffset,
+ int yoffset,
+ const unsigned char *ref_ptr,
+ int Refstride,
+ unsigned int *sse);
+
+typedef void (*vp8_ssimpf_fn_t)(unsigned char *s, int sp, unsigned char *r,
+ int rp, unsigned long *sum_s,
+ unsigned long *sum_r, unsigned long *sum_sq_s,
+ unsigned long *sum_sq_r,
+ unsigned long *sum_sxr);
+
+typedef unsigned int (*vp8_getmbss_fn_t)(const short *);
+
+typedef unsigned int (*vp8_get16x16prederror_fn_t)(const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int ref_stride);
+
+typedef struct variance_vtable {
+ vp8_sad_fn_t sdf;
+ vp8_variance_fn_t vf;
+ vp8_subpixvariance_fn_t svf;
+ vp8_variance_fn_t svf_halfpix_h;
+ vp8_variance_fn_t svf_halfpix_v;
+ vp8_variance_fn_t svf_halfpix_hv;
+ vp8_sad_multi_fn_t sdx3f;
+ vp8_sad_multi1_fn_t sdx8f;
+ vp8_sad_multi_d_fn_t sdx4df;
+ vp8_copy32xn_fn_t copymem;
} vp8_variance_fn_ptr_t;
-#if CONFIG_RUNTIME_CPU_DETECT
-#define VARIANCE_INVOKE(ctx,fn) (ctx)->fn
-#define SSIMPF_INVOKE(ctx,fn) (ctx)->ssimpf_##fn
-#else
-#define VARIANCE_INVOKE(ctx,fn) vp8_variance_##fn
-#define SSIMPF_INVOKE(ctx,fn) vp8_ssimpf_##fn
-#endif
-
-#if CONFIG_NEWBESTREFMV
-unsigned int vp8_sad2x16_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad);
-unsigned int vp8_sad16x2_c(
- const unsigned char *src_ptr,
- int src_stride,
- const unsigned char *ref_ptr,
- int ref_stride,
- int max_sad);
-#endif
-
#endif
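All of the removed per-function macro indirection collapses into the vp8_variance_fn_ptr_t table above, which motion search consumes one block size at a time. A sketch of filling a 16x16 entry with the plain C implementations named in the old defaults and calling through it; the signatures are assumed to line up with the new typedefs, and src/ref pointers and strides are assumed to be in scope:

vp8_variance_fn_ptr_t fn16x16;
unsigned int sse;

fn16x16.sdf            = vp8_sad16x16_c;
fn16x16.vf             = vp8_variance16x16_c;
fn16x16.svf            = vp8_sub_pixel_variance16x16_c;
fn16x16.svf_halfpix_h  = vp8_variance_halfpixvar16x16_h_c;
fn16x16.svf_halfpix_v  = vp8_variance_halfpixvar16x16_v_c;
fn16x16.svf_halfpix_hv = vp8_variance_halfpixvar16x16_hv_c;
fn16x16.sdx3f          = vp8_sad16x16x3_c;
fn16x16.sdx8f          = vp8_sad16x16x8_c;
fn16x16.sdx4df         = vp8_sad16x16x4d_c;

/* Motion search then calls through the table, e.g. full-pel variance: */
fn16x16.vf(src, src_stride, ref, ref_stride, &sse);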
diff --git a/vp8/encoder/x86/variance_x86.h b/vp8/encoder/x86/variance_x86.h
deleted file mode 100644
index 0971f11b0..000000000
--- a/vp8/encoder/x86/variance_x86.h
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#ifndef VARIANCE_X86_H
-#define VARIANCE_X86_H
-
-
-/* Note:
- *
- * This platform is commonly built for runtime CPU detection. If you modify
- * any of the function mappings present in this file, be sure to also update
- * them in the function pointer initialization code
- */
-#if HAVE_MMX
-extern prototype_sad(vp8_sad4x4_mmx);
-extern prototype_sad(vp8_sad8x8_mmx);
-extern prototype_sad(vp8_sad8x16_mmx);
-extern prototype_sad(vp8_sad16x8_mmx);
-extern prototype_sad(vp8_sad16x16_mmx);
-extern prototype_variance(vp8_variance4x4_mmx);
-extern prototype_variance(vp8_variance8x8_mmx);
-extern prototype_variance(vp8_variance8x16_mmx);
-extern prototype_variance(vp8_variance16x8_mmx);
-extern prototype_variance(vp8_variance16x16_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_mmx);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_mmx);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_mmx);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_mmx);
-extern prototype_subpixvariance(vp8_sub_pixel_mse16x16_mmx);
-extern prototype_getmbss(vp8_get_mb_ss_mmx);
-extern prototype_variance(vp8_mse16x16_mmx);
-extern prototype_variance2(vp8_get8x8var_mmx);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_variance_sad4x4
-#define vp8_variance_sad4x4 vp8_sad4x4_mmx
-
-#undef vp8_variance_sad8x8
-#define vp8_variance_sad8x8 vp8_sad8x8_mmx
-
-#undef vp8_variance_sad8x16
-#define vp8_variance_sad8x16 vp8_sad8x16_mmx
-
-#undef vp8_variance_sad16x8
-#define vp8_variance_sad16x8 vp8_sad16x8_mmx
-
-#undef vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_mmx
-
-#undef vp8_variance_var4x4
-#define vp8_variance_var4x4 vp8_variance4x4_mmx
-
-#undef vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_mmx
-
-#undef vp8_variance_var8x16
-#define vp8_variance_var8x16 vp8_variance8x16_mmx
-
-#undef vp8_variance_var16x8
-#define vp8_variance_var16x8 vp8_variance16x8_mmx
-
-#undef vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_mmx
-
-#undef vp8_variance_subpixvar4x4
-#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_mmx
-
-#undef vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_mmx
-
-#undef vp8_variance_subpixvar8x16
-#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_mmx
-
-#undef vp8_variance_subpixvar16x8
-#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_mmx
-
-#undef vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_mmx
-
-#undef vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_mmx
-
-#undef vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_mmx
-
-#undef vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_mmx
-
-#undef vp8_variance_subpixmse16x16
-#define vp8_variance_subpixmse16x16 vp8_sub_pixel_mse16x16_mmx
-
-#undef vp8_variance_getmbss
-#define vp8_variance_getmbss vp8_get_mb_ss_mmx
-
-#undef vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_mmx
-
-#endif
-#endif
-
-
-#if HAVE_SSE2
-extern prototype_sad(vp8_sad4x4_wmt);
-extern prototype_sad(vp8_sad8x8_wmt);
-extern prototype_sad(vp8_sad8x16_wmt);
-extern prototype_sad(vp8_sad16x8_wmt);
-extern prototype_sad(vp8_sad16x16_wmt);
-extern prototype_sad(vp8_copy32xn_sse2);
-extern prototype_variance(vp8_variance4x4_wmt);
-extern prototype_variance(vp8_variance8x8_wmt);
-extern prototype_variance(vp8_variance8x16_wmt);
-extern prototype_variance(vp8_variance16x8_wmt);
-extern prototype_variance(vp8_variance16x16_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_wmt);
-extern prototype_variance(vp8_variance_halfpixvar16x16_h_wmt);
-extern prototype_variance(vp8_variance_halfpixvar16x16_v_wmt);
-extern prototype_variance(vp8_variance_halfpixvar16x16_hv_wmt);
-extern prototype_subpixvariance(vp8_sub_pixel_mse16x16_wmt);
-extern prototype_getmbss(vp8_get_mb_ss_sse2);
-extern prototype_variance(vp8_mse16x16_wmt);
-extern prototype_variance2(vp8_get8x8var_sse2);
-extern prototype_variance2(vp8_get16x16var_sse2);
-extern prototype_ssimpf(vp8_ssim_parms_8x8_sse2)
-extern prototype_ssimpf(vp8_ssim_parms_16x16_sse2)
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_variance_sad4x4
-#define vp8_variance_sad4x4 vp8_sad4x4_wmt
-
-#undef vp8_variance_sad8x8
-#define vp8_variance_sad8x8 vp8_sad8x8_wmt
-
-#undef vp8_variance_sad8x16
-#define vp8_variance_sad8x16 vp8_sad8x16_wmt
-
-#undef vp8_variance_sad16x8
-#define vp8_variance_sad16x8 vp8_sad16x8_wmt
-
-#undef vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_wmt
-
-#undef vp8_variance_copy32xn
-#define vp8_variance_copy32xn vp8_copy32xn_sse2
-
-#undef vp8_variance_var4x4
-#define vp8_variance_var4x4 vp8_variance4x4_wmt
-
-#undef vp8_variance_var8x8
-#define vp8_variance_var8x8 vp8_variance8x8_wmt
-
-#undef vp8_variance_var8x16
-#define vp8_variance_var8x16 vp8_variance8x16_wmt
-
-#undef vp8_variance_var16x8
-#define vp8_variance_var16x8 vp8_variance16x8_wmt
-
-#undef vp8_variance_var16x16
-#define vp8_variance_var16x16 vp8_variance16x16_wmt
-
-#undef vp8_variance_subpixvar4x4
-#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_wmt
-
-#undef vp8_variance_subpixvar8x8
-#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_wmt
-
-#undef vp8_variance_subpixvar8x16
-#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_wmt
-
-#undef vp8_variance_subpixvar16x8
-#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_wmt
-
-#undef vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_wmt
-
-#undef vp8_variance_halfpixvar16x16_h
-#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_wmt
-
-#undef vp8_variance_halfpixvar16x16_v
-#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_wmt
-
-#undef vp8_variance_halfpixvar16x16_hv
-#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_wmt
-
-#undef vp8_variance_subpixmse16x16
-#define vp8_variance_subpixmse16x16 vp8_sub_pixel_mse16x16_wmt
-
-#undef vp8_variance_getmbss
-#define vp8_variance_getmbss vp8_get_mb_ss_sse2
-
-#undef vp8_variance_mse16x16
-#define vp8_variance_mse16x16 vp8_mse16x16_wmt
-
-#if ARCH_X86_64
-#undef vp8_ssimpf_8x8
-#define vp8_ssimpf_8x8 vp8_ssim_parms_8x8_sse2
-
-#undef vp8_ssimpf_16x16
-#define vp8_ssimpf_16x16 vp8_ssim_parms_16x16_sse2
-#endif
-
-#endif
-#endif
-
-
-#if HAVE_SSE3
-extern prototype_sad(vp8_sad16x16_sse3);
-extern prototype_sad(vp8_sad16x8_sse3);
-extern prototype_sad_multi_same_address(vp8_sad16x16x3_sse3);
-extern prototype_sad_multi_same_address(vp8_sad16x8x3_sse3);
-extern prototype_sad_multi_same_address(vp8_sad8x16x3_sse3);
-extern prototype_sad_multi_same_address(vp8_sad8x8x3_sse3);
-extern prototype_sad_multi_same_address(vp8_sad4x4x3_sse3);
-
-extern prototype_sad_multi_dif_address(vp8_sad16x16x4d_sse3);
-extern prototype_sad_multi_dif_address(vp8_sad16x8x4d_sse3);
-extern prototype_sad_multi_dif_address(vp8_sad8x16x4d_sse3);
-extern prototype_sad_multi_dif_address(vp8_sad8x8x4d_sse3);
-extern prototype_sad_multi_dif_address(vp8_sad4x4x4d_sse3);
-extern prototype_sad(vp8_copy32xn_sse3);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-
-#undef vp8_variance_sad16x16
-#define vp8_variance_sad16x16 vp8_sad16x16_sse3
-
-#undef vp8_variance_sad16x16x3
-#define vp8_variance_sad16x16x3 vp8_sad16x16x3_sse3
-
-#undef vp8_variance_sad16x8x3
-#define vp8_variance_sad16x8x3 vp8_sad16x8x3_sse3
-
-#undef vp8_variance_sad8x16x3
-#define vp8_variance_sad8x16x3 vp8_sad8x16x3_sse3
-
-#undef vp8_variance_sad8x8x3
-#define vp8_variance_sad8x8x3 vp8_sad8x8x3_sse3
-
-#undef vp8_variance_sad4x4x3
-#define vp8_variance_sad4x4x3 vp8_sad4x4x3_sse3
-
-#undef vp8_variance_sad16x16x4d
-#define vp8_variance_sad16x16x4d vp8_sad16x16x4d_sse3
-
-#undef vp8_variance_sad16x8x4d
-#define vp8_variance_sad16x8x4d vp8_sad16x8x4d_sse3
-
-#undef vp8_variance_sad8x16x4d
-#define vp8_variance_sad8x16x4d vp8_sad8x16x4d_sse3
-
-#undef vp8_variance_sad8x8x4d
-#define vp8_variance_sad8x8x4d vp8_sad8x8x4d_sse3
-
-#undef vp8_variance_sad4x4x4d
-#define vp8_variance_sad4x4x4d vp8_sad4x4x4d_sse3
-
-#undef vp8_variance_copy32xn
-#define vp8_variance_copy32xn vp8_copy32xn_sse3
-
-#endif
-#endif
-
-
-#if HAVE_SSSE3
-extern prototype_sad_multi_same_address(vp8_sad16x16x3_ssse3);
-extern prototype_sad_multi_same_address(vp8_sad16x8x3_ssse3);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_ssse3);
-extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_ssse3);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_variance_sad16x16x3
-#define vp8_variance_sad16x16x3 vp8_sad16x16x3_ssse3
-
-#undef vp8_variance_sad16x8x3
-#define vp8_variance_sad16x8x3 vp8_sad16x8x3_ssse3
-
-#undef vp8_variance_subpixvar16x8
-#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_ssse3
-
-#undef vp8_variance_subpixvar16x16
-#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_ssse3
-
-#endif
-#endif
-
-
-#if HAVE_SSE4_1
-extern prototype_sad_multi_same_address_1(vp8_sad16x16x8_sse4);
-extern prototype_sad_multi_same_address_1(vp8_sad16x8x8_sse4);
-extern prototype_sad_multi_same_address_1(vp8_sad8x16x8_sse4);
-extern prototype_sad_multi_same_address_1(vp8_sad8x8x8_sse4);
-extern prototype_sad_multi_same_address_1(vp8_sad4x4x8_sse4);
-
-#if !CONFIG_RUNTIME_CPU_DETECT
-#undef vp8_variance_sad16x16x8
-#define vp8_variance_sad16x16x8 vp8_sad16x16x8_sse4
-
-#undef vp8_variance_sad16x8x8
-#define vp8_variance_sad16x8x8 vp8_sad16x8x8_sse4
-
-#undef vp8_variance_sad8x16x8
-#define vp8_variance_sad8x16x8 vp8_sad8x16x8_sse4
-
-#undef vp8_variance_sad8x8x8
-#define vp8_variance_sad8x8x8 vp8_sad8x8x8_sse4
-
-#undef vp8_variance_sad4x4x8
-#define vp8_variance_sad4x4x8 vp8_sad4x4x8_sse4
-
-#endif
-#endif
-
-#endif
diff --git a/vp8/encoder/x86/x86_csystemdependent.c b/vp8/encoder/x86/x86_csystemdependent.c
index 71c51c14f..a169b493e 100644
--- a/vp8/encoder/x86/x86_csystemdependent.c
+++ b/vp8/encoder/x86/x86_csystemdependent.c
@@ -90,31 +90,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
/* Override default functions with fastest ones for this CPU. */
#if HAVE_MMX
if (flags & HAS_MMX) {
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_mmx;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_mmx;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_mmx;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_mmx;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_mmx;
-
- cpi->rtcd.variance.var4x4 = vp8_variance4x4_mmx;
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_mmx;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_mmx;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_mmx;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_mmx;
-
- cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_mmx;
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_mmx;
- cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_mmx;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_mmx;
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_mmx;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_mmx;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_mmx;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_mmx;
- cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_mmx;
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_mmx;
- cpi->rtcd.variance.getmbss = vp8_get_mb_ss_mmx;
-
cpi->rtcd.encodemb.berr = vp8_block_error_mmx;
cpi->rtcd.encodemb.mberr = vp8_mbblock_error_mmx;
cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_mmx;
@@ -126,32 +101,6 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
#if HAVE_SSE2
if (flags & HAS_SSE2) {
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_wmt;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_wmt;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_wmt;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_wmt;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_wmt;
- cpi->rtcd.variance.copy32xn = vp8_copy32xn_sse2;
-
- cpi->rtcd.variance.var4x4 = vp8_variance4x4_wmt;
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_wmt;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_wmt;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_wmt;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_wmt;
-
- cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_wmt;
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_wmt;
- cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_wmt;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_wmt;
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_wmt;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_wmt;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_wmt;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_wmt;
- cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_wmt;
-
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_wmt;
- cpi->rtcd.variance.getmbss = vp8_get_mb_ss_sse2;
-
cpi->rtcd.encodemb.berr = vp8_block_error_xmm;
cpi->rtcd.encodemb.mberr = vp8_mbblock_error_xmm;
cpi->rtcd.encodemb.mbuverr = vp8_mbuverror_xmm;
@@ -160,54 +109,20 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi) {
cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_sse2;
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_sse2;
-#if CONFIG_INTERNAL_STATS
-#if ARCH_X86_64
- cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_sse2;
- cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_sse2;
-#endif
-#endif
}
#endif
#if HAVE_SSE3
if (flags & HAS_SSE3) {
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_sse3;
- cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_sse3;
- cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_sse3;
- cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_sse3;
- cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_sse3;
- cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_sse3;
cpi->rtcd.search.full_search = vp8_full_search_sadx3;
- cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_sse3;
- cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_sse3;
- cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_sse3;
- cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_sse3;
- cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_sse3;
- cpi->rtcd.variance.copy32xn = vp8_copy32xn_sse3;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sadx4;
cpi->rtcd.search.refining_search = vp8_refining_search_sadx4;
}
#endif
-#if HAVE_SSSE3
- if (flags & HAS_SSSE3) {
- cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_ssse3;
- cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_ssse3;
-
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_ssse3;
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_ssse3;
- }
-#endif
-
-
#if HAVE_SSE4_1
if (flags & HAS_SSE4_1) {
- cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_sse4;
- cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_sse4;
- cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_sse4;
- cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_sse4;
- cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_sse4;
cpi->rtcd.search.full_search = vp8_full_search_sadx8;
}
#endif