Diffstat (limited to 'vp8/encoder')
-rw-r--r--  vp8/encoder/asm_enc_offsets.c         70
-rw-r--r--  vp8/encoder/bitstream.c               64
-rw-r--r--  vp8/encoder/bitstream.h               14
-rw-r--r--  vp8/encoder/boolhuff.c                 2
-rw-r--r--  vp8/encoder/boolhuff.h                10
-rw-r--r--  vp8/encoder/dct.c                     26
-rw-r--r--  vp8/encoder/encodeintra.c              4
-rw-r--r--  vp8/encoder/encodemb.c                84
-rw-r--r--  vp8/encoder/encodemv.c                30
-rw-r--r--  vp8/encoder/modecosts.c               16
-rw-r--r--  vp8/encoder/onyx_if.c                 12
-rw-r--r--  vp8/encoder/quantize.c                20
-rw-r--r--  vp8/encoder/ratectrl.c                 3
-rw-r--r--  vp8/encoder/rdopt.c                  182
-rw-r--r--  vp8/encoder/tokenize.c               126
-rw-r--r--  vp8/encoder/tokenize.h                 4
-rw-r--r--  vp8/encoder/treewriter.h               2
-rw-r--r--  vp8/encoder/variance_c.c              32
-rw-r--r--  vp8/encoder/x86/quantize_sse2.asm     38
-rw-r--r--  vp8/encoder/x86/quantize_sse4.asm     26
-rw-r--r--  vp8/encoder/x86/quantize_ssse3.asm    14
-rw-r--r--  vp8/encoder/x86/variance_mmx.c        16
-rw-r--r--  vp8/encoder/x86/variance_sse2.c        4
23 files changed, 393 insertions(+), 406 deletions(-)
diff --git a/vp8/encoder/asm_enc_offsets.c b/vp8/encoder/asm_enc_offsets.c
index 8e74901b3..9749f05b4 100644
--- a/vp8/encoder/asm_enc_offsets.c
+++ b/vp8/encoder/asm_enc_offsets.c
@@ -20,35 +20,35 @@
BEGIN
/* regular quantize */
-DEFINE(vp8_block_coeff, offsetof(BLOCK, coeff));
-DEFINE(vp8_block_zbin, offsetof(BLOCK, zbin));
-DEFINE(vp8_block_round, offsetof(BLOCK, round));
-DEFINE(vp8_block_quant, offsetof(BLOCK, quant));
-DEFINE(vp8_block_quant_fast, offsetof(BLOCK, quant_fast));
-DEFINE(vp8_block_zbin_extra, offsetof(BLOCK, zbin_extra));
-DEFINE(vp8_block_zrun_zbin_boost, offsetof(BLOCK, zrun_zbin_boost));
-DEFINE(vp8_block_quant_shift, offsetof(BLOCK, quant_shift));
-
-DEFINE(vp8_blockd_qcoeff, offsetof(BLOCKD, qcoeff));
-DEFINE(vp8_blockd_dequant, offsetof(BLOCKD, dequant));
-DEFINE(vp8_blockd_dqcoeff, offsetof(BLOCKD, dqcoeff));
-DEFINE(vp8_blockd_eob, offsetof(BLOCKD, eob));
+DEFINE(vp9_block_coeff, offsetof(BLOCK, coeff));
+DEFINE(vp9_block_zbin, offsetof(BLOCK, zbin));
+DEFINE(vp9_block_round, offsetof(BLOCK, round));
+DEFINE(vp9_block_quant, offsetof(BLOCK, quant));
+DEFINE(vp9_block_quant_fast, offsetof(BLOCK, quant_fast));
+DEFINE(vp9_block_zbin_extra, offsetof(BLOCK, zbin_extra));
+DEFINE(vp9_block_zrun_zbin_boost, offsetof(BLOCK, zrun_zbin_boost));
+DEFINE(vp9_block_quant_shift, offsetof(BLOCK, quant_shift));
+
+DEFINE(vp9_blockd_qcoeff, offsetof(BLOCKD, qcoeff));
+DEFINE(vp9_blockd_dequant, offsetof(BLOCKD, dequant));
+DEFINE(vp9_blockd_dqcoeff, offsetof(BLOCKD, dqcoeff));
+DEFINE(vp9_blockd_eob, offsetof(BLOCKD, eob));
/* subtract */
-DEFINE(vp8_block_base_src, offsetof(BLOCK, base_src));
-DEFINE(vp8_block_src, offsetof(BLOCK, src));
-DEFINE(vp8_block_src_diff, offsetof(BLOCK, src_diff));
-DEFINE(vp8_block_src_stride, offsetof(BLOCK, src_stride));
+DEFINE(vp9_block_base_src, offsetof(BLOCK, base_src));
+DEFINE(vp9_block_src, offsetof(BLOCK, src));
+DEFINE(vp9_block_src_diff, offsetof(BLOCK, src_diff));
+DEFINE(vp9_block_src_stride, offsetof(BLOCK, src_stride));
-DEFINE(vp8_blockd_predictor, offsetof(BLOCKD, predictor));
+DEFINE(vp9_blockd_predictor, offsetof(BLOCKD, predictor));
/* pack tokens */
-DEFINE(vp8_writer_lowvalue, offsetof(vp8_writer, lowvalue));
-DEFINE(vp8_writer_range, offsetof(vp8_writer, range));
-DEFINE(vp8_writer_value, offsetof(vp8_writer, value));
-DEFINE(vp8_writer_count, offsetof(vp8_writer, count));
-DEFINE(vp8_writer_pos, offsetof(vp8_writer, pos));
-DEFINE(vp8_writer_buffer, offsetof(vp8_writer, buffer));
+DEFINE(vp9_writer_lowvalue, offsetof(vp8_writer, lowvalue));
+DEFINE(vp9_writer_range, offsetof(vp8_writer, range));
+DEFINE(vp9_writer_value, offsetof(vp8_writer, value));
+DEFINE(vp9_writer_count, offsetof(vp8_writer, count));
+DEFINE(vp9_writer_pos, offsetof(vp8_writer, pos));
+DEFINE(vp9_writer_buffer, offsetof(vp8_writer, buffer));
DEFINE(tokenextra_token, offsetof(TOKENEXTRA, Token));
DEFINE(tokenextra_extra, offsetof(TOKENEXTRA, Extra));
@@ -56,24 +56,24 @@ DEFINE(tokenextra_context_tree, offsetof(TOKENEXTRA, context_tre
DEFINE(tokenextra_skip_eob_node, offsetof(TOKENEXTRA, skip_eob_node));
DEFINE(TOKENEXTRA_SZ, sizeof(TOKENEXTRA));
-DEFINE(vp8_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct));
+DEFINE(vp9_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct));
-DEFINE(vp8_token_value, offsetof(vp8_token, value));
-DEFINE(vp8_token_len, offsetof(vp8_token, Len));
+DEFINE(vp9_token_value, offsetof(vp8_token, value));
+DEFINE(vp9_token_len, offsetof(vp8_token, Len));
-DEFINE(vp8_extra_bit_struct_tree, offsetof(vp8_extra_bit_struct, tree));
-DEFINE(vp8_extra_bit_struct_prob, offsetof(vp8_extra_bit_struct, prob));
-DEFINE(vp8_extra_bit_struct_len, offsetof(vp8_extra_bit_struct, Len));
-DEFINE(vp8_extra_bit_struct_base_val, offsetof(vp8_extra_bit_struct, base_val));
+DEFINE(vp9_extra_bit_struct_tree, offsetof(vp8_extra_bit_struct, tree));
+DEFINE(vp9_extra_bit_struct_prob, offsetof(vp8_extra_bit_struct, prob));
+DEFINE(vp9_extra_bit_struct_len, offsetof(vp8_extra_bit_struct, Len));
+DEFINE(vp9_extra_bit_struct_base_val, offsetof(vp8_extra_bit_struct, base_val));
-DEFINE(vp8_comp_tplist, offsetof(VP8_COMP, tplist));
-DEFINE(vp8_comp_common, offsetof(VP8_COMP, common));
+DEFINE(vp9_comp_tplist, offsetof(VP8_COMP, tplist));
+DEFINE(vp9_comp_common, offsetof(VP8_COMP, common));
DEFINE(tokenlist_start, offsetof(TOKENLIST, start));
DEFINE(tokenlist_stop, offsetof(TOKENLIST, stop));
DEFINE(TOKENLIST_SZ, sizeof(TOKENLIST));
-DEFINE(vp8_common_mb_rows, offsetof(VP8_COMMON, mb_rows));
+DEFINE(vp9_common_mb_rows, offsetof(VP8_COMMON, mb_rows));
END
@@ -86,5 +86,5 @@ END
#if HAVE_ARMV5TE
ct_assert(TOKENEXTRA_SZ, sizeof(TOKENEXTRA) == 8)
-ct_assert(vp8_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct) == 16)
+ct_assert(vp9_extra_bit_struct_sz, sizeof(vp8_extra_bit_struct) == 16)
#endif
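
The hunks above are pure symbol renames; asm_enc_offsets.c exists only so assembly code can learn C struct layouts, each DEFINE(name, offsetof(...)) expanding into something the build scrapes back out as an assembler constant. A minimal sketch of that mechanism with hypothetical plumbing (the real DEFINE/BEGIN/END macros live in the ports headers, and the real build extracts the values at compile time rather than by running a program):

/* Sketch: export struct offsets for use from assembly. Here DEFINE
 * simply prints assembler EQU lines; BLOCK_SKETCH is a stand-in. */
#include <stddef.h>
#include <stdio.h>

typedef struct {
  short coeff[400];
  short *zbin;
  int src_stride;
} BLOCK_SKETCH;

#define DEFINE(sym, val) printf("%s EQU %d\n", #sym, (int)(val));

int main(void) {
  DEFINE(vp9_block_zbin,       offsetof(BLOCK_SKETCH, zbin))
  DEFINE(vp9_block_src_stride, offsetof(BLOCK_SKETCH, src_stride))
  return 0;
}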
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 5fe479238..c50c59f2a 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -164,7 +164,7 @@ static void update_mbintra_mode_probs(VP8_COMP* const cpi,
unsigned int bct [VP8_YMODES - 1] [2];
update_mode(
- bc, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree,
+ bc, VP8_YMODES, vp9_ymode_encodings, vp9_ymode_tree,
Pnew, cm->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count
);
}
@@ -205,7 +205,7 @@ static void update_switchable_interp_probs(VP8_COMP *cpi,
for (j = 0; j <= VP8_SWITCHABLE_FILTERS; ++j) {
vp9_tree_probs_from_distribution(
VP8_SWITCHABLE_FILTERS,
- vp8_switchable_interp_encodings, vp8_switchable_interp_tree,
+ vp9_switchable_interp_encodings, vp9_switchable_interp_tree,
pc->fc.switchable_interp_prob[j], branch_ct,
cpi->switchable_interp_count[j], 256, 1);
for (i = 0; i < VP8_SWITCHABLE_FILTERS - 1; ++i) {
@@ -325,35 +325,35 @@ static void update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
}
static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
+ vp8_write_token(bc, vp9_ymode_tree, p, vp9_ymode_encodings + m);
}
static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m);
+ vp8_write_token(bc, vp9_kf_ymode_tree, p, vp9_kf_ymode_encodings + m);
}
#if CONFIG_SUPERBLOCKS
static void sb_kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_sb_kf_ymode_encodings + m);
+ vp8_write_token(bc, vp9_uv_mode_tree, p, vp9_sb_kf_ymode_encodings + m);
}
#endif
static void write_i8x8_mode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_i8x8_mode_tree, p, vp8_i8x8_mode_encodings + m);
+ vp8_write_token(bc, vp9_i8x8_mode_tree, p, vp9_i8x8_mode_encodings + m);
}
static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m);
+ vp8_write_token(bc, vp9_uv_mode_tree, p, vp9_uv_mode_encodings + m);
}
static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p) {
- vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m);
+ vp8_write_token(bc, vp9_bmode_tree, p, vp9_bmode_encodings + m);
}
static void write_split(vp8_writer *bc, int x, const vp8_prob *p) {
vp8_write_token(
- bc, vp8_mbsplit_tree, p, vp8_mbsplit_encodings + x
+ bc, vp9_mbsplit_tree, p, vp9_mbsplit_encodings + x
);
}
@@ -412,8 +412,8 @@ static void pack_mb_tokens(vp8_writer* const bc,
while (p < stop) {
const int t = p->Token;
- vp8_token *const a = vp8_coef_encodings + t;
- const vp8_extra_bit_struct *const b = vp8_extra_bits + t;
+ vp8_token *const a = vp9_coef_encodings + t;
+ const vp8_extra_bit_struct *const b = vp9_extra_bits + t;
int i = 0;
const unsigned char *pp = p->context_tree;
int v = a->value;
@@ -434,7 +434,7 @@ static void pack_mb_tokens(vp8_writer* const bc,
do {
const int bb = (v >> --n) & 1;
split = 1 + (((range - 1) * pp[i >> 1]) >> 8);
- i = vp8_coef_tree[i + bb];
+ i = vp9_coef_tree[i + bb];
if (bb) {
lowvalue += split;
@@ -443,7 +443,7 @@ static void pack_mb_tokens(vp8_writer* const bc,
range = split;
}
- shift = vp8_norm[range];
+ shift = vp9_norm[range];
range <<= shift;
count += shift;
@@ -493,7 +493,7 @@ static void pack_mb_tokens(vp8_writer* const bc,
range = split;
}
- shift = vp8_norm[range];
+ shift = vp9_norm[range];
range <<= shift;
count += shift;
@@ -586,8 +586,8 @@ static void write_mv_ref
#if CONFIG_DEBUG
assert(NEARESTMV <= m && m <= SPLITMV);
#endif
- vp8_write_token(bc, vp8_mv_ref_tree, p,
- vp8_mv_ref_encoding_array - NEARESTMV + m);
+ vp8_write_token(bc, vp9_mv_ref_tree, p,
+ vp9_mv_ref_encoding_array - NEARESTMV + m);
}
#if CONFIG_SUPERBLOCKS
@@ -596,8 +596,8 @@ static void write_sb_mv_ref(vp8_writer *bc, MB_PREDICTION_MODE m,
#if CONFIG_DEBUG
assert(NEARESTMV <= m && m < SPLITMV);
#endif
- vp8_write_token(bc, vp8_sb_mv_ref_tree, p,
- vp8_sb_mv_ref_encoding_array - NEARESTMV + m);
+ vp8_write_token(bc, vp9_sb_mv_ref_tree, p,
+ vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
}
#endif
@@ -608,8 +608,8 @@ static void write_sub_mv_ref
#if CONFIG_DEBUG
assert(LEFT4X4 <= m && m <= NEW4X4);
#endif
- vp8_write_token(bc, vp8_sub_mv_ref_tree, p,
- vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
+ vp8_write_token(bc, vp9_sub_mv_ref_tree, p,
+ vp9_sub_mv_ref_encoding_array - LEFT4X4 + m);
}
static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
@@ -1110,11 +1110,11 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
if (mode >= NEARESTMV && mode <= SPLITMV)
{
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
- vp8_write_token(bc, vp8_switchable_interp_tree,
+ vp8_write_token(bc, vp9_switchable_interp_tree,
vp9_get_pred_probs(&cpi->common, xd,
PRED_SWITCHABLE_INTERP),
- vp8_switchable_interp_encodings +
- vp8_switchable_interp_map[mi->interp_filter]);
+ vp9_switchable_interp_encodings +
+ vp9_switchable_interp_map[mi->interp_filter]);
} else {
assert (mi->interp_filter ==
cpi->common.mcomp_filter_type);
@@ -1207,7 +1207,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
B_PREDICTION_MODE blockmode;
int_mv blockmv;
const int *const L =
- vp8_mbsplits [mi->partitioning];
+ vp9_mbsplits [mi->partitioning];
int k = -1; /* first block in subset j */
int mv_contz;
int_mv leftmv, abovemv;
@@ -1524,7 +1524,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_coef_probs [i][j][k],
cpi->frame_branch_ct [i][j][k],
cpi->coef_counts [i][j][k],
@@ -1544,7 +1544,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_hybrid_coef_probs [i][j][k],
cpi->frame_hybrid_branch_ct [i][j][k],
cpi->hybrid_coef_counts [i][j][k],
@@ -1570,7 +1570,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_coef_probs_8x8 [i][j][k],
cpi->frame_branch_ct_8x8 [i][j][k],
cpi->coef_counts_8x8 [i][j][k],
@@ -1594,7 +1594,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_hybrid_coef_probs_8x8 [i][j][k],
cpi->frame_hybrid_branch_ct_8x8 [i][j][k],
cpi->hybrid_coef_counts_8x8 [i][j][k],
@@ -1617,7 +1617,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_coef_probs_16x16[i][j][k],
cpi->frame_branch_ct_16x16[i][j][k],
cpi->coef_counts_16x16[i][j][k], 256, 1);
@@ -1636,7 +1636,7 @@ static void build_coeff_contexts(VP8_COMP *cpi) {
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
continue;
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
cpi->frame_hybrid_coef_probs_16x16[i][j][k],
cpi->frame_hybrid_branch_ct_16x16[i][j][k],
cpi->hybrid_coef_counts_16x16[i][j][k], 256, 1);
@@ -1820,7 +1820,7 @@ static void decide_kf_ymode_entropy(VP8_COMP *cpi) {
int i, j;
for (i = 0; i < 8; i++) {
- vp9_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp8_kf_ymode_tree);
+ vp9_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp9_kf_ymode_tree);
cost = 0;
for (j = 0; j < VP8_YMODES; j++) {
cost += mode_cost[j] * cpi->ymode_count[j];
@@ -2163,7 +2163,7 @@ void vp9_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
/* Only one filter is used. So set the filter at frame level */
for (i = 0; i < VP8_SWITCHABLE_FILTERS; ++i) {
if (count[i]) {
- pc->mcomp_filter_type = vp8_switchable_interp[i];
+ pc->mcomp_filter_type = vp9_switchable_interp[i];
break;
}
}
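
pack_mb_tokens above renormalizes the arithmetic coder after every coded bit: shift = vp9_norm[range] gives how far to left-shift range back into [128, 255]. Under that reading (an inference from the usage, not a quoted definition), the table is just a leading-zero count, as this sketch rebuilds:

/* Sketch: rebuild the renormalization shift table. norm[r] is the
 * shift that brings r (1..255) back into [128, 255]; entry 0 unused. */
#include <stdio.h>

int main(void) {
  unsigned char norm[256];
  int r;
  norm[0] = 0;
  for (r = 1; r < 256; r++) {
    int s = 0, v = r;
    while (v < 128) { v <<= 1; s++; }
    norm[r] = (unsigned char)s;
  }
  printf("norm[1]=%d norm[127]=%d norm[128]=%d\n",
         norm[1], norm[127], norm[128]);  /* expect 7, 1, 0 */
  return 0;
}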
diff --git a/vp8/encoder/bitstream.h b/vp8/encoder/bitstream.h
index 5156bf4e7..87f0aa80b 100644
--- a/vp8/encoder/bitstream.h
+++ b/vp8/encoder/bitstream.h
@@ -12,16 +12,6 @@
#ifndef __INC_BITSTREAM_H
#define __INC_BITSTREAM_H
-#if HAVE_ARMV5TE
-void vp8cx_pack_tokens_armv5(vp8_writer *w, const TOKENEXTRA *p, int xcount,
- vp8_token *,
- vp8_extra_bit_struct *,
- const vp8_tree_index *);
-# define pack_tokens(a,b,c) \
- vp8cx_pack_tokens_armv5(a,b,c,vp8_coef_encodings,vp8_extra_bits,vp8_coef_tree)
-#else
-# define pack_tokens(a,b,c) pack_tokens_c(a,b,c)
-#endif
-#endif
-
void vp9_update_skip_probs(VP8_COMP *cpi);
+
+#endif
diff --git a/vp8/encoder/boolhuff.c b/vp8/encoder/boolhuff.c
index ef03d8b14..1f885d8c5 100644
--- a/vp8/encoder/boolhuff.c
+++ b/vp8/encoder/boolhuff.c
@@ -20,7 +20,7 @@ unsigned __int64 Sectionbits[500];
unsigned int active_section = 0;
#endif
-const unsigned int vp8_prob_cost[256] = {
+const unsigned int vp9_prob_cost[256] = {
2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, 1099, 1072, 1046,
1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, 843, 829, 816, 803, 790, 778,
767, 755, 744, 733, 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625,
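
vp9_prob_cost stores the price of one arithmetic-coded bit in 1/256th-of-a-bit units, indexed by the probability (out of 256) of the symbol actually coded. The visible entries agree with cost(p) = round(256 * log2(255 / p)), an assumption inferred from the table itself rather than stated in the source. A sketch that regenerates it under that assumption:

/* Sketch: rebuild the bit-cost table as cost(p) ~= 256*log2(255/p),
 * with p = 0 clamped. Checked only against the rows shown above. */
#include <math.h>
#include <stdio.h>

int main(void) {
  unsigned int cost[256];
  int p;
  cost[0] = 2047;  /* p = 0 never occurs; clamp like entry 1 */
  for (p = 1; p < 256; p++)
    cost[p] = (unsigned int)(256.0 * log2(255.0 / p) + 0.5);
  printf("%u %u %u\n", cost[1], cost[2], cost[3]);  /* 2047 1791 1641 */
  return 0;
}

This is also why the SECTIONBITS_OUTPUT path below charges vp9_prob_cost[255 - probability] for a one bit: the one branch is coded with the complementary probability.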
diff --git a/vp8/encoder/boolhuff.h b/vp8/encoder/boolhuff.h
index 8516aa219..3fbe41ecc 100644
--- a/vp8/encoder/boolhuff.h
+++ b/vp8/encoder/boolhuff.h
@@ -38,7 +38,7 @@ extern void vp9_start_encode(BOOL_CODER *bc, unsigned char *buffer);
extern void vp9_encode_value(BOOL_CODER *br, int data, int bits);
extern void vp9_stop_encode(BOOL_CODER *bc);
-extern const unsigned int vp8_prob_cost[256];
+extern const unsigned int vp9_prob_cost[256];
extern void vp9_encode_uniform(BOOL_CODER *bc, int v, int n);
extern void vp9_encode_term_subexp(BOOL_CODER *bc, int v, int k, int n);
@@ -46,7 +46,7 @@ extern int vp9_count_uniform(int v, int n);
extern int vp9_count_term_subexp(int v, int k, int n);
extern int vp9_recenter_nonneg(int v, int m);
-DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
+DECLARE_ALIGNED(16, extern const unsigned char, vp9_norm[256]);
static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) {
@@ -60,9 +60,9 @@ static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) {
#if defined(SECTIONBITS_OUTPUT)
if (bit)
- Sectionbits[active_section] += vp8_prob_cost[255 - probability];
+ Sectionbits[active_section] += vp9_prob_cost[255 - probability];
else
- Sectionbits[active_section] += vp8_prob_cost[probability];
+ Sectionbits[active_section] += vp9_prob_cost[probability];
#endif
#endif
@@ -76,7 +76,7 @@ static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) {
range = br->range - split;
}
- shift = vp8_norm[range];
+ shift = vp9_norm[range];
range <<= shift;
count += shift;
diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c
index b56b6113c..18d782c52 100644
--- a/vp8/encoder/dct.c
+++ b/vp8/encoder/dct.c
@@ -19,21 +19,21 @@
// TODO: these transforms can be converted into integer forms to reduce
// the complexity
-float dct_4[16] = {
+static const float dct_4[16] = {
0.500000000000000, 0.500000000000000, 0.500000000000000, 0.500000000000000,
0.653281482438188, 0.270598050073099, -0.270598050073099, -0.653281482438188,
0.500000000000000, -0.500000000000000, -0.500000000000000, 0.500000000000000,
0.270598050073099, -0.653281482438188, 0.653281482438188, -0.270598050073099
};
-float adst_4[16] = {
+static const float adst_4[16] = {
0.228013428883779, 0.428525073124360, 0.577350269189626, 0.656538502008139,
0.577350269189626, 0.577350269189626, 0.000000000000000, -0.577350269189626,
0.656538502008139, -0.228013428883779, -0.577350269189626, 0.428525073124359,
0.428525073124360, -0.656538502008139, 0.577350269189626, -0.228013428883779
};
-float dct_8[64] = {
+static const float dct_8[64] = {
0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274,
0.353553390593274, 0.353553390593274, 0.353553390593274, 0.353553390593274,
0.490392640201615, 0.415734806151273, 0.277785116509801, 0.097545161008064,
@@ -52,7 +52,7 @@ float dct_8[64] = {
0.490392640201615, -0.415734806151273, 0.277785116509801, -0.097545161008064
};
-float adst_8[64] = {
+static const float adst_8[64] = {
0.089131608307533, 0.175227946595735, 0.255357107325376, 0.326790388032145,
0.387095214016349, 0.434217976756762, 0.466553967085785, 0.483002021635509,
0.255357107325376, 0.434217976756762, 0.483002021635509, 0.387095214016349,
@@ -72,21 +72,21 @@ float adst_8[64] = {
};
/* Converted the transforms to integers. */
-const int16_t dct_i4[16] = {
+static const int16_t dct_i4[16] = {
16384, 16384, 16384, 16384,
21407, 8867, -8867, -21407,
16384, -16384, -16384, 16384,
8867, -21407, 21407, -8867
};
-const int16_t adst_i4[16] = {
+static const int16_t adst_i4[16] = {
7472, 14042, 18919, 21513,
18919, 18919, 0, -18919,
21513, -7472, -18919, 14042,
14042, -21513, 18919, -7472
};
-const int16_t dct_i8[64] = {
+static const int16_t dct_i8[64] = {
11585, 11585, 11585, 11585,
11585, 11585, 11585, 11585,
16069, 13623, 9102, 3196,
@@ -105,7 +105,7 @@ const int16_t dct_i8[64] = {
16069, -13623, 9102, -3196
};
-const int16_t adst_i8[64] = {
+static const int16_t adst_i8[64] = {
2921, 5742, 8368, 10708,
12684, 14228, 15288, 15827,
8368, 14228, 15827, 12684,
@@ -124,7 +124,7 @@ const int16_t adst_i8[64] = {
15288, -12684, 8368, -2921
};
-float dct_16[256] = {
+static const float dct_16[256] = {
0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000,
0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000, 0.250000,
0.351851, 0.338330, 0.311806, 0.273300, 0.224292, 0.166664, 0.102631, 0.034654,
@@ -159,7 +159,7 @@ float dct_16[256] = {
0.351851, -0.338330, 0.311806, -0.273300, 0.224292, -0.166664, 0.102631, -0.034654
};
-float adst_16[256] = {
+static const float adst_16[256] = {
0.033094, 0.065889, 0.098087, 0.129396, 0.159534, 0.188227, 0.215215, 0.240255,
0.263118, 0.283599, 0.301511, 0.316693, 0.329007, 0.338341, 0.344612, 0.347761,
0.098087, 0.188227, 0.263118, 0.316693, 0.344612, 0.344612, 0.316693, 0.263118,
@@ -195,7 +195,7 @@ float adst_16[256] = {
};
/* Converted the transforms to integers. */
-const int16_t dct_i16[256] = {
+static const int16_t dct_i16[256] = {
8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
8192, 8192, 8192, 8192, 8192, 8192, 8192, 8192,
11529, 11086, 10217, 8955, 7350, 5461, 3363, 1136,
@@ -230,7 +230,7 @@ const int16_t dct_i16[256] = {
11529, -11086, 10217, -8955, 7350, -5461, 3363, -1136
};
-const int16_t adst_i16[256] = {
+static const int16_t adst_i16[256] = {
1084, 2159, 3214, 4240, 5228, 6168, 7052, 7873,
8622, 9293, 9880, 10377, 10781, 11087, 11292, 11395,
3214, 6168, 8622, 10377, 11292, 11292, 10377, 8622,
@@ -542,7 +542,7 @@ void vp9_fht_float_c(const int16_t *input, int pitch, int16_t *output,
float *pfb = &bufb[0];
// pointers to vertical and horizontal transforms
- float *ptv, *pth;
+ const float *ptv, *pth;
assert(tx_type != DCT_DCT);
// load and convert residual array into floating-point
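
Making these tables static const is safe because they are fixed transform bases: dct_4 is the orthonormal DCT-II matrix dct_4[k*4 + n] = a_k * cos(pi * (2n + 1) * k / 8) with a_0 = 1/2 and a_k = sqrt(1/2) otherwise, and the int16_t tables look like the same matrices scaled by 2^15 and rounded (16384 = 0.5 * 32768; 21407 ~ 0.653281 * 32768). A sketch that reproduces the float table from that definition:

/* Sketch: regenerate dct_4 from the orthonormal DCT-II basis. */
#include <math.h>
#include <stdio.h>

int main(void) {
  const double kPi = 3.14159265358979323846;
  float dct_4[16];
  int k, n;
  for (k = 0; k < 4; k++) {
    const double a_k = (k == 0) ? 0.5 : sqrt(0.5);
    for (n = 0; n < 4; n++)
      dct_4[k * 4 + n] = (float)(a_k * cos(kPi * (2 * n + 1) * k / 8.0));
  }
  printf("%.15f %.15f\n", dct_4[0], dct_4[4]);
  /* expect 0.500000000000000 and 0.653281482438188, per the table */
  return 0;
}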
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 3cd450533..60cf60256 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -238,7 +238,7 @@ void vp9_encode_intra8x8mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
int i, ib;
for (i = 0; i < 4; i++) {
- ib = vp8_i8x8_block[i];
+ ib = vp9_i8x8_block[i];
vp9_encode_intra8x8(rtcd, x, ib);
}
}
@@ -273,7 +273,7 @@ void vp9_encode_intra8x8mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
BLOCKD *b;
for (i = 0; i < 4; i++) {
- ib = vp8_i8x8_block[i];
+ ib = vp9_i8x8_block[i];
b = &x->e_mbd.block[ib];
mode = b->bmi.as_mode.first;
#if CONFIG_COMP_INTRA_PRED
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index a87fd7c2e..456468dca 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -303,8 +303,8 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
switch (tx_size) {
default:
case TX_4X4:
- scan = vp8_default_zig_zag1d;
- bands = vp8_coef_bands;
+ scan = vp9_default_zig_zag1d;
+ bands = vp9_coef_bands;
default_eob = 16;
// TODO: this isn't called (for intra4x4 modes), but will be left in
// since it could be used later
@@ -313,25 +313,25 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
if (tx_type != DCT_DCT) {
switch (tx_type) {
case ADST_DCT:
- scan = vp8_row_scan;
+ scan = vp9_row_scan;
break;
case DCT_ADST:
- scan = vp8_col_scan;
+ scan = vp9_col_scan;
break;
default:
- scan = vp8_default_zig_zag1d;
+ scan = vp9_default_zig_zag1d;
break;
}
} else {
- scan = vp8_default_zig_zag1d;
+ scan = vp9_default_zig_zag1d;
}
}
break;
case TX_8X8:
- scan = vp8_default_zig_zag1d_8x8;
- bands = vp8_coef_bands_8x8;
+ scan = vp9_default_zig_zag1d_8x8;
+ bands = vp9_coef_bands_8x8;
default_eob = 64;
break;
}
@@ -372,11 +372,11 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
/* Evaluate the first possibility for this state. */
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
- t0 = (vp8_dct_value_tokens_ptr + x)->Token;
+ t0 = (vp9_dct_value_tokens_ptr + x)->Token;
/* Consider both possible successor states. */
if (next < default_eob) {
band = bands[i + 1];
- pt = vp8_prev_token_class[t0];
+ pt = vp9_prev_token_class[t0];
rate0 +=
mb->token_costs[tx_size][type][band][pt][tokens[next][0].token];
rate1 +=
@@ -385,7 +385,7 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
+ base_bits = *(vp9_dct_value_cost_ptr + x);
dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
d2 = dx * dx;
tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
@@ -419,17 +419,17 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
DCT_EOB_TOKEN : ZERO_TOKEN;
} else {
- t0 = t1 = (vp8_dct_value_tokens_ptr + x)->Token;
+ t0 = t1 = (vp9_dct_value_tokens_ptr + x)->Token;
}
if (next < default_eob) {
band = bands[i + 1];
if (t0 != DCT_EOB_TOKEN) {
- pt = vp8_prev_token_class[t0];
+ pt = vp9_prev_token_class[t0];
rate0 += mb->token_costs[tx_size][type][band][pt][
tokens[next][0].token];
}
if (t1 != DCT_EOB_TOKEN) {
- pt = vp8_prev_token_class[t1];
+ pt = vp9_prev_token_class[t1];
rate1 += mb->token_costs[tx_size][type][band][pt][
tokens[next][1].token];
}
@@ -438,7 +438,7 @@ static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
+ base_bits = *(vp9_dct_value_cost_ptr + x);
if (shortcut) {
dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
@@ -524,7 +524,7 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *xd,
return;
for (i = 0; i < bd->eob; i++) {
- int coef = bd->dqcoeff[vp8_default_zig_zag1d[i]];
+ int coef = bd->dqcoeff[vp9_default_zig_zag1d[i]];
sum += (coef >= 0) ? coef : -coef;
if (sum >= SUM_2ND_COEFF_THRESH)
return;
@@ -532,7 +532,7 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *xd,
if (sum < SUM_2ND_COEFF_THRESH) {
for (i = 0; i < bd->eob; i++) {
- int rc = vp8_default_zig_zag1d[i];
+ int rc = vp9_default_zig_zag1d[i];
bd->qcoeff[rc] = 0;
bd->dqcoeff[rc] = 0;
}
@@ -594,15 +594,15 @@ void vp9_optimize_mby_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
for (b = 0; b < 16; b++) {
optimize_b(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
+ ta + vp9_block2above[b], tl + vp9_block2left[b], rtcd, TX_4X4);
}
if (has_2nd_order) {
b = 24;
optimize_b(x, b, PLANE_TYPE_Y2,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
+ ta + vp9_block2above[b], tl + vp9_block2left[b], rtcd, TX_4X4);
check_reset_2nd_coeffs(&x->e_mbd,
- ta + vp8_block2above[b], tl + vp8_block2left[b]);
+ ta + vp9_block2above[b], tl + vp9_block2left[b]);
}
}
@@ -623,7 +623,7 @@ void vp9_optimize_mbuv_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
for (b = 16; b < 24; b++) {
optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd, TX_4X4);
+ ta + vp9_block2above[b], tl + vp9_block2left[b], rtcd, TX_4X4);
}
}
@@ -651,17 +651,17 @@ void vp9_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
for (b = 0; b < 16; b += 4) {
optimize_b(x, b, type,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
+ ta + vp9_block2above_8x8[b], tl + vp9_block2left_8x8[b],
rtcd, TX_8X8);
- ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]];
- tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]];
+ ta[vp9_block2above_8x8[b] + 1] = ta[vp9_block2above_8x8[b]];
+ tl[vp9_block2left_8x8[b] + 1] = tl[vp9_block2left_8x8[b]];
}
// 8x8 always has a 2nd order haar block
if (has_2nd_order) {
check_reset_8x8_2nd_coeffs(&x->e_mbd,
- ta + vp8_block2above_8x8[24],
- tl + vp8_block2left_8x8[24]);
+ ta + vp9_block2above_8x8[24],
+ tl + vp9_block2left_8x8[24]);
}
}
@@ -682,10 +682,10 @@ void vp9_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
for (b = 16; b < 24; b += 4) {
optimize_b(x, b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
+ ta + vp9_block2above_8x8[b], tl + vp9_block2left_8x8[b],
rtcd, TX_8X8);
- ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]];
- tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]];
+ ta[vp9_block2above_8x8[b] + 1] = ta[vp9_block2above_8x8[b]];
+ tl[vp9_block2left_8x8[b] + 1] = tl[vp9_block2left_8x8[b]];
}
}
@@ -728,7 +728,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
for (i = eob; i-- > 0;) {
int base_bits, d2, dx;
- rc = vp8_default_zig_zag1d_16x16[i];
+ rc = vp9_default_zig_zag1d_16x16[i];
x = qcoeff_ptr[rc];
/* Only add a trellis state for non-zero coefficients. */
if (x) {
@@ -738,18 +738,18 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
/* Evaluate the first possibility for this state. */
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
- t0 = (vp8_dct_value_tokens_ptr + x)->Token;
+ t0 = (vp9_dct_value_tokens_ptr + x)->Token;
/* Consider both possible successor states. */
if (next < 256) {
- band = vp8_coef_bands_16x16[i + 1];
- pt = vp8_prev_token_class[t0];
+ band = vp9_coef_bands_16x16[i + 1];
+ pt = vp9_prev_token_class[t0];
rate0 += mb->token_costs[TX_16X16][type][band][pt][tokens[next][0].token];
rate1 += mb->token_costs[TX_16X16][type][band][pt][tokens[next][1].token];
}
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
+ base_bits = *(vp9_dct_value_cost_ptr + x);
dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
d2 = dx*dx;
tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
@@ -784,16 +784,16 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
DCT_EOB_TOKEN : ZERO_TOKEN;
}
else
- t0=t1 = (vp8_dct_value_tokens_ptr + x)->Token;
+ t0=t1 = (vp9_dct_value_tokens_ptr + x)->Token;
if (next < 256) {
- band = vp8_coef_bands_16x16[i + 1];
+ band = vp9_coef_bands_16x16[i + 1];
if (t0 != DCT_EOB_TOKEN) {
- pt = vp8_prev_token_class[t0];
+ pt = vp9_prev_token_class[t0];
rate0 += mb->token_costs[TX_16X16][type][band][pt]
[tokens[next][0].token];
}
if (t1!=DCT_EOB_TOKEN) {
- pt = vp8_prev_token_class[t1];
+ pt = vp9_prev_token_class[t1];
rate1 += mb->token_costs[TX_16X16][type][band][pt]
[tokens[next][1].token];
}
@@ -801,7 +801,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = *(vp8_dct_value_cost_ptr + x);
+ base_bits = *(vp9_dct_value_cost_ptr + x);
if(shortcut) {
dx -= (dequant_ptr[rc!=0] + sz) ^ sz;
@@ -820,7 +820,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
* add a new trellis node, but we do need to update the costs.
*/
else {
- band = vp8_coef_bands_16x16[i + 1];
+ band = vp9_coef_bands_16x16[i + 1];
t0 = tokens[next][0].token;
t1 = tokens[next][1].token;
/* Update the cost of each path if we're past the EOB token. */
@@ -837,7 +837,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
}
/* Now pick the best path through the whole trellis. */
- band = vp8_coef_bands_16x16[i + 1];
+ band = vp9_coef_bands_16x16[i + 1];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
@@ -855,7 +855,7 @@ static void optimize_b_16x16(MACROBLOCK *mb, int i, PLANE_TYPE type,
x = tokens[i][best].qc;
if (x)
final_eob = i;
- rc = vp8_default_zig_zag1d_16x16[i];
+ rc = vp9_default_zig_zag1d_16x16[i];
qcoeff_ptr[rc] = x;
dqcoeff_ptr[rc] = (x * dequant_ptr[rc!=0]);
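
The optimize_b trellis above walks the coefficients in reverse scan order, keeping two candidate states per position (keep the quantized value, or pull it toward zero) and resolving each choice with best = rd_cost1 < rd_cost0. The comparison is an ordinary Lagrangian rate-distortion cost; a sketch of what UPDATE_RD_COST evaluates, with the vp8-style RDCOST rounding treated as an assumption rather than quoted:

/* Sketch of the per-node rate-distortion decision. rdmult acts as
 * the Lagrange multiplier on rate (scaled by 256, hence the >> 8);
 * rddiv scales the squared-error term. */
#define RDCOST(RM, DM, R, D) (((128 + (R) * (RM)) >> 8) + (DM) * (D))

static int pick_best(int rdmult, int rddiv,
                     int rate0, int error0,
                     int rate1, int error1) {
  const int rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);
  const int rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);
  return rd_cost1 < rd_cost0;  /* mirrors 'best' in optimize_b */
}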
diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c
index 970e89f57..972255459 100644
--- a/vp8/encoder/encodemv.c
+++ b/vp8/encoder/encodemv.c
@@ -38,14 +38,14 @@ static void encode_nmv_component(vp8_writer* const bc,
c = vp9_get_mv_class(z, &o);
- vp8_write_token(bc, vp8_mv_class_tree, mvcomp->classes,
- vp8_mv_class_encodings + c);
+ vp8_write_token(bc, vp9_mv_class_tree, mvcomp->classes,
+ vp9_mv_class_encodings + c);
d = (o >> 3); /* int mv data */
if (c == MV_CLASS_0) {
- vp8_write_token(bc, vp8_mv_class0_tree, mvcomp->class0,
- vp8_mv_class0_encodings + d);
+ vp8_write_token(bc, vp9_mv_class0_tree, mvcomp->class0,
+ vp9_mv_class0_encodings + d);
} else {
int i, b;
b = c + CLASS0_BITS - 1; /* number of bits */
@@ -72,11 +72,11 @@ static void encode_nmv_component_fp(vp8_writer *bc,
/* Code the fractional pel bits */
if (c == MV_CLASS_0) {
- vp8_write_token(bc, vp8_mv_fp_tree, mvcomp->class0_fp[d],
- vp8_mv_fp_encodings + f);
+ vp8_write_token(bc, vp9_mv_fp_tree, mvcomp->class0_fp[d],
+ vp9_mv_fp_encodings + f);
} else {
- vp8_write_token(bc, vp8_mv_fp_tree, mvcomp->fp,
- vp8_mv_fp_encodings + f);
+ vp8_write_token(bc, vp9_mv_fp_tree, mvcomp->fp,
+ vp9_mv_fp_encodings + f);
}
/* Code the high precision bit */
if (usehp) {
@@ -99,16 +99,16 @@ static void build_nmv_component_cost_table(int *mvcost,
sign_cost[0] = vp8_cost_zero(mvcomp->sign);
sign_cost[1] = vp8_cost_one(mvcomp->sign);
- vp9_cost_tokens(class_cost, mvcomp->classes, vp8_mv_class_tree);
- vp9_cost_tokens(class0_cost, mvcomp->class0, vp8_mv_class0_tree);
+ vp9_cost_tokens(class_cost, mvcomp->classes, vp9_mv_class_tree);
+ vp9_cost_tokens(class0_cost, mvcomp->class0, vp9_mv_class0_tree);
for (i = 0; i < MV_OFFSET_BITS; ++i) {
bits_cost[i][0] = vp8_cost_zero(mvcomp->bits[i]);
bits_cost[i][1] = vp8_cost_one(mvcomp->bits[i]);
}
for (i = 0; i < CLASS0_SIZE; ++i)
- vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp8_mv_fp_tree);
- vp9_cost_tokens(fp_cost, mvcomp->fp, vp8_mv_fp_tree);
+ vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp9_mv_fp_tree);
+ vp9_cost_tokens(fp_cost, mvcomp->fp, vp9_mv_fp_tree);
if (usehp) {
class0_hp_cost[0] = vp8_cost_zero(mvcomp->class0_hp);
@@ -511,8 +511,8 @@ void vp9_write_nmvprobs(VP8_COMP* const cpi, int usehp, vp8_writer* const bc) {
void vp9_encode_nmv(vp8_writer* const bc, const MV* const mv,
const MV* const ref, const nmv_context* const mvctx) {
MV_JOINT_TYPE j = vp9_get_mv_joint(*mv);
- vp8_write_token(bc, vp8_mv_joint_tree, mvctx->joints,
- vp8_mv_joint_encodings + j);
+ vp8_write_token(bc, vp9_mv_joint_tree, mvctx->joints,
+ vp9_mv_joint_encodings + j);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
encode_nmv_component(bc, mv->row, ref->col, &mvctx->comps[0]);
}
@@ -541,7 +541,7 @@ void vp9_build_nmv_cost_table(int *mvjoint,
int mvc_flag_v,
int mvc_flag_h) {
vp8_clear_system_state();
- vp9_cost_tokens(mvjoint, mvctx->joints, vp8_mv_joint_tree);
+ vp9_cost_tokens(mvjoint, mvctx->joints, vp9_mv_joint_tree);
if (mvc_flag_v)
build_nmv_component_cost_table(mvcost[0], &mvctx->comps[0], usehp);
if (mvc_flag_h)
diff --git a/vp8/encoder/modecosts.c b/vp8/encoder/modecosts.c
index 5111a81f7..523dfc241 100644
--- a/vp8/encoder/modecosts.c
+++ b/vp8/encoder/modecosts.c
@@ -17,7 +17,7 @@
void vp9_init_mode_costs(VP8_COMP *c) {
VP8_COMMON *x = &c->common;
- const vp8_tree_p T = vp8_bmode_tree;
+ const vp8_tree_p T = vp9_bmode_tree;
int i, j;
for (i = 0; i < VP8_BINTRAMODES; i++) {
@@ -29,21 +29,21 @@ void vp9_init_mode_costs(VP8_COMP *c) {
vp9_cost_tokens((int *)c->mb.inter_bmode_costs, x->fc.bmode_prob, T);
vp9_cost_tokens((int *)c->mb.inter_bmode_costs,
- x->fc.sub_mv_ref_prob[0], vp8_sub_mv_ref_tree);
+ x->fc.sub_mv_ref_prob[0], vp9_sub_mv_ref_tree);
- vp9_cost_tokens(c->mb.mbmode_cost[1], x->fc.ymode_prob, vp8_ymode_tree);
+ vp9_cost_tokens(c->mb.mbmode_cost[1], x->fc.ymode_prob, vp9_ymode_tree);
vp9_cost_tokens(c->mb.mbmode_cost[0],
x->kf_ymode_prob[c->common.kf_ymode_probs_index],
- vp8_kf_ymode_tree);
+ vp9_kf_ymode_tree);
vp9_cost_tokens(c->mb.intra_uv_mode_cost[1],
- x->fc.uv_mode_prob[VP8_YMODES - 1], vp8_uv_mode_tree);
+ x->fc.uv_mode_prob[VP8_YMODES - 1], vp9_uv_mode_tree);
vp9_cost_tokens(c->mb.intra_uv_mode_cost[0],
- x->kf_uv_mode_prob[VP8_YMODES - 1], vp8_uv_mode_tree);
+ x->kf_uv_mode_prob[VP8_YMODES - 1], vp9_uv_mode_tree);
vp9_cost_tokens(c->mb.i8x8_mode_costs,
- x->fc.i8x8_mode_prob, vp8_i8x8_mode_tree);
+ x->fc.i8x8_mode_prob, vp9_i8x8_mode_tree);
for (i = 0; i <= VP8_SWITCHABLE_FILTERS; ++i)
vp9_cost_tokens((int *)c->mb.switchable_interp_costs[i],
x->fc.switchable_interp_prob[i],
- vp8_switchable_interp_tree);
+ vp9_switchable_interp_tree);
}
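
Every vp9_cost_tokens call above turns a probability-annotated binary tree into a per-symbol bit-cost table by summing vp9_prob_cost along each root-to-leaf path. A sketch of that walk, assuming the usual libvpx tree convention (non-positive entries are negated token indices, positive entries index the next node pair):

/* Sketch: fill costs[token] with the path cost through a vp8-style
 * probability tree. The layout convention is assumed as described. */
typedef signed char tree_index;
extern const unsigned int vp9_prob_cost[256];

static void cost_tokens_sketch(int *costs, const tree_index *tree,
                               const unsigned char *probs,
                               int i, int c) {
  const unsigned char prob = probs[i >> 1];
  int b;
  for (b = 0; b <= 1; ++b) {
    /* bit cost: prob for the 0 branch, its complement for the 1 branch */
    const int cc = c + (int)vp9_prob_cost[b ? 255 - prob : prob];
    const tree_index ii = tree[i + b];
    if (ii <= 0)
      costs[-ii] = cc;
    else
      cost_tokens_sketch(costs, tree, probs, ii, cc);
  }
}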
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 86b469fa4..bc17432d3 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -182,7 +182,7 @@ extern unsigned int inter_b_modes[B_MODE_COUNT];
extern void vp9_init_quantizer(VP8_COMP *cpi);
-int vp8cx_base_skip_false_prob[QINDEX_RANGE][3];
+static int base_skip_false_prob[QINDEX_RANGE][3];
// Tables relating active max Q to active min Q
static int kf_low_motion_minq[QINDEX_RANGE];
@@ -271,21 +271,21 @@ static void init_base_skip_probs(void) {
skip_prob = 1;
else if (skip_prob > 255)
skip_prob = 255;
- vp8cx_base_skip_false_prob[i][1] = skip_prob;
+ base_skip_false_prob[i][1] = skip_prob;
skip_prob = t * 0.75;
if (skip_prob < 1)
skip_prob = 1;
else if (skip_prob > 255)
skip_prob = 255;
- vp8cx_base_skip_false_prob[i][2] = skip_prob;
+ base_skip_false_prob[i][2] = skip_prob;
skip_prob = t * 1.25;
if (skip_prob < 1)
skip_prob = 1;
else if (skip_prob > 255)
skip_prob = 255;
- vp8cx_base_skip_false_prob[i][0] = skip_prob;
+ base_skip_false_prob[i][0] = skip_prob;
}
}
@@ -1762,7 +1762,7 @@ VP8_PTR vp9_create_compressor(VP8_CONFIG *oxcf) {
init_config((VP8_PTR)cpi, oxcf);
- memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob));
+ memcpy(cpi->base_skip_false_prob, base_skip_false_prob, sizeof(base_skip_false_prob));
cpi->common.current_video_frame = 0;
cpi->kf_overspend_bits = 0;
cpi->kf_bitrate_adjustment = 0;
@@ -3484,7 +3484,7 @@ static void encode_frame_to_data_rate
/* Mostly one filter is used. So set the filter at frame level */
for (i = 0; i < VP8_SWITCHABLE_FILTERS; ++i) {
if (count[i]) {
- cm->mcomp_filter_type = vp8_switchable_interp[i];
+ cm->mcomp_filter_type = vp9_switchable_interp[i];
Loop = TRUE; /* Make sure to loop since the filter changed */
break;
}
diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c
index 0bee9ec50..b5476b698 100644
--- a/vp8/encoder/quantize.c
+++ b/vp8/encoder/quantize.c
@@ -40,15 +40,15 @@ void vp9_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
switch (tx_type) {
case ADST_DCT :
- pt_scan = vp8_row_scan;
+ pt_scan = vp9_row_scan;
break;
case DCT_ADST :
- pt_scan = vp8_col_scan;
+ pt_scan = vp9_col_scan;
break;
default :
- pt_scan = vp8_default_zig_zag1d;
+ pt_scan = vp9_default_zig_zag1d;
break;
}
@@ -106,7 +106,7 @@ void vp9_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
eob = -1;
for (i = 0; i < b->eob_max_offset; i++) {
- rc = vp8_default_zig_zag1d[i];
+ rc = vp9_default_zig_zag1d[i];
z = coeff_ptr[rc];
zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
@@ -179,7 +179,7 @@ void vp9_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) {
eob = -1;
for (i = 0; i < b->eob_max_offset_8x8; i++) {
- rc = vp8_default_zig_zag1d[i];
+ rc = vp9_default_zig_zag1d[i];
z = coeff_ptr[rc];
zbin_boost_ptr = &b->zrun_zbin_boost[zbin_zrun_index];
@@ -228,7 +228,7 @@ void vp9_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) {
eob = -1;
for (i = 0; i < b->eob_max_offset_8x8; i++) {
- rc = vp8_default_zig_zag1d_8x8[i];
+ rc = vp9_default_zig_zag1d_8x8[i];
z = coeff_ptr[rc];
zbin = (zbin_ptr[rc != 0] + *zbin_boost_ptr + zbin_oq_value);
@@ -318,7 +318,7 @@ void vp9_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
eob = -1;
for (i = 0; i < b->eob_max_offset_16x16; i++) {
- rc = vp8_default_zig_zag1d_16x16[i];
+ rc = vp9_default_zig_zag1d_16x16[i];
z = coeff_ptr[rc];
zbin = (zbin_ptr[rc!=0] + *zbin_boost_ptr + zbin_oq_value);
@@ -460,7 +460,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
// all the 4x4 ac values
for (i = 1; i < 16; i++) {
- int rc = vp8_default_zig_zag1d[i];
+ int rc = vp9_default_zig_zag1d[i];
quant_val = vp9_ac_yquant(Q);
invert_quant(cpi->Y1quant[Q] + rc,
@@ -494,7 +494,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
// This needs cleaning up for 8x8 especially if we are to add
// support for non-flat Q matrices
for (i = 1; i < 64; i++) {
- int rc = vp8_default_zig_zag1d_8x8[i];
+ int rc = vp9_default_zig_zag1d_8x8[i];
quant_val = vp9_ac_yquant(Q);
cpi->Y1zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
@@ -514,7 +514,7 @@ void vp9_init_quantizer(VP8_COMP *cpi) {
// 16x16 structures. Same comment above applies.
for (i = 1; i < 256; i++) {
- int rc = vp8_default_zig_zag1d_16x16[i];
+ int rc = vp9_default_zig_zag1d_16x16[i];
quant_val = vp9_ac_yquant(Q);
cpi->Y1zbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
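
Each vp9_regular_quantize_b_* variant above applies the same scalar dead-zone quantizer along its zig-zag scan: a coefficient is dropped unless its magnitude clears the run-boosted zbin, and survivors take a rounded multiply/shift. A sketch of the per-coefficient step, following the classic vp8 layout (the 16-bit fixed-point details are assumptions, not quoted from this file):

/* Sketch: one dead-zone quantizer step. */
static void quantize_one(int z, int zbin, int round, int quant,
                         int quant_shift, int dequant,
                         short *qcoeff, short *dqcoeff) {
  const int sz = z >> 31;             /* sign mask */
  int x = (z ^ sz) - sz;              /* abs(z) */
  *qcoeff = *dqcoeff = 0;
  if (x >= zbin) {                    /* dead zone: small values -> 0 */
    int y;
    x += round;
    y = ((((x * quant) >> 16) + x) * quant_shift) >> 16;
    y = (y ^ sz) - sz;                /* restore sign */
    *qcoeff = (short)y;
    *dqcoeff = (short)(y * dequant);  /* reconstruction used by RD code */
  }
}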
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index 8197bfd78..3191ab802 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -28,9 +28,6 @@
#define MIN_BPB_FACTOR 0.005
#define MAX_BPB_FACTOR 50
-extern const MODE_DEFINITION vp8_mode_order[MAX_MODES];
-
-
#ifdef MODE_STATS
extern unsigned int y_modes[VP8_YMODES];
extern unsigned int uv_modes[VP8_UV_MODES];
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index aa955baa5..02076b280 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -83,7 +83,7 @@ static const int auto_speed_thresh[17] = {
};
#if CONFIG_PRED_FILTER
-const MODE_DEFINITION vp8_mode_order[MAX_MODES] = {
+const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
{ZEROMV, LAST_FRAME, 0, 0},
{ZEROMV, LAST_FRAME, 0, 1},
{DC_PRED, INTRA_FRAME, 0, 0},
@@ -155,7 +155,7 @@ const MODE_DEFINITION vp8_mode_order[MAX_MODES] = {
{SPLITMV, GOLDEN_FRAME, ALTREF_FRAME, 0}
};
#else
-const MODE_DEFINITION vp8_mode_order[MAX_MODES] = {
+const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
{ZEROMV, LAST_FRAME, 0},
{DC_PRED, INTRA_FRAME, 0},
@@ -228,11 +228,11 @@ static void fill_token_costs(
if (k == 0 && ((j > 0 && i > 0) || (j > 1 && i == 0)))
vp9_cost_tokens_skip((int *)(c[i][j][k]),
p[i][j][k],
- vp8_coef_tree);
+ vp9_coef_tree);
else
vp9_cost_tokens((int *)(c[i][j][k]),
p[i][j][k],
- vp8_coef_tree);
+ vp9_coef_tree);
}
}
@@ -553,15 +553,15 @@ static int cost_coeffs_2x2(MACROBLOCK *mb,
assert(eob <= 4);
for (; c < eob; c++) {
- int v = qcoeff_ptr[vp8_default_zig_zag1d[c]];
- int t = vp8_dct_value_tokens_ptr[v].Token;
- cost += mb->token_costs[TX_8X8][type][vp8_coef_bands[c]][pt][t];
- cost += vp8_dct_value_cost_ptr[v];
- pt = vp8_prev_token_class[t];
+ int v = qcoeff_ptr[vp9_default_zig_zag1d[c]];
+ int t = vp9_dct_value_tokens_ptr[v].Token;
+ cost += mb->token_costs[TX_8X8][type][vp9_coef_bands[c]][pt][t];
+ cost += vp9_dct_value_cost_ptr[v];
+ pt = vp9_prev_token_class[t];
}
if (c < 4)
- cost += mb->token_costs[TX_8X8][type][vp8_coef_bands[c]]
+ cost += mb->token_costs[TX_8X8][type][vp9_coef_bands[c]]
[pt] [DCT_EOB_TOKEN];
pt = (c != !type); // is eob first coefficient;
@@ -585,23 +585,23 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
switch (tx_size) {
case TX_4X4:
- scan = vp8_default_zig_zag1d;
- band = vp8_coef_bands;
+ scan = vp9_default_zig_zag1d;
+ band = vp9_coef_bands;
default_eob = 16;
if (type == PLANE_TYPE_Y_WITH_DC) {
tx_type = get_tx_type_4x4(xd, b);
if (tx_type != DCT_DCT) {
switch (tx_type) {
case ADST_DCT:
- scan = vp8_row_scan;
+ scan = vp9_row_scan;
break;
case DCT_ADST:
- scan = vp8_col_scan;
+ scan = vp9_col_scan;
break;
default:
- scan = vp8_default_zig_zag1d;
+ scan = vp9_default_zig_zag1d;
break;
}
}
@@ -609,8 +609,8 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
break;
case TX_8X8:
- scan = vp8_default_zig_zag1d_8x8;
- band = vp8_coef_bands_8x8;
+ scan = vp9_default_zig_zag1d_8x8;
+ band = vp9_coef_bands_8x8;
default_eob = 64;
if (type == PLANE_TYPE_Y_WITH_DC) {
BLOCKD *bb;
@@ -623,8 +623,8 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
}
break;
case TX_16X16:
- scan = vp8_default_zig_zag1d_16x16;
- band = vp8_coef_bands_16x16;
+ scan = vp9_default_zig_zag1d_16x16;
+ band = vp9_coef_bands_16x16;
default_eob = 256;
if (type == PLANE_TYPE_Y_WITH_DC) {
tx_type = get_tx_type_16x16(xd, b);
@@ -643,10 +643,10 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
if (tx_type != DCT_DCT) {
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
- int t = vp8_dct_value_tokens_ptr[v].Token;
+ int t = vp9_dct_value_tokens_ptr[v].Token;
cost += mb->hybrid_token_costs[tx_size][type][band[c]][pt][t];
- cost += vp8_dct_value_cost_ptr[v];
- pt = vp8_prev_token_class[t];
+ cost += vp9_dct_value_cost_ptr[v];
+ pt = vp9_prev_token_class[t];
}
if (c < seg_eob)
cost += mb->hybrid_token_costs[tx_size][type][band[c]]
@@ -654,10 +654,10 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, PLANE_TYPE type,
} else {
for (; c < eob; c++) {
int v = qcoeff_ptr[scan[c]];
- int t = vp8_dct_value_tokens_ptr[v].Token;
+ int t = vp9_dct_value_tokens_ptr[v].Token;
cost += mb->token_costs[tx_size][type][band[c]][pt][t];
- cost += vp8_dct_value_cost_ptr[v];
- pt = vp8_prev_token_class[t];
+ cost += vp9_dct_value_cost_ptr[v];
+ pt = vp9_prev_token_class[t];
}
if (c < seg_eob)
cost += mb->token_costs[tx_size][type][band[c]]
@@ -685,11 +685,11 @@ static int vp8_rdcost_mby(MACROBLOCK *mb) {
for (b = 0; b < 16; b++)
cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_NO_DC,
- ta + vp8_block2above[b], tl + vp8_block2left[b],
+ ta + vp9_block2above[b], tl + vp9_block2left[b],
TX_4X4);
cost += cost_coeffs(mb, xd->block + 24, PLANE_TYPE_Y2,
- ta + vp8_block2above[24], tl + vp8_block2left[24],
+ ta + vp9_block2above[24], tl + vp9_block2left[24],
TX_4X4);
return cost;
@@ -761,11 +761,11 @@ static int vp8_rdcost_mby_8x8(MACROBLOCK *mb, int backup) {
for (b = 0; b < 16; b += 4)
cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_NO_DC,
- ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
+ ta + vp9_block2above_8x8[b], tl + vp9_block2left_8x8[b],
TX_8X8);
cost += cost_coeffs_2x2(mb, xd->block + 24, PLANE_TYPE_Y2,
- ta + vp8_block2above[24], tl + vp8_block2left[24]);
+ ta + vp9_block2above[24], tl + vp9_block2left[24]);
return cost;
}
@@ -1221,8 +1221,8 @@ static int64_t rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rat
#if CONFIG_COMP_INTRA_PRED
& best_second_mode, allow_comp,
#endif
- bmode_costs, ta + vp8_block2above[i],
- tl + vp8_block2left[i], &r, &ry, &d);
+ bmode_costs, ta + vp9_block2above[i],
+ tl + vp9_block2left[i], &r, &ry, &d);
cost += r;
distortion += d;
@@ -1448,8 +1448,8 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
// compute quantization mse of 8x8 block
distortion = vp9_block_error_c((x->block + idx)->coeff,
(xd->block + idx)->dqcoeff, 64);
- ta0 = a[vp8_block2above_8x8[idx]];
- tl0 = l[vp8_block2left_8x8[idx]];
+ ta0 = a[vp9_block2above_8x8[idx]];
+ tl0 = l[vp9_block2left_8x8[idx]];
rate_t = cost_coeffs(x, xd->block + idx, PLANE_TYPE_Y_WITH_DC,
&ta0, &tl0, TX_8X8);
@@ -1475,10 +1475,10 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
distortion += vp9_block_error_c((x->block + ib + 5)->coeff,
(xd->block + ib + 5)->dqcoeff, 16);
- ta0 = a[vp8_block2above[ib]];
- ta1 = a[vp8_block2above[ib + 1]];
- tl0 = l[vp8_block2left[ib]];
- tl1 = l[vp8_block2left[ib + 4]];
+ ta0 = a[vp9_block2above[ib]];
+ ta1 = a[vp9_block2above[ib + 1]];
+ tl0 = l[vp9_block2left[ib]];
+ tl1 = l[vp9_block2left[ib + 4]];
rate_t = cost_coeffs(x, xd->block + ib, PLANE_TYPE_Y_WITH_DC,
&ta0, &tl0, TX_4X4);
rate_t += cost_coeffs(x, xd->block + ib + 1, PLANE_TYPE_Y_WITH_DC,
@@ -1520,15 +1520,15 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
vp9_encode_intra8x8(IF_RTCD(&cpi->rtcd), x, ib);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
- a[vp8_block2above_8x8[idx]] = besta0;
- a[vp8_block2above_8x8[idx] + 1] = besta1;
- l[vp8_block2left_8x8[idx]] = bestl0;
- l[vp8_block2left_8x8[idx] + 1] = bestl1;
+ a[vp9_block2above_8x8[idx]] = besta0;
+ a[vp9_block2above_8x8[idx] + 1] = besta1;
+ l[vp9_block2left_8x8[idx]] = bestl0;
+ l[vp9_block2left_8x8[idx] + 1] = bestl1;
} else {
- a[vp8_block2above[ib]] = besta0;
- a[vp8_block2above[ib + 1]] = besta1;
- l[vp8_block2left[ib]] = bestl0;
- l[vp8_block2left[ib + 4]] = bestl1;
+ a[vp9_block2above[ib]] = besta0;
+ a[vp9_block2above[ib + 1]] = besta1;
+ l[vp9_block2left[ib]] = bestl0;
+ l[vp9_block2left[ib + 4]] = bestl1;
}
return best_rd;
@@ -1564,7 +1564,7 @@ static int64_t rd_pick_intra8x8mby_modes(VP8_COMP *cpi, MACROBLOCK *mb,
#endif
int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);
- ib = vp8_i8x8_block[i];
+ ib = vp9_i8x8_block[i];
total_rd += rd_pick_intra8x8block(
cpi, mb, ib, &best_mode,
#if CONFIG_COMP_INTRA_PRED
@@ -1600,7 +1600,7 @@ static int rd_cost_mbuv(MACROBLOCK *mb) {
for (b = 16; b < 24; b++)
cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_UV,
- ta + vp8_block2above[b], tl + vp8_block2left[b],
+ ta + vp9_block2above[b], tl + vp9_block2left[b],
TX_4X4);
return cost;
@@ -1642,8 +1642,8 @@ static int rd_cost_mbuv_8x8(MACROBLOCK *mb, int backup) {
for (b = 16; b < 24; b += 4)
cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_UV,
- ta + vp8_block2above_8x8[b],
- tl + vp8_block2left_8x8[b], TX_8X8);
+ ta + vp9_block2above_8x8[b],
+ tl + vp9_block2left_8x8[b], TX_8X8);
return cost;
}
@@ -1959,8 +1959,8 @@ int vp9_cost_mv_ref(VP8_COMP *cpi,
vp8_prob p [VP8_MVREFS - 1];
assert(NEARESTMV <= m && m <= SPLITMV);
vp9_mv_ref_probs(pc, p, near_mv_ref_ct);
- return vp8_cost_token(vp8_mv_ref_tree, p,
- vp8_mv_ref_encoding_array - NEARESTMV + m);
+ return vp8_cost_token(vp9_mv_ref_tree, p,
+ vp9_mv_ref_encoding_array - NEARESTMV + m);
} else
return 0;
}
@@ -2101,8 +2101,8 @@ static int64_t encode_inter_mb_segment(MACROBLOCK *x,
thisdistortion = vp9_block_error(be->coeff, bd->dqcoeff, 16);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above[i],
- tl + vp8_block2left[i], TX_4X4);
+ ta + vp9_block2above[i],
+ tl + vp9_block2left[i], TX_4X4);
}
}
*distortion >>= 2;
@@ -2134,7 +2134,7 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
*distortion = 0;
*labelyrate = 0;
for (i = 0; i < 4; i++) {
- int ib = vp8_i8x8_block[i];
+ int ib = vp9_i8x8_block[i];
if (labels[ib] == which_label) {
int idx = (ib & 8) + ((ib & 2) << 1);
@@ -2154,8 +2154,8 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64);
otherdist += thisdistortion;
othercost += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
- tacp + vp8_block2above_8x8[idx],
- tlcp + vp8_block2left_8x8[idx], TX_8X8);
+ tacp + vp9_block2above_8x8[idx],
+ tlcp + vp9_block2left_8x8[idx], TX_8X8);
}
for (j = 0; j < 4; j += 2) {
bd = &xd->block[ib + iblock[j]];
@@ -2165,12 +2165,12 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
thisdistortion = vp9_block_error_c(be->coeff, bd->dqcoeff, 32);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above[ib + iblock[j]],
- tl + vp8_block2left[ib + iblock[j]],
+ ta + vp9_block2above[ib + iblock[j]],
+ tl + vp9_block2left[ib + iblock[j]],
TX_4X4);
*labelyrate += cost_coeffs(x, bd + 1, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above[ib + iblock[j] + 1],
- tl + vp8_block2left[ib + iblock[j]],
+ ta + vp9_block2above[ib + iblock[j] + 1],
+ tl + vp9_block2left[ib + iblock[j]],
TX_4X4);
}
} else /* 8x8 */ {
@@ -2183,12 +2183,12 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
thisdistortion = vp9_block_error_c(be3->coeff, bd3->dqcoeff, 32);
otherdist += thisdistortion;
othercost += cost_coeffs(x, bd3, PLANE_TYPE_Y_WITH_DC,
- tacp + vp8_block2above[ib + iblock[j]],
- tlcp + vp8_block2left[ib + iblock[j]],
+ tacp + vp9_block2above[ib + iblock[j]],
+ tlcp + vp9_block2left[ib + iblock[j]],
TX_4X4);
othercost += cost_coeffs(x, bd3 + 1, PLANE_TYPE_Y_WITH_DC,
- tacp + vp8_block2above[ib + iblock[j] + 1],
- tlcp + vp8_block2left[ib + iblock[j]],
+ tacp + vp9_block2above[ib + iblock[j] + 1],
+ tlcp + vp9_block2left[ib + iblock[j]],
TX_4X4);
}
}
@@ -2197,8 +2197,8 @@ static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64);
*distortion += thisdistortion;
*labelyrate += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
- ta + vp8_block2above_8x8[idx],
- tl + vp8_block2left_8x8[idx], TX_8X8);
+ ta + vp9_block2above_8x8[idx],
+ tl + vp9_block2left_8x8[idx], TX_8X8);
}
}
}
@@ -2283,8 +2283,8 @@ static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x,
tl_b = (ENTROPY_CONTEXT *)&t_left_b;
v_fn_ptr = &cpi->fn_ptr[segmentation];
- labels = vp8_mbsplits[segmentation];
- label_count = vp8_mbsplit_count[segmentation];
+ labels = vp9_mbsplits[segmentation];
+ label_count = vp9_mbsplit_count[segmentation];
// 64 makes this threshold really big effectively
// making it so that we very rarely check mvs on
@@ -2293,8 +2293,8 @@ static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x,
label_mv_thresh = 1 * bsi->mvthresh / label_count;
// Segmentation method overheads
- rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs,
- vp8_mbsplit_encodings + segmentation);
+ rate = vp8_cost_token(vp9_mbsplit_tree, vp9_mbsplit_probs,
+ vp9_mbsplit_encodings + segmentation);
rate += vp9_cost_mv_ref(cpi, SPLITMV, bsi->mdcounts);
this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
br += rate;
@@ -2365,7 +2365,7 @@ static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x,
mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3;
// find first label
- n = vp8_mbsplit_offset[segmentation][i];
+ n = vp9_mbsplit_offset[segmentation][i];
c = &x->block[n];
e = &x->e_mbd.block[n];
@@ -2457,7 +2457,7 @@ static void rd_check_segment_txsize(VP8_COMP *cpi, MACROBLOCK *x,
best_eobs[j] = x->e_mbd.block[j].eob;
} else {
for (j = 0; j < 4; j++) {
- int ib = vp8_i8x8_block[j], idx = j * 4;
+ int ib = vp9_i8x8_block[j], idx = j * 4;
if (labels[ib] == i)
best_eobs[idx] = x->e_mbd.block[idx].eob;
@@ -2521,7 +2521,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
/* 16 = n_blocks */
int_mv seg_mvs[16][MAX_REF_FRAMES - 1],
int64_t txfm_cache[NB_TXFM_MODES]) {
- int i, n, c = vp8_mbsplit_count[segmentation];
+ int i, n, c = vp9_mbsplit_count[segmentation];
if (segmentation == PARTITIONING_4X4) {
int64_t rd[16];
@@ -2750,12 +2750,12 @@ static int rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
/* save partitions */
mbmi->txfm_size = bsi.txfm_size;
mbmi->partitioning = bsi.segment_num;
- x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];
+ x->partition_info->count = vp9_mbsplit_count[bsi.segment_num];
for (i = 0; i < x->partition_info->count; i++) {
int j;
- j = vp8_mbsplit_offset[bsi.segment_num][i];
+ j = vp9_mbsplit_offset[bsi.segment_num][i];
x->partition_info->bmi[i].mode = bsi.modes[j];
x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
@@ -3021,7 +3021,7 @@ static void set_i8x8_block_modes(MACROBLOCK *x, int modes[2][4]) {
int i;
MACROBLOCKD *xd = &x->e_mbd;
for (i = 0; i < 4; i++) {
- int ib = vp8_i8x8_block[i];
+ int ib = vp9_i8x8_block[i];
xd->mode_info_context->bmi[ib + 0].as_mode.first = modes[0][i];
xd->mode_info_context->bmi[ib + 1].as_mode.first = modes[0][i];
xd->mode_info_context->bmi[ib + 4].as_mode.first = modes[0][i];
@@ -3402,13 +3402,13 @@ static int64_t handle_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
#if CONFIG_PRED_FILTER
// Filtered prediction:
- mbmi->pred_filter_enabled = vp8_mode_order[mode_index].pred_filter_flag;
+ mbmi->pred_filter_enabled = vp9_mode_order[mode_index].pred_filter_flag;
*rate2 += vp8_cost_bit(cpi->common.prob_pred_filter_off,
mbmi->pred_filter_enabled);
#endif
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
const int c = vp9_get_pred_context(cm, xd, PRED_SWITCHABLE_INTERP);
- const int m = vp8_switchable_interp_map[mbmi->interp_filter];
+ const int m = vp9_switchable_interp_map[mbmi->interp_filter];
*rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs[c][m];
}
@@ -3668,18 +3668,18 @@ void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
rate_y = 0;
rate_uv = 0;
- this_mode = vp8_mode_order[mode_index].mode;
+ this_mode = vp9_mode_order[mode_index].mode;
mbmi->mode = this_mode;
mbmi->uv_mode = DC_PRED;
- mbmi->ref_frame = vp8_mode_order[mode_index].ref_frame;
- mbmi->second_ref_frame = vp8_mode_order[mode_index].second_ref_frame;
+ mbmi->ref_frame = vp9_mode_order[mode_index].ref_frame;
+ mbmi->second_ref_frame = vp9_mode_order[mode_index].second_ref_frame;
#if CONFIG_PRED_FILTER
mbmi->pred_filter_enabled = 0;
#endif
if (cpi->common.mcomp_filter_type == SWITCHABLE &&
this_mode >= NEARESTMV && this_mode <= SPLITMV) {
mbmi->interp_filter =
- vp8_switchable_interp[switchable_filter_index++];
+ vp9_switchable_interp[switchable_filter_index++];
if (switchable_filter_index == VP8_SWITCHABLE_FILTERS)
switchable_filter_index = 0;
} else {
@@ -3747,15 +3747,15 @@ void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
if (cpi->zbin_mode_boost_enabled) {
- if (vp8_mode_order[mode_index].ref_frame == INTRA_FRAME)
+ if (vp9_mode_order[mode_index].ref_frame == INTRA_FRAME)
cpi->zbin_mode_boost = 0;
else {
- if (vp8_mode_order[mode_index].mode == ZEROMV) {
- if (vp8_mode_order[mode_index].ref_frame != LAST_FRAME)
+ if (vp9_mode_order[mode_index].mode == ZEROMV) {
+ if (vp9_mode_order[mode_index].ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
- } else if (vp8_mode_order[mode_index].mode == SPLITMV)
+ } else if (vp9_mode_order[mode_index].mode == SPLITMV)
cpi->zbin_mode_boost = 0;
else
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
@@ -3946,7 +3946,7 @@ void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
if (cpi->common.mcomp_filter_type == SWITCHABLE)
rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
[vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
- [vp8_switchable_interp_map[mbmi->interp_filter]];
+ [vp9_switchable_interp_map[mbmi->interp_filter]];
// If even the 'Y' rd value of split is higher than best so far
// then don't bother looking at UV
if (tmp_rd < best_yrd) {
@@ -4183,7 +4183,7 @@ void vp9_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x,
best_mbmode.mode <= SPLITMV) {
++cpi->switchable_interp_count
[vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
- [vp8_switchable_interp_map[best_mbmode.interp_filter]];
+ [vp9_switchable_interp_map[best_mbmode.interp_filter]];
}
// Reduce the activation RD thresholds for the best choice mode
@@ -4530,10 +4530,10 @@ int64_t vp9_rd_pick_inter_mode_sb(VP8_COMP *cpi, MACROBLOCK *x,
continue;
}
- this_mode = vp8_mode_order[mode_index].mode;
- ref_frame = vp8_mode_order[mode_index].ref_frame;
+ this_mode = vp9_mode_order[mode_index].mode;
+ ref_frame = vp9_mode_order[mode_index].ref_frame;
mbmi->ref_frame = ref_frame;
- comp_pred = vp8_mode_order[mode_index].second_ref_frame != INTRA_FRAME;
+ comp_pred = vp9_mode_order[mode_index].second_ref_frame != INTRA_FRAME;
mbmi->mode = this_mode;
mbmi->uv_mode = DC_PRED;
#if CONFIG_COMP_INTRA_PRED
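
The rd loops in this file read four fields out of vp9_mode_order[mode_index]. A hedged sketch of the record shape those accesses imply; the type name and field types here are illustrative, the field names are taken from the hunks above.

    /* Hedged sketch of one vp9_mode_order entry, inferred from the
     * accesses above; the actual definition lives elsewhere in the
     * encoder. */
    typedef struct {
      int mode;              /* prediction mode: ZEROMV, SPLITMV, ...    */
      int ref_frame;         /* INTRA_FRAME, LAST_FRAME, ...             */
      int second_ref_frame;  /* != INTRA_FRAME means compound prediction */
      int pred_filter_flag;  /* only read under CONFIG_PRED_FILTER       */
    } mode_order_entry_sketch;
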
diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c
index 9482b5f47..1a30deb95 100644
--- a/vp8/encoder/tokenize.c
+++ b/vp8/encoder/tokenize.c
@@ -52,14 +52,14 @@ void vp9_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp9_fix_contexts(MACROBLOCKD *xd);
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
-const TOKENVALUE *vp8_dct_value_tokens_ptr;
+const TOKENVALUE *vp9_dct_value_tokens_ptr;
static int dct_value_cost[DCT_MAX_VALUE * 2];
-const int *vp8_dct_value_cost_ptr;
+const int *vp9_dct_value_cost_ptr;
static void fill_value_tokens() {
TOKENVALUE *const t = dct_value_tokens + DCT_MAX_VALUE;
- vp8_extra_bit_struct *const e = vp8_extra_bits;
+ vp8_extra_bit_struct *const e = vp9_extra_bits;
int i = -DCT_MAX_VALUE;
int sign = 1;
@@ -88,7 +88,7 @@ static void fill_value_tokens() {
// initialize the cost for extra bits for all possible coefficient values.
{
int cost = 0;
- vp8_extra_bit_struct *p = vp8_extra_bits + t[i].Token;
+ vp8_extra_bit_struct *p = vp9_extra_bits + t[i].Token;
if (p->base_val) {
const int extra = t[i].Extra;
@@ -105,8 +105,8 @@ static void fill_value_tokens() {
} while (++i < DCT_MAX_VALUE);
- vp8_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
- vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
+ vp9_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
+ vp9_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
}
static void tokenize_b(VP8_COMP *cpi,
@@ -136,15 +136,15 @@ static void tokenize_b(VP8_COMP *cpi,
default:
case TX_4X4:
seg_eob = 16;
- bands = vp8_coef_bands;
- scan = vp8_default_zig_zag1d;
+ bands = vp9_coef_bands;
+ scan = vp9_default_zig_zag1d;
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts;
probs = cpi->common.fc.hybrid_coef_probs;
if (tx_type == ADST_DCT) {
- scan = vp8_row_scan;
+ scan = vp9_row_scan;
} else if (tx_type == DCT_ADST) {
- scan = vp8_col_scan;
+ scan = vp9_col_scan;
}
} else {
counts = cpi->coef_counts;
@@ -154,12 +154,12 @@ static void tokenize_b(VP8_COMP *cpi,
case TX_8X8:
if (type == PLANE_TYPE_Y2) {
seg_eob = 4;
- bands = vp8_coef_bands;
- scan = vp8_default_zig_zag1d;
+ bands = vp9_coef_bands;
+ scan = vp9_default_zig_zag1d;
} else {
seg_eob = 64;
- bands = vp8_coef_bands_8x8;
- scan = vp8_default_zig_zag1d_8x8;
+ bands = vp9_coef_bands_8x8;
+ scan = vp9_default_zig_zag1d_8x8;
}
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts_8x8;
@@ -171,8 +171,8 @@ static void tokenize_b(VP8_COMP *cpi,
break;
case TX_16X16:
seg_eob = 256;
- bands = vp8_coef_bands_16x16;
- scan = vp8_default_zig_zag1d_16x16;
+ bands = vp9_coef_bands_16x16;
+ scan = vp9_default_zig_zag1d_16x16;
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts_16x16;
probs = cpi->common.fc.hybrid_coef_probs_16x16;
@@ -196,8 +196,8 @@ static void tokenize_b(VP8_COMP *cpi,
assert(-DCT_MAX_VALUE <= v && v < DCT_MAX_VALUE);
- t->Extra = vp8_dct_value_tokens_ptr[v].Extra;
- token = vp8_dct_value_tokens_ptr[v].Token;
+ t->Extra = vp9_dct_value_tokens_ptr[v].Extra;
+ token = vp9_dct_value_tokens_ptr[v].Token;
} else {
token = DCT_EOB_TOKEN;
}
@@ -206,11 +206,11 @@ static void tokenize_b(VP8_COMP *cpi,
t->context_tree = probs[type][band][pt];
t->skip_eob_node = (pt == 0) && ((band > 0 && type != PLANE_TYPE_Y_NO_DC) ||
(band > 1 && type == PLANE_TYPE_Y_NO_DC));
- assert(vp8_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
+ assert(vp9_coef_encodings[t->Token].Len - t->skip_eob_node > 0);
if (!dry_run) {
++counts[type][band][pt][token];
}
- pt = vp8_prev_token_class[token];
+ pt = vp9_prev_token_class[token];
++t;
} while (c < eob && ++c < seg_eob);
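
A minimal sketch of the context chain inside tokenize_b above: counts (and probabilities) are indexed by (coefficient band, previous-token class, token), and the class is refreshed from each emitted token. Array extents and the EOB token value 11 are placeholders for the encoder's constants.

    /* Hedged sketch: the per-position context pt is derived from the
     * previous token's class, exactly as in the loop above. */
    static void token_context_chain_sketch(const int *bands, const int *tokens,
                                           int eob, int seg_eob,
                                           const int *prev_token_class,
                                           unsigned int counts[8][3][12]) {
      int c = 0, pt = 0;  /* pt: class of the previously coded token */
      do {
        int token = (c < eob) ? tokens[c] : 11;  /* 11 stands in for EOB */
        ++counts[bands[c]][pt][token];
        pt = prev_token_class[token];            /* feeds the next position */
      } while (c < eob && ++c < seg_eob);
    }
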
@@ -352,11 +352,11 @@ void vp9_tokenize_mb(VP8_COMP *cpi,
if (has_y2_block) {
if (tx_size == TX_8X8) {
tokenize_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2,
- A + vp8_block2above_8x8[24], L + vp8_block2left_8x8[24],
+ A + vp9_block2above_8x8[24], L + vp9_block2left_8x8[24],
TX_8X8, dry_run);
} else {
tokenize_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2,
- A + vp8_block2above[24], L + vp8_block2left[24],
+ A + vp9_block2above[24], L + vp9_block2left[24],
TX_4X4, dry_run);
}
@@ -372,47 +372,47 @@ void vp9_tokenize_mb(VP8_COMP *cpi,
for (b = 16; b < 24; b += 4) {
tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+ A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
vpx_memset(&A[8], 0, sizeof(A[8]));
vpx_memset(&L[8], 0, sizeof(L[8]));
} else if (tx_size == TX_8X8) {
for (b = 0; b < 16; b += 4) {
tokenize_b(cpi, xd, xd->block + b, t, plane_type,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+ A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV) {
for (b = 16; b < 24; b++) {
tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
- A + vp8_block2above[b], L + vp8_block2left[b],
+ A + vp9_block2above[b], L + vp9_block2left[b],
TX_4X4, dry_run);
}
} else {
for (b = 16; b < 24; b += 4) {
tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+ A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
}
} else {
for (b = 0; b < 16; b++) {
tokenize_b(cpi, xd, xd->block + b, t, plane_type,
- A + vp8_block2above[b], L + vp8_block2left[b],
+ A + vp9_block2above[b], L + vp9_block2left[b],
TX_4X4, dry_run);
}
for (b = 16; b < 24; b++) {
tokenize_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
- A + vp8_block2above[b], L + vp8_block2left[b],
+ A + vp9_block2above[b], L + vp9_block2left[b],
TX_4X4, dry_run);
}
}
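
Throughout vp9_tokenize_mb, A[] and L[] are the above/left entropy-context flags, one per 4x4 column/row, and block2above/block2left map a block index to its slot. An 8x8 transform spans two adjacent 4x4 slots, which is what the `+ 1` copies after each 8x8 call are for. A minimal sketch of that step:

    /* Hedged sketch: mirror the context the tokenizer wrote for an 8x8
     * block into the second 4x4 slot it covers, as done above. */
    static void mirror_8x8_context_sketch(unsigned char *A, unsigned char *L,
                                          int above_idx, int left_idx) {
      A[above_idx + 1] = A[above_idx];  /* second 4x4 column of the 8x8 */
      L[left_idx + 1]  = L[left_idx];   /* second 4x4 row of the 8x8    */
    }
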
@@ -569,7 +569,7 @@ void print_context_counters() {
for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
coef_counts[t] = context_counters [type] [band] [pt] [t];
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, coef_counts, 256, 1);
fprintf(f, "%s\n {", Comma(pt));
@@ -604,7 +604,7 @@ void print_context_counters() {
for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
coef_counts[t] = context_counters_8x8[type] [band] [pt] [t];
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, coef_counts, 256, 1);
fprintf(f, "%s\n {", Comma(pt));
@@ -637,7 +637,7 @@ void print_context_counters() {
for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
coef_counts[t] = context_counters_16x16[type] [band] [pt] [t];
vp9_tree_probs_from_distribution(
- MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
+ MAX_ENTROPY_TOKENS, vp9_coef_encodings, vp9_coef_tree,
coef_probs, branch_ct, coef_counts, 256, 1);
fprintf(f, "%s\n {", Comma(pt));
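
vp9_tree_probs_from_distribution, called above with a scale of 256, reduces token counts to per-branch binary probabilities. A hedged sketch of the per-branch step; the rounding and the clamp away from 0/256 are assumptions based on how 8-bit probabilities are used, not the function's exact arithmetic.

    /* Hedged sketch: fold a branch's (left, right) counts into an 8-bit
     * probability, keeping both arms codable. */
    static unsigned char branch_prob_sketch(unsigned int ct0, unsigned int ct1) {
      unsigned int total = ct0 + ct1;
      unsigned int p = total ? (256 * ct0 + total / 2) / total : 128;
      if (p == 0)  p = 1;
      if (p > 255) p = 255;
      return (unsigned char)p;
    }
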
@@ -688,7 +688,7 @@ static __inline void stuff_b(VP8_COMP *cpi,
switch (tx_size) {
default:
case TX_4X4:
- bands = vp8_coef_bands;
+ bands = vp9_coef_bands;
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts;
probs = cpi->common.fc.hybrid_coef_probs;
@@ -698,7 +698,7 @@ static __inline void stuff_b(VP8_COMP *cpi,
}
break;
case TX_8X8:
- bands = vp8_coef_bands_8x8;
+ bands = vp9_coef_bands_8x8;
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts_8x8;
probs = cpi->common.fc.hybrid_coef_probs_8x8;
@@ -708,7 +708,7 @@ static __inline void stuff_b(VP8_COMP *cpi,
}
break;
case TX_16X16:
- bands = vp8_coef_bands_16x16;
+ bands = vp9_coef_bands_16x16;
if (tx_type != DCT_DCT) {
counts = cpi->hybrid_coef_counts_16x16;
probs = cpi->common.fc.hybrid_coef_probs_16x16;
@@ -742,7 +742,7 @@ static void vp9_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd,
if (has_y2_block) {
stuff_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2,
- A + vp8_block2above_8x8[24], L + vp8_block2left_8x8[24],
+ A + vp9_block2above_8x8[24], L + vp9_block2left_8x8[24],
TX_8X8, dry_run);
plane_type = PLANE_TYPE_Y_NO_DC;
} else {
@@ -750,18 +750,18 @@ static void vp9_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *xd,
}
for (b = 0; b < 16; b += 4) {
- stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp8_block2above_8x8[b],
- L + vp8_block2left_8x8[b], TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp9_block2above_8x8[b],
+ L + vp9_block2left_8x8[b], TX_8X8, dry_run);
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
for (b = 16; b < 24; b += 4) {
stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+ A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
}
@@ -775,10 +775,10 @@ static void vp9_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
A[1] = A[2] = A[3] = A[0];
L[1] = L[2] = L[3] = L[0];
for (b = 16; b < 24; b += 4) {
- stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp8_block2above[b],
- L + vp8_block2above_8x8[b], TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp9_block2above[b],
+ L + vp9_block2above_8x8[b], TX_8X8, dry_run);
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
vpx_memset(&A[8], 0, sizeof(A[8]));
vpx_memset(&L[8], 0, sizeof(L[8]));
@@ -795,20 +795,20 @@ static void vp9_stuff_mb_4x4(VP8_COMP *cpi, MACROBLOCKD *xd,
xd->mode_info_context->mbmi.mode != SPLITMV);
if (has_y2_block) {
- stuff_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2, A + vp8_block2above[24],
- L + vp8_block2left[24], TX_4X4, dry_run);
+ stuff_b(cpi, xd, xd->block + 24, t, PLANE_TYPE_Y2, A + vp9_block2above[24],
+ L + vp9_block2left[24], TX_4X4, dry_run);
plane_type = PLANE_TYPE_Y_NO_DC;
} else {
plane_type = PLANE_TYPE_Y_WITH_DC;
}
for (b = 0; b < 16; b++)
- stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp8_block2above[b],
- L + vp8_block2left[b], TX_4X4, dry_run);
+ stuff_b(cpi, xd, xd->block + b, t, plane_type, A + vp9_block2above[b],
+ L + vp9_block2left[b], TX_4X4, dry_run);
for (b = 16; b < 24; b++)
- stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp8_block2above[b],
- L + vp8_block2left[b], TX_4X4, dry_run);
+ stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp9_block2above[b],
+ L + vp9_block2left[b], TX_4X4, dry_run);
}
static void vp9_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd,
@@ -819,15 +819,15 @@ static void vp9_stuff_mb_8x8_4x4uv(VP8_COMP *cpi, MACROBLOCKD *xd,
for (b = 0; b < 16; b += 4) {
stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_Y_WITH_DC,
- A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b],
+ A + vp9_block2above_8x8[b], L + vp9_block2left_8x8[b],
TX_8X8, dry_run);
- A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
- L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
+ A[vp9_block2above_8x8[b] + 1] = A[vp9_block2above_8x8[b]];
+ L[vp9_block2left_8x8[b] + 1] = L[vp9_block2left_8x8[b]];
}
for (b = 16; b < 24; b++)
- stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp8_block2above[b],
- L + vp8_block2left[b], TX_4X4, dry_run);
+ stuff_b(cpi, xd, xd->block + b, t, PLANE_TYPE_UV, A + vp9_block2above[b],
+ L + vp9_block2left[b], TX_4X4, dry_run);
}
void vp9_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
diff --git a/vp8/encoder/tokenize.h b/vp8/encoder/tokenize.h
index 7e36561e0..e02f002ff 100644
--- a/vp8/encoder/tokenize.h
+++ b/vp8/encoder/tokenize.h
@@ -49,11 +49,11 @@ extern INT64 context_counters_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
#endif
-extern const int *vp8_dct_value_cost_ptr;
+extern const int *vp9_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to
* improve cache locality, since it's needed for costing when the rest of the
* fields are not.
*/
-extern const TOKENVALUE *vp8_dct_value_tokens_ptr;
+extern const TOKENVALUE *vp9_dct_value_tokens_ptr;
#endif /* tokenize_h */
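
tokenize.c builds dct_value_tokens over [-DCT_MAX_VALUE, DCT_MAX_VALUE) and exports a pointer to the middle of the array, so a signed coefficient value indexes the table directly (the assert in tokenize_b checks exactly that range). A minimal sketch; the extent 2048 is a placeholder.

    /* Hedged sketch of the centered lookup behind
     * vp9_dct_value_tokens_ptr. */
    #define DCT_MAX_VALUE_SKETCH 2048  /* placeholder extent */
    typedef struct { short Token; short Extra; } tokenvalue_sketch;
    static tokenvalue_sketch table_sketch[DCT_MAX_VALUE_SKETCH * 2];
    static const tokenvalue_sketch *ptr_sketch =
        table_sketch + DCT_MAX_VALUE_SKETCH;
    /* usage: token = ptr_sketch[v].Token, valid for
     *        -DCT_MAX_VALUE_SKETCH <= v < DCT_MAX_VALUE_SKETCH */
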
diff --git a/vp8/encoder/treewriter.h b/vp8/encoder/treewriter.h
index 4c34db4f1..1a97f5a51 100644
--- a/vp8/encoder/treewriter.h
+++ b/vp8/encoder/treewriter.h
@@ -32,7 +32,7 @@ typedef BOOL_CODER vp8_writer;
/* Approximate length of an encoded bool in 256ths of a bit at given prob */
-#define vp8_cost_zero( x) ( vp8_prob_cost[x])
+#define vp8_cost_zero( x) ( vp9_prob_cost[x])
#define vp8_cost_one( x) vp8_cost_zero( vp8_complement(x))
#define vp8_cost_bit( x, b) vp8_cost_zero( (b)? vp8_complement(x) : (x) )
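
The macros above price a bool in 256ths of a bit via a table lookup. A hedged sketch of the quantity the table encodes; the real vp9_prob_cost is precomputed, and the complement here mirrors vp8_complement.

    #include <math.h>
    /* Hedged sketch: coding a zero at probability p (of 256) costs
     * -log2(p/256) bits, scaled by 256; a one costs the complement. */
    static int prob_cost_sketch(int prob /* 1..255 */) {
      return (int)(256.0 * -log2(prob / 256.0) + 0.5);
    }
    static int cost_bit_sketch(int prob, int bit) {
      return prob_cost_sketch(bit ? 255 - prob : prob);
    }
    /* cost_bit_sketch(128, 0) == 256, i.e. exactly one bit at p = 1/2 */
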
diff --git a/vp8/encoder/variance_c.c b/vp8/encoder/variance_c.c
index 2684df7b2..760c519ea 100644
--- a/vp8/encoder/variance_c.c
+++ b/vp8/encoder/variance_c.c
@@ -264,8 +264,8 @@ unsigned int vp9_sub_pixel_variance4x4_c(const unsigned char *src_ptr,
const short *HFilter, *VFilter;
unsigned short FData3[5 * 4]; // Temp data buffer used in filtering
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
// First filter 1d Horizontal
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 5, 4, HFilter);
@@ -288,8 +288,8 @@ unsigned int vp9_sub_pixel_variance8x8_c(const unsigned char *src_ptr,
unsigned char temp2[20 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 8, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 8, 8, VFilter);
@@ -308,8 +308,8 @@ unsigned int vp9_sub_pixel_variance16x16_c(const unsigned char *src_ptr,
unsigned char temp2[20 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 16, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 16, 16, VFilter);
@@ -329,8 +329,8 @@ unsigned int vp9_sub_pixel_variance32x32_c(const unsigned char *src_ptr,
unsigned char temp2[36 * 32];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 33, 32, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 32, 32, 32, 32, VFilter);
@@ -439,8 +439,8 @@ unsigned int vp9_sub_pixel_variance16x8_c(const unsigned char *src_ptr,
unsigned char temp2[20 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 16, HFilter);
var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 8, 16, VFilter);
@@ -459,8 +459,8 @@ unsigned int vp9_sub_pixel_variance8x16_c(const unsigned char *src_ptr,
unsigned char temp2[20 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line,
1, 17, 8, HFilter);
@@ -507,8 +507,8 @@ unsigned int vp8_sub_pixel_variance16x2_c(const unsigned char *src_ptr,
unsigned char temp2[20 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3,
src_pixels_per_line, 1, 3, 16, HFilter);
@@ -528,8 +528,8 @@ unsigned int vp8_sub_pixel_variance2x16_c(const unsigned char *src_ptr,
unsigned char temp2[2 * 16];
const short *HFilter, *VFilter;
- HFilter = vp8_bilinear_filters[xoffset];
- VFilter = vp8_bilinear_filters[yoffset];
+ HFilter = vp9_bilinear_filters[xoffset];
+ VFilter = vp9_bilinear_filters[yoffset];
var_filter_block2d_bil_first_pass(src_ptr, FData3,
src_pixels_per_line, 1, 17, 2, HFilter);
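
Each sub-pixel variance function above runs the same two-pass scheme: a horizontal bilinear pass into an intermediate buffer one row taller than the block, then a vertical pass over that buffer. A minimal sketch; the rounding constant 64 and shift 7 follow from taps that sum to 128.

    /* Hedged sketch of the two filtering passes used above. */
    static void bil_first_pass_sketch(const unsigned char *src,
                                      unsigned short *out, int src_stride,
                                      int rows, int cols, const short *filter) {
      int r, c;
      for (r = 0; r < rows; r++) {        /* rows = block height + 1 */
        for (c = 0; c < cols; c++)
          out[r * cols + c] = (unsigned short)
              ((src[c] * filter[0] + src[c + 1] * filter[1] + 64) >> 7);
        src += src_stride;
      }
    }
    static void bil_second_pass_sketch(const unsigned short *src,
                                       unsigned char *out, int rows, int cols,
                                       const short *filter) {
      int r, c;
      for (r = 0; r < rows; r++) {        /* rows = block height */
        for (c = 0; c < cols; c++)
          out[r * cols + c] = (unsigned char)
              ((src[c] * filter[0] + src[c + cols] * filter[1] + 64) >> 7);
        src += cols;
      }
    }
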
diff --git a/vp8/encoder/x86/quantize_sse2.asm b/vp8/encoder/x86/quantize_sse2.asm
index b12a5850a..153060e74 100644
--- a/vp8/encoder/x86/quantize_sse2.asm
+++ b/vp8/encoder/x86/quantize_sse2.asm
@@ -55,14 +55,14 @@ sym(vp9_regular_quantize_b_sse2):
%endif
%endif
- mov rdx, [rdi + vp8_block_coeff] ; coeff_ptr
- mov rcx, [rdi + vp8_block_zbin] ; zbin_ptr
- movd xmm7, [rdi + vp8_block_zbin_extra] ; zbin_oq_value
+ mov rdx, [rdi + vp9_block_coeff] ; coeff_ptr
+ mov rcx, [rdi + vp9_block_zbin] ; zbin_ptr
+ movd xmm7, [rdi + vp9_block_zbin_extra] ; zbin_oq_value
; z
movdqa xmm0, [rdx]
movdqa xmm4, [rdx + 16]
- mov rdx, [rdi + vp8_block_round] ; round_ptr
+ mov rdx, [rdi + vp9_block_round] ; round_ptr
pshuflw xmm7, xmm7, 0
punpcklwd xmm7, xmm7 ; duplicated zbin_oq_value
@@ -84,7 +84,7 @@ sym(vp9_regular_quantize_b_sse2):
movdqa xmm2, [rcx]
movdqa xmm3, [rcx + 16]
- mov rcx, [rdi + vp8_block_quant] ; quant_ptr
+ mov rcx, [rdi + vp9_block_quant] ; quant_ptr
; *zbin_ptr + zbin_oq_value
paddw xmm2, xmm7
@@ -126,8 +126,8 @@ sym(vp9_regular_quantize_b_sse2):
movdqa [rsp + qcoeff], xmm6
movdqa [rsp + qcoeff + 16], xmm6
- mov rdx, [rdi + vp8_block_zrun_zbin_boost] ; zbin_boost_ptr
- mov rax, [rdi + vp8_block_quant_shift] ; quant_shift_ptr
+ mov rdx, [rdi + vp9_block_zrun_zbin_boost] ; zbin_boost_ptr
+ mov rax, [rdi + vp9_block_quant_shift] ; quant_shift_ptr
mov [rsp + zrun_zbin_boost], rdx
%macro ZIGZAG_LOOP 1
@@ -149,7 +149,7 @@ sym(vp9_regular_quantize_b_sse2):
mov rdx, [rsp + zrun_zbin_boost] ; reset to b->zrun_zbin_boost
.rq_zigzag_loop_%1:
%endmacro
-; in vp8_default_zig_zag1d order: see vp8/common/entropy.c
+; in vp9_default_zig_zag1d order: see vp8/common/entropy.c
ZIGZAG_LOOP 0
ZIGZAG_LOOP 1
ZIGZAG_LOOP 4
@@ -170,8 +170,8 @@ ZIGZAG_LOOP 15
movdqa xmm2, [rsp + qcoeff]
movdqa xmm3, [rsp + qcoeff + 16]
- mov rcx, [rsi + vp8_blockd_dequant] ; dequant_ptr
- mov rdi, [rsi + vp8_blockd_dqcoeff] ; dqcoeff_ptr
+ mov rcx, [rsi + vp9_blockd_dequant] ; dequant_ptr
+ mov rdi, [rsi + vp9_blockd_dqcoeff] ; dqcoeff_ptr
; y ^ sz
pxor xmm2, xmm0
@@ -184,7 +184,7 @@ ZIGZAG_LOOP 15
movdqa xmm0, [rcx]
movdqa xmm1, [rcx + 16]
- mov rcx, [rsi + vp8_blockd_qcoeff] ; qcoeff_ptr
+ mov rcx, [rsi + vp9_blockd_qcoeff] ; qcoeff_ptr
pmullw xmm0, xmm2
pmullw xmm1, xmm3
@@ -214,7 +214,7 @@ ZIGZAG_LOOP 15
pmaxsw xmm2, xmm3
movd eax, xmm2
and eax, 0xff
- mov [rsi + vp8_blockd_eob], eax
+ mov [rsi + vp9_blockd_eob], eax
; begin epilog
add rsp, stack_size
@@ -270,9 +270,9 @@ sym(vp9_fast_quantize_b_sse2):
%endif
%endif
- mov rax, [rdi + vp8_block_coeff]
- mov rcx, [rdi + vp8_block_round]
- mov rdx, [rdi + vp8_block_quant_fast]
+ mov rax, [rdi + vp9_block_coeff]
+ mov rcx, [rdi + vp9_block_round]
+ mov rdx, [rdi + vp9_block_quant_fast]
; z = coeff
movdqa xmm0, [rax]
@@ -296,9 +296,9 @@ sym(vp9_fast_quantize_b_sse2):
paddw xmm1, [rcx]
paddw xmm5, [rcx + 16]
- mov rax, [rsi + vp8_blockd_qcoeff]
- mov rcx, [rsi + vp8_blockd_dequant]
- mov rdi, [rsi + vp8_blockd_dqcoeff]
+ mov rax, [rsi + vp9_blockd_qcoeff]
+ mov rcx, [rsi + vp9_blockd_dequant]
+ mov rdi, [rsi + vp9_blockd_dqcoeff]
; y = x * quant >> 16
pmulhw xmm1, [rdx]
@@ -354,7 +354,7 @@ sym(vp9_fast_quantize_b_sse2):
movd eax, xmm1
and eax, 0xff
- mov [rsi + vp8_blockd_eob], eax
+ mov [rsi + vp9_blockd_eob], eax
; begin epilog
%if ABI_IS_32BIT
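
A hedged C restatement of what vp9_regular_quantize_b_sse2 above computes, following the operand names in its comments (zbin, zbin_oq_value, round, quant, quant_shift, zrun_zbin_boost, dequant, eob). Treat it as a sketch of the algorithm, not the encoder's C path.

    /* Hedged sketch: zig-zag scan with a running zero-bin boost; long
     * zero runs widen the dead zone, a nonzero coefficient resets it. */
    static void regular_quantize_sketch(const short *coeff, const short *zbin,
                                        short zbin_oq, const short *round,
                                        const short *quant,
                                        const short *quant_shift,
                                        const short *zbin_boost,
                                        const short *dequant,
                                        const unsigned char *zigzag,
                                        short *qcoeff, short *dqcoeff,
                                        char *eob_out) {
      int i, rc, z, sz, x, y, run = 0, eob = -1;
      for (i = 0; i < 16; i++) {
        rc = zigzag[i];                 /* visit in zig-zag order */
        z  = coeff[rc];
        sz = z >> 31;                   /* sign mask              */
        x  = (z ^ sz) - sz;             /* abs(z)                 */
        qcoeff[rc] = dqcoeff[rc] = 0;
        if (x >= zbin[rc] + zbin_boost[run] + zbin_oq) {
          x += round[rc];
          y  = ((((x * quant[rc]) >> 16) + x) * quant_shift[rc]) >> 16;
          qcoeff[rc]  = (short)((y ^ sz) - sz);
          dqcoeff[rc] = (short)(qcoeff[rc] * dequant[rc]);
          if (y) { eob = i; run = 0; continue; }  /* reset boost run */
        }
        run++;                          /* longer run, larger dead zone */
      }
      *eob_out = (char)(eob + 1);
    }
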
diff --git a/vp8/encoder/x86/quantize_sse4.asm b/vp8/encoder/x86/quantize_sse4.asm
index 76f7a9278..8ce1b7cff 100644
--- a/vp8/encoder/x86/quantize_sse4.asm
+++ b/vp8/encoder/x86/quantize_sse4.asm
@@ -52,10 +52,10 @@ sym(vp9_regular_quantize_b_sse4):
%endif
%endif
- mov rax, [rdi + vp8_block_coeff]
- mov rcx, [rdi + vp8_block_zbin]
- mov rdx, [rdi + vp8_block_round]
- movd xmm7, [rdi + vp8_block_zbin_extra]
+ mov rax, [rdi + vp9_block_coeff]
+ mov rcx, [rdi + vp9_block_zbin]
+ mov rdx, [rdi + vp9_block_round]
+ movd xmm7, [rdi + vp9_block_zbin_extra]
; z
movdqa xmm0, [rax]
@@ -99,9 +99,9 @@ sym(vp9_regular_quantize_b_sse4):
movdqa xmm4, [rdx]
movdqa xmm5, [rdx + 16]
- mov rax, [rdi + vp8_block_quant_shift]
- mov rcx, [rdi + vp8_block_quant]
- mov rdx, [rdi + vp8_block_zrun_zbin_boost]
+ mov rax, [rdi + vp9_block_quant_shift]
+ mov rcx, [rdi + vp9_block_quant]
+ mov rdx, [rdi + vp9_block_zrun_zbin_boost]
; x + round
paddw xmm2, xmm4
@@ -156,7 +156,7 @@ sym(vp9_regular_quantize_b_sse4):
mov rdx, rax ; reset to b->zrun_zbin_boost
.rq_zigzag_loop_%1:
%endmacro
-; in vp8_default_zig_zag1d order: see vp8/common/entropy.c
+; in vp9_default_zig_zag1d order: see vp8/common/entropy.c
ZIGZAG_LOOP 0, 0, xmm2, xmm6, xmm4
ZIGZAG_LOOP 1, 1, xmm2, xmm6, xmm4
ZIGZAG_LOOP 4, 4, xmm2, xmm6, xmm4
@@ -174,8 +174,8 @@ ZIGZAG_LOOP 11, 3, xmm3, xmm7, xmm8
ZIGZAG_LOOP 14, 6, xmm3, xmm7, xmm8
ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
- mov rcx, [rsi + vp8_blockd_dequant]
- mov rdi, [rsi + vp8_blockd_dqcoeff]
+ mov rcx, [rsi + vp9_blockd_dequant]
+ mov rdi, [rsi + vp9_blockd_dqcoeff]
%if ABI_IS_32BIT
movdqa xmm4, [rsp + qcoeff]
@@ -195,7 +195,7 @@ ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
movdqa xmm0, [rcx]
movdqa xmm1, [rcx + 16]
- mov rcx, [rsi + vp8_blockd_qcoeff]
+ mov rcx, [rsi + vp9_blockd_qcoeff]
pmullw xmm0, xmm4
pmullw xmm1, xmm5
@@ -225,7 +225,7 @@ ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
add eax, 1
and eax, edi
- mov [rsi + vp8_blockd_eob], eax
+ mov [rsi + vp9_blockd_eob], eax
; begin epilog
%if ABI_IS_32BIT
@@ -249,6 +249,6 @@ ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
SECTION_RODATA
align 16
-; vp8/common/entropy.c: vp8_default_zig_zag1d
+; vp8/common/entropy.c: vp9_default_zig_zag1d
zig_zag1d:
db 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
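
The zig_zag1d table just above orders 4x4 coefficients by ascending frequency. A minimal sketch of the eob convention the quantizers use with it: eob is one past the last nonzero coefficient in scan order.

    /* Hedged sketch: derive eob from quantized coefficients using the
     * scan order shown above. */
    static const int zigzag_sketch[16] =
        { 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 };
    static int eob_from_scan_sketch(const short *qcoeff) {
      int i, eob = 0;
      for (i = 0; i < 16; i++)
        if (qcoeff[zigzag_sketch[i]])
          eob = i + 1;
      return eob;
    }
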
diff --git a/vp8/encoder/x86/quantize_ssse3.asm b/vp8/encoder/x86/quantize_ssse3.asm
index 018161e7c..14a9912d2 100644
--- a/vp8/encoder/x86/quantize_ssse3.asm
+++ b/vp8/encoder/x86/quantize_ssse3.asm
@@ -47,9 +47,9 @@ sym(vp9_fast_quantize_b_ssse3):
%endif
%endif
- mov rax, [rdi + vp8_block_coeff]
- mov rcx, [rdi + vp8_block_round]
- mov rdx, [rdi + vp8_block_quant_fast]
+ mov rax, [rdi + vp9_block_coeff]
+ mov rcx, [rdi + vp9_block_round]
+ mov rdx, [rdi + vp9_block_quant_fast]
; coeff
movdqa xmm0, [rax]
@@ -76,9 +76,9 @@ sym(vp9_fast_quantize_b_ssse3):
pmulhw xmm1, [rdx]
pmulhw xmm5, [rdx + 16]
- mov rax, [rsi + vp8_blockd_qcoeff]
- mov rdi, [rsi + vp8_blockd_dequant]
- mov rcx, [rsi + vp8_blockd_dqcoeff]
+ mov rax, [rsi + vp9_blockd_qcoeff]
+ mov rdi, [rsi + vp9_blockd_dequant]
+ mov rcx, [rsi + vp9_blockd_dqcoeff]
pxor xmm1, xmm0
pxor xmm5, xmm4
@@ -115,7 +115,7 @@ sym(vp9_fast_quantize_b_ssse3):
add eax, 1
and eax, edi ;if the bit mask was all zero,
;then eob = 0
- mov [rsi + vp8_blockd_eob], eax
+ mov [rsi + vp9_blockd_eob], eax
; begin epilog
%if ABI_IS_32BIT
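
A hedged C restatement of the fast path above: no zero-bin test, just add the rounding term and keep the high 16 bits of the multiply (the paddw/pmulhw pair), then restore the sign and dequantize.

    /* Hedged sketch of the fast quantizer's per-coefficient math. */
    static void fast_quantize_sketch(const short *coeff, const short *round,
                                     const short *quant_fast,
                                     const short *dequant,
                                     short *qcoeff, short *dqcoeff) {
      int i, z, sz, x, y;
      for (i = 0; i < 16; i++) {
        z  = coeff[i];
        sz = z >> 31;                            /* sign mask    */
        x  = (z ^ sz) - sz;                      /* abs(z)       */
        y  = ((x + round[i]) * quant_fast[i]) >> 16;
        qcoeff[i]  = (short)((y ^ sz) - sz);     /* restore sign */
        dqcoeff[i] = (short)(qcoeff[i] * dequant[i]);
      }
    }
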
diff --git a/vp8/encoder/x86/variance_mmx.c b/vp8/encoder/x86/variance_mmx.c
index 58aae066f..2d72d50a8 100644
--- a/vp8/encoder/x86/variance_mmx.c
+++ b/vp8/encoder/x86/variance_mmx.c
@@ -198,7 +198,7 @@ unsigned int vp9_variance8x16_mmx(
// the mmx function that does the bilinear filtering and var calculation //
// in one pass                                                           //
///////////////////////////////////////////////////////////////////////////
-DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[16][8]) = {
+DECLARE_ALIGNED(16, const short, vp9_bilinear_filters_mmx[16][8]) = {
{ 128, 128, 128, 128, 0, 0, 0, 0 },
{ 120, 120, 120, 120, 8, 8, 8, 8 },
{ 112, 112, 112, 112, 16, 16, 16, 16 },
@@ -233,7 +233,7 @@ unsigned int vp9_sub_pixel_variance4x4_mmx
vp9_filter_block2d_bil4x4_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum, &xxsum
);
*sse = xxsum;
@@ -257,7 +257,7 @@ unsigned int vp9_sub_pixel_variance8x8_mmx
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum, &xxsum
);
*sse = xxsum;
@@ -281,14 +281,14 @@ unsigned int vp9_sub_pixel_variance16x16_mmx
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum0, &xxsum0
);
vp9_filter_block2d_bil_var_mmx(
src_ptr + 8, src_pixels_per_line,
dst_ptr + 8, dst_pixels_per_line, 16,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum1, &xxsum1
);
@@ -331,7 +331,7 @@ unsigned int vp9_sub_pixel_variance16x8_mmx
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 8,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum0, &xxsum0
);
@@ -339,7 +339,7 @@ unsigned int vp9_sub_pixel_variance16x8_mmx
vp9_filter_block2d_bil_var_mmx(
src_ptr + 8, src_pixels_per_line,
dst_ptr + 8, dst_pixels_per_line, 8,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum1, &xxsum1
);
@@ -365,7 +365,7 @@ unsigned int vp9_sub_pixel_variance8x16_mmx
vp9_filter_block2d_bil_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line, 16,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum, &xxsum
);
*sse = xxsum;
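
The first rows of vp9_bilinear_filters_mmx above (128/0, 120/8, 112/16, ...) show the pattern: taps step by 8 per 1/16-pel offset, always summing to 128, and each tap is repeated four times for the SIMD lanes. A sketch of a generator for one row:

    /* Hedged sketch: build one row of the MMX bilinear table. */
    static void make_bilinear_row_sketch(int offset /* 0..15 */, short row[8]) {
      int i;
      for (i = 0; i < 4; i++) {
        row[i]     = (short)(128 - 8 * offset);  /* first tap  */
        row[i + 4] = (short)(8 * offset);        /* second tap */
      }
    }
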
diff --git a/vp8/encoder/x86/variance_sse2.c b/vp8/encoder/x86/variance_sse2.c
index 79e13005e..f3b0b600a 100644
--- a/vp8/encoder/x86/variance_sse2.c
+++ b/vp8/encoder/x86/variance_sse2.c
@@ -137,7 +137,7 @@ void vp9_half_vert_variance16x_h_sse2
unsigned int *sumsquared
);
-DECLARE_ALIGNED(16, extern short, vp8_vp7_bilinear_filters_mmx[16][8]);
+DECLARE_ALIGNED(16, extern short, vp9_bilinear_filters_mmx[16][8]);
unsigned int vp9_variance4x4_wmt(
const unsigned char *src_ptr,
@@ -257,7 +257,7 @@ unsigned int vp9_sub_pixel_variance4x4_wmt
vp9_filter_block2d_bil4x4_var_mmx(
src_ptr, src_pixels_per_line,
dst_ptr, dst_pixels_per_line,
- vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
+ vp9_bilinear_filters_mmx[xoffset], vp9_bilinear_filters_mmx[yoffset],
&xsum, &xxsum
);
*sse = xxsum;
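
For reference, the variance functions in these files end the same way: report the sum of squared differences through *sse and return variance as SSE minus the squared-mean term. A hedged sketch of that fold; the shift is log2 of the block area, e.g. 4 for 4x4.

    /* Hedged sketch: fold the accumulated sum and sum of squares into
     * the returned variance. */
    static unsigned int variance_from_sums_sketch(int xsum, unsigned int xxsum,
                                                  unsigned int *sse,
                                                  int log2_area) {
      *sse = xxsum;
      return xxsum - (unsigned int)(((long long)xsum * xsum) >> log2_area);
    }
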