-rwxr-xr-x  configure                   |    2
-rw-r--r--  vp8/common/blockd.h         |    4
-rw-r--r--  vp8/common/entropymv.c      |  402
-rw-r--r--  vp8/common/entropymv.h      |   62
-rw-r--r--  vp8/common/findnearmv.c     |    4
-rw-r--r--  vp8/common/onyxc_int.h      |   18
-rw-r--r--  vp8/decoder/decodemv.c      |  197
-rw-r--r--  vp8/decoder/decodframe.c    |   20
-rw-r--r--  vp8/encoder/bitstream.c     |   92
-rw-r--r--  vp8/encoder/block.h         |   13
-rw-r--r--  vp8/encoder/encodeframe.c   |   33
-rw-r--r--  vp8/encoder/encodemv.c      |  592
-rw-r--r--  vp8/encoder/encodemv.h      |   15
-rw-r--r--  vp8/encoder/firstpass.c     |    5
-rw-r--r--  vp8/encoder/mcomp.c         |  134
-rw-r--r--  vp8/encoder/mcomp.h         |    9
-rw-r--r--  vp8/encoder/onyx_if.c       |   64
-rw-r--r--  vp8/encoder/onyx_int.h      |   14
-rw-r--r--  vp8/encoder/ratectrl.c      |   24
-rw-r--r--  vp8/encoder/rdopt.c         |  126
20 files changed, 39 insertions, 1791 deletions
diff --git a/configure b/configure
index 1612f36bc..77ae9deca 100755
--- a/configure
+++ b/configure
@@ -224,8 +224,6 @@ EXPERIMENT_LIST="
switchable_interp
newbestrefmv
new_mvref
- newmventropy
- tx_select
"
CONFIG_LIST="
external_build
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index de148066c..7de58104e 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -137,8 +137,8 @@ typedef enum {
typedef enum {
DCT_DCT = 0, // DCT in both horizontal and vertical
- ADST_DCT = 1, // ADST in horizontal, DCT in vertical
- DCT_ADST = 2, // DCT in horizontal, ADST in vertical
+ ADST_DCT = 1, // ADST in vertical, DCT in horizontal
+ DCT_ADST = 2, // DCT in vertical, ADST in horizontal
ADST_ADST = 3 // ADST in both directions
} TX_TYPE;
diff --git a/vp8/common/entropymv.c b/vp8/common/entropymv.c
index 6c31236ec..a442a2438 100644
--- a/vp8/common/entropymv.c
+++ b/vp8/common/entropymv.c
@@ -14,8 +14,6 @@
//#define MV_COUNT_TESTING
-#if CONFIG_NEWMVENTROPY
-
#define MV_COUNT_SAT 16
#define MV_MAX_UPDATE_FACTOR 160
@@ -450,413 +448,13 @@ void vp8_adapt_nmv_probs(VP8_COMMON *cm, int usehp) {
}
}
-#else /* CONFIG_NEWMVENTROPY */
-
-#define MV_COUNT_SAT 16
-#define MV_MAX_UPDATE_FACTOR 128
-
-const MV_CONTEXT_HP vp8_mv_update_probs_hp[2] = {
- {{
- 237,
- 246,
- 253, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254,
- 254, 254, 254, 254, 254, 250, 250, 252, 254, 254, 254
- }
- },
- {{
- 231,
- 243,
- 245, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254,
- 254, 254, 254, 254, 254, 251, 251, 254, 254, 254, 254
- }
- }
-};
-const MV_CONTEXT_HP vp8_default_mv_context_hp[2] = {
- {{
- /* row */
- 162, /* is short */
- 128, /* sign */
- 220, 204, 180, 192, 192, 119, 192, 192, 180, 140, 192, 192, 224, 224, 224, /* short tree */
- 128, 129, 132, 75, 145, 178, 206, 239, 254, 254, 254 /* long bits */
- }
- },
- {{
- /* same for column */
- 164, /* is short */
- 128,
- 220, 204, 180, 192, 192, 119, 192, 192, 180, 140, 192, 192, 224, 224, 224, /* short tree */
- 128, 130, 130, 74, 148, 180, 203, 236, 254, 254, 254 /* long bits */
- }
- }
-};
-
-const MV_CONTEXT vp8_mv_update_probs[2] = {
- {{
- 237,
- 246,
- 253, 253, 254, 254, 254, 254, 254,
- 254, 254, 254, 254, 254, 250, 250, 252, 254, 254
- }
- },
- {{
- 231,
- 243,
- 245, 253, 254, 254, 254, 254, 254,
- 254, 254, 254, 254, 254, 251, 251, 254, 254, 254
- }
- }
-};
-const MV_CONTEXT vp8_default_mv_context[2] = {
- {{
- /* row */
- 162, /* is short */
- 128, /* sign */
- 225, 146, 172, 147, 214, 39, 156, /* short tree */
- 128, 129, 132, 75, 145, 178, 206, 239, 254, 254 /* long bits */
- }
- },
- {{
- /* same for column */
- 164, /* is short */
- 128,
- 204, 170, 119, 235, 140, 230, 228,
- 128, 130, 130, 74, 148, 180, 203, 236, 254, 254 /* long bits */
- }
- }
-};
-
-const vp8_tree_index vp8_small_mvtree_hp [30] = {
- 2, 16,
- 4, 10,
- 6, 8,
- -0, -1,
- -2, -3,
- 12, 14,
- -4, -5,
- -6, -7,
- 18, 24,
- 20, 22,
- -8, -9,
- -10, -11,
- 26, 28,
- -12, -13,
- -14, -15
-};
-struct vp8_token_struct vp8_small_mvencodings_hp [16];
-
-const vp8_tree_index vp8_small_mvtree [14] = {
- 2, 8,
- 4, 6,
- -0, -1,
- -2, -3,
- 10, 12,
- -4, -5,
- -6, -7
-};
-struct vp8_token_struct vp8_small_mvencodings [8];
-
-__inline static void calc_prob(vp8_prob *p, const unsigned int ct[2], int pbits) {
- const unsigned int tot = ct[0] + ct[1];
- if (tot) {
- const vp8_prob x = ((ct[0] * 255) / tot) & -(1 << (8 - pbits));
- *p = x ? x : 1;
- } else {
- *p = 128;
- }
-}
-
-static void compute_component_probs(
- const unsigned int events [MVvals],
- vp8_prob Pnew [MVPcount],
- unsigned int is_short_ct[2],
- unsigned int sign_ct[2],
- unsigned int bit_ct [mvlong_width] [2],
- unsigned int short_ct [mvnum_short],
- unsigned int short_bct [mvnum_short - 1] [2]
-) {
- is_short_ct[0] = is_short_ct[1] = 0;
- sign_ct[0] = sign_ct[1] = 0;
- vpx_memset(bit_ct, 0, sizeof(unsigned int)*mvlong_width * 2);
- vpx_memset(short_ct, 0, sizeof(unsigned int)*mvnum_short);
- vpx_memset(short_bct, 0, sizeof(unsigned int) * (mvnum_short - 1) * 2);
-
- {
- const int c = events [mv_max];
- is_short_ct [0] += c; // Short vector
- short_ct [0] += c; // Magnitude distribution
- }
- {
- int j = 1;
- do {
- const int c1 = events [mv_max + j]; // positive
- const int c2 = events [mv_max - j]; // negative
- const int c = c1 + c2;
- int a = j;
-
- sign_ct [0] += c1;
- sign_ct [1] += c2;
-
- if (a < mvnum_short) {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- } else {
- int k = mvlong_width - 1;
- is_short_ct [1] += c; // Long vector
-
- do
- bit_ct [k] [(a >> k) & 1] += c;
-
- while (--k >= 0);
- }
- } while (++j <= mv_max);
- }
- calc_prob(Pnew + mvpis_short, is_short_ct, 8);
-
- calc_prob(Pnew + MVPsign, sign_ct, 8);
-
- {
- vp8_prob p [mvnum_short - 1]; /* actually only need branch ct */
- int j = 0;
-
- vp8_tree_probs_from_distribution(
- mvnum_short, vp8_small_mvencodings, vp8_small_mvtree,
- p, short_bct, short_ct,
- 256, 1
- );
-
- do
- calc_prob(Pnew + MVPshort + j, short_bct[j], 8);
- while (++j < mvnum_short - 1);
- }
-
- {
- int j = 0;
- do
- calc_prob(Pnew + MVPbits + j, bit_ct[j], 8);
- while (++j < mvlong_width);
- }
-}
-
-static void compute_component_probs_hp(
- const unsigned int events [MVvals_hp],
- vp8_prob Pnew [MVPcount_hp],
- unsigned int is_short_ct[2],
- unsigned int sign_ct[2],
- unsigned int bit_ct [mvlong_width_hp] [2],
- unsigned int short_ct [mvnum_short_hp],
- unsigned int short_bct [mvnum_short_hp - 1] [2]
-) {
- is_short_ct[0] = is_short_ct[1] = 0;
- sign_ct[0] = sign_ct[1] = 0;
- vpx_memset(bit_ct, 0, sizeof(unsigned int)*mvlong_width_hp * 2);
- vpx_memset(short_ct, 0, sizeof(unsigned int)*mvnum_short_hp);
- vpx_memset(short_bct, 0, sizeof(unsigned int) * (mvnum_short_hp - 1) * 2);
-
- {
- const int c = events [mv_max_hp];
- is_short_ct [0] += c; // Short vector
- short_ct [0] += c; // Magnitude distribution
- }
- {
- int j = 1;
- do {
- const int c1 = events [mv_max_hp + j]; // positive
- const int c2 = events [mv_max_hp - j]; // negative
- const int c = c1 + c2;
- int a = j;
-
- sign_ct [0] += c1;
- sign_ct [1] += c2;
-
- if (a < mvnum_short_hp) {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- } else {
- int k = mvlong_width_hp - 1;
- is_short_ct [1] += c; // Long vector
-
- do
- bit_ct [k] [(a >> k) & 1] += c;
-
- while (--k >= 0);
- }
- } while (++j <= mv_max_hp);
- }
- calc_prob(Pnew + mvpis_short_hp, is_short_ct, 8);
-
- calc_prob(Pnew + MVPsign_hp, sign_ct, 8);
-
- {
- vp8_prob p [mvnum_short_hp - 1]; /* actually only need branch ct */
- int j = 0;
-
- vp8_tree_probs_from_distribution(
- mvnum_short_hp, vp8_small_mvencodings_hp, vp8_small_mvtree_hp,
- p, short_bct, short_ct,
- 256, 1
- );
-
- do
- calc_prob(Pnew + MVPshort_hp + j, short_bct[j], 8);
- while (++j < mvnum_short_hp - 1);
- }
-
- {
- int j = 0;
- do
- calc_prob(Pnew + MVPbits_hp + j, bit_ct[j], 8);
- while (++j < mvlong_width_hp);
- }
-}
-
-void vp8_adapt_mv_probs(VP8_COMMON *cm) {
- int i, t, count, factor;
-#ifdef MV_COUNT_TESTING
- printf("static const unsigned int\nMVcount[2][MVvals]={\n");
- for (i = 0; i < 2; ++i) {
- printf(" { ");
- for (t = 0; t < MVvals; t++) {
- printf("%d, ", cm->fc.MVcount[i][t]);
- if (t % 16 == 15 && t != MVvals - 1) printf("\n ");
- }
- printf("},\n");
- }
- printf("};\n");
- printf("static const unsigned int\nMVcount_hp[2][MVvals_hp]={\n");
- for (i = 0; i < 2; ++i) {
- printf(" { ");
- for (t = 0; t < MVvals_hp; t++) {
- printf("%d, ", cm->fc.MVcount_hp[i][t]);
- if (t % 16 == 15 && t != MVvals_hp - 1) printf("\n ");
- }
- printf("},\n");
- }
- printf("};\n");
-#endif /* MV_COUNT_TESTING */
-
- for (i = 0; i < 2; ++i) {
- int prob;
- unsigned int is_short_ct[2];
- unsigned int sign_ct[2];
- unsigned int bit_ct [mvlong_width] [2];
- unsigned int short_ct [mvnum_short];
- unsigned int short_bct [mvnum_short - 1] [2];
- vp8_prob Pnew [MVPcount];
- compute_component_probs(cm->fc.MVcount[i], Pnew,
- is_short_ct, sign_ct,
- bit_ct, short_ct, short_bct);
- count = is_short_ct[0] + is_short_ct[1];
- count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
- factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
- prob = ((int)cm->fc.pre_mvc[i].prob[mvpis_short] * (256 - factor) +
- (int)Pnew[mvpis_short] * factor + 128) >> 8;
- if (prob <= 0) cm->fc.mvc[i].prob[mvpis_short] = 1;
- else if (prob > 255) cm->fc.mvc[i].prob[mvpis_short] = 255;
- else cm->fc.mvc[i].prob[mvpis_short] = prob;
-
- count = sign_ct[0] + sign_ct[1];
- count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
- factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
- prob = ((int)cm->fc.pre_mvc[i].prob[MVPsign] * (256 - factor) +
- (int)Pnew[MVPsign] * factor + 128) >> 8;
- if (prob <= 0) cm->fc.mvc[i].prob[MVPsign] = 1;
- else if (prob > 255) cm->fc.mvc[i].prob[MVPsign] = 255;
- else cm->fc.mvc[i].prob[MVPsign] = prob;
-
- for (t = 0; t < mvnum_short - 1; ++t) {
- count = short_bct[t][0] + short_bct[t][1];
- count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
- factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
- prob = ((int)cm->fc.pre_mvc[i].prob[MVPshort + t] * (256 - factor) +
- (int)Pnew[MVPshort + t] * factor + 128) >> 8;
- if (prob <= 0) cm->fc.mvc[i].prob[MVPshort + t] = 1;
- else if (prob > 255) cm->fc.mvc[i].prob[MVPshort + t] = 255;
- else cm->fc.mvc[i].prob[MVPshort + t] = prob;
- }
- for (t = 0; t < mvlong_width; ++t) {
- count = bit_ct[t][0] + bit_ct[t][1];
- count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
- factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
- prob = ((int)cm->fc.pre_mvc[i].prob[MVPbits + t] * (256 - factor) +
- (int)Pnew[MVPbits + t] * factor + 128) >> 8;
- if (prob <= 0) cm->fc.mvc[i].prob[MVPbits + t] = 1;
- else if (prob > 255) cm->fc.mvc[i].prob[MVPbits + t] = 255;
- else cm->fc.mvc[i].prob[MVPbits + t] = prob;
- }
- }
- for (i = 0; i < 2; ++i) {
- int prob;
- unsigned int is_short_ct[2];
- unsigned int sign_ct[2];
- unsigned int bit_ct [mvlong_width_hp] [2];
- unsigned int short_ct [mvnum_short_hp];
- unsigned int short_bct [mvnum_short_hp - 1] [2];
- vp8_prob Pnew [MVPcount_hp];
- compute_component_probs_hp(cm->fc.MVcount_hp[i], Pnew,
- is_short_ct, sign_ct,
- bit_ct, short_ct, short_bct);
- count = is_short_ct[0] + is_short_ct[1];
- count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
- factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
- prob = ((int)cm->fc.pre_mvc_hp[i].prob[mvpis_short_hp] * (256 - factor) +
- (int)Pnew[mvpis_short_hp] * factor + 128) >> 8;
- if (prob <= 0) cm->fc.mvc_hp[i].prob[mvpis_short_hp] = 1;
- else if (prob > 255) cm->fc.mvc_hp[i].prob[mvpis_short_hp] = 255;
- else cm->fc.mvc_hp[i].prob[mvpis_short_hp] = prob;
-
- count = sign_ct[0] + sign_ct[1];
- count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
- factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
- prob = ((int)cm->fc.pre_mvc_hp[i].prob[MVPsign_hp] * (256 - factor) +
- (int)Pnew[MVPsign_hp] * factor + 128) >> 8;
- if (prob <= 0) cm->fc.mvc_hp[i].prob[MVPsign_hp] = 1;
- else if (prob > 255) cm->fc.mvc_hp[i].prob[MVPsign_hp] = 255;
- else cm->fc.mvc_hp[i].prob[MVPsign_hp] = prob;
-
- for (t = 0; t < mvnum_short_hp - 1; ++t) {
- count = short_bct[t][0] + short_bct[t][1];
- count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
- factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
- prob = ((int)cm->fc.pre_mvc_hp[i].prob[MVPshort_hp + t] * (256 - factor) +
- (int)Pnew[MVPshort_hp + t] * factor + 128) >> 8;
- if (prob <= 0) cm->fc.mvc_hp[i].prob[MVPshort_hp + t] = 1;
- else if (prob > 255) cm->fc.mvc_hp[i].prob[MVPshort_hp + t] = 255;
- else cm->fc.mvc_hp[i].prob[MVPshort_hp + t] = prob;
- }
- for (t = 0; t < mvlong_width_hp; ++t) {
- count = bit_ct[t][0] + bit_ct[t][1];
- count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
- factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
- prob = ((int)cm->fc.pre_mvc_hp[i].prob[MVPbits_hp + t] * (256 - factor) +
- (int)Pnew[MVPbits_hp + t] * factor + 128) >> 8;
- if (prob <= 0) cm->fc.mvc_hp[i].prob[MVPbits_hp + t] = 1;
- else if (prob > 255) cm->fc.mvc_hp[i].prob[MVPbits_hp + t] = 255;
- else cm->fc.mvc_hp[i].prob[MVPbits_hp + t] = prob;
- }
- }
-}
-
-#endif /* CONFIG_NEWMVENTROPY */
-
void vp8_entropy_mv_init() {
-#if CONFIG_NEWMVENTROPY
vp8_tokens_from_tree(vp8_mv_joint_encodings, vp8_mv_joint_tree);
vp8_tokens_from_tree(vp8_mv_class_encodings, vp8_mv_class_tree);
vp8_tokens_from_tree(vp8_mv_class0_encodings, vp8_mv_class0_tree);
vp8_tokens_from_tree(vp8_mv_fp_encodings, vp8_mv_fp_tree);
-#else
- vp8_tokens_from_tree(vp8_small_mvencodings, vp8_small_mvtree);
- vp8_tokens_from_tree(vp8_small_mvencodings_hp, vp8_small_mvtree_hp);
-#endif
}
void vp8_init_mv_probs(VP8_COMMON *cm) {
-#if CONFIG_NEWMVENTROPY
vpx_memcpy(&cm->fc.nmvc, &vp8_default_nmv_context, sizeof(nmv_context));
-#else
- vpx_memcpy(cm->fc.mvc,
- vp8_default_mv_context, sizeof(vp8_default_mv_context));
- vpx_memcpy(cm->fc.mvc_hp,
- vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
-#endif
}
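
For context on the entropymv.c hunks above: both the removed vp8_adapt_mv_probs() and the retained vp8_adapt_nmv_probs() blend the previous frame's probabilities toward the freshly measured ones with a count-saturated factor. Below is a minimal C sketch of that update rule, using the MV_COUNT_SAT / MV_MAX_UPDATE_FACTOR constants kept above; the helper name is illustrative only, and it is an assumption here that the retained nmv path applies the same blend as the removed code.

    /* Count-saturated probability blend, as in the removed vp8_adapt_mv_probs();
     * the retained nmv path is assumed to follow the same rule with
     * MV_COUNT_SAT = 16 and MV_MAX_UPDATE_FACTOR = 160. */
    static vp8_prob adapt_prob_sketch(vp8_prob pre, vp8_prob newp,
                                      unsigned int ct0, unsigned int ct1) {
      unsigned int count = ct0 + ct1;
      int factor, prob;
      if (count > MV_COUNT_SAT) count = MV_COUNT_SAT;        /* saturate the count */
      factor = MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT;  /* 0 .. 160 */
      prob = ((int)pre * (256 - factor) + (int)newp * factor + 128) >> 8;
      return (vp8_prob)(prob <= 0 ? 1 : (prob > 255 ? 255 : prob)); /* clamp to [1, 255] */
    }
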
diff --git a/vp8/common/entropymv.h b/vp8/common/entropymv.h
index 1a193b172..80540a54c 100644
--- a/vp8/common/entropymv.h
+++ b/vp8/common/entropymv.h
@@ -22,7 +22,6 @@ void vp8_entropy_mv_init();
void vp8_init_mv_probs(struct VP8Common *cm);
void vp8_adapt_mv_probs(struct VP8Common *cm);
-#if CONFIG_NEWMVENTROPY
void vp8_adapt_nmv_probs(struct VP8Common *cm, int usehp);
void vp8_lower_mv_precision(MV *mv);
int vp8_use_nmv_hp(const MV *ref);
@@ -129,65 +128,4 @@ void vp8_counts_to_nmv_context(
unsigned int (*branch_ct_class0_hp)[2],
unsigned int (*branch_ct_hp)[2]);
-#else /* CONFIG_NEWMVENTROPY */
-
-enum {
- mv_max = 1023, /* max absolute value of a MV component */
- MVvals = (2 * mv_max) + 1, /* # possible values "" */
- mvlong_width = 10, /* Large MVs have 9 bit magnitudes */
- mvnum_short = 8, /* magnitudes 0 through 7 */
- mvnum_short_bits = 3, /* number of bits for short mvs */
-
- mvfp_max = 255, /* max absolute value of a full pixel MV component */
- MVfpvals = (2 * mvfp_max) + 1, /* # possible full pixel MV values */
-
- /* probability offsets for coding each MV component */
-
- mvpis_short = 0, /* short (<= 7) vs long (>= 8) */
- MVPsign, /* sign for non-zero */
- MVPshort, /* 8 short values = 7-position tree */
-
- MVPbits = MVPshort + mvnum_short - 1, /* mvlong_width long value bits */
- MVPcount = MVPbits + mvlong_width /* (with independent probabilities) */
-};
-
-typedef struct mv_context {
- vp8_prob prob[MVPcount]; /* often come in row, col pairs */
-} MV_CONTEXT;
-
-extern const MV_CONTEXT vp8_mv_update_probs[2], vp8_default_mv_context[2];
-
-enum {
- mv_max_hp = 2047, /* max absolute value of a MV component */
- MVvals_hp = (2 * mv_max_hp) + 1, /* # possible values "" */
- mvlong_width_hp = 11, /* Large MVs have 9 bit magnitudes */
- mvnum_short_hp = 16, /* magnitudes 0 through 15 */
- mvnum_short_bits_hp = 4, /* number of bits for short mvs */
-
- mvfp_max_hp = 255, /* max absolute value of a full pixel MV component */
- MVfpvals_hp = (2 * mvfp_max_hp) + 1, /* # possible full pixel MV values */
-
- /* probability offsets for coding each MV component */
-
- mvpis_short_hp = 0, /* short (<= 7) vs long (>= 8) */
- MVPsign_hp, /* sign for non-zero */
- MVPshort_hp, /* 8 short values = 7-position tree */
-
- MVPbits_hp = MVPshort_hp + mvnum_short_hp - 1, /* mvlong_width long value bits */
- MVPcount_hp = MVPbits_hp + mvlong_width_hp /* (with independent probabilities) */
-};
-
-typedef struct mv_context_hp {
- vp8_prob prob[MVPcount_hp]; /* often come in row, col pairs */
-} MV_CONTEXT_HP;
-
-extern const MV_CONTEXT_HP vp8_mv_update_probs_hp[2], vp8_default_mv_context_hp[2];
-
-extern const vp8_tree_index vp8_small_mvtree[];
-extern struct vp8_token_struct vp8_small_mvencodings [8];
-extern const vp8_tree_index vp8_small_mvtree_hp[];
-extern struct vp8_token_struct vp8_small_mvencodings_hp [16];
-
-#endif /* CONFIG_NEWMVENTROPY */
-
#endif
diff --git a/vp8/common/findnearmv.c b/vp8/common/findnearmv.c
index be495ff81..e1bf5361a 100644
--- a/vp8/common/findnearmv.c
+++ b/vp8/common/findnearmv.c
@@ -22,11 +22,7 @@ const unsigned char vp8_mbsplit_offset[4][16] = {
static void lower_mv_precision(int_mv *mv, int usehp)
{
-#if CONFIG_NEWMVENTROPY
if (!usehp || !vp8_use_nmv_hp(&mv->as_mv)) {
-#else
- if (!usehp) {
-#endif
if (mv->as_mv.row & 1)
mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
if (mv->as_mv.col & 1)
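
The retained condition above drops the extra 1/8-pel precision whenever high-precision motion vectors are disabled or vp8_use_nmv_hp() rules them out for this reference MV; odd components are rounded toward zero. A small illustration with hypothetical values (components in 1/8-pel units):

    /* Illustration only: lower_mv_precision() with usehp == 0 rounds odd
     * 1/8-pel components toward zero, leaving quarter-pel values. */
    int_mv mv;
    mv.as_mv.row = 5;    /*  5/8 pel */
    mv.as_mv.col = -3;   /* -3/8 pel */
    lower_mv_precision(&mv, 0);
    /* now mv.as_mv.row == 4 and mv.as_mv.col == -2 */
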
diff --git a/vp8/common/onyxc_int.h b/vp8/common/onyxc_int.h
index a212bc898..646b7a08f 100644
--- a/vp8/common/onyxc_int.h
+++ b/vp8/common/onyxc_int.h
@@ -57,15 +57,8 @@ typedef struct frame_contexts {
vp8_prob coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
vp8_prob hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
-#if CONFIG_NEWMVENTROPY
nmv_context nmvc;
nmv_context pre_nmvc;
-#else
- MV_CONTEXT mvc[2];
- MV_CONTEXT_HP mvc_hp[2];
- MV_CONTEXT pre_mvc[2];
- MV_CONTEXT_HP pre_mvc_hp[2];
-#endif
vp8_prob pre_bmode_prob [VP8_BINTRAMODES - 1];
vp8_prob pre_ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
vp8_prob pre_uv_mode_prob [VP8_YMODES][VP8_UV_MODES - 1];
@@ -109,12 +102,7 @@ typedef struct frame_contexts {
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
-#if CONFIG_NEWMVENTROPY
nmv_context_counts NMVcount;
-#else
- unsigned int MVcount [2] [MVvals];
- unsigned int MVcount_hp [2] [MVvals_hp];
-#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_prob switchable_interp_prob[VP8_SWITCHABLE_FILTERS+1]
[VP8_SWITCHABLE_FILTERS-1];
@@ -143,10 +131,8 @@ typedef enum {
ONLY_4X4 = 0,
ALLOW_8X8 = 1,
ALLOW_16X16 = 2,
-#if CONFIG_TX_SELECT
TX_MODE_SELECT = 3,
-#endif
- NB_TXFM_MODES = 3 + CONFIG_TX_SELECT,
+ NB_TXFM_MODES = 4,
} TXFM_MODE;
typedef struct VP8_COMMON_RTCD {
@@ -284,10 +270,8 @@ typedef struct VP8Common {
vp8_prob prob_comppred[COMP_PRED_CONTEXTS];
-#if CONFIG_TX_SELECT
// FIXME contextualize
vp8_prob prob_tx[TX_SIZE_MAX - 1];
-#endif
vp8_prob mbskip_pred_probs[MBSKIP_CONTEXTS];
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 064d16426..83b899edf 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -170,16 +170,13 @@ static void kfread_modes(VP8D_COMP *pbi,
m->mbmi.second_uv_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
#endif
-#if CONFIG_TX_SELECT
if (cm->txfm_mode == TX_MODE_SELECT && m->mbmi.mb_skip_coeff == 0 &&
m->mbmi.mode <= I8X8_PRED) {
// FIXME(rbultje) code ternary symbol once all experiments are merged
m->mbmi.txfm_size = vp8_read(bc, cm->prob_tx[0]);
if (m->mbmi.txfm_size != TX_4X4 && m->mbmi.mode != I8X8_PRED)
m->mbmi.txfm_size += vp8_read(bc, cm->prob_tx[1]);
- } else
-#endif
- if (cm->txfm_mode >= ALLOW_16X16 && m->mbmi.mode <= TM_PRED) {
+ } else if (cm->txfm_mode >= ALLOW_16X16 && m->mbmi.mode <= TM_PRED) {
m->mbmi.txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 && m->mbmi.mode != B_PRED) {
m->mbmi.txfm_size = TX_8X8;
@@ -188,7 +185,6 @@ static void kfread_modes(VP8D_COMP *pbi,
}
}
-#if CONFIG_NEWMVENTROPY
static int read_nmv_component(vp8_reader *r,
int rv,
const nmv_component *mvcomp) {
@@ -207,7 +203,7 @@ static int read_nmv_component(vp8_reader *r,
o = d << 3;
z = vp8_get_mv_mag(c, o);
- v = (s ? -(z + 1) : (z + 1));
+ v = (s ? -(z + 8) : (z + 8));
return v;
}
@@ -219,6 +215,7 @@ static int read_nmv_component_fp(vp8_reader *r,
int s, z, c, o, d, e, f;
s = v < 0;
z = (s ? -v : v) - 1; /* magnitude - 1 */
+ z &= ~7;
c = vp8_get_mv_class(z, &o);
d = o >> 3;
@@ -332,124 +329,6 @@ static void read_nmvprobs(vp8_reader *bc, nmv_context *mvctx,
}
}
-#else
-
-static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) {
- const vp8_prob *const p = (const vp8_prob *) mvc;
- int x = 0;
-
- if (vp8_read(r, p [mvpis_short])) { /* Large */
- int i = 0;
-
- do {
- x += vp8_read(r, p [MVPbits + i]) << i;
- } while (++i < mvnum_short_bits);
-
- i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
-
- do {
- x += vp8_read(r, p [MVPbits + i]) << i;
- } while (--i > mvnum_short_bits);
-
- if (!(x & ~((2 << mvnum_short_bits) - 1)) || vp8_read(r, p [MVPbits + mvnum_short_bits]))
- x += (mvnum_short);
- } else /* small */
- x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);
-
- if (x && vp8_read(r, p [MVPsign]))
- x = -x;
-
- return x;
-}
-
-static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) {
- mv->row = (short)(read_mvcomponent(r, mvc) << 1);
- mv->col = (short)(read_mvcomponent(r, ++mvc) << 1);
-#ifdef DEBUG_DEC_MV
- int i;
- printf("%d (np): %d %d\n", dec_mvcount++, mv->row, mv->col);
- // for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[-1])->prob[i]); printf("\n");
- // for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[0])->prob[i]); printf("\n");
-#endif
-}
-
-static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) {
- int i = 0;
-
- do {
- const vp8_prob *up = vp8_mv_update_probs[i].prob;
- vp8_prob *p = (vp8_prob *)(mvc + i);
- vp8_prob *const pstop = p + MVPcount;
-
- do {
- if (vp8_read(bc, *up++)) {
- const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);
-
- *p = x ? x << 1 : 1;
- }
- } while (++p < pstop);
- } while (++i < 2);
-}
-
-static int read_mvcomponent_hp(vp8_reader *r, const MV_CONTEXT_HP *mvc) {
- const vp8_prob *const p = (const vp8_prob *) mvc;
- int x = 0;
-
- if (vp8_read(r, p [mvpis_short_hp])) { /* Large */
- int i = 0;
-
- do {
- x += vp8_read(r, p [MVPbits_hp + i]) << i;
- } while (++i < mvnum_short_bits_hp);
-
- i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
-
- do {
- x += vp8_read(r, p [MVPbits_hp + i]) << i;
- } while (--i > mvnum_short_bits_hp);
-
- if (!(x & ~((2 << mvnum_short_bits_hp) - 1)) || vp8_read(r, p [MVPbits_hp + mvnum_short_bits_hp]))
- x += (mvnum_short_hp);
- } else /* small */
- x = vp8_treed_read(r, vp8_small_mvtree_hp, p + MVPshort_hp);
-
- if (x && vp8_read(r, p [MVPsign_hp]))
- x = -x;
-
- return x;
-}
-
-static void read_mv_hp(vp8_reader *r, MV *mv, const MV_CONTEXT_HP *mvc) {
- mv->row = (short)(read_mvcomponent_hp(r, mvc));
- mv->col = (short)(read_mvcomponent_hp(r, ++mvc));
-#ifdef DEBUG_DEC_MV
- int i;
- printf("%d (hp): %d %d\n", dec_mvcount++, mv->row, mv->col);
- // for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[-1])->prob[i]); printf("\n");
- // for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[0])->prob[i]); printf("\n");
-#endif
-}
-
-static void read_mvcontexts_hp(vp8_reader *bc, MV_CONTEXT_HP *mvc) {
- int i = 0;
-
- do {
- const vp8_prob *up = vp8_mv_update_probs_hp[i].prob;
- vp8_prob *p = (vp8_prob *)(mvc + i);
- vp8_prob *const pstop = p + MVPcount_hp;
-
- do {
- if (vp8_read(bc, *up++)) {
- const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);
-
- *p = x ? x << 1 : 1;
- }
- } while (++p < pstop);
- } while (++i < 2);
-}
-
-#endif /* CONFIG_NEWMVENTROPY */
-
// Read the reference frame
static MV_REFERENCE_FRAME read_ref_frame(VP8D_COMP *pbi,
vp8_reader *const bc,
@@ -614,12 +493,7 @@ static void read_switchable_interp_probs(VP8D_COMP* const pbi,
static void mb_mode_mv_init(VP8D_COMP *pbi, vp8_reader *bc) {
VP8_COMMON *const cm = &pbi->common;
-#if CONFIG_NEWMVENTROPY
nmv_context *const nmvc = &pbi->common.fc.nmvc;
-#else
- MV_CONTEXT *const mvc = pbi->common.fc.mvc;
- MV_CONTEXT_HP *const mvc_hp = pbi->common.fc.mvc_hp;
-#endif
MACROBLOCKD *const xd = &pbi->mb;
if (cm->frame_type == KEY_FRAME) {
@@ -661,14 +535,7 @@ static void mb_mode_mv_init(VP8D_COMP *pbi, vp8_reader *bc) {
cm->fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
} while (++i < VP8_YMODES - 1);
}
-#if CONFIG_NEWMVENTROPY
read_nmvprobs(bc, nmvc, xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv)
- read_mvcontexts_hp(bc, mvc_hp);
- else
- read_mvcontexts(bc, mvc);
-#endif
}
}
@@ -751,12 +618,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
int mb_row, int mb_col,
BOOL_DECODER* const bc) {
VP8_COMMON *const cm = &pbi->common;
-#if CONFIG_NEWMVENTROPY
nmv_context *const nmvc = &pbi->common.fc.nmvc;
-#else
- MV_CONTEXT *const mvc = pbi->common.fc.mvc;
- MV_CONTEXT_HP *const mvc_hp = pbi->common.fc.mvc_hp;
-#endif
const int mis = pbi->common.mode_info_stride;
MACROBLOCKD *const xd = &pbi->mb;
@@ -1005,44 +867,20 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
switch (blockmode) {
case NEW4X4:
-#if CONFIG_NEWMVENTROPY
read_nmv(bc, &blockmv.as_mv, &best_mv.as_mv, nmvc);
read_nmv_fp(bc, &blockmv.as_mv, &best_mv.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&blockmv.as_mv, &best_mv.as_mv,
&cm->fc.NMVcount, xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- read_mv_hp(bc, &blockmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
- cm->fc.MVcount_hp[0][mv_max_hp + (blockmv.as_mv.row)]++;
- cm->fc.MVcount_hp[1][mv_max_hp + (blockmv.as_mv.col)]++;
- } else {
- read_mv(bc, &blockmv.as_mv, (const MV_CONTEXT *) mvc);
- cm->fc.MVcount[0][mv_max + (blockmv.as_mv.row >> 1)]++;
- cm->fc.MVcount[1][mv_max + (blockmv.as_mv.col >> 1)]++;
- }
-#endif /* CONFIG_NEWMVENTROPY */
blockmv.as_mv.row += best_mv.as_mv.row;
blockmv.as_mv.col += best_mv.as_mv.col;
if (mbmi->second_ref_frame) {
-#if CONFIG_NEWMVENTROPY
read_nmv(bc, &secondmv.as_mv, &best_mv_second.as_mv, nmvc);
read_nmv_fp(bc, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&secondmv.as_mv, &best_mv_second.as_mv,
&cm->fc.NMVcount, xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- read_mv_hp(bc, &secondmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
- cm->fc.MVcount_hp[0][mv_max_hp + (secondmv.as_mv.row)]++;
- cm->fc.MVcount_hp[1][mv_max_hp + (secondmv.as_mv.col)]++;
- } else {
- read_mv(bc, &secondmv.as_mv, (const MV_CONTEXT *) mvc);
- cm->fc.MVcount[0][mv_max + (secondmv.as_mv.row >> 1)]++;
- cm->fc.MVcount[1][mv_max + (secondmv.as_mv.col >> 1)]++;
- }
-#endif /* CONFIG_NEWMVENTROPY */
secondmv.as_mv.row += best_mv_second.as_mv.row;
secondmv.as_mv.col += best_mv_second.as_mv.col;
}
@@ -1147,23 +985,11 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
break;
case NEWMV:
-#if CONFIG_NEWMVENTROPY
read_nmv(bc, &mv->as_mv, &best_mv.as_mv, nmvc);
read_nmv_fp(bc, &mv->as_mv, &best_mv.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&mv->as_mv, &best_mv.as_mv, &cm->fc.NMVcount,
xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- read_mv_hp(bc, &mv->as_mv, (const MV_CONTEXT_HP *) mvc_hp);
- cm->fc.MVcount_hp[0][mv_max_hp + (mv->as_mv.row)]++;
- cm->fc.MVcount_hp[1][mv_max_hp + (mv->as_mv.col)]++;
- } else {
- read_mv(bc, &mv->as_mv, (const MV_CONTEXT *) mvc);
- cm->fc.MVcount[0][mv_max + (mv->as_mv.row >> 1)]++;
- cm->fc.MVcount[1][mv_max + (mv->as_mv.col >> 1)]++;
- }
-#endif /* CONFIG_NEWMVENTROPY */
mv->as_mv.row += best_mv.as_mv.row;
mv->as_mv.col += best_mv.as_mv.col;
@@ -1178,23 +1004,11 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mb_to_top_edge,
mb_to_bottom_edge);
if (mbmi->second_ref_frame) {
-#if CONFIG_NEWMVENTROPY
read_nmv(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc);
read_nmv_fp(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&mbmi->mv[1].as_mv, &best_mv_second.as_mv,
&cm->fc.NMVcount, xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- read_mv_hp(bc, &mbmi->mv[1].as_mv, (const MV_CONTEXT_HP *) mvc_hp);
- cm->fc.MVcount_hp[0][mv_max_hp + (mbmi->mv[1].as_mv.row)]++;
- cm->fc.MVcount_hp[1][mv_max_hp + (mbmi->mv[1].as_mv.col)]++;
- } else {
- read_mv(bc, &mbmi->mv[1].as_mv, (const MV_CONTEXT *) mvc);
- cm->fc.MVcount[0][mv_max + (mbmi->mv[1].as_mv.row >> 1)]++;
- cm->fc.MVcount[1][mv_max + (mbmi->mv[1].as_mv.col >> 1)]++;
- }
-#endif /* CONFIG_NEWMVENTROPY */
mbmi->mv[1].as_mv.row += best_mv_second.as_mv.row;
mbmi->mv[1].as_mv.col += best_mv_second.as_mv.col;
mbmi->need_to_clamp_secondmv |=
@@ -1282,7 +1096,6 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
#endif
}
-#if CONFIG_TX_SELECT
if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= I8X8_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && !(mbmi->mode == SPLITMV &&
@@ -1292,9 +1105,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (mbmi->txfm_size != TX_4X4 && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV)
mbmi->txfm_size += vp8_read(bc, cm->prob_tx[1]);
- } else
-#endif
- if (cm->txfm_mode >= ALLOW_16X16 &&
+ } else if (cm->txfm_mode >= ALLOW_16X16 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
mbmi->txfm_size = TX_16X16;
diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index d961c7b2a..8e1ebe35d 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -1190,17 +1190,11 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
#endif
/* Read the loop filter level and type */
-#if CONFIG_TX_SELECT
pc->txfm_mode = vp8_read_literal(&header_bc, 2);
if (pc->txfm_mode == TX_MODE_SELECT) {
pc->prob_tx[0] = vp8_read_literal(&header_bc, 8);
pc->prob_tx[1] = vp8_read_literal(&header_bc, 8);
}
-#else
- pc->txfm_mode = (TXFM_MODE) vp8_read_bit(&header_bc);
- if (pc->txfm_mode == ALLOW_8X8)
- pc->txfm_mode = ALLOW_16X16;
-#endif
pc->filter_type = (LOOPFILTERTYPE) vp8_read_bit(&header_bc);
pc->filter_level = vp8_read_literal(&header_bc, 6);
@@ -1354,12 +1348,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_copy(pbi->common.fc.pre_i8x8_mode_prob, pbi->common.fc.i8x8_mode_prob);
vp8_copy(pbi->common.fc.pre_sub_mv_ref_prob, pbi->common.fc.sub_mv_ref_prob);
vp8_copy(pbi->common.fc.pre_mbsplit_prob, pbi->common.fc.mbsplit_prob);
-#if CONFIG_NEWMVENTROPY
pbi->common.fc.pre_nmvc = pbi->common.fc.nmvc;
-#else
- vp8_copy(pbi->common.fc.pre_mvc, pbi->common.fc.mvc);
- vp8_copy(pbi->common.fc.pre_mvc_hp, pbi->common.fc.mvc_hp);
-#endif
vp8_zero(pbi->common.fc.coef_counts);
vp8_zero(pbi->common.fc.hybrid_coef_counts);
vp8_zero(pbi->common.fc.coef_counts_8x8);
@@ -1372,12 +1361,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_zero(pbi->common.fc.i8x8_mode_counts);
vp8_zero(pbi->common.fc.sub_mv_ref_counts);
vp8_zero(pbi->common.fc.mbsplit_counts);
-#if CONFIG_NEWMVENTROPY
vp8_zero(pbi->common.fc.NMVcount);
-#else
- vp8_zero(pbi->common.fc.MVcount);
- vp8_zero(pbi->common.fc.MVcount_hp);
-#endif
vp8_zero(pbi->common.fc.mv_ref_ct);
vp8_zero(pbi->common.fc.mv_ref_ct_a);
@@ -1436,11 +1420,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_adapt_coef_probs(pc);
if (pc->frame_type != KEY_FRAME) {
vp8_adapt_mode_probs(pc);
-#if CONFIG_NEWMVENTROPY
vp8_adapt_nmv_probs(pc, xd->allow_high_precision_mv);
-#else
- vp8_adapt_mv_probs(pc);
-#endif
vp8_update_mode_context(&pbi->common);
}
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 508ada85b..d66886561 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -643,7 +643,6 @@ static void write_sub_mv_ref
vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
}
-#if CONFIG_NEWMVENTROPY
static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
const nmv_context *nmvc, int usehp) {
MV e;
@@ -654,31 +653,6 @@ static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
vp8_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
}
-#else
-
-static void write_mv
-(
- vp8_writer *bc, const MV *mv, const int_mv *ref, const MV_CONTEXT *mvc
-) {
- MV e;
- e.row = mv->row - ref->as_mv.row;
- e.col = mv->col - ref->as_mv.col;
-
- vp8_encode_motion_vector(bc, &e, mvc);
-}
-
-static void write_mv_hp
-(
- vp8_writer *bc, const MV *mv, const int_mv *ref, const MV_CONTEXT_HP *mvc
-) {
- MV e;
- e.row = mv->row - ref->as_mv.row;
- e.col = mv->col - ref->as_mv.col;
-
- vp8_encode_motion_vector_hp(bc, &e, mvc);
-}
-#endif /* CONFIG_NEWMVENTROPY */
-
// This function writes the current macro block's segment id to the bitstream
// It should only be called if a segment map update is indicated.
static void write_mb_segid(vp8_writer *bc,
@@ -815,12 +789,7 @@ static void update_ref_probs(VP8_COMP *const cpi) {
static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
int i;
VP8_COMMON *const pc = &cpi->common;
-#if CONFIG_NEWMVENTROPY
const nmv_context *nmvc = &pc->fc.nmvc;
-#else
- const MV_CONTEXT *mvc = pc->fc.mvc;
- const MV_CONTEXT_HP *mvc_hp = pc->fc.mvc_hp;
-#endif
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
MODE_INFO *m;
@@ -1093,17 +1062,9 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
}
#endif
-#if CONFIG_NEWMVENTROPY
write_nmv(bc, &mi->mv[0].as_mv, &best_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- write_mv_hp(bc, &mi->mv[0].as_mv, &best_mv, mvc_hp);
- } else {
- write_mv(bc, &mi->mv[0].as_mv, &best_mv, mvc);
- }
-#endif
if (mi->second_ref_frame) {
#if 0 //CONFIG_NEW_MVREF
@@ -1120,17 +1081,9 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
&best_second_mv);
cpi->best_ref_index_counts[best_index]++;
#endif
-#if CONFIG_NEWMVENTROPY
write_nmv(bc, &mi->mv[1].as_mv, &best_second_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- write_mv_hp(bc, &mi->mv[1].as_mv, &best_second_mv, mvc_hp);
- } else {
- write_mv(bc, &mi->mv[1].as_mv, &best_second_mv, mvc);
- }
-#endif
}
break;
case SPLITMV: {
@@ -1172,40 +1125,16 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
#ifdef ENTROPY_STATS
active_section = 11;
#endif
-#if CONFIG_NEWMVENTROPY
write_nmv(bc, &blockmv.as_mv, &best_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- write_mv_hp(bc, &blockmv.as_mv, &best_mv,
- (const MV_CONTEXT_HP *) mvc_hp);
- } else {
- write_mv(bc, &blockmv.as_mv, &best_mv,
- (const MV_CONTEXT *) mvc);
- }
-#endif
if (mi->second_ref_frame) {
-#if CONFIG_NEWMVENTROPY
write_nmv(bc,
&cpi->mb.partition_info->bmi[j].second_mv.as_mv,
&best_second_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
-#else
- if (xd->allow_high_precision_mv) {
- write_mv_hp(
- bc,
- &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
- &best_second_mv, (const MV_CONTEXT_HP *)mvc_hp);
- } else {
- write_mv(
- bc,
- &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
- &best_second_mv, (const MV_CONTEXT *) mvc);
- }
-#endif
}
}
} while (++j < cpi->mb.partition_info->count);
@@ -1217,7 +1146,6 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
}
}
-#if CONFIG_TX_SELECT
if (((rf == INTRA_FRAME && mode <= I8X8_PRED) ||
(rf != INTRA_FRAME && !(mode == SPLITMV &&
mi->partitioning == PARTITIONING_4X4))) &&
@@ -1231,7 +1159,6 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
if (sz != TX_4X4 && mode != I8X8_PRED && mode != SPLITMV)
vp8_write(bc, sz != TX_8X8, pc->prob_tx[1]);
}
-#endif
#ifdef ENTROPY_STATS
active_section = 1;
@@ -1360,7 +1287,6 @@ static void write_mb_modes_kf(const VP8_COMMON *c,
} else
write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
-#if CONFIG_TX_SELECT
if (ym <= I8X8_PRED && c->txfm_mode == TX_MODE_SELECT &&
!((c->mb_no_coeff_skip && m->mbmi.mb_skip_coeff) ||
(segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
@@ -1371,7 +1297,6 @@ static void write_mb_modes_kf(const VP8_COMMON *c,
if (sz != TX_4X4 && ym <= TM_PRED)
vp8_write(bc, sz != TX_8X8, c->prob_tx[1]);
}
-#endif
}
static void write_kfmodes(VP8_COMP* const cpi, vp8_writer* const bc) {
@@ -2548,7 +2473,6 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
}
#endif
-#if CONFIG_TX_SELECT
{
if (pc->txfm_mode == TX_MODE_SELECT) {
pc->prob_tx[0] = get_prob(cpi->txfm_count[0] + cpi->txfm_count_8x8p[0],
@@ -2565,9 +2489,6 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
vp8_write_literal(&header_bc, pc->prob_tx[1], 8);
}
}
-#else
- vp8_write_bit(&header_bc, !!pc->txfm_mode);
-#endif
// Encode the loop filter level and type
vp8_write_bit(&header_bc, pc->filter_type);
@@ -2724,12 +2645,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
vp8_copy(cpi->common.fc.pre_sub_mv_ref_prob, cpi->common.fc.sub_mv_ref_prob);
vp8_copy(cpi->common.fc.pre_mbsplit_prob, cpi->common.fc.mbsplit_prob);
vp8_copy(cpi->common.fc.pre_i8x8_mode_prob, cpi->common.fc.i8x8_mode_prob);
-#if CONFIG_NEWMVENTROPY
cpi->common.fc.pre_nmvc = cpi->common.fc.nmvc;
-#else
- vp8_copy(cpi->common.fc.pre_mvc, cpi->common.fc.mvc);
- vp8_copy(cpi->common.fc.pre_mvc_hp, cpi->common.fc.mvc_hp);
-#endif
vp8_zero(cpi->sub_mv_ref_count);
vp8_zero(cpi->mbsplit_count);
vp8_zero(cpi->common.fc.mv_ref_ct)
@@ -2801,15 +2717,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
update_mbintra_mode_probs(cpi, &header_bc);
-#if CONFIG_NEWMVENTROPY
vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv, &header_bc);
-#else
- if (xd->allow_high_precision_mv) {
- vp8_write_mvprobs_hp(cpi, &header_bc);
- } else {
- vp8_write_mvprobs(cpi, &header_bc);
- }
-#endif
}
vp8_stop_encode(&header_bc);
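
With TX_SELECT folded in, the prob_tx hunk earlier in this file always derives the two per-MB transform-size probabilities from the frame's counts before writing them as 8-bit literals. A hedged sketch of that derivation follows; the exact count grouping and the helper name are assumptions, since the full get_prob() arguments are truncated in the hunk above.

    /* Sketch: prob_tx[0] ~ P(TX_4X4), prob_tx[1] ~ P(TX_8X8 | not TX_4X4).
     * txfm_count[] is assumed to cover MBs that may use 16x16, and
     * txfm_count_8x8p[] MBs limited to 4x4/8x8 (I8X8_PRED, 4x4 SPLITMV). */
    static void compute_prob_tx_sketch(VP8_COMMON *pc, VP8_COMP *cpi) {
      unsigned int c4  = cpi->txfm_count[TX_4X4] + cpi->txfm_count_8x8p[TX_4X4];
      unsigned int c8  = cpi->txfm_count[TX_8X8] + cpi->txfm_count_8x8p[TX_8X8];
      unsigned int c16 = cpi->txfm_count[TX_16X16];
      unsigned int total = c4 + c8 + c16;

      pc->prob_tx[0] = total ? (255 * c4 + total / 2) / total : 128;
      pc->prob_tx[1] = (c8 + c16) ? (255 * c8 + (c8 + c16) / 2) / (c8 + c16) : 128;
      if (pc->prob_tx[0] == 0) pc->prob_tx[0] = 1;  /* keep probabilities nonzero */
      if (pc->prob_tx[1] == 0) pc->prob_tx[1] = 1;
    }
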
diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h
index 292011f81..c18b64f2c 100644
--- a/vp8/encoder/block.h
+++ b/vp8/encoder/block.h
@@ -79,9 +79,7 @@ typedef struct {
int hybrid_pred_diff;
int comp_pred_diff;
int single_pred_diff;
-#if CONFIG_TX_SELECT
int64_t txfm_rd_diff[NB_TXFM_MODES];
-#endif
} PICK_MODE_CONTEXT;
typedef struct {
@@ -114,7 +112,6 @@ typedef struct {
int *mb_norm_activity_ptr;
signed int act_zbin_adj;
-#if CONFIG_NEWMVENTROPY
int nmvjointcost[MV_JOINTS];
int nmvcosts[2][MV_VALS];
int *nmvcost[2];
@@ -126,16 +123,6 @@ typedef struct {
int *nmvsadcost[2];
int nmvsadcosts_hp[2][MV_VALS];
int *nmvsadcost_hp[2];
-#else
- int mvcosts[2][MVvals + 1];
- int *mvcost[2];
- int mvsadcosts[2][MVfpvals + 1];
- int *mvsadcost[2];
- int mvcosts_hp[2][MVvals_hp + 1];
- int *mvcost_hp[2];
- int mvsadcosts_hp[2][MVfpvals_hp + 1];
- int *mvsadcost_hp[2];
-#endif /* CONFIG_NEWMVENTROPY */
int mbmode_cost[2][MB_MODE_COUNT];
int intra_uv_mode_cost[2][MB_MODE_COUNT];
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index af9b3f598..20e63725c 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -411,7 +411,6 @@ static void update_state(VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
}
-#if CONFIG_TX_SELECT
{
int segment_id = mbmi->segment_id;
if (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
@@ -421,7 +420,6 @@ static void update_state(VP8_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
}
}
}
-#endif
if (cpi->common.frame_type == KEY_FRAME) {
// Restore the coding modes to that held in the coding context
@@ -1355,12 +1353,7 @@ static void encode_frame_internal(VP8_COMP *cpi) {
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
-#if CONFIG_NEWMVENTROPY
vp8_zero(cpi->NMVcount);
-#else
- vp8_zero(cpi->MVcount);
- vp8_zero(cpi->MVcount_hp);
-#endif
vp8_zero(cpi->coef_counts);
vp8_zero(cpi->hybrid_coef_counts);
vp8_zero(cpi->coef_counts_8x8);
@@ -1387,11 +1380,9 @@ static void encode_frame_internal(VP8_COMP *cpi) {
vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
-#if CONFIG_TX_SELECT
vpx_memset(cpi->txfm_count, 0, sizeof(cpi->txfm_count));
vpx_memset(cpi->txfm_count_8x8p, 0, sizeof(cpi->txfm_count_8x8p));
vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
-#endif
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
@@ -1451,7 +1442,6 @@ static int check_dual_ref_flags(VP8_COMP *cpi) {
}
}
-#if CONFIG_TX_SELECT
static void reset_skip_txfm_size(VP8_COMP *cpi, TX_SIZE txfm_max) {
VP8_COMMON *cm = &cpi->common;
int mb_row, mb_col, mis = cm->mode_info_stride;
@@ -1475,7 +1465,6 @@ static void reset_skip_txfm_size(VP8_COMP *cpi, TX_SIZE txfm_max) {
}
}
}
-#endif
void vp8_encode_frame(VP8_COMP *cpi) {
if (cpi->sf.RD) {
@@ -1521,7 +1510,6 @@ void vp8_encode_frame(VP8_COMP *cpi) {
txfm_type = ONLY_4X4;
} else
#endif
-#if CONFIG_TX_SELECT
/* FIXME (rbultje)
* this is a hack (no really), basically to work around the complete
* nonsense coefficient cost prediction for keyframes. The probabilities
@@ -1569,16 +1557,11 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_16X16 : TX_MODE_SELECT;
#endif
-#else
- txfm_type = ALLOW_16X16;
-#endif // CONFIG_TX_SELECT
cpi->common.txfm_mode = txfm_type;
-#if CONFIG_TX_SELECT
if (txfm_type != TX_MODE_SELECT) {
cpi->common.prob_tx[0] = 128;
cpi->common.prob_tx[1] = 128;
}
-#endif
cpi->common.comp_pred_mode = pred_type;
encode_frame_internal(cpi);
@@ -1588,7 +1571,6 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
}
-#if CONFIG_TX_SELECT
for (i = 0; i < NB_TXFM_MODES; ++i) {
int64_t pd = cpi->rd_tx_select_diff[i];
int diff;
@@ -1598,7 +1580,6 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->rd_tx_select_threshes[frame_type][i] += diff;
cpi->rd_tx_select_threshes[frame_type][i] /= 2;
}
-#endif
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
int single_count_zero = 0;
@@ -1616,7 +1597,6 @@ void vp8_encode_frame(VP8_COMP *cpi) {
}
}
-#if CONFIG_TX_SELECT
if (cpi->common.txfm_mode == TX_MODE_SELECT) {
const int count4x4 = cpi->txfm_count[TX_4X4] + cpi->txfm_count_8x8p[TX_4X4];
const int count8x8 = cpi->txfm_count[TX_8X8];
@@ -1633,7 +1613,6 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->common.txfm_mode = ALLOW_16X16;
}
}
-#endif
} else {
encode_frame_internal(cpi);
}
@@ -1951,15 +1930,12 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
}
if (output_enabled) {
-#if CONFIG_TX_SELECT
int segment_id = mbmi->segment_id;
-#endif
// Tokenize
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t, 0);
-#if CONFIG_TX_SELECT
if (cpi->common.txfm_mode == TX_MODE_SELECT &&
!((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
(segfeature_active(&x->e_mbd, segment_id, SEG_LVL_EOB) &&
@@ -1969,9 +1945,7 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
} else if (mbmi->mode == I8X8_PRED) {
cpi->txfm_count_8x8p[mbmi->txfm_size]++;
}
- } else
-#endif
- if (cpi->common.txfm_mode >= ALLOW_16X16 && mbmi->mode <= TM_PRED) {
+ } else if (cpi->common.txfm_mode >= ALLOW_16X16 && mbmi->mode <= TM_PRED) {
mbmi->txfm_size = TX_16X16;
} else
if (cpi->common.txfm_mode >= ALLOW_8X8 && mbmi->mode != B_PRED) {
@@ -2152,7 +2126,6 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
}
if (output_enabled) {
-#if CONFIG_TX_SELECT
int segment_id = mbmi->segment_id;
if (cpi->common.txfm_mode == TX_MODE_SELECT &&
!((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
@@ -2166,9 +2139,7 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
mbmi->partitioning != PARTITIONING_4X4)) {
cpi->txfm_count_8x8p[mbmi->txfm_size]++;
}
- } else
-#endif
- if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
+ } else if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV && cpi->common.txfm_mode >= ALLOW_16X16) {
mbmi->txfm_size = TX_16X16;
} else if (mbmi->mode != B_PRED &&
diff --git a/vp8/encoder/encodemv.c b/vp8/encoder/encodemv.c
index d520d995a..75dad2f9b 100644
--- a/vp8/encoder/encodemv.c
+++ b/vp8/encoder/encodemv.c
@@ -22,8 +22,6 @@ extern unsigned int active_section;
//extern int final_packing;
-#if CONFIG_NEWMVENTROPY
-
#ifdef NMV_STATS
nmv_context_counts tnmvcounts;
#endif
@@ -549,593 +547,3 @@ void vp8_build_nmv_cost_table(int *mvjoint,
if (mvc_flag_h)
build_nmv_component_cost_table(mvcost[1], &mvctx->comps[1], usehp);
}
-
-#else /* CONFIG_NEWMVENTROPY */
-
-static void encode_mvcomponent(
- vp8_writer *const bc,
- const int v,
- const struct mv_context *mvc
-) {
- const vp8_prob *p = mvc->prob;
- const int x = v < 0 ? -v : v;
-
- if (x < mvnum_short) { // Small
- vp8_write(bc, 0, p[mvpis_short]);
- vp8_treed_write(bc, vp8_small_mvtree, p + MVPshort, x, mvnum_short_bits);
- if (!x)
- return; // no sign bit
- } else { // Large
- int i = 0;
-
- vp8_write(bc, 1, p[mvpis_short]);
-
- do
- vp8_write(bc, (x >> i) & 1, p[MVPbits + i]);
-
- while (++i < mvnum_short_bits);
-
- i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
-
- do
- vp8_write(bc, (x >> i) & 1, p[MVPbits + i]);
-
- while (--i > mvnum_short_bits);
-
- if (x & ~((2 << mvnum_short_bits) - 1))
- vp8_write(bc, (x >> mvnum_short_bits) & 1, p[MVPbits + mvnum_short_bits]);
- }
-
- vp8_write(bc, v < 0, p[MVPsign]);
-}
-
-void vp8_encode_motion_vector(vp8_writer* const bc,
- const MV* const mv,
- const MV_CONTEXT* const mvc) {
- encode_mvcomponent(bc, mv->row >> 1, &mvc[0]);
- encode_mvcomponent(bc, mv->col >> 1, &mvc[1]);
-}
-
-
-static unsigned int cost_mvcomponent(const int v,
- const struct mv_context* const mvc) {
- const vp8_prob *p = mvc->prob;
- const int x = v; // v<0? -v:v;
- unsigned int cost;
-
- if (x < mvnum_short) {
- cost = vp8_cost_zero(p [mvpis_short])
- + vp8_treed_cost(vp8_small_mvtree, p + MVPshort, x, mvnum_short_bits);
-
- if (!x)
- return cost;
- } else {
- int i = 0;
- cost = vp8_cost_one(p [mvpis_short]);
-
- do
- cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1);
-
- while (++i < mvnum_short_bits);
-
- i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
-
- do
- cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1);
-
- while (--i > mvnum_short_bits);
-
- if (x & ~((2 << mvnum_short_bits) - 1))
- cost += vp8_cost_bit(p [MVPbits + mvnum_short_bits], (x >> mvnum_short_bits) & 1);
- }
-
- return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
-}
-
-void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc,
- const int mvc_flag[2]) {
- int i = 1; // -mv_max;
- unsigned int cost0 = 0;
- unsigned int cost1 = 0;
-
- vp8_clear_system_state();
-
- i = 1;
-
- if (mvc_flag[0]) {
- mvcost [0] [0] = cost_mvcomponent(0, &mvc[0]);
-
- do {
- // mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
- cost0 = cost_mvcomponent(i, &mvc[0]);
-
- mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign]);
- mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign]);
- } while (++i <= mv_max);
- }
-
- i = 1;
-
- if (mvc_flag[1]) {
- mvcost [1] [0] = cost_mvcomponent(0, &mvc[1]);
-
- do {
- // mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
- cost1 = cost_mvcomponent(i, &mvc[1]);
-
- mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign]);
- mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign]);
- } while (++i <= mv_max);
- }
-}
-
-
-// Motion vector probability table update depends on benefit.
-// Small correction allows for the fact that an update to an MV probability
-// may have benefit in subsequent frames as well as the current one.
-
-#define MV_PROB_UPDATE_CORRECTION -1
-
-
-__inline static void calc_prob(vp8_prob *p, const unsigned int ct[2]) {
- const unsigned int tot = ct[0] + ct[1];
-
- if (tot) {
- const vp8_prob x = ((ct[0] * 255) / tot) & -2;
- *p = x ? x : 1;
- }
-}
-
-static void update(
- vp8_writer *const bc,
- const unsigned int ct[2],
- vp8_prob *const cur_p,
- const vp8_prob new_p,
- const vp8_prob update_p,
- int *updated
-) {
- const int cur_b = vp8_cost_branch(ct, *cur_p);
- const int new_b = vp8_cost_branch(ct, new_p);
- const int cost = 7 + MV_PROB_UPDATE_CORRECTION + ((vp8_cost_one(update_p) - vp8_cost_zero(update_p) + 128) >> 8);
-
- if (cur_b - new_b > cost) {
- *cur_p = new_p;
- vp8_write(bc, 1, update_p);
- vp8_write_literal(bc, new_p >> 1, 7);
- *updated = 1;
-
- } else
- vp8_write(bc, 0, update_p);
-}
-
-static void write_component_probs(
- vp8_writer *const bc,
- struct mv_context *cur_mvc,
- const struct mv_context *default_mvc_,
- const struct mv_context *update_mvc,
- const unsigned int events [MVvals],
- unsigned int rc,
- int *updated
-) {
- vp8_prob *Pcur = cur_mvc->prob;
- const vp8_prob *default_mvc = default_mvc_->prob;
- const vp8_prob *Pupdate = update_mvc->prob;
- unsigned int is_short_ct[2], sign_ct[2];
-
- unsigned int bit_ct [mvlong_width] [2];
-
- unsigned int short_ct [mvnum_short];
- unsigned int short_bct [mvnum_short - 1] [2];
-
- vp8_prob Pnew [MVPcount];
-
- (void) rc;
- vp8_copy_array(Pnew, default_mvc, MVPcount);
-
- vp8_zero(is_short_ct)
- vp8_zero(sign_ct)
- vp8_zero(bit_ct)
- vp8_zero(short_ct)
- vp8_zero(short_bct)
-
-
- // j=0
- {
- const int c = events [mv_max];
-
- is_short_ct [0] += c; // Short vector
- short_ct [0] += c; // Magnitude distribution
- }
-
- // j: 1 ~ mv_max (1023)
- {
- int j = 1;
-
- do {
- const int c1 = events [mv_max + j]; // positive
- const int c2 = events [mv_max - j]; // negative
- const int c = c1 + c2;
- int a = j;
-
- sign_ct [0] += c1;
- sign_ct [1] += c2;
-
- if (a < mvnum_short) {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- } else {
- int k = mvlong_width - 1;
- is_short_ct [1] += c; // Long vector
-
- /* bit 3 not always encoded. */
- do
- bit_ct [k] [(a >> k) & 1] += c;
-
- while (--k >= 0);
- }
- } while (++j <= mv_max);
- }
-
- calc_prob(Pnew + mvpis_short, is_short_ct);
-
- calc_prob(Pnew + MVPsign, sign_ct);
-
- {
- vp8_prob p [mvnum_short - 1]; /* actually only need branch ct */
- int j = 0;
-
- vp8_tree_probs_from_distribution(
- mvnum_short, vp8_small_mvencodings, vp8_small_mvtree,
- p, short_bct, short_ct,
- 256, 1
- );
-
- do
- calc_prob(Pnew + MVPshort + j, short_bct[j]);
-
- while (++j < mvnum_short - 1);
- }
-
- {
- int j = 0;
-
- do
- calc_prob(Pnew + MVPbits + j, bit_ct[j]);
-
- while (++j < mvlong_width);
- }
-
- update(bc, is_short_ct, Pcur + mvpis_short, Pnew[mvpis_short],
- *Pupdate++, updated);
-
- update(bc, sign_ct, Pcur + MVPsign, Pnew[MVPsign],
- *Pupdate++, updated);
-
- {
- const vp8_prob *const new_p = Pnew + MVPshort;
- vp8_prob *const cur_p = Pcur + MVPshort;
-
- int j = 0;
-
- do
-
- update(bc, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
-
- while (++j < mvnum_short - 1);
- }
-
- {
- const vp8_prob *const new_p = Pnew + MVPbits;
- vp8_prob *const cur_p = Pcur + MVPbits;
-
- int j = 0;
-
- do
-
- update(bc, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
-
- while (++j < mvlong_width);
- }
-}
-
-void vp8_write_mvprobs(VP8_COMP* const cpi, vp8_writer* const bc) {
- MV_CONTEXT *mvc = cpi->common.fc.mvc;
- int flags[2] = {0, 0};
-#ifdef ENTROPY_STATS
- active_section = 4;
-#endif
- write_component_probs(
- bc, &mvc[0], &vp8_default_mv_context[0], &vp8_mv_update_probs[0],
- cpi->MVcount[0], 0, &flags[0]);
-
- write_component_probs(
- bc, &mvc[1], &vp8_default_mv_context[1], &vp8_mv_update_probs[1],
- cpi->MVcount[1], 1, &flags[1]);
-
- if (flags[0] || flags[1])
- vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flags);
-
-#ifdef ENTROPY_STATS
- active_section = 5;
-#endif
-}
-
-
-static void encode_mvcomponent_hp(
- vp8_writer *const bc,
- const int v,
- const struct mv_context_hp *mvc
-) {
- const vp8_prob *p = mvc->prob;
- const int x = v < 0 ? -v : v;
-
- if (x < mvnum_short_hp) { // Small
- vp8_write(bc, 0, p[mvpis_short_hp]);
- vp8_treed_write(bc, vp8_small_mvtree_hp, p + MVPshort_hp, x,
- mvnum_short_bits_hp);
- if (!x)
- return; // no sign bit
- } else { // Large
- int i = 0;
-
- vp8_write(bc, 1, p[mvpis_short_hp]);
-
- do
- vp8_write(bc, (x >> i) & 1, p[MVPbits_hp + i]);
-
- while (++i < mvnum_short_bits_hp);
-
- i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
-
- do
- vp8_write(bc, (x >> i) & 1, p[MVPbits_hp + i]);
-
- while (--i > mvnum_short_bits_hp);
-
- if (x & ~((2 << mvnum_short_bits_hp) - 1))
- vp8_write(bc, (x >> mvnum_short_bits_hp) & 1,
- p[MVPbits_hp + mvnum_short_bits_hp]);
- }
-
- vp8_write(bc, v < 0, p[MVPsign_hp]);
-}
-
-void vp8_encode_motion_vector_hp(vp8_writer *bc, const MV *mv,
- const MV_CONTEXT_HP *mvc) {
-
- encode_mvcomponent_hp(bc, mv->row, &mvc[0]);
- encode_mvcomponent_hp(bc, mv->col, &mvc[1]);
-}
-
-
-static unsigned int cost_mvcomponent_hp(const int v,
- const struct mv_context_hp *mvc) {
- const vp8_prob *p = mvc->prob;
- const int x = v; // v<0? -v:v;
- unsigned int cost;
-
- if (x < mvnum_short_hp) {
- cost = vp8_cost_zero(p [mvpis_short_hp])
- + vp8_treed_cost(vp8_small_mvtree_hp, p + MVPshort_hp, x,
- mvnum_short_bits_hp);
-
- if (!x)
- return cost;
- } else {
- int i = 0;
- cost = vp8_cost_one(p [mvpis_short_hp]);
-
- do
- cost += vp8_cost_bit(p [MVPbits_hp + i], (x >> i) & 1);
-
- while (++i < mvnum_short_bits_hp);
-
- i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
-
- do
- cost += vp8_cost_bit(p [MVPbits_hp + i], (x >> i) & 1);
-
- while (--i > mvnum_short_bits_hp);
-
- if (x & ~((2 << mvnum_short_bits_hp) - 1))
- cost += vp8_cost_bit(p [MVPbits_hp + mvnum_short_bits_hp],
- (x >> mvnum_short_bits_hp) & 1);
- }
-
- return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
-}
-
-void vp8_build_component_cost_table_hp(int *mvcost[2],
- const MV_CONTEXT_HP *mvc,
- const int mvc_flag[2]) {
- int i = 1; // -mv_max;
- unsigned int cost0 = 0;
- unsigned int cost1 = 0;
-
- vp8_clear_system_state();
-
- i = 1;
-
- if (mvc_flag[0]) {
- mvcost [0] [0] = cost_mvcomponent_hp(0, &mvc[0]);
-
- do {
- // mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
- cost0 = cost_mvcomponent_hp(i, &mvc[0]);
-
- mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign_hp]);
- mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign_hp]);
- } while (++i <= mv_max_hp);
- }
-
- i = 1;
-
- if (mvc_flag[1]) {
- mvcost [1] [0] = cost_mvcomponent_hp(0, &mvc[1]);
-
- do {
- // mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
- cost1 = cost_mvcomponent_hp(i, &mvc[1]);
-
- mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign_hp]);
- mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign_hp]);
- } while (++i <= mv_max_hp);
- }
-}
-
-
-static void write_component_probs_hp(
- vp8_writer *const bc,
- struct mv_context_hp *cur_mvc,
- const struct mv_context_hp *default_mvc_,
- const struct mv_context_hp *update_mvc,
- const unsigned int events [MVvals_hp],
- unsigned int rc,
- int *updated
-) {
- vp8_prob *Pcur = cur_mvc->prob;
- const vp8_prob *default_mvc = default_mvc_->prob;
- const vp8_prob *Pupdate = update_mvc->prob;
- unsigned int is_short_ct[2], sign_ct[2];
-
- unsigned int bit_ct [mvlong_width_hp] [2];
-
- unsigned int short_ct [mvnum_short_hp];
- unsigned int short_bct [mvnum_short_hp - 1] [2];
-
- vp8_prob Pnew [MVPcount_hp];
-
- (void) rc;
- vp8_copy_array(Pnew, default_mvc, MVPcount_hp);
-
- vp8_zero(is_short_ct)
- vp8_zero(sign_ct)
- vp8_zero(bit_ct)
- vp8_zero(short_ct)
- vp8_zero(short_bct)
-
-
- // j=0
- {
- const int c = events [mv_max_hp];
-
- is_short_ct [0] += c; // Short vector
- short_ct [0] += c; // Magnitude distribution
- }
-
- // j: 1 ~ mv_max (1023)
- {
- int j = 1;
-
- do {
- const int c1 = events [mv_max_hp + j]; // positive
- const int c2 = events [mv_max_hp - j]; // negative
- const int c = c1 + c2;
- int a = j;
-
- sign_ct [0] += c1;
- sign_ct [1] += c2;
-
- if (a < mvnum_short_hp) {
- is_short_ct [0] += c; // Short vector
- short_ct [a] += c; // Magnitude distribution
- } else {
- int k = mvlong_width_hp - 1;
- is_short_ct [1] += c; // Long vector
-
- /* bit 3 not always encoded. */
- do
- bit_ct [k] [(a >> k) & 1] += c;
-
- while (--k >= 0);
- }
- } while (++j <= mv_max_hp);
- }
-
- calc_prob(Pnew + mvpis_short_hp, is_short_ct);
-
- calc_prob(Pnew + MVPsign_hp, sign_ct);
-
- {
- vp8_prob p [mvnum_short_hp - 1]; /* actually only need branch ct */
- int j = 0;
-
- vp8_tree_probs_from_distribution(
- mvnum_short_hp, vp8_small_mvencodings_hp, vp8_small_mvtree_hp,
- p, short_bct, short_ct,
- 256, 1
- );
-
- do
- calc_prob(Pnew + MVPshort_hp + j, short_bct[j]);
-
- while (++j < mvnum_short_hp - 1);
- }
-
- {
- int j = 0;
-
- do
- calc_prob(Pnew + MVPbits_hp + j, bit_ct[j]);
-
- while (++j < mvlong_width_hp);
- }
-
- update(bc, is_short_ct, Pcur + mvpis_short_hp, Pnew[mvpis_short_hp],
- *Pupdate++, updated);
-
- update(bc, sign_ct, Pcur + MVPsign_hp, Pnew[MVPsign_hp], *Pupdate++,
- updated);
-
- {
- const vp8_prob *const new_p = Pnew + MVPshort_hp;
- vp8_prob *const cur_p = Pcur + MVPshort_hp;
-
- int j = 0;
-
- do
-
- update(bc, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
-
- while (++j < mvnum_short_hp - 1);
- }
-
- {
- const vp8_prob *const new_p = Pnew + MVPbits_hp;
- vp8_prob *const cur_p = Pcur + MVPbits_hp;
-
- int j = 0;
-
- do
-
- update(bc, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
-
- while (++j < mvlong_width_hp);
- }
-}
-
-void vp8_write_mvprobs_hp(VP8_COMP* const cpi, vp8_writer* const bc) {
- MV_CONTEXT_HP *mvc = cpi->common.fc.mvc_hp;
- int flags[2] = {0, 0};
-#ifdef ENTROPY_STATS
- active_section = 4;
-#endif
- write_component_probs_hp(
- bc, &mvc[0], &vp8_default_mv_context_hp[0], &vp8_mv_update_probs_hp[0],
- cpi->MVcount_hp[0], 0, &flags[0]
- );
- write_component_probs_hp(
- bc, &mvc[1], &vp8_default_mv_context_hp[1], &vp8_mv_update_probs_hp[1],
- cpi->MVcount_hp[1], 1, &flags[1]
- );
-
- if (flags[0] || flags[1])
- vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp,
- (const MV_CONTEXT_HP *)
- cpi->common.fc.mvc_hp, flags);
-#ifdef ENTROPY_STATS
- active_section = 5;
-#endif
-}
-
-#endif /* CONFIG_NEWMVENTROPY */
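For orientation, the deleted vp8_build_component_cost_table_hp() above fills a signed per-component cost table: the magnitude cost is computed once per absolute value, then the cost of a zero or one sign bit is added for the positive and negative entries. Below is a minimal standalone sketch of that fill pattern; cost_component(), the sign costs and SKETCH_MV_MAX are illustrative stand-ins, not the real vp8_cost_* helpers or constants.

/* Sketch of the signed cost-table fill used by the deleted
 * vp8_build_component_cost_table_hp(). The table pointer is expected to
 * point at the centre of its backing storage so negative indices stay in
 * bounds, as with cpi->mb.nmvcost[0] = &cpi->mb.nmvcosts[0][MV_MAX] later
 * in this patch. All names and values are illustrative. */
#define SKETCH_MV_MAX 1023

static int cost_component(int v) {       /* stand-in for cost_mvcomponent_hp() */
  return v ? 64 + (v >> 4) : 32;         /* dummy magnitude cost */
}

static void build_signed_cost_table(int *table,
                                    int sign_zero_cost, int sign_one_cost) {
  int i;
  table[0] = cost_component(0);
  for (i = 1; i <= SKETCH_MV_MAX; ++i) {
    const int mag = cost_component(i);
    table[i]  = mag + sign_zero_cost;    /* positive value: sign bit == 0 */
    table[-i] = mag + sign_one_cost;     /* negative value: sign bit == 1 */
  }
}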
diff --git a/vp8/encoder/encodemv.h b/vp8/encoder/encodemv.h
index c06831cb2..254536580 100644
--- a/vp8/encoder/encodemv.h
+++ b/vp8/encoder/encodemv.h
@@ -14,7 +14,6 @@
#include "onyx_int.h"
-#if CONFIG_NEWMVENTROPY
void vp8_write_nmvprobs(VP8_COMP* const, int usehp, vp8_writer* const);
void vp8_encode_nmv(vp8_writer* const w, const MV* const mv,
const MV* const ref, const nmv_context* const mvctx);
@@ -27,19 +26,5 @@ void vp8_build_nmv_cost_table(int *mvjoint,
int usehp,
int mvc_flag_v,
int mvc_flag_h);
-#else /* CONFIG_NEWMVENTROPY */
-void vp8_write_mvprobs(VP8_COMP* const, vp8_writer* const);
-void vp8_encode_motion_vector(vp8_writer* const, const MV* const,
- const MV_CONTEXT* const);
-void vp8_build_component_cost_table(int *mvcost[2],
- const MV_CONTEXT*,
- const int mvc_flag[2]);
-void vp8_write_mvprobs_hp(VP8_COMP* const, vp8_writer* const);
-void vp8_encode_motion_vector_hp(vp8_writer* const, const MV* const,
- const MV_CONTEXT_HP* const);
-void vp8_build_component_cost_table_hp(int *mvcost[2],
- const MV_CONTEXT_HP*,
- const int mvc_flag[2]);
-#endif /* CONFIG_NEWMVENTROPY */
#endif
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index 1e88454a1..09d5a762e 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -493,12 +493,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
{
int flag[2] = {1, 1};
vp8_init_mv_probs(cm);
-#if CONFIG_NEWMVENTROPY
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
-#else
- vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
- vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cm->fc.mvc_hp, flag);
-#endif
}
// for each macroblock row in image
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 85f5f289e..210887491 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -47,15 +47,9 @@ int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
-#if CONFIG_NEWMVENTROPY
return ((mvjcost[vp8_get_mv_joint(v)] +
mvcost[0][v.row] + mvcost[1][v.col]) *
Weight) >> 7;
-#else
- return ((mvcost[0][v.row >> (ishp == 0)] +
- mvcost[1][v.col >> (ishp == 0)])
- * Weight) >> 7;
-#endif
}
static int mv_err_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
@@ -64,14 +58,9 @@ static int mv_err_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
-#if CONFIG_NEWMVENTROPY
return ((mvjcost[vp8_get_mv_joint(v)] +
mvcost[0][v.row] + mvcost[1][v.col]) *
error_per_bit + 128) >> 8;
-#else
- return ((mvcost[0][v.row >> (ishp == 0)] +
- mvcost[1][v.col >> (ishp == 0)]) * error_per_bit + 128) >> 8;
-#endif
}
return 0;
}
@@ -83,14 +72,9 @@ static int mvsad_err_cost(int_mv *mv, int_mv *ref, DEC_MVSADCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
-#if CONFIG_NEWMVENTROPY
return ((mvjsadcost[vp8_get_mv_joint(v)] +
mvsadcost[0][v.row] + mvsadcost[1][v.col]) *
error_per_bit + 128) >> 8;
-#else
- return ((mvsadcost[0][v.row] + mvsadcost[1][v.col])
- * error_per_bit + 128) >> 8;
-#endif
}
return 0;
}
@@ -220,35 +204,42 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
* could reduce the area.
*/
-#if CONFIG_NEWMVENTROPY
/* estimated cost of a motion vector (r,c) */
-#define MVC(r,c) \
- (mvcost ? \
- ((mvjcost[((r)!=rr)*2 + ((c)!=rc)] + \
- mvcost[0][((r)-rr)] + mvcost[1][((c)-rc)]) * error_per_bit + 128 )>>8 : 0)
-#else
-#define MVC(r,c) \
- (mvcost ? \
- ((mvcost[0][((r)-rr)>>(xd->allow_high_precision_mv==0)] + \
- mvcost[1][((c)-rc)>>(xd->allow_high_precision_mv==0)]) * \
- error_per_bit + 128 )>>8 : 0)
-#endif /* CONFIG_NEWMVENTROPY */
+#define MVC(r, c) \
+ (mvcost ? \
+ ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + \
+ mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \
+ error_per_bit + 128) >> 8 : 0)
-#define SP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
+#define SP(x) (((x) & 7) << 1) // convert motion vector component to offset
+ // for svf calc
-#define IFMVCV(r,c,s,e) \
- if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
+#define IFMVCV(r, c, s, e) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) \
+ s \
+ else \
+ e;
/* pointer to predictor base of a motionvector */
-#define PRE(r,c) (y + (((r)>>3) * y_stride + ((c)>>3) -(offset)))
+#define PRE(r, c) (y + (((r) >> 3) * y_stride + ((c) >> 3) -(offset)))
/* returns subpixel variance error function */
-#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse)
-
-/* checks if (r,c) has better score than previous best */
-#define CHECK_BETTER(v,r,c) \
- IFMVCV(r,c,{thismse = (DIST(r,c)); if((v = MVC(r,c)+thismse) < besterr) \
- { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)
+#define DIST(r, c) \
+ vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, b->src_stride, &sse)
+
+/* checks if (r, c) has better score than previous best */
+#define CHECK_BETTER(v, r, c) \
+ IFMVCV(r, c, { \
+ thismse = (DIST(r, c)); \
+ if ((v = MVC(r, c) + thismse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ }, \
+ v = INT_MAX;)
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
@@ -307,17 +298,10 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
br = bestmv->as_mv.row << 3;
bc = bestmv->as_mv.col << 3;
hstep = 4;
-#if CONFIG_NEWMVENTROPY
minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << MV_MAX_BITS) - 1));
maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << MV_MAX_BITS) - 1));
minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << MV_MAX_BITS) - 1));
maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << MV_MAX_BITS) - 1));
-#else
- minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width_hp) - 1));
- maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
- minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
- maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
-#endif
tr = br;
tc = bc;
@@ -403,13 +387,11 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
tc = bc;
}
-#if CONFIG_NEWMVENTROPY
if (xd->allow_high_precision_mv) {
usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
} else {
usehp = 0;
}
-#endif
if (usehp) {
hstep >>= 1;
@@ -771,13 +753,11 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
-#if CONFIG_NEWMVENTROPY
if (x->e_mbd.allow_high_precision_mv) {
usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
} else {
usehp = 0;
}
-#endif
if (!usehp)
return bestmse;
@@ -1304,16 +1284,8 @@ int vp8_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1423,16 +1395,8 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1632,16 +1596,8 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int col_max = ref_col + distance;
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1736,16 +1692,8 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
unsigned int sad_array[3];
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -1873,16 +1821,8 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
unsigned int sad_array[3];
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -2023,16 +1963,8 @@ int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@@ -2107,16 +2039,8 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
-#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
-#else
- int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
- if (xd->allow_high_precision_mv) {
- mvsadcost[0] = x->mvsadcost_hp[0];
- mvsadcost[1] = x->mvsadcost_hp[1];
- }
-#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
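After this cleanup, all of the mcomp.c call sites above share one cost formula: the joint-MV cost plus the two per-component costs, scaled by error_per_bit with rounding (vp8_mv_bit_cost uses the same sum but scales by Weight and shifts by 7). Below is a small sketch of that computation with dummy tables; the joint index is derived the same way as in the MVC macro, nonzero row selecting bit 1 and nonzero column bit 0.

/* Sketch of the retained new-entropy MV error cost; the component tables
 * are assumed to be centred so that negative row/col differences index
 * valid entries. Not the encoder's real mv_err_cost(). */
static int sketch_mv_err_cost(int dr, int dc,
                              int *mvjcost, int *mvcost[2],
                              int error_per_bit) {
  const int joint = (dr != 0) * 2 + (dc != 0);   /* same index as MVC() */
  const int bits = mvjcost[joint] + mvcost[0][dr] + mvcost[1][dc];
  return (bits * error_per_bit + 128) >> 8;      /* +128 rounds before the shift */
}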
diff --git a/vp8/encoder/mcomp.h b/vp8/encoder/mcomp.h
index afca58084..f09106927 100644
--- a/vp8/encoder/mcomp.h
+++ b/vp8/encoder/mcomp.h
@@ -15,21 +15,12 @@
#include "block.h"
#include "variance.h"
-#if CONFIG_NEWMVENTROPY
#define MVCOSTS mvjcost, mvcost
#define MVSADCOSTS mvjsadcost, mvsadcost
#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
#define DEC_MVSADCOSTS int *mvjsadcost, int *mvsadcost[2]
#define NULLMVCOST NULL, NULL
#define XMVCOST x->nmvjointcost, (x->e_mbd.allow_high_precision_mv?x->nmvcost_hp:x->nmvcost)
-#else
-#define MVCOSTS mvcost
-#define MVSADCOSTS mvsadcost
-#define DEC_MVCOSTS int *mvcost[2]
-#define DEC_MVSADCOSTS int *mvsadcost[2]
-#define NULLMVCOST NULL
-#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
-#endif /* CONFIG_NEWMVENTROPY */
#ifdef ENTROPY_STATS
extern void init_mv_ref_counts();
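The surviving macro pairs above (MVCOSTS/DEC_MVCOSTS and the SAD variants) exist so that every motion-search helper declares and forwards the joint-plus-component cost arguments identically. A toy illustration of the idiom follows; the names are suffixed _SKETCH to make clear they are not the real mcomp.h macros.

#define DEC_MVCOSTS_SKETCH int *mvjcost, int *mvcost[2]
#define MVCOSTS_SKETCH     mvjcost, mvcost

static int callee(DEC_MVCOSTS_SKETCH) {      /* parameter list supplied by the macro */
  return mvjcost[0] + mvcost[0][0] + mvcost[1][0];
}

static int caller(DEC_MVCOSTS_SKETCH) {      /* same names, so forwarding just works */
  return callee(MVCOSTS_SKETCH);             /* argument list supplied by the macro */
}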
diff --git a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c
index 8f0a52be8..ce0caf8e2 100644
--- a/vp8/encoder/onyx_if.c
+++ b/vp8/encoder/onyx_if.c
@@ -138,12 +138,10 @@ extern int skip_false_count;
extern int intra_mode_stats[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES];
#endif
-#if CONFIG_NEWMVENTROPY
#ifdef NMV_STATS
extern void init_nmvstats();
extern void print_nmvstats();
#endif
-#endif
#ifdef SPEEDSTATS
unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -1689,8 +1687,6 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
#define M_LOG2_E 0.693147180559945309417
#define log2f(x) (log (x) / (float) M_LOG2_E)
-#if CONFIG_NEWMVENTROPY
-
static void cal_nmvjointsadcost(int *mvjointsadcost) {
mvjointsadcost[0] = 600;
mvjointsadcost[1] = 300;
@@ -1728,40 +1724,6 @@ static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
} while (++i <= MV_MAX);
}
-#else
-
-static void cal_mvsadcosts(int *mvsadcost[2]) {
- int i = 1;
-
- mvsadcost [0] [0] = 300;
- mvsadcost [1] [0] = 300;
-
- do {
- double z = 256 * (2 * (log2f(8 * i) + .6));
- mvsadcost [0][i] = (int) z;
- mvsadcost [1][i] = (int) z;
- mvsadcost [0][-i] = (int) z;
- mvsadcost [1][-i] = (int) z;
- } while (++i <= mvfp_max);
-}
-
-static void cal_mvsadcosts_hp(int *mvsadcost[2]) {
- int i = 1;
-
- mvsadcost [0] [0] = 300;
- mvsadcost [1] [0] = 300;
-
- do {
- double z = 256 * (2 * (log2f(8 * i) + .6));
- mvsadcost [0][i] = (int) z;
- mvsadcost [1][i] = (int) z;
- mvsadcost [0][-i] = (int) z;
- mvsadcost [1][-i] = (int) z;
- } while (++i <= mvfp_max_hp);
-}
-
-#endif /* CONFIG_NEWMVENTROPY */
-
VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
int i;
volatile union {
@@ -1813,10 +1775,8 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
#endif
for (i = 0; i < COMP_PRED_CONTEXTS; i++)
cm->prob_comppred[i] = 128;
-#if CONFIG_TX_SELECT
for (i = 0; i < TX_SIZE_MAX - 1; i++)
cm->prob_tx[i] = 128;
-#endif
// Prime the recent reference frame useage counters.
// Hereafter they will be maintained as a sort of moving average
@@ -1877,11 +1837,9 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
vp8_zero(inter_uv_modes);
vp8_zero(inter_b_modes);
#endif
-#if CONFIG_NEWMVENTROPY
#ifdef NMV_STATS
init_nmvstats();
#endif
-#endif
/*Initialize the feed-forward activity masking.*/
cpi->activity_avg = 90 << 12;
@@ -1947,7 +1905,6 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->gf_rate_correction_factor = 1.0;
cpi->twopass.est_max_qcorrection_factor = 1.0;
-#if CONFIG_NEWMVENTROPY
cal_nmvjointsadcost(cpi->mb.nmvjointsadcost);
cpi->mb.nmvcost[0] = &cpi->mb.nmvcosts[0][MV_MAX];
cpi->mb.nmvcost[1] = &cpi->mb.nmvcosts[1][MV_MAX];
@@ -1960,19 +1917,6 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->mb.nmvsadcost_hp[0] = &cpi->mb.nmvsadcosts_hp[0][MV_MAX];
cpi->mb.nmvsadcost_hp[1] = &cpi->mb.nmvsadcosts_hp[1][MV_MAX];
cal_nmvsadcosts_hp(cpi->mb.nmvsadcost_hp);
-#else
- cpi->mb.mvcost[0] = &cpi->mb.mvcosts[0][mv_max + 1];
- cpi->mb.mvcost[1] = &cpi->mb.mvcosts[1][mv_max + 1];
- cpi->mb.mvsadcost[0] = &cpi->mb.mvsadcosts[0][mvfp_max + 1];
- cpi->mb.mvsadcost[1] = &cpi->mb.mvsadcosts[1][mvfp_max + 1];
- cal_mvsadcosts(cpi->mb.mvsadcost);
-
- cpi->mb.mvcost_hp[0] = &cpi->mb.mvcosts_hp[0][mv_max_hp + 1];
- cpi->mb.mvcost_hp[1] = &cpi->mb.mvcosts_hp[1][mv_max_hp + 1];
- cpi->mb.mvsadcost_hp[0] = &cpi->mb.mvsadcosts_hp[0][mvfp_max_hp + 1];
- cpi->mb.mvsadcost_hp[1] = &cpi->mb.mvsadcosts_hp[1][mvfp_max_hp + 1];
- cal_mvsadcosts_hp(cpi->mb.mvsadcost_hp);
-#endif /* CONFIG_NEWMVENTROPY */
for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
@@ -2099,12 +2043,10 @@ void vp8_remove_compressor(VP8_PTR *ptr) {
print_mode_context();
}
#endif
-#if CONFIG_NEWMVENTROPY
#ifdef NMV_STATS
if (cpi->pass != 1)
print_nmvstats();
#endif
-#endif
#if CONFIG_INTERNAL_STATS
@@ -3728,14 +3670,8 @@ static void encode_frame_to_data_rate
vp8_copy(cpi->common.fc.mbsplit_counts, cpi->mbsplit_count);
vp8_adapt_mode_probs(&cpi->common);
-#if CONFIG_NEWMVENTROPY
cpi->common.fc.NMVcount = cpi->NMVcount;
vp8_adapt_nmv_probs(&cpi->common, cpi->mb.e_mbd.allow_high_precision_mv);
-#else
- vp8_copy(cpi->common.fc.MVcount, cpi->MVcount);
- vp8_copy(cpi->common.fc.MVcount_hp, cpi->MVcount_hp);
- vp8_adapt_mv_probs(&cpi->common);
-#endif /* CONFIG_NEWMVENTROPY */
vp8_update_mode_context(&cpi->common);
}
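The deleted cal_mvsadcosts()/cal_mvsadcosts_hp() above show the shape of the SAD cost tables that the nmv versions replace: a flat cost at zero and a logarithmic ramp for larger magnitudes, mirrored for negative values. A standalone sketch of that fill follows, using C99 log2() directly (the file itself defines a log2f fallback via M_LOG2_E); SKETCH_FP_MAX and the centred table layout are assumptions.

/* Sketch of the log-shaped SAD cost fill, following the formula in the
 * deleted cal_mvsadcosts(); the table pointers must point into the centre
 * of their storage so negative indices are valid. Link with -lm. */
#include <math.h>

#define SKETCH_FP_MAX 255

static void fill_sad_cost_sketch(int *sadcost[2]) {
  int i = 1;
  sadcost[0][0] = 300;
  sadcost[1][0] = 300;
  do {
    const int z = (int)(256 * (2 * (log2(8.0 * i) + .6)));
    sadcost[0][i]  = z;  sadcost[1][i]  = z;
    sadcost[0][-i] = z;  sadcost[1][-i] = z;
  } while (++i <= SKETCH_FP_MAX);
}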
diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h
index ef17568ce..ab1bb44ec 100644
--- a/vp8/encoder/onyx_int.h
+++ b/vp8/encoder/onyx_int.h
@@ -60,17 +60,10 @@
#define VP8_TEMPORAL_ALT_REF 1
typedef struct {
-#if CONFIG_NEWMVENTROPY
nmv_context nmvc;
int nmvjointcost[MV_JOINTS];
int nmvcosts[2][MV_VALS];
int nmvcosts_hp[2][MV_VALS];
-#else
- MV_CONTEXT mvc[2];
- int mvcosts[2][MVvals + 1];
- MV_CONTEXT_HP mvc_hp[2];
- int mvcosts_hp[2][MVvals_hp + 1];
-#endif
#ifdef MODE_STATS
// Stats
@@ -458,13 +451,11 @@ typedef struct VP8_COMP {
int rd_prediction_type_threshes[4][NB_PREDICTION_TYPES];
int comp_pred_count[COMP_PRED_CONTEXTS];
int single_pred_count[COMP_PRED_CONTEXTS];
-#if CONFIG_TX_SELECT
// FIXME contextualize
int txfm_count[TX_SIZE_MAX];
int txfm_count_8x8p[TX_SIZE_MAX - 1];
int64_t rd_tx_select_diff[NB_TXFM_MODES];
int rd_tx_select_threshes[4][NB_TXFM_MODES];
-#endif
int RDMULT;
int RDDIV;
@@ -556,12 +547,7 @@ typedef struct VP8_COMP {
// int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];
-#if CONFIG_NEWMVENTROPY
nmv_context_counts NMVcount;
-#else
- unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */
- unsigned int MVcount_hp [2] [MVvals_hp]; /* (row,col) MV cts this frame */
-#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c
index e0e2a4e5b..85036e4c0 100644
--- a/vp8/encoder/ratectrl.c
+++ b/vp8/encoder/ratectrl.c
@@ -132,17 +132,10 @@ void vp8_save_coding_context(VP8_COMP *cpi) {
// intended for use in a re-code loop in vp8_compress_frame where the
// quantizer value is adjusted between loop iterations.
-#if CONFIG_NEWMVENTROPY
cc->nmvc = cm->fc.nmvc;
vp8_copy(cc->nmvjointcost, cpi->mb.nmvjointcost);
vp8_copy(cc->nmvcosts, cpi->mb.nmvcosts);
vp8_copy(cc->nmvcosts_hp, cpi->mb.nmvcosts_hp);
-#else
- vp8_copy(cc->mvc, cm->fc.mvc);
- vp8_copy(cc->mvcosts, cpi->mb.mvcosts);
- vp8_copy(cc->mvc_hp, cm->fc.mvc_hp);
- vp8_copy(cc->mvcosts_hp, cpi->mb.mvcosts_hp);
-#endif
vp8_copy(cc->mv_ref_ct, cm->fc.mv_ref_ct);
vp8_copy(cc->mode_context, cm->fc.mode_context);
@@ -196,17 +189,10 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {
// Restore key state variables to the snapshot state stored in the
// previous call to vp8_save_coding_context.
-#if CONFIG_NEWMVENTROPY
cm->fc.nmvc = cc->nmvc;
vp8_copy(cpi->mb.nmvjointcost, cc->nmvjointcost);
vp8_copy(cpi->mb.nmvcosts, cc->nmvcosts);
vp8_copy(cpi->mb.nmvcosts_hp, cc->nmvcosts_hp);
-#else
- vp8_copy(cm->fc.mvc, cc->mvc);
- vp8_copy(cpi->mb.mvcosts, cc->mvcosts);
- vp8_copy(cm->fc.mvc_hp, cc->mvc_hp);
- vp8_copy(cpi->mb.mvcosts_hp, cc->mvcosts_hp);
-#endif
vp8_copy(cm->fc.mv_ref_ct, cc->mv_ref_ct);
vp8_copy(cm->fc.mode_context, cc->mode_context);
@@ -263,16 +249,6 @@ void vp8_setup_key_frame(VP8_COMP *cpi) {
vp8_default_bmode_probs(cm->fc.bmode_prob);
vp8_init_mv_probs(& cpi->common);
-#if CONFIG_NEWMVENTROPY == 0
- /* this is not really required */
- {
- int flag[2] = {1, 1};
- vp8_build_component_cost_table(
- cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
- vp8_build_component_cost_table_hp(
- cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cpi->common.fc.mvc_hp, flag);
- }
-#endif
// cpi->common.filter_level = 0; // Reset every key frame.
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8;
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 4a55e712d..136151bef 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -387,14 +387,12 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
if (cpi->common.frame_type != KEY_FRAME)
{
-#if CONFIG_NEWMVENTROPY
vp8_build_nmv_cost_table(
cpi->mb.nmvjointcost,
cpi->mb.e_mbd.allow_high_precision_mv ?
cpi->mb.nmvcost_hp : cpi->mb.nmvcost,
&cpi->common.fc.nmvc,
cpi->mb.e_mbd.allow_high_precision_mv, 1, 1);
-#endif
}
}
@@ -403,19 +401,6 @@ void vp8_auto_select_speed(VP8_COMP *cpi) {
milliseconds_for_compress = milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;
-#if 0
-
- if (0) {
- FILE *f;
-
- f = fopen("speed.stt", "a");
- fprintf(f, " %8ld %10ld %10ld %10ld\n",
- cpi->common.current_video_frame, cpi->Speed, milliseconds_for_compress, cpi->avg_pick_mode_time);
- fclose(f);
- }
-
-#endif
-
/*
// this is done during parameter valid check
if( cpi->oxcf.cpu_used > 16)
@@ -886,8 +871,6 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
VP8_COMMON *cm = &cpi->common;
MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
-#if CONFIG_TX_SELECT
-
MACROBLOCKD *xd = &x->e_mbd;
int can_skip = cm->mb_no_coeff_skip;
vp8_prob skip_prob = can_skip ? get_pred_prob(cm, xd, PRED_MBSKIP) : 128;
@@ -995,25 +978,6 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
else
txfm_cache[TX_MODE_SELECT] = rd4x4s < rd8x8s ? rd4x4s : rd8x8s;
-#else /* CONFIG_TX_SELECT */
-
- switch (cpi->common.txfm_mode) {
- case ALLOW_16X16:
- macro_block_yrd_16x16(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
- mbmi->txfm_size = TX_16X16;
- break;
- case ALLOW_8X8:
- macro_block_yrd_8x8(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
- mbmi->txfm_size = TX_8X8;
- break;
- default:
- case ONLY_4X4:
- macro_block_yrd_4x4(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
- mbmi->txfm_size = TX_4X4;
- break;
- }
-
-#endif /* CONFIG_TX_SELECT */
}
static void copy_predictor(unsigned char *dst, const unsigned char *predictor) {
@@ -1364,11 +1328,9 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
int64_t this_rd;
MACROBLOCKD *xd = &x->e_mbd;
-#if CONFIG_TX_SELECT
int i;
for (i = 0; i < NB_TXFM_MODES; i++)
txfm_cache[i] = INT64_MAX;
-#endif
// Y Search for 16x16 intra prediction mode
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
@@ -1411,7 +1373,6 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
*skippable = skip;
}
-#if CONFIG_TX_SELECT
for (i = 0; i < NB_TXFM_MODES; i++) {
int64_t adj_rd = this_rd + local_txfm_cache[i] -
local_txfm_cache[cpi->common.txfm_mode];
@@ -1419,7 +1380,6 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
txfm_cache[i] = adj_rd;
}
}
-#endif
#if CONFIG_COMP_INTRA_PRED
}
@@ -2585,7 +2545,6 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
/* 16 = n_blocks */
int_mv seg_mvs[16][MAX_REF_FRAMES - 1],
int64_t txfm_cache[NB_TXFM_MODES]) {
-#if CONFIG_TX_SELECT
int i, n, c = vp8_mbsplit_count[segmentation];
if (segmentation == PARTITIONING_4X4) {
@@ -2673,12 +2632,6 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
}
}
}
-#else
- rd_check_segment_txsize(cpi, x, bsi, segmentation,
- (segmentation == PARTITIONING_4X4 ||
- cpi->common.txfm_mode == ONLY_4X4) ? TX_4X4 : TX_8X8,
- NULL, NULL, NULL, seg_mvs);
-#endif
}
static __inline
@@ -3065,9 +3018,7 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
-#if CONFIG_NEWMVENTROPY
MV mv;
-#endif
if (mbmi->mode == SPLITMV) {
int i;
@@ -3075,7 +3026,6 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
for (i = 0; i < x->partition_info->count; i++) {
if (x->partition_info->bmi[i].mode == NEW4X4) {
if (x->e_mbd.allow_high_precision_mv) {
-#if CONFIG_NEWMVENTROPY
mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].mv.as_mv.col
@@ -3089,20 +3039,7 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
&cpi->NMVcount, 1);
}
-#else
- cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col)]++;
- if (mbmi->second_ref_frame) {
- cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col)]++;
- }
-#endif
} else {
-#if CONFIG_NEWMVENTROPY
mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].mv.as_mv.col
@@ -3116,24 +3053,11 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
&cpi->NMVcount, 0);
}
-#else
- cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row
- - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].mv.as_mv.col
- - best_ref_mv->as_mv.col) >> 1)]++;
- if (mbmi->second_ref_frame) {
- cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.row
- - second_best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.col
- - second_best_ref_mv->as_mv.col) >> 1)]++;
- }
-#endif
}
}
}
} else if (mbmi->mode == NEWMV) {
if (x->e_mbd.allow_high_precision_mv) {
-#if CONFIG_NEWMVENTROPY
mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
@@ -3142,20 +3066,7 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
}
-#else
- cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv[0].as_mv.row
- - best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv[0].as_mv.col
- - best_ref_mv->as_mv.col)]++;
- if (mbmi->second_ref_frame) {
- cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv[1].as_mv.row
- - second_best_ref_mv->as_mv.row)]++;
- cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv[1].as_mv.col
- - second_best_ref_mv->as_mv.col)]++;
- }
-#endif
} else {
-#if CONFIG_NEWMVENTROPY
mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
@@ -3164,18 +3075,6 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
}
-#else
- cpi->MVcount[0][mv_max + ((mbmi->mv[0].as_mv.row
- - best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max + ((mbmi->mv[0].as_mv.col
- - best_ref_mv->as_mv.col) >> 1)]++;
- if (mbmi->second_ref_frame) {
- cpi->MVcount[0][mv_max + ((mbmi->mv[1].as_mv.row
- - second_best_ref_mv->as_mv.row) >> 1)]++;
- cpi->MVcount[1][mv_max + ((mbmi->mv[1].as_mv.col
- - second_best_ref_mv->as_mv.col) >> 1)]++;
- }
-#endif
}
}
}
@@ -3349,9 +3248,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
int hybrid_pred_diff,
int64_t txfm_size_diff[NB_TXFM_MODES]) {
MACROBLOCKD *xd = &x->e_mbd;
-#if CONFIG_TX_SELECT
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
-#endif
// Take a snapshot of the coding context so it can be
// restored if we decide to encode this way
@@ -3371,9 +3268,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
ctx->comp_pred_diff = comp_pred_diff;
ctx->hybrid_pred_diff = hybrid_pred_diff;
-#if CONFIG_TX_SELECT
memcpy(ctx->txfm_rd_diff, txfm_size_diff, sizeof(ctx->txfm_rd_diff));
-#endif
}
static void inter_mode_cost(VP8_COMP *cpi, MACROBLOCK *x, int this_mode,
@@ -3776,11 +3671,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
}
break;
case I8X8_PRED: {
-#if CONFIG_TX_SELECT
int cost0 = vp8_cost_bit(cm->prob_tx[0], 0);
int cost1 = vp8_cost_bit(cm->prob_tx[0], 1);
int64_t tmp_rd_4x4s, tmp_rd_8x8s;
-#endif
int64_t tmp_rd_4x4, tmp_rd_8x8, tmp_rd;
int r4x4, tok4x4, d4x4, r8x8, tok8x8, d8x8;
mbmi->txfm_size = TX_4X4;
@@ -3802,7 +3695,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
txfm_cache[ONLY_4X4] = tmp_rd_4x4;
txfm_cache[ALLOW_8X8] = tmp_rd_8x8;
txfm_cache[ALLOW_16X16] = tmp_rd_8x8;
-#if CONFIG_TX_SELECT
tmp_rd_4x4s = tmp_rd_4x4 + RDCOST(x->rdmult, x->rddiv, cost0, 0);
tmp_rd_8x8s = tmp_rd_8x8 + RDCOST(x->rdmult, x->rddiv, cost1, 0);
txfm_cache[TX_MODE_SELECT] = tmp_rd_4x4s < tmp_rd_8x8s ? tmp_rd_4x4s : tmp_rd_8x8s;
@@ -3831,9 +3723,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mode8x8[1][3] = x->e_mbd.mode_info_context->bmi[10].as_mode.second;
#endif
}
- } else
-#endif
- if (cm->txfm_mode == ONLY_4X4) {
+ } else if (cm->txfm_mode == ONLY_4X4) {
rate = r4x4;
rate_y = tok4x4;
distortion = d4x4;
@@ -4348,11 +4238,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
(cpi->oxcf.arnr_max_frames == 0) &&
(best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
mbmi->mode = ZEROMV;
-#if CONFIG_TX_SELECT
if (cm->txfm_mode != TX_MODE_SELECT)
mbmi->txfm_size = cm->txfm_mode;
else
-#endif
mbmi->txfm_size = TX_16X16;
mbmi->ref_frame = ALTREF_FRAME;
mbmi->mv[0].as_int = 0;
@@ -4402,7 +4290,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
best_pred_diff[i] = best_rd - best_pred_rd[i];
}
-#if CONFIG_TX_SELECT
if (!x->skip) {
for (i = 0; i < NB_TXFM_MODES; i++) {
if (best_txfm_rd[i] == INT64_MAX)
@@ -4413,7 +4300,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
} else {
vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
}
-#endif
end:
store_coding_context(x, &x->mb_context[xd->mb_index], best_mode_index, &best_partition,
@@ -4544,10 +4430,8 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 1);
dist = dist16x16 + (distuv8x8 >> 2);
mbmi->txfm_size = txfm_size_16x16;
-#if CONFIG_TX_SELECT
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
-#endif
} else if (error8x8 > error16x16) {
if (error4x4 < error16x16) {
rate = rateuv;
@@ -4564,20 +4448,16 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
mbmi->mode = B_PRED;
mbmi->txfm_size = TX_4X4;
dist = dist4x4 + (distuv >> 2);
-#if CONFIG_TX_SELECT
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
-#endif
} else {
mbmi->txfm_size = txfm_size_16x16;
mbmi->mode = mode16x16;
rate = rate16x16 + rateuv8x8;
dist = dist16x16 + (distuv8x8 >> 2);
-#if CONFIG_TX_SELECT
for (i = 0; i < NB_TXFM_MODES; i++) {
x->mb_context[xd->mb_index].txfm_rd_diff[i] = error16x16 - txfm_cache[i];
}
-#endif
}
if (cpi->common.mb_no_coeff_skip)
rate += vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 0);
@@ -4597,10 +4477,8 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
mbmi->mode = B_PRED;
mbmi->txfm_size = TX_4X4;
dist = dist4x4 + (distuv >> 2);
-#if CONFIG_TX_SELECT
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
-#endif
} else {
// FIXME(rbultje) support transform-size selection
mbmi->mode = I8X8_PRED;
@@ -4608,10 +4486,8 @@ void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x,
set_i8x8_block_modes(x, mode8x8);
rate = rate8x8 + rateuv;
dist = dist8x8 + (distuv >> 2);
-#if CONFIG_TX_SELECT
memset(x->mb_context[xd->mb_index].txfm_rd_diff, 0,
sizeof(x->mb_context[xd->mb_index].txfm_rd_diff));
-#endif
}
if (cpi->common.mb_no_coeff_skip)
rate += vp8_cost_bit(get_pred_prob(cm, xd, PRED_MBSKIP), 0);
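With tx_select always on, the I8X8_PRED block and the TX_MODE_SELECT handling earlier in rdopt.c follow one pattern: each candidate transform size gets its rate/distortion score plus the rate of signalling that size (cost0/cost1 from cm->prob_tx), and the cheaper candidate is kept. The sketch below shows that selection with an assumed RD weighting; the real code uses the encoder's RDCOST macro, whose exact scaling is not shown in this diff.

/* Sketch of transform-size selection under TX_MODE_SELECT; rd_plus_rate()
 * approximates the RDCOST-style weighting with an assumed >>8 scaling. */
#include <stdint.h>

static int64_t rd_plus_rate(int64_t rd, int rdmult, int signal_rate) {
  return rd + (((int64_t)signal_rate * rdmult + 128) >> 8);  /* assumed scaling */
}

static int64_t pick_tx_mode_select(int64_t rd_4x4, int64_t rd_8x8,
                                   int rdmult,
                                   int cost_signal_4x4, int cost_signal_8x8) {
  const int64_t rd4 = rd_plus_rate(rd_4x4, rdmult, cost_signal_4x4);
  const int64_t rd8 = rd_plus_rate(rd_8x8, rdmult, cost_signal_8x8);
  return rd4 < rd8 ? rd4 : rd8;   /* cheaper size wins the selection bucket */
}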